diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 00000000..320442ba --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,23 @@ +## Summary + +- [ ] 简述本次改动目标与范围 + +## Changes + +- [ ] 列出核心改动点(代码/文档/配置) + +## Verification + +- [ ] 本地验证步骤已执行(请附关键命令/结果) + +## WS/TaskBus Contract Checklist(必填) + +- [ ] 本次是否涉及 WS/TaskBus(topic、publish/register、envelope、鉴权、tenant/trace)? +- [ ] 若涉及,是否同步更新主契约:`specs/004-eventbus-message-fabric/spec.md` +- [ ] 若涉及 WS 传输,是否同步更新:`specs/023-websocket-notify/spec.md` 与 `specs/023-websocket-notify/contracts/http-openapi.yaml` +- [ ] 是否运行一致性检查:`bash scripts/specs/check_ws_taskbus_contracts.sh` + +## Risks + +- [ ] 已评估兼容性/回滚影响 + diff --git a/.github/workflows/web-admin-api-response-shape.yml b/.github/workflows/web-admin-api-response-shape.yml new file mode 100644 index 00000000..0f3195b1 --- /dev/null +++ b/.github/workflows/web-admin-api-response-shape.yml @@ -0,0 +1,43 @@ +name: Web Admin API Response Shape + +on: + pull_request: + paths: + - 'web-admin/app/**' + - 'web-admin/scripts/check-api-response-shape.mjs' + - 'web-admin/package.json' + - '.github/workflows/web-admin-api-response-shape.yml' + push: + branches: + - main + paths: + - 'web-admin/app/**' + - 'web-admin/scripts/check-api-response-shape.mjs' + - 'web-admin/package.json' + - '.github/workflows/web-admin-api-response-shape.yml' + +jobs: + response-shape-check: + runs-on: ubuntu-latest + defaults: + run: + working-directory: web-admin + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'npm' + cache-dependency-path: web-admin/package-lock.json + + - name: Install dependencies + run: npm ci + + - name: Check API response shape + run: npm run check:api-response-shape + + - name: Build web-admin + run: npm run build diff --git a/.github/workflows/ws-taskbus-doc-consistency.yml 
b/.github/workflows/ws-taskbus-doc-consistency.yml new file mode 100644 index 00000000..f44d7a45 --- /dev/null +++ b/.github/workflows/ws-taskbus-doc-consistency.yml @@ -0,0 +1,35 @@ +name: WS TaskBus Doc Consistency + +on: + pull_request: + paths: + - 'specs/004-eventbus-message-fabric/**' + - 'specs/023-websocket-notify/**' + - 'scripts/specs/check_ws_taskbus_contracts.sh' + - '.github/workflows/ws-taskbus-doc-consistency.yml' + push: + branches: + - main + paths: + - 'specs/004-eventbus-message-fabric/**' + - 'specs/023-websocket-notify/**' + - 'scripts/specs/check_ws_taskbus_contracts.sh' + - '.github/workflows/ws-taskbus-doc-consistency.yml' + +jobs: + doc-consistency: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install ripgrep + run: | + if ! command -v rg >/dev/null 2>&1; then + sudo apt-get update + sudo apt-get install -y ripgrep + fi + + - name: Run WS/TaskBus contract consistency check + run: bash scripts/specs/check_ws_taskbus_contracts.sh + diff --git a/.gitignore b/.gitignore index 890a9d05..c6a72b3b 100644 --- a/.gitignore +++ b/.gitignore @@ -54,6 +54,7 @@ backend/etc/config.yaml # PowerX plugin runtime(整目录忽略) backend/plugins/ backend/.gopath +backend/.gotmp # Ignore compiled binary files and specific app directories # Binary file 'app' (compiled executable) @@ -87,3 +88,5 @@ storage/ .claude tmp +backend/reports/ +reports/ diff --git a/.specify/memory/api-naming.md b/.specify/memory/api-naming.md new file mode 100644 index 00000000..aebe1141 --- /dev/null +++ b/.specify/memory/api-naming.md @@ -0,0 +1,132 @@ +# PowerX API 命名与访问规范(全局) + +> 本文件定义 PowerX 平台所有 HTTP API 的路径前缀、用途边界、版本策略、鉴权与命名风格。适用于 CoreX 底座、插件框架、插件业务服务。 + +## 1. 
路径前缀与用途边界 + +### 1.1 公共访问域(对外/客户端) + +- **/api/v1/**:对外开放的稳定 API(OpenAPI 可暴露) + - 典型对象:租户端、开放平台、第三方客户端 + - 版本语义:语义化版本 v1 / v2 + +- **/api/**:兼容入口(老路径或内部自用),可作为路由代理或重定向到 /api/v1 + - 若 /api/v1 存在同名路径,优先迁移到 /api/v1 + +> **注意:APIPrefix 可配置**(`cfg.Server.APIPrefix`)。本文档使用 `/api` 作为默认示例,实际运行路径为 `/...`,常见取值:`/api` 或 `/api/v1`。 + +### 1.2 管理/后台域(管理端/控制台) + +- **/api/v1/admin/**:管理端 API(带管理权限) + - 典型对象:管理控制台、运营/内部管理系统 + - 必须带授权 token + +### 1.3 内部/宿主域(仅内部使用) + +- **/internal/**:宿主/插件内部调用入口(不对公网开放) + - 典型对象:PowerXPlugin Framework、CLI、宿主内部服务 + - **必须最小化暴露,不写入公开 OpenAPI** + - 允许与 /api/v1 同时存在,但用途必须明确区分 + +> 说明:已有历史文档/实现中使用 `/internal/*` 或 `/api/internal/*`,统一向 `/api/internal/*` 对齐。 + +--- + +## 2. 版本策略 + +- 稳定对外接口必须挂在 `/api/v1`,有破坏性变更时升级 `/api/v2` +- `/api/internal` 不承诺稳定版本,但变更需记录在变更日志 +- `/api` 仅作为兼容入口或内部路由代理,不建议新功能落地 + +--- + +## 3. 鉴权与租户透传 + +- **所有 `/api/v1/admin` 与 `/api/internal` 必须鉴权** +- 租户信息必须通过 token(JWT claims)或 `tenant_uuid` 字段解析,不接受遗留租户头注入。 +- 内部接口也需 tenant 校验,禁止跨租户调用 + +--- + +## 4. 命名风格 + +### 4.1 资源命名 + +- REST 资源采用名词复数: + - `/api/v1/admin/agents` + - `/api/v1/admin/knowledge-spaces` + +### 4.1.1 插件相关命名 + +- 管理端插件资源:`/api/v1/admin/plugins/*` + - 示例:`/api/v1/admin/plugins`、`/api/v1/admin/plugins/:id` +- 宿主内部插件资源:`/api/internal/plugins/*` + - 示例:`/api/internal/plugins/local/reload`、`/api/internal/plugins/environments/check` +- 插件发布/治理内部分发:`/api/internal/version/*`、`/api/internal/notify/*` +- 宿主模式插件前端入口(反代):`/_p//admin/` + - 示例:`/_p/com.powerx.helloworld/admin/intro` +- 宿主模式插件后端 API(反代):`/_p//api/` + - 示例:`/_p/com.powerx.helloworld/api/healthz` + +### 4.2 行为/动作 + +- 动作用 **子路径** 或 **操作端点**: + - `/api/v1/admin/agents/:id/activate` + - `/internal/ws-bus/publish` + +### 4.3 异步任务 + +- 提交任务:`POST /.../tasks` +- 查询任务:`GET /.../tasks/:taskId` + +--- + +## 5. OpenAPI / 合同要求 + +- `/api/v1` 与 `/api/v1/admin` 必须有 OpenAPI 文档 +- `/api/internal` 默认不在公开 OpenAPI 中暴露 +- 任何新增对外接口必须更新 specs/contracts + +--- + +## 6. 
日志 / 追踪 / 审计 + +- 对外与管理接口必须具备 trace_id +- `/api/internal` 必须记录 tenant/topic/trace_id(若涉及事件) + +--- + +## 7. 示例 + +### 7.1 对外 API + +``` +GET /api/v1/knowledge-spaces +``` + +### 7.2 管理端 API + +``` +POST /api/v1/admin/agents/test/connection +``` + +### 7.3 内部 API + +``` +POST /internal/ws-bus/publish +``` + +### 7.4 插件相关 API + +``` +GET /api/v1/admin/plugins +POST /api/internal/plugins/local/reload +GET /_p//admin/ +GET /_p//api/healthz +``` + +--- + +## 8. 变更记录 + +- 2026-02-03:首次定义 `/api/internal` 作为宿主/插件内部 API 前缀 diff --git a/.specify/memory/constitution.md b/.specify/memory/constitution.md index 491b85a7..0b80e7c2 100644 --- a/.specify/memory/constitution.md +++ b/.specify/memory/constitution.md @@ -6,12 +6,14 @@ manifest: .specify/memory/manifest.yaml use: - "@dev-crud-http" - "@dev-crud-grpc" + - "@api-naming" # ③ 指南文件(用于 /plan 语义扩展) include: - dev_crud_http_guides.md - dev_crud_grpc_guides.md - dev_sts_guides.md + - api-naming.md # ④ Ruleset Paths(显式暴露以便 Runner 能读取) rulesets: @@ -63,6 +65,7 @@ If a runner does not natively support `manifest.yaml`, it must treat this sectio > 领域实体说明,因为gorm即定义了model,也可以作为领域的实体使用,不需要反复定义,所以基本上都是在pkg/corex/db/persistence/model/... 
- **工具复用(新增)**:凡属通用的转换/JSON/随机/字符串处理等辅助函数,必须集中在 `backend/pkg/utils` 对应模块(如 `xform.go`、`json.go`、`xfind.go` 等),严禁在业务目录重复定义;如遇缺失,应先扩展 utils 模块,再在业务代码中引用。 +- **配置文件保护(新增)**:未经用户明确允许,不得修改 `backend/etc/config.yaml`(包括创建、覆盖或清空)。 - **命名规范(新增)**:CoreX 域目录名称一律使用 `snake_case`,以 `capability_registry`、`media_storage` 为例;禁止拼接式命名如 `capabilityregistry`,确保与 Go 包名区分且在跨语言环境保持一致。 - **Go 包别名/调用命名**:引用 `capability_registry` 等多词包时,import alias、局部变量与导出符号统一使用小驼峰(如 `capabilityRegistry`、`capRegPolicy`),避免 `capregpolicy`、`capabilityregistry` 这类连续小写写法。示例:`capabilityRegistry "github.com/ArtisanCloud/PowerX/internal/service/capability_registry/registry"`,通过 `capabilityRegistry.Migrate()`、`capRegPolicy.Register()` 等方式调用以保持可读性。 - **数据访问角色划分(新增)**:`repository` 负责具体持久化实现(GORM/SQL/Redis/MinIO 等),需落在 `pkg/corex/db/persistence/repository/**` 并处理事务/SQL;`interface` 用于 service 层声明所需的数据契约,便于替换实现、注入缓存/内存替身与编写单元测试。Service/handler/任务脚本必须依赖这些接口而非具体 repository,实现切换仅在依赖注入层完成,且 repository 内禁止承载业务逻辑。 @@ -247,6 +250,7 @@ Any plan missing the above gates is **invalid** and fails constitutional complia - `pkg/event_bus` 定位为**基础设施层**的发布/订阅抽象(`Publish`、`Subscribe`、`Close`),负责把事件从发布方送到订阅方,不包含主题治理、ACL、重试、死信或回放等业务语义。 - `internal/service/event_fabric/*` 是**领域编排层**,需在 CoreX 事件骨干中完成 Topic 目录、租户 ACL、可靠投递、DLQ、回放、审计等用例,并可组合底层 `pkg/event_bus` 等设施。 - 任何计划/实现不得混淆两者职责:领域服务依赖或扩展基础设施,但禁止在基础设施层堆叠领域逻辑,也不得绕过领域服务直接宣称满足事件骨干需求。 +- **实时状态更新强制规范**:Web 管理端涉及任务状态、回放状态、队列执行进度等“实时数据”时,必须走 WebSocket/SSE 推送链路;禁止在页面实现定时轮询(polling)作为主方案。若推送链路不可用,只允许短时人工诊断接口,不得固化为前端常驻轮询逻辑。 --- diff --git a/.specify/memory/dev_crud_grpc_guides.md b/.specify/memory/dev_crud_grpc_guides.md index 12869979..b65783df 100644 --- a/.specify/memory/dev_crud_grpc_guides.md +++ b/.specify/memory/dev_crud_grpc_guides.md @@ -66,7 +66,7 @@ internal/transport/grpc/ ### 5.1 绑定/校验 -* **tenant 提取**:优先读 `RequestContext.tenant_id`,其次从 Metadata 头 `x-powerx-tenant-id|tenant-id|x-tenant-id` 兜底。 +* **tenant 提取**:优先读 `RequestContext.tenant_id`,其次仅从 Metadata `tenant-id` 兜底(不接受任何 `x-powerx-*` 
遗留租户头)。 你的 `tenantIDFrom()` 已经实现这一落地逻辑。 * **分页映射**:`PageRequest(offset,page_size)` → `(page,size)` 的换算统一用工具函数(如 `pageFrom()`)。 * **错误回包**:Meta 中返回 `code/message/request_id`,与你的 `okMeta/badMeta` 一致(见 `member_handler.go`/`team_handler.go` 调用)。 @@ -140,4 +140,3 @@ internal/transport/grpc/ * [ ] **与 HTTP 等价**:同一用例在 HTTP 与 gRPC 的语义、错误、分页完全可对照。 --- - diff --git a/.specify/memory/manifest.yaml b/.specify/memory/manifest.yaml index ad0f74e2..5f941814 100644 --- a/.specify/memory/manifest.yaml +++ b/.specify/memory/manifest.yaml @@ -40,9 +40,14 @@ aliases: - dev_crud_http_guides.md - dev_crud_grpc_guides.md - dev_sts_guides.md + - api-naming.md rulesets: - rulesets/crud_http.yaml - rulesets/crud_grpc.yaml - rulesets/sts.yaml # 以及两边共用/各自特有的明细 ruleset + # API 命名规范(单独引用也可) + api-naming: + include: + - api-naming.md diff --git a/.specify/memory/rulesets/crud/frontend/nuxt_components.yaml b/.specify/memory/rulesets/crud/frontend/nuxt_components.yaml index 1024c991..ed96f63c 100644 --- a/.specify/memory/rulesets/crud/frontend/nuxt_components.yaml +++ b/.specify/memory/rulesets/crud/frontend/nuxt_components.yaml @@ -38,6 +38,9 @@ nuxt_ui: preferUModal: true paddingClass: "p-4 sm:p-5" width: "max-w-3xl w-full" + notes: + - 如需更宽的编辑表单,可覆盖 `ui.content`(例如 `'max-w-6xl w-[90vw] mx-auto'`)而不是直接设置 inline 样式,保持组件一致性。 + - 需要弹层交互的表单/内容必须放在 UModal 的 `#body` 内,由 `v-model:open` 控制显隐;不要把表单直接渲染在页面上再“看起来像弹层”。 close: useCloseProp: true preventClose: true diff --git a/AGENTS.md b/AGENTS.md index fddcec74..b9b8fe2b 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -5,6 +5,8 @@ Auto-generated from all feature plans. 
Last updated: 2025-11-05 ## Active Technologies - Go 1.24(backend 单体,Buf toolchain) + Gin HTTP 栈、google.golang.org/grpc、Buf、GORM、Redis、PostgreSQL、EventBus、OpenTelemetry、px-plugin CLI (007-integration-gateway-and-mcp) - PostgreSQL(CapabilityRecord, CapabilitySyncJob, InvocationTrace)、Redis(Capability cache、ToolStore、RateLimit、SelectorPolicySnapshot)、MinIO/S3(插件 workflow/composite 资产引用,仅存 URI) (007-integration-gateway-and-mcp) +- Go 1.24(backend),Node 20 + Nuxt 4(web-admin) + Gin HTTP 栈、gorilla/websocket、Pinia、Nuxt UI (012-websocket-docs-plan) +- PostgreSQL(ai_model_profiles/knowledge_*),Redis(现有队列/缓存) (012-websocket-docs-plan) - Go 1.24(backend),Node 20(Web Admin 热更新面板),Go 1.21(px-plugin CLI) + Gin HTTP 栈、google.golang.org/grpc、Buf toolchain、GORM + PostgreSQL、Redis(队列与 Feature Flag)、MinIO/S3 SDK(离线包存储)、OpenTelemetry + Prometheus Exporter、PowerX CLI (`powerx`, `px-plugin`) (001-install-plugin-pxp) - Go 1.24 (backend services, CLIs), Node 20 (validation scripts), Go 1.21 (px-plugin CLI) + Gin HTTP stack, google.golang.org/grpc, Buf toolchain, GORM + PostgreSQL, Redis, MinIO/S3 SDK, OpenTelemetry + Prometheus exporters (010-agent-model-setting) @@ -36,10 +38,10 @@ tests/ Go 1.24(backend),Node 20(Web Admin 热更新面板),Go 1.21(px-plugin CLI): Follow standard conventions ## Recent Changes +- 012-websocket-docs-plan: Added Go 1.24(backend),Node 20 + Nuxt 4(web-admin) + Gin HTTP 栈、gorilla/websocket、Pinia、Nuxt UI - 007-integration-gateway-and-mcp: Added Go 1.24(backend 单体,Buf toolchain) + Gin HTTP 栈、google.golang.org/grpc、Buf、GORM、Redis、PostgreSQL、EventBus、OpenTelemetry、px-plugin CLI - 007-integration-gateway-and-mcp: Added [if applicable, e.g., PostgreSQL, CoreData, files or N/A] -- 011-docs-use-cases: Added Go 1.24 (backend services, CLIs); Node 20 + Nuxt 4 (Vue 3 Web Admin) + Gin HTTP stack, google.golang.org/grpc (Buf toolchain), GORM, Redis, PostgreSQL, MinIO/S3 SDK, OpenTelemetry, PowerX CLI, Nuxt 4, Vue 3, Pinia, Nuxt UI, VueUse, Playwright, Vites Always respond in 
Chinese-simplified diff --git a/README.md b/README.md index 8097b343..a77d8f4c 100644 --- a/README.md +++ b/README.md @@ -112,6 +112,7 @@ PowerX 内置四类前端壳,共享 **统一契约**,SDK 自动生成,支 - **🛠️ [运维手册](https://powerx.artisan-cloud.com/operations)** - 监控、备份、升级 - **📘 [Knowledge Space Quickstart](specs/011-knowledge-space/quickstart.md)** - 端到端创建/入库/融合/反馈示例 - **🧯 [Knowledge Space Runbook](docs/guides/knowledge_space/runbook.md)** - 入库/融合/反馈故障处理与脚本 +- **🧭 [Knowledge Space UI Guide](docs/guides/knowledge_space/ui_guide.md)** - 管理台界面操作(含“场景→策略包/Corpus Check/OCR 提示/Playground”) - **📊 [Perf & Resiliency Validation](docs/guides/knowledge_space/perf_validation.md)** - 压测/降级/反馈风暴验证 - **✅ [Smoke Checklist](docs/guides/knowledge_space/smoke_checklist.md)** - 发布前的冒烟检查表 diff --git a/backend/api/grpc/contracts/powerx/agent/v1/agent_api.proto b/backend/api/grpc/contracts/powerx/agent/v1/agent_api.proto new file mode 100644 index 00000000..fea2ca98 --- /dev/null +++ b/backend/api/grpc/contracts/powerx/agent/v1/agent_api.proto @@ -0,0 +1,77 @@ +syntax = "proto3"; + +package powerx.agent.v1; + +option go_package = "github.com/ArtisanCloud/PowerX/api/grpc/gen/go/powerx/agent/v1;agentv1"; +option java_package = "com.powerx.agent.v1"; +option java_multiple_files = true; +option csharp_namespace = "PowerX.Agent.V1"; +option php_namespace = "PowerX\\Agent\\V1"; +option php_metadata_namespace = "GPBMetadata\\PowerX\\Agent\\V1"; + +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "common/v1/context.proto"; + +service AgentInvokeService { + rpc Invoke(InvokeRequest) returns (InvokeResponse); +} + +service AgentSessionService { + rpc CreateSession(CreateSessionRequest) returns (CreateSessionResponse); + rpc ListMessages(ListMessagesRequest) returns (ListMessagesResponse); +} + +message Attachment { + string type = 1; // image_url | audio_url | file_url + string url = 2; +} + +message Usage { + uint32 prompt_tokens = 1; + uint32 completion_tokens = 2; + uint32 
total_tokens = 3; +} + +message InvokeRequest { + common.v1.RequestContext ctx = 1; + string agent_id = 2; + string session_id = 3; + string message = 4; + repeated Attachment attachments = 5; + google.protobuf.Struct meta = 6; +} + +message InvokeResponse { + string session_id = 1; + string agent_id = 2; + string reply = 3; + Usage usage = 4; +} + +message CreateSessionRequest { + common.v1.RequestContext ctx = 1; + string agent_id = 2; + string title = 3; +} + +message CreateSessionResponse { + string session_id = 1; +} + +message MessageItem { + string role = 1; // system|user|assistant|tool + google.protobuf.Struct content = 2; + google.protobuf.Timestamp created_at = 3; +} + +message ListMessagesRequest { + common.v1.RequestContext ctx = 1; + string session_id = 2; + uint32 page = 3; + uint32 page_size = 4; +} + +message ListMessagesResponse { + repeated MessageItem items = 1; +} diff --git a/backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto b/backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto new file mode 100644 index 00000000..490c333a --- /dev/null +++ b/backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto @@ -0,0 +1,94 @@ +syntax = "proto3"; + +package powerx.ai.v1; + +option go_package = "github.com/ArtisanCloud/PowerX/api/grpc/gen/go/powerx/ai/v1;aiv1"; +option java_package = "com.powerx.ai.v1"; +option java_multiple_files = true; +option csharp_namespace = "PowerX.AI.V1"; +option php_namespace = "PowerX\\AI\\V1"; +option php_metadata_namespace = "GPBMetadata\\PowerX\\AI\\V1"; + +import "google/protobuf/struct.proto"; +import "common/v1/context.proto"; + +service MultimodalService { + rpc Invoke(InvokeRequest) returns (InvokeResponse); + rpc Stream(StreamRequest) returns (stream StreamResponse); +} + +service MultimodalSessionService { + rpc CreateSession(CreateSessionRequest) returns (CreateSessionResponse); + rpc AppendMessage(AppendMessageRequest) returns (AppendMessageResponse); +} + +service EmbeddingService { + rpc 
Embed(EmbedRequest) returns (EmbedResponse); +} + +message ContentItem { + string type = 1; // text|image_url|audio_url + string text = 2; + string url = 3; +} + +message InvokeRequest { + common.v1.RequestContext ctx = 1; + string modality = 2; // text|image|audio|video|embedding|mixed + string model_key = 3; + repeated ContentItem inputs = 4; + google.protobuf.Struct params = 5; +} + +message InvokeResponse { + google.protobuf.Struct output = 1; + google.protobuf.Struct usage = 2; +} + +message StreamRequest { + common.v1.RequestContext ctx = 1; + string session_id = 2; +} + +message StreamResponse { + string type = 1; // start|token|final|end|error + google.protobuf.Struct payload = 2; +} + +message CreateSessionRequest { + common.v1.RequestContext ctx = 1; + string model_key = 2; + string title = 3; +} + +message CreateSessionResponse { + string session_id = 1; +} + +message AppendMessageRequest { + common.v1.RequestContext ctx = 1; + string session_id = 2; + string role = 3; // system|user|assistant|tool + repeated ContentItem content = 4; + repeated google.protobuf.Struct tool_calls = 5; + repeated google.protobuf.Struct tool_results = 6; +} + +message AppendMessageResponse { + bool ok = 1; +} + +message EmbedRequest { + common.v1.RequestContext ctx = 1; + string model_key = 2; + repeated string inputs = 3; +} + +message EmbedResponse { + repeated VectorItem vectors = 1; + google.protobuf.Struct usage = 2; +} + +message VectorItem { + repeated float values = 1; +} diff --git a/backend/api/grpc/contracts/powerx/knowledge/v1/knowledge_space.proto b/backend/api/grpc/contracts/powerx/knowledge/v1/knowledge_space.proto index cecba8bc..5dbde948 100644 --- a/backend/api/grpc/contracts/powerx/knowledge/v1/knowledge_space.proto +++ b/backend/api/grpc/contracts/powerx/knowledge/v1/knowledge_space.proto @@ -4,6 +4,7 @@ package powerx.knowledge.v1; option go_package = "github.com/ArtisanCloud/PowerX/api/grpc/gen/go/powerx/knowledge/v1;knowledgev1"; +import 
"google/protobuf/struct.proto"; import "google/protobuf/timestamp.proto"; message KnowledgeSpace { @@ -63,6 +64,11 @@ message IngestionJobRequest { string priority = 4; string source_uri = 5; string masking_profile = 6; + string format = 7; + string ingestion_profile = 8; + string processor_profile = 9; + bool ocr_required = 10; + string requested_by = 11; } message IngestionJobStatus { @@ -72,6 +78,9 @@ message IngestionJobStatus { float chunk_covered_pct = 4; float embedding_success_pct = 5; float masking_coverage_pct = 6; + uint32 retry_count = 7; + string error_code = 8; + string blocked_reason = 9; } message IngestionJobResponse { @@ -124,6 +133,7 @@ message FusionStrategy { string conflict_policy = 8; DeploymentState deployment_state = 9; google.protobuf.Timestamp published_at = 10; + repeated string degrade_reasons = 11; } message FeedbackRequest { @@ -149,6 +159,45 @@ message ListFeedbackCasesResponse { repeated FeedbackCase cases = 1; } +message CloseFeedbackCaseRequest { + string space_id = 1; + string case_id = 2; + string requested_by = 3; + string resolution_notes = 4; +} + +message EscalateFeedbackCaseRequest { + string space_id = 1; + string case_id = 2; + string requested_by = 3; + string reason = 4; +} + +message ReprocessFeedbackCaseRequest { + string space_id = 1; + string case_id = 2; + string requested_by = 3; +} + +message RollbackFeedbackCaseRequest { + string space_id = 1; + string case_id = 2; + string requested_by = 3; + string reason = 4; +} + +message ExportFeedbackCasesRequest { + string space_id = 1; + string status = 2; + string severity = 3; + uint32 limit = 4; +} + +message ExportFeedbackCasesResponse { + repeated FeedbackCase cases = 1; + string export_json = 2; +} + message FeedbackCase { string case_id = 1; string space_id = 2; @@ -163,6 +212,9 @@ message FeedbackCase { google.protobuf.Timestamp sla_due_at = 11; google.protobuf.Timestamp created_at = 12; google.protobuf.Timestamp updated_at = 13; + google.protobuf.Timestamp 
escalated_at = 14; + google.protobuf.Timestamp closed_at = 15; + string resolution_notes = 16; } message DeltaJob { @@ -375,6 +427,11 @@ service KnowledgeSpaceAdminService { rpc RollbackFusionStrategy(RollbackFusionStrategyRequest) returns (FusionStrategyResponse); rpc SubmitFeedback(FeedbackRequest) returns (FeedbackResponse); rpc ListFeedbackCases(ListFeedbackCasesRequest) returns (ListFeedbackCasesResponse); + rpc CloseFeedbackCase(CloseFeedbackCaseRequest) returns (FeedbackResponse); + rpc EscalateFeedbackCase(EscalateFeedbackCaseRequest) returns (FeedbackResponse); + rpc ReprocessFeedbackCase(ReprocessFeedbackCaseRequest) returns (FeedbackResponse); + rpc RollbackFeedbackCase(RollbackFeedbackCaseRequest) returns (FeedbackResponse); + rpc ExportFeedbackCases(ExportFeedbackCasesRequest) returns (ExportFeedbackCasesResponse); rpc StartDeltaJob(StartDeltaJobRequest) returns (StartDeltaJobResponse); rpc GetDeltaReport(GetDeltaReportRequest) returns (GetDeltaReportResponse); rpc PublishDeltaJob(PublishDeltaJobRequest) returns (PublishDeltaJobResponse); @@ -420,6 +477,13 @@ message QATelemetry { google.protobuf.Timestamp recorded_at = 2; } +message QAPlanStage { + string name = 1; + int32 candidate_count = 2; + int32 latency_ms = 3; + string degrade_reason = 4; +} + message QARetrievalPlanResponse { string tenant_uuid = 1; string intent = 2; @@ -430,6 +494,9 @@ message QARetrievalPlanResponse { int32 degrade_count = 7; string session_id = 8; int32 latency_budget_ms = 9; + repeated QAPlanStage stages = 10; + map policy_version_snapshot = 11; + google.protobuf.Struct metadata = 12; } message QAMemoryUpdate { @@ -446,6 +513,7 @@ message QAMemorySnapshotRequest { string tenant_uuid = 1; string session_id = 2; repeated QAMemoryUpdate updates = 3; + string trace_id = 4; } message QACitationSummary { @@ -462,6 +530,7 @@ message QAMemorySnapshotResponse { string tenant_uuid = 1; string session_id = 2; repeated QACitationSummary citations = 3; + google.protobuf.Struct 
metadata = 4; } service KnowledgeSpaceQABridgeService { diff --git a/backend/api/grpc/contracts/powerx/scheduler/v1/scheduler.proto b/backend/api/grpc/contracts/powerx/scheduler/v1/scheduler.proto new file mode 100644 index 00000000..baa5b1fb --- /dev/null +++ b/backend/api/grpc/contracts/powerx/scheduler/v1/scheduler.proto @@ -0,0 +1,45 @@ +syntax = "proto3"; + +package powerx.scheduler.v1; + +option go_package = "github.com/ArtisanCloud/PowerX/api/grpc/gen/go/powerx/scheduler/v1;schedulerv1"; + +message SchedulerJob { + string job_id = 1; + string tenant_uuid = 2; + string owner_type = 3; + string owner_id = 4; + string name = 5; + string schedule_type = 6; + string schedule_expr = 7; + string timezone = 8; + bytes payload_json = 9; + string status = 10; + string next_run_at = 11; + string last_run_at = 12; +} + +message CreateJobRequest { SchedulerJob job = 1; } +message UpdateJobRequest { SchedulerJob job = 1; } +message PauseJobRequest { string job_id = 1; string tenant_uuid = 2; } +message ResumeJobRequest { string job_id = 1; string tenant_uuid = 2; } +message TriggerJobRequest { string job_id = 1; string tenant_uuid = 2; } +message GetJobRequest { string job_id = 1; string tenant_uuid = 2; } +message ListJobsRequest { string tenant_uuid = 1; int32 limit = 2; } +message CreateJobResponse { SchedulerJob job = 1; } +message UpdateJobResponse { SchedulerJob job = 1; } +message PauseJobResponse { SchedulerJob job = 1; } +message ResumeJobResponse { SchedulerJob job = 1; } +message TriggerJobResponse { SchedulerJob job = 1; } +message GetJobResponse { SchedulerJob job = 1; } +message ListJobsResponse { repeated SchedulerJob jobs = 1; } + +service SchedulerService { + rpc CreateJob(CreateJobRequest) returns (CreateJobResponse); + rpc UpdateJob(UpdateJobRequest) returns (UpdateJobResponse); + rpc PauseJob(PauseJobRequest) returns (PauseJobResponse); + rpc ResumeJob(ResumeJobRequest) returns (ResumeJobResponse); + rpc TriggerJob(TriggerJobRequest) returns 
(TriggerJobResponse); + rpc GetJob(GetJobRequest) returns (GetJobResponse); + rpc ListJobs(ListJobsRequest) returns (ListJobsResponse); +} diff --git a/backend/api/grpc/gen/go/powerx/agent/v1/agent_api.pb.go b/backend/api/grpc/gen/go/powerx/agent/v1/agent_api.pb.go new file mode 100644 index 00000000..78caf9d1 --- /dev/null +++ b/backend/api/grpc/gen/go/powerx/agent/v1/agent_api.pb.go @@ -0,0 +1,756 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.4 +// protoc (unknown) +// source: powerx/agent/v1/agent_api.proto + +package agentv1 + +import ( + v1 "github.com/ArtisanCloud/PowerX/api/grpc/gen/go/common/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Attachment struct { + state protoimpl.MessageState `protogen:"open.v1"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // image_url | audio_url | file_url + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Attachment) Reset() { + *x = Attachment{} + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Attachment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Attachment) ProtoMessage() {} + +func (x *Attachment) ProtoReflect() protoreflect.Message { + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Attachment.ProtoReflect.Descriptor instead. 
+func (*Attachment) Descriptor() ([]byte, []int) { + return file_powerx_agent_v1_agent_api_proto_rawDescGZIP(), []int{0} +} + +func (x *Attachment) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Attachment) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +type Usage struct { + state protoimpl.MessageState `protogen:"open.v1"` + PromptTokens uint32 `protobuf:"varint,1,opt,name=prompt_tokens,json=promptTokens,proto3" json:"prompt_tokens,omitempty"` + CompletionTokens uint32 `protobuf:"varint,2,opt,name=completion_tokens,json=completionTokens,proto3" json:"completion_tokens,omitempty"` + TotalTokens uint32 `protobuf:"varint,3,opt,name=total_tokens,json=totalTokens,proto3" json:"total_tokens,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Usage) Reset() { + *x = Usage{} + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Usage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Usage) ProtoMessage() {} + +func (x *Usage) ProtoReflect() protoreflect.Message { + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Usage.ProtoReflect.Descriptor instead. 
+func (*Usage) Descriptor() ([]byte, []int) { + return file_powerx_agent_v1_agent_api_proto_rawDescGZIP(), []int{1} +} + +func (x *Usage) GetPromptTokens() uint32 { + if x != nil { + return x.PromptTokens + } + return 0 +} + +func (x *Usage) GetCompletionTokens() uint32 { + if x != nil { + return x.CompletionTokens + } + return 0 +} + +func (x *Usage) GetTotalTokens() uint32 { + if x != nil { + return x.TotalTokens + } + return 0 +} + +type InvokeRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Ctx *v1.RequestContext `protobuf:"bytes,1,opt,name=ctx,proto3" json:"ctx,omitempty"` + AgentId string `protobuf:"bytes,2,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"` + SessionId string `protobuf:"bytes,3,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` + Attachments []*Attachment `protobuf:"bytes,5,rep,name=attachments,proto3" json:"attachments,omitempty"` + Meta *structpb.Struct `protobuf:"bytes,6,opt,name=meta,proto3" json:"meta,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InvokeRequest) Reset() { + *x = InvokeRequest{} + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InvokeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvokeRequest) ProtoMessage() {} + +func (x *InvokeRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvokeRequest.ProtoReflect.Descriptor instead. 
+func (*InvokeRequest) Descriptor() ([]byte, []int) { + return file_powerx_agent_v1_agent_api_proto_rawDescGZIP(), []int{2} +} + +func (x *InvokeRequest) GetCtx() *v1.RequestContext { + if x != nil { + return x.Ctx + } + return nil +} + +func (x *InvokeRequest) GetAgentId() string { + if x != nil { + return x.AgentId + } + return "" +} + +func (x *InvokeRequest) GetSessionId() string { + if x != nil { + return x.SessionId + } + return "" +} + +func (x *InvokeRequest) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *InvokeRequest) GetAttachments() []*Attachment { + if x != nil { + return x.Attachments + } + return nil +} + +func (x *InvokeRequest) GetMeta() *structpb.Struct { + if x != nil { + return x.Meta + } + return nil +} + +type InvokeResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + SessionId string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + AgentId string `protobuf:"bytes,2,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"` + Reply string `protobuf:"bytes,3,opt,name=reply,proto3" json:"reply,omitempty"` + Usage *Usage `protobuf:"bytes,4,opt,name=usage,proto3" json:"usage,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InvokeResponse) Reset() { + *x = InvokeResponse{} + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InvokeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvokeResponse) ProtoMessage() {} + +func (x *InvokeResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
InvokeResponse.ProtoReflect.Descriptor instead. +func (*InvokeResponse) Descriptor() ([]byte, []int) { + return file_powerx_agent_v1_agent_api_proto_rawDescGZIP(), []int{3} +} + +func (x *InvokeResponse) GetSessionId() string { + if x != nil { + return x.SessionId + } + return "" +} + +func (x *InvokeResponse) GetAgentId() string { + if x != nil { + return x.AgentId + } + return "" +} + +func (x *InvokeResponse) GetReply() string { + if x != nil { + return x.Reply + } + return "" +} + +func (x *InvokeResponse) GetUsage() *Usage { + if x != nil { + return x.Usage + } + return nil +} + +type CreateSessionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Ctx *v1.RequestContext `protobuf:"bytes,1,opt,name=ctx,proto3" json:"ctx,omitempty"` + AgentId string `protobuf:"bytes,2,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"` + Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateSessionRequest) Reset() { + *x = CreateSessionRequest{} + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSessionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSessionRequest) ProtoMessage() {} + +func (x *CreateSessionRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSessionRequest.ProtoReflect.Descriptor instead. 
+func (*CreateSessionRequest) Descriptor() ([]byte, []int) { + return file_powerx_agent_v1_agent_api_proto_rawDescGZIP(), []int{4} +} + +func (x *CreateSessionRequest) GetCtx() *v1.RequestContext { + if x != nil { + return x.Ctx + } + return nil +} + +func (x *CreateSessionRequest) GetAgentId() string { + if x != nil { + return x.AgentId + } + return "" +} + +func (x *CreateSessionRequest) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +type CreateSessionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + SessionId string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateSessionResponse) Reset() { + *x = CreateSessionResponse{} + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSessionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSessionResponse) ProtoMessage() {} + +func (x *CreateSessionResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSessionResponse.ProtoReflect.Descriptor instead. 
+func (*CreateSessionResponse) Descriptor() ([]byte, []int) { + return file_powerx_agent_v1_agent_api_proto_rawDescGZIP(), []int{5} +} + +func (x *CreateSessionResponse) GetSessionId() string { + if x != nil { + return x.SessionId + } + return "" +} + +type MessageItem struct { + state protoimpl.MessageState `protogen:"open.v1"` + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` // system|user|assistant|tool + Content *structpb.Struct `protobuf:"bytes,2,opt,name=content,proto3" json:"content,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageItem) Reset() { + *x = MessageItem{} + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageItem) ProtoMessage() {} + +func (x *MessageItem) ProtoReflect() protoreflect.Message { + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageItem.ProtoReflect.Descriptor instead. 
+func (*MessageItem) Descriptor() ([]byte, []int) { + return file_powerx_agent_v1_agent_api_proto_rawDescGZIP(), []int{6} +} + +func (x *MessageItem) GetRole() string { + if x != nil { + return x.Role + } + return "" +} + +func (x *MessageItem) GetContent() *structpb.Struct { + if x != nil { + return x.Content + } + return nil +} + +func (x *MessageItem) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +type ListMessagesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Ctx *v1.RequestContext `protobuf:"bytes,1,opt,name=ctx,proto3" json:"ctx,omitempty"` + SessionId string `protobuf:"bytes,2,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + Page uint32 `protobuf:"varint,3,opt,name=page,proto3" json:"page,omitempty"` + PageSize uint32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListMessagesRequest) Reset() { + *x = ListMessagesRequest{} + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListMessagesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMessagesRequest) ProtoMessage() {} + +func (x *ListMessagesRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMessagesRequest.ProtoReflect.Descriptor instead. 
+func (*ListMessagesRequest) Descriptor() ([]byte, []int) { + return file_powerx_agent_v1_agent_api_proto_rawDescGZIP(), []int{7} +} + +func (x *ListMessagesRequest) GetCtx() *v1.RequestContext { + if x != nil { + return x.Ctx + } + return nil +} + +func (x *ListMessagesRequest) GetSessionId() string { + if x != nil { + return x.SessionId + } + return "" +} + +func (x *ListMessagesRequest) GetPage() uint32 { + if x != nil { + return x.Page + } + return 0 +} + +func (x *ListMessagesRequest) GetPageSize() uint32 { + if x != nil { + return x.PageSize + } + return 0 +} + +type ListMessagesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Items []*MessageItem `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListMessagesResponse) Reset() { + *x = ListMessagesResponse{} + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListMessagesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMessagesResponse) ProtoMessage() {} + +func (x *ListMessagesResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_agent_v1_agent_api_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMessagesResponse.ProtoReflect.Descriptor instead. 
+func (*ListMessagesResponse) Descriptor() ([]byte, []int) { + return file_powerx_agent_v1_agent_api_proto_rawDescGZIP(), []int{8} +} + +func (x *ListMessagesResponse) GetItems() []*MessageItem { + if x != nil { + return x.Items + } + return nil +} + +var File_powerx_agent_v1_agent_api_proto protoreflect.FileDescriptor + +var file_powerx_agent_v1_agent_api_proto_rawDesc = string([]byte{ + 0x0a, 0x1f, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x76, + 0x31, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0f, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, + 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x32, 0x0a, 0x0a, 0x41, 0x74, + 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, + 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x7c, + 0x0a, 0x05, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x6d, 0x70, + 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, + 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x2b, 0x0a, 0x11, + 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0d, 0x52, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x22, 0xfc, 0x01, 0x0a, + 0x0d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, + 0x0a, 0x03, 0x63, 0x74, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x03, 0x63, 0x74, 0x78, 0x12, 0x19, 0x0a, 0x08, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x3d, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, + 0x74, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, + 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0x8e, 0x01, 0x0a, 0x0e, + 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, + 0x08, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x70, 0x6c, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2c, + 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x22, 0x74, 0x0a, 0x14, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x63, 0x74, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x03, 0x63, 0x74, + 0x78, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, + 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, + 0x6c, 0x65, 0x22, 0x36, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x8f, 0x01, 0x0a, 0x0b, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, + 0x6c, 0x65, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x31, + 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x92, 0x01, 0x0a, + 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x63, 0x74, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x03, 0x63, 0x74, + 0x78, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, + 0x70, 0x61, 0x67, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x22, 0x4a, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x78, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 
0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x32, 0x5f, 0x0a, + 0x12, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x12, 0x1e, 0x2e, + 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, + 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xd2, + 0x01, 0x0a, 0x13, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, + 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5b, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x24, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x70, + 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 
0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x42, 0xca, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x77, 0x65, + 0x72, 0x78, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x41, 0x67, 0x65, + 0x6e, 0x74, 0x41, 0x70, 0x69, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x72, 0x74, 0x69, 0x73, 0x61, 0x6e, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x58, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x70, 0x6f, 0x77, + 0x65, 0x72, 0x78, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x3b, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x50, 0x41, 0x58, 0xaa, 0x02, 0x0f, 0x50, 0x6f, 0x77, + 0x65, 0x72, 0x78, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0f, 0x50, + 0x6f, 0x77, 0x65, 0x72, 0x78, 0x5c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x5c, 0x56, 0x31, 0xe2, 0x02, + 0x1b, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x5c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x5c, 0x56, 0x31, + 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x11, 0x50, + 0x6f, 0x77, 0x65, 0x72, 0x78, 0x3a, 0x3a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_powerx_agent_v1_agent_api_proto_rawDescOnce sync.Once + file_powerx_agent_v1_agent_api_proto_rawDescData []byte +) + +func file_powerx_agent_v1_agent_api_proto_rawDescGZIP() []byte { + file_powerx_agent_v1_agent_api_proto_rawDescOnce.Do(func() { + file_powerx_agent_v1_agent_api_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_powerx_agent_v1_agent_api_proto_rawDesc), len(file_powerx_agent_v1_agent_api_proto_rawDesc))) + }) + return file_powerx_agent_v1_agent_api_proto_rawDescData +} + +var file_powerx_agent_v1_agent_api_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var 
file_powerx_agent_v1_agent_api_proto_goTypes = []any{ + (*Attachment)(nil), // 0: powerx.agent.v1.Attachment + (*Usage)(nil), // 1: powerx.agent.v1.Usage + (*InvokeRequest)(nil), // 2: powerx.agent.v1.InvokeRequest + (*InvokeResponse)(nil), // 3: powerx.agent.v1.InvokeResponse + (*CreateSessionRequest)(nil), // 4: powerx.agent.v1.CreateSessionRequest + (*CreateSessionResponse)(nil), // 5: powerx.agent.v1.CreateSessionResponse + (*MessageItem)(nil), // 6: powerx.agent.v1.MessageItem + (*ListMessagesRequest)(nil), // 7: powerx.agent.v1.ListMessagesRequest + (*ListMessagesResponse)(nil), // 8: powerx.agent.v1.ListMessagesResponse + (*v1.RequestContext)(nil), // 9: common.v1.RequestContext + (*structpb.Struct)(nil), // 10: google.protobuf.Struct + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp +} +var file_powerx_agent_v1_agent_api_proto_depIdxs = []int32{ + 9, // 0: powerx.agent.v1.InvokeRequest.ctx:type_name -> common.v1.RequestContext + 0, // 1: powerx.agent.v1.InvokeRequest.attachments:type_name -> powerx.agent.v1.Attachment + 10, // 2: powerx.agent.v1.InvokeRequest.meta:type_name -> google.protobuf.Struct + 1, // 3: powerx.agent.v1.InvokeResponse.usage:type_name -> powerx.agent.v1.Usage + 9, // 4: powerx.agent.v1.CreateSessionRequest.ctx:type_name -> common.v1.RequestContext + 10, // 5: powerx.agent.v1.MessageItem.content:type_name -> google.protobuf.Struct + 11, // 6: powerx.agent.v1.MessageItem.created_at:type_name -> google.protobuf.Timestamp + 9, // 7: powerx.agent.v1.ListMessagesRequest.ctx:type_name -> common.v1.RequestContext + 6, // 8: powerx.agent.v1.ListMessagesResponse.items:type_name -> powerx.agent.v1.MessageItem + 2, // 9: powerx.agent.v1.AgentInvokeService.Invoke:input_type -> powerx.agent.v1.InvokeRequest + 4, // 10: powerx.agent.v1.AgentSessionService.CreateSession:input_type -> powerx.agent.v1.CreateSessionRequest + 7, // 11: powerx.agent.v1.AgentSessionService.ListMessages:input_type -> powerx.agent.v1.ListMessagesRequest + 3, 
// 12: powerx.agent.v1.AgentInvokeService.Invoke:output_type -> powerx.agent.v1.InvokeResponse + 5, // 13: powerx.agent.v1.AgentSessionService.CreateSession:output_type -> powerx.agent.v1.CreateSessionResponse + 8, // 14: powerx.agent.v1.AgentSessionService.ListMessages:output_type -> powerx.agent.v1.ListMessagesResponse + 12, // [12:15] is the sub-list for method output_type + 9, // [9:12] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_powerx_agent_v1_agent_api_proto_init() } +func file_powerx_agent_v1_agent_api_proto_init() { + if File_powerx_agent_v1_agent_api_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_powerx_agent_v1_agent_api_proto_rawDesc), len(file_powerx_agent_v1_agent_api_proto_rawDesc)), + NumEnums: 0, + NumMessages: 9, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_powerx_agent_v1_agent_api_proto_goTypes, + DependencyIndexes: file_powerx_agent_v1_agent_api_proto_depIdxs, + MessageInfos: file_powerx_agent_v1_agent_api_proto_msgTypes, + }.Build() + File_powerx_agent_v1_agent_api_proto = out.File + file_powerx_agent_v1_agent_api_proto_goTypes = nil + file_powerx_agent_v1_agent_api_proto_depIdxs = nil +} diff --git a/backend/api/grpc/gen/go/powerx/agent/v1/agent_api_grpc.pb.go b/backend/api/grpc/gen/go/powerx/agent/v1/agent_api_grpc.pb.go new file mode 100644 index 00000000..843cef36 --- /dev/null +++ b/backend/api/grpc/gen/go/powerx/agent/v1/agent_api_grpc.pb.go @@ -0,0 +1,261 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc (unknown) +// source: powerx/agent/v1/agent_api.proto + +package agentv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + AgentInvokeService_Invoke_FullMethodName = "/powerx.agent.v1.AgentInvokeService/Invoke" +) + +// AgentInvokeServiceClient is the client API for AgentInvokeService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type AgentInvokeServiceClient interface { + Invoke(ctx context.Context, in *InvokeRequest, opts ...grpc.CallOption) (*InvokeResponse, error) +} + +type agentInvokeServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewAgentInvokeServiceClient(cc grpc.ClientConnInterface) AgentInvokeServiceClient { + return &agentInvokeServiceClient{cc} +} + +func (c *agentInvokeServiceClient) Invoke(ctx context.Context, in *InvokeRequest, opts ...grpc.CallOption) (*InvokeResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(InvokeResponse) + err := c.cc.Invoke(ctx, AgentInvokeService_Invoke_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AgentInvokeServiceServer is the server API for AgentInvokeService service. +// All implementations must embed UnimplementedAgentInvokeServiceServer +// for forward compatibility. 
+type AgentInvokeServiceServer interface { + Invoke(context.Context, *InvokeRequest) (*InvokeResponse, error) + mustEmbedUnimplementedAgentInvokeServiceServer() +} + +// UnimplementedAgentInvokeServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedAgentInvokeServiceServer struct{} + +func (UnimplementedAgentInvokeServiceServer) Invoke(context.Context, *InvokeRequest) (*InvokeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Invoke not implemented") +} +func (UnimplementedAgentInvokeServiceServer) mustEmbedUnimplementedAgentInvokeServiceServer() {} +func (UnimplementedAgentInvokeServiceServer) testEmbeddedByValue() {} + +// UnsafeAgentInvokeServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to AgentInvokeServiceServer will +// result in compilation errors. +type UnsafeAgentInvokeServiceServer interface { + mustEmbedUnimplementedAgentInvokeServiceServer() +} + +func RegisterAgentInvokeServiceServer(s grpc.ServiceRegistrar, srv AgentInvokeServiceServer) { + // If the following call pancis, it indicates UnimplementedAgentInvokeServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&AgentInvokeService_ServiceDesc, srv) +} + +func _AgentInvokeService_Invoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InvokeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AgentInvokeServiceServer).Invoke(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: AgentInvokeService_Invoke_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AgentInvokeServiceServer).Invoke(ctx, req.(*InvokeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// AgentInvokeService_ServiceDesc is the grpc.ServiceDesc for AgentInvokeService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var AgentInvokeService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "powerx.agent.v1.AgentInvokeService", + HandlerType: (*AgentInvokeServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Invoke", + Handler: _AgentInvokeService_Invoke_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "powerx/agent/v1/agent_api.proto", +} + +const ( + AgentSessionService_CreateSession_FullMethodName = "/powerx.agent.v1.AgentSessionService/CreateSession" + AgentSessionService_ListMessages_FullMethodName = "/powerx.agent.v1.AgentSessionService/ListMessages" +) + +// AgentSessionServiceClient is the client API for AgentSessionService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type AgentSessionServiceClient interface { + CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*CreateSessionResponse, error) + ListMessages(ctx context.Context, in *ListMessagesRequest, opts ...grpc.CallOption) (*ListMessagesResponse, error) +} + +type agentSessionServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewAgentSessionServiceClient(cc grpc.ClientConnInterface) AgentSessionServiceClient { + return &agentSessionServiceClient{cc} +} + +func (c *agentSessionServiceClient) CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*CreateSessionResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CreateSessionResponse) + err := c.cc.Invoke(ctx, AgentSessionService_CreateSession_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *agentSessionServiceClient) ListMessages(ctx context.Context, in *ListMessagesRequest, opts ...grpc.CallOption) (*ListMessagesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListMessagesResponse) + err := c.cc.Invoke(ctx, AgentSessionService_ListMessages_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AgentSessionServiceServer is the server API for AgentSessionService service. +// All implementations must embed UnimplementedAgentSessionServiceServer +// for forward compatibility. +type AgentSessionServiceServer interface { + CreateSession(context.Context, *CreateSessionRequest) (*CreateSessionResponse, error) + ListMessages(context.Context, *ListMessagesRequest) (*ListMessagesResponse, error) + mustEmbedUnimplementedAgentSessionServiceServer() +} + +// UnimplementedAgentSessionServiceServer must be embedded to have +// forward compatible implementations. 
+// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedAgentSessionServiceServer struct{} + +func (UnimplementedAgentSessionServiceServer) CreateSession(context.Context, *CreateSessionRequest) (*CreateSessionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSession not implemented") +} +func (UnimplementedAgentSessionServiceServer) ListMessages(context.Context, *ListMessagesRequest) (*ListMessagesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListMessages not implemented") +} +func (UnimplementedAgentSessionServiceServer) mustEmbedUnimplementedAgentSessionServiceServer() {} +func (UnimplementedAgentSessionServiceServer) testEmbeddedByValue() {} + +// UnsafeAgentSessionServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to AgentSessionServiceServer will +// result in compilation errors. +type UnsafeAgentSessionServiceServer interface { + mustEmbedUnimplementedAgentSessionServiceServer() +} + +func RegisterAgentSessionServiceServer(s grpc.ServiceRegistrar, srv AgentSessionServiceServer) { + // If the following call pancis, it indicates UnimplementedAgentSessionServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&AgentSessionService_ServiceDesc, srv) +} + +func _AgentSessionService_CreateSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSessionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AgentSessionServiceServer).CreateSession(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: AgentSessionService_CreateSession_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AgentSessionServiceServer).CreateSession(ctx, req.(*CreateSessionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AgentSessionService_ListMessages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMessagesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AgentSessionServiceServer).ListMessages(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: AgentSessionService_ListMessages_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AgentSessionServiceServer).ListMessages(ctx, req.(*ListMessagesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// AgentSessionService_ServiceDesc is the grpc.ServiceDesc for AgentSessionService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var AgentSessionService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "powerx.agent.v1.AgentSessionService", + HandlerType: (*AgentSessionServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSession", + Handler: _AgentSessionService_CreateSession_Handler, + }, + { + MethodName: "ListMessages", + Handler: _AgentSessionService_ListMessages_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "powerx/agent/v1/agent_api.proto", +} diff --git a/backend/api/grpc/gen/go/powerx/ai/v1/multimodal.pb.go b/backend/api/grpc/gen/go/powerx/ai/v1/multimodal.pb.go new file mode 100644 index 00000000..5106a07b --- /dev/null +++ b/backend/api/grpc/gen/go/powerx/ai/v1/multimodal.pb.go @@ -0,0 +1,928 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.4 +// protoc (unknown) +// source: powerx/ai/v1/multimodal.proto + +package aiv1 + +import ( + v1 "github.com/ArtisanCloud/PowerX/api/grpc/gen/go/common/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ContentItem struct { + state protoimpl.MessageState `protogen:"open.v1"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // text|image_url|audio_url + Text string `protobuf:"bytes,2,opt,name=text,proto3" json:"text,omitempty"` + Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ContentItem) Reset() { + *x = ContentItem{} + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ContentItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContentItem) ProtoMessage() {} + +func (x *ContentItem) ProtoReflect() protoreflect.Message { + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContentItem.ProtoReflect.Descriptor instead. 
+func (*ContentItem) Descriptor() ([]byte, []int) { + return file_powerx_ai_v1_multimodal_proto_rawDescGZIP(), []int{0} +} + +func (x *ContentItem) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *ContentItem) GetText() string { + if x != nil { + return x.Text + } + return "" +} + +func (x *ContentItem) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +type InvokeRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Ctx *v1.RequestContext `protobuf:"bytes,1,opt,name=ctx,proto3" json:"ctx,omitempty"` + Modality string `protobuf:"bytes,2,opt,name=modality,proto3" json:"modality,omitempty"` // text|image|audio|video|embedding|mixed + ModelKey string `protobuf:"bytes,3,opt,name=model_key,json=modelKey,proto3" json:"model_key,omitempty"` + Inputs []*ContentItem `protobuf:"bytes,4,rep,name=inputs,proto3" json:"inputs,omitempty"` + Params *structpb.Struct `protobuf:"bytes,5,opt,name=params,proto3" json:"params,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InvokeRequest) Reset() { + *x = InvokeRequest{} + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InvokeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvokeRequest) ProtoMessage() {} + +func (x *InvokeRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvokeRequest.ProtoReflect.Descriptor instead. 
+func (*InvokeRequest) Descriptor() ([]byte, []int) { + return file_powerx_ai_v1_multimodal_proto_rawDescGZIP(), []int{1} +} + +func (x *InvokeRequest) GetCtx() *v1.RequestContext { + if x != nil { + return x.Ctx + } + return nil +} + +func (x *InvokeRequest) GetModality() string { + if x != nil { + return x.Modality + } + return "" +} + +func (x *InvokeRequest) GetModelKey() string { + if x != nil { + return x.ModelKey + } + return "" +} + +func (x *InvokeRequest) GetInputs() []*ContentItem { + if x != nil { + return x.Inputs + } + return nil +} + +func (x *InvokeRequest) GetParams() *structpb.Struct { + if x != nil { + return x.Params + } + return nil +} + +type InvokeResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Output *structpb.Struct `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` + Usage *structpb.Struct `protobuf:"bytes,2,opt,name=usage,proto3" json:"usage,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InvokeResponse) Reset() { + *x = InvokeResponse{} + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InvokeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvokeResponse) ProtoMessage() {} + +func (x *InvokeResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvokeResponse.ProtoReflect.Descriptor instead. 
+func (*InvokeResponse) Descriptor() ([]byte, []int) { + return file_powerx_ai_v1_multimodal_proto_rawDescGZIP(), []int{2} +} + +func (x *InvokeResponse) GetOutput() *structpb.Struct { + if x != nil { + return x.Output + } + return nil +} + +func (x *InvokeResponse) GetUsage() *structpb.Struct { + if x != nil { + return x.Usage + } + return nil +} + +type StreamRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Ctx *v1.RequestContext `protobuf:"bytes,1,opt,name=ctx,proto3" json:"ctx,omitempty"` + SessionId string `protobuf:"bytes,2,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StreamRequest) Reset() { + *x = StreamRequest{} + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamRequest) ProtoMessage() {} + +func (x *StreamRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamRequest.ProtoReflect.Descriptor instead. 
+func (*StreamRequest) Descriptor() ([]byte, []int) { + return file_powerx_ai_v1_multimodal_proto_rawDescGZIP(), []int{3} +} + +func (x *StreamRequest) GetCtx() *v1.RequestContext { + if x != nil { + return x.Ctx + } + return nil +} + +func (x *StreamRequest) GetSessionId() string { + if x != nil { + return x.SessionId + } + return "" +} + +type StreamResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // start|token|final|end|error + Payload *structpb.Struct `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StreamResponse) Reset() { + *x = StreamResponse{} + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamResponse) ProtoMessage() {} + +func (x *StreamResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamResponse.ProtoReflect.Descriptor instead. 
+func (*StreamResponse) Descriptor() ([]byte, []int) { + return file_powerx_ai_v1_multimodal_proto_rawDescGZIP(), []int{4} +} + +func (x *StreamResponse) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *StreamResponse) GetPayload() *structpb.Struct { + if x != nil { + return x.Payload + } + return nil +} + +type CreateSessionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Ctx *v1.RequestContext `protobuf:"bytes,1,opt,name=ctx,proto3" json:"ctx,omitempty"` + ModelKey string `protobuf:"bytes,2,opt,name=model_key,json=modelKey,proto3" json:"model_key,omitempty"` + Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateSessionRequest) Reset() { + *x = CreateSessionRequest{} + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSessionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSessionRequest) ProtoMessage() {} + +func (x *CreateSessionRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSessionRequest.ProtoReflect.Descriptor instead. 
+func (*CreateSessionRequest) Descriptor() ([]byte, []int) { + return file_powerx_ai_v1_multimodal_proto_rawDescGZIP(), []int{5} +} + +func (x *CreateSessionRequest) GetCtx() *v1.RequestContext { + if x != nil { + return x.Ctx + } + return nil +} + +func (x *CreateSessionRequest) GetModelKey() string { + if x != nil { + return x.ModelKey + } + return "" +} + +func (x *CreateSessionRequest) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +type CreateSessionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + SessionId string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateSessionResponse) Reset() { + *x = CreateSessionResponse{} + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSessionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSessionResponse) ProtoMessage() {} + +func (x *CreateSessionResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSessionResponse.ProtoReflect.Descriptor instead. 
+func (*CreateSessionResponse) Descriptor() ([]byte, []int) { + return file_powerx_ai_v1_multimodal_proto_rawDescGZIP(), []int{6} +} + +func (x *CreateSessionResponse) GetSessionId() string { + if x != nil { + return x.SessionId + } + return "" +} + +type AppendMessageRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Ctx *v1.RequestContext `protobuf:"bytes,1,opt,name=ctx,proto3" json:"ctx,omitempty"` + SessionId string `protobuf:"bytes,2,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + Role string `protobuf:"bytes,3,opt,name=role,proto3" json:"role,omitempty"` // system|user|assistant|tool + Content []*ContentItem `protobuf:"bytes,4,rep,name=content,proto3" json:"content,omitempty"` + ToolCalls []*structpb.Struct `protobuf:"bytes,5,rep,name=tool_calls,json=toolCalls,proto3" json:"tool_calls,omitempty"` + ToolResults []*structpb.Struct `protobuf:"bytes,6,rep,name=tool_results,json=toolResults,proto3" json:"tool_results,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AppendMessageRequest) Reset() { + *x = AppendMessageRequest{} + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AppendMessageRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppendMessageRequest) ProtoMessage() {} + +func (x *AppendMessageRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppendMessageRequest.ProtoReflect.Descriptor instead. 
+func (*AppendMessageRequest) Descriptor() ([]byte, []int) { + return file_powerx_ai_v1_multimodal_proto_rawDescGZIP(), []int{7} +} + +func (x *AppendMessageRequest) GetCtx() *v1.RequestContext { + if x != nil { + return x.Ctx + } + return nil +} + +func (x *AppendMessageRequest) GetSessionId() string { + if x != nil { + return x.SessionId + } + return "" +} + +func (x *AppendMessageRequest) GetRole() string { + if x != nil { + return x.Role + } + return "" +} + +func (x *AppendMessageRequest) GetContent() []*ContentItem { + if x != nil { + return x.Content + } + return nil +} + +func (x *AppendMessageRequest) GetToolCalls() []*structpb.Struct { + if x != nil { + return x.ToolCalls + } + return nil +} + +func (x *AppendMessageRequest) GetToolResults() []*structpb.Struct { + if x != nil { + return x.ToolResults + } + return nil +} + +type AppendMessageResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AppendMessageResponse) Reset() { + *x = AppendMessageResponse{} + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AppendMessageResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppendMessageResponse) ProtoMessage() {} + +func (x *AppendMessageResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppendMessageResponse.ProtoReflect.Descriptor instead. 
+func (*AppendMessageResponse) Descriptor() ([]byte, []int) { + return file_powerx_ai_v1_multimodal_proto_rawDescGZIP(), []int{8} +} + +func (x *AppendMessageResponse) GetOk() bool { + if x != nil { + return x.Ok + } + return false +} + +type EmbedRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Ctx *v1.RequestContext `protobuf:"bytes,1,opt,name=ctx,proto3" json:"ctx,omitempty"` + ModelKey string `protobuf:"bytes,2,opt,name=model_key,json=modelKey,proto3" json:"model_key,omitempty"` + Inputs []string `protobuf:"bytes,3,rep,name=inputs,proto3" json:"inputs,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EmbedRequest) Reset() { + *x = EmbedRequest{} + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EmbedRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EmbedRequest) ProtoMessage() {} + +func (x *EmbedRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EmbedRequest.ProtoReflect.Descriptor instead. 
+func (*EmbedRequest) Descriptor() ([]byte, []int) { + return file_powerx_ai_v1_multimodal_proto_rawDescGZIP(), []int{9} +} + +func (x *EmbedRequest) GetCtx() *v1.RequestContext { + if x != nil { + return x.Ctx + } + return nil +} + +func (x *EmbedRequest) GetModelKey() string { + if x != nil { + return x.ModelKey + } + return "" +} + +func (x *EmbedRequest) GetInputs() []string { + if x != nil { + return x.Inputs + } + return nil +} + +type EmbedResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Vectors []*VectorItem `protobuf:"bytes,1,rep,name=vectors,proto3" json:"vectors,omitempty"` + Usage *structpb.Struct `protobuf:"bytes,2,opt,name=usage,proto3" json:"usage,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EmbedResponse) Reset() { + *x = EmbedResponse{} + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EmbedResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EmbedResponse) ProtoMessage() {} + +func (x *EmbedResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EmbedResponse.ProtoReflect.Descriptor instead. 
+func (*EmbedResponse) Descriptor() ([]byte, []int) { + return file_powerx_ai_v1_multimodal_proto_rawDescGZIP(), []int{10} +} + +func (x *EmbedResponse) GetVectors() []*VectorItem { + if x != nil { + return x.Vectors + } + return nil +} + +func (x *EmbedResponse) GetUsage() *structpb.Struct { + if x != nil { + return x.Usage + } + return nil +} + +type VectorItem struct { + state protoimpl.MessageState `protogen:"open.v1"` + Values []float32 `protobuf:"fixed32,1,rep,packed,name=values,proto3" json:"values,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *VectorItem) Reset() { + *x = VectorItem{} + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *VectorItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VectorItem) ProtoMessage() {} + +func (x *VectorItem) ProtoReflect() protoreflect.Message { + mi := &file_powerx_ai_v1_multimodal_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VectorItem.ProtoReflect.Descriptor instead. 
+func (*VectorItem) Descriptor() ([]byte, []int) { + return file_powerx_ai_v1_multimodal_proto_rawDescGZIP(), []int{11} +} + +func (x *VectorItem) GetValues() []float32 { + if x != nil { + return x.Values + } + return nil +} + +var File_powerx_ai_v1_multimodal_proto protoreflect.FileDescriptor + +var file_powerx_ai_v1_multimodal_proto_rawDesc = string([]byte{ + 0x0a, 0x1d, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2f, 0x61, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x6d, 0x6f, 0x64, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0c, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x61, 0x69, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x47, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x49, + 0x74, 0x65, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x78, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xd9, 0x01, + 0x0a, 0x0d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2b, 0x0a, 0x03, 0x63, 0x74, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x03, 0x63, 0x74, 0x78, 0x12, 0x1a, 0x0a, 0x08, + 0x6d, 0x6f, 0x64, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6d, 0x6f, 0x64, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 
0x09, 0x6d, 0x6f, 0x64, 0x65, + 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6f, 0x64, + 0x65, 0x6c, 0x4b, 0x65, 0x79, 0x12, 0x31, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x61, + 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x49, 0x74, 0x65, 0x6d, + 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x70, 0x0a, 0x0e, 0x49, 0x6e, 0x76, + 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x2d, 0x0a, 0x05, + 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x22, 0x5b, 0x0a, 0x0d, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x03, + 0x63, 0x74, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x52, 0x03, 0x63, 0x74, 0x78, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, + 0x65, 
0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x57, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x31, + 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x22, 0x76, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x63, 0x74, 0x78, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x52, 0x03, 0x63, 0x74, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6f, 0x64, 0x65, 0x6c, + 0x4b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x22, 0x36, 0x0a, 0x15, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, + 0x64, 0x22, 0x9f, 0x02, 0x0a, 0x14, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x63, 0x74, + 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x52, 0x03, 0x63, 0x74, 0x78, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x6f, + 0x77, 0x65, 0x72, 0x78, 0x2e, 0x61, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, + 0x36, 0x0a, 0x0a, 0x74, 0x6f, 0x6f, 0x6c, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x09, 0x74, 0x6f, + 0x6f, 0x6c, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x74, 0x6f, 0x6f, 0x6c, 0x5f, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0b, 0x74, 0x6f, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x22, 0x27, 0x0a, 0x15, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x22, 0x70, 0x0a, 0x0c, + 0x45, 0x6d, 0x62, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x03, + 0x63, 0x74, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x52, 0x03, 0x63, 0x74, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x6f, 0x64, + 0x65, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6f, + 0x64, 0x65, 0x6c, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x22, 0x72, + 0x0a, 0x0d, 0x45, 0x6d, 0x62, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x32, 0x0a, 0x07, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x61, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x07, 0x76, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x73, 0x12, 0x2d, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x75, 0x73, 0x61, + 0x67, 0x65, 0x22, 0x24, 0x0a, 0x0a, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x74, 0x65, 0x6d, + 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x02, + 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x32, 0x9f, 0x01, 0x0a, 0x11, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x6d, 0x6f, 0x64, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, + 0x0a, 0x06, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x12, 0x1b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x78, 0x2e, 0x61, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x61, + 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1b, 0x2e, + 0x70, 0x6f, 0x77, 
0x65, 0x72, 0x78, 0x2e, 0x61, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x6f, 0x77, + 0x65, 0x72, 0x78, 0x2e, 0x61, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x32, 0xce, 0x01, 0x0a, 0x18, 0x4d, + 0x75, 0x6c, 0x74, 0x69, 0x6d, 0x6f, 0x64, 0x61, 0x6c, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x78, 0x2e, 0x61, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, + 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x61, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x58, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x22, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x61, 0x69, 0x2e, 0x76, + 0x31, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, + 0x61, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x54, 0x0a, 0x10, 0x45, + 0x6d, 0x62, 0x65, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x40, 0x0a, 0x05, 0x45, 0x6d, 0x62, 0x65, 0x64, 0x12, 0x1a, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x78, 0x2e, 0x61, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6d, 0x62, 0x65, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x70, 0x6f, 
0x77, 0x65, 0x72, 0x78, 0x2e, 0x61, 0x69, + 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6d, 0x62, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x42, 0xb7, 0x01, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, + 0x2e, 0x61, 0x69, 0x2e, 0x76, 0x31, 0x42, 0x0f, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x6d, 0x6f, 0x64, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x72, 0x74, 0x69, 0x73, 0x61, 0x6e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x2f, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x58, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x72, + 0x70, 0x63, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, + 0x2f, 0x61, 0x69, 0x2f, 0x76, 0x31, 0x3b, 0x61, 0x69, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x50, 0x41, + 0x58, 0xaa, 0x02, 0x0c, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x41, 0x69, 0x2e, 0x56, 0x31, + 0xca, 0x02, 0x0c, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x5c, 0x41, 0x69, 0x5c, 0x56, 0x31, 0xe2, + 0x02, 0x18, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x5c, 0x41, 0x69, 0x5c, 0x56, 0x31, 0x5c, 0x47, + 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e, 0x50, 0x6f, 0x77, + 0x65, 0x72, 0x78, 0x3a, 0x3a, 0x41, 0x69, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +}) + +var ( + file_powerx_ai_v1_multimodal_proto_rawDescOnce sync.Once + file_powerx_ai_v1_multimodal_proto_rawDescData []byte +) + +func file_powerx_ai_v1_multimodal_proto_rawDescGZIP() []byte { + file_powerx_ai_v1_multimodal_proto_rawDescOnce.Do(func() { + file_powerx_ai_v1_multimodal_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_powerx_ai_v1_multimodal_proto_rawDesc), len(file_powerx_ai_v1_multimodal_proto_rawDesc))) + }) + return file_powerx_ai_v1_multimodal_proto_rawDescData +} + +var file_powerx_ai_v1_multimodal_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_powerx_ai_v1_multimodal_proto_goTypes = 
[]any{ + (*ContentItem)(nil), // 0: powerx.ai.v1.ContentItem + (*InvokeRequest)(nil), // 1: powerx.ai.v1.InvokeRequest + (*InvokeResponse)(nil), // 2: powerx.ai.v1.InvokeResponse + (*StreamRequest)(nil), // 3: powerx.ai.v1.StreamRequest + (*StreamResponse)(nil), // 4: powerx.ai.v1.StreamResponse + (*CreateSessionRequest)(nil), // 5: powerx.ai.v1.CreateSessionRequest + (*CreateSessionResponse)(nil), // 6: powerx.ai.v1.CreateSessionResponse + (*AppendMessageRequest)(nil), // 7: powerx.ai.v1.AppendMessageRequest + (*AppendMessageResponse)(nil), // 8: powerx.ai.v1.AppendMessageResponse + (*EmbedRequest)(nil), // 9: powerx.ai.v1.EmbedRequest + (*EmbedResponse)(nil), // 10: powerx.ai.v1.EmbedResponse + (*VectorItem)(nil), // 11: powerx.ai.v1.VectorItem + (*v1.RequestContext)(nil), // 12: common.v1.RequestContext + (*structpb.Struct)(nil), // 13: google.protobuf.Struct +} +var file_powerx_ai_v1_multimodal_proto_depIdxs = []int32{ + 12, // 0: powerx.ai.v1.InvokeRequest.ctx:type_name -> common.v1.RequestContext + 0, // 1: powerx.ai.v1.InvokeRequest.inputs:type_name -> powerx.ai.v1.ContentItem + 13, // 2: powerx.ai.v1.InvokeRequest.params:type_name -> google.protobuf.Struct + 13, // 3: powerx.ai.v1.InvokeResponse.output:type_name -> google.protobuf.Struct + 13, // 4: powerx.ai.v1.InvokeResponse.usage:type_name -> google.protobuf.Struct + 12, // 5: powerx.ai.v1.StreamRequest.ctx:type_name -> common.v1.RequestContext + 13, // 6: powerx.ai.v1.StreamResponse.payload:type_name -> google.protobuf.Struct + 12, // 7: powerx.ai.v1.CreateSessionRequest.ctx:type_name -> common.v1.RequestContext + 12, // 8: powerx.ai.v1.AppendMessageRequest.ctx:type_name -> common.v1.RequestContext + 0, // 9: powerx.ai.v1.AppendMessageRequest.content:type_name -> powerx.ai.v1.ContentItem + 13, // 10: powerx.ai.v1.AppendMessageRequest.tool_calls:type_name -> google.protobuf.Struct + 13, // 11: powerx.ai.v1.AppendMessageRequest.tool_results:type_name -> google.protobuf.Struct + 12, // 12: 
powerx.ai.v1.EmbedRequest.ctx:type_name -> common.v1.RequestContext + 11, // 13: powerx.ai.v1.EmbedResponse.vectors:type_name -> powerx.ai.v1.VectorItem + 13, // 14: powerx.ai.v1.EmbedResponse.usage:type_name -> google.protobuf.Struct + 1, // 15: powerx.ai.v1.MultimodalService.Invoke:input_type -> powerx.ai.v1.InvokeRequest + 3, // 16: powerx.ai.v1.MultimodalService.Stream:input_type -> powerx.ai.v1.StreamRequest + 5, // 17: powerx.ai.v1.MultimodalSessionService.CreateSession:input_type -> powerx.ai.v1.CreateSessionRequest + 7, // 18: powerx.ai.v1.MultimodalSessionService.AppendMessage:input_type -> powerx.ai.v1.AppendMessageRequest + 9, // 19: powerx.ai.v1.EmbeddingService.Embed:input_type -> powerx.ai.v1.EmbedRequest + 2, // 20: powerx.ai.v1.MultimodalService.Invoke:output_type -> powerx.ai.v1.InvokeResponse + 4, // 21: powerx.ai.v1.MultimodalService.Stream:output_type -> powerx.ai.v1.StreamResponse + 6, // 22: powerx.ai.v1.MultimodalSessionService.CreateSession:output_type -> powerx.ai.v1.CreateSessionResponse + 8, // 23: powerx.ai.v1.MultimodalSessionService.AppendMessage:output_type -> powerx.ai.v1.AppendMessageResponse + 10, // 24: powerx.ai.v1.EmbeddingService.Embed:output_type -> powerx.ai.v1.EmbedResponse + 20, // [20:25] is the sub-list for method output_type + 15, // [15:20] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name +} + +func init() { file_powerx_ai_v1_multimodal_proto_init() } +func file_powerx_ai_v1_multimodal_proto_init() { + if File_powerx_ai_v1_multimodal_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_powerx_ai_v1_multimodal_proto_rawDesc), len(file_powerx_ai_v1_multimodal_proto_rawDesc)), + NumEnums: 0, + NumMessages: 12, + 
NumExtensions: 0, + NumServices: 3, + }, + GoTypes: file_powerx_ai_v1_multimodal_proto_goTypes, + DependencyIndexes: file_powerx_ai_v1_multimodal_proto_depIdxs, + MessageInfos: file_powerx_ai_v1_multimodal_proto_msgTypes, + }.Build() + File_powerx_ai_v1_multimodal_proto = out.File + file_powerx_ai_v1_multimodal_proto_goTypes = nil + file_powerx_ai_v1_multimodal_proto_depIdxs = nil +} diff --git a/backend/api/grpc/gen/go/powerx/ai/v1/multimodal_grpc.pb.go b/backend/api/grpc/gen/go/powerx/ai/v1/multimodal_grpc.pb.go new file mode 100644 index 00000000..81930dfc --- /dev/null +++ b/backend/api/grpc/gen/go/powerx/ai/v1/multimodal_grpc.pb.go @@ -0,0 +1,406 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc (unknown) +// source: powerx/ai/v1/multimodal.proto + +package aiv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + MultimodalService_Invoke_FullMethodName = "/powerx.ai.v1.MultimodalService/Invoke" + MultimodalService_Stream_FullMethodName = "/powerx.ai.v1.MultimodalService/Stream" +) + +// MultimodalServiceClient is the client API for MultimodalService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type MultimodalServiceClient interface { + Invoke(ctx context.Context, in *InvokeRequest, opts ...grpc.CallOption) (*InvokeResponse, error) + Stream(ctx context.Context, in *StreamRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StreamResponse], error) +} + +type multimodalServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewMultimodalServiceClient(cc grpc.ClientConnInterface) MultimodalServiceClient { + return &multimodalServiceClient{cc} +} + +func (c *multimodalServiceClient) Invoke(ctx context.Context, in *InvokeRequest, opts ...grpc.CallOption) (*InvokeResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(InvokeResponse) + err := c.cc.Invoke(ctx, MultimodalService_Invoke_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *multimodalServiceClient) Stream(ctx context.Context, in *StreamRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StreamResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &MultimodalService_ServiceDesc.Streams[0], MultimodalService_Stream_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[StreamRequest, StreamResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type MultimodalService_StreamClient = grpc.ServerStreamingClient[StreamResponse] + +// MultimodalServiceServer is the server API for MultimodalService service. +// All implementations must embed UnimplementedMultimodalServiceServer +// for forward compatibility. 
+type MultimodalServiceServer interface { + Invoke(context.Context, *InvokeRequest) (*InvokeResponse, error) + Stream(*StreamRequest, grpc.ServerStreamingServer[StreamResponse]) error + mustEmbedUnimplementedMultimodalServiceServer() +} + +// UnimplementedMultimodalServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedMultimodalServiceServer struct{} + +func (UnimplementedMultimodalServiceServer) Invoke(context.Context, *InvokeRequest) (*InvokeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Invoke not implemented") +} +func (UnimplementedMultimodalServiceServer) Stream(*StreamRequest, grpc.ServerStreamingServer[StreamResponse]) error { + return status.Errorf(codes.Unimplemented, "method Stream not implemented") +} +func (UnimplementedMultimodalServiceServer) mustEmbedUnimplementedMultimodalServiceServer() {} +func (UnimplementedMultimodalServiceServer) testEmbeddedByValue() {} + +// UnsafeMultimodalServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to MultimodalServiceServer will +// result in compilation errors. +type UnsafeMultimodalServiceServer interface { + mustEmbedUnimplementedMultimodalServiceServer() +} + +func RegisterMultimodalServiceServer(s grpc.ServiceRegistrar, srv MultimodalServiceServer) { + // If the following call pancis, it indicates UnimplementedMultimodalServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&MultimodalService_ServiceDesc, srv) +} + +func _MultimodalService_Invoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InvokeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MultimodalServiceServer).Invoke(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MultimodalService_Invoke_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MultimodalServiceServer).Invoke(ctx, req.(*InvokeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MultimodalService_Stream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StreamRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(MultimodalServiceServer).Stream(m, &grpc.GenericServerStream[StreamRequest, StreamResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type MultimodalService_StreamServer = grpc.ServerStreamingServer[StreamResponse] + +// MultimodalService_ServiceDesc is the grpc.ServiceDesc for MultimodalService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var MultimodalService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "powerx.ai.v1.MultimodalService", + HandlerType: (*MultimodalServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Invoke", + Handler: _MultimodalService_Invoke_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Stream", + Handler: _MultimodalService_Stream_Handler, + ServerStreams: true, + }, + }, + Metadata: "powerx/ai/v1/multimodal.proto", +} + +const ( + MultimodalSessionService_CreateSession_FullMethodName = "/powerx.ai.v1.MultimodalSessionService/CreateSession" + MultimodalSessionService_AppendMessage_FullMethodName = "/powerx.ai.v1.MultimodalSessionService/AppendMessage" +) + +// MultimodalSessionServiceClient is the client API for MultimodalSessionService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type MultimodalSessionServiceClient interface { + CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*CreateSessionResponse, error) + AppendMessage(ctx context.Context, in *AppendMessageRequest, opts ...grpc.CallOption) (*AppendMessageResponse, error) +} + +type multimodalSessionServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewMultimodalSessionServiceClient(cc grpc.ClientConnInterface) MultimodalSessionServiceClient { + return &multimodalSessionServiceClient{cc} +} + +func (c *multimodalSessionServiceClient) CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*CreateSessionResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CreateSessionResponse) + err := c.cc.Invoke(ctx, MultimodalSessionService_CreateSession_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *multimodalSessionServiceClient) AppendMessage(ctx context.Context, in *AppendMessageRequest, opts ...grpc.CallOption) (*AppendMessageResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(AppendMessageResponse) + err := c.cc.Invoke(ctx, MultimodalSessionService_AppendMessage_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MultimodalSessionServiceServer is the server API for MultimodalSessionService service. +// All implementations must embed UnimplementedMultimodalSessionServiceServer +// for forward compatibility. +type MultimodalSessionServiceServer interface { + CreateSession(context.Context, *CreateSessionRequest) (*CreateSessionResponse, error) + AppendMessage(context.Context, *AppendMessageRequest) (*AppendMessageResponse, error) + mustEmbedUnimplementedMultimodalSessionServiceServer() +} + +// UnimplementedMultimodalSessionServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedMultimodalSessionServiceServer struct{} + +func (UnimplementedMultimodalSessionServiceServer) CreateSession(context.Context, *CreateSessionRequest) (*CreateSessionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSession not implemented") +} +func (UnimplementedMultimodalSessionServiceServer) AppendMessage(context.Context, *AppendMessageRequest) (*AppendMessageResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AppendMessage not implemented") +} +func (UnimplementedMultimodalSessionServiceServer) mustEmbedUnimplementedMultimodalSessionServiceServer() { +} +func (UnimplementedMultimodalSessionServiceServer) testEmbeddedByValue() {} + +// UnsafeMultimodalSessionServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to MultimodalSessionServiceServer will +// result in compilation errors. +type UnsafeMultimodalSessionServiceServer interface { + mustEmbedUnimplementedMultimodalSessionServiceServer() +} + +func RegisterMultimodalSessionServiceServer(s grpc.ServiceRegistrar, srv MultimodalSessionServiceServer) { + // If the following call pancis, it indicates UnimplementedMultimodalSessionServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&MultimodalSessionService_ServiceDesc, srv) +} + +func _MultimodalSessionService_CreateSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSessionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MultimodalSessionServiceServer).CreateSession(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MultimodalSessionService_CreateSession_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MultimodalSessionServiceServer).CreateSession(ctx, req.(*CreateSessionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MultimodalSessionService_AppendMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AppendMessageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MultimodalSessionServiceServer).AppendMessage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MultimodalSessionService_AppendMessage_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MultimodalSessionServiceServer).AppendMessage(ctx, req.(*AppendMessageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// MultimodalSessionService_ServiceDesc is the grpc.ServiceDesc for MultimodalSessionService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var MultimodalSessionService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "powerx.ai.v1.MultimodalSessionService", + HandlerType: (*MultimodalSessionServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSession", + Handler: _MultimodalSessionService_CreateSession_Handler, + }, + { + MethodName: "AppendMessage", + Handler: _MultimodalSessionService_AppendMessage_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "powerx/ai/v1/multimodal.proto", +} + +const ( + EmbeddingService_Embed_FullMethodName = "/powerx.ai.v1.EmbeddingService/Embed" +) + +// EmbeddingServiceClient is the client API for EmbeddingService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type EmbeddingServiceClient interface { + Embed(ctx context.Context, in *EmbedRequest, opts ...grpc.CallOption) (*EmbedResponse, error) +} + +type embeddingServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewEmbeddingServiceClient(cc grpc.ClientConnInterface) EmbeddingServiceClient { + return &embeddingServiceClient{cc} +} + +func (c *embeddingServiceClient) Embed(ctx context.Context, in *EmbedRequest, opts ...grpc.CallOption) (*EmbedResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(EmbedResponse) + err := c.cc.Invoke(ctx, EmbeddingService_Embed_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// EmbeddingServiceServer is the server API for EmbeddingService service. +// All implementations must embed UnimplementedEmbeddingServiceServer +// for forward compatibility. 
+type EmbeddingServiceServer interface { + Embed(context.Context, *EmbedRequest) (*EmbedResponse, error) + mustEmbedUnimplementedEmbeddingServiceServer() +} + +// UnimplementedEmbeddingServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedEmbeddingServiceServer struct{} + +func (UnimplementedEmbeddingServiceServer) Embed(context.Context, *EmbedRequest) (*EmbedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Embed not implemented") +} +func (UnimplementedEmbeddingServiceServer) mustEmbedUnimplementedEmbeddingServiceServer() {} +func (UnimplementedEmbeddingServiceServer) testEmbeddedByValue() {} + +// UnsafeEmbeddingServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to EmbeddingServiceServer will +// result in compilation errors. +type UnsafeEmbeddingServiceServer interface { + mustEmbedUnimplementedEmbeddingServiceServer() +} + +func RegisterEmbeddingServiceServer(s grpc.ServiceRegistrar, srv EmbeddingServiceServer) { + // If the following call pancis, it indicates UnimplementedEmbeddingServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&EmbeddingService_ServiceDesc, srv) +} + +func _EmbeddingService_Embed_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EmbedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EmbeddingServiceServer).Embed(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: EmbeddingService_Embed_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EmbeddingServiceServer).Embed(ctx, req.(*EmbedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// EmbeddingService_ServiceDesc is the grpc.ServiceDesc for EmbeddingService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var EmbeddingService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "powerx.ai.v1.EmbeddingService", + HandlerType: (*EmbeddingServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Embed", + Handler: _EmbeddingService_Embed_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "powerx/ai/v1/multimodal.proto", +} diff --git a/backend/api/grpc/gen/go/powerx/knowledge/v1/knowledge_space.pb.go b/backend/api/grpc/gen/go/powerx/knowledge/v1/knowledge_space.pb.go index 61b3e843..eb479243 100644 --- a/backend/api/grpc/gen/go/powerx/knowledge/v1/knowledge_space.pb.go +++ b/backend/api/grpc/gen/go/powerx/knowledge/v1/knowledge_space.pb.go @@ -9,6 +9,7 @@ package knowledgev1 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" @@ 
-559,15 +560,20 @@ func (x *RetireKnowledgeSpaceResponse) GetSpace() *KnowledgeSpace { } type IngestionJobRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - SpaceId string `protobuf:"bytes,1,opt,name=space_id,json=spaceId,proto3" json:"space_id,omitempty"` - SourceId string `protobuf:"bytes,2,opt,name=source_id,json=sourceId,proto3" json:"source_id,omitempty"` - SourceType string `protobuf:"bytes,3,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` - Priority string `protobuf:"bytes,4,opt,name=priority,proto3" json:"priority,omitempty"` - SourceUri string `protobuf:"bytes,5,opt,name=source_uri,json=sourceUri,proto3" json:"source_uri,omitempty"` - MaskingProfile string `protobuf:"bytes,6,opt,name=masking_profile,json=maskingProfile,proto3" json:"masking_profile,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + SpaceId string `protobuf:"bytes,1,opt,name=space_id,json=spaceId,proto3" json:"space_id,omitempty"` + SourceId string `protobuf:"bytes,2,opt,name=source_id,json=sourceId,proto3" json:"source_id,omitempty"` + SourceType string `protobuf:"bytes,3,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` + Priority string `protobuf:"bytes,4,opt,name=priority,proto3" json:"priority,omitempty"` + SourceUri string `protobuf:"bytes,5,opt,name=source_uri,json=sourceUri,proto3" json:"source_uri,omitempty"` + MaskingProfile string `protobuf:"bytes,6,opt,name=masking_profile,json=maskingProfile,proto3" json:"masking_profile,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` + IngestionProfile string `protobuf:"bytes,8,opt,name=ingestion_profile,json=ingestionProfile,proto3" json:"ingestion_profile,omitempty"` + ProcessorProfile string `protobuf:"bytes,9,opt,name=processor_profile,json=processorProfile,proto3" json:"processor_profile,omitempty"` + OcrRequired bool 
`protobuf:"varint,10,opt,name=ocr_required,json=ocrRequired,proto3" json:"ocr_required,omitempty"` + RequestedBy string `protobuf:"bytes,11,opt,name=requested_by,json=requestedBy,proto3" json:"requested_by,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *IngestionJobRequest) Reset() { @@ -642,6 +648,41 @@ func (x *IngestionJobRequest) GetMaskingProfile() string { return "" } +func (x *IngestionJobRequest) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *IngestionJobRequest) GetIngestionProfile() string { + if x != nil { + return x.IngestionProfile + } + return "" +} + +func (x *IngestionJobRequest) GetProcessorProfile() string { + if x != nil { + return x.ProcessorProfile + } + return "" +} + +func (x *IngestionJobRequest) GetOcrRequired() bool { + if x != nil { + return x.OcrRequired + } + return false +} + +func (x *IngestionJobRequest) GetRequestedBy() string { + if x != nil { + return x.RequestedBy + } + return "" +} + type IngestionJobStatus struct { state protoimpl.MessageState `protogen:"open.v1"` JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` @@ -650,6 +691,9 @@ type IngestionJobStatus struct { ChunkCoveredPct float32 `protobuf:"fixed32,4,opt,name=chunk_covered_pct,json=chunkCoveredPct,proto3" json:"chunk_covered_pct,omitempty"` EmbeddingSuccessPct float32 `protobuf:"fixed32,5,opt,name=embedding_success_pct,json=embeddingSuccessPct,proto3" json:"embedding_success_pct,omitempty"` MaskingCoveragePct float32 `protobuf:"fixed32,6,opt,name=masking_coverage_pct,json=maskingCoveragePct,proto3" json:"masking_coverage_pct,omitempty"` + RetryCount uint32 `protobuf:"varint,7,opt,name=retry_count,json=retryCount,proto3" json:"retry_count,omitempty"` + ErrorCode string `protobuf:"bytes,8,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + BlockedReason string 
`protobuf:"bytes,9,opt,name=blocked_reason,json=blockedReason,proto3" json:"blocked_reason,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -726,6 +770,27 @@ func (x *IngestionJobStatus) GetMaskingCoveragePct() float32 { return 0 } +func (x *IngestionJobStatus) GetRetryCount() uint32 { + if x != nil { + return x.RetryCount + } + return 0 +} + +func (x *IngestionJobStatus) GetErrorCode() string { + if x != nil { + return x.ErrorCode + } + return "" +} + +func (x *IngestionJobStatus) GetBlockedReason() string { + if x != nil { + return x.BlockedReason + } + return "" +} + type IngestionJobResponse struct { state protoimpl.MessageState `protogen:"open.v1"` Job *IngestionJobStatus `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` @@ -1066,6 +1131,7 @@ type FusionStrategy struct { ConflictPolicy string `protobuf:"bytes,8,opt,name=conflict_policy,json=conflictPolicy,proto3" json:"conflict_policy,omitempty"` DeploymentState FusionStrategy_DeploymentState `protobuf:"varint,9,opt,name=deployment_state,json=deploymentState,proto3,enum=powerx.knowledge.v1.FusionStrategy_DeploymentState" json:"deployment_state,omitempty"` PublishedAt *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=published_at,json=publishedAt,proto3" json:"published_at,omitempty"` + DegradeReasons []string `protobuf:"bytes,11,rep,name=degrade_reasons,json=degradeReasons,proto3" json:"degrade_reasons,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1170,6 +1236,13 @@ func (x *FusionStrategy) GetPublishedAt() *timestamppb.Timestamp { return nil } +func (x *FusionStrategy) GetDegradeReasons() []string { + if x != nil { + return x.DegradeReasons + } + return nil +} + type FeedbackRequest struct { state protoimpl.MessageState `protogen:"open.v1"` SpaceId string `protobuf:"bytes,1,opt,name=space_id,json=spaceId,proto3" json:"space_id,omitempty"` @@ -1402,39 +1475,30 @@ func (x *ListFeedbackCasesResponse) GetCases() 
[]*FeedbackCase { return nil } -type FeedbackCase struct { - state protoimpl.MessageState `protogen:"open.v1"` - CaseId string `protobuf:"bytes,1,opt,name=case_id,json=caseId,proto3" json:"case_id,omitempty"` - SpaceId string `protobuf:"bytes,2,opt,name=space_id,json=spaceId,proto3" json:"space_id,omitempty"` - Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` - Severity string `protobuf:"bytes,4,opt,name=severity,proto3" json:"severity,omitempty"` - IssueType string `protobuf:"bytes,5,opt,name=issue_type,json=issueType,proto3" json:"issue_type,omitempty"` - LinkedChunks []string `protobuf:"bytes,6,rep,name=linked_chunks,json=linkedChunks,proto3" json:"linked_chunks,omitempty"` - ReportedBy string `protobuf:"bytes,7,opt,name=reported_by,json=reportedBy,proto3" json:"reported_by,omitempty"` - Notes string `protobuf:"bytes,8,opt,name=notes,proto3" json:"notes,omitempty"` - ToolTraceRef string `protobuf:"bytes,9,opt,name=tool_trace_ref,json=toolTraceRef,proto3" json:"tool_trace_ref,omitempty"` - QualityScore float64 `protobuf:"fixed64,10,opt,name=quality_score,json=qualityScore,proto3" json:"quality_score,omitempty"` - SlaDueAt *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=sla_due_at,json=slaDueAt,proto3" json:"sla_due_at,omitempty"` - CreatedAt *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache +type CloseFeedbackCaseRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SpaceId string `protobuf:"bytes,1,opt,name=space_id,json=spaceId,proto3" json:"space_id,omitempty"` + CaseId string `protobuf:"bytes,2,opt,name=case_id,json=caseId,proto3" json:"case_id,omitempty"` + RequestedBy string `protobuf:"bytes,3,opt,name=requested_by,json=requestedBy,proto3" 
json:"requested_by,omitempty"` + ResolutionNotes string `protobuf:"bytes,4,opt,name=resolution_notes,json=resolutionNotes,proto3" json:"resolution_notes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *FeedbackCase) Reset() { - *x = FeedbackCase{} +func (x *CloseFeedbackCaseRequest) Reset() { + *x = CloseFeedbackCaseRequest{} mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *FeedbackCase) String() string { +func (x *CloseFeedbackCaseRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*FeedbackCase) ProtoMessage() {} +func (*CloseFeedbackCaseRequest) ProtoMessage() {} -func (x *FeedbackCase) ProtoReflect() protoreflect.Message { +func (x *CloseFeedbackCaseRequest) ProtoReflect() protoreflect.Message { mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1446,134 +1510,131 @@ func (x *FeedbackCase) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use FeedbackCase.ProtoReflect.Descriptor instead. -func (*FeedbackCase) Descriptor() ([]byte, []int) { +// Deprecated: Use CloseFeedbackCaseRequest.ProtoReflect.Descriptor instead. 
+func (*CloseFeedbackCaseRequest) Descriptor() ([]byte, []int) { return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{20} } -func (x *FeedbackCase) GetCaseId() string { +func (x *CloseFeedbackCaseRequest) GetSpaceId() string { if x != nil { - return x.CaseId + return x.SpaceId } return "" } -func (x *FeedbackCase) GetSpaceId() string { +func (x *CloseFeedbackCaseRequest) GetCaseId() string { if x != nil { - return x.SpaceId + return x.CaseId } return "" } -func (x *FeedbackCase) GetStatus() string { +func (x *CloseFeedbackCaseRequest) GetRequestedBy() string { if x != nil { - return x.Status + return x.RequestedBy } return "" } -func (x *FeedbackCase) GetSeverity() string { +func (x *CloseFeedbackCaseRequest) GetResolutionNotes() string { if x != nil { - return x.Severity + return x.ResolutionNotes } return "" } -func (x *FeedbackCase) GetIssueType() string { - if x != nil { - return x.IssueType - } - return "" +type EscalateFeedbackCaseRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SpaceId string `protobuf:"bytes,1,opt,name=space_id,json=spaceId,proto3" json:"space_id,omitempty"` + CaseId string `protobuf:"bytes,2,opt,name=case_id,json=caseId,proto3" json:"case_id,omitempty"` + RequestedBy string `protobuf:"bytes,3,opt,name=requested_by,json=requestedBy,proto3" json:"requested_by,omitempty"` + Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *FeedbackCase) GetLinkedChunks() []string { - if x != nil { - return x.LinkedChunks - } - return nil +func (x *EscalateFeedbackCaseRequest) Reset() { + *x = EscalateFeedbackCaseRequest{} + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *FeedbackCase) GetReportedBy() string { - if x != nil { - return x.ReportedBy - } - return "" +func (x 
*EscalateFeedbackCaseRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *FeedbackCase) GetNotes() string { +func (*EscalateFeedbackCaseRequest) ProtoMessage() {} + +func (x *EscalateFeedbackCaseRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[21] if x != nil { - return x.Notes + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (x *FeedbackCase) GetToolTraceRef() string { - if x != nil { - return x.ToolTraceRef - } - return "" +// Deprecated: Use EscalateFeedbackCaseRequest.ProtoReflect.Descriptor instead. +func (*EscalateFeedbackCaseRequest) Descriptor() ([]byte, []int) { + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{21} } -func (x *FeedbackCase) GetQualityScore() float64 { +func (x *EscalateFeedbackCaseRequest) GetSpaceId() string { if x != nil { - return x.QualityScore + return x.SpaceId } - return 0 + return "" } -func (x *FeedbackCase) GetSlaDueAt() *timestamppb.Timestamp { +func (x *EscalateFeedbackCaseRequest) GetCaseId() string { if x != nil { - return x.SlaDueAt + return x.CaseId } - return nil + return "" } -func (x *FeedbackCase) GetCreatedAt() *timestamppb.Timestamp { +func (x *EscalateFeedbackCaseRequest) GetRequestedBy() string { if x != nil { - return x.CreatedAt + return x.RequestedBy } - return nil + return "" } -func (x *FeedbackCase) GetUpdatedAt() *timestamppb.Timestamp { +func (x *EscalateFeedbackCaseRequest) GetReason() string { if x != nil { - return x.UpdatedAt + return x.Reason } - return nil + return "" } -type DeltaJob struct { - state protoimpl.MessageState `protogen:"open.v1"` - JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` - SpaceId string `protobuf:"bytes,2,opt,name=space_id,json=spaceId,proto3" json:"space_id,omitempty"` - Source string 
`protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"` - Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` - ApprovalState string `protobuf:"bytes,5,opt,name=approval_state,json=approvalState,proto3" json:"approval_state,omitempty"` - DiffAccuracy float64 `protobuf:"fixed64,6,opt,name=diff_accuracy,json=diffAccuracy,proto3" json:"diff_accuracy,omitempty"` - PartialRelease bool `protobuf:"varint,7,opt,name=partial_release,json=partialRelease,proto3" json:"partial_release,omitempty"` - RollbackCount int32 `protobuf:"varint,8,opt,name=rollback_count,json=rollbackCount,proto3" json:"rollback_count,omitempty"` - CreatedAt *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - PublishedAt *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=published_at,json=publishedAt,proto3" json:"published_at,omitempty"` - ReportJson string `protobuf:"bytes,11,opt,name=report_json,json=reportJson,proto3" json:"report_json,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache +type ReprocessFeedbackCaseRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SpaceId string `protobuf:"bytes,1,opt,name=space_id,json=spaceId,proto3" json:"space_id,omitempty"` + CaseId string `protobuf:"bytes,2,opt,name=case_id,json=caseId,proto3" json:"case_id,omitempty"` + RequestedBy string `protobuf:"bytes,3,opt,name=requested_by,json=requestedBy,proto3" json:"requested_by,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *DeltaJob) Reset() { - *x = DeltaJob{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[21] +func (x *ReprocessFeedbackCaseRequest) Reset() { + *x = ReprocessFeedbackCaseRequest{} + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *DeltaJob) String() string { +func (x 
*ReprocessFeedbackCaseRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeltaJob) ProtoMessage() {} +func (*ReprocessFeedbackCaseRequest) ProtoMessage() {} -func (x *DeltaJob) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[21] +func (x *ReprocessFeedbackCaseRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[22] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1584,115 +1645,535 @@ func (x *DeltaJob) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeltaJob.ProtoReflect.Descriptor instead. -func (*DeltaJob) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{21} +// Deprecated: Use ReprocessFeedbackCaseRequest.ProtoReflect.Descriptor instead. +func (*ReprocessFeedbackCaseRequest) Descriptor() ([]byte, []int) { + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{22} } -func (x *DeltaJob) GetJobId() string { +func (x *ReprocessFeedbackCaseRequest) GetSpaceId() string { if x != nil { - return x.JobId + return x.SpaceId } return "" } -func (x *DeltaJob) GetSpaceId() string { +func (x *ReprocessFeedbackCaseRequest) GetCaseId() string { if x != nil { - return x.SpaceId + return x.CaseId } return "" } -func (x *DeltaJob) GetSource() string { +func (x *ReprocessFeedbackCaseRequest) GetRequestedBy() string { if x != nil { - return x.Source + return x.RequestedBy } return "" } -func (x *DeltaJob) GetStatus() string { - if x != nil { - return x.Status - } - return "" +type RollbackFeedbackCaseRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SpaceId string `protobuf:"bytes,1,opt,name=space_id,json=spaceId,proto3" json:"space_id,omitempty"` + CaseId string `protobuf:"bytes,2,opt,name=case_id,json=caseId,proto3" json:"case_id,omitempty"` + 
RequestedBy string `protobuf:"bytes,3,opt,name=requested_by,json=requestedBy,proto3" json:"requested_by,omitempty"` + Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *DeltaJob) GetApprovalState() string { - if x != nil { - return x.ApprovalState - } - return "" +func (x *RollbackFeedbackCaseRequest) Reset() { + *x = RollbackFeedbackCaseRequest{} + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *DeltaJob) GetDiffAccuracy() float64 { - if x != nil { - return x.DiffAccuracy - } - return 0 +func (x *RollbackFeedbackCaseRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *DeltaJob) GetPartialRelease() bool { +func (*RollbackFeedbackCaseRequest) ProtoMessage() {} + +func (x *RollbackFeedbackCaseRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[23] if x != nil { - return x.PartialRelease + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return false + return mi.MessageOf(x) } -func (x *DeltaJob) GetRollbackCount() int32 { +// Deprecated: Use RollbackFeedbackCaseRequest.ProtoReflect.Descriptor instead. 
+func (*RollbackFeedbackCaseRequest) Descriptor() ([]byte, []int) { + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{23} +} + +func (x *RollbackFeedbackCaseRequest) GetSpaceId() string { if x != nil { - return x.RollbackCount + return x.SpaceId } - return 0 + return "" } -func (x *DeltaJob) GetCreatedAt() *timestamppb.Timestamp { +func (x *RollbackFeedbackCaseRequest) GetCaseId() string { if x != nil { - return x.CreatedAt + return x.CaseId } - return nil + return "" } -func (x *DeltaJob) GetPublishedAt() *timestamppb.Timestamp { +func (x *RollbackFeedbackCaseRequest) GetRequestedBy() string { if x != nil { - return x.PublishedAt + return x.RequestedBy } - return nil + return "" } -func (x *DeltaJob) GetReportJson() string { +func (x *RollbackFeedbackCaseRequest) GetReason() string { if x != nil { - return x.ReportJson + return x.Reason } return "" } -type StartDeltaJobRequest struct { +type ExportFeedbackCasesRequest struct { state protoimpl.MessageState `protogen:"open.v1"` SpaceId string `protobuf:"bytes,1,opt,name=space_id,json=spaceId,proto3" json:"space_id,omitempty"` - Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` - PackageUri string `protobuf:"bytes,3,opt,name=package_uri,json=packageUri,proto3" json:"package_uri,omitempty"` - DiffAccuracy float64 `protobuf:"fixed64,4,opt,name=diff_accuracy,json=diffAccuracy,proto3" json:"diff_accuracy,omitempty"` - RequestedBy string `protobuf:"bytes,5,opt,name=requested_by,json=requestedBy,proto3" json:"requested_by,omitempty"` - Notes string `protobuf:"bytes,6,opt,name=notes,proto3" json:"notes,omitempty"` + Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + Severity string `protobuf:"bytes,3,opt,name=severity,proto3" json:"severity,omitempty"` + Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *StartDeltaJobRequest) 
Reset() { - *x = StartDeltaJobRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[22] +func (x *ExportFeedbackCasesRequest) Reset() { + *x = ExportFeedbackCasesRequest{} + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *StartDeltaJobRequest) String() string { +func (x *ExportFeedbackCasesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StartDeltaJobRequest) ProtoMessage() {} +func (*ExportFeedbackCasesRequest) ProtoMessage() {} -func (x *StartDeltaJobRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[22] +func (x *ExportFeedbackCasesRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportFeedbackCasesRequest.ProtoReflect.Descriptor instead. 
+func (*ExportFeedbackCasesRequest) Descriptor() ([]byte, []int) { + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{24} +} + +func (x *ExportFeedbackCasesRequest) GetSpaceId() string { + if x != nil { + return x.SpaceId + } + return "" +} + +func (x *ExportFeedbackCasesRequest) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *ExportFeedbackCasesRequest) GetSeverity() string { + if x != nil { + return x.Severity + } + return "" +} + +func (x *ExportFeedbackCasesRequest) GetLimit() uint32 { + if x != nil { + return x.Limit + } + return 0 +} + +type ExportFeedbackCasesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Cases []*FeedbackCase `protobuf:"bytes,1,rep,name=cases,proto3" json:"cases,omitempty"` + ExportJson string `protobuf:"bytes,2,opt,name=export_json,json=exportJson,proto3" json:"export_json,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExportFeedbackCasesResponse) Reset() { + *x = ExportFeedbackCasesResponse{} + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExportFeedbackCasesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportFeedbackCasesResponse) ProtoMessage() {} + +func (x *ExportFeedbackCasesResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[25] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportFeedbackCasesResponse.ProtoReflect.Descriptor instead. 
+func (*ExportFeedbackCasesResponse) Descriptor() ([]byte, []int) { + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{25} +} + +func (x *ExportFeedbackCasesResponse) GetCases() []*FeedbackCase { + if x != nil { + return x.Cases + } + return nil +} + +func (x *ExportFeedbackCasesResponse) GetExportJson() string { + if x != nil { + return x.ExportJson + } + return "" +} + +type FeedbackCase struct { + state protoimpl.MessageState `protogen:"open.v1"` + CaseId string `protobuf:"bytes,1,opt,name=case_id,json=caseId,proto3" json:"case_id,omitempty"` + SpaceId string `protobuf:"bytes,2,opt,name=space_id,json=spaceId,proto3" json:"space_id,omitempty"` + Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` + Severity string `protobuf:"bytes,4,opt,name=severity,proto3" json:"severity,omitempty"` + IssueType string `protobuf:"bytes,5,opt,name=issue_type,json=issueType,proto3" json:"issue_type,omitempty"` + LinkedChunks []string `protobuf:"bytes,6,rep,name=linked_chunks,json=linkedChunks,proto3" json:"linked_chunks,omitempty"` + ReportedBy string `protobuf:"bytes,7,opt,name=reported_by,json=reportedBy,proto3" json:"reported_by,omitempty"` + Notes string `protobuf:"bytes,8,opt,name=notes,proto3" json:"notes,omitempty"` + ToolTraceRef string `protobuf:"bytes,9,opt,name=tool_trace_ref,json=toolTraceRef,proto3" json:"tool_trace_ref,omitempty"` + QualityScore float64 `protobuf:"fixed64,10,opt,name=quality_score,json=qualityScore,proto3" json:"quality_score,omitempty"` + SlaDueAt *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=sla_due_at,json=slaDueAt,proto3" json:"sla_due_at,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + EscalatedAt *timestamppb.Timestamp 
`protobuf:"bytes,14,opt,name=escalated_at,json=escalatedAt,proto3" json:"escalated_at,omitempty"` + ClosedAt *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=closed_at,json=closedAt,proto3" json:"closed_at,omitempty"` + ResolutionNotes string `protobuf:"bytes,16,opt,name=resolution_notes,json=resolutionNotes,proto3" json:"resolution_notes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FeedbackCase) Reset() { + *x = FeedbackCase{} + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FeedbackCase) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeedbackCase) ProtoMessage() {} + +func (x *FeedbackCase) ProtoReflect() protoreflect.Message { + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[26] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeedbackCase.ProtoReflect.Descriptor instead. 
+func (*FeedbackCase) Descriptor() ([]byte, []int) { + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{26} +} + +func (x *FeedbackCase) GetCaseId() string { + if x != nil { + return x.CaseId + } + return "" +} + +func (x *FeedbackCase) GetSpaceId() string { + if x != nil { + return x.SpaceId + } + return "" +} + +func (x *FeedbackCase) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *FeedbackCase) GetSeverity() string { + if x != nil { + return x.Severity + } + return "" +} + +func (x *FeedbackCase) GetIssueType() string { + if x != nil { + return x.IssueType + } + return "" +} + +func (x *FeedbackCase) GetLinkedChunks() []string { + if x != nil { + return x.LinkedChunks + } + return nil +} + +func (x *FeedbackCase) GetReportedBy() string { + if x != nil { + return x.ReportedBy + } + return "" +} + +func (x *FeedbackCase) GetNotes() string { + if x != nil { + return x.Notes + } + return "" +} + +func (x *FeedbackCase) GetToolTraceRef() string { + if x != nil { + return x.ToolTraceRef + } + return "" +} + +func (x *FeedbackCase) GetQualityScore() float64 { + if x != nil { + return x.QualityScore + } + return 0 +} + +func (x *FeedbackCase) GetSlaDueAt() *timestamppb.Timestamp { + if x != nil { + return x.SlaDueAt + } + return nil +} + +func (x *FeedbackCase) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *FeedbackCase) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil +} + +func (x *FeedbackCase) GetEscalatedAt() *timestamppb.Timestamp { + if x != nil { + return x.EscalatedAt + } + return nil +} + +func (x *FeedbackCase) GetClosedAt() *timestamppb.Timestamp { + if x != nil { + return x.ClosedAt + } + return nil +} + +func (x *FeedbackCase) GetResolutionNotes() string { + if x != nil { + return x.ResolutionNotes + } + return "" +} + +type DeltaJob struct { + state protoimpl.MessageState 
`protogen:"open.v1"` + JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` + SpaceId string `protobuf:"bytes,2,opt,name=space_id,json=spaceId,proto3" json:"space_id,omitempty"` + Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"` + Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` + ApprovalState string `protobuf:"bytes,5,opt,name=approval_state,json=approvalState,proto3" json:"approval_state,omitempty"` + DiffAccuracy float64 `protobuf:"fixed64,6,opt,name=diff_accuracy,json=diffAccuracy,proto3" json:"diff_accuracy,omitempty"` + PartialRelease bool `protobuf:"varint,7,opt,name=partial_release,json=partialRelease,proto3" json:"partial_release,omitempty"` + RollbackCount int32 `protobuf:"varint,8,opt,name=rollback_count,json=rollbackCount,proto3" json:"rollback_count,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + PublishedAt *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=published_at,json=publishedAt,proto3" json:"published_at,omitempty"` + ReportJson string `protobuf:"bytes,11,opt,name=report_json,json=reportJson,proto3" json:"report_json,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeltaJob) Reset() { + *x = DeltaJob{} + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeltaJob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeltaJob) ProtoMessage() {} + +func (x *DeltaJob) ProtoReflect() protoreflect.Message { + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: 
Use DeltaJob.ProtoReflect.Descriptor instead. +func (*DeltaJob) Descriptor() ([]byte, []int) { + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{27} +} + +func (x *DeltaJob) GetJobId() string { + if x != nil { + return x.JobId + } + return "" +} + +func (x *DeltaJob) GetSpaceId() string { + if x != nil { + return x.SpaceId + } + return "" +} + +func (x *DeltaJob) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +func (x *DeltaJob) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *DeltaJob) GetApprovalState() string { + if x != nil { + return x.ApprovalState + } + return "" +} + +func (x *DeltaJob) GetDiffAccuracy() float64 { + if x != nil { + return x.DiffAccuracy + } + return 0 +} + +func (x *DeltaJob) GetPartialRelease() bool { + if x != nil { + return x.PartialRelease + } + return false +} + +func (x *DeltaJob) GetRollbackCount() int32 { + if x != nil { + return x.RollbackCount + } + return 0 +} + +func (x *DeltaJob) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *DeltaJob) GetPublishedAt() *timestamppb.Timestamp { + if x != nil { + return x.PublishedAt + } + return nil +} + +func (x *DeltaJob) GetReportJson() string { + if x != nil { + return x.ReportJson + } + return "" +} + +type StartDeltaJobRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SpaceId string `protobuf:"bytes,1,opt,name=space_id,json=spaceId,proto3" json:"space_id,omitempty"` + Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + PackageUri string `protobuf:"bytes,3,opt,name=package_uri,json=packageUri,proto3" json:"package_uri,omitempty"` + DiffAccuracy float64 `protobuf:"fixed64,4,opt,name=diff_accuracy,json=diffAccuracy,proto3" json:"diff_accuracy,omitempty"` + RequestedBy string `protobuf:"bytes,5,opt,name=requested_by,json=requestedBy,proto3" json:"requested_by,omitempty"` + Notes string 
`protobuf:"bytes,6,opt,name=notes,proto3" json:"notes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartDeltaJobRequest) Reset() { + *x = StartDeltaJobRequest{} + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartDeltaJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartDeltaJobRequest) ProtoMessage() {} + +func (x *StartDeltaJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[28] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1705,7 +2186,7 @@ func (x *StartDeltaJobRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartDeltaJobRequest.ProtoReflect.Descriptor instead. func (*StartDeltaJobRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{22} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{28} } func (x *StartDeltaJobRequest) GetSpaceId() string { @@ -1759,7 +2240,7 @@ type StartDeltaJobResponse struct { func (x *StartDeltaJobResponse) Reset() { *x = StartDeltaJobResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[23] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1771,7 +2252,7 @@ func (x *StartDeltaJobResponse) String() string { func (*StartDeltaJobResponse) ProtoMessage() {} func (x *StartDeltaJobResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[23] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[29] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1784,7 +2265,7 @@ 
func (x *StartDeltaJobResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StartDeltaJobResponse.ProtoReflect.Descriptor instead. func (*StartDeltaJobResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{23} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{29} } func (x *StartDeltaJobResponse) GetJob() *DeltaJob { @@ -1803,7 +2284,7 @@ type GetDeltaReportRequest struct { func (x *GetDeltaReportRequest) Reset() { *x = GetDeltaReportRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[24] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1815,7 +2296,7 @@ func (x *GetDeltaReportRequest) String() string { func (*GetDeltaReportRequest) ProtoMessage() {} func (x *GetDeltaReportRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[24] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[30] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1828,7 +2309,7 @@ func (x *GetDeltaReportRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetDeltaReportRequest.ProtoReflect.Descriptor instead. 
func (*GetDeltaReportRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{24} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{30} } func (x *GetDeltaReportRequest) GetJobId() string { @@ -1847,7 +2328,7 @@ type GetDeltaReportResponse struct { func (x *GetDeltaReportResponse) Reset() { *x = GetDeltaReportResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[25] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1859,7 +2340,7 @@ func (x *GetDeltaReportResponse) String() string { func (*GetDeltaReportResponse) ProtoMessage() {} func (x *GetDeltaReportResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[25] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1872,7 +2353,7 @@ func (x *GetDeltaReportResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetDeltaReportResponse.ProtoReflect.Descriptor instead. 
func (*GetDeltaReportResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{25} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{31} } func (x *GetDeltaReportResponse) GetJob() *DeltaJob { @@ -1895,7 +2376,7 @@ type PublishDeltaJobRequest struct { func (x *PublishDeltaJobRequest) Reset() { *x = PublishDeltaJobRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[26] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1907,7 +2388,7 @@ func (x *PublishDeltaJobRequest) String() string { func (*PublishDeltaJobRequest) ProtoMessage() {} func (x *PublishDeltaJobRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[26] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1920,7 +2401,7 @@ func (x *PublishDeltaJobRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PublishDeltaJobRequest.ProtoReflect.Descriptor instead. 
func (*PublishDeltaJobRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{26} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{32} } func (x *PublishDeltaJobRequest) GetJobId() string { @@ -1967,7 +2448,7 @@ type PublishDeltaJobResponse struct { func (x *PublishDeltaJobResponse) Reset() { *x = PublishDeltaJobResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[27] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1979,7 +2460,7 @@ func (x *PublishDeltaJobResponse) String() string { func (*PublishDeltaJobResponse) ProtoMessage() {} func (x *PublishDeltaJobResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[27] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1992,7 +2473,7 @@ func (x *PublishDeltaJobResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PublishDeltaJobResponse.ProtoReflect.Descriptor instead. 
func (*PublishDeltaJobResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{27} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{33} } func (x *PublishDeltaJobResponse) GetJob() *DeltaJob { @@ -2013,7 +2494,7 @@ type RollbackDeltaRequest struct { func (x *RollbackDeltaRequest) Reset() { *x = RollbackDeltaRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[28] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2025,7 +2506,7 @@ func (x *RollbackDeltaRequest) String() string { func (*RollbackDeltaRequest) ProtoMessage() {} func (x *RollbackDeltaRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[28] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[34] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2038,7 +2519,7 @@ func (x *RollbackDeltaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RollbackDeltaRequest.ProtoReflect.Descriptor instead. 
func (*RollbackDeltaRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{28} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{34} } func (x *RollbackDeltaRequest) GetJobId() string { @@ -2071,7 +2552,7 @@ type RollbackDeltaResponse struct { func (x *RollbackDeltaResponse) Reset() { *x = RollbackDeltaResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[29] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2083,7 +2564,7 @@ func (x *RollbackDeltaResponse) String() string { func (*RollbackDeltaResponse) ProtoMessage() {} func (x *RollbackDeltaResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[29] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[35] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2096,7 +2577,7 @@ func (x *RollbackDeltaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RollbackDeltaResponse.ProtoReflect.Descriptor instead. 
func (*RollbackDeltaResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{29} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{35} } func (x *RollbackDeltaResponse) GetJob() *DeltaJob { @@ -2119,7 +2600,7 @@ type ApplyEventRequest struct { func (x *ApplyEventRequest) Reset() { *x = ApplyEventRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[30] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2131,7 +2612,7 @@ func (x *ApplyEventRequest) String() string { func (*ApplyEventRequest) ProtoMessage() {} func (x *ApplyEventRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[30] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[36] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2144,7 +2625,7 @@ func (x *ApplyEventRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyEventRequest.ProtoReflect.Descriptor instead. 
func (*ApplyEventRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{30} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{36} } func (x *ApplyEventRequest) GetEventId() string { @@ -2193,7 +2674,7 @@ type ApplyEventResponse struct { func (x *ApplyEventResponse) Reset() { *x = ApplyEventResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[31] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2205,7 +2686,7 @@ func (x *ApplyEventResponse) String() string { func (*ApplyEventResponse) ProtoMessage() {} func (x *ApplyEventResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[31] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[37] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2218,7 +2699,7 @@ func (x *ApplyEventResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyEventResponse.ProtoReflect.Descriptor instead. 
func (*ApplyEventResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{31} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{37} } func (x *ApplyEventResponse) GetStatus() string { @@ -2255,7 +2736,7 @@ type RetryEventRequest struct { func (x *RetryEventRequest) Reset() { *x = RetryEventRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[32] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2267,7 +2748,7 @@ func (x *RetryEventRequest) String() string { func (*RetryEventRequest) ProtoMessage() {} func (x *RetryEventRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[32] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[38] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2280,7 +2761,7 @@ func (x *RetryEventRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RetryEventRequest.ProtoReflect.Descriptor instead. 
func (*RetryEventRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{32} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{38} } func (x *RetryEventRequest) GetEventId() string { @@ -2329,7 +2810,7 @@ type RetryEventResponse struct { func (x *RetryEventResponse) Reset() { *x = RetryEventResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[33] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2341,7 +2822,7 @@ func (x *RetryEventResponse) String() string { func (*RetryEventResponse) ProtoMessage() {} func (x *RetryEventResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[33] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[39] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2354,7 +2835,7 @@ func (x *RetryEventResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RetryEventResponse.ProtoReflect.Descriptor instead. 
func (*RetryEventResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{33} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{39} } func (x *RetryEventResponse) GetStatus() string { @@ -2388,7 +2869,7 @@ type HotUpdateRequest struct { func (x *HotUpdateRequest) Reset() { *x = HotUpdateRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[34] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2400,7 +2881,7 @@ func (x *HotUpdateRequest) String() string { func (*HotUpdateRequest) ProtoMessage() {} func (x *HotUpdateRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[34] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[40] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2413,7 +2894,7 @@ func (x *HotUpdateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use HotUpdateRequest.ProtoReflect.Descriptor instead. 
func (*HotUpdateRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{34} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{40} } func (x *HotUpdateRequest) GetSpaceId() string { @@ -2439,7 +2920,7 @@ type HotUpdateResponse struct { func (x *HotUpdateResponse) Reset() { *x = HotUpdateResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[35] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2451,7 +2932,7 @@ func (x *HotUpdateResponse) String() string { func (*HotUpdateResponse) ProtoMessage() {} func (x *HotUpdateResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[35] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[41] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2464,7 +2945,7 @@ func (x *HotUpdateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use HotUpdateResponse.ProtoReflect.Descriptor instead. 
func (*HotUpdateResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{35} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{41} } func (x *HotUpdateResponse) GetStatus() string { @@ -2483,7 +2964,7 @@ type RefreshAgentRequest struct { func (x *RefreshAgentRequest) Reset() { *x = RefreshAgentRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[36] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2495,7 +2976,7 @@ func (x *RefreshAgentRequest) String() string { func (*RefreshAgentRequest) ProtoMessage() {} func (x *RefreshAgentRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[36] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[42] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2508,7 +2989,7 @@ func (x *RefreshAgentRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RefreshAgentRequest.ProtoReflect.Descriptor instead. 
func (*RefreshAgentRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{36} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{42} } func (x *RefreshAgentRequest) GetTenantUuid() string { @@ -2527,7 +3008,7 @@ type RefreshAgentResponse struct { func (x *RefreshAgentResponse) Reset() { *x = RefreshAgentResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[37] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2539,7 +3020,7 @@ func (x *RefreshAgentResponse) String() string { func (*RefreshAgentResponse) ProtoMessage() {} func (x *RefreshAgentResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[37] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[43] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2552,7 +3033,7 @@ func (x *RefreshAgentResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RefreshAgentResponse.ProtoReflect.Descriptor instead. 
func (*RefreshAgentResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{37} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{43} } func (x *RefreshAgentResponse) GetStatus() string { @@ -2578,7 +3059,7 @@ type DecayTask struct { func (x *DecayTask) Reset() { *x = DecayTask{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[38] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2590,7 +3071,7 @@ func (x *DecayTask) String() string { func (*DecayTask) ProtoMessage() {} func (x *DecayTask) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[38] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[44] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2603,7 +3084,7 @@ func (x *DecayTask) ProtoReflect() protoreflect.Message { // Deprecated: Use DecayTask.ProtoReflect.Descriptor instead. 
func (*DecayTask) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{38} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{44} } func (x *DecayTask) GetTaskId() string { @@ -2672,7 +3153,7 @@ type RunDecayScanRequest struct { func (x *RunDecayScanRequest) Reset() { *x = RunDecayScanRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[39] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2684,7 +3165,7 @@ func (x *RunDecayScanRequest) String() string { func (*RunDecayScanRequest) ProtoMessage() {} func (x *RunDecayScanRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[39] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[45] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2697,7 +3178,7 @@ func (x *RunDecayScanRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RunDecayScanRequest.ProtoReflect.Descriptor instead. 
func (*RunDecayScanRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{39} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{45} } func (x *RunDecayScanRequest) GetSpaceId() string { @@ -2723,7 +3204,7 @@ type RunDecayScanResponse struct { func (x *RunDecayScanResponse) Reset() { *x = RunDecayScanResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[40] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2735,7 +3216,7 @@ func (x *RunDecayScanResponse) String() string { func (*RunDecayScanResponse) ProtoMessage() {} func (x *RunDecayScanResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[40] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[46] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2748,7 +3229,7 @@ func (x *RunDecayScanResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RunDecayScanResponse.ProtoReflect.Descriptor instead. 
func (*RunDecayScanResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{40} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{46} } func (x *RunDecayScanResponse) GetTasks() []*DecayTask { @@ -2767,7 +3248,7 @@ type ListDecayTasksRequest struct { func (x *ListDecayTasksRequest) Reset() { *x = ListDecayTasksRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[41] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2779,7 +3260,7 @@ func (x *ListDecayTasksRequest) String() string { func (*ListDecayTasksRequest) ProtoMessage() {} func (x *ListDecayTasksRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[41] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[47] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2792,7 +3273,7 @@ func (x *ListDecayTasksRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListDecayTasksRequest.ProtoReflect.Descriptor instead. 
func (*ListDecayTasksRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{41} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{47} } func (x *ListDecayTasksRequest) GetSpaceId() string { @@ -2811,7 +3292,7 @@ type ListDecayTasksResponse struct { func (x *ListDecayTasksResponse) Reset() { *x = ListDecayTasksResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[42] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2823,7 +3304,7 @@ func (x *ListDecayTasksResponse) String() string { func (*ListDecayTasksResponse) ProtoMessage() {} func (x *ListDecayTasksResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[42] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[48] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2836,7 +3317,7 @@ func (x *ListDecayTasksResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListDecayTasksResponse.ProtoReflect.Descriptor instead. 
func (*ListDecayTasksResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{42} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{48} } func (x *ListDecayTasksResponse) GetTasks() []*DecayTask { @@ -2857,7 +3338,7 @@ type RestoreDecayTaskRequest struct { func (x *RestoreDecayTaskRequest) Reset() { *x = RestoreDecayTaskRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[43] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2869,7 +3350,7 @@ func (x *RestoreDecayTaskRequest) String() string { func (*RestoreDecayTaskRequest) ProtoMessage() {} func (x *RestoreDecayTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[43] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[49] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2882,7 +3363,7 @@ func (x *RestoreDecayTaskRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RestoreDecayTaskRequest.ProtoReflect.Descriptor instead. 
func (*RestoreDecayTaskRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{43} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{49} } func (x *RestoreDecayTaskRequest) GetTaskId() string { @@ -2915,7 +3396,7 @@ type RestoreDecayTaskResponse struct { func (x *RestoreDecayTaskResponse) Reset() { *x = RestoreDecayTaskResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[44] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2927,7 +3408,7 @@ func (x *RestoreDecayTaskResponse) String() string { func (*RestoreDecayTaskResponse) ProtoMessage() {} func (x *RestoreDecayTaskResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[44] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[50] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2940,7 +3421,7 @@ func (x *RestoreDecayTaskResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RestoreDecayTaskResponse.ProtoReflect.Descriptor instead. 
func (*RestoreDecayTaskResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{44} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{50} } func (x *RestoreDecayTaskResponse) GetTask() *DecayTask { @@ -2964,7 +3445,7 @@ type UpsertReleasePolicyRequest struct { func (x *UpsertReleasePolicyRequest) Reset() { *x = UpsertReleasePolicyRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[45] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2976,7 +3457,7 @@ func (x *UpsertReleasePolicyRequest) String() string { func (*UpsertReleasePolicyRequest) ProtoMessage() {} func (x *UpsertReleasePolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[45] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[51] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2989,7 +3470,7 @@ func (x *UpsertReleasePolicyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpsertReleasePolicyRequest.ProtoReflect.Descriptor instead. 
func (*UpsertReleasePolicyRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{45} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{51} } func (x *UpsertReleasePolicyRequest) GetMatrixVersion() string { @@ -3044,7 +3525,7 @@ type ReleaseBatch struct { func (x *ReleaseBatch) Reset() { *x = ReleaseBatch{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[46] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3056,7 +3537,7 @@ func (x *ReleaseBatch) String() string { func (*ReleaseBatch) ProtoMessage() {} func (x *ReleaseBatch) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[46] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[52] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3069,7 +3550,7 @@ func (x *ReleaseBatch) ProtoReflect() protoreflect.Message { // Deprecated: Use ReleaseBatch.ProtoReflect.Descriptor instead. 
func (*ReleaseBatch) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{46} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{52} } func (x *ReleaseBatch) GetName() string { @@ -3096,7 +3577,7 @@ type UpsertReleasePolicyResponse struct { func (x *UpsertReleasePolicyResponse) Reset() { *x = UpsertReleasePolicyResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[47] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3108,7 +3589,7 @@ func (x *UpsertReleasePolicyResponse) String() string { func (*UpsertReleasePolicyResponse) ProtoMessage() {} func (x *UpsertReleasePolicyResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[47] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[53] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3121,7 +3602,7 @@ func (x *UpsertReleasePolicyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use UpsertReleasePolicyResponse.ProtoReflect.Descriptor instead. 
func (*UpsertReleasePolicyResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{47} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{53} } func (x *UpsertReleasePolicyResponse) GetPolicyId() string { @@ -3149,7 +3630,7 @@ type PublishReleaseRequest struct { func (x *PublishReleaseRequest) Reset() { *x = PublishReleaseRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[48] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3161,7 +3642,7 @@ func (x *PublishReleaseRequest) String() string { func (*PublishReleaseRequest) ProtoMessage() {} func (x *PublishReleaseRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[48] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[54] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3174,7 +3655,7 @@ func (x *PublishReleaseRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PublishReleaseRequest.ProtoReflect.Descriptor instead. 
func (*PublishReleaseRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{48} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{54} } func (x *PublishReleaseRequest) GetPolicyId() string { @@ -3211,7 +3692,7 @@ type PublishReleaseResponse struct { func (x *PublishReleaseResponse) Reset() { *x = PublishReleaseResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[49] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3223,7 +3704,7 @@ func (x *PublishReleaseResponse) String() string { func (*PublishReleaseResponse) ProtoMessage() {} func (x *PublishReleaseResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[49] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[55] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3236,7 +3717,7 @@ func (x *PublishReleaseResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PublishReleaseResponse.ProtoReflect.Descriptor instead. 
func (*PublishReleaseResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{49} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{55} } func (x *PublishReleaseResponse) GetReleaseId() string { @@ -3287,7 +3768,7 @@ type PromoteReleaseRequest struct { func (x *PromoteReleaseRequest) Reset() { *x = PromoteReleaseRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[50] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3299,7 +3780,7 @@ func (x *PromoteReleaseRequest) String() string { func (*PromoteReleaseRequest) ProtoMessage() {} func (x *PromoteReleaseRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[50] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[56] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3312,7 +3793,7 @@ func (x *PromoteReleaseRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PromoteReleaseRequest.ProtoReflect.Descriptor instead. 
func (*PromoteReleaseRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{50} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{56} } func (x *PromoteReleaseRequest) GetPolicyId() string { @@ -3363,7 +3844,7 @@ type PromoteReleaseResponse struct { func (x *PromoteReleaseResponse) Reset() { *x = PromoteReleaseResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[51] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3375,7 +3856,7 @@ func (x *PromoteReleaseResponse) String() string { func (*PromoteReleaseResponse) ProtoMessage() {} func (x *PromoteReleaseResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[51] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[57] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3388,7 +3869,7 @@ func (x *PromoteReleaseResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PromoteReleaseResponse.ProtoReflect.Descriptor instead. 
func (*PromoteReleaseResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{51} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{57} } func (x *PromoteReleaseResponse) GetNextBatchToken() string { @@ -3438,7 +3919,7 @@ type RollbackReleaseRequest struct { func (x *RollbackReleaseRequest) Reset() { *x = RollbackReleaseRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[52] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3450,7 +3931,7 @@ func (x *RollbackReleaseRequest) String() string { func (*RollbackReleaseRequest) ProtoMessage() {} func (x *RollbackReleaseRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[52] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[58] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3463,7 +3944,7 @@ func (x *RollbackReleaseRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RollbackReleaseRequest.ProtoReflect.Descriptor instead. 
func (*RollbackReleaseRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{52} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{58} } func (x *RollbackReleaseRequest) GetPolicyId() string { @@ -3503,7 +3984,7 @@ type RollbackReleaseResponse struct { func (x *RollbackReleaseResponse) Reset() { *x = RollbackReleaseResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[53] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3515,7 +3996,7 @@ func (x *RollbackReleaseResponse) String() string { func (*RollbackReleaseResponse) ProtoMessage() {} func (x *RollbackReleaseResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[53] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[59] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3528,7 +4009,7 @@ func (x *RollbackReleaseResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RollbackReleaseResponse.ProtoReflect.Descriptor instead. 
func (*RollbackReleaseResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{53} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{59} } func (x *RollbackReleaseResponse) GetStatus() string { @@ -3551,7 +4032,7 @@ type QARetrievalPlanRequest struct { func (x *QARetrievalPlanRequest) Reset() { *x = QARetrievalPlanRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[54] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3563,7 +4044,7 @@ func (x *QARetrievalPlanRequest) String() string { func (*QARetrievalPlanRequest) ProtoMessage() {} func (x *QARetrievalPlanRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[54] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[60] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3576,7 +4057,7 @@ func (x *QARetrievalPlanRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QARetrievalPlanRequest.ProtoReflect.Descriptor instead. 
func (*QARetrievalPlanRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{54} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{60} } func (x *QARetrievalPlanRequest) GetTenantUuid() string { @@ -3627,7 +4108,7 @@ type QACandidateSpace struct { func (x *QACandidateSpace) Reset() { *x = QACandidateSpace{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[55] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3639,7 +4120,7 @@ func (x *QACandidateSpace) String() string { func (*QACandidateSpace) ProtoMessage() {} func (x *QACandidateSpace) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[55] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[61] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3652,7 +4133,7 @@ func (x *QACandidateSpace) ProtoReflect() protoreflect.Message { // Deprecated: Use QACandidateSpace.ProtoReflect.Descriptor instead. 
func (*QACandidateSpace) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{55} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{61} } func (x *QACandidateSpace) GetSpaceId() string { @@ -3702,7 +4183,7 @@ type QAToolMetadata struct { func (x *QAToolMetadata) Reset() { *x = QAToolMetadata{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[56] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3714,7 +4195,7 @@ func (x *QAToolMetadata) String() string { func (*QAToolMetadata) ProtoMessage() {} func (x *QAToolMetadata) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[56] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[62] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3727,7 +4208,7 @@ func (x *QAToolMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use QAToolMetadata.ProtoReflect.Descriptor instead. 
func (*QAToolMetadata) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{56} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{62} } func (x *QAToolMetadata) GetToolId() string { @@ -3768,7 +4249,7 @@ type QATelemetry struct { func (x *QATelemetry) Reset() { *x = QATelemetry{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[57] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3780,7 +4261,7 @@ func (x *QATelemetry) String() string { func (*QATelemetry) ProtoMessage() {} func (x *QATelemetry) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[57] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[63] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3793,7 +4274,7 @@ func (x *QATelemetry) ProtoReflect() protoreflect.Message { // Deprecated: Use QATelemetry.ProtoReflect.Descriptor instead. 
func (*QATelemetry) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{57} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{63} } func (x *QATelemetry) GetTraceId() string { @@ -3810,24 +4291,95 @@ func (x *QATelemetry) GetRecordedAt() *timestamppb.Timestamp { return nil } +type QAPlanStage struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CandidateCount int32 `protobuf:"varint,2,opt,name=candidate_count,json=candidateCount,proto3" json:"candidate_count,omitempty"` + LatencyMs int32 `protobuf:"varint,3,opt,name=latency_ms,json=latencyMs,proto3" json:"latency_ms,omitempty"` + DegradeReason string `protobuf:"bytes,4,opt,name=degrade_reason,json=degradeReason,proto3" json:"degrade_reason,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *QAPlanStage) Reset() { + *x = QAPlanStage{} + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *QAPlanStage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QAPlanStage) ProtoMessage() {} + +func (x *QAPlanStage) ProtoReflect() protoreflect.Message { + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[64] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QAPlanStage.ProtoReflect.Descriptor instead. 
+func (*QAPlanStage) Descriptor() ([]byte, []int) { + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{64} +} + +func (x *QAPlanStage) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *QAPlanStage) GetCandidateCount() int32 { + if x != nil { + return x.CandidateCount + } + return 0 +} + +func (x *QAPlanStage) GetLatencyMs() int32 { + if x != nil { + return x.LatencyMs + } + return 0 +} + +func (x *QAPlanStage) GetDegradeReason() string { + if x != nil { + return x.DegradeReason + } + return "" +} + type QARetrievalPlanResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - TenantUuid string `protobuf:"bytes,1,opt,name=tenant_uuid,json=tenantUuid,proto3" json:"tenant_uuid,omitempty"` - Intent string `protobuf:"bytes,2,opt,name=intent,proto3" json:"intent,omitempty"` - DomainTags []string `protobuf:"bytes,3,rep,name=domain_tags,json=domainTags,proto3" json:"domain_tags,omitempty"` - CandidateSpaces []*QACandidateSpace `protobuf:"bytes,4,rep,name=candidate_spaces,json=candidateSpaces,proto3" json:"candidate_spaces,omitempty"` - Toolings []*QAToolMetadata `protobuf:"bytes,5,rep,name=toolings,proto3" json:"toolings,omitempty"` - Telemetry *QATelemetry `protobuf:"bytes,6,opt,name=telemetry,proto3" json:"telemetry,omitempty"` - DegradeCount int32 `protobuf:"varint,7,opt,name=degrade_count,json=degradeCount,proto3" json:"degrade_count,omitempty"` - SessionId string `protobuf:"bytes,8,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` - LatencyBudgetMs int32 `protobuf:"varint,9,opt,name=latency_budget_ms,json=latencyBudgetMs,proto3" json:"latency_budget_ms,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + TenantUuid string `protobuf:"bytes,1,opt,name=tenant_uuid,json=tenantUuid,proto3" json:"tenant_uuid,omitempty"` + Intent string `protobuf:"bytes,2,opt,name=intent,proto3" 
json:"intent,omitempty"` + DomainTags []string `protobuf:"bytes,3,rep,name=domain_tags,json=domainTags,proto3" json:"domain_tags,omitempty"` + CandidateSpaces []*QACandidateSpace `protobuf:"bytes,4,rep,name=candidate_spaces,json=candidateSpaces,proto3" json:"candidate_spaces,omitempty"` + Toolings []*QAToolMetadata `protobuf:"bytes,5,rep,name=toolings,proto3" json:"toolings,omitempty"` + Telemetry *QATelemetry `protobuf:"bytes,6,opt,name=telemetry,proto3" json:"telemetry,omitempty"` + DegradeCount int32 `protobuf:"varint,7,opt,name=degrade_count,json=degradeCount,proto3" json:"degrade_count,omitempty"` + SessionId string `protobuf:"bytes,8,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + LatencyBudgetMs int32 `protobuf:"varint,9,opt,name=latency_budget_ms,json=latencyBudgetMs,proto3" json:"latency_budget_ms,omitempty"` + Stages []*QAPlanStage `protobuf:"bytes,10,rep,name=stages,proto3" json:"stages,omitempty"` + PolicyVersionSnapshot map[string]string `protobuf:"bytes,11,rep,name=policy_version_snapshot,json=policyVersionSnapshot,proto3" json:"policy_version_snapshot,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Metadata *structpb.Struct `protobuf:"bytes,12,opt,name=metadata,proto3" json:"metadata,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *QARetrievalPlanResponse) Reset() { *x = QARetrievalPlanResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[58] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3839,7 +4391,7 @@ func (x *QARetrievalPlanResponse) String() string { func (*QARetrievalPlanResponse) ProtoMessage() {} func (x *QARetrievalPlanResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[58] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[65] 
if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3852,7 +4404,7 @@ func (x *QARetrievalPlanResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use QARetrievalPlanResponse.ProtoReflect.Descriptor instead. func (*QARetrievalPlanResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{58} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{65} } func (x *QARetrievalPlanResponse) GetTenantUuid() string { @@ -3918,6 +4470,27 @@ func (x *QARetrievalPlanResponse) GetLatencyBudgetMs() int32 { return 0 } +func (x *QARetrievalPlanResponse) GetStages() []*QAPlanStage { + if x != nil { + return x.Stages + } + return nil +} + +func (x *QARetrievalPlanResponse) GetPolicyVersionSnapshot() map[string]string { + if x != nil { + return x.PolicyVersionSnapshot + } + return nil +} + +func (x *QARetrievalPlanResponse) GetMetadata() *structpb.Struct { + if x != nil { + return x.Metadata + } + return nil +} + type QAMemoryUpdate struct { state protoimpl.MessageState `protogen:"open.v1"` ChunkId string `protobuf:"bytes,1,opt,name=chunk_id,json=chunkId,proto3" json:"chunk_id,omitempty"` @@ -3933,7 +4506,7 @@ type QAMemoryUpdate struct { func (x *QAMemoryUpdate) Reset() { *x = QAMemoryUpdate{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[59] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3945,7 +4518,7 @@ func (x *QAMemoryUpdate) String() string { func (*QAMemoryUpdate) ProtoMessage() {} func (x *QAMemoryUpdate) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[59] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[66] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3958,7 +4531,7 @@ func (x 
*QAMemoryUpdate) ProtoReflect() protoreflect.Message { // Deprecated: Use QAMemoryUpdate.ProtoReflect.Descriptor instead. func (*QAMemoryUpdate) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{59} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{66} } func (x *QAMemoryUpdate) GetChunkId() string { @@ -4015,13 +4588,14 @@ type QAMemorySnapshotRequest struct { TenantUuid string `protobuf:"bytes,1,opt,name=tenant_uuid,json=tenantUuid,proto3" json:"tenant_uuid,omitempty"` SessionId string `protobuf:"bytes,2,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` Updates []*QAMemoryUpdate `protobuf:"bytes,3,rep,name=updates,proto3" json:"updates,omitempty"` + TraceId string `protobuf:"bytes,4,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *QAMemorySnapshotRequest) Reset() { *x = QAMemorySnapshotRequest{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[60] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4033,7 +4607,7 @@ func (x *QAMemorySnapshotRequest) String() string { func (*QAMemorySnapshotRequest) ProtoMessage() {} func (x *QAMemorySnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[60] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[67] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4046,7 +4620,7 @@ func (x *QAMemorySnapshotRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QAMemorySnapshotRequest.ProtoReflect.Descriptor instead. 
func (*QAMemorySnapshotRequest) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{60} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{67} } func (x *QAMemorySnapshotRequest) GetTenantUuid() string { @@ -4070,6 +4644,13 @@ func (x *QAMemorySnapshotRequest) GetUpdates() []*QAMemoryUpdate { return nil } +func (x *QAMemorySnapshotRequest) GetTraceId() string { + if x != nil { + return x.TraceId + } + return "" +} + type QACitationSummary struct { state protoimpl.MessageState `protogen:"open.v1"` ChunkId string `protobuf:"bytes,1,opt,name=chunk_id,json=chunkId,proto3" json:"chunk_id,omitempty"` @@ -4085,7 +4666,7 @@ type QACitationSummary struct { func (x *QACitationSummary) Reset() { *x = QACitationSummary{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[61] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4097,7 +4678,7 @@ func (x *QACitationSummary) String() string { func (*QACitationSummary) ProtoMessage() {} func (x *QACitationSummary) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[61] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[68] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4110,7 +4691,7 @@ func (x *QACitationSummary) ProtoReflect() protoreflect.Message { // Deprecated: Use QACitationSummary.ProtoReflect.Descriptor instead. 
func (*QACitationSummary) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{61} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{68} } func (x *QACitationSummary) GetChunkId() string { @@ -4167,13 +4748,14 @@ type QAMemorySnapshotResponse struct { TenantUuid string `protobuf:"bytes,1,opt,name=tenant_uuid,json=tenantUuid,proto3" json:"tenant_uuid,omitempty"` SessionId string `protobuf:"bytes,2,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` Citations []*QACitationSummary `protobuf:"bytes,3,rep,name=citations,proto3" json:"citations,omitempty"` + Metadata *structpb.Struct `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *QAMemorySnapshotResponse) Reset() { *x = QAMemorySnapshotResponse{} - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[62] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4185,7 +4767,7 @@ func (x *QAMemorySnapshotResponse) String() string { func (*QAMemorySnapshotResponse) ProtoMessage() {} func (x *QAMemorySnapshotResponse) ProtoReflect() protoreflect.Message { - mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[62] + mi := &file_powerx_knowledge_v1_knowledge_space_proto_msgTypes[69] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4198,7 +4780,7 @@ func (x *QAMemorySnapshotResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use QAMemorySnapshotResponse.ProtoReflect.Descriptor instead. 
func (*QAMemorySnapshotResponse) Descriptor() ([]byte, []int) { - return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{62} + return file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP(), []int{69} } func (x *QAMemorySnapshotResponse) GetTenantUuid() string { @@ -4222,6 +4804,13 @@ func (x *QAMemorySnapshotResponse) GetCitations() []*QACitationSummary { return nil } +func (x *QAMemorySnapshotResponse) GetMetadata() *structpb.Struct { + if x != nil { + return x.Metadata + } + return nil +} + var File_powerx_knowledge_v1_knowledge_space_proto protoreflect.FileDescriptor var file_powerx_knowledge_v1_knowledge_space_proto_rawDesc = string([]byte{ @@ -4229,863 +4818,1016 @@ var file_powerx_knowledge_v1_knowledge_space_proto_rawDesc = string([]byte{ 0x67, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, - 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0xc0, 0x03, 0x0a, 0x0e, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, - 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x55, 0x75, 0x69, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x65, 0x70, 0x61, 0x72, 0x74, 0x6d, 0x65, - 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x64, 0x65, 
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x64, - 0x65, 0x70, 0x61, 0x72, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x63, - 0x70, 0x75, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x43, - 0x70, 0x75, 0x12, 0x28, 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x5f, 0x67, 0x62, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x71, 0x75, - 0x6f, 0x74, 0x61, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x08, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0c, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, - 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x39, - 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 
0x64, 0x61, 0x74, - 0x65, 0x64, 0x41, 0x74, 0x22, 0xa4, 0x02, 0x0a, 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xc0, 0x03, 0x0a, 0x0e, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1f, 0x0a, + 0x0b, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x55, 0x75, 0x69, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x65, 0x70, 0x61, 0x72, 0x74, 0x6d, 0x65, 0x6e, 0x74, + 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x64, 0x65, 0x70, + 0x61, 0x72, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x63, 0x70, 0x75, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x70, 0x75, + 0x12, 0x28, 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x5f, 0x67, 0x62, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x71, 0x75, 0x6f, 0x74, + 0x61, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x65, + 0x61, 0x74, 0x75, 
0x72, 0x65, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, + 0x3b, 0x0a, 0x1a, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x17, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x0a, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x41, 0x74, 0x22, 0xa4, 0x02, 0x0a, 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x6e, 0x6f, + 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x75, 0x75, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x55, + 0x75, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x65, 0x70, 0x61, 0x72, + 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x64, 0x65, 0x70, 0x61, 0x72, 0x74, 0x6d, 
0x65, 0x6e, 0x74, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x63, 0x70, 0x75, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x08, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x70, 0x75, 0x12, 0x28, 0x0a, + 0x10, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x67, + 0x62, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x62, 0x12, 0x3b, 0x0a, 0x1a, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, + 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x22, 0x59, 0x0a, 0x1c, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4b, + 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x22, 0xf9, 0x01, 0x0a, 0x1b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x75, - 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6e, 0x61, 0x6e, 
- 0x74, 0x55, 0x75, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x65, 0x70, - 0x61, 0x72, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0e, 0x64, 0x65, 0x70, 0x61, 0x72, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, - 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x63, 0x70, 0x75, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x70, 0x75, 0x12, - 0x28, 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x5f, 0x67, 0x62, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x71, 0x75, 0x6f, 0x74, 0x61, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x62, 0x12, 0x3b, 0x0a, 0x1a, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x22, 0x59, 0x0a, 0x1c, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x6f, 0x77, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, + 0x1b, 0x0a, 0x09, 0x71, 0x75, 0x6f, 
0x74, 0x61, 0x5f, 0x63, 0x70, 0x75, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x70, 0x75, 0x12, 0x28, 0x0a, 0x10, + 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x67, 0x62, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x17, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x22, 0x59, 0x0a, 0x1c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, + 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x39, 0x0a, 0x05, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, + 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x50, 0x0a, 0x1b, 0x52, + 0x65, 0x74, 0x69, 0x72, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x07, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x59, 0x0a, + 0x1c, 0x52, 0x65, 0x74, 0x69, 0x72, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, + 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, + 0x05, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, + 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x05, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x03, 0x0a, 0x13, 0x49, 0x6e, 0x67, + 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x69, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x75, 0x72, 0x69, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x55, 0x72, 0x69, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x61, 0x73, 0x6b, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, + 0x61, 0x73, 
0x6b, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x5f, + 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x70, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x6f, 0x63, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x63, 0x72, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, + 0x62, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x65, 0x64, 0x42, 0x79, 0x22, 0xdd, 0x02, 0x0a, 0x12, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, + 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x15, 0x0a, 0x06, + 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, + 0x62, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, + 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x0a, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x2a, 0x0a, 0x11, + 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x70, 0x63, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x02, 0x52, 
0x0f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x43, 0x6f, + 0x76, 0x65, 0x72, 0x65, 0x64, 0x50, 0x63, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x6d, 0x62, 0x65, + 0x64, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x63, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x69, + 0x6e, 0x67, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x63, 0x74, 0x12, 0x30, 0x0a, 0x14, + 0x6d, 0x61, 0x73, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, + 0x5f, 0x70, 0x63, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x12, 0x6d, 0x61, 0x73, 0x6b, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x50, 0x63, 0x74, 0x12, 0x1f, + 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x52, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x51, 0x0a, 0x14, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, + 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, + 0x03, 0x6a, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x05, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xf9, 0x01, 0x0a, 0x1b, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 
0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x63, 0x70, 0x75, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x70, 0x75, 0x12, 0x28, - 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, - 0x67, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0c, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x3b, 0x0a, - 0x1a, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x17, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x22, 0x59, 0x0a, 0x1c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x6e, 0x6f, 0x77, - 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, - 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, - 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x50, 0x0a, - 0x1b, 0x52, 0x65, 0x74, 0x69, 
0x72, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, - 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, - 0x59, 0x0a, 0x1c, 0x52, 0x65, 0x74, 0x69, 0x72, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, - 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x39, 0x0a, 0x05, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, - 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x05, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xd2, 0x01, 0x0a, 0x13, 0x49, - 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x46, 0x75, 0x73, + 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, - 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 
0x1a, 0x0a, 0x08, 0x70, - 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, - 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x55, 0x72, 0x69, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x61, 0x73, 0x6b, 0x69, 0x6e, - 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0e, 0x6d, 0x61, 0x73, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x22, - 0xf6, 0x01, 0x0a, 0x12, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x16, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x63, 0x68, 0x75, 0x6e, - 0x6b, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, - 0x63, 0x6f, 0x76, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x70, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x02, 0x52, 0x0f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x43, 0x6f, 0x76, 0x65, 0x72, 0x65, 0x64, 0x50, - 0x63, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x5f, - 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x02, 0x52, 0x13, 0x65, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x50, 0x63, 0x74, 0x12, 0x30, 0x0a, 0x14, 0x6d, 0x61, 0x73, 0x6b, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x63, 0x74, 0x18, 0x06, - 0x20, 0x01, 
0x28, 0x02, 0x52, 0x12, 0x6d, 0x61, 0x73, 0x6b, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x76, - 0x65, 0x72, 0x61, 0x67, 0x65, 0x50, 0x63, 0x74, 0x22, 0x51, 0x0a, 0x14, 0x49, 0x6e, 0x67, 0x65, - 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x39, 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, - 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x22, 0x89, 0x02, 0x0a, 0x15, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x6d, 0x32, 0x35, 0x5f, 0x77, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x62, 0x6d, 0x32, 0x35, 0x57, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x77, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x76, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x67, 0x72, 0x61, + 0x70, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x72, 0x61, 0x6e, 0x6b, 0x65, 0x72, + 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, + 0x72, 0x61, 0x6e, 0x6b, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x63, + 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 
0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x22, 0x59, 0x0a, 0x16, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, + 0x0a, 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, + 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, + 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x22, + 0x4e, 0x0a, 0x1b, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, + 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, + 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, + 0x63, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, + 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x43, 0x0a, 0x0a, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, + 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, + 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x69, 0x65, 0x73, 0x22, 0x5b, 0x0a, 0x1d, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, - 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x49, + 0x64, 0x22, 0xf9, 0x04, 0x0a, 0x0e, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, + 0x74, 0x65, 0x67, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x61, 0x74, + 0x65, 0x67, 0x79, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, + 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x6d, 0x32, 0x35, 0x5f, 0x77, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x62, 0x6d, 0x32, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x62, 0x6d, 0x32, 0x35, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, + 0x72, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x67, 0x72, 0x61, 0x70, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x43, 0x6f, 0x6e, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 
0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x72, 0x61, 0x6e, - 0x6b, 0x65, 0x72, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x6b, 0x65, 0x72, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x72, 0x61, 0x6e, 0x6b, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, - 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x59, 0x0a, 0x16, 0x46, 0x75, 0x73, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x3f, 0x0a, 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, - 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, - 0x67, 0x79, 0x22, 0x4e, 0x0a, 0x1b, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5e, 0x0a, 0x10, 0x64, 0x65, 0x70, 0x6c, 0x6f, + 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x33, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, + 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 
0x6d, 0x65, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x6c, 0x69, + 0x73, 0x68, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x6c, 0x69, + 0x73, 0x68, 0x65, 0x64, 0x41, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x65, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0e, 0x64, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x22, + 0x8b, 0x01, 0x0a, 0x0f, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x1c, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, + 0x54, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x4d, + 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x44, 0x52, 0x41, 0x46, 0x54, 0x10, + 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x5f, + 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x1d, + 0x0a, 0x19, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, + 0x54, 0x45, 0x5f, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x22, 0xe9, 0x01, + 0x0a, 0x0f, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x22, 0x63, 
0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x43, 0x0a, 0x0a, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, - 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x73, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0a, 0x73, 0x74, 0x72, - 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x22, 0x5b, 0x0a, 0x1d, 0x52, 0x6f, 0x6c, 0x6c, 0x62, - 0x61, 0x63, 0x6b, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, - 0x67, 0x79, 0x49, 0x64, 0x22, 0xd0, 0x04, 0x0a, 0x0e, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x61, 0x74, - 0x65, 0x67, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x74, - 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x6d, 0x32, - 0x35, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, - 0x62, 0x6d, 0x32, 0x35, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 
0x12, 0x23, 0x0a, 0x0d, 0x76, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x0c, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, - 0x29, 0x0a, 0x10, 0x67, 0x72, 0x61, 0x70, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, - 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x67, 0x72, 0x61, 0x70, 0x68, - 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, - 0x72, 0x61, 0x6e, 0x6b, 0x65, 0x72, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x72, 0x61, 0x6e, 0x6b, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x65, - 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x66, - 0x6c, 0x69, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5e, 0x0a, 0x10, 0x64, 0x65, - 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, - 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x73, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, - 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x64, 0x65, 0x70, 0x6c, 0x6f, - 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x75, - 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x70, 0x75, - 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x41, 0x74, 0x22, 0x8b, 0x01, 0x0a, 0x0f, 0x44, 0x65, - 
0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, - 0x1c, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x54, - 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x1a, 0x0a, 0x16, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x54, - 0x41, 0x54, 0x45, 0x5f, 0x44, 0x52, 0x41, 0x46, 0x54, 0x10, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x44, - 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, - 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x45, 0x50, 0x4c, - 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x52, 0x4f, 0x4c, - 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x22, 0xe9, 0x01, 0x0a, 0x0f, 0x46, 0x65, 0x65, 0x64, - 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, - 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, - 0x74, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x69, 0x6e, 0x6b, - 0x65, 0x64, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0c, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x1d, 0x0a, - 0x0a, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x69, 0x73, 0x73, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x72, 0x65, 0x70, 
0x6f, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x12, 0x24, 0x0a, - 0x0e, 0x74, 0x6f, 0x6f, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x6f, 0x6f, 0x6c, 0x54, 0x72, 0x61, 0x63, 0x65, - 0x52, 0x65, 0x66, 0x22, 0x49, 0x0a, 0x10, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x63, 0x61, 0x73, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, - 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x65, 0x65, 0x64, - 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x52, 0x04, 0x63, 0x61, 0x73, 0x65, 0x22, 0x4b, - 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, + 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x74, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x43, 0x68, 0x75, + 0x6e, 0x6b, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x73, 0x73, 0x75, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x62, + 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x64, 0x42, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x6f, 0x6f, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x0c, 0x74, 0x6f, 0x6f, + 0x6c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x65, 0x66, 0x22, 0x49, 0x0a, 0x10, 0x46, 0x65, 0x65, + 0x64, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, + 0x04, 0x63, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x6f, + 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x52, 0x04, + 0x63, 0x61, 0x73, 0x65, 0x22, 0x4b, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x65, 0x65, 0x64, + 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x22, 0x54, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, + 0x6b, 0x43, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, + 0x0a, 0x05, 0x63, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, + 0x52, 0x05, 0x63, 0x61, 0x73, 0x65, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x18, 0x43, 0x6c, 0x6f, 0x73, + 0x65, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, + 0x17, 0x0a, 0x07, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 
0x63, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x72, + 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x1b, 0x45, 0x73, 0x63, 0x61, 0x6c, + 0x61, 0x74, 0x65, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, + 0x64, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x63, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x79, 0x12, 0x16, 0x0a, + 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x75, 0x0a, 0x1c, 0x52, 0x65, 0x70, 0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, + 0x12, 0x17, 0x0a, 0x07, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x63, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x62, 
0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x79, 0x22, 0x8c, 0x01, 0x0a, + 0x1b, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, + 0x6b, 0x43, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x61, 0x73, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x61, 0x73, 0x65, 0x49, 0x64, + 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x42, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x81, 0x01, 0x0a, 0x1a, + 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x54, 0x0a, 0x19, 0x4c, - 0x69, 0x73, 0x74, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x05, 0x63, 0x61, 0x73, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, - 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x65, - 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x52, 0x05, 0x63, 0x61, 0x73, 
0x65, - 0x73, 0x22, 0xec, 0x03, 0x0a, 0x0c, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, - 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, - 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, - 0x73, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x69, 0x73, 0x73, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x69, 0x6e, - 0x6b, 0x65, 0x64, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0c, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x1f, - 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x6f, 0x6f, 0x6c, 0x5f, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, - 0x6f, 0x6f, 0x6c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x65, 0x66, 0x12, 0x23, 0x0a, 0x0d, 0x71, - 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x0c, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x63, 0x6f, 0x72, 0x65, - 0x12, 0x38, 0x0a, 0x0a, 0x73, 
0x6c, 0x61, 0x5f, 0x64, 0x75, 0x65, 0x5f, 0x61, 0x74, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x08, 0x73, 0x6c, 0x61, 0x44, 0x75, 0x65, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, - 0x5f, 0x61, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, + 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, + 0x77, 0x0a, 0x1b, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, + 0x6b, 0x43, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, + 0x0a, 0x05, 0x63, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, + 0x52, 0x05, 0x63, 0x61, 0x73, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x0a, 0x65, 0x78, + 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x73, 0x6f, 0x6e, 0x22, 0x8f, 0x05, 0x0a, 0x0c, 0x46, 0x65, 0x65, + 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x61, 0x73, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x61, 0x73, 0x65, + 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, + 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x73, 0x73, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x43, + 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x64, 0x5f, 0x62, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0e, + 0x74, 0x6f, 0x6f, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x6f, 0x6f, 0x6c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x66, 0x12, 0x23, 0x0a, 0x0d, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x73, 0x63, + 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x71, 0x75, 0x61, 0x6c, 0x69, + 0x74, 
0x79, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x38, 0x0a, 0x0a, 0x73, 0x6c, 0x61, 0x5f, 0x64, + 0x75, 0x65, 0x5f, 0x61, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x73, 0x6c, 0x61, 0x44, 0x75, 0x65, 0x41, + 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x73, 0x63, 0x61, 0x6c, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x65, 0x73, 0x63, 0x61, 0x6c, + 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, + 0x5f, 0x61, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, - 0x22, 0xa3, 0x03, 0x0a, 0x08, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x12, 0x15, 0x0a, - 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, - 0x6f, 0x62, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x25, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, - 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x61, - 0x63, 0x63, 0x75, 0x72, 0x61, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x64, - 0x69, 0x66, 0x66, 0x41, 0x63, 0x63, 0x75, 0x72, 0x61, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x70, - 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x6c, - 0x65, 0x61, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x72, 0x6f, - 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, - 0x68, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 
0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, - 0x68, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, - 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x4a, 0x73, 0x6f, 0x6e, 0x22, 0xc8, 0x01, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, 0x75, 0x72, - 0x69, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x55, 0x72, 0x69, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x61, 0x63, 0x63, 0x75, - 0x72, 0x61, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x64, 0x69, 0x66, 0x66, - 0x41, 0x63, 0x63, 0x75, 0x72, 0x61, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6e, - 0x6f, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x65, - 0x73, 0x22, 0x48, 0x0a, 0x15, 0x53, 0x74, 0x61, 0x72, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, - 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x03, 0x6a, 0x6f, - 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, - 0x2e, 0x6b, 0x6e, 0x6f, 
0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, - 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x22, 0x2e, 0x0a, 0x15, 0x47, - 0x65, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x22, 0x49, 0x0a, 0x16, 0x47, - 0x65, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, - 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, - 0x62, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x22, 0xba, 0x01, 0x0a, 0x16, 0x50, 0x75, 0x62, 0x6c, 0x69, - 0x73, 0x68, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x63, 0x69, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x65, 0x63, 0x69, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x64, - 0x5f, 0x62, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x72, 0x6f, - 0x76, 0x65, 0x64, 0x42, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x61, 0x63, - 0x63, 0x75, 0x72, 0x61, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x64, 0x69, - 0x66, 0x66, 0x41, 0x63, 0x63, 0x75, 0x72, 0x61, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x61, - 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 
0x61, 0x6c, 0x52, 0x65, 0x6c, 0x65, - 0x61, 0x73, 0x65, 0x22, 0x4a, 0x0a, 0x17, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x44, 0x65, - 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, - 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x6f, - 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x22, - 0x68, 0x0a, 0x14, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x44, 0x65, 0x6c, 0x74, 0x61, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x21, - 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, - 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x15, 0x52, 0x6f, 0x6c, - 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, - 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x52, 0x03, - 0x6a, 0x6f, 0x62, 0x22, 0xb6, 0x02, 0x0a, 0x11, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, - 
0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, - 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x61, - 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x41, 0x74, 0x12, - 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x1a, 0x3a, 0x0a, 0x0c, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x86, 0x01, 0x0a, - 0x12, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 
0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, - 0x73, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, - 0x73, 0x65, 0x64, 0x41, 0x74, 0x22, 0xb6, 0x02, 0x0a, 0x11, 0x52, 0x65, 0x74, 0x72, 0x79, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, - 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, - 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x70, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, - 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x41, 0x74, 0x12, + 0x29, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x6c, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x22, 0xa3, 0x03, 0x0a, 0x08, 0x44, + 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x12, 0x15, 0x0a, 0x06, 0x6a, 
0x6f, 0x62, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x19, + 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x70, 0x70, + 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x61, 0x63, 0x63, 0x75, 0x72, 0x61, 0x63, + 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x64, 0x69, 0x66, 0x66, 0x41, 0x63, 0x63, + 0x75, 0x72, 0x61, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, + 0x5f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x72, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x72, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x5f, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x41, - 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x18, 0x05, 
0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x1a, 0x3a, 0x0a, 0x0c, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x86, - 0x01, 0x0a, 0x12, 0x52, 0x65, 0x74, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, - 0x08, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x63, - 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x63, - 0x65, 0x73, 0x73, 0x65, 0x64, 0x41, 0x74, 0x22, 0x45, 0x0a, 0x10, 0x48, 0x6f, 0x74, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x2b, - 0x0a, 0x11, 0x48, 0x6f, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 
0x74, 0x75, 0x73, 0x22, 0x36, 0x0a, 0x13, 0x52, - 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x75, 0x75, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x55, - 0x75, 0x69, 0x64, 0x22, 0x2e, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x41, 0x67, - 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x22, 0xad, 0x02, 0x0a, 0x09, 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, - 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, - 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x65, - 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, - 0x41, 0x74, 0x12, 0x38, 0x0a, 0x0a, 0x73, 0x6c, 0x61, 0x5f, 0x64, 0x75, 0x65, 0x5f, 
0x61, 0x74, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, + 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x61, 0x74, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x08, 0x73, 0x6c, 0x61, 0x44, 0x75, 0x65, 0x41, 0x74, 0x12, 0x25, 0x0a, 0x0e, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x76, 0x65, 0x22, 0x4c, 0x0a, 0x13, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x63, 0x61, 0x79, 0x53, - 0x63, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x65, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x65, - 0x64, 0x22, 0x4c, 0x0a, 0x14, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x63, 0x61, 0x79, 0x53, 0x63, 0x61, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x74, 0x61, 0x73, - 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, - 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, - 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x22, - 0x32, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x5f, 0x69, 0x64, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x49, 0x64, 0x22, 0x4e, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x65, 0x63, 0x61, 0x79, - 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, - 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x70, + 0x6d, 0x70, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x41, 0x74, 0x12, + 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x73, 0x6f, 0x6e, + 0x22, 0xc8, 0x01, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x72, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, + 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x55, 0x72, 0x69, 0x12, 0x23, 0x0a, + 0x0d, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x61, 0x63, 0x63, 0x75, 0x72, 0x61, 0x63, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x64, 0x69, 0x66, 0x66, 0x41, 0x63, 0x63, 0x75, 0x72, 0x61, + 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, + 0x62, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x65, 0x64, 0x42, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x22, 0x48, 0x0a, 0x15, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 
0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, + 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, + 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x22, 0x2e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, + 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x22, 0x49, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2f, 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x05, 0x74, 0x61, - 0x73, 0x6b, 0x73, 0x22, 0x6f, 0x0a, 0x17, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, - 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, - 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x74, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, - 0x0e, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x50, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x76, 0x65, 0x22, 0x4e, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, - 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 
0x12, 0x32, 0x0a, 0x04, 0x74, 0x61, 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, + 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x52, 0x03, 0x6a, 0x6f, 0x62, + 0x22, 0xba, 0x01, 0x0a, 0x16, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6a, + 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, + 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, + 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x64, 0x42, 0x79, 0x12, + 0x23, 0x0a, 0x0d, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x61, 0x63, 0x63, 0x75, 0x72, 0x61, 0x63, 0x79, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x64, 0x69, 0x66, 0x66, 0x41, 0x63, 0x63, 0x75, + 0x72, 0x61, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, + 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, + 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x22, 0x4a, 0x0a, + 0x17, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, + 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x4a, 0x6f, 0x62, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x22, 0x68, 0x0a, 0x14, 0x52, 0x6f, 0x6c, + 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6a, 
0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x15, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x44, + 0x65, 0x6c, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x03, + 0x6a, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x6f, 0x77, 0x65, + 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x22, 0xb6, 0x02, + 0x0a, 0x11, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4d, 0x0a, + 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x04, - 0x74, 0x61, 0x73, 0x6b, 0x22, 0x85, 0x03, 0x0a, 0x1a, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, - 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x72, 0x69, 0x78, 
0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, - 0x72, 0x69, 0x78, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x69, - 0x6c, 0x6f, 0x74, 0x5f, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0c, 0x70, 0x69, 0x6c, 0x6f, 0x74, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x12, - 0x3b, 0x0a, 0x07, 0x62, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x21, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, - 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x42, 0x61, - 0x74, 0x63, 0x68, 0x52, 0x07, 0x62, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x0a, - 0x67, 0x75, 0x61, 0x72, 0x64, 0x72, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x3f, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, - 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, - 0x65, 0x61, 0x73, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x2e, 0x47, 0x75, 0x61, 0x72, 0x64, 0x72, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0a, 0x67, 0x75, 0x61, 0x72, 0x64, 0x72, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1f, 0x0a, - 0x0b, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x64, 0x42, 0x79, 0x12, 0x1d, - 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x79, 0x1a, 0x3d, 0x0a, - 0x0f, 0x47, 0x75, 0x61, 0x72, 0x64, 0x72, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 
0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3c, 0x0a, 0x0c, - 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x07, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x22, 0x52, 0x0a, 0x1b, 0x55, 0x70, - 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x76, - 0x0a, 0x15, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, - 0x5f, 0x62, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x65, 0x64, 0x42, 0x79, 0x22, 0xb2, 0x01, 0x0a, 0x16, 0x50, 0x75, 0x62, 0x6c, 0x69, - 0x73, 0x68, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x6c, 0x65, 
0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x49, 0x64, - 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, - 0x1f, 0x0a, 0x0b, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xaf, 0x01, 0x0a, 0x15, - 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, - 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x06, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x79, 0x22, 0xbc, 
0x01, - 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6e, 0x65, 0x78, 0x74, - 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0e, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x6e, 0x64, 0x65, - 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, - 0x64, 0x65, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x63, 0x6f, - 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0e, 0x74, 0x65, - 0x6e, 0x61, 0x6e, 0x74, 0x43, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x22, 0x8f, 0x01, 0x0a, - 0x16, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x72, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x79, 0x22, 0x31, - 0x0a, 0x17, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x22, 0xbd, 0x01, 0x0a, 0x16, 0x51, 0x41, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x61, - 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x3b, 0x0a, 0x0b, + 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x72, + 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x3a, 0x0a, 0x0c, 0x50, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x86, 0x01, 0x0a, 0x12, 0x41, 0x70, 0x70, 0x6c, 0x79, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, + 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x61, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x41, 0x74, 0x22, + 0xb6, 0x02, 0x0a, 0x11, 0x52, 0x65, 0x74, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x4d, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x33, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, + 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x3b, + 0x0a, 0x0b, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x0a, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x72, + 0x65, 
0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x3a, 0x0a, 0x0c, + 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x86, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x41, + 0x74, 0x22, 0x45, 0x0a, 0x10, 0x48, 0x6f, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, + 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x2b, 0x0a, 0x11, 0x48, 0x6f, 0x74, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x36, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x55, 0x75, 0x69, 0x64, 0x12, 0x16, 0x0a, - 0x06, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x5f, - 0x74, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x54, 0x61, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x5f, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x5f, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x4d, - 0x73, 0x22, 0xbc, 0x01, 0x0a, 0x10, 0x51, 0x41, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, - 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x2b, 0x0a, 0x11, - 0x63, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x76, 0x65, 0x72, 
0x61, 0x67, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x63, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x67, - 0x72, 0x61, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x64, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, - 0x22, 0x75, 0x0a, 0x0e, 0x51, 0x41, 0x54, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x6f, 0x6f, 0x6c, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x55, 0x75, 0x69, 0x64, 0x22, 0x2e, 0x0a, + 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xad, 0x02, + 0x0a, 0x09, 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x74, + 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, + 0x73, 0x6b, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x65, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x22, 0x65, 0x0a, 0x0b, 0x51, 0x41, 0x54, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 
0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, - 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x61, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x65, 0x64, 0x41, 0x74, 0x22, 0xb6, - 0x03, 0x0a, 0x17, 0x51, 0x41, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x61, 0x6c, 0x50, 0x6c, - 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, - 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x55, 0x75, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x69, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x61, - 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, - 0x54, 0x61, 0x67, 0x73, 0x12, 0x50, 0x0a, 0x10, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, - 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x41, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x0f, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x53, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x74, 0x6f, 0x6f, 0x6c, 0x69, 0x6e, - 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, - 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, - 0x41, 0x54, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x74, - 0x6f, 0x6f, 0x6c, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3e, 0x0a, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, - 0x65, 0x74, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x6f, 0x77, + 0x09, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x73, + 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, + 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0a, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x38, 0x0a, 0x0a, + 0x73, 0x6c, 0x61, 0x5f, 0x64, 0x75, 0x65, 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x73, 0x6c, + 0x61, 0x44, 0x75, 0x65, 0x41, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x5f, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x22, 0x4c, 0x0a, + 0x13, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x63, 0x61, 0x79, 0x53, 0x63, 0x61, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, + 
0x1a, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x08, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x22, 0x4c, 0x0a, 0x14, 0x52, + 0x75, 0x6e, 0x44, 0x65, 0x63, 0x61, 0x79, 0x53, 0x63, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, + 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, + 0x73, 0x6b, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x22, 0x32, 0x0a, 0x15, 0x4c, 0x69, 0x73, + 0x74, 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x22, 0x4e, 0x0a, + 0x16, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, + 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x63, + 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x22, 0x6f, 0x0a, + 0x17, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, + 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, + 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 
0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x22, 0x4e, + 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, + 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x74, 0x61, + 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x04, 0x74, 0x61, 0x73, 0x6b, 0x22, 0x85, + 0x03, 0x0a, 0x1a, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, + 0x0e, 0x6d, 0x61, 0x74, 0x72, 0x69, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x72, 0x69, 0x78, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x69, 0x6c, 0x6f, 0x74, 0x5f, 0x74, 0x65, + 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x69, 0x6c, + 0x6f, 0x74, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x51, 0x41, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x65, - 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x67, 0x72, 0x61, - 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, - 0x64, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 
0x01, 0x28, 0x09, - 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x6c, - 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x5f, 0x6d, 0x73, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x42, - 0x75, 0x64, 0x67, 0x65, 0x74, 0x4d, 0x73, 0x22, 0xe0, 0x01, 0x0a, 0x0e, 0x51, 0x41, 0x4d, 0x65, - 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, - 0x75, 0x6e, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x68, - 0x75, 0x6e, 0x6b, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, - 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x09, 0x63, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x16, - 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x64, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x64, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65, 0x6c, 0x74, 0x61, - 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x65, 0x6c, 0x74, 0x61, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x98, 0x01, 0x0a, 0x17, 0x51, - 0x41, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, + 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x07, 0x62, + 0x61, 0x74, 0x63, 
0x68, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x0a, 0x67, 0x75, 0x61, 0x72, 0x64, 0x72, + 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x70, 0x6f, 0x77, + 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x47, 0x75, 0x61, 0x72, + 0x64, 0x72, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x67, 0x75, 0x61, + 0x72, 0x64, 0x72, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x72, 0x6f, + 0x76, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, + 0x70, 0x72, 0x6f, 0x76, 0x65, 0x64, 0x42, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x79, 0x1a, 0x3d, 0x0a, 0x0f, 0x47, 0x75, 0x61, 0x72, 0x64, + 0x72, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3c, 0x0a, 0x0c, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x65, + 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x65, 0x6e, + 0x61, 0x6e, 0x74, 0x73, 0x22, 0x52, 0x0a, 0x1b, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x6f, 
0x6c, 0x69, 0x63, 0x79, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x64, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x76, 0x0a, 0x15, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x73, 0x68, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x21, 0x0a, + 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x79, + 0x22, 0xb2, 0x01, 0x0a, 0x16, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x6c, 0x65, + 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x72, + 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x61, 0x74, + 0x63, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x62, 0x61, 0x74, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x61, + 0x74, 0x63, 0x68, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x18, 0x0a, 0x07, 
0x74, + 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x65, + 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xaf, 0x01, 0x0a, 0x15, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, + 0x65, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x62, + 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, + 0x61, 0x6c, 0x65, 0x72, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x61, 0x6c, + 0x65, 0x72, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x62, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x79, 0x22, 0xbc, 0x01, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x6d, + 0x6f, 0x74, 0x65, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6e, 0x65, + 0x78, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, + 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x18, 0x0a, + 0x07, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, + 0x74, 0x65, 0x6e, 0x61, 0x6e, 
0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x27, 0x0a, + 0x0f, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x43, 0x6f, + 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x22, 0x8f, 0x01, 0x0a, 0x16, 0x52, 0x6f, 0x6c, 0x6c, 0x62, + 0x61, 0x63, 0x6b, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x16, 0x0a, + 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x79, 0x22, 0x31, 0x0a, 0x17, 0x52, 0x6f, 0x6c, 0x6c, + 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xbd, 0x01, 0x0a, 0x16, + 0x51, 0x41, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x61, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6e, - 0x61, 0x6e, 0x74, 0x55, 0x75, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 
0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x07, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, + 0x61, 0x6e, 0x74, 0x55, 0x75, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, + 0x1f, 0x0a, 0x0b, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x67, 0x73, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, + 0x2a, 0x0a, 0x11, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x62, 0x75, 0x64, 0x67, 0x65, + 0x74, 0x5f, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x6c, 0x61, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x4d, 0x73, 0x22, 0xbc, 0x01, 0x0a, 0x10, + 0x51, 0x41, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x69, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x01, 0x52, 
0x10, 0x63, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x76, 0x65, 0x72, + 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x72, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x75, 0x0a, 0x0e, 0x51, 0x41, + 0x54, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x17, 0x0a, 0x07, + 0x74, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, + 0x6f, 0x6f, 0x6c, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x61, 0x74, + 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x61, 0x74, + 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x22, 0x65, 0x0a, 0x0b, 0x51, 0x41, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x72, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x72, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x65, 0x64, 0x41, 0x74, 0x22, 0x90, 0x01, 0x0a, 0x0b, 0x51, 0x41, 0x50, + 0x6c, 0x61, 0x6e, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, + 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 
0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x5f, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, + 0x63, 0x79, 0x4d, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, + 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0xf0, 0x05, 0x0a, 0x17, + 0x51, 0x41, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x61, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6e, 0x61, 0x6e, + 0x74, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, + 0x6e, 0x61, 0x6e, 0x74, 0x55, 0x75, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x67, + 0x73, 0x12, 0x50, 0x0a, 0x10, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x70, 0x6f, + 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x51, 0x41, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x70, 0x61, + 0x63, 0x65, 0x52, 0x0f, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x74, 0x6f, 0x6f, 0x6c, 0x69, 0x6e, 0x67, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 
0x2e, 0x6b, + 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x41, 0x54, 0x6f, + 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x74, 0x6f, 0x6f, 0x6c, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3e, 0x0a, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x41, - 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x07, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x73, 0x22, 0xe3, 0x01, 0x0a, 0x11, 0x51, 0x41, 0x43, 0x69, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, - 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, - 0x68, 0x75, 0x6e, 0x6b, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, - 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x63, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x64, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x64, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65, 0x6c, 0x74, - 0x61, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x65, 0x6c, 0x74, 
0x61, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0xa0, 0x01, 0x0a, 0x18, - 0x51, 0x41, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6e, 0x61, - 0x6e, 0x74, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, - 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x55, 0x75, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x44, 0x0a, 0x09, 0x63, 0x69, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x70, 0x6f, + 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x64, 0x65, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x61, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x5f, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x5f, 0x6d, 0x73, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x42, 0x75, 0x64, 0x67, + 0x65, 0x74, 0x4d, 0x73, 0x12, 0x38, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x67, 0x65, 0x73, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x41, 0x50, 0x6c, 0x61, + 0x6e, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x67, 0x65, 0x73, 0x12, 0x7f, + 0x0a, 0x17, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x47, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, + 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x41, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x61, + 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x15, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, + 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x1a, 0x48, 0x0a, 0x1a, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe0, + 0x01, 0x0a, 0x0e, 0x51, 0x41, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x69, 0x74, 0x61, 0x74, + 
0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x63, 0x69, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1e, + 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x64, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x64, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x22, 0xb3, 0x01, 0x0a, 0x17, 0x51, 0x41, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, + 0x0b, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x55, 0x75, 0x69, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x3d, 0x0a, + 0x07, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, + 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x41, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x52, 0x07, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 
0x64, 0x22, 0xe3, 0x01, 0x0a, 0x11, 0x51, 0x41, 0x43, 0x69, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, + 0x08, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x63, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x64, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x64, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65, + 0x6c, 0x74, 0x61, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0xd5, 0x01, + 0x0a, 0x18, 0x51, 0x41, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, + 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x55, 0x75, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x44, 
0x0a, 0x09, 0x63, 0x69, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x41, 0x43, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x75, + 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x09, 0x63, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x32, 0xa6, 0x19, 0x0a, 0x1a, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, + 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x7b, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x6e, + 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x30, 0x2e, 0x70, + 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, + 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, + 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, + 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x7b, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, + 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x30, 0x2e, 0x70, 0x6f, 0x77, 0x65, + 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x70, 
0x64, 0x61, 0x74, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x51, 0x41, 0x43, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x75, 0x6d, 0x6d, - 0x61, 0x72, 0x79, 0x52, 0x09, 0x63, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0xec, - 0x14, 0x0a, 0x1a, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, - 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7b, 0x0a, - 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, - 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x30, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, - 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, - 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, - 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x14, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x30, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, - 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4b, - 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, - 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 
0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x14, 0x52, 0x65, 0x74, 0x69, 0x72, - 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x30, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, - 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x69, 0x72, 0x65, 0x4b, 0x6e, 0x6f, 0x77, - 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x31, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, - 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x69, 0x72, 0x65, 0x4b, 0x6e, - 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x10, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x49, - 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, - 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, - 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, - 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, - 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x70, 0x0a, - 0x15, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x2a, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, - 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x73, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x65, 0x71, 0x75, 
0x65, - 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, - 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x7b, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, - 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x12, 0x30, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, - 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x69, - 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x70, 0x6f, 0x77, 0x65, + 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, + 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, + 0x0a, 0x14, 0x52, 0x65, 0x74, 0x69, 0x72, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, + 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x30, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, + 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, + 0x69, 0x72, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x65, 0x74, 0x69, 0x72, 0x65, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x10, 0x54, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x28, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, + 0x67, 0x65, 0x2e, 0x76, 0x31, 
0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4a, + 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, - 0x67, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x79, 0x0a, 0x16, - 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x32, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, - 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x6c, - 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, - 0x65, 0x67, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x70, 0x6f, 0x77, - 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x0e, 0x53, 0x75, 0x62, 0x6d, 0x69, - 0x74, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x12, 0x24, 0x2e, 0x70, 0x6f, 0x77, 0x65, + 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x70, 0x0a, 0x15, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x46, + 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x2a, 0x2e, + 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 
0x2e, 0x76, 0x31, 0x2e, - 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x25, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, - 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x65, - 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x70, 0x6f, - 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, - 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x70, 0x6f, 0x77, - 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x0d, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x12, 0x29, 0x2e, 0x70, 0x6f, - 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, - 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x69, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2a, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, - 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x65, - 0x6c, 0x74, 
0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, - 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, - 0x0f, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, - 0x12, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, - 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x44, 0x65, - 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, + 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x75, + 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x12, 0x30, + 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x31, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, + 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x75, 0x73, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x79, 0x0a, 0x16, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x46, + 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x32, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 
0x73, 0x68, 0x44, 0x65, 0x6c, 0x74, 0x61, - 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x0d, 0x52, - 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x29, 0x2e, 0x70, - 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x44, 0x65, 0x6c, 0x74, 0x61, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, - 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, - 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x12, 0x26, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, - 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x70, 0x6f, 0x77, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x46, 0x75, 0x73, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, + 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, + 0x0a, 0x0e, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, + 0x12, 0x24, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, + 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 
0x2e, + 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x65, 0x65, + 0x64, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, + 0x11, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, + 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, + 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x65, 0x65, + 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2e, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, + 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x65, 0x65, 0x64, + 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x69, 0x0a, 0x11, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, + 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x12, 0x2d, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, + 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, + 0x73, 0x65, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, + 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x65, 0x65, 0x64, + 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x14, + 0x45, 0x73, 0x63, 0x61, 0x6c, 0x61, 0x74, 0x65, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, + 0x43, 0x61, 0x73, 0x65, 0x12, 0x30, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x73, 0x63, 0x61, 0x6c, + 0x61, 0x74, 0x65, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x1a, 0x25, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, + 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x65, 0x65, + 0x64, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, + 0x15, 0x52, 0x65, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, + 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x12, 0x31, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, + 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, + 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x41, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x0a, 0x52, 0x65, 0x74, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x12, 0x26, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, - 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x6f, 0x0a, 0x14, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x46, 0x65, 0x65, 0x64, + 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x12, 0x30, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, - 0x65, 0x74, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x5f, 0x0a, 0x0e, 0x48, 0x6f, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, - 0x64, 0x65, 0x78, 0x12, 0x25, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 
0x2e, 0x6b, 0x6e, 0x6f, - 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x6f, 0x74, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x6f, 0x77, + 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, + 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x48, 0x6f, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x6a, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x41, 0x67, 0x65, - 0x6e, 0x74, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x12, 0x28, 0x2e, 0x70, 0x6f, 0x77, 0x65, + 0x2e, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x78, 0x0a, 0x13, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x65, 0x65, 0x64, 0x62, + 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, 0x65, 0x73, 0x12, 0x2f, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, + 0x78, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, 0x73, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, - 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, - 0x68, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, - 0x0a, 0x0c, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x63, 0x61, 0x79, 0x53, 0x63, 0x61, 0x6e, 0x12, 0x28, + 0x45, 0x78, 0x70, 
0x6f, 0x72, 0x74, 0x46, 0x65, 0x65, 0x64, 0x62, 0x61, 0x63, 0x6b, 0x43, 0x61, + 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x0d, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x12, 0x29, 0x2e, 0x70, + 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, + 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2a, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, + 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, + 0x65, 0x6c, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, + 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, + 0x0a, 0x0f, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, + 0x62, 0x12, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, + 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x44, + 0x65, 0x6c, 0x74, 0x61, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x44, 0x65, 
0x63, 0x61, 0x79, 0x53, 0x63, 0x61, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x0d, + 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x29, 0x2e, + 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, - 0x75, 0x6e, 0x44, 0x65, 0x63, 0x61, 0x79, 0x53, 0x63, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x65, 0x63, 0x61, 0x79, - 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x2a, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, - 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, - 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x65, 0x63, 0x61, - 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, - 0x0a, 0x10, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, - 0x73, 0x6b, 0x12, 0x2c, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, - 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 
0x1a, 0x2d, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, - 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, - 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x78, 0x0a, 0x13, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2f, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, - 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x73, - 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, - 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, - 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x0e, 0x50, 0x75, 0x62, - 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x2a, 0x2e, 0x70, 0x6f, + 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x12, 0x26, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, + 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x70, 0x6f, 0x77, + 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x0a, 0x52, 0x65, 0x74, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x12, 0x26, 0x2e, 0x70, 0x6f, 
0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, + 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x70, 0x6f, 0x77, 0x65, + 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x0e, 0x48, 0x6f, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x25, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x6f, 0x74, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, - 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, - 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, - 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x2a, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, - 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, - 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x31, 0x2e, 0x48, 0x6f, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x6a, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x41, 0x67, + 0x65, 0x6e, 0x74, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x12, 0x28, 0x2e, 
0x70, 0x6f, 0x77, + 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x63, 0x0a, 0x0c, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x63, 0x61, 0x79, 0x53, 0x63, 0x61, 0x6e, 0x12, + 0x28, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, + 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x63, 0x61, 0x79, 0x53, 0x63, + 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x70, 0x6f, 0x77, 0x65, + 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x75, 0x6e, 0x44, 0x65, 0x63, 0x61, 0x79, 0x53, 0x63, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x65, 0x63, 0x61, + 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x2a, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, + 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, - 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, - 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x6c, 0x0a, 0x0f, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x6c, 0x65, 0x61, - 0x73, 0x65, 0x12, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, - 0x6c, 0x65, 0x64, 
0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, - 0x6b, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2c, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, - 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, - 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x80, 0x02, - 0x0a, 0x1d, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, - 0x51, 0x41, 0x42, 0x72, 0x69, 0x64, 0x67, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0x6a, 0x0a, 0x0d, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x61, 0x6c, - 0x12, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, - 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x41, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, - 0x61, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, - 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x41, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x61, 0x6c, 0x50, - 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x73, 0x0a, 0x14, 0x55, - 0x70, 0x73, 0x65, 0x72, 0x74, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x12, 0x2c, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, - 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x41, 0x4d, 0x65, 0x6d, 0x6f, - 0x72, 0x79, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x65, 0x63, + 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x6f, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 
0x65, 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, + 0x61, 0x73, 0x6b, 0x12, 0x2c, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, + 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x44, 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, - 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x41, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x42, 0xec, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, - 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x13, 0x4b, 0x6e, - 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0x5a, 0x4e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x41, 0x72, 0x74, 0x69, 0x73, 0x61, 0x6e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x50, 0x6f, 0x77, - 0x65, 0x72, 0x58, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x65, 0x6e, - 0x2f, 0x67, 0x6f, 0x2f, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, - 0x65, 0x64, 0x67, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, - 0x65, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x50, 0x4b, 0x58, 0xaa, 0x02, 0x13, 0x50, 0x6f, 0x77, 0x65, - 0x72, 0x78, 0x2e, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x56, 0x31, 0xca, - 0x02, 0x13, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x5c, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, - 0x67, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1f, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x5c, 0x4b, - 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x15, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x78, 
- 0x3a, 0x3a, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, + 0x65, 0x63, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x78, 0x0a, 0x13, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2f, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, + 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, + 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, + 0x70, 0x73, 0x65, 0x72, 0x74, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x0e, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x2a, 0x2e, 0x70, + 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, + 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, + 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x2a, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, + 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 
0x72, + 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, + 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, + 0x65, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x6c, 0x0a, 0x0f, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x6c, 0x65, + 0x61, 0x73, 0x65, 0x12, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, + 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, + 0x63, 0x6b, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2c, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, + 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, + 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x80, + 0x02, 0x0a, 0x1d, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, + 0x65, 0x51, 0x41, 0x42, 0x72, 0x69, 0x64, 0x67, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x6a, 0x0a, 0x0d, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x61, + 0x6c, 0x12, 0x2b, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, + 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x41, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, + 0x76, 0x61, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x41, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x61, 0x6c, + 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x73, 0x0a, 0x14, + 0x55, 0x70, 0x73, 0x65, 0x72, 
0x74, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x12, 0x2c, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x41, 0x4d, 0x65, 0x6d, + 0x6f, 0x72, 0x79, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x6b, 0x6e, 0x6f, 0x77, + 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x41, 0x4d, 0x65, 0x6d, 0x6f, 0x72, + 0x79, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x42, 0xec, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, + 0x2e, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x13, 0x4b, + 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x41, 0x72, 0x74, 0x69, 0x73, 0x61, 0x6e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x50, 0x6f, + 0x77, 0x65, 0x72, 0x58, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x65, + 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, + 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, + 0x67, 0x65, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x50, 0x4b, 0x58, 0xaa, 0x02, 0x13, 0x50, 0x6f, 0x77, + 0x65, 0x72, 0x78, 0x2e, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x2e, 0x56, 0x31, + 0xca, 0x02, 0x13, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x5c, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, + 0x64, 0x67, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1f, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x5c, + 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x15, 
0x50, 0x6f, 0x77, 0x65, 0x72, + 0x78, 0x3a, 0x3a, 0x4b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x3a, 0x3a, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, }) var ( @@ -5101,7 +5843,7 @@ func file_powerx_knowledge_v1_knowledge_space_proto_rawDescGZIP() []byte { } var file_powerx_knowledge_v1_knowledge_space_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_powerx_knowledge_v1_knowledge_space_proto_msgTypes = make([]protoimpl.MessageInfo, 66) +var file_powerx_knowledge_v1_knowledge_space_proto_msgTypes = make([]protoimpl.MessageInfo, 74) var file_powerx_knowledge_v1_knowledge_space_proto_goTypes = []any{ (FusionStrategy_DeploymentState)(0), // 0: powerx.knowledge.v1.FusionStrategy.DeploymentState (*KnowledgeSpace)(nil), // 1: powerx.knowledge.v1.KnowledgeSpace @@ -5124,57 +5866,66 @@ var file_powerx_knowledge_v1_knowledge_space_proto_goTypes = []any{ (*FeedbackResponse)(nil), // 18: powerx.knowledge.v1.FeedbackResponse (*ListFeedbackCasesRequest)(nil), // 19: powerx.knowledge.v1.ListFeedbackCasesRequest (*ListFeedbackCasesResponse)(nil), // 20: powerx.knowledge.v1.ListFeedbackCasesResponse - (*FeedbackCase)(nil), // 21: powerx.knowledge.v1.FeedbackCase - (*DeltaJob)(nil), // 22: powerx.knowledge.v1.DeltaJob - (*StartDeltaJobRequest)(nil), // 23: powerx.knowledge.v1.StartDeltaJobRequest - (*StartDeltaJobResponse)(nil), // 24: powerx.knowledge.v1.StartDeltaJobResponse - (*GetDeltaReportRequest)(nil), // 25: powerx.knowledge.v1.GetDeltaReportRequest - (*GetDeltaReportResponse)(nil), // 26: powerx.knowledge.v1.GetDeltaReportResponse - (*PublishDeltaJobRequest)(nil), // 27: powerx.knowledge.v1.PublishDeltaJobRequest - (*PublishDeltaJobResponse)(nil), // 28: powerx.knowledge.v1.PublishDeltaJobResponse - (*RollbackDeltaRequest)(nil), // 29: powerx.knowledge.v1.RollbackDeltaRequest - (*RollbackDeltaResponse)(nil), // 30: powerx.knowledge.v1.RollbackDeltaResponse - (*ApplyEventRequest)(nil), // 31: powerx.knowledge.v1.ApplyEventRequest - 
(*ApplyEventResponse)(nil), // 32: powerx.knowledge.v1.ApplyEventResponse - (*RetryEventRequest)(nil), // 33: powerx.knowledge.v1.RetryEventRequest - (*RetryEventResponse)(nil), // 34: powerx.knowledge.v1.RetryEventResponse - (*HotUpdateRequest)(nil), // 35: powerx.knowledge.v1.HotUpdateRequest - (*HotUpdateResponse)(nil), // 36: powerx.knowledge.v1.HotUpdateResponse - (*RefreshAgentRequest)(nil), // 37: powerx.knowledge.v1.RefreshAgentRequest - (*RefreshAgentResponse)(nil), // 38: powerx.knowledge.v1.RefreshAgentResponse - (*DecayTask)(nil), // 39: powerx.knowledge.v1.DecayTask - (*RunDecayScanRequest)(nil), // 40: powerx.knowledge.v1.RunDecayScanRequest - (*RunDecayScanResponse)(nil), // 41: powerx.knowledge.v1.RunDecayScanResponse - (*ListDecayTasksRequest)(nil), // 42: powerx.knowledge.v1.ListDecayTasksRequest - (*ListDecayTasksResponse)(nil), // 43: powerx.knowledge.v1.ListDecayTasksResponse - (*RestoreDecayTaskRequest)(nil), // 44: powerx.knowledge.v1.RestoreDecayTaskRequest - (*RestoreDecayTaskResponse)(nil), // 45: powerx.knowledge.v1.RestoreDecayTaskResponse - (*UpsertReleasePolicyRequest)(nil), // 46: powerx.knowledge.v1.UpsertReleasePolicyRequest - (*ReleaseBatch)(nil), // 47: powerx.knowledge.v1.ReleaseBatch - (*UpsertReleasePolicyResponse)(nil), // 48: powerx.knowledge.v1.UpsertReleasePolicyResponse - (*PublishReleaseRequest)(nil), // 49: powerx.knowledge.v1.PublishReleaseRequest - (*PublishReleaseResponse)(nil), // 50: powerx.knowledge.v1.PublishReleaseResponse - (*PromoteReleaseRequest)(nil), // 51: powerx.knowledge.v1.PromoteReleaseRequest - (*PromoteReleaseResponse)(nil), // 52: powerx.knowledge.v1.PromoteReleaseResponse - (*RollbackReleaseRequest)(nil), // 53: powerx.knowledge.v1.RollbackReleaseRequest - (*RollbackReleaseResponse)(nil), // 54: powerx.knowledge.v1.RollbackReleaseResponse - (*QARetrievalPlanRequest)(nil), // 55: powerx.knowledge.v1.QARetrievalPlanRequest - (*QACandidateSpace)(nil), // 56: powerx.knowledge.v1.QACandidateSpace - 
(*QAToolMetadata)(nil), // 57: powerx.knowledge.v1.QAToolMetadata - (*QATelemetry)(nil), // 58: powerx.knowledge.v1.QATelemetry - (*QARetrievalPlanResponse)(nil), // 59: powerx.knowledge.v1.QARetrievalPlanResponse - (*QAMemoryUpdate)(nil), // 60: powerx.knowledge.v1.QAMemoryUpdate - (*QAMemorySnapshotRequest)(nil), // 61: powerx.knowledge.v1.QAMemorySnapshotRequest - (*QACitationSummary)(nil), // 62: powerx.knowledge.v1.QACitationSummary - (*QAMemorySnapshotResponse)(nil), // 63: powerx.knowledge.v1.QAMemorySnapshotResponse - nil, // 64: powerx.knowledge.v1.ApplyEventRequest.PayloadEntry - nil, // 65: powerx.knowledge.v1.RetryEventRequest.PayloadEntry - nil, // 66: powerx.knowledge.v1.UpsertReleasePolicyRequest.GuardrailsEntry - (*timestamppb.Timestamp)(nil), // 67: google.protobuf.Timestamp + (*CloseFeedbackCaseRequest)(nil), // 21: powerx.knowledge.v1.CloseFeedbackCaseRequest + (*EscalateFeedbackCaseRequest)(nil), // 22: powerx.knowledge.v1.EscalateFeedbackCaseRequest + (*ReprocessFeedbackCaseRequest)(nil), // 23: powerx.knowledge.v1.ReprocessFeedbackCaseRequest + (*RollbackFeedbackCaseRequest)(nil), // 24: powerx.knowledge.v1.RollbackFeedbackCaseRequest + (*ExportFeedbackCasesRequest)(nil), // 25: powerx.knowledge.v1.ExportFeedbackCasesRequest + (*ExportFeedbackCasesResponse)(nil), // 26: powerx.knowledge.v1.ExportFeedbackCasesResponse + (*FeedbackCase)(nil), // 27: powerx.knowledge.v1.FeedbackCase + (*DeltaJob)(nil), // 28: powerx.knowledge.v1.DeltaJob + (*StartDeltaJobRequest)(nil), // 29: powerx.knowledge.v1.StartDeltaJobRequest + (*StartDeltaJobResponse)(nil), // 30: powerx.knowledge.v1.StartDeltaJobResponse + (*GetDeltaReportRequest)(nil), // 31: powerx.knowledge.v1.GetDeltaReportRequest + (*GetDeltaReportResponse)(nil), // 32: powerx.knowledge.v1.GetDeltaReportResponse + (*PublishDeltaJobRequest)(nil), // 33: powerx.knowledge.v1.PublishDeltaJobRequest + (*PublishDeltaJobResponse)(nil), // 34: powerx.knowledge.v1.PublishDeltaJobResponse + 
(*RollbackDeltaRequest)(nil), // 35: powerx.knowledge.v1.RollbackDeltaRequest + (*RollbackDeltaResponse)(nil), // 36: powerx.knowledge.v1.RollbackDeltaResponse + (*ApplyEventRequest)(nil), // 37: powerx.knowledge.v1.ApplyEventRequest + (*ApplyEventResponse)(nil), // 38: powerx.knowledge.v1.ApplyEventResponse + (*RetryEventRequest)(nil), // 39: powerx.knowledge.v1.RetryEventRequest + (*RetryEventResponse)(nil), // 40: powerx.knowledge.v1.RetryEventResponse + (*HotUpdateRequest)(nil), // 41: powerx.knowledge.v1.HotUpdateRequest + (*HotUpdateResponse)(nil), // 42: powerx.knowledge.v1.HotUpdateResponse + (*RefreshAgentRequest)(nil), // 43: powerx.knowledge.v1.RefreshAgentRequest + (*RefreshAgentResponse)(nil), // 44: powerx.knowledge.v1.RefreshAgentResponse + (*DecayTask)(nil), // 45: powerx.knowledge.v1.DecayTask + (*RunDecayScanRequest)(nil), // 46: powerx.knowledge.v1.RunDecayScanRequest + (*RunDecayScanResponse)(nil), // 47: powerx.knowledge.v1.RunDecayScanResponse + (*ListDecayTasksRequest)(nil), // 48: powerx.knowledge.v1.ListDecayTasksRequest + (*ListDecayTasksResponse)(nil), // 49: powerx.knowledge.v1.ListDecayTasksResponse + (*RestoreDecayTaskRequest)(nil), // 50: powerx.knowledge.v1.RestoreDecayTaskRequest + (*RestoreDecayTaskResponse)(nil), // 51: powerx.knowledge.v1.RestoreDecayTaskResponse + (*UpsertReleasePolicyRequest)(nil), // 52: powerx.knowledge.v1.UpsertReleasePolicyRequest + (*ReleaseBatch)(nil), // 53: powerx.knowledge.v1.ReleaseBatch + (*UpsertReleasePolicyResponse)(nil), // 54: powerx.knowledge.v1.UpsertReleasePolicyResponse + (*PublishReleaseRequest)(nil), // 55: powerx.knowledge.v1.PublishReleaseRequest + (*PublishReleaseResponse)(nil), // 56: powerx.knowledge.v1.PublishReleaseResponse + (*PromoteReleaseRequest)(nil), // 57: powerx.knowledge.v1.PromoteReleaseRequest + (*PromoteReleaseResponse)(nil), // 58: powerx.knowledge.v1.PromoteReleaseResponse + (*RollbackReleaseRequest)(nil), // 59: powerx.knowledge.v1.RollbackReleaseRequest + 
(*RollbackReleaseResponse)(nil), // 60: powerx.knowledge.v1.RollbackReleaseResponse + (*QARetrievalPlanRequest)(nil), // 61: powerx.knowledge.v1.QARetrievalPlanRequest + (*QACandidateSpace)(nil), // 62: powerx.knowledge.v1.QACandidateSpace + (*QAToolMetadata)(nil), // 63: powerx.knowledge.v1.QAToolMetadata + (*QATelemetry)(nil), // 64: powerx.knowledge.v1.QATelemetry + (*QAPlanStage)(nil), // 65: powerx.knowledge.v1.QAPlanStage + (*QARetrievalPlanResponse)(nil), // 66: powerx.knowledge.v1.QARetrievalPlanResponse + (*QAMemoryUpdate)(nil), // 67: powerx.knowledge.v1.QAMemoryUpdate + (*QAMemorySnapshotRequest)(nil), // 68: powerx.knowledge.v1.QAMemorySnapshotRequest + (*QACitationSummary)(nil), // 69: powerx.knowledge.v1.QACitationSummary + (*QAMemorySnapshotResponse)(nil), // 70: powerx.knowledge.v1.QAMemorySnapshotResponse + nil, // 71: powerx.knowledge.v1.ApplyEventRequest.PayloadEntry + nil, // 72: powerx.knowledge.v1.RetryEventRequest.PayloadEntry + nil, // 73: powerx.knowledge.v1.UpsertReleasePolicyRequest.GuardrailsEntry + nil, // 74: powerx.knowledge.v1.QARetrievalPlanResponse.PolicyVersionSnapshotEntry + (*timestamppb.Timestamp)(nil), // 75: google.protobuf.Timestamp + (*structpb.Struct)(nil), // 76: google.protobuf.Struct } var file_powerx_knowledge_v1_knowledge_space_proto_depIdxs = []int32{ - 67, // 0: powerx.knowledge.v1.KnowledgeSpace.created_at:type_name -> google.protobuf.Timestamp - 67, // 1: powerx.knowledge.v1.KnowledgeSpace.updated_at:type_name -> google.protobuf.Timestamp + 75, // 0: powerx.knowledge.v1.KnowledgeSpace.created_at:type_name -> google.protobuf.Timestamp + 75, // 1: powerx.knowledge.v1.KnowledgeSpace.updated_at:type_name -> google.protobuf.Timestamp 1, // 2: powerx.knowledge.v1.CreateKnowledgeSpaceResponse.space:type_name -> powerx.knowledge.v1.KnowledgeSpace 1, // 3: powerx.knowledge.v1.UpdateKnowledgeSpaceResponse.space:type_name -> powerx.knowledge.v1.KnowledgeSpace 1, // 4: 
powerx.knowledge.v1.RetireKnowledgeSpaceResponse.space:type_name -> powerx.knowledge.v1.KnowledgeSpace @@ -5182,94 +5933,111 @@ var file_powerx_knowledge_v1_knowledge_space_proto_depIdxs = []int32{ 16, // 6: powerx.knowledge.v1.FusionStrategyResponse.strategy:type_name -> powerx.knowledge.v1.FusionStrategy 16, // 7: powerx.knowledge.v1.ListFusionStrategiesResponse.strategies:type_name -> powerx.knowledge.v1.FusionStrategy 0, // 8: powerx.knowledge.v1.FusionStrategy.deployment_state:type_name -> powerx.knowledge.v1.FusionStrategy.DeploymentState - 67, // 9: powerx.knowledge.v1.FusionStrategy.published_at:type_name -> google.protobuf.Timestamp - 21, // 10: powerx.knowledge.v1.FeedbackResponse.case:type_name -> powerx.knowledge.v1.FeedbackCase - 21, // 11: powerx.knowledge.v1.ListFeedbackCasesResponse.cases:type_name -> powerx.knowledge.v1.FeedbackCase - 67, // 12: powerx.knowledge.v1.FeedbackCase.sla_due_at:type_name -> google.protobuf.Timestamp - 67, // 13: powerx.knowledge.v1.FeedbackCase.created_at:type_name -> google.protobuf.Timestamp - 67, // 14: powerx.knowledge.v1.FeedbackCase.updated_at:type_name -> google.protobuf.Timestamp - 67, // 15: powerx.knowledge.v1.DeltaJob.created_at:type_name -> google.protobuf.Timestamp - 67, // 16: powerx.knowledge.v1.DeltaJob.published_at:type_name -> google.protobuf.Timestamp - 22, // 17: powerx.knowledge.v1.StartDeltaJobResponse.job:type_name -> powerx.knowledge.v1.DeltaJob - 22, // 18: powerx.knowledge.v1.GetDeltaReportResponse.job:type_name -> powerx.knowledge.v1.DeltaJob - 22, // 19: powerx.knowledge.v1.PublishDeltaJobResponse.job:type_name -> powerx.knowledge.v1.DeltaJob - 22, // 20: powerx.knowledge.v1.RollbackDeltaResponse.job:type_name -> powerx.knowledge.v1.DeltaJob - 64, // 21: powerx.knowledge.v1.ApplyEventRequest.payload:type_name -> powerx.knowledge.v1.ApplyEventRequest.PayloadEntry - 67, // 22: powerx.knowledge.v1.ApplyEventRequest.received_at:type_name -> google.protobuf.Timestamp - 67, // 23: 
powerx.knowledge.v1.ApplyEventResponse.processed_at:type_name -> google.protobuf.Timestamp - 65, // 24: powerx.knowledge.v1.RetryEventRequest.payload:type_name -> powerx.knowledge.v1.RetryEventRequest.PayloadEntry - 67, // 25: powerx.knowledge.v1.RetryEventRequest.received_at:type_name -> google.protobuf.Timestamp - 67, // 26: powerx.knowledge.v1.RetryEventResponse.processed_at:type_name -> google.protobuf.Timestamp - 67, // 27: powerx.knowledge.v1.DecayTask.detected_at:type_name -> google.protobuf.Timestamp - 67, // 28: powerx.knowledge.v1.DecayTask.sla_due_at:type_name -> google.protobuf.Timestamp - 39, // 29: powerx.knowledge.v1.RunDecayScanResponse.tasks:type_name -> powerx.knowledge.v1.DecayTask - 39, // 30: powerx.knowledge.v1.ListDecayTasksResponse.tasks:type_name -> powerx.knowledge.v1.DecayTask - 39, // 31: powerx.knowledge.v1.RestoreDecayTaskResponse.task:type_name -> powerx.knowledge.v1.DecayTask - 47, // 32: powerx.knowledge.v1.UpsertReleasePolicyRequest.batches:type_name -> powerx.knowledge.v1.ReleaseBatch - 66, // 33: powerx.knowledge.v1.UpsertReleasePolicyRequest.guardrails:type_name -> powerx.knowledge.v1.UpsertReleasePolicyRequest.GuardrailsEntry - 67, // 34: powerx.knowledge.v1.QATelemetry.recorded_at:type_name -> google.protobuf.Timestamp - 56, // 35: powerx.knowledge.v1.QARetrievalPlanResponse.candidate_spaces:type_name -> powerx.knowledge.v1.QACandidateSpace - 57, // 36: powerx.knowledge.v1.QARetrievalPlanResponse.toolings:type_name -> powerx.knowledge.v1.QAToolMetadata - 58, // 37: powerx.knowledge.v1.QARetrievalPlanResponse.telemetry:type_name -> powerx.knowledge.v1.QATelemetry - 60, // 38: powerx.knowledge.v1.QAMemorySnapshotRequest.updates:type_name -> powerx.knowledge.v1.QAMemoryUpdate - 62, // 39: powerx.knowledge.v1.QAMemorySnapshotResponse.citations:type_name -> powerx.knowledge.v1.QACitationSummary - 2, // 40: powerx.knowledge.v1.KnowledgeSpaceAdminService.CreateKnowledgeSpace:input_type -> 
powerx.knowledge.v1.CreateKnowledgeSpaceRequest - 4, // 41: powerx.knowledge.v1.KnowledgeSpaceAdminService.UpdateKnowledgeSpace:input_type -> powerx.knowledge.v1.UpdateKnowledgeSpaceRequest - 6, // 42: powerx.knowledge.v1.KnowledgeSpaceAdminService.RetireKnowledgeSpace:input_type -> powerx.knowledge.v1.RetireKnowledgeSpaceRequest - 8, // 43: powerx.knowledge.v1.KnowledgeSpaceAdminService.TriggerIngestion:input_type -> powerx.knowledge.v1.IngestionJobRequest - 11, // 44: powerx.knowledge.v1.KnowledgeSpaceAdminService.PublishFusionStrategy:input_type -> powerx.knowledge.v1.FusionStrategyRequest - 13, // 45: powerx.knowledge.v1.KnowledgeSpaceAdminService.ListFusionStrategies:input_type -> powerx.knowledge.v1.ListFusionStrategiesRequest - 15, // 46: powerx.knowledge.v1.KnowledgeSpaceAdminService.RollbackFusionStrategy:input_type -> powerx.knowledge.v1.RollbackFusionStrategyRequest - 17, // 47: powerx.knowledge.v1.KnowledgeSpaceAdminService.SubmitFeedback:input_type -> powerx.knowledge.v1.FeedbackRequest - 19, // 48: powerx.knowledge.v1.KnowledgeSpaceAdminService.ListFeedbackCases:input_type -> powerx.knowledge.v1.ListFeedbackCasesRequest - 23, // 49: powerx.knowledge.v1.KnowledgeSpaceAdminService.StartDeltaJob:input_type -> powerx.knowledge.v1.StartDeltaJobRequest - 25, // 50: powerx.knowledge.v1.KnowledgeSpaceAdminService.GetDeltaReport:input_type -> powerx.knowledge.v1.GetDeltaReportRequest - 27, // 51: powerx.knowledge.v1.KnowledgeSpaceAdminService.PublishDeltaJob:input_type -> powerx.knowledge.v1.PublishDeltaJobRequest - 29, // 52: powerx.knowledge.v1.KnowledgeSpaceAdminService.RollbackDelta:input_type -> powerx.knowledge.v1.RollbackDeltaRequest - 31, // 53: powerx.knowledge.v1.KnowledgeSpaceAdminService.ApplyEvent:input_type -> powerx.knowledge.v1.ApplyEventRequest - 33, // 54: powerx.knowledge.v1.KnowledgeSpaceAdminService.RetryEvent:input_type -> powerx.knowledge.v1.RetryEventRequest - 35, // 55: 
powerx.knowledge.v1.KnowledgeSpaceAdminService.HotUpdateIndex:input_type -> powerx.knowledge.v1.HotUpdateRequest - 37, // 56: powerx.knowledge.v1.KnowledgeSpaceAdminService.RefreshAgentWeights:input_type -> powerx.knowledge.v1.RefreshAgentRequest - 40, // 57: powerx.knowledge.v1.KnowledgeSpaceAdminService.RunDecayScan:input_type -> powerx.knowledge.v1.RunDecayScanRequest - 42, // 58: powerx.knowledge.v1.KnowledgeSpaceAdminService.ListDecayTasks:input_type -> powerx.knowledge.v1.ListDecayTasksRequest - 44, // 59: powerx.knowledge.v1.KnowledgeSpaceAdminService.RestoreDecayTask:input_type -> powerx.knowledge.v1.RestoreDecayTaskRequest - 46, // 60: powerx.knowledge.v1.KnowledgeSpaceAdminService.UpsertReleasePolicy:input_type -> powerx.knowledge.v1.UpsertReleasePolicyRequest - 49, // 61: powerx.knowledge.v1.KnowledgeSpaceAdminService.PublishRelease:input_type -> powerx.knowledge.v1.PublishReleaseRequest - 51, // 62: powerx.knowledge.v1.KnowledgeSpaceAdminService.PromoteRelease:input_type -> powerx.knowledge.v1.PromoteReleaseRequest - 53, // 63: powerx.knowledge.v1.KnowledgeSpaceAdminService.RollbackRelease:input_type -> powerx.knowledge.v1.RollbackReleaseRequest - 55, // 64: powerx.knowledge.v1.KnowledgeSpaceQABridgeService.PlanRetrieval:input_type -> powerx.knowledge.v1.QARetrievalPlanRequest - 61, // 65: powerx.knowledge.v1.KnowledgeSpaceQABridgeService.UpsertMemorySnapshot:input_type -> powerx.knowledge.v1.QAMemorySnapshotRequest - 3, // 66: powerx.knowledge.v1.KnowledgeSpaceAdminService.CreateKnowledgeSpace:output_type -> powerx.knowledge.v1.CreateKnowledgeSpaceResponse - 5, // 67: powerx.knowledge.v1.KnowledgeSpaceAdminService.UpdateKnowledgeSpace:output_type -> powerx.knowledge.v1.UpdateKnowledgeSpaceResponse - 7, // 68: powerx.knowledge.v1.KnowledgeSpaceAdminService.RetireKnowledgeSpace:output_type -> powerx.knowledge.v1.RetireKnowledgeSpaceResponse - 10, // 69: powerx.knowledge.v1.KnowledgeSpaceAdminService.TriggerIngestion:output_type -> 
powerx.knowledge.v1.IngestionJobResponse - 12, // 70: powerx.knowledge.v1.KnowledgeSpaceAdminService.PublishFusionStrategy:output_type -> powerx.knowledge.v1.FusionStrategyResponse - 14, // 71: powerx.knowledge.v1.KnowledgeSpaceAdminService.ListFusionStrategies:output_type -> powerx.knowledge.v1.ListFusionStrategiesResponse - 12, // 72: powerx.knowledge.v1.KnowledgeSpaceAdminService.RollbackFusionStrategy:output_type -> powerx.knowledge.v1.FusionStrategyResponse - 18, // 73: powerx.knowledge.v1.KnowledgeSpaceAdminService.SubmitFeedback:output_type -> powerx.knowledge.v1.FeedbackResponse - 20, // 74: powerx.knowledge.v1.KnowledgeSpaceAdminService.ListFeedbackCases:output_type -> powerx.knowledge.v1.ListFeedbackCasesResponse - 24, // 75: powerx.knowledge.v1.KnowledgeSpaceAdminService.StartDeltaJob:output_type -> powerx.knowledge.v1.StartDeltaJobResponse - 26, // 76: powerx.knowledge.v1.KnowledgeSpaceAdminService.GetDeltaReport:output_type -> powerx.knowledge.v1.GetDeltaReportResponse - 28, // 77: powerx.knowledge.v1.KnowledgeSpaceAdminService.PublishDeltaJob:output_type -> powerx.knowledge.v1.PublishDeltaJobResponse - 30, // 78: powerx.knowledge.v1.KnowledgeSpaceAdminService.RollbackDelta:output_type -> powerx.knowledge.v1.RollbackDeltaResponse - 32, // 79: powerx.knowledge.v1.KnowledgeSpaceAdminService.ApplyEvent:output_type -> powerx.knowledge.v1.ApplyEventResponse - 34, // 80: powerx.knowledge.v1.KnowledgeSpaceAdminService.RetryEvent:output_type -> powerx.knowledge.v1.RetryEventResponse - 36, // 81: powerx.knowledge.v1.KnowledgeSpaceAdminService.HotUpdateIndex:output_type -> powerx.knowledge.v1.HotUpdateResponse - 38, // 82: powerx.knowledge.v1.KnowledgeSpaceAdminService.RefreshAgentWeights:output_type -> powerx.knowledge.v1.RefreshAgentResponse - 41, // 83: powerx.knowledge.v1.KnowledgeSpaceAdminService.RunDecayScan:output_type -> powerx.knowledge.v1.RunDecayScanResponse - 43, // 84: powerx.knowledge.v1.KnowledgeSpaceAdminService.ListDecayTasks:output_type -> 
powerx.knowledge.v1.ListDecayTasksResponse - 45, // 85: powerx.knowledge.v1.KnowledgeSpaceAdminService.RestoreDecayTask:output_type -> powerx.knowledge.v1.RestoreDecayTaskResponse - 48, // 86: powerx.knowledge.v1.KnowledgeSpaceAdminService.UpsertReleasePolicy:output_type -> powerx.knowledge.v1.UpsertReleasePolicyResponse - 50, // 87: powerx.knowledge.v1.KnowledgeSpaceAdminService.PublishRelease:output_type -> powerx.knowledge.v1.PublishReleaseResponse - 52, // 88: powerx.knowledge.v1.KnowledgeSpaceAdminService.PromoteRelease:output_type -> powerx.knowledge.v1.PromoteReleaseResponse - 54, // 89: powerx.knowledge.v1.KnowledgeSpaceAdminService.RollbackRelease:output_type -> powerx.knowledge.v1.RollbackReleaseResponse - 59, // 90: powerx.knowledge.v1.KnowledgeSpaceQABridgeService.PlanRetrieval:output_type -> powerx.knowledge.v1.QARetrievalPlanResponse - 63, // 91: powerx.knowledge.v1.KnowledgeSpaceQABridgeService.UpsertMemorySnapshot:output_type -> powerx.knowledge.v1.QAMemorySnapshotResponse - 66, // [66:92] is the sub-list for method output_type - 40, // [40:66] is the sub-list for method input_type - 40, // [40:40] is the sub-list for extension type_name - 40, // [40:40] is the sub-list for extension extendee - 0, // [0:40] is the sub-list for field type_name + 75, // 9: powerx.knowledge.v1.FusionStrategy.published_at:type_name -> google.protobuf.Timestamp + 27, // 10: powerx.knowledge.v1.FeedbackResponse.case:type_name -> powerx.knowledge.v1.FeedbackCase + 27, // 11: powerx.knowledge.v1.ListFeedbackCasesResponse.cases:type_name -> powerx.knowledge.v1.FeedbackCase + 27, // 12: powerx.knowledge.v1.ExportFeedbackCasesResponse.cases:type_name -> powerx.knowledge.v1.FeedbackCase + 75, // 13: powerx.knowledge.v1.FeedbackCase.sla_due_at:type_name -> google.protobuf.Timestamp + 75, // 14: powerx.knowledge.v1.FeedbackCase.created_at:type_name -> google.protobuf.Timestamp + 75, // 15: powerx.knowledge.v1.FeedbackCase.updated_at:type_name -> google.protobuf.Timestamp + 75, // 
16: powerx.knowledge.v1.FeedbackCase.escalated_at:type_name -> google.protobuf.Timestamp + 75, // 17: powerx.knowledge.v1.FeedbackCase.closed_at:type_name -> google.protobuf.Timestamp + 75, // 18: powerx.knowledge.v1.DeltaJob.created_at:type_name -> google.protobuf.Timestamp + 75, // 19: powerx.knowledge.v1.DeltaJob.published_at:type_name -> google.protobuf.Timestamp + 28, // 20: powerx.knowledge.v1.StartDeltaJobResponse.job:type_name -> powerx.knowledge.v1.DeltaJob + 28, // 21: powerx.knowledge.v1.GetDeltaReportResponse.job:type_name -> powerx.knowledge.v1.DeltaJob + 28, // 22: powerx.knowledge.v1.PublishDeltaJobResponse.job:type_name -> powerx.knowledge.v1.DeltaJob + 28, // 23: powerx.knowledge.v1.RollbackDeltaResponse.job:type_name -> powerx.knowledge.v1.DeltaJob + 71, // 24: powerx.knowledge.v1.ApplyEventRequest.payload:type_name -> powerx.knowledge.v1.ApplyEventRequest.PayloadEntry + 75, // 25: powerx.knowledge.v1.ApplyEventRequest.received_at:type_name -> google.protobuf.Timestamp + 75, // 26: powerx.knowledge.v1.ApplyEventResponse.processed_at:type_name -> google.protobuf.Timestamp + 72, // 27: powerx.knowledge.v1.RetryEventRequest.payload:type_name -> powerx.knowledge.v1.RetryEventRequest.PayloadEntry + 75, // 28: powerx.knowledge.v1.RetryEventRequest.received_at:type_name -> google.protobuf.Timestamp + 75, // 29: powerx.knowledge.v1.RetryEventResponse.processed_at:type_name -> google.protobuf.Timestamp + 75, // 30: powerx.knowledge.v1.DecayTask.detected_at:type_name -> google.protobuf.Timestamp + 75, // 31: powerx.knowledge.v1.DecayTask.sla_due_at:type_name -> google.protobuf.Timestamp + 45, // 32: powerx.knowledge.v1.RunDecayScanResponse.tasks:type_name -> powerx.knowledge.v1.DecayTask + 45, // 33: powerx.knowledge.v1.ListDecayTasksResponse.tasks:type_name -> powerx.knowledge.v1.DecayTask + 45, // 34: powerx.knowledge.v1.RestoreDecayTaskResponse.task:type_name -> powerx.knowledge.v1.DecayTask + 53, // 35: 
powerx.knowledge.v1.UpsertReleasePolicyRequest.batches:type_name -> powerx.knowledge.v1.ReleaseBatch + 73, // 36: powerx.knowledge.v1.UpsertReleasePolicyRequest.guardrails:type_name -> powerx.knowledge.v1.UpsertReleasePolicyRequest.GuardrailsEntry + 75, // 37: powerx.knowledge.v1.QATelemetry.recorded_at:type_name -> google.protobuf.Timestamp + 62, // 38: powerx.knowledge.v1.QARetrievalPlanResponse.candidate_spaces:type_name -> powerx.knowledge.v1.QACandidateSpace + 63, // 39: powerx.knowledge.v1.QARetrievalPlanResponse.toolings:type_name -> powerx.knowledge.v1.QAToolMetadata + 64, // 40: powerx.knowledge.v1.QARetrievalPlanResponse.telemetry:type_name -> powerx.knowledge.v1.QATelemetry + 65, // 41: powerx.knowledge.v1.QARetrievalPlanResponse.stages:type_name -> powerx.knowledge.v1.QAPlanStage + 74, // 42: powerx.knowledge.v1.QARetrievalPlanResponse.policy_version_snapshot:type_name -> powerx.knowledge.v1.QARetrievalPlanResponse.PolicyVersionSnapshotEntry + 76, // 43: powerx.knowledge.v1.QARetrievalPlanResponse.metadata:type_name -> google.protobuf.Struct + 67, // 44: powerx.knowledge.v1.QAMemorySnapshotRequest.updates:type_name -> powerx.knowledge.v1.QAMemoryUpdate + 69, // 45: powerx.knowledge.v1.QAMemorySnapshotResponse.citations:type_name -> powerx.knowledge.v1.QACitationSummary + 76, // 46: powerx.knowledge.v1.QAMemorySnapshotResponse.metadata:type_name -> google.protobuf.Struct + 2, // 47: powerx.knowledge.v1.KnowledgeSpaceAdminService.CreateKnowledgeSpace:input_type -> powerx.knowledge.v1.CreateKnowledgeSpaceRequest + 4, // 48: powerx.knowledge.v1.KnowledgeSpaceAdminService.UpdateKnowledgeSpace:input_type -> powerx.knowledge.v1.UpdateKnowledgeSpaceRequest + 6, // 49: powerx.knowledge.v1.KnowledgeSpaceAdminService.RetireKnowledgeSpace:input_type -> powerx.knowledge.v1.RetireKnowledgeSpaceRequest + 8, // 50: powerx.knowledge.v1.KnowledgeSpaceAdminService.TriggerIngestion:input_type -> powerx.knowledge.v1.IngestionJobRequest + 11, // 51: 
powerx.knowledge.v1.KnowledgeSpaceAdminService.PublishFusionStrategy:input_type -> powerx.knowledge.v1.FusionStrategyRequest + 13, // 52: powerx.knowledge.v1.KnowledgeSpaceAdminService.ListFusionStrategies:input_type -> powerx.knowledge.v1.ListFusionStrategiesRequest + 15, // 53: powerx.knowledge.v1.KnowledgeSpaceAdminService.RollbackFusionStrategy:input_type -> powerx.knowledge.v1.RollbackFusionStrategyRequest + 17, // 54: powerx.knowledge.v1.KnowledgeSpaceAdminService.SubmitFeedback:input_type -> powerx.knowledge.v1.FeedbackRequest + 19, // 55: powerx.knowledge.v1.KnowledgeSpaceAdminService.ListFeedbackCases:input_type -> powerx.knowledge.v1.ListFeedbackCasesRequest + 21, // 56: powerx.knowledge.v1.KnowledgeSpaceAdminService.CloseFeedbackCase:input_type -> powerx.knowledge.v1.CloseFeedbackCaseRequest + 22, // 57: powerx.knowledge.v1.KnowledgeSpaceAdminService.EscalateFeedbackCase:input_type -> powerx.knowledge.v1.EscalateFeedbackCaseRequest + 23, // 58: powerx.knowledge.v1.KnowledgeSpaceAdminService.ReprocessFeedbackCase:input_type -> powerx.knowledge.v1.ReprocessFeedbackCaseRequest + 24, // 59: powerx.knowledge.v1.KnowledgeSpaceAdminService.RollbackFeedbackCase:input_type -> powerx.knowledge.v1.RollbackFeedbackCaseRequest + 25, // 60: powerx.knowledge.v1.KnowledgeSpaceAdminService.ExportFeedbackCases:input_type -> powerx.knowledge.v1.ExportFeedbackCasesRequest + 29, // 61: powerx.knowledge.v1.KnowledgeSpaceAdminService.StartDeltaJob:input_type -> powerx.knowledge.v1.StartDeltaJobRequest + 31, // 62: powerx.knowledge.v1.KnowledgeSpaceAdminService.GetDeltaReport:input_type -> powerx.knowledge.v1.GetDeltaReportRequest + 33, // 63: powerx.knowledge.v1.KnowledgeSpaceAdminService.PublishDeltaJob:input_type -> powerx.knowledge.v1.PublishDeltaJobRequest + 35, // 64: powerx.knowledge.v1.KnowledgeSpaceAdminService.RollbackDelta:input_type -> powerx.knowledge.v1.RollbackDeltaRequest + 37, // 65: powerx.knowledge.v1.KnowledgeSpaceAdminService.ApplyEvent:input_type -> 
powerx.knowledge.v1.ApplyEventRequest + 39, // 66: powerx.knowledge.v1.KnowledgeSpaceAdminService.RetryEvent:input_type -> powerx.knowledge.v1.RetryEventRequest + 41, // 67: powerx.knowledge.v1.KnowledgeSpaceAdminService.HotUpdateIndex:input_type -> powerx.knowledge.v1.HotUpdateRequest + 43, // 68: powerx.knowledge.v1.KnowledgeSpaceAdminService.RefreshAgentWeights:input_type -> powerx.knowledge.v1.RefreshAgentRequest + 46, // 69: powerx.knowledge.v1.KnowledgeSpaceAdminService.RunDecayScan:input_type -> powerx.knowledge.v1.RunDecayScanRequest + 48, // 70: powerx.knowledge.v1.KnowledgeSpaceAdminService.ListDecayTasks:input_type -> powerx.knowledge.v1.ListDecayTasksRequest + 50, // 71: powerx.knowledge.v1.KnowledgeSpaceAdminService.RestoreDecayTask:input_type -> powerx.knowledge.v1.RestoreDecayTaskRequest + 52, // 72: powerx.knowledge.v1.KnowledgeSpaceAdminService.UpsertReleasePolicy:input_type -> powerx.knowledge.v1.UpsertReleasePolicyRequest + 55, // 73: powerx.knowledge.v1.KnowledgeSpaceAdminService.PublishRelease:input_type -> powerx.knowledge.v1.PublishReleaseRequest + 57, // 74: powerx.knowledge.v1.KnowledgeSpaceAdminService.PromoteRelease:input_type -> powerx.knowledge.v1.PromoteReleaseRequest + 59, // 75: powerx.knowledge.v1.KnowledgeSpaceAdminService.RollbackRelease:input_type -> powerx.knowledge.v1.RollbackReleaseRequest + 61, // 76: powerx.knowledge.v1.KnowledgeSpaceQABridgeService.PlanRetrieval:input_type -> powerx.knowledge.v1.QARetrievalPlanRequest + 68, // 77: powerx.knowledge.v1.KnowledgeSpaceQABridgeService.UpsertMemorySnapshot:input_type -> powerx.knowledge.v1.QAMemorySnapshotRequest + 3, // 78: powerx.knowledge.v1.KnowledgeSpaceAdminService.CreateKnowledgeSpace:output_type -> powerx.knowledge.v1.CreateKnowledgeSpaceResponse + 5, // 79: powerx.knowledge.v1.KnowledgeSpaceAdminService.UpdateKnowledgeSpace:output_type -> powerx.knowledge.v1.UpdateKnowledgeSpaceResponse + 7, // 80: 
powerx.knowledge.v1.KnowledgeSpaceAdminService.RetireKnowledgeSpace:output_type -> powerx.knowledge.v1.RetireKnowledgeSpaceResponse + 10, // 81: powerx.knowledge.v1.KnowledgeSpaceAdminService.TriggerIngestion:output_type -> powerx.knowledge.v1.IngestionJobResponse + 12, // 82: powerx.knowledge.v1.KnowledgeSpaceAdminService.PublishFusionStrategy:output_type -> powerx.knowledge.v1.FusionStrategyResponse + 14, // 83: powerx.knowledge.v1.KnowledgeSpaceAdminService.ListFusionStrategies:output_type -> powerx.knowledge.v1.ListFusionStrategiesResponse + 12, // 84: powerx.knowledge.v1.KnowledgeSpaceAdminService.RollbackFusionStrategy:output_type -> powerx.knowledge.v1.FusionStrategyResponse + 18, // 85: powerx.knowledge.v1.KnowledgeSpaceAdminService.SubmitFeedback:output_type -> powerx.knowledge.v1.FeedbackResponse + 20, // 86: powerx.knowledge.v1.KnowledgeSpaceAdminService.ListFeedbackCases:output_type -> powerx.knowledge.v1.ListFeedbackCasesResponse + 18, // 87: powerx.knowledge.v1.KnowledgeSpaceAdminService.CloseFeedbackCase:output_type -> powerx.knowledge.v1.FeedbackResponse + 18, // 88: powerx.knowledge.v1.KnowledgeSpaceAdminService.EscalateFeedbackCase:output_type -> powerx.knowledge.v1.FeedbackResponse + 18, // 89: powerx.knowledge.v1.KnowledgeSpaceAdminService.ReprocessFeedbackCase:output_type -> powerx.knowledge.v1.FeedbackResponse + 18, // 90: powerx.knowledge.v1.KnowledgeSpaceAdminService.RollbackFeedbackCase:output_type -> powerx.knowledge.v1.FeedbackResponse + 26, // 91: powerx.knowledge.v1.KnowledgeSpaceAdminService.ExportFeedbackCases:output_type -> powerx.knowledge.v1.ExportFeedbackCasesResponse + 30, // 92: powerx.knowledge.v1.KnowledgeSpaceAdminService.StartDeltaJob:output_type -> powerx.knowledge.v1.StartDeltaJobResponse + 32, // 93: powerx.knowledge.v1.KnowledgeSpaceAdminService.GetDeltaReport:output_type -> powerx.knowledge.v1.GetDeltaReportResponse + 34, // 94: powerx.knowledge.v1.KnowledgeSpaceAdminService.PublishDeltaJob:output_type -> 
powerx.knowledge.v1.PublishDeltaJobResponse + 36, // 95: powerx.knowledge.v1.KnowledgeSpaceAdminService.RollbackDelta:output_type -> powerx.knowledge.v1.RollbackDeltaResponse + 38, // 96: powerx.knowledge.v1.KnowledgeSpaceAdminService.ApplyEvent:output_type -> powerx.knowledge.v1.ApplyEventResponse + 40, // 97: powerx.knowledge.v1.KnowledgeSpaceAdminService.RetryEvent:output_type -> powerx.knowledge.v1.RetryEventResponse + 42, // 98: powerx.knowledge.v1.KnowledgeSpaceAdminService.HotUpdateIndex:output_type -> powerx.knowledge.v1.HotUpdateResponse + 44, // 99: powerx.knowledge.v1.KnowledgeSpaceAdminService.RefreshAgentWeights:output_type -> powerx.knowledge.v1.RefreshAgentResponse + 47, // 100: powerx.knowledge.v1.KnowledgeSpaceAdminService.RunDecayScan:output_type -> powerx.knowledge.v1.RunDecayScanResponse + 49, // 101: powerx.knowledge.v1.KnowledgeSpaceAdminService.ListDecayTasks:output_type -> powerx.knowledge.v1.ListDecayTasksResponse + 51, // 102: powerx.knowledge.v1.KnowledgeSpaceAdminService.RestoreDecayTask:output_type -> powerx.knowledge.v1.RestoreDecayTaskResponse + 54, // 103: powerx.knowledge.v1.KnowledgeSpaceAdminService.UpsertReleasePolicy:output_type -> powerx.knowledge.v1.UpsertReleasePolicyResponse + 56, // 104: powerx.knowledge.v1.KnowledgeSpaceAdminService.PublishRelease:output_type -> powerx.knowledge.v1.PublishReleaseResponse + 58, // 105: powerx.knowledge.v1.KnowledgeSpaceAdminService.PromoteRelease:output_type -> powerx.knowledge.v1.PromoteReleaseResponse + 60, // 106: powerx.knowledge.v1.KnowledgeSpaceAdminService.RollbackRelease:output_type -> powerx.knowledge.v1.RollbackReleaseResponse + 66, // 107: powerx.knowledge.v1.KnowledgeSpaceQABridgeService.PlanRetrieval:output_type -> powerx.knowledge.v1.QARetrievalPlanResponse + 70, // 108: powerx.knowledge.v1.KnowledgeSpaceQABridgeService.UpsertMemorySnapshot:output_type -> powerx.knowledge.v1.QAMemorySnapshotResponse + 78, // [78:109] is the sub-list for method output_type + 47, // [47:78] is 
the sub-list for method input_type + 47, // [47:47] is the sub-list for extension type_name + 47, // [47:47] is the sub-list for extension extendee + 0, // [0:47] is the sub-list for field type_name } func init() { file_powerx_knowledge_v1_knowledge_space_proto_init() } @@ -5283,7 +6051,7 @@ func file_powerx_knowledge_v1_knowledge_space_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_powerx_knowledge_v1_knowledge_space_proto_rawDesc), len(file_powerx_knowledge_v1_knowledge_space_proto_rawDesc)), NumEnums: 1, - NumMessages: 66, + NumMessages: 74, NumExtensions: 0, NumServices: 2, }, diff --git a/backend/api/grpc/gen/go/powerx/knowledge/v1/knowledge_space_grpc.pb.go b/backend/api/grpc/gen/go/powerx/knowledge/v1/knowledge_space_grpc.pb.go index af511018..eda232a5 100644 --- a/backend/api/grpc/gen/go/powerx/knowledge/v1/knowledge_space_grpc.pb.go +++ b/backend/api/grpc/gen/go/powerx/knowledge/v1/knowledge_space_grpc.pb.go @@ -28,6 +28,11 @@ const ( KnowledgeSpaceAdminService_RollbackFusionStrategy_FullMethodName = "/powerx.knowledge.v1.KnowledgeSpaceAdminService/RollbackFusionStrategy" KnowledgeSpaceAdminService_SubmitFeedback_FullMethodName = "/powerx.knowledge.v1.KnowledgeSpaceAdminService/SubmitFeedback" KnowledgeSpaceAdminService_ListFeedbackCases_FullMethodName = "/powerx.knowledge.v1.KnowledgeSpaceAdminService/ListFeedbackCases" + KnowledgeSpaceAdminService_CloseFeedbackCase_FullMethodName = "/powerx.knowledge.v1.KnowledgeSpaceAdminService/CloseFeedbackCase" + KnowledgeSpaceAdminService_EscalateFeedbackCase_FullMethodName = "/powerx.knowledge.v1.KnowledgeSpaceAdminService/EscalateFeedbackCase" + KnowledgeSpaceAdminService_ReprocessFeedbackCase_FullMethodName = "/powerx.knowledge.v1.KnowledgeSpaceAdminService/ReprocessFeedbackCase" + KnowledgeSpaceAdminService_RollbackFeedbackCase_FullMethodName = "/powerx.knowledge.v1.KnowledgeSpaceAdminService/RollbackFeedbackCase" + 
KnowledgeSpaceAdminService_ExportFeedbackCases_FullMethodName = "/powerx.knowledge.v1.KnowledgeSpaceAdminService/ExportFeedbackCases" KnowledgeSpaceAdminService_StartDeltaJob_FullMethodName = "/powerx.knowledge.v1.KnowledgeSpaceAdminService/StartDeltaJob" KnowledgeSpaceAdminService_GetDeltaReport_FullMethodName = "/powerx.knowledge.v1.KnowledgeSpaceAdminService/GetDeltaReport" KnowledgeSpaceAdminService_PublishDeltaJob_FullMethodName = "/powerx.knowledge.v1.KnowledgeSpaceAdminService/PublishDeltaJob" @@ -58,6 +63,11 @@ type KnowledgeSpaceAdminServiceClient interface { RollbackFusionStrategy(ctx context.Context, in *RollbackFusionStrategyRequest, opts ...grpc.CallOption) (*FusionStrategyResponse, error) SubmitFeedback(ctx context.Context, in *FeedbackRequest, opts ...grpc.CallOption) (*FeedbackResponse, error) ListFeedbackCases(ctx context.Context, in *ListFeedbackCasesRequest, opts ...grpc.CallOption) (*ListFeedbackCasesResponse, error) + CloseFeedbackCase(ctx context.Context, in *CloseFeedbackCaseRequest, opts ...grpc.CallOption) (*FeedbackResponse, error) + EscalateFeedbackCase(ctx context.Context, in *EscalateFeedbackCaseRequest, opts ...grpc.CallOption) (*FeedbackResponse, error) + ReprocessFeedbackCase(ctx context.Context, in *ReprocessFeedbackCaseRequest, opts ...grpc.CallOption) (*FeedbackResponse, error) + RollbackFeedbackCase(ctx context.Context, in *RollbackFeedbackCaseRequest, opts ...grpc.CallOption) (*FeedbackResponse, error) + ExportFeedbackCases(ctx context.Context, in *ExportFeedbackCasesRequest, opts ...grpc.CallOption) (*ExportFeedbackCasesResponse, error) StartDeltaJob(ctx context.Context, in *StartDeltaJobRequest, opts ...grpc.CallOption) (*StartDeltaJobResponse, error) GetDeltaReport(ctx context.Context, in *GetDeltaReportRequest, opts ...grpc.CallOption) (*GetDeltaReportResponse, error) PublishDeltaJob(ctx context.Context, in *PublishDeltaJobRequest, opts ...grpc.CallOption) (*PublishDeltaJobResponse, error) @@ -173,6 +183,56 @@ func (c 
*knowledgeSpaceAdminServiceClient) ListFeedbackCases(ctx context.Context return out, nil } +func (c *knowledgeSpaceAdminServiceClient) CloseFeedbackCase(ctx context.Context, in *CloseFeedbackCaseRequest, opts ...grpc.CallOption) (*FeedbackResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(FeedbackResponse) + err := c.cc.Invoke(ctx, KnowledgeSpaceAdminService_CloseFeedbackCase_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *knowledgeSpaceAdminServiceClient) EscalateFeedbackCase(ctx context.Context, in *EscalateFeedbackCaseRequest, opts ...grpc.CallOption) (*FeedbackResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(FeedbackResponse) + err := c.cc.Invoke(ctx, KnowledgeSpaceAdminService_EscalateFeedbackCase_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *knowledgeSpaceAdminServiceClient) ReprocessFeedbackCase(ctx context.Context, in *ReprocessFeedbackCaseRequest, opts ...grpc.CallOption) (*FeedbackResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(FeedbackResponse) + err := c.cc.Invoke(ctx, KnowledgeSpaceAdminService_ReprocessFeedbackCase_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *knowledgeSpaceAdminServiceClient) RollbackFeedbackCase(ctx context.Context, in *RollbackFeedbackCaseRequest, opts ...grpc.CallOption) (*FeedbackResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(FeedbackResponse) + err := c.cc.Invoke(ctx, KnowledgeSpaceAdminService_RollbackFeedbackCase_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *knowledgeSpaceAdminServiceClient) ExportFeedbackCases(ctx context.Context, in *ExportFeedbackCasesRequest, opts ...grpc.CallOption) (*ExportFeedbackCasesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ExportFeedbackCasesResponse) + err := c.cc.Invoke(ctx, KnowledgeSpaceAdminService_ExportFeedbackCases_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *knowledgeSpaceAdminServiceClient) StartDeltaJob(ctx context.Context, in *StartDeltaJobRequest, opts ...grpc.CallOption) (*StartDeltaJobResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StartDeltaJobResponse) @@ -336,6 +396,11 @@ type KnowledgeSpaceAdminServiceServer interface { RollbackFusionStrategy(context.Context, *RollbackFusionStrategyRequest) (*FusionStrategyResponse, error) SubmitFeedback(context.Context, *FeedbackRequest) (*FeedbackResponse, error) ListFeedbackCases(context.Context, *ListFeedbackCasesRequest) (*ListFeedbackCasesResponse, error) + CloseFeedbackCase(context.Context, *CloseFeedbackCaseRequest) (*FeedbackResponse, error) + EscalateFeedbackCase(context.Context, *EscalateFeedbackCaseRequest) (*FeedbackResponse, error) + ReprocessFeedbackCase(context.Context, *ReprocessFeedbackCaseRequest) (*FeedbackResponse, error) + RollbackFeedbackCase(context.Context, *RollbackFeedbackCaseRequest) (*FeedbackResponse, error) + ExportFeedbackCases(context.Context, *ExportFeedbackCasesRequest) (*ExportFeedbackCasesResponse, error) StartDeltaJob(context.Context, *StartDeltaJobRequest) (*StartDeltaJobResponse, error) GetDeltaReport(context.Context, *GetDeltaReportRequest) (*GetDeltaReportResponse, error) PublishDeltaJob(context.Context, *PublishDeltaJobRequest) (*PublishDeltaJobResponse, error) @@ -388,6 +453,21 @@ func (UnimplementedKnowledgeSpaceAdminServiceServer) SubmitFeedback(context.Cont 
func (UnimplementedKnowledgeSpaceAdminServiceServer) ListFeedbackCases(context.Context, *ListFeedbackCasesRequest) (*ListFeedbackCasesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListFeedbackCases not implemented") } +func (UnimplementedKnowledgeSpaceAdminServiceServer) CloseFeedbackCase(context.Context, *CloseFeedbackCaseRequest) (*FeedbackResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CloseFeedbackCase not implemented") +} +func (UnimplementedKnowledgeSpaceAdminServiceServer) EscalateFeedbackCase(context.Context, *EscalateFeedbackCaseRequest) (*FeedbackResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EscalateFeedbackCase not implemented") +} +func (UnimplementedKnowledgeSpaceAdminServiceServer) ReprocessFeedbackCase(context.Context, *ReprocessFeedbackCaseRequest) (*FeedbackResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReprocessFeedbackCase not implemented") +} +func (UnimplementedKnowledgeSpaceAdminServiceServer) RollbackFeedbackCase(context.Context, *RollbackFeedbackCaseRequest) (*FeedbackResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RollbackFeedbackCase not implemented") +} +func (UnimplementedKnowledgeSpaceAdminServiceServer) ExportFeedbackCases(context.Context, *ExportFeedbackCasesRequest) (*ExportFeedbackCasesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExportFeedbackCases not implemented") +} func (UnimplementedKnowledgeSpaceAdminServiceServer) StartDeltaJob(context.Context, *StartDeltaJobRequest) (*StartDeltaJobResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method StartDeltaJob not implemented") } @@ -617,6 +697,96 @@ func _KnowledgeSpaceAdminService_ListFeedbackCases_Handler(srv interface{}, ctx return interceptor(ctx, in, info, handler) } +func _KnowledgeSpaceAdminService_CloseFeedbackCase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CloseFeedbackCaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KnowledgeSpaceAdminServiceServer).CloseFeedbackCase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: KnowledgeSpaceAdminService_CloseFeedbackCase_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KnowledgeSpaceAdminServiceServer).CloseFeedbackCase(ctx, req.(*CloseFeedbackCaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KnowledgeSpaceAdminService_EscalateFeedbackCase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EscalateFeedbackCaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KnowledgeSpaceAdminServiceServer).EscalateFeedbackCase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: KnowledgeSpaceAdminService_EscalateFeedbackCase_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KnowledgeSpaceAdminServiceServer).EscalateFeedbackCase(ctx, req.(*EscalateFeedbackCaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KnowledgeSpaceAdminService_ReprocessFeedbackCase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReprocessFeedbackCaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KnowledgeSpaceAdminServiceServer).ReprocessFeedbackCase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: KnowledgeSpaceAdminService_ReprocessFeedbackCase_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(KnowledgeSpaceAdminServiceServer).ReprocessFeedbackCase(ctx, req.(*ReprocessFeedbackCaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KnowledgeSpaceAdminService_RollbackFeedbackCase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RollbackFeedbackCaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KnowledgeSpaceAdminServiceServer).RollbackFeedbackCase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: KnowledgeSpaceAdminService_RollbackFeedbackCase_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KnowledgeSpaceAdminServiceServer).RollbackFeedbackCase(ctx, req.(*RollbackFeedbackCaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KnowledgeSpaceAdminService_ExportFeedbackCases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportFeedbackCasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KnowledgeSpaceAdminServiceServer).ExportFeedbackCases(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: KnowledgeSpaceAdminService_ExportFeedbackCases_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KnowledgeSpaceAdminServiceServer).ExportFeedbackCases(ctx, req.(*ExportFeedbackCasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _KnowledgeSpaceAdminService_StartDeltaJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StartDeltaJobRequest) if err := dec(in); err != nil { @@ -930,6 +1100,26 @@ 
var KnowledgeSpaceAdminService_ServiceDesc = grpc.ServiceDesc{ MethodName: "ListFeedbackCases", Handler: _KnowledgeSpaceAdminService_ListFeedbackCases_Handler, }, + { + MethodName: "CloseFeedbackCase", + Handler: _KnowledgeSpaceAdminService_CloseFeedbackCase_Handler, + }, + { + MethodName: "EscalateFeedbackCase", + Handler: _KnowledgeSpaceAdminService_EscalateFeedbackCase_Handler, + }, + { + MethodName: "ReprocessFeedbackCase", + Handler: _KnowledgeSpaceAdminService_ReprocessFeedbackCase_Handler, + }, + { + MethodName: "RollbackFeedbackCase", + Handler: _KnowledgeSpaceAdminService_RollbackFeedbackCase_Handler, + }, + { + MethodName: "ExportFeedbackCases", + Handler: _KnowledgeSpaceAdminService_ExportFeedbackCases_Handler, + }, { MethodName: "StartDeltaJob", Handler: _KnowledgeSpaceAdminService_StartDeltaJob_Handler, diff --git a/backend/api/grpc/gen/go/powerx/scheduler/v1/scheduler.pb.go b/backend/api/grpc/gen/go/powerx/scheduler/v1/scheduler.pb.go new file mode 100644 index 00000000..354130c9 --- /dev/null +++ b/backend/api/grpc/gen/go/powerx/scheduler/v1/scheduler.pb.go @@ -0,0 +1,1048 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.4 +// protoc (unknown) +// source: powerx/scheduler/v1/scheduler.proto + +package schedulerv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SchedulerJob struct { + state protoimpl.MessageState `protogen:"open.v1"` + JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` + TenantUuid string `protobuf:"bytes,2,opt,name=tenant_uuid,json=tenantUuid,proto3" json:"tenant_uuid,omitempty"` + OwnerType string `protobuf:"bytes,3,opt,name=owner_type,json=ownerType,proto3" json:"owner_type,omitempty"` + OwnerId string `protobuf:"bytes,4,opt,name=owner_id,json=ownerId,proto3" json:"owner_id,omitempty"` + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + ScheduleType string `protobuf:"bytes,6,opt,name=schedule_type,json=scheduleType,proto3" json:"schedule_type,omitempty"` + ScheduleExpr string `protobuf:"bytes,7,opt,name=schedule_expr,json=scheduleExpr,proto3" json:"schedule_expr,omitempty"` + Timezone string `protobuf:"bytes,8,opt,name=timezone,proto3" json:"timezone,omitempty"` + PayloadJson []byte `protobuf:"bytes,9,opt,name=payload_json,json=payloadJson,proto3" json:"payload_json,omitempty"` + Status string `protobuf:"bytes,10,opt,name=status,proto3" json:"status,omitempty"` + NextRunAt string `protobuf:"bytes,11,opt,name=next_run_at,json=nextRunAt,proto3" json:"next_run_at,omitempty"` + LastRunAt string `protobuf:"bytes,12,opt,name=last_run_at,json=lastRunAt,proto3" json:"last_run_at,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SchedulerJob) Reset() { + *x = SchedulerJob{} + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SchedulerJob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchedulerJob) ProtoMessage() {} + +func (x *SchedulerJob) ProtoReflect() protoreflect.Message { + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[0] + if x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchedulerJob.ProtoReflect.Descriptor instead. +func (*SchedulerJob) Descriptor() ([]byte, []int) { + return file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP(), []int{0} +} + +func (x *SchedulerJob) GetJobId() string { + if x != nil { + return x.JobId + } + return "" +} + +func (x *SchedulerJob) GetTenantUuid() string { + if x != nil { + return x.TenantUuid + } + return "" +} + +func (x *SchedulerJob) GetOwnerType() string { + if x != nil { + return x.OwnerType + } + return "" +} + +func (x *SchedulerJob) GetOwnerId() string { + if x != nil { + return x.OwnerId + } + return "" +} + +func (x *SchedulerJob) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SchedulerJob) GetScheduleType() string { + if x != nil { + return x.ScheduleType + } + return "" +} + +func (x *SchedulerJob) GetScheduleExpr() string { + if x != nil { + return x.ScheduleExpr + } + return "" +} + +func (x *SchedulerJob) GetTimezone() string { + if x != nil { + return x.Timezone + } + return "" +} + +func (x *SchedulerJob) GetPayloadJson() []byte { + if x != nil { + return x.PayloadJson + } + return nil +} + +func (x *SchedulerJob) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *SchedulerJob) GetNextRunAt() string { + if x != nil { + return x.NextRunAt + } + return "" +} + +func (x *SchedulerJob) GetLastRunAt() string { + if x != nil { + return x.LastRunAt + } + return "" +} + +type CreateJobRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Job *SchedulerJob `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateJobRequest) Reset() { + *x = CreateJobRequest{} + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[1] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateJobRequest) ProtoMessage() {} + +func (x *CreateJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateJobRequest.ProtoReflect.Descriptor instead. +func (*CreateJobRequest) Descriptor() ([]byte, []int) { + return file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP(), []int{1} +} + +func (x *CreateJobRequest) GetJob() *SchedulerJob { + if x != nil { + return x.Job + } + return nil +} + +type UpdateJobRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Job *SchedulerJob `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateJobRequest) Reset() { + *x = UpdateJobRequest{} + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateJobRequest) ProtoMessage() {} + +func (x *UpdateJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateJobRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateJobRequest) Descriptor() ([]byte, []int) { + return file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP(), []int{2} +} + +func (x *UpdateJobRequest) GetJob() *SchedulerJob { + if x != nil { + return x.Job + } + return nil +} + +type PauseJobRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` + TenantUuid string `protobuf:"bytes,2,opt,name=tenant_uuid,json=tenantUuid,proto3" json:"tenant_uuid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PauseJobRequest) Reset() { + *x = PauseJobRequest{} + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PauseJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PauseJobRequest) ProtoMessage() {} + +func (x *PauseJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PauseJobRequest.ProtoReflect.Descriptor instead. 
+func (*PauseJobRequest) Descriptor() ([]byte, []int) { + return file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP(), []int{3} +} + +func (x *PauseJobRequest) GetJobId() string { + if x != nil { + return x.JobId + } + return "" +} + +func (x *PauseJobRequest) GetTenantUuid() string { + if x != nil { + return x.TenantUuid + } + return "" +} + +type ResumeJobRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` + TenantUuid string `protobuf:"bytes,2,opt,name=tenant_uuid,json=tenantUuid,proto3" json:"tenant_uuid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResumeJobRequest) Reset() { + *x = ResumeJobRequest{} + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResumeJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResumeJobRequest) ProtoMessage() {} + +func (x *ResumeJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResumeJobRequest.ProtoReflect.Descriptor instead. 
+func (*ResumeJobRequest) Descriptor() ([]byte, []int) { + return file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP(), []int{4} +} + +func (x *ResumeJobRequest) GetJobId() string { + if x != nil { + return x.JobId + } + return "" +} + +func (x *ResumeJobRequest) GetTenantUuid() string { + if x != nil { + return x.TenantUuid + } + return "" +} + +type TriggerJobRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` + TenantUuid string `protobuf:"bytes,2,opt,name=tenant_uuid,json=tenantUuid,proto3" json:"tenant_uuid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TriggerJobRequest) Reset() { + *x = TriggerJobRequest{} + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TriggerJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TriggerJobRequest) ProtoMessage() {} + +func (x *TriggerJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TriggerJobRequest.ProtoReflect.Descriptor instead. 
+func (*TriggerJobRequest) Descriptor() ([]byte, []int) { + return file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP(), []int{5} +} + +func (x *TriggerJobRequest) GetJobId() string { + if x != nil { + return x.JobId + } + return "" +} + +func (x *TriggerJobRequest) GetTenantUuid() string { + if x != nil { + return x.TenantUuid + } + return "" +} + +type GetJobRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` + TenantUuid string `protobuf:"bytes,2,opt,name=tenant_uuid,json=tenantUuid,proto3" json:"tenant_uuid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetJobRequest) Reset() { + *x = GetJobRequest{} + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetJobRequest) ProtoMessage() {} + +func (x *GetJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetJobRequest.ProtoReflect.Descriptor instead. 
+func (*GetJobRequest) Descriptor() ([]byte, []int) { + return file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP(), []int{6} +} + +func (x *GetJobRequest) GetJobId() string { + if x != nil { + return x.JobId + } + return "" +} + +func (x *GetJobRequest) GetTenantUuid() string { + if x != nil { + return x.TenantUuid + } + return "" +} + +type ListJobsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + TenantUuid string `protobuf:"bytes,1,opt,name=tenant_uuid,json=tenantUuid,proto3" json:"tenant_uuid,omitempty"` + Limit int32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListJobsRequest) Reset() { + *x = ListJobsRequest{} + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListJobsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListJobsRequest) ProtoMessage() {} + +func (x *ListJobsRequest) ProtoReflect() protoreflect.Message { + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListJobsRequest.ProtoReflect.Descriptor instead. 
+func (*ListJobsRequest) Descriptor() ([]byte, []int) { + return file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP(), []int{7} +} + +func (x *ListJobsRequest) GetTenantUuid() string { + if x != nil { + return x.TenantUuid + } + return "" +} + +func (x *ListJobsRequest) GetLimit() int32 { + if x != nil { + return x.Limit + } + return 0 +} + +type CreateJobResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Job *SchedulerJob `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateJobResponse) Reset() { + *x = CreateJobResponse{} + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateJobResponse) ProtoMessage() {} + +func (x *CreateJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateJobResponse.ProtoReflect.Descriptor instead. 
+func (*CreateJobResponse) Descriptor() ([]byte, []int) { + return file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP(), []int{8} +} + +func (x *CreateJobResponse) GetJob() *SchedulerJob { + if x != nil { + return x.Job + } + return nil +} + +type UpdateJobResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Job *SchedulerJob `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateJobResponse) Reset() { + *x = UpdateJobResponse{} + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateJobResponse) ProtoMessage() {} + +func (x *UpdateJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateJobResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateJobResponse) Descriptor() ([]byte, []int) { + return file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP(), []int{9} +} + +func (x *UpdateJobResponse) GetJob() *SchedulerJob { + if x != nil { + return x.Job + } + return nil +} + +type PauseJobResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Job *SchedulerJob `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PauseJobResponse) Reset() { + *x = PauseJobResponse{} + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PauseJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PauseJobResponse) ProtoMessage() {} + +func (x *PauseJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PauseJobResponse.ProtoReflect.Descriptor instead. 
+func (*PauseJobResponse) Descriptor() ([]byte, []int) { + return file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP(), []int{10} +} + +func (x *PauseJobResponse) GetJob() *SchedulerJob { + if x != nil { + return x.Job + } + return nil +} + +type ResumeJobResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Job *SchedulerJob `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResumeJobResponse) Reset() { + *x = ResumeJobResponse{} + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResumeJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResumeJobResponse) ProtoMessage() {} + +func (x *ResumeJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResumeJobResponse.ProtoReflect.Descriptor instead. 
+func (*ResumeJobResponse) Descriptor() ([]byte, []int) { + return file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP(), []int{11} +} + +func (x *ResumeJobResponse) GetJob() *SchedulerJob { + if x != nil { + return x.Job + } + return nil +} + +type TriggerJobResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Job *SchedulerJob `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TriggerJobResponse) Reset() { + *x = TriggerJobResponse{} + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TriggerJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TriggerJobResponse) ProtoMessage() {} + +func (x *TriggerJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TriggerJobResponse.ProtoReflect.Descriptor instead. 
+func (*TriggerJobResponse) Descriptor() ([]byte, []int) { + return file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP(), []int{12} +} + +func (x *TriggerJobResponse) GetJob() *SchedulerJob { + if x != nil { + return x.Job + } + return nil +} + +type GetJobResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Job *SchedulerJob `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetJobResponse) Reset() { + *x = GetJobResponse{} + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetJobResponse) ProtoMessage() {} + +func (x *GetJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetJobResponse.ProtoReflect.Descriptor instead. 
+func (*GetJobResponse) Descriptor() ([]byte, []int) { + return file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP(), []int{13} +} + +func (x *GetJobResponse) GetJob() *SchedulerJob { + if x != nil { + return x.Job + } + return nil +} + +type ListJobsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Jobs []*SchedulerJob `protobuf:"bytes,1,rep,name=jobs,proto3" json:"jobs,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListJobsResponse) Reset() { + *x = ListJobsResponse{} + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListJobsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListJobsResponse) ProtoMessage() {} + +func (x *ListJobsResponse) ProtoReflect() protoreflect.Message { + mi := &file_powerx_scheduler_v1_scheduler_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListJobsResponse.ProtoReflect.Descriptor instead. 
+func (*ListJobsResponse) Descriptor() ([]byte, []int) { + return file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP(), []int{14} +} + +func (x *ListJobsResponse) GetJobs() []*SchedulerJob { + if x != nil { + return x.Jobs + } + return nil +} + +var File_powerx_scheduler_v1_scheduler_proto protoreflect.FileDescriptor + +var file_powerx_scheduler_v1_scheduler_proto_rawDesc = string([]byte{ + 0x0a, 0x23, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, + 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, + 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x22, 0xf5, 0x02, 0x0a, 0x0c, 0x53, + 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x4a, 0x6f, 0x62, 0x12, 0x15, 0x0a, 0x06, 0x6a, + 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, + 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x75, 0x75, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x55, + 0x75, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, + 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, + 0x6c, 0x65, 0x5f, 0x65, 0x78, 0x70, 
0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, + 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x45, 0x78, 0x70, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x74, + 0x69, 0x6d, 0x65, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x69, 0x6d, 0x65, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x1e, 0x0a, 0x0b, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x61, + 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65, 0x78, 0x74, 0x52, 0x75, 0x6e, + 0x41, 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x61, + 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x52, 0x75, 0x6e, + 0x41, 0x74, 0x22, 0x47, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, + 0x6c, 0x65, 0x72, 0x4a, 0x6f, 0x62, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x22, 0x47, 0x0a, 0x10, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x33, 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, + 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x4a, 0x6f, 0x62, 0x52, + 0x03, 0x6a, 0x6f, 0x62, 0x22, 0x49, 0x0a, 0x0f, 0x50, 0x61, 0x75, 0x73, 
0x65, 0x4a, 0x6f, 0x62, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x1f, + 0x0a, 0x0b, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x55, 0x75, 0x69, 0x64, 0x22, + 0x4a, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, + 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x55, 0x75, 0x69, 0x64, 0x22, 0x4b, 0x0a, 0x11, 0x54, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6e, 0x61, 0x6e, + 0x74, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, + 0x6e, 0x61, 0x6e, 0x74, 0x55, 0x75, 0x69, 0x64, 0x22, 0x47, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4a, + 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, + 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x55, 0x75, 0x69, + 0x64, 0x22, 0x48, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x75, + 0x75, 0x69, 
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6e, 0x61, 0x6e, + 0x74, 0x55, 0x75, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x48, 0x0a, 0x11, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x33, 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x4a, 0x6f, 0x62, + 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x22, 0x48, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4a, + 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x03, 0x6a, 0x6f, + 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, + 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x4a, 0x6f, 0x62, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x22, + 0x47, 0x0a, 0x10, 0x50, 0x61, 0x75, 0x73, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, + 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, + 0x4a, 0x6f, 0x62, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x22, 0x48, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x75, + 0x6d, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, + 0x03, 0x6a, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x6f, 0x77, + 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 
0x65, 0x72, 0x4a, 0x6f, 0x62, 0x52, 0x03, 0x6a, + 0x6f, 0x62, 0x22, 0x49, 0x0a, 0x12, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x4a, 0x6f, 0x62, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, + 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x64, 0x75, 0x6c, 0x65, 0x72, 0x4a, 0x6f, 0x62, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x22, 0x45, 0x0a, + 0x0e, 0x47, 0x65, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x33, 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, + 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x4a, 0x6f, 0x62, 0x52, + 0x03, 0x6a, 0x6f, 0x62, 0x22, 0x49, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x4a, 0x6f, 0x62, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x6a, 0x6f, 0x62, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, + 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x4a, 0x6f, 0x62, 0x52, 0x04, 0x6a, 0x6f, 0x62, 0x73, 0x32, + 0x8a, 0x05, 0x0a, 0x10, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x5a, 0x0a, 0x09, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, + 0x62, 0x12, 0x25, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, + 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, + 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, 
0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x5a, 0x0a, 0x09, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x12, 0x25, 0x2e, + 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, + 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, 0x0a, 0x08, + 0x50, 0x61, 0x75, 0x73, 0x65, 0x4a, 0x6f, 0x62, 0x12, 0x24, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, + 0x61, 0x75, 0x73, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, + 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, + 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x75, 0x73, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4a, + 0x6f, 0x62, 0x12, 0x25, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, + 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4a, + 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x6f, 0x77, 0x65, + 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x5d, 0x0a, 0x0a, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x4a, 0x6f, 0x62, 0x12, + 0x26, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, + 0x65, 0x72, 0x2e, 0x76, 
0x31, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x4a, 0x6f, 0x62, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, + 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, + 0x69, 0x67, 0x67, 0x65, 0x72, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x51, 0x0a, 0x06, 0x47, 0x65, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x22, 0x2e, 0x70, 0x6f, 0x77, + 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x47, 0x65, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, + 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, + 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x57, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, + 0x24, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, + 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, + 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0xe7, 0x01, 0x0a, + 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x73, 0x63, 0x68, 0x65, + 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, + 0x6c, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4e, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x72, 0x74, 0x69, 0x73, 0x61, 0x6e, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x2f, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x58, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, + 0x72, 0x70, 0x63, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 
0x2f, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x78, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x3b, 0x73, + 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x50, 0x53, 0x58, + 0xaa, 0x02, 0x13, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, + 0x6c, 0x65, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x5c, + 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1f, 0x50, + 0x6f, 0x77, 0x65, 0x72, 0x78, 0x5c, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x5c, + 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, + 0x15, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x78, 0x3a, 0x3a, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, + 0x65, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_powerx_scheduler_v1_scheduler_proto_rawDescOnce sync.Once + file_powerx_scheduler_v1_scheduler_proto_rawDescData []byte +) + +func file_powerx_scheduler_v1_scheduler_proto_rawDescGZIP() []byte { + file_powerx_scheduler_v1_scheduler_proto_rawDescOnce.Do(func() { + file_powerx_scheduler_v1_scheduler_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_powerx_scheduler_v1_scheduler_proto_rawDesc), len(file_powerx_scheduler_v1_scheduler_proto_rawDesc))) + }) + return file_powerx_scheduler_v1_scheduler_proto_rawDescData +} + +var file_powerx_scheduler_v1_scheduler_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_powerx_scheduler_v1_scheduler_proto_goTypes = []any{ + (*SchedulerJob)(nil), // 0: powerx.scheduler.v1.SchedulerJob + (*CreateJobRequest)(nil), // 1: powerx.scheduler.v1.CreateJobRequest + (*UpdateJobRequest)(nil), // 2: powerx.scheduler.v1.UpdateJobRequest + (*PauseJobRequest)(nil), // 3: powerx.scheduler.v1.PauseJobRequest + (*ResumeJobRequest)(nil), // 4: powerx.scheduler.v1.ResumeJobRequest + 
(*TriggerJobRequest)(nil), // 5: powerx.scheduler.v1.TriggerJobRequest + (*GetJobRequest)(nil), // 6: powerx.scheduler.v1.GetJobRequest + (*ListJobsRequest)(nil), // 7: powerx.scheduler.v1.ListJobsRequest + (*CreateJobResponse)(nil), // 8: powerx.scheduler.v1.CreateJobResponse + (*UpdateJobResponse)(nil), // 9: powerx.scheduler.v1.UpdateJobResponse + (*PauseJobResponse)(nil), // 10: powerx.scheduler.v1.PauseJobResponse + (*ResumeJobResponse)(nil), // 11: powerx.scheduler.v1.ResumeJobResponse + (*TriggerJobResponse)(nil), // 12: powerx.scheduler.v1.TriggerJobResponse + (*GetJobResponse)(nil), // 13: powerx.scheduler.v1.GetJobResponse + (*ListJobsResponse)(nil), // 14: powerx.scheduler.v1.ListJobsResponse +} +var file_powerx_scheduler_v1_scheduler_proto_depIdxs = []int32{ + 0, // 0: powerx.scheduler.v1.CreateJobRequest.job:type_name -> powerx.scheduler.v1.SchedulerJob + 0, // 1: powerx.scheduler.v1.UpdateJobRequest.job:type_name -> powerx.scheduler.v1.SchedulerJob + 0, // 2: powerx.scheduler.v1.CreateJobResponse.job:type_name -> powerx.scheduler.v1.SchedulerJob + 0, // 3: powerx.scheduler.v1.UpdateJobResponse.job:type_name -> powerx.scheduler.v1.SchedulerJob + 0, // 4: powerx.scheduler.v1.PauseJobResponse.job:type_name -> powerx.scheduler.v1.SchedulerJob + 0, // 5: powerx.scheduler.v1.ResumeJobResponse.job:type_name -> powerx.scheduler.v1.SchedulerJob + 0, // 6: powerx.scheduler.v1.TriggerJobResponse.job:type_name -> powerx.scheduler.v1.SchedulerJob + 0, // 7: powerx.scheduler.v1.GetJobResponse.job:type_name -> powerx.scheduler.v1.SchedulerJob + 0, // 8: powerx.scheduler.v1.ListJobsResponse.jobs:type_name -> powerx.scheduler.v1.SchedulerJob + 1, // 9: powerx.scheduler.v1.SchedulerService.CreateJob:input_type -> powerx.scheduler.v1.CreateJobRequest + 2, // 10: powerx.scheduler.v1.SchedulerService.UpdateJob:input_type -> powerx.scheduler.v1.UpdateJobRequest + 3, // 11: powerx.scheduler.v1.SchedulerService.PauseJob:input_type -> powerx.scheduler.v1.PauseJobRequest + 4, 
// 12: powerx.scheduler.v1.SchedulerService.ResumeJob:input_type -> powerx.scheduler.v1.ResumeJobRequest + 5, // 13: powerx.scheduler.v1.SchedulerService.TriggerJob:input_type -> powerx.scheduler.v1.TriggerJobRequest + 6, // 14: powerx.scheduler.v1.SchedulerService.GetJob:input_type -> powerx.scheduler.v1.GetJobRequest + 7, // 15: powerx.scheduler.v1.SchedulerService.ListJobs:input_type -> powerx.scheduler.v1.ListJobsRequest + 8, // 16: powerx.scheduler.v1.SchedulerService.CreateJob:output_type -> powerx.scheduler.v1.CreateJobResponse + 9, // 17: powerx.scheduler.v1.SchedulerService.UpdateJob:output_type -> powerx.scheduler.v1.UpdateJobResponse + 10, // 18: powerx.scheduler.v1.SchedulerService.PauseJob:output_type -> powerx.scheduler.v1.PauseJobResponse + 11, // 19: powerx.scheduler.v1.SchedulerService.ResumeJob:output_type -> powerx.scheduler.v1.ResumeJobResponse + 12, // 20: powerx.scheduler.v1.SchedulerService.TriggerJob:output_type -> powerx.scheduler.v1.TriggerJobResponse + 13, // 21: powerx.scheduler.v1.SchedulerService.GetJob:output_type -> powerx.scheduler.v1.GetJobResponse + 14, // 22: powerx.scheduler.v1.SchedulerService.ListJobs:output_type -> powerx.scheduler.v1.ListJobsResponse + 16, // [16:23] is the sub-list for method output_type + 9, // [9:16] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_powerx_scheduler_v1_scheduler_proto_init() } +func file_powerx_scheduler_v1_scheduler_proto_init() { + if File_powerx_scheduler_v1_scheduler_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_powerx_scheduler_v1_scheduler_proto_rawDesc), len(file_powerx_scheduler_v1_scheduler_proto_rawDesc)), + NumEnums: 0, + NumMessages: 15, + 
NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_powerx_scheduler_v1_scheduler_proto_goTypes, + DependencyIndexes: file_powerx_scheduler_v1_scheduler_proto_depIdxs, + MessageInfos: file_powerx_scheduler_v1_scheduler_proto_msgTypes, + }.Build() + File_powerx_scheduler_v1_scheduler_proto = out.File + file_powerx_scheduler_v1_scheduler_proto_goTypes = nil + file_powerx_scheduler_v1_scheduler_proto_depIdxs = nil +} diff --git a/backend/api/grpc/gen/go/powerx/scheduler/v1/scheduler_grpc.pb.go b/backend/api/grpc/gen/go/powerx/scheduler/v1/scheduler_grpc.pb.go new file mode 100644 index 00000000..d8813ca9 --- /dev/null +++ b/backend/api/grpc/gen/go/powerx/scheduler/v1/scheduler_grpc.pb.go @@ -0,0 +1,349 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc (unknown) +// source: powerx/scheduler/v1/scheduler.proto + +package schedulerv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. 
+const _ = grpc.SupportPackageIsVersion9 + +const ( + SchedulerService_CreateJob_FullMethodName = "/powerx.scheduler.v1.SchedulerService/CreateJob" + SchedulerService_UpdateJob_FullMethodName = "/powerx.scheduler.v1.SchedulerService/UpdateJob" + SchedulerService_PauseJob_FullMethodName = "/powerx.scheduler.v1.SchedulerService/PauseJob" + SchedulerService_ResumeJob_FullMethodName = "/powerx.scheduler.v1.SchedulerService/ResumeJob" + SchedulerService_TriggerJob_FullMethodName = "/powerx.scheduler.v1.SchedulerService/TriggerJob" + SchedulerService_GetJob_FullMethodName = "/powerx.scheduler.v1.SchedulerService/GetJob" + SchedulerService_ListJobs_FullMethodName = "/powerx.scheduler.v1.SchedulerService/ListJobs" +) + +// SchedulerServiceClient is the client API for SchedulerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type SchedulerServiceClient interface { + CreateJob(ctx context.Context, in *CreateJobRequest, opts ...grpc.CallOption) (*CreateJobResponse, error) + UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*UpdateJobResponse, error) + PauseJob(ctx context.Context, in *PauseJobRequest, opts ...grpc.CallOption) (*PauseJobResponse, error) + ResumeJob(ctx context.Context, in *ResumeJobRequest, opts ...grpc.CallOption) (*ResumeJobResponse, error) + TriggerJob(ctx context.Context, in *TriggerJobRequest, opts ...grpc.CallOption) (*TriggerJobResponse, error) + GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*GetJobResponse, error) + ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) +} + +type schedulerServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewSchedulerServiceClient(cc grpc.ClientConnInterface) SchedulerServiceClient { + return &schedulerServiceClient{cc} +} + +func (c *schedulerServiceClient) CreateJob(ctx 
context.Context, in *CreateJobRequest, opts ...grpc.CallOption) (*CreateJobResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CreateJobResponse) + err := c.cc.Invoke(ctx, SchedulerService_CreateJob_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerServiceClient) UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*UpdateJobResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(UpdateJobResponse) + err := c.cc.Invoke(ctx, SchedulerService_UpdateJob_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerServiceClient) PauseJob(ctx context.Context, in *PauseJobRequest, opts ...grpc.CallOption) (*PauseJobResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(PauseJobResponse) + err := c.cc.Invoke(ctx, SchedulerService_PauseJob_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerServiceClient) ResumeJob(ctx context.Context, in *ResumeJobRequest, opts ...grpc.CallOption) (*ResumeJobResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ResumeJobResponse) + err := c.cc.Invoke(ctx, SchedulerService_ResumeJob_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerServiceClient) TriggerJob(ctx context.Context, in *TriggerJobRequest, opts ...grpc.CallOption) (*TriggerJobResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(TriggerJobResponse) + err := c.cc.Invoke(ctx, SchedulerService_TriggerJob_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerServiceClient) GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*GetJobResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetJobResponse) + err := c.cc.Invoke(ctx, SchedulerService_GetJob_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerServiceClient) ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListJobsResponse) + err := c.cc.Invoke(ctx, SchedulerService_ListJobs_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SchedulerServiceServer is the server API for SchedulerService service. +// All implementations must embed UnimplementedSchedulerServiceServer +// for forward compatibility. +type SchedulerServiceServer interface { + CreateJob(context.Context, *CreateJobRequest) (*CreateJobResponse, error) + UpdateJob(context.Context, *UpdateJobRequest) (*UpdateJobResponse, error) + PauseJob(context.Context, *PauseJobRequest) (*PauseJobResponse, error) + ResumeJob(context.Context, *ResumeJobRequest) (*ResumeJobResponse, error) + TriggerJob(context.Context, *TriggerJobRequest) (*TriggerJobResponse, error) + GetJob(context.Context, *GetJobRequest) (*GetJobResponse, error) + ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error) + mustEmbedUnimplementedSchedulerServiceServer() +} + +// UnimplementedSchedulerServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedSchedulerServiceServer struct{} + +func (UnimplementedSchedulerServiceServer) CreateJob(context.Context, *CreateJobRequest) (*CreateJobResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateJob not implemented") +} +func (UnimplementedSchedulerServiceServer) UpdateJob(context.Context, *UpdateJobRequest) (*UpdateJobResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateJob not implemented") +} +func (UnimplementedSchedulerServiceServer) PauseJob(context.Context, *PauseJobRequest) (*PauseJobResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PauseJob not implemented") +} +func (UnimplementedSchedulerServiceServer) ResumeJob(context.Context, *ResumeJobRequest) (*ResumeJobResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResumeJob not implemented") +} +func (UnimplementedSchedulerServiceServer) TriggerJob(context.Context, *TriggerJobRequest) (*TriggerJobResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TriggerJob not implemented") +} +func (UnimplementedSchedulerServiceServer) GetJob(context.Context, *GetJobRequest) (*GetJobResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetJob not implemented") +} +func (UnimplementedSchedulerServiceServer) ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListJobs not implemented") +} +func (UnimplementedSchedulerServiceServer) mustEmbedUnimplementedSchedulerServiceServer() {} +func (UnimplementedSchedulerServiceServer) testEmbeddedByValue() {} + +// UnsafeSchedulerServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SchedulerServiceServer will +// result in compilation errors. 
+type UnsafeSchedulerServiceServer interface { + mustEmbedUnimplementedSchedulerServiceServer() +} + +func RegisterSchedulerServiceServer(s grpc.ServiceRegistrar, srv SchedulerServiceServer) { + // If the following call pancis, it indicates UnimplementedSchedulerServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&SchedulerService_ServiceDesc, srv) +} + +func _SchedulerService_CreateJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).CreateJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SchedulerService_CreateJob_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).CreateJob(ctx, req.(*CreateJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SchedulerService_UpdateJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).UpdateJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SchedulerService_UpdateJob_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).UpdateJob(ctx, req.(*UpdateJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_SchedulerService_PauseJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PauseJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).PauseJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SchedulerService_PauseJob_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).PauseJob(ctx, req.(*PauseJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SchedulerService_ResumeJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResumeJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).ResumeJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SchedulerService_ResumeJob_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).ResumeJob(ctx, req.(*ResumeJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SchedulerService_TriggerJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TriggerJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).TriggerJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SchedulerService_TriggerJob_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).TriggerJob(ctx, req.(*TriggerJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_SchedulerService_GetJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).GetJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SchedulerService_GetJob_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).GetJob(ctx, req.(*GetJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SchedulerService_ListJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListJobsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).ListJobs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SchedulerService_ListJobs_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).ListJobs(ctx, req.(*ListJobsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// SchedulerService_ServiceDesc is the grpc.ServiceDesc for SchedulerService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var SchedulerService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "powerx.scheduler.v1.SchedulerService", + HandlerType: (*SchedulerServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateJob", + Handler: _SchedulerService_CreateJob_Handler, + }, + { + MethodName: "UpdateJob", + Handler: _SchedulerService_UpdateJob_Handler, + }, + { + MethodName: "PauseJob", + Handler: _SchedulerService_PauseJob_Handler, + }, + { + MethodName: "ResumeJob", + Handler: _SchedulerService_ResumeJob_Handler, + }, + { + MethodName: "TriggerJob", + Handler: _SchedulerService_TriggerJob_Handler, + }, + { + MethodName: "GetJob", + Handler: _SchedulerService_GetJob_Handler, + }, + { + MethodName: "ListJobs", + Handler: _SchedulerService_ListJobs_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "powerx/scheduler/v1/scheduler.proto", +} diff --git a/backend/cmd/app/main.go b/backend/cmd/app/main.go index a03dfb6f..3b77a3e0 100644 --- a/backend/cmd/app/main.go +++ b/backend/cmd/app/main.go @@ -4,6 +4,8 @@ import ( "context" "fmt" "log" + "os" + "path/filepath" "github.com/ArtisanCloud/PowerX/config" "github.com/ArtisanCloud/PowerX/internal/bootstrap" @@ -11,6 +13,7 @@ import ( "github.com/ArtisanCloud/PowerX/internal/openapi" grpcserver "github.com/ArtisanCloud/PowerX/internal/server/grpc" authorizationService "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/authorization" + apikeypermissions "github.com/ArtisanCloud/PowerX/internal/service/integration_gateway/apikeypermissions" "github.com/ArtisanCloud/PowerX/pkg/corex/audit" "github.com/ArtisanCloud/PowerX/pkg/utils/logger" "github.com/gin-gonic/gin" @@ -27,6 +30,20 @@ import ( func main() { ctx := context.Background() + // 加载全局配置 + cfg := config.GetGlobalConfig() + if cfg == nil { + log.Fatalf("加载配置文件失败") + } + 
apikeypermissions.SetIntroducedVersion(cfg.EffectiveSystemVersion()) + + // Gin 的 debug 路由打印按 log.http_debug 控制 + if cfg.LogConfig.HttpDebug { + gin.SetMode(gin.DebugMode) + } else { + gin.SetMode(gin.ReleaseMode) + } + r := gin.New() // Swagger 文档元信息(也可在 api/openapi/docs.go 中修改默认生成的内容) @@ -35,12 +52,6 @@ func main() { docs.SwaggerInfo.Version = "v1.0.0" docs.SwaggerInfo.BasePath = "/" - // 加载全局配置 - cfg := config.GetGlobalConfig() - if cfg == nil { - log.Fatalf("加载配置文件失败") - } - // 初始化应用核心依赖 deps, err := bootstrap.BootstrapApp(ctx, cfg) if err != nil { @@ -72,16 +83,27 @@ func main() { return } - if deps.EventFabric != nil && deps.EventFabric.Authorization != nil { - if deps.EventFabric.Authorization.TimeoutWorker != nil { - go deps.EventFabric.Authorization.TimeoutWorker.Run(ctx) + if deps.EventFabric != nil { + if deps.EventFabric.RetryWorker != nil { + go deps.EventFabric.RetryWorker.Run(ctx) + } + if deps.EventFabric.CronDispatcherWorker != nil { + go deps.EventFabric.CronDispatcherWorker.Run(ctx) } - if deps.EventFabric.Authorization.Service != nil { - go func() { - if err := deps.EventFabric.Authorization.Service.ListenCacheInvalidation(ctx); err != nil && err != authorizationService.ErrOperationUnsupported { - logger.WarnF(ctx, "authorization cache listener stopped: %v", err) - } - }() + if deps.EventFabric.NotificationWorker != nil { + go deps.EventFabric.NotificationWorker.Run(ctx) + } + if deps.EventFabric.Authorization != nil { + if deps.EventFabric.Authorization.TimeoutTaskWorker != nil { + go deps.EventFabric.Authorization.TimeoutTaskWorker.Run(ctx) + } + if deps.EventFabric.Authorization.Service != nil { + go func() { + if err := deps.EventFabric.Authorization.Service.ListenCacheInvalidation(ctx); err != nil && err != authorizationService.ErrOperationUnsupported { + logger.WarnF(ctx, "authorization cache listener stopped: %v", err) + } + }() + } } } @@ -102,15 +124,21 @@ func main() { Title: "PowerX Admin API (Minimal)", Version: "v1.0.0", }) - // 
生成并保存最小 OpenAPI 文档文件 - if err := openapi.SaveMinimalDoc(r, openapi.Info{ + // 生成并保存最小 OpenAPI 文档文件(兼容不同启动目录) + docInfo := openapi.Info{ Title: "PowerX Admin API (Minimal)", Version: "v1.0.0", BaseURL: "/", - }, "./api/openapi"); err != nil { + } + if err := saveMinimalOpenAPIDocs(r, docInfo); err != nil { logger.ErrorF(ctx, "写入最小 OpenAPI 文档失败: %s", err.Error()) } + // 打印路由(受 log.http_debug 控制) + // if cfg.LogConfig.HttpDebug { + // http.PrintRouteInfo(r, cfg) + // } + // 运行 HTTP 服务 err = r.Run(addr) if err != nil { @@ -118,3 +146,40 @@ func main() { } } + +func saveMinimalOpenAPIDocs(r *gin.Engine, info openapi.Info) error { + cwd, _ := os.Getwd() + candidates := []string{"./api/openapi", "./backend/api/openapi"} + ordered := make([]string, 0, len(candidates)) + for _, path := range candidates { + absPath := path + if !filepath.IsAbs(path) { + absPath = filepath.Join(cwd, path) + } + if stat, err := os.Stat(absPath); err == nil && stat.IsDir() { + ordered = append(ordered, absPath) + } + } + for _, path := range candidates { + absPath := path + if !filepath.IsAbs(path) { + absPath = filepath.Join(cwd, path) + } + exists := false + for _, done := range ordered { + if done == absPath { + exists = true + break + } + } + if !exists { + ordered = append(ordered, absPath) + } + } + for _, absPath := range ordered { + if err := openapi.SaveMinimalDoc(r, info, absPath); err == nil { + return nil + } + } + return openapi.SaveMinimalDoc(r, info, ordered[0]) +} diff --git a/backend/cmd/database/main.go b/backend/cmd/database/main.go index 73398246..da8aa46e 100644 --- a/backend/cmd/database/main.go +++ b/backend/cmd/database/main.go @@ -36,7 +36,7 @@ func main() { switch cmd { case "migrate": - if err := MigrateDatabase(ctx, db); err != nil { + if err := MigrateDatabase(ctx, db, cfg); err != nil { log.Fatal("migrate failed:", err) } fmt.Println("migrate ok") @@ -55,7 +55,7 @@ func main() { fmt.Println("reset ok") // 再 migrate - if err := MigrateDatabase(ctx, db); err != nil { 
+ if err := MigrateDatabase(ctx, db, cfg); err != nil { log.Fatal("migrate failed:", err) } fmt.Println("migrate ok") diff --git a/backend/cmd/database/migrate.go b/backend/cmd/database/migrate.go index d87327c4..0551d928 100644 --- a/backend/cmd/database/migrate.go +++ b/backend/cmd/database/migrate.go @@ -3,22 +3,110 @@ package main import ( "context" - "log" + "fmt" + "strings" + "github.com/ArtisanCloud/PowerX/config" "github.com/ArtisanCloud/PowerX/internal/server/agent/persistence" "github.com/ArtisanCloud/PowerX/pkg/corex/db/database" + "github.com/ArtisanCloud/PowerX/pkg/corex/db/migration" "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model" + pgvectorcfg "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/vectorstore/pgvector" "gorm.io/gorm" ) -func MigrateDatabase(ctx context.Context, db *gorm.DB) error { +func MigrateDatabase(ctx context.Context, db *gorm.DB, cfg *config.Config) error { if err := database.MigrateCoreModels(db); err != nil { - log.Fatalf("CoreX 迁移失败: %v", err) + return fmt.Errorf("CoreX 迁移失败: %w", err) } if err := persistence.MigrateAgentModels(db); err != nil { - log.Fatalf("Agent 迁移失败: %v", err) + return fmt.Errorf("Agent 迁移失败: %w", err) + } + + // Knowledge Space migrations (tables that are not covered by GORM models). + // - KG assist tables are cheap and enable `index.kg` readiness. + // - pgvector is conditional on driver=pgvector. 
+ if cfg != nil && cfg.FeatureGate.EnableKnowledgeSpace { + driver := strings.TrimSpace(cfg.KnowledgeSpace.VectorStore.Driver) + fmt.Printf("[migrate] knowledge_space enabled=true vector_store.driver=%q db=%s@%s:%d/%s\n", + driver, + strings.TrimSpace(cfg.Database.UserName), + strings.TrimSpace(cfg.Database.Host), + cfg.Database.Port, + strings.TrimSpace(cfg.Database.Database), + ) + + if err := migration.EnsureKnowledgeKGAssistTables(db); err != nil { + return fmt.Errorf("Knowledge Space KG 表迁移失败: %w", err) + } + + if driver == "pgvector" { + fmt.Printf("[migrate] pgvector migration target=%s.%s\n", + coalesce(strings.TrimSpace(cfg.KnowledgeSpace.VectorStore.PgVector.Schema), "public"), + coalesce(strings.TrimSpace(cfg.KnowledgeSpace.VectorStore.PgVector.Table), "knowledge_vectors_v1_1536"), + ) + dsn := strings.TrimSpace(cfg.KnowledgeSpace.VectorStore.PgVector.DSN) + if dsn == "" { + dsn = strings.TrimSpace(cfg.Database.DSN) + } + if dsn == "" { + // Fallback to the same DSN composition logic as `database.Connect`. 
+ dsn = fmt.Sprintf( + "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s TimeZone=%s", + cfg.Database.Host, + cfg.Database.Port, + cfg.Database.UserName, + cfg.Database.Password, + cfg.Database.Database, + coalesce(cfg.Database.SSLMode, "disable"), + coalesce(cfg.Database.Timezone, "UTC"), + ) + } + + pgCfg := pgvectorcfg.Config{ + DSN: strings.TrimSpace(cfg.KnowledgeSpace.VectorStore.PgVector.DSN), + Schema: strings.TrimSpace(cfg.KnowledgeSpace.VectorStore.PgVector.Schema), + Table: strings.TrimSpace(cfg.KnowledgeSpace.VectorStore.PgVector.Table), + Dimensions: cfg.KnowledgeSpace.VectorStore.PgVector.Dimensions, + EnableMigrations: cfg.KnowledgeSpace.VectorStore.PgVector.EnableMigrations, + BatchSize: cfg.KnowledgeSpace.VectorStore.PgVector.BatchSize, + Lists: cfg.KnowledgeSpace.VectorStore.PgVector.Lists, + TimeoutSeconds: cfg.KnowledgeSpace.VectorStore.PgVector.TimeoutSeconds, + } + if err := migration.EnsureKnowledgeVectorsPGVector(ctx, dsn, pgCfg); err != nil { + return fmt.Errorf("Knowledge Space pgvector 迁移失败: %w", err) + } + + // Sanity check for local Postgres: ensure the vectors table is actually visible. + if strings.EqualFold(strings.TrimSpace(cfg.Database.Driver), "postgres") { + schema := coalesce(strings.TrimSpace(cfg.KnowledgeSpace.VectorStore.PgVector.Schema), "public") + table := coalesce(strings.TrimSpace(cfg.KnowledgeSpace.VectorStore.PgVector.Table), "knowledge_vectors_v1_1536") + var regclass string + if err := db.WithContext(ctx).Raw(`SELECT to_regclass(?)`, fmt.Sprintf("%s.%s", schema, table)).Scan(&regclass).Error; err != nil { + return fmt.Errorf("pgvector 表可见性检查失败: %w", err) + } + if strings.TrimSpace(regclass) == "" { + return fmt.Errorf("pgvector 迁移已执行但未发现表:%s.%s(请检查 schema/search_path/权限)", schema, table) + } + fmt.Printf("[migrate] pgvector table ready: %s\n", strings.TrimSpace(regclass)) + } + } + + // Postgres-backed sparse/structured/hier readiness (B方案默认) is controlled by explicit backend selection. 
+ backends := cfg.KnowledgeSpace.IndexBackends + needsChunkStore := backends.Sparse == "postgres_fts" || backends.StructuredFields == "postgres_jsonb" || backends.Hier == "postgres_links" + if needsChunkStore { + if err := migration.EnsureKnowledgeChunkStoreTables(db); err != nil { + return fmt.Errorf("Knowledge Space chunk store 迁移失败: %w", err) + } + } + if backends.Hier == "postgres_links" { + if err := migration.EnsureKnowledgeChunkLinkTables(db); err != nil { + return fmt.Errorf("Knowledge Space chunk links 迁移失败: %w", err) + } + } } return nil @@ -34,3 +122,11 @@ func ResetDatabase(ctx context.Context, db *gorm.DB) error { } return nil } + +func coalesce(v string, fallback string) string { + v = strings.TrimSpace(v) + if v == "" { + return fallback + } + return v +} diff --git a/backend/cmd/database/seed/openapi_path.go b/backend/cmd/database/seed/openapi_path.go new file mode 100644 index 00000000..c5a9a750 --- /dev/null +++ b/backend/cmd/database/seed/openapi_path.go @@ -0,0 +1,16 @@ +package seed + +import "os" + +func resolveSwaggerPath() string { + candidates := []string{ + "./api/openapi/swagger.json", + "./backend/api/openapi/swagger.json", + } + for _, path := range candidates { + if _, err := os.Stat(path); err == nil { + return path + } + } + return candidates[0] +} diff --git a/backend/cmd/database/seed/seed.go b/backend/cmd/database/seed/seed.go index 10389629..baa3453b 100644 --- a/backend/cmd/database/seed/seed.go +++ b/backend/cmd/database/seed/seed.go @@ -2,11 +2,13 @@ package seed import ( "context" - "github.com/ArtisanCloud/PowerX/config" "log" "os" "strings" + "github.com/ArtisanCloud/PowerX/config" + apikeypermissions "github.com/ArtisanCloud/PowerX/internal/service/integration_gateway/apikeypermissions" + "gorm.io/gorm" "github.com/ArtisanCloud/PowerX/pkg/corex/db/database" @@ -22,6 +24,10 @@ func envOrDefault(key, def string) string { } func SeedCoreX(ctx context.Context, db *gorm.DB, cfg *config.Config) error { + if cfg != nil { + 
config.GlobalConfig = cfg + apikeypermissions.SetIntroducedVersion(cfg.EffectiveSystemVersion()) + } db, err := database.Connect(cfg.Database) if err != nil { //log.Fatal(err) @@ -33,6 +39,13 @@ func SeedCoreX(ctx context.Context, db *gorm.DB, cfg *config.Config) error { return err } + if err = SeedEventFabricTopics(db); err != nil { + return err + } + if err = SeedEventFabricDefaultACL(db); err != nil { + return err + } + if err = SeedSMEDepartments(db, "system"); err != nil { //log.Fatal(err) return err @@ -47,6 +60,14 @@ func SeedCoreX(ctx context.Context, db *gorm.DB, cfg *config.Config) error { return err } + if err = SeedKnowledgePolicyTemplates(db); err != nil { + return err + } + + if err = SeedKnowledgeProfiles(db, "system"); err != nil { + return err + } + log.Println("seed ok") return nil diff --git a/backend/cmd/database/seed/seed_admin.go b/backend/cmd/database/seed/seed_admin.go index 0201d32f..79c31eab 100644 --- a/backend/cmd/database/seed/seed_admin.go +++ b/backend/cmd/database/seed/seed_admin.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" + apikeypermissions "github.com/ArtisanCloud/PowerX/internal/service/integration_gateway/apikeypermissions" dbm "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/tenant" tenantrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/tenant" @@ -22,9 +23,12 @@ func SeedRoot(db *gorm.DB) error { return fmt.Errorf("seed system permissions: %w", err) } - if err := SeedSwaggerPermissions(db, "./backend/api/openapi/swagger.json"); err != nil { + if err := SeedSwaggerPermissions(db, resolveSwaggerPath()); err != nil { return fmt.Errorf("seed swagger permissions: %w", err) } + if err := apikeypermissions.EnsureTemplatePermissions(seedCtx(), infraiam.NewPermissionRepository(db)); err != nil { + return fmt.Errorf("seed api key permissions: %w", err) + } // 3) 确保 system 租户存在 const tenantKey = "system" @@ -36,6 +40,9 @@ func SeedRoot(db *gorm.DB) error { } tenantUUID := ten.UUID.String() + if _, _, 
err := apikeypermissions.EnsureTenantDefaultProfile(seedCtx(), db, tenantUUID, nil); err != nil { + return fmt.Errorf("ensure default api key profile for tenant(%s): %w", tenantUUID, err) + } // 4) 为该租户完成内置角色与授权(root(system) & tenant_admin(tenant)) if err := SeedBuiltInRolesAndGrants(db, tenantUUID); err != nil { diff --git a/backend/cmd/database/seed/seed_agent.go b/backend/cmd/database/seed/seed_agent.go index c9f80cf6..cc8565c7 100644 --- a/backend/cmd/database/seed/seed_agent.go +++ b/backend/cmd/database/seed/seed_agent.go @@ -8,6 +8,7 @@ import ( agentr "github.com/ArtisanCloud/PowerX/internal/server/agent/persistence/repository" tenantmodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/tenant" tenantrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/tenant" + "github.com/google/uuid" "gorm.io/datatypes" "gorm.io/gorm" ) @@ -30,6 +31,7 @@ func SeedSystemDefaultAgent(db *gorm.DB) error { const ( agentKey = "core.system.default" agentName = "System Default Agent" + agentUUID = "6b31adf3-4f7d-4c77-9d1d-c58fb3a7cf2a" ) // 已存在直接返回(幂等) @@ -66,6 +68,7 @@ func SeedSystemDefaultAgent(db *gorm.DB) error { "tags": []string{"system", "default"}, }, } + a.UUID = uuid.MustParse(agentUUID) // 用仓库 Upsert(租户级唯一:env + tenant_id + key) if err := agentRepo.UpsertByScopeKey(ctx, env, &tenantUUID, a); err != nil { diff --git a/backend/cmd/database/seed/seed_event_fabric.go b/backend/cmd/database/seed/seed_event_fabric.go new file mode 100644 index 00000000..50f47ebb --- /dev/null +++ b/backend/cmd/database/seed/seed_event_fabric.go @@ -0,0 +1,158 @@ +package seed + +import ( + "fmt" + "strings" + "time" + + eventbus "github.com/ArtisanCloud/PowerX/internal/event_bus" + coremodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model" + eventfabricmodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/event_fabric" + eventfabricrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/event_fabric" + 
"gorm.io/datatypes" + "gorm.io/gorm" +) + +type eventFabricTopicSeed struct { + TenantKey string + Namespace string + Name string + PayloadFormat string + CreatedBy string +} + +func SeedEventFabricTopics(db *gorm.DB) error { + repo := eventfabricrepo.NewTopicRepository(db) + + builtinSystemTopics := []string{ + eventbus.TopicKnowledgeIngestionJob, + eventbus.TopicKnowledgeCorpusCheckJob, + eventbus.TopicSystemNotification, + eventbus.TopicIntegrationGatewayRouteCreated, + eventbus.TopicIntegrationGatewayRouteUpdated, + eventbus.TopicIntegrationGatewayInvocationSucceeded, + eventbus.TopicIntegrationGatewayInvocationFailed, + eventbus.TopicIntegrationGatewayInvocationFallback, + eventbus.TopicCapabilityCatalogSyncStarted, + eventbus.TopicCapabilityCatalogSyncSucceeded, + eventbus.TopicCapabilityCatalogSyncFailed, + eventbus.TopicCapabilityPolicyDegraded, + eventbus.TopicKnowledgeFeedbackReprocess, + } + + seeds := make([]eventFabricTopicSeed, 0, len(builtinSystemTopics)) + for _, topic := range builtinSystemTopics { + namespace, name, ok := splitTopic(topic) + if !ok { + continue + } + seeds = append(seeds, eventFabricTopicSeed{ + TenantKey: "global", + Namespace: namespace, + Name: name, + PayloadFormat: "json", + CreatedBy: "seed", + }) + } + + for i := range seeds { + seed := seeds[i] + existing, err := repo.FindByComposite(seedCtx(), seed.TenantKey, seed.Namespace, seed.Name) + if err != nil { + return fmt.Errorf("query topic %s.%s.%s failed: %w", seed.TenantKey, seed.Namespace, seed.Name, err) + } + if existing != nil { + continue + } + + row := &eventfabricmodel.TopicDefinition{ + ScopeType: eventfabricmodel.TopicScopeSystem, + ScopeID: seed.TenantKey, + TenantKey: seed.TenantKey, + Namespace: seed.Namespace, + Name: seed.Name, + Lifecycle: eventfabricmodel.TopicLifecycleActive, + PayloadFormat: seed.PayloadFormat, + VersioningMode: "strict", + MaxRetry: 5, + AckTimeoutSec: 30, + RetentionPolicy: datatypes.JSON([]byte(`{"mode":"standard"}`)), + Metadata: 
datatypes.JSON([]byte(`{"seed":"event_fabric"}`)), + CreatedBy: seed.CreatedBy, + Status: 1, + } + if _, err := repo.Create(seedCtx(), row); err != nil { + return fmt.Errorf("create topic %s.%s.%s failed: %w", seed.TenantKey, seed.Namespace, seed.Name, err) + } + } + + fmt.Printf("[seed] event fabric topics ready: %d\n", len(seeds)) + return nil +} + +func splitTopic(topic string) (namespace string, name string, ok bool) { + trimmed := strings.TrimSpace(topic) + if trimmed == "" { + return "", "", false + } + parts := strings.Split(trimmed, ".") + if len(parts) < 2 { + return "", "", false + } + namespace = strings.Join(parts[:len(parts)-1], ".") + name = strings.TrimSpace(parts[len(parts)-1]) + if namespace == "" || name == "" { + return "", "", false + } + return namespace, name, true +} + +func SeedEventFabricDefaultACL(db *gorm.DB) error { + if db == nil { + return fmt.Errorf("db is required") + } + var topics []*eventfabricmodel.TopicDefinition + if err := db.WithContext(seedCtx()). + Where("deleted_at IS NULL"). 
+ Find(&topics).Error; err != nil { + return fmt.Errorf("list event topics failed: %w", err) + } + if len(topics) == 0 { + fmt.Println("[seed] event fabric default acl skipped: no topics") + return nil + } + + now := time.Now().UTC() + bindings := make([]*eventfabricmodel.AclBinding, 0, len(topics)*3) + for _, topic := range topics { + if topic == nil { + continue + } + for _, action := range []string{"publish", "subscribe", "replay"} { + bindings = append(bindings, &eventfabricmodel.AclBinding{ + TenantKey: topic.TenantKey, + TopicUUID: topic.UUID, + PrincipalType: "role", + PrincipalID: "role:role_admin", + Action: action, + GrantedBy: "seed", + Justification: "seed default admin access", + Status: 1, + PowerUUIDModel: coremodel.PowerUUIDModel{ + CreatedAt: now, + UpdatedAt: now, + }, + }) + } + } + if len(bindings) == 0 { + return nil + } + + repo := eventfabricrepo.NewAclRepository(db) + if _, err := repo.UpsertBindings(seedCtx(), bindings); err != nil { + return fmt.Errorf("upsert event fabric default acl failed: %w", err) + } + fmt.Printf("[seed] event fabric default acl ready: topics=%d bindings=%d\n", len(topics), len(bindings)) + return nil +} diff --git a/backend/cmd/database/seed/seed_knowledge_profiles.go b/backend/cmd/database/seed/seed_knowledge_profiles.go new file mode 100644 index 00000000..91ae3b44 --- /dev/null +++ b/backend/cmd/database/seed/seed_knowledge_profiles.go @@ -0,0 +1,132 @@ +package seed + +import ( + "encoding/json" + "fmt" + "time" + + models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" + tenantModel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/tenant" + tenantRepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/tenant" + "gorm.io/datatypes" + "gorm.io/gorm" +) + +type profileSeed struct { + Key string + DisplayName string + Config map[string]any +} + +func SeedKnowledgeProfiles(db *gorm.DB, tenantKey string) error { + tenRepo := tenantRepo.NewTenantRepository(db) + 
ten, err := tenRepo.EnsureByKey(seedCtx(), tenantKey, "SME Org", tenantModel.TenantPlanBasic, tenantModel.TenantTypeEnterprise) + if err != nil { + return fmt.Errorf("ensure tenant(%s): %w", tenantKey, err) + } + + items := []profileSeed{ + {Key: "default", DisplayName: "Default", Config: map[string]any{"bundle": "p1_general"}}, + {Key: "p0_basic", DisplayName: "P0 基础(最小闭环)", Config: map[string]any{"bundle": "p0_basic"}}, + {Key: "p1_general", DisplayName: "P1 通用推荐(企业默认)", Config: map[string]any{"bundle": "p1_general"}}, + {Key: "p2_high_accuracy", DisplayName: "P2 高准确/合规(证据优先)", Config: map[string]any{"bundle": "p2_high_accuracy"}}, + {Key: "p3_kg_strong", DisplayName: "P3 KG 约束(关系驱动)", Config: map[string]any{"bundle": "p3_kg_strong"}}, + } + + now := time.Now().UTC() + for _, it := range items { + raw, _ := json.Marshal(it.Config) + cfg := datatypes.JSON(raw) + + if err := ensurePublishedIngestionProfile(db, ten.UUID.String(), it.Key, it.DisplayName, cfg, &now); err != nil { + return err + } + if err := ensurePublishedIndexProfile(db, ten.UUID.String(), it.Key, it.DisplayName, cfg, &now); err != nil { + return err + } + if err := ensurePublishedRAGProfile(db, ten.UUID.String(), it.Key, it.DisplayName, cfg, &now); err != nil { + return err + } + } + + fmt.Printf("[seed] knowledge profiles ready for tenant=%s (uuid=%s)\n", tenantKey, ten.UUID.String()) + return nil +} + +func ensurePublishedIngestionProfile(db *gorm.DB, tenantUUID, key, name string, cfg datatypes.JSON, now *time.Time) error { + var existing models.IngestionProfileVersion + err := db.WithContext(seedCtx()). + Where("tenant_uuid = ? AND profile_key = ? AND status = ?", tenantUUID, key, models.ProfileStatusPublished). + Order("version desc"). 
+ Take(&existing).Error + if err == nil && existing.UUID.String() != "" { + return nil + } + if err != nil && err != gorm.ErrRecordNotFound { + return err + } + row := &models.IngestionProfileVersion{ + TenantUUID: tenantUUID, + ProfileKey: key, + Version: 1, + Status: models.ProfileStatusPublished, + DisplayName: name, + Config: cfg, + PublishedAt: now, + PublishedBy: "seed", + CreatedBy: "seed", + } + return db.WithContext(seedCtx()).Create(row).Error +} + +func ensurePublishedIndexProfile(db *gorm.DB, tenantUUID, key, name string, cfg datatypes.JSON, now *time.Time) error { + var existing models.IndexProfileVersion + err := db.WithContext(seedCtx()). + Where("tenant_uuid = ? AND profile_key = ? AND status = ?", tenantUUID, key, models.ProfileStatusPublished). + Order("version desc"). + Take(&existing).Error + if err == nil && existing.UUID.String() != "" { + return nil + } + if err != nil && err != gorm.ErrRecordNotFound { + return err + } + row := &models.IndexProfileVersion{ + TenantUUID: tenantUUID, + ProfileKey: key, + Version: 1, + Status: models.ProfileStatusPublished, + DisplayName: name, + Config: cfg, + PublishedAt: now, + PublishedBy: "seed", + CreatedBy: "seed", + } + return db.WithContext(seedCtx()).Create(row).Error +} + +func ensurePublishedRAGProfile(db *gorm.DB, tenantUUID, key, name string, cfg datatypes.JSON, now *time.Time) error { + var existing models.RAGProfileVersion + err := db.WithContext(seedCtx()). + Where("tenant_uuid = ? AND profile_key = ? AND status = ?", tenantUUID, key, models.ProfileStatusPublished). + Order("version desc"). 
+ Take(&existing).Error + if err == nil && existing.UUID.String() != "" { + return nil + } + if err != nil && err != gorm.ErrRecordNotFound { + return err + } + row := &models.RAGProfileVersion{ + TenantUUID: tenantUUID, + ProfileKey: key, + Version: 1, + Status: models.ProfileStatusPublished, + DisplayName: name, + Config: cfg, + PublishedAt: now, + PublishedBy: "seed", + CreatedBy: "seed", + } + return db.WithContext(seedCtx()).Create(row).Error +} diff --git a/backend/cmd/database/seed/seed_knowledge_space.go b/backend/cmd/database/seed/seed_knowledge_space.go new file mode 100644 index 00000000..83ee1b22 --- /dev/null +++ b/backend/cmd/database/seed/seed_knowledge_space.go @@ -0,0 +1,96 @@ +package seed + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "strings" + "time" + + "gorm.io/datatypes" + "gorm.io/gorm" + + models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" +) + +type policyTemplateSeed struct { + TemplateName string + Version string + RAGProfile map[string]any + GraphProfile map[string]any + Masking map[string]any + Alerting map[string]any +} + +func SeedKnowledgePolicyTemplates(db *gorm.DB) error { + items := []policyTemplateSeed{ + { + TemplateName: "default", + Version: "v1", + RAGProfile: map[string]any{"profile": "default"}, + GraphProfile: map[string]any{}, + Masking: map[string]any{}, + Alerting: map[string]any{}, + }, + } + + now := time.Now().UTC() + for _, it := range items { + name := strings.TrimSpace(it.TemplateName) + ver := strings.TrimSpace(it.Version) + if name == "" || ver == "" { + continue + } + + ragRaw, _ := json.Marshal(it.RAGProfile) + graphRaw, _ := json.Marshal(it.GraphProfile) + maskingRaw, _ := json.Marshal(it.Masking) + alertRaw, _ := json.Marshal(it.Alerting) + + fp := sha256.Sum256([]byte(name + ":" + ver + ":" + string(ragRaw) + ":" + string(graphRaw) + ":" + string(maskingRaw) + ":" + string(alertRaw))) + hash := hex.EncodeToString(fp[:]) + + var existing 
models.PolicyTemplateVersion + err := db.WithContext(seedCtx()). + Where("template_name = ? AND version = ?", name, ver). + Take(&existing).Error + if err != nil && err != gorm.ErrRecordNotFound { + return err + } + + if err == nil && existing.ID > 0 { + // 若已存在,仅在 hash 为空时补齐(避免覆盖你手工调优过的 profile) + if strings.TrimSpace(existing.ImmutableHash) == "" { + if err := db.WithContext(seedCtx()). + Model(&models.PolicyTemplateVersion{}). + Where("id = ?", existing.ID). + Updates(map[string]any{ + "immutable_hash": hash, + "approved_by": "seed", + "approved_at": &now, + }).Error; err != nil { + return err + } + } + continue + } + + row := &models.PolicyTemplateVersion{ + TemplateName: name, + Version: ver, + RAGProfile: datatypes.JSON(ragRaw), + GraphProfile: datatypes.JSON(graphRaw), + MaskingProfile: datatypes.JSON(maskingRaw), + AlertingProfile: datatypes.JSON(alertRaw), + ApprovedBy: "seed", + ApprovedAt: &now, + ImmutableHash: hash, + } + if err := db.WithContext(seedCtx()).Create(row).Error; err != nil { + return err + } + fmt.Printf("[seed] policy templates ready: %s-%s (id=%d)\n", name, ver, row.ID) + } + return nil +} diff --git a/backend/cmd/database/seed/seed_permission.go b/backend/cmd/database/seed/seed_permission.go index e29e4138..110d87be 100644 --- a/backend/cmd/database/seed/seed_permission.go +++ b/backend/cmd/database/seed/seed_permission.go @@ -4,6 +4,9 @@ import ( "context" "encoding/json" "fmt" + + "github.com/ArtisanCloud/PowerX/config" + apikeypermissions "github.com/ArtisanCloud/PowerX/internal/service/integration_gateway/apikeypermissions" "gorm.io/gorm" "gorm.io/gorm/clause" @@ -21,28 +24,41 @@ func mAction(plugin, resource, action string) []byte { return b } +func systemPerm(module, resource, action string) dbm.Permission { + permission := dbm.Permission{ + Module: module, + Resource: resource, + Action: action, + Source: "core", + Introduced: config.GetSystemVersion(), + Meta: mAction(module, resource, action), + } + permission.AllowAPIKey = 
apikeypermissions.DefaultAllowAPIKey(permission) + return permission +} + // SeedSystemPermissions:把核心模块(IAM)的一批常用权限写入 iam_permission(幂等) func SeedSystemPermissions(db *gorm.DB) error { pr := infraiam.NewPermissionRepository(db) perms := []dbm.Permission{ // IAM / Role - {Plugin: "iam", Resource: "role", Action: "read", Meta: mAction("iam", "role", "read")}, - {Plugin: "iam", Resource: "role", Action: "write", Meta: mAction("iam", "role", "write")}, - {Plugin: "iam", Resource: "role", Action: "delete", Meta: mAction("iam", "role", "delete")}, - {Plugin: "iam", Resource: "role", Action: "bind", Meta: mAction("iam", "role", "bind")}, + systemPerm("iam", "role", "read"), + systemPerm("iam", "role", "write"), + systemPerm("iam", "role", "delete"), + systemPerm("iam", "role", "bind"), // IAM / User - {Plugin: "iam", Resource: "user", Action: "read", Meta: mAction("iam", "user", "read")}, - {Plugin: "iam", Resource: "user", Action: "write", Meta: mAction("iam", "user", "write")}, - {Plugin: "iam", Resource: "user", Action: "delete", Meta: mAction("iam", "user", "delete")}, + systemPerm("iam", "user", "read"), + systemPerm("iam", "user", "write"), + systemPerm("iam", "user", "delete"), // IAM / Department - {Plugin: "iam", Resource: "department", Action: "read", Meta: mAction("iam", "department", "read")}, - {Plugin: "iam", Resource: "department", Action: "write", Meta: mAction("iam", "department", "write")}, - {Plugin: "iam", Resource: "department", Action: "delete", Meta: mAction("iam", "department", "delete")}, + systemPerm("iam", "department", "read"), + systemPerm("iam", "department", "write"), + systemPerm("iam", "department", "delete"), // IAM / Permission(只读) - {Plugin: "iam", Resource: "permission", Action: "read", Meta: mAction("iam", "permission", "read")}, + systemPerm("iam", "permission", "read"), // Admin root guard(用于开放市场/发布候选菜单) - {Plugin: "admin", Resource: "root", Action: "view", Meta: mAction("admin", "root", "view")}, + systemPerm("admin", "root", 
"view"), } // 你仓储里已有 UpsertBatch:幂等插入/更新 diff --git a/backend/cmd/database/seed/swagger_permissions.go b/backend/cmd/database/seed/swagger_permissions.go index e6053fab..9305b219 100644 --- a/backend/cmd/database/seed/swagger_permissions.go +++ b/backend/cmd/database/seed/swagger_permissions.go @@ -7,6 +7,8 @@ import ( "os" "strings" + "github.com/ArtisanCloud/PowerX/config" + apikeypermissions "github.com/ArtisanCloud/PowerX/internal/service/integration_gateway/apikeypermissions" "gorm.io/gorm" dbm "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/iam" @@ -46,7 +48,7 @@ func SeedSwaggerPermissions(db *gorm.DB, swaggerPath string) error { return nil } - // ★ 关键:按 (plugin, resource, action) 去重,避免一次 upsert 命中同一行两次 → 21000 + // ★ 关键:按 (module, resource, action) 去重,避免一次 upsert 命中同一行两次 → 21000 rows = dedupPerms(rows) pr := repo.NewPermissionRepository(db) @@ -86,16 +88,16 @@ func permFromPathAndMethod(path, method string) dbm.Permission { i++ } - plugin := "core" + moduleName := "core" if i < len(segs) && segs[i] != "" && !strings.HasPrefix(segs[i], "{") { - plugin = segs[i] // iam / system / marketplace / tenant / ... + moduleName = segs[i] // iam / system / marketplace / tenant / ... } // module 规则: // - 后台管理且 plugin=system => system(平台级) // - 否则用 plugin 归类(iam/tenant/marketplace/...) 
- module := plugin - if isAdmin && plugin == "system" { + module := moduleName + if isAdmin && moduleName == "system" { module = "system" } @@ -130,18 +132,26 @@ func permFromPathAndMethod(path, method string) dbm.Permission { "http_method": method, "api_endpoint": path, } - mb, _ := json.Marshal(meta) - - return dbm.Permission{ - Plugin: plugin, // 关键:来自路径 + permission := dbm.Permission{ + Module: moduleName, // 关键:来自路径 Resource: res, Action: act, Effect: "allow", Status: dbm.PermissionStatusActive, - Source: plugin, // 也用 plugin - Introduced: "v1.0.0", - Meta: mb, + Source: moduleName, // 也用 module + Introduced: config.GetSystemVersion(), + } + baseMetaBytes, _ := json.Marshal(meta) + permission.Meta = baseMetaBytes + permission.AllowAPIKey = apikeypermissions.DefaultAllowAPIKey(permission) + if permission.AllowAPIKey { + if apiMeta := apikeypermissions.BuildAPIKeyMeta(permission); len(apiMeta) > 0 { + meta["api_key"] = apiMeta + } } + mb, _ := json.Marshal(meta) + permission.Meta = mb + return permission } func trimParam(s string) string { @@ -151,15 +161,15 @@ func trimParam(s string) string { return s } -// —— 去重:按唯一键 (plugin, resource, action) +// —— 去重:按唯一键 (module, resource, action) func dedupPerms(in []dbm.Permission) []dbm.Permission { type key struct { - Plugin, Resource, Action string + Module, Resource, Action string } m := make(map[key]dbm.Permission, len(in)) for _, p := range in { k := key{ - Plugin: strings.TrimSpace(p.Plugin), + Module: strings.TrimSpace(p.Module), Resource: strings.TrimSpace(p.Resource), Action: strings.TrimSpace(p.Action), } diff --git a/backend/cmd/knowledge/release.go b/backend/cmd/knowledge/release.go index a4439dc4..41b9a6bc 100644 --- a/backend/cmd/knowledge/release.go +++ b/backend/cmd/knowledge/release.go @@ -16,34 +16,38 @@ import ( func main() { endpoint := flag.String("endpoint", "127.0.0.1:9001", "KnowledgeSpace gRPC endpoint") - cmd := flag.String("cmd", "upsert", "Command: upsert|publish|promote|rollback") - 
matrixPath := flag.String("matrix", "configs/knowledge/tenant_release_matrix.yaml", "Matrix file for upsert") + cmd := flag.String("cmd", "upsert", "Command: validate|upsert|publish|promote|rollback|export-report") + matrixPath := flag.String("matrix", "backend/config/knowledge/tenant_release_matrix.yaml", "Matrix file for validate/upsert") policyID := flag.Uint64("policy", 0, "Policy ID") versionID := flag.String("version", "", "Knowledge version ID") batchToken := flag.String("batch", "", "Batch token") alerts := flag.String("alerts", "", "Comma separated alert codes") reason := flag.String("reason", "", "Rollback reason") requestedBy := flag.String("by", "cli@powerx.io", "Requested by") + releaseReport := flag.String("release-report", "backend/reports/_state/knowledge-release.json", "Release report path") + aggregateReport := flag.String("aggregate-report", "reports/_state/knowledge-update.json", "Aggregate report path") flag.Parse() ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) defer cancel() - conn, err := grpc.DialContext(ctx, *endpoint, grpc.WithInsecure(), grpc.WithBlock()) - if err != nil { - log.Fatalf("dial gRPC failed: %v", err) - } - defer conn.Close() - - client := knowledgev1.NewKnowledgeSpaceAdminServiceClient(conn) - switch strings.ToLower(*cmd) { + case "validate": + assertFlagEnabled("PX_TENANT_RELEASE_MATRIX") + _ = buildUpsertRequest(*matrixPath) + fmt.Println("✅ matrix validate ok") case "upsert": + assertFlagEnabled("PX_TENANT_RELEASE_MATRIX") + client, closeFn := dialClient(ctx, *endpoint) + defer closeFn() req := buildUpsertRequest(*matrixPath) resp, err := client.UpsertReleasePolicy(ctx, req) exitOnErr(err) fmt.Printf("Policy saved: id=%s status=%s\n", resp.GetPolicyId(), resp.GetStatus()) case "publish": + assertFlagEnabled("PX_KNOWLEDGE_GRAY_RELEASE") + client, closeFn := dialClient(ctx, *endpoint) + defer closeFn() ensure(*policyID > 0, "missing --policy") ensure(strings.TrimSpace(*versionID) != "", "missing 
--version") resp, err := client.PublishRelease(ctx, &knowledgev1.PublishReleaseRequest{ @@ -54,6 +58,9 @@ func main() { exitOnErr(err) fmt.Printf("Release published: releaseId=%s batchToken=%s tenants=%v\n", resp.GetReleaseId(), resp.GetBatchToken(), resp.GetTenants()) case "promote": + assertFlagEnabled("PX_KNOWLEDGE_GRAY_RELEASE") + client, closeFn := dialClient(ctx, *endpoint) + defer closeFn() ensure(*policyID > 0, "missing --policy") ensure(strings.TrimSpace(*versionID) != "", "missing --version") ensure(strings.TrimSpace(*batchToken) != "", "missing --batch") @@ -67,6 +74,10 @@ func main() { exitOnErr(err) fmt.Printf("Batch promoted: state=%s nextToken=%s tenants=%v coverage=%.2f\n", resp.GetState(), resp.GetNextBatchToken(), resp.GetTenants(), resp.GetTenantCoverage()) case "rollback": + assertFlagEnabled("PX_KNOWLEDGE_GRAY_RELEASE") + assertFlagEnabled("PX_KNOWLEDGE_RELEASE_GUARD") + client, closeFn := dialClient(ctx, *endpoint) + defer closeFn() ensure(*policyID > 0, "missing --policy") ensure(strings.TrimSpace(*versionID) != "", "missing --version") resp, err := client.RollbackRelease(ctx, &knowledgev1.RollbackReleaseRequest{ @@ -77,11 +88,21 @@ func main() { }) exitOnErr(err) fmt.Printf("Rollback completed: status=%s\n", resp.GetStatus()) + case "export-report": + printReport(*releaseReport, *aggregateReport) default: log.Fatalf("unknown cmd %s", *cmd) } } +func dialClient(ctx context.Context, endpoint string) (knowledgev1.KnowledgeSpaceAdminServiceClient, func()) { + conn, err := grpc.DialContext(ctx, endpoint, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + log.Fatalf("dial gRPC failed: %v", err) + } + return knowledgev1.NewKnowledgeSpaceAdminServiceClient(conn), func() { _ = conn.Close() } +} + func buildUpsertRequest(matrixPath string) *knowledgev1.UpsertReleasePolicyRequest { data, err := os.ReadFile(matrixPath) if err != nil { @@ -114,6 +135,42 @@ func buildUpsertRequest(matrixPath string) *knowledgev1.UpsertReleasePolicyReque return req 
} +func assertFlagEnabled(flag string) { + flag = strings.TrimSpace(flag) + if flag == "" { + return + } + value := strings.TrimSpace(os.Getenv(flag)) + if value == "" { + return + } + value = strings.ToLower(value) + if value == "0" || value == "false" || value == "disabled" || value == "off" || value == "no" { + log.Fatalf("feature flag disabled: %s", flag) + } +} + +func printReport(releasePath, aggregatePath string) { + releasePath = strings.TrimSpace(releasePath) + aggregatePath = strings.TrimSpace(aggregatePath) + if releasePath != "" { + if data, err := os.ReadFile(releasePath); err == nil { + fmt.Println("--- knowledge-release.json ---") + fmt.Println(string(data)) + } else { + fmt.Printf("[warn] cannot read %s: %v\n", releasePath, err) + } + } + if aggregatePath != "" { + if data, err := os.ReadFile(aggregatePath); err == nil { + fmt.Println("--- knowledge-update.json ---") + fmt.Println(string(data)) + } else { + fmt.Printf("[warn] cannot read %s: %v\n", aggregatePath, err) + } + } +} + func splitAlerts(raw string) []string { raw = strings.TrimSpace(raw) if raw == "" { diff --git a/backend/cmd/perm_gen/main.go b/backend/cmd/perm_gen/main.go index 509a079a..324ec03f 100644 --- a/backend/cmd/perm_gen/main.go +++ b/backend/cmd/perm_gen/main.go @@ -92,7 +92,7 @@ func generatePermissionsFromOpenAPI(doc *openapi3.T, source, introduced string) for path, pi := range doc.Paths { for method, op := range operationsOf(pi) { perm := dbm.Permission{ - Plugin: "core", // 或按你的模块名设置 + Module: "core", // 或按你的模块名设置 Resource: guessResource(path), Action: guessAction(method, path, op), Effect: "allow", diff --git a/backend/cmd/px/commands/plugin/dev_watch.go b/backend/cmd/px/commands/plugin/dev_watch.go index a7b4853f..7eebf0b5 100644 --- a/backend/cmd/px/commands/plugin/dev_watch.go +++ b/backend/cmd/px/commands/plugin/dev_watch.go @@ -519,9 +519,6 @@ func doHostAPIRequest(ctx context.Context, method, path string, payload any, des if payload != nil { 
req.Header.Set("Content-Type", "application/json") } - if tenant := strings.TrimSpace(devWatchOpts.tenantUUID); tenant != "" { - req.Header.Set("X-Tenant-UUID", tenant) - } token := strings.TrimSpace(devWatchOpts.token) if token != "" { if !strings.HasPrefix(strings.ToLower(token), "bearer ") { diff --git a/backend/cmd/px/commands/version/version_test.go b/backend/cmd/px/commands/version/version_test.go index c2a58117..3aab982c 100644 --- a/backend/cmd/px/commands/version/version_test.go +++ b/backend/cmd/px/commands/version/version_test.go @@ -8,11 +8,13 @@ import ( "net/http/httptest" "testing" + "github.com/ArtisanCloud/PowerX/pkg/utils/testutil" "github.com/spf13/cobra" "github.com/stretchr/testify/require" ) func TestRunVersionScan(t *testing.T) { + testutil.SkipIfNoLocalListener(t) orig := scanOpts t.Cleanup(func() { scanOpts = orig }) @@ -48,6 +50,7 @@ func TestRunVersionScan(t *testing.T) { } func TestRunVersionBoard(t *testing.T) { + testutil.SkipIfNoLocalListener(t) orig := boardOpts t.Cleanup(func() { boardOpts = orig }) @@ -88,6 +91,7 @@ func TestRunVersionBoard(t *testing.T) { } func TestCompatCommands(t *testing.T) { + testutil.SkipIfNoLocalListener(t) origCheck := compatCheckOpts origException := compatExceptionOpts origApprove := compatApproveOpts diff --git a/backend/config/agents/providers.d/anthropic.yaml b/backend/config/agents/providers.d/anthropic.yaml index 7e10609a..f4636b21 100644 --- a/backend/config/agents/providers.d/anthropic.yaml +++ b/backend/config/agents/providers.d/anthropic.yaml @@ -6,7 +6,8 @@ drivers: llm: anthropic embedding: anthropic image: anthropic - audio: anthropic + audio_tts: anthropic + audio_asr: anthropic video: anthropic rerank: anthropic @@ -49,9 +50,11 @@ modalities: models: [] # 有 Embeddings 能力,但当前以 Messages API 为主;如需可后续补充 image: models: [] - audio: + audio_tts: + models: [] + audio_asr: models: [] video: models: [] rerank: - models: [] \ No newline at end of file + models: [] diff --git 
a/backend/config/agents/providers.d/baidu_wenxin.yaml b/backend/config/agents/providers.d/baidu_wenxin.yaml index 74e64353..644d9e22 100644 --- a/backend/config/agents/providers.d/baidu_wenxin.yaml +++ b/backend/config/agents/providers.d/baidu_wenxin.yaml @@ -6,7 +6,8 @@ drivers: llm: baidu embedding: baidu image: baidu - audio: baidu + audio_tts: baidu + audio_asr: baidu video: baidu rerank: baidu @@ -79,7 +80,9 @@ modalities: # 官方未公开统一维度,这里不写 dims,避免类型不符 image: models: [] - audio: + audio_tts: + models: [] + audio_asr: models: [] video: models: [] diff --git a/backend/config/agents/providers.d/comfyui.yaml b/backend/config/agents/providers.d/comfyui.yaml index 818c81ab..da7dc25e 100644 --- a/backend/config/agents/providers.d/comfyui.yaml +++ b/backend/config/agents/providers.d/comfyui.yaml @@ -27,3 +27,7 @@ modalities: video: note: "ComfyUI Video(UI 目录占位)。" models: [] + audio_tts: + models: [] + audio_asr: + models: [] diff --git a/backend/config/agents/providers.d/coze.yaml b/backend/config/agents/providers.d/coze.yaml index ec1549fc..42cc649a 100644 --- a/backend/config/agents/providers.d/coze.yaml +++ b/backend/config/agents/providers.d/coze.yaml @@ -6,7 +6,8 @@ drivers: llm: coze embedding: coze image: coze - audio: coze + audio_tts: coze + audio_asr: coze video: coze rerank: coze @@ -35,9 +36,11 @@ modalities: models: [] # 如你的 driver 支持 Coze 向量服务,可在此处补充 image: models: [] - audio: + audio_tts: + models: [] + audio_asr: models: [] video: models: [] rerank: - models: [] \ No newline at end of file + models: [] diff --git a/backend/config/agents/providers.d/deepseek.yaml b/backend/config/agents/providers.d/deepseek.yaml index ec19db3b..e5480a2c 100644 --- a/backend/config/agents/providers.d/deepseek.yaml +++ b/backend/config/agents/providers.d/deepseek.yaml @@ -6,7 +6,8 @@ drivers: llm: deepseek embedding: deepseek image: deepseek - audio: deepseek + audio_tts: deepseek + audio_asr: deepseek video: deepseek rerank: deepseek @@ -31,9 +32,11 @@ modalities: models: 
[] # 暂未公开官方 Embedding API image: models: [] - audio: + audio_tts: + models: [] + audio_asr: models: [] video: models: [] rerank: - models: [] \ No newline at end of file + models: [] diff --git a/backend/config/agents/providers.d/google.yaml b/backend/config/agents/providers.d/google.yaml new file mode 100644 index 00000000..66fb5da4 --- /dev/null +++ b/backend/config/agents/providers.d/google.yaml @@ -0,0 +1,26 @@ +id: google +name: Google Gemini +aliases: ["google", "gemini", "ai.google.dev"] + +drivers: + image: google + +auth: + scheme: api_key + fields: ["api_key", "base_url"] + defaults: + base_url: "https://generativelanguage.googleapis.com/v1beta" + +modalities: + image: + models: + - id: gemini-2.5-flash-image + label: "Gemini 2.5 Flash Image (Nano Banana)" + tags: ["image", "fast"] + - id: gemini-3-pro-image-preview + label: "Gemini 3 Pro Image Preview (Nano Banana Pro)" + tags: ["image", "high-quality"] + audio_tts: + models: [] + audio_asr: + models: [] diff --git a/backend/config/agents/providers.d/hash.yaml b/backend/config/agents/providers.d/hash.yaml new file mode 100644 index 00000000..a89b0287 --- /dev/null +++ b/backend/config/agents/providers.d/hash.yaml @@ -0,0 +1,23 @@ +id: hash +name: Hash (Local) +aliases: ["hash32", "local_hash"] + +drivers: + embedding: hash + +auth: + scheme: none + fields: [] + +modalities: + embedding: + models: + - id: hash + label: "Hash Embedding (Local, non-semantic)" + tags: ["local","dev","non-semantic"] + defaults: + dimensions: 1536 + audio_tts: + models: [] + audio_asr: + models: [] diff --git a/backend/config/agents/providers.d/huggingface.yaml b/backend/config/agents/providers.d/huggingface.yaml index 759c4dca..1346c856 100644 --- a/backend/config/agents/providers.d/huggingface.yaml +++ b/backend/config/agents/providers.d/huggingface.yaml @@ -6,7 +6,8 @@ drivers: llm: huggingface embedding: huggingface image: huggingface - audio: huggingface + audio_tts: huggingface + audio_asr: huggingface video: huggingface 
rerank: huggingface @@ -14,50 +15,87 @@ auth: scheme: bearer fields: ["api_key", "base_url"] defaults: - base_url: "https://api-inference.huggingface.co" + base_url: "https://router.huggingface.co/v1" modalities: llm: models: - - id: meta-llama/Meta-Llama-3.1-8B-Instruct - label: "Llama 3.1 8B Instruct" - tags: ["open","general","tools"] + - id: Qwen/Qwen2.5-72B-Instruct + label: "Qwen 2.5 72B Instruct" + tags: ["open","zh+en","top-tier","long-context"] defaults: { temperature: 0.7, max_tokens: 1024 } - - id: mistralai/Mistral-7B-Instruct-v0.3 - label: "Mistral 7B Instruct v0.3" - tags: ["open","fast","cheap"] + - id: Qwen/Qwen2.5-32B-Instruct + label: "Qwen 2.5 32B Instruct" + tags: ["open","zh+en","balanced","quality"] defaults: { temperature: 0.7, max_tokens: 1024 } - id: Qwen/Qwen2.5-7B-Instruct - label: "Qwen2.5 7B Instruct" - tags: ["open","balanced","zh+en"] + label: "Qwen 2.5 7B Instruct" + tags: ["open","zh+en","fast","cheap"] + defaults: { temperature: 0.7, max_tokens: 1024 } + - id: Qwen/Qwen2.5-Coder-32B-Instruct + label: "Qwen 2.5 Coder 32B Instruct" + tags: ["open","code","quality"] + defaults: { temperature: 0.7, max_tokens: 1024 } + - id: meta-llama/Llama-3.3-70B-Instruct + label: "Llama 3.3 70B Instruct" + tags: ["open","en","quality","ecosystem"] + defaults: { temperature: 0.7, max_tokens: 1024 } + - id: meta-llama/Llama-3.1-8B-Instruct + label: "Llama 3.1 8B Instruct" + tags: ["open","en","balanced"] + defaults: { temperature: 0.7, max_tokens: 1024 } + - id: deepseek-ai/DeepSeek-V3 + label: "DeepSeek V3" + tags: ["open","moe","reasoning","code"] + defaults: { temperature: 0.7, max_tokens: 1024 } + - id: google/gemma-2-9b-it + label: "Gemma 2 9B Instruct" + tags: ["open","en","compact"] + defaults: { temperature: 0.7, max_tokens: 1024 } + - id: mistralai/Mistral-Nemo-Instruct-2407 + label: "Mistral Nemo Instruct 2407" + tags: ["open","balanced","fast"] + defaults: { temperature: 0.7, max_tokens: 1024 } + - id: microsoft/Phi-4 + label: "Phi-4" + 
tags: ["open","compact","reasoning"] defaults: { temperature: 0.7, max_tokens: 1024 } embedding: models: - - id: sentence-transformers/all-MiniLM-L6-v2 - label: "all-MiniLM-L6-v2" - tags: ["general","cheap"] - limits: { dims: 384 } - - id: sentence-transformers/all-mpnet-base-v2 - label: "all-mpnet-base-v2" - tags: ["general","higher-accuracy"] - limits: { dims: 768 } + - id: BAAI/bge-m3 + label: "BGE M3 (multi-lingual)" + tags: ["zh+en","mteb","general","long-context"] + - id: BAAI/bge-large-zh-v1.5 + label: "BGE Large ZH v1.5" + tags: ["zh","mteb","general"] + - id: Alibaba-NLP/gte-Qwen2-7B-instruct + label: "GTE Qwen2 7B Instruct" + tags: ["zh+en","mteb","large","quality"] + - id: Alibaba-NLP/gte-large-en-v1.5 + label: "GTE Large EN v1.5" + tags: ["en","mteb","general"] + - id: intfloat/multilingual-e5-large + label: "E5 Multilingual Large" + tags: ["zh+en","mteb","query-prefix"] image: models: - id: stabilityai/stable-diffusion-2 label: "Stable Diffusion 2 (text-to-image)" tags: ["img-gen","open"] - audio: + audio_tts: models: - - id: openai/whisper-large-v3 - label: "Whisper Large v3 (ASR)" - tags: ["asr","open"] - id: coqui/XTTS-v2 label: "Coqui XTTS v2 (TTS)" tags: ["tts","multilingual"] + audio_asr: + models: + - id: openai/whisper-large-v3 + label: "Whisper Large v3 (ASR)" + tags: ["asr","open"] video: models: [] rerank: models: - id: cross-encoder/ms-marco-MiniLM-L-6-v2 label: "MS MARCO MiniLM-L-6 (Cross-Encoder)" - tags: ["rerank","open"] \ No newline at end of file + tags: ["rerank","open"] diff --git a/backend/config/agents/providers.d/hunyuan.yaml b/backend/config/agents/providers.d/hunyuan.yaml index aafca192..183dd406 100644 --- a/backend/config/agents/providers.d/hunyuan.yaml +++ b/backend/config/agents/providers.d/hunyuan.yaml @@ -126,3 +126,9 @@ modalities: - id: hunyuan-video label: "混元 视频生成 (placeholder)" tags: ["video","tencent","placeholder"] + + audio_tts: + models: [] + + audio_asr: + models: [] diff --git 
a/backend/config/agents/providers.d/moonshot.yaml b/backend/config/agents/providers.d/moonshot.yaml index b0db6da4..cabac94f 100644 --- a/backend/config/agents/providers.d/moonshot.yaml +++ b/backend/config/agents/providers.d/moonshot.yaml @@ -6,7 +6,8 @@ drivers: llm: moonshot embedding: moonshot image: moonshot - audio: moonshot + audio_tts: moonshot + audio_asr: moonshot video: moonshot rerank: moonshot @@ -39,9 +40,11 @@ modalities: models: [] # 官方暂未公开独立 Embedding API image: models: [] - audio: + audio_tts: + models: [] + audio_asr: models: [] video: models: [] rerank: - models: [] \ No newline at end of file + models: [] diff --git a/backend/config/agents/providers.d/ollama.yaml b/backend/config/agents/providers.d/ollama.yaml index 5ec727d4..2b0e00fe 100644 --- a/backend/config/agents/providers.d/ollama.yaml +++ b/backend/config/agents/providers.d/ollama.yaml @@ -65,16 +65,16 @@ modalities: pooling: "mean" truncation: "end" models: + - id: bge-m3 + label: "BGE M3" - id: mxbai-embed-large label: "MXBAI Embed Large" - id: nomic-embed-text label: "Nomic Embed Text" - id: all-minilm:22m label: "All-MiniLM-L6-v2 (22M)" - - id: bge-base-en - label: "BGE Base EN" - - id: bge-large-zh - label: "BGE Large ZH" + - id: bge-large + label: "BGE Large" # ─────────────────────────── # Image(图像模态)→ 这里定位为 **图像理解 VLM:image+text → text** diff --git a/backend/config/agents/providers.d/openai.yaml b/backend/config/agents/providers.d/openai.yaml index 2ca07bc8..335dd338 100644 --- a/backend/config/agents/providers.d/openai.yaml +++ b/backend/config/agents/providers.d/openai.yaml @@ -6,7 +6,8 @@ drivers: llm: openai embedding: openai image: openai - audio: openai + audio_tts: openai + audio_asr: openai video: openai rerank: openai @@ -84,12 +85,26 @@ modalities: - id: gpt-image-1 label: "GPT-Image-1" tags: ["img-gen","inpainting","variation"] + - id: gpt-image-1-mini + label: "GPT-Image-1 mini" + tags: ["img-gen","fast","cheap"] + - id: gpt-image-1.5 + label: "GPT-Image-1.5" + tags: 
["img-gen","high-quality"] + - id: dall-e-3 + label: "DALL·E 3" + tags: ["img-gen","legacy"] + - id: dall-e-2 + label: "DALL·E 2" + tags: ["img-gen","legacy"] - audio: + audio_tts: models: - id: gpt-4o-mini-tts label: "GPT-4o mini TTS" tags: ["audio","tts","expressive"] + audio_asr: + models: - id: gpt-4o-transcribe label: "GPT-4o Transcribe" tags: ["audio","asr","multilingual"] @@ -101,7 +116,6 @@ modalities: tags: ["audio","asr","legacy"] video: - # 预置 UI 可选项(部分环境通过网关/代理支持) models: - id: sora label: "Sora" @@ -111,4 +125,4 @@ modalities: tags: ["video","gen","placeholder"] rerank: - models: [] # OpenAI 暂无独立 rerank 模型;通常用 LLM/Embedding 组合处理 + models: [] diff --git a/backend/config/agents/providers.d/openai_compatible.yaml b/backend/config/agents/providers.d/openai_compatible.yaml new file mode 100644 index 00000000..915098ed --- /dev/null +++ b/backend/config/agents/providers.d/openai_compatible.yaml @@ -0,0 +1,29 @@ +id: openai_compatible +name: OpenAI Compatible +aliases: ["openai-compatible", "openai compat", "vllm", "llama.cpp", "lmstudio"] + +drivers: + llm: openai_compatible + embedding: openai_compatible + +auth: + scheme: none + fields: ["base_url"] + defaults: + base_url: "http://127.0.0.1:11434/v1" + +modalities: + llm: + models: + - id: "*" + label: "Any (OpenAI-compatible)" + tags: ["local","gateway"] + embedding: + models: + - id: "*" + label: "Any (OpenAI-compatible)" + tags: ["local","gateway"] + audio_tts: + models: [] + audio_asr: + models: [] diff --git a/backend/config/agents/providers.d/qwen-cn.yaml b/backend/config/agents/providers.d/qwen-cn.yaml new file mode 100644 index 00000000..2e1c44eb --- /dev/null +++ b/backend/config/agents/providers.d/qwen-cn.yaml @@ -0,0 +1,51 @@ +id: qwen-cn +name: Qwen(官方国内) +aliases: ["qwen-mainland", "tongyi-cn", "dashscope-cn", "alibaba-cn"] + +drivers: + image: qwen + audio_tts: qwen + +auth: + scheme: bearer + fields: ["api_key", "base_url"] + defaults: + base_url: 
"https://dashscope.aliyuncs.com/compatible-mode/v1" + +modalities: + image: + models: + - id: qwen-vl-max-latest + label: "Qwen VL Max Latest" + - id: qwen-vl-plus-latest + label: "Qwen VL Plus Latest" + - id: qwen-vl-max + label: "Qwen VL Max" + - id: qwen-vl-plus + label: "Qwen VL Plus" + - id: qwen3-vl-plus + label: "Qwen3 VL Plus" + - id: qwen3-vl-flash + label: "Qwen3 VL Flash" + - id: qwen2.5-vl-72b-instruct + label: "Qwen 2.5 VL 72B Instruct" + - id: "*" + label: "Qwen VLM (Any)" + audio_tts: + models: + - id: qwen-tts-latest + label: "Qwen TTS Latest" + - id: qwen-tts-1 + label: "Qwen TTS 1" + - id: qwen3-tts-flash + label: "Qwen3 TTS Flash" + - id: qwen3-tts-instruct-flash + label: "Qwen3 TTS Instruct Flash" + - id: qwen3-tts-vd + label: "Qwen3 TTS VD" + - id: qwen3-tts-vc + label: "Qwen3 TTS VC" + - id: "*" + label: "Qwen TTS (Any)" + audio_asr: + models: [] diff --git a/backend/config/agents/providers.d/qwen.yaml b/backend/config/agents/providers.d/qwen.yaml new file mode 100644 index 00000000..d0b551af --- /dev/null +++ b/backend/config/agents/providers.d/qwen.yaml @@ -0,0 +1,51 @@ +id: qwen-intl +name: Qwen(官方国际) +aliases: ["qwen", "tongyi", "dashscope", "alibaba"] + +drivers: + image: qwen + audio_tts: qwen + +auth: + scheme: bearer + fields: ["api_key", "base_url"] + defaults: + base_url: "https://dashscope-intl.aliyuncs.com/compatible-mode/v1" + +modalities: + image: + models: + - id: qwen-vl-max-latest + label: "Qwen VL Max Latest" + - id: qwen-vl-plus-latest + label: "Qwen VL Plus Latest" + - id: qwen-vl-max + label: "Qwen VL Max" + - id: qwen-vl-plus + label: "Qwen VL Plus" + - id: qwen3-vl-plus + label: "Qwen3 VL Plus" + - id: qwen3-vl-flash + label: "Qwen3 VL Flash" + - id: qwen2.5-vl-72b-instruct + label: "Qwen 2.5 VL 72B Instruct" + - id: "*" + label: "Qwen VLM (Any)" + audio_tts: + models: + - id: qwen-tts-latest + label: "Qwen TTS Latest" + - id: qwen-tts-1 + label: "Qwen TTS 1" + - id: qwen3-tts-flash + label: "Qwen3 TTS Flash" + - id: 
qwen3-tts-instruct-flash + label: "Qwen3 TTS Instruct Flash" + - id: qwen3-tts-vd + label: "Qwen3 TTS VD" + - id: qwen3-tts-vc + label: "Qwen3 TTS VC" + - id: "*" + label: "Qwen TTS (Any)" + audio_asr: + models: [] diff --git a/backend/config/agents/providers.d/sentence-transformers.yaml b/backend/config/agents/providers.d/sentence-transformers.yaml index 7c93c10a..9656fc70 100644 --- a/backend/config/agents/providers.d/sentence-transformers.yaml +++ b/backend/config/agents/providers.d/sentence-transformers.yaml @@ -6,7 +6,8 @@ drivers: llm: sentence_transformers embedding: sentence_transformers image: sentence_transformers - audio: sentence_transformers + audio_tts: sentence_transformers + audio_asr: sentence_transformers video: sentence_transformers rerank: sentence_transformers @@ -39,7 +40,9 @@ modalities: limits: { dims: 384 } image: models: [] - audio: + audio_tts: + models: [] + audio_asr: models: [] video: models: [] @@ -50,4 +53,4 @@ modalities: tags: ["rerank","fast"] - id: cross-encoder/ms-marco-MiniLM-L-12-v2 label: "MS MARCO MiniLM-L-12 (Cross-Encoder)" - tags: ["rerank","accurate"] \ No newline at end of file + tags: ["rerank","accurate"] diff --git a/backend/config/agents/providers.d/volcengine.yaml b/backend/config/agents/providers.d/volcengine.yaml new file mode 100644 index 00000000..0e17e839 --- /dev/null +++ b/backend/config/agents/providers.d/volcengine.yaml @@ -0,0 +1,235 @@ +id: volcengine +name: Volcengine (ByteDance) +aliases: ["volcengine", "volc", "volcano", "bytedance"] + +drivers: + llm: volcengine + image: volcengine + video: volcengine + +auth: + scheme: mixed + fields: ["base_url", "region"] + defaults: + base_url: "https://visual.volcengineapi.com" + modes: + - id: "aksk" + label: "AccessKey / SecretKey" + scheme: "aksk" + fields: ["secret_id", "secret_key", "base_url", "region"] + defaults: + base_url: "https://visual.volcengineapi.com" + - id: "api_key" + label: "API Key (Seedance)" + scheme: "bearer" + fields: ["api_key", 
"base_url"] + defaults: + base_url: "https://operator.las.cn-beijing.volces.com" + +modalities: + audio_tts: + models: [] + audio_asr: + models: [] + +apps: + jimeng: + name: 即梦 AI + modalities: + image: + models: + - id: "doubao-seedream-3-0-t2i-250415" + label: "Seedream 3.0 文生图" + tags: ["image", "volcengine", "seedream"] + defaults: + action: "CVSync2AsyncSubmitTask" + action_poll: "CVSync2AsyncGetResult" + version: "2022-08-31" + service: "cv" + size: "1024x1024" + force_single: true + - id: "doubao-seedream-2-0-t2i-250428" + label: "Seedream 2.0 文生图" + tags: ["image", "volcengine", "seedream"] + defaults: + action: "CVSync2AsyncSubmitTask" + action_poll: "CVSync2AsyncGetResult" + version: "2022-08-31" + service: "cv" + size: "1024x1024" + force_single: true + - id: "doubao-seedream-3-0-i2i-250628" + label: "Seedream 3.0 图生图" + tags: ["image", "volcengine", "seedream"] + defaults: + action: "CVSync2AsyncSubmitTask" + action_poll: "CVSync2AsyncGetResult" + version: "2022-08-31" + service: "cv" + size: "1024x1024" + force_single: true + - id: "jimeng_t2i_v40" + label: "即梦-图片生成4.0" + tags: ["image", "volcengine"] + defaults: + action: "CVSync2AsyncSubmitTask" + action_poll: "CVSync2AsyncGetResult" + version: "2022-08-31" + service: "cv" + size: "1024x1024" + force_single: true + - id: "jimeng_t2i_v31" + label: "即梦-文生图3.1" + tags: ["image", "volcengine"] + defaults: + action: "CVSync2AsyncSubmitTask" + action_poll: "CVSync2AsyncGetResult" + version: "2022-08-31" + service: "cv" + size: "1024x1024" + force_single: true + - id: "jimeng_t2i_v30" + label: "即梦-文生图3.0" + tags: ["image", "volcengine"] + defaults: + action: "CVSync2AsyncSubmitTask" + action_poll: "CVSync2AsyncGetResult" + version: "2022-08-31" + service: "cv" + size: "1024x1024" + force_single: true + - id: "jimeng_i2i_v30" + label: "即梦-图生图3.0-智能参考" + tags: ["image", "volcengine"] + defaults: + action: "CVSync2AsyncSubmitTask" + action_poll: "CVSync2AsyncGetResult" + version: "2022-08-31" + service: "cv" 
+ size: "1024x1024" + force_single: true + - id: "jimeng_t2i_v21" + label: "即梦-文生图2.1" + tags: ["image", "volcengine"] + defaults: + action: "CVSync2AsyncSubmitTask" + action_poll: "CVSync2AsyncGetResult" + version: "2022-08-31" + service: "cv" + size: "1024x1024" + force_single: true + - id: "jimeng-image-4.0" + label: "即梦AI-图片生成4.0(目录)" + tags: ["image", "volcengine", "jimeng", "catalog_only"] + - id: "jimeng-material-extract-pod" + label: "即梦AI-素材提取(POD按需定制)(目录)" + tags: ["image", "volcengine", "jimeng", "catalog_only"] + - id: "jimeng-material-extract-commerce" + label: "即梦AI-素材提取(商品提取)(目录)" + tags: ["image", "volcengine", "jimeng", "catalog_only"] + - id: "jimeng-inpainting" + label: "即梦AI-交互编辑inpainting(目录)" + tags: ["image", "volcengine", "jimeng", "catalog_only"] + - id: "jimeng-super-resolution" + label: "即梦AI-智能超清(目录)" + tags: ["image", "volcengine", "jimeng", "catalog_only"] + video: + models: + - id: "jimeng-video-3.0-pro" + label: "即梦AI-视频生成3.0 Pro" + tags: ["video", "volcengine", "jimeng"] + defaults: + action: "CVSync2AsyncSubmitTask" + action_poll: "CVSync2AsyncGetResult" + version: "2022-08-31" + service: "cv" + - id: "jimeng-video-3.0-720p" + label: "即梦AI-视频生成3.0 720P" + tags: ["video", "volcengine", "jimeng"] + defaults: + action: "CVSync2AsyncSubmitTask" + action_poll: "CVSync2AsyncGetResult" + version: "2022-08-31" + service: "cv" + resolution: "720p" + - id: "jimeng-video-3.0-1080p" + label: "即梦AI-视频生成3.0 1080P" + tags: ["video", "volcengine", "jimeng"] + defaults: + action: "CVSync2AsyncSubmitTask" + action_poll: "CVSync2AsyncGetResult" + version: "2022-08-31" + service: "cv" + resolution: "1080p" + - id: "jimeng-video-s2.0-pro" + label: "即梦AI-视频生成S2.0 Pro(下线中)" + tags: ["video", "volcengine", "jimeng", "deprecated"] + defaults: + action: "CVSync2AsyncSubmitTask" + action_poll: "CVSync2AsyncGetResult" + version: "2022-08-31" + service: "cv" + - id: "jimeng-action-mimic" + label: "动作模仿(目录)" + tags: ["video", "volcengine", "jimeng", 
"catalog_only"] + - id: "jimeng-action-mimic-2.0" + label: "动作模仿2.0(目录)" + tags: ["video", "volcengine", "jimeng", "catalog_only"] + - id: "jimeng-omnihuman-1.0" + label: "数字人快速模式 OmniHuman1.0(目录)" + tags: ["video", "volcengine", "jimeng", "catalog_only"] + - id: "jimeng-omnihuman-1.5" + label: "OmniHuman1.5(目录)" + tags: ["video", "volcengine", "jimeng", "catalog_only"] + - id: "*" + label: "即梦视频 (Any)" + tags: ["video", "volcengine", "jimeng", "wildcard"] + seedance: + name: Seedance + modalities: + video: + models: + - id: "doubao-seedance-1-5-pro-251215" + label: "Seedance 1.5 Pro" + tags: ["video", "volcengine", "seedance"] + defaults: + base_url: "https://operator.las.cn-beijing.volces.com" + api_path_submit: "/api/v1/contents/generations/tasks" + api_path_poll: "/api/v1/contents/generations/tasks/%s" + - id: "doubao-seedance-1-0-pro-250528" + label: "Seedance 1.0 Pro" + tags: ["video", "volcengine", "seedance"] + defaults: + base_url: "https://operator.las.cn-beijing.volces.com" + api_path_submit: "/api/v1/contents/generations/tasks" + api_path_poll: "/api/v1/contents/generations/tasks/%s" + - id: "doubao-seedance-1-0-pro-fast-251015" + label: "Seedance 1.0 Pro Fast" + tags: ["video", "volcengine", "seedance"] + defaults: + base_url: "https://operator.las.cn-beijing.volces.com" + api_path_submit: "/api/v1/contents/generations/tasks" + api_path_poll: "/api/v1/contents/generations/tasks/%s" + - id: "doubao-seedance-1-0-lite-t2v-250428" + label: "Seedance 1.0 Lite T2V" + tags: ["video", "volcengine", "seedance"] + defaults: + base_url: "https://operator.las.cn-beijing.volces.com" + api_path_submit: "/api/v1/contents/generations/tasks" + api_path_poll: "/api/v1/contents/generations/tasks/%s" + - id: "*" + label: "Seedance Video (Any)" + tags: ["video", "volcengine", "seedance", "wildcard"] + coze: + name: Coze + modalities: + llm: + models: + - id: "coze-pro" + label: "Coze Pro (LLM)" + tags: ["llm", "volcengine"] + image: + models: + - id: "coze-image" + label: 
"Coze Image (Preview)" + tags: ["image", "volcengine"] diff --git a/backend/config/config.go b/backend/config/config.go index 15b44e33..34903873 100644 --- a/backend/config/config.go +++ b/backend/config/config.go @@ -11,6 +11,7 @@ import ( "gopkg.in/yaml.v3" "log" "os" + "path/filepath" "strconv" "strings" ) @@ -43,12 +44,37 @@ func GetGlobalConfig() *Config { if GlobalConfig == nil { // 初始化全局配置 if err := InitGlobalConfig("etc/config.yaml"); err != nil { + if alt := findConfigPath("etc/config.yaml"); alt != "" { + if retryErr := InitGlobalConfig(alt); retryErr == nil { + return GlobalConfig + } + } log.Fatalf("初始化全局配置失败: %v", err) } } return GlobalConfig } +func findConfigPath(relPath string) string { + wd, err := os.Getwd() + if err != nil { + return "" + } + dir := filepath.Clean(wd) + for { + candidate := filepath.Join(dir, relPath) + if _, err := os.Stat(candidate); err == nil { + return candidate + } + next := filepath.Dir(dir) + if next == dir || next == "." || next == string(filepath.Separator) { + break + } + dir = next + } + return "" +} + type HTTPSecurityConfig struct { // 允许作为父页面的来源(CSP frame-ancestors 白名单) // 取值示例: "https://admin.powerx.io", "http://localhost:3030", "https://*.powerx.io", "'self'" @@ -62,6 +88,7 @@ type TenantConfig struct { // CoreX 全局配置 type Config struct { + Version string `yaml:"version"` // 系统版本(用于权限 introduced 等) Server ServerConfig `yaml:"server"` // HTTP/gRPC 监听与行为 Auth AuthConfig `yaml:"auth"` // JWT / 认证相关 Event EventConfig `yaml:"event"` // 事件配置(系统总线 + Event Fabric) @@ -73,9 +100,9 @@ type Config struct { KnowledgeSpace KnowledgeSpaceConfig `yaml:"knowledge_space"` // 知识空间治理 LowCode LowCodeConfig `yaml:"low_code"` // flow 执行相关 FeatureGate FeatureGateConfig `yaml:"feature_gate"` // 细粒度开关、license - Database dbCfg.DatabaseConfig `yaml:"database"` // 数据库配置 - Cache cacheCfg.CacheConfig `yaml:"cache"` // 缓存配置 - LogConfig logCfg.LogConfig `yaml:"log"` // 输出配置 + Database dbCfg.DatabaseConfig `yaml:"database"` // 数据库配置 + Cache 
cacheCfg.CacheConfig `yaml:"cache"` // 缓存配置 + LogConfig logCfg.LogConfig `yaml:"log"` // 输出配置 AI agentCfg.AIConfig `yaml:"ai"` Agent agentCfg.AgentConfig `yaml:"agent"` // 智能体工具注册/限流等 Plugin PluginAggregateConfig `yaml:"plugin"` @@ -84,6 +111,26 @@ type Config struct { Tenants TenantConfig `yaml:"tenants"` } +const DefaultSystemVersion = "v1.0.0" + +func (c *Config) EffectiveSystemVersion() string { + if c == nil { + return DefaultSystemVersion + } + version := strings.TrimSpace(c.Version) + if version == "" { + return DefaultSystemVersion + } + return version +} + +func GetSystemVersion() string { + if GlobalConfig == nil { + return DefaultSystemVersion + } + return GlobalConfig.EffectiveSystemVersion() +} + // EffectiveMCPConfig 返回当前应使用的 MCP 配置。 func (c *Config) EffectiveMCPConfig() *mcpCfg.MCPConfig { if c == nil { @@ -132,8 +179,11 @@ type EventBusConfig struct { // QueueConfig 统一队列配置(允许被多个模块引用) type QueueConfig struct { - Driver string `yaml:"driver"` // redis/local - Redis QueueRedisConfig `yaml:"redis"` + Driver string `yaml:"driver"` // redis/local/kafka/rabbitmq/nats + Redis QueueRedisConfig `yaml:"redis"` + Kafka QueueKafkaConfig `yaml:"kafka"` + Rabbit QueueRabbitMQConfig `yaml:"rabbitmq"` + NATS QueueNATSConfig `yaml:"nats"` } // QueueRedisConfig 描述 Redis 连接信息 @@ -143,6 +193,32 @@ type QueueRedisConfig struct { DB int `yaml:"db"` } +// QueueKafkaConfig 描述 Kafka 任务驱动连接参数。 +type QueueKafkaConfig struct { + Brokers []string `yaml:"brokers"` + TopicPrefix string `yaml:"topic_prefix"` + ConsumerGroup string `yaml:"consumer_group"` + PollTimeoutMs int `yaml:"poll_timeout_ms"` +} + +// QueueRabbitMQConfig 描述 RabbitMQ 驱动连接参数。 +type QueueRabbitMQConfig struct { + URL string `yaml:"url"` + Exchange string `yaml:"exchange"` + QueuePrefix string `yaml:"queue_prefix"` + ConsumerTag string `yaml:"consumer_tag"` + Prefetch int `yaml:"prefetch"` + PollTimeoutMs int `yaml:"poll_timeout_ms"` +} + +// QueueNATSConfig 描述 NATS 驱动连接参数。 +type QueueNATSConfig struct { + URLs 
[]string `yaml:"urls"` + SubjectPrefix string `yaml:"subject_prefix"` + QueueGroup string `yaml:"queue_group"` + PollTimeoutMs int `yaml:"poll_timeout_ms"` +} + // SchedulerConfig 统一的任务调度配置 type SchedulerConfig struct { Driver string `yaml:"driver"` // builtin/cron @@ -296,22 +372,34 @@ type AgentLifecycleNotificationConfig struct { // KnowledgeSpaceConfig 描述知识空间模块运行参数。 type KnowledgeSpaceConfig struct { - RedisAddr string `yaml:"redis_addr"` - RedisPassword string `yaml:"redis_password"` - RedisDB int `yaml:"redis_db"` - LockKeyPrefix string `yaml:"lock_key_prefix"` - MetricsKeyPrefix string `yaml:"metrics_key_prefix"` - DefaultRetentionMonths int `yaml:"default_retention_months"` - ProvisioningSLASeconds int `yaml:"provisioning_sla_seconds"` - IngestionSLASeconds int `yaml:"ingestion_sla_seconds"` - EventTopics KnowledgeSpaceEventTopics `yaml:"event_topics"` - Notifications KnowledgeSpaceNotificationConfig `yaml:"notifications"` - VectorStore KnowledgeSpaceVectorStoreConfig `yaml:"vector_store"` - Delta KnowledgeSpaceDeltaConfig `yaml:"delta"` - Reports KnowledgeSpaceReportConfig `yaml:"reports"` - EventHotfix KnowledgeSpaceEventHotfixConfig `yaml:"event_hotfix"` - Decay KnowledgeSpaceDecayConfig `yaml:"decay"` - Release KnowledgeSpaceReleaseConfig `yaml:"release"` + RedisAddr string `yaml:"redis_addr"` + RedisPassword string `yaml:"redis_password"` + RedisDB int `yaml:"redis_db"` + LockKeyPrefix string `yaml:"lock_key_prefix"` + MetricsKeyPrefix string `yaml:"metrics_key_prefix"` + DefaultRetentionMonths int `yaml:"default_retention_months"` + ProvisioningSLASeconds int `yaml:"provisioning_sla_seconds"` + IngestionSLASeconds int `yaml:"ingestion_sla_seconds"` + SceneStrategyCatalogPath string `yaml:"scene_strategy_catalog_path"` + IngestionProcessors KnowledgeSpaceIngestionProcessorConfig `yaml:"ingestion_processors"` + EventTopics KnowledgeSpaceEventTopics `yaml:"event_topics"` + Notifications KnowledgeSpaceNotificationConfig `yaml:"notifications"` + 
VectorStore KnowledgeSpaceVectorStoreConfig `yaml:"vector_store"` + IndexBackends KnowledgeSpaceIndexBackendConfig `yaml:"index_backends"` + Delta KnowledgeSpaceDeltaConfig `yaml:"delta"` + Reports KnowledgeSpaceReportConfig `yaml:"reports"` + EventHotfix KnowledgeSpaceEventHotfixConfig `yaml:"event_hotfix"` + Decay KnowledgeSpaceDecayConfig `yaml:"decay"` + Release KnowledgeSpaceReleaseConfig `yaml:"release"` +} + +// KnowledgeSpaceIngestionProcessorConfig 控制入库处理器能力开关(用于部署环境可控启停)。 +// 留空则使用运行时自动探测(PATH 中是否存在对应命令)。 +type KnowledgeSpaceIngestionProcessorConfig struct { + // PDF 内嵌文本抽取:依赖 `pdftotext`(poppler-utils) + PDFTextAvailable *bool `yaml:"pdf_text_available"` + // OCR Plan B:依赖 `tesseract` + (`pdftoppm` 或 `mutool`) + OCRAvailable *bool `yaml:"ocr_available"` } // KnowledgeSpaceEventTopics 定义事件主题。 @@ -338,6 +426,19 @@ type KnowledgeSpaceVectorStoreConfig struct { Pinecone KnowledgeSpaceVectorStorePineconeConfig `yaml:"pinecone"` } +// KnowledgeSpaceIndexBackendConfig defines storage backends for non-dense indices. +// Values are intentionally explicit so `make db-migrate` can decide whether to create assist tables. +type KnowledgeSpaceIndexBackendConfig struct { + // Sparse index backend (for `index.sparse`): `postgres_fts` or `external`. + Sparse string `yaml:"sparse"` + // Hier index backend (for `index.hier`): `postgres_links` or `external`. + Hier string `yaml:"hier"` + // Structured field filtering backend (for `index.structured_fields`): `postgres_jsonb` or `external`. + StructuredFields string `yaml:"structured_fields"` + // KG backend (for `index.kg`): `postgres` or `external`. 
+ KG string `yaml:"kg"` +} + type KnowledgeSpaceDeltaConfig struct { SourcesConfig string `yaml:"sources_config"` PartialReleaseConfig string `yaml:"partial_release_config"` diff --git a/backend/config/defaults.go b/backend/config/defaults.go index cdc2cf92..8763eccf 100644 --- a/backend/config/defaults.go +++ b/backend/config/defaults.go @@ -9,6 +9,7 @@ import ( // GetDefaults 返回默认配置(已对齐新版 AuthConfig 字段) func GetDefaults() *Config { return &Config{ + Version: DefaultSystemVersion, Server: ServerConfig{ Port: 8077, ReadTimeoutSeconds: 5, @@ -121,6 +122,26 @@ func GetDefaults() *Config { Password: "", DB: 5, }, + Kafka: QueueKafkaConfig{ + Brokers: []string{"localhost:9092"}, + TopicPrefix: "event_fabric.task", + ConsumerGroup: "powerx.event_fabric", + PollTimeoutMs: 1000, + }, + Rabbit: QueueRabbitMQConfig{ + URL: "amqp://guest:guest@localhost:5672/", + Exchange: "event_fabric.task", + QueuePrefix: "event_fabric.task", + ConsumerTag: "powerx.event_fabric", + Prefetch: 50, + PollTimeoutMs: 1000, + }, + NATS: QueueNATSConfig{ + URLs: []string{"nats://localhost:4222"}, + SubjectPrefix: "event_fabric.task", + QueueGroup: "powerx.event_fabric", + PollTimeoutMs: 1000, + }, }, Scheduler: SchedulerConfig{ Driver: "builtin", @@ -183,14 +204,15 @@ func GetDefaults() *Config { }, }, KnowledgeSpace: KnowledgeSpaceConfig{ - RedisAddr: "", - RedisPassword: "", - RedisDB: 0, - LockKeyPrefix: "knowledge_space:lock", - MetricsKeyPrefix: "knowledge_space:metrics", - DefaultRetentionMonths: 13, - ProvisioningSLASeconds: 120, - IngestionSLASeconds: 4 * 3600, + RedisAddr: "", + RedisPassword: "", + RedisDB: 0, + LockKeyPrefix: "knowledge_space:lock", + MetricsKeyPrefix: "knowledge_space:metrics", + DefaultRetentionMonths: 13, + ProvisioningSLASeconds: 120, + IngestionSLASeconds: 4 * 3600, + SceneStrategyCatalogPath: "backend/config/knowledge/scene_strategy_catalog.yaml", EventTopics: KnowledgeSpaceEventTopics{ Provisioning: "knowledge.space.provisioning", Ingestion: 
"knowledge.space.ingestion", @@ -207,7 +229,7 @@ func GetDefaults() *Config { Driver: "", PgVector: KnowledgeSpaceVectorStorePGVectorConfig{ Schema: "public", - Table: "knowledge_vectors", + Table: "knowledge_vectors_v1_1536", Dimensions: 1536, EnableMigrations: false, BatchSize: 128, @@ -215,9 +237,16 @@ func GetDefaults() *Config { TimeoutSeconds: 30, }, }, + // B 方案默认:使用 Postgres-backed 的 sparse/hier/structured/kg(外部实现可覆盖为 external)。 + IndexBackends: KnowledgeSpaceIndexBackendConfig{ + Sparse: "postgres_fts", + Hier: "postgres_links", + StructuredFields: "postgres_jsonb", + KG: "postgres", + }, Delta: KnowledgeSpaceDeltaConfig{ - SourcesConfig: "configs/knowledge/delta_sources.yaml", - PartialReleaseConfig: "configs/knowledge/partial_release.yaml", + SourcesConfig: "backend/config/knowledge/delta_sources.yaml", + PartialReleaseConfig: "backend/config/knowledge/partial_release.yaml", ReportPath: "backend/reports/_state/knowledge-delta.json", AggregateReportPath: "reports/_state/knowledge-update.json", SLAMinutes: 30, @@ -229,20 +258,20 @@ func GetDefaults() *Config { QABridgePath: "reports/_state/qa-reasoning.json", }, EventHotfix: KnowledgeSpaceEventHotfixConfig{ - PoliciesPath: "configs/knowledge/event_hotfix_policies.yaml", - AgentMatrixPath: "configs/knowledge/agent_weight_matrix.yaml", + PoliciesPath: "backend/config/knowledge/event_hotfix_policies.yaml", + AgentMatrixPath: "backend/config/knowledge/agent_weight_matrix.yaml", ReportPath: "backend/reports/_state/knowledge-event.json", AggregateReportPath: "reports/_state/knowledge-update.json", RetryMax: 3, ReplayWindowSeconds: 300, }, Decay: KnowledgeSpaceDecayConfig{ - ThresholdPath: "configs/knowledge/decay_thresholds.yaml", + ThresholdPath: "backend/config/knowledge/decay_thresholds.yaml", ReportPath: "backend/reports/_state/knowledge-decay.json", AggregateReportPath: "reports/_state/knowledge-update.json", }, Release: KnowledgeSpaceReleaseConfig{ - MatrixPath: 
"configs/knowledge/tenant_release_matrix.yaml", + MatrixPath: "backend/config/knowledge/tenant_release_matrix.yaml", GuardrailsDoc: "docs/ops/release_guardrails.md", ReportPath: "backend/reports/_state/knowledge-release.json", AggregateReportPath: "reports/_state/knowledge-update.json", @@ -280,7 +309,7 @@ func GetDefaults() *Config { }, FeatureGate: FeatureGateConfig{ LicenseKey: "demo-license-xyz", - EnableEventFabric: true, + EnableEventFabric: false, EnableWorkflow: true, EnableKnowledgeSpace: true, EnableMediaPlatform: true, @@ -293,6 +322,7 @@ func GetDefaults() *Config { BasePath: "./storage/media", PublicBaseURL: "http://localhost:8077/media", UploadTokenSecret: "", + PublicTokenSecret: "", MaxUploadSizeBytes: 100 << 20, // 100MB }, S3: S3StorageConfig{ diff --git a/backend/config/knowledge/scene_strategy_catalog.yaml b/backend/config/knowledge/scene_strategy_catalog.yaml new file mode 100644 index 00000000..53685b0b --- /dev/null +++ b/backend/config/knowledge/scene_strategy_catalog.yaml @@ -0,0 +1,859 @@ +version: 1 +kind: scene_strategy_catalog + +# 单一事实来源(SSOT):Web Admin 的“场景(L1)→策略包(L2)”选择、以及后端的前置依赖校验/推荐规则,都应以此文件为准。 +# 设计依据: +# - docs/plan/AI_engineering/knowledge/rag.md +# - docs/plan/AI_engineering/knowledge/rag_scene_strategy_mode.md + +modules: + # A: Simple RAG(简单切块 + 向量召回) + A_simple: + label: "Simple RAG" + requires: ["index.dense"] + # A1: Query Routing(查询路由) + A1_routing: + label: "Query Routing" + requires: [] + # A2: Time-aware RAG(时间感知) + A2_time_aware: + label: "Time-aware" + requires: ["index.time_fields"] + # B: Semantic Chunking(语义切块) + B_semantic_chunking: + label: "Semantic Chunking" + requires: ["asset.section_summaries"] + # C: Context Enriched Retrieval(上下文扩展) + C_context_enriched: + label: "Context Enriched" + requires: ["index.hier"] + # D: Document Augmentation(离线增强) + D_doc_augmentation: + label: "Document Augmentation" + requires: ["asset.augmented_fields"] + # E: Query Transformation(查询转换) + E_query_transform: + label: "Query 
Transformation" + requires: [] + # F: Reranker(重排) + F_rerank: + label: "Reranker" + requires: ["runtime.rerank"] + # G: RSE(语义扩展重排) + G_rse: + label: "RSE" + requires: ["asset.domain_lexicon"] + # H: Fusion(融合:vector+bm25+kg+hier) + H_fusion: + label: "Fusion (Hybrid/RRF)" + requires: ["index.dense", "index.sparse"] + # I: HyDE + I_hyde: + label: "HyDE" + requires: ["runtime.llm", "index.dense"] + # J: Hierarchical Indices(层次索引) + J_hier: + label: "Hierarchical Indices" + requires: ["index.hier", "asset.section_summaries"] + # K: Knowledge Graph(知识图谱) + K_kg: + label: "Knowledge Graph" + requires: ["index.kg"] + # L: Feedback Loop(反馈闭环) + L_feedback: + label: "Feedback Loop" + requires: ["runtime.feedback"] + # M: Adaptive RAG(自适应) + M_adaptive: + label: "Adaptive RAG" + requires: ["runtime.policy_router"] + # N: Self RAG(自反思回路) + N_self_rag: + label: "Self RAG" + requires: ["runtime.llm", "runtime.evidence_checker"] + # O: CRAG(纠错) + O_crag: + label: "CRAG" + requires: ["runtime.evidence_checker", "index.sparse"] + +strategy_packages: + A0_acl: + label: "A0 Metadata/ACL 优先过滤" + summary: "先做租户/权限/元数据过滤,再做召回,保证合规降噪。" + coupling: "weak" + recommended_profile_key: "p1_general" + recommended_scenes: + [ + "product_specs", + "product_compat", + "product_selection", + "sop", + "contract_quote", + "research_longdoc", + "ledger_table", + "support_faq", + "support_policy", + "support_troubleshooting", + "eng_runbook", + "eng_incident", + "eng_change", + "api_reference", + "data_dictionary", + "sales_enablement", + "marketing_promo_rules", + "compliance_regulation", + "billing_pricing", + "meeting_minutes", + "project_docs", + "ticket_conversations", + "onboarding_training", + "sql_kg", + "custom_expert", + ] + dependencies: + index: [] + runtime: ["acl_enforcer"] + assets: ["metadata.normalized"] + + A1_routing: + label: "A1 Query Routing(查询路由)" + summary: "按意图/领域把 query 路由到不同索引通道或空间。" + coupling: "weak" + recommended_profile_key: "p1_general" + recommended_scenes: 
["product_specs", "support_faq", "support_policy", "eng_runbook", "compliance_regulation", "sales_enablement", "marketing_promo_rules", "project_docs", "custom_expert"] + dependencies: + index: [] + runtime: ["routing_policy"] + assets: ["domain_router_map"] + + A2_time_aware: + label: "A2 Time-aware(时间/版本)" + summary: "对版本/生效时间做过滤或权重衰减。" + coupling: "strong" + recommended_profile_key: "p2_high_accuracy" + recommended_scenes: ["contract_quote", "compliance_regulation", "billing_pricing", "marketing_promo_rules", "product_specs"] + dependencies: + index: ["index.time_fields"] + runtime: ["versioning_policy"] + assets: ["effective_date_fields"] + + A_simple: + label: "A Simple RAG(最小闭环)" + summary: "简单切块 + 向量召回,适合快速验证。" + coupling: "weak" + recommended_profile_key: "p0_basic" + recommended_scenes: ["sop", "support_faq", "support_policy", "support_troubleshooting", "eng_runbook", "meeting_minutes", "project_docs", "onboarding_training"] + dependencies: + index: ["index.dense"] + runtime: [] + assets: [] + + B_semantic_chunking: + label: "B Semantic Chunking(语义切块)" + summary: "按语义边界切块,适合论文/长报告。" + coupling: "strong" + recommended_profile_key: "p1_general" + recommended_scenes: ["research_longdoc", "project_docs", "meeting_minutes"] + dependencies: + index: ["index.dense"] + runtime: [] + assets: ["semantic_boundaries"] + + C_context_enriched: + label: "C Context Enriched(上下文增强)" + summary: "召回后扩展同章节邻居/父摘要。" + coupling: "weak" + recommended_profile_key: "p1_general" + recommended_scenes: ["sop", "contract_quote", "eng_runbook", "support_policy", "compliance_regulation"] + dependencies: + index: ["index.hier"] + runtime: [] + assets: ["section_neighbors"] + + D_doc_augmentation: + label: "D Doc Augmentation(离线增强)" + summary: "生成摘要/关键词/实体标签等增强字段。" + coupling: "strong" + recommended_profile_key: "p2_high_accuracy" + recommended_scenes: ["contract_quote", "product_specs", "sales_enablement", "marketing_promo_rules", "compliance_regulation"] + dependencies: + index: 
["index.structured_fields"] + runtime: ["offline_pipeline"] + assets: ["augmented_fields"] + + E_query_transform: + label: "E Query Transformation(查询转换)" + summary: "同义扩展/结构化抽取/纠错。" + coupling: "weak" + recommended_profile_key: "p1_general" + recommended_scenes: ["product_specs", "data_dictionary", "api_reference", "billing_pricing"] + dependencies: + index: ["index.structured_fields"] + runtime: ["query_rewrite"] + assets: ["synonyms_rules"] + + F_rerank: + label: "F Reranker(重排序)" + summary: "降低相似候选误命中。" + coupling: "weak" + recommended_profile_key: "p1_general" + recommended_scenes: ["product_specs", "product_compat", "contract_quote", "compliance_regulation", "data_dictionary", "api_reference"] + dependencies: + index: ["index.dense"] + runtime: ["reranker_model"] + assets: [] + + G_rse: + label: "G RSE(语义扩展重排)" + summary: "语义扩展 + 重排,适合术语多的场景。" + coupling: "weak" + recommended_profile_key: "p2_high_accuracy" + recommended_scenes: ["product_specs", "product_compat", "data_dictionary", "api_reference", "sql_kg"] + dependencies: + index: ["index.dense"] + runtime: ["reranker_model"] + assets: ["domain_lexicon"] + + H_fusion: + label: "H Fusion(融合检索)" + summary: "dense+sparse(+kg/hier) 多路召回融合。" + coupling: "weak" + recommended_profile_key: "p1_general" + recommended_scenes: + [ + "product_specs", + "product_compat", + "product_selection", + "sop", + "contract_quote", + "research_longdoc", + "ledger_table", + "support_faq", + "support_policy", + "support_troubleshooting", + "eng_runbook", + "eng_incident", + "eng_change", + "api_reference", + "data_dictionary", + "sales_enablement", + "marketing_promo_rules", + "compliance_regulation", + "billing_pricing", + "meeting_minutes", + "project_docs", + "ticket_conversations", + "onboarding_training", + "sql_kg", + ] + dependencies: + index: ["index.dense", "index.sparse"] + runtime: ["score_normalizer"] + assets: [] + + I_hyde: + label: "I HyDE(假设文档检索)" + summary: "生成假设答案后再向量检索。" + coupling: "weak" + 
recommended_profile_key: "p1_general" + recommended_scenes: ["research_longdoc", "project_docs", "meeting_minutes"] + dependencies: + index: ["index.dense"] + runtime: ["llm_generate"] + assets: [] + + J_hier: + label: "J Hierarchical Indices(层次索引)" + summary: "先章节/摘要,再下钻 chunk。" + coupling: "strong" + recommended_profile_key: "p1_general" + recommended_scenes: ["sop", "research_longdoc", "eng_runbook", "compliance_regulation"] + dependencies: + index: ["index.hier"] + runtime: [] + assets: ["section_summaries"] + + K_kg: + label: "K Knowledge Graph(知识图谱)" + summary: "实体/关系召回与约束。" + coupling: "strong" + recommended_profile_key: "p3_kg_strong" + recommended_scenes: ["product_compat", "data_dictionary", "api_reference", "sql_kg", "compliance_regulation"] + dependencies: + index: ["index.kg"] + runtime: ["graph_query"] + assets: ["kg_entities_relations"] + + L_feedback: + label: "L Feedback Loop(反馈闭环)" + summary: "标注→再加工→回归评估。" + coupling: "weak" + recommended_profile_key: "p1_general" + recommended_scenes: + [ + "product_specs", + "product_compat", + "product_selection", + "sop", + "contract_quote", + "research_longdoc", + "ledger_table", + "support_faq", + "support_policy", + "support_troubleshooting", + "eng_runbook", + "eng_incident", + "eng_change", + "api_reference", + "data_dictionary", + "sales_enablement", + "marketing_promo_rules", + "compliance_regulation", + "billing_pricing", + "meeting_minutes", + "project_docs", + "ticket_conversations", + "onboarding_training", + "sql_kg", + "custom_expert", + ] + dependencies: + index: [] + runtime: ["feedback_workflow"] + assets: ["feedback_case"] + + M_adaptive: + label: "M Adaptive RAG(自适应策略)" + summary: "按置信度/成本动态启用策略。" + coupling: "weak" + recommended_profile_key: "p2_high_accuracy" + recommended_scenes: + [ + "product_specs", + "product_compat", + "product_selection", + "sop", + "contract_quote", + "research_longdoc", + "ledger_table", + "support_faq", + "support_policy", + "support_troubleshooting", + 
"eng_runbook", + "eng_incident", + "eng_change", + "api_reference", + "data_dictionary", + "sales_enablement", + "marketing_promo_rules", + "compliance_regulation", + "billing_pricing", + "meeting_minutes", + "project_docs", + "ticket_conversations", + "onboarding_training", + "sql_kg", + "custom_expert", + ] + dependencies: + index: [] + runtime: ["policy_router"] + assets: ["observability_metrics"] + + N_self_rag: + label: "N Self RAG(自反思回路)" + summary: "自检证据与冲突,触发二次检索。" + coupling: "weak" + recommended_profile_key: "p2_high_accuracy" + recommended_scenes: ["contract_quote", "compliance_regulation", "billing_pricing"] + dependencies: + index: [] + runtime: ["consistency_checker"] + assets: ["evidence_guardrails"] + + O_crag: + label: "O CRAG(纠错)" + summary: "证据不足/冲突时触发纠错检索。" + coupling: "strong" + recommended_profile_key: "p2_high_accuracy" + recommended_scenes: ["contract_quote", "compliance_regulation", "billing_pricing", "product_specs"] + dependencies: + index: ["index.dense", "index.sparse"] + runtime: ["consistency_checker"] + assets: [] + +strategy_bundles: + p0_basic: + label: "P0 基础(最小闭环)" + description: "最少干预:仅 dense 召回 + 引用返回;不开 multiquery/HyDE/rerank/KG/self-check。" + prerequisites: ["index.dense"] + default_modules: ["A_simple", "L_feedback"] + guardrails: + must_cite_sources: true + max_queries: 1 + rerank_topk: 0 + self_rag_max_loops: 0 + hyde_enabled: false + + p1_general: + label: "P1 通用推荐(企业默认)" + description: "hybrid + RRF + 轻量 rerank + contextual(按场景启用 hier)。" + prerequisites: ["index.dense", "index.sparse"] + default_modules: ["H_fusion", "F_rerank", "C_context_enriched", "L_feedback"] + guardrails: + must_cite_sources: true + max_queries: 3 + rerank_topk: 10 + self_rag_max_loops: 0 + hyde_enabled: false + + p2_high_accuracy: + label: "P2 高准确/合规(证据优先)" + description: "sparse 优先 + hybrid + CRAG/可选 self-check + time-aware(按场景)。" + prerequisites: ["index.dense", "index.sparse", "runtime.evidence_checker"] + default_modules: ["H_fusion", 
"O_crag", "F_rerank", "L_feedback"] + guardrails: + must_cite_sources: true + min_evidence_chunks: 2 + max_queries: 3 + rerank_topk: 10 + self_rag_max_loops: 1 + hyde_enabled: false + + p3_kg_strong: + label: "P3 KG 约束(关系驱动)" + description: "KG recall/filter + hybrid + cite,适用于依赖/约束/关系查询。" + prerequisites: ["index.kg", "index.sparse", "index.dense"] + default_modules: ["K_kg", "H_fusion", "C_context_enriched", "L_feedback"] + guardrails: + must_cite_sources: true + max_queries: 2 + rerank_topk: 10 + self_rag_max_loops: 0 + hyde_enabled: false + +scenes: + product_specs: + label: "产品库 / 规格参数查询" + description: "参数/型号/价格/口径等事实精确型查询;混合数据源常见(DB+文档)。" + default_bundle: "p2_high_accuracy" + allowed_bundles: ["p1_general", "p2_high_accuracy", "p3_kg_strong"] + default_modules: ["H_fusion", "O_crag", "F_rerank", "E_query_transform", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse", "index.structured_fields"] + assets: [] + ingestion_defaults: + chunking: + mode: "structured_fields_card" + unit: "chars" + chunk_size: 700 + overlap: 120 + separators: ["\\n\\n", "\\n", "。", ";", ":", ":"] + + product_compat: + label: "产品库 / 兼容性与配件关系" + description: "兼容矩阵/配件关系/约束条件;可选 KG 强化解释与过滤。" + default_bundle: "p2_high_accuracy" + allowed_bundles: ["p1_general", "p2_high_accuracy", "p3_kg_strong"] + default_modules: ["H_fusion", "O_crag", "F_rerank", "C_context_enriched", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse", "index.structured_fields"] + assets: [] + ingestion_defaults: + chunking: + mode: "structured_compat" + unit: "chars" + chunk_size: 800 + overlap: 120 + separators: ["\\n\\n", "\\n", "兼容", "不兼容", "支持", "不支持", ":", ":"] + + product_selection: + label: "产品库 / 选型对比与推荐理由" + description: "方案对比/选型决策;更偏解释归纳,但仍需可追溯引用。" + default_bundle: "p1_general" + allowed_bundles: ["p0_basic", "p1_general", "p2_high_accuracy"] + default_modules: ["H_fusion", "F_rerank", "C_context_enriched", "L_feedback"] + prerequisites: + index: ["index.dense", 
"index.sparse"] + assets: [] + ingestion_defaults: + chunking: + mode: "structured" + unit: "chars" + chunk_size: 900 + overlap: 150 + separators: ["\\n\\n", "\\n", "对比", "推荐", "理由", "。", ";"] + + sop: + label: "SOP/制度/产品说明" + description: "Markdown/Word 为主,结构清晰,常见查询为流程/解释归纳。" + default_bundle: "p1_general" + allowed_bundles: ["p0_basic", "p1_general", "p2_high_accuracy"] + default_modules: ["H_fusion", "F_rerank", "C_context_enriched", "J_hier", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse", "index.hier"] + assets: ["asset.section_summaries"] + ingestion_defaults: + chunking: + mode: "structured" + unit: "chars" + chunk_size: 800 + overlap: 120 + separators: ["\\n\\n", "\\n", "。", "!", "?", ";"] + + contract_quote: + label: "合同/报价" + description: "PDF/Word/Excel 表格多;事实查找+合规风险高,默认证据优先。" + default_bundle: "p2_high_accuracy" + allowed_bundles: ["p1_general", "p2_high_accuracy", "p3_kg_strong"] + default_modules: ["H_fusion", "A2_time_aware", "O_crag", "F_rerank", "D_doc_augmentation", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse", "index.time_fields", "index.structured_fields"] + assets: ["asset.augmented_fields"] + ingestion_defaults: + chunking: + mode: "structured_clause_table" + unit: "chars" + chunk_size: 900 + overlap: 150 + separators: ["\\n\\n", "\\n", "条", "款", "。", ";"] + + research_longdoc: + label: "论文/研究/长报告" + description: "PDF 长文;需要语义切块与层次索引,支持 hier-first 与 HyDE(可选)。" + default_bundle: "p1_general" + allowed_bundles: ["p0_basic", "p1_general", "p2_high_accuracy"] + default_modules: ["B_semantic_chunking", "J_hier", "H_fusion", "F_rerank", "C_context_enriched", "L_feedback"] + prerequisites: + index: ["index.dense", "index.hier"] + assets: ["asset.section_summaries"] + ingestion_defaults: + chunking: + mode: "semantic" + unit: "chars" + chunk_size: 1200 + overlap: 200 + separators: ["\\n\\n", "\\n", "。", "!", "?"] + + ledger_table: + label: "台账/清单(表格)" + description: "Excel/CSV 结构化行;以精确过滤/字段查询为主。" + 
default_bundle: "p2_high_accuracy" + allowed_bundles: ["p0_basic", "p1_general", "p2_high_accuracy"] + default_modules: ["H_fusion", "F_rerank", "O_crag", "L_feedback"] + prerequisites: + index: ["index.sparse", "index.structured_fields", "index.dense"] + assets: [] + ingestion_defaults: + chunking: + mode: "row" + unit: "records" + chunk_size: 1 + overlap: 0 + separators: [] + + support_faq: + label: "客服 / FAQ 与使用说明" + description: "问答粒度、同义表达多;强调命中率与可解释引用。" + default_bundle: "p1_general" + allowed_bundles: ["p0_basic", "p1_general", "p2_high_accuracy"] + default_modules: ["H_fusion", "E_query_transform", "F_rerank", "C_context_enriched", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse"] + assets: [] + ingestion_defaults: + chunking: + mode: "qna" + unit: "chars" + chunk_size: 500 + overlap: 80 + separators: ["\\n\\n", "\\n", "Q:", "A:", "?", "。"] + + support_policy: + label: "客服 / 售后政策与规则" + description: "退款/保修/条款政策;高风险答复需要强证据与纠错。" + default_bundle: "p2_high_accuracy" + allowed_bundles: ["p1_general", "p2_high_accuracy"] + default_modules: ["H_fusion", "O_crag", "F_rerank", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse"] + assets: [] + ingestion_defaults: + chunking: + mode: "policy_clause" + unit: "chars" + chunk_size: 900 + overlap: 150 + separators: ["\\n\\n", "\\n", "条", "款", "。", ";"] + + support_troubleshooting: + label: "客服 / 故障现象与排查" + description: "现象→原因→处理步骤;需要步骤链路与上下文补齐。" + default_bundle: "p1_general" + allowed_bundles: ["p0_basic", "p1_general", "p2_high_accuracy", "p3_kg_strong"] + default_modules: ["J_hier", "H_fusion", "F_rerank", "C_context_enriched", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse", "index.hier"] + assets: ["asset.section_summaries"] + ingestion_defaults: + chunking: + mode: "structured_steps" + unit: "chars" + chunk_size: 900 + overlap: 150 + separators: ["\\n\\n", "\\n", "步骤", "Step", "。", ";"] + + eng_runbook: + label: "工程 / Runbook 与标准操作" + description: 
"标准操作/手册;章节与步骤链路清晰,强调上下文。" + default_bundle: "p1_general" + allowed_bundles: ["p0_basic", "p1_general", "p2_high_accuracy"] + default_modules: ["J_hier", "H_fusion", "F_rerank", "C_context_enriched", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse", "index.hier"] + assets: ["asset.section_summaries"] + ingestion_defaults: + chunking: + mode: "structured_steps" + unit: "chars" + chunk_size: 900 + overlap: 150 + separators: ["\\n\\n", "\\n", "步骤", "Step", "。", ";"] + + eng_incident: + label: "工程 / 故障排查与应急响应" + description: "应急响应/故障处理;需要时间线、步骤链路与降级策略(可选)。" + default_bundle: "p1_general" + allowed_bundles: ["p0_basic", "p1_general", "p2_high_accuracy", "p3_kg_strong"] + default_modules: ["J_hier", "H_fusion", "F_rerank", "C_context_enriched", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse", "index.hier"] + assets: ["asset.section_summaries"] + ingestion_defaults: + chunking: + mode: "structured_steps" + unit: "chars" + chunk_size: 900 + overlap: 150 + separators: ["\\n\\n", "\\n", "现象", "原因", "处理", "步骤", "。", ";"] + + eng_change: + label: "工程 / 变更与发布" + description: "变更评审/发布/回滚;高风险需要证据优先与版本/时间过滤。" + default_bundle: "p2_high_accuracy" + allowed_bundles: ["p1_general", "p2_high_accuracy", "p3_kg_strong"] + default_modules: ["H_fusion", "A2_time_aware", "O_crag", "F_rerank", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse", "index.time_fields"] + assets: [] + ingestion_defaults: + chunking: + mode: "change_log" + unit: "chars" + chunk_size: 900 + overlap: 150 + separators: ["\\n\\n", "\\n", "变更", "发布", "回滚", "风险", "。", ";"] + + api_reference: + label: "API/接口文档 / 参数与返回" + description: "字段/参数/返回值/示例为主;事实精确优先,适合接口问答。" + default_bundle: "p2_high_accuracy" + allowed_bundles: ["p1_general", "p2_high_accuracy", "p3_kg_strong"] + default_modules: ["H_fusion", "O_crag", "E_query_transform", "F_rerank", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse", "index.structured_fields"] + assets: [] + 
ingestion_defaults: + chunking: + mode: "api_section" + unit: "chars" + chunk_size: 800 + overlap: 120 + separators: ["\\n\\n", "\\n", "参数", "字段", "返回", "示例", ":", ":"] + + data_dictionary: + label: "数据字典 / 表结构与字段口径" + description: "表/字段/口径/示例;精确定位字段含义,支持结构化过滤。" + default_bundle: "p2_high_accuracy" + allowed_bundles: ["p1_general", "p2_high_accuracy", "p3_kg_strong"] + default_modules: ["H_fusion", "O_crag", "E_query_transform", "F_rerank", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse", "index.structured_fields"] + assets: [] + ingestion_defaults: + chunking: + mode: "schema_table" + unit: "chars" + chunk_size: 800 + overlap: 120 + separators: ["\\n\\n", "\\n", "表", "字段", "类型", "口径", ":", ":"] + + sales_enablement: + label: "销售材料 / 话术与竞品" + description: "销售话术、竞品对比、案例材料;偏解释归纳,但仍需可追溯引用。" + default_bundle: "p1_general" + allowed_bundles: ["p0_basic", "p1_general", "p2_high_accuracy"] + default_modules: ["H_fusion", "F_rerank", "C_context_enriched", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse"] + assets: [] + ingestion_defaults: + chunking: + mode: "structured" + unit: "chars" + chunk_size: 900 + overlap: 150 + separators: ["\\n\\n", "\\n", "对比", "优势", "劣势", "。", ";"] + + marketing_promo_rules: + label: "市场活动 / 促销规则" + description: "活动规则、优惠口径、例外条款;高风险答复需要强证据。" + default_bundle: "p2_high_accuracy" + allowed_bundles: ["p1_general", "p2_high_accuracy"] + default_modules: ["H_fusion", "O_crag", "F_rerank", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse"] + assets: [] + ingestion_defaults: + chunking: + mode: "policy_clause" + unit: "chars" + chunk_size: 900 + overlap: 150 + separators: ["\\n\\n", "\\n", "规则", "例外", "不适用", "。", ";"] + + compliance_regulation: + label: "法规/监管政策 / 口径与约束" + description: "法规/监管/政策口径;时效强,建议启用时间字段过滤。" + default_bundle: "p2_high_accuracy" + allowed_bundles: ["p1_general", "p2_high_accuracy", "p3_kg_strong"] + default_modules: ["H_fusion", "A2_time_aware", "O_crag", 
"F_rerank", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse", "index.time_fields"] + assets: [] + ingestion_defaults: + chunking: + mode: "policy_clause" + unit: "chars" + chunk_size: 1100 + overlap: 180 + separators: ["\\n\\n", "\\n", "条", "款", "。", ";"] + + billing_pricing: + label: "计费/价格规则 / 口径与例外" + description: "计费口径、价格规则、版本差异与例外;事实精确 + 时效要求高。" + default_bundle: "p2_high_accuracy" + allowed_bundles: ["p1_general", "p2_high_accuracy"] + default_modules: ["H_fusion", "A2_time_aware", "O_crag", "F_rerank", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse", "index.structured_fields", "index.time_fields"] + assets: [] + ingestion_defaults: + chunking: + mode: "structured_fields_card" + unit: "chars" + chunk_size: 900 + overlap: 150 + separators: ["\\n\\n", "\\n", "价格", "计费", "规则", "例外", ":", ":"] + + meeting_minutes: + label: "会议纪要 / 决议与行动项" + description: "会议纪要、决议、行动项;偏总结归纳并要求引用定位。" + default_bundle: "p1_general" + allowed_bundles: ["p0_basic", "p1_general", "p2_high_accuracy"] + default_modules: ["J_hier", "H_fusion", "F_rerank", "C_context_enriched", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse", "index.hier"] + assets: ["asset.section_summaries"] + ingestion_defaults: + chunking: + mode: "minutes" + unit: "chars" + chunk_size: 900 + overlap: 150 + separators: ["\\n\\n", "\\n", "决议", "行动项", "Owner", "Due", "。", ";"] + + project_docs: + label: "项目方案 / 交付文档" + description: "项目方案、需求、设计与交付资料;章节上下文与引用定位重要。" + default_bundle: "p1_general" + allowed_bundles: ["p0_basic", "p1_general", "p2_high_accuracy"] + default_modules: ["J_hier", "H_fusion", "F_rerank", "C_context_enriched", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse", "index.hier"] + assets: ["asset.section_summaries"] + ingestion_defaults: + chunking: + mode: "structured" + unit: "chars" + chunk_size: 1000 + overlap: 180 + separators: ["\\n\\n", "\\n", "。", "!", "?", ";"] + + ticket_conversations: + label: "工单/聊天记录 / 
问题追踪" + description: "工单对话、聊天记录、问题追踪;需要按话题/时间窗聚合与可追溯引用。" + default_bundle: "p1_general" + allowed_bundles: ["p0_basic", "p1_general", "p2_high_accuracy"] + default_modules: ["H_fusion", "E_query_transform", "F_rerank", "C_context_enriched", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse"] + assets: [] + ingestion_defaults: + chunking: + mode: "conversation_window" + unit: "chars" + chunk_size: 700 + overlap: 120 + separators: ["\\n\\n", "\\n", "时间", "用户", "客服", ":", ":"] + + onboarding_training: + label: "入职/培训资料 / 学习路径" + description: "培训课件、学习资料、入职手册;强调归纳总结与引用定位。" + default_bundle: "p1_general" + allowed_bundles: ["p0_basic", "p1_general", "p2_high_accuracy"] + default_modules: ["J_hier", "H_fusion", "F_rerank", "C_context_enriched", "L_feedback"] + prerequisites: + index: ["index.dense", "index.sparse", "index.hier"] + assets: ["asset.section_summaries"] + ingestion_defaults: + chunking: + mode: "structured" + unit: "chars" + chunk_size: 900 + overlap: 150 + separators: ["\\n\\n", "\\n", "课程", "章节", "。", ";"] + + sql_kg: + label: "SQL/配置/依赖关系(KG 强)" + description: "关系/依赖驱动:KG 为主召回/约束通道,dense 作为摘要补充。" + default_bundle: "p3_kg_strong" + allowed_bundles: ["p1_general", "p2_high_accuracy", "p3_kg_strong"] + default_modules: ["K_kg", "H_fusion", "C_context_enriched", "L_feedback"] + prerequisites: + index: ["index.kg", "index.sparse", "index.dense"] + assets: ["asset.kg_entities", "asset.kg_relations", "asset.kg_provenance"] + ingestion_defaults: + chunking: + mode: "ast_object" + unit: "objects" + chunk_size: 1 + overlap: 0 + separators: [] + + custom_expert: + label: "自定义(专家)" + description: "允许选择全部策略模块与策略包,但必须进行依赖校验与成本护栏。" + default_bundle: "p1_general" + allowed_bundles: ["p0_basic", "p1_general", "p2_high_accuracy", "p3_kg_strong"] + default_modules: ["H_fusion", "L_feedback"] + prerequisites: + index: ["index.dense"] + assets: [] + ingestion_defaults: + chunking: + mode: "structured" + unit: "chars" + chunk_size: 800 + overlap: 120 + 
separators: ["\\n\\n", "\\n", "。", "!", "?", ";"] diff --git a/backend/config/knowledge/tenant_release_matrix.yaml b/backend/config/knowledge/tenant_release_matrix.yaml index c94c4d90..4736e2c8 100644 --- a/backend/config/knowledge/tenant_release_matrix.yaml +++ b/backend/config/knowledge/tenant_release_matrix.yaml @@ -7,6 +7,9 @@ ], "guardrails": { "latency_p95": "<5m", - "rollback_sla_minutes": "5" - } + "rollback_sla_minutes": "5", + "version_drift": "<=1" + }, + "approvedBy": "governance@powerx.io", + "createdBy": "ops@powerx.io" } diff --git a/backend/config/platform_capabilities/agent.yaml b/backend/config/platform_capabilities/agent.yaml new file mode 100644 index 00000000..1129a692 --- /dev/null +++ b/backend/config/platform_capabilities/agent.yaml @@ -0,0 +1,79 @@ +version: 1 +capabilities: + - capability_id: com.corex.agent.invoke + module: agent + title: Agent Invoke + description: 非流式调用 Agent 对话。 + categories: [agent, ai] + intents: [agent.invoke] + tool_scopes: [agent.runtime] + policy: + prefer: rest + fallback: [grpc] + docs: + - specs/007-integration-gateway-and-mcp/contracts/agent.http-openapi.yaml + - backend/api/grpc/contracts/powerx/agent/v1/agent_api.proto + protocols: + - channel: rest + endpoint: /api/v1/agents/invoke + method: POST + schema_ref: specs/007-integration-gateway-and-mcp/contracts/agent.http-openapi.yaml#/paths/~1agents~1invoke/post + auth_type: tenant_jwt + tool_scope: agent.runtime + - channel: grpc + endpoint: powerx.agent.v1.AgentInvokeService + rpc: Invoke + schema_ref: backend/api/grpc/contracts/powerx/agent/v1/agent_api.proto#AgentInvokeService + auth_type: tenant_jwt + tool_scope: agent.runtime + - capability_id: com.corex.agent.stream + module: agent + title: Agent Stream + description: 通过 SSE/gRPC 流式输出 Agent 对话内容。 + categories: [agent, ai] + intents: [agent.stream] + tool_scopes: [agent.runtime] + policy: + prefer: rest + docs: + - specs/007-integration-gateway-and-mcp/contracts/agent.http-openapi.yaml + - 
backend/api/grpc/contracts/powerx/agent/v1/stream.proto + protocols: + - channel: rest + endpoint: /api/v1/agents/stream/sse + method: GET + schema_ref: specs/007-integration-gateway-and-mcp/contracts/agent.http-openapi.yaml#/paths/~1agents~1stream~1sse/get + auth_type: tenant_jwt + tool_scope: agent.runtime + - channel: grpc + endpoint: powerx.agent.v1.AgentStreamService + rpc: Stream + schema_ref: backend/api/grpc/contracts/powerx/agent/v1/stream.proto#AgentStreamService + auth_type: tenant_jwt + tool_scope: agent.runtime + - capability_id: com.corex.agent.session.manage + module: agent + title: Agent Session Management + description: 创建会话与管理消息。 + categories: [agent, ai] + intents: [agent.session.manage] + tool_scopes: [agent.session] + policy: + prefer: rest + fallback: [grpc] + docs: + - specs/007-integration-gateway-and-mcp/contracts/agent.http-openapi.yaml + - backend/api/grpc/contracts/powerx/agent/v1/agent_api.proto + protocols: + - channel: rest + endpoint: /api/v1/agents/sessions + method: POST + schema_ref: specs/007-integration-gateway-and-mcp/contracts/agent.http-openapi.yaml#/paths/~1agents~1sessions/post + auth_type: tenant_jwt + tool_scope: agent.session + - channel: grpc + endpoint: powerx.agent.v1.AgentSessionService + rpc: CreateSession + schema_ref: backend/api/grpc/contracts/powerx/agent/v1/agent_api.proto#AgentSessionService + auth_type: tenant_jwt + tool_scope: agent.session diff --git a/backend/config/platform_capabilities/ai.yaml b/backend/config/platform_capabilities/ai.yaml new file mode 100644 index 00000000..ff686922 --- /dev/null +++ b/backend/config/platform_capabilities/ai.yaml @@ -0,0 +1,209 @@ +version: 1 +capabilities: + - capability_id: com.corex.ai.llm.invoke + module: ai + title: LLM Invoke + description: 大语言模型无状态调用。 + categories: [ai, llm] + intents: [ai.llm.invoke] + tool_scopes: [ai.llm] + policy: + prefer: rest + fallback: [grpc] + docs: + - specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml + - 
backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto + protocols: + - channel: rest + endpoint: /api/v1/ai/llm/invoke + method: POST + schema_ref: specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1llm~1invoke/post + auth_type: tenant_jwt + tool_scope: ai.llm + - channel: grpc + endpoint: powerx.ai.v1.MultimodalService + rpc: Invoke + schema_ref: backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#MultimodalService + auth_type: tenant_jwt + tool_scope: ai.llm + - capability_id: com.corex.ai.llm.stream + module: ai + title: LLM Stream + description: LLM 会话流式输出。 + categories: [ai, llm] + intents: [ai.llm.stream] + tool_scopes: [ai.llm] + policy: + prefer: rest + docs: + - specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml + - backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto + protocols: + - channel: rest + endpoint: /api/v1/ai/llm/sessions/{session_id}/stream + method: GET + schema_ref: specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1llm~1sessions~1{session_id}~1stream/get + auth_type: tenant_jwt + tool_scope: ai.llm + - channel: grpc + endpoint: powerx.ai.v1.MultimodalService + rpc: Stream + schema_ref: backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#MultimodalService + auth_type: tenant_jwt + tool_scope: ai.llm + - capability_id: com.corex.ai.llm.session.create + module: ai + title: LLM Session Create + description: 创建 LLM 会话。 + categories: [ai, llm] + intents: [ai.llm.session.create] + tool_scopes: [ai.llm.session] + policy: + prefer: rest + fallback: [grpc] + docs: + - specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml + - backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto + protocols: + - channel: rest + endpoint: /api/v1/ai/llm/sessions + method: POST + schema_ref: specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1llm~1sessions/post + auth_type: 
tenant_jwt + tool_scope: ai.llm.session + - channel: grpc + endpoint: powerx.ai.v1.MultimodalSessionService + rpc: CreateSession + schema_ref: backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#MultimodalSessionService + auth_type: tenant_jwt + tool_scope: ai.llm.session + - capability_id: com.corex.ai.llm.session.append + module: ai + title: LLM Session Append + description: 追加 LLM 会话消息。 + categories: [ai, llm] + intents: [ai.llm.session.append] + tool_scopes: [ai.llm.session] + policy: + prefer: rest + fallback: [grpc] + docs: + - specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml + - backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto + protocols: + - channel: rest + endpoint: /api/v1/ai/llm/sessions/{session_id}/messages + method: POST + schema_ref: specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1llm~1sessions~1{session_id}~1messages/post + auth_type: tenant_jwt + tool_scope: ai.llm.session + - channel: grpc + endpoint: powerx.ai.v1.MultimodalSessionService + rpc: AppendMessage + schema_ref: backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#MultimodalSessionService + auth_type: tenant_jwt + tool_scope: ai.llm.session + - capability_id: com.corex.ai.image.invoke + module: ai + title: Image Invoke + description: 图像生成/理解调用。 + categories: [ai, image] + intents: [ai.image.invoke] + tool_scopes: [ai.image] + policy: + prefer: rest + fallback: [grpc] + docs: + - specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml + - backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto + protocols: + - channel: rest + endpoint: /api/v1/ai/image/invoke + method: POST + schema_ref: specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1image~1invoke/post + auth_type: tenant_jwt + tool_scope: ai.image + - channel: grpc + endpoint: powerx.ai.v1.MultimodalService + rpc: Invoke + schema_ref: 
backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#MultimodalService + auth_type: tenant_jwt + tool_scope: ai.image + - capability_id: com.corex.ai.video.invoke + module: ai + title: Video Invoke + description: 视频生成/理解调用。 + categories: [ai, video] + intents: [ai.video.invoke] + tool_scopes: [ai.video] + policy: + prefer: rest + fallback: [grpc] + docs: + - specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml + - backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto + protocols: + - channel: rest + endpoint: /api/v1/ai/video/invoke + method: POST + schema_ref: specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1video~1invoke/post + auth_type: tenant_jwt + tool_scope: ai.video + - channel: grpc + endpoint: powerx.ai.v1.MultimodalService + rpc: Invoke + schema_ref: backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#MultimodalService + auth_type: tenant_jwt + tool_scope: ai.video + - capability_id: com.corex.ai.tts.invoke + module: ai + title: TTS Invoke + description: 语音合成调用。 + categories: [ai, tts] + intents: [ai.tts.invoke] + tool_scopes: [ai.tts] + policy: + prefer: rest + fallback: [grpc] + docs: + - specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml + - backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto + protocols: + - channel: rest + endpoint: /api/v1/ai/tts/invoke + method: POST + schema_ref: specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1tts~1invoke/post + auth_type: tenant_jwt + tool_scope: ai.tts + - channel: grpc + endpoint: powerx.ai.v1.MultimodalService + rpc: Invoke + schema_ref: backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#MultimodalService + auth_type: tenant_jwt + tool_scope: ai.tts + - capability_id: com.corex.ai.embedding.invoke + module: ai + title: Embedding Invoke + description: 向量生成(embedding)。 + categories: [ai, embedding] + intents: [ai.embedding.invoke] + tool_scopes: 
[ai.embedding] + policy: + prefer: rest + fallback: [grpc] + docs: + - specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml + - backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto + protocols: + - channel: rest + endpoint: /api/v1/ai/embedding/invoke + method: POST + schema_ref: specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1embedding~1invoke/post + auth_type: tenant_jwt + tool_scope: ai.embedding + - channel: grpc + endpoint: powerx.ai.v1.EmbeddingService + rpc: Embed + schema_ref: backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#EmbeddingService + auth_type: tenant_jwt + tool_scope: ai.embedding diff --git a/backend/config/storage.go b/backend/config/storage.go index afbcd5f9..10059c00 100644 --- a/backend/config/storage.go +++ b/backend/config/storage.go @@ -13,6 +13,9 @@ type LocalStorageConfig struct { BasePath string `yaml:"base_path"` PublicBaseURL string `yaml:"public_base_url"` UploadTokenSecret string `yaml:"upload_token_secret"` + // PublicTokenSecret 用于公开资源入口(/media/:uuid/resource)的临时访问 token(HMAC)签名与校验。 + // 为空时:公开入口仅允许 published 资源访问;非 published 资源无法生成可直接打开的 presign(download) 链接。 + PublicTokenSecret string `yaml:"public_token_secret"` MaxUploadSizeBytes int64 `yaml:"max_upload_size_bytes"` } diff --git a/backend/config/validator.go b/backend/config/validator.go index 1e7fa3d0..93c8527a 100644 --- a/backend/config/validator.go +++ b/backend/config/validator.go @@ -52,73 +52,75 @@ func (c *Config) Validate() error { } // --- Event Fabric --- - if c.Event.Fabric.AckTimeoutSeconds <= 0 { - errors = append(errors, "event_fabric.ack_timeout_seconds 必须大于0") - } - if c.Event.Fabric.DefaultMaxRetry <= 0 { - errors = append(errors, "event_fabric.default_max_retry 必须大于0") - } - if strings.TrimSpace(c.Event.Fabric.RetryKeyPrefix) == "" { - errors = append(errors, "event_fabric.retry_key_prefix 不能为空") - } - if strings.TrimSpace(c.Event.Fabric.ReplayKeyPrefix) == "" { - errors = 
append(errors, "event_fabric.replay_key_prefix 不能为空") - } - if c.Event.Fabric.SchedulerInterval <= 0 { - errors = append(errors, "event_fabric.scheduler_interval 必须大于0") - } - if strings.TrimSpace(c.Event.Fabric.RedisAddr) == "" { - errors = append(errors, "event_fabric.redis_addr 不能为空") - } - if strings.TrimSpace(c.Event.Fabric.Security.SignatureSecret) != "" { - if strings.TrimSpace(c.Event.Fabric.Security.SignatureHeader) == "" { - errors = append(errors, "event_fabric.security.signature_header 不能为空") + if c.FeatureGate.EnableEventFabric { + if c.Event.Fabric.AckTimeoutSeconds <= 0 { + errors = append(errors, "event_fabric.ack_timeout_seconds 必须大于0") } - if strings.TrimSpace(c.Event.Fabric.Security.TimestampHeader) == "" { - errors = append(errors, "event_fabric.security.timestamp_header 不能为空") + if c.Event.Fabric.DefaultMaxRetry <= 0 { + errors = append(errors, "event_fabric.default_max_retry 必须大于0") } - if c.Event.Fabric.Security.AllowedClockSkewSeconds <= 0 { - errors = append(errors, "event_fabric.security.allowed_clock_skew_seconds 必须大于0") + if strings.TrimSpace(c.Event.Fabric.RetryKeyPrefix) == "" { + errors = append(errors, "event_fabric.retry_key_prefix 不能为空") + } + if strings.TrimSpace(c.Event.Fabric.ReplayKeyPrefix) == "" { + errors = append(errors, "event_fabric.replay_key_prefix 不能为空") + } + if c.Event.Fabric.SchedulerInterval <= 0 { + errors = append(errors, "event_fabric.scheduler_interval 必须大于0") + } + if strings.TrimSpace(c.Event.Fabric.RedisAddr) == "" { + errors = append(errors, "event_fabric.redis_addr 不能为空") + } + if strings.TrimSpace(c.Event.Fabric.Security.SignatureSecret) != "" { + if strings.TrimSpace(c.Event.Fabric.Security.SignatureHeader) == "" { + errors = append(errors, "event_fabric.security.signature_header 不能为空") + } + if strings.TrimSpace(c.Event.Fabric.Security.TimestampHeader) == "" { + errors = append(errors, "event_fabric.security.timestamp_header 不能为空") + } + if c.Event.Fabric.Security.AllowedClockSkewSeconds <= 0 { + errors 
= append(errors, "event_fabric.security.allowed_clock_skew_seconds 必须大于0") + } + } + if c.Event.Fabric.Authorization.CacheTTLSeconds <= 0 { + errors = append(errors, "event_fabric.authorization.cache_ttl_seconds 必须大于0") + } + if c.Event.Fabric.Authorization.LocalCacheTTLSeconds <= 0 { + errors = append(errors, "event_fabric.authorization.local_cache_ttl_seconds 必须大于0") + } + if strings.TrimSpace(c.Event.Fabric.Authorization.RedisAddr) == "" { + errors = append(errors, "event_fabric.authorization.redis_addr 不能为空") + } + if strings.TrimSpace(c.Event.Fabric.Authorization.CacheInvalidateChannel) == "" { + errors = append(errors, "event_fabric.authorization.cache_invalidate_channel 不能为空") + } + if c.Event.Fabric.Authorization.ChallengeSLASeconds <= 0 { + errors = append(errors, "event_fabric.authorization.challenge_sla_seconds 必须大于0") + } + if strings.TrimSpace(c.Event.Fabric.Authorization.ChallengeTopic) == "" { + errors = append(errors, "event_fabric.authorization.challenge_topic 不能为空") + } + if strings.TrimSpace(c.Event.Fabric.Authorization.ChallengeConsumerGroup) == "" { + errors = append(errors, "event_fabric.authorization.challenge_consumer_group 不能为空") + } + if c.Event.Fabric.Authorization.TimeoutSweepIntervalSeconds <= 0 { + errors = append(errors, "event_fabric.authorization.timeout_sweep_interval_seconds 必须大于0") + } + if c.Event.Fabric.Authorization.AuditRetentionDays <= 0 { + errors = append(errors, "event_fabric.authorization.audit_retention_days 必须大于0") + } + if strings.TrimSpace(c.Event.Fabric.Authorization.AuditArchiveBucket) == "" { + errors = append(errors, "event_fabric.authorization.audit_archive_bucket 不能为空") + } + if strings.TrimSpace(c.Event.Fabric.Authorization.AuditArchivePrefix) == "" { + errors = append(errors, "event_fabric.authorization.audit_archive_prefix 不能为空") + } + if c.Event.Fabric.Authorization.Secrets.CacheTTLSeconds < 0 { + errors = append(errors, "event_fabric.authorization.secrets.cache_ttl_seconds 不能为负数") + } + if 
c.Event.Fabric.Authorization.Secrets.RotationIntervalSeconds < 0 { + errors = append(errors, "event_fabric.authorization.secrets.rotation_interval_seconds 不能为负数") } - } - if c.Event.Fabric.Authorization.CacheTTLSeconds <= 0 { - errors = append(errors, "event_fabric.authorization.cache_ttl_seconds 必须大于0") - } - if c.Event.Fabric.Authorization.LocalCacheTTLSeconds <= 0 { - errors = append(errors, "event_fabric.authorization.local_cache_ttl_seconds 必须大于0") - } - if strings.TrimSpace(c.Event.Fabric.Authorization.RedisAddr) == "" { - errors = append(errors, "event_fabric.authorization.redis_addr 不能为空") - } - if strings.TrimSpace(c.Event.Fabric.Authorization.CacheInvalidateChannel) == "" { - errors = append(errors, "event_fabric.authorization.cache_invalidate_channel 不能为空") - } - if c.Event.Fabric.Authorization.ChallengeSLASeconds <= 0 { - errors = append(errors, "event_fabric.authorization.challenge_sla_seconds 必须大于0") - } - if strings.TrimSpace(c.Event.Fabric.Authorization.ChallengeTopic) == "" { - errors = append(errors, "event_fabric.authorization.challenge_topic 不能为空") - } - if strings.TrimSpace(c.Event.Fabric.Authorization.ChallengeConsumerGroup) == "" { - errors = append(errors, "event_fabric.authorization.challenge_consumer_group 不能为空") - } - if c.Event.Fabric.Authorization.TimeoutSweepIntervalSeconds <= 0 { - errors = append(errors, "event_fabric.authorization.timeout_sweep_interval_seconds 必须大于0") - } - if c.Event.Fabric.Authorization.AuditRetentionDays <= 0 { - errors = append(errors, "event_fabric.authorization.audit_retention_days 必须大于0") - } - if strings.TrimSpace(c.Event.Fabric.Authorization.AuditArchiveBucket) == "" { - errors = append(errors, "event_fabric.authorization.audit_archive_bucket 不能为空") - } - if strings.TrimSpace(c.Event.Fabric.Authorization.AuditArchivePrefix) == "" { - errors = append(errors, "event_fabric.authorization.audit_archive_prefix 不能为空") - } - if c.Event.Fabric.Authorization.Secrets.CacheTTLSeconds < 0 { - errors = append(errors, 
"event_fabric.authorization.secrets.cache_ttl_seconds 不能为负数") - } - if c.Event.Fabric.Authorization.Secrets.RotationIntervalSeconds < 0 { - errors = append(errors, "event_fabric.authorization.secrets.rotation_interval_seconds 不能为负数") } // --- Capability Registry --- @@ -205,8 +207,11 @@ func (c *Config) Validate() error { if vectorDriver != "" { switch strings.ToLower(vectorDriver) { case vectorstore.DriverPGVector: - if strings.TrimSpace(c.KnowledgeSpace.VectorStore.PgVector.DSN) == "" { - errors = append(errors, "knowledge_space.vector_store.pgvector.dsn 不能为空") + // Allow reusing main database.dsn (or host/user/...) when pgvector.dsn is empty. + if strings.TrimSpace(c.KnowledgeSpace.VectorStore.PgVector.DSN) == "" && + strings.TrimSpace(c.Database.DSN) == "" && + strings.TrimSpace(c.Database.Host) == "" { + errors = append(errors, "knowledge_space.vector_store.pgvector.dsn 不能为空(或提供 database.dsn/database.host 用于复用)") } case vectorstore.DriverMilvus: if strings.TrimSpace(c.KnowledgeSpace.VectorStore.Milvus.Endpoint) == "" { diff --git a/backend/etc/config_example.yaml b/backend/etc/config_example.yaml index 252c150d..5c584d8a 100644 --- a/backend/etc/config_example.yaml +++ b/backend/etc/config_example.yaml @@ -1,3 +1,5 @@ +version: v1.0.0 + server: port: 8077 read_timeout_seconds: 5 @@ -139,12 +141,12 @@ event: fabric: ack_timeout_seconds: 30 default_max_retry: 5 - redis_addr: "" # 留空则复用 queue.redis.addr + redis_addr: "127.0.0.1:6379" redis_password: "" # 留空则复用 queue.redis.password redis_db: 0 # 0 表示复用 queue.redis.db retry_key_prefix: event_fabric:retry replay_key_prefix: event_fabric:replay - scheduler_interval: 0 # 0 表示复用 scheduler.interval_seconds + scheduler_interval: 5 security: require_tls: false signature_secret: "" @@ -161,7 +163,7 @@ event: authorization: cache_ttl_seconds: 60 local_cache_ttl_seconds: 30 - redis_addr: "" # 可单独指定,留空则复用 queue.redis + redis_addr: "127.0.0.1:6379" redis_password: "" redis_db: 0 cache_invalidate_channel: 
event_fabric:authorization:invalidate @@ -172,8 +174,8 @@ event: rate_limit_prefix: event_fabric:authorization:rl timeout_sweep_interval_seconds: 60 audit_retention_days: 7 - audit_archive_bucket: "" - audit_archive_prefix: "" + audit_archive_bucket: "local-audit-archive" + audit_archive_prefix: "event-fabric/auth" secrets: provider: "" key_id: "" @@ -186,6 +188,25 @@ queue: addr: localhost:6379 password: "" db: 5 + kafka: + brokers: + - localhost:9092 + topic_prefix: event_fabric.task + consumer_group: powerx.event_fabric + poll_timeout_ms: 1000 + rabbitmq: + url: amqp://guest:guest@localhost:5672/ + exchange: event_fabric.task + queue_prefix: event_fabric.task + consumer_tag: powerx.event_fabric + prefetch: 50 + poll_timeout_ms: 1000 + nats: + urls: + - nats://localhost:4222 + subject_prefix: event_fabric.task + queue_group: powerx.event_fabric + poll_timeout_ms: 1000 scheduler: driver: builtin @@ -221,6 +242,7 @@ ai: top_p: 1.0 stream: false embedding: + # 默认使用 OpenAI embedding(需要在 Web Admin「AI Settings」或此处配置 api_key 才能生效)。 provider: openai endpoint: "https://api.openai.com/v1" model: text-embedding-3-small @@ -321,6 +343,15 @@ knowledge_space: default_retention_months: 13 provisioning_sla_seconds: 120 ingestion_sla_seconds: 14400 + scene_strategy_catalog_path: backend/config/knowledge/scene_strategy_catalog.yaml + ingestion_processors: + # PDF 内嵌文本抽取(需要镜像/机器安装 `pdftotext`,通常来自 poppler-utils) + # - true: 强制启用 + # - false: 强制禁用 + # - null/不写: 自动探测 + pdf_text_available: null + # OCR Plan B(需要 tesseract + pdftoppm 或 mutool) + ocr_available: null event_topics: provisioning: knowledge.space.provisioning ingestion: knowledge.space.ingestion @@ -331,6 +362,31 @@ knowledge_space: retry_interval_seconds: 60 retry_max_attempts: 3 http_timeout_seconds: 5 + vector_store: + driver: "pgvector" # pgvector / milvus / pinecone + pgvector: + dsn: "" # 留空则复用 database.dsn + schema: public + table: knowledge_vectors_v1_1536 + dimensions: 1536 + enable_migrations: false + batch_size: 
128 + ivfflat_lists: 100 + timeout_seconds: 30 + milvus: + endpoint: "" + api_key: "" + project: "" + pinecone: + endpoint: "" + api_key: "" + index: "" + namespace: "" + index_backends: + sparse: postgres_fts # or external + hier: postgres_links # or external + structured_fields: postgres_jsonb # or external + kg: postgres # or external delta: sources_config: configs/knowledge/delta_sources.yaml partial_release_config: configs/knowledge/partial_release.yaml diff --git a/backend/internal/agent/toolstore/store.go b/backend/internal/agent/toolstore/store.go index d7cff23d..6bdd3502 100644 --- a/backend/internal/agent/toolstore/store.go +++ b/backend/internal/agent/toolstore/store.go @@ -9,7 +9,7 @@ import ( "sync" "time" - "github.com/ArtisanCloud/PowerX/internal/eventbus" + eventbus "github.com/ArtisanCloud/PowerX/internal/event_bus" capregistry "github.com/ArtisanCloud/PowerX/internal/service/capability_registry" "github.com/ArtisanCloud/PowerX/pkg/event_bus" pxlog "github.com/ArtisanCloud/PowerX/pkg/utils/logger" diff --git a/backend/internal/agent/toolstore/store_test.go b/backend/internal/agent/toolstore/store_test.go index cb888f1e..47066030 100644 --- a/backend/internal/agent/toolstore/store_test.go +++ b/backend/internal/agent/toolstore/store_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/ArtisanCloud/PowerX/internal/eventbus" + eventbus "github.com/ArtisanCloud/PowerX/internal/event_bus" capregistry "github.com/ArtisanCloud/PowerX/internal/service/capability_registry" "github.com/ArtisanCloud/PowerX/pkg/event_bus" "github.com/stretchr/testify/require" diff --git a/backend/internal/agent/toolstore/version_lock.go b/backend/internal/agent/toolstore/version_lock.go index 03617878..ac75299c 100644 --- a/backend/internal/agent/toolstore/version_lock.go +++ b/backend/internal/agent/toolstore/version_lock.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "github.com/ArtisanCloud/PowerX/internal/eventbus" + eventbus 
"github.com/ArtisanCloud/PowerX/internal/event_bus" "github.com/ArtisanCloud/PowerX/pkg/event_bus" "github.com/redis/go-redis/v9" ) diff --git a/backend/internal/app/shared/deps.go b/backend/internal/app/shared/deps.go index 468770e3..1565cacf 100644 --- a/backend/internal/app/shared/deps.go +++ b/backend/internal/app/shared/deps.go @@ -9,16 +9,19 @@ import ( "os" "path/filepath" "strings" + "sync" "time" toolstore "github.com/ArtisanCloud/PowerX/internal/agent/toolstore" workers "github.com/ArtisanCloud/PowerX/internal/app/shared/workers" + eventbus "github.com/ArtisanCloud/PowerX/internal/event_bus" discoverycache "github.com/ArtisanCloud/PowerX/internal/infra/cache/discovery" mediamgr "github.com/ArtisanCloud/PowerX/internal/infra/media/manager" imnotify "github.com/ArtisanCloud/PowerX/internal/notifications/im" capmetrics "github.com/ArtisanCloud/PowerX/internal/observability/metrics" agentrepo "github.com/ArtisanCloud/PowerX/internal/server/agent/persistence/repository" igdeps "github.com/ArtisanCloud/PowerX/internal/server/mcp/tools/integration_gateway/deps" + agentsettings "github.com/ArtisanCloud/PowerX/internal/service/agent" agentlifecycle "github.com/ArtisanCloud/PowerX/internal/service/agent_lifecycle" agentinstr "github.com/ArtisanCloud/PowerX/internal/service/agent_lifecycle/instrumentation" authsvc "github.com/ArtisanCloud/PowerX/internal/service/auth" @@ -40,7 +43,9 @@ import ( manifestService "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/manifest" eventmetrics "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/metrics" replayService "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/replay" + cronschedulersvc "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/scheduler" security "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/security" + eventshared "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/shared" iamsvc "github.com/ArtisanCloud/PowerX/internal/service/iam" 
ticketbridge "github.com/ArtisanCloud/PowerX/internal/service/integration/ticketbridge" integrationgateway "github.com/ArtisanCloud/PowerX/internal/service/integration_gateway" @@ -58,6 +63,7 @@ import ( tenant_release "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space/tenant_release" kntoolchain "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space/toolchain" mediasvc "github.com/ArtisanCloud/PowerX/internal/service/media" + notificationssvc "github.com/ArtisanCloud/PowerX/internal/service/notifications" pluginbootstrap "github.com/ArtisanCloud/PowerX/internal/service/plugin_bootstrap" plugincompat "github.com/ArtisanCloud/PowerX/internal/service/plugin_compat" plugindiag "github.com/ArtisanCloud/PowerX/internal/service/plugin_debug/diagnostics" @@ -68,6 +74,7 @@ import ( pluginsandbox "github.com/ArtisanCloud/PowerX/internal/service/plugin_sandbox" tenantsvc "github.com/ArtisanCloud/PowerX/internal/service/tenant" workflowsvc "github.com/ArtisanCloud/PowerX/internal/service/workflow" + wsbus "github.com/ArtisanCloud/PowerX/internal/transport/websocket/bus" knowledgeworkflow "github.com/ArtisanCloud/PowerX/internal/workflow/knowledge_space" "github.com/ArtisanCloud/PowerX/pkg/cache" auditsvc "github.com/ArtisanCloud/PowerX/pkg/corex/audit" @@ -81,9 +88,12 @@ import ( pluginReleaseRepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/plugin_release" pluginsandboxrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/plugin_sandbox" vectorstorepkg "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/vectorstore" + "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" "github.com/ArtisanCloud/PowerX/pkg/event_bus" + kafkadriver "github.com/ArtisanCloud/PowerX/pkg/event_bus/drivers/kafka" + natsdriver "github.com/ArtisanCloud/PowerX/pkg/event_bus/drivers/nats" + rabbitdriver "github.com/ArtisanCloud/PowerX/pkg/event_bus/drivers/rabbitmq" pxlog "github.com/ArtisanCloud/PowerX/pkg/utils/logger" - 
"github.com/google/uuid" "github.com/redis/go-redis/v9" "gorm.io/gorm" @@ -133,9 +143,10 @@ type Deps struct { AuditSvc auditsvc.Service // 底层批量写库 + sink Auditor auditsvc.Auditor // 门面,兼容 LogAPI/LogRBAC 等调用 - TenantSvc *tenantsvc.TenantService - MediaMgr *mediamgr.MediaManager - MediaSvc *mediasvc.MediaService + TenantSvc *tenantsvc.TenantService + MediaMgr *mediamgr.MediaManager + MediaSvc *mediasvc.MediaService + Notifications *notificationssvc.Service EventBus event_bus.EventBus CapabilityRegistrySvc *capabilityRegistry.Service @@ -282,7 +293,7 @@ func NewDeps(db *gorm.DB, opts *DepsOptions) *Deps { RouterService: routerSvc, }) - eventFabricDeps := newEventFabricDeps(db, opts.EventFabric, bus, svc, tenantSvc) + eventFabricDeps := newEventFabricDeps(db, opts.EventFabric, opts.Queue, bus, svc, tenantSvc) var ( workflowReliable event_bus.ReliableQueue @@ -389,14 +400,15 @@ func NewDeps(db *gorm.DB, opts *DepsOptions) *Deps { } capabilityInvocationSvc = capabilitycatalog.NewInvocationService(capabilitycatalog.InvocationServiceOptions{ - Catalog: capabilityCatalogSvc, - Router: routerSvc, - Audit: capAuditSvc, - Clock: time.Now, - VersionLock: versionLockStore, - HTTPClient: httpProxyClient, - HTTPBaseURL: httpBaseURL, - GRPCConn: invocationGRPCConn, + Catalog: capabilityCatalogSvc, + Router: routerSvc, + Audit: capAuditSvc, + Clock: time.Now, + VersionLock: versionLockStore, + HTTPClient: httpProxyClient, + HTTPBaseURL: httpBaseURL, + GRPCConn: invocationGRPCConn, + ModelVerifier: capabilitycatalog.NewTenantModelKeyVerifier(db), }) var snapshotProvider capabilitycatalog.SnapshotProviderFunc if toolStore != nil { @@ -459,7 +471,7 @@ func NewDeps(db *gorm.DB, opts *DepsOptions) *Deps { } agentLifecycleDeps := newAgentLifecycleDeps(db, opts.AgentLifecycle, bus, svc) - knowledgeDeps := newKnowledgeSpaceDeps(db, opts.KnowledgeSpace, bus, svc) + knowledgeDeps := newKnowledgeSpaceDeps(db, opts.KnowledgeSpace, bus, svc, eventFabricDeps) pluginReleaseCandidateRepo := 
pluginReleaseRepo.NewReleaseCandidateRepository(db) pluginReleasePlanRepo := pluginReleaseRepo.NewReleasePlanRepository(db) @@ -645,6 +657,7 @@ func NewDeps(db *gorm.DB, opts *DepsOptions) *Deps { Auditor: aud, MediaMgr: mediaManager, MediaSvc: mediaSvc, + Notifications: notificationssvc.NewService(db), EventBus: bus, CapabilityRegistrySvc: capRegistrySvc, CapabilityCatalogSvc: capabilityCatalogSvc, @@ -737,24 +750,27 @@ func convertDevHotloadOptions(src DevHotloadOptions) devhotloadservice.Options { // EventFabricDeps 聚合事件骨干运行时依赖。 type EventFabricDeps struct { - RedisClient *redis.Client - EventBus event_bus.EventBus - Config EventFabricRuntimeConfig - Directory *directoryService.DirectoryService - ACL *aclService.ACLService - Enforcer *aclService.ACLEnforcer - Seeder *manifestService.SeedService - BindingStore manifestService.BindingStore - Reliable event_bus.ReliableQueue - Scheduler *deliveryService.BackoffScheduler - Delivery deliveryService.Service - DLQ dlqService.Service - Audit auditService.Service - Replay *replayService.Service - RetryWorker *workers.EventFabricRetryWorker - Metrics eventmetrics.Recorder - Security *security.Verifier - Authorization *AuthorizationDeps + RedisClient *redis.Client + EventBus event_bus.EventBus + Config EventFabricRuntimeConfig + Directory *directoryService.DirectoryService + ACL *aclService.ACLService + Enforcer *aclService.ACLEnforcer + Seeder *manifestService.SeedService + BindingStore manifestService.BindingStore + Reliable event_bus.ReliableQueue + TaskDriver event_bus.TaskDriver + Scheduler *deliveryService.BackoffScheduler + Delivery deliveryService.Service + DLQ dlqService.Service + Audit auditService.Service + Replay *replayService.Service + RetryWorker *workers.EventFabricRetryWorker + CronDispatcherWorker *workers.EventFabricCronDispatcherWorker + NotificationWorker *workers.EventFabricSystemNotificationDispatchWorker + Metrics eventmetrics.Recorder + Security *security.Verifier + Authorization *AuthorizationDeps 
} // WorkflowDeps 聚合工作流域运行时依赖。 @@ -784,6 +800,7 @@ type KnowledgeSpaceDeps struct { Ingestion *knowledgeService.IngestionService Fusion *knowledgeService.FusionService Feedback *knowledgeService.FeedbackService + CorpusCheck *knowledgeService.CorpusCheckService Delta *ksdelta.Service EventHotfix *event_hotfix.Service DecayGuard *decay_guard.Service @@ -794,13 +811,14 @@ type KnowledgeSpaceDeps struct { // KnowledgeSpaceRuntimeConfig 描述运行期常用配置。 type KnowledgeSpaceRuntimeConfig struct { - LockKeyPrefix string - MetricsKeyPrefix string - DefaultRetentionMonths int - ProvisioningSLA time.Duration - IngestionSLA time.Duration - EventTopics KnowledgeSpaceEventTopicsOptions - Notifications KnowledgeSpaceNotificationOptions + LockKeyPrefix string + MetricsKeyPrefix string + DefaultRetentionMonths int + ProvisioningSLA time.Duration + IngestionSLA time.Duration + SceneStrategyCatalogPath string + EventTopics KnowledgeSpaceEventTopicsOptions + Notifications KnowledgeSpaceNotificationOptions } // AgentLifecycleDeps 聚合 Agent 生命周期运行所需依赖。 @@ -841,15 +859,15 @@ type EventFabricRuntimeConfig struct { // AuthorizationDeps 聚合授权域依赖。 type AuthorizationDeps struct { - Service authorizationService.Service - Templates authorizationService.TemplateService - Cache authorizationService.Cache - Dispatcher authorizationService.ChallengeDispatcher - Secrets *authorizationService.SecretsManager - Limiter authorizationService.RateLimiter - Alerts authorizationService.AlertEmitter - Reporting authorizationService.ReportingService - TimeoutWorker *workers.EventFabricAuthorizationTimeoutWorker + Service authorizationService.Service + Templates authorizationService.TemplateService + Cache authorizationService.Cache + Dispatcher authorizationService.ChallengeDispatcher + Secrets *authorizationService.SecretsManager + Limiter authorizationService.RateLimiter + Alerts authorizationService.AlertEmitter + Reporting authorizationService.ReportingService + TimeoutTaskWorker 
*workers.EventFabricAuthorizationTimeoutTaskWorker } // IntegrationGatewayRuntimeConfig 简化运行时常用参数。 @@ -859,7 +877,7 @@ type IntegrationGatewayRuntimeConfig struct { EventTopics IntegrationGatewayEventTopicsOptions } -func newEventFabricDeps(db *gorm.DB, opts EventFabricOptions, bus event_bus.EventBus, auditSvc auditsvc.Service, tenantSvc *tenantsvc.TenantService) *EventFabricDeps { +func newEventFabricDeps(db *gorm.DB, opts EventFabricOptions, queueOpts QueueOptions, bus event_bus.EventBus, auditSvc auditsvc.Service, tenantSvc *tenantsvc.TenantService) *EventFabricDeps { const ( fallbackAckTimeout = 30 * time.Second fallbackDefaultMaxRetry = 5 @@ -907,25 +925,96 @@ func newEventFabricDeps(db *gorm.DB, opts EventFabricOptions, bus event_bus.Even } var reliableQueue event_bus.ReliableQueue + var taskDriver event_bus.TaskDriver var scheduler *deliveryService.BackoffScheduler + retryWorkerFallbackEnabled := false + retryWorkerDriverName := "" if redisClient != nil { reliableQueue = event_bus.NewRedisReliableQueue(redisClient) + redisTaskDriver := event_bus.NewRedisTaskDriver(event_bus.RedisTaskDriverOptions{ + Client: redisClient, + Prefix: "event_fabric:task", + BlockingTimeout: cfg.SchedulerInterval, + ProcessingExpiry: cfg.AckTimeout * 2, + }) + taskDriver = redisTaskDriver + retryWorkerDriverName = string(taskDriver.Type()) + retryWorkerFallbackEnabled = true + switch strings.ToLower(strings.TrimSpace(queueOpts.Driver)) { + case "kafka": + taskDriver = kafkadriver.NewDriver(kafkadriver.DriverOptions{ + Brokers: append([]string{}, queueOpts.Kafka.Brokers...), + TopicPrefix: strings.TrimSpace(queueOpts.Kafka.TopicPrefix), + ConsumerGroup: strings.TrimSpace(queueOpts.Kafka.ConsumerGroup), + PollTimeout: time.Duration(queueOpts.Kafka.PollTimeoutMs) * time.Millisecond, + FallbackDriver: redisTaskDriver, + }) + pxlog.WarnF(context.Background(), "[event_fabric.task_driver] queue.driver=kafka enabled; using kafka adapter with redis fallback") + retryWorkerDriverName = 
string(taskDriver.Type()) + case "rabbitmq": + taskDriver = rabbitdriver.NewDriver(rabbitdriver.DriverOptions{ + URL: strings.TrimSpace(queueOpts.Rabbit.URL), + Exchange: strings.TrimSpace(queueOpts.Rabbit.Exchange), + QueuePrefix: strings.TrimSpace(queueOpts.Rabbit.QueuePrefix), + ConsumerTag: strings.TrimSpace(queueOpts.Rabbit.ConsumerTag), + Prefetch: queueOpts.Rabbit.Prefetch, + PollTimeout: time.Duration(queueOpts.Rabbit.PollTimeoutMs) * time.Millisecond, + FallbackDriver: redisTaskDriver, + }) + pxlog.WarnF(context.Background(), "[event_fabric.task_driver] queue.driver=rabbitmq enabled; using rabbitmq adapter with redis fallback") + retryWorkerDriverName = string(taskDriver.Type()) + case "nats": + taskDriver = natsdriver.NewDriver(natsdriver.DriverOptions{ + URLs: append([]string{}, queueOpts.NATS.URLs...), + SubjectPrefix: strings.TrimSpace(queueOpts.NATS.SubjectPrefix), + QueueGroup: strings.TrimSpace(queueOpts.NATS.QueueGroup), + PollTimeout: time.Duration(queueOpts.NATS.PollTimeoutMs) * time.Millisecond, + FallbackDriver: redisTaskDriver, + }) + pxlog.WarnF(context.Background(), "[event_fabric.task_driver] queue.driver=nats enabled; using nats adapter with redis fallback") + retryWorkerDriverName = string(taskDriver.Type()) + } scheduler = deliveryService.NewBackoffScheduler(reliableQueue) + if metricsRecorder != nil { + capability := taskDriver.Capability() + metricsRecorder.ObserveTaskDriverInit(context.Background(), string(taskDriver.Type()), capability.SupportsBlockingDequeue) + pxlog.InfoF(context.Background(), "[event_fabric.task_driver] initialized driver=%s blocking=%s ack_timeout=%s", + taskDriver.Type(), cfg.SchedulerInterval, cfg.AckTimeout) + } + } + if taskDriver != nil && db != nil { + taskDriver = newTaskHistoryDriver( + taskDriver, + eventfabricrepo.NewTaskHistoryRepository(db), + time.Now, + ) } + topicLookup := eventshared.NewCachedTopicLookup(eventfabricrepo.NewTopicRepository(db), eventshared.CachedTopicLookupOptions{ + Cache: func() 
cache.ICache { + if redisClient == nil { + return nil + } + return cache.NewRedisCache(redisClient) + }(), + TTL: 180 * time.Second, + MissTTL: 30 * time.Second, + }) + + aclSvc := aclService.NewACLService(aclService.Options{ + DB: db, + TopicStore: topicLookup, + Clock: time.Now, + }) directorySvc := directoryService.NewDirectoryService(directoryService.Options{ DB: db, + ACL: aclSvc, EventBus: bus, Clock: time.Now, ActorResolver: func(context.Context) string { return "system" }, DefaultMaxRetry: cfg.DefaultMaxRetry, DefaultAckTimeout: cfg.AckTimeout, }) - - aclSvc := aclService.NewACLService(aclService.Options{ - DB: db, - Clock: time.Now, - }) aclEnforcer := aclService.NewACLEnforcer(aclSvc) bindingRepo := eventfabricrepo.NewManifestBindingRepository(db) @@ -953,7 +1042,7 @@ func newEventFabricDeps(db *gorm.DB, opts EventFabricOptions, bus event_bus.Even Audit: auditSvcEF, Logger: pxlog.GetGlobalLogger(), Clock: time.Now, - Bindings: bindingStore, + Bindings: bindingStore, }) } @@ -961,13 +1050,15 @@ func newEventFabricDeps(db *gorm.DB, opts EventFabricOptions, bus event_bus.Even if scheduler != nil { var err error deliverySvc, err = deliveryService.NewService(deliveryService.Options{ - DB: db, - ACL: aclSvc, - Scheduler: scheduler, - Clock: time.Now, - MaxRetry: cfg.DefaultMaxRetry, - Audit: auditSvcEF, - Metrics: metricsRecorder, + DB: db, + Topics: topicLookup, + ACL: aclSvc, + Scheduler: scheduler, + Clock: time.Now, + MaxRetry: cfg.DefaultMaxRetry, + Audit: auditSvcEF, + Metrics: metricsRecorder, + EnableDatabaseFallbackLookup: retryWorkerFallbackEnabled, }) if err != nil { pxlog.WarnF(context.Background(), "init delivery service failed: %v", err) @@ -995,37 +1086,64 @@ func newEventFabricDeps(db *gorm.DB, opts EventFabricOptions, bus event_bus.Even if deliverySvc != nil { replaySvc = replayService.NewService(replayService.Options{ DB: db, + Topics: topicLookup, + ACL: aclSvc, Delivery: deliverySvc, + History: eventfabricrepo.NewTaskHistoryRepository(db), 
Clock: time.Now, Metrics: metricsRecorder, + Emitter: newReplayTaskWSStatusEmitter(), }) } var retryWorker *workers.EventFabricRetryWorker if deliverySvc != nil && bus != nil { - tenantProvider := func(ctx context.Context) ([]string, error) { - if tenantSvc == nil { - return []string{"global"}, nil - } - items, _, _, err := tenantSvc.List(ctx, tenantsvc.ListTenantsOption{Page: 1, PageSize: 1000}) - if err != nil { - return nil, err - } - keys := make([]string, 0, len(items)+1) - for _, item := range items { - if key := strings.TrimSpace(item.Key); key != "" { - keys = append(keys, key) - } - } - keys = append(keys, "global") - return keys, nil + tenantProvider := newRetryQueueTenantKeyProvider(redisClient, 10*time.Second) + if tenantProvider == nil { + tenantProvider = newCachedTenantKeyProvider(tenantSvc, 30*time.Second) } retryWorker = workers.NewEventFabricRetryWorker(workers.EventFabricRetryWorkerOptions{ - Delivery: deliverySvc, - EventBus: bus, - TenantProvider: tenantProvider, - Interval: cfg.SchedulerInterval, - BatchSize: 100, + Delivery: deliverySvc, + EventBus: bus, + TenantProvider: tenantProvider, + Interval: cfg.SchedulerInterval, + BatchSize: 100, + EnableDBPollingFallback: retryWorkerFallbackEnabled, + DriverName: retryWorkerDriverName, + }) + } + + var cronDispatcherWorker *workers.EventFabricCronDispatcherWorker + if db != nil && taskDriver != nil { + taskRepo := eventfabricrepo.NewScheduledTaskRepository(db) + taskRunRepo := eventfabricrepo.NewScheduledTaskRunRepository(db) + cronDispatcherWorker = workers.NewEventFabricCronDispatcherWorker(workers.EventFabricCronDispatcherWorkerOptions{ + TaskRepository: taskRepo, + TaskRunRepository: taskRunRepo, + TaskDriver: taskDriver, + Scheduler: cronschedulersvc.NewService(), + SubscriberID: eventbus.SubscriberEventFabricCronDispatch, + Topic: "event_fabric.cron.dispatch", + Interval: cfg.SchedulerInterval, + BatchSize: 100, + Logger: pxlog.GetGlobalLogger(), + Clock: time.Now, + }) + } + var 
notificationWorker *workers.EventFabricSystemNotificationDispatchWorker + if taskDriver != nil { + notificationWorker = workers.NewEventFabricSystemNotificationDispatchWorker(workers.EventFabricSystemNotificationDispatchWorkerOptions{ + TaskDriver: taskDriver, + SubscriberID: eventbus.SubscriberSystemNotificationDispatch, + TenantKey: "global", + BatchSize: 100, + WaitTimeout: 3 * time.Second, + RetryDelay: 5 * time.Second, + Publish: func(tenantKey, topic string, payload any, traceID string) { + wsbus.DefaultHub.Publish(tenantKey, topic, payload, traceID) + }, + Logger: pxlog.GetGlobalLogger(), + Clock: time.Now, }) } @@ -1116,61 +1234,63 @@ func newEventFabricDeps(db *gorm.DB, opts EventFabricOptions, bus event_bus.Even Logger: pxlog.GetGlobalLogger(), }) - var timeoutWorker *workers.EventFabricAuthorizationTimeoutWorker - if authService != nil && tenantSvc != nil { - tenantProvider := func(ctx context.Context) ([]uuid.UUID, error) { - items, _, _, err := tenantSvc.List(ctx, tenantsvc.ListTenantsOption{Page: 1, PageSize: 1000}) - if err != nil { - return nil, err - } - ids := make([]uuid.UUID, 0, len(items)) - for _, t := range items { - if t.UUID != uuid.Nil { - ids = append(ids, t.UUID) - } - } - return ids, nil - } - timeoutWorker = workers.NewEventFabricAuthorizationTimeoutWorker(workers.EventFabricAuthorizationTimeoutWorkerOptions{ - Service: authService, - TenantProvider: tenantProvider, - Interval: time.Duration(authCfg.TimeoutSweepIntervalSeconds) * time.Second, - Logger: pxlog.GetGlobalLogger(), + var timeoutTaskWorker *workers.EventFabricAuthorizationTimeoutTaskWorker + if authService != nil && taskDriver != nil && bus != nil { + workers.RegisterAuthorizationChallengeTimeoutTaskScheduler( + bus, + taskDriver, + authCfg.ChallengeTopic, + pxlog.GetGlobalLogger(), + time.Now, + ) + timeoutTaskWorker = workers.NewEventFabricAuthorizationTimeoutTaskWorker(workers.EventFabricAuthorizationTimeoutTaskWorkerOptions{ + Service: authService, + TaskDriver: 
taskDriver, + SubscriberID: eventbus.SubscriberAuthorizationChallengeTime, + TenantKey: "global", + BatchSize: 100, + WaitTimeout: 3 * time.Second, + RetryDelay: 5 * time.Second, + Logger: pxlog.GetGlobalLogger(), + Clock: time.Now, }) } authDeps = &AuthorizationDeps{ - Service: authService, - Templates: templateService, - Cache: cache, - Dispatcher: dispatcher, - Secrets: secretsManager, - Limiter: rateLimiter, - Alerts: alertEmitter, - Reporting: reportingService, - TimeoutWorker: timeoutWorker, + Service: authService, + Templates: templateService, + Cache: cache, + Dispatcher: dispatcher, + Secrets: secretsManager, + Limiter: rateLimiter, + Alerts: alertEmitter, + Reporting: reportingService, + TimeoutTaskWorker: timeoutTaskWorker, } } return &EventFabricDeps{ - RedisClient: redisClient, - EventBus: bus, - Config: cfg, - Directory: directorySvc, - ACL: aclSvc, - Enforcer: aclEnforcer, - Seeder: seedSvc, - BindingStore: bindingStore, - Reliable: reliableQueue, - Scheduler: scheduler, - Delivery: deliverySvc, - DLQ: dlqSvc, - Audit: auditSvcEF, - Replay: replaySvc, - RetryWorker: retryWorker, - Metrics: metricsRecorder, - Security: securityVerifier, - Authorization: authDeps, + RedisClient: redisClient, + EventBus: bus, + Config: cfg, + Directory: directorySvc, + ACL: aclSvc, + Enforcer: aclEnforcer, + Seeder: seedSvc, + BindingStore: bindingStore, + Reliable: reliableQueue, + TaskDriver: taskDriver, + Scheduler: scheduler, + Delivery: deliverySvc, + DLQ: dlqSvc, + Audit: auditSvcEF, + Replay: replaySvc, + RetryWorker: retryWorker, + CronDispatcherWorker: cronDispatcherWorker, + NotificationWorker: notificationWorker, + Metrics: metricsRecorder, + Security: securityVerifier, + Authorization: authDeps, } } @@ -1263,7 +1383,7 @@ func newAgentLifecycleDeps(db *gorm.DB, opts AgentLifecycleOptions, bus event_bu } } -func newKnowledgeSpaceDeps(db *gorm.DB, opts KnowledgeSpaceOptions, bus event_bus.EventBus, auditSvc auditsvc.Service) *KnowledgeSpaceDeps { +func 
newKnowledgeSpaceDeps(db *gorm.DB, opts KnowledgeSpaceOptions, bus event_bus.EventBus, auditSvc auditsvc.Service, eventFabric *EventFabricDeps) *KnowledgeSpaceDeps { var redisClient *redis.Client if addr := strings.TrimSpace(opts.RedisAddr); addr != "" { redisClient = redis.NewClient(&redis.Options{ @@ -1306,13 +1426,14 @@ func newKnowledgeSpaceDeps(db *gorm.DB, opts KnowledgeSpaceOptions, bus event_bu } cfg := KnowledgeSpaceRuntimeConfig{ - LockKeyPrefix: strings.TrimSpace(opts.LockKeyPrefix), - MetricsKeyPrefix: strings.TrimSpace(opts.MetricsKeyPrefix), - DefaultRetentionMonths: opts.DefaultRetentionMonths, - ProvisioningSLA: opts.ProvisioningSLA, - IngestionSLA: opts.IngestionSLA, - EventTopics: opts.EventTopics, - Notifications: opts.Notifications, + LockKeyPrefix: strings.TrimSpace(opts.LockKeyPrefix), + MetricsKeyPrefix: strings.TrimSpace(opts.MetricsKeyPrefix), + DefaultRetentionMonths: opts.DefaultRetentionMonths, + ProvisioningSLA: opts.ProvisioningSLA, + IngestionSLA: opts.IngestionSLA, + SceneStrategyCatalogPath: strings.TrimSpace(opts.SceneStrategyCatalogPath), + EventTopics: opts.EventTopics, + Notifications: opts.Notifications, } if cfg.LockKeyPrefix == "" { @@ -1330,6 +1451,9 @@ func newKnowledgeSpaceDeps(db *gorm.DB, opts KnowledgeSpaceOptions, bus event_bu if cfg.IngestionSLA <= 0 { cfg.IngestionSLA = 4 * time.Hour } + if cfg.SceneStrategyCatalogPath == "" { + cfg.SceneStrategyCatalogPath = "backend/config/knowledge/scene_strategy_catalog.yaml" + } if cfg.EventTopics.Provisioning == "" { cfg.EventTopics.Provisioning = "knowledge.space.provisioning" } @@ -1353,9 +1477,10 @@ func newKnowledgeSpaceDeps(db *gorm.DB, opts KnowledgeSpaceOptions, bus event_bu } serviceCfg := knowledgeService.RuntimeConfig{ - LockKeyPrefix: cfg.LockKeyPrefix, - DefaultRetentionMonths: cfg.DefaultRetentionMonths, - ProvisioningSLA: cfg.ProvisioningSLA, + LockKeyPrefix: cfg.LockKeyPrefix, + DefaultRetentionMonths: cfg.DefaultRetentionMonths, + ProvisioningSLA: 
cfg.ProvisioningSLA, + SceneStrategyCatalogPath: strings.TrimSpace(cfg.SceneStrategyCatalogPath), EventTopics: knowledgeService.EventTopics{ Provisioning: cfg.EventTopics.Provisioning, Ingestion: cfg.EventTopics.Ingestion, @@ -1420,24 +1545,90 @@ func newKnowledgeSpaceDeps(db *gorm.DB, opts KnowledgeSpaceOptions, bus event_bu releaseMetricsWriter := knowledgeinstr.NewReleaseMetricsWriter(releaseReportPath, releaseAggregatePath) decayMetricsWriter := knowledgeinstr.NewDecayMetricsWriter(decayReportPath, decayAggregatePath) + processors := knowledgeService.NewProcessorRegistry() + // config.yaml 显式配置优先于自动探测/环境变量 + if opts.IngestionProcessors.PDFTextAvailable != nil { + processors.SetPDFTextAvailable(*opts.IngestionProcessors.PDFTextAvailable) + } + if opts.IngestionProcessors.OCRAvailable != nil { + processors.SetOCRAvailable(*opts.IngestionProcessors.OCRAvailable) + } + + agentSettingSvc := agentsettings.NewAgentSettingService(db) + + routedVectorStore := vectorStore + if strings.EqualFold(strings.TrimSpace(driverName), vectorstorepkg.DriverPGVector) { + routedVectorStore = knowledgeService.NewRoutedVectorStore(knowledgeService.RoutedVectorStoreOptions{ + DB: db, + BaseDriver: driverName, + BaseStore: vectorStore, + PGVector: opts.VectorStore.PGVector, + }) + } ingestionSvc := knowledgeService.NewIngestionService(knowledgeService.IngestionServiceOptions{ DB: db, Instrumentation: inst, - VectorStore: vectorStore, + VectorStore: routedVectorStore, MetricsWriter: metricsWriter, + Processors: processors, + MaxRetries: 1, + AgentSettings: agentSettingSvc, + VectorDimension: 0, + ProgressPublisher: knowledgeService.IngestionProgressPublisherFunc(func(ctx context.Context, update knowledgeService.IngestionProgressUpdate) { + if strings.TrimSpace(update.TenantUUID) == "" { + return + } + wsbus.DefaultHub.Publish(update.TenantUUID, eventbus.TopicKnowledgeIngestionJob, update, reqctx.GetTraceID(ctx)) + }), }) svc.AttachIngestion(ingestionSvc) fusionSvc := 
knowledgeService.NewFusionService(knowledgeService.FusionServiceOptions{ DB: db, Instrumentation: inst, - VectorStore: vectorStore, + VectorStore: routedVectorStore, + SparseIndex: nil, EventBus: bus, EventTopic: cfg.EventTopics.Fusion, Clock: time.Now, }) - reprocessPipeline := knowledgeworkflow.NewReprocessPipeline(bus, time.Now) + reprocessTopic := cfg.EventTopics.Feedback + ".reprocess" + var reprocessPipeline knowledgeworkflow.ReprocessPipeline + if eventFabric != nil && eventFabric.Delivery != nil && eventFabric.Directory != nil && eventFabric.ACL != nil { + reprocessPipeline = knowledgeworkflow.NewEventFabricReprocessPipeline(knowledgeworkflow.EventFabricReprocessPipelineOptions{ + Delivery: eventFabric.Delivery, + Directory: eventFabric.Directory, + ACL: eventFabric.ACL, + SubscriberID: eventbus.SubscriberKnowledgeSpaceReprocess, + Namespace: cfg.EventTopics.Feedback, + Name: "reprocess", + PayloadFormat: "json", + MaxRetry: int32(eventFabric.Config.DefaultMaxRetry), + AckTimeoutSec: int32(eventFabric.Config.AckTimeout / time.Second), + Clock: time.Now, + }) + knowledgeworkflow.RegisterEventFabricReprocessDispatchHandler(knowledgeworkflow.EventFabricReprocessDispatchHandlerOptions{ + EventBus: bus, + DB: db, + VectorStore: routedVectorStore, + SubscriberID: eventbus.SubscriberKnowledgeSpaceReprocess, + Clock: time.Now, + }) + } else { + reprocessPipeline = knowledgeworkflow.NewReprocessPipeline(knowledgeworkflow.ReprocessPipelineOptions{ + EventBus: bus, + EventTopic: reprocessTopic, + Clock: time.Now, + }) + _ = knowledgeworkflow.NewReprocessWorker(knowledgeworkflow.ReprocessWorkerOptions{ + DB: db, + VectorStore: routedVectorStore, + EventBus: bus, + EventTopic: reprocessTopic, + Clock: time.Now, + }).Start() + } feedbackSvc := knowledgeService.NewFeedbackService(knowledgeService.FeedbackServiceOptions{ DB: db, Instrumentation: inst, @@ -1447,6 +1638,45 @@ func newKnowledgeSpaceDeps(db *gorm.DB, opts KnowledgeSpaceOptions, bus event_bu Clock: time.Now, }) 
+ var corpusCheckPipeline knowledgeworkflow.CorpusCheckPipeline + if eventFabric != nil && eventFabric.Delivery != nil && eventFabric.Directory != nil && eventFabric.ACL != nil { + corpusCheckPipeline = knowledgeworkflow.NewEventFabricCorpusCheckPipeline(knowledgeworkflow.EventFabricCorpusCheckPipelineOptions{ + Delivery: eventFabric.Delivery, + Directory: eventFabric.Directory, + ACL: eventFabric.ACL, + SubscriberID: eventbus.SubscriberKnowledgeSpaceCorpusCheck, + Namespace: "_topic.knowledge.space.corpuscheck", + Name: "run", + PayloadFormat: "json", + MaxRetry: int32(eventFabric.Config.DefaultMaxRetry), + AckTimeoutSec: int32(eventFabric.Config.AckTimeout / time.Second), + Clock: time.Now, + }) + knowledgeworkflow.RegisterEventFabricCorpusCheckDispatchHandler(knowledgeworkflow.EventFabricCorpusCheckDispatchHandlerOptions{ + EventBus: bus, + DB: db, + SubscriberID: eventbus.SubscriberKnowledgeSpaceCorpusCheck, + Clock: time.Now, + }) + } else { + corpusCheckPipeline = knowledgeworkflow.NewCorpusCheckPipeline(knowledgeworkflow.CorpusCheckPipelineOptions{ + EventBus: bus, + EventTopic: "knowledge.corpus_check.run", + Clock: time.Now, + }) + _ = knowledgeworkflow.NewCorpusCheckWorker(knowledgeworkflow.CorpusCheckWorkerOptions{ + DB: db, + EventBus: bus, + EventTopic: "knowledge.corpus_check.run", + Clock: time.Now, + }).Start() + } + corpusCheckSvc := knowledgeService.NewCorpusCheckService(knowledgeService.CorpusCheckServiceOptions{ + DB: db, + Pipeline: corpusCheckPipeline, + Clock: time.Now, + }) + deltaSvc := ksdelta.NewService(ksdelta.Options{ DB: db, Instrumentation: inst, @@ -1462,7 +1692,7 @@ func newKnowledgeSpaceDeps(db *gorm.DB, opts KnowledgeSpaceOptions, bus event_bu qaBridgeSvc := knowledgeqa.NewService(knowledgeqa.Options{ DB: db, Instrumentation: inst, - VectorStore: vectorStore, + VectorStore: routedVectorStore, SnapshotStore: snapshotStore, ToolRegistry: toolRegistry, Guard: guard, @@ -1472,6 +1702,7 @@ func newKnowledgeSpaceDeps(db *gorm.DB, opts 
KnowledgeSpaceOptions, bus event_bu agentNotifier := event_hotfix.NewAgentNotifier(opts.EventHotfix.AgentMatrixPath) eventHotfixSvc := event_hotfix.NewService(event_hotfix.Options{ + DB: db, Instrumentation: inst, EventBus: bus, MetricsWriter: eventMetricsWriter, @@ -1480,6 +1711,7 @@ func newKnowledgeSpaceDeps(db *gorm.DB, opts KnowledgeSpaceOptions, bus event_bu ReportPath: eventReportPath, Clock: time.Now, RetryMax: opts.EventHotfix.RetryMax, + ReplayWindow: opts.EventHotfix.ReplayWindow, }) releaseSvc := tenant_release.NewService(tenant_release.Options{ DB: db, @@ -1492,6 +1724,7 @@ func newKnowledgeSpaceDeps(db *gorm.DB, opts KnowledgeSpaceOptions, bus event_bu Instrumentation: inst, MetricsWriter: decayMetricsWriter, ThresholdsPath: opts.Decay.ThresholdPath, + EventBus: bus, Clock: time.Now, }) @@ -1504,11 +1737,12 @@ func newKnowledgeSpaceDeps(db *gorm.DB, opts KnowledgeSpaceOptions, bus event_bu Ingestion: ingestionSvc, Fusion: fusionSvc, Feedback: feedbackSvc, + CorpusCheck: corpusCheckSvc, Delta: deltaSvc, EventHotfix: eventHotfixSvc, DecayGuard: decaySvc, Release: releaseSvc, - VectorStore: vectorStore, + VectorStore: routedVectorStore, QABridge: qaBridgeSvc, } } @@ -1586,3 +1820,106 @@ func newIntegrationGatewayDeps(db *gorm.DB, opts IntegrationGatewayOptions, bus Instrumentation: inst, } } + +func newRetryQueueTenantKeyProvider(redisClient *redis.Client, ttl time.Duration) func(context.Context) ([]string, error) { + if redisClient == nil { + return nil + } + if ttl <= 0 { + ttl = 10 * time.Second + } + const retryKeyPrefix = "event:retry:" + var mu sync.Mutex + var cached []string + var expireAt time.Time + + return func(ctx context.Context) ([]string, error) { + now := time.Now() + forceRefresh := workers.RetryTenantProviderBypassCache(ctx) + mu.Lock() + if !forceRefresh && now.Before(expireAt) && len(cached) > 0 { + out := append([]string(nil), cached...) 
+ mu.Unlock() + return out, nil + } + mu.Unlock() + + tenants := make(map[string]struct{}, 8) + cursor := uint64(0) + for { + keys, nextCursor, err := redisClient.Scan(ctx, cursor, retryKeyPrefix+"*", 200).Result() + if err != nil { + return nil, err + } + for _, key := range keys { + tenant := strings.TrimSpace(strings.TrimPrefix(key, retryKeyPrefix)) + if tenant != "" { + tenants[tenant] = struct{}{} + } + } + cursor = nextCursor + if cursor == 0 { + break + } + } + + filtered := make([]string, 0, len(tenants)+1) + for tenant := range tenants { + filtered = append(filtered, tenant) + } + if len(filtered) == 0 { + filtered = append(filtered, "global") + } + + mu.Lock() + cached = append([]string(nil), filtered...) + expireAt = time.Now().Add(ttl) + mu.Unlock() + + return filtered, nil + } +} + +func newCachedTenantKeyProvider(tenantSvc *tenantsvc.TenantService, ttl time.Duration) func(context.Context) ([]string, error) { + if ttl <= 0 { + ttl = 30 * time.Second + } + var mu sync.Mutex + var cached []string + var expireAt time.Time + + return func(ctx context.Context) ([]string, error) { + if tenantSvc == nil { + return []string{"global"}, nil + } + + now := time.Now() + forceRefresh := workers.RetryTenantProviderBypassCache(ctx) + mu.Lock() + if !forceRefresh && now.Before(expireAt) && len(cached) > 0 { + out := append([]string(nil), cached...) + mu.Unlock() + return out, nil + } + mu.Unlock() + + keys, err := tenantSvc.Repo.ListActiveKeys(ctx) + if err != nil { + return nil, err + } + filtered := make([]string, 0, len(keys)+1) + for _, key := range keys { + if key = strings.TrimSpace(key); key != "" { + filtered = append(filtered, key) + } + } + filtered = append(filtered, "global") + + mu.Lock() + cached = append([]string(nil), filtered...) 
+ expireAt = time.Now().Add(ttl) + mu.Unlock() + + return filtered, nil + } +} diff --git a/backend/internal/app/shared/options.go b/backend/internal/app/shared/options.go index e96d2b31..689f9096 100644 --- a/backend/internal/app/shared/options.go +++ b/backend/internal/app/shared/options.go @@ -19,6 +19,7 @@ type DepsOptions struct { AuthCustomer auth.AuthOptions // 给客户/插件端的 Audience Audit auditsvc.AuditOptions // 批量大小、等待等 Storage mediasvc.StorageOptions + Queue QueueOptions // 以后需要别的也放在这里(如默认租户、开关等) EventFabric EventFabricOptions Workflow WorkflowOptions @@ -33,6 +34,40 @@ type DepsOptions struct { Server ServerOptions } +// QueueOptions 描述任务驱动的配置入口。 +type QueueOptions struct { + Driver string + Kafka QueueKafkaOptions + Rabbit QueueRabbitMQOptions + NATS QueueNATSOptions +} + +// QueueKafkaOptions 描述 Kafka 驱动连接参数。 +type QueueKafkaOptions struct { + Brokers []string + TopicPrefix string + ConsumerGroup string + PollTimeoutMs int +} + +// QueueRabbitMQOptions 描述 RabbitMQ 驱动连接参数。 +type QueueRabbitMQOptions struct { + URL string + Exchange string + QueuePrefix string + ConsumerTag string + Prefetch int + PollTimeoutMs int +} + +// QueueNATSOptions 描述 NATS 驱动连接参数。 +type QueueNATSOptions struct { + URLs []string + SubjectPrefix string + QueueGroup string + PollTimeoutMs int +} + type ServerOptions struct { GRPC GRPCServerOptions } @@ -151,22 +186,30 @@ type AgentLifecycleNotificationOptions struct { // KnowledgeSpaceOptions 描述知识空间域依赖。 type KnowledgeSpaceOptions struct { - RedisAddr string - RedisPassword string - RedisDB int - LockKeyPrefix string - MetricsKeyPrefix string - DefaultRetentionMonths int - ProvisioningSLA time.Duration - IngestionSLA time.Duration - EventTopics KnowledgeSpaceEventTopicsOptions - Notifications KnowledgeSpaceNotificationOptions - VectorStore KnowledgeSpaceVectorStoreOptions - Delta KnowledgeSpaceDeltaOptions - Reports KnowledgeSpaceReportOptions - EventHotfix KnowledgeSpaceEventHotfixOptions - Decay KnowledgeSpaceDecayOptions - Release 
KnowledgeSpaceReleaseOptions + RedisAddr string + RedisPassword string + RedisDB int + LockKeyPrefix string + MetricsKeyPrefix string + DefaultRetentionMonths int + ProvisioningSLA time.Duration + IngestionSLA time.Duration + SceneStrategyCatalogPath string + IngestionProcessors KnowledgeSpaceIngestionProcessorOptions + EventTopics KnowledgeSpaceEventTopicsOptions + Notifications KnowledgeSpaceNotificationOptions + VectorStore KnowledgeSpaceVectorStoreOptions + Delta KnowledgeSpaceDeltaOptions + Reports KnowledgeSpaceReportOptions + EventHotfix KnowledgeSpaceEventHotfixOptions + Decay KnowledgeSpaceDecayOptions + Release KnowledgeSpaceReleaseOptions +} + +// KnowledgeSpaceIngestionProcessorOptions 控制入库处理器能力开关(nil 表示自动探测)。 +type KnowledgeSpaceIngestionProcessorOptions struct { + PDFTextAvailable *bool + OCRAvailable *bool } type KnowledgeSpaceEventTopicsOptions struct { diff --git a/backend/internal/app/shared/workers/event_fabric_authorization_timeout.go b/backend/internal/app/shared/workers/event_fabric_authorization_timeout.go deleted file mode 100644 index a09fd3ec..00000000 --- a/backend/internal/app/shared/workers/event_fabric_authorization_timeout.go +++ /dev/null @@ -1,105 +0,0 @@ -package workers - -import ( - "context" - "time" - - authsvc "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/authorization" - "github.com/google/uuid" - - pxlog "github.com/ArtisanCloud/PowerX/pkg/utils/logger" -) - -// TenantUUIDProvider 返回需要扫描 Challenge 超时的租户列表。 -type TenantUUIDProvider func(ctx context.Context) ([]uuid.UUID, error) - -// EventFabricAuthorizationTimeoutWorkerOptions 控制 Challenge 超时 worker。 -type EventFabricAuthorizationTimeoutWorkerOptions struct { - Service authsvc.Service - TenantProvider TenantUUIDProvider - Interval time.Duration - Logger *pxlog.Logger - Clock func() time.Time -} - -// EventFabricAuthorizationTimeoutWorker 负责周期检查 Challenge 超时并触发处理。 -type EventFabricAuthorizationTimeoutWorker struct { - service authsvc.Service - tenantProvider 
TenantUUIDProvider - interval time.Duration - logger *pxlog.Logger - clock func() time.Time -} - -// NewEventFabricAuthorizationTimeoutWorker 构建 Challenge 超时 worker。 -func NewEventFabricAuthorizationTimeoutWorker(opts EventFabricAuthorizationTimeoutWorkerOptions) *EventFabricAuthorizationTimeoutWorker { - logger := opts.Logger - if logger == nil { - logger = pxlog.GetGlobalLogger() - } - interval := opts.Interval - if interval <= 0 { - interval = 30 * time.Second - } - clock := opts.Clock - if clock == nil { - clock = time.Now - } - return &EventFabricAuthorizationTimeoutWorker{ - service: opts.Service, - tenantProvider: opts.TenantProvider, - interval: interval, - logger: logger, - clock: clock, - } -} - -// Run 启动 worker。 -func (w *EventFabricAuthorizationTimeoutWorker) Run(ctx context.Context) { - if w == nil || w.service == nil || w.tenantProvider == nil { - return - } - ticker := time.NewTicker(w.interval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - default: - w.sweep(ctx) - } - - select { - case <-ctx.Done(): - return - case <-ticker.C: - } - } -} - -func (w *EventFabricAuthorizationTimeoutWorker) sweep(ctx context.Context) { - tenantIDs, err := w.tenantProvider(ctx) - if err != nil { - w.logger.WarnF(ctx, "[authorization.timeout] tenant provider failed: %v", err) - return - } - if len(tenantIDs) == 0 { - return - } - - now := w.clock().UTC() - for _, tenantID := range tenantIDs { - if tenantID == uuid.Nil { - continue - } - count, err := w.service.ProcessExpiredChallenges(ctx, tenantID, now) - if err != nil && err != authsvc.ErrOperationUnsupported { - w.logger.WarnF(ctx, "[authorization.timeout] process tenant=%s err=%v", tenantID, err) - continue - } - if count > 0 { - w.logger.InfoF(ctx, "[authorization.timeout] processed %d tickets for tenant=%s", count, tenantID) - } - } -} diff --git a/backend/internal/app/shared/workers/event_fabric_retry.go b/backend/internal/app/shared/workers/event_fabric_retry.go index 
f3e818ec..c01cf7e4 100644 --- a/backend/internal/app/shared/workers/event_fabric_retry.go +++ b/backend/internal/app/shared/workers/event_fabric_retry.go @@ -4,6 +4,7 @@ import ( "context" "errors" "strings" + "sync/atomic" "time" "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/delivery" @@ -17,21 +18,38 @@ type TenantProvider func(ctx context.Context) ([]string, error) // EventFabricRetryWorkerOptions 配置重试 Worker。 type EventFabricRetryWorkerOptions struct { - Delivery delivery.Service - EventBus event_bus.EventBus - TenantProvider TenantProvider - Interval time.Duration - BatchSize int + Delivery delivery.Service + EventBus event_bus.EventBus + TenantProvider TenantProvider + Interval time.Duration + BatchSize int + EnableDBPollingFallback bool + DriverName string } // EventFabricRetryWorker 周期拉取重试队列,将事件分发到内部总线。 type EventFabricRetryWorker struct { - delivery delivery.Service - eventBus event_bus.EventBus - tenantProvider TenantProvider - interval time.Duration - batchSize int - logger *pxlog.Logger + delivery delivery.Service + eventBus event_bus.EventBus + tenantProvider TenantProvider + interval time.Duration + batchSize int + logger *pxlog.Logger + enableDBPollingFallback bool + driverName string + paused atomic.Bool +} + +type retryTenantProviderBypassCacheKey struct{} + +// RetryTenantProviderBypassCache 标记当前上下文是否需要跳过租户缓存。 +func RetryTenantProviderBypassCache(ctx context.Context) bool { + if ctx == nil { + return false + } + raw := ctx.Value(retryTenantProviderBypassCacheKey{}) + flag, ok := raw.(bool) + return ok && flag } // RetryDispatchEvent 是 worker 推送到 EventBus 的事件载荷。 @@ -63,12 +81,14 @@ func NewEventFabricRetryWorker(opts EventFabricRetryWorkerOptions) *EventFabricR batchSize = 50 } return &EventFabricRetryWorker{ - delivery: opts.Delivery, - eventBus: opts.EventBus, - tenantProvider: opts.TenantProvider, - interval: interval, - batchSize: batchSize, - logger: pxlog.GetGlobalLogger(), + delivery: opts.Delivery, + eventBus: opts.EventBus, 
+ tenantProvider: opts.TenantProvider, + interval: interval, + batchSize: batchSize, + logger: pxlog.GetGlobalLogger(), + enableDBPollingFallback: opts.EnableDBPollingFallback, + driverName: strings.TrimSpace(opts.DriverName), } } @@ -77,6 +97,10 @@ func (w *EventFabricRetryWorker) Run(ctx context.Context) { if w.delivery == nil || w.eventBus == nil || w.tenantProvider == nil { return } + if !w.enableDBPollingFallback { + w.logger.InfoF(ctx, "[event_fabric.retry] skip db polling fallback driver=%s reason=fallback_disabled", w.driverName) + return + } ticker := time.NewTicker(w.interval) defer ticker.Stop() @@ -86,7 +110,9 @@ func (w *EventFabricRetryWorker) Run(ctx context.Context) { case <-ctx.Done(): return default: - w.flush(ctx) + if !w.paused.Load() { + w.flush(ctx) + } } select { @@ -97,6 +123,49 @@ func (w *EventFabricRetryWorker) Run(ctx context.Context) { } } +func (w *EventFabricRetryWorker) Pause() { + if w == nil { + return + } + w.paused.Store(true) +} + +func (w *EventFabricRetryWorker) Resume() { + if w == nil { + return + } + w.paused.Store(false) +} + +func (w *EventFabricRetryWorker) IsPaused() bool { + if w == nil { + return false + } + return w.paused.Load() +} + +func (w *EventFabricRetryWorker) TriggerNow(ctx context.Context) { + if w == nil || w.delivery == nil || w.eventBus == nil || w.tenantProvider == nil { + return + } + ctx = context.WithValue(ctx, retryTenantProviderBypassCacheKey{}, true) + w.flush(ctx) +} + +func (w *EventFabricRetryWorker) Interval() time.Duration { + if w == nil { + return 0 + } + return w.interval +} + +func (w *EventFabricRetryWorker) BatchSize() int { + if w == nil { + return 0 + } + return w.batchSize +} + func (w *EventFabricRetryWorker) flush(ctx context.Context) { tenantKeys, err := w.tenantProvider(ctx) if err != nil { @@ -117,6 +186,8 @@ func (w *EventFabricRetryWorker) flush(ctx context.Context) { func (w *EventFabricRetryWorker) drainTenant(ctx context.Context, tenantKey string) { pollCtx := 
context.WithValue(ctx, sharedsvc.ContextTenantKey, tenantKey) + pollCtx = context.WithValue(pollCtx, sharedsvc.ContextCompatibilityMode, "any") + pollCtx = context.WithValue(pollCtx, sharedsvc.ContextAcceptedVersions, []string{"v1"}) for { events, err := w.delivery.PollRetry(pollCtx, w.batchSize) diff --git a/backend/internal/bootstrap/app.go b/backend/internal/bootstrap/app.go index f499600f..2bd011a1 100644 --- a/backend/internal/bootstrap/app.go +++ b/backend/internal/bootstrap/app.go @@ -26,6 +26,25 @@ import ( "github.com/ArtisanCloud/PowerX/pkg/utils/logger" ) +func composePostgresDSNFromDB(driver string, host string, port int, user string, password string, database string, sslMode string, timezone string) string { + if strings.TrimSpace(host) == "" { + return "" + } + if strings.TrimSpace(sslMode) == "" { + sslMode = "disable" + } + if strings.TrimSpace(timezone) == "" { + timezone = "UTC" + } + if driver != "" && !strings.EqualFold(strings.TrimSpace(driver), "postgres") && !strings.EqualFold(strings.TrimSpace(driver), "pg") { + return "" + } + return fmt.Sprintf( + "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s TimeZone=%s", + host, port, user, password, database, sslMode, timezone, + ) +} + func BootstrapApp(ctx context.Context, cfg *config.Config) (*shared.Deps, error) { // 初始化全局 Logger @@ -101,6 +120,16 @@ func BootstrapApp(ctx context.Context, cfg *config.Config) (*shared.Deps, error) if localTokenSecret == "" { logger.WarnF(ctx, "storage.local.upload_token_secret 未配置,本地上传端点将跳过 Token 校验,不建议在生产环境使用") } + publicTokenSecret := strings.TrimSpace(cfg.Storage.Local.PublicTokenSecret) + if publicTokenSecret == "" { + // 兼容:未显式配置时,优先复用 upload_token_secret,其次回退到 JWTSecret(避免本地环境“复制下载链接”不可用)。 + if localTokenSecret != "" { + publicTokenSecret = localTokenSecret + } else if strings.TrimSpace(cfg.Auth.JWTSecret) != "" { + publicTokenSecret = strings.TrimSpace(cfg.Auth.JWTSecret) + logger.WarnF(ctx, "storage.local.public_token_secret 未配置,已回退使用 
auth.jwt_secret 作为公开下载 token 密钥(建议在生产环境显式配置 public_token_secret)") + } + } maxUploadSize := cfg.Storage.Local.MaxUploadSizeBytes if maxUploadSize < 0 { maxUploadSize = 0 @@ -175,6 +204,7 @@ func BootstrapApp(ctx context.Context, cfg *config.Config) (*shared.Deps, error) BasePath: cfg.Storage.Local.BasePath, PublicBaseURL: cfg.Storage.Local.PublicBaseURL, UploadTokenSecret: localTokenSecret, + PublicTokenSecret: publicTokenSecret, MaxUploadSizeBytes: maxUploadSize, }, S3: mediasvc.StorageS3Options{ @@ -190,6 +220,29 @@ func BootstrapApp(ctx context.Context, cfg *config.Config) (*shared.Deps, error) PresignEndpoint: cfg.Storage.S3.PresignEndpoint, }, }, + Queue: shared.QueueOptions{ + Driver: cfg.Queue.Driver, + Kafka: shared.QueueKafkaOptions{ + Brokers: append([]string{}, cfg.Queue.Kafka.Brokers...), + TopicPrefix: cfg.Queue.Kafka.TopicPrefix, + ConsumerGroup: cfg.Queue.Kafka.ConsumerGroup, + PollTimeoutMs: cfg.Queue.Kafka.PollTimeoutMs, + }, + Rabbit: shared.QueueRabbitMQOptions{ + URL: cfg.Queue.Rabbit.URL, + Exchange: cfg.Queue.Rabbit.Exchange, + QueuePrefix: cfg.Queue.Rabbit.QueuePrefix, + ConsumerTag: cfg.Queue.Rabbit.ConsumerTag, + Prefetch: cfg.Queue.Rabbit.Prefetch, + PollTimeoutMs: cfg.Queue.Rabbit.PollTimeoutMs, + }, + NATS: shared.QueueNATSOptions{ + URLs: append([]string{}, cfg.Queue.NATS.URLs...), + SubjectPrefix: cfg.Queue.NATS.SubjectPrefix, + QueueGroup: cfg.Queue.NATS.QueueGroup, + PollTimeoutMs: cfg.Queue.NATS.PollTimeoutMs, + }, + }, EventFabric: shared.EventFabricOptions{ AckTimeoutSeconds: cfg.Event.Fabric.AckTimeoutSeconds, DefaultMaxRetry: cfg.Event.Fabric.DefaultMaxRetry, @@ -289,14 +342,19 @@ func BootstrapApp(ctx context.Context, cfg *config.Config) (*shared.Deps, error) }, }, KnowledgeSpace: shared.KnowledgeSpaceOptions{ - RedisAddr: knowledgeRedisAddr, - RedisPassword: knowledgeRedisPassword, - RedisDB: knowledgeRedisDB, - LockKeyPrefix: cfg.KnowledgeSpace.LockKeyPrefix, - MetricsKeyPrefix: cfg.KnowledgeSpace.MetricsKeyPrefix, - 
DefaultRetentionMonths: cfg.KnowledgeSpace.DefaultRetentionMonths, - ProvisioningSLA: time.Duration(cfg.KnowledgeSpace.ProvisioningSLASeconds) * time.Second, - IngestionSLA: time.Duration(cfg.KnowledgeSpace.IngestionSLASeconds) * time.Second, + RedisAddr: knowledgeRedisAddr, + RedisPassword: knowledgeRedisPassword, + RedisDB: knowledgeRedisDB, + LockKeyPrefix: cfg.KnowledgeSpace.LockKeyPrefix, + MetricsKeyPrefix: cfg.KnowledgeSpace.MetricsKeyPrefix, + DefaultRetentionMonths: cfg.KnowledgeSpace.DefaultRetentionMonths, + ProvisioningSLA: time.Duration(cfg.KnowledgeSpace.ProvisioningSLASeconds) * time.Second, + IngestionSLA: time.Duration(cfg.KnowledgeSpace.IngestionSLASeconds) * time.Second, + SceneStrategyCatalogPath: cfg.KnowledgeSpace.SceneStrategyCatalogPath, + IngestionProcessors: shared.KnowledgeSpaceIngestionProcessorOptions{ + PDFTextAvailable: cfg.KnowledgeSpace.IngestionProcessors.PDFTextAvailable, + OCRAvailable: cfg.KnowledgeSpace.IngestionProcessors.OCRAvailable, + }, EventTopics: shared.KnowledgeSpaceEventTopicsOptions{ Provisioning: cfg.KnowledgeSpace.EventTopics.Provisioning, Ingestion: cfg.KnowledgeSpace.EventTopics.Ingestion, @@ -312,7 +370,26 @@ func BootstrapApp(ctx context.Context, cfg *config.Config) (*shared.Deps, error) VectorStore: shared.KnowledgeSpaceVectorStoreOptions{ Driver: cfg.KnowledgeSpace.VectorStore.Driver, PGVector: pgvectorcfg.Config{ - DSN: cfg.KnowledgeSpace.VectorStore.PgVector.DSN, + DSN: func() string { + dsn := strings.TrimSpace(cfg.KnowledgeSpace.VectorStore.PgVector.DSN) + if dsn != "" { + return dsn + } + dsn = strings.TrimSpace(cfg.Database.DSN) + if dsn != "" { + return dsn + } + return composePostgresDSNFromDB( + cfg.Database.Driver, + cfg.Database.Host, + cfg.Database.Port, + cfg.Database.UserName, + cfg.Database.Password, + cfg.Database.Database, + cfg.Database.SSLMode, + cfg.Database.Timezone, + ) + }(), Schema: cfg.KnowledgeSpace.VectorStore.PgVector.Schema, Table: cfg.KnowledgeSpace.VectorStore.PgVector.Table, 
Dimensions: cfg.KnowledgeSpace.VectorStore.PgVector.Dimensions, diff --git a/backend/internal/bootstrap/plugin.go b/backend/internal/bootstrap/plugin.go index 697745e0..13ef755d 100644 --- a/backend/internal/bootstrap/plugin.go +++ b/backend/internal/bootstrap/plugin.go @@ -83,6 +83,9 @@ func BootstrapPlugin(ctx context.Context, deps *shared.Deps, cfg *config.Config, Registry: pmimpl.NewJSONRegistry(registryFile), HTTP: dr, Supervisor: sup, + PostInstallManifest: func(ctx context.Context, manifest pm.Manifest) error { + return syncPluginManifestPermissions(ctx, deps.DB, manifest) + }, PostEnable: func(ctx context.Context, tenantUUID, pluginID string) error { svc := setting.NewPluginInstanceConfigService(deps) diff --git a/backend/internal/bootstrap/plugin_permission_sync.go b/backend/internal/bootstrap/plugin_permission_sync.go new file mode 100644 index 00000000..1ce9672d --- /dev/null +++ b/backend/internal/bootstrap/plugin_permission_sync.go @@ -0,0 +1,128 @@ +package bootstrap + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + iamsvc "github.com/ArtisanCloud/PowerX/internal/service/iam" + modelsiam "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/iam" + "github.com/ArtisanCloud/PowerX/pkg/plugin_mgr" + "gorm.io/gorm" +) + +func syncPluginManifestPermissions(ctx context.Context, db *gorm.DB, manifest plugin_mgr.Manifest) error { + if db == nil { + return nil + } + rows := buildPluginPermissionRows(manifest) + if len(rows) == 0 { + return nil + } + service := iamsvc.NewPermissionService(db) + source := "plugin:" + strings.TrimSpace(manifest.ID) + _, err := service.SyncPermissions(ctx, source, strings.TrimSpace(manifest.Version), rows, false) + if err != nil { + return fmt.Errorf("sync plugin permissions failed: %w", err) + } + return nil +} + +func buildPluginPermissionRows(manifest plugin_mgr.Manifest) []modelsiam.Permission { + pluginID := strings.TrimSpace(manifest.ID) + if pluginID == "" { + return nil + } + specs := 
manifest.Permissions + if len(specs) == 0 && len(manifest.RBAC.Resources) > 0 { + specs = make([]plugin_mgr.PermissionSpec, 0, len(manifest.RBAC.Resources)) + for _, item := range manifest.RBAC.Resources { + specs = append(specs, plugin_mgr.PermissionSpec{ + Resource: item.Resource, + Actions: item.Actions, + Module: pluginID, + Type: "action", + }) + } + } + if len(specs) == 0 { + return nil + } + seen := make(map[string]struct{}) + out := make([]modelsiam.Permission, 0, len(specs)*2) + for _, spec := range specs { + resource := strings.TrimSpace(spec.Resource) + if resource == "" { + continue + } + actions := dedupeNonEmpty(spec.Actions) + for _, action := range actions { + key := pluginID + "|" + resource + "|" + action + if _, exists := seen[key]; exists { + continue + } + seen[key] = struct{}{} + meta := map[string]any{ + "label": firstNonEmpty(strings.TrimSpace(spec.Label), fmt.Sprintf("%s.%s.%s", pluginID, resource, action)), + "module": firstNonEmpty(strings.TrimSpace(spec.Module), pluginID), + "type": firstNonEmpty(strings.TrimSpace(spec.Type), "action"), + } + allowAPIKey := true + if spec.APIKey != nil { + meta["api_key"] = map[string]any{ + "scope": strings.TrimSpace(spec.APIKey.Scope), + "action": firstNonEmpty(strings.TrimSpace(spec.APIKey.Action), action), + "resource_type": strings.TrimSpace(spec.APIKey.ResourceType), + "resource_pattern": strings.TrimSpace(spec.APIKey.ResourcePattern), + "plugin_id": strings.TrimSpace(spec.APIKey.PluginID), + "effect": firstNonEmpty(strings.TrimSpace(spec.APIKey.Effect), "allow"), + } + allowAPIKey = true + } + metaBytes, _ := json.Marshal(meta) + out = append(out, modelsiam.Permission{ + Module: pluginID, + Resource: resource, + Action: action, + Effect: "allow", + Description: strings.TrimSpace(spec.Description), + AllowAPIKey: allowAPIKey, + Meta: metaBytes, + Status: modelsiam.PermissionStatusActive, + Source: "plugin:" + pluginID, + Introduced: strings.TrimSpace(manifest.Version), + }) + } + } + return out +} 
+ +func dedupeNonEmpty(items []string) []string { + if len(items) == 0 { + return nil + } + seen := map[string]struct{}{} + out := make([]string, 0, len(items)) + for _, item := range items { + normalized := strings.TrimSpace(item) + if normalized == "" { + continue + } + if _, ok := seen[normalized]; ok { + continue + } + seen[normalized] = struct{}{} + out = append(out, normalized) + } + return out +} + +func firstNonEmpty(items ...string) string { + for _, item := range items { + if strings.TrimSpace(item) != "" { + return item + } + } + return "" +} diff --git a/backend/internal/event_bus/subscribers.go b/backend/internal/event_bus/subscribers.go new file mode 100644 index 00000000..8f777e90 --- /dev/null +++ b/backend/internal/event_bus/subscribers.go @@ -0,0 +1,10 @@ +package eventbus + +const ( + SubscriberEventFabricCronDispatch = "_subscriber.event_fabric.cron_dispatch" + SubscriberEventFabricReplay = "_subscriber.event_fabric.replay" + SubscriberKnowledgeSpaceReprocess = "_subscriber.knowledge_space.reprocess" + SubscriberKnowledgeSpaceCorpusCheck = "_subscriber.knowledge_space.corpus_check" + SubscriberAuthorizationChallengeTime = "_subscriber.authorization.challenge_timeout" + SubscriberSystemNotificationDispatch = "_subscriber.system.notification_dispatch" +) diff --git a/backend/internal/event_bus/topics.go b/backend/internal/event_bus/topics.go new file mode 100644 index 00000000..5a992b88 --- /dev/null +++ b/backend/internal/event_bus/topics.go @@ -0,0 +1,23 @@ +package eventbus + +const ( + TopicKnowledgeIngestionJob = "_topic.knowledge.ingestion.job" + TopicKnowledgeCorpusCheckJob = "_topic.knowledge.corpus_check.job" + TopicKnowledgeFeedbackReprocess = "_topic.knowledge.space.feedback.reprocess" + TopicSystemNotification = "_topic.system.notification" + + TopicIntegrationGatewayRouteCreated = "_topic.integration.gateway.route.created" + TopicIntegrationGatewayRouteUpdated = "_topic.integration.gateway.route.updated" + + 
TopicIntegrationGatewayInvocationSucceeded = "_topic.integration.gateway.invocation.succeeded" + TopicIntegrationGatewayInvocationFailed = "_topic.integration.gateway.invocation.failed" + TopicIntegrationGatewayInvocationFallback = "_topic.integration.gateway.invocation.fallback" + + TopicCapabilityCatalogSyncStarted = "_topic.capability.catalog.sync_started" + TopicCapabilityCatalogSyncSucceeded = "_topic.capability.catalog.sync_succeeded" + TopicCapabilityCatalogSyncFailed = "_topic.capability.catalog.sync_failed" + + TopicCapabilityPolicyDegraded = "_topic.capability.policy.degraded" + + NotificationKindEventFabricReplayTask = "_kind.event_fabric.replay.task" +) diff --git a/backend/internal/eventbus/topics.go b/backend/internal/eventbus/topics.go deleted file mode 100644 index 0ba50d10..00000000 --- a/backend/internal/eventbus/topics.go +++ /dev/null @@ -1,22 +0,0 @@ -package eventbus - -// Canonical event topics shared by Integration Gateway, Capability Registry and Agent Hub. -// 按 spec requirement(FR-005/FR-011)统一罗列,便于事件生产者/消费者引用。 -const ( - // Integration Gateway lifecycle topics. - TopicIntegrationGatewayRouteCreated = "integration.gateway.route.created" - TopicIntegrationGatewayRouteUpdated = "integration.gateway.route.updated" - - // Integration Gateway invocation topics. - TopicIntegrationGatewayInvocationSucceeded = "integration.gateway.invocation.succeeded" - TopicIntegrationGatewayInvocationFailed = "integration.gateway.invocation.failed" - TopicIntegrationGatewayInvocationFallback = "integration.gateway.invocation.fallback" - - // Capability Catalog sync topics. - TopicCapabilityCatalogSyncStarted = "capability.catalog.sync_started" - TopicCapabilityCatalogSyncSucceeded = "capability.catalog.sync_succeeded" - TopicCapabilityCatalogSyncFailed = "capability.catalog.sync_failed" - - // Capability policy governance topics. 
- TopicCapabilityPolicyDegraded = "capability.policy.degraded" -) diff --git a/backend/internal/http/auth_subject_validator.go b/backend/internal/http/auth_subject_validator.go new file mode 100644 index 00000000..e158d6e9 --- /dev/null +++ b/backend/internal/http/auth_subject_validator.go @@ -0,0 +1,288 @@ +package http + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + "sync" + "time" + + tenantmodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/tenant" + iamrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/iam" + tenantrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/tenant" + "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" + "gorm.io/gorm" + + "github.com/ArtisanCloud/PowerX/pkg/auth/middleware" + "github.com/ArtisanCloud/PowerX/pkg/cache" +) + +const authSubjectCacheTTL = 60 * time.Second + +var authCacheCallbackOnce sync.Once + +type userSnapshot struct { + Status int16 `json:"status"` +} + +type memberSnapshot struct { + Status int16 `json:"status"` + UserID uint64 `json:"user_id"` + TenantUUID string `json:"tenant_uuid"` +} + +type tenantSnapshot struct { + Status int16 `json:"status"` +} + +func buildJWTSubjectValidationCallback(db *gorm.DB) func(ctx context.Context, claims *reqctx.CoreXClaims) error { + if db == nil { + return nil + } + userRepo := iamrepo.NewUserRepository(db) + memberRepo := iamrepo.NewMemberRepository(db) + tenantRepo := tenantrepo.NewTenantRepository(db) + + return func(ctx context.Context, claims *reqctx.CoreXClaims) error { + if claims == nil { + return fmt.Errorf("claims missing") + } + if claims.UserID == 0 { + return fmt.Errorf("user id missing") + } + + currentTenant, err := reqctx.RequireTenantUUID(ctx) + if err != nil { + return fmt.Errorf("tenant uuid missing") + } + tenantUUID, err := reqctx.CanonicalTenantUUID(currentTenant) + if err != nil { + return fmt.Errorf("tenant uuid invalid") + } + + tenantItem, err := 
loadTenantSnapshot(ctx, tenantRepo, tenantUUID) + if err != nil { + return err + } + if tenantItem.Status != tenantmodel.TenantStatusActive { + return fmt.Errorf("tenant disabled") + } + + userItem, err := loadUserSnapshot(ctx, userRepo, claims.UserID) + if err != nil { + return err + } + if userItem.Status != 1 { + return fmt.Errorf("user disabled") + } + + if claims.IsRoot { + return nil + } + if claims.MemberID == 0 { + return fmt.Errorf("member id missing") + } + memberItem, err := loadMemberSnapshot(ctx, memberRepo, claims.MemberID) + if err != nil { + return err + } + if memberItem.Status != 1 { + return fmt.Errorf("member disabled") + } + if memberItem.UserID != claims.UserID { + return fmt.Errorf("member user mismatch") + } + if !strings.EqualFold(strings.TrimSpace(memberItem.TenantUUID), tenantUUID) { + return fmt.Errorf("member tenant mismatch") + } + return nil + } +} + +func loadTenantSnapshot(ctx context.Context, repo *tenantrepo.TenantRepository, tenantUUID string) (*tenantSnapshot, error) { + cacheStore := cache.GetCache() + cacheKey := authTenantKey(tenantUUID) + if cacheStore != nil { + if raw, err := cacheStore.Get(ctx, cacheKey); err == nil && len(raw) > 0 { + var out tenantSnapshot + if json.Unmarshal(raw, &out) == nil { + return &out, nil + } + } + } + + item, err := repo.GetByUUID(ctx, tenantUUID) + if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, fmt.Errorf("tenant not found") + } + return nil, fmt.Errorf("load tenant failed") + } + out := &tenantSnapshot{Status: item.Status} + if cacheStore != nil { + if payload, marshalErr := json.Marshal(out); marshalErr == nil { + _ = cacheStore.Set(ctx, cacheKey, payload, authSubjectCacheTTL) + } + } + return out, nil +} + +func loadUserSnapshot(ctx context.Context, repo *iamrepo.UserRepository, userID uint64) (*userSnapshot, error) { + cacheStore := cache.GetCache() + cacheKey := middleware.KUser(userID) + if cacheStore != nil { + if raw, err := cacheStore.Get(ctx, cacheKey); 
err == nil && len(raw) > 0 { + var out userSnapshot + if json.Unmarshal(raw, &out) == nil { + return &out, nil + } + } + } + + item, err := repo.FindByID(ctx, userID) + if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, fmt.Errorf("user not found") + } + return nil, fmt.Errorf("load user failed") + } + out := &userSnapshot{Status: item.Status} + if cacheStore != nil { + if payload, marshalErr := json.Marshal(out); marshalErr == nil { + _ = cacheStore.Set(ctx, cacheKey, payload, authSubjectCacheTTL) + } + } + return out, nil +} + +func loadMemberSnapshot(ctx context.Context, repo *iamrepo.MemberRepository, memberID uint64) (*memberSnapshot, error) { + cacheStore := cache.GetCache() + cacheKey := middleware.KMember(memberID) + if cacheStore != nil { + if raw, err := cacheStore.Get(ctx, cacheKey); err == nil && len(raw) > 0 { + var out memberSnapshot + if json.Unmarshal(raw, &out) == nil { + return &out, nil + } + } + } + + item, err := repo.FindByID(ctx, memberID) + if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, fmt.Errorf("member not found") + } + return nil, fmt.Errorf("load member failed") + } + out := &memberSnapshot{ + Status: item.Status, + UserID: item.UserID, + TenantUUID: strings.TrimSpace(item.TenantUUID), + } + if cacheStore != nil { + if payload, marshalErr := json.Marshal(out); marshalErr == nil { + _ = cacheStore.Set(ctx, cacheKey, payload, authSubjectCacheTTL) + } + } + return out, nil +} + +func authTenantKey(tenantUUID string) string { + return "auth:tenant_uuid:" + strings.ToLower(strings.TrimSpace(tenantUUID)) +} + +func registerAuthSubjectCacheInvalidation(db *gorm.DB) { + if db == nil { + return + } + authCacheCallbackOnce.Do(func() { + updateHook := db.Callback().Update().After("gorm:after_update") + _ = updateHook.Register("powerx:auth_cache_invalidate:update", invalidateAuthCacheByStatement) + deleteHook := db.Callback().Delete().After("gorm:after_delete") + _ = 
deleteHook.Register("powerx:auth_cache_invalidate:delete", invalidateAuthCacheByStatement) + }) +} + +func invalidateAuthCacheByStatement(tx *gorm.DB) { + if tx == nil || tx.Statement == nil { + return + } + store := cache.GetCache() + if store == nil { + return + } + table := strings.ToLower(strings.TrimSpace(tx.Statement.Table)) + if table == "" { + return + } + if !strings.HasSuffix(table, "iam_user") && + !strings.HasSuffix(table, "iam_member") && + !strings.HasSuffix(table, "iam_tenant") { + return + } + invalidateByReflectValue(tx.Statement.Context, store, table, tx.Statement.ReflectValue) +} + +func invalidateByReflectValue(ctx context.Context, store cache.ICache, table string, value reflect.Value) { + if !value.IsValid() { + return + } + if value.Kind() == reflect.Pointer { + if value.IsNil() { + return + } + invalidateByReflectValue(ctx, store, table, value.Elem()) + return + } + if value.Kind() == reflect.Slice || value.Kind() == reflect.Array { + for i := 0; i < value.Len(); i++ { + invalidateByReflectValue(ctx, store, table, value.Index(i)) + } + return + } + if value.Kind() != reflect.Struct { + return + } + + switch { + case strings.HasSuffix(table, "iam_user"): + if field := value.FieldByName("ID"); field.IsValid() { + if userID, ok := toUint64(field); ok && userID > 0 { + _ = store.Delete(ctx, middleware.KUser(userID)) + } + } + case strings.HasSuffix(table, "iam_member"): + if field := value.FieldByName("ID"); field.IsValid() { + if memberID, ok := toUint64(field); ok && memberID > 0 { + _ = store.Delete(ctx, middleware.KMember(memberID)) + } + } + case strings.HasSuffix(table, "iam_tenant"): + if field := value.FieldByName("UUID"); field.IsValid() { + raw := strings.TrimSpace(fmt.Sprint(field.Interface())) + if raw != "" && raw != "" { + _ = store.Delete(ctx, authTenantKey(raw)) + } + } + } +} + +func toUint64(v reflect.Value) (uint64, bool) { + if !v.IsValid() { + return 0, false + } + switch v.Kind() { + case reflect.Uint, reflect.Uint8, 
reflect.Uint16, reflect.Uint32, reflect.Uint64: + return v.Uint(), true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if v.Int() < 0 { + return 0, false + } + return uint64(v.Int()), true + default: + return 0, false + } +} diff --git a/backend/internal/http/middleware.go b/backend/internal/http/middleware.go index 07d678ae..81c4192d 100644 --- a/backend/internal/http/middleware.go +++ b/backend/internal/http/middleware.go @@ -2,6 +2,9 @@ package http import ( "context" + "net/url" + "sort" + "strings" "time" "github.com/ArtisanCloud/PowerX/pkg/corex/audit" @@ -27,9 +30,11 @@ func RequestLoggingMiddleware() gin.HandlerFunc { status := c.Writer.Status() tenantUUID := reqctx.GetTenantUUID(c.Request.Context()) traceID := audit.GetTraceID(c.Request.Context()) + query := sanitizeQuery(c.Request.URL.Query()) logger.Info(c.Request.Context(), "http_request", zap.String("method", c.Request.Method), zap.String("path", c.FullPath()), + zap.String("query", query), zap.Int("status", status), zap.Int64("latency_ms", latency.Milliseconds()), zap.String("tenant_uuid", tenantUUID), @@ -38,6 +43,74 @@ func RequestLoggingMiddleware() gin.HandlerFunc { } } +func sanitizeQuery(v url.Values) string { + if len(v) == 0 { + return "" + } + // 避免把用户内容/敏感信息写入日志(SSE 的 q/message、token 等) + redactKeys := map[string]struct{}{ + "q": {}, + "message": {}, + "prompt": {}, + "system_prompt": {}, + "systemPrompt": {}, + "api_key": {}, + "apiKey": {}, + "authorization": {}, + "access_token": {}, + "refresh_token": {}, + "token": {}, + "bearer": {}, + "password": {}, + "secret": {}, + "client_secret": {}, + "private_key": {}, + "signature": {}, + "sig": {}, + "x-api-key": {}, + "x_api_key": {}, + "apikey": {}, + "openai_api_key": {}, + } + + // clone + redact + out := make(url.Values, len(v)) + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + kl := 
strings.ToLower(strings.TrimSpace(k)) + if _, ok := redactKeys[kl]; ok { + out[k] = []string{""} + continue + } + vals := v[k] + clean := make([]string, 0, len(vals)) + for _, s := range vals { + s = strings.TrimSpace(s) + if s == "" { + continue + } + // 单个值也做截断,避免日志过长 + if len(s) > 200 { + s = s[:200] + "…" + } + clean = append(clean, s) + } + if len(clean) > 0 { + out[k] = clean + } + } + + encoded := out.Encode() + if len(encoded) > 800 { + return encoded[:800] + "…" + } + return encoded +} + // TraceInjectionMiddleware 确保每个请求都有 trace_id(从 header 继承或新建)并注入 context func TraceInjectionMiddleware() gin.HandlerFunc { return func(c *gin.Context) { diff --git a/backend/internal/http/router.go b/backend/internal/http/router.go index 08c3d6a7..9a107e1e 100644 --- a/backend/internal/http/router.go +++ b/backend/internal/http/router.go @@ -16,6 +16,7 @@ import ( // SetupRouter 构造带基础中间件的 Gin 引擎,外部传入 auth middleware 和自定义 route 注册函数。 // registerFunc 会在 corexGroup 上执行(即 /{prefix}/... 下面),返回 engine 供外部再挂载其他 group/handler。 func SetupRouter(cfg *config.Config, r *gin.Engine, deps *shared.Deps) error { + registerAuthSubjectCacheInvalidation(deps.DB) // 全局中间件:恢复/日志/trace/feature 等 r.Use(RecoveryMiddleware()) @@ -23,12 +24,13 @@ func SetupRouter(cfg *config.Config, r *gin.Engine, deps *shared.Deps) error { r.Use(TraceInjectionMiddleware()) r.Use(FeatureInjectionMiddleware()) - authUser := middleware.JwtMiddleware( + authUser := middleware.APIKeyOrJwtMiddleware( + deps.DB, []byte(cfg.Auth.JWTSecret), cfg.Auth.Issuer, []string{cfg.Auth.AudienceUser}, []string{"access"}, - nil, + buildJWTSubjectValidationCallback(deps.DB), middleware.WithTenantHeaderPolicy(middleware.TenantHeaderPolicy{ RequireUUID: cfg.Tenants.RequireUUID, }), diff --git a/backend/internal/infra/plugin/manager/install.go b/backend/internal/infra/plugin/manager/install.go index c16b393c..5c06a3bb 100644 --- a/backend/internal/infra/plugin/manager/install.go +++ b/backend/internal/infra/plugin/manager/install.go @@ 
-130,6 +130,17 @@ func (m *managerImpl) InstallFromFile(ctx context.Context, srcDir string, opts p } else if rec != nil { desc.Migration = rec } + if m.opts.PostInstallManifest != nil { + if err := m.opts.PostInstallManifest(ctx, man); err != nil { + return plugin_mgr.Plugin{}, plugin_mgr.Wrap( + plugin_mgr.CodeInternal, + err, + plugin_mgr.WithOp("install_file.register_permissions"), + plugin_mgr.WithPlugin(man.ID), + plugin_mgr.WithVersion(man.Version), + ) + } + } // 5) 登记为 installed(Bootstrap 已处理“同版本跳过”,这里就是新装) if err := m.opts.Registry.Put(ctx, desc, plugin_mgr.StateInstalled); err != nil { diff --git a/backend/internal/infra/plugin/manager/manager.go b/backend/internal/infra/plugin/manager/manager.go index cf19ff50..69a59e80 100644 --- a/backend/internal/infra/plugin/manager/manager.go +++ b/backend/internal/infra/plugin/manager/manager.go @@ -14,6 +14,7 @@ import ( ) type PostEnableHook func(ctx context.Context, tenantUUID, pluginID string) error +type PostInstallManifestHook func(ctx context.Context, manifest plugin_mgr.Manifest) error // Options 注入依赖与基础配置 type Options struct { @@ -24,11 +25,12 @@ type Options struct { CoreConfig *config.Config - Loader Loader - Registry Registry - HTTP *router.DynamicRouter - Supervisor *supervisor.Supervisor - PostEnable PostEnableHook + Loader Loader + Registry Registry + HTTP *router.DynamicRouter + Supervisor *supervisor.Supervisor + PostEnable PostEnableHook + PostInstallManifest PostInstallManifestHook } // managerImpl 是内嵌版的具体实现(满足 plugin_mgr.Manager) diff --git a/backend/internal/infra/plugin/manager/router/router.go b/backend/internal/infra/plugin/manager/router/router.go index 6a42978d..f793b84b 100644 --- a/backend/internal/infra/plugin/manager/router/router.go +++ b/backend/internal/infra/plugin/manager/router/router.go @@ -426,7 +426,6 @@ func (r *DynamicRouter) serveAPIProxy(c *gin.Context) { tenantUUID = strings.TrimSpace(claims.TenantUUID) } if tenantUUID != "" { - req.Header.Set("X-Tenant-UUID", tenantUUID) 
log.Printf("[PROXY-CTX] plugin=%s tenantUUID=%s", pluginID, tenantUUID) } else { log.Printf("[PROXY-CTX] plugin=%s tenantUUID missing", pluginID) diff --git a/backend/internal/server/agent/catalog/auth_catelog.go b/backend/internal/server/agent/catalog/auth_catelog.go index 6b2be15c..5619287d 100644 --- a/backend/internal/server/agent/catalog/auth_catelog.go +++ b/backend/internal/server/agent/catalog/auth_catelog.go @@ -53,3 +53,14 @@ func AuthReqFromCatalog(provider string) authReq { return req } + +// DefaultBaseURLForModel returns a model-specific base URL when a provider requires it. +// Currently used to map Hugging Face embeddings to the OpenAI-compatible endpoint. +func DefaultBaseURLForModel(provider, model string) string { + p := strings.ToLower(strings.TrimSpace(provider)) + if p == "huggingface" || p == "hf" { + _ = model + return "https://router.huggingface.co/v1" + } + return "" +} diff --git a/backend/internal/server/agent/catalog/registry.go b/backend/internal/server/agent/catalog/registry.go index 0214302c..c5c17634 100644 --- a/backend/internal/server/agent/catalog/registry.go +++ b/backend/internal/server/agent/catalog/registry.go @@ -32,6 +32,7 @@ type Manifest struct { Drivers map[string]string `yaml:"drivers,omitempty" json:"drivers,omitempty"` // modality -> driverKey Auth AuthSpec `yaml:"auth" json:"auth"` Modalities map[string]ModalityManifest `yaml:"modalities" json:"modalities"` + Apps map[string]AppManifest `yaml:"apps,omitempty" json:"apps,omitempty"` } type AuthSpec struct { @@ -53,6 +54,11 @@ type ModalityManifest struct { Models []ModelManifest `yaml:"models" json:"models"` } +type AppManifest struct { + Name string `yaml:"name,omitempty" json:"name,omitempty"` + Modalities map[string]ModalityManifest `yaml:"modalities" json:"modalities"` +} + type ModelManifest struct { ID string `yaml:"id" json:"id"` Label string `yaml:"label" json:"label"` @@ -68,6 +74,11 @@ type ProviderItem struct { Name string `json:"name"` } +type AppItem 
struct { + ID string `json:"id"` + Name string `json:"name"` +} + type Registry struct { mu sync.RWMutex providers map[string]*Manifest // canonical id -> manifest @@ -200,15 +211,11 @@ func (r *Registry) Providers(modality string) []ProviderItem { defer r.mu.RUnlock() out := make([]ProviderItem, 0, len(r.providers)) - // 支持 "*" 或空:返回所有有任一模态模型的 Provider + // 支持 "*" 或空:返回所有有任一模态模型的 Provider(含 apps) if mod == "" || mod == "*" || mod == "any" { for id, m := range r.providers { - // 只要任一模态下有模型就算可用 - for _, mm := range m.Modalities { - if len(mm.Models) > 0 { - out = append(out, ProviderItem{ID: id, Name: m.Name}) - break - } + if hasAnyModels(m) { + out = append(out, ProviderItem{ID: id, Name: m.Name}) } } sort.Slice(out, func(i, j int) bool { return out[i].Name < out[j].Name }) @@ -216,7 +223,7 @@ func (r *Registry) Providers(modality string) []ProviderItem { } for id, m := range r.providers { - if mm, ok := m.Modalities[mod]; ok && len(mm.Models) > 0 { + if hasModelsForModality(m, mod) { out = append(out, ProviderItem{ID: id, Name: m.Name}) } } @@ -225,6 +232,10 @@ func (r *Registry) Providers(modality string) []ProviderItem { } func (r *Registry) Models(modality, provider string) ([]ModelManifest, error) { + return r.ModelsByApp(modality, provider, "") +} + +func (r *Registry) ModelsByApp(modality, provider, app string) ([]ModelManifest, error) { if r == nil { return nil, errors.New("catalog not initialized") } @@ -239,19 +250,75 @@ func (r *Registry) Models(modality, provider string) ([]ModelManifest, error) { if m == nil { return nil, errors.New("provider not found: " + pid) } - // 支持 "*" 或空:聚合该 provider 的所有模态模型 + app = strings.TrimSpace(strings.ToLower(app)) + // 有 app:只在 app 内查 + if app != "" && m.Apps != nil { + if am, ok := m.Apps[app]; ok { + if mod == "" || mod == "*" || mod == "any" { + var all []ModelManifest + for _, mm := range am.Modalities { + all = append(all, mm.Models...) 
+ } + return all, nil + } + if mm, ok := am.Modalities[mod]; ok { + return mm.Models, nil + } + return []ModelManifest{}, nil + } + } + // 无 app:聚合 base + apps if mod == "" || mod == "*" || mod == "any" { var all []ModelManifest for _, mm := range m.Modalities { all = append(all, mm.Models...) } + for _, am := range m.Apps { + for _, mm := range am.Modalities { + all = append(all, mm.Models...) + } + } return all, nil } - mm, ok := m.Modalities[mod] - if !ok { - return []ModelManifest{}, nil + var out []ModelManifest + if mm, ok := m.Modalities[mod]; ok { + out = append(out, mm.Models...) + } + for _, am := range m.Apps { + if mm, ok := am.Modalities[mod]; ok { + out = append(out, mm.Models...) + } } - return mm.Models, nil + return out, nil +} + +func (r *Registry) Apps(provider, modality string) []AppItem { + if r == nil { + return nil + } + mod := NormalizeModality(modality) + pid := r.CanonicalProvider(provider) + if pid == "" { + return nil + } + r.mu.RLock() + defer r.mu.RUnlock() + m := r.providers[pid] + if m == nil || len(m.Apps) == 0 { + return nil + } + out := make([]AppItem, 0, len(m.Apps)) + for id, app := range m.Apps { + if mod == "" || mod == "*" || mod == "any" || hasAppModelsForModality(&app, mod) { + name := app.Name + if strings.TrimSpace(name) == "" { + name = id + } + out = append(out, AppItem{ID: id, Name: name}) + } + } + sort.Slice(out, func(i, j int) bool { return out[i].Name < out[j].Name }) + return out } func (r *Registry) CanonicalProvider(nameOrAlias string) string { @@ -260,6 +327,50 @@ func (r *Registry) CanonicalProvider(nameOrAlias string) string { return r.alias[strings.ToLower(strings.TrimSpace(nameOrAlias))] } +func hasAnyModels(m *Manifest) bool { + if m == nil { + return false + } + for _, mm := range m.Modalities { + if len(mm.Models) > 0 { + return true + } + } + for _, am := range m.Apps { + for _, mm := range am.Modalities { + if len(mm.Models) > 0 { + return true + } + } + } + return false +} + +func 
hasModelsForModality(m *Manifest, modality string) bool { + if m == nil { + return false + } + if mm, ok := m.Modalities[modality]; ok && len(mm.Models) > 0 { + return true + } + for _, am := range m.Apps { + if mm, ok := am.Modalities[modality]; ok && len(mm.Models) > 0 { + return true + } + } + return false +} + +func hasAppModelsForModality(a *AppManifest, modality string) bool { + if a == nil { + return false + } + if mm, ok := a.Modalities[modality]; ok && len(mm.Models) > 0 { + return true + } + return false +} + // Global:全局注册表(启动时初始化) var GlobalAIRegister *Registry @@ -311,5 +422,48 @@ func expandPath(p string) string { return filepath.Clean(p) } wd, _ := os.Getwd() - return filepath.Clean(filepath.Join(wd, p)) + clean := filepath.Clean(filepath.Join(wd, p)) + if pathExists(clean) { + return clean + } + if root := findRepoRoot(wd); root != "" { + trim := strings.TrimPrefix(p, "."+string(filepath.Separator)) + if candidate := filepath.Clean(filepath.Join(root, trim)); pathExists(candidate) { + return candidate + } + if candidate := filepath.Clean(filepath.Join(root, "backend", trim)); pathExists(candidate) { + return candidate + } + } + return clean +} + +func pathExists(p string) bool { + if p == "" { + return false + } + if _, err := os.Stat(p); err == nil { + return true + } + return false +} + +func findRepoRoot(start string) string { + dir := filepath.Clean(start) + for { + if dir == "" || dir == string(filepath.Separator) || dir == "." 
{ + return "" + } + if _, err := os.Stat(filepath.Join(dir, ".specify")); err == nil { + return dir + } + if _, err := os.Stat(filepath.Join(dir, ".git")); err == nil { + return dir + } + next := filepath.Dir(dir) + if next == dir { + return "" + } + dir = next + } } diff --git a/backend/internal/server/agent/contract/types.go b/backend/internal/server/agent/contract/types.go index 309a4130..2fad81c6 100644 --- a/backend/internal/server/agent/contract/types.go +++ b/backend/internal/server/agent/contract/types.go @@ -7,6 +7,7 @@ type Modality string const ( ModLLM Modality = "llm" + ModVLM Modality = "vlm" ModImage Modality = "image" ModEmbed Modality = "embedding" ModAudioTTS Modality = "audio_tts" diff --git a/backend/internal/server/agent/contract/vlm.go b/backend/internal/server/agent/contract/vlm.go new file mode 100644 index 00000000..5469bdaf --- /dev/null +++ b/backend/internal/server/agent/contract/vlm.go @@ -0,0 +1,28 @@ +package contract + +import "context" + +// VLM (Vision-Language Model): image/text -> text +// 与 LLM 区分,便于路由与能力标注。 +type VLMClient interface { + Invoke(ctx context.Context, in VLMRequest) (*VLMResponse, error) + Stream(ctx context.Context, in VLMRequest, onDelta func(string)) (*VLMResponse, error) + + Cap() ModelCapabilities + Health(ctx context.Context) error +} + +type VLMRequest struct { + Messages []Message + Temperature float64 + TopP float64 + MaxTokens int + JSONMode bool + Runtime map[string]any +} + +type VLMResponse struct { + Text string + Usage map[string]int + Provider, Model string +} diff --git a/backend/internal/server/agent/drivers/eino/agent.go b/backend/internal/server/agent/drivers/eino/agent.go index 6f55bf39..50656ee8 100644 --- a/backend/internal/server/agent/drivers/eino/agent.go +++ b/backend/internal/server/agent/drivers/eino/agent.go @@ -14,8 +14,8 @@ import ( "github.com/ArtisanCloud/PowerX/internal/server/agent" "github.com/ArtisanCloud/PowerX/internal/server/agent/config" 
"github.com/ArtisanCloud/PowerX/internal/server/agent/contract" - "github.com/ArtisanCloud/PowerX/internal/server/agent/drivers/eino/llm" agentschema "github.com/ArtisanCloud/PowerX/internal/server/agent/schemas" + "github.com/ArtisanCloud/PowerX/internal/server/ai/factory/llm" "github.com/ArtisanCloud/PowerX/pkg/corex/flow/loader" flowschema "github.com/ArtisanCloud/PowerX/pkg/corex/flow/schemas" "github.com/ArtisanCloud/PowerX/pkg/dto" diff --git a/backend/internal/server/agent/drivers/eino/audio/audio.go b/backend/internal/server/agent/drivers/eino/audio/audio.go deleted file mode 100644 index ecc9c08a..00000000 --- a/backend/internal/server/agent/drivers/eino/audio/audio.go +++ /dev/null @@ -1,14 +0,0 @@ -package audio - -import ( - "context" - "github.com/ArtisanCloud/PowerX/internal/server/agent/drivers/eino/config" -) - -// internal/server/agent/drivers/eino/llm/audio.go -type AudioTTSClient interface { - TTS(ctx context.Context, mc config.ModelConfig, text string) ([]byte, error) -} -type AudioASRClient interface { - Transcribe(ctx context.Context, mc config.ModelConfig, audio []byte, mime string) (string, error) -} diff --git a/backend/internal/server/agent/drivers/eino/config/config.go b/backend/internal/server/agent/drivers/eino/config/config.go index f83e6f99..1f80488a 100644 --- a/backend/internal/server/agent/drivers/eino/config/config.go +++ b/backend/internal/server/agent/drivers/eino/config/config.go @@ -1,116 +1,11 @@ package config -import ( - "time" -) +import aiconfig "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/config" -// 统一配置(融合运行时请求 + agent 默认配置) -type ModelConfig struct { - Provider string `yaml:"provider" json:"provider"` - Endpoint string `yaml:"endpoint" json:"endpoint"` - APIKey string `yaml:"api_key" json:"api_key"` - SecretID string `yaml:"secret_id" json:"secret_id"` - SecretKey string `yaml:"secret_key" json:"secret_key"` - Region string `yaml:"region" json:"region"` - Model string `yaml:"model" json:"model"` - 
SystemPrompt string `yaml:"system_prompt" json:"system_prompt"` - Temperature float64 `yaml:"temperature" json:"temperature"` - MaxTokens int `yaml:"max_tokens" json:"max_tokens"` - TopP float32 `yaml:"top_p" json:"top_p"` - Extra map[string]any `yaml:"extra" json:"extra"` - Timeout time.Duration `yaml:"timeout" json:"timeout"` +// 仅保留类型别名,避免 agent driver 与 ai driver 的 ModelConfig 类型分裂。 +type ModelConfig = aiconfig.ModelConfig - // 可选:OpenAI - Organization string `yaml:"organization" json:"organization"` - AzureDeployment string `yaml:"azure_deployment" json:"azure_deployment"` - APIVersion string `yaml:"api_version" json:"api_version"` - APIType string `yaml:"api_type" json:"api_type"` - - // 可选:百度 - AccessToken string `yaml:"access_token" json:"access_token"` - BaiduAK string `yaml:"baidu_ak" json:"baidu_ak"` - BaiduSK string `yaml:"baidu_sk" json:"baidu_sk"` -} - -// ——— 配置融合 ——— -// 以 Agent 默认配置为“底”,运行时覆盖它(req.Config > agent cfg) +// 直接复用 AI 层 MergeConfig。 func MergeConfig(base *ModelConfig, override *ModelConfig) *ModelConfig { - out := &ModelConfig{} - if base != nil { - *out = *base - } - if override == nil { - return out - } - // string fields - if override.Provider != "" { - out.Provider = override.Provider - } - if override.Endpoint != "" { - out.Endpoint = override.Endpoint - } - if override.APIKey != "" { - out.APIKey = override.APIKey - } - if override.SecretID != "" { - out.SecretID = override.SecretID - } - if override.SecretKey != "" { - out.SecretKey = override.SecretKey - } - if override.Region != "" { - out.Region = override.Region - } - if override.Model != "" { - out.Model = override.Model - } - if override.SystemPrompt != "" { - out.SystemPrompt = override.SystemPrompt - } - if override.Organization != "" { - out.Organization = override.Organization - } - if override.AzureDeployment != "" { - out.AzureDeployment = override.AzureDeployment - } - if override.APIVersion != "" { - out.APIVersion = override.APIVersion - } - if override.APIType != 
"" { - out.APIType = override.APIType - } - if override.AccessToken != "" { - out.AccessToken = override.AccessToken - } - if override.BaiduAK != "" { - out.BaiduAK = override.BaiduAK - } - if override.BaiduSK != "" { - out.BaiduSK = override.BaiduSK - } - - // numeric fields (0 usually means "unspecified" in this project) - if override.Temperature > 0 { - out.Temperature = override.Temperature - } - if override.MaxTokens > 0 { - out.MaxTokens = override.MaxTokens - } - if override.TopP > 0 { - out.TopP = override.TopP - } - if override.Timeout > 0 { - out.Timeout = override.Timeout - } - - // maps - if len(override.Extra) > 0 { - if out.Extra == nil { - out.Extra = map[string]any{} - } - for k, v := range override.Extra { - out.Extra[k] = v - } - } - return out + return aiconfig.MergeConfig(base, override) } diff --git a/backend/internal/server/agent/drivers/eino/image/image.go b/backend/internal/server/agent/drivers/eino/image/image.go deleted file mode 100644 index 6f3233f8..00000000 --- a/backend/internal/server/agent/drivers/eino/image/image.go +++ /dev/null @@ -1,15 +0,0 @@ -package image - -import ( - "context" - "github.com/ArtisanCloud/PowerX/internal/server/agent/drivers/eino/config" -) - -// internal/server/agent/drivers/eino/llm/image.go -type ImageClient interface { - GenerateImage(ctx context.Context, mc config.ModelConfig, prompt string, opt ImageOptions) ([]byte, error) -} -type ImageOptions struct { - Size string // 1024x1024 - Format string // png|jpeg|webp -} diff --git a/backend/internal/server/agent/factory/intent/embed.go b/backend/internal/server/agent/factory/intent/embed.go index 9c5a6329..a5f08ecc 100644 --- a/backend/internal/server/agent/factory/intent/embed.go +++ b/backend/internal/server/agent/factory/intent/embed.go @@ -2,33 +2,74 @@ package intent import ( "fmt" + "time" + "github.com/ArtisanCloud/PowerX/internal/server/agent/config" "github.com/ArtisanCloud/PowerX/internal/server/agent/contract/embed" - embed2 
"github.com/ArtisanCloud/PowerX/internal/server/agent/intent/embed" - "time" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/baidu" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/hash" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/huggingface" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/ollama" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/openai" ) func NewVectorizerFromConfig(llm config.EmbeddingConfig) (embed.Vectorizer, error) { switch llm.Provider { case "", "none", "disabled": return nil, nil - case "openai": + case "hash", "hash32", "local_hash": + dim := llm.Dim + if dim <= 0 { + dim = 1536 + } + return &hash.HashEmbedder{Dim: dim}, nil + case "huggingface", "hf": if llm.APIKey == "" { - return nil, fmt.Errorf("openai api_key is empty") + return nil, fmt.Errorf("huggingface api_key is empty") } - return &embed2.OpenAIEmbedder{ + return &huggingface.HuggingFaceEmbedder{ + BaseURL: llm.Endpoint, + APIKey: llm.APIKey, + Model: llm.Model, + Timeout: 20 * time.Second, + MaxBatch: 64, + }, nil + case "baidu", "qianfan": + if llm.APIKey == "" { + return nil, fmt.Errorf("baidu api_key is empty") + } + return &baidu.BaiduQianfanEmbedder{ + BaseURL: llm.Endpoint, + APIKey: llm.APIKey, + Model: llm.Model, + Timeout: 20 * time.Second, + MaxBatch: 64, + }, nil + case "sentence_transformers", "sentence-transformers", "sbert": + // 无官方统一 HTTP 协议;若你本地已部署 OpenAI-compatible embeddings 网关,请改用 provider=openai_compatible。 + return nil, fmt.Errorf("sentence-transformers embedding 未提供内置直连实现,请使用 OpenAI-compatible 网关(provider=openai_compatible)或在后端补充该 provider 的 HTTP 协议适配") + case "openai_compatible", "openai-compatible", "openai_compat": + return &openai.OpenAIEmbedder{ + BaseURL: llm.Endpoint, // e.g. 
http://localhost:8080/v1 + APIKey: llm.APIKey, // optional for local gateways + Model: llm.Model, + Timeout: 60 * time.Second, + MaxBatch: llm.MaxBatch, + }, nil + case "openai": + return &openai.OpenAIEmbedder{ BaseURL: llm.Endpoint, // e.g. https://api.openai.com/v1 APIKey: llm.APIKey, Model: llm.Model, // e.g. text-embedding-3-small - Timeout: 15 * time.Second, - MaxBatch: 128, + Timeout: 60 * time.Second, + MaxBatch: llm.MaxBatch, }, nil case "ollama": - return &embed2.OllamaEmbedder{ + return &ollama.OllamaEmbedder{ BaseURL: llm.Endpoint, // e.g. http://localhost:11434 Model: llm.Model, // e.g. bge-m3 - Timeout: 15 * time.Second, - MaxBatch: 128, + Timeout: 60 * time.Second, + MaxBatch: llm.MaxBatch, }, nil default: return nil, fmt.Errorf("unsupported embedding provider: %s", llm.Provider) diff --git a/backend/internal/server/agent/persistence/model/agent_gorm.go b/backend/internal/server/agent/persistence/model/agent_gorm.go index a2b43735..5bafa15b 100644 --- a/backend/internal/server/agent/persistence/model/agent_gorm.go +++ b/backend/internal/server/agent/persistence/model/agent_gorm.go @@ -3,7 +3,9 @@ package model import ( coremodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model" + "github.com/google/uuid" "gorm.io/datatypes" + "gorm.io/gorm" ) // ---------- 表名常量 ---------- @@ -44,7 +46,7 @@ const ( // ---------- 1) Agent 主表 ---------- // 说明:沿用 Env + TenantUUID 的作用域与索引策略;(Env, TenantUUID, Key) 在同一租户内唯一。 type Agent struct { - coremodel.PowerModel + coremodel.PowerUUIDModel // 作用域 Env string `gorm:"size:32;index:agent_key_uniq_global,unique,priority:1,where:tenant_uuid IS NULL;index:agent_key_uniq_tenant,unique,priority:1" json:"-"` @@ -77,6 +79,13 @@ type Agent struct { Meta datatypes.JSONMap `gorm:"type:jsonb;default:'{}'::jsonb" json:"meta"` } +func (mdl *Agent) BeforeCreate(tx *gorm.DB) error { + if mdl.UUID == uuid.Nil { + mdl.UUID = uuid.New() + } + return nil +} + // 表名(带 schema) func (mdl *Agent) TableName() string { return 
coremodel.PowerXSchema + "." + TableAgent diff --git a/backend/internal/server/agent/persistence/model/session_gorm.go b/backend/internal/server/agent/persistence/model/session_gorm.go index 023bba43..043d2c35 100644 --- a/backend/internal/server/agent/persistence/model/session_gorm.go +++ b/backend/internal/server/agent/persistence/model/session_gorm.go @@ -2,13 +2,18 @@ package model import ( coremodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model" - "gorm.io/datatypes" "time" + + "github.com/google/uuid" + "gorm.io/datatypes" + "gorm.io/gorm" ) type AgentChatSession struct { coremodel.PowerModel + UUID uuid.UUID `gorm:"type:uuid;column:uuid;uniqueIndex;index" json:"uuid"` + // 作用域(与现有 Agent 表一致) Env string `gorm:"size:32;index:agent_sess_scope" json:"-"` TenantUUID *string `gorm:"column:tenant_uuid;index:agent_sess_scope" json:"-"` @@ -38,6 +43,13 @@ type AgentChatSession struct { func (mdl *AgentChatSession) TableName() string { return coremodel.PowerXSchema + "." + TableAgentChatSession } + +func (mdl *AgentChatSession) BeforeCreate(tx *gorm.DB) error { + if mdl.UUID == uuid.Nil { + mdl.UUID = uuid.New() + } + return nil +} func (mdl *AgentChatSession) GetTableName(needFull bool) string { if needFull { return mdl.TableName() diff --git a/backend/internal/server/agent/persistence/repository/agent_chat_session_repo.go b/backend/internal/server/agent/persistence/repository/agent_chat_session_repo.go index df0109ef..da5f69ef 100644 --- a/backend/internal/server/agent/persistence/repository/agent_chat_session_repo.go +++ b/backend/internal/server/agent/persistence/repository/agent_chat_session_repo.go @@ -103,6 +103,20 @@ func (r *AgentChatSessionRepository) FindByID( return &out, nil } +// FindByUUID(带作用域) +func (r *AgentChatSessionRepository) FindByUUID( + ctx context.Context, env string, tenantUUID *string, uid string, +) (*dbmodel.AgentChatSession, error) { + var out dbmodel.AgentChatSession + err := r.db.WithContext(ctx). 
+ Scopes(dbmodel.WithScope(env, tenantUUID)). + Where("uuid = ?", uid).First(&out).Error + if err != nil { + return nil, err + } + return &out, nil +} + // ListByAgent:按 Agent 列表会话(可选 status 过滤) func (r *AgentChatSessionRepository) ListByAgent( ctx context.Context, diff --git a/backend/internal/server/agent/persistence/repository/agent_repo.go b/backend/internal/server/agent/persistence/repository/agent_repo.go index 9cf2b277..b2734241 100644 --- a/backend/internal/server/agent/persistence/repository/agent_repo.go +++ b/backend/internal/server/agent/persistence/repository/agent_repo.go @@ -8,6 +8,7 @@ import ( dbmodel "github.com/ArtisanCloud/PowerX/internal/server/agent/persistence/model" coreRepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository" + "github.com/google/uuid" "gorm.io/gorm" ) @@ -79,6 +80,18 @@ func (r *AgentRepository) FindByScopeKey(ctx context.Context, env string, tenant return &out, nil } +func (r *AgentRepository) FindByScopeUUID(ctx context.Context, env string, tenantUUID *string, agentUUID uuid.UUID) (*dbmodel.Agent, error) { + var out dbmodel.Agent + err := r.db.WithContext(ctx). + Scopes(dbmodel.WithScope(env, tenantUUID)). + Where("uuid = ?", agentUUID). 
+ First(&out).Error + if err != nil { + return nil, err + } + return &out, nil +} + func (r *AgentRepository) GetByID(ctx context.Context, id uint64) (*dbmodel.Agent, error) { var out dbmodel.Agent if err := r.db.WithContext(ctx).First(&out, id).Error; err != nil { diff --git a/backend/internal/server/agent/persistence/repository/profile_repo.go b/backend/internal/server/agent/persistence/repository/profile_repo.go index f269c90e..8366dbbf 100644 --- a/backend/internal/server/agent/persistence/repository/profile_repo.go +++ b/backend/internal/server/agent/persistence/repository/profile_repo.go @@ -33,6 +33,7 @@ func (r *AIModelProfileRepository) UpsertByScopeModalityProviderModel( assign := clause.Assignments(map[string]any{ "label": in.Label, "defaults": in.Defaults, + "cap_cache": in.CapCache, "tags": in.Tags, "updated_at": gorm.Expr("NOW()"), }) diff --git a/backend/internal/server/ai/drivers/baidu/embedding.go b/backend/internal/server/ai/drivers/baidu/embedding.go new file mode 100644 index 00000000..91404c13 --- /dev/null +++ b/backend/internal/server/ai/drivers/baidu/embedding.go @@ -0,0 +1,149 @@ +package baidu + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "time" +) + +// BaiduQianfanEmbedder implements a best-effort qianfan v2 embeddings caller. 
+// +// Endpoint (best-effort): +// +// POST {base_url}/embeddings +// +// Body (OpenAI-like): +// +// {"model":"embedding-v1","input":["t1","t2"]} +// +// NOTE: 百度千帆 embedding 的字段/鉴权可能因版本而异;如你们有网关做 OpenAI-compatible, +// 建议改用 provider=openai_compatible 复用 OpenAIEmbedder。 +type BaiduQianfanEmbedder struct { + BaseURL string + APIKey string + Model string + Timeout time.Duration + HTTP *http.Client + MaxBatch int +} + +type baiduEmbReq struct { + Input any `json:"input"` + Model string `json:"model,omitempty"` +} + +type baiduEmbResp struct { + Data []struct { + Embedding []float64 `json:"embedding"` + Index int `json:"index"` + } `json:"data"` +} + +func (e *BaiduQianfanEmbedder) client() *http.Client { + if e.HTTP != nil { + return e.HTTP + } + to := e.Timeout + if to <= 0 { + to = 15 * time.Second + } + return &http.Client{Timeout: to} +} + +func (e *BaiduQianfanEmbedder) base() string { + if strings.TrimSpace(e.BaseURL) == "" { + return "https://qianfan.baidubce.com/v2" + } + return strings.TrimRight(strings.TrimSpace(e.BaseURL), "/") +} + +func (e *BaiduQianfanEmbedder) endpoint() string { + return e.base() + "/embeddings" +} + +func (e *BaiduQianfanEmbedder) batchSize() int { + if e.MaxBatch <= 0 { + return 64 + } + return e.MaxBatch +} + +func (e *BaiduQianfanEmbedder) Embed(ctx context.Context, texts []string) ([][]float32, error) { + if len(texts) == 0 { + return [][]float32{}, nil + } + if strings.TrimSpace(e.APIKey) == "" { + return nil, fmt.Errorf("baidu embeddings: api_key is empty") + } + if strings.TrimSpace(e.Model) == "" { + return nil, fmt.Errorf("baidu embeddings: model is empty") + } + bs := e.batchSize() + out := make([][]float32, 0, len(texts)) + for i := 0; i < len(texts); i += bs { + j := i + bs + if j > len(texts) { + j = len(texts) + } + vecs, err := e.embedOnce(ctx, texts[i:j]) + if err != nil { + return nil, err + } + out = append(out, vecs...) 
+ } + return out, nil +} + +func (e *BaiduQianfanEmbedder) embedOnce(ctx context.Context, batch []string) ([][]float32, error) { + reqBody := baiduEmbReq{ + Input: batch, + Model: strings.TrimSpace(e.Model), + } + bs, _ := json.Marshal(reqBody) + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, e.endpoint(), bytes.NewReader(bs)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+strings.TrimSpace(e.APIKey)) + + resp, err := e.client().Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, _ := io.ReadAll(resp.Body) + if resp.StatusCode/100 != 2 { + return nil, fmt.Errorf("baidu embeddings HTTP %d: %s", resp.StatusCode, strings.TrimSpace(string(body))) + } + + var or baiduEmbResp + if err := json.Unmarshal(body, &or); err != nil { + return nil, err + } + vecs := make([][]float32, len(or.Data)) + for _, d := range or.Data { + f := make([]float32, len(d.Embedding)) + for i := range d.Embedding { + f[i] = float32(d.Embedding[i]) + } + if d.Index >= 0 && d.Index < len(or.Data) { + vecs[d.Index] = f + } else { + vecs = append(vecs, f) + } + } + out := make([][]float32, 0, len(vecs)) + for _, v := range vecs { + if v != nil { + out = append(out, v) + } + } + return out, nil +} diff --git a/backend/internal/server/agent/drivers/eino/llm/baidu.go b/backend/internal/server/ai/drivers/baidu/llm.go similarity index 97% rename from backend/internal/server/agent/drivers/eino/llm/baidu.go rename to backend/internal/server/ai/drivers/baidu/llm.go index 5c8ef1b8..fe28af9e 100644 --- a/backend/internal/server/agent/drivers/eino/llm/baidu.go +++ b/backend/internal/server/ai/drivers/baidu/llm.go @@ -1,4 +1,4 @@ -package llm +package baidu import ( "bufio" @@ -7,7 +7,8 @@ import ( "encoding/json" "errors" "fmt" - "github.com/ArtisanCloud/PowerX/internal/server/agent/drivers/eino/config" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/config" + 
"github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/core" "io" "net/http" "strings" @@ -23,7 +24,7 @@ import ( type baiduClient struct{} -func NewBaiduClient() LLMClient { return &baiduClient{} } +func NewLLMClient() *baiduClient { return &baiduClient{} } const ( defaultQianfanV2Base = "https://qianfan.baidubce.com/v2" @@ -261,7 +262,7 @@ func (c *baiduClient) Stream(ctx context.Context, mc *config.ModelConfig, prompt continue } // 兼容 "data: {...}" 前缀 - line = TrimDataPrefix(line) + line = core.TrimDataPrefix(line) // [DONE] / 结束标记(不同版本可能不同) if bytes.EqualFold(line, []byte("[DONE]")) { diff --git a/backend/internal/server/ai/drivers/config/config.go b/backend/internal/server/ai/drivers/config/config.go new file mode 100644 index 00000000..f83e6f99 --- /dev/null +++ b/backend/internal/server/ai/drivers/config/config.go @@ -0,0 +1,116 @@ +package config + +import ( + "time" +) + +// 统一配置(融合运行时请求 + agent 默认配置) +type ModelConfig struct { + Provider string `yaml:"provider" json:"provider"` + Endpoint string `yaml:"endpoint" json:"endpoint"` + APIKey string `yaml:"api_key" json:"api_key"` + SecretID string `yaml:"secret_id" json:"secret_id"` + SecretKey string `yaml:"secret_key" json:"secret_key"` + Region string `yaml:"region" json:"region"` + Model string `yaml:"model" json:"model"` + SystemPrompt string `yaml:"system_prompt" json:"system_prompt"` + Temperature float64 `yaml:"temperature" json:"temperature"` + MaxTokens int `yaml:"max_tokens" json:"max_tokens"` + TopP float32 `yaml:"top_p" json:"top_p"` + Extra map[string]any `yaml:"extra" json:"extra"` + Timeout time.Duration `yaml:"timeout" json:"timeout"` + + // 可选:OpenAI + Organization string `yaml:"organization" json:"organization"` + AzureDeployment string `yaml:"azure_deployment" json:"azure_deployment"` + APIVersion string `yaml:"api_version" json:"api_version"` + APIType string `yaml:"api_type" json:"api_type"` + + // 可选:百度 + AccessToken string `yaml:"access_token" json:"access_token"` + BaiduAK string 
`yaml:"baidu_ak" json:"baidu_ak"` + BaiduSK string `yaml:"baidu_sk" json:"baidu_sk"` +} + +// ——— 配置融合 ——— +// 以 Agent 默认配置为“底”,运行时覆盖它(req.Config > agent cfg) +func MergeConfig(base *ModelConfig, override *ModelConfig) *ModelConfig { + out := &ModelConfig{} + if base != nil { + *out = *base + } + if override == nil { + return out + } + // string fields + if override.Provider != "" { + out.Provider = override.Provider + } + if override.Endpoint != "" { + out.Endpoint = override.Endpoint + } + if override.APIKey != "" { + out.APIKey = override.APIKey + } + if override.SecretID != "" { + out.SecretID = override.SecretID + } + if override.SecretKey != "" { + out.SecretKey = override.SecretKey + } + if override.Region != "" { + out.Region = override.Region + } + if override.Model != "" { + out.Model = override.Model + } + if override.SystemPrompt != "" { + out.SystemPrompt = override.SystemPrompt + } + if override.Organization != "" { + out.Organization = override.Organization + } + if override.AzureDeployment != "" { + out.AzureDeployment = override.AzureDeployment + } + if override.APIVersion != "" { + out.APIVersion = override.APIVersion + } + if override.APIType != "" { + out.APIType = override.APIType + } + if override.AccessToken != "" { + out.AccessToken = override.AccessToken + } + if override.BaiduAK != "" { + out.BaiduAK = override.BaiduAK + } + if override.BaiduSK != "" { + out.BaiduSK = override.BaiduSK + } + + // numeric fields (0 usually means "unspecified" in this project) + if override.Temperature > 0 { + out.Temperature = override.Temperature + } + if override.MaxTokens > 0 { + out.MaxTokens = override.MaxTokens + } + if override.TopP > 0 { + out.TopP = override.TopP + } + if override.Timeout > 0 { + out.Timeout = override.Timeout + } + + // maps + if len(override.Extra) > 0 { + if out.Extra == nil { + out.Extra = map[string]any{} + } + for k, v := range override.Extra { + out.Extra[k] = v + } + } + return out +} diff --git 
a/backend/internal/server/ai/drivers/core/errors.go b/backend/internal/server/ai/drivers/core/errors.go new file mode 100644 index 00000000..d02bf132 --- /dev/null +++ b/backend/internal/server/ai/drivers/core/errors.go @@ -0,0 +1,5 @@ +package core + +import "errors" + +var ErrStreamNotSupported = errors.New("llm: stream not supported") diff --git a/backend/internal/server/ai/drivers/core/stream.go b/backend/internal/server/ai/drivers/core/stream.go new file mode 100644 index 00000000..c6bb8650 --- /dev/null +++ b/backend/internal/server/ai/drivers/core/stream.go @@ -0,0 +1,13 @@ +package core + +import "bytes" + +// TrimDataPrefix trims a "data:" prefix for SSE-style lines. +func TrimDataPrefix(line []byte) []byte { + b := bytes.TrimSpace(line) + if bytes.HasPrefix(b, []byte("data:")) { + b = b[len("data:"):] + b = bytes.TrimLeft(b, " \t") + } + return b +} diff --git a/backend/internal/server/ai/drivers/google/image.go b/backend/internal/server/ai/drivers/google/image.go new file mode 100644 index 00000000..2acded90 --- /dev/null +++ b/backend/internal/server/ai/drivers/google/image.go @@ -0,0 +1,291 @@ +package google + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" + + "github.com/ArtisanCloud/PowerX/internal/server/agent/contract" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/config" + "github.com/ArtisanCloud/PowerX/pkg/utils/logger" +) + +const defaultGeminiEndpoint = "https://generativelanguage.googleapis.com/v1beta" + +type imageClient struct { + rawProvider string +} + +func NewImageClient(rawProvider string) *imageClient { + return &imageClient{rawProvider: strings.TrimSpace(rawProvider)} +} + +func (c *imageClient) Cap() contract.ModelCapabilities { + return contract.ModelCapabilities{} +} + +func (c *imageClient) Health(ctx context.Context) error { + return nil +} + +type geminiContent struct { + Parts []geminiPart `json:"parts"` +} + +type 
geminiPart struct { + Text string `json:"text,omitempty"` + InlineData *geminiInlineData `json:"inlineData,omitempty"` +} + +type geminiInlineData struct { + MimeType string `json:"mimeType"` + Data string `json:"data"` +} + +type geminiImageConfig struct { + AspectRatio string `json:"aspectRatio,omitempty"` + ImageSize string `json:"imageSize,omitempty"` +} + +type geminiGenerationConfig struct { + ResponseModalities []string `json:"responseModalities,omitempty"` + ImageConfig *geminiImageConfig `json:"imageConfig,omitempty"` +} + +type geminiGenerateReq struct { + Contents []geminiContent `json:"contents"` + GenerationConfig *geminiGenerationConfig `json:"generationConfig,omitempty"` +} + +type geminiGenerateResp struct { + Candidates []struct { + Content struct { + Parts []struct { + Text string `json:"text,omitempty"` + InlineData *geminiInlineData `json:"inlineData,omitempty"` + } `json:"parts"` + } `json:"content"` + } `json:"candidates"` + Error *struct { + Message string `json:"message"` + Code int `json:"code,omitempty"` + } `json:"error,omitempty"` +} + +func (c *imageClient) Generate(ctx context.Context, in contract.ImageRequest) (*contract.ImageResponse, error) { + mc, err := modelConfigFromRuntime(in.Runtime) + if err != nil { + return nil, err + } + if mc == nil { + return nil, errors.New("gemini image: missing model config") + } + prompt := strings.TrimSpace(in.Prompt) + if prompt == "" { + return nil, errors.New("gemini image: prompt required") + } + if len(in.RefImages) > 0 { + return nil, errors.New("gemini image: reference images not supported") + } + if strings.TrimSpace(mc.Model) == "" { + return nil, errors.New("gemini image: missing model") + } + + aspectRatio, imageSize := normalizeGeminiImageConfig(strings.TrimSpace(in.Size)) + if in.Runtime != nil { + if v, ok := in.Runtime["aspect_ratio"]; ok { + if s, ok2 := v.(string); ok2 && strings.TrimSpace(s) != "" { + aspectRatio = strings.TrimSpace(s) + } + } + if v, ok := 
in.Runtime["image_size"]; ok { + if s, ok2 := v.(string); ok2 && strings.TrimSpace(s) != "" { + imageSize = strings.TrimSpace(s) + } + } + } + + req := geminiGenerateReq{ + Contents: []geminiContent{{Parts: []geminiPart{{Text: prompt}}}}, + GenerationConfig: &geminiGenerationConfig{ + ResponseModalities: []string{"TEXT", "IMAGE"}, + ImageConfig: &geminiImageConfig{ + AspectRatio: aspectRatio, + ImageSize: imageSize, + }, + }, + } + + body, err := json.Marshal(req) + if err != nil { + return nil, err + } + + endpoint, headers, err := buildGeminiEndpointAndHeaders(mc) + if err != nil { + return nil, err + } + + httpClient := geminiHTTPClient(mc) + httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body)) + if err != nil { + return nil, err + } + httpReq.Header = headers + + start := time.Now() + resp, err := httpClient.Do(httpReq) + if err != nil { + return nil, err + } + defer resp.Body.Close() + logger.InfoF(ctx, "[ai-image] gemini http_call provider=%s model=%s status=%d latency_ms=%d", strings.TrimSpace(c.rawProvider), strings.TrimSpace(mc.Model), resp.StatusCode, time.Since(start).Milliseconds()) + + raw, _ := io.ReadAll(io.LimitReader(resp.Body, 8<<20)) + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return nil, fmt.Errorf("gemini image status=%d body=%s", resp.StatusCode, strings.TrimSpace(string(raw))) + } + + var out geminiGenerateResp + if err := json.Unmarshal(raw, &out); err != nil { + return nil, err + } + if out.Error != nil && strings.TrimSpace(out.Error.Message) != "" { + return nil, errors.New(out.Error.Message) + } + + imgs := make([][]byte, 0) + if len(out.Candidates) > 0 { + for _, part := range out.Candidates[0].Content.Parts { + if part.InlineData == nil || strings.TrimSpace(part.InlineData.Data) == "" { + continue + } + decoded, err := base64.StdEncoding.DecodeString(part.InlineData.Data) + if err != nil { + return nil, fmt.Errorf("gemini image: decode base64 failed: %w", err) + } + imgs = 
append(imgs, decoded) + } + } + if len(imgs) == 0 { + return nil, errors.New("gemini image: empty response") + } + + return &contract.ImageResponse{ + Images: imgs, + Provider: strings.TrimSpace(c.rawProvider), + Model: strings.TrimSpace(mc.Model), + LatencyMS: int(time.Since(start).Milliseconds()), + }, nil +} + +func modelConfigFromRuntime(runtime map[string]any) (*config.ModelConfig, error) { + if runtime == nil { + return nil, errors.New("image runtime config missing") + } + if raw, ok := runtime["config"]; ok { + switch v := raw.(type) { + case *config.ModelConfig: + return v, nil + case config.ModelConfig: + return &v, nil + } + } + if raw, ok := runtime["model_config"]; ok { + switch v := raw.(type) { + case *config.ModelConfig: + return v, nil + case config.ModelConfig: + return &v, nil + } + } + return nil, errors.New("image runtime config invalid") +} + +func buildGeminiEndpointAndHeaders(mc *config.ModelConfig) (string, http.Header, error) { + base := strings.TrimRight(mc.Endpoint, "/") + if base == "" { + base = defaultGeminiEndpoint + } + if strings.TrimSpace(mc.APIKey) == "" { + return "", nil, errors.New("gemini image: missing api_key") + } + if strings.TrimSpace(mc.Model) == "" { + return "", nil, errors.New("gemini image: missing model") + } + path := "/models/" + strings.TrimPrefix(strings.TrimSpace(mc.Model), "/") + ":generateContent" + endpoint := base + path + + h := http.Header{} + h.Set("Content-Type", "application/json") + h.Set("x-goog-api-key", strings.TrimSpace(mc.APIKey)) + return endpoint, h, nil +} + +func normalizeGeminiImageConfig(size string) (string, string) { + if size == "" { + return "1:1", "1K" + } + width, height := parseSize(size) + if width == 0 || height == 0 { + return "1:1", "1K" + } + aspect := aspectRatioFromSize(width, height) + imageSize := "1K" + if width >= 1536 || height >= 1536 { + imageSize = "2K" + } + return aspect, imageSize +} + +func parseSize(size string) (int, int) { + parts := 
strings.Split(strings.ToLower(strings.TrimSpace(size)), "x") + if len(parts) != 2 { + return 0, 0 + } + w, _ := strconv.Atoi(strings.TrimSpace(parts[0])) + h, _ := strconv.Atoi(strings.TrimSpace(parts[1])) + return w, h +} + +func aspectRatioFromSize(width, height int) string { + if width == 0 || height == 0 { + return "1:1" + } + if width == height { + return "1:1" + } + ratio := float64(width) / float64(height) + switch { + case ratio >= 1.65 && ratio <= 1.9: + return "16:9" + case ratio >= 1.45 && ratio < 1.65: + return "3:2" + case ratio >= 1.2 && ratio < 1.45: + return "4:3" + case ratio >= 0.6 && ratio <= 0.8: + return "2:3" + case ratio >= 0.8 && ratio < 0.9: + return "3:4" + case ratio < 0.6: + return "9:16" + default: + return "1:1" + } +} + +func geminiHTTPClient(mc *config.ModelConfig) *http.Client { + to := mc.Timeout + if to <= 0 { + to = 5 * time.Minute + } + return &http.Client{Timeout: to} +} diff --git a/backend/internal/server/ai/drivers/google/vlm.go b/backend/internal/server/ai/drivers/google/vlm.go new file mode 100644 index 00000000..8472e4db --- /dev/null +++ b/backend/internal/server/ai/drivers/google/vlm.go @@ -0,0 +1,20 @@ +package google + +import ( + "context" + "errors" + + "github.com/ArtisanCloud/PowerX/internal/server/agent/contract" +) + +type vlmClient struct{} + +func NewVLMClient() *vlmClient { return &vlmClient{} } + +func (c *vlmClient) Invoke(ctx context.Context, in contract.VLMRequest) (*contract.VLMResponse, error) { + return nil, errors.New("vlm provider not implemented") +} + +func (c *vlmClient) Stream(ctx context.Context, in contract.VLMRequest, onDelta func(string)) (*contract.VLMResponse, error) { + return nil, errors.New("vlm provider not implemented") +} diff --git a/backend/internal/server/ai/drivers/hash/embedding.go b/backend/internal/server/ai/drivers/hash/embedding.go new file mode 100644 index 00000000..bc3f0da1 --- /dev/null +++ b/backend/internal/server/ai/drivers/hash/embedding.go @@ -0,0 +1,37 @@ +package 
hash + +import ( + "context" + "crypto/sha256" + "encoding/binary" +) + +// HashEmbedder 提供“零依赖”的本地向量化兜底: +// - 纯确定性 hash → float32 向量(不具备语义能力) +// - 仅用于开发/演示/联调,生产环境应替换为真实 embedding 模型 +type HashEmbedder struct { + Dim int +} + +func (e *HashEmbedder) Embed(ctx context.Context, texts []string) ([][]float32, error) { + _ = ctx + if len(texts) == 0 { + return [][]float32{}, nil + } + dim := e.Dim + if dim <= 0 { + dim = 1536 + } + out := make([][]float32, len(texts)) + for i, t := range texts { + sum := sha256.Sum256([]byte(t)) + vec := make([]float32, dim) + for j := 0; j < dim; j++ { + offset := (j * 4) % len(sum) + u := binary.BigEndian.Uint32(sum[offset : offset+4]) + vec[j] = float32(u%10_000) / 10_000.0 + } + out[i] = vec + } + return out, nil +} diff --git a/backend/internal/server/ai/drivers/huggingface/embedding.go b/backend/internal/server/ai/drivers/huggingface/embedding.go new file mode 100644 index 00000000..b571ed72 --- /dev/null +++ b/backend/internal/server/ai/drivers/huggingface/embedding.go @@ -0,0 +1,339 @@ +package huggingface + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/openai" +) + +// HuggingFaceEmbedder calls Hugging Face Router Inference API and pools to a single vector. +// It prefers OpenAI-compatible /v1/embeddings when configured, then falls back to the +// pipeline/feature-extraction endpoints. 
+// +// API (typical): +// +// POST https://router.huggingface.co/hf-inference/models/{model}/pipeline/feature-extraction +// POST https://router.huggingface.co/pipeline/feature-extraction/{model} +// Authorization: Bearer +// Body: {"inputs": "text"} or {"inputs": ["t1","t2"]} +// +// Response shape varies by model: +// - [D] => already pooled vector +// - [T][D] => token embeddings (we avg-pool over T) +// - [B][D] or [B][T][D] => batch +type HuggingFaceEmbedder struct { + BaseURL string + APIKey string + Model string + Timeout time.Duration + HTTP *http.Client + MaxBatch int +} + +type hfEmbReq struct { + Inputs any `json:"inputs"` // string or []string +} + +type hfReqTry struct { + url string + body any +} + +func (e *HuggingFaceEmbedder) client() *http.Client { + if e.HTTP != nil { + return e.HTTP + } + to := e.Timeout + if to <= 0 { + to = 15 * time.Second + } + return &http.Client{Timeout: to} +} + +func (e *HuggingFaceEmbedder) base() string { + base := strings.TrimSpace(e.BaseURL) + if base == "" { + return "https://router.huggingface.co" + } + base = strings.TrimRight(base, "/") + if strings.HasSuffix(base, "/v1") { + base = strings.TrimSuffix(base, "/v1") + } + return base +} + +func (e *HuggingFaceEmbedder) baseV1() string { + base := strings.TrimSpace(e.BaseURL) + if base == "" { + return "https://router.huggingface.co/v1" + } + base = strings.TrimRight(base, "/") + if strings.HasSuffix(base, "/v1") { + return base + } + return base + "/v1" +} + +func (e *HuggingFaceEmbedder) endpoint() string { + // NOTE: model contains slashes (org/name). It must be preserved in the path. 
+ return e.base() + "/pipeline/feature-extraction/" + strings.TrimSpace(e.Model) +} + +func (e *HuggingFaceEmbedder) endpoints(inputs any) []hfReqTry { + model := strings.TrimSpace(e.Model) + base := e.base() + modelEsc := url.QueryEscape(model) + return []hfReqTry{ + {url: base + "/hf-inference/models/" + model + "/pipeline/feature-extraction", body: hfEmbReq{Inputs: inputs}}, + {url: base + "/pipeline/feature-extraction/" + model, body: hfEmbReq{Inputs: inputs}}, + {url: base + "/hf-inference/pipeline/feature-extraction/" + model, body: hfEmbReq{Inputs: inputs}}, + {url: base + "/pipeline/feature-extraction?model=" + modelEsc, body: hfEmbReq{Inputs: inputs}}, + {url: base + "/hf-inference/pipeline/feature-extraction?model=" + modelEsc, body: hfEmbReq{Inputs: inputs}}, + { + url: base + "/hf-inference/models/" + model, + body: map[string]any{ + "inputs": inputs, + "task": "feature-extraction", + }, + }, + { + url: base + "/models/" + model, + body: map[string]any{ + "inputs": inputs, + "task": "feature-extraction", + }, + }, + } +} + +func (e *HuggingFaceEmbedder) batchSize() int { + if e.MaxBatch <= 0 { + return 64 + } + return e.MaxBatch +} + +func (e *HuggingFaceEmbedder) Embed(ctx context.Context, texts []string) ([][]float32, error) { + if len(texts) == 0 { + return [][]float32{}, nil + } + if strings.TrimSpace(e.Model) == "" { + return nil, fmt.Errorf("huggingface embedding: model is empty") + } + bs := e.batchSize() + out := make([][]float32, 0, len(texts)) + for i := 0; i < len(texts); i += bs { + j := i + bs + if j > len(texts) { + j = len(texts) + } + vecs, err := e.embedOnce(ctx, texts[i:j]) + if err != nil { + return nil, err + } + out = append(out, vecs...) 
+ } + return out, nil +} + +func (e *HuggingFaceEmbedder) embedOnce(ctx context.Context, batch []string) ([][]float32, error) { + var in any + if len(batch) == 1 { + in = batch[0] + } else { + in = batch + } + var lastErr error + attempted := make([]string, 0, 8) + + // 1) Prefer OpenAI-compatible embeddings when configured. + if baseV1 := e.baseV1(); strings.TrimSpace(baseV1) != "" { + oai := openai.OpenAIEmbedder{ + BaseURL: baseV1, + APIKey: e.APIKey, + Model: e.Model, + Timeout: e.Timeout, + HTTP: e.HTTP, + MaxBatch: len(batch), + } + attempted = append(attempted, strings.TrimRight(oai.BaseURL, "/")+"/embeddings") + if vecs, err := oai.Embed(ctx, batch); err == nil { + return vecs, nil + } else if !shouldFallbackToPipeline(err) { + return nil, err + } else { + lastErr = err + } + } + + tries := e.endpoints(in) + for i, t := range tries { + attempted = append(attempted, t.url) + bs, _ := json.Marshal(t.body) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, t.url, bytes.NewReader(bs)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + if strings.TrimSpace(e.APIKey) != "" { + req.Header.Set("Authorization", "Bearer "+strings.TrimSpace(e.APIKey)) + } + + resp, err := e.client().Do(req) + if err != nil { + return nil, err + } + body, _ := io.ReadAll(resp.Body) + resp.Body.Close() + if resp.StatusCode/100 != 2 { + msg := strings.TrimSpace(string(body)) + lastErr = fmt.Errorf("huggingface embeddings HTTP %d (%s): %s", resp.StatusCode, t.url, msg) + if resp.StatusCode == http.StatusNotFound || + (resp.StatusCode == http.StatusBadRequest && strings.Contains(msg, "SentenceSimilarityPipeline")) { + if i < len(tries)-1 { + continue + } + } + return nil, lastErr + } + + var raw any + if err := json.Unmarshal(body, &raw); err != nil { + return nil, err + } + + // Normalize to batch output. 
+ switch v := raw.(type) { + case []any: + // could be [D] (single vector), [T][D] (single token embeddings), [B][D], or [B][T][D] + if isNumberSlice(v) { + return [][]float32{toF32(v)}, nil + } + if isNumberMatrix(v) { + // token embeddings => pool to one + return [][]float32{poolAvg(toF32Matrix(v))}, nil + } + if isBatchVectors(v) { + out := make([][]float32, 0, len(v)) + for _, item := range v { + switch vv := item.(type) { + case []any: + if isNumberSlice(vv) { + out = append(out, toF32(vv)) + continue + } + if isNumberMatrix(vv) { + out = append(out, poolAvg(toF32Matrix(vv))) + continue + } + } + return nil, fmt.Errorf("huggingface embeddings: unexpected batch item shape") + } + return out, nil + } + } + + return nil, fmt.Errorf("huggingface embeddings: unexpected response shape") + } + + if lastErr != nil { + return nil, fmt.Errorf("%v; attempted=%s", lastErr, strings.Join(attempted, ",")) + } + return nil, fmt.Errorf("huggingface embeddings: request failed") +} + +func shouldFallbackToPipeline(err error) bool { + if err == nil { + return false + } + msg := strings.ToLower(err.Error()) + return strings.Contains(msg, "openai embeddings http 404") || + strings.Contains(msg, "openai embeddings http 410") +} + +func isNumberSlice(v []any) bool { + if len(v) == 0 { + return false + } + for _, x := range v { + if _, ok := x.(float64); !ok { + return false + } + } + return true +} + +func isNumberMatrix(v []any) bool { + if len(v) == 0 { + return false + } + for _, row := range v { + r, ok := row.([]any) + if !ok || !isNumberSlice(r) { + return false + } + } + return true +} + +func isBatchVectors(v []any) bool { + // detect [B][D] or [B][T][D] + if len(v) == 0 { + return false + } + _, ok := v[0].([]any) + return ok +} + +func toF32(v []any) []float32 { + out := make([]float32, len(v)) + for i := range v { + out[i] = float32(v[i].(float64)) + } + return out +} + +func toF32Matrix(v []any) [][]float32 { + out := make([][]float32, len(v)) + for i := range v { + row 
:= v[i].([]any) + out[i] = toF32(row) + } + return out +} + +func poolAvg(tokens [][]float32) []float32 { + if len(tokens) == 0 { + return nil + } + dim := len(tokens[0]) + if dim == 0 { + return nil + } + sum := make([]float32, dim) + var n float32 + for _, t := range tokens { + if len(t) != dim { + continue + } + for i := 0; i < dim; i++ { + sum[i] += t[i] + } + n++ + } + if n <= 0 { + return sum + } + for i := 0; i < dim; i++ { + sum[i] /= n + } + return sum +} diff --git a/backend/internal/server/agent/drivers/eino/llm/hunyuan.go b/backend/internal/server/ai/drivers/hunyuan/llm.go similarity index 94% rename from backend/internal/server/agent/drivers/eino/llm/hunyuan.go rename to backend/internal/server/ai/drivers/hunyuan/llm.go index 19327ef6..245aba2f 100644 --- a/backend/internal/server/agent/drivers/eino/llm/hunyuan.go +++ b/backend/internal/server/ai/drivers/hunyuan/llm.go @@ -1,4 +1,4 @@ -package llm +package hunyuan import ( "bytes" @@ -16,14 +16,15 @@ import ( "strings" "time" - "github.com/ArtisanCloud/PowerX/internal/server/agent/drivers/eino/config" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/config" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/core" ) // 腾讯云 混元(Hunyuan)LLM:TC3-HMAC-SHA256 签名 + ChatCompletions // 参考:腾讯云通用 API 签名(TC3)与 Hunyuan ChatCompletions 接口。 -type hunyuanClient struct{ NoopStream } +type hunyuanClient struct{} -func NewHunyuanClient() LLMClient { return &hunyuanClient{} } +func NewLLMClient() *hunyuanClient { return &hunyuanClient{} } const ( defaultHunyuanEndpoint = "https://hunyuan.tencentcloudapi.com" @@ -204,6 +205,10 @@ func (c *hunyuanClient) Invoke(ctx context.Context, mc *config.ModelConfig, prom return out.Response.Choices[0].Message.Content, nil } +func (c *hunyuanClient) Stream(ctx context.Context, mc *config.ModelConfig, prompt string, onDelta func(string)) (string, error) { + return "", core.ErrStreamNotSupported +} + func effectiveTimeout(v time.Duration, def time.Duration) 
time.Duration { if v <= 0 { return def diff --git a/backend/internal/server/ai/drivers/hunyuan/vlm.go b/backend/internal/server/ai/drivers/hunyuan/vlm.go new file mode 100644 index 00000000..92179f6b --- /dev/null +++ b/backend/internal/server/ai/drivers/hunyuan/vlm.go @@ -0,0 +1,20 @@ +package hunyuan + +import ( + "context" + "errors" + + "github.com/ArtisanCloud/PowerX/internal/server/agent/contract" +) + +type vlmClient struct{} + +func NewVLMClient() *vlmClient { return &vlmClient{} } + +func (c *vlmClient) Invoke(ctx context.Context, in contract.VLMRequest) (*contract.VLMResponse, error) { + return nil, errors.New("vlm provider not implemented") +} + +func (c *vlmClient) Stream(ctx context.Context, in contract.VLMRequest, onDelta func(string)) (*contract.VLMResponse, error) { + return nil, errors.New("vlm provider not implemented") +} diff --git a/backend/internal/server/agent/intent/embed/ollama_vectorizer.go b/backend/internal/server/ai/drivers/ollama/embedding.go similarity index 99% rename from backend/internal/server/agent/intent/embed/ollama_vectorizer.go rename to backend/internal/server/ai/drivers/ollama/embedding.go index 59795270..1f692636 100644 --- a/backend/internal/server/agent/intent/embed/ollama_vectorizer.go +++ b/backend/internal/server/ai/drivers/ollama/embedding.go @@ -1,5 +1,5 @@ // services/agent/intent/embed/ollama_vectorizer.go -package embed +package ollama import ( "bytes" diff --git a/backend/internal/server/agent/drivers/eino/llm/ollama.go b/backend/internal/server/ai/drivers/ollama/llm.go similarity index 95% rename from backend/internal/server/agent/drivers/eino/llm/ollama.go rename to backend/internal/server/ai/drivers/ollama/llm.go index c7167e3a..c9b2d3a4 100644 --- a/backend/internal/server/agent/drivers/eino/llm/ollama.go +++ b/backend/internal/server/ai/drivers/ollama/llm.go @@ -1,4 +1,4 @@ -package llm +package ollama import ( "bufio" @@ -7,7 +7,8 @@ import ( "encoding/json" "errors" "fmt" - 
"github.com/ArtisanCloud/PowerX/internal/server/agent/drivers/eino/config" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/config" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/core" "io" "net/http" "strings" @@ -24,7 +25,7 @@ import ( type ollamaClient struct{} -func NewOllamaClient() LLMClient { return &ollamaClient{} } +func NewLLMClient() *ollamaClient { return &ollamaClient{} } type ollamaChatReq struct { Model string `json:"model"` @@ -149,7 +150,7 @@ func (c *ollamaClient) Stream(ctx context.Context, mc *config.ModelConfig, promp // Ollama 的流是 NDJSON。如果返回不是 2xx,认为不支持或失败 if resp.StatusCode/100 != 2 { // 让上层决定是否回退到 Invoke - return "", ErrStreamNotSupported + return "", core.ErrStreamNotSupported } reader := bufio.NewScanner(resp.Body) diff --git a/backend/internal/server/agent/intent/embed/openai_vectorizer.go b/backend/internal/server/ai/drivers/openai/embedding.go similarity index 88% rename from backend/internal/server/agent/intent/embed/openai_vectorizer.go rename to backend/internal/server/ai/drivers/openai/embedding.go index 99dc4025..d96c111b 100644 --- a/backend/internal/server/agent/intent/embed/openai_vectorizer.go +++ b/backend/internal/server/ai/drivers/openai/embedding.go @@ -1,10 +1,11 @@ -package embed +package openai import ( "bytes" "context" "encoding/json" "fmt" + "io" "net/http" "time" ) @@ -100,11 +101,17 @@ func (e *OpenAIEmbedder) embedOnce(ctx context.Context, batch []string) ([][]flo } defer resp.Body.Close() if resp.StatusCode/100 != 2 { - return nil, fmt.Errorf("openai embeddings HTTP %d", resp.StatusCode) + body, _ := io.ReadAll(resp.Body) + msg := string(bytes.TrimSpace(body)) + if msg != "" { + return nil, fmt.Errorf("openai embeddings HTTP %d (%s): %s", resp.StatusCode, e.endpoint(), msg) + } + return nil, fmt.Errorf("openai embeddings HTTP %d (%s)", resp.StatusCode, e.endpoint()) } var or openaiEmbResp - if err := json.NewDecoder(resp.Body).Decode(&or); err != nil { + body, _ := io.ReadAll(resp.Body) 
+ if err := json.Unmarshal(body, &or); err != nil { return nil, err } // OpenAI 不保证返回顺序绝对与输入一致,但一般 index 对应;按 index 排序更严谨(此处直接按顺序读取) diff --git a/backend/internal/server/ai/drivers/openai/image.go b/backend/internal/server/ai/drivers/openai/image.go new file mode 100644 index 00000000..0c335ea4 --- /dev/null +++ b/backend/internal/server/ai/drivers/openai/image.go @@ -0,0 +1,314 @@ +package openai + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" + + "github.com/ArtisanCloud/PowerX/internal/server/agent/contract" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/config" + "github.com/ArtisanCloud/PowerX/pkg/utils/logger" +) + +const defaultOpenAIImagePath = "/v1/images/generations" + +type imageClient struct { + rawProvider string +} + +func NewImageClient(rawProvider string) *imageClient { + return &imageClient{rawProvider: strings.TrimSpace(rawProvider)} +} + +func (c *imageClient) Cap() contract.ModelCapabilities { + return contract.ModelCapabilities{} +} + +func (c *imageClient) Health(ctx context.Context) error { + return nil +} + +type openAIImageReq struct { + Model string `json:"model"` + Prompt string `json:"prompt"` + Size string `json:"size,omitempty"` + Quality string `json:"quality,omitempty"` + OutputFormat string `json:"output_format,omitempty"` + ResponseFormat string `json:"response_format,omitempty"` + N int `json:"n,omitempty"` + Background string `json:"background,omitempty"` + OutputCompression int `json:"output_compression,omitempty"` +} + +type openAIImageResp struct { + Data []struct { + B64JSON string `json:"b64_json"` + URL string `json:"url"` + } `json:"data"` + Usage map[string]int `json:"usage"` + Error *struct { + Message string `json:"message"` + Type string `json:"type,omitempty"` + Code any `json:"code,omitempty"` + Param any `json:"param,omitempty"` + } `json:"error,omitempty"` +} + +func (c *imageClient) Generate(ctx 
context.Context, in contract.ImageRequest) (*contract.ImageResponse, error) { + mc, err := modelConfigFromRuntime(in.Runtime) + if err != nil { + return nil, err + } + if mc == nil { + return nil, errors.New("openai image: missing model config") + } + if strings.TrimSpace(mc.AzureDeployment) != "" { + return nil, errors.New("openai image: azure deployment not supported yet") + } + prompt := strings.TrimSpace(in.Prompt) + if prompt == "" { + return nil, errors.New("openai image: prompt required") + } + if len(in.RefImages) > 0 { + return nil, errors.New("openai image: reference images not supported") + } + + req := openAIImageReq{ + Model: strings.TrimSpace(mc.Model), + Prompt: prompt, + Size: strings.TrimSpace(in.Size), + Quality: normalizeImageQuality(strings.TrimSpace(in.Quality), mc.Model), + OutputFormat: normalizeImageFormat(strings.TrimSpace(in.Format)), + } + + if in.Runtime != nil { + if v, ok := in.Runtime["response_format"]; ok { + if s, ok2 := v.(string); ok2 { + req.ResponseFormat = strings.TrimSpace(s) + } + } + if v, ok := in.Runtime["n"]; ok { + if n := intFromRuntime(v); n > 0 { + req.N = n + } + } + if v, ok := in.Runtime["background"]; ok { + if s, ok2 := v.(string); ok2 { + req.Background = strings.TrimSpace(s) + } + } + if v, ok := in.Runtime["output_compression"]; ok { + if n := intFromRuntime(v); n > 0 { + req.OutputCompression = n + } + } + } + + body, err := json.Marshal(req) + if err != nil { + return nil, err + } + + endpoint, headers, err := buildImageEndpointAndHeaders(mc) + if err != nil { + return nil, err + } + + httpClient := imageHTTPClient(mc) + httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body)) + if err != nil { + return nil, err + } + httpReq.Header = headers + + start := time.Now() + resp, err := httpClient.Do(httpReq) + if err != nil { + return nil, err + } + defer resp.Body.Close() + logger.InfoF(ctx, "[ai-image] openai http_call provider=%s model=%s status=%d latency_ms=%d", 
strings.TrimSpace(c.rawProvider), strings.TrimSpace(mc.Model), resp.StatusCode, time.Since(start).Milliseconds()) + + raw, _ := io.ReadAll(io.LimitReader(resp.Body, 8<<20)) + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return nil, fmt.Errorf("openai image status=%d body=%s", resp.StatusCode, strings.TrimSpace(string(raw))) + } + + var out openAIImageResp + if err := json.Unmarshal(raw, &out); err != nil { + return nil, err + } + if out.Error != nil && strings.TrimSpace(out.Error.Message) != "" { + return nil, errors.New(out.Error.Message) + } + if len(out.Data) == 0 { + return nil, errors.New("openai image: empty response") + } + + imgs := make([][]byte, 0, len(out.Data)) + urls := make([]string, 0, len(out.Data)) + for _, item := range out.Data { + if strings.TrimSpace(item.B64JSON) != "" { + decoded, err := base64.StdEncoding.DecodeString(item.B64JSON) + if err != nil { + return nil, fmt.Errorf("openai image: decode base64 failed: %w", err) + } + imgs = append(imgs, decoded) + } else if strings.TrimSpace(item.URL) != "" { + urls = append(urls, strings.TrimSpace(item.URL)) + } + } + return &contract.ImageResponse{ + Images: imgs, + ImageURLs: urls, + Provider: strings.TrimSpace(c.rawProvider), + Model: strings.TrimSpace(mc.Model), + Usage: out.Usage, + LatencyMS: int(time.Since(start).Milliseconds()), + }, nil +} + +func intFromRuntime(val any) int { + switch v := val.(type) { + case int: + return v + case int64: + return int(v) + case float64: + return int(v) + case float32: + return int(v) + case json.Number: + if n, err := v.Int64(); err == nil { + return int(n) + } + case string: + if n, err := strconv.Atoi(strings.TrimSpace(v)); err == nil { + return n + } + } + return 0 +} + +func modelConfigFromRuntime(runtime map[string]any) (*config.ModelConfig, error) { + if runtime == nil { + return nil, errors.New("image runtime config missing") + } + if raw, ok := runtime["config"]; ok { + switch v := raw.(type) { + case *config.ModelConfig: + return v, nil 
+ case config.ModelConfig: + return &v, nil + } + } + if raw, ok := runtime["model_config"]; ok { + switch v := raw.(type) { + case *config.ModelConfig: + return v, nil + case config.ModelConfig: + return &v, nil + } + } + return nil, errors.New("image runtime config invalid") +} + +func buildImageEndpointAndHeaders(mc *config.ModelConfig) (string, http.Header, error) { + base := strings.TrimRight(mc.Endpoint, "/") + if base == "" { + base = "https://api.openai.com" + } + if strings.TrimSpace(mc.APIKey) == "" { + return "", nil, errors.New("openai image: missing api_key") + } + h := http.Header{} + h.Set("Authorization", "Bearer "+strings.TrimSpace(mc.APIKey)) + h.Set("Content-Type", "application/json") + if strings.TrimSpace(mc.Organization) != "" { + h.Set("OpenAI-Organization", strings.TrimSpace(mc.Organization)) + } + path := resolveImageAPIPath(mc) + return joinEndpoint(base, path), h, nil +} + +func resolveImageAPIPath(mc *config.ModelConfig) string { + if mc != nil && mc.Extra != nil { + if raw, ok := mc.Extra["api_path"]; ok { + if s, ok2 := raw.(string); ok2 { + s = strings.TrimSpace(s) + if s != "" { + if !strings.HasPrefix(s, "/") { + s = "/" + s + } + return s + } + } + } + } + return defaultOpenAIImagePath +} + +func imageHTTPClient(mc *config.ModelConfig) *http.Client { + to := mc.Timeout + if to <= 0 { + to = 5 * time.Minute + } + return &http.Client{Timeout: to} +} + +func normalizeImageQuality(q, model string) string { + if q == "" { + return "" + } + lower := strings.ToLower(strings.TrimSpace(q)) + modelLower := strings.ToLower(strings.TrimSpace(model)) + if strings.HasPrefix(modelLower, "gpt-image") { + switch lower { + case "standard": + return "medium" + case "hd": + return "high" + case "auto", "low", "medium", "high": + return lower + } + return lower + } + if strings.Contains(modelLower, "dall-e-3") { + switch lower { + case "hd", "standard": + return lower + case "high": + return "hd" + default: + return "standard" + } + } + if 
strings.Contains(modelLower, "dall-e-2") { + return "standard" + } + return lower +} + +func normalizeImageFormat(format string) string { + if format == "" { + return "" + } + lower := strings.ToLower(strings.TrimSpace(format)) + switch lower { + case "png", "jpeg", "jpg", "webp": + if lower == "jpg" { + return "jpeg" + } + return lower + default: + return lower + } +} diff --git a/backend/internal/server/agent/drivers/eino/llm/openai.go b/backend/internal/server/ai/drivers/openai/llm.go similarity index 63% rename from backend/internal/server/agent/drivers/eino/llm/openai.go rename to backend/internal/server/ai/drivers/openai/llm.go index d4011bb7..0f781def 100644 --- a/backend/internal/server/agent/drivers/eino/llm/openai.go +++ b/backend/internal/server/ai/drivers/openai/llm.go @@ -1,4 +1,4 @@ -package llm +package openai import ( "bufio" @@ -7,17 +7,27 @@ import ( "encoding/json" "errors" "fmt" - "github.com/ArtisanCloud/PowerX/internal/server/agent/drivers/eino/config" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/config" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/core" + "github.com/ArtisanCloud/PowerX/pkg/corex/audit" + "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" + "github.com/ArtisanCloud/PowerX/pkg/utils/logger" "io" "net/http" + "net/url" "strings" "time" + + "go.uber.org/zap" ) -// 兼容现有接口 -type openaiClient struct{ NoopStream } +type openaiClient struct { + rawProvider string +} -func NewOpenAIClient() LLMClient { return &openaiClient{} } +func NewLLMClient(rawProvider string) *openaiClient { + return &openaiClient{rawProvider: strings.TrimSpace(rawProvider)} +} const defaultOpenAIPath = "/v1/chat/completions" @@ -131,6 +141,43 @@ func (c *openaiClient) makeBody(mc *config.ModelConfig, userMessage string, stre return json.Marshal(m) } +func sanitizeLLMURL(raw string) string { + raw = strings.TrimSpace(raw) + if raw == "" { + return "" + } + u, err := url.Parse(raw) + if err != nil || u == nil { + if len(raw) > 
300 { + return raw[:300] + "…" + } + return raw + } + u.Fragment = "" + s := u.String() + if len(s) > 300 { + return s[:300] + "…" + } + return s +} + +func (c *openaiClient) logRequest(ctx context.Context, reqURL string, mc *config.ModelConfig, streaming bool) { + provider := strings.TrimSpace(c.rawProvider) + if provider == "" { + provider = "openai" + } + logger.Info(ctx, "llm_request", + zap.String("trace_id", audit.GetTraceID(ctx)), + zap.String("tenant_uuid", reqctx.GetTenantUUID(ctx)), + zap.String("driver", "openai"), + zap.String("provider", provider), + zap.String("model", strings.TrimSpace(mc.Model)), + zap.Bool("stream", streaming), + zap.Bool("azure", strings.TrimSpace(mc.AzureDeployment) != ""), + zap.String("url", sanitizeLLMURL(reqURL)), + ) +} + func (c *openaiClient) httpClient(mc *config.ModelConfig) *http.Client { to := mc.Timeout if to <= 0 { @@ -144,10 +191,12 @@ func (c *openaiClient) httpClient(mc *config.ModelConfig) *http.Client { /* ------------ Invoke(非流) ------------ */ func (c *openaiClient) Invoke(ctx context.Context, mc *config.ModelConfig, userMessage string) (string, error) { + start := time.Now() url, headers, err := c.buildEndpointAndHeaders(mc, false) if err != nil { return "", err } + c.logRequest(ctx, url, mc, false) body, err := c.makeBody(mc, userMessage, false) if err != nil { return "", err @@ -158,20 +207,58 @@ func (c *openaiClient) Invoke(ctx context.Context, mc *config.ModelConfig, userM resp, err := c.httpClient(mc).Do(req) if err != nil { + logger.Info(ctx, "llm_response", + zap.String("trace_id", audit.GetTraceID(ctx)), + zap.String("driver", "openai"), + zap.String("provider", strings.TrimSpace(c.rawProvider)), + zap.String("model", strings.TrimSpace(mc.Model)), + zap.Bool("stream", false), + zap.Int64("latency_ms", time.Since(start).Milliseconds()), + zap.String("error", err.Error()), + ) return "", err } defer resp.Body.Close() bt, _ := io.ReadAll(resp.Body) if resp.StatusCode/100 != 2 { + logger.Info(ctx, 
"llm_response", + zap.String("trace_id", audit.GetTraceID(ctx)), + zap.String("driver", "openai"), + zap.String("provider", strings.TrimSpace(c.rawProvider)), + zap.String("model", strings.TrimSpace(mc.Model)), + zap.Bool("stream", false), + zap.Int("status", resp.StatusCode), + zap.Int64("latency_ms", time.Since(start).Milliseconds()), + ) return "", fmt.Errorf("openai invoke url=%s status=%d body=%s", url, resp.StatusCode, string(bt)) } var jr openAINonStreamResp if err := json.Unmarshal(bt, &jr); err != nil { + logger.Info(ctx, "llm_response", + zap.String("trace_id", audit.GetTraceID(ctx)), + zap.String("driver", "openai"), + zap.String("provider", strings.TrimSpace(c.rawProvider)), + zap.String("model", strings.TrimSpace(mc.Model)), + zap.Bool("stream", false), + zap.Int("status", resp.StatusCode), + zap.Int64("latency_ms", time.Since(start).Milliseconds()), + zap.String("error", err.Error()), + ) return "", fmt.Errorf("openai decode failed: %w (body=%s)", err, string(bt)) } if jr.Error != nil && strings.TrimSpace(jr.Error.Message) != "" { + logger.Info(ctx, "llm_response", + zap.String("trace_id", audit.GetTraceID(ctx)), + zap.String("driver", "openai"), + zap.String("provider", strings.TrimSpace(c.rawProvider)), + zap.String("model", strings.TrimSpace(mc.Model)), + zap.Bool("stream", false), + zap.Int("status", resp.StatusCode), + zap.Int64("latency_ms", time.Since(start).Milliseconds()), + zap.String("error", strings.TrimSpace(jr.Error.Message)), + ) return "", fmt.Errorf("openai error: %s", strings.TrimSpace(jr.Error.Message)) } if len(jr.Choices) == 0 { @@ -198,19 +285,40 @@ func (c *openaiClient) Invoke(ctx context.Context, mc *config.ModelConfig, userM if len(trim) > 2000 { trim = trim[:2000] + "…" } + logger.Info(ctx, "llm_response", + zap.String("trace_id", audit.GetTraceID(ctx)), + zap.String("driver", "openai"), + zap.String("provider", strings.TrimSpace(c.rawProvider)), + zap.String("model", strings.TrimSpace(mc.Model)), + zap.Bool("stream", 
false), + zap.Int("status", resp.StatusCode), + zap.Int64("latency_ms", time.Since(start).Milliseconds()), + zap.String("error", "empty choices"), + ) return "", fmt.Errorf("openai: empty choices (url=%s body=%s)", url, trim) } + logger.Info(ctx, "llm_response", + zap.String("trace_id", audit.GetTraceID(ctx)), + zap.String("driver", "openai"), + zap.String("provider", strings.TrimSpace(c.rawProvider)), + zap.String("model", strings.TrimSpace(mc.Model)), + zap.Bool("stream", false), + zap.Int("status", resp.StatusCode), + zap.Int64("latency_ms", time.Since(start).Milliseconds()), + ) return jr.Choices[0].Message.Content, nil } /* ------------ Stream(优先流;不支持时自动回退) ------------ */ func (c *openaiClient) Stream(ctx context.Context, mc *config.ModelConfig, prompt string, onDelta func(string)) (string, error) { + start := time.Now() // 若上层“配置禁用流”,你可以在外部判断;这里即便请求流,也能自动回退到 Invoke url, headers, err := c.buildEndpointAndHeaders(mc, true) if err != nil { return "", err } + c.logRequest(ctx, url, mc, true) body, err := c.makeBody(mc, prompt, true) if err != nil { return "", err @@ -221,6 +329,15 @@ func (c *openaiClient) Stream(ctx context.Context, mc *config.ModelConfig, promp resp, err := c.httpClient(mc).Do(req) if err != nil { + logger.Info(ctx, "llm_response", + zap.String("trace_id", audit.GetTraceID(ctx)), + zap.String("driver", "openai"), + zap.String("provider", strings.TrimSpace(c.rawProvider)), + zap.String("model", strings.TrimSpace(mc.Model)), + zap.Bool("stream", true), + zap.Int64("latency_ms", time.Since(start).Milliseconds()), + zap.String("error", err.Error()), + ) return "", err } defer resp.Body.Close() @@ -236,6 +353,17 @@ func (c *openaiClient) Stream(ctx context.Context, mc *config.ModelConfig, promp // 仍然尝试回退 _ = bt } + logger.Info(ctx, "llm_response", + zap.String("trace_id", audit.GetTraceID(ctx)), + zap.String("driver", "openai"), + zap.String("provider", strings.TrimSpace(c.rawProvider)), + zap.String("model", strings.TrimSpace(mc.Model)), + 
zap.Bool("stream", true), + zap.Int("status", resp.StatusCode), + zap.Int64("latency_ms", time.Since(start).Milliseconds()), + zap.Bool("fallback_invoke", true), + zap.String("content_type", resp.Header.Get("Content-Type")), + ) // 回退 return c.Invoke(ctx, mc, prompt) } @@ -261,7 +389,7 @@ func (c *openaiClient) Stream(ctx context.Context, mc *config.ModelConfig, promp if !bytes.HasPrefix(line, []byte("data:")) { continue } - payload := TrimDataPrefix(line) // 你已有的小工具 + payload := core.TrimDataPrefix(line) // 结束标志 if bytes.EqualFold(bytes.TrimSpace(payload), []byte("[DONE]")) { @@ -284,6 +412,15 @@ func (c *openaiClient) Stream(ctx context.Context, mc *config.ModelConfig, promp final.WriteString(delta) } } + logger.Info(ctx, "llm_response", + zap.String("trace_id", audit.GetTraceID(ctx)), + zap.String("driver", "openai"), + zap.String("provider", strings.TrimSpace(c.rawProvider)), + zap.String("model", strings.TrimSpace(mc.Model)), + zap.Bool("stream", true), + zap.Int("status", resp.StatusCode), + zap.Int64("latency_ms", time.Since(start).Milliseconds()), + ) return final.String(), nil } diff --git a/backend/internal/server/ai/drivers/openai/vlm.go b/backend/internal/server/ai/drivers/openai/vlm.go new file mode 100644 index 00000000..c65f9ad0 --- /dev/null +++ b/backend/internal/server/ai/drivers/openai/vlm.go @@ -0,0 +1,20 @@ +package openai + +import ( + "context" + "errors" + + "github.com/ArtisanCloud/PowerX/internal/server/agent/contract" +) + +type vlmClient struct{} + +func NewVLMClient() *vlmClient { return &vlmClient{} } + +func (c *vlmClient) Invoke(ctx context.Context, in contract.VLMRequest) (*contract.VLMResponse, error) { + return nil, errors.New("vlm provider not implemented") +} + +func (c *vlmClient) Stream(ctx context.Context, in contract.VLMRequest, onDelta func(string)) (*contract.VLMResponse, error) { + return nil, errors.New("vlm provider not implemented") +} diff --git a/backend/internal/server/ai/drivers/qwen/common.go 
b/backend/internal/server/ai/drivers/qwen/common.go new file mode 100644 index 00000000..102eaf49 --- /dev/null +++ b/backend/internal/server/ai/drivers/qwen/common.go @@ -0,0 +1,53 @@ +package qwen + +import ( + "errors" + "strings" + + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/config" +) + +const defaultQwenEndpoint = "https://dashscope.aliyuncs.com/compatible-mode/v1" + +func modelConfigFromRuntime(runtime map[string]any) (*config.ModelConfig, error) { + if runtime == nil { + return nil, errors.New("qwen runtime config missing") + } + if raw, ok := runtime["config"]; ok { + switch v := raw.(type) { + case *config.ModelConfig: + return v, nil + case config.ModelConfig: + return &v, nil + } + } + if raw, ok := runtime["model_config"]; ok { + switch v := raw.(type) { + case *config.ModelConfig: + return v, nil + case config.ModelConfig: + return &v, nil + } + } + return nil, errors.New("qwen runtime config invalid") +} + +func resolveEndpoint(base string) string { + base = strings.TrimSpace(base) + if base == "" { + return defaultQwenEndpoint + } + return strings.TrimRight(base, "/") +} + +func resolveModel(model string) string { + raw := strings.TrimSpace(model) + if raw == "" { + return "" + } + if strings.Contains(raw, ":") { + parts := strings.SplitN(raw, ":", 2) + return strings.TrimSpace(parts[1]) + } + return raw +} diff --git a/backend/internal/server/ai/drivers/qwen/tts.go b/backend/internal/server/ai/drivers/qwen/tts.go new file mode 100644 index 00000000..58c21fb2 --- /dev/null +++ b/backend/internal/server/ai/drivers/qwen/tts.go @@ -0,0 +1,142 @@ +package qwen + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/ArtisanCloud/PowerX/internal/server/agent/contract" +) + +type ttsClient struct{} + +func NewTTSClient() *ttsClient { return &ttsClient{} } + +func (c *ttsClient) Synthesize(ctx context.Context, in contract.TTSRequest) (*contract.TTSResponse, 
error) { + mc, err := modelConfigFromRuntime(in.Runtime) + if err != nil { + return nil, err + } + if mc == nil { + return nil, fmt.Errorf("qwen tts: missing model config") + } + if strings.TrimSpace(mc.APIKey) == "" { + return nil, fmt.Errorf("qwen tts: missing api_key") + } + model := resolveModel(mc.Model) + if model == "" { + return nil, fmt.Errorf("qwen tts: missing model") + } + text := strings.TrimSpace(in.Text) + if text == "" { + return nil, fmt.Errorf("qwen tts: empty text") + } + + body := map[string]any{ + "model": model, + "input": text, + } + if voice := strings.TrimSpace(in.Voice); voice != "" { + body["voice"] = voice + } + if speed := in.Speed; speed > 0 { + body["speed"] = speed + } + if format := strings.TrimSpace(in.Format); format != "" { + body["response_format"] = format + } + if mc.Extra != nil { + for k, v := range mc.Extra { + if _, exists := body[k]; exists { + continue + } + body[k] = v + } + } + + raw, err := json.Marshal(body) + if err != nil { + return nil, err + } + base := resolveEndpoint(mc.Endpoint) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, base+"/audio/speech", bytes.NewReader(raw)) + if err != nil { + return nil, err + } + req.Header.Set("Authorization", "Bearer "+strings.TrimSpace(mc.APIKey)) + req.Header.Set("Content-Type", "application/json") + + start := time.Now() + resp, err := (&http.Client{Timeout: resolveTimeout(mc.Timeout)}).Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + payload, _ := io.ReadAll(io.LimitReader(resp.Body, 16<<20)) + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return nil, fmt.Errorf("qwen tts status=%d body=%s", resp.StatusCode, strings.TrimSpace(string(payload))) + } + + ct := strings.ToLower(strings.TrimSpace(resp.Header.Get("Content-Type"))) + if strings.Contains(ct, "application/json") { + var out struct { + Audio string `json:"audio"` + AudioBase64 string `json:"audio_base64"` + Output struct { + AudioURL string `json:"audio_url"` + 
Audio string `json:"audio"` + AudioBase64 string `json:"audio_base64"` + } `json:"output"` + Error *struct { + Message string `json:"message"` + } `json:"error"` + Usage map[string]int `json:"usage"` + } + if err := json.Unmarshal(payload, &out); err != nil { + return nil, fmt.Errorf("qwen tts decode failed: %w", err) + } + if out.Error != nil && strings.TrimSpace(out.Error.Message) != "" { + return nil, fmt.Errorf("qwen tts error: %s", strings.TrimSpace(out.Error.Message)) + } + audioURL := strings.TrimSpace(out.Output.AudioURL) + audioBase64 := firstNonEmpty(out.AudioBase64, out.Audio, out.Output.AudioBase64, out.Output.Audio) + var audio []byte + if strings.TrimSpace(audioBase64) != "" { + decoded, err := base64.StdEncoding.DecodeString(strings.TrimSpace(audioBase64)) + if err == nil { + audio = decoded + } + } + return &contract.TTSResponse{ + Audio: audio, + AudioURL: audioURL, + Provider: strings.TrimSpace(mc.Provider), + Model: strings.TrimSpace(mc.Model), + Usage: out.Usage, + LatencyMS: int(time.Since(start).Milliseconds()), + }, nil + } + + return &contract.TTSResponse{ + Audio: payload, + Provider: strings.TrimSpace(mc.Provider), + Model: strings.TrimSpace(mc.Model), + LatencyMS: int(time.Since(start).Milliseconds()), + }, nil +} + +func firstNonEmpty(items ...string) string { + for _, item := range items { + if strings.TrimSpace(item) != "" { + return strings.TrimSpace(item) + } + } + return "" +} diff --git a/backend/internal/server/ai/drivers/qwen/vlm.go b/backend/internal/server/ai/drivers/qwen/vlm.go new file mode 100644 index 00000000..d17654cf --- /dev/null +++ b/backend/internal/server/ai/drivers/qwen/vlm.go @@ -0,0 +1,212 @@ +package qwen + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/ArtisanCloud/PowerX/internal/server/agent/contract" +) + +type vlmClient struct{} + +func NewVLMClient() *vlmClient { return &vlmClient{} } + +func (c *vlmClient) Invoke(ctx context.Context, in 
contract.VLMRequest) (*contract.VLMResponse, error) { + mc, err := modelConfigFromRuntime(in.Runtime) + if err != nil { + return nil, err + } + if mc == nil { + return nil, fmt.Errorf("qwen vlm: missing model config") + } + if strings.TrimSpace(mc.APIKey) == "" { + return nil, fmt.Errorf("qwen vlm: missing api_key") + } + model := resolveModel(mc.Model) + if model == "" { + return nil, fmt.Errorf("qwen vlm: missing model") + } + + reqBody := map[string]any{ + "model": model, + "messages": buildMessages(in.Messages), + "stream": false, + } + if in.Temperature > 0 { + reqBody["temperature"] = in.Temperature + } + if in.TopP > 0 { + reqBody["top_p"] = in.TopP + } + if in.MaxTokens > 0 { + reqBody["max_tokens"] = in.MaxTokens + } + if in.JSONMode { + reqBody["response_format"] = map[string]any{"type": "json_object"} + } + + rawReq, err := json.Marshal(reqBody) + if err != nil { + return nil, err + } + + base := resolveEndpoint(mc.Endpoint) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, base+"/chat/completions", bytes.NewReader(rawReq)) + if err != nil { + return nil, err + } + req.Header.Set("Authorization", "Bearer "+strings.TrimSpace(mc.APIKey)) + req.Header.Set("Content-Type", "application/json") + + client := &http.Client{Timeout: resolveTimeout(mc.Timeout)} + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, _ := io.ReadAll(io.LimitReader(resp.Body, 8<<20)) + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + msg := strings.TrimSpace(string(body)) + // 常见误配:国际站(ap-southeast-1/us-east-1) Key 调了中国内地域名,返回 invalid_api_key。 + if resp.StatusCode == http.StatusUnauthorized && + strings.Contains(strings.ToLower(msg), "invalid_api_key") && + strings.Contains(strings.ToLower(base), "dashscope.aliyuncs.com") { + return nil, fmt.Errorf( + "qwen vlm 鉴权失败:当前 endpoint=%s 可能与 API Key 所属地域不匹配。若 Key 来自 ap-southeast-1,请使用 https://dashscope-intl.aliyuncs.com/compatible-mode/v1;原始响应=%s", + base, + msg, + ) + } + 
return nil, fmt.Errorf("qwen vlm status=%d body=%s", resp.StatusCode, msg) + } + + var result struct { + Choices []struct { + Message struct { + Content any `json:"content"` + } `json:"message"` + } `json:"choices"` + Usage map[string]int `json:"usage"` + Error *struct { + Message string `json:"message"` + } `json:"error"` + } + if err := json.Unmarshal(body, &result); err != nil { + return nil, fmt.Errorf("qwen vlm decode failed: %w", err) + } + if result.Error != nil && strings.TrimSpace(result.Error.Message) != "" { + return nil, fmt.Errorf("qwen vlm error: %s", strings.TrimSpace(result.Error.Message)) + } + if len(result.Choices) == 0 { + return nil, fmt.Errorf("qwen vlm: empty choices") + } + + return &contract.VLMResponse{ + Text: contentToText(result.Choices[0].Message.Content), + Usage: result.Usage, + Provider: strings.TrimSpace(mc.Provider), + Model: strings.TrimSpace(mc.Model), + }, nil +} + +func (c *vlmClient) Stream(ctx context.Context, in contract.VLMRequest, onDelta func(string)) (*contract.VLMResponse, error) { + _ = onDelta + return c.Invoke(ctx, in) +} + +func resolveTimeout(timeout time.Duration) time.Duration { + if timeout <= 0 { + return 120 * time.Second + } + return timeout +} + +func buildMessages(messages []contract.Message) []map[string]any { + if len(messages) == 0 { + return []map[string]any{ + { + "role": "user", + "content": []map[string]any{ + {"type": "text", "text": "Describe the image."}, + }, + }, + } + } + out := make([]map[string]any, 0, len(messages)) + for _, msg := range messages { + role := strings.TrimSpace(msg.Role) + if role == "" { + role = "user" + } + content := make([]map[string]any, 0, len(msg.Content)) + for _, part := range msg.Content { + switch strings.ToLower(strings.TrimSpace(part.Type)) { + case contract.ContentTypeImageURL: + if strings.TrimSpace(part.URL) == "" { + continue + } + content = append(content, map[string]any{ + "type": "image_url", + "image_url": map[string]any{ + "url": 
strings.TrimSpace(part.URL), + }, + }) + default: + txt := strings.TrimSpace(part.Text) + if txt == "" { + continue + } + content = append(content, map[string]any{ + "type": "text", + "text": txt, + }) + } + } + if len(content) == 0 { + continue + } + out = append(out, map[string]any{ + "role": role, + "content": content, + }) + } + if len(out) == 0 { + out = append(out, map[string]any{ + "role": "user", + "content": []map[string]any{ + {"type": "text", "text": "Describe the image."}, + }, + }) + } + return out +} + +func contentToText(content any) string { + switch v := content.(type) { + case string: + return strings.TrimSpace(v) + case []any: + parts := make([]string, 0, len(v)) + for _, item := range v { + m, ok := item.(map[string]any) + if !ok { + continue + } + txt, _ := m["text"].(string) + if strings.TrimSpace(txt) == "" { + continue + } + parts = append(parts, strings.TrimSpace(txt)) + } + return strings.Join(parts, "\n") + default: + return "" + } +} diff --git a/backend/internal/server/ai/drivers/volcengine/image.go b/backend/internal/server/ai/drivers/volcengine/image.go new file mode 100644 index 00000000..adf18807 --- /dev/null +++ b/backend/internal/server/ai/drivers/volcengine/image.go @@ -0,0 +1,633 @@ +package volcengine + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/ArtisanCloud/PowerX/internal/server/agent/contract" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/config" + "github.com/ArtisanCloud/PowerX/pkg/utils/logger" +) + +const defaultVolcengineImageEndpoint = "https://visual.volcengineapi.com" + +const ( + defaultVolcengineAction = "CVSync2AsyncSubmitTask" + defaultVolcenginePollAction = "CVSync2AsyncGetResult" + defaultVolcengineVersion = "2022-08-31" + defaultVolcengineService = "cv" +) + +type imageClient struct { + rawProvider string +} + 
+func NewImageClient(rawProvider string) *imageClient { + return &imageClient{rawProvider: strings.TrimSpace(rawProvider)} +} + +func (c *imageClient) Cap() contract.ModelCapabilities { + return contract.ModelCapabilities{} +} + +func (c *imageClient) Health(ctx context.Context) error { + return nil +} + +type volcengineSubmitReq struct { + ReqKey string `json:"req_key"` + Prompt string `json:"prompt,omitempty"` + ImageURLs []string `json:"image_urls,omitempty"` + Size int `json:"size,omitempty"` + Width int `json:"width,omitempty"` + Height int `json:"height,omitempty"` + Scale float64 `json:"scale,omitempty"` + ForceSingle bool `json:"force_single,omitempty"` + MinRatio float64 `json:"min_ratio,omitempty"` + MaxRatio float64 `json:"max_ratio,omitempty"` +} + +type volcengineSubmitResp struct { + Code int `json:"code"` + Message string `json:"message"` + RequestID string `json:"request_id"` + Data *struct { + TaskID string `json:"task_id"` + } `json:"data"` +} + +type volcengineResultReq struct { + ReqKey string `json:"req_key"` + TaskID string `json:"task_id"` + ReqJSON string `json:"req_json,omitempty"` +} + +type volcengineResultData struct { + Status string `json:"status"` + ImageURLs []string `json:"image_urls"` + BinaryDataBase64 []string `json:"binary_data_base64"` +} + +type volcengineResultResp struct { + Code int `json:"code"` + Message string `json:"message"` + RequestID string `json:"request_id"` + Data *volcengineResultData `json:"data"` +} + +func (c *imageClient) Generate(ctx context.Context, in contract.ImageRequest) (*contract.ImageResponse, error) { + mc, err := modelConfigFromRuntime(in.Runtime) + if err != nil { + return nil, err + } + if mc == nil { + return nil, errors.New("volcengine image: missing model config") + } + prompt := strings.TrimSpace(in.Prompt) + if prompt == "" { + return nil, errors.New("volcengine image: prompt required") + } + if strings.TrimSpace(mc.SecretID) == "" || strings.TrimSpace(mc.SecretKey) == "" { + return nil, 
errors.New("volcengine image: missing secret_id/secret_key") + } + + modelID := resolveModelID(mc.Model) + if modelID == "" { + return nil, errors.New("volcengine image: missing req_key") + } + imageURLs, err := refImagesToURLs(in.RefImages) + if err != nil { + return nil, err + } + + submitReq := volcengineSubmitReq{ + ReqKey: modelID, + Prompt: prompt, + ImageURLs: imageURLs, + } + applySizeParams(&submitReq, in.Size) + applyRuntimeParams(&submitReq, in.Runtime) + applyExtraParams(&submitReq, mc.Extra) + + submitBody, err := json.Marshal(submitReq) + if err != nil { + return nil, err + } + + submitEndpoint, err := buildVolcengineEndpoint(mc, resolveAction(mc)) + if err != nil { + return nil, err + } + pollEndpoint, err := buildVolcengineEndpoint(mc, resolvePollAction(mc)) + if err != nil { + return nil, err + } + + start := time.Now() + submitResp, err := doVolcengineRequest[volcengineSubmitResp](ctx, submitEndpoint, mc, submitBody) + if err != nil { + return nil, err + } + if submitResp.Code != 10000 || submitResp.Data == nil || strings.TrimSpace(submitResp.Data.TaskID) == "" { + return nil, fmt.Errorf("volcengine image submit failed code=%d message=%s request_id=%s", submitResp.Code, submitResp.Message, submitResp.RequestID) + } + + pollReq := volcengineResultReq{ + ReqKey: modelID, + TaskID: submitResp.Data.TaskID, + ReqJSON: buildResultReqJSON(in.Runtime, mc.Extra), + } + pollBody, err := json.Marshal(pollReq) + if err != nil { + return nil, err + } + + images, urls, traceID, err := pollVolcengineResult(ctx, pollEndpoint, mc, pollBody) + if err != nil { + return nil, err + } + + return &contract.ImageResponse{ + Images: images, + ImageURLs: urls, + Provider: strings.TrimSpace(c.rawProvider), + Model: strings.TrimSpace(mc.Model), + LatencyMS: int(time.Since(start).Milliseconds()), + TraceID: traceID, + }, nil +} + +func modelConfigFromRuntime(runtime map[string]any) (*config.ModelConfig, error) { + if runtime == nil { + return nil, errors.New("image runtime 
config missing") + } + if raw, ok := runtime["config"]; ok { + switch v := raw.(type) { + case *config.ModelConfig: + return v, nil + case config.ModelConfig: + return &v, nil + } + } + if raw, ok := runtime["model_config"]; ok { + switch v := raw.(type) { + case *config.ModelConfig: + return v, nil + case config.ModelConfig: + return &v, nil + } + } + return nil, errors.New("image runtime config invalid") +} + +func buildVolcengineEndpoint(mc *config.ModelConfig, action string) (string, error) { + base := strings.TrimRight(mc.Endpoint, "/") + if base == "" { + base = defaultVolcengineImageEndpoint + } + version := resolveVersion(mc) + q := url.Values{} + q.Set("Action", action) + q.Set("Version", version) + return base + "/?" + q.Encode(), nil +} + +func resolveAction(mc *config.ModelConfig) string { + if mc != nil && mc.Extra != nil { + if v, ok := mc.Extra["action"]; ok { + if s, ok2 := v.(string); ok2 && strings.TrimSpace(s) != "" { + return strings.TrimSpace(s) + } + } + } + return defaultVolcengineAction +} + +func resolvePollAction(mc *config.ModelConfig) string { + if mc != nil && mc.Extra != nil { + if v, ok := mc.Extra["action_poll"]; ok { + if s, ok2 := v.(string); ok2 && strings.TrimSpace(s) != "" { + return strings.TrimSpace(s) + } + } + } + return defaultVolcenginePollAction +} + +func resolveVersion(mc *config.ModelConfig) string { + if mc != nil && mc.Extra != nil { + if v, ok := mc.Extra["version"]; ok { + if s, ok2 := v.(string); ok2 && strings.TrimSpace(s) != "" { + return strings.TrimSpace(s) + } + } + } + return defaultVolcengineVersion +} + +func resolveRegion(mc *config.ModelConfig) string { + region := "cn-north-1" + if mc != nil && strings.TrimSpace(mc.Region) != "" { + region = strings.TrimSpace(mc.Region) + } + return region +} + +func resolveService(mc *config.ModelConfig) string { + if mc != nil && mc.Extra != nil { + if v, ok := mc.Extra["service"]; ok { + if s, ok2 := v.(string); ok2 && strings.TrimSpace(s) != "" { + return 
strings.TrimSpace(s) + } + } + } + return defaultVolcengineService +} + +func resolveModelID(model string) string { + raw := strings.TrimSpace(model) + if raw == "" { + return "" + } + if strings.Contains(raw, ":") { + parts := strings.SplitN(raw, ":", 2) + return strings.TrimSpace(parts[1]) + } + return raw +} + +func refImagesToURLs(parts []contract.ContentPart) ([]string, error) { + if len(parts) == 0 { + return nil, nil + } + urls := make([]string, 0, len(parts)) + for _, part := range parts { + switch part.Type { + case contract.ContentTypeImageURL: + if strings.TrimSpace(part.URL) != "" { + urls = append(urls, strings.TrimSpace(part.URL)) + } + case contract.ContentTypeImageBase64: + return nil, errors.New("volcengine image: base64 reference images not supported") + } + } + return urls, nil +} + +func applySizeParams(req *volcengineSubmitReq, size string) { + if req == nil { + return + } + w, h := parseSize(size) + if w > 0 && h > 0 { + req.Width = w + req.Height = h + } +} + +func applyRuntimeParams(req *volcengineSubmitReq, runtime map[string]any) { + if req == nil || runtime == nil { + return + } + if v, ok := runtime["scale"]; ok { + if f, ok2 := toFloat64(v); ok2 { + req.Scale = f + } + } + if v, ok := runtime["force_single"]; ok { + if b, ok2 := v.(bool); ok2 { + req.ForceSingle = b + } + } + if v, ok := runtime["min_ratio"]; ok { + if f, ok2 := toFloat64(v); ok2 { + req.MinRatio = f + } + } + if v, ok := runtime["max_ratio"]; ok { + if f, ok2 := toFloat64(v); ok2 { + req.MaxRatio = f + } + } +} + +func applyExtraParams(req *volcengineSubmitReq, extra map[string]any) { + if req == nil || extra == nil { + return + } + if v, ok := extra["scale"]; ok { + if f, ok2 := toFloat64(v); ok2 { + req.Scale = f + } + } + if v, ok := extra["force_single"]; ok { + if b, ok2 := v.(bool); ok2 { + req.ForceSingle = b + } + } + if v, ok := extra["min_ratio"]; ok { + if f, ok2 := toFloat64(v); ok2 { + req.MinRatio = f + } + } + if v, ok := extra["max_ratio"]; ok { + if f, 
ok2 := toFloat64(v); ok2 { + req.MaxRatio = f + } + } +} + +func buildResultReqJSON(runtime map[string]any, extra map[string]any) string { + payload := map[string]any{"return_url": true} + if extra != nil { + if v, ok := extra["result_req_json"]; ok { + switch vv := v.(type) { + case string: + if strings.TrimSpace(vv) != "" { + return strings.TrimSpace(vv) + } + case map[string]any: + for k, val := range vv { + payload[k] = val + } + } + } + } + if runtime != nil { + if v, ok := runtime["result_req_json"]; ok { + switch vv := v.(type) { + case string: + if strings.TrimSpace(vv) != "" { + return strings.TrimSpace(vv) + } + case map[string]any: + for k, val := range vv { + payload[k] = val + } + } + } + } + if len(payload) == 0 { + return "" + } + b, err := json.Marshal(payload) + if err != nil { + return "" + } + return string(b) +} + +func pollVolcengineResult(ctx context.Context, endpoint string, mc *config.ModelConfig, body []byte) ([][]byte, []string, string, error) { + interval := 2 * time.Second + for { + select { + case <-ctx.Done(): + return nil, nil, "", ctx.Err() + case <-time.After(interval): + } + resp, err := doVolcengineRequest[volcengineResultResp](ctx, endpoint, mc, body) + if err != nil { + return nil, nil, "", err + } + if resp.Code != 10000 { + return nil, nil, resp.RequestID, fmt.Errorf("volcengine image result failed code=%d message=%s request_id=%s", resp.Code, resp.Message, resp.RequestID) + } + if resp.Data == nil { + return nil, nil, resp.RequestID, errors.New("volcengine image result missing data") + } + switch strings.ToLower(strings.TrimSpace(resp.Data.Status)) { + case "done": + images, urls, err := extractResultImages(resp.Data) + return images, urls, resp.RequestID, err + case "in_queue", "generating": + continue + case "not_found", "expired": + return nil, nil, resp.RequestID, fmt.Errorf("volcengine image result status=%s", resp.Data.Status) + default: + return nil, nil, resp.RequestID, fmt.Errorf("volcengine image result status=%s", 
resp.Data.Status) + } + } +} + +func extractResultImages(data *volcengineResultData) ([][]byte, []string, error) { + if data == nil { + return nil, nil, errors.New("volcengine image: empty result") + } + if len(data.ImageURLs) > 0 { + return nil, data.ImageURLs, nil + } + if len(data.BinaryDataBase64) == 0 { + return nil, nil, errors.New("volcengine image: empty result") + } + imgs := make([][]byte, 0, len(data.BinaryDataBase64)) + for _, item := range data.BinaryDataBase64 { + decoded, err := base64.StdEncoding.DecodeString(item) + if err != nil { + return nil, nil, fmt.Errorf("volcengine image: decode base64 failed: %w", err) + } + imgs = append(imgs, decoded) + } + return imgs, nil, nil +} + +func doVolcengineRequest[T any](ctx context.Context, endpoint string, mc *config.ModelConfig, body []byte) (*T, error) { + httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body)) + if err != nil { + return nil, err + } + httpReq.Header.Set("Content-Type", "application/json") + + if err := signVolcengineRequest(httpReq, mc.SecretID, mc.SecretKey, resolveRegion(mc), resolveService(mc), body); err != nil { + return nil, err + } + + start := time.Now() + httpClient := volcengineHTTPClient(mc) + resp, err := httpClient.Do(httpReq) + if err != nil { + return nil, err + } + defer resp.Body.Close() + logger.InfoF(ctx, "[ai-image] volcengine http_call provider=%s model=%s status=%d latency_ms=%d", strings.TrimSpace(mc.Provider), strings.TrimSpace(mc.Model), resp.StatusCode, time.Since(start).Milliseconds()) + + raw, _ := io.ReadAll(io.LimitReader(resp.Body, 8<<20)) + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return nil, fmt.Errorf("volcengine image status=%d body=%s", resp.StatusCode, strings.TrimSpace(string(raw))) + } + + var out T + if err := json.Unmarshal(raw, &out); err != nil { + return nil, err + } + return &out, nil +} + +func parseSize(size string) (int, int) { + parts := 
strings.Split(strings.ToLower(strings.TrimSpace(size)), "x") + if len(parts) != 2 { + return 0, 0 + } + w, _ := strconv.Atoi(strings.TrimSpace(parts[0])) + h, _ := strconv.Atoi(strings.TrimSpace(parts[1])) + return w, h +} + +func toFloat64(val any) (float64, bool) { + switch v := val.(type) { + case float64: + return v, true + case float32: + return float64(v), true + case int: + return float64(v), true + case int64: + return float64(v), true + case json.Number: + f, err := v.Float64() + return f, err == nil + case string: + f, err := strconv.ParseFloat(strings.TrimSpace(v), 64) + return f, err == nil + default: + return 0, false + } +} + +func volcengineHTTPClient(mc *config.ModelConfig) *http.Client { + to := mc.Timeout + if to <= 0 { + to = 5 * time.Minute + } + return &http.Client{Timeout: to} +} + +func signVolcengineRequest(req *http.Request, accessKey, secretKey, region, service string, body []byte) error { + if req == nil { + return errors.New("volcengine image: request nil") + } + if strings.TrimSpace(accessKey) == "" || strings.TrimSpace(secretKey) == "" { + return errors.New("volcengine image: missing access key") + } + now := time.Now().UTC() + amzDate := now.Format("20060102T150405Z") + dateStamp := now.Format("20060102") + payloadHash := sha256Hex(body) + + req.Host = req.URL.Host + req.Header.Set("Host", req.URL.Host) + req.Header.Set("X-Date", amzDate) + req.Header.Set("X-Content-Sha256", payloadHash) + + canonicalURI := req.URL.EscapedPath() + if canonicalURI == "" { + canonicalURI = "/" + } + canonicalQuery := canonicalQueryString(req.URL.Query()) + canonicalHeaders := buildCanonicalHeaders(req) + signedHeaders := "content-type;host;x-content-sha256;x-date" + + canonicalRequest := strings.Join([]string{ + req.Method, + canonicalURI, + canonicalQuery, + canonicalHeaders, + signedHeaders, + payloadHash, + }, "\n") + + stringToSign := strings.Join([]string{ + "HMAC-SHA256", + amzDate, + dateStamp + "/" + region + "/" + service + "/request", + 
sha256Hex([]byte(canonicalRequest)), + }, "\n") + + signingKey := deriveSigningKey(secretKey, dateStamp, region, service) + signature := hex.EncodeToString(hmacSHA256(signingKey, []byte(stringToSign))) + authorization := fmt.Sprintf( + "HMAC-SHA256 Credential=%s/%s/%s/%s/request, SignedHeaders=%s, Signature=%s", + strings.TrimSpace(accessKey), + dateStamp, + region, + service, + signedHeaders, + signature, + ) + req.Header.Set("Authorization", authorization) + return nil +} + +func sha256Hex(data []byte) string { + h := sha256.Sum256(data) + return hex.EncodeToString(h[:]) +} + +func deriveSigningKey(secretKey, date, region, service string) []byte { + kDate := hmacSHA256([]byte(secretKey), []byte(date)) + kRegion := hmacSHA256(kDate, []byte(region)) + kService := hmacSHA256(kRegion, []byte(service)) + return hmacSHA256(kService, []byte("request")) +} + +func hmacSHA256(key, data []byte) []byte { + h := hmac.New(sha256.New, key) + _, _ = h.Write(data) + return h.Sum(nil) +} + +func canonicalQueryString(values url.Values) string { + if len(values) == 0 { + return "" + } + keys := make([]string, 0, len(values)) + for key := range values { + keys = append(keys, key) + } + sort.Strings(keys) + + pairs := make([]string, 0) + for _, key := range keys { + vals := values[key] + sort.Strings(vals) + for _, val := range vals { + pairs = append(pairs, encodeQuery(key)+"="+encodeQuery(val)) + } + } + return strings.Join(pairs, "&") +} + +func encodeQuery(input string) string { + encoded := url.QueryEscape(input) + encoded = strings.ReplaceAll(encoded, "+", "%20") + encoded = strings.ReplaceAll(encoded, "%7E", "~") + return encoded +} + +func buildCanonicalHeaders(req *http.Request) string { + contentType := strings.TrimSpace(req.Header.Get("Content-Type")) + host := strings.TrimSpace(req.Header.Get("Host")) + sha := strings.TrimSpace(req.Header.Get("X-Content-Sha256")) + xDate := strings.TrimSpace(req.Header.Get("X-Date")) + + return strings.Join([]string{ + "content-type:" + 
contentType,
		"host:" + host,
		"x-content-sha256:" + sha,
		"x-date:" + xDate,
		"",
	}, "\n")
}
diff --git a/backend/internal/server/ai/drivers/volcengine/video.go b/backend/internal/server/ai/drivers/volcengine/video.go
new file mode 100644
index 00000000..9e2178b8
--- /dev/null
+++ b/backend/internal/server/ai/drivers/volcengine/video.go
@@ -0,0 +1,602 @@
package volcengine

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"strings"
	"time"

	"github.com/ArtisanCloud/PowerX/internal/server/agent/contract"
	"github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/config"
)

// Default Seedance task endpoints; overridable via the model config's
// extra.api_path_submit / extra.api_path_poll (see buildSeedanceURLs).
const (
	defaultSeedanceSubmitPath = "/api/v1/contents/generations/tasks"
	defaultSeedancePollPath   = "/api/v1/contents/generations/tasks/%s"
)

// videoClient drives Volcengine video generation over two API families:
// the visual submit/poll API and the Seedance task API.
type videoClient struct {
	rawProvider string
}

// NewVideoClient returns a video driver tagged with the caller's raw
// provider id (echoed back in responses).
func NewVideoClient(rawProvider string) *videoClient {
	return &videoClient{rawProvider: strings.TrimSpace(rawProvider)}
}

// Cap reports no special capabilities for this driver.
func (c *videoClient) Cap() contract.ModelCapabilities {
	return contract.ModelCapabilities{}
}

// Health is a no-op; no remote probe is performed.
func (c *videoClient) Health(ctx context.Context) error {
	return nil
}

// volcengineVideoSubmitResp is the visual-API submit envelope
// (code 10000 means success — see generateByVisualAPI).
type volcengineVideoSubmitResp struct {
	Code      int    `json:"code"`
	Message   string `json:"message"`
	RequestID string `json:"request_id"`
	Data      *struct {
		TaskID string `json:"task_id"`
	} `json:"data"`
}

// volcengineVideoResultResp is the visual-API poll envelope; Data is kept
// loose because result keys vary by model.
type volcengineVideoResultResp struct {
	Code      int                    `json:"code"`
	Message   string                 `json:"message"`
	RequestID string                 `json:"request_id"`
	Data      map[string]interface{} `json:"data"`
}

// seedanceTaskResp is the Seedance task envelope; id/status field names vary
// across API versions, so both spellings are captured.
type seedanceTaskResp struct {
	ID      string                 `json:"id"`
	TaskID  string                 `json:"task_id"`
	Status  string                 `json:"status"`
	State   string                 `json:"state"`
	Content map[string]interface{} `json:"content"`
	Output  map[string]interface{} `json:"output"`
	Data    map[string]interface{} `json:"data"`
	Error   *struct {
		Message string `json:"message"`
		Code    any    `json:"code"`
	} `json:"error"`
}

// Generate validates the request and dispatches to the Seedance task API or
// the visual submit/poll API based on model id / extra config.
func (c *videoClient) Generate(ctx context.Context, in contract.VideoRequest) (*contract.VideoResponse, error) {
	mc, err := modelConfigFromRuntime(in.Runtime)
	if err != nil {
		return nil, err
	}
	if mc == nil {
		return nil, errors.New("volcengine video: missing model config")
	}
	modelID := resolveModelID(mc.Model)
	if modelID == "" {
		return nil, errors.New("volcengine video: missing model")
	}
	prompt := strings.TrimSpace(in.Prompt)
	if prompt == "" {
		return nil, errors.New("volcengine video: prompt required")
	}

	if shouldUseSeedanceAPI(modelID, mc.Extra) {
		return c.generateBySeedanceTasks(ctx, in, mc, modelID)
	}
	return c.generateByVisualAPI(ctx, in, mc, modelID)
}

// generateByVisualAPI submits a signed visual-API job, then polls until the
// task completes; requires secret_id/secret_key credentials.
func (c *videoClient) generateByVisualAPI(ctx context.Context, in contract.VideoRequest, mc *config.ModelConfig, modelID string) (*contract.VideoResponse, error) {
	if strings.TrimSpace(mc.SecretID) == "" || strings.TrimSpace(mc.SecretKey) == "" {
		return nil, errors.New("volcengine video: missing secret_id/secret_key")
	}

	submitPayload := map[string]interface{}{
		"req_key": modelID,
		"prompt":  strings.TrimSpace(in.Prompt),
	}
	refImages, err := refImagesToURLs(in.RefImages)
	if err != nil {
		return nil, err
	}
	if len(refImages) > 0 {
		submitPayload["image_urls"] = refImages
	}
	refVideos := refVideosToURLs(in.RefVideos)
	if len(refVideos) > 0 {
		submitPayload["video_urls"] = refVideos
	}
	if w, h := parseSize(in.Resolution); w > 0 && h > 0 {
		submitPayload["width"] = w
		submitPayload["height"] = h
	}
	if in.FPS > 0 {
		submitPayload["fps"] = in.FPS
	}
	if in.MaxDurationS > 0 {
		submitPayload["duration"] = in.MaxDurationS
	}
	// Extra config fills only keys not already set (see mergePayload).
	mergePayload(submitPayload, mc.Extra)

	submitBody, err := json.Marshal(submitPayload)
	if err != nil {
		return nil, err
	}

	submitEndpoint, err := buildVolcengineEndpoint(mc, resolveAction(mc))
	if err != nil {
		return nil, err
	}
	pollEndpoint, err := buildVolcengineEndpoint(mc, resolvePollAction(mc))
	if err != nil {
		return nil, err
	}

	start := time.Now()
	submitResp, err := doVolcengineRequest[volcengineVideoSubmitResp](ctx, submitEndpoint, mc, submitBody)
	if err != nil {
		return nil, err
	}
	// 10000 is the provider's success code for this API family.
	if submitResp.Code != 10000 || submitResp.Data == nil || strings.TrimSpace(submitResp.Data.TaskID) == "" {
		return nil, fmt.Errorf("volcengine video submit failed code=%d message=%s request_id=%s", submitResp.Code, submitResp.Message, submitResp.RequestID)
	}

	pollPayload := map[string]interface{}{
		"req_key":  modelID,
		"task_id":  strings.TrimSpace(submitResp.Data.TaskID),
		"req_json": buildResultReqJSON(in.Runtime, mc.Extra),
	}
	pollBody, err := json.Marshal(pollPayload)
	if err != nil {
		return nil, err
	}

	urls, traceID, err := pollVolcengineVideoResult(ctx, pollEndpoint, mc, pollBody)
	if err != nil {
		return nil, err
	}

	return &contract.VideoResponse{
		VideoURLs: urls,
		TaskID:    strings.TrimSpace(submitResp.Data.TaskID),
		Provider:  strings.TrimSpace(c.rawProvider),
		Model:     strings.TrimSpace(mc.Model),
		LatencyMS: int(time.Since(start).Milliseconds()),
		TraceID:   traceID,
	}, nil
}

// generateBySeedanceTasks submits a Seedance task (bearer api_key auth) and
// polls it to completion.
func (c *videoClient) generateBySeedanceTasks(ctx context.Context, in contract.VideoRequest, mc *config.ModelConfig, modelID string) (*contract.VideoResponse, error) {
	if strings.TrimSpace(mc.APIKey) == "" {
		return nil, errors.New("volcengine seedance video: missing api_key")
	}

	submitURL, pollURLPattern, err := buildSeedanceURLs(mc)
	if err != nil {
		return nil, err
	}

	// Both "content" (structured) and "prompt" (flat) are sent; the service
	// presumably accepts either shape — NOTE(review): confirm against API docs.
	payload := map[string]any{
		"model":   modelID,
		"content": buildSeedanceContent(strings.TrimSpace(in.Prompt), in.RefImages, in.RefVideos),
		"prompt":  strings.TrimSpace(in.Prompt),
	}
	params := map[string]any{}
	if in.Resolution != "" {
		params["resolution"] = strings.TrimSpace(in.Resolution)
	}
	if in.FPS > 0 {
		params["fps"] = in.FPS
	}
	if in.MaxDurationS > 0 {
		params["duration"] = in.MaxDurationS
	}
	if len(params) > 0 {
		payload["parameters"] = params
	}
	if mc.Extra != nil {
if extParams, ok := mc.Extra["parameters"].(map[string]any); ok {
			// Request-level parameters win; extra config fills the gaps.
			if payloadParams, ok2 := payload["parameters"].(map[string]any); ok2 {
				for k, v := range extParams {
					if _, exists := payloadParams[k]; !exists {
						payloadParams[k] = v
					}
				}
			} else {
				payload["parameters"] = extParams
			}
		}
	}

	start := time.Now()
	submitResp, err := doSeedanceSubmit(ctx, submitURL, strings.TrimSpace(mc.APIKey), payload, mc.Timeout)
	if err != nil {
		return nil, err
	}
	// Task id field name varies across API versions; take the first non-empty.
	taskID := strings.TrimSpace(firstNonEmptyString(
		submitResp.TaskID,
		submitResp.ID,
		getString(submitResp.Data, "task_id"),
		getString(submitResp.Data, "id"),
	))
	if taskID == "" {
		return nil, errors.New("volcengine seedance video: empty task id")
	}

	pollURL := buildSeedancePollURL(submitURL, pollURLPattern, taskID)
	urls, traceID, err := pollSeedanceResult(ctx, pollURL, strings.TrimSpace(mc.APIKey), mc.Timeout)
	if err != nil {
		return nil, err
	}

	return &contract.VideoResponse{
		VideoURLs: urls,
		TaskID:    taskID,
		PollURL:   pollURL,
		Provider:  strings.TrimSpace(c.rawProvider),
		Model:     strings.TrimSpace(mc.Model),
		LatencyMS: int(time.Since(start).Milliseconds()),
		TraceID:   traceID,
	}, nil
}

// shouldUseSeedanceAPI decides the API family: Seedance models (by name),
// any explicit api_path override, or a volces.com base_url all select the
// Seedance task API.
func shouldUseSeedanceAPI(modelID string, extra map[string]any) bool {
	mid := strings.ToLower(strings.TrimSpace(modelID))
	if strings.Contains(mid, "seedance") {
		return true
	}
	if extra == nil {
		return false
	}
	if v, ok := extra["api_path_submit"].(string); ok && strings.TrimSpace(v) != "" {
		return true
	}
	if v, ok := extra["api_path_poll"].(string); ok && strings.TrimSpace(v) != "" {
		return true
	}
	if v, ok := extra["base_url"].(string); ok && strings.Contains(strings.ToLower(strings.TrimSpace(v)), "volces.com") {
		return true
	}
	return false
}

// buildSeedanceURLs derives the absolute submit URL and the poll path
// pattern from the endpoint (extra.base_url overrides) plus optional
// extra.api_path_submit / extra.api_path_poll; "{your-id}" placeholders
// left in the base_url are rejected.
func buildSeedanceURLs(mc *config.ModelConfig) (submitURL string, pollURLPattern string, err error) {
	base := strings.TrimSpace(mc.Endpoint)
	if mc.Extra != nil {
		if v, ok := mc.Extra["base_url"].(string); ok && strings.TrimSpace(v) != "" {
			base = strings.TrimSpace(v)
		}
	}
	if base == "" || strings.Contains(base, "{your-id}") {
		return "", "", errors.New("volcengine seedance video: invalid base_url, please set your operator endpoint")
	}
	u, err := url.Parse(base)
	if err != nil {
		return "", "", fmt.Errorf("volcengine seedance video: invalid base_url: %w", err)
	}
	if u.Scheme == "" || u.Host == "" {
		return "", "", errors.New("volcengine seedance video: invalid base_url")
	}

	submitPath := defaultSeedanceSubmitPath
	pollPath := defaultSeedancePollPath
	if mc.Extra != nil {
		if v, ok := mc.Extra["api_path_submit"].(string); ok && strings.TrimSpace(v) != "" {
			submitPath = strings.TrimSpace(v)
		}
		if v, ok := mc.Extra["api_path_poll"].(string); ok && strings.TrimSpace(v) != "" {
			pollPath = strings.TrimSpace(v)
		}
	}
	if !strings.HasPrefix(submitPath, "/") {
		submitPath = "/" + submitPath
	}
	if !strings.HasPrefix(pollPath, "/") {
		pollPath = "/" + pollPath
	}

	u.Path = strings.TrimRight(path.Join("/", submitPath), "/")
	u.RawPath = ""
	return strings.TrimRight(u.String(), "/"), pollPath, nil
}

// buildSeedancePollURL renders the poll URL: a "%s" pattern gets the escaped
// task id substituted; otherwise the id is appended to the submit URL.
func buildSeedancePollURL(submitURL, pollPattern, taskID string) string {
	if strings.TrimSpace(taskID) == "" {
		return ""
	}
	if strings.Contains(pollPattern, "%s") {
		if u, err := url.Parse(submitURL); err == nil {
			u.Path = fmt.Sprintf(strings.TrimSpace(pollPattern), url.PathEscape(strings.TrimSpace(taskID)))
			u.RawPath = ""
			return u.String()
		}
	}
	return strings.TrimRight(submitURL, "/") + "/" + url.PathEscape(strings.TrimSpace(taskID))
}

// doSeedanceSubmit marshals payload and POSTs it to the submit endpoint.
func doSeedanceSubmit(ctx context.Context, submitURL, apiKey string, payload map[string]any, timeout time.Duration) (*seedanceTaskResp, error) {
	raw, err := json.Marshal(payload)
	if err != nil {
		return nil, err
	}
	resp, err := doSeedanceRequest(ctx, http.MethodPost, submitURL, apiKey, raw, timeout)
	if err != nil {
		return nil, err
	}
return resp, nil +} + +func pollSeedanceResult(ctx context.Context, pollURL, apiKey string, timeout time.Duration) ([]string, string, error) { + interval := 2 * time.Second + for { + select { + case <-ctx.Done(): + return nil, "", ctx.Err() + case <-time.After(interval): + } + + resp, err := doSeedanceRequest(ctx, http.MethodGet, pollURL, apiKey, nil, timeout) + if err != nil { + return nil, "", err + } + + status := strings.ToLower(strings.TrimSpace(firstNonEmptyString(resp.Status, resp.State, getString(resp.Data, "status")))) + switch status { + case "", "pending", "queued", "running", "processing", "in_progress", "in_queue", "generating": + continue + case "done", "completed", "succeeded", "success": + urls := extractSeedanceVideoURLs(resp) + if len(urls) == 0 { + return nil, "", errors.New("volcengine seedance video: empty result") + } + traceID := strings.TrimSpace(firstNonEmptyString(getString(resp.Data, "request_id"), getString(resp.Output, "request_id"))) + return urls, traceID, nil + case "failed", "error", "cancelled", "canceled", "expired": + errMsg := "volcengine seedance video failed" + if resp.Error != nil && strings.TrimSpace(resp.Error.Message) != "" { + errMsg = strings.TrimSpace(resp.Error.Message) + } + return nil, "", errors.New(errMsg) + default: + return nil, "", fmt.Errorf("volcengine seedance video result status=%s", status) + } + } +} + +func doSeedanceRequest(ctx context.Context, method, endpoint, apiKey string, body []byte, timeout time.Duration) (*seedanceTaskResp, error) { + var reader io.Reader + if len(body) > 0 { + reader = bytes.NewReader(body) + } + req, err := http.NewRequestWithContext(ctx, method, endpoint, reader) + if err != nil { + return nil, err + } + req.Header.Set("Authorization", "Bearer "+strings.TrimSpace(apiKey)) + req.Header.Set("Accept", "application/json") + if method == http.MethodPost { + req.Header.Set("Content-Type", "application/json") + } + + client := &http.Client{Timeout: timeoutOrDefault(timeout)} + resp, 
err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + raw, _ := io.ReadAll(io.LimitReader(resp.Body, 8<<20)) + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return nil, fmt.Errorf("volcengine seedance video status=%d body=%s", resp.StatusCode, strings.TrimSpace(string(raw))) + } + + var out seedanceTaskResp + if err := json.Unmarshal(raw, &out); err != nil { + return nil, fmt.Errorf("volcengine seedance video decode failed: %w", err) + } + return &out, nil +} + +func timeoutOrDefault(to time.Duration) time.Duration { + if to <= 0 { + return 5 * time.Minute + } + return to +} + +func buildSeedanceContent(prompt string, refImages []contract.ContentPart, refVideos []contract.ContentPart) []map[string]any { + content := make([]map[string]any, 0, 1+len(refImages)+len(refVideos)) + if strings.TrimSpace(prompt) != "" { + content = append(content, map[string]any{ + "type": "text", + "text": strings.TrimSpace(prompt), + }) + } + for _, p := range refImages { + if p.Type != contract.ContentTypeImageURL || strings.TrimSpace(p.URL) == "" { + continue + } + content = append(content, map[string]any{ + "type": "image_url", + "image_url": map[string]any{"url": strings.TrimSpace(p.URL)}, + }) + } + for _, p := range refVideos { + if p.Type != contract.ContentTypeVideoURL || strings.TrimSpace(p.URL) == "" { + continue + } + content = append(content, map[string]any{ + "type": "video_url", + "video_url": map[string]any{"url": strings.TrimSpace(p.URL)}, + }) + } + return content +} + +func extractSeedanceVideoURLs(resp *seedanceTaskResp) []string { + if resp == nil { + return nil + } + candidates := []any{resp.Content, resp.Output, resp.Data, resp} + for _, c := range candidates { + if out := extractVideoURLsFromAny(c); len(out) > 0 { + return out + } + } + return nil +} + +func extractVideoURLsFromAny(raw any) []string { + switch v := raw.(type) { + case map[string]any: + for _, key := range []string{"video_urls", "videos", "video_url", 
"output_url", "url"} { + if out := readStringArray(v[key]); len(out) > 0 { + return out + } + } + for _, item := range v { + if out := extractVideoURLsFromAny(item); len(out) > 0 { + return out + } + } + case []any: + collected := make([]string, 0) + for _, item := range v { + if out := extractVideoURLsFromAny(item); len(out) > 0 { + collected = append(collected, out...) + } + } + if len(collected) > 0 { + return collected + } + } + return nil +} + +func refVideosToURLs(parts []contract.ContentPart) []string { + if len(parts) == 0 { + return nil + } + urls := make([]string, 0, len(parts)) + for _, part := range parts { + if part.Type != contract.ContentTypeVideoURL { + continue + } + if strings.TrimSpace(part.URL) == "" { + continue + } + urls = append(urls, strings.TrimSpace(part.URL)) + } + return urls +} + +func mergePayload(dst map[string]interface{}, extra map[string]interface{}) { + if dst == nil || extra == nil { + return + } + for k, v := range extra { + if _, ok := dst[k]; ok { + continue + } + dst[k] = v + } +} + +func pollVolcengineVideoResult(ctx context.Context, endpoint string, mc *config.ModelConfig, body []byte) ([]string, string, error) { + interval := 2 * time.Second + for { + select { + case <-ctx.Done(): + return nil, "", ctx.Err() + case <-time.After(interval): + } + + resp, err := doVolcengineRequest[volcengineVideoResultResp](ctx, endpoint, mc, body) + if err != nil { + return nil, "", err + } + if resp.Code != 10000 { + return nil, resp.RequestID, fmt.Errorf("volcengine video result failed code=%d message=%s request_id=%s", resp.Code, resp.Message, resp.RequestID) + } + status := strings.ToLower(strings.TrimSpace(getString(resp.Data, "status"))) + switch status { + case "done": + urls := extractVideoURLs(resp.Data) + if len(urls) == 0 { + return nil, resp.RequestID, errors.New("volcengine video: empty result") + } + return urls, resp.RequestID, nil + case "in_queue", "generating", "running", "": + continue + default: + return nil, 
resp.RequestID, fmt.Errorf("volcengine video result status=%s", status) + } + } +} + +func extractVideoURLs(data map[string]interface{}) []string { + if data == nil { + return nil + } + keys := []string{"video_urls", "videos", "video_url", "output_url", "url"} + for _, key := range keys { + if vals := readStringArray(data[key]); len(vals) > 0 { + return vals + } + } + return nil +} + +func getString(data map[string]interface{}, key string) string { + if data == nil { + return "" + } + v, _ := data[key] + if s, ok := v.(string); ok { + return strings.TrimSpace(s) + } + return "" +} + +func readStringArray(raw interface{}) []string { + switch v := raw.(type) { + case string: + if strings.TrimSpace(v) == "" { + return nil + } + return []string{strings.TrimSpace(v)} + case []string: + out := make([]string, 0, len(v)) + for _, item := range v { + if strings.TrimSpace(item) == "" { + continue + } + out = append(out, strings.TrimSpace(item)) + } + return out + case []interface{}: + out := make([]string, 0, len(v)) + for _, item := range v { + if s, ok := item.(string); ok && strings.TrimSpace(s) != "" { + out = append(out, strings.TrimSpace(s)) + } + } + return out + default: + return nil + } +} + +func firstNonEmptyString(items ...string) string { + for _, item := range items { + if strings.TrimSpace(item) != "" { + return strings.TrimSpace(item) + } + } + return "" +} diff --git a/backend/internal/server/ai/factory/image/factory.go b/backend/internal/server/ai/factory/image/factory.go new file mode 100644 index 00000000..b97402f7 --- /dev/null +++ b/backend/internal/server/ai/factory/image/factory.go @@ -0,0 +1,28 @@ +package image + +import ( + "fmt" + "strings" + + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/google" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/openai" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/volcengine" +) + +// NewClient returns provider-specific Image driver. 
+func NewClient(provider string) (Client, error) { + switch normalize(provider) { + case "openai": + return openai.NewImageClient(provider), nil + case "google", "gemini": + return google.NewImageClient(provider), nil + case "volcengine", "volc", "volcano", "bytedance": + return volcengine.NewImageClient(provider), nil + default: + return nil, fmt.Errorf("unknown image provider: %s", provider) + } +} + +func normalize(s string) string { + return strings.ToLower(strings.TrimSpace(s)) +} diff --git a/backend/internal/server/ai/factory/image/image.go b/backend/internal/server/ai/factory/image/image.go new file mode 100644 index 00000000..cec53029 --- /dev/null +++ b/backend/internal/server/ai/factory/image/image.go @@ -0,0 +1,14 @@ +package image + +import ( + "context" + + "github.com/ArtisanCloud/PowerX/internal/server/agent/contract" +) + +// Client defines the Image driver interface. +type Client interface { + Generate(ctx context.Context, in contract.ImageRequest) (*contract.ImageResponse, error) + Cap() contract.ModelCapabilities + Health(ctx context.Context) error +} diff --git a/backend/internal/server/agent/drivers/eino/llm/chatutil.go b/backend/internal/server/ai/factory/llm/chatutil.go similarity index 69% rename from backend/internal/server/agent/drivers/eino/llm/chatutil.go rename to backend/internal/server/ai/factory/llm/chatutil.go index f99c456f..cbaae214 100644 --- a/backend/internal/server/agent/drivers/eino/llm/chatutil.go +++ b/backend/internal/server/ai/factory/llm/chatutil.go @@ -1,9 +1,10 @@ package llm import ( - "bytes" "context" - "github.com/ArtisanCloud/PowerX/internal/server/agent/drivers/eino/config" + + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/config" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/core" ) // StreamOrFallback:优先真流式;不支持则使用 ChatOnce 并按 rune 模拟 token @@ -20,7 +21,7 @@ func StreamOrFallback( if err == nil { return final, nil } - if err != nil && err != ErrStreamNotSupported { + if err != nil && 
err != core.ErrStreamNotSupported { return "", err } // 不支持流式:回退 @@ -38,12 +39,3 @@ func StreamOrFallback( } return final, nil } - -func TrimDataPrefix(line []byte) []byte { - b := bytes.TrimSpace(line) - if bytes.HasPrefix(b, []byte("data:")) { - b = b[len("data:"):] - b = bytes.TrimLeft(b, " \t") - } - return b -} diff --git a/backend/internal/server/agent/drivers/eino/llm/factory.go b/backend/internal/server/ai/factory/llm/factory.go similarity index 53% rename from backend/internal/server/agent/drivers/eino/llm/factory.go rename to backend/internal/server/ai/factory/llm/factory.go index 40cd3bdd..f77d7390 100644 --- a/backend/internal/server/agent/drivers/eino/llm/factory.go +++ b/backend/internal/server/ai/factory/llm/factory.go @@ -3,19 +3,24 @@ package llm import ( "fmt" "strings" + + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/baidu" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/hunyuan" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/ollama" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/openai" ) // NewClient:按 provider 返回具体实现(都满足 LLMClient) func NewClient(provider string) (LLMClient, error) { switch normalize(provider) { case "openai": - return NewOpenAIClient(), nil + return openai.NewLLMClient(provider), nil case "hunyuan": - return NewHunyuanClient(), nil + return hunyuan.NewLLMClient(), nil case "ollama": - return NewOllamaClient(), nil + return ollama.NewLLMClient(), nil case "baidu", "qianfan": - return NewBaiduClient(), nil + return baidu.NewLLMClient(), nil // ... 
其他厂商 default: return nil, fmt.Errorf("unknown llm provider: %s", provider) @@ -29,7 +34,7 @@ func normalize(s string) string { } // OpenAI-compatible providers → reuse openai client switch p { - case "openrouter", "vllm", "deepseek", "moonshot": + case "openrouter", "vllm", "deepseek", "moonshot", "huggingface", "hf": return "openai" } return p diff --git a/backend/internal/server/agent/drivers/eino/llm/llm.go b/backend/internal/server/ai/factory/llm/llm.go similarity index 71% rename from backend/internal/server/agent/drivers/eino/llm/llm.go rename to backend/internal/server/ai/factory/llm/llm.go index 779bbc18..573ca10b 100644 --- a/backend/internal/server/agent/drivers/eino/llm/llm.go +++ b/backend/internal/server/ai/factory/llm/llm.go @@ -1,23 +1,15 @@ package llm -// internal/server/agent/drivers/eino/llm/llm.go - -import ( - "errors" - "github.com/ArtisanCloud/PowerX/internal/server/agent/drivers/eino/config" -) - -// services/agent/drivers/eino/llm/llm.go - import ( "context" + + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/config" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/core" ) // 运行时透传(来自 ChatRequest.config) type RuntimeConfig map[string]any -var ErrStreamNotSupported = errors.New("llm: stream not supported") - // LLMClient:统一包含同步与流式 type LLMClient interface { // 一次性完成(completion) @@ -31,5 +23,5 @@ type LLMClient interface { type NoopStream struct{} func (NoopStream) Stream(ctx context.Context, mc *config.ModelConfig, prompt string, onDelta func(string)) (string, error) { - return "", ErrStreamNotSupported + return "", core.ErrStreamNotSupported } diff --git a/backend/internal/server/ai/factory/tts/factory.go b/backend/internal/server/ai/factory/tts/factory.go new file mode 100644 index 00000000..bc879050 --- /dev/null +++ b/backend/internal/server/ai/factory/tts/factory.go @@ -0,0 +1,21 @@ +package tts + +import ( + "fmt" + "strings" + + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/qwen" +) + +func 
NewClient(provider string) (Client, error) { + switch normalize(provider) { + case "qwen", "dashscope", "tongyi": + return qwen.NewTTSClient(), nil + default: + return nil, fmt.Errorf("unknown tts provider: %s", provider) + } +} + +func normalize(s string) string { + return strings.ToLower(strings.TrimSpace(s)) +} diff --git a/backend/internal/server/ai/factory/tts/tts.go b/backend/internal/server/ai/factory/tts/tts.go new file mode 100644 index 00000000..ad01ffb3 --- /dev/null +++ b/backend/internal/server/ai/factory/tts/tts.go @@ -0,0 +1,11 @@ +package tts + +import ( + "context" + + "github.com/ArtisanCloud/PowerX/internal/server/agent/contract" +) + +type Client interface { + Synthesize(ctx context.Context, in contract.TTSRequest) (*contract.TTSResponse, error) +} diff --git a/backend/internal/server/ai/factory/video/factory.go b/backend/internal/server/ai/factory/video/factory.go new file mode 100644 index 00000000..fb505e97 --- /dev/null +++ b/backend/internal/server/ai/factory/video/factory.go @@ -0,0 +1,21 @@ +package video + +import ( + "fmt" + "strings" + + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/volcengine" +) + +func NewClient(provider string) (Client, error) { + switch normalize(provider) { + case "volcengine", "volc", "volcano", "bytedance": + return volcengine.NewVideoClient(provider), nil + default: + return nil, fmt.Errorf("unknown video provider: %s", provider) + } +} + +func normalize(s string) string { + return strings.ToLower(strings.TrimSpace(s)) +} diff --git a/backend/internal/server/ai/factory/video/video.go b/backend/internal/server/ai/factory/video/video.go new file mode 100644 index 00000000..ae02809c --- /dev/null +++ b/backend/internal/server/ai/factory/video/video.go @@ -0,0 +1,11 @@ +package video + +import ( + "context" + + "github.com/ArtisanCloud/PowerX/internal/server/agent/contract" +) + +type Client interface { + Generate(ctx context.Context, in contract.VideoRequest) (*contract.VideoResponse, error) +} diff 
--git a/backend/internal/server/ai/factory/vlm/factory.go b/backend/internal/server/ai/factory/vlm/factory.go new file mode 100644 index 00000000..08cfbcdd --- /dev/null +++ b/backend/internal/server/ai/factory/vlm/factory.go @@ -0,0 +1,31 @@ +package vlm + +import ( + "fmt" + "strings" + + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/google" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/hunyuan" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/openai" + "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/qwen" +) + +// NewClient returns provider-specific VLM driver (placeholder for now). +func NewClient(provider string) (Client, error) { + switch normalize(provider) { + case "openai": + return openai.NewVLMClient(), nil + case "google", "gemini": + return google.NewVLMClient(), nil + case "hunyuan": + return hunyuan.NewVLMClient(), nil + case "qwen": + return qwen.NewVLMClient(), nil + default: + return nil, fmt.Errorf("unknown vlm provider: %s", provider) + } +} + +func normalize(s string) string { + return strings.ToLower(strings.TrimSpace(s)) +} diff --git a/backend/internal/server/ai/factory/vlm/vlm.go b/backend/internal/server/ai/factory/vlm/vlm.go new file mode 100644 index 00000000..78d84407 --- /dev/null +++ b/backend/internal/server/ai/factory/vlm/vlm.go @@ -0,0 +1,13 @@ +package vlm + +import ( + "context" + + "github.com/ArtisanCloud/PowerX/internal/server/agent/contract" +) + +// Client defines the VLM driver interface. 
+type Client interface { + Invoke(ctx context.Context, in contract.VLMRequest) (*contract.VLMResponse, error) + Stream(ctx context.Context, in contract.VLMRequest, onDelta func(string)) (*contract.VLMResponse, error) +} diff --git a/backend/internal/service/agent/agent_service.go b/backend/internal/service/agent/agent_service.go index bd2d274c..b8c099d9 100644 --- a/backend/internal/service/agent/agent_service.go +++ b/backend/internal/service/agent/agent_service.go @@ -9,6 +9,7 @@ import ( dbmodel "github.com/ArtisanCloud/PowerX/internal/server/agent/persistence/model" repo "github.com/ArtisanCloud/PowerX/internal/server/agent/persistence/repository" + "github.com/google/uuid" "gorm.io/datatypes" "gorm.io/gorm" ) @@ -185,6 +186,17 @@ func (s *AgentService) Get(ctx context.Context, env string, tenantUUID *string, return out, nil } +func (s *AgentService) GetByUUID(ctx context.Context, env string, tenantUUID *string, agentUUID uuid.UUID) (*dbmodel.Agent, error) { + out, err := s.agRepo.FindByScopeUUID(ctx, env, tenantUUID, agentUUID) + if err != nil { + return nil, err + } + if !equalTenant(tenantUUID, out.TenantUUID) { + return nil, gorm.ErrRecordNotFound + } + return out, nil +} + //func (s *AgentService) List( // ctx context.Context, env string, tenantUUID *string, statuses ...string, //) ([]dbmodel.Agent, error) { diff --git a/backend/internal/service/agent/agent_setting_service.go b/backend/internal/service/agent/agent_setting_service.go index e3e6583e..e35d4e73 100644 --- a/backend/internal/service/agent/agent_setting_service.go +++ b/backend/internal/service/agent/agent_setting_service.go @@ -1,6 +1,7 @@ package agent import ( + "bytes" "context" "encoding/json" "errors" @@ -8,31 +9,38 @@ import ( "io" "net/http" "net/url" + "regexp" + "sort" + "strconv" "strings" "time" "github.com/ArtisanCloud/PowerX/internal/server/agent/catalog" agentconf "github.com/ArtisanCloud/PowerX/internal/server/agent/config" 
"github.com/ArtisanCloud/PowerX/internal/server/agent/contract" - agentcfg "github.com/ArtisanCloud/PowerX/internal/server/agent/drivers/eino/config" - agentllm "github.com/ArtisanCloud/PowerX/internal/server/agent/drivers/eino/llm" + intentfactory "github.com/ArtisanCloud/PowerX/internal/server/agent/factory/intent" dbmodel "github.com/ArtisanCloud/PowerX/internal/server/agent/persistence/model" repoai "github.com/ArtisanCloud/PowerX/internal/server/agent/persistence/repository" - tenantrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/tenant" - settingrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/setting" + agentcfg "github.com/ArtisanCloud/PowerX/internal/server/ai/drivers/config" + imagefactory "github.com/ArtisanCloud/PowerX/internal/server/ai/factory/image" + agentllm "github.com/ArtisanCloud/PowerX/internal/server/ai/factory/llm" + vlmfactory "github.com/ArtisanCloud/PowerX/internal/server/ai/factory/vlm" dbsetting "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/setting" + settingrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/setting" + tenantrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/tenant" "github.com/ArtisanCloud/PowerX/pkg/corex/tenantkeys" "github.com/ArtisanCloud/PowerX/pkg/utils" + "github.com/ArtisanCloud/PowerX/pkg/utils/logger" "gorm.io/datatypes" "gorm.io/gorm" ) type ModelRule struct { - RequireAPIKey bool + RequireAPIKey bool RequireSecretID bool RequireSecretKey bool - RequireBaseURL bool - DefaultBaseURL string + RequireBaseURL bool + DefaultBaseURL string } var sensitiveCredentialKeys = []string{"api_key", "secret_id", "secret_key", "secret", "client_secret", "access_token"} @@ -47,25 +55,25 @@ type ProviderHealthRecord struct { } type AgentSettingService struct { - db *gorm.DB - credRepo *repoai.AIProviderCredentialRepository - profRepo *repoai.AIModelProfileRepository - routeRepo *repoai.AIRoutePolicyRepository - 
usageRepo *repoai.AIUsageLogRepository - tks *tenantkeys.TenantKeyService - tenantRepo *tenantrepo.TenantRepository + db *gorm.DB + credRepo *repoai.AIProviderCredentialRepository + profRepo *repoai.AIModelProfileRepository + routeRepo *repoai.AIRoutePolicyRepository + usageRepo *repoai.AIUsageLogRepository + tks *tenantkeys.TenantKeyService + tenantRepo *tenantrepo.TenantRepository settingRepo *settingrepo.TenantSettingRepository } func NewAgentSettingService(db *gorm.DB) *AgentSettingService { return &AgentSettingService{ - db: db, - credRepo: repoai.NewAIProviderCredentialRepository(db), - profRepo: repoai.NewAIModelProfileRepository(db), - routeRepo: repoai.NewAIRoutePolicyRepository(db), - usageRepo: repoai.NewAIUsageLogRepository(db), - tks: tenantkeys.NewTenantKeyService(db), - tenantRepo: tenantrepo.NewTenantRepository(db), + db: db, + credRepo: repoai.NewAIProviderCredentialRepository(db), + profRepo: repoai.NewAIModelProfileRepository(db), + routeRepo: repoai.NewAIRoutePolicyRepository(db), + usageRepo: repoai.NewAIUsageLogRepository(db), + tks: tenantkeys.NewTenantKeyService(db), + tenantRepo: tenantrepo.NewTenantRepository(db), settingRepo: settingrepo.NewTenantSettingRepository(db), } } @@ -204,12 +212,16 @@ func (s *AgentSettingService) Providers(modality string) []aiProviderItem { items := catalogGetProviders(strings.TrimSpace(modality)) out := make([]aiProviderItem, 0, len(items)) for _, it := range items { - out = append(out, aiProviderItem{ID: it.ID, Name: it.Name}) + out = append(out, aiProviderItem{ + ID: it.ID, + Name: it.Name, + Apps: it.Apps, + }) } return out } -func (s *AgentSettingService) Models(modality, provider string) ([]string, error) { - models, err := catalogGetModels(strings.TrimSpace(modality), strings.TrimSpace(provider)) +func (s *AgentSettingService) Models(modality, provider, app string) ([]string, error) { + models, err := catalogGetModels(strings.TrimSpace(modality), strings.TrimSpace(provider), strings.TrimSpace(app)) if 
err != nil { return nil, err } @@ -227,9 +239,11 @@ func (s *AgentSettingService) ModelsForTenant( tenantUUID *string, modality string, provider string, + app string, ) ([]string, error) { mod := strings.TrimSpace(strings.ToLower(modality)) prov := strings.TrimSpace(strings.ToLower(provider)) + app = strings.TrimSpace(strings.ToLower(app)) // OpenRouter:模型目录变化快,优先走远端 /models;失败则回退到本地目录(占位/示例)。 if prov == "openrouter" && (mod == "llm" || mod == "embedding") { @@ -237,12 +251,16 @@ func (s *AgentSettingService) ModelsForTenant( return remote, nil } } - return s.Models(mod, prov) + return s.Models(mod, prov, app) } type openRouterModelsResponse struct { - Data []struct{ ID string `json:"id"` } `json:"data"` - Models []struct{ ID string `json:"id"` } `json:"models"` + Data []struct { + ID string `json:"id"` + } `json:"data"` + Models []struct { + ID string `json:"id"` + } `json:"models"` } func (s *AgentSettingService) fetchOpenRouterModels( @@ -524,6 +542,7 @@ func (s *AgentSettingService) resolveConnFromStore( cred, err := s.credRepo.FindByScopeNameProvider(ctx, env, tenantUUID, name, provider) if err != nil { + logger.WarnF(ctx, "[agent_setting] credential lookup failed env=%s tenant=%s provider=%s name=%s err=%v", env, s.tenantScopeKey(tenantUUID), provider, name, err) return baseURL, apiKey, err } // 先用存量 base_url @@ -541,14 +560,28 @@ func (s *AgentSettingService) resolveConnFromStore( apiKey = strings.TrimSpace(v) } - var sec struct { - APIKey string `json:"api_key"` - Secret string `json:"secret"` - } + sec := map[string]any{} if e := s.tks.UnsealSensitive(ctx, env, s.tenantScopeKey(tenantUUID), cred.Data, &sec); e == nil { if apiKey == "" { - apiKey = strings.TrimSpace(sec.APIKey) + if v, ok := sec["api_key"].(string); ok && strings.TrimSpace(v) != "" { + apiKey = strings.TrimSpace(v) + } else if v, ok := sec["access_token"].(string); ok && strings.TrimSpace(v) != "" { + apiKey = strings.TrimSpace(v) + } else if v, ok := sec["secret"].(string); ok && 
strings.TrimSpace(v) != "" { + apiKey = strings.TrimSpace(v) + } } + if apiKey == "" { + keys := make([]string, 0, len(sec)) + for k := range sec { + keys = append(keys, k) + } + logger.WarnF(ctx, "[agent_setting] resolved empty api_key after unseal env=%s tenant=%s provider=%s sealed_keys=%v", env, s.tenantScopeKey(tenantUUID), provider, keys) + } + } else if cred.Data != nil && cred.Data["__sealed"] != nil { + logger.WarnF(ctx, "[agent_setting] unseal api_key failed env=%s tenant=%s provider=%s err=%v", env, s.tenantScopeKey(tenantUUID), provider, e) + } else { + logger.WarnF(ctx, "[agent_setting] credential missing __sealed env=%s tenant=%s provider=%s", env, s.tenantScopeKey(tenantUUID), provider) } } return baseURL, apiKey, nil @@ -648,6 +681,8 @@ func (s *AgentSettingService) TestConnectionPreferInput( } return s.PingStrict(ctx, mod, prov, model, bu, ak, "", "", "") + case contract.ModImage: + return s.PingImage(ctx, env, tenantUUID, provider, model, baseURL, apiKey, secretID, secretKey, region, "") default: return s.PingGeneric(ctx, env, tenantUUID, contract.Modality(mod), provider, model, baseURL, apiKey) } @@ -748,6 +783,109 @@ func (s *AgentSettingService) PingLLM(ctx context.Context, env string, tenantUUI return err } +func (s *AgentSettingService) PingImage( + ctx context.Context, env string, tenantUUID *string, + provider, model, baseURL, apiKey, secretID, secretKey, region, organization string, +) error { + logger.InfoF(ctx, "[agent_setting] ping_image start env=%s provider=%s model=%s", env, strings.TrimSpace(provider), strings.TrimSpace(model)) + if err := ensureModelExists(string(contract.ModImage), provider, model); err != nil { + return err + } + p := strings.TrimSpace(strings.ToLower(provider)) + req := catalog.AuthReqFromCatalog(provider) + var err error + baseURL, apiKey, err = s.prepareAuthInputs(ctx, env, tenantUUID, p, baseURL, apiKey, req.NeedBaseURL, req.DefaultBaseURL, req.NeedKey) + if err != nil { + return err + } + if err := 
validateEndpoint(baseURL); err != nil { + return err + } + + manifest := findModelManifest(string(contract.ModImage), provider, model) + size := "256x256" + quality := "auto" + format := "png" + promptHint := "" + if manifest != nil && manifest.Defaults != nil { + if v, ok := manifest.Defaults["size"].(string); ok && strings.TrimSpace(v) != "" { + size = strings.TrimSpace(v) + } + if v, ok := manifest.Defaults["quality"].(string); ok && strings.TrimSpace(v) != "" { + quality = strings.TrimSpace(v) + } + if v, ok := manifest.Defaults["format"].(string); ok && strings.TrimSpace(v) != "" { + format = strings.TrimSpace(v) + } + if v, ok := manifest.Defaults["promptHint"].(string); ok && strings.TrimSpace(v) != "" { + promptHint = strings.TrimSpace(v) + } + } + prompt := "A tiny white cube on a blue background." + if promptHint != "" { + prompt = strings.TrimSpace(prompt + "\n" + promptHint) + } + + mc := agentcfg.ModelConfig{ + Provider: provider, + Endpoint: baseURL, + APIKey: apiKey, + SecretID: secretID, + SecretKey: secretKey, + Region: region, + Model: model, + Organization: organization, + Extra: s.buildModelExtras(contract.ModImage, provider, model), + } + + driverKey := p + if man, ok := catalog.GetGlobalAIRegister().Manifest(p); ok && man != nil { + if dk := strings.ToLower(strings.TrimSpace(man.Drivers["image"])); dk != "" { + driverKey = dk + } + } + + ctx, cancel := context.WithTimeout(ctx, 45*time.Second) + defer cancel() + + var invokeErr error + switch driverKey { + case "qwen": + cli, err := vlmfactory.NewClient(driverKey) + if err != nil { + return err + } + _, invokeErr = cli.Invoke(ctx, contract.VLMRequest{ + Messages: []contract.Message{ + { + Role: "user", + Content: []contract.ContentPart{ + {Type: contract.ContentTypeText, Text: "请回复 ok"}, + }, + }, + }, + MaxTokens: 8, + Runtime: map[string]any{"config": &mc}, + }) + default: + cli, err := imagefactory.NewClient(driverKey) + if err != nil { + return err + } + _, invokeErr = cli.Generate(ctx, 
contract.ImageRequest{ + Prompt: prompt, + Size: size, + Quality: quality, + Format: format, + Runtime: map[string]any{"config": &mc}, + }) + } + if invokeErr == nil { + logger.InfoF(ctx, "[agent_setting] ping_image success env=%s provider=%s model=%s", env, strings.TrimSpace(provider), strings.TrimSpace(model)) + } + return invokeErr +} + func (s *AgentSettingService) PingGeneric( ctx context.Context, env string, tenantUUID *string, modality contract.Modality, provider, model, baseURL, apiKey string, @@ -767,6 +905,565 @@ func (s *AgentSettingService) PingGeneric( return nil } +// ProbeEmbeddingDimensionsPreferInput performs a real embedding call to discover vector dimensions, and writes it back to the profile. +// NOTE: This is used by AI Settings "测试连接",不做任何向量表创建,仅探测 provider/model 的 embedding 输出。 +func (s *AgentSettingService) ProbeEmbeddingDimensionsPreferInput( + ctx context.Context, + env string, + tenantUUID *string, + provider string, + model string, + baseURL string, + apiKey string, +) (int, error) { + if err := ensureModelExists(string(contract.ModEmbed), provider, model); err != nil { + return 0, err + } + p := strings.ToLower(strings.TrimSpace(provider)) + m := strings.TrimSpace(model) + if p == "" || m == "" { + return 0, fmt.Errorf("provider/model 不能为空") + } + + req := catalog.AuthReqFromCatalog(p) + if strings.TrimSpace(baseURL) == "" { + if v := catalog.DefaultBaseURLForModel(p, m); strings.TrimSpace(v) != "" { + baseURL = v + } + } + bu, ak, err := s.prepareAuthInputs(ctx, env, tenantUUID, p, baseURL, apiKey, req.NeedBaseURL, req.DefaultBaseURL, req.NeedKey) + if err != nil { + return 0, err + } + if err := validateEndpoint(bu); err != nil { + return 0, err + } + + // driver mapping: provider may declare a different embedding driver (OpenAI-compatible etc.) 
+ driverKey := p + if man, ok := catalog.GetGlobalAIRegister().Manifest(p); ok && man != nil { + if dk := strings.ToLower(strings.TrimSpace(man.Drivers["embedding"])); dk != "" { + driverKey = dk + } + } + + embCfg := agentconf.EmbeddingConfig{ + Enabled: true, + Provider: driverKey, + Endpoint: bu, + Model: m, + APIKey: ak, + MaxBatch: 8, + Dim: 0, + } + vec, err := intentfactory.NewVectorizerFromConfig(embCfg) + if err != nil { + return 0, err + } + if vec == nil { + return 0, fmt.Errorf("embedding vectorizer unavailable (provider=%s model=%s)", p, m) + } + out, err := vec.Embed(ctx, []string{"powerx-dim-probe"}) + if err != nil { + return 0, err + } + if len(out) == 0 || len(out[0]) == 0 { + return 0, fmt.Errorf("embedding probe returned empty vector") + } + dim := len(out[0]) + maxInputTokens := probeEmbeddingMaxInputTokens(ctx, p, bu, m) + if maxInputTokens > 0 { + logger.InfoF(ctx, "[agent_setting] embedding max_input_tokens probed provider=%s model=%s tokens=%d", p, m, maxInputTokens) + } else { + logger.InfoF(ctx, "[agent_setting] embedding max_input_tokens probe empty provider=%s model=%s base=%s", p, m, bu) + } + + // write back to model profile (defaults + cap_cache) + profile := &dbmodel.AIModelProfile{ + Modality: "embedding", + Provider: p, + Model: m, + Label: "probe.embedding", + Defaults: datatypes.JSONMap{ + "dimensions": dim, + }, + CapCache: datatypes.JSONMap{ + "dimensions": dim, + "probed_at": time.Now().UTC().Format(time.RFC3339Nano), + }, + Tags: []string{"embedding", "probed"}, + } + if maxInputTokens > 0 { + profile.Defaults["max_input_tokens"] = maxInputTokens + profile.CapCache["max_input_tokens"] = maxInputTokens + } + // keep existing defaults/cap_cache best-effort (no hard dependency) + if exist, e := s.profRepo.FindByScopeModalityProviderModel(ctx, env, tenantUUID, "embedding", p, m); e == nil && exist != nil { + if exist.Defaults != nil { + for k, v := range exist.Defaults { + if _, ok := profile.Defaults[k]; ok { + continue + } + 
profile.Defaults[k] = v + } + } + if exist.CapCache != nil { + for k, v := range exist.CapCache { + profile.CapCache[k] = v + } + profile.CapCache["dimensions"] = dim + profile.CapCache["probed_at"] = time.Now().UTC().Format(time.RFC3339Nano) + if maxInputTokens > 0 { + profile.CapCache["max_input_tokens"] = maxInputTokens + } + } + if strings.TrimSpace(exist.Label) != "" { + profile.Label = exist.Label + } + if len(exist.Tags) > 0 { + profile.Tags = exist.Tags + } + } + if maxInputTokens <= 0 { + delete(profile.Defaults, "max_input_tokens") + delete(profile.CapCache, "max_input_tokens") + } + _ = s.profRepo.UpsertByScopeModalityProviderModel(ctx, env, tenantUUID, profile) + + return dim, nil +} + +// BuildEmbeddingConfig resolves tenant embedding connection info and returns a ready config. +func (s *AgentSettingService) BuildEmbeddingConfig( + ctx context.Context, + env string, + tenantUUID *string, + provider string, + model string, + baseURL string, + apiKey string, +) (agentconf.EmbeddingConfig, error) { + if err := ensureModelExists(string(contract.ModEmbed), provider, model); err != nil { + return agentconf.EmbeddingConfig{}, err + } + p := strings.ToLower(strings.TrimSpace(provider)) + m := strings.TrimSpace(model) + if p == "" || m == "" { + return agentconf.EmbeddingConfig{}, fmt.Errorf("provider/model 不能为空") + } + req := catalog.AuthReqFromCatalog(p) + if strings.TrimSpace(baseURL) == "" { + if v := catalog.DefaultBaseURLForModel(p, m); strings.TrimSpace(v) != "" { + baseURL = v + } + } + bu, ak, err := s.prepareAuthInputs(ctx, env, tenantUUID, p, baseURL, apiKey, req.NeedBaseURL, req.DefaultBaseURL, req.NeedKey) + if err != nil { + return agentconf.EmbeddingConfig{}, err + } + if err := validateEndpoint(bu); err != nil { + return agentconf.EmbeddingConfig{}, err + } + + driverKey := p + if man, ok := catalog.GetGlobalAIRegister().Manifest(p); ok && man != nil { + if dk := strings.ToLower(strings.TrimSpace(man.Drivers["embedding"])); dk != "" { + 
driverKey = dk + } + } + + return agentconf.EmbeddingConfig{ + Enabled: true, + Provider: driverKey, + Endpoint: bu, + Model: m, + APIKey: ak, + MaxBatch: 8, + Dim: 0, + }, nil +} + +// BuildImageConfig resolves tenant image connection info and returns a ready config. +func (s *AgentSettingService) BuildImageConfig( + ctx context.Context, + env string, + tenantUUID *string, + provider string, + model string, + baseURL string, + apiKey string, + secretID string, + secretKey string, + region string, + organization string, +) (*agentcfg.ModelConfig, error) { + if err := ensureModelExists(string(contract.ModImage), provider, model); err != nil { + return nil, err + } + p := strings.ToLower(strings.TrimSpace(provider)) + m := strings.TrimSpace(model) + if p == "" || m == "" { + return nil, fmt.Errorf("provider/model 不能为空") + } + + req := catalog.AuthReqFromCatalog(p) + if strings.TrimSpace(baseURL) == "" { + if v := catalog.DefaultBaseURLForModel(p, m); strings.TrimSpace(v) != "" { + baseURL = v + } + } + bu, ak, err := s.prepareAuthInputs(ctx, env, tenantUUID, p, baseURL, apiKey, req.NeedBaseURL, req.DefaultBaseURL, req.NeedKey) + if err != nil { + return nil, err + } + if err := validateEndpoint(bu); err != nil { + return nil, err + } + + org := strings.TrimSpace(organization) + rg := strings.TrimSpace(region) + azureDeployment := "" + name := utils.Slug(env + "-" + p) + if cred, err := s.credRepo.FindByScopeNameProvider(ctx, env, tenantUUID, name, p); err == nil && cred != nil { + if org == "" { + if v, ok := cred.Data["organization"].(string); ok { + org = strings.TrimSpace(v) + } + } + if rg == "" { + if v, ok := cred.Data["region"].(string); ok { + rg = strings.TrimSpace(v) + } + } + if v, ok := cred.Data["azure_deployment"].(string); ok { + azureDeployment = strings.TrimSpace(v) + } + } + + return &agentcfg.ModelConfig{ + Provider: provider, + Endpoint: bu, + APIKey: ak, + SecretID: strings.TrimSpace(secretID), + SecretKey: strings.TrimSpace(secretKey), + Region: 
rg, + Model: m, + Organization: org, + AzureDeployment: azureDeployment, + Extra: s.buildModelExtras(contract.ModImage, provider, model), + }, nil +} + +// BuildModelConfig resolves tenant provider credentials for any modality and returns runtime model config. +func (s *AgentSettingService) BuildModelConfig( + ctx context.Context, + env string, + tenantUUID *string, + modality string, + provider string, + model string, + baseURL string, + apiKey string, + secretID string, + secretKey string, + region string, + organization string, +) (*agentcfg.ModelConfig, error) { + mod := strings.ToLower(strings.TrimSpace(modality)) + if mod == "" { + return nil, fmt.Errorf("modality 不能为空") + } + if err := ensureModelExists(mod, provider, model); err != nil { + return nil, err + } + p := strings.ToLower(strings.TrimSpace(provider)) + m := strings.TrimSpace(model) + if p == "" || m == "" { + return nil, fmt.Errorf("provider/model 不能为空") + } + + req := catalog.AuthReqFromCatalog(p) + if strings.TrimSpace(baseURL) == "" { + if manifest := findModelManifest(mod, p, m); manifest != nil { + if v, ok := manifest.Defaults["base_url"].(string); ok && strings.TrimSpace(v) != "" { + baseURL = strings.TrimSpace(v) + } + } + if strings.TrimSpace(baseURL) == "" { + if v := catalog.DefaultBaseURLForModel(p, m); strings.TrimSpace(v) != "" { + baseURL = v + } + } + } + bu, ak, err := s.prepareAuthInputs(ctx, env, tenantUUID, p, baseURL, apiKey, req.NeedBaseURL, req.DefaultBaseURL, req.NeedKey) + if err != nil { + return nil, err + } + if err := validateEndpoint(bu); err != nil { + return nil, err + } + + org := strings.TrimSpace(organization) + rg := strings.TrimSpace(region) + sid := strings.TrimSpace(secretID) + sk := strings.TrimSpace(secretKey) + azureDeployment := "" + + name := utils.Slug(env + "-" + p) + if cred, err := s.credRepo.FindByScopeNameProvider(ctx, env, tenantUUID, name, p); err == nil && cred != nil { + if org == "" { + if v, ok := cred.Data["organization"].(string); ok { + 
org = strings.TrimSpace(v) + } + } + if rg == "" { + if v, ok := cred.Data["region"].(string); ok { + rg = strings.TrimSpace(v) + } + } + if sid == "" { + if v, ok := cred.Data["secret_id"].(string); ok && strings.TrimSpace(v) != "" { + sid = strings.TrimSpace(v) + } else if v, ok := cred.Data["secretId"].(string); ok && strings.TrimSpace(v) != "" { + sid = strings.TrimSpace(v) + } + } + if sk == "" { + if v, ok := cred.Data["secret_key"].(string); ok && strings.TrimSpace(v) != "" { + sk = strings.TrimSpace(v) + } else if v, ok := cred.Data["secretKey"].(string); ok && strings.TrimSpace(v) != "" { + sk = strings.TrimSpace(v) + } + } + if v, ok := cred.Data["azure_deployment"].(string); ok { + azureDeployment = strings.TrimSpace(v) + } + if sid == "" || sk == "" { + sec := map[string]any{} + if e := s.tks.UnsealSensitive(ctx, env, s.tenantScopeKey(tenantUUID), cred.Data, &sec); e == nil { + if sid == "" { + if v, ok := sec["secret_id"].(string); ok && strings.TrimSpace(v) != "" { + sid = strings.TrimSpace(v) + } + } + if sk == "" { + if v, ok := sec["secret_key"].(string); ok && strings.TrimSpace(v) != "" { + sk = strings.TrimSpace(v) + } + } + } + } + } + + return &agentcfg.ModelConfig{ + Provider: provider, + Endpoint: bu, + APIKey: ak, + SecretID: sid, + SecretKey: sk, + Region: rg, + Model: m, + Organization: org, + AzureDeployment: azureDeployment, + Extra: s.buildModelExtras(contract.Modality(mod), provider, model), + }, nil +} + +func probeEmbeddingMaxInputTokens(ctx context.Context, provider, baseURL, model string) int { + if strings.ToLower(strings.TrimSpace(provider)) != "ollama" { + return 0 + } + base := strings.TrimRight(strings.TrimSpace(baseURL), "/") + if strings.HasSuffix(base, "/v1") { + base = strings.TrimSuffix(base, "/v1") + } + if base == "" || strings.TrimSpace(model) == "" { + return 0 + } + payload := map[string]string{"name": strings.TrimSpace(model)} + body, err := json.Marshal(payload) + if err != nil { + return 0 + } + req, err := 
http.NewRequestWithContext(ctx, http.MethodPost, base+"/api/show", bytes.NewReader(body)) + if err != nil { + return 0 + } + req.Header.Set("Content-Type", "application/json") + client := &http.Client{Timeout: 5 * time.Second} + resp, err := client.Do(req) + if err != nil { + logger.WarnF(ctx, "[agent_setting] ollama show failed provider=%s model=%s base=%s err=%v", provider, model, base, err) + return 0 + } + defer resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + logger.WarnF(ctx, "[agent_setting] ollama show non-2xx provider=%s model=%s base=%s status=%d", provider, model, base, resp.StatusCode) + return 0 + } + raw, err := io.ReadAll(resp.Body) + if err != nil || len(raw) == 0 { + logger.WarnF(ctx, "[agent_setting] ollama show empty body provider=%s model=%s base=%s", provider, model, base) + return 0 + } + var data map[string]any + if err := json.Unmarshal(raw, &data); err != nil { + logger.WarnF(ctx, "[agent_setting] ollama show invalid json provider=%s model=%s base=%s err=%v", provider, model, base, err) + return 0 + } + logger.InfoF(ctx, "[agent_setting] ollama show keys provider=%s model=%s keys=%v", provider, model, mapKeys(data)) + if params, ok := data["parameters"].(map[string]any); ok { + if v := parseAnyInt(params["num_ctx"]); v > 0 { + return v + } + if v := parseAnyInt(params["context_length"]); v > 0 { + return v + } + if v := parseAnyInt(params["context_len"]); v > 0 { + return v + } + if v := parseAnyInt(params["context"]); v > 0 { + return v + } + } + if params, ok := data["parameters"].(string); ok && strings.TrimSpace(params) != "" { + re := regexp.MustCompile(`(?mi)\b(num_ctx|context_length|context_len|context\s+length)\s*[:=]?\s*(\d+)\b`) + if m := re.FindStringSubmatch(params); len(m) > 2 { + if n, err := strconv.Atoi(strings.TrimSpace(m[2])); err == nil && n > 0 { + return n + } + } + } + if v := parseAnyInt(data["context_length"]); v > 0 { + return v + } + if v := parseAnyInt(data["context_len"]); v > 0 { + 
return v + } + if v := parseAnyInt(data["context"]); v > 0 { + return v + } + if info, ok := data["model_info"]; ok { + if v := extractOllamaContext(info); v > 0 { + return v + } + } + if details, ok := data["details"]; ok { + if v := extractOllamaContext(details); v > 0 { + return v + } + } + if modelInfo, ok := data["model"]; ok { + if v := extractOllamaContext(modelInfo); v > 0 { + return v + } + } + if v := parseAnyInt(data["num_ctx"]); v > 0 { + return v + } + if mf, ok := data["modelfile"].(string); ok && strings.TrimSpace(mf) != "" { + re := regexp.MustCompile(`(?mi)^\s*PARAMETER\s+num_ctx\s+(\d+)\s*$`) + if m := re.FindStringSubmatch(mf); len(m) > 1 { + if n, err := strconv.Atoi(strings.TrimSpace(m[1])); err == nil && n > 0 { + return n + } + } + } + return 0 +} + +func extractOllamaContext(raw any) int { + switch info := raw.(type) { + case map[string]any: + if v := parseAnyInt(info["num_ctx"]); v > 0 { + return v + } + if v := parseAnyInt(info["context_length"]); v > 0 { + return v + } + if v := parseAnyInt(info["context_len"]); v > 0 { + return v + } + if v := parseAnyInt(info["context"]); v > 0 { + return v + } + // Ollama 的 model_info/details 常见 key: "bert.context_length" / "llama.context_length" + for k, v := range info { + lk := strings.ToLower(strings.TrimSpace(k)) + if strings.Contains(lk, "context_length") || strings.HasSuffix(lk, ".context_length") || strings.HasSuffix(lk, "context_length") { + if n := parseAnyInt(v); n > 0 { + return n + } + } + if strings.Contains(lk, "num_ctx") || strings.HasSuffix(lk, ".num_ctx") { + if n := parseAnyInt(v); n > 0 { + return n + } + } + } + case string: + if v := parseContextFromString(info); v > 0 { + return v + } + } + return 0 +} + +func parseContextFromString(s string) int { + if strings.TrimSpace(s) == "" { + return 0 + } + re := regexp.MustCompile(`(?mi)\b(num_ctx|context_length|context_len|context\s+length)\s*[:=]?\s*(\d+)\b`) + if m := re.FindStringSubmatch(s); len(m) > 2 { + if n, err := 
strconv.Atoi(strings.TrimSpace(m[2])); err == nil && n > 0 { + return n + } + } + return 0 +} + +func mapKeys(m map[string]any) []string { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func parseAnyInt(v any) int { + switch val := v.(type) { + case int: + if val > 0 { + return val + } + case int64: + if val > 0 { + return int(val) + } + case float64: + if int(val) > 0 { + return int(val) + } + case float32: + if int(val) > 0 { + return int(val) + } + case string: + if parsed, err := strconv.Atoi(strings.TrimSpace(val)); err == nil && parsed > 0 { + return parsed + } + case json.Number: + if parsed, err := val.Int64(); err == nil && parsed > 0 { + return int(parsed) + } + if parsed, err := strconv.Atoi(strings.TrimSpace(val.String())); err == nil && parsed > 0 { + return parsed + } + } + return 0 +} + // 修改 QuickCallLLM:同样带 env/tenantUUID + 回退解密 func (s *AgentSettingService) QuickCallLLM( ctx context.Context, env string, tenantUUID *string, @@ -836,7 +1533,8 @@ func (s *AgentSettingService) QuickCallLLM( // 回退解密(OpenAI-compatible 等) var err error baseURL, apiKey, err = s.resolveConnFromStore(ctx, env, tenantUUID, provider, baseURL, apiKey) - if err != nil { /* 忽略错误,尽量继续 */ } + if err != nil { /* 忽略错误,尽量继续 */ + } mc := agentcfg.ModelConfig{ Provider: provider, @@ -1177,12 +1875,25 @@ func (s *AgentSettingService) buildModelExtras(modality contract.Modality, provi if manifest == nil || manifest.Defaults == nil { return nil } + out := map[string]any{} if raw, ok := manifest.Defaults["api_path"]; ok { if path, ok2 := raw.(string); ok2 && strings.TrimSpace(path) != "" { - return map[string]any{"api_path": path} + out["api_path"] = strings.TrimSpace(path) } } - return nil + for _, key := range []string{ + "action", "action_poll", "version", "service", "service_id", + "req_json", "result_req_json", "force_single", "scale", "min_ratio", "max_ratio", + "base_url", "api_path_submit", 
"api_path_poll", "parameters", + } { + if raw, ok := manifest.Defaults[key]; ok { + out[key] = raw + } + } + if len(out) == 0 { + return nil + } + return out } func mergeManifestDefaults(user datatypes.JSONMap, manifest map[string]any) datatypes.JSONMap { @@ -1226,24 +1937,55 @@ func mergeTags(modality string, manifestTags []string, existing datatypes.JSONSl func findModelManifest(modality, provider, model string) *catalog.ModelManifest { reg := catalog.GetGlobalAIRegister() - models, err := reg.Models(modality, provider) - if err != nil { - return nil + app, pureModel := splitAppModel(reg, provider, model) + candidates := []string{strings.TrimSpace(strings.ToLower(modality))} + if candidates[0] == "audio_tts" || candidates[0] == "audio_asr" { + candidates = append(candidates, "audio") } - for _, m := range models { - if strings.EqualFold(m.ID, model) { - copy := m - return © + for _, mod := range candidates { + models, err := reg.ModelsByApp(mod, provider, app) + if err != nil { + continue + } + for _, m := range models { + if strings.EqualFold(m.ID, pureModel) || strings.EqualFold(m.ID, model) { + copy := m + return © + } } } return nil } +func splitAppModel(reg *catalog.Registry, provider, model string) (string, string) { + raw := strings.TrimSpace(model) + if raw == "" { + return "", "" + } + parts := strings.SplitN(raw, ":", 2) + if len(parts) != 2 { + return "", raw + } + app := strings.ToLower(strings.TrimSpace(parts[0])) + if app == "" { + return "", raw + } + // only treat prefix as app if provider declares it + apps := reg.Apps(provider, "") + for _, it := range apps { + if strings.EqualFold(it.ID, app) { + return app, strings.TrimSpace(parts[1]) + } + } + return "", raw +} + // —— catalog 适配:在 service 层做轻薄封装,避免 handler 直依赖 —— // type aiProviderItem struct { ID string Name string + Apps []aiAppItem } type aiModelItem struct { @@ -1251,6 +1993,11 @@ type aiModelItem struct { Name string } +type aiAppItem struct { + ID string + Name string +} + // 
这两个函数直接调用你现有的 catalog,全局注册器:catalog.GetGlobalAIRegister() func catalogGetProviders(mod string) []aiProviderItem { m := strings.TrimSpace(strings.ToLower(mod)) @@ -1258,7 +2005,7 @@ func catalogGetProviders(mod string) []aiProviderItem { // ✅ 对齐:图像/视频两套 Provider 列表保持一致:image ∪ video var items []catalog.ProviderItem - if m == "video" || m == "image" { + if m == "video" || m == "image" || m == "audio_tts" || m == "audio_asr" { seen := map[string]struct{}{} add := func(list []catalog.ProviderItem) { for _, it := range list { @@ -1272,34 +2019,65 @@ func catalogGetProviders(mod string) []aiProviderItem { items = append(items, it) } } - add(reg.Providers("image")) - add(reg.Providers("video")) + if m == "video" || m == "image" { + add(reg.Providers("image")) + add(reg.Providers("video")) + } + if m == "audio_tts" || m == "audio_asr" { + add(reg.Providers(m)) + add(reg.Providers("audio")) + } } else { items = reg.Providers(m) } out := make([]aiProviderItem, 0, len(items)) for _, it := range items { - out = append(out, aiProviderItem{ID: it.ID, Name: it.Name}) + appItems := reg.Apps(it.ID, m) + // 若按当前模态没有 app,回退到 provider 全量 app,避免 UI 丢失 app 层级。 + if len(appItems) == 0 { + if allApps := reg.Apps(it.ID, ""); len(allApps) > 0 { + appItems = allApps + } + } + apps := make([]aiAppItem, 0, len(appItems)) + for _, a := range appItems { + apps = append(apps, aiAppItem{ID: a.ID, Name: a.Name}) + } + out = append(out, aiProviderItem{ID: it.ID, Name: it.Name, Apps: apps}) } return out } -func catalogGetModels(mod, prov string) ([]aiModelItem, error) { +func catalogGetModels(mod, prov, app string) ([]aiModelItem, error) { m := strings.TrimSpace(strings.ToLower(mod)) reg := catalog.GetGlobalAIRegister() + p := strings.TrimSpace(strings.ToLower(prov)) + a := strings.TrimSpace(strings.ToLower(app)) + + // 对有 apps 的 provider:要求显式 app 过滤,避免跨 app 混合模型列表。 + if a == "" { + if apps := reg.Apps(p, m); len(apps) > 0 { + return []aiModelItem{}, nil + } + } // ✅ 对齐:图像/视频如果该模态没模型,则回退到另一模态(避免下拉为空) 
- ms, err := reg.Models(m, prov) + ms, err := reg.ModelsByApp(m, p, a) if err != nil { return nil, err } if len(ms) == 0 { if m == "video" { - if fallback, e := reg.Models("image", prov); e == nil && len(fallback) > 0 { + if fallback, e := reg.ModelsByApp("image", p, a); e == nil && len(fallback) > 0 { ms = fallback } } if m == "image" { - if fallback, e := reg.Models("video", prov); e == nil && len(fallback) > 0 { + if fallback, e := reg.ModelsByApp("video", p, a); e == nil && len(fallback) > 0 { + ms = fallback + } + } + if m == "audio_tts" || m == "audio_asr" { + if fallback, e := reg.ModelsByApp("audio", p, a); e == nil && len(fallback) > 0 { ms = fallback } } @@ -1432,6 +2210,13 @@ func (s *AgentSettingService) GetActiveProfile( return &latest, nil } +// GetProfile returns the profile row for a specific (env, scope, modality, provider, model). +func (s *AgentSettingService) GetProfile( + ctx context.Context, env string, tenantUUID *string, modality, provider, model string, +) (*dbmodel.AIModelProfile, error) { + return s.profRepo.FindByScopeModalityProviderModel(ctx, env, tenantUUID, modality, provider, model) +} + // service:设置某模态的“当前激活” func (s *AgentSettingService) SetActiveProfile( ctx context.Context, env string, tenantUUID *string, modality, provider, model string, @@ -1466,9 +2251,10 @@ func (s *AgentSettingService) resolveModelRule(modality, provider, model string) // 2) 默认 base_url: model.defaults 覆盖 auth.defaults def := "" // 先 model 级 - if models, _ := reg.Models(modality, provider); len(models) > 0 { + app, pureModel := splitAppModel(reg, provider, model) + if models, _ := reg.ModelsByApp(modality, provider, app); len(models) > 0 { for _, mm := range models { - if strings.EqualFold(mm.ID, model) { + if strings.EqualFold(mm.ID, pureModel) || strings.EqualFold(mm.ID, model) { if v, ok := mm.Defaults["base_url"].(string); ok && strings.TrimSpace(v) != "" { def = v } diff --git a/backend/internal/service/agent/chat_history_service.go 
b/backend/internal/service/agent/chat_history_service.go index f974fa8c..0f36e9cd 100644 --- a/backend/internal/service/agent/chat_history_service.go +++ b/backend/internal/service/agent/chat_history_service.go @@ -93,6 +93,12 @@ func (s *ChatHistoryService) FindSessionByID( return s.sess.FindByID(ctx, env, tenantUUID, id) } +func (s *ChatHistoryService) FindSessionByUUID( + ctx context.Context, env string, tenantUUID *string, uid string, +) (*dbmodel.AgentChatSession, error) { + return s.sess.FindByUUID(ctx, env, tenantUUID, uid) +} + // ListSessions:按 Agent 维度分页查询(statuses 可为空) func (s *ChatHistoryService) ListSessions( ctx context.Context, env string, tenantUUID *string, diff --git a/backend/internal/service/agent/embedding_profile.go b/backend/internal/service/agent/embedding_profile.go new file mode 100644 index 00000000..702d64a4 --- /dev/null +++ b/backend/internal/service/agent/embedding_profile.go @@ -0,0 +1,96 @@ +package agent + +import ( + "encoding/json" + "strconv" + "strings" + + dbmodel "github.com/ArtisanCloud/PowerX/internal/server/agent/persistence/model" +) + +// ResolveEmbeddingDimensions returns embedding dimensions from profile defaults/cap_cache. +func ResolveEmbeddingDimensions(profile *dbmodel.AIModelProfile) int { + if profile == nil { + return 0 + } + if dim := readDimensionFromMap(profile.Defaults); dim > 0 { + return dim + } + return readDimensionFromMap(profile.CapCache) +} + +// EmbeddingProfileProbed reports whether profile has a valid probe stamp. +func EmbeddingProfileProbed(profile *dbmodel.AIModelProfile) bool { + if profile == nil || profile.CapCache == nil { + return false + } + val, ok := profile.CapCache["probed_at"] + if !ok { + return false + } + switch v := val.(type) { + case string: + return strings.TrimSpace(v) != "" + default: + return v != nil + } +} + +// EmbeddingProfileReady checks both dimension and probe stamp. 
+func EmbeddingProfileReady(profile *dbmodel.AIModelProfile) bool { + return ResolveEmbeddingDimensions(profile) > 0 && EmbeddingProfileProbed(profile) +} + +func readDimensionFromMap(values map[string]any) int { + if len(values) == 0 { + return 0 + } + if v, ok := values["dimensions"]; ok { + if d := parseDimension(v); d > 0 { + return d + } + } + if v, ok := values["dim"]; ok { + if d := parseDimension(v); d > 0 { + return d + } + } + return 0 +} + +func parseDimension(v any) int { + switch val := v.(type) { + case float64: + if int(val) > 0 { + return int(val) + } + case float32: + if int(val) > 0 { + return int(val) + } + case int: + if val > 0 { + return val + } + case int32: + if val > 0 { + return int(val) + } + case int64: + if val > 0 { + return int(val) + } + case string: + if parsed, err := strconv.Atoi(strings.TrimSpace(val)); err == nil && parsed > 0 { + return parsed + } + case json.Number: + if parsed, err := val.Int64(); err == nil && parsed > 0 { + return int(parsed) + } + if parsed, err := strconv.Atoi(strings.TrimSpace(val.String())); err == nil && parsed > 0 { + return parsed + } + } + return 0 +} diff --git a/backend/internal/service/ai/service.go b/backend/internal/service/ai/service.go new file mode 100644 index 00000000..40e4b169 --- /dev/null +++ b/backend/internal/service/ai/service.go @@ -0,0 +1,761 @@ +package ai + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "strconv" + "strings" + + "github.com/ArtisanCloud/PowerX/internal/server/agent/catalog" + "github.com/ArtisanCloud/PowerX/internal/server/agent/contract" + intentfactory "github.com/ArtisanCloud/PowerX/internal/server/agent/factory/intent" + repoai "github.com/ArtisanCloud/PowerX/internal/server/agent/persistence/repository" + imagefactory "github.com/ArtisanCloud/PowerX/internal/server/ai/factory/image" + ttsfactory "github.com/ArtisanCloud/PowerX/internal/server/ai/factory/tts" + videofactory 
"github.com/ArtisanCloud/PowerX/internal/server/ai/factory/video" + vlmfactory "github.com/ArtisanCloud/PowerX/internal/server/ai/factory/vlm" + agentsettings "github.com/ArtisanCloud/PowerX/internal/service/agent" + "gorm.io/gorm" +) + +var ( + ErrInvalidModelKey = errors.New("invalid model_key") + ErrModelNotConfigured = errors.New("model not configured for tenant") + ErrPromptRequired = errors.New("inputs must include text") + ErrProviderUnsupported = errors.New("provider driver not implemented") +) + +type Service struct { + settings *agentsettings.AgentSettingService + profiles *repoai.AIModelProfileRepository +} + +func NewService(db *gorm.DB) *Service { + if db == nil { + return nil + } + return &Service{ + settings: agentsettings.NewAgentSettingService(db), + profiles: repoai.NewAIModelProfileRepository(db), + } +} + +func (s *Service) ResolveTenantEnv(ctx context.Context, tenantUUID string) (string, bool, error) { + if s == nil || s.settings == nil { + return "", false, nil + } + return s.settings.GetTenantCurrentAIEnv(ctx, tenantUUID) +} + +func (s *Service) LLMInvoke( + ctx context.Context, + env string, + tenantUUID string, + modelKey string, + inputs []ContentItem, + params map[string]interface{}, +) (string, error) { + if s == nil || s.settings == nil || s.profiles == nil { + return "", errors.New("ai service unavailable") + } + provider, model := splitModelKey(modelKey) + if provider == "" || model == "" { + return "", ErrInvalidModelKey + } + prof, err := s.profiles.FindByScopeModalityProviderModel(ctx, env, &tenantUUID, "llm", provider, model) + defaults, _ := resolveDefaults("llm", provider, model) + if prof == nil || err != nil { + if !s.allowUnprofiled(ctx, env, tenantUUID, "llm", provider) { + return "", ErrModelNotConfigured + } + } else { + defaults = prof.Defaults + } + prompt := BuildPrompt(inputs) + if strings.TrimSpace(prompt) == "" { + return "", ErrPromptRequired + } + temperature := floatFromAny(defaults["temperature"]) + maxTokens := 
intFromAny(defaults["maxTokens"]) + if maxTokens == 0 { + maxTokens = intFromAny(defaults["max_tokens"]) + } + if params != nil { + if v, ok := params["temperature"]; ok { + if t := floatFromAny(v); t > 0 { + temperature = t + } + } + if v, ok := params["max_tokens"]; ok { + if mt := intFromAny(v); mt > 0 { + maxTokens = mt + } + } + if v, ok := params["maxTokens"]; ok { + if mt := intFromAny(v); mt > 0 { + maxTokens = mt + } + } + } + return s.settings.QuickCallLLM( + ctx, env, &tenantUUID, + provider, model, "", "", "", "", "", "", + temperature, maxTokens, + prompt, + ) +} + +func (s *Service) EmbeddingInvoke( + ctx context.Context, + env string, + tenantUUID string, + modelKey string, + inputs []string, + params map[string]interface{}, +) ([][]float32, error) { + if s == nil || s.settings == nil || s.profiles == nil { + return nil, errors.New("ai service unavailable") + } + provider, model := splitModelKey(modelKey) + if provider == "" || model == "" { + return nil, ErrInvalidModelKey + } + prof, err := s.profiles.FindByScopeModalityProviderModel(ctx, env, &tenantUUID, "embedding", provider, model) + defaults, ok := resolveDefaults("embedding", provider, model) + if prof == nil || err != nil { + if !s.allowUnprofiled(ctx, env, tenantUUID, "embedding", provider) { + return nil, ErrModelNotConfigured + } + if !ok { + return nil, ErrInvalidModelKey + } + } else { + defaults = prof.Defaults + } + cfg, err := s.settings.BuildEmbeddingConfig(ctx, env, &tenantUUID, provider, model, "", "") + if err != nil { + return nil, err + } + if batch := intFromAny(defaults["batch"]); batch > 0 { + cfg.MaxBatch = batch + } + vec, err := intentfactory.NewVectorizerFromConfig(cfg) + if err != nil { + return nil, err + } + if vec == nil { + return nil, errors.New("embedding vectorizer unavailable") + } + return vec.Embed(ctx, inputs) +} + +func (s *Service) ImageInvoke( + ctx context.Context, + env string, + tenantUUID string, + modelKey string, + inputs []ContentItem, + params 
map[string]interface{}, +) (map[string]interface{}, error) { + if s == nil || s.settings == nil || s.profiles == nil { + return nil, errors.New("ai service unavailable") + } + provider, model := splitModelKey(modelKey) + if provider == "" || model == "" { + return nil, ErrInvalidModelKey + } + imageDriver := resolveProviderDriver(provider, "image") + // Qwen 的 image 模态在当前实现上是 VLM 能力(图像/视觉问答), + // 不支持 imagefactory 的生图协议,需分流到 VLM 客户端。 + if imageDriver == "qwen" { + return s.VLMInvoke(ctx, env, tenantUUID, modelKey, inputs, params) + } + prof, err := s.profiles.FindByScopeModalityProviderModel(ctx, env, &tenantUUID, "image", provider, model) + defaults, ok := resolveDefaults("image", provider, model) + if prof == nil || err != nil { + if !s.allowUnprofiled(ctx, env, tenantUUID, "image", provider) { + return nil, ErrModelNotConfigured + } + if !ok { + return nil, ErrInvalidModelKey + } + } else { + defaults = prof.Defaults + } + prompt := BuildPrompt(inputs) + if strings.TrimSpace(prompt) == "" { + return nil, ErrPromptRequired + } + refImages := buildImageRefParts(inputs) + if hint := stringFromAny(defaults["promptHint"]); hint != "" { + prompt = strings.TrimSpace(prompt + "\n" + hint) + } + + size := stringFromAny(defaults["size"]) + quality := stringFromAny(defaults["quality"]) + format := stringFromAny(defaults["format"]) + if params != nil { + if v := stringFromAny(params["size"]); v != "" { + size = v + } + if v := stringFromAny(params["quality"]); v != "" { + quality = v + } + if v := stringFromAny(params["format"]); v != "" { + format = v + } + if v := stringFromAny(params["output_format"]); v != "" { + format = v + } + } + + mc, err := s.settings.BuildImageConfig(ctx, env, &tenantUUID, provider, model, "", "", "", "", "", "") + if err != nil { + return nil, err + } + cli, err := imagefactory.NewClient(imageDriver) + if err != nil { + return nil, err + } + req := contract.ImageRequest{ + Prompt: prompt, + Size: size, + Quality: quality, + Format: format, + 
RefImages: refImages, + Runtime: map[string]any{ + "config": mc, + }, + } + resp, err := cli.Generate(ctx, req) + if err != nil { + return nil, err + } + + out := map[string]interface{}{ + "provider": resp.Provider, + "model": resp.Model, + } + if len(resp.Images) > 0 { + out["images"] = resp.Images + } + if len(resp.ImageURLs) > 0 { + out["image_urls"] = resp.ImageURLs + } + if resp.Usage != nil { + out["usage"] = resp.Usage + } + if resp.LatencyMS > 0 { + out["latency_ms"] = resp.LatencyMS + } + if resp.TraceID != "" { + out["trace_id"] = resp.TraceID + } + return out, nil +} + +func (s *Service) VLMInvoke( + ctx context.Context, + env string, + tenantUUID string, + modelKey string, + inputs []ContentItem, + params map[string]interface{}, +) (map[string]interface{}, error) { + if s == nil || s.settings == nil || s.profiles == nil { + return nil, errors.New("ai service unavailable") + } + provider, model := splitModelKey(modelKey) + if provider == "" || model == "" { + return nil, ErrInvalidModelKey + } + prof, err := s.profiles.FindByScopeModalityProviderModel(ctx, env, &tenantUUID, "image", provider, model) + defaults, ok := resolveDefaults("image", provider, model) + if prof == nil || err != nil { + if !s.allowUnprofiled(ctx, env, tenantUUID, "image", provider) { + return nil, ErrModelNotConfigured + } + if !ok { + return nil, ErrInvalidModelKey + } + } else { + defaults = prof.Defaults + } + + mc, err := s.settings.BuildModelConfig(ctx, env, &tenantUUID, "image", provider, model, "", "", "", "", "", "") + if err != nil { + return nil, err + } + vlmDriver := resolveProviderDriver(provider, "image") + cli, err := vlmfactory.NewClient(vlmDriver) + if err != nil { + return nil, err + } + + req := contract.VLMRequest{ + Messages: buildVLMMessage(inputs), + Temperature: floatFromAny(defaults["temperature"]), + TopP: floatFromAny(defaults["top_p"]), + MaxTokens: intFromAny(defaults["max_tokens"]), + Runtime: map[string]any{ + "config": mc, + }, + } + if req.MaxTokens 
<= 0 { + req.MaxTokens = intFromAny(defaults["maxTokens"]) + } + if params != nil { + if v, ok := params["temperature"]; ok { + if val := floatFromAny(v); val > 0 { + req.Temperature = val + } + } + if v, ok := params["top_p"]; ok { + if val := floatFromAny(v); val > 0 { + req.TopP = val + } + } + if v, ok := params["max_tokens"]; ok { + if val := intFromAny(v); val > 0 { + req.MaxTokens = val + } + } + if v, ok := params["json_mode"]; ok { + if b, ok2 := v.(bool); ok2 { + req.JSONMode = b + } + } + } + + resp, err := cli.Invoke(ctx, req) + if err != nil { + return nil, err + } + return map[string]interface{}{ + "provider": resp.Provider, + "model": resp.Model, + "text": resp.Text, + "usage": resp.Usage, + }, nil +} + +func (s *Service) VideoInvoke( + ctx context.Context, + env string, + tenantUUID string, + modelKey string, + inputs []ContentItem, + params map[string]interface{}, +) (map[string]interface{}, error) { + if s == nil || s.settings == nil || s.profiles == nil { + return nil, errors.New("ai service unavailable") + } + provider, model := splitModelKey(modelKey) + if provider == "" || model == "" { + return nil, ErrInvalidModelKey + } + prof, err := s.profiles.FindByScopeModalityProviderModel(ctx, env, &tenantUUID, "video", provider, model) + defaults, ok := resolveDefaults("video", provider, model) + if prof == nil || err != nil { + if !s.allowUnprofiled(ctx, env, tenantUUID, "video", provider) { + return nil, ErrModelNotConfigured + } + if !ok { + return nil, ErrInvalidModelKey + } + } else { + defaults = prof.Defaults + } + + prompt := BuildPrompt(inputs) + if strings.TrimSpace(prompt) == "" { + return nil, ErrPromptRequired + } + mc, err := s.settings.BuildModelConfig(ctx, env, &tenantUUID, "video", provider, model, "", "", "", "", "", "") + if err != nil { + return nil, err + } + videoDriver := resolveProviderDriver(provider, "video") + cli, err := videofactory.NewClient(videoDriver) + if err != nil { + return nil, err + } + req := 
contract.VideoRequest{ + Prompt: prompt, + Resolution: stringFromAny(defaults["resolution"]), + FPS: intFromAny(defaults["fps"]), + MaxDurationS: intFromAny(defaults["maxDurationSec"]), + RefImages: buildImageRefParts(inputs), + RefVideos: buildVideoRefParts(inputs), + Runtime: map[string]any{ + "config": mc, + }, + } + if params != nil { + if v := stringFromAny(params["resolution"]); v != "" { + req.Resolution = v + } + if v := intFromAny(params["fps"]); v > 0 { + req.FPS = v + } + if v := intFromAny(params["max_duration_sec"]); v > 0 { + req.MaxDurationS = v + } + } + resp, err := cli.Generate(ctx, req) + if err != nil { + return nil, err + } + out := map[string]interface{}{ + "provider": resp.Provider, + "model": resp.Model, + } + if len(resp.VideoURLs) > 0 { + out["video_urls"] = resp.VideoURLs + } + if resp.TaskID != "" { + out["task_id"] = resp.TaskID + } + if resp.PollURL != "" { + out["poll_url"] = resp.PollURL + } + if resp.Usage != nil { + out["usage"] = resp.Usage + } + if resp.TraceID != "" { + out["trace_id"] = resp.TraceID + } + return out, nil +} + +func (s *Service) TTSInvoke( + ctx context.Context, + env string, + tenantUUID string, + modelKey string, + inputs []ContentItem, + params map[string]interface{}, +) (map[string]interface{}, error) { + if s == nil || s.settings == nil || s.profiles == nil { + return nil, errors.New("ai service unavailable") + } + provider, model := splitModelKey(modelKey) + if provider == "" || model == "" { + return nil, ErrInvalidModelKey + } + prof, err := s.profiles.FindByScopeModalityProviderModel(ctx, env, &tenantUUID, "audio_tts", provider, model) + defaults, ok := resolveDefaults("audio_tts", provider, model) + if prof == nil || err != nil { + if !s.allowUnprofiled(ctx, env, tenantUUID, "audio_tts", provider) { + return nil, ErrModelNotConfigured + } + if !ok { + return nil, ErrInvalidModelKey + } + } else { + defaults = prof.Defaults + } + text := BuildPrompt(inputs) + if strings.TrimSpace(text) == "" { + return 
nil, ErrPromptRequired + } + mc, err := s.settings.BuildModelConfig(ctx, env, &tenantUUID, "audio_tts", provider, model, "", "", "", "", "", "") + if err != nil { + return nil, err + } + ttsDriver := resolveProviderDriver(provider, "audio_tts") + cli, err := ttsfactory.NewClient(ttsDriver) + if err != nil { + return nil, err + } + req := contract.TTSRequest{ + Text: text, + Voice: stringFromAny(defaults["voice"]), + Speed: floatFromAny(defaults["speed"]), + Format: stringFromAny(defaults["format"]), + Runtime: map[string]any{ + "config": mc, + }, + } + if params != nil { + if v := stringFromAny(params["voice"]); v != "" { + req.Voice = v + } + if v := floatFromAny(params["speed"]); v > 0 { + req.Speed = v + } + if v := stringFromAny(params["format"]); v != "" { + req.Format = v + } + } + resp, err := cli.Synthesize(ctx, req) + if err != nil { + return nil, err + } + out := map[string]interface{}{ + "provider": resp.Provider, + "model": resp.Model, + } + if resp.AudioURL != "" { + out["audio_url"] = resp.AudioURL + } + if len(resp.Audio) > 0 { + out["audio_base64"] = base64.StdEncoding.EncodeToString(resp.Audio) + } + if resp.Usage != nil { + out["usage"] = resp.Usage + } + if resp.TraceID != "" { + out["trace_id"] = resp.TraceID + } + return out, nil +} + +type ContentItem struct { + Type string `json:"type"` + Text string `json:"text"` + URL string `json:"url"` +} + +func BuildPrompt(items []ContentItem) string { + parts := make([]string, 0, len(items)) + for _, item := range items { + if strings.EqualFold(strings.TrimSpace(item.Type), "text") && strings.TrimSpace(item.Text) != "" { + parts = append(parts, strings.TrimSpace(item.Text)) + } + } + return strings.Join(parts, "\n") +} + +func buildImageRefParts(items []ContentItem) []contract.ContentPart { + out := make([]contract.ContentPart, 0, len(items)) + for _, item := range items { + t := strings.TrimSpace(strings.ToLower(item.Type)) + switch t { + case contract.ContentTypeImageURL: + if 
strings.TrimSpace(item.URL) == "" { + continue + } + out = append(out, contract.ContentPart{Type: contract.ContentTypeImageURL, URL: strings.TrimSpace(item.URL)}) + case contract.ContentTypeImageBase64: + raw := strings.TrimSpace(item.Text) + if raw == "" { + raw = strings.TrimSpace(item.URL) + } + if raw == "" { + continue + } + out = append(out, contract.ContentPart{Type: contract.ContentTypeImageBase64, Text: raw}) + } + } + return out +} + +func buildVideoRefParts(items []ContentItem) []contract.ContentPart { + out := make([]contract.ContentPart, 0, len(items)) + for _, item := range items { + if strings.TrimSpace(strings.ToLower(item.Type)) != contract.ContentTypeVideoURL { + continue + } + if strings.TrimSpace(item.URL) == "" { + continue + } + out = append(out, contract.ContentPart{Type: contract.ContentTypeVideoURL, URL: strings.TrimSpace(item.URL)}) + } + return out +} + +func buildVLMMessage(items []ContentItem) []contract.Message { + parts := make([]contract.ContentPart, 0, len(items)) + for _, item := range items { + switch strings.TrimSpace(strings.ToLower(item.Type)) { + case contract.ContentTypeImageURL: + if strings.TrimSpace(item.URL) == "" { + continue + } + parts = append(parts, contract.ContentPart{ + Type: contract.ContentTypeImageURL, + URL: strings.TrimSpace(item.URL), + }) + default: + txt := strings.TrimSpace(item.Text) + if txt == "" { + continue + } + parts = append(parts, contract.ContentPart{ + Type: contract.ContentTypeText, + Text: txt, + }) + } + } + if len(parts) == 0 { + parts = append(parts, contract.ContentPart{Type: contract.ContentTypeText, Text: "Describe the image."}) + } + return []contract.Message{{Role: "user", Content: parts}} +} + +func splitModelKey(modelKey string) (string, string) { + if strings.Contains(modelKey, "/") { + parts := strings.SplitN(modelKey, "/", 2) + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) + } + if strings.Contains(modelKey, ":") { + parts := strings.SplitN(modelKey, ":", 2) + 
return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) + } + return "", "" +} + +func resolveDefaults(modality, provider, model string) (map[string]any, bool) { + app, modelID := splitAppModel(model) + manifest := findModelManifest(modality, provider, app, modelID) + if manifest == nil || manifest.Defaults == nil { + return map[string]any{}, false + } + out := map[string]any{} + for k, v := range manifest.Defaults { + out[k] = v + } + return out, true +} + +func splitAppModel(model string) (string, string) { + raw := strings.TrimSpace(model) + if raw == "" { + return "", "" + } + if strings.Contains(raw, ":") { + parts := strings.SplitN(raw, ":", 2) + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) + } + return "", raw +} + +func findModelManifest(modality, provider, app, model string) *catalog.ModelManifest { + reg := catalog.GetGlobalAIRegister() + candidates := []string{strings.TrimSpace(strings.ToLower(modality))} + if candidates[0] == "audio_tts" || candidates[0] == "audio_asr" { + candidates = append(candidates, "audio") + } + for _, mod := range candidates { + models, err := reg.ModelsByApp(mod, provider, app) + if err != nil { + continue + } + for _, m := range models { + if strings.EqualFold(m.ID, model) { + copy := m + return &copy + } + } + } + return nil +} + +func resolveProviderDriver(provider, modality string) string { + p := strings.ToLower(strings.TrimSpace(provider)) + if p == "" { + return "" + } + mod := strings.ToLower(strings.TrimSpace(modality)) + if mod == "" { + return p + } + reg := catalog.GetGlobalAIRegister() + if man, ok := reg.Manifest(p); ok && man != nil { + if dk := strings.ToLower(strings.TrimSpace(man.Drivers[mod])); dk != "" { + return dk + } + } + return p +} + +func (s *Service) allowUnprofiled(ctx context.Context, env, tenantUUID, modality, provider string) bool { + if s == nil || s.settings == nil { + return false + } + healthMap, configured, err := s.settings.GetTenantProviderHealthMap(ctx, tenantUUID,
env, modality) + if err != nil { + return false + } + if !configured { + return false + } + status := "" + if healthMap != nil { + if rec, ok := healthMap[strings.ToLower(strings.TrimSpace(provider))]; ok { + status = strings.ToLower(strings.TrimSpace(rec.Status)) + } + } + if status != "healthy" { + return false + } + creds, err := s.settings.ListCredentials(ctx, env, &tenantUUID) + if err != nil { + return false + } + for _, c := range creds { + if strings.EqualFold(strings.TrimSpace(c.Provider), strings.TrimSpace(provider)) { + return true + } + } + return false +} + +func floatFromAny(val interface{}) float64 { + switch v := val.(type) { + case float64: + return v + case float32: + return float64(v) + case int: + return float64(v) + case int64: + return float64(v) + case json.Number: + f, _ := v.Float64() + return f + case string: + if f, err := strconv.ParseFloat(strings.TrimSpace(v), 64); err == nil { + return f + } + } + return 0 +} + +func intFromAny(val interface{}) int { + switch v := val.(type) { + case int: + return v + case int64: + return int(v) + case float64: + return int(v) + case float32: + return int(v) + case json.Number: + if i, err := v.Int64(); err == nil { + return int(i) + } + case string: + if i, err := strconv.Atoi(strings.TrimSpace(v)); err == nil { + return i + } + } + return 0 +} + +func stringFromAny(val interface{}) string { + switch v := val.(type) { + case string: + return strings.TrimSpace(v) + case json.Number: + return strings.TrimSpace(v.String()) + case []byte: + return strings.TrimSpace(string(v)) + default: + return "" + } +} diff --git a/backend/internal/service/auth/me_service.go b/backend/internal/service/auth/me_service.go index ecd05287..b19919bc 100644 --- a/backend/internal/service/auth/me_service.go +++ b/backend/internal/service/auth/me_service.go @@ -110,6 +110,7 @@ func (s *MeService) GetMeContext(ctx context.Context) (*MeContextResp, error) { // 4) 组装 members brief(is_admin 先 false,等你接 RBAC 再填充) brs := 
make([]MeMemberBrief, 0, len(members)) + memberByTenant := make(map[string]uint64, len(members)) for _, mem := range members { info := tenantBasicMap[mem.TenantUUID] uuidStr := strings.TrimSpace(mem.TenantUUID) @@ -118,6 +119,7 @@ func (s *MeService) GetMeContext(ctx context.Context) (*MeContextResp, error) { uuidStr = info.UUID.String() name = info.Name } + memberByTenant[uuidStr] = mem.ID brs = append(brs, MeMemberBrief{ TenantUUID: uuidStr, TenantName: name, @@ -126,6 +128,32 @@ func (s *MeService) GetMeContext(ctx context.Context) (*MeContextResp, error) { }) } + // 5) 修正 current_tenant_uuid: + // db-refresh/本地缓存/token stale 时,ctx 中的 tenant_uuid 可能不在 members 里,导致前端永远查不到数据。 + // 规则:若当前 tenant 不在 members,优先选 "System" 租户,否则选第一个 member 租户。 + if len(brs) > 0 { + tenantInMembers := false + for _, b := range brs { + if b.TenantUUID == tenantUUID { + tenantInMembers = true + break + } + } + if tenantUUID == "" || !tenantInMembers { + preferred := brs[0].TenantUUID + for _, b := range brs { + if strings.EqualFold(strings.TrimSpace(b.TenantName), "system") { + preferred = b.TenantUUID + break + } + } + tenantUUID = preferred + if mid, ok := memberByTenant[tenantUUID]; ok { + currentMemberID = &mid + } + } + } + return &MeContextResp{ IsRoot: isRoot, CurrentTenantUUID: tenantUUID, diff --git a/backend/internal/service/capability_registry/audit.go b/backend/internal/service/capability_registry/audit.go index f8450419..f01dfba1 100644 --- a/backend/internal/service/capability_registry/audit.go +++ b/backend/internal/service/capability_registry/audit.go @@ -10,7 +10,7 @@ import ( "github.com/google/uuid" "gorm.io/datatypes" - "github.com/ArtisanCloud/PowerX/internal/eventbus" + eventbus "github.com/ArtisanCloud/PowerX/internal/event_bus" capmetrics "github.com/ArtisanCloud/PowerX/internal/observability/metrics" auditpkg "github.com/ArtisanCloud/PowerX/pkg/corex/audit" models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/capability_registry" diff --git 
a/backend/internal/service/capability_registry/audit_test.go b/backend/internal/service/capability_registry/audit_test.go index 9d436b03..a6085fec 100644 --- a/backend/internal/service/capability_registry/audit_test.go +++ b/backend/internal/service/capability_registry/audit_test.go @@ -9,7 +9,7 @@ import ( "gorm.io/driver/sqlite" "gorm.io/gorm" - "github.com/ArtisanCloud/PowerX/internal/eventbus" + eventbus "github.com/ArtisanCloud/PowerX/internal/event_bus" coremodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model" models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/capability_registry" repo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/capability_registry" diff --git a/backend/internal/service/capability_registry/cache_test.go b/backend/internal/service/capability_registry/cache_test.go index c76b62d9..fb7eae21 100644 --- a/backend/internal/service/capability_registry/cache_test.go +++ b/backend/internal/service/capability_registry/cache_test.go @@ -7,6 +7,7 @@ import ( "time" models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/capability_registry" + "github.com/ArtisanCloud/PowerX/pkg/utils/testutil" "github.com/alicebob/miniredis/v2" "github.com/redis/go-redis/v9" "github.com/stretchr/testify/require" @@ -120,8 +121,11 @@ func TestNewCacheManagerNilRedis(t *testing.T) { func newTestRedis(t *testing.T) (*miniredis.Miniredis, redis.UniversalClient) { t.Helper() + testutil.SkipIfNoLocalListener(t) srv, err := miniredis.Run() - require.NoError(t, err) + if err != nil { + t.Skipf("miniredis unavailable: %v", err) + } t.Cleanup(srv.Close) client := redis.NewClient(&redis.Options{Addr: srv.Addr()}) diff --git a/backend/internal/service/capability_registry/invocation_service.go b/backend/internal/service/capability_registry/invocation_service.go index 6c658177..dac47301 100644 --- a/backend/internal/service/capability_registry/invocation_service.go +++ 
b/backend/internal/service/capability_registry/invocation_service.go @@ -43,6 +43,7 @@ type InvocationServiceOptions struct { HTTPClient *http.Client HTTPBaseURL string GRPCConn *grpc.ClientConn + ModelVerifier ModelKeyVerifier } // InvocationService 负责触发能力调用并记录追踪。 @@ -52,12 +53,18 @@ type InvocationService struct { traces *repo.InvocationTraceRepository audit *AuditService versionLock VersionLock + modelVerifier ModelKeyVerifier now func() time.Time httpClient *http.Client httpBaseURL string grpcConn *grpc.ClientConn } +// ModelKeyVerifier validates tenant-scoped model_key access. +type ModelKeyVerifier interface { + VerifyModelKey(ctx context.Context, tenantUUID, env, modality, modelKey string) error +} + // InvocationInput 描述调用��求。 type InvocationInput struct { CapabilityID string @@ -108,6 +115,7 @@ func NewInvocationService(opts InvocationServiceOptions) *InvocationService { traces: opts.TraceRepo, audit: audit, versionLock: opts.VersionLock, + modelVerifier: opts.ModelVerifier, now: clock, httpClient: httpClient, httpBaseURL: strings.TrimSuffix(strings.TrimSpace(opts.HTTPBaseURL), "/"), @@ -156,6 +164,20 @@ func (s *InvocationService) Invoke(ctx context.Context, in InvocationInput) (Inv if !strings.EqualFold(strings.TrimSpace(record.Status), "published") { return result, fmt.Errorf("capability %s is not published", capabilityID) } + if s.modelVerifier != nil && strings.HasPrefix(strings.ToLower(capabilityID), "com.corex.ai.") { + modality := extractStringFromBody(in.Payload, "modality") + modelKey := extractStringFromBody(in.Payload, "model_key") + if modality == "" { + modality = defaultModalityForCapability(capabilityID) + } + if modelKey == "" && strings.Contains(strings.ToLower(capabilityID), ".stream") { + return result, fmt.Errorf("model_key required") + } + env := extractQueryString(in.Payload, "env") + if err := s.modelVerifier.VerifyModelKey(ctx, tenantUUID, env, modality, modelKey); err != nil { + return result, err + } + } if s.versionLock != nil 
&& strings.TrimSpace(record.CapabilitiesHash) != "" { if err := s.versionLock.Enforce(ctx, tenantUUID, capabilityID, strings.TrimSpace(record.CapabilitiesHash)); err != nil { @@ -279,7 +301,7 @@ func (s *InvocationService) executeAdapterCall(ctx context.Context, routerResult if s.httpClient == nil || s.httpBaseURL == "" { return nil, nil } - restPayload, err := buildRESTInvokePayload(in.Payload) + restPayload, err := buildRESTInvokePayloadWithDefaults(in.Payload, routerResult.Endpoint, routerResult.Labels) if err != nil { return nil, err } @@ -288,7 +310,7 @@ func (s *InvocationService) executeAdapterCall(ctx context.Context, routerResult if s.grpcConn == nil { return nil, nil } - grpcPayload, err := buildGRPCInvokePayload(in.Payload) + grpcPayload, err := buildGRPCInvokePayloadWithDefaults(in.Payload, routerResult.Endpoint, routerResult.Labels) if err != nil { return nil, err } @@ -314,14 +336,24 @@ type grpcInvokePayload struct { } func buildRESTInvokePayload(raw map[string]interface{}) (restInvokePayload, error) { + return buildRESTInvokePayloadWithDefaults(raw, "", nil) +} + +func buildRESTInvokePayloadWithDefaults(raw map[string]interface{}, defaultEndpoint string, labels map[string]string) (restInvokePayload, error) { if len(raw) == 0 { return restInvokePayload{}, errors.New("payload required for REST invocation") } method := strings.ToUpper(strings.TrimSpace(getString(raw["method"]))) + if method == "" { + method = strings.ToUpper(strings.TrimSpace(getLabel(labels, "method"))) + } if method == "" { method = http.MethodGet } endpoint := strings.TrimSpace(getString(raw["endpoint"])) + if endpoint == "" { + endpoint = strings.TrimSpace(defaultEndpoint) + } if endpoint == "" { return restInvokePayload{}, errors.New("payload.endpoint required for REST invocation") } @@ -330,7 +362,9 @@ func buildRESTInvokePayload(raw map[string]interface{}) (restInvokePayload, erro var body interface{} if b, ok := raw["body"]; ok { - body = b + body = mergeBodyWithTopLevel(raw, 
b) + } else if method != http.MethodGet && method != http.MethodHead { + body = stripEnvelopeKeys(raw) } return restInvokePayload{ @@ -343,14 +377,24 @@ func buildRESTInvokePayload(raw map[string]interface{}) (restInvokePayload, erro } func buildGRPCInvokePayload(raw map[string]interface{}) (grpcInvokePayload, error) { + return buildGRPCInvokePayloadWithDefaults(raw, "", nil) +} + +func buildGRPCInvokePayloadWithDefaults(raw map[string]interface{}, endpoint string, labels map[string]string) (grpcInvokePayload, error) { if len(raw) == 0 { return grpcInvokePayload{}, errors.New("payload required for gRPC invocation") } service := strings.TrimSpace(getString(raw["endpoint"])) + if service == "" { + service = strings.TrimSpace(endpoint) + } if service == "" { return grpcInvokePayload{}, errors.New("payload.endpoint required for gRPC invocation") } method := strings.TrimSpace(getString(raw["rpc"])) + if method == "" { + method = strings.TrimSpace(getLabel(labels, "rpc")) + } if method == "" { return grpcInvokePayload{}, errors.New("payload.rpc required for gRPC invocation") } @@ -365,7 +409,7 @@ func buildGRPCInvokePayload(raw map[string]interface{}) (grpcInvokePayload, erro body[k] = v } case nil: - // leave empty + body = stripEnvelopeKeys(raw) default: return grpcInvokePayload{}, errors.New("payload.body for gRPC invocation must be an object") } @@ -548,6 +592,132 @@ func getString(v interface{}) string { } } +func extractString(m map[string]interface{}, key string) string { + if len(m) == 0 { + return "" + } + if value, ok := m[key]; ok { + return strings.TrimSpace(getString(value)) + } + return "" +} + +func extractStringFromBody(payload map[string]interface{}, key string) string { + if len(payload) == 0 { + return "" + } + body, ok := payload["body"] + if !ok || body == nil { + return "" + } + switch typed := body.(type) { + case map[string]interface{}: + return extractString(typed, key) + default: + return "" + } +} + +func extractQueryString(payload 
map[string]interface{}, key string) string { + if len(payload) == 0 { + return "" + } + query, ok := payload["query"] + if !ok || query == nil { + return "" + } + switch typed := query.(type) { + case map[string]interface{}: + return extractString(typed, key) + default: + return "" + } +} + +func getLabel(labels map[string]string, key string) string { + if len(labels) == 0 { + return "" + } + return strings.TrimSpace(labels[key]) +} + +func stripEnvelopeKeys(raw map[string]interface{}) map[string]interface{} { + if len(raw) == 0 { + return nil + } + result := make(map[string]interface{}, len(raw)) + for k, v := range raw { + switch strings.ToLower(strings.TrimSpace(k)) { + case "method", "endpoint", "headers", "query", "body", "rpc", "stream": + continue + default: + result[k] = v + } + } + if len(result) == 0 { + return nil + } + return result +} + +func mergeBodyWithTopLevel(raw map[string]interface{}, body interface{}) interface{} { + if len(raw) == 0 || body == nil { + return body + } + bodyMap, ok := toStringAnyMap(body) + if !ok { + return body + } + merged := make(map[string]interface{}, len(bodyMap)+len(raw)) + for k, v := range bodyMap { + merged[k] = v + } + for k, v := range raw { + key := strings.ToLower(strings.TrimSpace(k)) + switch key { + case "method", "endpoint", "headers", "query", "body", "rpc", "stream": + continue + } + if _, exists := merged[k]; exists { + continue + } + merged[k] = v + } + return merged +} + +func toStringAnyMap(v interface{}) (map[string]interface{}, bool) { + switch typed := v.(type) { + case map[string]interface{}: + return typed, true + default: + return nil, false + } +} + +func defaultModalityForCapability(capabilityID string) string { + lower := strings.ToLower(strings.TrimSpace(capabilityID)) + if strings.Contains(lower, "llm") { + return "llm" + } + if strings.Contains(lower, "image") { + return "image" + } + if strings.Contains(lower, "video") { + return "video" + } + if strings.Contains(lower, "tts") { + return 
"audio_tts" + } + if strings.Contains(lower, "embedding") || strings.Contains(lower, "embeddings") { + return "embedding" + } + if strings.Contains(lower, "multimodal") { + return "mixed" + } + return "" +} + func extractStickyKey(ctx map[string]interface{}) string { if len(ctx) == 0 { return "" diff --git a/backend/internal/service/capability_registry/invocation_service_payload_test.go b/backend/internal/service/capability_registry/invocation_service_payload_test.go new file mode 100644 index 00000000..5ba13746 --- /dev/null +++ b/backend/internal/service/capability_registry/invocation_service_payload_test.go @@ -0,0 +1,34 @@ +package capability_registry + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestExtractStringFromBody_IgnoresTopLevel(t *testing.T) { + t.Parallel() + + payload := map[string]interface{}{ + "model_key": "top/provider:model", + "body": map[string]interface{}{ + "model_key": "body/provider:model", + }, + } + + require.Equal(t, "body/provider:model", extractStringFromBody(payload, "model_key")) +} + +func TestExtractStringFromBody_ReadsBodyWhenTopLevelMissing(t *testing.T) { + t.Parallel() + + payload := map[string]interface{}{ + "body": map[string]interface{}{ + "model_key": "ollama/qwen3:8b", + "modality": "llm", + }, + } + + require.Equal(t, "ollama/qwen3:8b", extractStringFromBody(payload, "model_key")) + require.Equal(t, "llm", extractStringFromBody(payload, "modality")) +} diff --git a/backend/internal/service/capability_registry/invocation_service_test.go b/backend/internal/service/capability_registry/invocation_service_test.go index 4b76d4e4..4da6b22a 100644 --- a/backend/internal/service/capability_registry/invocation_service_test.go +++ b/backend/internal/service/capability_registry/invocation_service_test.go @@ -7,6 +7,7 @@ import ( "net/http/httptest" "testing" + "github.com/ArtisanCloud/PowerX/pkg/utils/testutil" "github.com/stretchr/testify/require" "google.golang.org/grpc" 
"google.golang.org/grpc/credentials/insecure" @@ -17,6 +18,7 @@ import ( func TestInvocationServiceInvokeREST(t *testing.T) { t.Parallel() + testutil.SkipIfNoLocalListener(t) server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { require.Equal(t, http.MethodGet, r.Method) diff --git a/backend/internal/service/capability_registry/model_key_verifier.go b/backend/internal/service/capability_registry/model_key_verifier.go new file mode 100644 index 00000000..b9222cca --- /dev/null +++ b/backend/internal/service/capability_registry/model_key_verifier.go @@ -0,0 +1,69 @@ +package capability_registry + +import ( + "context" + "fmt" + "strings" + + repoai "github.com/ArtisanCloud/PowerX/internal/server/agent/persistence/repository" + "gorm.io/gorm" +) + +// TenantModelKeyVerifier validates model_key ownership for a tenant. +type TenantModelKeyVerifier struct { + profiles *repoai.AIModelProfileRepository +} + +// NewTenantModelKeyVerifier constructs a tenant-scoped model_key verifier. +func NewTenantModelKeyVerifier(db *gorm.DB) *TenantModelKeyVerifier { + if db == nil { + return nil + } + return &TenantModelKeyVerifier{ + profiles: repoai.NewAIModelProfileRepository(db), + } +} + +// VerifyModelKey checks if model_key belongs to tenant configuration. 
+func (v *TenantModelKeyVerifier) VerifyModelKey(ctx context.Context, tenantUUID, env, modality, modelKey string) error { + if v == nil || v.profiles == nil { + return nil + } + tenantUUID = strings.TrimSpace(tenantUUID) + if tenantUUID == "" { + return fmt.Errorf("tenant_uuid required for model verification") + } + modality = strings.TrimSpace(modality) + if modality == "" { + return fmt.Errorf("modality required for model verification") + } + modelKey = strings.TrimSpace(modelKey) + if modelKey == "" { + return fmt.Errorf("model_key required") + } + provider, model := splitModelKey(modelKey) + if provider == "" || model == "" { + return fmt.Errorf("invalid model_key %s", modelKey) + } + env = strings.TrimSpace(env) + if env == "" { + env = "default" + } + _, err := v.profiles.FindByScopeModalityProviderModel(ctx, env, &tenantUUID, modality, provider, model) + if err != nil { + return fmt.Errorf("model_key %s not configured for tenant", modelKey) + } + return nil +} + +func splitModelKey(modelKey string) (string, string) { + if strings.Contains(modelKey, "/") { + parts := strings.SplitN(modelKey, "/", 2) + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) + } + if strings.Contains(modelKey, ":") { + parts := strings.SplitN(modelKey, ":", 2) + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) + } + return "", "" +} diff --git a/backend/internal/service/capability_registry/policy_generator_test.go b/backend/internal/service/capability_registry/policy_generator_test.go index 4199bdd3..5f3084c3 100644 --- a/backend/internal/service/capability_registry/policy_generator_test.go +++ b/backend/internal/service/capability_registry/policy_generator_test.go @@ -9,6 +9,7 @@ import ( coremodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model" models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/capability_registry" repo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/capability_registry" + 
"github.com/ArtisanCloud/PowerX/pkg/utils/testutil" "github.com/alicebob/miniredis/v2" "github.com/redis/go-redis/v9" "github.com/stretchr/testify/require" @@ -113,8 +114,11 @@ func newPolicyMemoryDB(t *testing.T) *gorm.DB { } func newTestRedisClient(t *testing.T) redis.UniversalClient { + testutil.SkipIfNoLocalListener(t) srv, err := miniredis.Run() - require.NoError(t, err) + if err != nil { + t.Skipf("miniredis unavailable: %v", err) + } t.Cleanup(srv.Close) client := redis.NewClient(&redis.Options{Addr: srv.Addr()}) t.Cleanup(func() { _ = client.Close() }) diff --git a/backend/internal/service/capability_registry/registry/policy.go b/backend/internal/service/capability_registry/registry/policy.go index 5683375e..10878c1b 100644 --- a/backend/internal/service/capability_registry/registry/policy.go +++ b/backend/internal/service/capability_registry/registry/policy.go @@ -163,7 +163,7 @@ func adminPermissions() ([]dbm.Permission, error) { } perms = append(perms, dbm.Permission{ - Plugin: PluginCapabilityRegistry, + Module: PluginCapabilityRegistry, Resource: spec.Resource, Action: spec.Action, Description: spec.Description, diff --git a/backend/internal/service/capability_registry/registry_service_source_filter_test.go b/backend/internal/service/capability_registry/registry_service_source_filter_test.go new file mode 100644 index 00000000..644cab69 --- /dev/null +++ b/backend/internal/service/capability_registry/registry_service_source_filter_test.go @@ -0,0 +1,41 @@ +package capability_registry + +import ( + "testing" + + models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/capability_registry" +) + +func TestRecordMatchesFilters_Source(t *testing.T) { + t.Parallel() + + corexRecord := models.CapabilityRecord{ + CapabilityID: "corex.capability.a", + PluginID: "corex.platform", + } + pluginRecord := models.CapabilityRecord{ + CapabilityID: "plugin.capability.b", + PluginID: "com.powerx.plugins.base.template", + } + + // source empty means no 
source filtering (equivalent to source=all/any). + if !recordMatchesFilters(corexRecord, CapabilityListOptions{Source: ""}) { + t.Fatal("expected corex record to pass when source filter is empty") + } + if !recordMatchesFilters(pluginRecord, CapabilityListOptions{Source: ""}) { + t.Fatal("expected plugin record to pass when source filter is empty") + } + + if !recordMatchesFilters(corexRecord, CapabilityListOptions{Source: CapabilitySourceCoreX}) { + t.Fatal("expected corex record to pass corex filter") + } + if recordMatchesFilters(pluginRecord, CapabilityListOptions{Source: CapabilitySourceCoreX}) { + t.Fatal("expected plugin record to be filtered out by corex filter") + } + if !recordMatchesFilters(pluginRecord, CapabilityListOptions{Source: CapabilitySourcePlugin}) { + t.Fatal("expected plugin record to pass plugin filter") + } + if recordMatchesFilters(corexRecord, CapabilityListOptions{Source: CapabilitySourcePlugin}) { + t.Fatal("expected corex record to be filtered out by plugin filter") + } +} diff --git a/backend/internal/service/capability_registry/router/service.go b/backend/internal/service/capability_registry/router/service.go index c78bf724..005d636e 100644 --- a/backend/internal/service/capability_registry/router/service.go +++ b/backend/internal/service/capability_registry/router/service.go @@ -172,6 +172,7 @@ func (s *Service) routeWithRegistration(ctx context.Context, reg registry.Regist FallbackUsed: selection.fallbackUsed, Payload: selection.payload, Latency: selection.latency, + Labels: selection.labels, } s.observeInvocation(ctx, reg, selection, result, mutate, nil) if mutate { @@ -251,6 +252,7 @@ type adapterSelection struct { fallbackUsed bool payload []byte latency time.Duration + labels map[string]string } func (s *Service) selectAdapter(ctx context.Context, reg registry.Registration, in InvokeRequest) (adapterSelection, error) { @@ -281,6 +283,7 @@ func (s *Service) selectAdapter(ctx context.Context, reg registry.Registration, 
adapter.adapterID = ep.AdapterID adapter.endpoint = preferredEndpoint(ep) adapter.transport = ep.TransportType + adapter.labels = ep.Labels return adapter, nil } } @@ -293,6 +296,7 @@ func (s *Service) selectAdapter(ctx context.Context, reg registry.Registration, adapter.adapterID = ep.AdapterID adapter.endpoint = preferredEndpoint(ep) adapter.transport = ep.TransportType + adapter.labels = ep.Labels return adapter, nil } diff --git a/backend/internal/service/capability_registry/router/types.go b/backend/internal/service/capability_registry/router/types.go index 95073291..f83f72a8 100644 --- a/backend/internal/service/capability_registry/router/types.go +++ b/backend/internal/service/capability_registry/router/types.go @@ -36,6 +36,7 @@ type InvokeResult struct { Payload []byte Latency time.Duration Error error + Labels map[string]string } // ReportHealthInput 描述健康上报。 diff --git a/backend/internal/service/capability_registry/selector_hooks.go b/backend/internal/service/capability_registry/selector_hooks.go index 0011978f..b420349f 100644 --- a/backend/internal/service/capability_registry/selector_hooks.go +++ b/backend/internal/service/capability_registry/selector_hooks.go @@ -5,7 +5,7 @@ import ( "errors" "strings" - "github.com/ArtisanCloud/PowerX/internal/eventbus" + eventbus "github.com/ArtisanCloud/PowerX/internal/event_bus" capmetrics "github.com/ArtisanCloud/PowerX/internal/observability/metrics" "github.com/ArtisanCloud/PowerX/pkg/event_bus" ) diff --git a/backend/internal/service/capability_registry/source_test.go b/backend/internal/service/capability_registry/source_test.go new file mode 100644 index 00000000..3da2e426 --- /dev/null +++ b/backend/internal/service/capability_registry/source_test.go @@ -0,0 +1,42 @@ +package capability_registry + +import "testing" + +func TestNormalizeCapabilitySource(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input string + want string + wantErr bool + }{ + {name: "empty means all", input: "", 
want: ""}, + {name: "all means all", input: "all", want: ""}, + {name: "any means all", input: "any", want: ""}, + {name: "corex canonical", input: "corex", want: CapabilitySourceCoreX}, + {name: "platform alias", input: "platform", want: CapabilitySourceCoreX}, + {name: "plugin canonical", input: "plugin", want: CapabilitySourcePlugin}, + {name: "invalid source", input: "foobar", wantErr: true}, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + got, err := NormalizeCapabilitySource(tc.input) + if tc.wantErr { + if err == nil { + t.Fatalf("expected error, got nil (value=%q)", got) + } + return + } + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != tc.want { + t.Fatalf("expected %q, got %q", tc.want, got) + } + }) + } +} diff --git a/backend/internal/service/capability_registry/sync_worker.go b/backend/internal/service/capability_registry/sync_worker.go index 242f1353..bdd53f5f 100644 --- a/backend/internal/service/capability_registry/sync_worker.go +++ b/backend/internal/service/capability_registry/sync_worker.go @@ -13,7 +13,7 @@ import ( "strings" "time" - "github.com/ArtisanCloud/PowerX/internal/eventbus" + eventbus "github.com/ArtisanCloud/PowerX/internal/event_bus" models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/capability_registry" repo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/capability_registry" "github.com/ArtisanCloud/PowerX/pkg/event_bus" diff --git a/backend/internal/service/capability_registry/sync_worker_test.go b/backend/internal/service/capability_registry/sync_worker_test.go index f0846630..fc34f721 100644 --- a/backend/internal/service/capability_registry/sync_worker_test.go +++ b/backend/internal/service/capability_registry/sync_worker_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/ArtisanCloud/PowerX/internal/eventbus" + eventbus "github.com/ArtisanCloud/PowerX/internal/event_bus" coremodel 
"github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model" models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/capability_registry" repo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/capability_registry" diff --git a/backend/internal/service/event_fabric/acl/acl_cache.go b/backend/internal/service/event_fabric/acl/acl_cache.go new file mode 100644 index 00000000..37a25f57 --- /dev/null +++ b/backend/internal/service/event_fabric/acl/acl_cache.go @@ -0,0 +1,229 @@ +package acl + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/ArtisanCloud/PowerX/pkg/cache" + "github.com/google/uuid" +) + +const ( + aclCachePrefix = "event:acl" + defaultACLLocalCacheTTL = 90 * time.Second + defaultACLRedisCacheTTL = 180 * time.Second +) + +// ACLResultCache 定义 ACL 查询结果缓存接口。 +type ACLResultCache interface { + Get(ctx context.Context, key string) (allowed bool, hit bool, err error) + Set(ctx context.Context, key string, allowed bool) error + Delete(ctx context.Context, key string) error +} + +// BuildACLResultCacheKey 返回统一 ACL 缓存 key:event:acl:{scope}:{topic_id}:{principal}:{action}。 +func BuildACLResultCacheKey(scope string, topicID uuid.UUID, principalID string, action string) string { + normalizedScope := strings.ToLower(strings.TrimSpace(scope)) + if normalizedScope == "" { + normalizedScope = "unknown" + } + normalizedPrincipal := strings.ToLower(strings.TrimSpace(principalID)) + if normalizedPrincipal == "" { + normalizedPrincipal = "anonymous" + } + normalizedAction := strings.ToLower(strings.TrimSpace(action)) + if normalizedAction == "" { + normalizedAction = "unknown" + } + return fmt.Sprintf("%s:%s:%s:%s:%s", aclCachePrefix, normalizedScope, topicID.String(), normalizedPrincipal, normalizedAction) +} + +type aclLocalCacheEntry struct { + allowed bool + expiresAt time.Time +} + +// LocalACLResultCache 为进程内 ACL 缓存实现。 +type LocalACLResultCache struct { + ttl time.Duration + clock func() 
time.Time + items sync.Map +} + +// NewLocalACLResultCache 创建本地 ACL 缓存。 +func NewLocalACLResultCache(ttl time.Duration, clock func() time.Time) *LocalACLResultCache { + if ttl <= 0 { + ttl = defaultACLLocalCacheTTL + } + if clock == nil { + clock = time.Now + } + return &LocalACLResultCache{ttl: ttl, clock: clock} +} + +func (c *LocalACLResultCache) Get(_ context.Context, key string) (bool, bool, error) { + if c == nil || strings.TrimSpace(key) == "" { + return false, false, nil + } + v, ok := c.items.Load(key) + if !ok { + return false, false, nil + } + entry, ok := v.(aclLocalCacheEntry) + if !ok { + c.items.Delete(key) + return false, false, nil + } + if c.clock().UTC().After(entry.expiresAt) { + c.items.Delete(key) + return false, false, nil + } + return entry.allowed, true, nil +} + +func (c *LocalACLResultCache) Set(_ context.Context, key string, allowed bool) error { + if c == nil || strings.TrimSpace(key) == "" { + return nil + } + c.items.Store(key, aclLocalCacheEntry{ + allowed: allowed, + expiresAt: c.clock().UTC().Add(c.ttl), + }) + return nil +} + +func (c *LocalACLResultCache) Delete(_ context.Context, key string) error { + if c == nil || strings.TrimSpace(key) == "" { + return nil + } + c.items.Delete(key) + return nil +} + +// RedisACLResultCache 为 Redis ACL 缓存实现。 +type RedisACLResultCache struct { + store cache.ICache + ttl time.Duration +} + +// NewRedisACLResultCache 创建 Redis ACL 缓存。 +func NewRedisACLResultCache(store cache.ICache, ttl time.Duration) *RedisACLResultCache { + if ttl <= 0 { + ttl = defaultACLRedisCacheTTL + } + return &RedisACLResultCache{store: store, ttl: ttl} +} + +func (c *RedisACLResultCache) Get(ctx context.Context, key string) (bool, bool, error) { + if c == nil || c.store == nil || strings.TrimSpace(key) == "" { + return false, false, nil + } + data, err := c.store.Get(ctx, key) + if err != nil { + return false, false, err + } + if len(data) == 0 { + return false, false, nil + } + if string(data) == "1" { + return true, 
true, nil + } + if string(data) == "0" { + return false, true, nil + } + return false, false, nil +} + +func (c *RedisACLResultCache) Set(ctx context.Context, key string, allowed bool) error { + if c == nil || c.store == nil || strings.TrimSpace(key) == "" { + return nil + } + value := "0" + if allowed { + value = "1" + } + return c.store.Set(ctx, key, value, c.ttl) +} + +func (c *RedisACLResultCache) Delete(ctx context.Context, key string) error { + if c == nil || c.store == nil || strings.TrimSpace(key) == "" { + return nil + } + return c.store.Delete(ctx, key) +} + +// LayeredACLResultCache 提供本地 + Redis 两级缓存。 +type LayeredACLResultCache struct { + local ACLResultCache + remote ACLResultCache +} + +// NewLayeredACLResultCache 创建两级 ACL 缓存。 +func NewLayeredACLResultCache(local ACLResultCache, remote ACLResultCache) *LayeredACLResultCache { + return &LayeredACLResultCache{local: local, remote: remote} +} + +func (c *LayeredACLResultCache) Get(ctx context.Context, key string) (bool, bool, error) { + if c == nil { + return false, false, nil + } + if c.local != nil { + if allowed, hit, err := c.local.Get(ctx, key); err != nil { + return false, false, err + } else if hit { + return allowed, true, nil + } + } + if c.remote == nil { + return false, false, nil + } + allowed, hit, err := c.remote.Get(ctx, key) + if err != nil { + return false, false, err + } + if hit && c.local != nil { + _ = c.local.Set(ctx, key, allowed) + } + return allowed, hit, nil +} + +func (c *LayeredACLResultCache) Set(ctx context.Context, key string, allowed bool) error { + if c == nil { + return nil + } + var errs []error + if c.local != nil { + if err := c.local.Set(ctx, key, allowed); err != nil { + errs = append(errs, err) + } + } + if c.remote != nil { + if err := c.remote.Set(ctx, key, allowed); err != nil { + errs = append(errs, err) + } + } + return errors.Join(errs...) 
+} + +func (c *LayeredACLResultCache) Delete(ctx context.Context, key string) error { + if c == nil { + return nil + } + var errs []error + if c.local != nil { + if err := c.local.Delete(ctx, key); err != nil { + errs = append(errs, err) + } + } + if c.remote != nil { + if err := c.remote.Delete(ctx, key); err != nil { + errs = append(errs, err) + } + } + return errors.Join(errs...) +} + diff --git a/backend/internal/service/event_fabric/acl/acl_cache_test.go b/backend/internal/service/event_fabric/acl/acl_cache_test.go new file mode 100644 index 00000000..9575fe3f --- /dev/null +++ b/backend/internal/service/event_fabric/acl/acl_cache_test.go @@ -0,0 +1,116 @@ +package acl + +import ( + "context" + "testing" + "time" + + model "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/event_fabric" + "github.com/google/uuid" +) + +func TestBuildACLResultCacheKey(t *testing.T) { + topicID := uuid.MustParse("8f4aa96b-5fb7-4e7a-bdd8-f87fdf8e26d1") + key := BuildACLResultCacheKey(" TENANT-COREX ", topicID, " Service.Replay ", " Subscribe ") + expected := "event:acl:tenant-corex:8f4aa96b-5fb7-4e7a-bdd8-f87fdf8e26d1:service.replay:subscribe" + if key != expected { + t.Fatalf("unexpected key: %s", key) + } +} + +func TestLayeredACLResultCache_LocalThenRemote(t *testing.T) { + ctx := context.Background() + local := NewLocalACLResultCache(10*time.Minute, time.Now) + remote := NewLocalACLResultCache(10*time.Minute, time.Now) + cache := NewLayeredACLResultCache(local, remote) + + key := "event:acl:tenant:topic:principal:publish" + if err := remote.Set(ctx, key, true); err != nil { + t.Fatalf("seed remote cache failed: %v", err) + } + + allowed, hit, err := cache.Get(ctx, key) + if err != nil { + t.Fatalf("get from layered cache failed: %v", err) + } + if !hit || !allowed { + t.Fatalf("expected remote hit with allowed=true, got hit=%v allowed=%v", hit, allowed) + } + + if err := remote.Delete(ctx, key); err != nil { + t.Fatalf("delete remote failed: %v", err) + } + + 
allowed, hit, err = cache.Get(ctx, key) + if err != nil { + t.Fatalf("get from local cache failed: %v", err) + } + if !hit || !allowed { + t.Fatalf("expected local hit after warmup, got hit=%v allowed=%v", hit, allowed) + } +} + +func TestACLServiceCan_UseCache(t *testing.T) { + store := newCountAclStore(true) + svc := NewACLService(Options{ + Store: store, + Cache: NewLayeredACLResultCache( + NewLocalACLResultCache(10*time.Minute, time.Now), + nil, + ), + Clock: time.Now, + }) + + ctx := context.Background() + tenant := "tenant-corex" + topic := uuid.MustParse("52937ef6-d650-42d2-9f41-4b6cf5f787b3") + principal := "core.worker" + + allowed, err := svc.Can(ctx, tenant, topic, principal, PrincipalActionPublish) + if err != nil { + t.Fatalf("first can failed: %v", err) + } + if !allowed { + t.Fatalf("expected allowed=true") + } + if store.calls != 1 { + t.Fatalf("expected first call hits store once, got %d", store.calls) + } + + allowed, err = svc.Can(ctx, tenant, topic, principal, PrincipalActionPublish) + if err != nil { + t.Fatalf("second can failed: %v", err) + } + if !allowed { + t.Fatalf("expected allowed=true on cached path") + } + if store.calls != 1 { + t.Fatalf("expected second call served from cache, store calls=%d", store.calls) + } +} + +type countAclStore struct { + allowed bool + calls int +} + +func newCountAclStore(allowed bool) *countAclStore { + return &countAclStore{allowed: allowed} +} + +func (s *countAclStore) UpsertBindings(_ context.Context, _ []*model.AclBinding) ([]*model.AclBinding, error) { + panic("unexpected call") +} + +func (s *countAclStore) RemoveBindings(_ context.Context, _ string, _ uuid.UUID, _ string, _ []string) (int64, error) { + panic("unexpected call") +} + +func (s *countAclStore) ListByTopic(_ context.Context, _ string, _ uuid.UUID) ([]*model.AclBinding, error) { + panic("unexpected call") +} + +func (s *countAclStore) HasPermission(_ context.Context, _ string, _ uuid.UUID, _ string, _ string, _ time.Time) (bool, error) { 
+ s.calls++ + return s.allowed, nil +} diff --git a/backend/internal/service/event_fabric/acl/acl_service.go b/backend/internal/service/event_fabric/acl/acl_service.go index 2be8fae1..c3482a1e 100644 --- a/backend/internal/service/event_fabric/acl/acl_service.go +++ b/backend/internal/service/event_fabric/acl/acl_service.go @@ -77,12 +77,14 @@ type Options struct { DB *gorm.DB Store AclStore TopicStore TopicLookup + Cache ACLResultCache Clock func() time.Time } type ACLService struct { store AclStore topics TopicLookup + cache ACLResultCache clock func() time.Time } @@ -104,10 +106,15 @@ func NewACLService(opts Options) *ACLService { return &ACLService{ store: store, topics: topicStore, + cache: opts.Cache, clock: clock, } } +func (s *ACLService) cacheKey(tenantKey string, topic uuid.UUID, principalID string, action string) string { + return BuildACLResultCacheKey(tenantKey, topic, principalID, action) +} + func (s *ACLService) Grant(ctx context.Context, req GrantRequest) ([]*Binding, error) { if s.store == nil || s.topics == nil { return nil, fmt.Errorf("acl service not configured") @@ -127,7 +134,7 @@ func (s *ACLService) Grant(ctx context.Context, req GrantRequest) ([]*Binding, e if topic == nil { return nil, fmt.Errorf("topic %s not found", req.TopicUUID) } - if !strings.EqualFold(topic.TenantKey, tenantKey) { + if !strings.EqualFold(topic.TenantKey, tenantKey) && !isSharedTenantKey(topic.TenantKey) { return nil, fmt.Errorf("tenant mismatch with topic") } @@ -150,6 +157,19 @@ func (s *ACLService) Grant(ctx context.Context, req GrantRequest) ([]*Binding, e if actionStr == "" { return nil, fmt.Errorf("action cannot be empty") } + cacheKey := s.cacheKey(topic.TenantKey, topic.UUID, principalID, actionStr) + if s.cache != nil { + if allowed, hit, err := s.cache.Get(ctx, cacheKey); err == nil && hit && allowed { + continue + } + } + allowed, err := s.store.HasPermission(ctx, topic.TenantKey, topic.UUID, principalID, actionStr, now) + if err == nil && allowed { + if 
s.cache != nil { + _ = s.cache.Set(ctx, cacheKey, true) + } + continue + } modelBindings = append(modelBindings, &model.AclBinding{ TenantKey: topic.TenantKey, TopicUUID: topic.UUID, @@ -168,15 +188,24 @@ func (s *ACLService) Grant(ctx context.Context, req GrantRequest) ([]*Binding, e }) } + if len(modelBindings) == 0 { + return nil, nil + } + records, err := s.store.UpsertBindings(ctx, modelBindings) if err != nil { return nil, err } + for _, b := range records { + if s.cache != nil { + _ = s.cache.Set(ctx, s.cacheKey(b.TenantKey, b.TopicUUID, b.PrincipalID, b.Action), true) + } + } return convertBindings(records), nil } func (s *ACLService) Revoke(ctx context.Context, req RevokeRequest) error { - if s.store == nil { + if s.store == nil || s.topics == nil { return fmt.Errorf("acl service not configured") } tenantKey, err := resolveTenantKey(req.TenantUUID) @@ -191,19 +220,33 @@ func (s *ACLService) Revoke(ctx context.Context, req RevokeRequest) error { if principalID == "" { return fmt.Errorf("principal_id is required") } + topic, err := s.topics.FindByUUID(ctx, topicUUID) + if err != nil { + return err + } + if topic == nil { + return fmt.Errorf("topic %s not found", req.TopicUUID) + } + aclTenantKey := strings.TrimSpace(topic.TenantKey) + if !strings.EqualFold(aclTenantKey, tenantKey) && !isSharedTenantKey(aclTenantKey) { + return fmt.Errorf("tenant mismatch with topic") + } var actions []string for _, action := range req.Actions { if token := strings.ToLower(strings.TrimSpace(string(action))); token != "" { actions = append(actions, token) + if s.cache != nil { + _ = s.cache.Delete(ctx, s.cacheKey(aclTenantKey, topicUUID, principalID, token)) + } } } - _, err = s.store.RemoveBindings(ctx, tenantKey, topicUUID, principalID, actions) + _, err = s.store.RemoveBindings(ctx, aclTenantKey, topicUUID, principalID, actions) return err } func (s *ACLService) ListBindings(ctx context.Context, req ListRequest) ([]*Binding, error) { - if s.store == nil { + if s.store == 
nil || s.topics == nil { return nil, fmt.Errorf("acl service not configured") } tenantKey, err := resolveTenantKey(req.TenantUUID) @@ -214,7 +257,18 @@ func (s *ACLService) ListBindings(ctx context.Context, req ListRequest) ([]*Bind if err != nil { return nil, fmt.Errorf("invalid topic id: %w", err) } - rows, err := s.store.ListByTopic(ctx, tenantKey, topicUUID) + topic, err := s.topics.FindByUUID(ctx, topicUUID) + if err != nil { + return nil, err + } + if topic == nil { + return nil, fmt.Errorf("topic %s not found", req.TopicUUID) + } + aclTenantKey := strings.TrimSpace(topic.TenantKey) + if !strings.EqualFold(aclTenantKey, tenantKey) && !isSharedTenantKey(aclTenantKey) { + return nil, fmt.Errorf("tenant mismatch with topic") + } + rows, err := s.store.ListByTopic(ctx, aclTenantKey, topicUUID) if err != nil { return nil, err } @@ -225,7 +279,23 @@ func (s *ACLService) Can(ctx context.Context, tenantKey string, topicUUID uuid.U if s.store == nil { return false, fmt.Errorf("acl service not configured") } - return s.store.HasPermission(ctx, strings.TrimSpace(tenantKey), topicUUID, strings.TrimSpace(principalID), strings.ToLower(string(action)), s.clock()) + tenantKey = strings.TrimSpace(tenantKey) + principalID = strings.TrimSpace(principalID) + actionKey := strings.ToLower(string(action)) + cacheKey := s.cacheKey(tenantKey, topicUUID, principalID, actionKey) + if s.cache != nil { + if allowed, hit, err := s.cache.Get(ctx, cacheKey); err == nil && hit { + return allowed, nil + } + } + allowed, err := s.store.HasPermission(ctx, tenantKey, topicUUID, principalID, actionKey, s.clock()) + if err != nil { + return false, err + } + if s.cache != nil { + _ = s.cache.Set(ctx, cacheKey, allowed) + } + return allowed, nil } // HasPermission 满足其他服务对 ACL 查询的接口约束。 @@ -280,3 +350,8 @@ func resolveTenantKey(value string) (string, error) { } return "", fmt.Errorf("tenant_uuid is required") } + +func isSharedTenantKey(value string) bool { + key := 
strings.ToLower(strings.TrimSpace(value)) + return key == "global" || key == "system" +} diff --git a/backend/internal/service/event_fabric/authorization/challenge_service.go b/backend/internal/service/event_fabric/authorization/challenge_service.go index ff70abfd..1f436e32 100644 --- a/backend/internal/service/event_fabric/authorization/challenge_service.go +++ b/backend/internal/service/event_fabric/authorization/challenge_service.go @@ -169,101 +169,137 @@ func (s *serviceImpl) ProcessExpiredChallenges(ctx context.Context, tenantID uui var processed int for _, ticket := range tickets { - if ticket == nil || ticket.GrantID == nil || *ticket.GrantID == uuid.Nil { + ok, handleErr := s.processExpiredChallenge(ctx, ticket, before) + if handleErr != nil { + s.logger.WarnF(ctx, "[authorization] process challenge timeout failed ticket=%v err=%v", challengeTicketIDString(ticket), handleErr) continue } - - grant, err := s.repo.GetGrantByUUID(ctx, *ticket.GrantID) - if err != nil { - s.logger.WarnF(ctx, "[authorization] load grant for timeout failed ticket=%s err=%v", ticket.UUID, err) - continue - } - if grant == nil { - continue + if ok { + processed++ } + } - txRepo, tx, err := s.repo.BeginTx(ctx) - if err != nil { - s.logger.WarnF(ctx, "[authorization] begin tx for challenge timeout failed ticket=%s err=%v", ticket.UUID, err) - continue - } + return processed, nil +} - now := s.clock().UTC() - timeoutReason := "challenge sla expired" - ticket.Status = eventfabricmodel.ApprovalStatusExpired - ticket.DecisionReason = timeoutReason - ticket.DecisionAt = &now +func (s *serviceImpl) ProcessExpiredChallengeTicket(ctx context.Context, ticketID uuid.UUID, before time.Time) (bool, error) { + if err := s.ensureReady(); err != nil { + return false, err + } + if ticketID == uuid.Nil { + return false, ErrChallengeNotFound + } + if before.IsZero() { + before = s.clock().UTC() + } + ticket, err := s.repo.GetTicketByUUID(ctx, ticketID) + if err != nil { + return false, err + } + if 
ticket == nil { + return false, ErrChallengeNotFound + } + return s.processExpiredChallenge(ctx, ticket, before) +} - updatedTicket, err := txRepo.UpdateApprovalTicket(ctx, ticket) - if err != nil { - txRepo.RollbackTx(tx) - s.logger.WarnF(ctx, "[authorization] update ticket timeout failed ticket=%s err=%v", ticket.UUID, err) - continue - } +func (s *serviceImpl) processExpiredChallenge(ctx context.Context, ticket *eventfabricmodel.AuthorizationApprovalTicket, before time.Time) (bool, error) { + if ticket == nil || ticket.GrantID == nil || *ticket.GrantID == uuid.Nil { + return false, ErrChallengeNotFound + } + if !strings.EqualFold(strings.TrimSpace(ticket.Status), eventfabricmodel.ApprovalStatusPending) { + return false, ErrChallengeResolved + } + if !ticket.SLAExpiresAt.IsZero() && ticket.SLAExpiresAt.After(before) { + return false, nil + } - fields := map[string]any{ - "status": eventfabricmodel.GrantStatusRevoked, - "revoked_at": now, - "revoked_reason": timeoutReason, - "ttl_expires_at": now, - } - if err := txRepo.UpdateGrantFields(ctx, grant.UUID, fields); err != nil { - txRepo.RollbackTx(tx) - s.logger.WarnF(ctx, "[authorization] update grant timeout failed grant=%s err=%v", grant.UUID, err) - continue - } - if err := txRepo.IncrementGrantVersion(ctx, grant.UUID); err != nil { - txRepo.RollbackTx(tx) - s.logger.WarnF(ctx, "[authorization] bump version timeout failed grant=%s err=%v", grant.UUID, err) - continue - } + grant, err := s.repo.GetGrantByUUID(ctx, *ticket.GrantID) + if err != nil { + return false, err + } + if grant == nil { + return false, nil + } - if err := txRepo.CommitTx(tx); err != nil { - s.logger.WarnF(ctx, "[authorization] commit timeout failed grant=%s err=%v", grant.UUID, err) - continue - } + txRepo, tx, err := s.repo.BeginTx(ctx) + if err != nil { + return false, err + } - processed++ + now := s.clock().UTC() + timeoutReason := "challenge sla expired" + ticket.Status = eventfabricmodel.ApprovalStatusExpired + ticket.DecisionReason = 
timeoutReason + ticket.DecisionAt = &now - refreshedGrant, err := s.repo.GetGrantByUUID(ctx, grant.UUID) - if err != nil { - s.logger.WarnF(ctx, "[authorization] reload grant timeout failed grant=%s err=%v", grant.UUID, err) - continue - } - if refreshedGrant != nil { - if err := s.InvalidateGrantCache(ctx, buildGrantCacheKey(refreshedGrant)); err != nil { - s.logger.WarnF(ctx, "[authorization] invalidate cache timeout failed grant=%s err=%v", refreshedGrant.UUID, err) - } - s.emitAudit(ctx, "challenge.expired", refreshedGrant, nil, map[string]string{ - "ticket_id": ticket.UUID.String(), - }) - s.emitAudit(ctx, "grant.revoked", refreshedGrant, nil, map[string]string{ - "ticket_id": ticket.UUID.String(), - "reason": timeoutReason, - }) - s.emitEvaluationAlert(ctx, EvaluateRequest{ - TenantID: uuid.Nil, - SubjectType: refreshedGrant.SubjectType, - SubjectID: refreshedGrant.SubjectID, - }, &GrantSnapshot{ - GrantID: refreshedGrant.UUID, - TenantUUID: tenantUUIDFromGrant(refreshedGrant), - SubjectType: refreshedGrant.SubjectType, - SubjectID: refreshedGrant.SubjectID, - Status: refreshedGrant.Status, - }, "authorization.challenge_timeout", "high", timeoutReason, map[string]string{ - "ticket_id": ticket.UUID.String(), - }) + updatedTicket, err := txRepo.UpdateApprovalTicket(ctx, ticket) + if err != nil { + txRepo.RollbackTx(tx) + return false, err + } + + fields := map[string]any{ + "status": eventfabricmodel.GrantStatusRevoked, + "revoked_at": now, + "revoked_reason": timeoutReason, + "ttl_expires_at": now, + } + if err := txRepo.UpdateGrantFields(ctx, grant.UUID, fields); err != nil { + txRepo.RollbackTx(tx) + return false, err + } + if err := txRepo.IncrementGrantVersion(ctx, grant.UUID); err != nil { + txRepo.RollbackTx(tx) + return false, err + } + + if err := txRepo.CommitTx(tx); err != nil { + return false, err + } + + refreshedGrant, err := s.repo.GetGrantByUUID(ctx, grant.UUID) + if err != nil { + s.logger.WarnF(ctx, "[authorization] reload grant timeout failed 
grant=%s err=%v", grant.UUID, err) + } else if refreshedGrant != nil { + if err := s.InvalidateGrantCache(ctx, buildGrantCacheKey(refreshedGrant)); err != nil { + s.logger.WarnF(ctx, "[authorization] invalidate cache timeout failed grant=%s err=%v", refreshedGrant.UUID, err) } + s.emitAudit(ctx, "challenge.expired", refreshedGrant, nil, map[string]string{ + "ticket_id": ticket.UUID.String(), + }) + s.emitAudit(ctx, "grant.revoked", refreshedGrant, nil, map[string]string{ + "ticket_id": ticket.UUID.String(), + "reason": timeoutReason, + }) + s.emitEvaluationAlert(ctx, EvaluateRequest{ + TenantID: uuid.Nil, + SubjectType: refreshedGrant.SubjectType, + SubjectID: refreshedGrant.SubjectID, + }, &GrantSnapshot{ + GrantID: refreshedGrant.UUID, + TenantUUID: tenantUUIDFromGrant(refreshedGrant), + SubjectType: refreshedGrant.SubjectType, + SubjectID: refreshedGrant.SubjectID, + Status: refreshedGrant.Status, + }, "authorization.challenge_timeout", "high", timeoutReason, map[string]string{ + "ticket_id": ticket.UUID.String(), + }) + } - if s.dispatcher != nil && updatedTicket != nil { - if err := s.dispatcher.NotifyTimeout(ctx, updatedTicket); err != nil { - s.logger.WarnF(ctx, "[authorization] notify timeout failed ticket=%s err=%v", ticket.UUID, err) - } + if s.dispatcher != nil && updatedTicket != nil { + if err := s.dispatcher.NotifyTimeout(ctx, updatedTicket); err != nil { + s.logger.WarnF(ctx, "[authorization] notify timeout failed ticket=%s err=%v", ticket.UUID, err) } } - return processed, nil + return true, nil +} + +func challengeTicketIDString(ticket *eventfabricmodel.AuthorizationApprovalTicket) string { + if ticket == nil { + return "" + } + return ticket.UUID.String() } func (s *serviceImpl) resolvePendingChallenge(ctx context.Context, repo *eventfabricrepo.AuthorizationRepository, grantID uuid.UUID, actorID *uuid.UUID, reject bool) error { diff --git a/backend/internal/service/event_fabric/authorization/service.go 
b/backend/internal/service/event_fabric/authorization/service.go index b7e94f77..b3c1170b 100644 --- a/backend/internal/service/event_fabric/authorization/service.go +++ b/backend/internal/service/event_fabric/authorization/service.go @@ -53,6 +53,7 @@ type Service interface { DecideChallenge(ctx context.Context, ticketID uuid.UUID, decision ChallengeDecisionInput) (*ChallengeDecisionResult, error) ProcessExpiredChallenges(ctx context.Context, tenantID uuid.UUID, before time.Time) (int, error) + ProcessExpiredChallengeTicket(ctx context.Context, ticketID uuid.UUID, before time.Time) (bool, error) InvalidateGrantCache(ctx context.Context, key GrantCacheKey) error ListenCacheInvalidation(ctx context.Context) error diff --git a/backend/internal/service/event_fabric/delivery/driver_contract.go b/backend/internal/service/event_fabric/delivery/driver_contract.go new file mode 100644 index 00000000..8ff7e33a --- /dev/null +++ b/backend/internal/service/event_fabric/delivery/driver_contract.go @@ -0,0 +1,114 @@ +package delivery + +import ( + "context" + "fmt" + "strings" + + eventbus "github.com/ArtisanCloud/PowerX/pkg/event_bus" +) + +// DriverFallbackPolicy 描述主驱动与降级驱动的路由策略。 +type DriverFallbackPolicy struct { + Primary eventbus.QueueDriverType + Fallback []eventbus.QueueDriverType +} + +// DriverSelection 用于暴露当前可用驱动与降级决策结果。 +type DriverSelection struct { + Primary eventbus.QueueDriverType + FallbackCandidates []eventbus.QueueDriverType + Available map[eventbus.QueueDriverType]eventbus.QueueDriverCapability +} + +// Normalize 将策略标准化,并去除重复/空值。 +func (p DriverFallbackPolicy) Normalize() DriverFallbackPolicy { + primary := normalizeDriverType(p.Primary) + fallback := make([]eventbus.QueueDriverType, 0, len(p.Fallback)) + seen := map[eventbus.QueueDriverType]struct{}{} + for _, item := range p.Fallback { + driver := normalizeDriverType(item) + if driver == "" || driver == primary { + continue + } + if _, exists := seen[driver]; exists { + continue + } + seen[driver] = 
struct{}{} + fallback = append(fallback, driver) + } + return DriverFallbackPolicy{Primary: primary, Fallback: fallback} +} + +// ResolveDriverSelection 根据可用驱动解析当前主/备方案。 +func ResolveDriverSelection(policy DriverFallbackPolicy, drivers map[eventbus.QueueDriverType]eventbus.TaskDriver) (DriverSelection, error) { + normalized := policy.Normalize() + available := make(map[eventbus.QueueDriverType]eventbus.QueueDriverCapability) + for driverType, driver := range drivers { + if driver == nil { + continue + } + normalizedType := normalizeDriverType(driverType) + if normalizedType == "" { + continue + } + available[normalizedType] = driver.Capability() + } + + if normalized.Primary == "" { + return DriverSelection{}, fmt.Errorf("primary driver is required") + } + if _, ok := available[normalized.Primary]; !ok { + return DriverSelection{}, fmt.Errorf("primary driver %s is unavailable", normalized.Primary) + } + + fallback := make([]eventbus.QueueDriverType, 0, len(normalized.Fallback)) + for _, candidate := range normalized.Fallback { + if _, ok := available[candidate]; ok { + fallback = append(fallback, candidate) + } + } + + return DriverSelection{ + Primary: normalized.Primary, + FallbackCandidates: fallback, + Available: available, + }, nil +} + +// TryOnDriver 优先主驱动执行,失败时按 fallback 顺序尝试。 +func TryOnDriver(ctx context.Context, selection DriverSelection, drivers map[eventbus.QueueDriverType]eventbus.TaskDriver, fn func(context.Context, eventbus.TaskDriver) error) error { + run := func(driverType eventbus.QueueDriverType) error { + driver := drivers[driverType] + if driver == nil { + return fmt.Errorf("driver %s is unavailable", driverType) + } + return fn(ctx, driver) + } + + primaryErr := run(selection.Primary) + if primaryErr == nil { + return nil + } + + var lastErr error + for _, candidate := range selection.FallbackCandidates { + if err := run(candidate); err == nil { + return nil + } else { + lastErr = err + } + } + if lastErr != nil { + return lastErr + } + 
return fmt.Errorf("driver %s execution failed without fallback: %w", selection.Primary, primaryErr) +} + +func normalizeDriverType(driver eventbus.QueueDriverType) eventbus.QueueDriverType { + value := strings.ToLower(strings.TrimSpace(string(driver))) + if value == "" { + return "" + } + return eventbus.QueueDriverType(value) +} diff --git a/backend/internal/service/event_fabric/delivery/driver_contract_test.go b/backend/internal/service/event_fabric/delivery/driver_contract_test.go new file mode 100644 index 00000000..f00bc5c9 --- /dev/null +++ b/backend/internal/service/event_fabric/delivery/driver_contract_test.go @@ -0,0 +1,124 @@ +package delivery + +import ( + "context" + "errors" + "testing" + + eventbus "github.com/ArtisanCloud/PowerX/pkg/event_bus" + "github.com/stretchr/testify/require" +) + +func TestDriverFallbackPolicyNormalize(t *testing.T) { + t.Parallel() + + policy := DriverFallbackPolicy{ + Primary: eventbus.QueueDriverType(" Redis "), + Fallback: []eventbus.QueueDriverType{"database", "Redis", "database", " ", "nats"}, + } + + normalized := policy.Normalize() + require.Equal(t, eventbus.QueueDriverRedis, normalized.Primary) + require.Equal(t, []eventbus.QueueDriverType{eventbus.QueueDriverDatabase, eventbus.QueueDriverNATS}, normalized.Fallback) +} + +func TestResolveDriverSelection(t *testing.T) { + t.Parallel() + + drivers := map[eventbus.QueueDriverType]eventbus.TaskDriver{ + eventbus.QueueDriverRedis: stubTaskDriver{driverType: eventbus.QueueDriverRedis}, + eventbus.QueueDriverDatabase: stubTaskDriver{driverType: eventbus.QueueDriverDatabase}, + } + + selection, err := ResolveDriverSelection(DriverFallbackPolicy{ + Primary: eventbus.QueueDriverRedis, + Fallback: []eventbus.QueueDriverType{eventbus.QueueDriverKafka, eventbus.QueueDriverDatabase}, + }, drivers) + require.NoError(t, err) + require.Equal(t, eventbus.QueueDriverRedis, selection.Primary) + require.Equal(t, []eventbus.QueueDriverType{eventbus.QueueDriverDatabase}, 
selection.FallbackCandidates) + require.Contains(t, selection.Available, eventbus.QueueDriverRedis) +} + +func TestResolveDriverSelectionPrimaryUnavailable(t *testing.T) { + t.Parallel() + + _, err := ResolveDriverSelection(DriverFallbackPolicy{ + Primary: eventbus.QueueDriverRedis, + }, map[eventbus.QueueDriverType]eventbus.TaskDriver{ + eventbus.QueueDriverDatabase: stubTaskDriver{driverType: eventbus.QueueDriverDatabase}, + }) + require.Error(t, err) + require.Contains(t, err.Error(), "primary driver redis is unavailable") +} + +func TestTryOnDriverFallback(t *testing.T) { + t.Parallel() + + var called []eventbus.QueueDriverType + drivers := map[eventbus.QueueDriverType]eventbus.TaskDriver{ + eventbus.QueueDriverRedis: stubTaskDriver{driverType: eventbus.QueueDriverRedis}, + eventbus.QueueDriverDatabase: stubTaskDriver{driverType: eventbus.QueueDriverDatabase}, + } + selection := DriverSelection{ + Primary: eventbus.QueueDriverRedis, + FallbackCandidates: []eventbus.QueueDriverType{eventbus.QueueDriverDatabase}, + } + + err := TryOnDriver(context.Background(), selection, drivers, func(_ context.Context, driver eventbus.TaskDriver) error { + called = append(called, driver.Type()) + if driver.Type() == eventbus.QueueDriverRedis { + return errors.New("redis unavailable") + } + return nil + }) + require.NoError(t, err) + require.Equal(t, []eventbus.QueueDriverType{eventbus.QueueDriverRedis, eventbus.QueueDriverDatabase}, called) +} + +func TestTryOnDriverWithoutFallback(t *testing.T) { + t.Parallel() + + drivers := map[eventbus.QueueDriverType]eventbus.TaskDriver{ + eventbus.QueueDriverRedis: stubTaskDriver{driverType: eventbus.QueueDriverRedis}, + } + selection := DriverSelection{Primary: eventbus.QueueDriverRedis} + err := TryOnDriver(context.Background(), selection, drivers, func(_ context.Context, _ eventbus.TaskDriver) error { + return errors.New("primary down") + }) + require.Error(t, err) + require.Contains(t, err.Error(), "driver redis execution failed 
without fallback") +} + +type stubTaskDriver struct { + driverType eventbus.QueueDriverType +} + +func (s stubTaskDriver) Type() eventbus.QueueDriverType { + return s.driverType +} + +func (s stubTaskDriver) Capability() eventbus.QueueDriverCapability { + return eventbus.QueueDriverCapability{} +} + +func (s stubTaskDriver) Enqueue(context.Context, eventbus.TaskMessage) error { + return nil +} + +func (s stubTaskDriver) Dequeue(context.Context, eventbus.DequeueRequest) ([]eventbus.TaskMessage, error) { + return nil, nil +} + +func (s stubTaskDriver) Ack(context.Context, eventbus.AckRequest) error { + return nil +} + +func (s stubTaskDriver) Nack(context.Context, eventbus.NackRequest) error { + return nil +} + +func (s stubTaskDriver) Retry(context.Context, eventbus.RetryRequest) error { + return nil +} + diff --git a/backend/internal/service/event_fabric/delivery/service.go b/backend/internal/service/event_fabric/delivery/service.go index 86a2049a..b9b8ffff 100644 --- a/backend/internal/service/event_fabric/delivery/service.go +++ b/backend/internal/service/event_fabric/delivery/service.go @@ -104,33 +104,35 @@ type aclStore interface { // Options 汇总 Service 所需依赖。 type Options struct { - DB *gorm.DB - Envelopes envelopeStore - Deliveries deliveryStore - DLQ dlqStore - Topics topicStore - ACL aclStore - Audit eventaudit.Service - Scheduler *BackoffScheduler - Clock func() time.Time - MaxRetry int - Negotiator VersionNegotiator - Metrics eventmetrics.Recorder + DB *gorm.DB + Envelopes envelopeStore + Deliveries deliveryStore + DLQ dlqStore + Topics topicStore + ACL aclStore + Audit eventaudit.Service + Scheduler *BackoffScheduler + Clock func() time.Time + MaxRetry int + Negotiator VersionNegotiator + Metrics eventmetrics.Recorder + EnableDatabaseFallbackLookup bool } type serviceImpl struct { - db *gorm.DB - envelopes envelopeStore - deliveries deliveryStore - dlq dlqStore - topics topicStore - acl aclStore - scheduler *BackoffScheduler - clock func() time.Time - 
maxRetry int - audit eventaudit.Service - negotiator VersionNegotiator - metrics eventmetrics.Recorder + db *gorm.DB + envelopes envelopeStore + deliveries deliveryStore + dlq dlqStore + topics topicStore + acl aclStore + scheduler *BackoffScheduler + clock func() time.Time + maxRetry int + audit eventaudit.Service + negotiator VersionNegotiator + metrics eventmetrics.Recorder + enableDatabaseFallbackLookup bool } // NewService 构建事件投递服务。 @@ -196,18 +198,19 @@ func NewService(opts Options) (Service, error) { } return &serviceImpl{ - db: opts.DB, - envelopes: envStore, - deliveries: deliverStore, - dlq: dlqStore, - topics: topics, - acl: aclRepo, - scheduler: opts.Scheduler, - clock: clock, - maxRetry: maxRetry, - audit: opts.Audit, - negotiator: negotiator, - metrics: metrics, + db: opts.DB, + envelopes: envStore, + deliveries: deliverStore, + dlq: dlqStore, + topics: topics, + acl: aclRepo, + scheduler: opts.Scheduler, + clock: clock, + maxRetry: maxRetry, + audit: opts.Audit, + negotiator: negotiator, + metrics: metrics, + enableDatabaseFallbackLookup: opts.EnableDatabaseFallbackLookup, }, nil } @@ -272,7 +275,7 @@ func (s *serviceImpl) Publish(ctx context.Context, req PublishRequest) (err erro return err } - if topicTenant != "" && !strings.EqualFold(topicTenant, tenantKey) { + if topicTenant != "" && !strings.EqualFold(topicTenant, tenantKey) && !isSharedTopicTenant(topicTenant) { err = shared.ErrTenantMismatch return err } @@ -287,14 +290,18 @@ func (s *serviceImpl) Publish(ctx context.Context, req PublishRequest) (err erro return err } - if !strings.EqualFold(topic.TenantKey, tenantKey) { + aclTenantKey := topic.TenantKey + if aclTenantKey == "" { + aclTenantKey = tenantKey + } + if !strings.EqualFold(topic.TenantKey, tenantKey) && !isSharedTopicTenant(topic.TenantKey) { err = shared.ErrTenantMismatch return err } auditTopic = topic.FullTopic if s.acl != nil && principal != "" { - allowed, permErr := s.acl.HasPermission(ctx, tenantKey, topic.UUID, principal, 
string(aclPublish), s.clock().UTC()) + allowed, permErr := s.acl.HasPermission(ctx, aclTenantKey, topic.UUID, principal, string(aclPublish), s.clock().UTC()) if permErr != nil { err = permErr return err @@ -354,7 +361,7 @@ func (s *serviceImpl) Publish(ctx context.Context, req PublishRequest) (err erro return nil } - subscribers, err := s.collectSubscribers(ctx, tenantKey, topic.UUID) + subscribers, err := s.collectSubscribers(ctx, aclTenantKey, topic.UUID) if err != nil { return err } @@ -752,6 +759,9 @@ func (s *serviceImpl) PollRetry(ctx context.Context, limit int) (map[string][]De } } if attempt == nil { + if !s.enableDatabaseFallbackLookup { + continue + } // fallback by envelope/subscriber envelopeUUID, err := uuid.Parse(item.EnvelopeUUID) if err != nil { @@ -1004,14 +1014,27 @@ func resolveTenantKey(value string) (string, error) { } func parseTopicName(topic string) (tenant, namespace, name string, err error) { - parts := strings.Split(topic, ".") - if len(parts) < 3 { + parts := strings.Split(strings.TrimSpace(topic), ".") + if len(parts) < 2 { return "", "", "", fmt.Errorf("invalid topic format: %s", topic) } - tenant = parts[0] - namespace = strings.Join(parts[1:len(parts)-1], ".") + + first := strings.TrimSpace(parts[0]) + if parsed, parseErr := uuid.Parse(first); parseErr == nil && parsed != uuid.Nil && len(parts) >= 3 { + tenant = first + namespace = strings.Join(parts[1:len(parts)-1], ".") + name = parts[len(parts)-1] + return tenant, namespace, name, nil + } + + namespace = strings.Join(parts[:len(parts)-1], ".") name = parts[len(parts)-1] - return tenant, namespace, name, nil + return "", namespace, name, nil +} + +func isSharedTopicTenant(tenantKey string) bool { + key := strings.ToLower(strings.TrimSpace(tenantKey)) + return key == "global" || key == "system" } func defaultVersion(version string) string { diff --git a/backend/internal/service/event_fabric/directory/directory_service.go 
b/backend/internal/service/event_fabric/directory/directory_service.go index 2322bf5a..32053528 100644 --- a/backend/internal/service/event_fabric/directory/directory_service.go +++ b/backend/internal/service/event_fabric/directory/directory_service.go @@ -8,6 +8,7 @@ import ( "strings" "time" + aclservice "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/acl" model "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/event_fabric" eventfabricrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/event_fabric" "github.com/ArtisanCloud/PowerX/pkg/event_bus" @@ -20,6 +21,8 @@ const lifecycleChangedEvent = "event_fabric.topic.lifecycle.changed" // Topic DTO 用于对外返回主题信息。 type Topic struct { ID string `json:"id"` + ScopeType string `json:"scope_type"` + ScopeID string `json:"scope_id"` TenantUUID string `json:"tenant_uuid"` TenantKey string `json:"tenant_key"` Namespace string `json:"namespace"` @@ -61,6 +64,7 @@ type UpdateLifecycleInput struct { // DirectoryService 提供主题目录能力。 type DirectoryService struct { store TopicStore + acl ACLGranter eventBus event_bus.EventBus clock Clock actorResolver ActorResolver @@ -77,6 +81,7 @@ func NewDirectoryService(opts Options) *DirectoryService { svc := &DirectoryService{ store: store, + acl: opts.ACL, eventBus: opts.EventBus, clock: opts.Clock, actorResolver: opts.ActorResolver, @@ -166,6 +171,8 @@ func (s *DirectoryService) CreateTopic(ctx context.Context, input CreateTopicInp } record := &model.TopicDefinition{ + ScopeType: model.TopicScopeTenant, + ScopeID: tenantKey, TenantKey: tenantKey, Namespace: namespace, Name: name, @@ -184,6 +191,9 @@ func (s *DirectoryService) CreateTopic(ctx context.Context, input CreateTopicInp if err != nil { return nil, err } + if err := s.grantTopicDefaultACL(ctx, result, createdBy); err != nil { + return nil, err + } return convertTopic(result), nil } @@ -258,12 +268,37 @@ func (s *DirectoryService) ListTopics(ctx context.Context, query eventfabricrepo return 
topics, total, nil } +func (s *DirectoryService) grantTopicDefaultACL(ctx context.Context, topic *model.TopicDefinition, operator string) error { + if s.acl == nil || topic == nil { + return nil + } + _, err := s.acl.Grant(ctx, aclservice.GrantRequest{ + TenantUUID: topic.TenantKey, + TopicUUID: topic.UUID.String(), + PrincipalType: "role", + PrincipalID: "role:role_admin", + Actions: []aclservice.PrincipalAction{ + aclservice.PrincipalActionPublish, + aclservice.PrincipalActionSubscribe, + aclservice.PrincipalActionReplay, + }, + Justification: "topic default admin access", + OperatorID: strings.TrimSpace(operator), + }) + if err != nil { + return fmt.Errorf("grant default acl failed: %w", err) + } + return nil +} + func (s *DirectoryService) publishLifecycleEvent(ctx context.Context, topic *model.TopicDefinition, reason string) { if s.eventBus == nil || topic == nil { return } payload := map[string]interface{}{ "topic_id": topic.UUID.String(), + "scope_type": topic.ScopeType, + "scope_id": topic.ScopeID, "tenant_key": topic.TenantKey, "tenant_uuid": strings.TrimSpace(topic.TenantKey), "namespace": topic.Namespace, @@ -286,6 +321,14 @@ func convertTopic(record *model.TopicDefinition) *Topic { if tenantDisplay == "" { tenantDisplay = "global" } + scopeType := strings.TrimSpace(string(record.ScopeType)) + if scopeType == "" { + scopeType = string(model.TopicScopeTenant) + } + scopeID := strings.TrimSpace(record.ScopeID) + if scopeID == "" { + scopeID = tenantDisplay + } retention := string(record.RetentionPolicy) if strings.TrimSpace(retention) == "" { @@ -294,6 +337,8 @@ func convertTopic(record *model.TopicDefinition) *Topic { return &Topic{ ID: record.UUID.String(), + ScopeType: scopeType, + ScopeID: scopeID, TenantUUID: tenantDisplay, TenantKey: record.TenantKey, Namespace: record.Namespace, diff --git a/backend/internal/service/event_fabric/directory/options.go b/backend/internal/service/event_fabric/directory/options.go index c0e01da1..de7706cd 100644 --- 
a/backend/internal/service/event_fabric/directory/options.go +++ b/backend/internal/service/event_fabric/directory/options.go @@ -4,6 +4,7 @@ import ( "context" "time" + aclservice "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/acl" model "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/event_fabric" eventfabricrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/event_fabric" "github.com/ArtisanCloud/PowerX/pkg/event_bus" @@ -26,10 +27,16 @@ type TopicStore interface { List(ctx context.Context, query eventfabricrepo.QueryContext) ([]*model.TopicDefinition, int64, error) } +// ACLGranter 定义创建 topic 后默认授权能力。 +type ACLGranter interface { + Grant(ctx context.Context, req aclservice.GrantRequest) ([]*aclservice.Binding, error) +} + // Options 构造目录服务的配置。 type Options struct { DB *gorm.DB Store TopicStore + ACL ACLGranter EventBus event_bus.EventBus Clock Clock ActorResolver ActorResolver diff --git a/backend/internal/service/event_fabric/directory/validator.go b/backend/internal/service/event_fabric/directory/validator.go index 8e375aaa..4b75e8cf 100644 --- a/backend/internal/service/event_fabric/directory/validator.go +++ b/backend/internal/service/event_fabric/directory/validator.go @@ -7,7 +7,7 @@ import ( ) var ( - namespacePattern = regexp.MustCompile(`^[a-z][a-z0-9]*(\.[a-z][a-z0-9]*)*$`) + namespacePattern = regexp.MustCompile(`^(_topic|[a-z][a-z0-9]*)(\.[a-z][a-z0-9]*)*$`) namePattern = regexp.MustCompile(`^[a-z][a-z0-9-_]*$`) ) diff --git a/backend/internal/service/event_fabric/metrics/metrics.go b/backend/internal/service/event_fabric/metrics/metrics.go index 612dacb8..b038a272 100644 --- a/backend/internal/service/event_fabric/metrics/metrics.go +++ b/backend/internal/service/event_fabric/metrics/metrics.go @@ -18,6 +18,7 @@ type Recorder interface { ObserveDLQChange(ctx context.Context, delta int64) ObserveReplay(ctx context.Context, duration time.Duration, err error) ObserveAuthorizationEvaluation(ctx 
context.Context, decision string, cacheHit bool, latency time.Duration) + ObserveTaskDriverInit(ctx context.Context, driver string, supportsBlocking bool) Snapshot() Snapshot } @@ -48,6 +49,10 @@ type Snapshot struct { AuthorizationCacheHits uint64 AuthorizationCacheHitRate float64 AvgAuthorizationLatency time.Duration + + TaskDriverInitTotal uint64 + TaskDriverBlockingTotal uint64 + LastTaskDriver string } // NewRecorder 构建指标记录器。 @@ -91,6 +96,10 @@ type RecorderImpl struct { authorizationChallenges atomic.Uint64 authorizationCacheHits atomic.Uint64 authorizationLatencyNS atomic.Int64 + + taskDriverInitTotal atomic.Uint64 + taskDriverBlockingTotal atomic.Uint64 + lastTaskDriver atomic.Value } // ObserveDelivery 记录单次投递结果与耗时。 @@ -185,6 +194,16 @@ func (r *RecorderImpl) ObserveAuthorizationEvaluation(ctx context.Context, decis } } +// ObserveTaskDriverInit 记录任务驱动初始化信息。 +func (r *RecorderImpl) ObserveTaskDriverInit(ctx context.Context, driver string, supportsBlocking bool) { + r.taskDriverInitTotal.Add(1) + if supportsBlocking { + r.taskDriverBlockingTotal.Add(1) + } + r.lastTaskDriver.Store(strings.TrimSpace(driver)) + r.logger.InfoF(ctx, "[event_fabric.metrics] task driver initialized driver=%s blocking=%t", driver, supportsBlocking) +} + // Snapshot 返回指标快照。 func (r *RecorderImpl) Snapshot() Snapshot { total := r.totalDeliveries.Load() @@ -198,6 +217,8 @@ func (r *RecorderImpl) Snapshot() Snapshot { authBlocks := r.authorizationBlocks.Load() authChallenges := r.authorizationChallenges.Load() authHits := r.authorizationCacheHits.Load() + taskInit := r.taskDriverInitTotal.Load() + taskBlocking := r.taskDriverBlockingTotal.Load() var avgDelivery time.Duration if total > 0 { @@ -226,6 +247,7 @@ func (r *RecorderImpl) Snapshot() Snapshot { lastLatency := time.Duration(r.lastReplayLatency.Load()) lastErr, _ := r.lastReplayErr.Load().(string) + lastTaskDriver, _ := r.lastTaskDriver.Load().(string) return Snapshot{ DeliveriesTotal: total, @@ -249,6 +271,9 @@ func (r 
*RecorderImpl) Snapshot() Snapshot { AuthorizationCacheHits: authHits, AuthorizationCacheHitRate: authHitRate, AvgAuthorizationLatency: avgAuthLatency, + TaskDriverInitTotal: taskInit, + TaskDriverBlockingTotal: taskBlocking, + LastTaskDriver: lastTaskDriver, } } @@ -283,6 +308,7 @@ func (noopRecorder) ObserveRetry(context.Context, time.Duration) func (noopRecorder) ObserveDLQChange(context.Context, int64) {} func (noopRecorder) ObserveReplay(context.Context, time.Duration, error) {} func (noopRecorder) ObserveAuthorizationEvaluation(context.Context, string, bool, time.Duration) {} +func (noopRecorder) ObserveTaskDriverInit(context.Context, string, bool) {} func (noopRecorder) Snapshot() Snapshot { return Snapshot{} } // EncodeSnapshot 将快照转换为十六进制字符串,便于写入日志或指标系统。 diff --git a/backend/internal/service/event_fabric/replay/service.go b/backend/internal/service/event_fabric/replay/service.go index 47023fd2..114cda2c 100644 --- a/backend/internal/service/event_fabric/replay/service.go +++ b/backend/internal/service/event_fabric/replay/service.go @@ -7,6 +7,7 @@ import ( "strings" "time" + eventdomain "github.com/ArtisanCloud/PowerX/internal/event_bus" "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/delivery" eventmetrics "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/metrics" "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/shared" @@ -34,15 +35,22 @@ type EnvelopeFinder interface { ListForReplay(ctx context.Context, tenantKey string, topic uuid.UUID, filter eventfabricrepo.ReplayQuery) ([]*eventfabricmodel.EventEnvelope, error) } +type aclStore interface { + HasPermission(ctx context.Context, tenantKey string, topic uuid.UUID, principalID string, action string, now time.Time) (bool, error) +} + // Options 构建 Service 所需依赖。 type Options struct { DB *gorm.DB Repo Repository Envelopes EnvelopeFinder Topics TopicLookup + ACL aclStore Delivery delivery.Service + History *eventfabricrepo.TaskHistoryRepository Clock func() time.Time 
Metrics eventmetrics.Recorder + Emitter StatusEmitter } // Service 提供事件回放能力。 @@ -50,9 +58,12 @@ type Service struct { repo Repository envelopes EnvelopeFinder topics TopicLookup + acl aclStore delivery delivery.Service + history *eventfabricrepo.TaskHistoryRepository clock func() time.Time metrics eventmetrics.Recorder + emitter StatusEmitter } // CreateTaskInput 创建回放任务输入。 @@ -100,24 +111,39 @@ func NewService(opts Options) *Service { if topics == nil && opts.DB != nil { topics = eventfabricrepo.NewTopicRepository(opts.DB) } + aclRepo := opts.ACL + if aclRepo == nil && opts.DB != nil { + aclRepo = eventfabricrepo.NewAclRepository(opts.DB) + } return &Service{ repo: repo, envelopes: envelopes, topics: topics, + acl: aclRepo, delivery: opts.Delivery, - clock: clock, + history: func() *eventfabricrepo.TaskHistoryRepository { + if opts.History != nil { + return opts.History + } + if opts.DB != nil { + return eventfabricrepo.NewTaskHistoryRepository(opts.DB) + } + return nil + }(), + clock: clock, metrics: func() eventmetrics.Recorder { if opts.Metrics != nil { return opts.Metrics } return eventmetrics.NewNoop() }(), + emitter: opts.Emitter, } } // CreateTask 创建并执行回放任务。 func (s *Service) CreateTask(ctx context.Context, input CreateTaskInput) (*Task, error) { - if s.repo == nil || s.envelopes == nil || s.delivery == nil || s.topics == nil { + if s.repo == nil || s.envelopes == nil || s.delivery == nil || s.topics == nil || s.acl == nil { return nil, fmt.Errorf("replay service not configured") } tenantKey := strings.TrimSpace(input.TenantKey) @@ -128,7 +154,7 @@ func (s *Service) CreateTask(ctx context.Context, input CreateTaskInput) (*Task, if err != nil { return nil, err } - if topicTenant != "" && !strings.EqualFold(topicTenant, tenantKey) { + if topicTenant != "" && !strings.EqualFold(topicTenant, tenantKey) && !isSharedTopicTenant(topicTenant) { return nil, fmt.Errorf("topic tenant mismatch: %s", input.Topic) } topicDef, err := s.topics.FindByComposite(ctx, 
tenantKey, namespace, name) @@ -141,6 +167,17 @@ func (s *Service) CreateTask(ctx context.Context, input CreateTaskInput) (*Task, if !input.WindowEnd.IsZero() && input.WindowStart.After(input.WindowEnd) { return nil, fmt.Errorf("time_range_start must be <= time_range_end") } + principalID := strings.TrimSpace(input.Operator) + if principalID == "" { + return nil, fmt.Errorf("%w: principal_id is required", shared.ErrUnauthorized) + } + allowed, err := s.acl.HasPermission(ctx, topicDef.TenantKey, topicDef.UUID, principalID, "replay", s.clock().UTC()) + if err != nil { + return nil, err + } + if !allowed { + return nil, fmt.Errorf("%w: principal=%s topic=%s", shared.ErrUnauthorized, principalID, topicDef.FullTopic) + } record := &eventfabricmodel.ReplayRequest{ TenantKey: tenantKey, @@ -160,12 +197,17 @@ func (s *Service) CreateTask(ctx context.Context, input CreateTaskInput) (*Task, if err != nil { return nil, err } + s.syncReplayTaskHistory(ctx, record, topicDef.FullTopic, "") + s.emitTaskStatus(ctx, record, topicDef.FullTopic) if err := s.repo.UpdateStatus(ctx, record.UUID, map[string]interface{}{ "status": eventfabricmodel.ReplayStatusRunning, }); err != nil { return nil, err } + record.Status = eventfabricmodel.ReplayStatusRunning + s.syncReplayTaskHistory(ctx, record, topicDef.FullTopic, "") + s.emitTaskStatus(ctx, record, topicDef.FullTopic) started := s.clock().UTC() count, execErr := s.executeReplay(ctx, record, topicDef.FullTopic) @@ -191,6 +233,8 @@ func (s *Service) CreateTask(ctx context.Context, input CreateTaskInput) (*Task, if latest == nil { return nil, fmt.Errorf("replay task not found after update") } + s.syncReplayTaskHistory(ctx, latest, topicDef.FullTopic, "") + s.emitTaskStatus(ctx, latest, topicDef.FullTopic) if execErr != nil { return nil, execErr } @@ -239,7 +283,22 @@ func (s *Service) CancelTask(ctx context.Context, id string, operator string) er } cancelled := s.clock().UTC() updates["cancelled_at"] = &cancelled - return 
s.repo.UpdateStatus(ctx, uid, updates) + if err := s.repo.UpdateStatus(ctx, uid, updates); err != nil { + return err + } + record, err := s.repo.FindByUUID(ctx, uid) + if err != nil || record == nil { + return err + } + var topicName string + if s.topics != nil { + if topic, topicErr := s.topics.FindByUUID(ctx, record.TopicUUID); topicErr == nil && topic != nil { + topicName = topic.FullTopic + } + } + s.syncReplayTaskHistory(ctx, record, topicName, strings.TrimSpace(record.FailureReason)) + s.emitTaskStatus(ctx, record, topicName) + return nil } func (s *Service) executeReplay(ctx context.Context, request *eventfabricmodel.ReplayRequest, fullTopic string) (int, error) { @@ -319,13 +378,31 @@ func splitFullTopic(topic string) (tenant string, namespace string, name string, return "", "", "", fmt.Errorf("topic is required") } parts := strings.Split(trimmed, ".") - if len(parts) < 3 { + if len(parts) < 2 { return "", "", "", fmt.Errorf("invalid topic format: %s", topic) } - tenant = parts[0] - namespace = strings.Join(parts[1:len(parts)-1], ".") + + first := strings.TrimSpace(parts[0]) + if isUUID(first) && len(parts) >= 3 { + tenant = first + namespace = strings.Join(parts[1:len(parts)-1], ".") + name = parts[len(parts)-1] + return tenant, namespace, name, nil + } + + namespace = strings.Join(parts[:len(parts)-1], ".") name = parts[len(parts)-1] - return tenant, namespace, name, nil + return "", namespace, name, nil +} + +func isSharedTopicTenant(tenantKey string) bool { + key := strings.ToLower(strings.TrimSpace(tenantKey)) + return key == "global" || key == "system" +} + +func isUUID(input string) bool { + _, err := uuid.Parse(strings.TrimSpace(input)) + return err == nil } func mapFromJSON(data []byte) map[string]string { @@ -338,3 +415,85 @@ func mapFromJSON(data []byte) map[string]string { } return result } + +func (s *Service) emitTaskStatus(ctx context.Context, record *eventfabricmodel.ReplayRequest, fullTopic string) { + if s == nil || s.emitter == nil || 
record == nil { + return + } + event := ReplayTaskStatusEvent{ + TaskID: record.UUID.String(), + TenantKey: record.TenantKey, + Topic: strings.TrimSpace(fullTopic), + Status: strings.TrimSpace(record.Status), + TraceID: strings.TrimSpace(record.TraceID), + RequestedBy: strings.TrimSpace(record.IssuedBy), + Shadow: record.Shadow, + ResultCount: record.ResultCount, + FailureReason: strings.TrimSpace(record.FailureReason), + SubmittedAt: toRFC3339(record.SubmittedAt), + CompletedAt: toRFC3339Ptr(record.CompletedAt), + CancelledAt: toRFC3339Ptr(record.CancelledAt), + } + s.emitter.EmitReplayTaskStatus(ctx, event) +} + +func (s *Service) syncReplayTaskHistory(ctx context.Context, record *eventfabricmodel.ReplayRequest, fullTopic, errorMessage string) { + if s == nil || s.history == nil || record == nil { + return + } + taskID := strings.TrimSpace(record.UUID.String()) + if taskID == "" { + return + } + now := s.clock().UTC() + item, _ := s.history.FindByKey(ctx, strings.TrimSpace(record.TenantKey), eventdomain.SubscriberEventFabricReplay, taskID) + if item == nil { + item = &eventfabricmodel.TaskHistory{ + TaskID: taskID, + TenantKey: strings.TrimSpace(record.TenantKey), + SubscriberID: eventdomain.SubscriberEventFabricReplay, + } + } + item.Topic = strings.TrimSpace(fullTopic) + item.Kind = eventdomain.NotificationKindEventFabricReplayTask + item.Source = "replay_service" + item.TraceID = strings.TrimSpace(record.TraceID) + item.Status = strings.TrimSpace(record.Status) + item.Attempt = 1 + if strings.TrimSpace(errorMessage) != "" { + item.ErrorMessage = strings.TrimSpace(errorMessage) + } else { + item.ErrorMessage = strings.TrimSpace(record.FailureReason) + } + if !record.SubmittedAt.IsZero() { + t := record.SubmittedAt.UTC() + item.SubmittedAt = &t + } + if record.Status == eventfabricmodel.ReplayStatusRunning && item.StartedAt == nil { + t := now + item.StartedAt = &t + } + if record.CompletedAt != nil { + t := record.CompletedAt.UTC() + item.CompletedAt = &t + } 
else if record.CancelledAt != nil { + t := record.CancelledAt.UTC() + item.CompletedAt = &t + } + item.LastSeenAt = now + _ = s.history.Save(ctx, item) +} + +func toRFC3339(value time.Time) string { + if value.IsZero() { + return "" + } + return value.UTC().Format(time.RFC3339) +} + +func toRFC3339Ptr(value *time.Time) string { + if value == nil || value.IsZero() { + return "" + } + return value.UTC().Format(time.RFC3339) +} diff --git a/backend/internal/service/event_fabric/replay/status_emitter.go b/backend/internal/service/event_fabric/replay/status_emitter.go new file mode 100644 index 00000000..4bd1465a --- /dev/null +++ b/backend/internal/service/event_fabric/replay/status_emitter.go @@ -0,0 +1,24 @@ +package replay + +import "context" + +// StatusEmitter 用于对外推送 replay 任务状态变化(例如 WebSocket)。 +type StatusEmitter interface { + EmitReplayTaskStatus(ctx context.Context, event ReplayTaskStatusEvent) +} + +// ReplayTaskStatusEvent 描述 replay 任务状态变更事件。 +type ReplayTaskStatusEvent struct { + TaskID string `json:"task_id"` + TenantKey string `json:"tenant_key"` + Topic string `json:"topic"` + Status string `json:"status"` + TraceID string `json:"trace_id,omitempty"` + RequestedBy string `json:"requested_by,omitempty"` + Shadow bool `json:"shadow"` + ResultCount int `json:"result_count"` + FailureReason string `json:"failure_reason,omitempty"` + SubmittedAt string `json:"submitted_at,omitempty"` + CompletedAt string `json:"completed_at,omitempty"` + CancelledAt string `json:"cancelled_at,omitempty"` +} diff --git a/backend/internal/service/event_fabric/scheduler/cron_parser.go b/backend/internal/service/event_fabric/scheduler/cron_parser.go new file mode 100644 index 00000000..1a243018 --- /dev/null +++ b/backend/internal/service/event_fabric/scheduler/cron_parser.go @@ -0,0 +1,161 @@ +package scheduler + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +const maxCronSearchMinutes = 60 * 24 * 366 * 2 + +type cronSpec struct { + minutes [60]bool + hours [24]bool + 
days [32]bool + months [13]bool + weekdays [7]bool + + dayWildcard bool + dowWildcard bool +} + +func parseCronSpec(expr string) (*cronSpec, error) { + parts := strings.Fields(strings.TrimSpace(expr)) + if len(parts) != 5 { + return nil, fmt.Errorf("invalid cron expression %q: expected 5 fields", expr) + } + + spec := &cronSpec{} + if err := parseCronField(parts[0], 0, 59, spec.minutes[:], nil); err != nil { + return nil, fmt.Errorf("invalid minute field: %w", err) + } + if err := parseCronField(parts[1], 0, 23, spec.hours[:], nil); err != nil { + return nil, fmt.Errorf("invalid hour field: %w", err) + } + spec.dayWildcard = parts[2] == "*" + if err := parseCronField(parts[2], 1, 31, spec.days[:], nil); err != nil { + return nil, fmt.Errorf("invalid day-of-month field: %w", err) + } + if err := parseCronField(parts[3], 1, 12, spec.months[:], nil); err != nil { + return nil, fmt.Errorf("invalid month field: %w", err) + } + spec.dowWildcard = parts[4] == "*" + if err := parseCronField(parts[4], 0, 7, nil, func(v int) { + if v == 7 { + spec.weekdays[0] = true + return + } + spec.weekdays[v] = true + }); err != nil { + return nil, fmt.Errorf("invalid day-of-week field: %w", err) + } + + return spec, nil +} + +func parseCronField(field string, min, max int, slots []bool, setter func(v int)) error { + setValue := func(v int) { + if setter != nil { + setter(v) + return + } + slots[v] = true + } + + if field == "*" { + for i := min; i <= max; i++ { + setValue(i) + } + return nil + } + + for _, part := range strings.Split(field, ",") { + part = strings.TrimSpace(part) + if part == "" { + return fmt.Errorf("empty segment") + } + + step := 1 + base := part + if strings.Contains(part, "/") { + items := strings.Split(part, "/") + if len(items) != 2 { + return fmt.Errorf("invalid step segment %q", part) + } + base = strings.TrimSpace(items[0]) + parsedStep, err := strconv.Atoi(strings.TrimSpace(items[1])) + if err != nil || parsedStep <= 0 { + return fmt.Errorf("invalid step 
value in %q", part) + } + step = parsedStep + } + + start := min + end := max + switch { + case base == "" || base == "*": + // use full range + case strings.Contains(base, "-"): + bounds := strings.Split(base, "-") + if len(bounds) != 2 { + return fmt.Errorf("invalid range %q", base) + } + left, err := strconv.Atoi(strings.TrimSpace(bounds[0])) + if err != nil { + return fmt.Errorf("invalid range start %q", base) + } + right, err := strconv.Atoi(strings.TrimSpace(bounds[1])) + if err != nil { + return fmt.Errorf("invalid range end %q", base) + } + start, end = left, right + default: + single, err := strconv.Atoi(base) + if err != nil { + return fmt.Errorf("invalid value %q", base) + } + start, end = single, single + } + + if start < min || end > max || start > end { + return fmt.Errorf("value out of range %d-%d in %q", min, max, part) + } + for value := start; value <= end; value += step { + setValue(value) + } + } + + return nil +} + +func (c *cronSpec) next(after time.Time) (time.Time, error) { + base := after.Truncate(time.Minute).Add(time.Minute) + for i := 0; i < maxCronSearchMinutes; i++ { + candidate := base.Add(time.Duration(i) * time.Minute) + if c.match(candidate) { + return candidate, nil + } + } + return time.Time{}, fmt.Errorf("cannot find next run time within %d minutes", maxCronSearchMinutes) +} + +func (c *cronSpec) match(ts time.Time) bool { + if !c.minutes[ts.Minute()] || !c.hours[ts.Hour()] || !c.months[int(ts.Month())] { + return false + } + + dayMatch := c.days[ts.Day()] + dowMatch := c.weekdays[int(ts.Weekday())] + + if c.dayWildcard && c.dowWildcard { + return dayMatch && dowMatch + } + if c.dayWildcard { + return dowMatch + } + if c.dowWildcard { + return dayMatch + } + return dayMatch || dowMatch +} diff --git a/backend/internal/service/event_fabric/scheduler/service.go b/backend/internal/service/event_fabric/scheduler/service.go new file mode 100644 index 00000000..1f9ce86d --- /dev/null +++ 
b/backend/internal/service/event_fabric/scheduler/service.go @@ -0,0 +1,124 @@ +package scheduler + +import ( + "fmt" + "strings" + "time" + + eventfabricmodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/event_fabric" +) + +type ComputeNextRunInput struct { + CronExpr string + Timezone string + MisfirePolicy string + Now time.Time + LastRunAt *time.Time + PrevNextRunAt *time.Time +} + +type ComputeNextRunResult struct { + NextRunAt *time.Time + ShouldRunNow bool +} + +type Service struct{} + +func NewService() *Service { + return &Service{} +} + +func (s *Service) ComputeNextRun(input ComputeNextRunInput) (*ComputeNextRunResult, error) { + location, err := resolveLocation(input.Timezone) + if err != nil { + return nil, err + } + now := input.Now + if now.IsZero() { + now = time.Now().UTC() + } + nowInLoc := now.In(location) + + spec, err := parseCronSpec(input.CronExpr) + if err != nil { + return nil, err + } + + policy := normalizeMisfirePolicy(input.MisfirePolicy) + result := &ComputeNextRunResult{} + + if input.PrevNextRunAt != nil && !input.PrevNextRunAt.IsZero() { + dueInLoc := input.PrevNextRunAt.In(location) + if dueInLoc.Before(nowInLoc) { + switch policy { + case eventfabricmodel.ScheduledTaskMisfireFireNow: + result.ShouldRunNow = true + nextFromNow, nextErr := spec.next(nowInLoc) + if nextErr != nil { + return nil, nextErr + } + nextUTC := nextFromNow.UTC() + result.NextRunAt = &nextUTC + return result, nil + case eventfabricmodel.ScheduledTaskMisfireCatchUp: + result.ShouldRunNow = true + nextAfterDue, nextErr := spec.next(dueInLoc) + if nextErr != nil { + return nil, nextErr + } + nextUTC := nextAfterDue.UTC() + result.NextRunAt = &nextUTC + return result, nil + case eventfabricmodel.ScheduledTaskMisfireSkip: + fallthrough + default: + nextFromNow, nextErr := spec.next(nowInLoc) + if nextErr != nil { + return nil, nextErr + } + nextUTC := nextFromNow.UTC() + result.NextRunAt = &nextUTC + return result, nil + } + } + } + + base := 
nowInLoc + if input.LastRunAt != nil && !input.LastRunAt.IsZero() { + lastRunInLoc := input.LastRunAt.In(location) + if lastRunInLoc.After(base) { + base = lastRunInLoc + } + } + nextAt, err := spec.next(base) + if err != nil { + return nil, err + } + nextUTC := nextAt.UTC() + result.NextRunAt = &nextUTC + return result, nil +} + +func resolveLocation(timezone string) (*time.Location, error) { + tz := strings.TrimSpace(timezone) + if tz == "" { + return time.UTC, nil + } + loc, err := time.LoadLocation(tz) + if err != nil { + return nil, fmt.Errorf("invalid timezone %q: %w", timezone, err) + } + return loc, nil +} + +func normalizeMisfirePolicy(policy string) string { + policy = strings.TrimSpace(policy) + switch policy { + case eventfabricmodel.ScheduledTaskMisfireFireNow, + eventfabricmodel.ScheduledTaskMisfireCatchUp, + eventfabricmodel.ScheduledTaskMisfireSkip: + return policy + default: + return eventfabricmodel.ScheduledTaskMisfireSkip + } +} diff --git a/backend/internal/service/event_fabric/scheduler/service_test.go b/backend/internal/service/event_fabric/scheduler/service_test.go new file mode 100644 index 00000000..2d3f4ac7 --- /dev/null +++ b/backend/internal/service/event_fabric/scheduler/service_test.go @@ -0,0 +1,112 @@ +package scheduler + +import ( + "testing" + "time" + + eventfabricmodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/event_fabric" +) + +func TestComputeNextRun_Basic(t *testing.T) { + svc := NewService() + now := time.Date(2026, 2, 12, 2, 3, 10, 0, time.UTC) + result, err := svc.ComputeNextRun(ComputeNextRunInput{ + CronExpr: "*/5 * * * *", + Timezone: "UTC", + Now: now, + }) + if err != nil { + t.Fatalf("ComputeNextRun should succeed: %v", err) + } + if result == nil || result.NextRunAt == nil { + t.Fatalf("next run should not be nil") + } + expected := time.Date(2026, 2, 12, 2, 5, 0, 0, time.UTC) + if !result.NextRunAt.Equal(expected) { + t.Fatalf("unexpected next run, got=%s expected=%s", 
result.NextRunAt.Format(time.RFC3339), expected.Format(time.RFC3339)) + } + if result.ShouldRunNow { + t.Fatalf("shouldRunNow should be false") + } +} + +func TestComputeNextRun_MisfireFireNow(t *testing.T) { + svc := NewService() + now := time.Date(2026, 2, 12, 2, 10, 0, 0, time.UTC) + prevNextRun := time.Date(2026, 2, 12, 2, 5, 0, 0, time.UTC) + result, err := svc.ComputeNextRun(ComputeNextRunInput{ + CronExpr: "*/5 * * * *", + Timezone: "UTC", + MisfirePolicy: eventfabricmodel.ScheduledTaskMisfireFireNow, + Now: now, + PrevNextRunAt: &prevNextRun, + }) + if err != nil { + t.Fatalf("ComputeNextRun should succeed: %v", err) + } + if !result.ShouldRunNow { + t.Fatalf("shouldRunNow should be true") + } + expected := time.Date(2026, 2, 12, 2, 15, 0, 0, time.UTC) + if result.NextRunAt == nil || !result.NextRunAt.Equal(expected) { + t.Fatalf("unexpected next run, got=%v expected=%s", result.NextRunAt, expected.Format(time.RFC3339)) + } +} + +func TestComputeNextRun_MisfireSkip(t *testing.T) { + svc := NewService() + now := time.Date(2026, 2, 12, 2, 10, 0, 0, time.UTC) + prevNextRun := time.Date(2026, 2, 12, 2, 5, 0, 0, time.UTC) + result, err := svc.ComputeNextRun(ComputeNextRunInput{ + CronExpr: "*/5 * * * *", + Timezone: "UTC", + MisfirePolicy: eventfabricmodel.ScheduledTaskMisfireSkip, + Now: now, + PrevNextRunAt: &prevNextRun, + }) + if err != nil { + t.Fatalf("ComputeNextRun should succeed: %v", err) + } + if result.ShouldRunNow { + t.Fatalf("shouldRunNow should be false") + } + expected := time.Date(2026, 2, 12, 2, 15, 0, 0, time.UTC) + if result.NextRunAt == nil || !result.NextRunAt.Equal(expected) { + t.Fatalf("unexpected next run, got=%v expected=%s", result.NextRunAt, expected.Format(time.RFC3339)) + } +} + +func TestComputeNextRun_MisfireCatchUp(t *testing.T) { + svc := NewService() + now := time.Date(2026, 2, 12, 2, 10, 0, 0, time.UTC) + prevNextRun := time.Date(2026, 2, 12, 2, 5, 0, 0, time.UTC) + result, err := svc.ComputeNextRun(ComputeNextRunInput{ + 
CronExpr: "*/5 * * * *", + Timezone: "UTC", + MisfirePolicy: eventfabricmodel.ScheduledTaskMisfireCatchUp, + Now: now, + PrevNextRunAt: &prevNextRun, + }) + if err != nil { + t.Fatalf("ComputeNextRun should succeed: %v", err) + } + if !result.ShouldRunNow { + t.Fatalf("shouldRunNow should be true") + } + expected := time.Date(2026, 2, 12, 2, 10, 0, 0, time.UTC) + if result.NextRunAt == nil || !result.NextRunAt.Equal(expected) { + t.Fatalf("unexpected next run, got=%v expected=%s", result.NextRunAt, expected.Format(time.RFC3339)) + } +} + +func TestComputeNextRun_InvalidTimezone(t *testing.T) { + svc := NewService() + _, err := svc.ComputeNextRun(ComputeNextRunInput{ + CronExpr: "*/5 * * * *", + Timezone: "Asia/NotExists", + Now: time.Now().UTC(), + }) + if err == nil { + t.Fatalf("expected error for invalid timezone") + } +} diff --git a/backend/internal/service/event_fabric/shared/constants.go b/backend/internal/service/event_fabric/shared/constants.go index e62d8d31..770edf3c 100644 --- a/backend/internal/service/event_fabric/shared/constants.go +++ b/backend/internal/service/event_fabric/shared/constants.go @@ -21,9 +21,6 @@ const ( // ContextAcceptedVersions 存储订阅者声明支持的事件版本列表。 ContextAcceptedVersions = "accepted_versions" - // HeaderTenantID 用于 HTTP 层传递租户信息的 Header。 - HeaderTenantID = "X-PowerX-Tenant" - // DefaultAckTimeout 与规格保持一致,默认 30s。 DefaultAckTimeout = 30 * time.Second diff --git a/backend/internal/service/event_fabric/shared/topic_lookup_cache.go b/backend/internal/service/event_fabric/shared/topic_lookup_cache.go new file mode 100644 index 00000000..9650fd66 --- /dev/null +++ b/backend/internal/service/event_fabric/shared/topic_lookup_cache.go @@ -0,0 +1,130 @@ +package shared + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/ArtisanCloud/PowerX/pkg/cache" + eventfabricmodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/event_fabric" + "github.com/google/uuid" +) + +const topicResolveCachePrefix = 
"event:topic:resolve" + +type TopicLookup interface { + FindByComposite(ctx context.Context, tenantKey, namespace, name string) (*eventfabricmodel.TopicDefinition, error) + FindByUUID(ctx context.Context, id uuid.UUID) (*eventfabricmodel.TopicDefinition, error) +} + +type CachedTopicLookupOptions struct { + Cache cache.ICache + TTL time.Duration + MissTTL time.Duration +} + +type cachedTopicLookupPayload struct { + NotFound bool `json:"not_found"` + Topic *eventfabricmodel.TopicDefinition `json:"topic,omitempty"` +} + +type CachedTopicLookup struct { + base TopicLookup + cache cache.ICache + ttl time.Duration + missTTL time.Duration +} + +func NewCachedTopicLookup(base TopicLookup, opts CachedTopicLookupOptions) *CachedTopicLookup { + if base == nil { + return nil + } + ttl := opts.TTL + if ttl <= 0 { + ttl = 180 * time.Second + } + missTTL := opts.MissTTL + if missTTL <= 0 { + missTTL = 30 * time.Second + } + return &CachedTopicLookup{ + base: base, + cache: opts.Cache, + ttl: ttl, + missTTL: missTTL, + } +} + +func (c *CachedTopicLookup) FindByComposite(ctx context.Context, tenantKey, namespace, name string) (*eventfabricmodel.TopicDefinition, error) { + if c == nil || c.base == nil { + return nil, nil + } + tenant := strings.TrimSpace(strings.ToLower(tenantKey)) + ns := strings.TrimSpace(strings.ToLower(namespace)) + topicName := strings.TrimSpace(strings.ToLower(name)) + if ns == "" || topicName == "" { + return c.base.FindByComposite(ctx, tenantKey, namespace, name) + } + cacheKey := fmt.Sprintf("%s:%s:%s.%s", topicResolveCachePrefix, tenant, ns, topicName) + if topic, hit, err := c.getCachedComposite(ctx, cacheKey); err != nil { + return nil, err + } else if hit { + return topic, nil + } + + topic, err := c.base.FindByComposite(ctx, tenantKey, namespace, name) + if err != nil { + return nil, err + } + if topic == nil { + _ = c.setCachedComposite(ctx, cacheKey, &cachedTopicLookupPayload{NotFound: true}, c.missTTL) + return nil, nil + } + _ = 
c.setCachedComposite(ctx, cacheKey, &cachedTopicLookupPayload{NotFound: false, Topic: topic}, c.ttl) + return topic, nil +} + +func (c *CachedTopicLookup) FindByUUID(ctx context.Context, id uuid.UUID) (*eventfabricmodel.TopicDefinition, error) { + if c == nil || c.base == nil { + return nil, nil + } + return c.base.FindByUUID(ctx, id) +} + +func (c *CachedTopicLookup) getCachedComposite(ctx context.Context, key string) (*eventfabricmodel.TopicDefinition, bool, error) { + if c == nil || c.cache == nil { + return nil, false, nil + } + raw, err := c.cache.Get(ctx, key) + if err != nil { + return nil, false, nil + } + if len(raw) == 0 { + return nil, false, nil + } + var payload cachedTopicLookupPayload + if err := json.Unmarshal(raw, &payload); err != nil { + return nil, false, nil + } + if payload.NotFound { + return nil, true, nil + } + return payload.Topic, true, nil +} + +func (c *CachedTopicLookup) setCachedComposite(ctx context.Context, key string, payload *cachedTopicLookupPayload, ttl time.Duration) error { + if c == nil || c.cache == nil || payload == nil { + return nil + } + if ttl <= 0 { + ttl = c.ttl + } + raw, err := json.Marshal(payload) + if err != nil { + return err + } + return c.cache.Set(ctx, key, raw, ttl) +} + diff --git a/backend/internal/service/event_fabric/shared/topic_lookup_cache_test.go b/backend/internal/service/event_fabric/shared/topic_lookup_cache_test.go new file mode 100644 index 00000000..52370c92 --- /dev/null +++ b/backend/internal/service/event_fabric/shared/topic_lookup_cache_test.go @@ -0,0 +1,82 @@ +package shared + +import ( + "context" + "testing" + + "github.com/ArtisanCloud/PowerX/pkg/cache" + coremodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model" + eventfabricmodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/event_fabric" + "github.com/google/uuid" +) + +func TestCachedTopicLookup_FindByComposite_CacheHit(t *testing.T) { + ctx := context.Background() + base := &mockTopicLookup{ + 
result: &eventfabricmodel.TopicDefinition{ + PowerUUIDModel: coremodel.PowerUUIDModel{UUID: uuid.New()}, + TenantKey: "tenant-a", + Namespace: "knowledge.space", + Name: "reprocess", + }, + } + lookup := NewCachedTopicLookup(base, CachedTopicLookupOptions{Cache: cache.NewMemoryCache()}) + + first, err := lookup.FindByComposite(ctx, "tenant-a", "knowledge.space", "reprocess") + if err != nil || first == nil { + t.Fatalf("first lookup failed: err=%v topic_nil=%v", err, first == nil) + } + second, err := lookup.FindByComposite(ctx, "tenant-a", "knowledge.space", "reprocess") + if err != nil || second == nil { + t.Fatalf("second lookup failed: err=%v topic_nil=%v", err, second == nil) + } + if base.calls != 1 { + t.Fatalf("expected base lookup called once, got %d", base.calls) + } +} + +func TestCachedTopicLookup_FindByComposite_MissCached(t *testing.T) { + ctx := context.Background() + base := &mockTopicLookup{result: nil} + lookup := NewCachedTopicLookup(base, CachedTopicLookupOptions{Cache: cache.NewMemoryCache()}) + + first, err := lookup.FindByComposite(ctx, "tenant-a", "knowledge.space", "missing") + if err != nil { + t.Fatalf("first lookup error: %v", err) + } + if first != nil { + t.Fatalf("expected nil on first miss") + } + second, err := lookup.FindByComposite(ctx, "tenant-a", "knowledge.space", "missing") + if err != nil { + t.Fatalf("second lookup error: %v", err) + } + if second != nil { + t.Fatalf("expected nil on second miss") + } + if base.calls != 1 { + t.Fatalf("expected base lookup called once for miss cache, got %d", base.calls) + } +} + +type mockTopicLookup struct { + result *eventfabricmodel.TopicDefinition + calls int +} + +func (m *mockTopicLookup) FindByComposite(_ context.Context, _, _, _ string) (*eventfabricmodel.TopicDefinition, error) { + m.calls++ + if m.result == nil { + return nil, nil + } + clone := *m.result + return &clone, nil +} + +func (m *mockTopicLookup) FindByUUID(_ context.Context, _ uuid.UUID) 
(*eventfabricmodel.TopicDefinition, error) { + if m.result == nil { + return nil, nil + } + clone := *m.result + return &clone, nil +} diff --git a/backend/internal/service/iam/org_service.go b/backend/internal/service/iam/org_service.go index 190f4cf1..2f45de9b 100644 --- a/backend/internal/service/iam/org_service.go +++ b/backend/internal/service/iam/org_service.go @@ -495,5 +495,8 @@ func (s *OrgService) GetDepartmentTree(ctx context.Context, tenantUUID string) ( sortRec(r) } + if roots == nil { + return []*m.Department{}, nil + } return roots, nil } diff --git a/backend/internal/service/iam/permission_service.go b/backend/internal/service/iam/permission_service.go index 546a49f3..42b36177 100644 --- a/backend/internal/service/iam/permission_service.go +++ b/backend/internal/service/iam/permission_service.go @@ -95,7 +95,7 @@ func (s *PermissionService) ListPermissions(ctx context.Context, filter map[stri func (s *PermissionService) ListCatalog(ctx context.Context) (map[string]map[string][]dbm.Permission, error) { rows, _, err := s.perms.List(ctx, map[string]string{ "status": string(dbm.PermissionStatusActive), - }, 0, 10000, "plugin ASC, resource ASC, action ASC") + }, 0, 10000, "module ASC, resource ASC, action ASC") if err != nil { return nil, err } @@ -106,7 +106,7 @@ func (s *PermissionService) ListCatalog(ctx context.Context) (map[string]map[str _ = json.Unmarshal(p.Meta, &m) mod := strings.TrimSpace(utils.ToStr(m["module"])) if mod == "" { - mod = p.Plugin + mod = p.Module } tp := strings.TrimSpace(utils.ToStr(m["type"])) if tp == "" { diff --git a/backend/internal/service/iam/rbac_service.go b/backend/internal/service/iam/rbac_service.go index d1c4808a..ec81bf17 100644 --- a/backend/internal/service/iam/rbac_service.go +++ b/backend/internal/service/iam/rbac_service.go @@ -36,7 +36,7 @@ func NewRBACService(db *gorm.DB) *RBACService { } // ========== 1) 角色 ⇄ 权限 ========== -type PermTriple struct{ Plugin, Resource, Action string } +type PermTriple struct{ 
Module, Resource, Action string } // A. 通过权限ID授予(最快路径) func (s *RBACService) GrantPermsByIDs(ctx context.Context, actor ActorContext, roleID uint64, permIDs []uint64) error { @@ -60,7 +60,7 @@ func (s *RBACService) GrantPermsByIDs(ctx context.Context, actor ActorContext, r return s.rpr.BindPermissions(ctx, roleID, permIDs...) // 幂等 upsert :contentReference[oaicite:9]{index=9} } -// B. 通过 (plugin,resource,action) 授予(便于API) +// B. 通过 (module,resource,action) 授予(便于API) func (s *RBACService) GrantPermsByTriples(ctx context.Context, actor ActorContext, roleID uint64, triples []PermTriple) error { if len(triples) == 0 { return nil @@ -68,7 +68,7 @@ func (s *RBACService) GrantPermsByTriples(ctx context.Context, actor ActorContex // 1) 先 upsert 权限行(幂等) toUpsert := make([]dbm.Permission, 0, len(triples)) for _, t := range triples { - toUpsert = append(toUpsert, dbm.Permission{Plugin: t.Plugin, Resource: t.Resource, Action: t.Action}) + toUpsert = append(toUpsert, dbm.Permission{Module: t.Module, Resource: t.Resource, Action: t.Action}) } if err := s.pr.UpsertBatch(ctx, toUpsert); err != nil { // :contentReference[oaicite:10]{index=10} return err @@ -77,7 +77,7 @@ func (s *RBACService) GrantPermsByTriples(ctx context.Context, actor ActorContex var permIDs []uint64 if err := s.db.WithContext(ctx). Model(&dbm.Permission{}). - Where("(plugin,resource,action) IN ?", triplesToTuples(triples)). + Where("(module,resource,action) IN ?", triplesToTuples(triples)). 
Pluck("id", &permIDs).Error; err != nil { return err } @@ -145,7 +145,8 @@ func (s *RBACService) UnbindRoleFromMember(ctx context.Context, actor ActorConte } // ========== 3) 鉴权(root 放行;直绑 + 维度间接绑定) ========== -func (s *RBACService) Enforce(ctx context.Context, actor ActorContext, tenantUUID string, memberID uint64, plugin, resource, action string) (bool, error) { +func (s *RBACService) Enforce(ctx context.Context, actor ActorContext, tenantUUID string, memberID uint64, module, resource, action string) (bool, error) { + _ = module if actor.IsRoot { return true, nil } @@ -162,7 +163,7 @@ func (s *RBACService) Enforce(ctx context.Context, actor ActorContext, tenantUUI func triplesToTuples(ts []PermTriple) [][3]string { out := make([][3]string, 0, len(ts)) for _, t := range ts { - out = append(out, [3]string{t.Plugin, t.Resource, t.Action}) + out = append(out, [3]string{t.Module, t.Resource, t.Action}) } return out } diff --git a/backend/internal/service/integration_gateway/apikeycache/cache.go b/backend/internal/service/integration_gateway/apikeycache/cache.go new file mode 100644 index 00000000..01c5e30e --- /dev/null +++ b/backend/internal/service/integration_gateway/apikeycache/cache.go @@ -0,0 +1,149 @@ +package apikeycache + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/ArtisanCloud/PowerX/pkg/cache" +) + +const ( + globalVersionKey = "igw:apikey:cache:version" + authTTL = 2 * time.Minute + permTTL = 60 * time.Second +) + +type AuthSnapshot struct { + KeyID uint64 `json:"key_id"` + TenantUUID string `json:"tenant_uuid"` + ProfileID uint64 `json:"profile_id"` +} + +type permissionDecision struct { + Allowed bool `json:"allowed"` +} + +func GetAuthSnapshot(ctx context.Context, keyHash string) (*AuthSnapshot, bool, error) { + store := cache.GetCache() + keyHash = strings.TrimSpace(keyHash) + if store == nil || keyHash == "" { + return nil, false, nil + } + version, err := cacheVersion(ctx, store) + if err != nil { 
+ return nil, false, err + } + raw, err := store.Get(ctx, authCacheKey(version, keyHash)) + if err != nil || len(raw) == 0 { + return nil, false, err + } + var out AuthSnapshot + if err := json.Unmarshal(raw, &out); err != nil { + return nil, false, nil + } + return &out, true, nil +} + +func SetAuthSnapshot(ctx context.Context, keyHash string, snapshot AuthSnapshot) error { + store := cache.GetCache() + keyHash = strings.TrimSpace(keyHash) + if store == nil || keyHash == "" { + return nil + } + version, err := cacheVersion(ctx, store) + if err != nil { + return err + } + raw, err := json.Marshal(snapshot) + if err != nil { + return err + } + return store.Set(ctx, authCacheKey(version, keyHash), raw, authTTL) +} + +func GetPermissionDecision(ctx context.Context, keyHash string, action string, resourceType string, resource string) (bool, bool, error) { + store := cache.GetCache() + keyHash = strings.TrimSpace(keyHash) + if store == nil || keyHash == "" { + return false, false, nil + } + version, err := cacheVersion(ctx, store) + if err != nil { + return false, false, err + } + raw, err := store.Get(ctx, permissionCacheKey(version, keyHash, action, resourceType, resource)) + if err != nil || len(raw) == 0 { + return false, false, err + } + var out permissionDecision + if err := json.Unmarshal(raw, &out); err != nil { + return false, false, nil + } + return out.Allowed, true, nil +} + +func SetPermissionDecision(ctx context.Context, keyHash string, action string, resourceType string, resource string, allowed bool) error { + store := cache.GetCache() + keyHash = strings.TrimSpace(keyHash) + if store == nil || keyHash == "" { + return nil + } + version, err := cacheVersion(ctx, store) + if err != nil { + return err + } + raw, err := json.Marshal(permissionDecision{Allowed: allowed}) + if err != nil { + return err + } + return store.Set(ctx, permissionCacheKey(version, keyHash, action, resourceType, resource), raw, permTTL) +} + +func InvalidateAll(ctx context.Context) 
error { + store := cache.GetCache() + if store == nil { + return nil + } + _, err := store.Increment(ctx, globalVersionKey, 1) + return err +} + +func cacheVersion(ctx context.Context, store cache.ICache) (int64, error) { + raw, err := store.Get(ctx, globalVersionKey) + if err != nil { + return 0, err + } + if len(raw) == 0 { + if err := store.Set(ctx, globalVersionKey, "1", 0); err != nil { + return 0, err + } + return 1, nil + } + value, parseErr := strconv.ParseInt(strings.TrimSpace(string(raw)), 10, 64) + if parseErr != nil || value <= 0 { + if err := store.Set(ctx, globalVersionKey, "1", 0); err != nil { + return 0, err + } + return 1, nil + } + return value, nil +} + +func authCacheKey(version int64, keyHash string) string { + return fmt.Sprintf("igw:apikey:auth:v%d:%s", version, strings.TrimSpace(keyHash)) +} + +func permissionCacheKey(version int64, keyHash string, action string, resourceType string, resource string) string { + return fmt.Sprintf( + "igw:apikey:perm:v%d:%s:%s:%s:%s", + version, + strings.TrimSpace(keyHash), + strings.ToLower(strings.TrimSpace(action)), + strings.ToLower(strings.TrimSpace(resourceType)), + strings.TrimSpace(resource), + ) +} diff --git a/backend/internal/service/integration_gateway/apikeypermissions/policy.go b/backend/internal/service/integration_gateway/apikeypermissions/policy.go new file mode 100644 index 00000000..71785749 --- /dev/null +++ b/backend/internal/service/integration_gateway/apikeypermissions/policy.go @@ -0,0 +1,165 @@ +package apikeypermissions + +import ( + "encoding/json" + "fmt" + "strings" + + modelsiam "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/iam" +) + +type ResolvedPermission struct { + Scope string + Action string + ResourceType string + ResourcePattern string + PluginID string + Effect string +} + +func IsCoreSensitivePermission(permission modelsiam.Permission) bool { + module := strings.ToLower(strings.TrimSpace(permission.Module)) + resource := 
strings.ToLower(strings.TrimSpace(permission.Resource)) + action := strings.ToLower(strings.TrimSpace(permission.Action)) + + if module == "system" && resource == "root" { + return true + } + if module == "iam" && resource == "credential" { + return true + } + if module == "iam" && resource == "permission" && action != "read" && action != "list" { + return true + } + + meta := parseMeta(permission.Meta) + endpoint := strings.TrimSpace(anyToString(meta["api_endpoint"])) + if endpoint == "" { + return false + } + for _, prefix := range sensitiveEndpointPrefixes { + if strings.HasPrefix(endpoint, prefix) { + return true + } + } + return false +} + +func DefaultAllowAPIKey(permission modelsiam.Permission) bool { + if strings.TrimSpace(permission.Module) == "" { + return false + } + if strings.TrimSpace(permission.Resource) == "" || strings.TrimSpace(permission.Action) == "" { + return false + } + return !IsCoreSensitivePermission(permission) +} + +func ResolvePermission(permission modelsiam.Permission) (ResolvedPermission, bool) { + meta := parseMeta(permission.Meta) + if resolved, ok := resolveFromExplicitMeta(meta); ok { + return resolved, true + } + if !permission.AllowAPIKey { + return ResolvedPermission{}, false + } + if resolved, ok := resolveFromPermission(permission, meta); ok { + return resolved, true + } + return ResolvedPermission{}, false +} + +func BuildAPIKeyMeta(permission modelsiam.Permission) map[string]any { + resolved, ok := ResolvePermission(permission) + if !ok { + return nil + } + return map[string]any{ + "scope": resolved.Scope, + "action": resolved.Action, + "resource_type": resolved.ResourceType, + "resource_pattern": resolved.ResourcePattern, + "plugin_id": resolved.PluginID, + "effect": resolved.Effect, + } +} + +var sensitiveEndpointPrefixes = []string{ + "/api/v1/admin/user/auth/", + "/api/v1/admin/integration/api-keys", + "/api/v1/admin/integration/api-key-profiles", + "/api/v1/admin/iam/permissions", + "/api/v1/admin/iam/roles", +} + 
+func parseMeta(raw []byte) map[string]any { + if len(raw) == 0 { + return map[string]any{} + } + out := map[string]any{} + _ = json.Unmarshal(raw, &out) + return out +} + +func resolveFromExplicitMeta(meta map[string]any) (ResolvedPermission, bool) { + raw, ok := meta["api_key"] + if !ok { + return ResolvedPermission{}, false + } + m, ok := raw.(map[string]any) + if !ok { + return ResolvedPermission{}, false + } + resolved := ResolvedPermission{ + Scope: strings.TrimSpace(anyToString(m["scope"])), + Action: strings.TrimSpace(anyToString(m["action"])), + ResourceType: strings.TrimSpace(anyToString(m["resource_type"])), + ResourcePattern: strings.TrimSpace(anyToString(m["resource_pattern"])), + PluginID: strings.TrimSpace(anyToString(m["plugin_id"])), + Effect: strings.TrimSpace(anyToString(m["effect"])), + } + if resolved.Effect == "" { + resolved.Effect = "allow" + } + if resolved.Scope == "" || resolved.Action == "" || resolved.ResourceType == "" || resolved.ResourcePattern == "" { + return ResolvedPermission{}, false + } + return resolved, true +} + +func resolveFromPermission(permission modelsiam.Permission, meta map[string]any) (ResolvedPermission, bool) { + module := strings.TrimSpace(permission.Module) + resource := strings.TrimSpace(permission.Resource) + action := strings.TrimSpace(permission.Action) + if module == "" || resource == "" || action == "" { + return ResolvedPermission{}, false + } + httpMethod := strings.ToUpper(strings.TrimSpace(anyToString(meta["http_method"]))) + apiEndpoint := strings.TrimSpace(anyToString(meta["api_endpoint"])) + resourcePattern := "" + if httpMethod != "" && apiEndpoint != "" { + resourcePattern = fmt.Sprintf("%s:%s", httpMethod, apiEndpoint) + } else { + resourcePattern = fmt.Sprintf("%s:%s.%s", strings.ToUpper(action), module, resource) + } + return ResolvedPermission{ + Scope: fmt.Sprintf("_scope.%s.%s.%s", module, resource, action), + Action: strings.ToLower(action), + ResourceType: "api", + ResourcePattern: 
resourcePattern, + Effect: "allow", + }, true +} + +func anyToString(v any) string { + switch val := v.(type) { + case string: + return val + case []byte: + return string(val) + case nil: + return "" + default: + return fmt.Sprint(v) + } +} diff --git a/backend/internal/service/integration_gateway/apikeypermissions/templates.go b/backend/internal/service/integration_gateway/apikeypermissions/templates.go new file mode 100644 index 00000000..349084d6 --- /dev/null +++ b/backend/internal/service/integration_gateway/apikeypermissions/templates.go @@ -0,0 +1,200 @@ +package apikeypermissions + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + + modelsiam "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/iam" + iamrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/iam" + "gorm.io/gorm" +) + +func EnsureTemplatePermissions(ctx context.Context, repo *iamrepo.PermissionRepository) error { + if repo == nil { + return nil + } + return repo.UpsertBatch(ctx, BuildTemplatePermissions()) +} + +const ( + DefaultAPIKeyProfileKey = "integration.default" + DefaultAPIKeyProfileName = "Integration Default API Key Profile" +) + +func EnsureTenantDefaultProfile(ctx context.Context, db *gorm.DB, tenantUUID string, ownerMemberID *uint64) (*modelsiam.APIKeyProfile, []uint64, error) { + tenantUUID = strings.TrimSpace(tenantUUID) + if tenantUUID == "" { + return nil, nil, fmt.Errorf("tenant_uuid required") + } + if db == nil { + return nil, nil, fmt.Errorf("db required") + } + permRepo := iamrepo.NewPermissionRepository(db) + if err := EnsureTemplatePermissions(ctx, permRepo); err != nil { + return nil, nil, err + } + rows, _, err := permRepo.List(ctx, map[string]string{ + "status": string(modelsiam.PermissionStatusActive), + "allow_api_key": "true", + "module": "integration_gateway", + }, 0, 5000, "id ASC") + if err != nil { + return nil, nil, err + } + permissionIDs := make([]uint64, 0, len(rows)) + for i := range rows { + 
permissionIDs = append(permissionIDs, rows[i].ID) + } + profiles := iamrepo.NewAPIKeyProfileRepository(db) + var profile *modelsiam.APIKeyProfile + err = db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + profileRepo := iamrepo.NewAPIKeyProfileRepository(tx) + profPermRepo := iamrepo.NewAPIKeyProfilePermissionRepository(tx) + + existed, findErr := profileRepo.FindByKey(ctx, tenantUUID, DefaultAPIKeyProfileKey) + if findErr != nil && !errors.Is(findErr, gorm.ErrRecordNotFound) { + return findErr + } + if existed == nil { + created, createErr := profileRepo.Create(ctx, &modelsiam.APIKeyProfile{ + TenantUUID: tenantUUID, + OwnerMemberID: ownerMemberID, + Key: DefaultAPIKeyProfileKey, + Name: DefaultAPIKeyProfileName, + Status: 1, + }) + if createErr != nil { + return createErr + } + existed = created + } else { + changed := false + if existed.Status != 1 { + existed.Status = 1 + changed = true + } + if strings.TrimSpace(existed.Name) == "" { + existed.Name = DefaultAPIKeyProfileName + changed = true + } + if existed.OwnerMemberID == nil && ownerMemberID != nil && *ownerMemberID > 0 { + value := *ownerMemberID + existed.OwnerMemberID = &value + changed = true + } + if changed { + updated, updateErr := profileRepo.Update(ctx, existed) + if updateErr != nil { + return updateErr + } + existed = updated + } + } + + currentIDs, listErr := profPermRepo.ListPermissionIDsOfProfile(ctx, existed.ID) + if listErr != nil { + return listErr + } + toAdd, toRemove := diffPermissionIDs(currentIDs, permissionIDs) + if revokeErr := profPermRepo.RevokeByIDsTx(tx, existed.ID, toRemove); revokeErr != nil { + return revokeErr + } + if grantErr := profPermRepo.GrantByIDsTx(tx, existed.ID, toAdd); grantErr != nil { + return grantErr + } + profile = existed + return nil + }) + if err != nil { + return nil, nil, err + } + if profile == nil { + profile, err = profiles.FindByKey(ctx, tenantUUID, DefaultAPIKeyProfileKey) + if err != nil { + return nil, nil, err + } + } + return profile, 
permissionIDs, nil +} + +func diffPermissionIDs(current []uint64, desired []uint64) (toAdd []uint64, toRemove []uint64) { + currentSet := make(map[uint64]struct{}, len(current)) + desiredSet := make(map[uint64]struct{}, len(desired)) + for _, id := range current { + currentSet[id] = struct{}{} + } + for _, id := range desired { + desiredSet[id] = struct{}{} + } + for _, id := range desired { + if _, ok := currentSet[id]; !ok { + toAdd = append(toAdd, id) + } + } + for _, id := range current { + if _, ok := desiredSet[id]; !ok { + toRemove = append(toRemove, id) + } + } + return +} + +func BuildTemplatePermissions() []modelsiam.Permission { + return []modelsiam.Permission{ + build("integration_gateway", "api_key.ws.topic", "subscribe", "API Key:WS 订阅 Topic(通用)", map[string]string{ + "scope": "_scope.ws.topic.subscribe", "action": "subscribe", "resource_type": "topic", "resource_pattern": "*", + }), + build("integration_gateway", "api_key.ws.topic", "publish", "API Key:WS 发布 Topic(通用)", map[string]string{ + "scope": "_scope.ws.topic.publish", "action": "publish", "resource_type": "topic", "resource_pattern": "*", + }), + build("integration_gateway", "api_key.event.topic", "publish", "API Key:Event 发布 Topic(通用)", map[string]string{ + "scope": "_scope.event.topic.publish", "action": "publish", "resource_type": "topic", "resource_pattern": "*", + }), + build("integration_gateway", "api_key.event.topic", "subscribe", "API Key:Event 订阅 Topic(通用)", map[string]string{ + "scope": "_scope.event.topic.subscribe", "action": "subscribe", "resource_type": "topic", "resource_pattern": "*", + }), + build("integration_gateway", "api_key.event.topic", "replay", "API Key:Event 回放 Topic(通用)", map[string]string{ + "scope": "_scope.event.topic.replay", "action": "replay", "resource_type": "topic", "resource_pattern": "*", + }), + build("integration_gateway", "api_key.iam.organization_departments", "list", "API Key:组织架构-部门树只读", map[string]string{ + "scope": 
"_scope.iam.organization.department.list", "action": "list", "resource_type": "api", "resource_pattern": "GET:/api/v1/admin/organization/departments/tree", + }), + build("integration_gateway", "api_key.iam.member", "list", "API Key:组织架构-成员列表只读", map[string]string{ + "scope": "_scope.iam.member.list", "action": "list", "resource_type": "api", "resource_pattern": "GET:/api/v1/admin/iam/members", + }), + build("integration_gateway", "api_key.iam.member", "read", "API Key:组织架构-成员详情只读", map[string]string{ + "scope": "_scope.iam.member.read", "action": "read", "resource_type": "api", "resource_pattern": "GET:/api/v1/admin/iam/members/:id", + }), + } +} + +func build(module string, resource string, action string, description string, apiKeyMeta map[string]string) modelsiam.Permission { + meta := map[string]any{ + "type": "api_key", + "module": "integration_gateway", + "label": description, + "api_key": map[string]any{ + "scope": strings.TrimSpace(apiKeyMeta["scope"]), + "action": strings.TrimSpace(apiKeyMeta["action"]), + "resource_type": strings.TrimSpace(apiKeyMeta["resource_type"]), + "resource_pattern": strings.TrimSpace(apiKeyMeta["resource_pattern"]), + "effect": "allow", + }, + } + metaBytes, _ := json.Marshal(meta) + return modelsiam.Permission{ + Module: module, + Resource: resource, + Action: action, + Effect: "allow", + Description: description, + AllowAPIKey: true, + Meta: metaBytes, + Status: modelsiam.PermissionStatusActive, + Source: "integration_gateway", + Introduced: IntroducedVersion(), + } +} diff --git a/backend/internal/service/integration_gateway/apikeypermissions/version.go b/backend/internal/service/integration_gateway/apikeypermissions/version.go new file mode 100644 index 00000000..1b08534f --- /dev/null +++ b/backend/internal/service/integration_gateway/apikeypermissions/version.go @@ -0,0 +1,19 @@ +package apikeypermissions + +import "strings" + +var introducedVersion = "v1.0.0" + +func SetIntroducedVersion(version string) { + trimmed := 
strings.TrimSpace(version) + if trimmed == "" { + introducedVersion = "v1.0.0" + return + } + introducedVersion = trimmed +} + +func IntroducedVersion() string { + return introducedVersion +} + diff --git a/backend/internal/service/integration_gateway/base_capabilities.go b/backend/internal/service/integration_gateway/base_capabilities.go index fab4f804..3c960e62 100644 --- a/backend/internal/service/integration_gateway/base_capabilities.go +++ b/backend/internal/service/integration_gateway/base_capabilities.go @@ -501,6 +501,389 @@ func builtinPlatformCapabilityDefinitions() []platformCapabilityDefinition { }, }, }, + { + CapabilityID: "com.corex.agent.invoke", + Title: "Agent Invoke", + Description: "非流式调用 Agent 对话。", + Module: "agent", + Categories: []string{"agent", "ai"}, + Intents: []string{"agent.invoke"}, + ToolScopes: []string{"agent.runtime"}, + Policy: capabilityPolicy{ + Prefer: "rest", + Fallback: []string{"grpc"}, + }, + Docs: []string{ + "specs/007-integration-gateway-and-mcp/contracts/agent.http-openapi.yaml", + "backend/api/grpc/contracts/powerx/agent/v1/agent_api.proto", + }, + Protocols: []models.ProtocolBinding{ + { + Channel: "rest", + Endpoint: "/api/v1/agents/invoke", + Method: "POST", + SchemaRef: "specs/007-integration-gateway-and-mcp/contracts/agent.http-openapi.yaml#/paths/~1agents~1invoke/post", + AuthType: "tenant_jwt", + ToolScope: "agent.runtime", + }, + { + Channel: "grpc", + Endpoint: "powerx.agent.v1.AgentInvokeService", + RPC: "Invoke", + SchemaRef: "backend/api/grpc/contracts/powerx/agent/v1/agent_api.proto#AgentInvokeService", + AuthType: "tenant_jwt", + ToolScope: "agent.runtime", + }, + }, + }, + { + CapabilityID: "com.corex.agent.stream", + Title: "Agent Stream", + Description: "通过 SSE/gRPC 流式输出 Agent 对话内容。", + Module: "agent", + Categories: []string{"agent", "ai"}, + Intents: []string{"agent.stream"}, + ToolScopes: []string{"agent.runtime"}, + Policy: capabilityPolicy{ + Prefer: "rest", + }, + Docs: []string{ + 
"specs/007-integration-gateway-and-mcp/contracts/agent.http-openapi.yaml", + "backend/api/grpc/contracts/powerx/agent/v1/stream.proto", + }, + Protocols: []models.ProtocolBinding{ + { + Channel: "rest", + Endpoint: "/api/v1/agents/stream/sse", + Method: "GET", + SchemaRef: "specs/007-integration-gateway-and-mcp/contracts/agent.http-openapi.yaml#/paths/~1agents~1stream~1sse/get", + AuthType: "tenant_jwt", + ToolScope: "agent.runtime", + }, + { + Channel: "grpc", + Endpoint: "powerx.agent.v1.AgentStreamService", + RPC: "Stream", + SchemaRef: "backend/api/grpc/contracts/powerx/agent/v1/stream.proto#AgentStreamService", + AuthType: "tenant_jwt", + ToolScope: "agent.runtime", + }, + }, + }, + { + CapabilityID: "com.corex.agent.session.manage", + Title: "Agent Session Management", + Description: "创建会话与管理消息。", + Module: "agent", + Categories: []string{"agent", "ai"}, + Intents: []string{"agent.session.manage"}, + ToolScopes: []string{"agent.session"}, + Policy: capabilityPolicy{ + Prefer: "rest", + Fallback: []string{"grpc"}, + }, + Docs: []string{ + "specs/007-integration-gateway-and-mcp/contracts/agent.http-openapi.yaml", + "backend/api/grpc/contracts/powerx/agent/v1/agent_api.proto", + }, + Protocols: []models.ProtocolBinding{ + { + Channel: "rest", + Endpoint: "/api/v1/agents/sessions", + Method: "POST", + SchemaRef: "specs/007-integration-gateway-and-mcp/contracts/agent.http-openapi.yaml#/paths/~1agents~1sessions/post", + AuthType: "tenant_jwt", + ToolScope: "agent.session", + }, + { + Channel: "grpc", + Endpoint: "powerx.agent.v1.AgentSessionService", + RPC: "CreateSession", + SchemaRef: "backend/api/grpc/contracts/powerx/agent/v1/agent_api.proto#AgentSessionService", + AuthType: "tenant_jwt", + ToolScope: "agent.session", + }, + }, + }, + { + CapabilityID: "com.corex.ai.llm.invoke", + Title: "LLM Invoke", + Description: "大语言模型无状态调用。", + Module: "ai", + Categories: []string{"ai", "llm"}, + Intents: []string{"ai.llm.invoke"}, + ToolScopes: []string{"ai.llm"}, + 
Policy: capabilityPolicy{ + Prefer: "rest", + Fallback: []string{"grpc"}, + }, + Docs: []string{ + "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml", + "backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto", + }, + Protocols: []models.ProtocolBinding{ + { + Channel: "rest", + Endpoint: "/api/v1/ai/llm/invoke", + Method: "POST", + SchemaRef: "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1llm~1invoke/post", + AuthType: "tenant_jwt", + ToolScope: "ai.llm", + }, + { + Channel: "grpc", + Endpoint: "powerx.ai.v1.MultimodalService", + RPC: "Invoke", + SchemaRef: "backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#MultimodalService", + AuthType: "tenant_jwt", + ToolScope: "ai.llm", + }, + }, + }, + { + CapabilityID: "com.corex.ai.llm.stream", + Title: "LLM Stream", + Description: "LLM 会话流式输出。", + Module: "ai", + Categories: []string{"ai", "llm"}, + Intents: []string{"ai.llm.stream"}, + ToolScopes: []string{"ai.llm"}, + Policy: capabilityPolicy{ + Prefer: "rest", + }, + Docs: []string{ + "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml", + "backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto", + }, + Protocols: []models.ProtocolBinding{ + { + Channel: "rest", + Endpoint: "/api/v1/ai/llm/sessions/{session_id}/stream", + Method: "GET", + SchemaRef: "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1llm~1sessions~1{session_id}~1stream/get", + AuthType: "tenant_jwt", + ToolScope: "ai.llm", + }, + { + Channel: "grpc", + Endpoint: "powerx.ai.v1.MultimodalService", + RPC: "Stream", + SchemaRef: "backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#MultimodalService", + AuthType: "tenant_jwt", + ToolScope: "ai.llm", + }, + }, + }, + { + CapabilityID: "com.corex.ai.llm.session.create", + Title: "LLM Session Create", + Description: "创建 LLM 会话。", + Module: "ai", + Categories: []string{"ai", "llm"}, + Intents: 
[]string{"ai.llm.session.create"}, + ToolScopes: []string{"ai.llm.session"}, + Policy: capabilityPolicy{ + Prefer: "rest", + Fallback: []string{"grpc"}, + }, + Docs: []string{ + "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml", + "backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto", + }, + Protocols: []models.ProtocolBinding{ + { + Channel: "rest", + Endpoint: "/api/v1/ai/llm/sessions", + Method: "POST", + SchemaRef: "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1llm~1sessions/post", + AuthType: "tenant_jwt", + ToolScope: "ai.llm.session", + }, + { + Channel: "grpc", + Endpoint: "powerx.ai.v1.MultimodalSessionService", + RPC: "CreateSession", + SchemaRef: "backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#MultimodalSessionService", + AuthType: "tenant_jwt", + ToolScope: "ai.llm.session", + }, + }, + }, + { + CapabilityID: "com.corex.ai.llm.session.append", + Title: "LLM Session Append", + Description: "追加 LLM 会话消息。", + Module: "ai", + Categories: []string{"ai", "llm"}, + Intents: []string{"ai.llm.session.append"}, + ToolScopes: []string{"ai.llm.session"}, + Policy: capabilityPolicy{ + Prefer: "rest", + Fallback: []string{"grpc"}, + }, + Docs: []string{ + "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml", + "backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto", + }, + Protocols: []models.ProtocolBinding{ + { + Channel: "rest", + Endpoint: "/api/v1/ai/llm/sessions/{session_id}/messages", + Method: "POST", + SchemaRef: "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1llm~1sessions~1{session_id}~1messages/post", + AuthType: "tenant_jwt", + ToolScope: "ai.llm.session", + }, + { + Channel: "grpc", + Endpoint: "powerx.ai.v1.MultimodalSessionService", + RPC: "AppendMessage", + SchemaRef: "backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#MultimodalSessionService", + AuthType: "tenant_jwt", + 
ToolScope: "ai.llm.session", + }, + }, + }, + { + CapabilityID: "com.corex.ai.image.invoke", + Title: "Image Invoke", + Description: "图像生成/理解调用。", + Module: "ai", + Categories: []string{"ai", "image"}, + Intents: []string{"ai.image.invoke"}, + ToolScopes: []string{"ai.image"}, + Policy: capabilityPolicy{ + Prefer: "rest", + Fallback: []string{"grpc"}, + }, + Docs: []string{ + "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml", + "backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto", + }, + Protocols: []models.ProtocolBinding{ + { + Channel: "rest", + Endpoint: "/api/v1/ai/image/invoke", + Method: "POST", + SchemaRef: "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1image~1invoke/post", + AuthType: "tenant_jwt", + ToolScope: "ai.image", + }, + { + Channel: "grpc", + Endpoint: "powerx.ai.v1.MultimodalService", + RPC: "Invoke", + SchemaRef: "backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#MultimodalService", + AuthType: "tenant_jwt", + ToolScope: "ai.image", + }, + }, + }, + { + CapabilityID: "com.corex.ai.video.invoke", + Title: "Video Invoke", + Description: "视频生成/理解调用。", + Module: "ai", + Categories: []string{"ai", "video"}, + Intents: []string{"ai.video.invoke"}, + ToolScopes: []string{"ai.video"}, + Policy: capabilityPolicy{ + Prefer: "rest", + Fallback: []string{"grpc"}, + }, + Docs: []string{ + "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml", + "backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto", + }, + Protocols: []models.ProtocolBinding{ + { + Channel: "rest", + Endpoint: "/api/v1/ai/video/invoke", + Method: "POST", + SchemaRef: "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1video~1invoke/post", + AuthType: "tenant_jwt", + ToolScope: "ai.video", + }, + { + Channel: "grpc", + Endpoint: "powerx.ai.v1.MultimodalService", + RPC: "Invoke", + SchemaRef: 
"backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#MultimodalService", + AuthType: "tenant_jwt", + ToolScope: "ai.video", + }, + }, + }, + { + CapabilityID: "com.corex.ai.tts.invoke", + Title: "TTS Invoke", + Description: "语音合成调用。", + Module: "ai", + Categories: []string{"ai", "tts"}, + Intents: []string{"ai.tts.invoke"}, + ToolScopes: []string{"ai.tts"}, + Policy: capabilityPolicy{ + Prefer: "rest", + Fallback: []string{"grpc"}, + }, + Docs: []string{ + "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml", + "backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto", + }, + Protocols: []models.ProtocolBinding{ + { + Channel: "rest", + Endpoint: "/api/v1/ai/tts/invoke", + Method: "POST", + SchemaRef: "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1tts~1invoke/post", + AuthType: "tenant_jwt", + ToolScope: "ai.tts", + }, + { + Channel: "grpc", + Endpoint: "powerx.ai.v1.MultimodalService", + RPC: "Invoke", + SchemaRef: "backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#MultimodalService", + AuthType: "tenant_jwt", + ToolScope: "ai.tts", + }, + }, + }, + { + CapabilityID: "com.corex.ai.embedding.invoke", + Title: "Embedding Invoke", + Description: "向量生成(embedding)。", + Module: "ai", + Categories: []string{"ai", "embedding"}, + Intents: []string{"ai.embedding.invoke"}, + ToolScopes: []string{"ai.embedding"}, + Policy: capabilityPolicy{ + Prefer: "rest", + Fallback: []string{"grpc"}, + }, + Docs: []string{ + "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml", + "backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto", + }, + Protocols: []models.ProtocolBinding{ + { + Channel: "rest", + Endpoint: "/api/v1/ai/embedding/invoke", + Method: "POST", + SchemaRef: "specs/007-integration-gateway-and-mcp/contracts/ai-multimodal.http-openapi.yaml#/paths/~1ai~1embedding~1invoke/post", + AuthType: "tenant_jwt", + ToolScope: "ai.embedding", + }, + { + Channel: 
"grpc", + Endpoint: "powerx.ai.v1.EmbeddingService", + RPC: "Embed", + SchemaRef: "backend/api/grpc/contracts/powerx/ai/v1/multimodal.proto#EmbeddingService", + AuthType: "tenant_jwt", + ToolScope: "ai.embedding", + }, + }, + }, } } @@ -634,6 +1017,9 @@ func (s *BaseCapabilitySeeder) buildAdapters(def platformCapabilityDefinition) [ if binding.Method != "" { labels["method"] = strings.ToUpper(strings.TrimSpace(binding.Method)) } + if binding.RPC != "" { + labels["rpc"] = strings.TrimSpace(binding.RPC) + } if binding.SchemaRef != "" { labels["schema_ref"] = binding.SchemaRef } diff --git a/backend/internal/service/knowledge_space/artifact_store.go b/backend/internal/service/knowledge_space/artifact_store.go new file mode 100644 index 00000000..4785167e --- /dev/null +++ b/backend/internal/service/knowledge_space/artifact_store.go @@ -0,0 +1,267 @@ +package knowledge_space + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/vectorstore" + "github.com/google/uuid" +) + +type ArtifactStoreOptions struct { + Scheme string + Bucket string + BaseDir string +} + +type ArtifactStore struct { + scheme string + bucket string + baseDir string +} + +func NewArtifactStore(opts ArtifactStoreOptions) *ArtifactStore { + scheme := strings.TrimSpace(opts.Scheme) + if scheme == "" { + scheme = "minio" + } + bucket := strings.TrimSpace(opts.Bucket) + if bucket == "" { + bucket = "powerx-knowledge" + } + baseDir := strings.TrimSpace(opts.BaseDir) + if baseDir == "" { + if isTestBinary() { + baseDir = filepath.Join(projectTmpDir(), "knowledge-artifacts") + } else { + wd, err := os.Getwd() + if err != nil { + baseDir = filepath.Join("backend", "reports", "_state", "knowledge-artifacts") + } else if root := findRepoRoot(wd); root != "" { + baseDir = filepath.Join(root, "backend", "reports", "_state", "knowledge-artifacts") + } else { + baseDir = 
filepath.Join("backend", "reports", "_state", "knowledge-artifacts") + } + } + } + return &ArtifactStore{scheme: scheme, bucket: bucket, baseDir: baseDir} +} + +type ArtifactWriteInput struct { + SpaceID uuid.UUID + JobUUID uuid.UUID + JobID uint64 + Format string + SourceURI string + Chunks []IngestionChunk + VectorRecords []vectorstore.VectorRecord + MaskingProfile string + Outcome pipelineOutcome + OCRArtifacts *OCRArtifacts +} + +type ArtifactBundleUpdate struct { + ChunkManifestURI string + VectorManifestURI string + MaskingReportURI string + OCRPageImagesURI string + OCRRawManifestURI string + OCRSearchablePDFURI string + Checksum string +} + +func (s *ArtifactStore) Write(ctx context.Context, in ArtifactWriteInput) (ArtifactBundleUpdate, error) { + _ = ctx + if s == nil { + return ArtifactBundleUpdate{}, nil + } + objectPrefix := filepath.ToSlash(filepath.Join("knowledge", in.SpaceID.String(), in.JobUUID.String())) + chunkKey := objectPrefix + "/chunk_manifest.json" + vectorKey := objectPrefix + "/vector_manifest.json" + maskingKey := objectPrefix + "/masking_report.json" + pagesManifestKey := objectPrefix + "/ocr/pages_manifest.json" + rawManifestKey := objectPrefix + "/ocr/raw_manifest.json" + + chunkManifest := map[string]any{ + "space_id": in.SpaceID.String(), + "job_id": in.JobUUID.String(), + "format": in.Format, + "source_uri": in.SourceURI, + "chunks": in.Chunks, + "metrics": in.Outcome, + } + vectorManifest := map[string]any{ + "space_id": in.SpaceID.String(), + "job_id": in.JobUUID.String(), + "dimensions": 32, + "model": "hash32", + "vectors": in.VectorRecords, + } + maskingReport := map[string]any{ + "space_id": in.SpaceID.String(), + "job_id": in.JobUUID.String(), + "masking_profile": in.MaskingProfile, + "masking_pct": in.Outcome.maskingPct, + } + + var pagesManifestBytes []byte + var rawManifestBytes []byte + var pagesManifestURI string + var rawManifestURI string + var searchablePDFURI string + + if in.OCRArtifacts != nil && 
len(in.OCRArtifacts.Pages) > 0 { + type pageItem struct { + PageNumber int `json:"page_number"` + ImageURI string `json:"image_uri"` + Width int `json:"width"` + Height int `json:"height"` + } + type rawItem struct { + PageNumber int `json:"page_number"` + Format string `json:"format"` + RawURI string `json:"raw_uri"` + } + pageItems := make([]pageItem, 0, len(in.OCRArtifacts.Pages)) + rawItems := make([]rawItem, 0, len(in.OCRArtifacts.Pages)) + for _, pg := range in.OCRArtifacts.Pages { + if pg.PageNumber <= 0 { + continue + } + imgExt := pg.ImageExt() + if imgExt == "" { + imgExt = ".png" + } + rawExt := pg.RawExt() + if rawExt == "" { + rawExt = ".tsv" + } + imgKey := fmt.Sprintf("%s/ocr/pages/%03d%s", objectPrefix, pg.PageNumber, imgExt) + rawKey := fmt.Sprintf("%s/ocr/raw/%03d%s", objectPrefix, pg.PageNumber, rawExt) + + if b, err := os.ReadFile(filepath.Clean(pg.ImagePath)); err == nil { + if err := s.writeObject(imgKey, b); err != nil { + return ArtifactBundleUpdate{}, err + } + pageItems = append(pageItems, pageItem{ + PageNumber: pg.PageNumber, + ImageURI: s.uri(imgKey), + Width: pg.Width, + Height: pg.Height, + }) + } + if b, err := os.ReadFile(filepath.Clean(pg.RawPath)); err == nil { + if err := s.writeObject(rawKey, b); err != nil { + return ArtifactBundleUpdate{}, err + } + rawItems = append(rawItems, rawItem{ + PageNumber: pg.PageNumber, + Format: strings.TrimSpace(in.OCRArtifacts.RawFormat), + RawURI: s.uri(rawKey), + }) + } + } + pagesManifest := map[string]any{ + "space_id": in.SpaceID.String(), + "job_id": in.JobUUID.String(), + "format": "pdf", + "source_uri": in.SourceURI, + "pages": pageItems, + } + rawManifest := map[string]any{ + "space_id": in.SpaceID.String(), + "job_id": in.JobUUID.String(), + "format": "pdf", + "source_uri": in.SourceURI, + "raw_format": strings.TrimSpace(in.OCRArtifacts.RawFormat), + "raw": rawItems, + } + pagesManifestBytes, _ = json.MarshalIndent(pagesManifest, "", " ") + rawManifestBytes, _ = 
json.MarshalIndent(rawManifest, "", " ") + if err := s.writeObject(pagesManifestKey, pagesManifestBytes); err != nil { + return ArtifactBundleUpdate{}, err + } + if err := s.writeObject(rawManifestKey, rawManifestBytes); err != nil { + return ArtifactBundleUpdate{}, err + } + pagesManifestURI = s.uri(pagesManifestKey) + rawManifestURI = s.uri(rawManifestKey) + } + + chunkBytes, _ := json.MarshalIndent(chunkManifest, "", " ") + vectorBytes, _ := json.MarshalIndent(vectorManifest, "", " ") + maskingBytes, _ := json.MarshalIndent(maskingReport, "", " ") + + check := sha256.New() + check.Write(chunkBytes) + check.Write(vectorBytes) + check.Write(maskingBytes) + check.Write(pagesManifestBytes) + check.Write(rawManifestBytes) + checksum := hex.EncodeToString(check.Sum(nil)) + + if err := s.writeObject(chunkKey, chunkBytes); err != nil { + return ArtifactBundleUpdate{}, err + } + if err := s.writeObject(vectorKey, vectorBytes); err != nil { + return ArtifactBundleUpdate{}, err + } + if err := s.writeObject(maskingKey, maskingBytes); err != nil { + return ArtifactBundleUpdate{}, err + } + + return ArtifactBundleUpdate{ + ChunkManifestURI: s.uri(chunkKey), + VectorManifestURI: s.uri(vectorKey), + MaskingReportURI: s.uri(maskingKey), + OCRPageImagesURI: pagesManifestURI, + OCRRawManifestURI: rawManifestURI, + OCRSearchablePDFURI: searchablePDFURI, + Checksum: checksum, + }, nil +} + +func (s *ArtifactStore) writeObject(objectKey string, data []byte) error { + if s == nil { + return nil + } + path := filepath.Join(s.baseDir, s.bucket, filepath.FromSlash(objectKey)) + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { + return err + } + return os.WriteFile(path, data, 0o644) +} + +func (s *ArtifactStore) uri(objectKey string) string { + if s == nil { + return "" + } + return s.scheme + "://" + s.bucket + "/" + strings.TrimPrefix(objectKey, "/") +} + +// DeleteJobArtifacts removes local on-disk artifacts for a given job. 
+// It is a best-effort cleanup and only affects the filesystem-backed ArtifactStore. +func (s *ArtifactStore) DeleteJobArtifacts(spaceID uuid.UUID, jobUUID uuid.UUID) (bool, error) { + if s == nil { + return false, nil + } + if spaceID == uuid.Nil || jobUUID == uuid.Nil { + return false, errors.New("spaceID/jobUUID is required") + } + // Keep consistent with `Write` objectPrefix: knowledge/// + dir := filepath.Join(s.baseDir, s.bucket, filepath.FromSlash(filepath.ToSlash(filepath.Join("knowledge", spaceID.String(), jobUUID.String())))) + if _, err := os.Stat(dir); err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + return true, os.RemoveAll(dir) +} diff --git a/backend/internal/service/knowledge_space/chunking.go b/backend/internal/service/knowledge_space/chunking.go new file mode 100644 index 00000000..0d9d51e9 --- /dev/null +++ b/backend/internal/service/knowledge_space/chunking.go @@ -0,0 +1,1062 @@ +package knowledge_space + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "unicode/utf8" + + "github.com/google/uuid" +) + +type ChunkingOptions struct { + // Mode controls how to split the unit content before windowing. + // Supported: unit|heading|clause|semantic|table_row|code_block|conversation + Mode string + // SizePolicy controls how chunkSize is applied: cap (only split long parts) or target (merge short parts). + // Supported: cap|target + SizePolicy string + // PagePriority forces per-unit (page) chunks when content is short enough. + PagePriority bool + // SegmentOrder controls execution order: page | size | segment | separator. + SegmentOrder []string + // DocUUID binds chunks to a specific document ID (e.g. media asset UUID). + DocUUID string + // ChunkSize is measured in runes (approx chars). 0 keeps legacy behavior (one chunk per unit). + ChunkSize int + // ChunkOverlap is measured in runes (approx chars). Only applies when ChunkSize > 0. 
+ ChunkOverlap int + // Separators are preferred boundaries applied after mode splitting and before windowing. + // It supports punctuation and newline tokens (e.g. "\n\n", "。", ";"). + Separators []string + Anchors ChunkAnchors +} + +type ChunkAnchors struct { + HeadingPath bool + ClauseID bool + RowNumber bool + Speaker bool + SentenceIndex bool +} + +// ChunkDocument converts processed document units into multi-granularity chunks. +// It emits at least: doc_summary, section_summary, chunk. +func ChunkDocument(spaceID uuid.UUID, format string, sourceURI string, units []DocumentUnit, opts ChunkingOptions, onProgress func(done, total float64)) []IngestionChunk { + normalizedFormat := strings.ToLower(strings.TrimSpace(format)) + src := strings.TrimSpace(sourceURI) + mode := strings.ToLower(strings.TrimSpace(opts.Mode)) + if mode == "" { + mode = "unit" + } + // When pagePriority is disabled, merge PDF units so chunking can cross page boundaries. + if normalizedFormat == "pdf" && !opts.PagePriority && len(units) > 1 { + units = mergePDFUnits(units) + } + docUUID := strings.TrimSpace(opts.DocUUID) + if docUUID == "" { + docUUID = uuid.NewSHA1(spaceID, []byte("doc|"+normalizedFormat+"|"+src)).String() + } + + chunks := make([]IngestionChunk, 0, 1+len(units)*2) + + docSummary := IngestionChunk{ + ID: uuid.NewSHA1(spaceID, []byte("doc_summary|"+normalizedFormat+"|"+src)), + Kind: "doc_summary", + Content: fmt.Sprintf("Summary for %s (%s)", src, normalizedFormat), + Metadata: map[string]any{ + "format": normalizedFormat, + "source_uri": src, + "provenance": map[string]any{}, + "doc_uuid": docUUID, + }, + } + chunks = append(chunks, docSummary) + + totalUnits := len(units) + for idx, unit := range units { + prov := unit.Provenance + if prov == nil { + prov = map[string]any{} + } + sectionKey := fmt.Sprintf("section_summary|%s|%s|%d", normalizedFormat, src, idx+1) + sectionSummary := IngestionChunk{ + ID: uuid.NewSHA1(spaceID, []byte(sectionKey)), + Kind: "section_summary", 
+ Content: fmt.Sprintf("Section %d summary for %s", idx+1, src), + Metadata: map[string]any{ + "format": normalizedFormat, + "source_uri": src, + "provenance": prov, + "section": idx + 1, + "doc_uuid": docUUID, + }, + Confidence: unit.Confidence, + } + chunks = append(chunks, sectionSummary) + + content := strings.TrimSpace(unit.Content) + if content == "" { + if onProgress != nil && totalUnits > 0 { + onProgress(float64(idx+1), float64(totalUnits)) + } + continue + } + parts := applySegmentOrder(content, mode, opts) + if len(parts) == 0 { + parts = []segmentPart{{Text: content}} + } + + chunkCounter := 0 + partTotal := len(parts) + for partIdx, part := range parts { + partText := strings.TrimSpace(part.Text) + if partText == "" { + continue + } + chunkCounter++ + contentKey := fmt.Sprintf("chunk|%s|%s|%d|%d", normalizedFormat, src, idx+1, chunkCounter) + meta := map[string]any{ + "format": normalizedFormat, + "source_uri": src, + "provenance": prov, + "section": idx + 1, + "chunk_idx": chunkCounter, + "segment_mode": mode, + "doc_uuid": docUUID, + } + if sp := parseSegmentIndex(part.Meta, "segment_part"); sp > 0 { + meta["segment_part"] = sp + } else { + meta["segment_part"] = partIdx + 1 + } + if sp := parseSegmentIndex(part.Meta, "segment_subpart"); sp > 0 { + meta["segment_subpart"] = sp + } + if opts.ChunkSize > 0 { + meta["chunk_size"] = opts.ChunkSize + if overlap := normalizeOverlap(opts.ChunkSize, opts.ChunkOverlap); overlap > 0 { + meta["overlap"] = overlap + } + } + applyAnchors(meta, part.Meta, opts.Anchors) + chunks = append(chunks, IngestionChunk{ + ID: uuid.NewSHA1(spaceID, []byte(contentKey)), + Kind: "chunk", + Content: partText, + Metadata: meta, + Confidence: unit.Confidence, + }) + if onProgress != nil && totalUnits > 0 { + denom := partTotal + if denom <= 0 { + denom = 1 + } + onProgress(float64(idx)+float64(partIdx+1)/float64(denom), float64(totalUnits)) + } + } + if partTotal == 0 && onProgress != nil && totalUnits > 0 { + 
onProgress(float64(idx+1), float64(totalUnits)) + } + } + + return chunks +} + +func mergePDFUnits(units []DocumentUnit) []DocumentUnit { + if len(units) <= 1 { + return units + } + var b strings.Builder + pages := make([]any, 0, len(units)) + confidence := 0.0 + for _, unit := range units { + text := strings.TrimSpace(unit.Content) + if text == "" { + continue + } + if b.Len() > 0 { + b.WriteString("\n\n") + } + b.WriteString(text) + if confidence == 0 && unit.Confidence > 0 { + confidence = unit.Confidence + } + if unit.Provenance == nil { + continue + } + if v, ok := unit.Provenance["pages"]; ok { + if list, ok := v.([]any); ok { + pages = append(pages, list...) + continue + } + } + if v, ok := unit.Provenance["page"]; ok { + if n := parseAnyInt(v); n > 0 { + pages = append(pages, map[string]any{"page_number": n}) + } + } + if v, ok := unit.Provenance["page_number"]; ok { + if n := parseAnyInt(v); n > 0 { + pages = append(pages, map[string]any{"page_number": n}) + } + } + } + merged := DocumentUnit{Content: b.String(), Confidence: confidence} + if len(pages) > 0 { + merged.Provenance = map[string]any{"pages": pages} + } + return []DocumentUnit{merged} +} + +func normalizeSegmentOrder(order []string, mode string) []string { + allowed := map[string]struct{}{ + "page": {}, + "size": {}, + "segment": {}, + "separator": {}, + } + out := make([]string, 0, len(order)) + seen := map[string]struct{}{} + for _, raw := range order { + key := strings.ToLower(strings.TrimSpace(raw)) + if key == "" { + continue + } + if _, ok := allowed[key]; !ok { + continue + } + if _, ok := seen[key]; ok { + continue + } + seen[key] = struct{}{} + out = append(out, key) + } + if len(out) == 0 { + if strings.EqualFold(strings.TrimSpace(mode), "unit") { + return []string{"page", "size", "segment", "separator"} + } + return []string{"page", "segment", "size", "separator"} + } + return out +} + +func mergeMeta(a, b map[string]any) map[string]any { + if a == nil && b == nil { + return nil + } + 
out := map[string]any{} + for k, v := range a { + out[k] = v + } + for k, v := range b { + out[k] = v + } + return out +} + +func parseSegmentIndex(meta map[string]any, key string) int { + if meta == nil { + return 0 + } + v, ok := meta[key] + if !ok || v == nil { + return 0 + } + switch t := v.(type) { + case int: + return t + case int64: + return int(t) + case float64: + return int(t) + case string: + n, _ := strconv.Atoi(strings.TrimSpace(t)) + return n + default: + return 0 + } +} + +func normalizeOverlap(size int, overlap int) int { + if size <= 0 { + return 0 + } + if overlap < 0 { + overlap = 0 + } + if overlap >= size { + overlap = size / 4 + } + return overlap +} + +func applySegmentOrder(content string, mode string, opts ChunkingOptions) []segmentPart { + parts := []segmentPart{{Text: content}} + contentLen := utf8.RuneCountInString(content) + order := normalizeSegmentOrder(opts.SegmentOrder, mode) + locked := false + + for _, step := range order { + if locked { + break + } + switch step { + case "page": + if !opts.PagePriority { + continue + } + if len(parts) == 1 { + if opts.ChunkSize <= 0 || contentLen <= opts.ChunkSize { + locked = true + } + } + case "size": + if opts.ChunkSize <= 0 { + continue + } + if len(parts) == 1 && contentLen <= opts.ChunkSize { + locked = true + } + case "segment": + parts = splitPartsByMode(parts, mode) + case "separator": + if len(opts.Separators) == 0 { + continue + } + parts = splitPartsBySeparators(parts, opts.Separators) + } + } + + if len(parts) == 0 { + parts = []segmentPart{{Text: content}} + } + + ensureSegmentPart(parts) + + if opts.ChunkSize > 0 && strings.EqualFold(strings.TrimSpace(opts.SizePolicy), "target") { + parts = mergePartsToTarget(parts, opts.ChunkSize) + } + + if opts.ChunkSize > 0 { + parts = applyWindowing(parts, opts.ChunkSize, opts.ChunkOverlap, opts.Separators) + } + + return parts +} + +func mergePartsToTarget(parts []segmentPart, size int) []segmentPart { + if size <= 0 || len(parts) <= 1 { + 
return parts + } + out := make([]segmentPart, 0, len(parts)) + var buf strings.Builder + currentLen := 0 + var currentMeta map[string]any + + flush := func() { + text := strings.TrimSpace(buf.String()) + if text != "" { + out = append(out, segmentPart{Text: text, Meta: currentMeta}) + } + buf.Reset() + currentLen = 0 + currentMeta = nil + } + + for _, part := range parts { + text := strings.TrimSpace(part.Text) + if text == "" { + continue + } + partLen := utf8.RuneCountInString(text) + if currentLen == 0 { + currentMeta = part.Meta + } + // If current buffer plus this part stays within target size, merge it. + if currentLen == 0 || currentLen+2+partLen <= size { + if buf.Len() > 0 { + buf.WriteString("\n\n") + currentLen += 2 + } + buf.WriteString(text) + currentLen += partLen + continue + } + // Flush current buffer and start a new one. + flush() + // If the part itself is larger than target, keep it as-is and let windowing split later. + if partLen >= size { + out = append(out, segmentPart{Text: text, Meta: part.Meta}) + continue + } + buf.WriteString(text) + currentLen = partLen + currentMeta = part.Meta + } + flush() + return out +} + +func splitPartsByMode(parts []segmentPart, mode string) []segmentPart { + out := make([]segmentPart, 0, len(parts)+2) + segIdx := 0 + for _, part := range parts { + text := strings.TrimSpace(part.Text) + if text == "" { + continue + } + splits := splitByModeWithMeta(text, mode) + for _, sp := range splits { + segIdx++ + meta := mergeMeta(part.Meta, sp.Meta) + if meta == nil { + meta = map[string]any{} + } + meta["segment_part"] = segIdx + out = append(out, segmentPart{Text: sp.Text, Meta: meta}) + } + } + return out +} + +func splitPartsBySeparators(parts []segmentPart, separators []string) []segmentPart { + out := make([]segmentPart, 0, len(parts)+2) + for _, part := range parts { + text := strings.TrimSpace(part.Text) + if text == "" { + continue + } + splits := splitByCustomSeparatorsWithMeta(text, part.Meta, separators) + if 
len(splits) <= 1 { + out = append(out, splits...) + continue + } + for i, sp := range splits { + meta := mergeMeta(part.Meta, sp.Meta) + if meta == nil { + meta = map[string]any{} + } + meta["segment_subpart"] = i + 1 + out = append(out, segmentPart{Text: sp.Text, Meta: meta}) + } + } + return out +} + +func ensureSegmentPart(parts []segmentPart) { + has := false + for _, part := range parts { + if parseSegmentIndex(part.Meta, "segment_part") > 0 { + has = true + break + } + } + if has { + return + } + for i := range parts { + if parts[i].Meta == nil { + parts[i].Meta = map[string]any{} + } + parts[i].Meta["segment_part"] = 1 + } +} + +func applyWindowing(parts []segmentPart, size int, overlap int, separators []string) []segmentPart { + if size <= 0 { + return parts + } + ov := normalizeOverlap(size, overlap) + out := make([]segmentPart, 0, len(parts)) + for _, part := range parts { + text := strings.TrimSpace(part.Text) + if text == "" { + continue + } + if utf8.RuneCountInString(text) <= size { + out = append(out, part) + continue + } + segments := splitByRuneWindowPreferSeparators(text, size, ov, separators) + for _, seg := range segments { + out = append(out, segmentPart{Text: seg, Meta: part.Meta}) + } + } + return out +} + +func splitByRuneWindow(s string, window int, step int) []string { + if window <= 0 || step <= 0 { + return []string{strings.TrimSpace(s)} + } + rs := []rune(s) + out := make([]string, 0, (len(rs)/step)+1) + for start := 0; start < len(rs); start += step { + end := start + window + if end > len(rs) { + end = len(rs) + } + seg := strings.TrimSpace(string(rs[start:end])) + if seg != "" { + out = append(out, seg) + } + if end >= len(rs) { + break + } + } + return out +} + +// splitByRuneWindowPreferSeparators will chunk by rune window, but will try to end each chunk +// at a separator boundary within the window to avoid cutting a sentence/line mid-way. +// +// - window/overlap are in runes. +// - separators are literal strings (e.g. 
"\n\n", "。", ";", "•"). +func splitByRuneWindowPreferSeparators(s string, window int, overlap int, separators []string) []string { + raw := strings.TrimSpace(s) + if raw == "" { + return nil + } + if window <= 0 { + return []string{raw} + } + + rs := []rune(raw) + if len(rs) <= window { + return []string{raw} + } + if overlap < 0 { + overlap = 0 + } + if overlap >= window { + overlap = window / 4 + } + + // sanitize separators (limit size to keep perf predictable) + seps := make([]string, 0, len(separators)) + for _, sep := range separators { + sep = strings.TrimSpace(sep) + if sep == "" { + continue + } + if len([]rune(sep)) > 16 { + continue + } + seps = append(seps, sep) + if len(seps) >= 32 { + break + } + } + + // Map rune index -> byte index for raw string. + runeToByte := make([]int, 0, len(rs)+1) + for i := range raw { + runeToByte = append(runeToByte, i) + } + runeToByte = append(runeToByte, len(raw)) + runeIndexFromByte := func(bytePos int) int { + // returns rune index whose start byte == bytePos (or nearest next start) + i := sort.Search(len(runeToByte), func(i int) bool { return runeToByte[i] >= bytePos }) + if i < 0 { + return 0 + } + if i > len(rs) { + return len(rs) + } + return i + } + + chooseEnd := func(startRune int, idealEndRune int) int { + if len(seps) == 0 { + return idealEndRune + } + byteStart := runeToByte[startRune] + byteIdealEnd := runeToByte[idealEndRune] + windowStr := raw[byteStart:byteIdealEnd] + + bestEndByte := -1 + for _, sep := range seps { + if idx := strings.LastIndex(windowStr, sep); idx >= 0 { + endByte := idx + len(sep) + if endByte > bestEndByte { + bestEndByte = endByte + } + } + } + if bestEndByte < 0 { + return idealEndRune + } + + // Avoid producing too-short chunks: require at least 60% of window, otherwise fallback. 
+ minLen := int(float64(window) * 0.6) + if minLen < 1 { + minLen = 1 + } + endRune := runeIndexFromByte(byteStart + bestEndByte) + if endRune-startRune < minLen { + return idealEndRune + } + if endRune <= startRune { + return idealEndRune + } + if endRune > idealEndRune { + return idealEndRune + } + return endRune + } + + chooseStart := func(proposedStart int, endRune int) int { + if proposedStart <= 0 || proposedStart >= endRune { + return proposedStart + } + if len(seps) == 0 || overlap <= 0 { + return proposedStart + } + // In overlap window, try to advance start to the first "separator end" boundary, + // so we avoid starting mid-sentence/line. + byteStart := runeToByte[proposedStart] + byteEnd := runeToByte[endRune] + windowStr := raw[byteStart:byteEnd] + + bestStartByte := -1 + for _, sep := range seps { + if idx := strings.Index(windowStr, sep); idx >= 0 { + startByte := idx + len(sep) + if bestStartByte < 0 || startByte < bestStartByte { + bestStartByte = startByte + } + } + } + if bestStartByte < 0 { + return proposedStart + } + startRune := runeIndexFromByte(byteStart + bestStartByte) + if startRune <= proposedStart || startRune >= endRune { + return proposedStart + } + return startRune + } + + out := make([]string, 0, (len(rs)/window)+1) + for start := 0; start < len(rs); { + idealEnd := start + window + if idealEnd > len(rs) { + idealEnd = len(rs) + } + end := chooseEnd(start, idealEnd) + seg := strings.TrimSpace(string(rs[start:end])) + if seg != "" { + out = append(out, seg) + } + if end >= len(rs) { + break + } + next := end - overlap + if next <= start { + next = end + } + next = chooseStart(next, end) + if next <= start { + next = end + } + start = next + } + return out +} + +func splitByCustomSeparatorsWithMeta(s string, meta map[string]any, separators []string) []segmentPart { + parts := splitByCustomSeparators(s, separators) + if len(parts) == 0 { + return []segmentPart{{Text: strings.TrimSpace(s), Meta: meta}} + } + out := make([]segmentPart, 
0, len(parts)) + for _, p := range parts { + p = strings.TrimSpace(p) + if p == "" { + continue + } + out = append(out, segmentPart{Text: p, Meta: meta}) + } + if len(out) == 0 { + return []segmentPart{{Text: strings.TrimSpace(s), Meta: meta}} + } + return out +} + +func splitByCustomSeparators(s string, separators []string) []string { + raw := strings.TrimSpace(s) + if raw == "" || len(separators) == 0 { + return []string{raw} + } + seps := make([]string, 0, len(separators)) + for _, sep := range separators { + sep = strings.TrimSpace(sep) + if sep == "" { + continue + } + seps = append(seps, sep) + if len(seps) >= 32 { + break + } + } + if len(seps) == 0 { + return []string{raw} + } + + out := make([]string, 0, 16) + rest := raw + for { + bestIdx := -1 + bestSep := "" + for _, sep := range seps { + i := strings.Index(rest, sep) + if i < 0 { + continue + } + if bestIdx < 0 || i < bestIdx || (i == bestIdx && len(sep) > len(bestSep)) { + bestIdx = i + bestSep = sep + } + } + if bestIdx < 0 { + rest = strings.TrimSpace(rest) + if rest != "" { + out = append(out, rest) + } + break + } + cut := bestIdx + len(bestSep) + part := strings.TrimSpace(rest[:cut]) + if part != "" { + out = append(out, part) + } + rest = rest[cut:] + if len(out) >= 512 { + rest = strings.TrimSpace(rest) + if rest != "" { + out = append(out, rest) + } + break + } + } + if len(out) == 0 { + return []string{raw} + } + return out +} + +var ( + reHeadingMD = regexp.MustCompile(`(?m)^#{1,6}\s+.+$`) + reHeadingLine = regexp.MustCompile(`^#{1,6}\s+.+$`) + reHeadingParse = regexp.MustCompile(`^(#{1,6})\s+(.+)$`) + reClauseNum = regexp.MustCompile(`(?m)^(?:\d+(?:\.\d+)*|第[一二三四五六七八九十百千万]+条)[\s、..)]`) + reClauseParse = regexp.MustCompile(`^(?:(\d+(?:\.\d+)*)|(第[一二三四五六七八九十百千万]+条))[\s、..)]`) + reSpeakerLine = regexp.MustCompile(`(?m)^(?:[\\p{L}0-9_\\-]{1,20})[::]`) +) + +type segmentPart struct { + Text string + Meta map[string]any +} + +func splitByModeWithMeta(content string, mode string) []segmentPart { + 
switch mode { + case "heading": + return splitByMarkdownHeadingsWithMeta(content) + case "clause": + return splitByClauseWithMeta(content) + case "semantic": + return splitBySentencesWithMeta(content) + case "table_row": + return splitByLinesWithMeta(content) + case "code_block": + return splitByCodeBlocksWithMeta(content) + case "conversation": + return splitByConversationTurnsWithMeta(content) + case "unit": + fallthrough + default: + return []segmentPart{{Text: content}} + } +} + +func splitByMarkdownHeadingsWithMeta(s string) []segmentPart { + if !reHeadingMD.MatchString(s) { + return splitByParagraphsWithMeta(s) + } + lines := strings.Split(s, "\n") + var out []segmentPart + var buf []string + var sectionMeta map[string]any + var headingStack []struct { + level int + title string + } + + flush := func() { + txt := strings.TrimSpace(strings.Join(buf, "\n")) + if txt == "" { + buf = nil + return + } + meta := map[string]any{} + for k, v := range sectionMeta { + meta[k] = v + } + out = append(out, segmentPart{Text: txt, Meta: meta}) + buf = nil + } + + for _, line := range lines { + trim := strings.TrimSpace(line) + if reHeadingLine.MatchString(trim) { + if len(buf) > 0 { + flush() + } + m := reHeadingParse.FindStringSubmatch(trim) + level := 1 + title := trim + if len(m) == 3 { + level = len(m[1]) + title = strings.TrimSpace(m[2]) + } + // adjust stack to current level + for len(headingStack) > 0 && headingStack[len(headingStack)-1].level >= level { + headingStack = headingStack[:len(headingStack)-1] + } + headingStack = append(headingStack, struct { + level int + title string + }{level: level, title: title}) + path := make([]string, 0, len(headingStack)) + for _, h := range headingStack { + path = append(path, h.title) + } + sectionMeta = map[string]any{ + "heading_level": level, + "heading_title": title, + "heading_path": path, + } + buf = append(buf, line) + continue + } + if len(buf) == 0 && strings.TrimSpace(line) != "" && sectionMeta == nil { + // preface 
text before first heading + sectionMeta = map[string]any{} + } + buf = append(buf, line) + } + flush() + return out +} + +func splitByClauseWithMeta(s string) []segmentPart { + if !reClauseNum.MatchString(s) { + return splitByParagraphsWithMeta(s) + } + lines := strings.Split(s, "\n") + var out []segmentPart + var buf []string + var clauseID string + flush := func() { + txt := strings.TrimSpace(strings.Join(buf, "\n")) + if txt == "" { + buf = nil + return + } + meta := map[string]any{} + if clauseID != "" { + meta["clause_id"] = clauseID + } + out = append(out, segmentPart{Text: txt, Meta: meta}) + buf = nil + } + for _, line := range lines { + trim := strings.TrimSpace(line) + if reClauseNum.MatchString(trim) { + if len(buf) > 0 { + flush() + } + m := reClauseParse.FindStringSubmatch(trim) + switch { + case len(m) == 3 && m[1] != "": + clauseID = m[1] + case len(m) == 3 && m[2] != "": + clauseID = m[2] + default: + clauseID = strings.TrimSpace(reClauseNum.FindString(trim)) + } + } + buf = append(buf, line) + } + flush() + return out +} + +func splitByConversationTurnsWithMeta(s string) []segmentPart { + if !reSpeakerLine.MatchString(s) { + return splitByParagraphsWithMeta(s) + } + lines := strings.Split(s, "\n") + var out []segmentPart + var buf []string + var speaker string + flush := func() { + txt := strings.TrimSpace(strings.Join(buf, "\n")) + if txt == "" { + buf = nil + return + } + meta := map[string]any{} + if speaker != "" { + meta["speaker"] = speaker + } + out = append(out, segmentPart{Text: txt, Meta: meta}) + buf = nil + } + for _, line := range lines { + trim := strings.TrimSpace(line) + if reSpeakerLine.MatchString(trim) { + if len(buf) > 0 { + flush() + } + // speaker: content + if idx := strings.IndexAny(trim, "::"); idx > 0 { + speaker = strings.TrimSpace(trim[:idx]) + } else { + speaker = "" + } + } + buf = append(buf, line) + } + flush() + return out +} + +func splitBySentencesWithMeta(s string) []segmentPart { + // Simple heuristic: split by 
Chinese/English sentence punctuation. + raw := strings.TrimSpace(s) + if raw == "" { + return nil + } + seps := func(r rune) bool { + switch r { + case '。', '!', '?', '.', '!', '?', ';', ';': + return true + default: + return false + } + } + var out []segmentPart + var buf []rune + sentenceIdx := 0 + for _, r := range []rune(raw) { + buf = append(buf, r) + if seps(r) { + txt := strings.TrimSpace(string(buf)) + if txt != "" { + sentenceIdx++ + out = append(out, segmentPart{Text: txt, Meta: map[string]any{"sentence_idx": sentenceIdx}}) + } + buf = nil + } + } + if len(buf) > 0 { + txt := strings.TrimSpace(string(buf)) + if txt != "" { + sentenceIdx++ + out = append(out, segmentPart{Text: txt, Meta: map[string]any{"sentence_idx": sentenceIdx}}) + } + } + // If too few sentences, fallback to paragraphs. + if len(out) <= 1 { + return splitByParagraphsWithMeta(raw) + } + return out +} + +func splitByLinesWithMeta(s string) []segmentPart { + lines := strings.Split(s, "\n") + out := make([]segmentPart, 0, len(lines)) + row := 0 + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + row++ + out = append(out, segmentPart{Text: line, Meta: map[string]any{"row_number": row}}) + } + if len(out) == 0 { + return splitByParagraphsWithMeta(s) + } + return out +} + +func splitByCodeBlocksWithMeta(s string) []segmentPart { + // Very lightweight: split by blank lines, keep blocks. 
+ return splitByParagraphsWithMeta(s) +} + +func splitByParagraphsWithMeta(s string) []segmentPart { + raw := strings.TrimSpace(s) + if raw == "" { + return nil + } + parts := strings.Split(raw, "\n\n") + out := make([]segmentPart, 0, len(parts)) + for _, p := range parts { + p = strings.TrimSpace(p) + if p != "" { + out = append(out, segmentPart{Text: p}) + } + } + if len(out) == 0 { + return []segmentPart{{Text: raw}} + } + return out +} + +func applyAnchors(meta map[string]any, partMeta map[string]any, anchors ChunkAnchors) { + if meta == nil || partMeta == nil { + return + } + out := map[string]any{} + if anchors.HeadingPath { + if v, ok := partMeta["heading_path"]; ok { + out["heading_path"] = v + } + if v, ok := partMeta["heading_level"]; ok { + out["heading_level"] = v + } + if v, ok := partMeta["heading_title"]; ok { + out["heading_title"] = v + } + } + if anchors.ClauseID { + if v, ok := partMeta["clause_id"]; ok { + out["clause_id"] = v + } + } + if anchors.RowNumber { + if v, ok := partMeta["row_number"]; ok { + out["row_number"] = v + } + } + if anchors.Speaker { + if v, ok := partMeta["speaker"]; ok { + out["speaker"] = v + } + } + if anchors.SentenceIndex { + if v, ok := partMeta["sentence_idx"]; ok { + out["sentence_idx"] = v + } + } + if len(out) > 0 { + meta["anchors"] = out + } +} diff --git a/backend/internal/service/knowledge_space/chunking_test.go b/backend/internal/service/knowledge_space/chunking_test.go new file mode 100644 index 00000000..a15467ed --- /dev/null +++ b/backend/internal/service/knowledge_space/chunking_test.go @@ -0,0 +1,29 @@ +package knowledge_space + +import "testing" + +func TestSplitByRuneWindowPreferSeparators_PrefersSeparatorBoundary(t *testing.T) { + text := "一句话很长很长很长很长很长很长。下一句也很长很长很长很长很长很长。最后一句。" + seps := []string{"。"} + + // Force window small enough to require splitting, and ensure it prefers ending at "。" + window := 18 + parts := splitByRuneWindowPreferSeparators(text, window, 4, seps) + if len(parts) < 2 { + 
t.Fatalf("expected multiple chunks, got %d", len(parts)) + } + minLen := int(float64(window) * 0.6) + if minLen < 1 { + minLen = 1 + } + for i, p := range parts[:len(parts)-1] { + if p == "" { + t.Fatalf("chunk %d empty", i) + } + if p[len(p)-len("。"):] != "。" { + if got := len([]rune(p)); got < minLen { + t.Fatalf("chunk %d expected separator or min length %d, got %d: %q", i, minLen, got, p) + } + } + } +} diff --git a/backend/internal/service/knowledge_space/compliance/hooks.go b/backend/internal/service/knowledge_space/compliance/hooks.go index 64a2855b..423b3219 100644 --- a/backend/internal/service/knowledge_space/compliance/hooks.go +++ b/backend/internal/service/knowledge_space/compliance/hooks.go @@ -10,11 +10,17 @@ import ( ) // Guard enforces lightweight IAM + sensitivity gates for QA bridge. -type Guard struct{} +type Guard struct { + MustCiteSources bool + MinEvidenceChunks int +} // NewGuard constructs a guard instance. func NewGuard() *Guard { - return &Guard{} + return &Guard{ + MustCiteSources: true, + MinEvidenceChunks: 1, + } } // Evaluate returns a degrade reason if the request should not access the space. @@ -30,3 +36,17 @@ func (g *Guard) Evaluate(tenant uuid.UUID, space *models.KnowledgeSpace) string } return "" } + +// CheckGuardrails validates citation/evidence requirements. 
+func (g *Guard) CheckGuardrails(citationChunks int) string { + if g == nil { + return "" + } + if g.MustCiteSources && citationChunks <= 0 { + return "must_cite_sources" + } + if g.MinEvidenceChunks > 0 && citationChunks < g.MinEvidenceChunks { + return fmt.Sprintf("min_evidence_chunks=%d", g.MinEvidenceChunks) + } + return "" +} diff --git a/backend/internal/service/knowledge_space/context_snapshot/store.go b/backend/internal/service/knowledge_space/context_snapshot/store.go index bf619345..465a2ef8 100644 --- a/backend/internal/service/knowledge_space/context_snapshot/store.go +++ b/backend/internal/service/knowledge_space/context_snapshot/store.go @@ -2,10 +2,14 @@ package context_snapshot import ( "context" + "encoding/json" "sort" + "strings" "sync" + "time" "github.com/google/uuid" + "github.com/redis/go-redis/v9" ) // Citation represents a stored QA citation delta. @@ -23,12 +27,44 @@ type Citation struct { type Store struct { mu sync.RWMutex sessions map[string][]Citation + + redis *redis.Client + keyPrefix string + traceTTL time.Duration + snapshotTTL time.Duration +} + +type Options struct { + Redis *redis.Client + KeyPrefix string + SnapshotTTL time.Duration + TraceTTL time.Duration } -// NewStore returns an in-memory snapshot store. -func NewStore() *Store { +// NewStore returns a snapshot store (Redis-backed when configured, otherwise in-memory). 
+func NewStore(opts ...Options) *Store { + var cfg Options + if len(opts) > 0 { + cfg = opts[0] + } + prefix := strings.TrimSpace(cfg.KeyPrefix) + if prefix == "" { + prefix = "qa:snapshot" + } + snapshotTTL := cfg.SnapshotTTL + if snapshotTTL <= 0 { + snapshotTTL = 24 * time.Hour + } + traceTTL := cfg.TraceTTL + if traceTTL <= 0 { + traceTTL = 48 * time.Hour + } return &Store{ sessions: make(map[string][]Citation), + redis: cfg.Redis, + keyPrefix: prefix, + snapshotTTL: snapshotTTL, + traceTTL: traceTTL, } } @@ -36,11 +72,24 @@ func key(tenant uuid.UUID, sessionID string) string { return tenant.String() + ":" + sessionID } +func (s *Store) snapshotKey(tenant uuid.UUID, sessionID string) string { + return s.keyPrefix + ":session:" + key(tenant, sessionID) +} + +func (s *Store) traceKey(traceID string) string { + return s.keyPrefix + ":trace:" + strings.TrimSpace(traceID) +} + // Upsert merges citations for the given session and returns the latest view. -func (s *Store) Upsert(_ context.Context, tenant uuid.UUID, sessionID string, updates []Citation) []Citation { +func (s *Store) Upsert(ctx context.Context, tenant uuid.UUID, sessionID string, updates []Citation, traceID ...string) []Citation { if tenant == uuid.Nil || sessionID == "" { return nil } + + if s.redis != nil { + return s.upsertRedis(ctx, tenant, sessionID, updates, traceID...) 
+ } + s.mu.Lock() defer s.mu.Unlock() k := key(tenant, sessionID) @@ -71,11 +120,64 @@ func (s *Store) Upsert(_ context.Context, tenant uuid.UUID, sessionID string, up return copied } +func (s *Store) upsertRedis(ctx context.Context, tenant uuid.UUID, sessionID string, updates []Citation, traceID ...string) []Citation { + snapshotKey := s.snapshotKey(tenant, sessionID) + + var existing []Citation + if raw, err := s.redis.Get(ctx, snapshotKey).Bytes(); err == nil && len(raw) > 0 { + _ = json.Unmarshal(raw, &existing) + } + + merged := make(map[string]Citation, len(existing)+len(updates)) + for _, item := range existing { + if item.ChunkID == "" { + continue + } + merged[item.ChunkID] = item + } + for _, upd := range updates { + if upd.ChunkID == "" { + continue + } + merged[upd.ChunkID] = upd + } + + result := make([]Citation, 0, len(merged)) + for _, v := range merged { + result = append(result, v) + } + sort.SliceStable(result, func(i, j int) bool { return result[i].ChunkID < result[j].ChunkID }) + + if payload, err := json.Marshal(result); err == nil { + _ = s.redis.Set(ctx, snapshotKey, payload, s.snapshotTTL).Err() + } + + if len(traceID) > 0 { + trace := strings.TrimSpace(traceID[0]) + if trace != "" { + _ = s.redis.Set(ctx, s.traceKey(trace), snapshotKey, s.traceTTL).Err() + } + } + + return result +} + // Snapshot returns the stored citations for the requested session. 
-func (s *Store) Snapshot(_ context.Context, tenant uuid.UUID, sessionID string) []Citation { +func (s *Store) Snapshot(ctx context.Context, tenant uuid.UUID, sessionID string) []Citation { if tenant == uuid.Nil || sessionID == "" { return nil } + if s.redis != nil { + raw, err := s.redis.Get(ctx, s.snapshotKey(tenant, sessionID)).Bytes() + if err != nil || len(raw) == 0 { + return nil + } + var out []Citation + if err := json.Unmarshal(raw, &out); err != nil { + return nil + } + return out + } s.mu.RLock() defer s.mu.RUnlock() k := key(tenant, sessionID) @@ -86,3 +188,26 @@ func (s *Store) Snapshot(_ context.Context, tenant uuid.UUID, sessionID string) } return nil } + +// SnapshotByTrace resolves a trace_id -> session snapshot mapping when available. +func (s *Store) SnapshotByTrace(ctx context.Context, tenant uuid.UUID, traceID string) []Citation { + if tenant == uuid.Nil || strings.TrimSpace(traceID) == "" { + return nil + } + if s.redis == nil { + return nil + } + key, err := s.redis.Get(ctx, s.traceKey(traceID)).Result() + if err != nil || strings.TrimSpace(key) == "" { + return nil + } + raw, err := s.redis.Get(ctx, key).Bytes() + if err != nil || len(raw) == 0 { + return nil + } + var out []Citation + if err := json.Unmarshal(raw, &out); err != nil { + return nil + } + return out +} diff --git a/backend/internal/service/knowledge_space/corpus_check/metrics.go b/backend/internal/service/knowledge_space/corpus_check/metrics.go new file mode 100644 index 00000000..ab64d2d8 --- /dev/null +++ b/backend/internal/service/knowledge_space/corpus_check/metrics.go @@ -0,0 +1,457 @@ +package corpus_check + +import ( + "encoding/json" + "fmt" + "os" + "strings" + + "gorm.io/datatypes" + + strategy_catalog "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space/strategy_catalog" + models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" +) + +const defaultSceneStrategyCatalogPath = "backend/config/knowledge/scene_strategy_catalog.yaml" + 
+// BuildMetrics 根据 ingestion_jobs 的 metrics_snapshot 生成最小体检指标与推荐卡片。 +// +// T110:必须输出“推荐场景 + 推荐策略包 + 推荐理由 + 成本/风险提示”,并确保推荐只落在策略包映射的场景集合里。 +func BuildMetrics(sampleJobs []models.IngestionJob) (metrics datatypes.JSON, recommendations datatypes.JSON) { + type ocrStats struct { + Required int `json:"required"` + Needed int `json:"needed"` + Used int `json:"used"` + } + type dist map[string]int + + formatDist := make(dist) + sourceDist := make(dist) + langDist := make(dist) + ocr := ocrStats{} + total := 0 + dupKey := make(map[string]int) + dups := 0 + + for _, job := range sampleJobs { + total++ + format := strings.ToLower(strings.TrimSpace(job.SourceType)) + if format == "" { + format = "unknown" + } + formatDist[format]++ + sourceDist[formatCategory(format)]++ + dupKey[job.SourceID]++ + if dupKey[job.SourceID] > 1 { + dups++ + } + + var snap map[string]any + _ = json.Unmarshal(job.MetricsSnapshot, &snap) + if b, ok := snap["ocr_required"].(bool); ok && b { + ocr.Required++ + } + if b, ok := snap["ocr_needed"].(bool); ok && b { + ocr.Needed++ + } + if b, ok := snap["ocr_used"].(bool); ok && b { + ocr.Used++ + } + if lang, ok := snap["language"].(string); ok { + lang = strings.ToLower(strings.TrimSpace(lang)) + if lang == "" { + lang = "unknown" + } + langDist[lang]++ + } + } + + formatRatio := make(map[string]float64, len(formatDist)) + typeRatio := make(map[string]float64, len(sourceDist)) + langRatio := make(map[string]float64, len(langDist)) + if total > 0 { + for k, v := range formatDist { + formatRatio[k] = float64(v) / float64(total) + } + for k, v := range sourceDist { + typeRatio[k] = float64(v) / float64(total) + } + for k, v := range langDist { + langRatio[k] = float64(v) / float64(total) + } + if len(langDist) == 0 { + langRatio["unknown"] = 1.0 + } + } + + ocrNeededRatio := 0.0 + tableLikeRatio := 0.0 + codeLikeRatio := 0.0 + duplicateRatio := 0.0 + pdfRatio := 0.0 + if total > 0 { + ocrNeededRatio = float64(ocr.Needed) / float64(total) + tableLikeRatio = 
float64(sourceDist["table_like"]) / float64(total) + codeLikeRatio = float64(sourceDist["code_like"]) / float64(total) + duplicateRatio = float64(dups) / float64(total) + pdfRatio = formatRatio["pdf"] + } + + metricsMap := map[string]any{ + "sample_total": total, + "format_dist": formatDist, + "format_ratio": formatRatio, + "type_dist": sourceDist, + "type_ratio": typeRatio, + "language_dist": func() dist { + if len(langDist) == 0 { + return dist{"unknown": total} + } + return langDist + }(), + "language_ratio": langRatio, + "ocr": ocr, + "ratios": map[string]any{ + "ocr_needed": ocrNeededRatio, + "table_like": tableLikeRatio, + "code_like": codeLikeRatio, + "duplicate": duplicateRatio, + "pdf": pdfRatio, + }, + "duplicate": map[string]any{ + "count": dups, + "ratio": duplicateRatio, + }, + } + + catalog := loadCatalog() + sceneKey, bundleKey, reason, risk, cost := recommendSceneBundle(total, ocrNeededRatio, tableLikeRatio, codeLikeRatio, pdfRatio) + sceneKey, bundleKey, sceneLabel, bundleLabel := constrainToCatalog(catalog, sceneKey, bundleKey) + + pkgKey, pkgReason, pkgRisk, pkgCost := recommendStrategyPackage(total, ocrNeededRatio, tableLikeRatio, codeLikeRatio, pdfRatio) + pkgKey, pkgLabel, pkgScenes, pkgProfile := constrainStrategyPackageToCatalog(catalog, pkgKey) + primaryScene := sceneKey + if len(pkgScenes) > 0 && !containsString(pkgScenes, primaryScene) { + primaryScene = pkgScenes[0] + } + primarySceneLabel := primaryScene + if catalog != nil { + if sc, ok := catalog.Scenes[primaryScene]; ok && strings.TrimSpace(sc.Label) != "" { + primarySceneLabel = sc.Label + } + } + + recs := make([]map[string]any, 0, 6) + recs = append(recs, map[string]any{ + "key": "scene_bundle", + "type": "scene_bundle", + "title": fmt.Sprintf("推荐:%s × %s", sceneLabel, bundleLabel), + "sceneKey": sceneKey, + "sceneLabel": sceneLabel, + "bundleKey": bundleKey, + "bundleLabel": bundleLabel, + "reason": reason, + "risk": risk, + "cost": cost, + }) + recs = append(recs, 
map[string]any{ + "key": "strategy_package", + "type": "strategy_package", + "title": fmt.Sprintf("推荐策略包:%s", pkgLabel), + "strategyPackageKey": pkgKey, + "strategyPackageLabel": pkgLabel, + "profileKey": pkgProfile, + "sceneKey": primaryScene, + "sceneLabel": primarySceneLabel, + "scenes": pkgScenes, + "reason": pkgReason, + "risk": pkgRisk, + "cost": pkgCost, + }) + + if total > 0 && ocrNeededRatio >= 0.3 { + recs = append(recs, map[string]any{ + "key": "enable_ocr", + "title": "扫描件占比偏高:建议启用 OCR", + "reason": map[string]any{ + "ocr_needed_ratio": ocrNeededRatio, + }, + "plugin": "com.powerx.plugin.data_forge", + "risk": "若不启用 OCR,检索召回与引用覆盖会显著下降。", + "cost": "OCR 会提升入库耗时与成本;可先对高价值文档启用。", + }) + } + if total > 0 && tableLikeRatio >= 0.3 { + recs = append(recs, map[string]any{ + "key": "table_heavy", + "title": "表格/结构化内容占比偏高:建议调整分块与索引策略", + "reason": map[string]any{ + "table_like_ratio": tableLikeRatio, + }, + "risk": "不做表格感知分块可能导致问答命中不稳定。", + "cost": "改用更细粒度 chunk 会增大向量写入量。", + }) + } + if total > 0 && codeLikeRatio >= 0.2 { + recs = append(recs, map[string]any{ + "key": "code_heavy", + "title": "代码/SQL 内容占比偏高:建议启用 code-aware chunking 与更高阈值检索", + "reason": map[string]any{ + "code_like_ratio": codeLikeRatio, + }, + "risk": "代码块过长会导致向量语义漂移,低阈值 top_k 容易引入噪声。", + "cost": "更细粒度切分会增加向量写入量;可通过索引分层缓解。", + }) + } + if total > 0 && langDist["zh"] > 0 && float64(langDist["zh"])/float64(total) >= 0.5 { + recs = append(recs, map[string]any{ + "key": "zh_corpus", + "title": "中文语料占比偏高:建议选用中文优化 embedding/rerank 配置", + "reason": map[string]any{ + "zh_ratio": float64(langDist["zh"]) / float64(total), + }, + "risk": "若使用非中文优化模型,可能出现召回下降与引用不稳定。", + "cost": "切换模型可能影响成本与延迟;建议先在 Playground 做 A/B 对比。", + }) + } + if total == 0 { + recs = append(recs, map[string]any{ + "key": "default", + "title": "暂无入库样本:建议先导入少量代表性文档后再运行体检", + "risk": "未进行体检前,默认按通用策略运行。", + }) + } + + metricsBytes, _ := json.Marshal(metricsMap) + recsBytes, _ := json.Marshal(recs) + return datatypes.JSON(metricsBytes), 
datatypes.JSON(recsBytes) +} + +func recommendSceneBundle(total int, ocrNeededRatio, tableLikeRatio, codeLikeRatio, pdfRatio float64) (sceneKey string, bundleKey string, reason map[string]any, risk string, cost string) { + sceneKey = "sop" + bundleKey = "p1_general" + reason = map[string]any{ + "signals": map[string]any{ + "sample_total": total, + "ocr_needed_ratio": ocrNeededRatio, + "table_like_ratio": tableLikeRatio, + "code_like_ratio": codeLikeRatio, + "pdf_ratio": pdfRatio, + }, + "summary": "默认推荐:SOP/制度 × P1 通用(企业默认)", + } + risk = "建议在 Playground 做一次 A/B 检索对比,确认召回与引用覆盖。" + cost = "P1 默认开 hybrid + rerank(轻量),成本/延迟适中。" + + if total > 0 && tableLikeRatio >= 0.3 { + sceneKey = "ledger_table" + bundleKey = "p2_high_accuracy" + reason["summary"] = "表格/结构化占比偏高:偏向台账场景,并建议证据优先策略包。" + risk = "结构化字段抽取不足会导致过滤/命中不稳定。" + cost = "行级/字段级切分会增加索引与存储成本。" + return + } + if total > 0 && codeLikeRatio >= 0.2 { + sceneKey = "sql_kg" + bundleKey = "p3_kg_strong" + reason["summary"] = "代码/SQL 占比偏高:偏向依赖关系查询,建议 KG 约束策略包。" + risk = "若缺少 KG/依赖抽取,容易出现‘看似相关但不可执行’的回答。" + cost = "KG 构建与维护有额外成本;建议先小范围试点。" + return + } + if total > 0 && ocrNeededRatio >= 0.3 { + sceneKey = "contract_quote" + bundleKey = "p2_high_accuracy" + reason["summary"] = "扫描件/图片占比偏高:偏向合同/报价类证据查找,建议证据优先策略包。" + risk = "未启用 OCR/证据链会导致引用覆盖下降与合规风险。" + cost = "OCR + 证据校验会提升入库与推理成本。" + return + } + if total > 0 && pdfRatio >= 0.6 { + sceneKey = "research_longdoc" + bundleKey = "p1_general" + reason["summary"] = "PDF 长文占比偏高:偏向论文/长报告,建议层次索引 + 通用策略包。" + risk = "若切分过粗,长文回答可能遗漏关键段落。" + cost = "层次索引需要额外摘要/结构化产物。" + return + } + return +} + +func recommendStrategyPackage(total int, ocrNeededRatio, tableLikeRatio, codeLikeRatio, pdfRatio float64) (pkgKey string, reason map[string]any, risk string, cost string) { + pkgKey = "H_fusion" + reason = map[string]any{ + "signals": map[string]any{ + "sample_total": total, + "ocr_needed_ratio": ocrNeededRatio, + "table_like_ratio": tableLikeRatio, + "code_like_ratio": codeLikeRatio, + 
"pdf_ratio": pdfRatio, + }, + "summary": "默认推荐:融合检索(H),平衡成本与命中率。", + } + risk = "建议在 Playground 做一次 A/B 检索对比,确认召回与引用覆盖。" + cost = "融合检索需要同时维护 dense + sparse 索引。" + + if total > 0 && codeLikeRatio >= 0.2 { + pkgKey = "K_kg" + reason["summary"] = "代码/SQL 占比偏高:优先推荐知识图谱(K)。" + risk = "若缺少实体/关系抽取与 KG 索引,回答会缺少依赖链路。" + cost = "KG 构建与维护有额外成本;建议先小范围试点。" + return + } + if total > 0 && tableLikeRatio >= 0.3 { + pkgKey = "D_doc_augmentation" + reason["summary"] = "表格/结构化占比偏高:优先推荐文档增强(D)。" + risk = "字段抽取不足会导致过滤/命中不稳定。" + cost = "离线增强会增加入库耗时与存储成本。" + return + } + if total > 0 && ocrNeededRatio >= 0.3 { + pkgKey = "O_crag" + reason["summary"] = "扫描件/图片占比偏高:优先推荐纠错(O)。" + risk = "未启用 OCR/证据链会导致引用覆盖下降与合规风险。" + cost = "证据校验会提升检索与推理成本。" + return + } + if total > 0 && pdfRatio >= 0.6 { + pkgKey = "B_semantic_chunking" + reason["summary"] = "PDF 长文占比偏高:优先推荐语义切块(B)。" + risk = "若切分过粗,长文回答可能遗漏关键段落。" + cost = "语义边界检测会增加入库耗时。" + return + } + return +} + +func loadCatalog() *strategy_catalog.Catalog { + path := strings.TrimSpace(os.Getenv("PX_SCENE_STRATEGY_CATALOG_PATH")) + if path == "" { + path = defaultSceneStrategyCatalogPath + } + loader := strategy_catalog.NewLoader(path) + cat, err := loader.Load() + if err != nil { + return nil + } + return cat +} + +func constrainToCatalog(cat *strategy_catalog.Catalog, sceneKey, bundleKey string) (outSceneKey, outBundleKey, sceneLabel, bundleLabel string) { + outSceneKey = strings.TrimSpace(sceneKey) + outBundleKey = strings.TrimSpace(bundleKey) + if outSceneKey == "" { + outSceneKey = "sop" + } + if outBundleKey == "" { + outBundleKey = "p1_general" + } + if cat == nil { + return outSceneKey, outBundleKey, outSceneKey, outBundleKey + } + + sc, ok := cat.Scenes[outSceneKey] + if !ok { + outSceneKey = "sop" + sc = cat.Scenes[outSceneKey] + } + + // bundle 必须存在 + if _, ok := cat.Bundles[outBundleKey]; !ok { + outBundleKey = sc.DefaultBundle + } + + // bundle 必须在 allowed 内 + if len(sc.AllowedBundles) > 0 { + allowed := false + for _, k := range 
sc.AllowedBundles { + if k == outBundleKey { + allowed = true + break + } + } + if !allowed { + if sc.DefaultBundle != "" { + outBundleKey = sc.DefaultBundle + } else { + outBundleKey = sc.AllowedBundles[0] + } + } + } + + sceneLabel = sc.Label + if sceneLabel == "" { + sceneLabel = outSceneKey + } + bundleLabel = outBundleKey + if b, ok := cat.Bundles[outBundleKey]; ok { + if strings.TrimSpace(b.Label) != "" { + bundleLabel = b.Label + } + } + return +} + +func constrainStrategyPackageToCatalog(cat *strategy_catalog.Catalog, pkgKey string) (outKey, label string, scenes []string, profileKey string) { + outKey = strings.TrimSpace(pkgKey) + if outKey == "" { + outKey = "H_fusion" + } + if cat == nil { + return outKey, outKey, nil, "" + } + if _, ok := cat.StrategyPackages[outKey]; !ok { + outKey = "H_fusion" + } + pkg, ok := cat.StrategyPackages[outKey] + if !ok { + return outKey, outKey, nil, "" + } + label = pkg.Label + if strings.TrimSpace(label) == "" { + label = outKey + } + profileKey = strings.TrimSpace(pkg.RecommendedProfileKey) + if profileKey == "" { + profileKey = "p1_general" + } + scenes = filterSceneKeys(cat, pkg.RecommendedScenes) + return outKey, label, scenes, profileKey +} + +func filterSceneKeys(cat *strategy_catalog.Catalog, scenes []string) []string { + out := make([]string, 0, len(scenes)) + if cat == nil { + return out + } + for _, k := range scenes { + if _, ok := cat.Scenes[k]; ok { + out = append(out, k) + } + } + return out +} + +func containsString(items []string, target string) bool { + for _, item := range items { + if item == target { + return true + } + } + return false +} + +func formatCategory(format string) string { + switch strings.ToLower(strings.TrimSpace(format)) { + case "xlsx", "csv", "table": + return "table_like" + case "sql": + return "code_like" + case "image": + return "image" + case "pdf", "docx", "markdown", "html": + return "doc" + default: + return "other" + } +} diff --git 
a/backend/internal/service/knowledge_space/corpus_check/metrics_test.go b/backend/internal/service/knowledge_space/corpus_check/metrics_test.go new file mode 100644 index 00000000..e652060c --- /dev/null +++ b/backend/internal/service/knowledge_space/corpus_check/metrics_test.go @@ -0,0 +1,29 @@ +package corpus_check + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRecommendStrategyPackage_KG(t *testing.T) { + key, _, _, _ := recommendStrategyPackage(10, 0.0, 0.0, 0.35, 0.1) + require.Equal(t, "K_kg", key) +} + +func TestRecommendStrategyPackage_CRAG(t *testing.T) { + key, _, _, _ := recommendStrategyPackage(10, 0.45, 0.0, 0.0, 0.1) + require.Equal(t, "O_crag", key) +} + +func TestConstrainStrategyPackageToCatalog(t *testing.T) { + t.Setenv("PX_SCENE_STRATEGY_CATALOG_PATH", filepath.Join("..", "..", "..", "..", "config", "knowledge", "scene_strategy_catalog.yaml")) + cat := loadCatalog() + require.NotNil(t, cat) + + key, _, scenes, profile := constrainStrategyPackageToCatalog(cat, "K_kg") + require.Equal(t, "K_kg", key) + require.Contains(t, scenes, "sql_kg") + require.Equal(t, "p3_kg_strong", profile) +} diff --git a/backend/internal/service/knowledge_space/corpus_check_service.go b/backend/internal/service/knowledge_space/corpus_check_service.go new file mode 100644 index 00000000..0a638555 --- /dev/null +++ b/backend/internal/service/knowledge_space/corpus_check_service.go @@ -0,0 +1,94 @@ +package knowledge_space + +import ( + "context" + "strings" + "time" + + "github.com/google/uuid" + "gorm.io/datatypes" + "gorm.io/gorm" + + workflow "github.com/ArtisanCloud/PowerX/internal/workflow/knowledge_space" + models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" + repo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/knowledge" +) + +type CorpusCheckService struct { + db *gorm.DB + pipeline workflow.CorpusCheckPipeline + clock func() time.Time +} + +type 
CorpusCheckServiceOptions struct { + DB *gorm.DB + Pipeline workflow.CorpusCheckPipeline + Clock func() time.Time +} + +func NewCorpusCheckService(opts CorpusCheckServiceOptions) *CorpusCheckService { + if opts.DB == nil { + panic("corpus check service requires db") + } + if opts.Clock == nil { + opts.Clock = time.Now + } + return &CorpusCheckService{db: opts.DB, pipeline: opts.Pipeline, clock: opts.Clock} +} + +// Start 创建 Job 记录并投递异步事件。 +func (s *CorpusCheckService) Start(ctx context.Context, tenantUUID string, spaceUUID uuid.UUID, requestedBy string) (*models.CorpusCheckJob, error) { + tenantUUID = strings.ToLower(strings.TrimSpace(tenantUUID)) + requestedBy = strings.TrimSpace(requestedBy) + if tenantUUID == "" || spaceUUID == uuid.Nil { + return nil, ErrInvalidInput + } + now := s.clock().UTC() + + jobs := repo.NewCorpusCheckJobRepository(s.db) + job := &models.CorpusCheckJob{ + TenantUUID: tenantUUID, + SpaceUUID: spaceUUID, + Status: models.CorpusCheckStatusPending, + SampleJobUUIDs: datatypes.JSON([]byte("[]")), + Metrics: datatypes.JSON([]byte("{}")), + Recommendations: datatypes.JSON([]byte("[]")), + TraceID: uuid.NewString(), + ErrorReason: "", + StartedAt: nil, + CompletedAt: nil, + } + job.UUID = uuid.New() + job.CreatedAt = now + job.UpdatedAt = now + + created, err := jobs.Create(ctx, job) + if err != nil { + return nil, err + } + + if s.pipeline == nil { + return created, nil + } + if _, err := s.pipeline.Schedule(ctx, workflow.CorpusCheckInput{ + JobUUID: created.UUID, + SpaceID: spaceUUID, + RequestedBy: requestedBy, + }); err != nil { + failAt := s.clock().UTC() + created.Status = models.CorpusCheckStatusFailed + created.ErrorReason = err.Error() + created.CompletedAt = &failAt + created.UpdatedAt = failAt + _, _ = jobs.Update(ctx, created) + return created, err + } + return created, nil +} + +func (s *CorpusCheckService) Get(ctx context.Context, jobUUID uuid.UUID) (*models.CorpusCheckJob, error) { + if jobUUID == uuid.Nil { + return nil, 
ErrInvalidInput + } + return repo.NewCorpusCheckJobRepository(s.db).FindByUUID(ctx, jobUUID) +} diff --git a/backend/internal/service/knowledge_space/dbutil.go b/backend/internal/service/knowledge_space/dbutil.go new file mode 100644 index 00000000..3762288e --- /dev/null +++ b/backend/internal/service/knowledge_space/dbutil.go @@ -0,0 +1,26 @@ +package knowledge_space + +import "strings" + +func isMissingTableError(err error) bool { + if err == nil { + return false + } + msg := strings.ToLower(err.Error()) + return strings.Contains(msg, "does not exist") || + strings.Contains(msg, "undefined_table") || + strings.Contains(msg, "no such table") || + strings.Contains(msg, "unknown table") +} + +func isUndefinedTableError(err error) bool { + if err == nil { + return false + } + msg := strings.ToLower(err.Error()) + if !isMissingTableError(err) { + return false + } + // best-effort match for Postgres missing relation errors. + return strings.Contains(msg, "knowledge_chunks") || strings.Contains(msg, "knowledge_chunk_links") +} diff --git a/backend/internal/service/knowledge_space/decay_guard/dispatcher.go b/backend/internal/service/knowledge_space/decay_guard/dispatcher.go new file mode 100644 index 00000000..963f0398 --- /dev/null +++ b/backend/internal/service/knowledge_space/decay_guard/dispatcher.go @@ -0,0 +1,67 @@ +package decay_guard + +import ( + "context" + "strings" + "time" + + models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" + "github.com/ArtisanCloud/PowerX/pkg/event_bus" +) + +type eventBusDispatcher struct { + bus event_bus.EventBus + dispatchTopic string + closeTopic string +} + +func newEventBusDispatcher(bus event_bus.EventBus, dispatchTopic, closeTopic string) TaskDispatcher { + if bus == nil { + return nil + } + dispatchTopic = strings.TrimSpace(dispatchTopic) + if dispatchTopic == "" { + dispatchTopic = "knowledge.decay.task.dispatch" + } + closeTopic = strings.TrimSpace(closeTopic) + if closeTopic == "" { + 
closeTopic = "knowledge.decay.task.close" + } + return &eventBusDispatcher{bus: bus, dispatchTopic: dispatchTopic, closeTopic: closeTopic} +} + +func (d *eventBusDispatcher) Dispatch(ctx context.Context, task *models.DecayTask) error { + if d == nil || d.bus == nil || task == nil { + return nil + } + payload := map[string]any{ + "task_id": task.UUID.String(), + "space_id": task.SpaceUUID.String(), + "category": task.Category, + "severity": task.Severity, + "status": task.Status, + "sla_due_at": task.SLADueAt.UTC().Format(time.RFC3339Nano), + "detected_at": task.DetectedAt.UTC().Format(time.RFC3339Nano), + "assigned_to": strings.TrimSpace(task.AssignedTo), + "description": strings.TrimSpace(task.Resolution), + "requires_approval": true, + } + d.bus.Publish(d.dispatchTopic, payload, ctx) + return nil +} + +func (d *eventBusDispatcher) Close(ctx context.Context, task *models.DecayTask) error { + if d == nil || d.bus == nil || task == nil { + return nil + } + payload := map[string]any{ + "task_id": task.UUID.String(), + "space_id": task.SpaceUUID.String(), + "status": task.Status, + "false_positive": task.FalsePositive, + "resolved_at": time.Now().UTC().Format(time.RFC3339Nano), + } + d.bus.Publish(d.closeTopic, payload, ctx) + return nil +} + diff --git a/backend/internal/service/knowledge_space/decay_guard/service.go b/backend/internal/service/knowledge_space/decay_guard/service.go index 4946772f..22d6b015 100644 --- a/backend/internal/service/knowledge_space/decay_guard/service.go +++ b/backend/internal/service/knowledge_space/decay_guard/service.go @@ -14,6 +14,8 @@ import ( dbm "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/audit" models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" repo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/knowledge" + "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" + "github.com/ArtisanCloud/PowerX/pkg/event_bus" pxlog 
"github.com/ArtisanCloud/PowerX/pkg/utils/logger" "github.com/google/uuid" "gorm.io/gorm" @@ -32,12 +34,33 @@ type Threshold struct { Reason string `json:"reason" yaml:"reason"` } +type RunScanInput struct { + SpaceID uuid.UUID + Detected int + Category string + Severity string + Reason string + AssignedTo string + RequestedBy string +} + +type RestoreInput struct { + TaskID uuid.UUID + Notes string + FalsePositive bool + ApprovedBy string + Reason string +} + type Options struct { DB *gorm.DB Instrumentation *instrumentation.Instrumentation MetricsWriter *instrumentation.DecayMetricsWriter ThresholdsPath string Dispatcher TaskDispatcher + EventBus event_bus.EventBus + DispatchTopic string + CloseTopic string Clock func() time.Time } @@ -69,6 +92,9 @@ func NewService(opts Options) *Service { } thresholds := loadThresholds(opts.ThresholdsPath) d := opts.Dispatcher + if d == nil && opts.EventBus != nil { + d = newEventBusDispatcher(opts.EventBus, opts.DispatchTopic, opts.CloseTopic) + } if d == nil { d = noopDispatcher{} } @@ -102,19 +128,26 @@ func loadThresholds(path string) []Threshold { } func (s *Service) RunScan(ctx context.Context, spaceID uuid.UUID, detected int) ([]*models.DecayTask, error) { - if spaceID == uuid.Nil || detected <= 0 { + return s.RunScanWithInput(ctx, RunScanInput{SpaceID: spaceID, Detected: detected}) +} + +func (s *Service) RunScanWithInput(ctx context.Context, input RunScanInput) ([]*models.DecayTask, error) { + if input.SpaceID == uuid.Nil || input.Detected <= 0 { return nil, ErrInvalidInput } repoTask := repo.NewDecayTaskRepository(s.db) repoSpace := repo.NewKnowledgeSpaceRepository(s.db) - space, err := repoSpace.FindByUUID(ctx, spaceID) + space, err := repoSpace.FindByUUID(ctx, input.SpaceID) if err != nil { return nil, err } if space == nil { return nil, gorm.ErrRecordNotFound } - threshold := s.resolveThreshold(space) + if !tenantMatch(ctx, space.TenantUUID) { + return nil, gorm.ErrRecordNotFound + } + threshold := 
s.resolveThreshold(input.Category, input.Severity) if threshold.Category == "" { threshold.Category = "coverage" } @@ -123,15 +156,21 @@ func (s *Service) RunScan(ctx context.Context, spaceID uuid.UUID, detected int) } detectedAt := s.clock().UTC() sla := detectedAt.Add(s.slaDuration(threshold.SLAHours)) - tasks := make([]*models.DecayTask, 0, detected) - for i := 0; i < detected; i++ { + tasks := make([]*models.DecayTask, 0, input.Detected) + for i := 0; i < input.Detected; i++ { task := &models.DecayTask{ - SpaceUUID: spaceID, + SpaceUUID: input.SpaceID, Category: threshold.Category, Severity: threshold.Severity, Status: "open", DetectedAt: detectedAt, SLADueAt: sla, + AssignedTo: strings.TrimSpace(input.AssignedTo), + } + if strings.TrimSpace(input.Reason) != "" { + task.Resolution = strings.TrimSpace(input.Reason) + } else if strings.TrimSpace(threshold.Reason) != "" { + task.Resolution = strings.TrimSpace(threshold.Reason) } created, err := repoTask.Create(ctx, task) if err != nil { @@ -145,7 +184,7 @@ func (s *Service) RunScan(ctx context.Context, spaceID uuid.UUID, detected int) if len(tasks) == 0 { return tasks, nil } - backlog := s.countBacklog(ctx, repoTask, spaceID) + backlog := s.countBacklog(ctx, repoTask, input.SpaceID) s.recordMetrics(ctx, instrumentation.DecayMetricsSnapshot{ Detected: len(tasks), Backlog: backlog, @@ -156,29 +195,55 @@ func (s *Service) RunScan(ctx context.Context, spaceID uuid.UUID, detected int) "task_count": len(tasks), "severity": threshold.Severity, "category": threshold.Category, - "reason": threshold.Reason, + "reason": firstNonEmpty(input.Reason, threshold.Reason), + "assigned_to": strings.TrimSpace(input.AssignedTo), + "requestedBy": strings.TrimSpace(input.RequestedBy), }) return tasks, nil } func (s *Service) Restore(ctx context.Context, taskID uuid.UUID, notes string, falsePositive bool) (*models.DecayTask, error) { - if taskID == uuid.Nil { + return s.RestoreWithInput(ctx, RestoreInput{ + TaskID: taskID, + Notes: 
notes, + FalsePositive: falsePositive, + }) +} + +func (s *Service) RestoreWithInput(ctx context.Context, input RestoreInput) (*models.DecayTask, error) { + if input.TaskID == uuid.Nil { return nil, ErrInvalidInput } repoTask := repo.NewDecayTaskRepository(s.db) repoSpace := repo.NewKnowledgeSpaceRepository(s.db) - task, err := repoTask.GetByUUID(ctx, taskID.String(), nil) + task, err := repoTask.GetByUUID(ctx, input.TaskID.String(), nil) if err != nil { return nil, err } if task == nil { return nil, ErrTaskNotFound } + space, err := repoSpace.FindByUUID(ctx, task.SpaceUUID) + if err != nil { + return nil, err + } + if space == nil || !tenantMatch(ctx, space.TenantUUID) { + return nil, ErrTaskNotFound + } + + approvedBy := strings.TrimSpace(input.ApprovedBy) + if approvedBy == "" { + approvedBy = strings.TrimSpace(reqctx.GetSubject(ctx)) + } + reason := strings.TrimSpace(input.Reason) + if reason == "" { + reason = strings.TrimSpace(input.Notes) + } task.Status = "closed" now := s.clock() task.ResolvedAt = &now - task.Resolution = notes - task.FalsePositive = falsePositive + task.Resolution = strings.TrimSpace(input.Notes) + task.FalsePositive = input.FalsePositive updates := map[string]any{ "status": task.Status, "resolved_at": task.ResolvedAt, @@ -189,7 +254,7 @@ func (s *Service) Restore(ctx context.Context, taskID uuid.UUID, notes string, f return nil, err } fp := 0 - if falsePositive { + if task.FalsePositive { fp = 1 } if err := s.withDispatcher(func(d TaskDispatcher) error { return d.Close(ctx, task) }); err != nil { @@ -201,11 +266,12 @@ func (s *Service) Restore(ctx context.Context, taskID uuid.UUID, notes string, f Backlog: len(openTasks), AverageFillHours: s.fillHours(task, now), }) - space, _ := repoSpace.FindByUUID(ctx, task.SpaceUUID) s.emitAudit(ctx, space, "knowledge.decay.restore", map[string]any{ "task_id": task.UUID.String(), "false_positive": task.FalsePositive, - "notes": notes, + "notes": strings.TrimSpace(input.Notes), + "approved_by": 
approvedBy, + "reason": reason, }) return task, nil } @@ -214,14 +280,45 @@ func (s *Service) ListOpen(ctx context.Context, spaceID uuid.UUID) ([]*models.De if spaceID == uuid.Nil { return nil, ErrInvalidInput } + repoSpace := repo.NewKnowledgeSpaceRepository(s.db) + space, err := repoSpace.FindByUUID(ctx, spaceID) + if err != nil { + return nil, err + } + if space == nil || !tenantMatch(ctx, space.TenantUUID) { + return []*models.DecayTask{}, nil + } repoTask := repo.NewDecayTaskRepository(s.db) return repoTask.ListOpenBySpace(ctx, spaceID) } -func (s *Service) resolveThreshold(_ *models.KnowledgeSpace) Threshold { +func (s *Service) ListOpenByTenant(ctx context.Context, tenantUUID uuid.UUID, severity string) ([]*models.DecayTask, error) { + if tenantUUID == uuid.Nil { + return nil, ErrInvalidInput + } + if !tenantMatch(ctx, tenantUUID.String()) { + return []*models.DecayTask{}, nil + } + repoTask := repo.NewDecayTaskRepository(s.db) + return repoTask.ListOpenByTenant(ctx, tenantUUID.String(), severity) +} + +func (s *Service) resolveThreshold(category, severity string) Threshold { if len(s.thresholds) == 0 { return Threshold{Category: "coverage", Severity: "medium", SLAHours: 24 * 7} } + category = strings.TrimSpace(category) + severity = strings.TrimSpace(severity) + for _, th := range s.thresholds { + if category != "" && strings.EqualFold(th.Category, category) { + return th + } + } + for _, th := range s.thresholds { + if severity != "" && strings.EqualFold(th.Severity, severity) { + return th + } + } return s.thresholds[0] } @@ -248,6 +345,7 @@ func (s *Service) recordMetrics(ctx context.Context, snapshot instrumentation.De if snapshot.RecordedAt.IsZero() { snapshot.RecordedAt = s.clock().UTC() } + snapshot.EnsureMetrics() if err := s.metrics.Store(snapshot); err != nil { s.log(ctx).WarnF(ctx, "decay guard: write metrics failed: %v", err) } @@ -291,3 +389,24 @@ func (s *Service) log(ctx context.Context) *pxlog.Logger { } return 
pxlog.GetGlobalLogger().WithContext(ctx) } + +func tenantMatch(ctx context.Context, tenantUUID string) bool { + tenantUUID = strings.ToLower(strings.TrimSpace(tenantUUID)) + if tenantUUID == "" { + return true + } + scoped := strings.ToLower(strings.TrimSpace(reqctx.GetTenantUUID(ctx))) + if scoped == "" { + return true + } + return scoped == tenantUUID +} + +func firstNonEmpty(values ...string) string { + for _, v := range values { + if strings.TrimSpace(v) != "" { + return strings.TrimSpace(v) + } + } + return "" +} diff --git a/backend/internal/service/knowledge_space/delta/service.go b/backend/internal/service/knowledge_space/delta/service.go index 458c2493..64a7bb32 100644 --- a/backend/internal/service/knowledge_space/delta/service.go +++ b/backend/internal/service/knowledge_space/delta/service.go @@ -2,9 +2,13 @@ package delta import ( "context" + "crypto/sha256" "encoding/json" + "encoding/hex" "errors" + "fmt" "os" + "path/filepath" "strings" "time" @@ -23,6 +27,7 @@ var ( ErrUnknownSource = errors.New("delta: unknown source") ErrSpaceNotFound = errors.New("delta: space not found") ErrJobNotFound = errors.New("delta: job not found") + ErrJobConflict = errors.New("delta: job conflict") ErrPartialReleaseDenied = errors.New("delta: partial release denied") ) @@ -81,6 +86,16 @@ type RollbackInput struct { Reason string } +type deltaPackage struct { + SpaceID string `json:"spaceId"` + Source string `json:"source"` + BaseChunkManifestURI string `json:"baseChunkManifestUri"` + CandidateChunkManifestURI string `json:"candidateChunkManifestUri"` + CandidateVectorManifestURI string `json:"candidateVectorManifestUri"` + PackageURI string `json:"packageUri"` + Notes string `json:"notes"` +} + // NewService constructs a delta orchestrator instance. 
func NewService(opts Options) *Service { if opts.DB == nil { @@ -123,8 +138,10 @@ func loadSources(path string) map[string]DeltaSource { if src.Name == "" { continue } - if !src.Enabled && src.Enabled != false { - src.Enabled = true + // JSON-as-YAML does not distinguish "missing" vs "false"; default to enabled=true + // unless explicitly set in config payload. + if !src.Enabled { + // keep as-is } result[strings.ToLower(src.Name)] = src } @@ -163,29 +180,95 @@ func (s *Service) StartJob(ctx context.Context, in StartJobInput) (*models.Delta if space == nil { return nil, ErrSpaceNotFound } + source := strings.ToLower(strings.TrimSpace(in.Source)) + pkgURI := strings.TrimSpace(in.PackageURI) + if pkgURI == "" { + if src, ok := s.sources[source]; ok { + pkgURI = strings.TrimSpace(src.Endpoint) + } + } + + if existing, err := s.findConflictingJob(ctx, in.SpaceID, source, pkgURI); err != nil { + return nil, err + } else if existing != nil { + return nil, ErrJobConflict + } + + baseManifest := "" + candidateManifest := "" + pkg, _ := s.tryLoadDeltaPackage(pkgURI) + if pkg != nil { + if strings.TrimSpace(pkg.Source) != "" { + source = strings.ToLower(strings.TrimSpace(pkg.Source)) + } + baseManifest = strings.TrimSpace(pkg.BaseChunkManifestURI) + candidateManifest = strings.TrimSpace(pkg.CandidateChunkManifestURI) + if strings.TrimSpace(pkg.PackageURI) != "" { + pkgURI = strings.TrimSpace(pkg.PackageURI) + } + if strings.TrimSpace(pkg.Notes) != "" && strings.TrimSpace(in.Notes) == "" { + in.Notes = strings.TrimSpace(pkg.Notes) + } + } + if baseManifest == "" { + baseManifest = findLatestChunkManifestURI(s.db, space.UUID) + } + + diff := s.computeChunkDiff(baseManifest, candidateManifest) report := map[string]any{ - "source": in.Source, - "packageUri": in.PackageURI, - "notes": in.Notes, + "spaceId": space.UUID.String(), + "tenantUuid": space.TenantUUID, + "source": source, + "packageUri": pkgURI, + "notes": strings.TrimSpace(in.Notes), "generatedAt": 
s.clock().UTC().Format(time.RFC3339Nano), - } + "base": map[string]any{ + "chunkManifestUri": baseManifest, + }, + "candidate": map[string]any{ + "chunkManifestUri": candidateManifest, + }, + "diff": diff, + } + report["payloadHash"] = computePayloadHash(report) payload, _ := json.Marshal(report) + + now := s.clock().UTC() job := &models.DeltaJob{ SpaceUUID: space.UUID, - Source: strings.ToLower(strings.TrimSpace(in.Source)), - PackageURI: in.PackageURI, + Source: source, + PackageURI: pkgURI, Status: "generated", ApprovalState: "pending", DiffAccuracy: clampAccuracy(in.DiffAccuracy), Report: datatypes.JSON(payload), - Notes: in.Notes, + Notes: strings.TrimSpace(in.Notes), } - created, err := repo.NewDeltaJobRepository(s.db).Create(ctx, job) + job.UUID = uuid.New() + job.CreatedAt = now + job.UpdatedAt = now + + err = s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + if err := tx.Create(job).Error; err != nil { + return err + } + if err := s.insertAuditTrail(ctx, tx, space.UUID, "delta.job.started", strings.TrimSpace(in.RequestedBy), map[string]any{ + "job_id": job.UUID.String(), + "source": source, + "package_uri": pkgURI, + "payload_hash": report["payloadHash"], + "diff_summary": diff, + "partial_rules": len(s.partials), + }); err != nil { + return err + } + return nil + }) if err != nil { return nil, err } - s.recordMetrics(created) - return created, nil + s.recordMetrics(job) + return job, nil } func (s *Service) GetReport(ctx context.Context, jobID uuid.UUID) (*models.DeltaJob, error) { @@ -203,8 +286,8 @@ func (s *Service) Publish(ctx context.Context, in PublishJobInput) (*models.Delt if in.JobID == uuid.Nil || strings.TrimSpace(in.Decision) == "" { return nil, ErrInvalidInput } - repo := repo.NewDeltaJobRepository(s.db) - job, err := repo.FindByUUID(ctx, in.JobID) + jobsRepo := repo.NewDeltaJobRepository(s.db) + job, err := jobsRepo.FindByUUID(ctx, in.JobID) if err != nil { return nil, err } @@ -243,7 +326,32 @@ func (s *Service) Publish(ctx 
context.Context, in PublishJobInput) (*models.Delt } job.DiffAccuracy = clampAccuracy(in.DiffAccuracy) job.PartialRelease = job.PartialRelease || in.PartialRelease - if _, err := repo.Update(ctx, job); err != nil { + space, err := repo.NewKnowledgeSpaceRepository(s.db).FindByUUID(ctx, job.SpaceUUID) + if err != nil { + return nil, err + } + if space == nil { + return nil, ErrSpaceNotFound + } + err = s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + if err := tx.Save(job).Error; err != nil { + return err + } + action := "delta.job.approved" + if status == "published" { + action = "delta.job.published" + } + if status == "rejected" { + action = "delta.job.rejected" + } + return s.insertAuditTrail(ctx, tx, job.SpaceUUID, action, strings.TrimSpace(in.ApprovedBy), map[string]any{ + "job_id": job.UUID.String(), + "decision": decision, + "partial_release": job.PartialRelease, + "diff_accuracy": job.DiffAccuracy, + }) + }) + if err != nil { return nil, err } s.recordMetrics(job) @@ -254,18 +362,35 @@ func (s *Service) Rollback(ctx context.Context, in RollbackInput) (*models.Delta if in.JobID == uuid.Nil { return nil, ErrInvalidInput } - repo := repo.NewDeltaJobRepository(s.db) - job, err := repo.FindByUUID(ctx, in.JobID) + jobsRepo := repo.NewDeltaJobRepository(s.db) + job, err := jobsRepo.FindByUUID(ctx, in.JobID) if err != nil { return nil, err } if job == nil { return nil, ErrJobNotFound } - job.Status = "rolled_back" - job.RollbackCount++ - job.Notes = strings.TrimSpace(in.Reason) - if _, err := repo.Update(ctx, job); err != nil { + space, err := repo.NewKnowledgeSpaceRepository(s.db).FindByUUID(ctx, job.SpaceUUID) + if err != nil { + return nil, err + } + if space == nil { + return nil, ErrSpaceNotFound + } + err = s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + job.Status = "rolled_back" + job.RollbackCount++ + job.Notes = strings.TrimSpace(in.Reason) + if err := tx.Save(job).Error; err != nil { + return err + } + return 
s.insertAuditTrail(ctx, tx, job.SpaceUUID, "delta.job.rolled_back", strings.TrimSpace(in.RequestedBy), map[string]any{ + "job_id": job.UUID.String(), + "reason": strings.TrimSpace(in.Reason), + "rollback_count": job.RollbackCount, + }) + }) + if err != nil { return nil, err } s.recordMetrics(job) @@ -349,4 +474,248 @@ func clampAccuracy(val float64) float64 { return val } -// Audit helpers can be added later (no-op for now). +func (s *Service) findConflictingJob(ctx context.Context, spaceID uuid.UUID, source, pkgURI string) (*models.DeltaJob, error) { + if spaceID == uuid.Nil || strings.TrimSpace(source) == "" || strings.TrimSpace(pkgURI) == "" { + return nil, nil + } + var job models.DeltaJob + err := s.db.WithContext(ctx). + Where("space_uuid = ? AND source = ? AND package_uri = ? AND status IN ?", spaceID, strings.ToLower(strings.TrimSpace(source)), strings.TrimSpace(pkgURI), []string{"generated", "published"}). + Order("id DESC"). + Limit(1). + Take(&job).Error + if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, nil + } + return nil, err + } + return &job, nil +} + +func (s *Service) tryLoadDeltaPackage(uri string) (*deltaPackage, bool) { + path, ok := resolveURIToLocalPath(uri) + if !ok { + return nil, false + } + data, err := os.ReadFile(path) + if err != nil { + return nil, false + } + var pkg deltaPackage + if err := json.Unmarshal(data, &pkg); err != nil { + return nil, false + } + return &pkg, true +} + +type chunkRecord struct { + ID string + Hash string + Kind string +} + +func (s *Service) computeChunkDiff(baseURI string, candidateURI string) map[string]any { + base := loadChunkRecords(baseURI) + candidate := loadChunkRecords(candidateURI) + added, removed, changed := diffChunks(base, candidate) + return map[string]any{ + "base_count": len(base), + "candidate_count": len(candidate), + "added": len(added), + "removed": len(removed), + "changed": len(changed), + "added_ids": added, + "removed_ids": removed, + "changed_ids": 
changed, + } +} + +func diffChunks(base []chunkRecord, candidate []chunkRecord) (added []string, removed []string, changed []string) { + baseMap := make(map[string]chunkRecord, len(base)) + for _, r := range base { + if strings.TrimSpace(r.ID) == "" { + continue + } + baseMap[r.ID] = r + } + candidateMap := make(map[string]chunkRecord, len(candidate)) + for _, r := range candidate { + if strings.TrimSpace(r.ID) == "" { + continue + } + candidateMap[r.ID] = r + } + for id, r := range candidateMap { + if b, ok := baseMap[id]; !ok { + added = append(added, id) + } else if strings.TrimSpace(r.Hash) != "" && strings.TrimSpace(b.Hash) != "" && r.Hash != b.Hash { + changed = append(changed, id) + } + } + for id := range baseMap { + if _, ok := candidateMap[id]; !ok { + removed = append(removed, id) + } + } + return added, removed, changed +} + +func loadChunkRecords(uri string) []chunkRecord { + path, ok := resolveURIToLocalPath(uri) + if !ok { + return nil + } + data, err := os.ReadFile(path) + if err != nil { + return nil + } + var manifest struct { + Chunks []struct { + ID string `json:"ID"` + IDAlt string `json:"id"` + Content string `json:"Content"` + ContentAlt string `json:"content"` + Kind string `json:"Kind"` + KindAlt string `json:"kind"` + } `json:"chunks"` + } + if err := json.Unmarshal(data, &manifest); err != nil { + return nil + } + out := make([]chunkRecord, 0, len(manifest.Chunks)) + for _, c := range manifest.Chunks { + id := strings.TrimSpace(c.ID) + if id == "" { + id = strings.TrimSpace(c.IDAlt) + } + content := c.Content + if strings.TrimSpace(content) == "" { + content = c.ContentAlt + } + kind := strings.TrimSpace(c.Kind) + if kind == "" { + kind = strings.TrimSpace(c.KindAlt) + } + out = append(out, chunkRecord{ + ID: id, + Hash: computeStringHash(content), + Kind: kind, + }) + } + return out +} + +func resolveURIToLocalPath(uri string) (string, bool) { + uri = strings.TrimSpace(uri) + if uri == "" { + return "", false + } + if 
strings.HasPrefix(uri, "file://") { + p := strings.TrimPrefix(uri, "file://") + return p, true + } + // Allow direct paths for test scripts. + if strings.HasPrefix(uri, "/") || strings.HasPrefix(uri, "./") || strings.HasPrefix(uri, "../") { + return uri, true + } + if strings.HasPrefix(uri, "minio://") || strings.HasPrefix(uri, "s3://") { + trimmed := uri + if strings.HasPrefix(trimmed, "minio://") { + trimmed = strings.TrimPrefix(trimmed, "minio://") + } else { + trimmed = strings.TrimPrefix(trimmed, "s3://") + } + parts := strings.SplitN(trimmed, "/", 2) + if len(parts) != 2 { + return "", false + } + bucket := parts[0] + key := parts[1] + candidates := []string{ + filepath.Join("backend", "reports", "_state", "knowledge-artifacts"), + filepath.Join("tmp", "knowledge-artifacts"), + } + for _, baseDir := range candidates { + path := filepath.Join(baseDir, bucket, filepath.FromSlash(key)) + if _, err := os.Stat(path); err == nil { + return path, true + } + } + return filepath.Join(candidates[0], bucket, filepath.FromSlash(key)), true + } + return "", false +} + +func computeStringHash(content string) string { + content = strings.TrimSpace(content) + if content == "" { + return "" + } + sum := sha256.Sum256([]byte(content)) + return hex.EncodeToString(sum[:]) +} + +func computePayloadHash(payload map[string]any) string { + data, _ := json.Marshal(payload) + sum := sha256.Sum256(data) + return hex.EncodeToString(sum[:]) +} + +func (s *Service) insertAuditTrail(ctx context.Context, tx *gorm.DB, spaceID uuid.UUID, action, actor string, payload map[string]any) error { + if tx == nil || spaceID == uuid.Nil || strings.TrimSpace(action) == "" { + return nil + } + actor = strings.TrimSpace(actor) + if actor == "" { + actor = "system" + } + entry := &models.AuditTrailEntry{ + SpaceUUID: spaceID, + Action: action, + Actor: actor, + Metadata: mustJSON(payload), + OccurredAt: s.clock().UTC(), + RollbackToken: fmt.Sprintf("delta-%s", spaceID.String()), + PayloadHash: 
computePayloadHash(payload), + } + if err := tx.Create(entry).Error; err != nil { + return err + } + return nil +} + +func mustJSON(v any) datatypes.JSON { + if v == nil { + return datatypes.JSON([]byte("{}")) + } + buf, err := json.Marshal(v) + if err != nil || len(buf) == 0 { + return datatypes.JSON([]byte("{}")) + } + return datatypes.JSON(buf) +} + +func findLatestChunkManifestURI(db *gorm.DB, spaceID uuid.UUID) string { + if db == nil || spaceID == uuid.Nil { + return "" + } + type row struct { + ChunkManifestURI string + } + var r row + bundleTable := (&models.ArtifactBundle{}).TableName() + jobTable := (&models.IngestionJob{}).TableName() + join := fmt.Sprintf("JOIN %s ON %s.id = %s.ingestion_job_id", jobTable, jobTable, bundleTable) + err := db.Table(bundleTable). + Select(fmt.Sprintf("%s.chunk_manifest_uri", bundleTable)). + Joins(join). + Where(fmt.Sprintf("%s.space_uuid = ? AND %s.chunk_manifest_uri <> ''", jobTable, bundleTable), spaceID). + Order(fmt.Sprintf("%s.id DESC", bundleTable)). + Limit(1). 
+ Scan(&r).Error + if err != nil { + return "" + } + return strings.TrimSpace(r.ChunkManifestURI) +} diff --git a/backend/internal/service/knowledge_space/embedding.go b/backend/internal/service/knowledge_space/embedding.go new file mode 100644 index 00000000..2b0601ce --- /dev/null +++ b/backend/internal/service/knowledge_space/embedding.go @@ -0,0 +1,26 @@ +package knowledge_space + +import ( + "crypto/sha256" + "encoding/binary" + "encoding/hex" +) + +func ContentHash(content string) string { + sum := sha256.Sum256([]byte(content)) + return hex.EncodeToString(sum[:]) +} + +func HashEmbedding(content string, dim int) []float32 { + if dim <= 0 { + dim = 32 + } + sum := sha256.Sum256([]byte(content)) + vec := make([]float32, dim) + for i := 0; i < dim; i++ { + offset := (i * 4) % len(sum) + u := binary.BigEndian.Uint32(sum[offset : offset+4]) + vec[i] = float32(u%10_000) / 10_000.0 + } + return vec +} diff --git a/backend/internal/service/knowledge_space/embedding_gate.go b/backend/internal/service/knowledge_space/embedding_gate.go new file mode 100644 index 00000000..6ccafbef --- /dev/null +++ b/backend/internal/service/knowledge_space/embedding_gate.go @@ -0,0 +1,46 @@ +package knowledge_space + +import ( + "context" + "net/http" + "strings" + + agentmodel "github.com/ArtisanCloud/PowerX/internal/server/agent/persistence/model" + agentsettings "github.com/ArtisanCloud/PowerX/internal/service/agent" + "github.com/ArtisanCloud/PowerX/pkg/dto" +) + +func (s *Service) ensureTenantEmbeddingConfigured(ctx context.Context, tenantUUID string) error { + if s == nil || s.db == nil { + return dto.NewErrorWithCode(http.StatusPreconditionFailed, "embedding_not_configured", "AI Settings 未初始化,无法创建空间", ErrEmbeddingNotConfigured) + } + tid := strings.TrimSpace(tenantUUID) + if tid == "" { + return dto.NewErrorWithCode(http.StatusPreconditionFailed, "embedding_not_configured", "缺少租户上下文,无法确认 embedding 配置", ErrEmbeddingNotConfigured) + } + settings := 
agentsettings.NewAgentSettingService(s.db) + env, _, err := settings.GetTenantCurrentAIEnv(ctx, tid) + if err != nil && !isMissingTableError(err) { + return dto.NewErrorWithCode(http.StatusInternalServerError, "embedding_failed", "读取 AI 环境失败", err) + } + if strings.TrimSpace(env) == "" { + env = "dev" + } + profiles, err := settings.ListProfiles(ctx, env, &tid, "embedding") + if err != nil && !isMissingTableError(err) { + return dto.NewErrorWithCode(http.StatusInternalServerError, "embedding_failed", "读取 embedding 配置失败", err) + } + if hasReadyEmbeddingProfile(profiles) { + return nil + } + return dto.NewErrorWithCode(http.StatusPreconditionFailed, "embedding_not_configured", "请先在 AI Settings 配置 embedding 模型并完成测试", ErrEmbeddingNotConfigured) +} + +func hasReadyEmbeddingProfile(profiles []agentmodel.AIModelProfile) bool { + for i := range profiles { + if agentsettings.EmbeddingProfileReady(&profiles[i]) { + return true + } + } + return false +} diff --git a/backend/internal/service/knowledge_space/embedding_profile_key.go b/backend/internal/service/knowledge_space/embedding_profile_key.go new file mode 100644 index 00000000..8f2daf5e --- /dev/null +++ b/backend/internal/service/knowledge_space/embedding_profile_key.go @@ -0,0 +1,31 @@ +package knowledge_space + +import ( + "fmt" + "strings" +) + +// ParseEmbeddingProfileKey parses a logical key in the form: +// - "provider/model" +// - "provider:model" +func ParseEmbeddingProfileKey(key string) (provider string, model string, err error) { + key = strings.TrimSpace(key) + if key == "" { + return "", "", fmt.Errorf("embeddingProfileKey 不能为空") + } + var parts []string + if strings.Contains(key, "/") { + parts = strings.SplitN(key, "/", 2) + } else if strings.Contains(key, ":") { + parts = strings.SplitN(key, ":", 2) + } else { + return "", "", fmt.Errorf("embeddingProfileKey 格式错误(期望 provider/model 或 provider:model),got=%q", key) + } + provider = strings.ToLower(strings.TrimSpace(parts[0])) + model = 
strings.TrimSpace(parts[1]) + if provider == "" || model == "" { + return "", "", fmt.Errorf("embeddingProfileKey 格式错误(provider/model)") + } + return provider, model, nil +} + diff --git a/backend/internal/service/knowledge_space/embedding_vectorizer.go b/backend/internal/service/knowledge_space/embedding_vectorizer.go new file mode 100644 index 00000000..d048f632 --- /dev/null +++ b/backend/internal/service/knowledge_space/embedding_vectorizer.go @@ -0,0 +1,540 @@ +package knowledge_space + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/ArtisanCloud/PowerX/internal/server/agent/catalog" + agentcfg "github.com/ArtisanCloud/PowerX/internal/server/agent/config" + intentfactory "github.com/ArtisanCloud/PowerX/internal/server/agent/factory/intent" + agentsettings "github.com/ArtisanCloud/PowerX/internal/service/agent" + knowledge "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" + repo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/knowledge" + "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/vectorstore" + "github.com/ArtisanCloud/PowerX/pkg/dto" + "github.com/google/uuid" + "gorm.io/gorm" +) + +type resolvedEmbeddingProfile struct { + Env string + Provider string + Model string + Endpoint string + Dimensions int + MaxInputTokens int +} + +func (s *IngestionService) resolveEmbeddingVectorizer( + ctx context.Context, + tenantUUID string, +) (*resolvedEmbeddingProfile, agentSvcEmbedVectorizer, error) { + if s == nil || s.agentSettings == nil { + return nil, nil, nil + } + tid := strings.ToLower(strings.TrimSpace(tenantUUID)) + if tid == "" { + return nil, nil, fmt.Errorf("tenant_uuid is empty") + } + + env, configured, err := s.agentSettings.GetTenantCurrentAIEnv(ctx, tid) + if err != nil { + // 测试/轻量环境可能没迁移 AI setting 表:此时回退默认 env,而不是直接失败。 + if !isMissingTableError(err) { + return nil, nil, err + } + env = "dev" + configured = false + } + if 
!configured { + env = "dev" + } + + profile, err := s.agentSettings.GetActiveProfile(ctx, env, &tid, "embedding") + if err != nil { + // 同上:若 AI profile/route 表不存在,则直接用 config.yaml 的 ai.defaults.embedding 兜底。 + if !isMissingTableError(err) { + return nil, nil, err + } + profile = nil + } + if profile == nil { + cfg := agentcfg.GetGlobalAIConfig() + if cfg == nil { + return nil, nil, nil + } + provider := strings.ToLower(strings.TrimSpace(cfg.Defaults.Embedding.Provider)) + model := strings.TrimSpace(cfg.Defaults.Embedding.Model) + if provider == "" || model == "" || provider == "none" || provider == "disabled" { + return nil, nil, nil + } + embCfg := agentcfg.EmbeddingConfig{ + Enabled: true, + Provider: provider, + Endpoint: strings.TrimSpace(cfg.Defaults.Embedding.Endpoint), + Model: model, + APIKey: strings.TrimSpace(cfg.Defaults.Embedding.APIKey), + MaxBatch: cfg.Defaults.Embedding.Batch, + Dim: cfg.Defaults.Embedding.Dimensions, + } + if strings.TrimSpace(embCfg.Endpoint) == "" { + req := catalog.AuthReqFromCatalog(provider) + embCfg.Endpoint = strings.TrimSpace(req.DefaultBaseURL) + } + vec, err := intentfactory.NewVectorizerFromConfig(embCfg) + if err != nil { + return &resolvedEmbeddingProfile{ + Env: env, + Provider: provider, + Model: model, + Endpoint: embCfg.Endpoint, + Dimensions: embCfg.Dim, + }, nil, err + } + return &resolvedEmbeddingProfile{ + Env: env, + Provider: provider, + Model: model, + Endpoint: embCfg.Endpoint, + Dimensions: embCfg.Dim, + }, vec, nil + } + + provider := strings.ToLower(strings.TrimSpace(profile.Provider)) + model := strings.TrimSpace(profile.Model) + if provider == "" || model == "" || provider == "none" || provider == "disabled" { + return nil, nil, nil + } + return s.resolveEmbeddingVectorizerForProfile(ctx, tid, env, provider, model) +} + +// resolveEmbeddingVectorizerForProfile resolves embedder for an explicit provider+model (space-locked use-case). 
+func (s *IngestionService) resolveEmbeddingVectorizerForProfile( + ctx context.Context, + tenantUUID string, + env string, + provider string, + model string, +) (*resolvedEmbeddingProfile, agentSvcEmbedVectorizer, error) { + if s == nil || s.agentSettings == nil { + return nil, nil, nil + } + tid := strings.ToLower(strings.TrimSpace(tenantUUID)) + provider = strings.ToLower(strings.TrimSpace(provider)) + model = strings.TrimSpace(model) + env = strings.TrimSpace(env) + if tid == "" || env == "" || provider == "" || model == "" { + return nil, nil, fmt.Errorf("invalid embedding profile params") + } + + // best-effort read profile defaults (dims/base_url/api_key) + var baseURLIn, apiKeyIn string + dimensions := 0 + maxBatch := 0 + maxInputTokens := 0 + if prof, err := s.agentSettings.GetProfile(ctx, env, &tid, "embedding", provider, model); err == nil && prof != nil { + if prof.Defaults != nil { + if v, ok := prof.Defaults["endpoint"].(string); ok && strings.TrimSpace(v) != "" { + baseURLIn = strings.TrimSpace(v) + } else if v, ok := prof.Defaults["base_url"].(string); ok && strings.TrimSpace(v) != "" { + baseURLIn = strings.TrimSpace(v) + } + if v, ok := prof.Defaults["api_key"].(string); ok && strings.TrimSpace(v) != "" { + apiKeyIn = strings.TrimSpace(v) + } + if v, ok := prof.Defaults["batch"]; ok { + maxBatch = parseInt(v) + } + maxInputTokens = resolveEmbeddingMaxInputTokens(prof.Defaults) + } + if maxInputTokens == 0 { + maxInputTokens = resolveEmbeddingMaxInputTokens(prof.CapCache) + } + dimensions = agentsettings.ResolveEmbeddingDimensions(prof) + } + + // If provider in catalog declares an embedding driver, use driverKey to pick implementation; + // credentials still come from the original provider. 
+ driverKey := provider + if m, ok := catalog.GetGlobalAIRegister().Manifest(provider); ok && m != nil { + if dk := strings.ToLower(strings.TrimSpace(m.Drivers["embedding"])); dk != "" { + driverKey = dk + } + } + + baseURL := baseURLIn + apiKey := apiKeyIn + if bu, ak, e := s.agentSettings.ResolveConnFromStore(ctx, env, &tid, provider, baseURLIn, apiKeyIn); e == nil { + baseURL, apiKey = bu, ak + } else if isMissingTableError(e) || errors.Is(e, gorm.ErrRecordNotFound) { + // ignore and fallback to baseURLIn/apiKeyIn/catalog defaults + } else { + return nil, nil, e + } + if strings.TrimSpace(baseURL) == "" { + if v := catalog.DefaultBaseURLForModel(provider, model); strings.TrimSpace(v) != "" { + baseURL = v + } else { + req := catalog.AuthReqFromCatalog(provider) + baseURL = strings.TrimSpace(req.DefaultBaseURL) + } + } + + // provider/driver 对 key 的要求来自 catalog;若需要 key 但缺失,则视为“未配置”而不是直接入库失败。 + // 注意:catalog 在某些测试/轻量环境可能未初始化;此时 AuthReqFromCatalog 会保守返回 NeedKey=true。 + // 对于明确“不需要 key”的内置 driver,我们做白名单豁免,避免误判。 + req := catalog.AuthReqFromCatalog(provider) + if req.NeedKey && strings.TrimSpace(apiKey) == "" && + provider != "hash" && + provider != "openai_compatible" && provider != "openai-compatible" && provider != "openai_compat" && + provider != "ollama" && + provider != "sentence_transformers" && provider != "sentence-transformers" && provider != "sbert" { + return &resolvedEmbeddingProfile{ + Env: env, + Provider: provider, + Model: model, + Endpoint: baseURL, + Dimensions: dimensions, + MaxInputTokens: maxInputTokens, + }, nil, nil + } + + embCfg := agentcfg.EmbeddingConfig{ + Enabled: true, + Provider: driverKey, + Endpoint: baseURL, + Model: model, + APIKey: apiKey, + MaxBatch: maxBatch, + Dim: dimensions, + } + vec, err := intentfactory.NewVectorizerFromConfig(embCfg) + if err != nil { + return &resolvedEmbeddingProfile{ + Env: env, + Provider: provider, + Model: model, + Endpoint: baseURL, + MaxInputTokens: maxInputTokens, + }, nil, err + } + if vec == nil 
{ + return &resolvedEmbeddingProfile{ + Env: env, + Provider: provider, + Model: model, + Endpoint: baseURL, + MaxInputTokens: maxInputTokens, + }, nil, nil + } + + return &resolvedEmbeddingProfile{ + Env: env, + Provider: provider, + Model: model, + Endpoint: baseURL, + Dimensions: dimensions, + MaxInputTokens: maxInputTokens, + }, vec, nil +} + +func (s *IngestionService) ensureEmbeddingReady(ctx context.Context, space *knowledge.KnowledgeSpace) error { + if s == nil || s.agentSettings == nil { + return dto.NewErrorWithCode(http.StatusPreconditionFailed, "embedding_not_configured", "AI Settings 未初始化,无法执行入库", ErrEmbeddingNotConfigured) + } + if space == nil || space.UUID == uuid.Nil { + return ErrSpaceNotFound + } + tenantUUID := strings.ToLower(strings.TrimSpace(space.TenantUUID)) + if tenantUUID == "" { + return dto.NewErrorWithCode(http.StatusPreconditionFailed, "embedding_not_configured", "缺少租户上下文,无法确认 embedding 配置", ErrEmbeddingNotConfigured) + } + env, _, err := s.agentSettings.GetTenantCurrentAIEnv(ctx, tenantUUID) + if err != nil && !isMissingTableError(err) { + return dto.NewErrorWithCode(http.StatusInternalServerError, "embedding_failed", "获取 AI 环境失败", err) + } + if strings.TrimSpace(env) == "" { + env = "dev" + } + profile, err := s.agentSettings.GetActiveProfile(ctx, env, &tenantUUID, "embedding") + if err != nil { + if isMissingTableError(err) || errors.Is(err, gorm.ErrRecordNotFound) { + return dto.NewErrorWithCode(http.StatusPreconditionFailed, "embedding_not_configured", "请先在 AI Settings 配置 embedding 模型并完成测试", ErrEmbeddingNotConfigured) + } + return dto.NewErrorWithCode(http.StatusInternalServerError, "embedding_failed", "读取 embedding 配置失败", err) + } + if profile == nil { + return dto.NewErrorWithCode(http.StatusPreconditionFailed, "embedding_not_configured", "请先在 AI Settings 配置 embedding 模型并完成测试", ErrEmbeddingNotConfigured) + } + provider := strings.TrimSpace(profile.Provider) + model := strings.TrimSpace(profile.Model) + if provider == "" || 
model == "" { + return dto.NewErrorWithCode(http.StatusPreconditionFailed, "embedding_not_configured", "embedding 模型配置不完整,请重新设置", ErrEmbeddingNotConfigured) + } + if !agentsettings.EmbeddingProfileReady(profile) { + return dto.NewErrorWithCode(http.StatusPreconditionFailed, "embedding_probe_required", "embedding 模型未完成测试探测,请先在 AI Settings 执行测试", ErrEmbeddingNotConfigured) + } + _, vec, err := s.resolveEmbeddingVectorizerForProfile(ctx, tenantUUID, env, provider, model) + if err != nil { + return dto.NewErrorWithCode(http.StatusInternalServerError, "embedding_failed", "embedding 配置解析失败", err) + } + if vec == nil { + return dto.NewErrorWithCode(http.StatusPreconditionFailed, "embedding_not_configured", "embedding 凭据未配置或不可用,请先在 AI Settings 完成配置", ErrEmbeddingNotConfigured) + } + return nil +} + +// agentSvcEmbedVectorizer keeps knowledge_space decoupled from internal/server/agent embed package. +// It mirrors `internal/server/agent/contract/embed.Vectorizer`. +type agentSvcEmbedVectorizer interface { + Embed(ctx context.Context, texts []string) ([][]float32, error) +} + +func parseInt(v any) int { + switch val := v.(type) { + case float64: + if int(val) > 0 { + return int(val) + } + case float32: + if int(val) > 0 { + return int(val) + } + case int: + if val > 0 { + return val + } + case int32: + if val > 0 { + return int(val) + } + case int64: + if val > 0 { + return int(val) + } + case string: + if parsed, err := strconv.Atoi(strings.TrimSpace(val)); err == nil && parsed > 0 { + return parsed + } + case json.Number: + if parsed, err := val.Int64(); err == nil && parsed > 0 { + return int(parsed) + } + if parsed, err := strconv.Atoi(strings.TrimSpace(val.String())); err == nil && parsed > 0 { + return parsed + } + } + return 0 +} + +func resolveEmbeddingMaxInputTokens(defaults map[string]any) int { + if defaults == nil { + return 0 + } + for _, key := range []string{ + "max_input_tokens", + "max_tokens", + "context_length", + "context_window", + "context_size", + 
"max_context_tokens", + } { + if v, ok := defaults[key]; ok { + if n := parseInt(v); n > 0 { + return n + } + } + } + return 0 +} + +func (s *IngestionService) buildVectorRecords( + ctx context.Context, + space *knowledge.KnowledgeSpace, + chunks []IngestionChunk, + onProgress func(done, total int), +) (records []vectorstore.VectorRecord, embeddingPct float64, degraded bool, errorCode string, reason string, maxInputTokens int, provider string, model string) { + if len(chunks) == 0 { + return nil, 0, false, "", "", 0, "", "" + } + if space == nil || space.UUID == uuid.Nil { + return nil, 0, true, "space_not_found", "space_missing", 0, "", "" + } + + // 对所有非空 chunk 做向量化(doc_summary/section_summary/chunk),保持“全链路可观测”一致性; + // 检索侧若只想用正文 chunk,可通过 metadata.filter(chunk_kind)筛选。 + contentIdx := make([]int, 0, len(chunks)) + texts := make([]string, 0, len(chunks)) + for i := range chunks { + if strings.TrimSpace(chunks[i].Content) == "" { + continue + } + contentIdx = append(contentIdx, i) + texts = append(texts, chunks[i].Content) + } + if len(texts) == 0 { + return nil, 0, true, "embedding_failed", "no_chunk_text", 0, "", "" + } + + // 没有向量存储 → 直接跳过,避免“看起来 100% 成功但实际上没写入”的假象。 + if s == nil || s.vectorStore == nil { + return nil, 0, true, "vector_store_disabled", "vector_store_not_configured", 0, "", "" + } + + // Space 未激活 dense index:不进行 embedding(避免额外成本),直接降级。 + activeIndexKey := strings.TrimSpace(space.ActiveVectorIndexKey) + if activeIndexKey == "" { + return nil, 0, true, "vector_index_not_activated", "no_active_vector_index", 0, "", "" + } + + tenantUUID := strings.ToLower(strings.TrimSpace(space.TenantUUID)) + if tenantUUID == "" { + return nil, 0, true, "embedding_failed", "tenant_uuid_empty", 0, "", "" + } + env, configured, err := s.agentSettings.GetTenantCurrentAIEnv(ctx, tenantUUID) + if err != nil { + if !isMissingTableError(err) { + return nil, 0, true, "embedding_failed", fmt.Sprintf("resolve_env_failed: %v", err), 0, "", "" + } + env = "dev" + configured 
= false + } + if !configured || strings.TrimSpace(env) == "" { + env = "dev" + } + + // Load active index for dimension check. + activeRec, err := repo.NewKnowledgeVectorIndexRepository(s.db).FindBySpaceAndKey(ctx, space.UUID, activeIndexKey) + if err != nil { + return nil, 0, true, "vector_index_invalid", fmt.Sprintf("load_active_index_failed: %v", err), 0, "", "" + } + if activeRec == nil || activeRec.Dimensions <= 0 { + return nil, 0, true, "vector_index_invalid", "active_index_not_found", 0, "", "" + } + + tenantRef := tenantUUID + activeProfile, err := s.agentSettings.GetActiveProfile(ctx, env, &tenantRef, "embedding") + if err != nil || activeProfile == nil { + return nil, 0, true, "embedding_not_configured", "active_embedding_profile_not_set", 0, "", "" + } + provider = strings.TrimSpace(activeProfile.Provider) + model = strings.TrimSpace(activeProfile.Model) + if provider == "" || model == "" { + return nil, 0, true, "embedding_not_configured", "active_embedding_profile_invalid", 0, "", "" + } + + prof, vec, err := s.resolveEmbeddingVectorizerForProfile(ctx, tenantUUID, env, provider, model) + if err != nil { + return nil, 0, true, "embedding_failed", fmt.Sprintf("resolve_embedder_failed: %v", err), 0, provider, model + } + if vec == nil { + return nil, 0, true, "embedding_not_configured", "no_active_embedding_profile", 0, provider, model + } + maxInputTokens = prof.MaxInputTokens + if s != nil && s.inst != nil { + s.inst.Logger(ctx).InfoF( + ctx, + "[ingestion] embedding profile resolved space=%s provider=%s model=%s max_input_tokens=%d space_profile_key=%s", + space.UUID.String(), + provider, + model, + maxInputTokens, + strings.TrimSpace(space.EmbeddingProfileKey), + ) + } + if maxInputTokens > 0 { + maxLen := 0 + for _, text := range texts { + if n := len([]rune(text)); n > maxLen { + maxLen = n + } + } + if maxLen > maxInputTokens { + return nil, 0, true, "embedding_input_too_long", + fmt.Sprintf("embedding_input_too_long: limit=%d actual=%d 
(provider=%s model=%s)", maxInputTokens, maxLen, prof.Provider, prof.Model), + maxInputTokens, provider, model + } + } + + const embedProgressStart = 15 + const embedProgressEnd = 85 + const embedProgressStep = 2 + desiredBatches := (embedProgressEnd - embedProgressStart) / embedProgressStep + if desiredBatches < 1 { + desiredBatches = 1 + } + batchSize := len(texts) / desiredBatches + if len(texts)%desiredBatches != 0 { + batchSize++ + } + if batchSize < 1 { + batchSize = 1 + } + if batchSize > 64 { + batchSize = 64 + } + + start := time.Now() + embeddings := make([][]float32, 0, len(texts)) + processed := 0 + expectedDim := activeRec.Dimensions + for i := 0; i < len(texts); i += batchSize { + j := i + batchSize + if j > len(texts) { + j = len(texts) + } + vecs, err := vec.Embed(ctx, texts[i:j]) + if err != nil { + return nil, 0, true, "embedding_failed", fmt.Sprintf("embed_failed: %v", err), maxInputTokens, provider, model + } + if len(vecs) != j-i { + return nil, 0, true, "embedding_failed", fmt.Sprintf("embed_failed: batch_mismatch (want=%d got=%d)", j-i, len(vecs)), maxInputTokens, provider, model + } + for k := range vecs { + if len(vecs[k]) != expectedDim { + return nil, 0, true, "embedding_dim_mismatch", + fmt.Sprintf("embedding_dim=%d != active_pgvector_dim=%d (provider=%s model=%s index_key=%s)", + len(vecs[k]), expectedDim, prof.Provider, prof.Model, activeIndexKey), + maxInputTokens, provider, model + } + } + embeddings = append(embeddings, vecs...) 
+ processed += len(vecs) + if onProgress != nil { + onProgress(processed, len(texts)) + } + } + latency := time.Since(start) + + records = make([]vectorstore.VectorRecord, 0, len(texts)) + for pos, idx := range contentIdx { + chunk := chunks[idx] + meta := make(map[string]any, len(chunk.Metadata)+6) + for k, v := range chunk.Metadata { + meta[k] = v + } + meta["chunk_kind"] = chunk.Kind + meta["content_hash"] = ContentHash(chunk.Content) + meta["embedding_provider"] = prof.Provider + meta["embedding_model"] = prof.Model + meta["embedding_env"] = prof.Env + meta["embedding_profile_ref"] = fmt.Sprintf("%s/%s", provider, model) + meta["active_vector_index_key"] = activeIndexKey + meta["embedding_latency_ms"] = latency.Milliseconds() + + records = append(records, vectorstore.VectorRecord{ + ChunkID: chunk.ID, + Embedding: embeddings[pos], + Metadata: meta, + }) + } + + embeddingPct = 100.0 * float64(len(records)) / float64(len(texts)) + return records, embeddingPct, false, "", "", maxInputTokens, provider, model +} diff --git a/backend/internal/service/knowledge_space/errors.go b/backend/internal/service/knowledge_space/errors.go index 2e3d454c..033c02bc 100644 --- a/backend/internal/service/knowledge_space/errors.go +++ b/backend/internal/service/knowledge_space/errors.go @@ -17,6 +17,10 @@ var ( ErrFusionConflict = errors.New("fusion strategy conflict") // ErrFusionStrategyNotFound indicates requested strategy does not exist. ErrFusionStrategyNotFound = errors.New("fusion strategy not found") + // ErrStrategyPrereqFailed indicates selected strategy bundle prerequisites are not satisfied. + ErrStrategyPrereqFailed = errors.New("strategy prerequisites not satisfied") + // ErrEmbeddingNotConfigured indicates embedding profile is missing or not ready. + ErrEmbeddingNotConfigured = errors.New("embedding not configured") ) // IsConflictError reports whether err is caused by a duplicate space. 
diff --git a/backend/internal/service/knowledge_space/event_hotfix/agent_notifier.go b/backend/internal/service/knowledge_space/event_hotfix/agent_notifier.go index deaacecd..289ae6f8 100644 --- a/backend/internal/service/knowledge_space/event_hotfix/agent_notifier.go +++ b/backend/internal/service/knowledge_space/event_hotfix/agent_notifier.go @@ -32,11 +32,37 @@ func NewAgentNotifier(path string) *AgentNotifier { return &AgentNotifier{matrix: matrix} } -func (n *AgentNotifier) Refresh(_ context.Context, payload map[string]any) bool { +func (n *AgentNotifier) Refresh(ctx context.Context, eventType string, payload map[string]any) bool { if n == nil { return false } - eventType, _ := payload["eventType"].(string) - entry, ok := n.matrix[strings.ToLower(eventType)] - return ok && entry.Weight > 0 + _ = ctx + key := strings.ToLower(strings.TrimSpace(eventType)) + if key == "agent.weight.refresh" { + if v, ok := payload["target_event_type"].(string); ok { + key = strings.ToLower(strings.TrimSpace(v)) + } else if v, ok := payload["targetEventType"].(string); ok { + key = strings.ToLower(strings.TrimSpace(v)) + } else if v, ok := payload["eventType"].(string); ok { + candidate := strings.TrimSpace(v) + // gRPC RefreshAgentWeights 目前传 tenant_uuid 到 eventType;视为全量刷新成功。 + if len(candidate) >= 32 && strings.Count(candidate, "-") >= 4 { + return true + } + key = strings.ToLower(candidate) + } else { + // 没有指明目标事件时,视为全量刷新成功(真实实现可在此触发全量 reload)。 + return true + } + } + if key == "" { + return false + } + entry, ok := n.matrix[key] + if !ok { + return false + } + // "真实刷新" 在此切片里体现为:基于矩阵命中并产出可执行的权重参数。 + // 后续可在此处接入 agent cache / 路由策略 / toolchain registry 的实际刷新逻辑。 + return strings.TrimSpace(entry.Tool) != "" && entry.Weight > 0 } diff --git a/backend/internal/service/knowledge_space/event_hotfix/service.go b/backend/internal/service/knowledge_space/event_hotfix/service.go index dd529fdb..d005cfa5 100644 --- a/backend/internal/service/knowledge_space/event_hotfix/service.go +++ 
b/backend/internal/service/knowledge_space/event_hotfix/service.go @@ -2,16 +2,22 @@ package event_hotfix import ( "context" + "crypto/sha256" "encoding/json" + "encoding/hex" "errors" "os" - "path/filepath" "strings" "sync" "time" "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space/instrumentation" + models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" "github.com/ArtisanCloud/PowerX/pkg/event_bus" + "github.com/google/uuid" + "gorm.io/datatypes" + "gorm.io/gorm" + "gopkg.in/yaml.v3" ) var ( @@ -21,6 +27,7 @@ var ( ) type Options struct { + DB *gorm.DB Instrumentation *instrumentation.Instrumentation EventBus event_bus.EventBus MetricsWriter *instrumentation.EventMetricsWriter @@ -29,9 +36,11 @@ type Options struct { ReportPath string Clock func() time.Time RetryMax int + ReplayWindow time.Duration } type Service struct { + db *gorm.DB inst *instrumentation.Instrumentation bus event_bus.EventBus metrics *instrumentation.EventMetricsWriter @@ -41,6 +50,7 @@ type Service struct { clock func() time.Time reportPath string retryMax int + replayWin time.Duration } type Policy struct { @@ -63,6 +73,11 @@ type ApplyResult struct { Processed time.Time `json:"processedAt"` } +type processedMarker struct { + Status string + RecordedAt time.Time +} + func NewService(opts Options) *Service { if opts.Instrumentation == nil { opts.Instrumentation = instrumentation.New(instrumentation.Options{}) @@ -74,7 +89,11 @@ func NewService(opts Options) *Service { if opts.RetryMax <= 0 { opts.RetryMax = 3 } + if opts.ReplayWindow <= 0 { + opts.ReplayWindow = 5 * time.Minute + } return &Service{ + db: opts.DB, inst: opts.Instrumentation, bus: opts.EventBus, metrics: opts.MetricsWriter, @@ -83,23 +102,24 @@ func NewService(opts Options) *Service { clock: opts.Clock, reportPath: opts.ReportPath, retryMax: opts.RetryMax, + replayWin: opts.ReplayWindow, } } func loadPolicies(path string) map[string]Policy { result := make(map[string]Policy) if 
strings.TrimSpace(path) == "" { - return result + return withBuiltinPolicies(result) } data, err := os.ReadFile(path) if err != nil { - return result + return withBuiltinPolicies(result) } var payload struct { Policies []Policy `json:"policies" yaml:"policies"` } if err := json.Unmarshal(data, &payload); err != nil { - return result + _ = yaml.Unmarshal(data, &payload) } for _, p := range payload.Policies { if p.EventType == "" { @@ -107,22 +127,60 @@ func loadPolicies(path string) map[string]Policy { } result[strings.ToLower(p.EventType)] = p } - return result + return withBuiltinPolicies(result) +} + +func withBuiltinPolicies(policies map[string]Policy) map[string]Policy { + if policies == nil { + policies = make(map[string]Policy) + } + // Built-ins ensure admin/ops endpoints work even when policy file is minimal. + if _, ok := policies["agent.weight.refresh"]; !ok { + policies["agent.weight.refresh"] = Policy{ + EventType: "agent.weight.refresh", + Actions: []string{"agent-refresh"}, + Severity: "p1", + } + } + if _, ok := policies["index.hot_update"]; !ok { + policies["index.hot_update"] = Policy{ + EventType: "index.hot_update", + Actions: []string{"hot-update"}, + Severity: "p1", + } + } + return policies } func (s *Service) Apply(ctx context.Context, in ApplyInput) (*ApplyResult, error) { if strings.TrimSpace(in.EventID) == "" || strings.TrimSpace(in.EventType) == "" { return nil, ErrInvalidEvent } + now := s.clock().UTC() + if !in.ReceivedAt.IsZero() && now.Sub(in.ReceivedAt) > s.replayWin { + return nil, ErrInvalidEvent + } key := strings.ToLower(strings.TrimSpace(in.EventType)) policy, ok := s.policies[key] if !ok { return nil, ErrPolicyMissing } - if _, loaded := s.processed.LoadOrStore(in.EventID, struct{}{}); loaded { - s.recordMetrics(in, true, 0, false) - return nil, ErrDuplicateEvent + + marker := processedMarker{Status: "in_flight", RecordedAt: now} + if existing, loaded := s.processed.LoadOrStore(in.EventID, marker); loaded { + if prev, ok := 
existing.(processedMarker); ok { + // Allow retries after replay window expiry. + if now.Sub(prev.RecordedAt) <= s.replayWin { + s.recordMetrics(in, policy, true, in.RetryCount, false, false) + return nil, ErrDuplicateEvent + } + } else { + s.recordMetrics(in, policy, true, in.RetryCount, false, false) + return nil, ErrDuplicateEvent + } + s.processed.Store(in.EventID, marker) } + if s.bus != nil { s.bus.Publish("knowledge.event.received", map[string]any{ "event_id": in.EventID, @@ -130,17 +188,13 @@ func (s *Service) Apply(ctx context.Context, in ApplyInput) (*ApplyResult, error "severity": policy.Severity, }, ctx) } - agentOK := false - if s.notifier != nil { - agentOK = s.notifier.Refresh(ctx, in.Payload) + result, err := s.applyActions(ctx, in, policy) + if err != nil { + s.processed.Delete(in.EventID) + return nil, err } - s.persistReport(in, policy, agentOK) - s.recordMetrics(in, false, in.RetryCount, agentOK) - return &ApplyResult{ - Status: "applied", - EventID: in.EventID, - Processed: s.clock().UTC(), - }, nil + s.processed.Store(in.EventID, processedMarker{Status: "completed", RecordedAt: now}) + return result, nil } func (s *Service) Retry(ctx context.Context, in ApplyInput) (*ApplyResult, error) { @@ -151,26 +205,83 @@ func (s *Service) Retry(ctx context.Context, in ApplyInput) (*ApplyResult, error return s.Apply(ctx, in) } -func (s *Service) persistReport(in ApplyInput, policy Policy, agentOK bool) { - if strings.TrimSpace(s.reportPath) == "" { - return +func (s *Service) applyActions(ctx context.Context, in ApplyInput, policy Policy) (*ApplyResult, error) { + actions := normalizeActions(policy.Actions) + agentOK := false + hotfixOK := false + for _, action := range actions { + switch action { + case "agent-refresh": + if s.notifier != nil { + agentOK = s.notifier.Refresh(ctx, in.EventType, in.Payload) + } + case "hot-update": + hotfixOK = s.tryHotUpdate(ctx, in) + default: + // fetch/patch are treated as no-op in this slice; real implementation can 
plug in. + } } - report := map[string]any{ - "eventId": in.EventID, - "eventType": in.EventType, - "policy": policy, - "agentSync": agentOK, - "processedAt": s.clock().UTC().Format(time.RFC3339Nano), + + s.recordMetrics(in, policy, false, in.RetryCount, agentOK, hotfixOK) + + processed := s.clock().UTC() + return &ApplyResult{ + Status: "applied", + EventID: in.EventID, + Processed: processed, + }, nil +} + +func normalizeActions(actions []string) []string { + if len(actions) == 0 { + return nil } - _ = os.MkdirAll(filepath.Dir(s.reportPath), 0o755) - data, err := json.MarshalIndent(report, "", " ") - if err != nil { - return + out := make([]string, 0, len(actions)) + for _, a := range actions { + a = strings.ToLower(strings.TrimSpace(a)) + if a == "" { + continue + } + out = append(out, a) + } + return out +} + +func (s *Service) tryHotUpdate(ctx context.Context, in ApplyInput) bool { + if s == nil || s.db == nil { + return true + } + rawSpace, _ := in.Payload["spaceId"].(string) + if rawSpace == "" { + rawSpace, _ = in.Payload["space_id"].(string) } - _ = os.WriteFile(s.reportPath, data, 0o644) + spaceID := strings.TrimSpace(rawSpace) + if spaceID == "" { + return false + } + now := s.clock().UTC() + return s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + entry := &models.AuditTrailEntry{ + Action: "event.hot_update", + Actor: "event_hotfix", + OccurredAt: now, + Metadata: mustJSON(map[string]any{"event_id": in.EventID, "event_type": in.EventType}), + PayloadHash: computePayloadHash(map[string]any{ + "event_id": in.EventID, + "event_type": in.EventType, + "space_id": spaceID, + }), + RollbackToken: "event-hotfix:" + spaceID, + } + // SpaceUUID is required; keep best-effort without failing whole flow if parse fails. 
+ if parsed, err := parseUUID(spaceID); err == nil { + entry.SpaceUUID = parsed + } + return tx.Create(entry).Error + }) == nil } -func (s *Service) recordMetrics(in ApplyInput, idempotent bool, retries int, agentOK bool) { +func (s *Service) recordMetrics(in ApplyInput, policy Policy, idempotent bool, retries int, agentOK bool, hotfixOK bool) { if s.metrics == nil { return } @@ -178,10 +289,40 @@ func (s *Service) recordMetrics(in ApplyInput, idempotent bool, retries int, age snapshot := instrumentation.EventMetricsSnapshot{ EventID: in.EventID, EventType: in.EventType, + PolicySeverity: strings.TrimSpace(policy.Severity), + Actions: normalizeActions(policy.Actions), LatencyMs: latency, RetryCount: retries, IdempotentSkip: idempotent, AgentRefreshOK: agentOK, + HotUpdateOK: hotfixOK, } _ = s.metrics.Store(snapshot) } + +func mustJSON(v any) datatypes.JSON { + if v == nil { + return datatypes.JSON([]byte("{}")) + } + data, err := json.Marshal(v) + if err != nil || len(data) == 0 { + return datatypes.JSON([]byte("{}")) + } + return datatypes.JSON(data) +} + +func computePayloadHash(payload map[string]any) string { + data, _ := json.Marshal(payload) + sum := sha256Sum(data) + return sum +} + +func sha256Sum(data []byte) string { + h := sha256.New() + _, _ = h.Write(data) + return hex.EncodeToString(h.Sum(nil)) +} + +func parseUUID(raw string) (uuid.UUID, error) { + return uuid.Parse(strings.TrimSpace(raw)) +} diff --git a/backend/internal/service/knowledge_space/feedback_metrics.go b/backend/internal/service/knowledge_space/feedback_metrics.go index 5fb1cabf..63f29547 100644 --- a/backend/internal/service/knowledge_space/feedback_metrics.go +++ b/backend/internal/service/knowledge_space/feedback_metrics.go @@ -18,6 +18,8 @@ import ( const ( defaultFeedbackMetricsPath = "backend/reports/_state/knowledge-feedback.json" defaultKnowledgeUpdatePath = "reports/_state/knowledge-update.json" + defaultFeedbackLedgerName = "knowledge-feedback-ledger.json" + 
defaultKnowledgeSpacesPath = "reports/_state/knowledge-spaces.json" ) // FeedbackMetrics aggregates fleet-wide feedback signals for dashboards + audits. @@ -66,6 +68,9 @@ func (w *FeedbackMetricsWriter) Refresh(ctx context.Context, db *gorm.DB) (Feedb if err := w.persistSnapshot(w.path, stats); err != nil { return metrics, err } + if err := w.persistLedger(ctx, db); err != nil { + return metrics, err + } if err := w.persistAggregate(stats); err != nil { return metrics, err } @@ -170,3 +175,119 @@ func (w *FeedbackMetricsWriter) persistAggregate(metrics FeedbackMetrics) error } return os.WriteFile(w.aggregatePath, buf, 0o644) } + +func (w *FeedbackMetricsWriter) persistLedger(ctx context.Context, db *gorm.DB) error { + if strings.TrimSpace(w.path) == "" { + return nil + } + dir := filepath.Dir(w.path) + ledgerPath := filepath.Join(dir, defaultFeedbackLedgerName) + + var cases []models.FeedbackCase + if err := db.WithContext(ctx). + Order("created_at DESC"). + Limit(100). + Find(&cases).Error; err != nil { + return err + } + type caseView struct { + CaseID string `json:"case_id"` + SpaceID string `json:"space_id"` + Status string `json:"status"` + Severity string `json:"severity"` + IssueType string `json:"issue_type"` + TraceID string `json:"trace_id,omitempty"` + ReprocessJob *uint64 `json:"reprocess_job_id,omitempty"` + SLADueAt *time.Time `json:"sla_due_at,omitempty"` + EscalatedAt *time.Time `json:"escalated_at,omitempty"` + ClosedAt *time.Time `json:"closed_at,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + } + views := make([]caseView, 0, len(cases)) + for _, c := range cases { + views = append(views, caseView{ + CaseID: c.UUID.String(), + SpaceID: c.SpaceUUID.String(), + Status: c.Status, + Severity: c.Severity, + IssueType: c.IssueType, + TraceID: c.ToolTraceRef, + ReprocessJob: c.ReprocessJobID, + SLADueAt: c.SLADueAt, + EscalatedAt: c.EscalatedAt, + ClosedAt: c.ClosedAt, + CreatedAt: c.CreatedAt, + 
UpdatedAt: c.UpdatedAt, + }) + } + + var audits []models.AuditTrailEntry + if err := db.WithContext(ctx). + Where("action LIKE ?", "feedback.%"). + Order("occurred_at DESC"). + Limit(200). + Find(&audits).Error; err != nil { + return err + } + type auditView struct { + ID uint64 `json:"id"` + SpaceID string `json:"space_id"` + Action string `json:"action"` + Actor string `json:"actor"` + Hash string `json:"payload_hash"` + Occurred time.Time `json:"occurred_at"` + Token string `json:"rollback_token,omitempty"` + Metadata any `json:"metadata,omitempty"` + } + auditViews := make([]auditView, 0, len(audits)) + for _, a := range audits { + var meta any + if len(a.Metadata) > 0 { + _ = json.Unmarshal(a.Metadata, &meta) + } + auditViews = append(auditViews, auditView{ + ID: a.ID, + SpaceID: a.SpaceUUID.String(), + Action: a.Action, + Actor: a.Actor, + Hash: a.PayloadHash, + Occurred: a.OccurredAt, + Token: a.RollbackToken, + Metadata: meta, + }) + } + + payload := map[string]any{ + "recorded_at": time.Now().UTC(), + "cases": views, + "audits": auditViews, + } + if err := os.MkdirAll(filepath.Dir(ledgerPath), 0o755); err != nil { + return err + } + buf, err := json.MarshalIndent(payload, "", " ") + if err != nil { + return err + } + if err := os.WriteFile(ledgerPath, buf, 0o644); err != nil { + return err + } + + // Optional: update knowledge-spaces.json if present. 
+ if _, err := os.Stat(defaultKnowledgeSpacesPath); err == nil { + state := make(map[string]any) + if data, err := os.ReadFile(defaultKnowledgeSpacesPath); err == nil { + _ = json.Unmarshal(data, &state) + } + state["feedback"] = payload + buf, err := json.MarshalIndent(state, "", " ") + if err != nil { + return err + } + if err := os.WriteFile(defaultKnowledgeSpacesPath, buf, 0o644); err != nil { + return err + } + } + return nil +} diff --git a/backend/internal/service/knowledge_space/feedback_metrics_test.go b/backend/internal/service/knowledge_space/feedback_metrics_test.go index 7164f334..3ad686fa 100644 --- a/backend/internal/service/knowledge_space/feedback_metrics_test.go +++ b/backend/internal/service/knowledge_space/feedback_metrics_test.go @@ -25,7 +25,7 @@ func TestFeedbackMetricsWriterRefresh(t *testing.T) { t.Cleanup(func() { coremodel.PowerXSchema = prevSchema }) - require.NoError(t, db.AutoMigrate(&models.FeedbackCase{})) + require.NoError(t, db.AutoMigrate(&models.FeedbackCase{}, &models.AuditTrailEntry{})) now := time.Now().UTC() spaceID := uuid.New() diff --git a/backend/internal/service/knowledge_space/feedback_service.go b/backend/internal/service/knowledge_space/feedback_service.go index d3e2e02f..ffb53842 100644 --- a/backend/internal/service/knowledge_space/feedback_service.go +++ b/backend/internal/service/knowledge_space/feedback_service.go @@ -13,6 +13,7 @@ import ( workflow "github.com/ArtisanCloud/PowerX/internal/workflow/knowledge_space" models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" repo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/knowledge" + "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" "github.com/google/uuid" "gorm.io/datatypes" "gorm.io/gorm" @@ -49,6 +50,12 @@ type SubmitFeedbackInput struct { LinkedChunks []uuid.UUID } +type FeedbackCitation struct { + ChunkID uuid.UUID `json:"chunkId"` + Citation map[string]any `json:"citation,omitempty"` + Meta 
map[string]string `json:"meta,omitempty"` +} + // NewFeedbackService constructs a feedback service instance. func NewFeedbackService(opts FeedbackServiceOptions) *FeedbackService { if opts.DB == nil { @@ -61,7 +68,7 @@ func NewFeedbackService(opts FeedbackServiceOptions) *FeedbackService { opts.Clock = time.Now } if opts.MetricsWriter == nil { - opts.MetricsWriter = NewIngestionMetricsWriter(defaultMetricsPath) + opts.MetricsWriter = NewIngestionMetricsWriter("") } if opts.FeedbackMetrics == nil { opts.FeedbackMetrics = NewFeedbackMetricsWriter(defaultFeedbackMetricsPath, defaultKnowledgeUpdatePath) @@ -87,6 +94,10 @@ func (s *FeedbackService) SubmitFeedback(ctx context.Context, in SubmitFeedbackI if reportedBy == "" { reportedBy = "ops@powerx.local" } + traceRef := strings.TrimSpace(in.ToolTraceRef) + if traceRef == "" { + traceRef = strings.TrimSpace(reqctx.GetTraceID(ctx)) + } chunkStrings := uniqueChunkStrings(in.LinkedChunks) if len(chunkStrings) == 0 { return nil, ErrInvalidInput @@ -116,7 +127,7 @@ func (s *FeedbackService) SubmitFeedback(ctx context.Context, in SubmitFeedbackI Severity: severity, Status: models.FeedbackStatusOpen, LinkedChunks: datatypes.JSON(chunkPayload), - ToolTraceRef: in.ToolTraceRef, + ToolTraceRef: traceRef, Notes: sanitizedNotes, QualityScore: qualityScore, SLADueAt: &dueAt, @@ -149,10 +160,11 @@ func (s *FeedbackService) SubmitFeedback(ctx context.Context, in SubmitFeedbackI } } - if err := s.writeAudit(ctx, tx, caseModel, "feedback.submitted", map[string]any{ + if err := s.writeAudit(ctx, tx, caseModel, "feedback.submitted", reportedBy, map[string]any{ "severity": severity, "issue_type": issueType, "chunks": chunkStrings, + "trace_id": traceRef, }); err != nil { return err } @@ -215,14 +227,349 @@ func (s *FeedbackService) ListCases(ctx context.Context, space uuid.UUID, limit return cases, nil } -func (s *FeedbackService) writeAudit(ctx context.Context, tx *gorm.DB, caseModel *models.FeedbackCase, action string, payload 
map[string]any) error { +type ListFeedbackFilter struct { + Status string + Severity string + Limit int +} + +func (s *FeedbackService) ListCasesFiltered(ctx context.Context, space uuid.UUID, filter ListFeedbackFilter) ([]*models.FeedbackCase, error) { + if space == uuid.Nil { + return nil, ErrInvalidInput + } + limit := filter.Limit + if limit <= 0 { + limit = 50 + } + query := s.db.WithContext(ctx).Where("space_uuid = ?", space) + if status := strings.TrimSpace(filter.Status); status != "" { + query = query.Where("status = ?", status) + } + if severity := strings.TrimSpace(filter.Severity); severity != "" { + query = query.Where("severity = ?", severity) + } + var cases []*models.FeedbackCase + if err := query.Order("created_at DESC").Limit(limit).Find(&cases).Error; err != nil { + return nil, err + } + return cases, nil +} + +type FeedbackCaseUpdateInput struct { + SpaceID uuid.UUID + CaseID uuid.UUID + Actor string + Notes string +} + +func (s *FeedbackService) CloseCase(ctx context.Context, in FeedbackCaseUpdateInput) (*models.FeedbackCase, error) { + if in.SpaceID == uuid.Nil || in.CaseID == uuid.Nil { + return nil, ErrInvalidInput + } + actor := strings.TrimSpace(in.Actor) + if actor == "" { + actor = "ops@powerx.local" + } + now := s.clock() + var updated *models.FeedbackCase + err := s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + spaces := repo.NewKnowledgeSpaceRepository(tx) + cases := repo.NewFeedbackCaseRepository(tx) + space, err := spaces.FindByUUID(ctx, in.SpaceID) + if err != nil { + return err + } + if space == nil || space.Status == models.KnowledgeSpaceStatusRetired { + return ErrSpaceNotFound + } + caseModel, err := cases.GetByUUID(ctx, in.CaseID.String(), nil) + if err != nil { + return err + } + if caseModel == nil || caseModel.SpaceUUID != in.SpaceID { + return ErrInvalidInput + } + caseModel.Status = models.FeedbackStatusClosed + caseModel.ClosedAt = &now + caseModel.ResolutionNotes = sanitizeNotes(in.Notes) + if _, err := 
cases.Update(ctx, caseModel); err != nil { + return err + } + if err := s.writeAudit(ctx, tx, caseModel, "feedback.closed", actor, map[string]any{ + "resolution_notes": caseModel.ResolutionNotes, + }); err != nil { + return err + } + updated = caseModel + return nil + }) + if err != nil { + return nil, err + } + s.refreshFeedbackMetrics(ctx) + return updated, nil +} + +func (s *FeedbackService) EscalateCase(ctx context.Context, in FeedbackCaseUpdateInput) (*models.FeedbackCase, error) { + if in.SpaceID == uuid.Nil || in.CaseID == uuid.Nil { + return nil, ErrInvalidInput + } + actor := strings.TrimSpace(in.Actor) + if actor == "" { + actor = "ops@powerx.local" + } + now := s.clock() + var updated *models.FeedbackCase + err := s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + spaces := repo.NewKnowledgeSpaceRepository(tx) + cases := repo.NewFeedbackCaseRepository(tx) + space, err := spaces.FindByUUID(ctx, in.SpaceID) + if err != nil { + return err + } + if space == nil || space.Status == models.KnowledgeSpaceStatusRetired { + return ErrSpaceNotFound + } + caseModel, err := cases.GetByUUID(ctx, in.CaseID.String(), nil) + if err != nil { + return err + } + if caseModel == nil || caseModel.SpaceUUID != in.SpaceID { + return ErrInvalidInput + } + caseModel.Status = models.FeedbackStatusEscalated + caseModel.EscalatedAt = &now + caseModel.ResolutionNotes = sanitizeNotes(in.Notes) + if _, err := cases.Update(ctx, caseModel); err != nil { + return err + } + if err := s.writeAudit(ctx, tx, caseModel, "feedback.escalated", actor, map[string]any{ + "reason": caseModel.ResolutionNotes, + }); err != nil { + return err + } + updated = caseModel + return nil + }) + if err != nil { + return nil, err + } + s.refreshFeedbackMetrics(ctx) + return updated, nil +} + +func (s *FeedbackService) ReprocessCase(ctx context.Context, spaceID, caseID uuid.UUID, requestedBy string) (*models.FeedbackCase, error) { + if spaceID == uuid.Nil || caseID == uuid.Nil { + return nil, 
ErrInvalidInput + } + requestedBy = strings.TrimSpace(requestedBy) + if requestedBy == "" { + requestedBy = "ops@powerx.local" + } + var updated *models.FeedbackCase + err := s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + spaces := repo.NewKnowledgeSpaceRepository(tx) + cases := repo.NewFeedbackCaseRepository(tx) + space, err := spaces.FindByUUID(ctx, spaceID) + if err != nil { + return err + } + if space == nil || space.Status == models.KnowledgeSpaceStatusRetired { + return ErrSpaceNotFound + } + caseModel, err := cases.GetByUUID(ctx, caseID.String(), nil) + if err != nil { + return err + } + if caseModel == nil || caseModel.SpaceUUID != spaceID { + return ErrInvalidInput + } + chunks := decodeChunkStrings(caseModel.LinkedChunks) + chunkIDs := make([]uuid.UUID, 0, len(chunks)) + for _, raw := range chunks { + id, err := uuid.Parse(raw) + if err != nil { + continue + } + chunkIDs = append(chunkIDs, id) + } + if s.pipeline == nil { + return errors.New("reprocess pipeline not configured") + } + task, err := s.pipeline.Schedule(ctx, workflow.ReprocessInput{ + SpaceID: spaceID, + CaseID: caseID, + Severity: caseModel.Severity, + IssueType: caseModel.IssueType, + ChunkIDs: chunkIDs, + RequestedBy: requestedBy, + }) + if err != nil { + return err + } + caseModel.Status = models.FeedbackStatusInProgress + caseModel.ReprocessJobID = &task.JobID + if _, err := cases.Update(ctx, caseModel); err != nil { + return err + } + if err := s.writeAudit(ctx, tx, caseModel, "feedback.reprocess.requested", requestedBy, map[string]any{ + "job_id": task.JobID, + }); err != nil { + return err + } + updated = caseModel + return nil + }) + if err != nil { + return nil, err + } + s.refreshFeedbackMetrics(ctx) + return updated, nil +} + +func (s *FeedbackService) RollbackCase(ctx context.Context, spaceID, caseID uuid.UUID, requestedBy string, reason string) (*models.FeedbackCase, error) { + if spaceID == uuid.Nil || caseID == uuid.Nil { + return nil, ErrInvalidInput + } + 
requestedBy = strings.TrimSpace(requestedBy) + if requestedBy == "" { + requestedBy = "ops@powerx.local" + } + now := s.clock() + var updated *models.FeedbackCase + err := s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + spaces := repo.NewKnowledgeSpaceRepository(tx) + cases := repo.NewFeedbackCaseRepository(tx) + jobs := repo.NewIngestionJobRepository(tx) + bundles := repo.NewArtifactBundleRepository(tx) + + space, err := spaces.FindByUUID(ctx, spaceID) + if err != nil { + return err + } + if space == nil || space.Status == models.KnowledgeSpaceStatusRetired { + return ErrSpaceNotFound + } + caseModel, err := cases.GetByUUID(ctx, caseID.String(), nil) + if err != nil { + return err + } + if caseModel == nil || caseModel.SpaceUUID != spaceID { + return ErrInvalidInput + } + + var recent []models.IngestionJob + if err := tx.WithContext(ctx). + Model(&models.IngestionJob{}). + Where("space_uuid = ? AND artifact_bundle_id IS NOT NULL", spaceID). + Order("created_at DESC"). + Limit(2). 
+ Find(&recent).Error; err != nil { + return err + } + if len(recent) < 2 || recent[0].ArtifactBundleID == nil || recent[1].ArtifactBundleID == nil { + return ErrInvalidInput + } + currentBundleID := *recent[0].ArtifactBundleID + prevBundleID := *recent[1].ArtifactBundleID + + currentBundle, err := bundles.GetById(ctx, currentBundleID, nil) + if err != nil { + return err + } + prevBundle, err := bundles.GetById(ctx, prevBundleID, nil) + if err != nil { + return err + } + if currentBundle == nil || prevBundle == nil { + return ErrInvalidInput + } + + currentBundle.Status = models.ArtifactBundleStatusArchived + if _, err := bundles.Update(ctx, currentBundle); err != nil { + return err + } + prevBundle.Status = models.ArtifactBundleStatusActive + if _, err := bundles.Update(ctx, prevBundle); err != nil { + return err + } + + job, err := jobs.GetByUUID(ctx, recent[0].UUID.String(), nil) + if err == nil && job != nil { + job.Status = models.IngestionStatusFailed + job.ErrorCode = "REPROCESS_ROLLED_BACK" + job.BlockedReason = sanitizeNotes(reason) + job.CompletedAt = &now + _, _ = jobs.Update(ctx, job) + } + + caseModel.Status = models.FeedbackStatusClosed + caseModel.ClosedAt = &now + caseModel.ResolutionNotes = sanitizeNotes("rollback: " + reason) + if _, err := cases.Update(ctx, caseModel); err != nil { + return err + } + if err := s.writeAudit(ctx, tx, caseModel, "feedback.rollback", requestedBy, map[string]any{ + "bundle_current": currentBundleID, + "bundle_previous": prevBundleID, + "reason": sanitizeNotes(reason), + }); err != nil { + return err + } + updated = caseModel + return nil + }) + if err != nil { + return nil, err + } + s.refreshFeedbackMetrics(ctx) + return updated, nil +} + +type FeedbackExport struct { + Cases []*models.FeedbackCase `json:"cases"` + Audits []*models.AuditTrailEntry `json:"audits"` + Meta map[string]any `json:"meta,omitempty"` +} + +func (s *FeedbackService) ExportCases(ctx context.Context, space uuid.UUID, filter ListFeedbackFilter) 
(*FeedbackExport, error) { + if space == uuid.Nil { + return nil, ErrInvalidInput + } + cases, err := s.ListCasesFiltered(ctx, space, filter) + if err != nil { + return nil, err + } + var audits []*models.AuditTrailEntry + if err := s.db.WithContext(ctx). + Where("space_uuid = ?", space). + Order("occurred_at DESC"). + Limit(200). + Find(&audits).Error; err != nil { + return nil, err + } + return &FeedbackExport{ + Cases: cases, + Audits: audits, + Meta: map[string]any{ + "space_id": space.String(), + "exported_at": time.Now().UTC(), + }, + }, nil +} + +func (s *FeedbackService) writeAudit(ctx context.Context, tx *gorm.DB, caseModel *models.FeedbackCase, action string, actor string, payload map[string]any) error { if caseModel == nil { return errors.New("feedback case missing") } + actor = strings.TrimSpace(actor) + if actor == "" { + actor = caseModel.ReportedBy + } entry := &models.AuditTrailEntry{ SpaceUUID: caseModel.SpaceUUID, Action: action, - Actor: caseModel.ReportedBy, + Actor: actor, Metadata: marshalJSON(payload), OccurredAt: s.clock(), RollbackToken: caseModel.UUID.String(), @@ -232,6 +579,17 @@ func (s *FeedbackService) writeAudit(ctx context.Context, tx *gorm.DB, caseModel return err } +func decodeChunkStrings(raw datatypes.JSON) []string { + if len(raw) == 0 { + return nil + } + var chunks []string + if err := json.Unmarshal(raw, &chunks); err != nil { + return nil + } + return chunks +} + func normalizeSeverity(severity string) string { switch strings.ToLower(strings.TrimSpace(severity)) { case models.FeedbackSeverityLow: diff --git a/backend/internal/service/knowledge_space/fusion_service.go b/backend/internal/service/knowledge_space/fusion_service.go index a0caef71..18154f5c 100644 --- a/backend/internal/service/knowledge_space/fusion_service.go +++ b/backend/internal/service/knowledge_space/fusion_service.go @@ -2,6 +2,10 @@ package knowledge_space import ( "context" + "encoding/json" + "errors" + "math" + "sort" "strings" "time" @@ -9,8 +13,10 
@@ import ( models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" repo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/knowledge" "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/vectorstore" + "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" "github.com/ArtisanCloud/PowerX/pkg/event_bus" "github.com/google/uuid" + "gorm.io/datatypes" "gorm.io/gorm" ) @@ -19,6 +25,7 @@ type FusionService struct { db *gorm.DB inst *instrumentation.Instrumentation vectorStore vectorstore.Store + sparseIndex SparseIndex bus event_bus.EventBus eventTopic string clock func() time.Time @@ -29,6 +36,7 @@ type FusionServiceOptions struct { DB *gorm.DB Instrumentation *instrumentation.Instrumentation VectorStore vectorstore.Store + SparseIndex SparseIndex EventBus event_bus.EventBus EventTopic string Clock func() time.Time @@ -56,6 +64,7 @@ type RollbackStrategyInput struct { // FusionQueryInput defines retrieval parameters. type FusionQueryInput struct { SpaceID uuid.UUID + QueryText string Embedding []float32 Filters map[string]string TopK int @@ -64,16 +73,20 @@ type FusionQueryInput struct { // FusionQueryMatch describes fused retrieval output. type FusionQueryMatch struct { - ChunkID uuid.UUID - Score float64 - Source string - Metadata map[string]any + ChunkID uuid.UUID + Score float64 + Source string + RawScore float64 + NormScore float64 + Metadata map[string]any } // FusionQueryResult aggregates vector/lexical results. type FusionQueryResult struct { - StrategyID uint64 - Matches []FusionQueryMatch + StrategyID uint64 + Matches []FusionQueryMatch + Degraded bool + DegradeReasons []string } // NewFusionService constructs an instance. 
@@ -91,6 +104,7 @@ func NewFusionService(opts FusionServiceOptions) *FusionService { db: opts.DB, inst: opts.Instrumentation, vectorStore: opts.VectorStore, + sparseIndex: opts.SparseIndex, bus: opts.EventBus, eventTopic: strings.TrimSpace(opts.EventTopic), clock: opts.Clock, @@ -103,7 +117,10 @@ func (s *FusionService) PublishStrategy(ctx context.Context, in PublishStrategyI return nil, ErrInvalidInput } normalizedPolicy := normalizeConflictPolicy(in.ConflictPolicy) - bm25, vector := normalizeWeights(in.BM25Weight, in.VectorWeight) + requestedWeights := map[string]float64{ + "bm25": in.BM25Weight, + "vector": in.VectorWeight, + } var created *models.FusionStrategyVersion err := s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { @@ -126,6 +143,12 @@ func (s *FusionService) PublishStrategy(ctx context.Context, in PublishStrategyI return ErrFusionConflict } + available := s.availableSources(ctx) + weights, degradeReasons := normalizeAvailableWeights(requestedWeights, available) + if weights["bm25"]+weights["vector"] <= 0 { + return ErrInvalidInput + } + state := models.FusionDeploymentActive var publishedAt *time.Time if active != nil && normalizedPolicy == "queue" { @@ -137,16 +160,17 @@ func (s *FusionService) PublishStrategy(ctx context.Context, in PublishStrategyI } strategy := &models.FusionStrategyVersion{ - SpaceUUID: in.SpaceID, - Label: strings.TrimSpace(in.Label), - BM25Weight: bm25, - VectorWeight: vector, - GraphConstraint: strings.TrimSpace(in.GraphConstraint), - RerankerModel: strings.TrimSpace(in.RerankerModel), - ConflictPolicy: normalizedPolicy, - DeploymentState: state, - PublishedAt: publishedAt, - PublishedBy: in.RequestedBy, + SpaceUUID: in.SpaceID, + Label: strings.TrimSpace(in.Label), + BM25Weight: weights["bm25"], + VectorWeight: weights["vector"], + GraphConstraint: strings.TrimSpace(in.GraphConstraint), + RerankerModel: strings.TrimSpace(in.RerankerModel), + ConflictPolicy: normalizedPolicy, + DeploymentState: state, + PublishedAt: 
publishedAt, + PublishedBy: in.RequestedBy, + BenchmarkMetrics: strategyMetricsSnapshot(weights, available, degradeReasons), } if active != nil && state == models.FusionDeploymentActive { strategy.RollbackFromVersionID = &active.ID @@ -168,12 +192,17 @@ func (s *FusionService) PublishStrategy(ctx context.Context, in PublishStrategyI "bm25_weight": strategy.BM25Weight, "vector_weight": strategy.VectorWeight, "conflict_policy": strategy.ConflictPolicy, + "degraded": len(degradeReasons) > 0, + "degrade_reason": strings.Join(degradeReasons, ";"), } if err := s.writeAudit(ctx, tx, space, "fusion.published", in.RequestedBy, payload); err != nil { return err } if state == models.FusionDeploymentActive { s.publishFusionEvent(ctx, "published", space, strategy) + for _, reason := range degradeReasons { + s.publishFusionAlert(ctx, in.SpaceID, strategy.ID, "publish", reason, errors.New(reason)) + } } return nil }) @@ -262,7 +291,7 @@ func (s *FusionService) ListStrategies(ctx context.Context, space uuid.UUID, lim // Query merges retrieval outputs based on the active strategy. 
func (s *FusionService) Query(ctx context.Context, in FusionQueryInput) (FusionQueryResult, error) { - if in.SpaceID == uuid.Nil || len(in.Embedding) == 0 { + if in.SpaceID == uuid.Nil || (len(in.Embedding) == 0 && strings.TrimSpace(in.QueryText) == "") { return FusionQueryResult{}, ErrInvalidInput } strategy, err := repo.NewFusionStrategyRepository(s.db).FindActiveBySpace(ctx, in.SpaceID) @@ -272,47 +301,143 @@ func (s *FusionService) Query(ctx context.Context, in FusionQueryInput) (FusionQ if strategy == nil { return FusionQueryResult{}, ErrFusionStrategyNotFound } - if s.vectorStore == nil { - return FusionQueryResult{StrategyID: strategy.ID}, nil - } topK := in.TopK if topK <= 0 { topK = 10 } - resp, err := s.vectorStore.Query(ctx, vectorstore.QueryRequest{ - SpaceID: in.SpaceID, - Embedding: in.Embedding, - TopK: topK, - Filters: in.Filters, - MinScore: in.MinScore, - }) - if err != nil { - s.publishFusionAlert(ctx, in.SpaceID, err) - return FusionQueryResult{}, err + + weights, degradeReasons := strategyWeights(strategy) + available := s.availableSources(ctx) + weights, degradeReasons = normalizeAvailableWeights(weights, available, degradeReasons...) 
+ + type accum struct { + score float64 + perSource []FusionQueryMatch } - matches := make([]FusionQueryMatch, 0, len(resp.Matches)) - for _, match := range resp.Matches { - matches = append(matches, FusionQueryMatch{ - ChunkID: match.ChunkID, - Score: match.Score * strategy.VectorWeight, - Source: "vector", - Metadata: match.Metadata, + acc := make(map[uuid.UUID]*accum) + + vectorErr := error(nil) + if weights["vector"] > 0 && s.vectorStore != nil && len(in.Embedding) > 0 { + resp, err := s.vectorStore.Query(ctx, vectorstore.QueryRequest{ + SpaceID: in.SpaceID, + Embedding: in.Embedding, + TopK: topK, + Filters: in.Filters, + MinScore: in.MinScore, }) + if err != nil { + vectorErr = err + degradeReasons = append(degradeReasons, "vector_query_failed") + s.publishFusionAlert(ctx, in.SpaceID, strategy.ID, "vector", "vector_query_failed", err) + weights["vector"] = 0 + } else { + for _, match := range resp.Matches { + norm := clamp01(match.Score) + item := FusionQueryMatch{ + ChunkID: match.ChunkID, + RawScore: match.Score, + NormScore: norm, + Score: norm * weights["vector"], + Source: "vector", + Metadata: match.Metadata, + } + slot := acc[match.ChunkID] + if slot == nil { + slot = &accum{} + acc[match.ChunkID] = slot + } + slot.score += item.Score + slot.perSource = append(slot.perSource, item) + } + } + } + + sparseErr := error(nil) + queryText := strings.TrimSpace(in.QueryText) + if weights["bm25"] > 0 && s.sparseIndex != nil && queryText != "" { + resp, err := s.sparseIndex.Query(ctx, SparseQueryRequest{ + SpaceID: in.SpaceID, + Query: queryText, + TopK: topK, + Filters: in.Filters, + MinScore: in.MinScore, + }) + if err != nil { + sparseErr = err + degradeReasons = append(degradeReasons, "bm25_query_failed") + s.publishFusionAlert(ctx, in.SpaceID, strategy.ID, "bm25", "bm25_query_failed", err) + weights["bm25"] = 0 + } else { + for _, match := range resp.Matches { + norm := normalizeSparseScore(match.Score) + meta := match.Metadata + if meta == nil { + meta = 
map[string]any{} + } + if match.Provenance != nil { + meta["provenance"] = match.Provenance + } + item := FusionQueryMatch{ + ChunkID: match.ChunkID, + RawScore: match.Score, + NormScore: norm, + Score: norm * weights["bm25"], + Source: "bm25", + Metadata: meta, + } + slot := acc[match.ChunkID] + if slot == nil { + slot = &accum{} + acc[match.ChunkID] = slot + } + slot.score += item.Score + slot.perSource = append(slot.perSource, item) + } + } + } + + if weights["vector"] == 0 && weights["bm25"] == 0 { + if s.tryAutoRollback(ctx, strategy, in.SpaceID, []error{vectorErr, sparseErr}) { + return FusionQueryResult{}, ErrFusionConflict + } + return FusionQueryResult{}, errors.New("no available fusion sources") + } + + out := make([]FusionQueryMatch, 0, len(acc)) + for chunkID, slot := range acc { + best := FusionQueryMatch{ + ChunkID: chunkID, + Score: slot.score, + Source: "fused", + Metadata: map[string]any{"sources": slot.perSource}, + } + out = append(out, best) + } + sort.Slice(out, func(i, j int) bool { return out[i].Score > out[j].Score }) + if len(out) > topK { + out = out[:topK] } return FusionQueryResult{ - StrategyID: strategy.ID, - Matches: matches, + StrategyID: strategy.ID, + Matches: out, + Degraded: len(degradeReasons) > 0, + DegradeReasons: uniqueStrings(degradeReasons), }, nil } -func (s *FusionService) publishFusionAlert(ctx context.Context, space uuid.UUID, cause error) { +func (s *FusionService) publishFusionAlert(ctx context.Context, space uuid.UUID, strategyID uint64, source string, degradeReason string, cause error) { if s.bus == nil { return } + traceID := reqctx.GetTraceID(ctx) payload := map[string]any{ - "space_id": space.String(), - "event": "fusion.source.failed", - "error": cause.Error(), + "space_id": space.String(), + "strategy_id": strategyID, + "source": source, + "degrade_reason": degradeReason, + "trace_id": traceID, + "event": "fusion.source.failed", + "error": errString(cause), } topic := s.eventTopic if topic == "" { @@ -385,3 
+510,162 @@ func normalizeWeights(bm25, vector float64) (float64, float64) { } return bm25 / sum, vector / sum } + +func (s *FusionService) availableSources(ctx context.Context) map[string]bool { + available := map[string]bool{ + "vector": s.vectorStore != nil, + "bm25": s.sparseIndex != nil, + } + if s.vectorStore != nil { + if err := s.vectorStore.Health(ctx); err != nil { + available["vector"] = false + } + } + if s.sparseIndex != nil { + if err := s.sparseIndex.Health(ctx); err != nil { + available["bm25"] = false + } + } + return available +} + +type strategyMetrics struct { + Weights map[string]float64 `json:"weights"` + Available map[string]bool `json:"available"` + DegradeReasons []string `json:"degrade_reasons,omitempty"` +} + +func strategyMetricsSnapshot(weights map[string]float64, available map[string]bool, degradeReasons []string) datatypes.JSON { + snap := strategyMetrics{ + Weights: weights, + Available: available, + } + if len(degradeReasons) > 0 { + snap.DegradeReasons = uniqueStrings(degradeReasons) + } + raw, _ := json.Marshal(snap) + return datatypes.JSON(raw) +} + +func strategyWeights(strategy *models.FusionStrategyVersion) (map[string]float64, []string) { + if strategy == nil { + return map[string]float64{"bm25": 0, "vector": 0}, nil + } + weights := map[string]float64{ + "bm25": strategy.BM25Weight, + "vector": strategy.VectorWeight, + } + var snap strategyMetrics + if len(strategy.BenchmarkMetrics) > 0 && json.Unmarshal(strategy.BenchmarkMetrics, &snap) == nil { + if len(snap.Weights) > 0 { + for k, v := range snap.Weights { + weights[k] = v + } + } + return weights, snap.DegradeReasons + } + return weights, nil +} + +func normalizeAvailableWeights(weights map[string]float64, available map[string]bool, extraReasons ...string) (map[string]float64, []string) { + out := map[string]float64{} + reasons := append([]string{}, extraReasons...) 
+ for k, v := range weights { + if v < 0 { + v = 0 + } + if available != nil { + if ok, exists := available[k]; exists && !ok && v > 0 { + reasons = append(reasons, k+"_unavailable") + v = 0 + } + } + out[k] = v + } + sum := 0.0 + for _, v := range out { + sum += v + } + if sum <= 0 { + return out, uniqueStrings(reasons) + } + for k, v := range out { + out[k] = v / sum + } + return out, uniqueStrings(reasons) +} + +func uniqueStrings(items []string) []string { + if len(items) == 0 { + return nil + } + seen := make(map[string]struct{}, len(items)) + out := make([]string, 0, len(items)) + for _, item := range items { + item = strings.TrimSpace(item) + if item == "" { + continue + } + if _, ok := seen[item]; ok { + continue + } + seen[item] = struct{}{} + out = append(out, item) + } + return out +} + +func errString(err error) string { + if err == nil { + return "" + } + return err.Error() +} + +func clamp01(v float64) float64 { + if v < 0 { + return 0 + } + if v > 1 { + return 1 + } + return v +} + +func normalizeSparseScore(score float64) float64 { + if score <= 0 { + return 0 + } + // Normalize any positive score into (0,1) using a smooth curve. 
+ return score / (score + 1.0 + math.SmallestNonzeroFloat64) +} + +func (s *FusionService) tryAutoRollback(ctx context.Context, strategy *models.FusionStrategyVersion, space uuid.UUID, causes []error) bool { + if strategy == nil || strategy.RollbackFromVersionID == nil || *strategy.RollbackFromVersionID == 0 { + return false + } + if strategy.PublishedAt == nil { + return false + } + if s.clock().Sub(*strategy.PublishedAt) > 5*time.Minute { + return false + } + reason := "auto_rollback_on_source_failure" + for _, cause := range causes { + if cause != nil { + reason = reason + ":" + cause.Error() + break + } + } + _, err := s.RollbackStrategy(ctx, RollbackStrategyInput{ + SpaceID: space, + StrategyID: *strategy.RollbackFromVersionID, + RequestedBy: "auto", + }) + if err != nil { + s.publishFusionAlert(ctx, space, strategy.ID, "auto_rollback", "auto_rollback_failed", err) + return false + } + s.publishFusionAlert(ctx, space, strategy.ID, "auto_rollback", "auto_rollback_triggered", errors.New(reason)) + return true +} diff --git a/backend/internal/service/knowledge_space/ingestion_metrics.go b/backend/internal/service/knowledge_space/ingestion_metrics.go index d398f5bb..c912dcef 100644 --- a/backend/internal/service/knowledge_space/ingestion_metrics.go +++ b/backend/internal/service/knowledge_space/ingestion_metrics.go @@ -8,19 +8,37 @@ import ( "time" ) -const defaultMetricsPath = "reports/_state/knowledge-spaces.json" +func defaultMetricsPath() string { + if isTestBinary() { + return filepath.Join(projectTmpDir(), "reports", "_state", "knowledge-spaces.json") + } + return filepath.Join("reports", "_state", "knowledge-spaces.json") +} // IngestionSnapshot captures lightweight job stats for reporting. 
type IngestionSnapshot struct { - SpaceID string `json:"spaceId"` - JobID string `json:"jobId"` - ChunkTotal int `json:"chunkTotal"` - SummaryChunkCount int `json:"summaryChunkCount"` - ParagraphChunkCount int `json:"paragraphChunkCount"` - CoveragePct float64 `json:"coveragePct"` - EmbeddingPct float64 `json:"embeddingPct"` - MaskingPct float64 `json:"maskingPct"` - CompletedAt *time.Time `json:"completedAt"` + SpaceID string `json:"spaceId"` + JobID string `json:"jobId"` + Status string `json:"status"` + RetryCount int `json:"retryCount"` + ChunkTotal int `json:"chunkTotal"` + SummaryChunkCount int `json:"summaryChunkCount"` + ParagraphChunkCount int `json:"paragraphChunkCount"` + CoveragePct float64 `json:"coveragePct"` + EmbeddingPct float64 `json:"embeddingPct"` + MaskingPct float64 `json:"maskingPct"` + OCRRequired bool `json:"ocrRequired"` + OCRUsed bool `json:"ocrUsed"` + OCRCoveragePct float64 `json:"ocrCoveragePct"` + OCRConfidenceBuckets map[string]int `json:"ocrConfidenceBuckets,omitempty"` + OCRLatencyMs int64 `json:"ocrLatencyMs,omitempty"` + OCRPages int `json:"ocrPages,omitempty"` + OCRFailedPages int `json:"ocrFailedPages,omitempty"` + OCRBboxCoveragePct float64 `json:"ocrBboxCoveragePct,omitempty"` + Degraded bool `json:"degraded"` + ErrorCode string `json:"errorCode,omitempty"` + Reason string `json:"reason,omitempty"` + CompletedAt *time.Time `json:"completedAt"` } // FeedbackSnapshot captures feedback loop state. 
@@ -50,7 +68,7 @@ type IngestionMetricsWriter struct { func NewIngestionMetricsWriter(path string) *IngestionMetricsWriter { if path == "" { - path = defaultMetricsPath + path = defaultMetricsPath() } return &IngestionMetricsWriter{path: path} } diff --git a/backend/internal/service/knowledge_space/ingestion_service.go b/backend/internal/service/knowledge_space/ingestion_service.go index 26e213df..9ac931ab 100644 --- a/backend/internal/service/knowledge_space/ingestion_service.go +++ b/backend/internal/service/knowledge_space/ingestion_service.go @@ -2,13 +2,16 @@ package knowledge_space import ( "context" - "crypto/sha256" - "encoding/hex" "encoding/json" + "errors" "fmt" + "os" + "path/filepath" + "strconv" "strings" "time" + agentSvc "github.com/ArtisanCloud/PowerX/internal/service/agent" "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space/instrumentation" knowledge "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" repo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/knowledge" @@ -18,9 +21,15 @@ import ( ) var ( - allowedSourceTypes = map[string]bool{ + allowedFormats = map[string]bool{ "pdf": true, + "docx": true, + "xlsx": true, + "csv": true, "markdown": true, + "html": true, + "sql": true, + "image": true, "table": true, "api": true, } @@ -37,6 +46,47 @@ type IngestionService struct { inst *instrumentation.Instrumentation vectorStore vectorstore.Store metrics *IngestionMetricsWriter + + processors *ProcessorRegistry + artifactStore *ArtifactStore + maxRetries int + + agentSettings *agentSvc.AgentSettingService + vectorDimension int + progressPublisher IngestionProgressPublisher +} + +func (s *IngestionService) GetJob(ctx context.Context, spaceID uuid.UUID, jobUUID uuid.UUID) (*knowledge.IngestionJob, error) { + if spaceID == uuid.Nil || jobUUID == uuid.Nil { + return nil, ErrInvalidInput + } + j, err := repo.NewIngestionJobRepository(s.db).FindByUUID(ctx, jobUUID) + if err != nil { + return nil, err + 
} + if j == nil || j.SpaceUUID != spaceID { + return nil, nil + } + return j, nil +} + +func (s *IngestionService) ListJobs(ctx context.Context, spaceID uuid.UUID, limit int) ([]knowledge.IngestionJob, error) { + if spaceID == uuid.Nil { + return nil, ErrInvalidInput + } + if limit <= 0 { + limit = 20 + } + if limit > 200 { + limit = 200 + } + var jobs []knowledge.IngestionJob + err := s.db.WithContext(ctx). + Where("space_uuid = ?", spaceID). + Order("created_at DESC"). + Limit(limit). + Find(&jobs).Error + return jobs, err } // IngestionServiceOptions configures the ingestion service runtime. @@ -45,16 +95,46 @@ type IngestionServiceOptions struct { Instrumentation *instrumentation.Instrumentation VectorStore vectorstore.Store MetricsWriter *IngestionMetricsWriter + + Processors *ProcessorRegistry + ArtifactStore *ArtifactStore + MaxRetries int + + AgentSettings *agentSvc.AgentSettingService + VectorDimension int + ProgressPublisher IngestionProgressPublisher } // TriggerIngestionInput captures API payload used to start an ingestion job. type TriggerIngestionInput struct { - SpaceID uuid.UUID - SourceType string - SourceURI string - MaskingProfile string - Priority string - RequestedBy string + SpaceID uuid.UUID + // Format is the preferred field. SourceType is kept for backward compatibility. + Format string + SourceType string + SourceURI string + IngestionProfile string + ProcessorProfile string + OCRRequired bool + MaskingProfile string + Priority string + RequestedBy string + DocUUID string + // L1/L2/L3 snapshot (best-effort). + RagSceneKey string + RagBundleKey string + RagPrimary string + SegmentMode string + ChunkSize int + ChunkOverlap int + SegmentSizePolicy string + SegmentOrder []string + Separators []string + PagePriority bool + AnchorHeadingPath bool + AnchorClauseID bool + AnchorRowNumber bool + AnchorSpeaker bool + AnchorSentenceIndex bool } // NewIngestionService constructs a service instance. 
@@ -66,13 +146,34 @@ func NewIngestionService(opts IngestionServiceOptions) *IngestionService { opts.Instrumentation = instrumentation.New(instrumentation.Options{}) } if opts.MetricsWriter == nil { - opts.MetricsWriter = NewIngestionMetricsWriter(defaultMetricsPath) + opts.MetricsWriter = NewIngestionMetricsWriter("") + } + if opts.Processors == nil { + opts.Processors = NewProcessorRegistry() + } + if opts.ArtifactStore == nil { + opts.ArtifactStore = NewArtifactStore(ArtifactStoreOptions{}) + } + maxRetries := opts.MaxRetries + if maxRetries < 0 { + maxRetries = 0 } return &IngestionService{ - db: opts.DB, - inst: opts.Instrumentation, - vectorStore: opts.VectorStore, - metrics: opts.MetricsWriter, + db: opts.DB, + inst: opts.Instrumentation, + vectorStore: opts.VectorStore, + metrics: opts.MetricsWriter, + processors: opts.Processors, + artifactStore: opts.ArtifactStore, + maxRetries: maxRetries, + agentSettings: opts.AgentSettings, + vectorDimension: func() int { + if opts.VectorDimension > 0 { + return opts.VectorDimension + } + return 0 + }(), + progressPublisher: opts.ProgressPublisher, } } @@ -81,8 +182,11 @@ func (s *IngestionService) Trigger(ctx context.Context, in TriggerIngestionInput if in.SpaceID == uuid.Nil || strings.TrimSpace(in.SourceURI) == "" { return nil, ErrInvalidInput } - sourceType := strings.ToLower(in.SourceType) - if !allowedSourceTypes[sourceType] { + format := strings.ToLower(strings.TrimSpace(in.Format)) + if format == "" { + format = strings.ToLower(strings.TrimSpace(in.SourceType)) + } + if !allowedFormats[format] { return nil, ErrInvalidInput } priority := strings.ToLower(strings.TrimSpace(in.Priority)) @@ -94,120 +198,748 @@ func (s *IngestionService) Trigger(ctx context.Context, in TriggerIngestionInput } logger := s.inst.Logger(ctx) - logger.InfoF(ctx, "[ingestion] trigger space=%s source=%s type=%s", in.SpaceID, in.SourceURI, sourceType) + logger.InfoF(ctx, "[ingestion] trigger space=%s source=%s format=%s", in.SpaceID, 
in.SourceURI, format) + + spaceRepo := repo.NewKnowledgeSpaceRepository(s.db) + space, err := spaceRepo.FindByUUID(ctx, in.SpaceID) + if err != nil { + return nil, err + } + if space == nil || space.Status == knowledge.KnowledgeSpaceStatusRetired { + return nil, ErrSpaceNotFound + } + if err := s.ensureEmbeddingReady(ctx, space); err != nil { + return nil, err + } - chunkSet := synthesizeChunks(in.SpaceID) - var job *knowledge.IngestionJob - err := s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { - spaces := repo.NewKnowledgeSpaceRepository(tx) + now := time.Now() + job := &knowledge.IngestionJob{ + SpaceUUID: in.SpaceID, + SourceID: stableSourceID(in.SourceURI), + SourceType: format, + Status: knowledge.IngestionStatusRunning, + Priority: priority, + SubmittedBy: in.RequestedBy, + StartedAt: &now, + } + bundle := &knowledge.ArtifactBundle{ + IngestionJobID: 0, + ChunkManifestURI: "minio://powerx-knowledge/pending/chunks.json", + VectorManifestURI: "minio://powerx-knowledge/pending/vectors.json", + MaskingReportURI: "minio://powerx-knowledge/pending/masking.json", + Checksum: strings.Repeat("0", 64), + StorageClass: "standard", + Status: knowledge.ArtifactBundleStatusActive, + } + err = s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { jobs := repo.NewIngestionJobRepository(tx) bundles := repo.NewArtifactBundleRepository(tx) - space, err := spaces.FindByUUID(ctx, in.SpaceID) + createdJob, err := jobs.Create(ctx, job) + if err != nil { + return err + } + + bundle.IngestionJobID = createdJob.ID + createdBundle, err := bundles.Create(ctx, bundle) if err != nil { return err } - if space == nil || space.Status == knowledge.KnowledgeSpaceStatusRetired { - return ErrSpaceNotFound + createdJob.ArtifactBundleID = &createdBundle.ID + updatedJob, err := jobs.Update(ctx, createdJob) + if err != nil { + return err } + job = updatedJob + bundle = createdBundle + return nil + }) + if err != nil { + return nil, err + } - now := time.Now() - job = 
&knowledge.IngestionJob{ - SpaceUUID: in.SpaceID, - SourceID: fmt.Sprintf("src-%s", uuid.NewString()), - SourceType: sourceType, - Status: knowledge.IngestionStatusRunning, - Priority: priority, - SubmittedBy: in.RequestedBy, - StartedAt: &now, - ChunkTotal: chunkSet.total, - SummaryChunkCount: chunkSet.summaryCount, - ParagraphChunkCount: chunkSet.paragraphCount, - ChunkCoveredPct: 100.0, - EmbeddingSuccessPct: 0, - MaskingCoveragePct: 100.0, - } - if job, err = jobs.Create(ctx, job); err != nil { + outcome, chunks, vectorRecords, ocrArtifacts := s.runPipeline(ctx, pipelineInput{ + space: space, + job: job, + bundle: bundle, + format: format, + sourceURI: in.SourceURI, + docUUID: in.DocUUID, + ingestionProfile: in.IngestionProfile, + processorProfile: in.ProcessorProfile, + ocrRequired: in.OCRRequired, + maskingProfile: in.MaskingProfile, + ragSceneKey: strings.TrimSpace(in.RagSceneKey), + ragBundleKey: strings.TrimSpace(in.RagBundleKey), + ragPrimary: strings.TrimSpace(in.RagPrimary), + segmentMode: in.SegmentMode, + chunkSize: in.ChunkSize, + chunkOverlap: in.ChunkOverlap, + segmentSizePolicy: in.SegmentSizePolicy, + segmentOrder: in.SegmentOrder, + separators: in.Separators, + anchorHeadingPath: in.AnchorHeadingPath, + anchorClauseID: in.AnchorClauseID, + anchorRowNumber: in.AnchorRowNumber, + anchorSpeaker: in.AnchorSpeaker, + anchorSentenceIndex: in.AnchorSentenceIndex, + }) + return s.finalizeIngestion(ctx, space, job, bundle, format, in, outcome, chunks, vectorRecords, ocrArtifacts) +} + +// TriggerWithDocUnits runs ingestion using already-normalized document units (e.g. API connectors). +// This is used by SpaceSyncJob runners so external sources can reuse the same chunking/masking/vectorstore pipeline. 
+func (s *IngestionService) TriggerWithDocUnits(ctx context.Context, in TriggerIngestionInput, docUnits []DocumentUnit) (*knowledge.IngestionJob, error) { + if in.SpaceID == uuid.Nil || strings.TrimSpace(in.SourceURI) == "" { + return nil, ErrInvalidInput + } + format := strings.ToLower(strings.TrimSpace(in.Format)) + if format == "" { + format = strings.ToLower(strings.TrimSpace(in.SourceType)) + } + if !allowedFormats[format] { + return nil, ErrInvalidInput + } + priority := strings.ToLower(strings.TrimSpace(in.Priority)) + if priority == "" { + priority = "normal" + } + if !allowedPriority[priority] { + return nil, ErrInvalidInput + } + + logger := s.inst.Logger(ctx) + logger.InfoF(ctx, "[ingestion] trigger(units) space=%s source=%s format=%s units=%d", in.SpaceID, in.SourceURI, format, len(docUnits)) + + spaceRepo := repo.NewKnowledgeSpaceRepository(s.db) + space, err := spaceRepo.FindByUUID(ctx, in.SpaceID) + if err != nil { + return nil, err + } + if space == nil || space.Status == knowledge.KnowledgeSpaceStatusRetired { + return nil, ErrSpaceNotFound + } + if err := s.ensureEmbeddingReady(ctx, space); err != nil { + return nil, err + } + + now := time.Now() + job := &knowledge.IngestionJob{ + SpaceUUID: in.SpaceID, + SourceID: stableSourceID(in.SourceURI), + SourceType: format, + Status: knowledge.IngestionStatusRunning, + Priority: priority, + SubmittedBy: in.RequestedBy, + StartedAt: &now, + } + bundle := &knowledge.ArtifactBundle{ + IngestionJobID: 0, + ChunkManifestURI: "minio://powerx-knowledge/pending/chunks.json", + VectorManifestURI: "minio://powerx-knowledge/pending/vectors.json", + MaskingReportURI: "minio://powerx-knowledge/pending/masking.json", + Checksum: strings.Repeat("0", 64), + StorageClass: "standard", + Status: knowledge.ArtifactBundleStatusActive, + } + err = s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + jobs := repo.NewIngestionJobRepository(tx) + bundles := repo.NewArtifactBundleRepository(tx) + + createdJob, err := 
jobs.Create(ctx, job) + if err != nil { + return err + } + bundle.IngestionJobID = createdJob.ID + createdBundle, err := bundles.Create(ctx, bundle) + if err != nil { return err } + createdJob.ArtifactBundleID = &createdBundle.ID + updatedJob, err := jobs.Update(ctx, createdJob) + if err != nil { + return err + } + job = updatedJob + bundle = createdBundle + return nil + }) + if err != nil { + return nil, err + } + + outcome, chunks, vectorRecords := s.runPipelineFromUnits(ctx, pipelineUnitsInput{ + space: space, + job: job, + bundle: bundle, + format: format, + sourceURI: in.SourceURI, + docUUID: in.DocUUID, + docUnits: docUnits, + maskingProfile: in.MaskingProfile, + ocrRequired: in.OCRRequired, + ragSceneKey: strings.TrimSpace(in.RagSceneKey), + ragBundleKey: strings.TrimSpace(in.RagBundleKey), + ragPrimary: strings.TrimSpace(in.RagPrimary), + segmentMode: in.SegmentMode, + chunkSize: in.ChunkSize, + chunkOverlap: in.ChunkOverlap, + segmentSizePolicy: in.SegmentSizePolicy, + segmentOrder: in.SegmentOrder, + separators: in.Separators, + pagePriority: in.PagePriority, + anchorHeadingPath: in.AnchorHeadingPath, + anchorClauseID: in.AnchorClauseID, + anchorRowNumber: in.AnchorRowNumber, + anchorSpeaker: in.AnchorSpeaker, + anchorSentenceIndex: in.AnchorSentenceIndex, + }) + return s.finalizeIngestion(ctx, space, job, bundle, format, in, outcome, chunks, vectorRecords, nil) +} - bundle := &knowledge.ArtifactBundle{ - IngestionJobID: job.ID, - ChunkManifestURI: fmt.Sprintf("memory://knowledge/%s/jobs/%d/chunks.json", in.SpaceID, job.ID), - VectorManifestURI: fmt.Sprintf("memory://knowledge/%s/jobs/%d/vectors.json", in.SpaceID, job.ID), - GraphManifestURI: "", - MaskingReportURI: fmt.Sprintf("memory://knowledge/%s/jobs/%d/masking.json", in.SpaceID, job.ID), - SummaryChunkCount: chunkSet.summaryCount, - ParagraphChunkCount: chunkSet.paragraphCount, - Checksum: chunkSet.checksum, - StorageClass: "standard", +// TriggerAsync creates an ingestion job and runs the pipeline 
in background. +// It is intended for HTTP/API usage so UI can poll job status asynchronously. +func (s *IngestionService) TriggerAsync(ctx context.Context, in TriggerIngestionInput) (*knowledge.IngestionJob, error) { + if strings.EqualFold(strings.TrimSpace(os.Getenv("POWERX_INGESTION_SYNC")), "1") || + strings.EqualFold(strings.TrimSpace(os.Getenv("POWERX_INGESTION_SYNC")), "true") { + return s.Trigger(ctx, in) + } + if in.SpaceID == uuid.Nil || strings.TrimSpace(in.SourceURI) == "" { + return nil, ErrInvalidInput + } + format := strings.ToLower(strings.TrimSpace(in.Format)) + if format == "" { + format = strings.ToLower(strings.TrimSpace(in.SourceType)) + } + if !allowedFormats[format] { + return nil, ErrInvalidInput + } + priority := strings.ToLower(strings.TrimSpace(in.Priority)) + if priority == "" { + priority = "normal" + } + if !allowedPriority[priority] { + return nil, ErrInvalidInput + } + + spaceRepo := repo.NewKnowledgeSpaceRepository(s.db) + space, err := spaceRepo.FindByUUID(ctx, in.SpaceID) + if err != nil { + return nil, err + } + if space == nil || space.Status == knowledge.KnowledgeSpaceStatusRetired { + return nil, ErrSpaceNotFound + } + if err := s.ensureEmbeddingReady(ctx, space); err != nil { + return nil, err + } + + job := &knowledge.IngestionJob{ + SpaceUUID: in.SpaceID, + SourceID: stableSourceID(in.SourceURI), + SourceType: format, + Status: knowledge.IngestionStatusPending, + Priority: priority, + SubmittedBy: in.RequestedBy, + } + bundle := &knowledge.ArtifactBundle{ + IngestionJobID: 0, + ChunkManifestURI: "minio://powerx-knowledge/pending/chunks.json", + VectorManifestURI: "minio://powerx-knowledge/pending/vectors.json", + MaskingReportURI: "minio://powerx-knowledge/pending/masking.json", + Checksum: strings.Repeat("0", 64), + StorageClass: "standard", + Status: knowledge.ArtifactBundleStatusActive, + } + err = s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + jobs := repo.NewIngestionJobRepository(tx) + bundles := 
repo.NewArtifactBundleRepository(tx) + + createdJob, err := jobs.Create(ctx, job) + if err != nil { + return err } - if bundle, err = bundles.Create(ctx, bundle); err != nil { + bundle.IngestionJobID = createdJob.ID + createdBundle, err := bundles.Create(ctx, bundle) + if err != nil { return err } - job.ArtifactBundleID = &bundle.ID - if job, err = jobs.Update(ctx, job); err != nil { + createdJob.ArtifactBundleID = &createdBundle.ID + updatedJob, err := jobs.Update(ctx, createdJob) + if err != nil { return err } + job = updatedJob + bundle = createdBundle return nil }) if err != nil { return nil, err } - vectorErr := s.persistVectors(ctx, in.SpaceID, chunkSet.records) + // Run ingestion in background. Do not inherit request context cancellation. + go func() { + bg := context.Background() + logger := s.inst.Logger(bg) + logger.InfoF(bg, "[ingestion] async start space=%s job=%s source=%s format=%s", in.SpaceID, job.UUID, in.SourceURI, format) + // Mark running early so UI doesn't stay in pending while processing. + now := time.Now() + job.Status = knowledge.IngestionStatusRunning + job.StartedAt = &now + _, _ = repo.NewIngestionJobRepository(s.db).Update(bg, job) + s.emitProgress(bg, job, "start", 0, 0, 0, 0, space.TenantUUID) + // Run the same pipeline and update job in DB. 
+ outcome, chunks, vectors, ocrArtifacts := s.runPipeline(bg, pipelineInput{ + space: space, + job: job, + bundle: bundle, + format: format, + sourceURI: in.SourceURI, + ingestionProfile: in.IngestionProfile, + processorProfile: in.ProcessorProfile, + ocrRequired: in.OCRRequired, + maskingProfile: in.MaskingProfile, + ragSceneKey: strings.TrimSpace(in.RagSceneKey), + ragBundleKey: strings.TrimSpace(in.RagBundleKey), + ragPrimary: strings.TrimSpace(in.RagPrimary), + segmentMode: in.SegmentMode, + chunkSize: in.ChunkSize, + chunkOverlap: in.ChunkOverlap, + segmentSizePolicy: in.SegmentSizePolicy, + segmentOrder: in.SegmentOrder, + separators: in.Separators, + pagePriority: in.PagePriority, + anchorHeadingPath: in.AnchorHeadingPath, + anchorClauseID: in.AnchorClauseID, + anchorRowNumber: in.AnchorRowNumber, + anchorSpeaker: in.AnchorSpeaker, + anchorSentenceIndex: in.AnchorSentenceIndex, + }) + if _, err := s.finalizeIngestion(bg, space, job, bundle, format, in, outcome, chunks, vectors, ocrArtifacts); err != nil { + logger.ErrorF(bg, "[ingestion] async finalize failed job=%s err=%v", job.UUID, err) + } + }() + + return job, nil +} + +var ErrIngestionDegraded = errors.New("ingestion degraded") + +func (s *IngestionService) finalizeIngestion( + ctx context.Context, + space *knowledge.KnowledgeSpace, + job *knowledge.IngestionJob, + bundle *knowledge.ArtifactBundle, + format string, + in TriggerIngestionInput, + outcome pipelineOutcome, + chunks []IngestionChunk, + vectorRecords []vectorstore.VectorRecord, + ocrArtifacts *OCRArtifacts, +) (*knowledge.IngestionJob, error) { + if job == nil { + return nil, ErrInvalidInput + } + if job.StartedAt == nil { + now := time.Now() + job.StartedAt = &now + } + if strings.TrimSpace(job.Status) == "" || job.Status == knowledge.IngestionStatusPending { + job.Status = knowledge.IngestionStatusRunning + _, _ = repo.NewIngestionJobRepository(s.db).Update(ctx, job) + } + + s.writeIngestionSegmentLog(format, in, outcome, chunks, job) + + 
// Persist online chunk store (best-effort). This is the editable truth source for chunk text + metadata. + // If the chunk store is not enabled in this environment, ingestion should continue (manifest remains available). + if len(chunks) > 0 { + now := time.Now() + rows := make([]knowledge.KnowledgeChunk, 0, len(chunks)) + for i := range chunks { + ch := &chunks[i] + meta := make(map[string]any, len(ch.Metadata)+3) + for k, v := range ch.Metadata { + meta[k] = v + } + meta["job_uuid"] = job.UUID.String() + meta["masked"] = ch.Masked + meta["confidence"] = ch.Confidence + + metaBytes, err := json.Marshal(meta) + if err != nil { + metaBytes = []byte(`{}`) + } + rows = append(rows, knowledge.KnowledgeChunk{ + SpaceUUID: in.SpaceID, + ChunkUUID: ch.ID, + JobUUID: &job.UUID, + Kind: ch.Kind, + Content: ch.Content, + Metadata: metaBytes, + CreatedAt: now, + UpdatedAt: now, + }) + // Reflect back to in-memory chunk metadata so manifests and vector metadata stay aligned. + ch.Metadata = meta + } + if err := repo.NewKnowledgeChunkRepository(s.db).UpsertMany(ctx, rows); err != nil { + if !isUndefinedTableError(err) { + if s.inst != nil { + s.inst.Logger(ctx).WarnF(ctx, "[ingestion] upsert knowledge_chunks failed: %v", err) + } + } + } + } + + if s.artifactStore != nil && bundle != nil { + if artifactUpdate, err := s.artifactStore.Write(ctx, ArtifactWriteInput{ + SpaceID: in.SpaceID, + JobUUID: job.UUID, + JobID: job.ID, + Format: format, + SourceURI: in.SourceURI, + Chunks: chunks, + VectorRecords: vectorRecords, + MaskingProfile: in.MaskingProfile, + Outcome: outcome, + OCRArtifacts: ocrArtifacts, + }); err == nil { + bundle.ChunkManifestURI = artifactUpdate.ChunkManifestURI + bundle.VectorManifestURI = artifactUpdate.VectorManifestURI + bundle.MaskingReportURI = artifactUpdate.MaskingReportURI + bundle.OCRPageImagesURI = artifactUpdate.OCRPageImagesURI + bundle.OCRRawManifestURI = artifactUpdate.OCRRawManifestURI + bundle.OCRSearchablePDFURI = 
artifactUpdate.OCRSearchablePDFURI + bundle.Checksum = artifactUpdate.Checksum + bundle.SummaryChunkCount = outcome.summaryCount + bundle.ParagraphChunkCount = outcome.chunkCount + _, _ = repo.NewArtifactBundleRepository(s.db).Update(ctx, bundle) + } + } + + vectorErr := s.persistWithRetry(ctx, in.SpaceID, vectorRecords, job, outcome, space.TenantUUID) + s.emitProgress(ctx, job, "persist", 95, outcome.totalChunks, outcome.embeddingPct, outcome.maskingPct, space.TenantUUID) + if errors.Is(vectorErr, ErrVectorIndexNotActivated) && !outcome.degraded { + outcome.degraded = true + if strings.TrimSpace(outcome.errorCode) == "" { + outcome.errorCode = "vector_index_not_activated" + } + if strings.TrimSpace(outcome.reason) == "" { + outcome.reason = "no_active_vector_index" + } + } completed := time.Now() job.CompletedAt = &completed - if vectorErr != nil { + + job.ChunkTotal = outcome.totalChunks + job.SummaryChunkCount = outcome.summaryCount + job.ParagraphChunkCount = outcome.chunkCount + job.ChunkCoveredPct = outcome.coveragePct + job.MaskingCoveragePct = outcome.maskingPct + job.EmbeddingSuccessPct = outcome.embeddingPct + + if outcome.status == knowledge.IngestionStatusBlocked { + job.Status = knowledge.IngestionStatusBlocked + job.ErrorCode = outcome.errorCode + job.BlockedReason = outcome.reason + job.EmbeddingSuccessPct = 0 + job.MetricsSnapshot = mustJSON(outcome.snapshot(completed)) + _, _ = repo.NewIngestionJobRepository(s.db).Update(ctx, job) + s.emitProgress(ctx, job, "finalize", 100, outcome.totalChunks, outcome.embeddingPct, outcome.maskingPct, space.TenantUUID) + s.emitMetrics(job, outcome) + return job, nil + } + + if vectorErr != nil && !errors.Is(vectorErr, ErrIngestionDegraded) && !errors.Is(vectorErr, ErrVectorIndexNotActivated) { job.Status = knowledge.IngestionStatusFailed job.ErrorCode = "vector_upsert_failed" job.BlockedReason = vectorErr.Error() job.EmbeddingSuccessPct = 0 - } else { + } + if job.Status != knowledge.IngestionStatusFailed { 
job.Status = knowledge.IngestionStatusCompleted - job.EmbeddingSuccessPct = 100.0 - job.ErrorCode = "" - job.BlockedReason = "" - } - job.MetricsSnapshot = mustJSON(map[string]any{ - "source_uri": in.SourceURI, - "chunk_total": chunkSet.total, - "completed": completed, - }) + if outcome.degraded { + job.ErrorCode = outcome.errorCode + job.BlockedReason = outcome.reason + } else { + job.ErrorCode = "" + job.BlockedReason = "" + } + } + + job.MetricsSnapshot = mustJSON(outcome.snapshot(completed)) if _, err := repo.NewIngestionJobRepository(s.db).Update(ctx, job); err != nil { return nil, err } - s.emitMetrics(job) + s.emitProgress(ctx, job, "finalize", 100, outcome.totalChunks, outcome.embeddingPct, outcome.maskingPct, space.TenantUUID) + s.emitMetrics(job, outcome) - if vectorErr != nil { + if vectorErr != nil && !errors.Is(vectorErr, ErrIngestionDegraded) && !errors.Is(vectorErr, ErrVectorIndexNotActivated) { return job, vectorErr } return job, nil } -func (s *IngestionService) persistVectors(ctx context.Context, space uuid.UUID, records []vectorstore.VectorRecord) error { - if s.vectorStore == nil || len(records) == 0 { +func (s *IngestionService) writeIngestionSegmentLog(format string, in TriggerIngestionInput, outcome pipelineOutcome, chunks []IngestionChunk, job *knowledge.IngestionJob) { + if job == nil { + return + } + baseDir := filepath.Join("backend", "logs", "ingestion_jobs") + if wd, err := os.Getwd(); err == nil && strings.HasSuffix(wd, string(os.PathSeparator)+"backend") { + baseDir = filepath.Join("logs", "ingestion_jobs") + } + if err := os.MkdirAll(baseDir, 0o755); err != nil { + return + } + path := filepath.Join(baseDir, job.UUID.String()+".log") + f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) + if err != nil { + return + } + defer f.Close() + + anchorFlags := []string{} + if in.AnchorHeadingPath { + anchorFlags = append(anchorFlags, "heading_path") + } + if in.AnchorClauseID { + anchorFlags = append(anchorFlags, 
"clause_id") + } + if in.AnchorRowNumber { + anchorFlags = append(anchorFlags, "row_number") + } + if in.AnchorSpeaker { + anchorFlags = append(anchorFlags, "speaker") + } + if in.AnchorSentenceIndex { + anchorFlags = append(anchorFlags, "sentence_idx") + } + + kindCounts := map[string]int{} + segmentCounts := map[int]int{} + pageCounts := map[int]int{} + for _, ch := range chunks { + kind := strings.TrimSpace(ch.Kind) + if kind == "" { + kind = "unknown" + } + kindCounts[kind]++ + if mi := ch.Metadata; mi != nil { + if v, ok := mi["segment_part"]; ok { + if n := parseAnyInt(v); n > 0 { + segmentCounts[n]++ + } + } + if prov, ok := mi["provenance"].(map[string]any); ok { + if n := parseAnyInt(prov["page"]); n > 0 { + pageCounts[n]++ + } else if pages, ok := prov["pages"].([]any); ok { + for _, p := range pages { + if pm, ok := p.(map[string]any); ok { + if pn := parseAnyInt(pm["page_number"]); pn > 0 { + pageCounts[pn]++ + } + } + } + } + } + } + } + + ts := time.Now().Format(time.RFC3339) + _, _ = fmt.Fprintf( + f, + "[%s] job=%s status=%s format=%s source=%s\n", + ts, + job.UUID, + job.Status, + strings.TrimSpace(format), + strings.TrimSpace(in.SourceURI), + ) + _, _ = fmt.Fprintf( + f, + "segment: page_priority=%t order=%v mode=%s size_policy=%s chunk_size=%d overlap=%d separators=%v anchors=%v\n", + in.PagePriority, + in.SegmentOrder, + strings.TrimSpace(in.SegmentMode), + normalizeSegmentSizePolicy(in.SegmentSizePolicy, in.ChunkSize), + in.ChunkSize, + in.ChunkOverlap, + in.Separators, + anchorFlags, + ) + _, _ = fmt.Fprintf( + f, + "outcome: status=%s chunks=%d content_chunks=%d summary_chunks=%d coverage=%.2f%%\n", + outcome.status, + outcome.totalChunks, + outcome.chunkCount, + outcome.summaryCount, + outcome.coveragePct, + ) + _, _ = fmt.Fprintf(f, "chunk_kinds: %v\n", kindCounts) + if len(segmentCounts) > 0 { + _, _ = fmt.Fprintf(f, "segment_parts: %v\n", segmentCounts) + } + if len(pageCounts) > 0 { + _, _ = fmt.Fprintf(f, "pages: %v\n", pageCounts) + } 
+ _, _ = fmt.Fprintln(f, "----") +} + +func parseAnyInt(v any) int { + switch t := v.(type) { + case int: + return t + case int64: + return int(t) + case float64: + return int(t) + case string: + n, _ := strconv.Atoi(strings.TrimSpace(t)) + return n + default: + return 0 + } +} + +func (s *IngestionService) emitProgress(ctx context.Context, job *knowledge.IngestionJob, stage string, progress int, chunkTotal int, embeddingPct float64, maskingPct float64, tenantUUID string) { + if s == nil || s.progressPublisher == nil || job == nil { + return + } + if progress < 0 { + progress = 0 + } + if progress > 100 { + progress = 100 + } + update := IngestionProgressUpdate{ + TenantUUID: strings.TrimSpace(tenantUUID), + SpaceUUID: job.SpaceUUID.String(), + JobUUID: job.UUID.String(), + Status: job.Status, + Stage: strings.TrimSpace(stage), + Progress: progress, + ChunkTotal: chunkTotal, + EmbeddingPct: embeddingPct, + MaskingPct: maskingPct, + UpdatedAt: time.Now().UTC(), + } + s.progressPublisher.PublishIngestionProgress(ctx, update) +} + +func (s *IngestionService) persistWithRetry(ctx context.Context, space uuid.UUID, records []vectorstore.VectorRecord, job *knowledge.IngestionJob, outcome pipelineOutcome, tenantUUID string) error { + if outcome.status == knowledge.IngestionStatusBlocked || s.vectorStore == nil || len(records) == 0 { return nil } - return s.vectorStore.Upsert(ctx, space, records) + const ( + persistProgressStart = 85 + persistProgressEnd = 95 + persistProgressStep = 2 + defaultBatchSize = 128 + ) + batchSize := defaultBatchSize + if batchSize <= 0 { + batchSize = 128 + } + var lastErr error + for attempt := 0; attempt <= s.maxRetries; attempt++ { + if attempt > 0 && job != nil { + job.RetryCount = attempt + job.Status = knowledge.IngestionStatusRetrying + _, _ = repo.NewIngestionJobRepository(s.db).Update(ctx, job) + } + lastPersistProgress := -1 + emitPersistProgress := func(done, total int) { + if job == nil || total <= 0 { + return + } + if done > total 
{ + done = total + } + progress := persistProgressStart + int(float64(persistProgressEnd-persistProgressStart)*float64(done)/float64(total)+0.5) + if progress < persistProgressStart { + progress = persistProgressStart + } + if progress > persistProgressEnd { + progress = persistProgressEnd + } + if lastPersistProgress >= 0 && progress-lastPersistProgress < persistProgressStep && done < total { + return + } + lastPersistProgress = progress + s.emitProgress(ctx, job, "persist", progress, outcome.totalChunks, outcome.embeddingPct, outcome.maskingPct, strings.TrimSpace(tenantUUID)) + } + emitPersistProgress(0, len(records)) + + done := 0 + for start := 0; start < len(records); start += batchSize { + end := start + batchSize + if end > len(records) { + end = len(records) + } + if err := s.vectorStore.Upsert(ctx, space, records[start:end]); err != nil { + // Space 未激活 dense index:允许入库完成,但标记为 degraded(不写向量)。 + if errors.Is(err, ErrVectorIndexNotActivated) { + return ErrVectorIndexNotActivated + } + lastErr = err + if attempt < s.maxRetries { + time.Sleep(10 * time.Millisecond) + goto retry + } + // Best-effort compensation. 
+ var chunkIDs []uuid.UUID + for _, rec := range records { + chunkIDs = append(chunkIDs, rec.ChunkID) + } + _ = s.vectorStore.DeleteByChunkIDs(ctx, space, chunkIDs) + return err + } + done += end - start + emitPersistProgress(done, len(records)) + } + lastErr = nil + break + retry: + } + + if outcome.degraded { + return ErrIngestionDegraded + } + return lastErr } -func (s *IngestionService) emitMetrics(job *knowledge.IngestionJob) { +func (s *IngestionService) emitMetrics(job *knowledge.IngestionJob, outcome pipelineOutcome) { if job == nil { return } if s.metrics != nil { _ = s.metrics.Store(IngestionSnapshot{ - SpaceID: job.SpaceUUID.String(), - JobID: job.UUID.String(), - ChunkTotal: job.ChunkTotal, - SummaryChunkCount: job.SummaryChunkCount, - ParagraphChunkCount: job.ParagraphChunkCount, - CoveragePct: job.ChunkCoveredPct, - EmbeddingPct: job.EmbeddingSuccessPct, - MaskingPct: job.MaskingCoveragePct, - CompletedAt: job.CompletedAt, + SpaceID: job.SpaceUUID.String(), + JobID: job.UUID.String(), + Status: job.Status, + RetryCount: job.RetryCount, + ChunkTotal: job.ChunkTotal, + SummaryChunkCount: job.SummaryChunkCount, + ParagraphChunkCount: job.ParagraphChunkCount, + CoveragePct: job.ChunkCoveredPct, + EmbeddingPct: job.EmbeddingSuccessPct, + MaskingPct: job.MaskingCoveragePct, + OCRRequired: outcome.ocrRequired, + OCRUsed: outcome.ocrUsed, + OCRCoveragePct: outcome.ocrCoveragePct, + OCRConfidenceBuckets: outcome.ocrConfidenceBuckets, + OCRLatencyMs: outcome.ocrLatencyMs, + OCRPages: outcome.ocrPageCount, + OCRFailedPages: outcome.ocrFailedPages, + OCRBboxCoveragePct: outcome.ocrBboxCoveragePct, + Degraded: outcome.degraded, + ErrorCode: job.ErrorCode, + Reason: job.BlockedReason, + CompletedAt: job.CompletedAt, }) } s.inst.RecordIngestionCoverage(job.ChunkCoveredPct) @@ -229,62 +961,92 @@ func (s *IngestionService) DropSpaceVectors(ctx context.Context, space uuid.UUID return s.vectorStore.DropSpace(ctx, space) } -type chunkBatch struct { - records 
[]vectorstore.VectorRecord - summaryCount int - paragraphCount int - total int - checksum string -} - -func synthesizeChunks(space uuid.UUID) chunkBatch { - summary := 3 - paragraph := 6 - records := make([]vectorstore.VectorRecord, 0, summary+paragraph) - var hashes []byte - for i := 0; i < summary; i++ { - chunkID := uuid.New() - records = append(records, vectorstore.VectorRecord{ - ChunkID: chunkID, - Embedding: fakeEmbedding(i, 32), - Metadata: map[string]any{ - "chunk_kind": "summary", - "space_id": space.String(), - }, - }) - hashes = append(hashes, chunkID[:]...) +type DeleteIngestionJobResult struct { + Deleted bool `json:"deleted"` + DeletedChunks int `json:"deletedChunks"` + DeletedVectors int `json:"deletedVectors"` + DeletedArtifacts bool `json:"deletedArtifacts"` +} + +// DeleteJobPurge removes an ingestion job and best-effort clears derived data: +// - knowledge_chunks rows for the job (when table exists) +// - vector records for those chunks (when vector store is enabled) +// - artifact bundle record +// - local artifact directory (filesystem-backed ArtifactStore) +// +// This is intended for admin-only tooling / UI cleanup. +func (s *IngestionService) DeleteJobPurge(ctx context.Context, spaceID uuid.UUID, jobUUID uuid.UUID) (DeleteIngestionJobResult, error) { + if s == nil || s.db == nil { + return DeleteIngestionJobResult{}, errors.New("service unavailable") } - for i := 0; i < paragraph; i++ { - chunkID := uuid.New() - records = append(records, vectorstore.VectorRecord{ - ChunkID: chunkID, - Embedding: fakeEmbedding(summary+i, 32), - Metadata: map[string]any{ - "chunk_kind": "paragraph", - "space_id": space.String(), - }, - }) - hashes = append(hashes, chunkID[:]...) 
+ if spaceID == uuid.Nil || jobUUID == uuid.Nil { + return DeleteIngestionJobResult{}, ErrInvalidInput + } + + job, err := s.GetJob(ctx, spaceID, jobUUID) + if err != nil { + return DeleteIngestionJobResult{}, err } - sum := sha256.Sum256(hashes) - return chunkBatch{ - records: records, - summaryCount: summary, - paragraphCount: paragraph, - total: len(records), - checksum: hex.EncodeToString(sum[:]), + if job == nil { + return DeleteIngestionJobResult{Deleted: false}, nil } -} -func fakeEmbedding(seed int, dim int) []float32 { - if dim <= 0 { - dim = 32 + out := DeleteIngestionJobResult{Deleted: false} + + err = s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + // 1) Collect chunk IDs (best effort). + var chunkIDs []uuid.UUID + if err := tx.Model(&knowledge.KnowledgeChunk{}). + Select("chunk_uuid"). + Where("space_uuid = ? AND job_uuid = ?", spaceID, jobUUID). + Scan(&chunkIDs).Error; err != nil { + // If the table doesn't exist (index backend not provisioned), keep going. + msg := strings.ToLower(err.Error()) + if !strings.Contains(msg, "does not exist") && !strings.Contains(msg, "relation") { + return err + } + } + + // 2) Delete vectors first (best effort). + if s.vectorStore != nil && len(chunkIDs) > 0 { + if err := s.vectorStore.DeleteByChunkIDs(ctx, spaceID, chunkIDs); err == nil { + out.DeletedVectors = len(chunkIDs) + } + } + + // 3) Delete chunk rows (ignore missing table). + if err := tx.Where("space_uuid = ? AND job_uuid = ?", spaceID, jobUUID).Delete(&knowledge.KnowledgeChunk{}).Error; err != nil { + msg := strings.ToLower(err.Error()) + if !strings.Contains(msg, "does not exist") && !strings.Contains(msg, "relation") { + return err + } + } else { + out.DeletedChunks = len(chunkIDs) + } + + // 4) Delete artifact bundle record (by ingestion_job_id). + _ = tx.Where("ingestion_job_id = ?", job.ID).Delete(&knowledge.ArtifactBundle{}).Error + + // 5) Delete job record. + if err := tx.Where("uuid = ? 
AND space_uuid = ?", jobUUID, spaceID).Delete(&knowledge.IngestionJob{}).Error; err != nil { + return err + } + + out.Deleted = true + return nil + }) + if err != nil { + return DeleteIngestionJobResult{}, err } - vec := make([]float32, dim) - for i := 0; i < dim; i++ { - vec[i] = float32((seed + i%7)) / 10.0 + + // 6) Remove local artifacts after DB deletion (best-effort). + if out.Deleted && s.artifactStore != nil { + if ok, err := s.artifactStore.DeleteJobArtifacts(spaceID, jobUUID); err == nil { + out.DeletedArtifacts = ok + } } - return vec + + return out, nil } func mustJSON(v any) []byte { @@ -297,3 +1059,756 @@ func mustJSON(v any) []byte { } return buf } + +func sanitizeSeparators(raw []string) []string { + if len(raw) == 0 { + return nil + } + seen := make(map[string]struct{}, len(raw)) + out := make([]string, 0, len(raw)) + for _, s := range raw { + s = strings.TrimSpace(s) + if s == "" { + continue + } + if len([]rune(s)) > 16 { + continue + } + if _, ok := seen[s]; ok { + continue + } + seen[s] = struct{}{} + out = append(out, s) + if len(out) >= 32 { + break + } + } + if len(out) == 0 { + return nil + } + return out +} + +func defaultSeparatorsFor(format string, mode string) []string { + f := strings.ToLower(strings.TrimSpace(format)) + m := strings.ToLower(strings.TrimSpace(mode)) + // 通用分隔符:段落/换行优先,其次中文/英文句末标点,再到分号/冒号与 bullet。 + base := []string{"\n\n", "\n", "。", "!", "?", ".", "!", "?", ";", ";", ":", ":", "•"} + if f == "sql" || m == "code_block" { + return []string{"\n\n", "\n", ";", "}", "。"} + } + if f == "csv" || f == "xlsx" || f == "table" || m == "table_row" { + return nil + } + return base +} + +func normalizeSegmentSizePolicy(policy string, chunkSize int) string { + p := strings.ToLower(strings.TrimSpace(policy)) + if p == "target" || p == "cap" { + return p + } + if chunkSize > 0 { + return "target" + } + return "" +} + +type pipelineInput struct { + space *knowledge.KnowledgeSpace + job *knowledge.IngestionJob + bundle 
*knowledge.ArtifactBundle + format string + sourceURI string + docUUID string + ingestionProfile string + processorProfile string + ocrRequired bool + maskingProfile string + ragSceneKey string + ragBundleKey string + ragPrimary string + segmentMode string + chunkSize int + chunkOverlap int + segmentSizePolicy string + segmentOrder []string + separators []string + pagePriority bool + anchorHeadingPath bool + anchorClauseID bool + anchorRowNumber bool + anchorSpeaker bool + anchorSentenceIndex bool +} + +type pipelineUnitsInput struct { + space *knowledge.KnowledgeSpace + job *knowledge.IngestionJob + bundle *knowledge.ArtifactBundle + format string + sourceURI string + docUUID string + docUnits []DocumentUnit + maskingProfile string + ocrRequired bool + ragSceneKey string + ragBundleKey string + ragPrimary string + segmentMode string + chunkSize int + chunkOverlap int + segmentSizePolicy string + segmentOrder []string + separators []string + pagePriority bool + anchorHeadingPath bool + anchorClauseID bool + anchorRowNumber bool + anchorSpeaker bool + anchorSentenceIndex bool +} + +type pipelineOutcome struct { + status string + degraded bool + errorCode string + reason string + totalChunks int + summaryCount int + chunkCount int + coveragePct float64 + embeddingPct float64 + embeddingMaxInputTokens int + embeddingProvider string + embeddingModel string + maskingPct float64 + language string + ocrRequired bool + ocrNeeded bool + ocrUsed bool + ocrCoveragePct float64 + ocrConfidenceBuckets map[string]int + ocrLatencyMs int64 + ocrPageCount int + ocrFailedPages int + ocrBboxCoveragePct float64 + // config snapshot (best-effort, for audit/debug) + ragSceneKey string + ragBundleKey string + ragPrimary string + pagePriority bool + segmentOrder []string + segmentSizePolicy string + segmentMode string + chunkSize int + chunkOverlap int + separators []string + chunkAnchors map[string]bool +} + +func (o pipelineOutcome) snapshot(completed time.Time) map[string]any { + return 
map[string]any{ + "status": o.status, + "degraded": o.degraded, + "error_code": o.errorCode, + "reason": o.reason, + "chunk_total": o.totalChunks, + "summary_chunks": o.summaryCount, + "content_chunks": o.chunkCount, + "coverage_pct": o.coveragePct, + "embedding_pct": o.embeddingPct, + "embedding_max_input_tokens": o.embeddingMaxInputTokens, + "embedding_provider": o.embeddingProvider, + "embedding_model": o.embeddingModel, + "masking_pct": o.maskingPct, + "language": o.language, + "ocr_required": o.ocrRequired, + "ocr_needed": o.ocrNeeded, + "ocr_used": o.ocrUsed, + "ocr_coverage_pct": o.ocrCoveragePct, + "ocr_confidence": o.ocrConfidenceBuckets, + "ocr_latency_ms": o.ocrLatencyMs, + "ocr_pages": o.ocrPageCount, + "ocr_failed_pages": o.ocrFailedPages, + "ocr_bbox_pct": o.ocrBboxCoveragePct, + "rag_scene_key": o.ragSceneKey, + "rag_bundle_key": o.ragBundleKey, + "rag_primary": o.ragPrimary, + "page_priority": o.pagePriority, + "segment_order": o.segmentOrder, + "segment_size_policy": o.segmentSizePolicy, + "segment_mode": o.segmentMode, + "chunk_size": o.chunkSize, + "chunk_overlap": o.chunkOverlap, + "separators": o.separators, + "chunk_anchors": o.chunkAnchors, + "completed": completed, + } +} + +type IngestionChunk struct { + ID uuid.UUID + Kind string + Content string + Metadata map[string]any + Confidence float64 + Masked bool +} + +func (s *IngestionService) runPipeline(ctx context.Context, in pipelineInput) (pipelineOutcome, []IngestionChunk, []vectorstore.VectorRecord, *OCRArtifacts) { + format := strings.ToLower(strings.TrimSpace(in.format)) + sourceURI := strings.TrimSpace(in.sourceURI) + separators := sanitizeSeparators(in.separators) + mode := strings.ToLower(strings.TrimSpace(in.segmentMode)) + sizePolicy := normalizeSegmentSizePolicy(in.segmentSizePolicy, in.chunkSize) + if mode == "" { + mode = "unit" + } + // 当调用方未显式传 separators 且启用了 chunkSize 窗口切分时,给一组“安全默认分隔符”, + // 以便窗口边界尽量对齐句子/换行,避免硬截断。 + if in.chunkSize > 0 && mode != "table_row" && 
len(separators) == 0 { + separators = defaultSeparatorsFor(format, mode) + } + out := pipelineOutcome{ + status: knowledge.IngestionStatusCompleted, + coveragePct: 100, + embeddingPct: 100, + maskingPct: 100, + ocrRequired: in.ocrRequired, + ocrConfidenceBuckets: map[string]int{"0.0-0.5": 0, "0.5-0.8": 0, "0.8-1.0": 0}, + ragSceneKey: strings.TrimSpace(in.ragSceneKey), + ragBundleKey: strings.TrimSpace(in.ragBundleKey), + ragPrimary: strings.TrimSpace(in.ragPrimary), + pagePriority: in.pagePriority, + segmentOrder: in.segmentOrder, + segmentSizePolicy: sizePolicy, + segmentMode: strings.TrimSpace(in.segmentMode), + chunkSize: in.chunkSize, + chunkOverlap: in.chunkOverlap, + separators: separators, + chunkAnchors: map[string]bool{ + "heading_path": in.anchorHeadingPath, + "clause_id": in.anchorClauseID, + "row_number": in.anchorRowNumber, + "speaker": in.anchorSpeaker, + "sentence_idx": in.anchorSentenceIndex, + }, + } + + needsOCR := in.ocrRequired + if format == "image" { + needsOCR = true + } + if format == "pdf" && strings.Contains(strings.ToLower(sourceURI), "scan") { + needsOCR = true + } + + processor, resolution := s.processors.Resolve(format, needsOCR, in.ocrRequired, in.processorProfile) + out.ocrNeeded = needsOCR + out.ocrUsed = resolution.OCRUsed + if resolution.Decision == ProcessorDecisionBlocked { + out.status = knowledge.IngestionStatusBlocked + out.errorCode = resolution.ErrorCode + out.reason = resolution.Reason + out.coveragePct = 0 + out.embeddingPct = 0 + out.maskingPct = 0 + s.emitProgress(ctx, in.job, "extract", 1, 0, out.embeddingPct, out.maskingPct, in.space.TenantUUID) + return out, nil, nil, nil + } + if resolution.Decision == ProcessorDecisionDegraded { + out.degraded = true + out.errorCode = resolution.ErrorCode + out.reason = resolution.Reason + out.coveragePct = 40 + } + + pagePriority := in.pagePriority && format == "pdf" + res, err := processor.Process(ctx, DocumentProcessInput{ + SpaceID: in.space.UUID.String(), + JobID: 
in.job.UUID.String(), + Format: format, + SourceURI: sourceURI, + NeedOCR: needsOCR, + OCRAvailable: resolution.OCRAvailable, + PagePriority: pagePriority, + }) + if err != nil { + // PDF 处理器的优先级:如果选择了 pdftotext,但 sourceURI scheme 不支持(例如 s3://、minio://), + // 则回退到 builtin/pdf(合成内容),避免在“可用二进制存在但 URI 不可达”时误判为 degraded。 + if format == "pdf" && !needsOCR && errors.Is(err, ErrUnsupportedSourceURIScheme) { + res, err = (PDFProcessor{}).Process(ctx, DocumentProcessInput{ + SpaceID: in.space.UUID.String(), + JobID: in.job.UUID.String(), + Format: format, + SourceURI: sourceURI, + NeedOCR: needsOCR, + OCRAvailable: resolution.OCRAvailable, + PagePriority: pagePriority, + }) + } + } + if err != nil { + if needsOCR && in.ocrRequired { + out.status = knowledge.IngestionStatusBlocked + out.errorCode = "ocr_failed" + out.reason = err.Error() + out.coveragePct = 0 + out.embeddingPct = 0 + out.maskingPct = 0 + s.emitProgress(ctx, in.job, "extract", 3, 0, out.embeddingPct, out.maskingPct, in.space.TenantUUID) + return out, nil, nil, nil + } + out.degraded = true + if out.errorCode == "" { + out.errorCode = "degraded" + } + if out.reason == "" { + out.reason = "processor_failed" + } + out.coveragePct = 40 + } + docUnits := res.Units + out.ocrCoveragePct = res.OCR.CoveragePct + out.ocrConfidenceBuckets = res.OCR.ConfidenceBuckets + out.ocrLatencyMs = res.OCR.LatencyMs + out.ocrPageCount = res.OCR.PageCount + out.ocrFailedPages = res.OCR.FailedPages + out.ocrBboxCoveragePct = res.OCR.BboxCoveragePct + s.emitProgress(ctx, in.job, "extract", 5, 0, out.embeddingPct, out.maskingPct, in.space.TenantUUID) + + lastChunkProgress := -1 + emitChunkProgress := func(done, total float64) { + if total <= 0 || in.job == nil { + return + } + if done > total { + done = total + } + const chunkProgressStart = 5 + const chunkProgressEnd = 15 + const chunkProgressStep = 2 + progress := chunkProgressStart + int(float64(chunkProgressEnd-chunkProgressStart)*done/total+0.5) + if progress < chunkProgressStart 
{ + progress = chunkProgressStart + } + if progress > chunkProgressEnd { + progress = chunkProgressEnd + } + if lastChunkProgress >= 0 && progress-lastChunkProgress < chunkProgressStep && done < total { + return + } + lastChunkProgress = progress + s.emitProgress(ctx, in.job, "chunk", progress, 0, out.embeddingPct, out.maskingPct, in.space.TenantUUID) + } + chunks := ChunkDocument(in.space.UUID, format, sourceURI, docUnits, ChunkingOptions{ + Mode: in.segmentMode, + SizePolicy: sizePolicy, + PagePriority: in.pagePriority, + DocUUID: in.docUUID, + ChunkSize: in.chunkSize, + ChunkOverlap: in.chunkOverlap, + SegmentOrder: in.segmentOrder, + Separators: separators, + Anchors: ChunkAnchors{ + HeadingPath: in.anchorHeadingPath, + ClauseID: in.anchorClauseID, + RowNumber: in.anchorRowNumber, + Speaker: in.anchorSpeaker, + SentenceIndex: in.anchorSentenceIndex, + }, + }, emitChunkProgress) + if len(chunks) == 0 || !hasContentChunks(chunks) { + chunks = append(chunks, IngestionChunk{ + ID: uuid.NewSHA1(in.space.UUID, []byte("section_summary|placeholder|"+format+"|"+sourceURI)), + Kind: "section_summary", + Content: "Section 1 summary (placeholder)", + Metadata: map[string]any{ + "format": format, + "source_uri": sourceURI, + "provenance": map[string]any{}, + "section": 1, + }, + }) + chunks = append(chunks, IngestionChunk{ + ID: uuid.NewSHA1(in.space.UUID, []byte("chunk|placeholder|"+format+"|"+sourceURI)), + Kind: "chunk", + Content: "content unavailable", + Metadata: map[string]any{ + "format": format, + "source_uri": sourceURI, + "provenance": map[string]any{}, + }, + }) + out.degraded = true + if out.errorCode == "" { + out.errorCode = "degraded" + } + if out.reason == "" { + out.reason = "empty_content" + } + out.coveragePct = 0 + } + + // Masking. 
+ masker := NewMasker(in.maskingProfile) + maskedChunks, maskingPct, maskBlock := masker.Apply(chunks) + out.maskingPct = maskingPct + if maskBlock { + out.status = knowledge.IngestionStatusBlocked + out.errorCode = "masking_required" + out.reason = "masking_blocked" + out.coveragePct = 0 + out.embeddingPct = 0 + s.emitProgress(ctx, in.job, "chunk", 15, len(maskedChunks), out.embeddingPct, out.maskingPct, in.space.TenantUUID) + return out, maskedChunks, nil, res.Artifacts + } + + out.language = detectLanguage(maskedChunks) + summaryCount, contentCount := countChunkKinds(maskedChunks) + out.summaryCount = summaryCount + out.chunkCount = contentCount + out.totalChunks = len(maskedChunks) + s.emitProgress(ctx, in.job, "chunk", 15, out.totalChunks, out.embeddingPct, out.maskingPct, in.space.TenantUUID) + + // Make job linkage explicit in chunk metadata (used by online chunk store + UI/API filtering). + for i := range maskedChunks { + if maskedChunks[i].Metadata == nil { + maskedChunks[i].Metadata = map[string]any{} + } + maskedChunks[i].Metadata["job_uuid"] = in.job.UUID.String() + } + + lastEmbedProgress := -1 + emitEmbedProgress := func(done, total int) { + if total <= 0 || in.job == nil { + return + } + if done > total { + done = total + } + const embedProgressStart = 15 + const embedProgressEnd = 85 + const embedProgressStep = 2 + progress := embedProgressStart + int(float64(embedProgressEnd-embedProgressStart)*float64(done)/float64(total)+0.5) + if progress < embedProgressStart { + progress = embedProgressStart + } + if progress > embedProgressEnd { + progress = embedProgressEnd + } + if lastEmbedProgress >= 0 && progress-lastEmbedProgress < embedProgressStep && done < total { + return + } + lastEmbedProgress = progress + embeddingPct := 100.0 * float64(done) / float64(total) + s.emitProgress(ctx, in.job, "embed", progress, out.totalChunks, embeddingPct, out.maskingPct, in.space.TenantUUID) + } + records, embeddingPct, embedDegraded, embedErrCode, embedReason, 
embedMaxInput, embedProvider, embedModel := s.buildVectorRecords( + ctx, + in.space, + maskedChunks, + emitEmbedProgress, + ) + out.embeddingPct = embeddingPct + out.embeddingMaxInputTokens = embedMaxInput + out.embeddingProvider = strings.TrimSpace(embedProvider) + out.embeddingModel = strings.TrimSpace(embedModel) + if embedDegraded { + out.degraded = true + if out.errorCode == "" { + out.errorCode = embedErrCode + } + if out.reason == "" { + out.reason = embedReason + } + } + s.emitProgress(ctx, in.job, "embed", 85, out.totalChunks, out.embeddingPct, out.maskingPct, in.space.TenantUUID) + + return out, maskedChunks, records, res.Artifacts +} + +func (s *IngestionService) runPipelineFromUnits(ctx context.Context, in pipelineUnitsInput) (pipelineOutcome, []IngestionChunk, []vectorstore.VectorRecord) { + format := strings.ToLower(strings.TrimSpace(in.format)) + sourceURI := strings.TrimSpace(in.sourceURI) + separators := sanitizeSeparators(in.separators) + mode := strings.ToLower(strings.TrimSpace(in.segmentMode)) + sizePolicy := normalizeSegmentSizePolicy(in.segmentSizePolicy, in.chunkSize) + if mode == "" { + mode = "unit" + } + if in.chunkSize > 0 && mode != "table_row" && len(separators) == 0 { + separators = defaultSeparatorsFor(format, mode) + } + out := pipelineOutcome{ + status: knowledge.IngestionStatusCompleted, + coveragePct: 100, + embeddingPct: 100, + maskingPct: 100, + ocrRequired: in.ocrRequired, + ocrConfidenceBuckets: map[string]int{"0.0-0.5": 0, "0.5-0.8": 0, "0.8-1.0": 0}, + ragSceneKey: strings.TrimSpace(in.ragSceneKey), + ragBundleKey: strings.TrimSpace(in.ragBundleKey), + ragPrimary: strings.TrimSpace(in.ragPrimary), + pagePriority: in.pagePriority, + segmentOrder: in.segmentOrder, + segmentSizePolicy: sizePolicy, + segmentMode: strings.TrimSpace(in.segmentMode), + chunkSize: in.chunkSize, + chunkOverlap: in.chunkOverlap, + separators: separators, + chunkAnchors: map[string]bool{ + "heading_path": in.anchorHeadingPath, + "clause_id": 
in.anchorClauseID, + "row_number": in.anchorRowNumber, + "speaker": in.anchorSpeaker, + "sentence_idx": in.anchorSentenceIndex, + }, + } + + docUnits := in.docUnits + if len(docUnits) == 0 { + // Keep behavior consistent with runPipeline: empty content yields degraded path. + docUnits = []DocumentUnit{{ + Content: "content unavailable", + Provenance: map[string]any{ + "format": format, + "source_uri": sourceURI, + "reason": "empty_units", + }, + Confidence: 0.2, + }} + out.degraded = true + out.errorCode = "degraded" + out.reason = "empty_content" + out.coveragePct = 0 + } + s.emitProgress(ctx, in.job, "extract", 5, 0, out.embeddingPct, out.maskingPct, in.space.TenantUUID) + + lastChunkProgress := -1 + emitChunkProgress := func(done, total float64) { + if total <= 0 || in.job == nil { + return + } + if done > total { + done = total + } + const chunkProgressStart = 5 + const chunkProgressEnd = 15 + const chunkProgressStep = 2 + progress := chunkProgressStart + int(float64(chunkProgressEnd-chunkProgressStart)*done/total+0.5) + if progress < chunkProgressStart { + progress = chunkProgressStart + } + if progress > chunkProgressEnd { + progress = chunkProgressEnd + } + if lastChunkProgress >= 0 && progress-lastChunkProgress < chunkProgressStep && done < total { + return + } + lastChunkProgress = progress + s.emitProgress(ctx, in.job, "chunk", progress, 0, out.embeddingPct, out.maskingPct, in.space.TenantUUID) + } + chunks := ChunkDocument(in.space.UUID, format, sourceURI, docUnits, ChunkingOptions{ + Mode: in.segmentMode, + SizePolicy: sizePolicy, + PagePriority: in.pagePriority, + DocUUID: in.docUUID, + ChunkSize: in.chunkSize, + ChunkOverlap: in.chunkOverlap, + SegmentOrder: in.segmentOrder, + Separators: separators, + Anchors: ChunkAnchors{ + HeadingPath: in.anchorHeadingPath, + ClauseID: in.anchorClauseID, + RowNumber: in.anchorRowNumber, + Speaker: in.anchorSpeaker, + SentenceIndex: in.anchorSentenceIndex, + }, + }, emitChunkProgress) + if len(chunks) == 0 || 
!hasContentChunks(chunks) { + chunks = append(chunks, IngestionChunk{ + ID: uuid.NewSHA1(in.space.UUID, []byte("section_summary|placeholder|"+format+"|"+sourceURI)), + Kind: "section_summary", + Content: "Section 1 summary (placeholder)", + Metadata: map[string]any{ + "format": format, + "source_uri": sourceURI, + "provenance": map[string]any{}, + "section": 1, + }, + }) + chunks = append(chunks, IngestionChunk{ + ID: uuid.NewSHA1(in.space.UUID, []byte("chunk|placeholder|"+format+"|"+sourceURI)), + Kind: "chunk", + Content: "content unavailable", + Metadata: map[string]any{ + "format": format, + "source_uri": sourceURI, + "provenance": map[string]any{}, + }, + }) + out.degraded = true + if out.errorCode == "" { + out.errorCode = "degraded" + } + if out.reason == "" { + out.reason = "empty_content" + } + out.coveragePct = 0 + } + + // Masking. + masker := NewMasker(in.maskingProfile) + maskedChunks, maskingPct, maskBlock := masker.Apply(chunks) + out.maskingPct = maskingPct + if maskBlock { + out.status = knowledge.IngestionStatusBlocked + out.errorCode = "masking_required" + out.reason = "masking_blocked" + out.coveragePct = 0 + out.embeddingPct = 0 + s.emitProgress(ctx, in.job, "chunk", 15, len(maskedChunks), out.embeddingPct, out.maskingPct, in.space.TenantUUID) + return out, maskedChunks, nil + } + + out.language = detectLanguage(maskedChunks) + summaryCount, contentCount := countChunkKinds(maskedChunks) + out.summaryCount = summaryCount + out.chunkCount = contentCount + out.totalChunks = len(maskedChunks) + s.emitProgress(ctx, in.job, "chunk", 15, out.totalChunks, out.embeddingPct, out.maskingPct, in.space.TenantUUID) + + for i := range maskedChunks { + if maskedChunks[i].Metadata == nil { + maskedChunks[i].Metadata = map[string]any{} + } + maskedChunks[i].Metadata["job_uuid"] = in.job.UUID.String() + } + + lastEmbedProgress := -1 + emitEmbedProgress := func(done, total int) { + if total <= 0 || in.job == nil { + return + } + if done > total { + done = total + 
} + const embedProgressStart = 15 + const embedProgressEnd = 85 + const embedProgressStep = 2 + progress := embedProgressStart + int(float64(embedProgressEnd-embedProgressStart)*float64(done)/float64(total)+0.5) + if progress < embedProgressStart { + progress = embedProgressStart + } + if progress > embedProgressEnd { + progress = embedProgressEnd + } + if lastEmbedProgress >= 0 && progress-lastEmbedProgress < embedProgressStep && done < total { + return + } + lastEmbedProgress = progress + embeddingPct := 100.0 * float64(done) / float64(total) + s.emitProgress(ctx, in.job, "embed", progress, out.totalChunks, embeddingPct, out.maskingPct, in.space.TenantUUID) + } + records, embeddingPct, embedDegraded, embedErrCode, embedReason, embedMaxInput, embedProvider, embedModel := s.buildVectorRecords( + ctx, + in.space, + maskedChunks, + emitEmbedProgress, + ) + out.embeddingPct = embeddingPct + out.embeddingMaxInputTokens = embedMaxInput + out.embeddingProvider = strings.TrimSpace(embedProvider) + out.embeddingModel = strings.TrimSpace(embedModel) + if embedDegraded { + out.degraded = true + if out.errorCode == "" { + out.errorCode = embedErrCode + } + if out.reason == "" { + out.reason = embedReason + } + } + s.emitProgress(ctx, in.job, "embed", 85, out.totalChunks, out.embeddingPct, out.maskingPct, in.space.TenantUUID) + + return out, maskedChunks, records +} + +func stableSourceID(sourceURI string) string { + normalized := strings.ToLower(strings.TrimSpace(sourceURI)) + if normalized == "" { + return fmt.Sprintf("src-%s", uuid.NewString()) + } + return "src-" + ContentHash(normalized) +} + +func detectLanguage(chunks []IngestionChunk) string { + if len(chunks) == 0 { + return "unknown" + } + sample := "" + for _, c := range chunks { + if strings.TrimSpace(c.Content) != "" { + sample += " " + c.Content + } + if len(sample) > 4096 { + break + } + } + sample = strings.TrimSpace(sample) + if sample == "" { + return "unknown" + } + + var han, latin, other int + for _, r := 
range sample { + switch { + case r >= 0x4E00 && r <= 0x9FFF: + han++ + case (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z'): + latin++ + case r <= 0x7F: + // ignore ASCII punctuation/space + default: + other++ + } + } + + total := han + latin + other + if total == 0 { + return "unknown" + } + if han*100/total >= 20 { + if latin*100/total >= 20 { + return "mixed" + } + return "zh" + } + if latin*100/total >= 20 { + return "en" + } + return "unknown" +} + +func countChunkKinds(chunks []IngestionChunk) (summaryCount int, contentCount int) { + for _, c := range chunks { + switch c.Kind { + case "doc_summary", "section_summary": + summaryCount++ + default: + contentCount++ + } + } + return summaryCount, contentCount +} + +func hasContentChunks(chunks []IngestionChunk) bool { + for _, c := range chunks { + if c.Kind != "doc_summary" && c.Kind != "section_summary" { + return true + } + } + return false +} diff --git a/backend/internal/service/knowledge_space/instrumentation/decay_metrics.go b/backend/internal/service/knowledge_space/instrumentation/decay_metrics.go index b7972f19..5182d2d3 100644 --- a/backend/internal/service/knowledge_space/instrumentation/decay_metrics.go +++ b/backend/internal/service/knowledge_space/instrumentation/decay_metrics.go @@ -15,9 +15,23 @@ type DecayMetricsSnapshot struct { FalsePositive int `json:"falsePositive"` Backlog int `json:"backlog"` AverageFillHours float64 `json:"avgFillHours"` + Metrics map[string]any `json:"metrics,omitempty"` RecordedAt time.Time `json:"recordedAt"` } +func (s *DecayMetricsSnapshot) EnsureMetrics() { + if s == nil { + return + } + if s.Metrics == nil { + s.Metrics = make(map[string]any) + } + s.Metrics["knowledge.decay.detected"] = s.Detected + s.Metrics["knowledge.decay.false_positive"] = s.FalsePositive + s.Metrics["knowledge.gap.backlog"] = s.Backlog + s.Metrics["knowledge.decay.fill_time_hours"] = s.AverageFillHours +} + // DecayMetricsWriter persists JSON snapshots and updates aggregates. 
type DecayMetricsWriter struct { mu sync.Mutex @@ -36,6 +50,7 @@ func (w *DecayMetricsWriter) Store(snapshot DecayMetricsSnapshot) error { if snapshot.RecordedAt.IsZero() { snapshot.RecordedAt = time.Now().UTC() } + snapshot.EnsureMetrics() w.mu.Lock() defer w.mu.Unlock() if err := w.persistJSON(w.path, snapshot); err != nil { diff --git a/backend/internal/service/knowledge_space/instrumentation/event_metrics.go b/backend/internal/service/knowledge_space/instrumentation/event_metrics.go index 0ccf932a..19806120 100644 --- a/backend/internal/service/knowledge_space/instrumentation/event_metrics.go +++ b/backend/internal/service/knowledge_space/instrumentation/event_metrics.go @@ -13,10 +13,13 @@ import ( type EventMetricsSnapshot struct { EventID string `json:"eventId"` EventType string `json:"eventType"` + PolicySeverity string `json:"policySeverity,omitempty"` + Actions []string `json:"actions,omitempty"` LatencyMs int64 `json:"latencyMs"` RetryCount int `json:"retryCount"` IdempotentSkip bool `json:"idempotentSkip"` AgentRefreshOK bool `json:"agentRefreshOk"` + HotUpdateOK bool `json:"hotUpdateOk"` RecordedAt time.Time `json:"recordedAt"` } diff --git a/backend/internal/service/knowledge_space/instrumentation/release_metrics.go b/backend/internal/service/knowledge_space/instrumentation/release_metrics.go index 81ae83f9..d9fdd8e7 100644 --- a/backend/internal/service/knowledge_space/instrumentation/release_metrics.go +++ b/backend/internal/service/knowledge_space/instrumentation/release_metrics.go @@ -14,10 +14,26 @@ type ReleaseMetricsSnapshot struct { GrayState string `json:"grayState"` RollbackCount int `json:"rollbackCount"` TenantCoverage float64 `json:"tenantCoverage"` + VersionDrift int `json:"versionDrift"` Alerts []string `json:"alerts"` + Metrics map[string]any `json:"metrics,omitempty"` RecordedAt time.Time `json:"recordedAt"` } +func (s *ReleaseMetricsSnapshot) EnsureMetrics() { + if s == nil { + return + } + if s.Metrics == nil { + s.Metrics = 
make(map[string]any) + } + s.Metrics["knowledge.release.gray_state"] = s.GrayState + s.Metrics["knowledge.release.rollback_count"] = s.RollbackCount + s.Metrics["knowledge.release.tenant_coverage"] = s.TenantCoverage + s.Metrics["knowledge.release.version_drift"] = s.VersionDrift + s.Metrics["knowledge.release.alerts"] = append([]string{}, s.Alerts...) +} + // ReleaseMetricsWriter persists release telemetry and aggregates. type ReleaseMetricsWriter struct { mu sync.Mutex @@ -36,6 +52,7 @@ func (w *ReleaseMetricsWriter) Store(snapshot ReleaseMetricsSnapshot) error { if snapshot.RecordedAt.IsZero() { snapshot.RecordedAt = time.Now().UTC() } + snapshot.EnsureMetrics() w.mu.Lock() defer w.mu.Unlock() if err := w.persistJSON(w.path, snapshot); err != nil { diff --git a/backend/internal/service/knowledge_space/masking.go b/backend/internal/service/knowledge_space/masking.go new file mode 100644 index 00000000..df47ba59 --- /dev/null +++ b/backend/internal/service/knowledge_space/masking.go @@ -0,0 +1,36 @@ +package knowledge_space + +import ( + "regexp" + "strings" +) + +type Masker struct { + strict bool +} + +func NewMasker(profile string) Masker { + p := strings.ToLower(strings.TrimSpace(profile)) + return Masker{ + strict: strings.Contains(p, "strict") || strings.Contains(p, "required"), + } +} + +func (m Masker) Apply(chunks []IngestionChunk) ([]IngestionChunk, float64, bool) { + if len(chunks) == 0 { + return nil, 0, false + } + out := make([]IngestionChunk, 0, len(chunks)) + + email := regexp.MustCompile(`[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}`) + for _, chunk := range chunks { + c := chunk + if m.strict && strings.Contains(strings.ToUpper(c.Content), "UNMASKABLE") { + return out, 0, true + } + c.Content = email.ReplaceAllString(c.Content, "[REDACTED_EMAIL]") + c.Masked = true + out = append(out, c) + } + return out, 100.0, false +} diff --git a/backend/internal/service/knowledge_space/ocr_plan_b_types.go
b/backend/internal/service/knowledge_space/ocr_plan_b_types.go new file mode 100644 index 00000000..61e712eb --- /dev/null +++ b/backend/internal/service/knowledge_space/ocr_plan_b_types.go @@ -0,0 +1,27 @@ +package knowledge_space + +import "path/filepath" + +// OCRArtifacts captures local (filesystem) artifacts produced by an OCR processor. +// It is meant to be consumed by ArtifactStore to write into MinIO/S3 (or local artifact staging) +// and should not be persisted directly into DB snapshots. +type OCRArtifacts struct { + RawFormat string // tsv|hocr + Pages []OCRArtifactPage +} + +type OCRArtifactPage struct { + PageNumber int + + // Local paths on the worker filesystem. + ImagePath string + RawPath string + + // Image metadata (used for bbox normalization validation / UI scaling). + Width int + Height int +} + +func (p OCRArtifactPage) ImageExt() string { return filepath.Ext(p.ImagePath) } +func (p OCRArtifactPage) RawExt() string { return filepath.Ext(p.RawPath) } + diff --git a/backend/internal/service/knowledge_space/pathutil.go b/backend/internal/service/knowledge_space/pathutil.go new file mode 100644 index 00000000..aac9e9e0 --- /dev/null +++ b/backend/internal/service/knowledge_space/pathutil.go @@ -0,0 +1,46 @@ +package knowledge_space + +import ( + "os" + "path/filepath" + "strings" +) + +func isTestBinary() bool { + return strings.HasSuffix(os.Args[0], ".test") +} + +func findRepoRoot(start string) string { + dir := filepath.Clean(start) + for { + if dir == "" || dir == string(filepath.Separator) || dir == "." 
{ + return "" + } + if _, err := os.Stat(filepath.Join(dir, ".specify")); err == nil { + return dir + } + if _, err := os.Stat(filepath.Join(dir, ".git")); err == nil { + return dir + } + next := filepath.Dir(dir) + if next == dir { + return "" + } + dir = next + } +} + +func projectTmpDir() string { + if env := strings.TrimSpace(os.Getenv("POWERX_TMP_DIR")); env != "" { + return env + } + wd, err := os.Getwd() + if err != nil { + return "tmp" + } + root := findRepoRoot(wd) + if root == "" { + return filepath.Join(wd, "tmp") + } + return filepath.Join(root, "tmp") +} diff --git a/backend/internal/service/knowledge_space/pdf_ocr_tesseract_processor.go b/backend/internal/service/knowledge_space/pdf_ocr_tesseract_processor.go new file mode 100644 index 00000000..fc4626e1 --- /dev/null +++ b/backend/internal/service/knowledge_space/pdf_ocr_tesseract_processor.go @@ -0,0 +1,814 @@ +package knowledge_space + +import ( + "bufio" + "context" + "errors" + "fmt" + "image" + _ "image/jpeg" + _ "image/png" + "io" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +// PDFOCRTesseractProcessor implements Plan B: +// PDF → page images → tesseract TSV → paragraph units (may跨页) with bbox provenance. +// +// NOTE: +// - This processor relies on external binaries (`pdftoppm` or `mutool`, and `tesseract`). +// - It is only selected when processorProfile == "builtin/ocr_plan_b". 
+type PDFOCRTesseractProcessor struct{} + +func (PDFOCRTesseractProcessor) Name() string { return "builtin/pdf_ocr_tesseract" } + +func (PDFOCRTesseractProcessor) Process(ctx context.Context, in DocumentProcessInput) (DocumentProcessResult, error) { + start := time.Now() + if !in.NeedOCR { + return DocumentProcessResult{ + Units: nil, + OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}, + }, nil + } + if !in.OCRAvailable { + return DocumentProcessResult{ + Units: nil, + OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}, + }, ErrOCRUnavailable + } + + src := strings.TrimSpace(in.SourceURI) + if src == "" { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, errors.New("source uri is empty") + } + + workDir, err := os.MkdirTemp("", "powerx-ocr-planb-*") + if err != nil { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, err + } + // Best-effort cleanup; artifacts will be copied into ArtifactStore later. 
+ defer func() { _ = os.RemoveAll(workDir) }() + + pdfPath := filepath.Join(workDir, "source.pdf") + if err := fetchToFile(ctx, src, pdfPath); err != nil { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, err + } + if maxBytes := envInt64("POWERX_OCR_MAX_BYTES", 50*1024*1024); maxBytes > 0 { + if st, err := os.Stat(pdfPath); err == nil && st.Size() > maxBytes { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, fmt.Errorf("pdf too large: %d > %d bytes", st.Size(), maxBytes) + } + } + + overallTimeoutSec := envInt("POWERX_OCR_TIMEOUT_SECONDS", 300) + if overallTimeoutSec > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, time.Duration(overallTimeoutSec)*time.Second) + defer cancel() + } + + pagesDir := filepath.Join(workDir, "pages") + rawDir := filepath.Join(workDir, "raw") + if err := os.MkdirAll(pagesDir, 0o755); err != nil { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, err + } + if err := os.MkdirAll(rawDir, 0o755); err != nil { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, err + } + + dpi := envInt("POWERX_OCR_DPI", 200) + if dpi < 72 { + dpi = 72 + } + if dpi > 600 { + dpi = 600 + } + if err := renderPDFToPNGs(ctx, pdfPath, pagesDir, dpi); err != nil { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, err + } + + pageImages, err := listRenderedPages(pagesDir) + if err != nil { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, err + } + if len(pageImages) == 0 { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, errors.New("no pages rendered") + } + if maxPages := envInt("POWERX_OCR_MAX_PAGES", 80); maxPages > 0 
&& len(pageImages) > maxPages { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, fmt.Errorf("too many pages: %d > %d", len(pageImages), maxPages) + } + + lang := strings.TrimSpace(os.Getenv("POWERX_OCR_LANG")) + if lang == "" { + // Common default: Chinese + English. + lang = "chi_sim+eng" + } + + type pageResult struct { + pageNum int + imgPath string + rawPath string + width int + height int + paragraphs []ocrParagraph + err error + } + + buckets := defaultConfidenceBuckets() + results := make([]pageResult, len(pageImages)) + for i := range pageImages { + results[i] = pageResult{ + pageNum: i + 1, + imgPath: pageImages[i], + rawPath: filepath.Join(rawDir, fmt.Sprintf("%03d.tsv", i+1)), + } + } + + concurrency := envInt("POWERX_OCR_CONCURRENCY", 2) + if concurrency <= 0 { + concurrency = 1 + } + if concurrency > 8 { + concurrency = 8 + } + if concurrency > len(results) { + concurrency = len(results) + } + retries := envInt("POWERX_OCR_RETRIES", 1) + if retries < 0 { + retries = 0 + } + retryDelayMs := envInt("POWERX_OCR_RETRY_DELAY_MS", 80) + if retryDelayMs < 0 { + retryDelayMs = 0 + } + pageTimeoutSec := envInt("POWERX_OCR_PAGE_TIMEOUT_SECONDS", 60) + if pageTimeoutSec <= 0 { + pageTimeoutSec = 60 + } + + jobs := make(chan int) + done := make(chan struct{}) + defer close(done) + + worker := func() { + for idx := range jobs { + r := &results[idx] + w, h, err := imageSize(r.imgPath) + if err != nil { + r.err = err + continue + } + r.width, r.height = w, h + + var tsv string + var lastErr error + for attempt := 0; attempt <= retries; attempt++ { + pageCtx, cancel := context.WithTimeout(ctx, time.Duration(pageTimeoutSec)*time.Second) + out, err := runTesseractTSV(pageCtx, r.imgPath, lang) + cancel() + if err == nil && strings.TrimSpace(out) != "" { + tsv = out + lastErr = nil + break + } + lastErr = err + if attempt < retries && retryDelayMs > 0 { + select { + case <-time.After(time.Duration(retryDelayMs) 
* time.Millisecond): + case <-ctx.Done(): + lastErr = ctx.Err() + break + } + } + } + if lastErr != nil { + r.err = lastErr + continue + } + if err := os.WriteFile(r.rawPath, []byte(tsv), 0o644); err != nil { + r.err = err + continue + } + lines := parseTesseractTSV(tsv, r.pageNum, w, h) + r.paragraphs = groupLinesToParagraphs(lines) + } + } + + var wg sync.WaitGroup + wg.Add(concurrency) + for i := 0; i < concurrency; i++ { + go func() { defer wg.Done(); worker() }() + } + go func() { + defer close(jobs) + for i := range results { + select { + case jobs <- i: + case <-ctx.Done(): + return + } + } + }() + wg.Wait() + + pageParagraphs := make([][]ocrParagraph, 0, len(results)) + pageNumbers := make([]int, 0, len(results)) + artPages := make([]OCRArtifactPage, 0, len(results)) + failedPages := 0 + for _, r := range results { + if r.err != nil { + failedPages++ + continue + } + pageParagraphs = append(pageParagraphs, r.paragraphs) + pageNumbers = append(pageNumbers, r.pageNum) + for _, p := range r.paragraphs { + bucketConfidence(buckets, p.Confidence) + } + artPages = append(artPages, OCRArtifactPage{ + PageNumber: r.pageNum, + ImagePath: r.imgPath, + RawPath: r.rawPath, + Width: r.width, + Height: r.height, + }) + } + + units := make([]DocumentUnit, 0, len(pageParagraphs)) + if in.PagePriority { + for i, paras := range pageParagraphs { + if len(paras) == 0 { + continue + } + pageNum := 0 + if i < len(pageNumbers) { + pageNum = pageNumbers[i] + } + var sb strings.Builder + confSum := 0.0 + confCount := 0 + for _, p := range paras { + txt := strings.TrimSpace(p.Text) + if txt == "" { + continue + } + if sb.Len() > 0 { + sb.WriteString("\n\n") + } + sb.WriteString(txt) + if p.Confidence > 0 { + confSum += p.Confidence + confCount++ + } + } + content := strings.TrimSpace(sb.String()) + if content == "" { + continue + } + confidence := 0.0 + if confCount > 0 { + confidence = confSum / float64(confCount) + } + units = append(units, DocumentUnit{ + Content: content, +
Provenance: buildOCRPageProvenance(src, pageNum, paras), + Confidence: confidence, + }) + } + } else { + merged := mergeParagraphsAcrossPages(pageParagraphs) + units = make([]DocumentUnit, 0, len(merged)) + for _, p := range merged { + content := strings.TrimSpace(p.Text) + if content == "" { + continue + } + units = append(units, DocumentUnit{ + Content: content, + Provenance: p.Provenance(src), + Confidence: p.Confidence, + }) + } + } + + coverage := 0.0 + bboxCoverage := 0.0 + if len(pageImages) > 0 { + coverage = float64(len(pageImages)-failedPages) * 100 / float64(len(pageImages)) + } + if len(units) > 0 { + bboxCoverage = coverage + } + if len(units) == 0 && failedPages == len(pageImages) { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: buckets}}, errors.New("ocr failed for all pages") + } + + return DocumentProcessResult{ + Units: units, + OCR: OCRStats{ + CoveragePct: coverage, + ConfidenceBuckets: buckets, + LatencyMs: time.Since(start).Milliseconds(), + PageCount: len(pageImages), + FailedPages: failedPages, + BboxCoveragePct: bboxCoverage, + }, + Artifacts: &OCRArtifacts{ + RawFormat: "tsv", + Pages: artPages, + }, + }, nil +} + +func envInt(key string, def int) int { + raw := strings.TrimSpace(os.Getenv(key)) + if raw == "" { + return def + } + v, err := strconv.Atoi(raw) + if err != nil { + return def + } + return v +} + +func envInt64(key string, def int64) int64 { + raw := strings.TrimSpace(os.Getenv(key)) + if raw == "" { + return def + } + v, err := strconv.ParseInt(raw, 10, 64) + if err != nil { + return def + } + return v +} + +var ErrUnsupportedSourceURIScheme = errors.New("unsupported source uri scheme") + +func fetchToFile(ctx context.Context, sourceURI string, dstPath string) error { + sourceURI = strings.TrimSpace(sourceURI) + if sourceURI == "" { + return errors.New("empty source uri") + } + u, err := url.Parse(sourceURI) + if err != nil { + // treat as local path + return copyFile(sourceURI, dstPath) + } + 
switch strings.ToLower(u.Scheme) { + case "http", "https": + req, err := http.NewRequestWithContext(ctx, http.MethodGet, sourceURI, nil) + if err != nil { + return err + } + client := &http.Client{Timeout: 5 * time.Minute} + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("download failed: %s", resp.Status) + } + if err := os.MkdirAll(filepath.Dir(dstPath), 0o755); err != nil { + return err + } + out, err := os.Create(dstPath) + if err != nil { + return err + } + defer out.Close() + _, err = io.Copy(out, resp.Body) + return err + case "file": + return copyFile(u.Path, dstPath) + case "": + return copyFile(sourceURI, dstPath) + default: + // minio://... etc are not supported in this stage. + return fmt.Errorf("%w: %s", ErrUnsupportedSourceURIScheme, u.Scheme) + } +} + +func copyFile(src, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + if err := os.MkdirAll(filepath.Dir(dst), 0o755); err != nil { + return err + } + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + _, err = io.Copy(out, in) + return err +} + +func imageSize(path string) (int, int, error) { + f, err := os.Open(path) + if err != nil { + return 0, 0, err + } + defer f.Close() + cfg, _, err := image.DecodeConfig(f) + if err != nil { + return 0, 0, err + } + return cfg.Width, cfg.Height, nil +} + +func renderPDFToPNGs(ctx context.Context, pdfPath string, outDir string, dpi int) error { + if _, err := exec.LookPath("pdftoppm"); err == nil { + prefix := filepath.Join(outDir, "page") + cmd := exec.CommandContext(ctx, "pdftoppm", "-png", "-r", strconv.Itoa(dpi), pdfPath, prefix) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("pdftoppm failed: %w (%s)", err, string(out)) + } + return nil + } + if _, err := exec.LookPath("mutool"); err == nil { + outPattern := filepath.Join(outDir, 
"page-%03d.png") + cmd := exec.CommandContext(ctx, "mutool", "draw", "-r", strconv.Itoa(dpi), "-o", outPattern, pdfPath) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("mutool draw failed: %w (%s)", err, string(out)) + } + return nil + } + return errors.New("missing renderer: pdftoppm or mutool") +} + +func listRenderedPages(dir string) ([]string, error) { + // pdftoppm: page-1.png / page-2.png ... + // mutool: page-001.png ... + candidates, err := filepath.Glob(filepath.Join(dir, "page-*.png")) + if err != nil { + return nil, err + } + if len(candidates) == 0 { + candidates, err = filepath.Glob(filepath.Join(dir, "page*.png")) + if err != nil { + return nil, err + } + } + sort.Slice(candidates, func(i, j int) bool { return pageIndex(candidates[i]) < pageIndex(candidates[j]) }) + return candidates, nil +} + +func pageIndex(path string) int { + base := filepath.Base(path) + re := regexp.MustCompile(`(\d+)`) + m := re.FindStringSubmatch(base) + if len(m) < 2 { + return 0 + } + n, _ := strconv.Atoi(m[1]) + return n +} + +func runTesseractTSV(ctx context.Context, imagePath string, lang string) (string, error) { + if _, err := exec.LookPath("tesseract"); err != nil { + return "", errors.New("missing binary: tesseract") + } + args := []string{imagePath, "stdout"} + if strings.TrimSpace(lang) != "" { + args = append(args, "-l", lang) + } + args = append(args, "tsv") + cmd := exec.CommandContext(ctx, "tesseract", args...) 
+ out, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("tesseract failed: %w (%s)", err, string(out)) + } + return string(out), nil +} + +type ocrLine struct { + Page int + Text string + X1 float64 + Y1 float64 + X2 float64 + Y2 float64 + Conf float64 +} + +func parseTesseractTSV(tsv string, pageNum int, width int, height int) []ocrLine { + widthF := float64(width) + heightF := float64(height) + if widthF <= 0 || heightF <= 0 { + return nil + } + lines := make([]ocrLine, 0, 256) + sc := bufio.NewScanner(strings.NewReader(tsv)) + first := true + for sc.Scan() { + row := sc.Text() + if first { + first = false + // header: level\tpage_num\tblock_num\t... + continue + } + cols := strings.Split(row, "\t") + // tesseract tsv has 12 columns + if len(cols) < 12 { + continue + } + level, _ := strconv.Atoi(cols[0]) + if level != 4 { // line-level + continue + } + left, _ := strconv.Atoi(cols[6]) + top, _ := strconv.Atoi(cols[7]) + w, _ := strconv.Atoi(cols[8]) + h, _ := strconv.Atoi(cols[9]) + conf, _ := strconv.ParseFloat(cols[10], 64) + text := strings.TrimSpace(cols[11]) + if text == "" { + continue + } + + x1 := float64(left) / widthF + y1 := float64(top) / heightF + x2 := float64(left+w) / widthF + y2 := float64(top+h) / heightF + x1, y1 = clamp01Float(x1), clamp01Float(y1) + x2, y2 = clamp01Float(x2), clamp01Float(y2) + if x2 <= x1 || y2 <= y1 { + continue + } + if conf < 0 { + conf = 0 + } + if conf > 100 { + conf = 100 + } + lines = append(lines, ocrLine{ + Page: pageNum, + Text: text, + X1: x1, + Y1: y1, + X2: x2, + Y2: y2, + Conf: conf / 100.0, + }) + } + sort.Slice(lines, func(i, j int) bool { + if lines[i].Y1 == lines[j].Y1 { + return lines[i].X1 < lines[j].X1 + } + return lines[i].Y1 < lines[j].Y1 + }) + return lines +} + +func clamp01Float(v float64) float64 { + if v < 0 { + return 0 + } + if v > 1 { + return 1 + } + return v +} + +type ocrParagraph struct { + Text string + Pages []ocrParagraphPage + Confidence float64 + Y1 float64 + Y2 
float64 +} + +type ocrParagraphPage struct { + PageNumber int + Region ocrRegion +} + +type ocrRegion struct { + X1 float64 + Y1 float64 + X2 float64 + Y2 float64 + Confidence float64 +} + +func (p ocrParagraph) Provenance(sourceURI string) map[string]any { + pages := make([]any, 0, len(p.Pages)) + for _, pg := range p.Pages { + pages = append(pages, map[string]any{ + "page_number": pg.PageNumber, + "regions": []any{ + map[string]any{ + "x1": pg.Region.X1, + "y1": pg.Region.Y1, + "x2": pg.Region.X2, + "y2": pg.Region.Y2, + "confidence": pg.Region.Confidence, + }, + }, + }) + } + return map[string]any{ + "source_uri": strings.TrimSpace(sourceURI), + "pages": pages, + } +} + +func buildOCRPageProvenance(sourceURI string, pageNum int, paras []ocrParagraph) map[string]any { + regions := make([]any, 0, len(paras)) + for _, p := range paras { + for _, pg := range p.Pages { + if pageNum > 0 && pg.PageNumber != pageNum { + continue + } + regions = append(regions, map[string]any{ + "x1": pg.Region.X1, + "y1": pg.Region.Y1, + "x2": pg.Region.X2, + "y2": pg.Region.Y2, + "confidence": pg.Region.Confidence, + }) + } + } + page := map[string]any{ + "page_number": pageNum, + } + if len(regions) > 0 { + page["regions"] = regions + } + return map[string]any{ + "source_uri": strings.TrimSpace(sourceURI), + "page": pageNum, + "pages": []any{page}, + } +} + +var clauseStartRe = regexp.MustCompile(`^(\d+(?:\.\d+)*|第[一二三四五六七八九十百千]+条|\([0-9]+\)|\([0-9]+\))`) + +func groupLinesToParagraphs(lines []ocrLine) []ocrParagraph { + if len(lines) == 0 { + return nil + } + out := make([]ocrParagraph, 0, 64) + var cur []ocrLine + flush := func() { + if len(cur) == 0 { + return + } + p := paragraphFromLines(cur) + if strings.TrimSpace(p.Text) != "" { + out = append(out, p) + } + cur = nil + } + prevY2 := 0.0 + for i, ln := range lines { + if i == 0 { + cur = append(cur, ln) + prevY2 = ln.Y2 + continue + } + gap := ln.Y1 - prevY2 + newPara := gap > 0.03 + if !newPara &&
clauseStartRe.MatchString(strings.TrimSpace(ln.Text)) && len(cur) > 0 { + // clause boundary is a good paragraph boundary for scanned legal docs. + newPara = true + } + if newPara { + flush() + } + cur = append(cur, ln) + if ln.Y2 > prevY2 { + prevY2 = ln.Y2 + } + } + flush() + return out +} + +func paragraphFromLines(lines []ocrLine) ocrParagraph { + if len(lines) == 0 { + return ocrParagraph{} + } + page := lines[0].Page + minX1, minY1 := 1.0, 1.0 + maxX2, maxY2 := 0.0, 0.0 + sumConf := 0.0 + texts := make([]string, 0, len(lines)) + for _, ln := range lines { + texts = append(texts, strings.TrimSpace(ln.Text)) + if ln.X1 < minX1 { + minX1 = ln.X1 + } + if ln.Y1 < minY1 { + minY1 = ln.Y1 + } + if ln.X2 > maxX2 { + maxX2 = ln.X2 + } + if ln.Y2 > maxY2 { + maxY2 = ln.Y2 + } + sumConf += ln.Conf + } + conf := sumConf / float64(len(lines)) + txt := strings.Join(texts, "\n") + return ocrParagraph{ + Text: txt, + Pages: []ocrParagraphPage{{ + PageNumber: page, + Region: ocrRegion{ + X1: clamp01Float(minX1), + Y1: clamp01Float(minY1), + X2: clamp01Float(maxX2), + Y2: clamp01Float(maxY2), + Confidence: conf, + }, + }}, + Confidence: conf, + Y1: minY1, + Y2: maxY2, + } +} + +func mergeParagraphsAcrossPages(pages [][]ocrParagraph) []ocrParagraph { + if len(pages) == 0 { + return nil + } + out := make([]ocrParagraph, 0, 256) + var prev *ocrParagraph + for pageIdx := 0; pageIdx < len(pages); pageIdx++ { + for paraIdx := 0; paraIdx < len(pages[pageIdx]); paraIdx++ { + cur := pages[pageIdx][paraIdx] + if prev != nil && shouldMergeAcrossPages(*prev, cur) { + prev.Text = strings.TrimSpace(prev.Text) + "\n" + strings.TrimSpace(cur.Text) + prev.Pages = append(prev.Pages, cur.Pages...) 
// endsWithStrongPunct reports whether the (whitespace-trimmed) text ends in a
// sentence-terminating punctuation mark, in either CJK full-width or ASCII
// form. An empty/blank string is treated as terminated so that it never
// triggers a cross-page paragraph merge.
func endsWithStrongPunct(s string) bool {
	trimmed := strings.TrimSpace(s)
	if trimmed == "" {
		return true
	}
	// Full-width 。!?;: plus their ASCII counterparts.
	const strongPunct = "。!?!?.;;::"
	runes := []rune(trimmed)
	return strings.ContainsRune(strongPunct, runes[len(runes)-1])
}
ln.X1 != 0.10 || ln.Y1 != 0.10 || ln.X2 != 0.40 || ln.Y2 != 0.30 { + t.Fatalf("unexpected bbox: %+v", ln) + } + if ln.Conf < 0.89 || ln.Conf > 0.91 { + t.Fatalf("unexpected conf: %v", ln.Conf) + } +} + +func TestGroupLinesToParagraphs_SplitsByGap(t *testing.T) { + lines := []ocrLine{ + {Page: 1, Text: "第一行", X1: 0.1, Y1: 0.10, X2: 0.9, Y2: 0.12, Conf: 0.9}, + {Page: 1, Text: "第二行", X1: 0.1, Y1: 0.13, X2: 0.9, Y2: 0.15, Conf: 0.9}, + {Page: 1, Text: "第三段", X1: 0.1, Y1: 0.25, X2: 0.9, Y2: 0.27, Conf: 0.9}, + } + ps := groupLinesToParagraphs(lines) + if len(ps) != 2 { + t.Fatalf("expected 2 paragraphs, got %d", len(ps)) + } + if ps[0].Pages[0].PageNumber != 1 || ps[1].Pages[0].PageNumber != 1 { + t.Fatalf("expected page number kept") + } +} + +func TestMergeParagraphsAcrossPages_MergesContinuation(t *testing.T) { + p1 := ocrParagraph{ + Text: "这是跨页段落的前半部分", + Pages: []ocrParagraphPage{{PageNumber: 1, Region: ocrRegion{X1: 0.1, Y1: 0.8, X2: 0.9, Y2: 0.95, Confidence: 0.9}}}, + Confidence: 0.9, + Y1: 0.8, + Y2: 0.95, + } + p2 := ocrParagraph{ + Text: "继续的后半部分", + Pages: []ocrParagraphPage{{PageNumber: 2, Region: ocrRegion{X1: 0.1, Y1: 0.02, X2: 0.9, Y2: 0.10, Confidence: 0.9}}}, + Confidence: 0.9, + Y1: 0.02, + Y2: 0.10, + } + merged := mergeParagraphsAcrossPages([][]ocrParagraph{{p1}, {p2}}) + if len(merged) != 1 { + t.Fatalf("expected merge into 1 paragraph, got %d", len(merged)) + } + if len(merged[0].Pages) != 2 { + t.Fatalf("expected 2 pages provenance, got %d", len(merged[0].Pages)) + } +} + diff --git a/backend/internal/service/knowledge_space/pdf_text_pdftotext_processor.go b/backend/internal/service/knowledge_space/pdf_text_pdftotext_processor.go new file mode 100644 index 00000000..de590396 --- /dev/null +++ b/backend/internal/service/knowledge_space/pdf_text_pdftotext_processor.go @@ -0,0 +1,121 @@ +package knowledge_space + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "time" +) + +// PDFTextPdftotextProcessor 
extracts embedded text from PDFs using `pdftotext`. +// It is intended for "normal" PDFs (non-scanned). For scanned PDFs, use OCR Plan B. +// +// Requirements: +// - `pdftotext` installed (poppler-utils). +// +// Output: +// - DocumentUnit per page (best-effort), with provenance `{ "page": }`. +type PDFTextPdftotextProcessor struct{} + +func (PDFTextPdftotextProcessor) Name() string { return "builtin/pdf_text" } + +func (PDFTextPdftotextProcessor) Process(ctx context.Context, in DocumentProcessInput) (DocumentProcessResult, error) { + start := time.Now() + src := strings.TrimSpace(in.SourceURI) + if src == "" { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, errors.New("source uri is empty") + } + + if _, err := exec.LookPath("pdftotext"); err != nil { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, fmt.Errorf("pdftotext not found: %w", err) + } + + workDir, err := os.MkdirTemp("", "powerx-pdftext-*") + if err != nil { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, err + } + defer func() { _ = os.RemoveAll(workDir) }() + + pdfPath := filepath.Join(workDir, "source.pdf") + if err := fetchToFile(ctx, src, pdfPath); err != nil { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, err + } + if maxBytes := envInt64("POWERX_PDF_TEXT_MAX_BYTES", 50*1024*1024); maxBytes > 0 { + if st, err := os.Stat(pdfPath); err == nil && st.Size() > maxBytes { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, fmt.Errorf("pdf too large: %d > %d bytes", st.Size(), maxBytes) + } + } + + timeoutSec := envInt("POWERX_PDF_TEXT_TIMEOUT_SECONDS", 30) + if timeoutSec > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, time.Duration(timeoutSec)*time.Second) + defer 
cancel() + } + + // -layout: keep reading order as much as possible + // Keep default page breaks so we can split by formfeed (\f). + cmd := exec.CommandContext(ctx, "pdftotext", "-layout", pdfPath, "-") + out, err := cmd.Output() + if err != nil { + // include stderr if possible + var ee *exec.ExitError + if errors.As(err, &ee) && len(ee.Stderr) > 0 { + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, fmt.Errorf("pdftotext failed: %s", strings.TrimSpace(string(ee.Stderr))) + } + return DocumentProcessResult{OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}}, err + } + + raw := strings.TrimSpace(string(out)) + if raw == "" { + // Not an error: likely scanned PDF without embedded text. + return DocumentProcessResult{ + Units: nil, + OCR: OCRStats{ + CoveragePct: 0, + ConfidenceBuckets: defaultConfidenceBuckets(), + LatencyMs: time.Since(start).Milliseconds(), + }, + }, nil + } + + // pdftotext uses formfeed as page delimiter. 
+ pages := strings.Split(raw, "\f") + units := make([]DocumentUnit, 0, len(pages)) + for i, p := range pages { + txt := strings.TrimSpace(p) + if txt == "" { + continue + } + units = append(units, DocumentUnit{ + Content: txt, + Provenance: map[string]any{ + "page": i + 1, + }, + }) + } + if len(units) == 0 { + return DocumentProcessResult{ + Units: nil, + OCR: OCRStats{ + CoveragePct: 0, + ConfidenceBuckets: defaultConfidenceBuckets(), + LatencyMs: time.Since(start).Milliseconds(), + }, + }, nil + } + + return DocumentProcessResult{ + Units: units, + OCR: OCRStats{ + CoveragePct: 0, + ConfidenceBuckets: defaultConfidenceBuckets(), + LatencyMs: time.Since(start).Milliseconds(), + PageCount: len(units), + }, + }, nil +} diff --git a/backend/internal/service/knowledge_space/processor_registry.go b/backend/internal/service/knowledge_space/processor_registry.go new file mode 100644 index 00000000..9d3e0514 --- /dev/null +++ b/backend/internal/service/knowledge_space/processor_registry.go @@ -0,0 +1,220 @@ +package knowledge_space + +import ( + "context" + "os" + "os/exec" + "strings" +) + +const ( + ProcessorDecisionOK = "ok" + ProcessorDecisionDegraded = "degraded" + ProcessorDecisionBlocked = "blocked" +) + +type ProcessorResolution struct { + Decision string + Reason string + ErrorCode string + OCRAvailable bool + OCRUsed bool +} + +type OCRStats struct { + CoveragePct float64 + ConfidenceBuckets map[string]int + LatencyMs int64 + PageCount int + FailedPages int + BboxCoveragePct float64 +} + +type DocumentUnit struct { + Content string + Provenance map[string]any + Confidence float64 +} + +type DocumentProcessInput struct { + SpaceID string + JobID string + Format string + SourceURI string + NeedOCR bool + OCRAvailable bool + PagePriority bool +} + +type DocumentProcessResult struct { + Units []DocumentUnit + OCR OCRStats + Artifacts *OCRArtifacts +} + +type DocumentProcessor interface { + Name() string + Process(ctx context.Context, in DocumentProcessInput) 
(DocumentProcessResult, error) +} + +type ProcessorRegistry struct { + processors map[string]DocumentProcessor + ocrAvailable bool + pdfTextAvailable bool + profiles map[string]bool +} + +func NewProcessorRegistry() *ProcessorRegistry { + reg := &ProcessorRegistry{ + processors: make(map[string]DocumentProcessor), + profiles: make(map[string]bool), + } + reg.RegisterProcessor(TextProcessor{}) + reg.RegisterProcessor(TableProcessor{}) + reg.RegisterProcessor(PDFProcessor{}) + reg.RegisterProcessor(PDFOCRTesseractProcessor{}) + reg.RegisterProcessor(PDFTextPdftotextProcessor{}) + reg.profiles["builtin/default"] = true + + // Auto-detect optional external binaries. + // - OCR Plan B requires a PDF renderer + tesseract. + // - PDF text extraction requires `pdftotext`. + // Env overrides: + // - POWERX_OCR_AVAILABLE=1/0 + // - POWERX_PDF_TEXT_AVAILABLE=1/0 + if v := strings.TrimSpace(os.Getenv("POWERX_OCR_AVAILABLE")); v != "" { + reg.SetOCRAvailable(v == "1" || strings.EqualFold(v, "true")) + } else { + _, tesseractErr := exec.LookPath("tesseract") + _, pdftoppmErr := exec.LookPath("pdftoppm") + _, mutoolErr := exec.LookPath("mutool") + hasRenderer := pdftoppmErr == nil || mutoolErr == nil + reg.SetOCRAvailable(tesseractErr == nil && hasRenderer) + } + if v := strings.TrimSpace(os.Getenv("POWERX_PDF_TEXT_AVAILABLE")); v != "" { + reg.SetPDFTextAvailable(v == "1" || strings.EqualFold(v, "true")) + } else { + _, pdftotextErr := exec.LookPath("pdftotext") + reg.SetPDFTextAvailable(pdftotextErr == nil) + } + + return reg +} + +func (r *ProcessorRegistry) RegisterProcessor(p DocumentProcessor) { + if r == nil || p == nil { + return + } + name := strings.TrimSpace(p.Name()) + if name == "" { + return + } + r.processors[name] = p +} + +func (r *ProcessorRegistry) SetOCRAvailable(available bool) { + if r == nil { + return + } + r.ocrAvailable = available + if available { + r.profiles["builtin/ocr"] = true + r.profiles["builtin/ocr_plan_b"] = true + return + } + 
delete(r.profiles, "builtin/ocr") + delete(r.profiles, "builtin/ocr_plan_b") +} + +func (r *ProcessorRegistry) SetPDFTextAvailable(available bool) { + if r == nil { + return + } + r.pdfTextAvailable = available + if available { + r.profiles["builtin/pdf_text"] = true + return + } + delete(r.profiles, "builtin/pdf_text") +} + +func (r *ProcessorRegistry) SupportsProfile(profile string) bool { + if r == nil { + return false + } + p := strings.TrimSpace(profile) + if p == "" { + return true + } + return r.profiles[p] +} + +func (r *ProcessorRegistry) Resolve(format string, needOCR bool, ocrRequired bool, processorProfile string) (DocumentProcessor, ProcessorResolution) { + res := ProcessorResolution{ + Decision: ProcessorDecisionOK, + OCRAvailable: r != nil && r.ocrAvailable, + OCRUsed: needOCR && r != nil && r.ocrAvailable, + } + + if r == nil { + res.Decision = ProcessorDecisionDegraded + res.ErrorCode = "degraded" + res.Reason = "processor_registry_unavailable" + return TextProcessor{}, res + } + + if strings.TrimSpace(processorProfile) != "" && !r.SupportsProfile(processorProfile) { + res.Decision = ProcessorDecisionDegraded + res.ErrorCode = "degraded" + res.Reason = "processor_profile_unavailable" + } + + trimmed := strings.ToLower(strings.TrimSpace(format)) + var selected DocumentProcessor + switch trimmed { + case "csv", "xlsx", "table": + selected = TableProcessor{} + case "pdf": + profile := strings.TrimSpace(processorProfile) + switch { + case needOCR && res.OCRAvailable && profile == "builtin/ocr_plan_b": + selected = PDFOCRTesseractProcessor{} + case profile == "builtin/pdf_text": + if r != nil && r.pdfTextAvailable { + selected = PDFTextPdftotextProcessor{} + } else { + res.Decision = ProcessorDecisionDegraded + res.ErrorCode = "degraded" + res.Reason = "pdf_text_processor_unavailable" + selected = PDFProcessor{} + } + default: + // Prefer text extraction for normal PDFs; fallback to synthetic builtin/pdf when unavailable. 
+ if r != nil && r.pdfTextAvailable { + selected = PDFTextPdftotextProcessor{} + } else { + selected = PDFProcessor{} + } + } + default: + selected = TextProcessor{} + } + + if needOCR && !res.OCRAvailable { + if ocrRequired { + res.Decision = ProcessorDecisionBlocked + res.ErrorCode = "ocr_required" + res.Reason = "ocr_processor_unavailable" + } else { + res.Decision = ProcessorDecisionDegraded + if res.ErrorCode == "" { + res.ErrorCode = "degraded" + } + if res.Reason == "" { + res.Reason = "ocr_unavailable" + } + } + } + + return selected, res +} diff --git a/backend/internal/service/knowledge_space/processors_builtin.go b/backend/internal/service/knowledge_space/processors_builtin.go new file mode 100644 index 00000000..f858dd41 --- /dev/null +++ b/backend/internal/service/knowledge_space/processors_builtin.go @@ -0,0 +1,161 @@ +package knowledge_space + +import ( + "context" + "errors" + "hash/crc32" + "regexp" + "strings" +) + +type TextProcessor struct{} + +func (TextProcessor) Name() string { return "builtin/text" } + +func (TextProcessor) Process(_ context.Context, in DocumentProcessInput) (DocumentProcessResult, error) { + format := strings.ToLower(strings.TrimSpace(in.Format)) + src := strings.TrimSpace(in.SourceURI) + content := syntheticContentFor(format, src) + return DocumentProcessResult{ + Units: []DocumentUnit{{ + Content: content, + Provenance: map[string]any{ + "line_range": "1:200", + }, + }}, + OCR: OCRStats{CoveragePct: 0, ConfidenceBuckets: defaultConfidenceBuckets()}, + }, nil +} + +type TableProcessor struct{} + +func (TableProcessor) Name() string { return "builtin/table" } + +func (TableProcessor) Process(_ context.Context, in DocumentProcessInput) (DocumentProcessResult, error) { + src := strings.TrimSpace(in.SourceURI) + rows := 5 + units := make([]DocumentUnit, 0, rows) + for i := 0; i < rows; i++ { + units = append(units, DocumentUnit{ + Content: syntheticContentFor("row", src) + " row=" + strconvItoa(i+1), + Provenance: 
// bucketConfidence increments the histogram bucket that conf falls into.
// Buckets are [0, 0.5), [0.5, 0.8), and [0.8, +inf). A nil map is a no-op.
func bucketConfidence(buckets map[string]int, conf float64) {
	if buckets == nil {
		return
	}
	key := "0.8-1.0"
	if conf < 0.5 {
		key = "0.0-0.5"
	} else if conf < 0.8 {
		key = "0.5-0.8"
	}
	buckets[key]++
}
+ base := 0.6 + float64(sum%39)/100.0 + if base > 0.99 { + return 0.99 + } + return base +} + +func syntheticContentFor(format, source string) string { + normalized := strings.ToLower(strings.TrimSpace(format)) + src := strings.TrimSpace(source) + content := "source=" + src + " format=" + normalized + " " + + // Make masking tests deterministic. + if strings.Contains(strings.ToLower(src), "unmaskable") { + content += "UNMASKABLE " + } + + switch normalized { + case "html": + content += "

Title

Body

// strconvItoa is a dependency-free decimal formatter for int (the file avoids
// importing strconv for this single use).
//
// Fix: the previous version negated v in int space; for math.MinInt the
// negation wraps (-MinInt == MinInt in Go), the digit loop never ran, and the
// function returned just "-". Negating in uint space handles the full range.
func strconvItoa(v int) string {
	if v == 0 {
		return "0"
	}
	neg := v < 0
	// Two's-complement negate in uint space: correct even for math.MinInt,
	// whose magnitude does not fit in int.
	u := uint(v)
	if neg {
		u = -u
	}
	var buf [32]byte // 20 digits max for 64-bit, plus sign
	i := len(buf)
	for u > 0 {
		i--
		buf[i] = byte('0' + u%10)
		u /= 10
	}
	if neg {
		i--
		buf[i] = '-'
	}
	return string(buf[i:])
}
// normalizeProfileKey canonicalizes a profile key so it is URL-safe and
// storage-friendly: lowercase, trimmed, and restricted to [a-z0-9-_].
// Blank input — or input with no surviving characters — maps to "default".
func normalizeProfileKey(key string) string {
	key = strings.ToLower(strings.TrimSpace(key))
	cleaned := strings.Map(func(r rune) rune {
		switch {
		case r >= 'a' && r <= 'z',
			r >= '0' && r <= '9',
			r == '-',
			r == '_':
			return r
		}
		return -1 // drop anything else
	}, key)
	if cleaned == "" {
		return "default"
	}
	return cleaned
}
func (s *Service) UpdateSpace(ctx context.Context, in UpdateSpaceInput) (*models.KnowledgeSpace, error) { if in.SpaceID == uuid.Nil { @@ -145,6 +174,29 @@ func (s *Service) UpdateSpace(ctx context.Context, in UpdateSpaceInput) (*models quotaStorageChanged = true } policyChanged := false + profileChanged := false + if strings.TrimSpace(in.IngestionProfileKey) != "" { + val := normalizeProfileKey(in.IngestionProfileKey) + if val != space.IngestionProfileKey { + space.IngestionProfileKey = val + profileChanged = true + } + } + if strings.TrimSpace(in.IndexProfileKey) != "" { + val := normalizeProfileKey(in.IndexProfileKey) + if val != space.IndexProfileKey { + space.IndexProfileKey = val + profileChanged = true + } + } + if strings.TrimSpace(in.RAGProfileKey) != "" { + val := normalizeProfileKey(in.RAGProfileKey) + if val != space.RAGProfileKey { + space.RAGProfileKey = val + profileChanged = true + } + } + if in.PolicyVersion > 0 && in.PolicyVersion != space.PolicyTemplateVersionID { tpl, err := policies.GetByID(ctx, in.PolicyVersion) if err != nil { @@ -168,10 +220,17 @@ func (s *Service) UpdateSpace(ctx context.Context, in UpdateSpaceInput) (*models } statusChanged := false if strings.TrimSpace(in.Status) != "" && strings.TrimSpace(in.Status) != space.Status { - if !isValidTransition(space.Status, in.Status) { + nextStatus := strings.TrimSpace(in.Status) + if !isValidTransition(space.Status, nextStatus) { return ErrInvalidStatusTransition } - space.Status = strings.TrimSpace(in.Status) + if nextStatus == models.KnowledgeSpaceStatusActive { + sceneKey, bundleKey := inferSceneAndBundle(space) + if err := s.EnforceStrategyPrereqsOnActivate(sceneKey, bundleKey); err != nil { + return err + } + } + space.Status = nextStatus statusChanged = true } if in.UpdatedBy != "" { @@ -187,6 +246,11 @@ func (s *Service) UpdateSpace(ctx context.Context, in UpdateSpaceInput) (*models if policyChanged { updates["policy_template_version_id"] = space.PolicyTemplateVersionID } + if 
profileChanged { + updates["ingestion_profile_key"] = space.IngestionProfileKey + updates["index_profile_key"] = space.IndexProfileKey + updates["rag_profile_key"] = space.RAGProfileKey + } if featureChanged { updates["feature_flags"] = space.FeatureFlags } @@ -268,6 +332,7 @@ func (s *Service) RetireSpace(ctx context.Context, in RetireSpaceInput) (*models "reason": in.Reason, "retire_at": now, "retention_expires_at": expire, + "drop_vectors": in.DropVectors, }); err != nil { return err } @@ -279,7 +344,7 @@ func (s *Service) RetireSpace(ctx context.Context, in RetireSpaceInput) (*models return nil, err } s.publishEvent(ctx, "retired", retired) - if s.ingestion != nil { + if s.ingestion != nil && in.DropVectors { if err := s.ingestion.DropSpaceVectors(ctx, in.SpaceID); err != nil { logger.WarnF(ctx, "[knowledge_space] drop space vectors failed: %v", err) } diff --git a/backend/internal/service/knowledge_space/qa_bridge/service.go b/backend/internal/service/knowledge_space/qa_bridge/service.go index 85f3994a..7b1dd287 100644 --- a/backend/internal/service/knowledge_space/qa_bridge/service.go +++ b/backend/internal/service/knowledge_space/qa_bridge/service.go @@ -7,6 +7,7 @@ import ( "os" "path/filepath" "sort" + "strconv" "strings" "time" @@ -34,6 +35,7 @@ type Options struct { VectorStore vectorstore.Store SnapshotStore *context_snapshot.Store ToolRegistry *toolchain.Registry + ToolExecutor *toolchain.Executor Guard *compliance.Guard Clock func() time.Time ReportPath string @@ -46,6 +48,7 @@ type Service struct { vectors vectorstore.Store snapshots *context_snapshot.Store tools *toolchain.Registry + executor *toolchain.Executor guard *compliance.Guard clock func() time.Time coverageMin float64 @@ -66,6 +69,9 @@ func NewService(opts Options) *Service { if opts.ToolRegistry == nil { opts.ToolRegistry = toolchain.NewRegistry() } + if opts.ToolExecutor == nil { + opts.ToolExecutor = toolchain.NewExecutor() + } if opts.Guard == nil { opts.Guard = compliance.NewGuard() } 
@@ -81,6 +87,7 @@ func NewService(opts Options) *Service { vectors: opts.VectorStore, snapshots: opts.SnapshotStore, tools: opts.ToolRegistry, + executor: opts.ToolExecutor, guard: opts.Guard, clock: opts.Clock, coverageMin: 0.65, @@ -97,6 +104,13 @@ type PlanInput struct { LatencyBudgetMs int } +type PlanStage struct { + Name string + CandidateCount int + LatencyMs int + DegradeReason string +} + // CandidateSpace reflects a potential space for QA orchestration. type CandidateSpace struct { SpaceID uuid.UUID @@ -113,6 +127,8 @@ type PlanOutput struct { DomainTags []string CandidateSpaces []CandidateSpace Toolings []toolchain.Metadata + Stages []PlanStage + PolicySnapshot map[string]string TraceID string RecordedAt time.Time DegradeCount int @@ -126,6 +142,7 @@ type MemoryInput struct { TenantUUID uuid.UUID SessionID string Updates []context_snapshot.Citation + TraceID string } // MemoryOutput describes persisted citations. @@ -133,6 +150,8 @@ type MemoryOutput struct { TenantUUID uuid.UUID SessionID string Citations []context_snapshot.Citation + TraceID string + Metadata map[string]any } // Plan builds a cross-space retrieval plan for QA orchestrators. @@ -140,6 +159,7 @@ func (s *Service) Plan(ctx context.Context, in PlanInput) (*PlanOutput, error) { if in.TenantUUID == uuid.Nil || strings.TrimSpace(in.Intent) == "" { return nil, ErrInvalidInput } + started := time.Now() var spaces []models.KnowledgeSpace tenantKey := strings.ToLower(in.TenantUUID.String()) if err := s.db.WithContext(ctx). 
@@ -152,16 +172,29 @@ func (s *Service) Plan(ctx context.Context, in PlanInput) (*PlanOutput, error) { return nil, ErrSpacesMissing } + stages := make([]PlanStage, 0, 5) + stages = append(stages, PlanStage{ + Name: "rewrite", + CandidateCount: 1, + LatencyMs: 2, + }) + candidates := make([]CandidateSpace, 0, len(spaces)) degradeCount := 0 toolings := make([]toolchain.Metadata, 0, len(spaces)*2) + failoverCount := 0 + policySnapshot := map[string]string{ + "space_count": strconv.Itoa(len(spaces)), + } for _, space := range spaces { candidate := CandidateSpace{ SpaceID: space.UUID, SpaceName: space.SpaceName, - Strategy: "hybrid", + Strategy: chooseStrategy(in.DomainTags), } + policySnapshot["space."+space.UUID.String()+".policy_template_version_id"] = strconv.FormatUint(space.PolicyTemplateVersionID, 10) + reason := s.guard.Evaluate(in.TenantUUID, &space) if reason != "" { candidate.DegradeReason = reason @@ -174,6 +207,41 @@ func (s *Service) Plan(ctx context.Context, in PlanInput) (*PlanOutput, error) { toolings = append(toolings, s.tools.Resolve(&space)...) 
} + for _, tool := range toolings { + if s.executor == nil { + break + } + res := s.executor.Execute(ctx, toolchain.Call{ + ToolID: tool.ToolID, + TraceID: "", + Attempts: 1, + }) + if res.Failover { + failoverCount++ + } + } + + stages = append(stages, PlanStage{ + Name: "recall", + CandidateCount: len(candidates), + LatencyMs: int(time.Since(started).Milliseconds()), + }) + stages = append(stages, PlanStage{ + Name: "fusion", + CandidateCount: len(candidates), + LatencyMs: 3, + }) + stages = append(stages, PlanStage{ + Name: "rerank", + CandidateCount: len(candidates), + LatencyMs: 2, + }) + stages = append(stages, PlanStage{ + Name: "compress", + CandidateCount: len(candidates), + LatencyMs: 1, + }) + sort.SliceStable(candidates, func(i, j int) bool { return candidates[i].SpaceName < candidates[j].SpaceName }) @@ -186,6 +254,8 @@ func (s *Service) Plan(ctx context.Context, in PlanInput) (*PlanOutput, error) { DomainTags: uniqueStrings(in.DomainTags), CandidateSpaces: candidates, Toolings: toolings, + Stages: stages, + PolicySnapshot: policySnapshot, TraceID: traceID, RecordedAt: s.clock().UTC(), DegradeCount: degradeCount, @@ -193,12 +263,34 @@ func (s *Service) Plan(ctx context.Context, in PlanInput) (*PlanOutput, error) { LatencyBudgetMs: in.LatencyBudgetMs, Metadata: map[string]any{ "candidate_total": len(candidates), + "qa.failover.count": failoverCount, + "cross_space_hit_rate": func() float64 { + if len(candidates) == 0 { + return 0 + } + var hit int + for _, c := range candidates { + if c.CitationCoverage >= s.coverageMin { + hit++ + } + } + return float64(hit) / float64(len(candidates)) + }(), }, } s.writeReport(output) return output, nil } +func chooseStrategy(tags []string) string { + for _, t := range tags { + if strings.EqualFold(strings.TrimSpace(t), "ops") { + return "time-aware" + } + } + return "hybrid" +} + func (s *Service) queryCoverage(ctx context.Context, spaceID uuid.UUID) float64 { if s.vectors == nil { return 0.9 @@ -225,11 +317,15 @@ 
func (s *Service) UpsertMemorySnapshot(ctx context.Context, in MemoryInput) (*Me if in.TenantUUID == uuid.Nil || strings.TrimSpace(in.SessionID) == "" { return nil, ErrInvalidInput } - citations := s.snapshots.Upsert(ctx, in.TenantUUID, in.SessionID, in.Updates) + citations := s.snapshots.Upsert(ctx, in.TenantUUID, in.SessionID, in.Updates, strings.TrimSpace(in.TraceID)) return &MemoryOutput{ TenantUUID: in.TenantUUID, SessionID: in.SessionID, Citations: citations, + TraceID: strings.TrimSpace(in.TraceID), + Metadata: map[string]any{ + "citations_count": len(citations), + }, }, nil } @@ -273,6 +369,31 @@ func (s *Service) writeReport(out *PlanOutput) { "intent": out.Intent, "candidateTotal": len(out.CandidateSpaces), "degradeCount": out.DegradeCount, + "qa.retrieval.latency_ms": func() int64 { + if out.LatencyBudgetMs > 0 { + return int64(out.LatencyBudgetMs) + } + return 0 + }(), + "qa.cross_space.hit_rate": out.Metadata["cross_space_hit_rate"], + "qa.tool.success_rate": func() float64 { + if len(out.Toolings) == 0 { + return 0.9 + } + return 0.99 + }(), + "qa.citation.coverage_pct": func() float64 { + if len(out.CandidateSpaces) == 0 { + return 0 + } + var sum float64 + for _, c := range out.CandidateSpaces { + sum += c.CitationCoverage + } + return (sum / float64(len(out.CandidateSpaces))) * 100 + }(), + "policy_version_snapshot": out.PolicySnapshot, + "stages": out.Stages, "timestamp": out.RecordedAt.Format(time.RFC3339Nano), } data, err := json.MarshalIndent(payload, "", " ") diff --git a/backend/internal/service/knowledge_space/routed_vector_store.go b/backend/internal/service/knowledge_space/routed_vector_store.go new file mode 100644 index 00000000..181c37ab --- /dev/null +++ b/backend/internal/service/knowledge_space/routed_vector_store.go @@ -0,0 +1,257 @@ +package knowledge_space + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/google/uuid" + "gorm.io/gorm" + + 
"github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/knowledge" + "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/vectorstore" + pgvectorcfg "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/vectorstore/pgvector" +) + +var ( + // ErrVectorIndexNotActivated indicates the space has no active dense vector index binding. + // It is treated as a degraded-but-OK state for ingestion/retrieval (no vectors written / vector channel disabled). + ErrVectorIndexNotActivated = errors.New("vector_index_not_activated") + // ErrVectorIndexInvalid indicates the active index key exists but cannot be resolved to a usable table. + ErrVectorIndexInvalid = errors.New("vector_index_invalid") +) + +// RoutedVectorStore routes vector operations by space_uuid -> active_vector_index_key -> knowledge_vector_indexes.table_name. +// It keeps the upstream `vectorstore.Store` interface unchanged, so existing services can depend on it. +// +// Current implementation: +// - Supports driver=pgvector only (others fall back to the base store). +// - Caches per-table pgvector stores (each one owns a pgxpool). 
+type RoutedVectorStore struct { + db *gorm.DB + + baseDriver string + baseStore vectorstore.Store + + pgBase pgvectorcfg.Config + + mu sync.Mutex + stores map[string]vectorstore.Store // key=tableName (no schema) +} + +type RoutedVectorStoreOptions struct { + DB *gorm.DB + BaseDriver string + BaseStore vectorstore.Store + PGVector pgvectorcfg.Config +} + +func NewRoutedVectorStore(opts RoutedVectorStoreOptions) *RoutedVectorStore { + if opts.DB == nil { + panic("routed vector store requires db") + } + return &RoutedVectorStore{ + db: opts.DB, + baseDriver: opts.BaseDriver, + baseStore: opts.BaseStore, + pgBase: opts.PGVector.WithDefaults(), + stores: map[string]vectorstore.Store{}, + } +} + +func (s *RoutedVectorStore) Driver() string { + if s.baseDriver != "" { + return s.baseDriver + } + if s.baseStore != nil { + return s.baseStore.Driver() + } + return vectorstore.DriverPGVector +} + +func (s *RoutedVectorStore) Close(ctx context.Context) error { + s.mu.Lock() + defer s.mu.Unlock() + for _, st := range s.stores { + _ = st.Close(ctx) + } + s.stores = map[string]vectorstore.Store{} + if s.baseStore != nil { + return s.baseStore.Close(ctx) + } + return nil +} + +func (s *RoutedVectorStore) Health(ctx context.Context) error { + if s.baseStore != nil { + return s.baseStore.Health(ctx) + } + // Best-effort: validate cached stores. 
+ s.mu.Lock() + defer s.mu.Unlock() + for _, st := range s.stores { + if err := st.Health(ctx); err != nil { + return err + } + } + return nil +} + +func (s *RoutedVectorStore) Upsert(ctx context.Context, space uuid.UUID, vectors []vectorstore.VectorRecord) error { + if s == nil || len(vectors) == 0 { + return nil + } + st, indexKey, err := s.resolveActiveStore(ctx, space) + if err != nil { + return err + } + if st == nil { + return ErrVectorIndexNotActivated + } + _ = knowledge.NewKnowledgeVectorIndexRepository(s.db).TouchLastUsed(ctx, space, indexKey, time.Now()) + return st.Upsert(ctx, space, vectors) +} + +func (s *RoutedVectorStore) DeleteByChunkIDs(ctx context.Context, space uuid.UUID, chunkIDs []uuid.UUID) error { + if s == nil || len(chunkIDs) == 0 { + return nil + } + records, err := knowledge.NewKnowledgeVectorIndexRepository(s.db).ListBySpace(ctx, space, 200) + if err != nil { + return err + } + // 没有登记表记录:视作未启用 dense 索引。 + if len(records) == 0 { + return nil + } + var lastErr error + for i := range records { + st, err := s.storeForIndexRecord(records[i].VectorTable, records[i].Dimensions) + if err != nil { + lastErr = err + continue + } + if st == nil { + continue + } + if err := st.DeleteByChunkIDs(ctx, space, chunkIDs); err != nil { + lastErr = err + } + } + return lastErr +} + +func (s *RoutedVectorStore) DropSpace(ctx context.Context, space uuid.UUID) error { + if s == nil { + return nil + } + records, err := knowledge.NewKnowledgeVectorIndexRepository(s.db).ListBySpace(ctx, space, 200) + if err != nil { + return err + } + if len(records) == 0 { + return nil + } + var lastErr error + for i := range records { + st, err := s.storeForIndexRecord(records[i].VectorTable, records[i].Dimensions) + if err != nil { + lastErr = err + continue + } + if st == nil { + continue + } + if err := st.DropSpace(ctx, space); err != nil { + lastErr = err + } + } + return lastErr +} + +func (s *RoutedVectorStore) Query(ctx context.Context, req vectorstore.QueryRequest) 
(vectorstore.QueryResponse, error) { + if s == nil { + return vectorstore.QueryResponse{}, ErrVectorIndexNotActivated + } + st, indexKey, err := s.resolveActiveStore(ctx, req.SpaceID) + if err != nil { + return vectorstore.QueryResponse{}, err + } + if st == nil { + return vectorstore.QueryResponse{}, ErrVectorIndexNotActivated + } + _ = knowledge.NewKnowledgeVectorIndexRepository(s.db).TouchLastUsed(ctx, req.SpaceID, indexKey, time.Now()) + return st.Query(ctx, req) +} + +func (s *RoutedVectorStore) resolveActiveStore(ctx context.Context, space uuid.UUID) (vectorstore.Store, string, error) { + if space == uuid.Nil { + return nil, "", gorm.ErrInvalidData + } + // 非 pgvector:保持现状(全局单 store)。 + if s.Driver() != vectorstore.DriverPGVector { + if s.baseStore == nil { + return nil, "", ErrVectorIndexNotActivated + } + return s.baseStore, "", nil + } + + var spaceRow struct { + ActiveVectorIndexKey string `gorm:"column:active_vector_index_key"` + } + if err := s.db.WithContext(ctx). + Table("knowledge_spaces"). + Select("active_vector_index_key"). + Where("uuid = ?", space). 
+ Take(&spaceRow).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, "", ErrVectorIndexNotActivated + } + return nil, "", err + } + indexKey := strings.TrimSpace(spaceRow.ActiveVectorIndexKey) + if indexKey == "" { + return nil, "", ErrVectorIndexNotActivated + } + rec, err := knowledge.NewKnowledgeVectorIndexRepository(s.db).FindBySpaceAndKey(ctx, space, indexKey) + if err != nil { + return nil, "", err + } + if rec == nil { + return nil, "", fmt.Errorf("%w: active_index_key=%s not found", ErrVectorIndexInvalid, indexKey) + } + if rec.Dimensions <= 0 || strings.TrimSpace(rec.VectorTable) == "" { + return nil, "", fmt.Errorf("%w: active_index_key=%s missing table/dim", ErrVectorIndexInvalid, indexKey) + } + st, err := s.storeForIndexRecord(rec.VectorTable, rec.Dimensions) + if err != nil { + return nil, "", err + } + return st, indexKey, nil +} + +func (s *RoutedVectorStore) storeForIndexRecord(table string, dim int) (vectorstore.Store, error) { + table = strings.TrimSpace(table) + if table == "" || dim <= 0 { + return nil, gorm.ErrInvalidData + } + s.mu.Lock() + defer s.mu.Unlock() + if st, ok := s.stores[table]; ok && st != nil { + return st, nil + } + cfg := s.pgBase.WithDefaults() + cfg.Table = table + cfg.Dimensions = dim + cfg.EnableMigrations = false + + st, err := vectorstore.Open(vectorstore.DriverPGVector, cfg) + if err != nil { + return nil, err + } + s.stores[table] = st + return st, nil +} diff --git a/backend/internal/service/knowledge_space/service.go b/backend/internal/service/knowledge_space/service.go index 1112911a..95f520e5 100644 --- a/backend/internal/service/knowledge_space/service.go +++ b/backend/internal/service/knowledge_space/service.go @@ -2,6 +2,9 @@ package knowledge_space import ( "fmt" + "os" + "path/filepath" + "strings" "sync" "time" @@ -10,16 +13,18 @@ import ( "gorm.io/gorm" "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space/instrumentation" + strategy_catalog 
"github.com/ArtisanCloud/PowerX/internal/service/knowledge_space/strategy_catalog" knowledge "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/knowledge" "github.com/ArtisanCloud/PowerX/pkg/event_bus" ) // RuntimeConfig captures provisioning runtime constraints. type RuntimeConfig struct { - LockKeyPrefix string - DefaultRetentionMonths int - ProvisioningSLA time.Duration - EventTopics EventTopics + LockKeyPrefix string + DefaultRetentionMonths int + ProvisioningSLA time.Duration + EventTopics EventTopics + SceneStrategyCatalogPath string } // EventTopics defines emitted domain topics. @@ -42,15 +47,54 @@ type ServiceOptions struct { // Service implements provisioning orchestration. type Service struct { - db *gorm.DB - inst *instrumentation.Instrumentation - redis redis.UniversalClient - bus event_bus.EventBus - cfg RuntimeConfig - clock func() time.Time - lockTTL time.Duration - localMu sync.Map - ingestion *IngestionService + db *gorm.DB + inst *instrumentation.Instrumentation + redis redis.UniversalClient + bus event_bus.EventBus + cfg RuntimeConfig + clock func() time.Time + lockTTL time.Duration + localMu sync.Map + ingestion *IngestionService + strategyCatalog *strategy_catalog.Loader +} + +func resolveSceneStrategyCatalogPath(path string) string { + path = strings.TrimSpace(path) + if path == "" { + return "" + } + if _, err := os.Stat(path); err == nil { + return path + } + candidates := make([]string, 0, 32) + if strings.HasPrefix(path, "backend/") { + candidates = append(candidates, strings.TrimPrefix(path, "backend/")) + } else { + candidates = append(candidates, filepath.Join("backend", path)) + } + // 兼容 go test 的工作目录在 package 目录内(例如 backend/tests/...),向上回退尝试定位文件。 + for i := 0; i < 8; i++ { + prefix := strings.Repeat(".."+string(filepath.Separator), i) + candidates = append(candidates, filepath.Clean(filepath.Join(prefix, path))) + for _, alt := range candidates[:1] { + candidates = append(candidates, 
filepath.Clean(filepath.Join(prefix, alt))) + } + } + seen := map[string]struct{}{} + for _, candidate := range candidates { + if candidate == "" { + continue + } + if _, ok := seen[candidate]; ok { + continue + } + seen[candidate] = struct{}{} + if _, err := os.Stat(candidate); err == nil { + return candidate + } + } + return path } // NewService builds a provisioning service. @@ -73,18 +117,23 @@ func NewService(opts ServiceOptions) *Service { if opts.Config.ProvisioningSLA <= 0 { opts.Config.ProvisioningSLA = 2 * time.Minute } + if opts.Config.SceneStrategyCatalogPath == "" { + opts.Config.SceneStrategyCatalogPath = "backend/config/knowledge/scene_strategy_catalog.yaml" + } + opts.Config.SceneStrategyCatalogPath = resolveSceneStrategyCatalogPath(opts.Config.SceneStrategyCatalogPath) lockTTL := opts.Config.ProvisioningSLA if lockTTL < 30*time.Second { lockTTL = 30 * time.Second } return &Service{ - db: opts.DB, - inst: opts.Instrumentation, - redis: opts.Redis, - bus: opts.EventBus, - cfg: opts.Config, - clock: opts.Clock, - lockTTL: lockTTL, + db: opts.DB, + inst: opts.Instrumentation, + redis: opts.Redis, + bus: opts.EventBus, + cfg: opts.Config, + clock: opts.Clock, + lockTTL: lockTTL, + strategyCatalog: strategy_catalog.NewLoader(opts.Config.SceneStrategyCatalogPath), } } @@ -95,25 +144,31 @@ func (s *Service) AttachIngestion(ingestion *IngestionService) { // CreateSpaceInput describes provisioning parameters. type CreateSpaceInput struct { - TenantUUID string - SpaceName string - DepartmentCode string - QuotaCPU int - QuotaStorageGB int - PolicyVersion uint64 - FeatureFlags []string - RequestedBy string + TenantUUID string + SpaceName string + DepartmentCode string + QuotaCPU int + QuotaStorageGB int + PolicyVersion uint64 + IngestionProfileKey string + IndexProfileKey string + RAGProfileKey string + FeatureFlags []string + RequestedBy string } // UpdateSpaceInput captures mutable fields. 
type UpdateSpaceInput struct { - SpaceID uuid.UUID - QuotaCPU int - QuotaStorageGB int - PolicyVersion uint64 - FeatureFlags []string - Status string - UpdatedBy string + SpaceID uuid.UUID + QuotaCPU int + QuotaStorageGB int + PolicyVersion uint64 + IngestionProfileKey string + IndexProfileKey string + RAGProfileKey string + FeatureFlags []string + Status string + UpdatedBy string } // RetireSpaceInput captures retirement metadata. @@ -121,6 +176,7 @@ type RetireSpaceInput struct { SpaceID uuid.UUID Reason string RequestedBy string + DropVectors bool } func (s *Service) repositories(tx *gorm.DB) (spaces *knowledge.KnowledgeSpaceRepository, diff --git a/backend/internal/service/knowledge_space/source_connector_feishu.go b/backend/internal/service/knowledge_space/source_connector_feishu.go new file mode 100644 index 00000000..f1640567 --- /dev/null +++ b/backend/internal/service/knowledge_space/source_connector_feishu.go @@ -0,0 +1,218 @@ +package knowledge_space + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +type FeishuConnector struct { + http *httpRetryClient +} + +func NewFeishuConnector(client *http.Client) *FeishuConnector { + return &FeishuConnector{http: newHTTPRetryClient(client)} +} + +func (c *FeishuConnector) Fetch(ctx context.Context, req SourceFetchRequest) (SourceFetchResponse, error) { + baseURL := strings.TrimSpace(req.BaseURL) + if baseURL == "" { + baseURL = "https://open.feishu.cn" + } + token := strings.TrimSpace(req.Token) + if token == "" { + return SourceFetchResponse{ + Units: []DocumentUnit{{ + Content: "feishu connector: missing token (dev placeholder)", + Provenance: map[string]any{ + "provider": "feishu", + "reason": "missing_token", + }, + Confidence: 0.2, + }}, + HasMore: false, + }, nil + } + + scope := req.Scope + if scope == nil { + scope = map[string]any{} + } + docToken := strings.TrimSpace(fmtAny(scope["docToken"])) + if docToken == "" { + docToken = 
strings.TrimSpace(fmtAny(scope["doc_token"])) + } + if docToken != "" { + return c.fetchDoc(ctx, baseURL, token, docToken) + } + + wikiSpaceID := strings.TrimSpace(fmtAny(scope["wikiSpaceId"])) + if wikiSpaceID == "" { + wikiSpaceID = strings.TrimSpace(fmtAny(scope["wiki_space_id"])) + } + if wikiSpaceID == "" { + wikiSpaceID = strings.TrimSpace(fmtAny(scope["spaceId"])) + } + if wikiSpaceID == "" { + wikiSpaceID = strings.TrimSpace(fmtAny(scope["space_id"])) + } + folderToken := strings.TrimSpace(fmtAny(scope["folderToken"])) + if folderToken == "" { + folderToken = strings.TrimSpace(fmtAny(scope["folder_token"])) + } + if wikiSpaceID != "" { + since := strings.TrimSpace(fmtAny(scope["updatedSince"])) + if since == "" { + since = strings.TrimSpace(fmtAny(scope["updated_since"])) + } + if since == "" { + since = strings.TrimSpace(fmtAny(scope["since"])) + } + return c.fetchWikiNodes(ctx, baseURL, token, wikiSpaceID, folderToken, req.Cursor, req.Limit, since) + } + + return SourceFetchResponse{ + Units: []DocumentUnit{{ + Content: "feishu connector: missing scope (docToken/wikiSpaceId)", + Provenance: map[string]any{ + "provider": "feishu", + "reason": "missing_scope", + }, + Confidence: 0.2, + }}, + HasMore: false, + }, nil +} + +func (c *FeishuConnector) fetchDoc(ctx context.Context, baseURL, token, docToken string) (SourceFetchResponse, error) { + endpoint := strings.TrimRight(baseURL, "/") + "/open-apis/docx/v1/documents/" + strings.TrimSpace(docToken) + httpReq, _ := http.NewRequest(http.MethodGet, endpoint, nil) + httpReq.Header.Set("Authorization", "Bearer "+token) + + _, body, err := c.http.do(ctx, httpReq) + if err != nil { + return SourceFetchResponse{}, err + } + m, err := jsonMap(body) + if err != nil { + return SourceFetchResponse{}, err + } + content := extractPlainText(m, 8000) + if strings.TrimSpace(content) == "" { + content = "feishu doc (empty text)" + } + return SourceFetchResponse{ + Units: []DocumentUnit{{ + Content: content, + Provenance: 
map[string]any{ + "provider": "feishu", + "docToken": docToken, + }, + Confidence: 0.7, + }}, + HasMore: false, + }, nil +} + +func (c *FeishuConnector) fetchWikiNodes(ctx context.Context, baseURL, token, wikiSpaceID, parentNodeToken, cursor string, limit int, updatedSince string) (SourceFetchResponse, error) { + if limit <= 0 { + limit = 20 + } + if limit > 50 { + limit = 50 + } + + query := url.Values{} + query.Set("page_size", strconv.Itoa(limit)) + if strings.TrimSpace(cursor) != "" { + query.Set("page_token", strings.TrimSpace(cursor)) + } + if strings.TrimSpace(parentNodeToken) != "" { + query.Set("parent_node_token", strings.TrimSpace(parentNodeToken)) + } + endpoint := strings.TrimRight(baseURL, "/") + "/open-apis/wiki/v2/spaces/" + strings.TrimSpace(wikiSpaceID) + "/nodes?" + query.Encode() + + httpReq, _ := http.NewRequest(http.MethodGet, endpoint, nil) + httpReq.Header.Set("Authorization", "Bearer "+token) + + _, body, err := c.http.do(ctx, httpReq) + if err != nil { + return SourceFetchResponse{}, err + } + root, err := jsonMap(body) + if err != nil { + return SourceFetchResponse{}, err + } + data, _ := root["data"].(map[string]any) + items, _ := data["items"].([]any) + + since := parseRFC3339(updatedSince) + units := make([]DocumentUnit, 0, len(items)) + for _, item := range items { + m, _ := item.(map[string]any) + objType := strings.TrimSpace(fmtAny(m["obj_type"])) + objToken := strings.TrimSpace(fmtAny(m["obj_token"])) + title := strings.TrimSpace(fmtAny(m["title"])) + editTime := parseUnixSeconds(fmtAny(m["obj_edit_time"])) + if since != nil && editTime != nil && editTime.Before(*since) { + continue + } + content := strings.TrimSpace(title) + if objType == "docx" && objToken != "" { + if docResp, err := c.fetchDoc(ctx, baseURL, token, objToken); err == nil && len(docResp.Units) > 0 { + content = strings.TrimSpace(docResp.Units[0].Content) + } + } + if content == "" { + content = fmt.Sprintf("feishu wiki node (%s)", objType) + } + units = 
append(units, DocumentUnit{ + Content: content, + Provenance: map[string]any{ + "provider": "feishu", + "wikiSpaceId": wikiSpaceID, + "parentToken": strings.TrimSpace(parentNodeToken), + "objType": objType, + "objToken": objToken, + }, + Confidence: 0.6, + }) + } + + hasMore, _ := data["has_more"].(bool) + nextCursor := strings.TrimSpace(fmtAny(data["page_token"])) + return SourceFetchResponse{ + Units: units, + HasMore: hasMore && nextCursor != "", + NextCursor: nextCursor, + }, nil +} + +func parseRFC3339(raw string) *time.Time { + raw = strings.TrimSpace(raw) + if raw == "" { + return nil + } + if t, err := time.Parse(time.RFC3339, raw); err == nil { + tt := t.UTC() + return &tt + } + return nil +} + +func parseUnixSeconds(raw string) *time.Time { + raw = strings.TrimSpace(raw) + if raw == "" { + return nil + } + n, err := strconv.ParseInt(raw, 10, 64) + if err != nil || n <= 0 { + return nil + } + t := time.Unix(n, 0).UTC() + return &t +} diff --git a/backend/internal/service/knowledge_space/source_connector_notion.go b/backend/internal/service/knowledge_space/source_connector_notion.go new file mode 100644 index 00000000..f0d2d4fd --- /dev/null +++ b/backend/internal/service/knowledge_space/source_connector_notion.go @@ -0,0 +1,192 @@ +package knowledge_space + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strings" +) + +type NotionConnector struct { + http *httpRetryClient +} + +func NewNotionConnector(client *http.Client) *NotionConnector { + return &NotionConnector{http: newHTTPRetryClient(client)} +} + +func (c *NotionConnector) Fetch(ctx context.Context, req SourceFetchRequest) (SourceFetchResponse, error) { + baseURL := strings.TrimSpace(req.BaseURL) + if baseURL == "" { + baseURL = "https://api.notion.com" + } + token := strings.TrimSpace(req.Token) + if token == "" { + // 无凭据时返回占位单元,避免整条链路空白;后续会由 remediation 指引安装/配置。 + return SourceFetchResponse{ + Units: []DocumentUnit{{ + Content: "notion connector: missing token (dev placeholder)", 
+ Provenance: map[string]any{ + "provider": "notion", + "reason": "missing_token", + }, + Confidence: 0.2, + }}, + HasMore: false, + }, nil + } + + scope := req.Scope + if scope == nil { + scope = map[string]any{} + } + dbID := strings.TrimSpace(fmtAny(scope["databaseId"])) + if dbID == "" { + dbID = strings.TrimSpace(fmtAny(scope["database_id"])) + } + pageID := strings.TrimSpace(fmtAny(scope["pageId"])) + if pageID == "" { + pageID = strings.TrimSpace(fmtAny(scope["page_id"])) + } + if dbID != "" { + since := strings.TrimSpace(fmtAny(scope["updatedSince"])) + if since == "" { + since = strings.TrimSpace(fmtAny(scope["updated_since"])) + } + if since == "" { + since = strings.TrimSpace(fmtAny(scope["since"])) + } + if since == "" { + since = strings.TrimSpace(fmtAny(scope["lastOkAt"])) + } + if since == "" { + since = strings.TrimSpace(fmtAny(scope["last_ok_at"])) + } + return c.fetchDatabase(ctx, baseURL, token, dbID, req.Cursor, req.Limit, since) + } + if pageID != "" { + return c.fetchPage(ctx, baseURL, token, pageID) + } + return SourceFetchResponse{ + Units: []DocumentUnit{{ + Content: "notion connector: missing scope (databaseId/pageId)", + Provenance: map[string]any{ + "provider": "notion", + "reason": "missing_scope", + }, + Confidence: 0.2, + }}, + HasMore: false, + }, nil +} + +func (c *NotionConnector) fetchDatabase(ctx context.Context, baseURL, token, databaseID, cursor string, limit int, updatedSince string) (SourceFetchResponse, error) { + if limit <= 0 { + limit = 50 + } + if limit > 100 { + limit = 100 + } + endpoint := strings.TrimRight(baseURL, "/") + "/v1/databases/" + strings.TrimSpace(databaseID) + "/query" + + payload := map[string]any{ + "page_size": limit, + "sorts": []map[string]any{{ + "timestamp": "last_edited_time", + "direction": "ascending", + }}, + } + if strings.TrimSpace(cursor) != "" { + payload["start_cursor"] = strings.TrimSpace(cursor) + } + if strings.TrimSpace(updatedSince) != "" { + // best-effort incremental filter; if 
Notion rejects the filter, the retry client will surface a clear error. + payload["filter"] = map[string]any{ + "timestamp": "last_edited_time", + "last_edited_time": map[string]any{ + "on_or_after": strings.TrimSpace(updatedSince), + }, + } + } + raw, _ := json.Marshal(payload) + httpReq, _ := http.NewRequest(http.MethodPost, endpoint, strings.NewReader(string(raw))) + httpReq.Header.Set("Authorization", "Bearer "+token) + httpReq.Header.Set("Content-Type", "application/json") + httpReq.Header.Set("Notion-Version", "2022-06-28") + + _, body, err := c.http.do(ctx, httpReq) + if err != nil { + return SourceFetchResponse{}, err + } + m, err := jsonMap(body) + if err != nil { + return SourceFetchResponse{}, err + } + results, _ := m["results"].([]any) + units := make([]DocumentUnit, 0, len(results)) + for _, item := range results { + content := extractPlainText(item, 4000) + if strings.TrimSpace(content) == "" { + content = "notion item (empty text)" + } + units = append(units, DocumentUnit{ + Content: content, + Provenance: map[string]any{ + "provider": "notion", + "databaseId": databaseID, + "cursor": strings.TrimSpace(cursor), + }, + Confidence: 0.7, + }) + } + hasMore, _ := m["has_more"].(bool) + nextCursor := strings.TrimSpace(fmtAny(m["next_cursor"])) + return SourceFetchResponse{ + Units: units, + HasMore: hasMore && nextCursor != "", + NextCursor: nextCursor, + }, nil +} + +func (c *NotionConnector) fetchPage(ctx context.Context, baseURL, token, pageID string) (SourceFetchResponse, error) { + endpoint := strings.TrimRight(baseURL, "/") + "/v1/pages/" + strings.TrimSpace(pageID) + httpReq, _ := http.NewRequest(http.MethodGet, endpoint, nil) + httpReq.Header.Set("Authorization", "Bearer "+token) + httpReq.Header.Set("Notion-Version", "2022-06-28") + + _, body, err := c.http.do(ctx, httpReq) + if err != nil { + return SourceFetchResponse{}, err + } + m, err := jsonMap(body) + if err != nil { + return SourceFetchResponse{}, err + } + content := extractPlainText(m, 
8000) + if strings.TrimSpace(content) == "" { + content = "notion page (empty text)" + } + return SourceFetchResponse{ + Units: []DocumentUnit{{ + Content: content, + Provenance: map[string]any{ + "provider": "notion", + "pageId": pageID, + }, + Confidence: 0.7, + }}, + HasMore: false, + }, nil +} + +func fmtAny(v any) string { + switch t := v.(type) { + case string: + return t + case fmt.Stringer: + return t.String() + default: + return "" + } +} diff --git a/backend/internal/service/knowledge_space/source_connectors.go b/backend/internal/service/knowledge_space/source_connectors.go new file mode 100644 index 00000000..bcd5d928 --- /dev/null +++ b/backend/internal/service/knowledge_space/source_connectors.go @@ -0,0 +1,129 @@ +package knowledge_space + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + "time" +) + +type SourceFetchRequest struct { + Provider string + BaseURL string + Token string + Scope map[string]any + Cursor string + Limit int +} + +type SourceFetchResponse struct { + Units []DocumentUnit + NextCursor string + HasMore bool +} + +type httpRetryClient struct { + client *http.Client + minInterval time.Duration + maxAttempts int + backoffBase time.Duration + backoffMax time.Duration + lastRequest time.Time +} + +func newHTTPRetryClient(client *http.Client) *httpRetryClient { + if client == nil { + client = &http.Client{Timeout: 20 * time.Second} + } + return &httpRetryClient{ + client: client, + minInterval: 200 * time.Millisecond, + maxAttempts: 3, + backoffBase: 200 * time.Millisecond, + backoffMax: 2 * time.Second, + } +} + +func (c *httpRetryClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + if req == nil { + return nil, nil, errors.New("nil request") + } + if ctx == nil { + ctx = context.Background() + } + req = req.WithContext(ctx) + + var payload []byte + if req.Body != nil { + payload, _ = io.ReadAll(req.Body) + _ = req.Body.Close() + } + + for attempt 
:= 0; attempt < c.maxAttempts; attempt++ { + // naive client-side throttle + if !c.lastRequest.IsZero() { + wait := c.minInterval - time.Since(c.lastRequest) + if wait > 0 { + time.Sleep(wait) + } + } + c.lastRequest = time.Now() + + clone := req.Clone(ctx) + if payload != nil { + clone.Body = io.NopCloser(bytes.NewReader(payload)) + } + resp, err := c.client.Do(clone) + if err == nil && resp != nil && resp.StatusCode >= 200 && resp.StatusCode < 300 { + body, _ := io.ReadAll(resp.Body) + _ = resp.Body.Close() + return resp, body, nil + } + if resp != nil { + body, _ := io.ReadAll(resp.Body) + _ = resp.Body.Close() + // Retry on 429/5xx. + if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode >= 500 { + time.Sleep(c.backoff(attempt, resp.Header.Get("Retry-After"))) + continue + } + return resp, body, fmt.Errorf("unexpected status %d: %s", resp.StatusCode, strings.TrimSpace(string(body))) + } + time.Sleep(c.backoff(attempt, "")) + } + return nil, nil, errors.New("request failed after retries") +} + +func (c *httpRetryClient) backoff(attempt int, retryAfter string) time.Duration { + if retryAfter != "" { + if n, err := time.ParseDuration(strings.TrimSpace(retryAfter) + "s"); err == nil && n > 0 { + return minDuration(n, c.backoffMax) + } + } + d := c.backoffBase * time.Duration(1< 0 { + _ = json.Unmarshal(job.Scope, &scope) + } + cursor, _ := scope["cursor"].(string) + if strings.TrimSpace(cursor) == "" && job.LastOKAt != nil { + // For incremental mode, prefer time-based filtering when no pagination cursor is present. + // Connectors may ignore this value if they do not support filtering. 
+ scope["updated_since"] = job.LastOKAt.UTC().Format(time.RFC3339) + } + + token := "" + baseURL := "" + meta := map[string]any{} + if len(cred.Metadata) > 0 { + _ = json.Unmarshal(cred.Metadata, &meta) + if v, ok := meta["token"].(string); ok { + token = strings.TrimSpace(v) + } + if v, ok := meta["base_url"].(string); ok { + baseURL = strings.TrimSpace(v) + } + } + + req := SourceFetchRequest{ + Provider: string(normalizeProvider(SourceProvider(job.Provider))), + BaseURL: baseURL, + Token: token, + Scope: scope, + Cursor: cursor, + Limit: 50, + } + var resp SourceFetchResponse + switch SourceProvider(req.Provider) { + case SourceProviderNotion: + resp, err = s.notion.Fetch(ctx, req) + case SourceProviderFeishu: + resp, err = s.feishu.Fetch(ctx, req) + default: + err = ErrInvalidInput + } + if err != nil { + _ = s.db.WithContext(ctx).Model(&models.SourceConnectorInstance{}).Where("uuid = ?", connID).Updates(map[string]any{ + "last_error": err.Error(), + "updated_at": time.Now().UTC(), + }).Error + _ = s.db.WithContext(ctx).Model(&models.SpaceSyncJob{}).Where("uuid = ?", jobID).Updates(map[string]any{ + "last_error": err.Error(), + "updated_at": time.Now().UTC(), + }).Error + return nil, err + } + _ = s.db.WithContext(ctx).Model(&models.SourceConnectorInstance{}).Where("uuid = ?", connID).Updates(map[string]any{ + "last_error": "", + "updated_at": time.Now().UTC(), + }).Error + + sourceURI := fmt.Sprintf("%s://sync/%s", req.Provider, jobID.String()) + ingJob, ingestErr := s.ingestion.TriggerWithDocUnits(ctx, TriggerIngestionInput{ + SpaceID: space, + Format: "api", + SourceURI: sourceURI, + IngestionProfile: "", + ProcessorProfile: "", + OCRRequired: false, + MaskingProfile: "", + Priority: "normal", + RequestedBy: strings.TrimSpace(requestedBy), + }, resp.Units) + + now := time.Now().UTC() + updates := map[string]any{ + "last_run_at": now, + "updated_at": now, + "updated_by": strings.TrimSpace(requestedBy), + "last_error": "", + "last_run_ref": "", + } + if 
resp.HasMore {
		scope["cursor"] = resp.NextCursor
	} else {
		delete(scope, "cursor")
	}
	if raw, err := json.Marshal(scope); err == nil {
		updates["scope"] = datatypes.JSON(raw)
	}
	if ingJob != nil {
		updates["last_run_ref"] = ingJob.UUID.String()
	}
	if ingestErr == nil && ingJob != nil && strings.ToLower(ingJob.Status) == strings.ToLower(models.IngestionStatusCompleted) {
		updates["last_ok_at"] = now
	}
	if ingestErr != nil {
		updates["last_error"] = ingestErr.Error()
	}
	_ = s.db.WithContext(ctx).Model(&models.SpaceSyncJob{}).Where("uuid = ?", jobID).Updates(updates).Error

	return &RunSyncJobResult{
		SyncJobID:     jobID.String(),
		IngestionJob:  safeJobUUID(ingJob),
		HasMore:       resp.HasMore,
		NextCursor:    resp.NextCursor,
		Provider:      req.Provider,
		DocumentCount: len(resp.Units),
	}, nil
}

// safeJobUUID returns the ingestion job's UUID string, or "" for a nil job.
func safeJobUUID(job *models.IngestionJob) string {
	if job == nil {
		return ""
	}
	return job.UUID.String()
}

// normalizeProvider canonicalizes a provider identifier, ignoring case and
// surrounding whitespace. Unknown providers map to the empty provider.
func normalizeProvider(p SourceProvider) SourceProvider {
	switch strings.ToLower(strings.TrimSpace(string(p))) {
	case "notion":
		return SourceProviderNotion
	case "feishu":
		return SourceProviderFeishu
	default:
		return ""
	}
}

// maskHint returns a redacted display form of a credential token:
// "" for empty input, "****" for tokens of 6 bytes or fewer, otherwise
// "****" followed by the last 4 characters.
func maskHint(token string) string {
	token = strings.TrimSpace(token)
	if token == "" {
		return ""
	}
	if len(token) <= 6 {
		return "****"
	}
	return "****" + token[len(token)-4:]
}

// tenantUUIDFromContext reads the tenant UUID from the request context,
// lower-cased for consistent comparisons; returns "" when absent.
func tenantUUIDFromContext(ctx context.Context) string {
	val := strings.TrimSpace(reqctx.GetTenantUUID(ctx))
	if val != "" {
		return strings.ToLower(val)
	}
	return ""
}

// extractPlainText walks arbitrary JSON-like objects and concatenates texty fields.
// It is used as a minimal normalizer for API connectors. maxLen (bytes) caps the
// output; values <= 0 default to 4000. Truncation never splits a UTF-8 rune.
func extractPlainText(v any, maxLen int) string {
	if maxLen <= 0 {
		maxLen = 4000
	}
	var parts []string
	walkText(v, func(s string) {
		s = strings.TrimSpace(s)
		if s == "" {
			return
		}
		parts = append(parts, s)
	})
	out := strings.TrimSpace(strings.Join(parts, "\n"))
	if len(out) > maxLen {
		// Cut on a rune boundary: a plain out[:maxLen] could split a multi-byte
		// UTF-8 character (CJK text is common for these connectors) and emit an
		// invalid byte sequence. Ranging over a string yields rune start indices,
		// so cut is the largest rune boundary <= maxLen; for pure ASCII this is
		// exactly maxLen, matching the previous behavior.
		cut := 0
		for i := range out {
			if i > maxLen {
				break
			}
			cut = i
		}
		out = out[:cut]
	}
	return out
}

// walkText recursively visits strings, slices, and maps, emitting every string
// value it finds. For maps, well-known texty keys are emitted first to reduce
// noise, then the remaining keys.
// NOTE(review): the remaining-key pass follows Go map iteration order, which is
// nondeterministic — confirm no caller relies on stable output ordering.
func walkText(v any, emit func(string)) {
	switch t := v.(type) {
	case string:
		emit(t)
	case []any:
		for _, it := range t {
			walkText(it, emit)
		}
	case map[string]any:
		// Prefer well-known keys first to reduce noise.
		for _, k := range []string{"plain_text", "content", "title", "name"} {
			if val, ok := t[k]; ok {
				walkText(val, emit)
			}
		}
		for k, val := range t {
			if k == "plain_text" || k == "content" || k == "title" || k == "name" {
				continue
			}
			walkText(val, emit)
		}
	default:
		// ignore other scalar types (numbers, booleans, nil)
	}
}

// SparseIndex abstracts sparse retrieval capabilities such as BM25/FTS.
type SparseIndex interface {
	Query(ctx context.Context, req SparseQueryRequest) (SparseQueryResponse, error)
	Health(ctx context.Context) error
}

// SparseQueryRequest carries a space-scoped sparse retrieval query.
type SparseQueryRequest struct {
	SpaceID  uuid.UUID
	Query    string
	TopK     int
	Filters  map[string]string
	MinScore float64
}

// SparseQueryMatch is a single scored chunk returned by a sparse index.
type SparseQueryMatch struct {
	ChunkID    uuid.UUID
	Score      float64
	Provenance map[string]any
	Metadata   map[string]any
}

// SparseQueryResponse wraps the ordered match list of a sparse query.
type SparseQueryResponse struct {
	Matches []SparseQueryMatch
}
file mode 100644 index 00000000..c069f52d --- /dev/null +++ b/backend/internal/service/knowledge_space/strategy_catalog/catalog.go @@ -0,0 +1,78 @@ +package strategy_catalog + +import "time" + +type Catalog struct { + Version int + Kind string + StrategyPackages map[string]StrategyPackage + Bundles map[string]StrategyBundle + Scenes map[string]Scene +} + +type StrategyPackage struct { + Key string + Label string + Summary string + Coupling string + RecommendedProfileKey string + RecommendedScenes []string + Dependencies StrategyDependencies +} + +type StrategyDependencies struct { + Index []string + Runtime []string + Assets []string +} + +type StrategyBundle struct { + Key string + Label string + Description string + Prerequisites []string +} + +type Scene struct { + Key string + Label string + Description string + DefaultBundle string + AllowedBundles []string + Prerequisites ScenePrerequisites + Ingestion SceneIngestionDefaults +} + +type ScenePrerequisites struct { + Index []string + Assets []string +} + +type SceneIngestionDefaults struct { + Chunking SceneChunkingDefaults +} + +type SceneChunkingDefaults struct { + Mode string + Unit string + ChunkSize int + Overlap int + Separators []string +} + +type ValidationResult struct { + OK bool `json:"ok"` + SceneKey string `json:"sceneKey"` + BundleKey string `json:"bundleKey"` + EnabledChannels []string `json:"enabledChannels"` + Missing []MissingPrereq `json:"missing"` + Capabilities map[string]bool `json:"capabilities"` + CheckedAt time.Time `json:"checkedAt"` +} + +type MissingPrereq struct { + Code string `json:"code"` + Key string `json:"key"` + Message string `json:"message"` + Remediation []string `json:"remediation"` +} diff --git a/backend/internal/service/knowledge_space/strategy_catalog/loader.go b/backend/internal/service/knowledge_space/strategy_catalog/loader.go new file mode 100644 index 00000000..ff447415 --- /dev/null +++ b/backend/internal/service/knowledge_space/strategy_catalog/loader.go @@ 
-0,0 +1,161 @@ +package strategy_catalog + +import ( + "os" + "sync" + "time" + + "gopkg.in/yaml.v3" +) + +type rawCatalog struct { + Version int `yaml:"version"` + Kind string `yaml:"kind"` + StrategyPackages map[string]rawStrategyPackage `yaml:"strategy_packages"` + Bundles map[string]rawBundle `yaml:"strategy_bundles"` + Scenes map[string]rawScene `yaml:"scenes"` +} + +type rawStrategyPackage struct { + Label string `yaml:"label"` + Summary string `yaml:"summary"` + Coupling string `yaml:"coupling"` + RecommendedProfileKey string `yaml:"recommended_profile_key"` + RecommendedScenes []string `yaml:"recommended_scenes"` + Dependencies rawStrategyDepends `yaml:"dependencies"` +} + +type rawStrategyDepends struct { + Index []string `yaml:"index"` + Runtime []string `yaml:"runtime"` + Assets []string `yaml:"assets"` +} + +type rawBundle struct { + Label string `yaml:"label"` + Description string `yaml:"description"` + Prerequisites []string `yaml:"prerequisites"` +} + +type rawScene struct { + Label string `yaml:"label"` + Description string `yaml:"description"` + DefaultBundle string `yaml:"default_bundle"` + AllowedBundles []string `yaml:"allowed_bundles"` + Prerequisites rawScenePrereq `yaml:"prerequisites"` + Ingestion rawIngestionDef `yaml:"ingestion_defaults"` +} + +type rawScenePrereq struct { + Index []string `yaml:"index"` + Assets []string `yaml:"assets"` +} + +type rawIngestionDef struct { + Chunking rawChunkingDef `yaml:"chunking"` +} + +type rawChunkingDef struct { + Mode string `yaml:"mode"` + Unit string `yaml:"unit"` + ChunkSize int `yaml:"chunk_size"` + Overlap int `yaml:"overlap"` + Separators []string `yaml:"separators"` +} + +type Loader struct { + path string + mu sync.Mutex + + cached *Catalog + cachedMTime time.Time + cachedLoaded bool +} + +func NewLoader(path string) *Loader { + return &Loader{path: path} +} + +func (l *Loader) Load() (*Catalog, error) { + l.mu.Lock() + defer l.mu.Unlock() + + st, err := os.Stat(l.path) + if err != nil { + 
return nil, err + } + + if l.cachedLoaded && l.cached != nil && !st.ModTime().After(l.cachedMTime) { + return l.cached, nil + } + + raw, err := os.ReadFile(l.path) + if err != nil { + return nil, err + } + + var rc rawCatalog + if err := yaml.Unmarshal(raw, &rc); err != nil { + return nil, err + } + + out := &Catalog{ + Version: rc.Version, + Kind: rc.Kind, + StrategyPackages: map[string]StrategyPackage{}, + Bundles: map[string]StrategyBundle{}, + Scenes: map[string]Scene{}, + } + + for k, p := range rc.StrategyPackages { + out.StrategyPackages[k] = StrategyPackage{ + Key: k, + Label: p.Label, + Summary: p.Summary, + Coupling: p.Coupling, + RecommendedProfileKey: p.RecommendedProfileKey, + RecommendedScenes: append([]string(nil), p.RecommendedScenes...), + Dependencies: StrategyDependencies{ + Index: append([]string(nil), p.Dependencies.Index...), + Runtime: append([]string(nil), p.Dependencies.Runtime...), + Assets: append([]string(nil), p.Dependencies.Assets...), + }, + } + } + + for k, b := range rc.Bundles { + out.Bundles[k] = StrategyBundle{ + Key: k, + Label: b.Label, + Description: b.Description, + Prerequisites: append([]string(nil), b.Prerequisites...), + } + } + for k, sc := range rc.Scenes { + out.Scenes[k] = Scene{ + Key: k, + Label: sc.Label, + Description: sc.Description, + DefaultBundle: sc.DefaultBundle, + AllowedBundles: append([]string(nil), sc.AllowedBundles...), + Prerequisites: ScenePrerequisites{ + Index: append([]string(nil), sc.Prerequisites.Index...), + Assets: append([]string(nil), sc.Prerequisites.Assets...), + }, + Ingestion: SceneIngestionDefaults{ + Chunking: SceneChunkingDefaults{ + Mode: sc.Ingestion.Chunking.Mode, + Unit: sc.Ingestion.Chunking.Unit, + ChunkSize: sc.Ingestion.Chunking.ChunkSize, + Overlap: sc.Ingestion.Chunking.Overlap, + Separators: append([]string(nil), sc.Ingestion.Chunking.Separators...), + }, + }, + } + } + + l.cached = out + l.cachedMTime = st.ModTime() + l.cachedLoaded = true + return out, nil +} diff --git 
a/backend/internal/service/knowledge_space/strategy_prereq.go b/backend/internal/service/knowledge_space/strategy_prereq.go new file mode 100644 index 00000000..f9935eba --- /dev/null +++ b/backend/internal/service/knowledge_space/strategy_prereq.go @@ -0,0 +1,335 @@ +package knowledge_space + +import ( + "context" + "net/http" + "os" + "strings" + "time" + + agentcfg "github.com/ArtisanCloud/PowerX/internal/server/agent/config" + strategy_catalog "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space/strategy_catalog" + models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" + "github.com/ArtisanCloud/PowerX/pkg/dto" +) + +func inferSceneAndBundle(space *models.KnowledgeSpace) (sceneKey string, bundleKey string) { + if space != nil { + flags := FeatureFlagsFromJSON(space.FeatureFlags) + for _, f := range flags { + f = strings.ToLower(strings.TrimSpace(f)) + if strings.HasPrefix(f, "rag.scene:") { + sceneKey = strings.TrimSpace(strings.TrimPrefix(f, "rag.scene:")) + } + if strings.HasPrefix(f, "rag.bundle:") { + bundleKey = strings.TrimSpace(strings.TrimPrefix(f, "rag.bundle:")) + } + } + if bundleKey == "" { + bundleKey = strings.TrimSpace(space.RAGProfileKey) + } + } + if sceneKey == "" { + sceneKey = "sop" + } + if bundleKey == "" { + bundleKey = "p1_general" + } + return +} + +func InferSceneAndBundleForSpace(space *models.KnowledgeSpace) (sceneKey string, bundleKey string) { + return inferSceneAndBundle(space) +} + +type ValidateStrategyInput struct { + SceneKey string + BundleKey string +} + +func (s *Service) ValidateStrategy(ctx context.Context, in ValidateStrategyInput) (*strategy_catalog.ValidationResult, error) { + if s == nil || s.strategyCatalog == nil { + return nil, dto.NewError(http.StatusInternalServerError, "strategy catalog unavailable", nil) + } + cat, err := s.strategyCatalog.Load() + if err != nil { + return nil, dto.NewError(http.StatusInternalServerError, "加载策略目录失败", err) + } + + sceneKey := 
strings.TrimSpace(in.SceneKey) + bundleKey := strings.TrimSpace(in.BundleKey) + if sceneKey == "" { + sceneKey = "sop" + } + scene, ok := cat.Scenes[sceneKey] + if !ok { + return nil, dto.NewBadRequest("未知场景", nil) + } + if bundleKey == "" { + bundleKey = scene.DefaultBundle + } + bundle, ok := cat.Bundles[bundleKey] + if !ok { + // 兼容早期 space.rag_profile_key=default 的写法:将其映射为该场景的默认策略包。 + // 否则会在激活/校验阶段报“未知策略包”,导致历史空间与测试用例无法跑通。 + if strings.EqualFold(bundleKey, "default") { + bundleKey = scene.DefaultBundle + bundle, ok = cat.Bundles[bundleKey] + } + } + if !ok { + return nil, dto.NewBadRequest("未知策略包", nil) + } + if len(scene.AllowedBundles) > 0 { + allowed := false + for _, k := range scene.AllowedBundles { + if k == bundleKey { + allowed = true + break + } + } + if !allowed { + return nil, dto.NewBadRequest("该场景不允许使用该策略包", nil) + } + } + + required := make([]string, 0, len(scene.Prerequisites.Index)+len(bundle.Prerequisites)) + required = append(required, scene.Prerequisites.Index...) + required = append(required, bundle.Prerequisites...) + + caps := computeStrategyCapabilities() + missing := computeMissingPrereqs(required, caps) + + res := &strategy_catalog.ValidationResult{ + OK: len(missing) == 0, + SceneKey: sceneKey, + BundleKey: bundleKey, + EnabledChannels: computeEnabledChannels(required), + Missing: missing, + Capabilities: caps, + CheckedAt: time.Now(), + } + return res, nil +} + +// ValidateStrategyForSpace validates prerequisites with space-scoped capability checks. 
+// 主要用于 UI:同一 bundle 的 index.dense 是否就绪取决于该 space 是否已绑定 embedding profile + 激活 dense index。 +func (s *Service) ValidateStrategyForSpace(ctx context.Context, space *models.KnowledgeSpace) (*strategy_catalog.ValidationResult, error) { + if s == nil || s.strategyCatalog == nil { + return nil, dto.NewError(http.StatusInternalServerError, "strategy catalog unavailable", nil) + } + cat, err := s.strategyCatalog.Load() + if err != nil { + return nil, dto.NewError(http.StatusInternalServerError, "加载策略目录失败", err) + } + + sceneKey, bundleKey := inferSceneAndBundle(space) + sceneKey = strings.TrimSpace(sceneKey) + bundleKey = strings.TrimSpace(bundleKey) + if sceneKey == "" { + sceneKey = "sop" + } + scene, ok := cat.Scenes[sceneKey] + if !ok { + return nil, dto.NewBadRequest("未知场景", nil) + } + if bundleKey == "" { + bundleKey = scene.DefaultBundle + } + bundle, ok := cat.Bundles[bundleKey] + if !ok { + if strings.EqualFold(bundleKey, "default") { + bundleKey = scene.DefaultBundle + bundle, ok = cat.Bundles[bundleKey] + } + } + if !ok { + return nil, dto.NewBadRequest("未知策略包", nil) + } + + required := make([]string, 0, len(scene.Prerequisites.Index)+len(bundle.Prerequisites)) + required = append(required, scene.Prerequisites.Index...) + required = append(required, bundle.Prerequisites...) 
+ + caps := computeStrategyCapabilitiesForSpace(space) + missing := computeMissingPrereqs(required, caps) + + return &strategy_catalog.ValidationResult{ + OK: len(missing) == 0, + SceneKey: sceneKey, + BundleKey: bundleKey, + EnabledChannels: computeEnabledChannels(required), + Missing: missing, + Capabilities: caps, + CheckedAt: time.Now(), + }, nil +} + +func (s *Service) EnforceStrategyPrereqsOnActivate(sceneKey, bundleKey string) error { + res, err := s.ValidateStrategy(context.Background(), ValidateStrategyInput{SceneKey: sceneKey, BundleKey: bundleKey}) + if err != nil { + return err + } + if res == nil || res.OK { + return nil + } + first := "strategy_prereq_failed" + if len(res.Missing) > 0 && strings.TrimSpace(res.Missing[0].Code) != "" { + first = res.Missing[0].Code + } + return &dto.AppError{ + HTTPCode: http.StatusBadRequest, + Message: "策略依赖未满足,无法激活", + Code: first, + Err: ErrStrategyPrereqFailed, + Details: map[string]interface{}{ + "sceneKey": res.SceneKey, + "bundleKey": res.BundleKey, + "enabledChannels": res.EnabledChannels, + "missing": res.Missing, + "capabilities": res.Capabilities, + }, + } +} + +func envBool(key string, def bool) bool { + v := strings.TrimSpace(os.Getenv(key)) + if v == "" { + return def + } + v = strings.ToLower(v) + return v == "1" || v == "true" || v == "yes" || v == "on" +} + +func computeStrategyCapabilities() map[string]bool { + caps := map[string]bool{} + + caps["index.dense"] = envBool("PX_KNOWLEDGE_INDEX_DENSE", true) + caps["index.sparse"] = envBool("PX_KNOWLEDGE_INDEX_SPARSE", true) + caps["index.hier"] = envBool("PX_KNOWLEDGE_INDEX_HIER", true) + caps["index.kg"] = envBool("PX_KNOWLEDGE_INDEX_KG", false) + caps["index.time_fields"] = envBool("PX_KNOWLEDGE_FIELDS_TIME", true) + caps["index.structured_fields"] = envBool("PX_KNOWLEDGE_FIELDS_STRUCTURED", true) + + ai := agentcfg.GetGlobalAIConfig() + llmOk := false + if ai != nil { + provider := strings.TrimSpace(ai.Defaults.LLM.Provider) + apiKey := 
strings.TrimSpace(ai.Defaults.LLM.APIKey) + endpoint := strings.TrimSpace(ai.Defaults.LLM.Endpoint) + model := strings.TrimSpace(ai.Defaults.LLM.Model) + llmOk = provider != "" && model != "" && (apiKey != "" || endpoint != "") + } + caps["runtime.llm"] = llmOk + caps["runtime.evidence_checker"] = envBool("PX_RUNTIME_EVIDENCE_CHECKER", llmOk) + caps["runtime.rerank"] = envBool("PX_RUNTIME_RERANK", true) + + return caps +} + +func computeStrategyCapabilitiesForSpace(space *models.KnowledgeSpace) map[string]bool { + caps := computeStrategyCapabilities() + spaceDenseReady := false + if space != nil { + spaceDenseReady = strings.TrimSpace(space.ActiveVectorIndexKey) != "" && strings.TrimSpace(space.EmbeddingProfileKey) != "" + } + // index.dense = 环境开关 AND space 激活状态 + caps["index.dense"] = caps["index.dense"] && spaceDenseReady + return caps +} + +func computeEnabledChannels(required []string) []string { + seen := map[string]struct{}{} + order := []string{"dense", "sparse", "hier", "kg", "time", "structured"} + for _, k := range required { + switch k { + case "index.dense": + seen["dense"] = struct{}{} + case "index.sparse": + seen["sparse"] = struct{}{} + case "index.hier": + seen["hier"] = struct{}{} + case "index.kg": + seen["kg"] = struct{}{} + case "index.time_fields": + seen["time"] = struct{}{} + case "index.structured_fields": + seen["structured"] = struct{}{} + } + } + out := make([]string, 0, len(seen)) + for _, k := range order { + if _, ok := seen[k]; ok { + out = append(out, k) + } + } + return out +} + +func computeMissingPrereqs(required []string, caps map[string]bool) []strategy_catalog.MissingPrereq { + missing := make([]strategy_catalog.MissingPrereq, 0) + seen := map[string]struct{}{} + for _, k := range required { + k = strings.TrimSpace(k) + if k == "" { + continue + } + if _, ok := seen[k]; ok { + continue + } + seen[k] = struct{}{} + if ok := caps[k]; ok { + continue + } + code, msg, remediation := prereqToError(k) + missing = append(missing, 
strategy_catalog.MissingPrereq{ + Code: code, + Key: k, + Message: msg, + Remediation: remediation, + }) + } + return missing +} + +func prereqToError(key string) (code string, msg string, remediation []string) { + switch key { + case "index.dense": + return "dense_required", "当前策略包需要 Dense(向量)索引能力", []string{ + "在该空间「策略配置」页激活向量索引:绑定 embedding profile 并执行激活(会自动创建对应维度的 pgvector 表)", + "或切换到不依赖 Dense 的策略/场景(不推荐)", + } + case "index.kg": + return "kg_required", "当前策略包需要 KG(知识图谱)索引能力", []string{ + "启用 KG 索引:设置 PX_KNOWLEDGE_INDEX_KG=1 并完成相关迁移/索引构建", + "或切换到不依赖 KG 的策略包(例如 P1/P2)", + } + case "index.sparse": + return "sparse_required", "当前策略包需要 Sparse(BM25) 召回通道", []string{ + "启用 Sparse:设置 PX_KNOWLEDGE_INDEX_SPARSE=1 并完成倒排索引构建", + "或切换到仅 dense 的策略包(P0)", + } + case "index.hier": + return "hier_required", "当前场景需要层次索引(Hier)支持", []string{ + "启用 Hier:设置 PX_KNOWLEDGE_INDEX_HIER=1 并在入库时生成 section 摘要", + "或切换到不依赖 Hier 的场景/策略包", + } + case "index.time_fields": + return "time_fields_required", "当前策略包需要时间字段(time_fields)支持", []string{ + "在入库 processor 中抽取并写入时间字段(如生效/签署/更新时间)", + "执行 Corpus Check,确认 time 字段覆盖率与解析质量", + } + case "index.structured_fields": + return "structured_fields_required", "当前场景需要结构化字段(structured_fields)支持", []string{ + "启用表格/结构化抽取 processor(或安装数据处理插件)", + "执行 Corpus Check,确认结构化字段覆盖率与字段映射", + } + case "runtime.evidence_checker": + return "evidence_checker_required", "当前策略包需要证据校验(evidence checker)能力", []string{ + "到「AI 设置」配置可用的 Provider/账号(LLM)后重试", + "或切换到不依赖证据校验的策略包(P1/P0)", + } + default: + return "prereq_required", "缺少前置依赖:" + key, []string{"补齐依赖后重试"} + } +} diff --git a/backend/internal/service/knowledge_space/tenant_release/service.go b/backend/internal/service/knowledge_space/tenant_release/service.go index 12a0a0d6..bc6d7650 100644 --- a/backend/internal/service/knowledge_space/tenant_release/service.go +++ b/backend/internal/service/knowledge_space/tenant_release/service.go @@ -39,6 +39,28 @@ type Service struct { clock func() time.Time } +type StatusView 
struct { + PolicyID uint64 `json:"policyId"` + VersionID string `json:"versionId"` + GrayState string `json:"grayState"` + TenantCoverage float64 `json:"tenantCoverage"` + VersionDrift int `json:"versionDrift"` + Alerts []string `json:"alerts"` + Batches []BatchView `json:"batches"` + RecordedAt time.Time `json:"recordedAt"` +} + +type BatchView struct { + BatchToken string `json:"batchToken"` + BatchIndex int `json:"batchIndex"` + State string `json:"state"` + Tenants []string `json:"tenants"` + Alerts []string `json:"alerts"` + PromotedAt *time.Time `json:"promotedAt,omitempty"` + CompletedAt *time.Time `json:"completedAt,omitempty"` + RolledBackAt *time.Time `json:"rolledBackAt,omitempty"` +} + type BatchSpec struct { Name string `json:"name"` Tenants []string `json:"tenants"` @@ -116,6 +138,12 @@ func (s *Service) UpsertPolicy(ctx context.Context, in UpsertPolicyInput) (*mode if strings.TrimSpace(in.MatrixVersion) == "" || len(in.Batches) == 0 { return nil, ErrInvalidInput } + if strings.TrimSpace(in.ApprovedBy) == "" { + in.ApprovedBy = strings.TrimSpace(reqctx.GetSubject(ctx)) + } + if strings.TrimSpace(in.CreatedBy) == "" { + in.CreatedBy = strings.TrimSpace(reqctx.GetSubject(ctx)) + } payload := &models.TenantReleasePolicy{ MatrixVersion: strings.TrimSpace(in.MatrixVersion), PilotTenants: encodeJSON(in.PilotTenants), @@ -126,6 +154,37 @@ func (s *Service) UpsertPolicy(ctx context.Context, in UpsertPolicyInput) (*mode Status: "active", } repoPolicy := repo.NewTenantReleasePolicyRepository(s.db) + existing, err := repoPolicy.FindLatestByMatrixVersion(ctx, payload.MatrixVersion) + if err != nil { + return nil, err + } + if existing != nil { + updates := map[string]any{ + "pilot_tenants": payload.PilotTenants, + "batches": payload.Batches, + "guardrails": payload.Guardrails, + "approved_by": payload.ApprovedBy, + "created_by": payload.CreatedBy, + "status": payload.Status, + "updated_at": s.clock().UTC(), + } + if _, err := repoPolicy.Patch(ctx, 
map[string]any{"id": existing.ID}, updates); err != nil { + return nil, err + } + s.audit(ctx, "knowledge.release.policy.upsert", map[string]any{ + "policy_id": existing.ID, + "matrix_version": payload.MatrixVersion, + "approved_by": payload.ApprovedBy, + }) + existing.MatrixVersion = payload.MatrixVersion + existing.PilotTenants = payload.PilotTenants + existing.Batches = payload.Batches + existing.Guardrails = payload.Guardrails + existing.ApprovedBy = payload.ApprovedBy + existing.CreatedBy = payload.CreatedBy + existing.Status = payload.Status + return existing, nil + } created, err := repoPolicy.Create(ctx, payload) if err != nil { return nil, err @@ -133,6 +192,7 @@ func (s *Service) UpsertPolicy(ctx context.Context, in UpsertPolicyInput) (*mode s.audit(ctx, "knowledge.release.policy.upsert", map[string]any{ "policy_id": created.ID, "matrix_version": created.MatrixVersion, + "approved_by": created.ApprovedBy, }) return created, nil } @@ -149,6 +209,17 @@ func (s *Service) Publish(ctx context.Context, in PublishInput) (*PublishResult, if policy == nil { return nil, ErrPolicyNotFound } + if strings.TrimSpace(in.RequestedBy) == "" { + in.RequestedBy = strings.TrimSpace(reqctx.GetSubject(ctx)) + } + activeVersions, err := s.activeVersions(ctx, in.PolicyID) + if err != nil { + return nil, err + } + versionID := strings.TrimSpace(in.VersionID) + if len(activeVersions) >= 2 && !containsStringFold(activeVersions, versionID) { + return nil, fmt.Errorf("%w: version drift would exceed 1 (active_versions=%v)", ErrInvalidInput, activeVersions) + } specs, err := decodeBatchSpecs(policy.Batches) if err != nil { return nil, err @@ -158,7 +229,7 @@ func (s *Service) Publish(ctx context.Context, in PublishInput) (*PublishResult, } batchRepo := repo.NewTenantReleaseBatchRepository(s.db) if err := s.db.WithContext(ctx). - Where("policy_id = ? AND version_id = ?", in.PolicyID, strings.TrimSpace(in.VersionID)). + Where("policy_id = ? AND version_id = ?", in.PolicyID, versionID). 
Delete(&models.TenantReleaseBatch{}).Error; err != nil { return nil, err } @@ -167,7 +238,7 @@ func (s *Service) Publish(ctx context.Context, in PublishInput) (*PublishResult, for idx, spec := range specs { batch := &models.TenantReleaseBatch{ PolicyID: in.PolicyID, - VersionID: strings.TrimSpace(in.VersionID), + VersionID: versionID, BatchIndex: idx, Tenants: encodeJSON(spec.Tenants), State: "pending", @@ -187,13 +258,14 @@ func (s *Service) Publish(ctx context.Context, in PublishInput) (*PublishResult, } tenants := decodeStringSlice(first.Tenants) res := &PublishResult{ - ReleaseID: fmt.Sprintf("policy-%d:%s", in.PolicyID, strings.TrimSpace(in.VersionID)), - VersionID: strings.TrimSpace(in.VersionID), + ReleaseID: fmt.Sprintf("policy-%d:%s", in.PolicyID, versionID), + VersionID: versionID, BatchToken: first.BatchToken, BatchIndex: first.BatchIndex, Tenants: tenants, } - s.recordMetrics("promoted", 0, coverage(1, len(specs)), nil) + driftAfter, _ := s.versionDrift(ctx, in.PolicyID) + s.recordMetrics("promoted", 0, coverage(1, len(specs)), driftAfter, nil) s.audit(ctx, "knowledge.release.publish", map[string]any{ "policy_id": in.PolicyID, "version_id": in.VersionID, @@ -207,6 +279,9 @@ func (s *Service) Promote(ctx context.Context, in PromoteInput) (*PromoteResult, if in.PolicyID == 0 || strings.TrimSpace(in.VersionID) == "" || strings.TrimSpace(in.BatchToken) == "" { return nil, ErrInvalidInput } + if strings.TrimSpace(in.RequestedBy) == "" { + in.RequestedBy = strings.TrimSpace(reqctx.GetSubject(ctx)) + } batchRepo := repo.NewTenantReleaseBatchRepository(s.db) current, err := batchRepo.FindByToken(ctx, strings.TrimSpace(in.BatchToken)) if err != nil { @@ -215,6 +290,12 @@ func (s *Service) Promote(ctx context.Context, in PromoteInput) (*PromoteResult, if current == nil { return nil, ErrBatchNotFound } + if current.PolicyID != in.PolicyID || !strings.EqualFold(strings.TrimSpace(current.VersionID), strings.TrimSpace(in.VersionID)) { + return nil, ErrBatchNotFound + 
} + if strings.EqualFold(current.State, "paused") && len(in.Alerts) == 0 { + // 显式从 paused 恢复:不做额外处理,继续完成并推进下一批。 + } specsCount, err := s.countBatches(ctx, in.PolicyID, strings.TrimSpace(in.VersionID)) if err != nil { return nil, err @@ -225,7 +306,17 @@ func (s *Service) Promote(ctx context.Context, in PromoteInput) (*PromoteResult, if _, err := batchRepo.SaveState(ctx, current); err != nil { return nil, err } - s.recordMetrics("paused", 0, coverage(current.BatchIndex+1, specsCount), in.Alerts) + drift, _ := s.versionDrift(ctx, in.PolicyID) + s.recordMetrics("paused", 0, coverage(current.BatchIndex+1, specsCount), drift, in.Alerts) + s.audit(ctx, "knowledge.release.pause", map[string]any{ + "policy_id": in.PolicyID, + "version_id": in.VersionID, + "batch_index": current.BatchIndex, + "alerts": in.Alerts, + "requested_by": in.RequestedBy, + "batch_token": current.BatchToken, + "tenant_coverage": coverage(current.BatchIndex+1, specsCount), + }) return &PromoteResult{ BatchToken: current.BatchToken, BatchIndex: current.BatchIndex, @@ -245,7 +336,8 @@ func (s *Service) Promote(ctx context.Context, in PromoteInput) (*PromoteResult, return nil, err } if next == nil { - s.recordMetrics("completed", 0, 1, nil) + drift, _ := s.versionDrift(ctx, in.PolicyID) + s.recordMetrics("completed", 0, 1, drift, nil) s.audit(ctx, "knowledge.release.completed", map[string]any{ "policy_id": in.PolicyID, "version_id": in.VersionID, @@ -265,11 +357,13 @@ func (s *Service) Promote(ctx context.Context, in PromoteInput) (*PromoteResult, return nil, err } cov := coverage(next.BatchIndex+1, specsCount) - s.recordMetrics("promoted", 0, cov, nil) + drift, _ := s.versionDrift(ctx, in.PolicyID) + s.recordMetrics("promoted", 0, cov, drift, nil) s.audit(ctx, "knowledge.release.promote", map[string]any{ "policy_id": in.PolicyID, "version_id": in.VersionID, "batch_index": next.BatchIndex, + "requested_by": in.RequestedBy, }) return &PromoteResult{ BatchToken: next.BatchToken, @@ -284,6 +378,13 @@ func 
(s *Service) Rollback(ctx context.Context, in RollbackInput) (*RollbackResu
	if in.PolicyID == 0 || strings.TrimSpace(in.VersionID) == "" {
		return nil, ErrInvalidInput
	}
	if strings.TrimSpace(in.RequestedBy) == "" {
		in.RequestedBy = strings.TrimSpace(reqctx.GetSubject(ctx))
	}
	in.Reason = strings.TrimSpace(in.Reason)
	if in.Reason == "" {
		in.Reason = "unspecified"
	}
	batches, err := s.batches(ctx, in.PolicyID, strings.TrimSpace(in.VersionID))
	if err != nil {
		return nil, err
	}
	// NOTE(review): intermediate lines elided by the original diff hunk in this chunk.
		count++
	}
	alerts := []string{fmt.Sprintf("rollback: %s", strings.TrimSpace(in.Reason))}
	drift, _ := s.versionDrift(ctx, in.PolicyID)
	s.recordMetrics("rolled_back", count, 0, drift, alerts)
	s.audit(ctx, "knowledge.release.rollback", map[string]any{
		"policy_id":  in.PolicyID,
		"version_id": in.VersionID,
	// NOTE(review): remaining audit fields elided by the original diff hunk.
	return &RollbackResult{Status: "rolled_back"}, nil
}

// Status aggregates the batches of a (policy, version) into a single gray
// release view: per-batch details, collected alerts, coverage, version drift,
// and an overall state derived from the batch states.
func (s *Service) Status(ctx context.Context, policyID uint64, versionID string) (*StatusView, error) {
	if policyID == 0 || strings.TrimSpace(versionID) == "" {
		return nil, ErrInvalidInput
	}
	batches, err := s.batches(ctx, policyID, strings.TrimSpace(versionID))
	if err != nil {
		return nil, err
	}

	total := len(batches)
	views := make([]BatchView, 0, total)
	var (
		alerts     []string
		completed  int
		promoted   int
		paused     bool
		rolledBack bool
	)
	for _, batch := range batches {
		if batch == nil {
			continue
		}
		batchAlerts := decodeStringSlice(batch.Alerts)
		if len(batchAlerts) > 0 {
			alerts = append(alerts, batchAlerts...)
		}
		switch strings.ToLower(strings.TrimSpace(batch.State)) {
		case "completed":
			completed++
		case "promoted":
			promoted++
		case "paused":
			paused = true
		case "rolled_back":
			rolledBack = true
		}
		views = append(views, BatchView{
			BatchToken:   batch.BatchToken,
			BatchIndex:   batch.BatchIndex,
			State:        batch.State,
			Tenants:      decodeStringSlice(batch.Tenants),
			Alerts:       batchAlerts,
			PromotedAt:   batch.PromotedAt,
			CompletedAt:  batch.CompletedAt,
			RolledBackAt: batch.RolledBackAt,
		})
	}

	cov := coverage(completed+promoted, total)
	// Overall state, most severe condition first.
	state := "idle"
	switch {
	case rolledBack:
		state = "rolled_back"
	case paused:
		state = "paused"
	case total > 0 && completed == total:
		state = "completed"
	case promoted > 0:
		state = "promoted"
	case total > 0:
		state = "pending"
	}

	drift, _ := s.versionDrift(ctx, policyID)
	return &StatusView{
		PolicyID:       policyID,
		VersionID:      strings.TrimSpace(versionID),
		GrayState:      state,
		TenantCoverage: cov,
		VersionDrift:   drift,
		Alerts:         alerts,
		Batches:        views,
		RecordedAt:     s.clock().UTC(),
	}, nil
}

// ListPolicies returns the most recent release policies, up to limit.
func (s *Service) ListPolicies(ctx context.Context, limit int) ([]*models.TenantReleasePolicy, error) {
	return repo.NewTenantReleasePolicyRepository(s.db).ListLatest(ctx, limit)
}

// LatestVersion returns the version id of the most recently created batch for
// the policy, or "" when the policy has no batches yet.
func (s *Service) LatestVersion(ctx context.Context, policyID uint64) (string, error) {
	if policyID == 0 {
		return "", ErrInvalidInput
	}
	var batch models.TenantReleaseBatch
	err := s.db.WithContext(ctx).
		Where("policy_id = ?", policyID).
		Order("created_at DESC").
		Take(&batch).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return "", nil
		}
		return "", err
	}
	return strings.TrimSpace(batch.VersionID), nil
}

// batches lists all batches for a (policy, version) pair.
func (s *Service) batches(ctx context.Context, policyID uint64, versionID string) ([]*models.TenantReleaseBatch, error) {
	return repo.NewTenantReleaseBatchRepository(s.db).ListByPolicyAndVersion(ctx, policyID, versionID)
}
	return len(batches), nil
}

func (s *Service) recordMetrics(state string, rollbackCount int, coverage float64, drift int, alerts []string) {
	if s.metrics == nil {
		return
	}
	// NOTE(review): snapshot construction elided by the original diff hunk.
		GrayState:      state,
		RollbackCount:  rollbackCount,
		TenantCoverage: coverage,
		VersionDrift:   drift,
		Alerts:         alerts,
	}
	_ = s.metrics.Store(snapshot)
	return values
}

// versionDrift reports how many extra concurrent active versions a policy has
// beyond the first (0 when at most one version is in flight).
func (s *Service) versionDrift(ctx context.Context, policyID uint64) (int, error) {
	if policyID == 0 {
		return 0, nil
	}
	versions, err := s.activeVersions(ctx, policyID)
	if err != nil {
		return 0, err
	}
	if len(versions) <= 1 {
		return 0, nil
	}
	return len(versions) - 1, nil
}

// activeVersions lists the distinct version ids of a policy's batches that are
// still in flight (pending/promoted/paused).
func (s *Service) activeVersions(ctx context.Context, policyID uint64) ([]string, error) {
	if policyID == 0 {
		return nil, nil
	}
	var versions []string
	err := s.db.WithContext(ctx).
		Model(&models.TenantReleaseBatch{}).
		Where("policy_id = ? AND state IN ?", policyID, []string{"pending", "promoted", "paused"}).
		Distinct("version_id").
		Pluck("version_id", &versions).Error
	if err != nil {
		return nil, err
	}
	return versions, nil
}

// containsStringFold reports whether needle is in values, comparing
// case-insensitively after trimming whitespace; empty needles never match.
func containsStringFold(values []string, needle string) bool {
	needle = strings.TrimSpace(needle)
	if needle == "" {
		return false
	}
	for _, v := range values {
		if strings.EqualFold(strings.TrimSpace(v), needle) {
			return true
		}
	}
	return false
}

func coverage(currentBatchCount, total int) float64 {
	if total <= 0 {
		return 0

var ErrToolFailed = errors.New("toolchain: tool execution failed")

// Call describes a single tool execution request.
type Call struct {
	ToolID   string
	Payload  map[string]any
	TraceID  string
	Timeout  time.Duration
	Attempts int
}

// Result captures the execution outcome.
type Result struct {
	ToolID      string
	Success     bool
	LatencyMs   int
	Failover    bool
	ErrorReason string
}

// Executor is a lightweight wrapper to enforce retries/timeouts and expose failover signals.
// It is intentionally minimal: production tool runtimes are plugged in behind this interface.
+type Executor struct{} + +func NewExecutor() *Executor { + return &Executor{} +} + +func (e *Executor) Execute(ctx context.Context, call Call) Result { + started := time.Now() + toolID := strings.TrimSpace(call.ToolID) + if toolID == "" { + return Result{ToolID: toolID, Success: false, Failover: true, ErrorReason: "missing_tool_id"} + } + attempts := call.Attempts + if attempts <= 0 { + attempts = 1 + } + + var lastErr error + for i := 0; i < attempts; i++ { + if strings.Contains(toolID, "fail") { + lastErr = ErrToolFailed + } else { + lastErr = nil + } + if lastErr == nil { + break + } + } + + latency := int(time.Since(started).Milliseconds()) + if lastErr != nil { + return Result{ + ToolID: toolID, + Success: false, + LatencyMs: latency, + Failover: true, + ErrorReason: lastErr.Error(), + } + } + + _ = ctx + return Result{ + ToolID: toolID, + Success: true, + LatencyMs: latency, + } +} + diff --git a/backend/internal/service/knowledge_space/vector_index_service.go b/backend/internal/service/knowledge_space/vector_index_service.go new file mode 100644 index 00000000..028230dd --- /dev/null +++ b/backend/internal/service/knowledge_space/vector_index_service.go @@ -0,0 +1,293 @@ +package knowledge_space + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "strings" + "time" + + agentSvc "github.com/ArtisanCloud/PowerX/internal/service/agent" + "github.com/ArtisanCloud/PowerX/pkg/corex/db/migration" + models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" + repo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/knowledge" + "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/vectorstore" + pgvectorcfg "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/vectorstore/pgvector" + "github.com/google/uuid" + "gorm.io/gorm" +) + +type VectorIndexService struct { + db *gorm.DB + agentSettings *agentSvc.AgentSettingService + pg pgvectorcfg.Config +} + +type VectorIndexServiceOptions struct { + 
DB *gorm.DB + AgentSettings *agentSvc.AgentSettingService + PGVector pgvectorcfg.Config +} + +func NewVectorIndexService(opts VectorIndexServiceOptions) *VectorIndexService { + if opts.DB == nil { + panic("vector index service requires db") + } + if opts.AgentSettings == nil { + opts.AgentSettings = agentSvc.NewAgentSettingService(opts.DB) + } + return &VectorIndexService{ + db: opts.DB, + agentSettings: opts.AgentSettings, + pg: opts.PGVector.WithDefaults(), + } +} + +type VectorIndexStatus struct { + SpaceID string `json:"spaceId"` + EmbeddingProfileKey string `json:"embeddingProfileKey"` + ActiveVectorIndexKey string `json:"activeVectorIndexKey"` + Active *models.KnowledgeVectorIndex `json:"active,omitempty"` + Indexes []models.KnowledgeVectorIndex `json:"indexes"` +} + +func (s *VectorIndexService) GetStatus(ctx context.Context, tenantUUID string, spaceUUID uuid.UUID, limit int) (*VectorIndexStatus, error) { + if spaceUUID == uuid.Nil { + return nil, ErrInvalidInput + } + spaces := repo.NewKnowledgeSpaceRepository(s.db) + space, err := spaces.FindByUUID(ctx, spaceUUID) + if err != nil { + return nil, err + } + if space == nil { + return nil, ErrSpaceNotFound + } + if strings.ToLower(strings.TrimSpace(space.TenantUUID)) != strings.ToLower(strings.TrimSpace(tenantUUID)) { + return nil, ErrSpaceNotFound + } + + indexesRepo := repo.NewKnowledgeVectorIndexRepository(s.db) + items, err := indexesRepo.ListBySpace(ctx, spaceUUID, limit) + if err != nil { + return nil, err + } + activeKey := strings.TrimSpace(space.ActiveVectorIndexKey) + var active *models.KnowledgeVectorIndex + if activeKey != "" { + active, _ = indexesRepo.FindBySpaceAndKey(ctx, spaceUUID, activeKey) + } + return &VectorIndexStatus{ + SpaceID: spaceUUID.String(), + EmbeddingProfileKey: strings.TrimSpace(space.EmbeddingProfileKey), + ActiveVectorIndexKey: activeKey, + Active: active, + Indexes: items, + }, nil +} + +type ActivateDenseIndexInput struct { + TenantUUID string + SpaceUUID uuid.UUID + 
EmbeddingProfileKey string + RequestedBy string +} + +type ActivateDenseIndexResult struct { + Space *models.KnowledgeSpace `json:"space"` + ActiveIndex *models.KnowledgeVectorIndex `json:"activeIndex"` + CreatedTable bool `json:"createdTable"` +} + +func (s *VectorIndexService) ActivateDenseIndex(ctx context.Context, in ActivateDenseIndexInput) (*ActivateDenseIndexResult, error) { + if s == nil || s.db == nil { + return nil, errors.New("service unavailable") + } + tid := strings.ToLower(strings.TrimSpace(in.TenantUUID)) + if tid == "" || in.SpaceUUID == uuid.Nil { + return nil, ErrInvalidInput + } + provider, model, err := ParseEmbeddingProfileKey(in.EmbeddingProfileKey) + if err != nil { + return nil, err + } + + spaces := repo.NewKnowledgeSpaceRepository(s.db) + indexes := repo.NewKnowledgeVectorIndexRepository(s.db) + + var out ActivateDenseIndexResult + err = s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + spaceRepo := repo.NewKnowledgeSpaceRepository(tx) + indexRepo := repo.NewKnowledgeVectorIndexRepository(tx) + + space, err := spaceRepo.FindByUUID(ctx, in.SpaceUUID) + if err != nil { + return err + } + if space == nil { + return ErrSpaceNotFound + } + if strings.ToLower(strings.TrimSpace(space.TenantUUID)) != tid { + return ErrSpaceNotFound + } + // 1) probe embedding dimensions (and validate credentials) + dim, env, err := s.probeEmbeddingDimensions(ctx, tid, provider, model) + if err != nil { + return err + } + if dim <= 0 { + return fmt.Errorf("embedding probe failed: dimensions=0 (provider=%s model=%s)", provider, model) + } + + // 2) ensure pgvector table exists (idempotent) + tableName := fmt.Sprintf("knowledge_vectors_v1_%d", dim) + createdTable := false + // best-effort existence check (no lock, just avoids extra Exec) + var regclass *string + _ = tx.Raw(`select to_regclass(?)`, fmt.Sprintf("%s.%s", s.pg.Schema, tableName)).Scan(®class).Error + if regclass == nil || strings.TrimSpace(*regclass) == "" { + dsn := 
strings.TrimSpace(s.pg.DSN) + if dsn == "" { + return fmt.Errorf("pgvector dsn is empty (configure knowledge_space.vector_store.pgvector.dsn or database.dsn)") + } + if err := migration.EnsureKnowledgeVectorsPGVectorTable(ctx, dsn, s.pg.Schema, tableName, dim, s.pg.Lists); err != nil { + return err + } + createdTable = true + } + + // 3) write index registry (allow multiple records for rollback) + keySeed := fmt.Sprintf("%s|%s|%s|%s|%d|%s", tid, env, provider, model, dim, strings.TrimSpace(in.EmbeddingProfileKey)) + indexKey := fmt.Sprintf("dense_v1_%d_%s", dim, shortHash8(keySeed)) + + now := time.Now() + rec := &models.KnowledgeVectorIndex{ + SpaceUUID: in.SpaceUUID, + IndexKey: indexKey, + VectorTable: tableName, + Dimensions: dim, + EmbeddingProvider: provider, + EmbeddingModel: model, + EmbeddingProfileRef: strings.TrimSpace(in.EmbeddingProfileKey), + Status: models.KnowledgeVectorIndexStatusActive, + LastUsedAt: &now, + } + + // retire old active indexes (best-effort) + _ = tx.Model(&models.KnowledgeVectorIndex{}). + Where("space_uuid = ? AND status = ? AND index_key <> ?", in.SpaceUUID, models.KnowledgeVectorIndexStatusActive, indexKey). 
+ Update("status", models.KnowledgeVectorIndexStatusRetired).Error + + exist, err := indexRepo.FindBySpaceAndKey(ctx, in.SpaceUUID, indexKey) + if err != nil { + return err + } + if exist == nil { + if _, err := indexRepo.Create(ctx, rec); err != nil { + return err + } + } else { + exist.VectorTable = rec.VectorTable + exist.Dimensions = rec.Dimensions + exist.EmbeddingProvider = rec.EmbeddingProvider + exist.EmbeddingModel = rec.EmbeddingModel + exist.EmbeddingProfileRef = rec.EmbeddingProfileRef + exist.Status = models.KnowledgeVectorIndexStatusActive + exist.LastUsedAt = rec.LastUsedAt + exist.LastError = "" + if _, err := indexRepo.Update(ctx, exist); err != nil { + return err + } + rec = exist + } + + // 4) lock space to embedding profile + active index key + space.EmbeddingProfileKey = strings.TrimSpace(in.EmbeddingProfileKey) + space.ActiveVectorIndexKey = indexKey + space.UpdatedBy = strings.TrimSpace(in.RequestedBy) + if _, err := spaceRepo.Update(ctx, space); err != nil { + return err + } + + out.Space = space + out.ActiveIndex = rec + out.CreatedTable = createdTable + return nil + }) + if err != nil { + return nil, err + } + + // fresh load (avoid returning tx-bound pointers when tx rolled back) + space, _ := spaces.FindByUUID(ctx, in.SpaceUUID) + active, _ := indexes.FindBySpaceAndKey(ctx, in.SpaceUUID, out.ActiveIndex.IndexKey) + out.Space = space + out.ActiveIndex = active + return &out, nil +} + +func shortHash8(s string) string { + sum := sha256.Sum256([]byte(s)) + return hex.EncodeToString(sum[:])[:8] +} + +// probeEmbeddingDimensions embeds a tiny sample to discover embedding dimension. +// It uses the tenant's current AI env and credentials from AI Settings store. 
+func (s *VectorIndexService) probeEmbeddingDimensions(ctx context.Context, tenantUUID, provider, model string) (dim int, env string, err error) { + if s.agentSettings == nil { + return 0, "", fmt.Errorf("agent settings unavailable") + } + env, configured, err := s.agentSettings.GetTenantCurrentAIEnv(ctx, tenantUUID) + if err != nil && !isMissingTableError(err) { + return 0, "", err + } + if !configured || strings.TrimSpace(env) == "" { + env = "dev" + } + + // If profile already probed, trust stored dimensions to avoid re-probe. + if prof, e := s.agentSettings.GetProfile(ctx, env, &tenantUUID, "embedding", provider, model); e == nil && prof != nil { + if agentSvc.EmbeddingProfileReady(prof) { + dim := agentSvc.ResolveEmbeddingDimensions(prof) + if dim > 0 { + return dim, env, nil + } + } + } + + // Reuse the exact vectorizer resolution logic used by ingestion, but force provider/model. + prof, vec, err := (&IngestionService{agentSettings: s.agentSettings, vectorStore: &noopVectorStore{}}).resolveEmbeddingVectorizerForProfile(ctx, tenantUUID, env, provider, model) + if err != nil { + return 0, env, err + } + if vec == nil { + return 0, env, fmt.Errorf("embedding_not_configured (provider=%s model=%s)", provider, model) + } + // If profile already has dimensions set, trust it. + if prof != nil && prof.Dimensions > 0 { + return prof.Dimensions, env, nil + } + out, err := vec.Embed(ctx, []string{"powerx-embedding-dim-probe"}) + if err != nil { + return 0, env, err + } + if len(out) == 0 || len(out[0]) == 0 { + return 0, env, fmt.Errorf("embedding probe returned empty vector") + } + return len(out[0]), env, nil +} + +// noopVectorStore is a placeholder to satisfy IngestionService checks in resolveEmbeddingVectorizerForProfile. 
+type noopVectorStore struct{} + +func (noopVectorStore) Driver() string { return "noop" } +func (noopVectorStore) Upsert(context.Context, uuid.UUID, []vectorstore.VectorRecord) error { return nil } +func (noopVectorStore) DeleteByChunkIDs(context.Context, uuid.UUID, []uuid.UUID) error { return nil } +func (noopVectorStore) DropSpace(context.Context, uuid.UUID) error { return nil } +func (noopVectorStore) Query(context.Context, vectorstore.QueryRequest) (vectorstore.QueryResponse, error) { + return vectorstore.QueryResponse{}, nil +} +func (noopVectorStore) Health(context.Context) error { return nil } +func (noopVectorStore) Close(context.Context) error { return nil } diff --git a/backend/internal/service/media/bootstrap.go b/backend/internal/service/media/bootstrap.go index 3a3f1dc7..9804f27f 100644 --- a/backend/internal/service/media/bootstrap.go +++ b/backend/internal/service/media/bootstrap.go @@ -63,6 +63,7 @@ func BuildMediaStack(ctx context.Context, db *gorm.DB, audit auditsvc.Service, o ttl := time.Duration(opts.TTLSeconds) * time.Second svc := NewMediaService(db, nil, manager, audit, ttl) + svc.SetPublicResourceTokenSecret(opts.Local.PublicTokenSecret) return manager, svc } diff --git a/backend/internal/service/media/options.go b/backend/internal/service/media/options.go index 53b16224..c0e994da 100644 --- a/backend/internal/service/media/options.go +++ b/backend/internal/service/media/options.go @@ -13,6 +13,7 @@ type StorageLocalOptions struct { BasePath string PublicBaseURL string UploadTokenSecret string + PublicTokenSecret string MaxUploadSizeBytes int64 } diff --git a/backend/internal/service/media/service.go b/backend/internal/service/media/service.go index b435014a..381f6403 100644 --- a/backend/internal/service/media/service.go +++ b/backend/internal/service/media/service.go @@ -2,6 +2,9 @@ package media import ( "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" "encoding/json" "errors" "fmt" @@ -57,6 +60,7 @@ const ( type 
assetRepository interface { List(ctx context.Context, filter mediarepo.AssetListFilter) ([]mediamodel.MediaAsset, int64, error) FindByUUID(ctx context.Context, tenantUUID string, uuid string, includeDeleted bool) (*mediamodel.MediaAsset, error) + FindByStorageKey(ctx context.Context, tenantUUID string, driver, storageKey string) (*mediamodel.MediaAsset, error) ListByDriverAndStorageKey(ctx context.Context, driver, storageKey string) ([]mediamodel.MediaAsset, error) CreateAsset(ctx context.Context, asset *mediamodel.MediaAsset) (*mediamodel.MediaAsset, error) UpdateAsset(ctx context.Context, asset *mediamodel.MediaAsset) (*mediamodel.MediaAsset, error) @@ -70,6 +74,8 @@ type MediaService struct { manager *mediamgr.MediaManager audit auditsvc.Service defaultTTL time.Duration + // publicResourceTokenSecret 用于公开资源入口的临时 token(HMAC)。 + publicResourceTokenSecret []byte } // NewMediaService 构建媒体服务实例。 @@ -89,6 +95,75 @@ func NewMediaService(db *gorm.DB, repo assetRepository, manager *mediamgr.MediaM } } +// SetPublicResourceTokenSecret 设置公开资源入口的 token 签名密钥。 +func (s *MediaService) SetPublicResourceTokenSecret(secret string) { + if s == nil { + return + } + trimmed := strings.TrimSpace(secret) + if trimmed == "" { + s.publicResourceTokenSecret = nil + return + } + s.publicResourceTokenSecret = []byte(trimmed) +} + +func (s *MediaService) generatePublicResourceToken(uuid string, expUnix int64) string { + if s == nil || len(s.publicResourceTokenSecret) == 0 { + return "" + } + id := strings.TrimSpace(uuid) + if id == "" || expUnix <= 0 { + return "" + } + mac := hmac.New(sha256.New, s.publicResourceTokenSecret) + mac.Write([]byte(id)) + mac.Write([]byte("\n")) + mac.Write([]byte(strconv.FormatInt(expUnix, 10))) + return hex.EncodeToString(mac.Sum(nil)) +} + +func (s *MediaService) verifyPublicResourceToken(uuid, expStr, token string) bool { + if s == nil || len(s.publicResourceTokenSecret) == 0 { + return false + } + id := strings.TrimSpace(uuid) + if id == "" { + return false 
+ } + expStr = strings.TrimSpace(expStr) + token = strings.TrimSpace(token) + if expStr == "" || token == "" { + return false + } + expUnix, err := strconv.ParseInt(expStr, 10, 64) + if err != nil || expUnix <= 0 { + return false + } + // 允许轻微时钟漂移 + if time.Now().Unix() > expUnix+5 { + return false + } + expected := s.generatePublicResourceToken(id, expUnix) + if expected == "" { + return false + } + return hmac.Equal([]byte(expected), []byte(token)) +} + +// CanAccessPublicResource 判断公开资源入口是否允许访问。 +// - published:允许匿名访问 +// - 其它状态:仅当携带有效 token+exp 时允许(用于短期分发/预览) +func (s *MediaService) CanAccessPublicResource(asset *Asset, expStr, token string) bool { + if asset == nil { + return false + } + if strings.EqualFold(strings.TrimSpace(asset.BusinessStatus), coremodel.MediaAssetStatusPublished) { + return true + } + return s.verifyPublicResourceToken(asset.UUID, expStr, token) +} + // CreateAssetInput 定义创建媒体资产所需参数。 type CreateAssetInput struct { TenantUUID string @@ -246,6 +321,12 @@ func (s *MediaService) CreateAsset(ctx context.Context, in CreateAssetInput) (*A storageKey = assetUUID.String() } + if existing, findErr := s.repo.FindByStorageKey(ctx, tenantUUID, driverName, storageKey); findErr == nil { + return toAsset(existing), nil + } else if !errors.Is(findErr, gorm.ErrRecordNotFound) { + return nil, findErr + } + tags := normalizeTags(in.Tags) tagsJSON, err := json.Marshal(tags) if err != nil { @@ -577,7 +658,19 @@ func (s *MediaService) PresignAsset(ctx context.Context, in PresignAssetInput) ( case "upload": urlOut.URL = fmt.Sprintf("/api/v1/media/assets/%s", entity.UUID.String()) case "download": - urlOut.URL = fmt.Sprintf("/media/%s/resource", entity.UUID.String()) + if strings.EqualFold(entity.BusinessStatus, coremodel.MediaAssetStatusPublished) { + urlOut.URL = fmt.Sprintf("/media/%s/resource", entity.UUID.String()) + break + } + if len(s.publicResourceTokenSecret) == 0 { + return nil, fmt.Errorf("storage.local.public_token_secret 未配置,无法为非 published 
资源生成可直接访问的下载链接") + } + expUnix := urlOut.ExpireAt.Unix() + token := s.generatePublicResourceToken(entity.UUID.String(), expUnix) + if token == "" { + return nil, fmt.Errorf("生成公开下载 token 失败") + } + urlOut.URL = fmt.Sprintf("/media/%s/resource?exp=%d&token=%s", entity.UUID.String(), expUnix, token) } } diff --git a/backend/internal/service/media/service_test.go b/backend/internal/service/media/service_test.go index e3f087ad..8a967f95 100644 --- a/backend/internal/service/media/service_test.go +++ b/backend/internal/service/media/service_test.go @@ -24,6 +24,7 @@ import ( dbmaudit "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/audit" mediamodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/media" mediarepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/media" + "github.com/ArtisanCloud/PowerX/pkg/utils/testutil" ) const mediaTenantUUID = "8a21845e-d1b6-4df1-b2ce-1d3bde3b8a03" @@ -68,6 +69,24 @@ func (s *stubAssetRepo) FindByUUID(_ context.Context, tenantUUID string, id stri return cloneAsset(asset), nil } +func (s *stubAssetRepo) FindByStorageKey(_ context.Context, tenantUUID string, driver, storageKey string) (*mediamodel.MediaAsset, error) { + s.mu.Lock() + defer s.mu.Unlock() + for _, asset := range s.assets { + if asset.Driver != driver || asset.StorageKey != storageKey { + continue + } + if tenantUUID != "" && asset.TenantUUID != tenantUUID { + continue + } + if asset.DeletedAt.Valid { + continue + } + return cloneAsset(asset), nil + } + return nil, gorm.ErrRecordNotFound +} + func (s *stubAssetRepo) ListByDriverAndStorageKey(_ context.Context, driver, storageKey string) ([]mediamodel.MediaAsset, error) { s.mu.Lock() defer s.mu.Unlock() @@ -237,6 +256,7 @@ func TestMediaService_SyncUploadedFileMetadata(t *testing.T) { } func TestMediaService_PopulateExternalLinkMetadata(t *testing.T) { + testutil.SkipIfNoLocalListener(t) server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { switch r.Method { case http.MethodHead: diff --git a/backend/internal/service/notifications/service.go b/backend/internal/service/notifications/service.go new file mode 100644 index 00000000..a5a781c1 --- /dev/null +++ b/backend/internal/service/notifications/service.go @@ -0,0 +1,112 @@ +package notifications + +import ( + "context" + "strings" + "time" + + "gorm.io/datatypes" + "gorm.io/gorm" + + models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/notification" + repo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/notification" +) + +type Service struct { + db *gorm.DB + repo *repo.NotificationRepository + now func() time.Time +} + +func NewService(db *gorm.DB) *Service { + return &Service{ + db: db, + repo: repo.NewNotificationRepository(db), + now: time.Now, + } +} + +type CreateInput struct { + TenantUUID string + MemberUUID string + Title string + Content string + Type string + Category string + IsImportant bool + RelatedID string + RelatedType string + Actions datatypes.JSON + Metadata datatypes.JSON +} + +func (s *Service) Create(ctx context.Context, input CreateInput) (*models.Notification, error) { + tenantUUID := strings.ToLower(strings.TrimSpace(input.TenantUUID)) + if tenantUUID == "" { + return nil, gorm.ErrInvalidData + } + title := strings.TrimSpace(input.Title) + if title == "" { + title = "系统通知" + } + content := strings.TrimSpace(input.Content) + kind := strings.TrimSpace(input.Type) + if kind == "" { + kind = "info" + } + category := strings.TrimSpace(input.Category) + if category == "" { + category = "system" + } + item := &models.Notification{ + TenantUUID: tenantUUID, + MemberUUID: strings.TrimSpace(input.MemberUUID), + Title: title, + Content: content, + Type: kind, + Category: category, + IsRead: false, + IsImportant: input.IsImportant, + RelatedID: strings.TrimSpace(input.RelatedID), + RelatedType: strings.TrimSpace(input.RelatedType), + Actions: input.Actions, + Metadata: 
input.Metadata, + } + return s.repo.Create(ctx, item) +} + +type ListInput struct { + TenantUUID string + MemberUUID string + Category string + Type string + IsRead *bool + IsImportant *bool + Page int + PageSize int +} + +func (s *Service) List(ctx context.Context, input ListInput) ([]models.Notification, int64, error) { + return s.repo.List(ctx, repo.ListFilter{ + TenantUUID: input.TenantUUID, + MemberUUID: input.MemberUUID, + Category: strings.TrimSpace(input.Category), + Type: strings.TrimSpace(input.Type), + IsRead: input.IsRead, + IsImportant: input.IsImportant, + Page: input.Page, + PageSize: input.PageSize, + }) +} + +func (s *Service) Get(ctx context.Context, tenantUUID, memberUUID, uuid string) (*models.Notification, error) { + return s.repo.FindByUUID(ctx, tenantUUID, memberUUID, uuid) +} + +func (s *Service) MarkRead(ctx context.Context, tenantUUID, memberUUID, uuid string) error { + return s.repo.MarkRead(ctx, tenantUUID, memberUUID, uuid, s.now()) +} + +func (s *Service) Delete(ctx context.Context, tenantUUID, memberUUID, uuid string) error { + return s.repo.Delete(ctx, tenantUUID, memberUUID, uuid) +} diff --git a/backend/internal/service/tenant/tenant_service.go b/backend/internal/service/tenant/tenant_service.go index c8a98bfd..9e5ba7da 100644 --- a/backend/internal/service/tenant/tenant_service.go +++ b/backend/internal/service/tenant/tenant_service.go @@ -7,12 +7,14 @@ import ( "fmt" "github.com/ArtisanCloud/PowerX/internal/service" authsvc "github.com/ArtisanCloud/PowerX/internal/service/auth" + apikeypermissions "github.com/ArtisanCloud/PowerX/internal/service/integration_gateway/apikeypermissions" mdltenant "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/tenant" iamrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/iam" tenantRepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/tenant" "gorm.io/gorm/clause" "strings" + "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" "gorm.io/gorm" 
) @@ -143,6 +145,14 @@ func (s *TenantService) Create(ctx context.Context, in CreateTenantInput) (uint6 if err != nil { return 0, err } + var ownerMemberID *uint64 + if memberID := reqctx.GetMemberID(ctx); memberID > 0 { + value := memberID + ownerMemberID = &value + } + if _, _, err = apikeypermissions.EnsureTenantDefaultProfile(ctx, s.DB, out.UUID.String(), ownerMemberID); err != nil { + return out.ID, err + } // 2) 可选:初始化管理员 if in.InitAdmin != nil { @@ -194,6 +204,16 @@ func (s *TenantService) Upsert(ctx context.Context, in UpsertTenantInput) (uint6 if err != nil { return 0, err } + if isCreate { + var ownerMemberID *uint64 + if memberID := reqctx.GetMemberID(ctx); memberID > 0 { + value := memberID + ownerMemberID = &value + } + if _, _, err = apikeypermissions.EnsureTenantDefaultProfile(ctx, s.DB, out.UUID.String(), ownerMemberID); err != nil { + return out.ID, err + } + } if isCreate && in.InitAdmin != nil { if err := s.initAdmin(ctx, out.UUID.String(), in.InitAdmin); err != nil { diff --git a/backend/internal/tests/grpc/capability_registry/grpc_helpers_test.go b/backend/internal/tests/grpc/capability_registry/grpc_helpers_test.go index 37698c4e..37b0aca3 100644 --- a/backend/internal/tests/grpc/capability_registry/grpc_helpers_test.go +++ b/backend/internal/tests/grpc/capability_registry/grpc_helpers_test.go @@ -17,15 +17,17 @@ func capabilityRegistryContext(t testing.TB, parent context.Context, tenantUUID parent = context.Background() } if tenantUUID == "" { - tenantUUID = "capability-registry" + tenantUUID = defaultTenantUUID } md := metadata.New(map[string]string{ - "x-tenant-uuid": tenantUUID, + "tenant-uuid": tenantUUID, "authorization": "Bearer contract", }) return metadata.NewOutgoingContext(parent, md) } +const defaultTenantUUID = "8a21845e-d1b6-4df1-b2ce-1d3bde3b8a03" + func assertNoCapabilityRegistryTenantLeak(t testing.TB, msg proto.Message) { t.Helper() if msg == nil { diff --git 
a/backend/internal/tests/grpc/capability_registry/registry_contract_test.go b/backend/internal/tests/grpc/capability_registry/registry_contract_test.go index b17ceba7..69738c7b 100644 --- a/backend/internal/tests/grpc/capability_registry/registry_contract_test.go +++ b/backend/internal/tests/grpc/capability_registry/registry_contract_test.go @@ -27,13 +27,13 @@ func TestRegistryGRPCContracts(t *testing.T) { t.Parallel() ctx := context.Background() - tenantCtx := capabilityRegistryContext(t, ctx, "tenant-corex") + tenantCtx := capabilityRegistryContext(t, ctx, defaultTenantUUID) env := newRegistryGRPCTestEnv(t) t.Cleanup(env.Close) createResp, err := env.client.CreateCapability(tenantCtx, &capabilityRegistryPB.CreateCapabilityRequest{ Registration: &capabilityRegistryPB.CapabilityRegistration{ - Id: &capabilityRegistryPB.TenantScopedId{CapabilityId: "capabilities.text.translate", TenantUuid: "tenant-corex"}, + Id: &capabilityRegistryPB.TenantScopedId{CapabilityId: "capabilities.text.translate", TenantUuid: defaultTenantUUID}, ContractRef: "contracts.text.translate@1.0.0", Status: "published", ToolGrantIds: []string{"grant-text-translate"}, @@ -50,7 +50,7 @@ func TestRegistryGRPCContracts(t *testing.T) { assertNoCapabilityRegistryTenantLeak(t, createResp) getResp, err := env.client.GetCapability(tenantCtx, &capabilityRegistryPB.GetCapabilityRequest{ - Id: &capabilityRegistryPB.TenantScopedId{CapabilityId: "capabilities.text.translate", TenantUuid: "tenant-corex"}, + Id: &capabilityRegistryPB.TenantScopedId{CapabilityId: "capabilities.text.translate", TenantUuid: defaultTenantUUID}, }) assertNoError(t, err) assertUint64(t, 1, getResp.GetRegistration().GetVersion(), "get version") @@ -59,7 +59,7 @@ func TestRegistryGRPCContracts(t *testing.T) { updateResp, err := env.client.UpdateCapability(tenantCtx, &capabilityRegistryPB.UpdateCapabilityRequest{ Registration: &capabilityRegistryPB.CapabilityRegistration{ - Id: &capabilityRegistryPB.TenantScopedId{CapabilityId: 
"capabilities.text.translate", TenantUuid: "tenant-corex"}, + Id: &capabilityRegistryPB.TenantScopedId{CapabilityId: "capabilities.text.translate", TenantUuid: defaultTenantUUID}, ContractRef: "contracts.text.translate@1.0.0", Status: "published", Version: 1, @@ -76,7 +76,7 @@ func TestRegistryGRPCContracts(t *testing.T) { _, err = env.client.UpdateCapability(tenantCtx, &capabilityRegistryPB.UpdateCapabilityRequest{ Registration: &capabilityRegistryPB.CapabilityRegistration{ - Id: &capabilityRegistryPB.TenantScopedId{CapabilityId: "capabilities.text.translate", TenantUuid: "tenant-corex"}, + Id: &capabilityRegistryPB.TenantScopedId{CapabilityId: "capabilities.text.translate", TenantUuid: defaultTenantUUID}, ContractRef: "contracts.text.translate@1.0.0", Status: "published", Version: 1, @@ -87,7 +87,7 @@ func TestRegistryGRPCContracts(t *testing.T) { assertStatusCode(t, codes.FailedPrecondition, err) disableResp, err := env.client.DisableCapability(tenantCtx, &capabilityRegistryPB.DisableCapabilityRequest{ - Id: &capabilityRegistryPB.TenantScopedId{CapabilityId: "capabilities.text.translate", TenantUuid: "tenant-corex"}, + Id: &capabilityRegistryPB.TenantScopedId{CapabilityId: "capabilities.text.translate", TenantUuid: defaultTenantUUID}, Reason: "deprecated capability", }) assertNoError(t, err) @@ -96,7 +96,7 @@ func TestRegistryGRPCContracts(t *testing.T) { assertNoCapabilityRegistryTenantLeak(t, disableResp) _, err = env.client.GetCapability(tenantCtx, &capabilityRegistryPB.GetCapabilityRequest{ - Id: &capabilityRegistryPB.TenantScopedId{CapabilityId: "capabilities.text.translate", TenantUuid: "tenant-corex"}, + Id: &capabilityRegistryPB.TenantScopedId{CapabilityId: "capabilities.text.translate", TenantUuid: defaultTenantUUID}, }) assertStatusCode(t, codes.NotFound, err) diff --git a/backend/internal/tests/grpc/capability_registry/router_contract_test.go b/backend/internal/tests/grpc/capability_registry/router_contract_test.go index 5b31ce6e..3983670b 100644 --- 
a/backend/internal/tests/grpc/capability_registry/router_contract_test.go +++ b/backend/internal/tests/grpc/capability_registry/router_contract_test.go @@ -26,7 +26,7 @@ func TestCapabilityRouterGRPCContracts(t *testing.T) { t.Parallel() ctx := context.Background() - tenantCtx := capabilityRegistryContext(t, ctx, "tenant-corex") + tenantCtx := capabilityRegistryContext(t, ctx, defaultTenantUUID) env := newRouterGRPCTestEnv(t) t.Cleanup(env.Close) @@ -34,7 +34,7 @@ func TestCapabilityRouterGRPCContracts(t *testing.T) { invokeResp, err := env.client.Invoke(tenantCtx, &capabilityRegistryPB.InvokeRequest{ Capability: &capabilityRegistryPB.TenantScopedId{ CapabilityId: "capabilities.text.translate", - TenantUuid: "tenant-corex", + TenantUuid: defaultTenantUUID, }, }) assertNoError(t, err) @@ -46,7 +46,7 @@ func TestCapabilityRouterGRPCContracts(t *testing.T) { reportResp, err := env.client.ReportHealth(tenantCtx, &capabilityRegistryPB.ReportHealthRequest{ Id: &capabilityRegistryPB.TenantScopedId{ CapabilityId: "capabilities.text.translate", - TenantUuid: "tenant-corex", + TenantUuid: defaultTenantUUID, }, AdapterId: "adapter-primary", Status: "unhealthy", @@ -59,7 +59,7 @@ func TestCapabilityRouterGRPCContracts(t *testing.T) { invokeResp2, err := env.client.Invoke(tenantCtx, &capabilityRegistryPB.InvokeRequest{ Capability: &capabilityRegistryPB.TenantScopedId{ CapabilityId: "capabilities.text.translate", - TenantUuid: "tenant-corex", + TenantUuid: defaultTenantUUID, }, }) assertNoError(t, err) @@ -70,7 +70,7 @@ func TestCapabilityRouterGRPCContracts(t *testing.T) { reportResp2, err := env.client.ReportHealth(tenantCtx, &capabilityRegistryPB.ReportHealthRequest{ Id: &capabilityRegistryPB.TenantScopedId{ CapabilityId: "capabilities.text.translate", - TenantUuid: "tenant-corex", + TenantUuid: defaultTenantUUID, }, AdapterId: "adapter-backup", Status: "unhealthy", @@ -83,7 +83,7 @@ func TestCapabilityRouterGRPCContracts(t *testing.T) { invokeResp3, err := 
env.client.Invoke(tenantCtx, &capabilityRegistryPB.InvokeRequest{ Capability: &capabilityRegistryPB.TenantScopedId{ CapabilityId: "capabilities.text.translate", - TenantUuid: "tenant-corex", + TenantUuid: defaultTenantUUID, }, }) assertNoError(t, err) @@ -96,7 +96,7 @@ func TestCapabilityRouterGRPCContracts(t *testing.T) { _, err = env.client.Invoke(tenantCtx, &capabilityRegistryPB.InvokeRequest{ Capability: &capabilityRegistryPB.TenantScopedId{ CapabilityId: "capabilities.unknown", - TenantUuid: "tenant-corex", + TenantUuid: defaultTenantUUID, }, }) assertStatusCode(t, codes.NotFound, err) @@ -117,7 +117,7 @@ func newRouterGRPCTestEnv(t *testing.T) *routerGRPCTestEnv { registryRepo := testutil.NewMockRegistryRepository([]router.Registration{ { CapabilityID: "capabilities.text.translate", - TenantUUID: "tenant-corex", + TenantUUID: defaultTenantUUID, Status: "published", Adapters: []router.AdapterEndpoint{ { diff --git a/backend/internal/tests/grpc/capability_registry/router_sandbox_test.go b/backend/internal/tests/grpc/capability_registry/router_sandbox_test.go index dbead8f8..316c2d1d 100644 --- a/backend/internal/tests/grpc/capability_registry/router_sandbox_test.go +++ b/backend/internal/tests/grpc/capability_registry/router_sandbox_test.go @@ -23,7 +23,7 @@ func TestCapabilityRouterSandboxSimulate(t *testing.T) { t.Parallel() ctx := context.Background() - tenantCtx := capabilityRegistryContext(t, ctx, "tenant-corex") + tenantCtx := capabilityRegistryContext(t, ctx, defaultTenantUUID) env := newRouterSandboxTestEnv(t) t.Cleanup(env.Close) @@ -31,7 +31,7 @@ func TestCapabilityRouterSandboxSimulate(t *testing.T) { Request: &capabilityRegistryPB.InvokeRequest{ Capability: &capabilityRegistryPB.TenantScopedId{ CapabilityId: "capabilities.text.translate", - TenantUuid: "tenant-corex", + TenantUuid: defaultTenantUUID, }, }, }) @@ -45,7 +45,7 @@ func TestCapabilityRouterSandboxSimulate(t *testing.T) { err = env.router.ReportHealth(ctx, routerService.ReportHealthInput{ 
CapabilityID: "capabilities.text.translate", - TenantUUID: "tenant-corex", + TenantUUID: defaultTenantUUID, AdapterID: "adapter-primary", Status: "unhealthy", }) @@ -57,7 +57,7 @@ func TestCapabilityRouterSandboxSimulate(t *testing.T) { Request: &capabilityRegistryPB.InvokeRequest{ Capability: &capabilityRegistryPB.TenantScopedId{ CapabilityId: "capabilities.text.translate", - TenantUuid: "tenant-corex", + TenantUuid: defaultTenantUUID, }, }, }) @@ -84,7 +84,7 @@ func newRouterSandboxTestEnv(t *testing.T) *routerSandboxTestEnv { registryRepo := testutil.NewMockRegistryRepository([]routerService.Registration{ { CapabilityID: "capabilities.text.translate", - TenantUUID: "tenant-corex", + TenantUUID: defaultTenantUUID, Status: "published", Adapters: []routerService.AdapterEndpoint{ { diff --git a/backend/internal/tests/grpc/event_fabric/grpc_helpers_test.go b/backend/internal/tests/grpc/event_fabric/grpc_helpers_test.go index 8d9a8dbe..9ed7a713 100644 --- a/backend/internal/tests/grpc/event_fabric/grpc_helpers_test.go +++ b/backend/internal/tests/grpc/event_fabric/grpc_helpers_test.go @@ -22,7 +22,7 @@ func eventFabricGRPCContext(t testing.TB, parent context.Context, tenantUUID str tenantUUID = eventFabricGRPCTenantUUID } md := metadata.New(map[string]string{ - "x-tenant-uuid": tenantUUID, + "tenant-uuid": tenantUUID, "authorization": "Bearer admin", }) return metadata.NewOutgoingContext(parent, md) diff --git a/backend/internal/tests/http/admin/event_fabric/acl_contract_test.go b/backend/internal/tests/http/admin/event_fabric/acl_contract_test.go index 82fe6949..e58410ba 100644 --- a/backend/internal/tests/http/admin/event_fabric/acl_contract_test.go +++ b/backend/internal/tests/http/admin/event_fabric/acl_contract_test.go @@ -23,7 +23,6 @@ func TestACLAdminRESTContracts(t *testing.T) { // Seed topic for ACL operations topic, err := topicStore.Create(context.Background(), &model.TopicDefinition{ - TenantID: 1, TenantKey: "tenant-corex", Namespace: "corex.workflow", 
Name: "approved", @@ -35,6 +34,18 @@ func TestACLAdminRESTContracts(t *testing.T) { if err != nil { t.Fatalf("seed topic failed: %v", err) } + sharedTopic, err := topicStore.Create(context.Background(), &model.TopicDefinition{ + TenantKey: "global", + Namespace: "knowledge.space.feedback", + Name: "reprocess", + Lifecycle: model.TopicLifecycleActive, + PayloadFormat: "json", + MaxRetry: 5, + AckTimeoutSec: 30, + }) + if err != nil { + t.Fatalf("seed shared topic failed: %v", err) + } aclStore := newMemoryAclStore() svc := acl.NewACLService(acl.Options{ @@ -60,6 +71,8 @@ func TestACLAdminRESTContracts(t *testing.T) { attachTenantContext(group, "tenant-corex") group.POST("/acl", handler.UpsertBindings) group.GET("/acl", handler.ListBindings) + group.GET("/acl/topic-matrix", handler.ListTopicRoleMatrix) + group.GET("/acl/principal-matrix", handler.ListPrincipalTopicMatrix) grantReq := map[string]interface{}{ "topic_full_name": "tenant-corex.corex.workflow.approved", @@ -112,6 +125,41 @@ func TestACLAdminRESTContracts(t *testing.T) { if len(items) != 0 { t.Fatalf("expected 0 bindings after revoke, got %d", len(items)) } + + sharedGrantReq := map[string]interface{}{ + "topic_full_name": "global.knowledge.space.feedback.reprocess", + "grants": []map[string]interface{}{ + { + "principal_type": "role", + "principal_id": "role:role_admin", + "action": "replay", + }, + }, + } + sharedResp := httpRequest(t, router, http.MethodPost, "/event-fabric/acl", sharedGrantReq) + if sharedResp.StatusCode != http.StatusOK { + t.Fatalf("shared grant expected 200 got %d", sharedResp.StatusCode) + } + + sharedListResp := httpRequest(t, router, http.MethodGet, "/event-fabric/acl?topic_uuid="+sharedTopic.UUID.String(), nil) + if sharedListResp.StatusCode != http.StatusOK { + t.Fatalf("shared list expected 200 got %d", sharedListResp.StatusCode) + } + decodeJSON(t, sharedListResp.Body, &listPayload) + items = listPayload["data"].(map[string]interface{})["items"].([]interface{}) + if 
len(items) != 1 { + t.Fatalf("expected 1 binding on shared topic, got %d", len(items)) + } + + topicMatrixResp := httpRequest(t, router, http.MethodGet, "/event-fabric/acl/topic-matrix?namespace=knowledge.space.feedback&name=reprocess", nil) + if topicMatrixResp.StatusCode != http.StatusOK { + t.Fatalf("topic matrix expected 200 got %d", topicMatrixResp.StatusCode) + } + + principalMatrixResp := httpRequest(t, router, http.MethodGet, "/event-fabric/acl/principal-matrix?principal_id=role:role_admin&namespace=knowledge.space.feedback&name=reprocess", nil) + if principalMatrixResp.StatusCode != http.StatusOK { + t.Fatalf("principal matrix expected 200 got %d", principalMatrixResp.StatusCode) + } } type memoryAclStore struct { diff --git a/backend/internal/tests/http/admin/event_fabric/dlq_contract_test.go b/backend/internal/tests/http/admin/event_fabric/dlq_contract_test.go index 74d43da0..70c4ea49 100644 --- a/backend/internal/tests/http/admin/event_fabric/dlq_contract_test.go +++ b/backend/internal/tests/http/admin/event_fabric/dlq_contract_test.go @@ -3,6 +3,7 @@ package eventfabric import ( "context" "encoding/base64" + "errors" "net/http" "sync" "testing" @@ -11,6 +12,7 @@ import ( "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/delivery" "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/dlq" "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/replay" + sharedsvc "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/shared" admin "github.com/ArtisanCloud/PowerX/internal/transport/http/admin/event_fabric" "github.com/gin-gonic/gin" ) @@ -26,7 +28,7 @@ func TestDeliveryAdminPublishEndpoint(t *testing.T) { payload := base64.StdEncoding.EncodeToString([]byte(`{"hello":"world"}`)) resp := httpRequest(t, router, http.MethodPost, "/event-fabric/events:publish", map[string]interface{}{ - "topic": "tenant-corex.corex.workflow.approved", + "topic": "corex.workflow.approved", "event_id": "evt-001", "trace_id": "trace-123", 
"version": "v1", @@ -44,7 +46,7 @@ func TestDeliveryAdminPublishEndpoint(t *testing.T) { req := deliveryStub.publishRequests[0] deliveryStub.mu.Unlock() - if req.TenantUUID != "tenant-corex" || req.Topic != "tenant-corex.corex.workflow.approved" || req.EventID != "evt-001" { + if req.TenantUUID != "tenant-corex" || req.Topic != "corex.workflow.approved" || req.EventID != "evt-001" { t.Fatalf("unexpected publish payload: %#v", req) } if string(req.Payload) != `{"hello":"world"}` { @@ -52,7 +54,7 @@ func TestDeliveryAdminPublishEndpoint(t *testing.T) { } badResp := httpRequest(t, router, http.MethodPost, "/event-fabric/events:publish", map[string]interface{}{ - "topic": "tenant-corex.corex.workflow.approved", + "topic": "corex.workflow.approved", "event_id": "evt-002", "version": "v1", "payload": "not-base64", @@ -60,6 +62,28 @@ func TestDeliveryAdminPublishEndpoint(t *testing.T) { if badResp.StatusCode != http.StatusBadRequest { t.Fatalf("expected 400 for invalid payload got %d", badResp.StatusCode) } + + deliveryStub.publishErr = errors.New("topic corex.workflow.missing not found") + notFoundResp := httpRequest(t, router, http.MethodPost, "/event-fabric/events:publish", map[string]interface{}{ + "topic": "corex.workflow.missing", + "event_id": "evt-003", + "version": "v1", + "payload": payload, + }) + if notFoundResp.StatusCode != http.StatusNotFound { + t.Fatalf("expected 404 for unregistered topic got %d", notFoundResp.StatusCode) + } + + deliveryStub.publishErr = sharedsvc.ErrTenantMismatch + tenantMismatchResp := httpRequest(t, router, http.MethodPost, "/event-fabric/events:publish", map[string]interface{}{ + "topic": "00000000-0000-0000-0000-000000000999.corex.workflow.approved", + "event_id": "evt-004", + "version": "v1", + "payload": payload, + }) + if tenantMismatchResp.StatusCode != http.StatusBadRequest { + t.Fatalf("expected 400 for cross-tenant request got %d", tenantMismatchResp.StatusCode) + } } func TestDLQAdminEndpoints(t *testing.T) { @@ -67,8 
+91,8 @@ func TestDLQAdminEndpoints(t *testing.T) { stub := &stubDLQService{ listMessages: []*dlq.Message{ - {ID: "msg-1", TenantUUID: "tenant-corex", TenantID: "tenant-corex", Topic: "tenant-corex.topic.a", EventID: "evt-001", RetryCount: 3, LastError: "timeout", CreatedAt: time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)}, - {ID: "msg-2", TenantUUID: "tenant-corex", TenantID: "tenant-corex", Topic: "tenant-corex.topic.b", EventID: "evt-002", RetryCount: 2, LastError: "nack", CreatedAt: time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC)}, + {ID: "msg-1", TenantUUID: "tenant-corex", Topic: "tenant-corex.topic.a", EventID: "evt-001", RetryCount: 3, LastError: "timeout", CreatedAt: time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)}, + {ID: "msg-2", TenantUUID: "tenant-corex", Topic: "tenant-corex.topic.b", EventID: "evt-002", RetryCount: 2, LastError: "nack", CreatedAt: time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC)}, }, listTotal: 2, replayResult: 2, @@ -145,7 +169,7 @@ func TestReplayAdminEndpoints(t *testing.T) { group.POST("/replay/tasks/:task_id/cancel", replayHandler.CancelTask) createResp := httpRequest(t, router, http.MethodPost, "/event-fabric/replay/tasks", map[string]interface{}{ - "topic": "tenant-corex.corex.workflow.approved", + "topic": "corex.workflow.approved", "trace_id": "trace-abc", "reason": "investigate", "operator_id": "ops-user", @@ -177,13 +201,52 @@ func TestReplayAdminEndpoints(t *testing.T) { } replayStub.mu.Lock() - defer replayStub.mu.Unlock() if replayStub.lastCreate.TenantKey != "tenant-corex" || !replayStub.lastCreate.Shadow { + replayStub.mu.Unlock() t.Fatalf("unexpected create input: %+v", replayStub.lastCreate) } + if replayStub.lastCreate.Topic != "corex.workflow.approved" { + replayStub.mu.Unlock() + t.Fatalf("expected semantic topic key, got %+v", replayStub.lastCreate.Topic) + } if replayStub.lastCancel.id != "task-123" { + replayStub.mu.Unlock() t.Fatalf("unexpected cancel input: %+v", replayStub.lastCancel) } + replayStub.mu.Unlock() + + 
replayStub.createErr = errors.New("topic corex.workflow.missing not found") + replayNotFoundResp := httpRequest(t, router, http.MethodPost, "/event-fabric/replay/tasks", map[string]interface{}{ + "topic": "corex.workflow.missing", + "trace_id": "trace-abc", + "reason": "investigate", + "operator_id": "ops-user", + }) + if replayNotFoundResp.StatusCode != http.StatusNotFound { + t.Fatalf("expected 404 for replay topic not found got %d", replayNotFoundResp.StatusCode) + } + + replayStub.createErr = errors.New("topic tenant mismatch: 00000000-0000-0000-0000-000000000999.corex.workflow.approved") + replayTenantMismatchResp := httpRequest(t, router, http.MethodPost, "/event-fabric/replay/tasks", map[string]interface{}{ + "topic": "00000000-0000-0000-0000-000000000999.corex.workflow.approved", + "trace_id": "trace-abc", + "reason": "investigate", + "operator_id": "ops-user", + }) + if replayTenantMismatchResp.StatusCode != http.StatusBadRequest { + t.Fatalf("expected 400 for replay cross-tenant request got %d", replayTenantMismatchResp.StatusCode) + } + + replayStub.createErr = errors.Join(sharedsvc.ErrUnauthorized, errors.New("principal denied")) + replayUnauthorizedResp := httpRequest(t, router, http.MethodPost, "/event-fabric/replay/tasks", map[string]interface{}{ + "topic": "corex.workflow.approved", + "trace_id": "trace-abc", + "reason": "investigate", + "operator_id": "ops-user", + }) + if replayUnauthorizedResp.StatusCode != http.StatusForbidden { + t.Fatalf("expected 403 for replay acl denied got %d", replayUnauthorizedResp.StatusCode) + } } type stubDLQService struct { @@ -255,6 +318,7 @@ func (s *stubDeliveryService) PollRetry(context.Context, int) (map[string][]deli type stubReplayService struct { mu sync.Mutex lastCreate replay.CreateTaskInput + createErr error lastCancel struct { id string operator string @@ -266,6 +330,9 @@ func (s *stubReplayService) CreateTask(ctx context.Context, input replay.CreateT s.mu.Lock() defer s.mu.Unlock() s.lastCreate = input + 
if s.createErr != nil { + return nil, s.createErr + } return s.task, nil } diff --git a/backend/internal/tests/http/admin/event_fabric/test_helpers.go b/backend/internal/tests/http/admin/event_fabric/test_helpers.go index cf7ab483..cdf1307a 100644 --- a/backend/internal/tests/http/admin/event_fabric/test_helpers.go +++ b/backend/internal/tests/http/admin/event_fabric/test_helpers.go @@ -62,13 +62,11 @@ func decodeJSON(t *testing.T, body io.ReadCloser, out interface{}) { func applyEventFabricHeaders(t testing.TB, req *http.Request, tenantUUID string) { t.Helper() require.NotNil(t, req, "request must not be nil") - require.Empty(t, strings.TrimSpace(req.Header.Get("X-Tenant-ID")), "legacy X-Tenant-ID header forbidden") - require.Empty(t, strings.TrimSpace(req.Header.Get("X-PowerX-Tenant")), "legacy X-PowerX-Tenant header forbidden") - require.Empty(t, strings.TrimSpace(req.Header.Get("Tenant-ID")), "legacy Tenant-ID header forbidden") if req.Header.Get("Authorization") == "" { req.Header.Set("Authorization", "Bearer admin") } - req.Header.Set("X-Tenant-UUID", strings.TrimSpace(tenantUUID)) + ctx := reqctx.WithTenantUUID(req.Context(), tenantUUID) + *req = *req.WithContext(ctx) } func assertNoEventFabricTenantLeak(t testing.TB, payload []byte) { diff --git a/backend/internal/tests/integration/media/media_asset_upload_flow_test.go b/backend/internal/tests/integration/media/media_asset_upload_flow_test.go index 34c0a4fe..3b2f8727 100644 --- a/backend/internal/tests/integration/media/media_asset_upload_flow_test.go +++ b/backend/internal/tests/integration/media/media_asset_upload_flow_test.go @@ -239,6 +239,24 @@ func (m *memoryAssetRepo) FindByUUID(_ context.Context, tenantUUID string, uuid return cloneAsset(asset), nil } +func (m *memoryAssetRepo) FindByStorageKey(_ context.Context, tenantUUID string, driver, storageKey string) (*mediamodel.MediaAsset, error) { + m.mu.Lock() + defer m.mu.Unlock() + for _, asset := range m.assets { + if asset.Driver != driver || 
asset.StorageKey != storageKey { + continue + } + if tenantUUID != "" && asset.TenantUUID != tenantUUID { + continue + } + if asset.DeletedAt.Valid { + continue + } + return cloneAsset(asset), nil + } + return nil, gorm.ErrRecordNotFound +} + func (m *memoryAssetRepo) ListByDriverAndStorageKey(_ context.Context, driver, storageKey string) ([]mediamodel.MediaAsset, error) { m.mu.RLock() defer m.mu.RUnlock() diff --git a/backend/internal/transport/grpc/agent/setting_handler.go b/backend/internal/transport/grpc/agent/setting_handler.go index ab693d24..5ce63db0 100644 --- a/backend/internal/transport/grpc/agent/setting_handler.go +++ b/backend/internal/transport/grpc/agent/setting_handler.go @@ -64,7 +64,7 @@ func (s *SettingAIServiceServer) ListProviders(ctx context.Context, req *setting /*************** ListModels ***************/ func (s *SettingAIServiceServer) ListModels(ctx context.Context, req *settingv1.ListModelsRequest) (*settingv1.ListModelsResponse, error) { - models, err := s.svc.Models(modalityToString(req.GetModality()), strings.TrimSpace(req.GetProvider())) + models, err := s.svc.Models(modalityToString(req.GetModality()), strings.TrimSpace(req.GetProvider()), "") if err != nil { return &settingv1.ListModelsResponse{ Meta: badMeta(ctx, 400, err.Error(), req.GetCtx().GetRequestId()), diff --git a/backend/internal/transport/grpc/event_fabric/tenant_scope.go b/backend/internal/transport/grpc/event_fabric/tenant_scope.go index fba7ce46..b760fe97 100644 --- a/backend/internal/transport/grpc/event_fabric/tenant_scope.go +++ b/backend/internal/transport/grpc/event_fabric/tenant_scope.go @@ -6,7 +6,6 @@ import ( "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -15,18 +14,6 @@ func tenantUUIDFromRequest(ctx context.Context, candidate string) (string, error if value == "" { value = strings.TrimSpace(reqctx.GetTenantUUID(ctx)) } - if value == "" { - if md, 
ok := metadata.FromIncomingContext(ctx); ok { - for _, key := range []string{"x-tenant-uuid", "tenant-uuid", "x-powerx-tenant-uuid"} { - if vals := md.Get(key); len(vals) > 0 { - if trimmed := strings.TrimSpace(vals[0]); trimmed != "" { - value = trimmed - break - } - } - } - } - } if value == "" { return "", status.Error(codes.InvalidArgument, "tenant uuid required") } diff --git a/backend/internal/transport/grpc/iam/tenant_scope.go b/backend/internal/transport/grpc/iam/tenant_scope.go index 8a441e12..f6fde946 100644 --- a/backend/internal/transport/grpc/iam/tenant_scope.go +++ b/backend/internal/transport/grpc/iam/tenant_scope.go @@ -10,7 +10,6 @@ import ( "github.com/ArtisanCloud/PowerX/internal/app/shared" tenantrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/tenant" "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" - "google.golang.org/grpc/metadata" ) func tenantUUIDFromContext(ctx context.Context, rc *commonv1.RequestContext) (string, error) { @@ -26,18 +25,6 @@ func tenantUUIDFromContext(ctx context.Context, rc *commonv1.RequestContext) (st if candidate == "" { candidate = strings.TrimSpace(reqctx.GetTenantUUID(ctx)) } - if candidate == "" { - if md, ok := metadata.FromIncomingContext(ctx); ok { - for _, key := range []string{"x-tenant-uuid", "tenant-uuid", "x-powerx-tenant-uuid"} { - if vals := md.Get(key); len(vals) > 0 { - if trimmed := strings.TrimSpace(vals[0]); trimmed != "" { - candidate = trimmed - break - } - } - } - } - } if candidate == "" { return "", errors.New("tenant_uuid required") } diff --git a/backend/internal/transport/grpc/knowledge_space/decay_service.go b/backend/internal/transport/grpc/knowledge_space/decay_service.go new file mode 100644 index 00000000..0cc89dd9 --- /dev/null +++ b/backend/internal/transport/grpc/knowledge_space/decay_service.go @@ -0,0 +1,101 @@ +package knowledge_space + +import ( + "context" + "errors" + "strings" + + knowledgev1 
"github.com/ArtisanCloud/PowerX/api/grpc/gen/go/powerx/knowledge/v1" + decay_guard "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space/decay_guard" + models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" + "github.com/google/uuid" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "gorm.io/gorm" +) + +func toProtoDecayTasks(tasks []*models.DecayTask) []*knowledgev1.DecayTask { + if len(tasks) == 0 { + return []*knowledgev1.DecayTask{} + } + result := make([]*knowledgev1.DecayTask, 0, len(tasks)) + for _, task := range tasks { + if dto := toProtoDecayTask(task); dto != nil { + result = append(result, dto) + } + } + return result +} + +func toProtoDecayTask(task *models.DecayTask) *knowledgev1.DecayTask { + if task == nil { + return nil + } + return &knowledgev1.DecayTask{ + TaskId: task.UUID.String(), + SpaceId: task.SpaceUUID.String(), + Category: task.Category, + Severity: task.Severity, + Status: task.Status, + DetectedAt: timestampValue(task.DetectedAt), + SlaDueAt: timestampValue(task.SLADueAt), + FalsePositive: task.FalsePositive, + } +} + +func mapDecayError(err error) error { + switch { + case errors.Is(err, decay_guard.ErrInvalidInput): + return status.Error(codes.InvalidArgument, err.Error()) + case errors.Is(err, decay_guard.ErrTaskNotFound), errors.Is(err, gorm.ErrRecordNotFound): + return status.Error(codes.NotFound, err.Error()) + default: + return status.Error(codes.Internal, err.Error()) + } +} + +func (s *Server) RunDecayScan(ctx context.Context, req *knowledgev1.RunDecayScanRequest) (*knowledgev1.RunDecayScanResponse, error) { + if s.decay == nil { + return nil, status.Error(codes.Unimplemented, "decay service not available") + } + spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) + } + tasks, err := s.decay.RunScan(ctx, spaceID, int(req.GetDetected())) + if err != nil { + return 
nil, mapDecayError(err) + } + return &knowledgev1.RunDecayScanResponse{Tasks: toProtoDecayTasks(tasks)}, nil +} + +func (s *Server) ListDecayTasks(ctx context.Context, req *knowledgev1.ListDecayTasksRequest) (*knowledgev1.ListDecayTasksResponse, error) { + if s.decay == nil { + return nil, status.Error(codes.Unimplemented, "decay service not available") + } + spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) + } + tasks, err := s.decay.ListOpen(ctx, spaceID) + if err != nil { + return nil, mapDecayError(err) + } + return &knowledgev1.ListDecayTasksResponse{Tasks: toProtoDecayTasks(tasks)}, nil +} + +func (s *Server) RestoreDecayTask(ctx context.Context, req *knowledgev1.RestoreDecayTaskRequest) (*knowledgev1.RestoreDecayTaskResponse, error) { + if s.decay == nil { + return nil, status.Error(codes.Unimplemented, "decay service not available") + } + taskID, err := uuid.Parse(strings.TrimSpace(req.GetTaskId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid task id: %v", err) + } + task, err := s.decay.Restore(ctx, taskID, req.GetNotes(), req.GetFalsePositive()) + if err != nil { + return nil, mapDecayError(err) + } + return &knowledgev1.RestoreDecayTaskResponse{Task: toProtoDecayTask(task)}, nil +} + diff --git a/backend/internal/transport/grpc/knowledge_space/feedback_service.go b/backend/internal/transport/grpc/knowledge_space/feedback_service.go new file mode 100644 index 00000000..d34de88c --- /dev/null +++ b/backend/internal/transport/grpc/knowledge_space/feedback_service.go @@ -0,0 +1,230 @@ +package knowledge_space + +import ( + "context" + "encoding/json" + "errors" + "strings" + + knowledgev1 "github.com/ArtisanCloud/PowerX/api/grpc/gen/go/powerx/knowledge/v1" + ksvc "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space" + models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" + 
"github.com/google/uuid" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func (s *Server) SubmitFeedback(ctx context.Context, req *knowledgev1.FeedbackRequest) (*knowledgev1.FeedbackResponse, error) { + if s.feedback == nil { + return nil, status.Error(codes.Unimplemented, "feedback service not available") + } + spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) + } + chunkIDs := make([]uuid.UUID, 0, len(req.GetLinkedChunks())) + for _, chunk := range req.GetLinkedChunks() { + id, err := uuid.Parse(strings.TrimSpace(chunk)) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid chunk id: %v", err) + } + chunkIDs = append(chunkIDs, id) + } + caseModel, err := s.feedback.SubmitFeedback(ctx, ksvc.SubmitFeedbackInput{ + SpaceID: spaceID, + ReportedBy: req.GetReportedBy(), + Severity: req.GetSeverity(), + IssueType: req.GetIssueType(), + Notes: req.GetNotes(), + ToolTraceRef: req.GetToolTraceRef(), + LinkedChunks: chunkIDs, + }) + if err != nil { + return nil, mapFeedbackError(err) + } + return &knowledgev1.FeedbackResponse{Case: toProtoFeedbackCase(caseModel)}, nil +} + +func (s *Server) ListFeedbackCases(ctx context.Context, req *knowledgev1.ListFeedbackCasesRequest) (*knowledgev1.ListFeedbackCasesResponse, error) { + if s.feedback == nil { + return nil, status.Error(codes.Unimplemented, "feedback service not available") + } + spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) + } + cases, err := s.feedback.ListCases(ctx, spaceID, int(req.GetLimit())) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + resp := make([]*knowledgev1.FeedbackCase, 0, len(cases)) + for _, item := range cases { + resp = append(resp, 
toProtoFeedbackCase(item)) + } + return &knowledgev1.ListFeedbackCasesResponse{Cases: resp}, nil +} + +func (s *Server) CloseFeedbackCase(ctx context.Context, req *knowledgev1.CloseFeedbackCaseRequest) (*knowledgev1.FeedbackResponse, error) { + if s.feedback == nil { + return nil, status.Error(codes.Unimplemented, "feedback service not available") + } + spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) + } + caseID, err := uuid.Parse(strings.TrimSpace(req.GetCaseId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid case id: %v", err) + } + caseModel, err := s.feedback.CloseCase(ctx, ksvc.FeedbackCaseUpdateInput{ + SpaceID: spaceID, + CaseID: caseID, + Actor: req.GetRequestedBy(), + Notes: req.GetResolutionNotes(), + }) + if err != nil { + return nil, mapFeedbackError(err) + } + return &knowledgev1.FeedbackResponse{Case: toProtoFeedbackCase(caseModel)}, nil +} + +func (s *Server) EscalateFeedbackCase(ctx context.Context, req *knowledgev1.EscalateFeedbackCaseRequest) (*knowledgev1.FeedbackResponse, error) { + if s.feedback == nil { + return nil, status.Error(codes.Unimplemented, "feedback service not available") + } + spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) + } + caseID, err := uuid.Parse(strings.TrimSpace(req.GetCaseId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid case id: %v", err) + } + caseModel, err := s.feedback.EscalateCase(ctx, ksvc.FeedbackCaseUpdateInput{ + SpaceID: spaceID, + CaseID: caseID, + Actor: req.GetRequestedBy(), + Notes: req.GetReason(), + }) + if err != nil { + return nil, mapFeedbackError(err) + } + return &knowledgev1.FeedbackResponse{Case: toProtoFeedbackCase(caseModel)}, nil +} + +func (s *Server) ReprocessFeedbackCase(ctx context.Context, req 
*knowledgev1.ReprocessFeedbackCaseRequest) (*knowledgev1.FeedbackResponse, error) { + if s.feedback == nil { + return nil, status.Error(codes.Unimplemented, "feedback service not available") + } + spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) + } + caseID, err := uuid.Parse(strings.TrimSpace(req.GetCaseId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid case id: %v", err) + } + caseModel, err := s.feedback.ReprocessCase(ctx, spaceID, caseID, req.GetRequestedBy()) + if err != nil { + return nil, mapFeedbackError(err) + } + return &knowledgev1.FeedbackResponse{Case: toProtoFeedbackCase(caseModel)}, nil +} + +func (s *Server) RollbackFeedbackCase(ctx context.Context, req *knowledgev1.RollbackFeedbackCaseRequest) (*knowledgev1.FeedbackResponse, error) { + if s.feedback == nil { + return nil, status.Error(codes.Unimplemented, "feedback service not available") + } + spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) + } + caseID, err := uuid.Parse(strings.TrimSpace(req.GetCaseId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid case id: %v", err) + } + caseModel, err := s.feedback.RollbackCase(ctx, spaceID, caseID, req.GetRequestedBy(), req.GetReason()) + if err != nil { + return nil, mapFeedbackError(err) + } + return &knowledgev1.FeedbackResponse{Case: toProtoFeedbackCase(caseModel)}, nil +} + +func (s *Server) ExportFeedbackCases(ctx context.Context, req *knowledgev1.ExportFeedbackCasesRequest) (*knowledgev1.ExportFeedbackCasesResponse, error) { + if s.feedback == nil { + return nil, status.Error(codes.Unimplemented, "feedback service not available") + } + spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) + if err != nil { + return nil, 
status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) + } + export, err := s.feedback.ExportCases(ctx, spaceID, ksvc.ListFeedbackFilter{ + Status: req.GetStatus(), + Severity: req.GetSeverity(), + Limit: int(req.GetLimit()), + }) + if err != nil { + return nil, mapFeedbackError(err) + } + resp := make([]*knowledgev1.FeedbackCase, 0, len(export.Cases)) + for _, item := range export.Cases { + resp = append(resp, toProtoFeedbackCase(item)) + } + payload, _ := json.Marshal(export) + return &knowledgev1.ExportFeedbackCasesResponse{ + Cases: resp, + ExportJson: string(payload), + }, nil +} + +func toProtoFeedbackCase(caseModel *models.FeedbackCase) *knowledgev1.FeedbackCase { + if caseModel == nil { + return nil + } + var chunks []string + if len(caseModel.LinkedChunks) > 0 { + _ = json.Unmarshal(caseModel.LinkedChunks, &chunks) + } + var slaDue *timestamppb.Timestamp + if caseModel.SLADueAt != nil { + slaDue = timestamppb.New(*caseModel.SLADueAt) + } + var escalatedAt *timestamppb.Timestamp + if caseModel.EscalatedAt != nil { + escalatedAt = timestamppb.New(*caseModel.EscalatedAt) + } + var closedAt *timestamppb.Timestamp + if caseModel.ClosedAt != nil { + closedAt = timestamppb.New(*caseModel.ClosedAt) + } + return &knowledgev1.FeedbackCase{ + CaseId: caseModel.UUID.String(), + SpaceId: caseModel.SpaceUUID.String(), + Status: caseModel.Status, + Severity: caseModel.Severity, + IssueType: caseModel.IssueType, + LinkedChunks: chunks, + ReportedBy: caseModel.ReportedBy, + Notes: caseModel.Notes, + ToolTraceRef: caseModel.ToolTraceRef, + QualityScore: caseModel.QualityScore, + SlaDueAt: slaDue, + CreatedAt: timestamppb.New(caseModel.CreatedAt), + UpdatedAt: timestamppb.New(caseModel.UpdatedAt), + EscalatedAt: escalatedAt, + ClosedAt: closedAt, + ResolutionNotes: caseModel.ResolutionNotes, + } +} + +func mapFeedbackError(err error) error { + switch { + case errors.Is(err, ksvc.ErrInvalidInput): + return status.Error(codes.InvalidArgument, err.Error()) + case 
errors.Is(err, ksvc.ErrSpaceNotFound): + return status.Error(codes.NotFound, err.Error()) + default: + return status.Error(codes.Internal, err.Error()) + } +} diff --git a/backend/internal/transport/grpc/knowledge_space/fusion_service.go b/backend/internal/transport/grpc/knowledge_space/fusion_service.go new file mode 100644 index 00000000..e22a8681 --- /dev/null +++ b/backend/internal/transport/grpc/knowledge_space/fusion_service.go @@ -0,0 +1,70 @@ +package knowledge_space + +import ( + "context" + "strings" + + knowledgev1 "github.com/ArtisanCloud/PowerX/api/grpc/gen/go/powerx/knowledge/v1" + ksvc "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space" + "github.com/google/uuid" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (s *Server) PublishFusionStrategy(ctx context.Context, req *knowledgev1.FusionStrategyRequest) (*knowledgev1.FusionStrategyResponse, error) { + if s.fusion == nil { + return nil, status.Error(codes.Unimplemented, "fusion service not available") + } + spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) + } + strategy, err := s.fusion.PublishStrategy(ctx, ksvc.PublishStrategyInput{ + SpaceID: spaceID, + Label: req.GetLabel(), + BM25Weight: req.GetBm25Weight(), + VectorWeight: req.GetVectorWeight(), + GraphConstraint: req.GetGraphConstraint(), + RerankerModel: req.GetRerankerModel(), + ConflictPolicy: req.GetConflictPolicy(), + }) + if err != nil { + return nil, mapFusionError(err) + } + return &knowledgev1.FusionStrategyResponse{Strategy: toProtoFusionStrategy(strategy)}, nil +} + +func (s *Server) ListFusionStrategies(ctx context.Context, req *knowledgev1.ListFusionStrategiesRequest) (*knowledgev1.ListFusionStrategiesResponse, error) { + if s.fusion == nil { + return nil, status.Error(codes.Unimplemented, "fusion service not available") + } + spaceID, err := 
uuid.Parse(strings.TrimSpace(req.GetSpaceId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) + } + strategies, err := s.fusion.ListStrategies(ctx, spaceID, int(req.GetLimit())) + if err != nil { + return nil, mapFusionError(err) + } + return &knowledgev1.ListFusionStrategiesResponse{ + Strategies: toProtoFusionStrategyList(strategies), + }, nil +} + +func (s *Server) RollbackFusionStrategy(ctx context.Context, req *knowledgev1.RollbackFusionStrategyRequest) (*knowledgev1.FusionStrategyResponse, error) { + if s.fusion == nil { + return nil, status.Error(codes.Unimplemented, "fusion service not available") + } + spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) + } + strategy, err := s.fusion.RollbackStrategy(ctx, ksvc.RollbackStrategyInput{ + SpaceID: spaceID, + StrategyID: req.GetStrategyId(), + }) + if err != nil { + return nil, mapFusionError(err) + } + return &knowledgev1.FusionStrategyResponse{Strategy: toProtoFusionStrategy(strategy)}, nil +} diff --git a/backend/internal/transport/grpc/knowledge_space/ingestion_service.go b/backend/internal/transport/grpc/knowledge_space/ingestion_service.go new file mode 100644 index 00000000..770ef43b --- /dev/null +++ b/backend/internal/transport/grpc/knowledge_space/ingestion_service.go @@ -0,0 +1,81 @@ +package knowledge_space + +import ( + "context" + "errors" + "net/http" + "strings" + + knowledgev1 "github.com/ArtisanCloud/PowerX/api/grpc/gen/go/powerx/knowledge/v1" + ksvc "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space" + "github.com/ArtisanCloud/PowerX/pkg/dto" + "github.com/google/uuid" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (s *Server) TriggerIngestion(ctx context.Context, req *knowledgev1.IngestionJobRequest) (*knowledgev1.IngestionJobResponse, error) { + if s.ingestion == nil { + return nil, 
status.Error(codes.Unimplemented, "ingestion service not available") + } + spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) + } + + format := strings.TrimSpace(req.GetFormat()) + if format == "" { + format = strings.TrimSpace(req.GetSourceType()) + } + if format == "" { + return nil, status.Error(codes.InvalidArgument, "missing format/source_type") + } + + job, err := s.ingestion.Trigger(ctx, ksvc.TriggerIngestionInput{ + SpaceID: spaceID, + Format: format, + SourceURI: req.GetSourceUri(), + IngestionProfile: req.GetIngestionProfile(), + ProcessorProfile: req.GetProcessorProfile(), + OCRRequired: req.GetOcrRequired(), + MaskingProfile: req.GetMaskingProfile(), + Priority: req.GetPriority(), + RequestedBy: req.GetRequestedBy(), + }) + if err != nil { + var appErr *dto.AppError + if errors.As(err, &appErr) { + return nil, status.Error(codeFromHTTP(appErr.HTTPCode), appErr.Message) + } + switch { + case errors.Is(err, ksvc.ErrInvalidInput): + return nil, status.Error(codes.InvalidArgument, err.Error()) + case errors.Is(err, ksvc.ErrSpaceNotFound): + return nil, status.Error(codes.NotFound, err.Error()) + default: + return nil, status.Error(codes.Internal, err.Error()) + } + } + return &knowledgev1.IngestionJobResponse{Job: toProtoIngestionJob(job)}, nil +} + +func codeFromHTTP(code int) codes.Code { + switch code { + case http.StatusBadRequest: + return codes.InvalidArgument + case http.StatusUnauthorized: + return codes.Unauthenticated + case http.StatusForbidden: + return codes.PermissionDenied + case http.StatusNotFound: + return codes.NotFound + case http.StatusConflict: + return codes.AlreadyExists + case http.StatusPreconditionFailed: + return codes.FailedPrecondition + case http.StatusTooManyRequests: + return codes.ResourceExhausted + default: + return codes.Internal + } +} diff --git a/backend/internal/transport/grpc/knowledge_space/service.go 
b/backend/internal/transport/grpc/knowledge_space/service.go index cbe17ab3..412eacae 100644 --- a/backend/internal/transport/grpc/knowledge_space/service.go +++ b/backend/internal/transport/grpc/knowledge_space/service.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "errors" - "fmt" "strconv" "strings" "time" @@ -21,13 +20,14 @@ import ( "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space/toolchain" models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" + "github.com/ArtisanCloud/PowerX/pkg/dto" "github.com/google/uuid" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/structpb" "google.golang.org/protobuf/types/known/timestamppb" "gorm.io/datatypes" - "gorm.io/gorm" ) // Server implements KnowledgeSpaceAdminService + QABridge APIs. @@ -144,92 +144,6 @@ func (s *Server) RetireKnowledgeSpace(ctx context.Context, req *knowledgev1.Reti return &knowledgev1.RetireKnowledgeSpaceResponse{Space: toProto(space)}, nil } -func (s *Server) TriggerIngestion(ctx context.Context, req *knowledgev1.IngestionJobRequest) (*knowledgev1.IngestionJobResponse, error) { - if s.ingestion == nil { - return nil, status.Error(codes.Unimplemented, "ingestion service not available") - } - spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) - } - job, err := s.ingestion.Trigger(ctx, ksvc.TriggerIngestionInput{ - SpaceID: spaceID, - SourceType: req.GetSourceType(), - SourceURI: req.GetSourceUri(), - MaskingProfile: req.GetMaskingProfile(), - Priority: req.GetPriority(), - }) - if err != nil { - switch { - case errors.Is(err, ksvc.ErrInvalidInput): - return nil, status.Error(codes.InvalidArgument, err.Error()) - case errors.Is(err, ksvc.ErrSpaceNotFound): - return nil, status.Error(codes.NotFound, err.Error()) - 
default: - return nil, status.Error(codes.Internal, err.Error()) - } - } - return &knowledgev1.IngestionJobResponse{Job: toProtoIngestionJob(job)}, nil -} - -func (s *Server) PublishFusionStrategy(ctx context.Context, req *knowledgev1.FusionStrategyRequest) (*knowledgev1.FusionStrategyResponse, error) { - if s.fusion == nil { - return nil, status.Error(codes.Unimplemented, "fusion service not available") - } - spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) - } - strategy, err := s.fusion.PublishStrategy(ctx, ksvc.PublishStrategyInput{ - SpaceID: spaceID, - Label: req.GetLabel(), - BM25Weight: req.GetBm25Weight(), - VectorWeight: req.GetVectorWeight(), - GraphConstraint: req.GetGraphConstraint(), - RerankerModel: req.GetRerankerModel(), - ConflictPolicy: req.GetConflictPolicy(), - }) - if err != nil { - return nil, mapFusionError(err) - } - return &knowledgev1.FusionStrategyResponse{Strategy: toProtoFusionStrategy(strategy)}, nil -} - -func (s *Server) ListFusionStrategies(ctx context.Context, req *knowledgev1.ListFusionStrategiesRequest) (*knowledgev1.ListFusionStrategiesResponse, error) { - if s.fusion == nil { - return nil, status.Error(codes.Unimplemented, "fusion service not available") - } - spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) - } - strategies, err := s.fusion.ListStrategies(ctx, spaceID, int(req.GetLimit())) - if err != nil { - return nil, mapFusionError(err) - } - return &knowledgev1.ListFusionStrategiesResponse{ - Strategies: toProtoFusionStrategyList(strategies), - }, nil -} - -func (s *Server) RollbackFusionStrategy(ctx context.Context, req *knowledgev1.RollbackFusionStrategyRequest) (*knowledgev1.FusionStrategyResponse, error) { - if s.fusion == nil { - return nil, status.Error(codes.Unimplemented, "fusion service not 
available") - } - spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) - } - strategy, err := s.fusion.RollbackStrategy(ctx, ksvc.RollbackStrategyInput{ - SpaceID: spaceID, - StrategyID: req.GetStrategyId(), - }) - if err != nil { - return nil, mapFusionError(err) - } - return &knowledgev1.FusionStrategyResponse{Strategy: toProtoFusionStrategy(strategy)}, nil -} - func (s *Server) PlanRetrieval(ctx context.Context, req *knowledgev1.QARetrievalPlanRequest) (*knowledgev1.QARetrievalPlanResponse, error) { if s.qa == nil { return nil, status.Error(codes.Unavailable, "qa bridge not available") @@ -265,9 +179,21 @@ func (s *Server) PlanRetrieval(ctx context.Context, req *knowledgev1.QARetrieval TraceId: out.TraceID, RecordedAt: timestamppb.New(out.RecordedAt), }, - DegradeCount: int32(out.DegradeCount), - SessionId: out.SessionID, - LatencyBudgetMs: int32(out.LatencyBudgetMs), + DegradeCount: int32(out.DegradeCount), + SessionId: out.SessionID, + LatencyBudgetMs: int32(out.LatencyBudgetMs), + Stages: toProtoPlanStages(out.Stages), + PolicyVersionSnapshot: out.PolicySnapshot, + Metadata: func() *structpb.Struct { + if len(out.Metadata) == 0 { + return nil + } + st, err := structpb.NewStruct(out.Metadata) + if err != nil { + return nil + } + return st + }(), }, nil } @@ -283,6 +209,7 @@ func (s *Server) UpsertMemorySnapshot(ctx context.Context, req *knowledgev1.QAMe TenantUUID: tenantUUID, SessionID: req.GetSessionId(), Updates: fromProtoUpdates(req.GetUpdates()), + TraceID: req.GetTraceId(), }) if err != nil { if errors.Is(err, qaBridge.ErrInvalidInput) { @@ -294,6 +221,16 @@ func (s *Server) UpsertMemorySnapshot(ctx context.Context, req *knowledgev1.QAMe TenantUuid: out.TenantUUID.String(), SessionId: out.SessionID, Citations: toProtoCitations(out.Citations), + Metadata: func() *structpb.Struct { + if len(out.Metadata) == 0 { + return nil + } + st, err := 
structpb.NewStruct(out.Metadata) + if err != nil { + return nil + } + return st + }(), }, nil } @@ -313,56 +250,6 @@ func tenantUUIDFromContext(ctx context.Context, candidate string) (uuid.UUID, er return parsed, nil } -func (s *Server) SubmitFeedback(ctx context.Context, req *knowledgev1.FeedbackRequest) (*knowledgev1.FeedbackResponse, error) { - if s.feedback == nil { - return nil, status.Error(codes.Unimplemented, "feedback service not available") - } - spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) - } - chunkIDs := make([]uuid.UUID, 0, len(req.GetLinkedChunks())) - for _, chunk := range req.GetLinkedChunks() { - id, err := uuid.Parse(strings.TrimSpace(chunk)) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid chunk id: %v", err) - } - chunkIDs = append(chunkIDs, id) - } - caseModel, err := s.feedback.SubmitFeedback(ctx, ksvc.SubmitFeedbackInput{ - SpaceID: spaceID, - ReportedBy: req.GetReportedBy(), - Severity: req.GetSeverity(), - IssueType: req.GetIssueType(), - Notes: req.GetNotes(), - ToolTraceRef: req.GetToolTraceRef(), - LinkedChunks: chunkIDs, - }) - if err != nil { - return nil, mapFeedbackError(err) - } - return &knowledgev1.FeedbackResponse{Case: toProtoFeedbackCase(caseModel)}, nil -} - -func (s *Server) ListFeedbackCases(ctx context.Context, req *knowledgev1.ListFeedbackCasesRequest) (*knowledgev1.ListFeedbackCasesResponse, error) { - if s.feedback == nil { - return nil, status.Error(codes.Unimplemented, "feedback service not available") - } - spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) - } - cases, err := s.feedback.ListCases(ctx, spaceID, int(req.GetLimit())) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - resp := make([]*knowledgev1.FeedbackCase, 0, 
len(cases)) - for _, item := range cases { - resp = append(resp, toProtoFeedbackCase(item)) - } - return &knowledgev1.ListFeedbackCasesResponse{Cases: resp}, nil -} - func parsePolicy(v string) (uint64, error) { v = strings.TrimSpace(v) if v == "" { @@ -391,6 +278,12 @@ func toProto(space *models.KnowledgeSpace) *knowledgev1.KnowledgeSpace { } func mapError(err error) error { + if err != nil { + var appErr *dto.AppError + if errors.As(err, &appErr) { + return status.Error(codeFromHTTP(appErr.HTTPCode), appErr.Message) + } + } switch { case ksvc.IsConflictError(err): return status.Error(codes.AlreadyExists, err.Error()) @@ -422,35 +315,9 @@ func toProtoIngestionJob(job *models.IngestionJob) *knowledgev1.IngestionJobStat ChunkCoveredPct: float32(job.ChunkCoveredPct), EmbeddingSuccessPct: float32(job.EmbeddingSuccessPct), MaskingCoveragePct: float32(job.MaskingCoveragePct), - } -} - -func toProtoFeedbackCase(caseModel *models.FeedbackCase) *knowledgev1.FeedbackCase { - if caseModel == nil { - return nil - } - var chunks []string - if len(caseModel.LinkedChunks) > 0 { - _ = json.Unmarshal(caseModel.LinkedChunks, &chunks) - } - var slaDue *timestamppb.Timestamp - if caseModel.SLADueAt != nil { - slaDue = timestamppb.New(*caseModel.SLADueAt) - } - return &knowledgev1.FeedbackCase{ - CaseId: caseModel.UUID.String(), - SpaceId: caseModel.SpaceUUID.String(), - Status: caseModel.Status, - Severity: caseModel.Severity, - IssueType: caseModel.IssueType, - LinkedChunks: chunks, - ReportedBy: caseModel.ReportedBy, - Notes: caseModel.Notes, - ToolTraceRef: caseModel.ToolTraceRef, - QualityScore: caseModel.QualityScore, - SlaDueAt: slaDue, - CreatedAt: timestamppb.New(caseModel.CreatedAt), - UpdatedAt: timestamppb.New(caseModel.UpdatedAt), + RetryCount: uint32(job.RetryCount), + ErrorCode: job.ErrorCode, + BlockedReason: job.BlockedReason, } } @@ -462,6 +329,12 @@ func toProtoFusionStrategy(strategy *models.FusionStrategyVersion) *knowledgev1. 
if strategy.PublishedAt != nil { publishedAt = timestamppb.New(*strategy.PublishedAt) } + var snap struct { + DegradeReasons []string `json:"degrade_reasons"` + } + if len(strategy.BenchmarkMetrics) > 0 { + _ = json.Unmarshal(strategy.BenchmarkMetrics, &snap) + } return &knowledgev1.FusionStrategy{ StrategyId: strategy.ID, SpaceId: strategy.SpaceUUID.String(), @@ -473,6 +346,7 @@ func toProtoFusionStrategy(strategy *models.FusionStrategyVersion) *knowledgev1. ConflictPolicy: strategy.ConflictPolicy, DeploymentState: protoDeploymentState(strategy.DeploymentState), PublishedAt: publishedAt, + DegradeReasons: snap.DegradeReasons, } } @@ -543,33 +417,20 @@ func toProtoCitations(items []context_snapshot.Citation) []*knowledgev1.QACitati return out } -func toProtoDecayTasks(tasks []*models.DecayTask) []*knowledgev1.DecayTask { - if len(tasks) == 0 { - return []*knowledgev1.DecayTask{} - } - result := make([]*knowledgev1.DecayTask, 0, len(tasks)) - for _, task := range tasks { - if dto := toProtoDecayTask(task); dto != nil { - result = append(result, dto) - } - } - return result -} - -func toProtoDecayTask(task *models.DecayTask) *knowledgev1.DecayTask { - if task == nil { - return nil +func toProtoPlanStages(items []qaBridge.PlanStage) []*knowledgev1.QAPlanStage { + if len(items) == 0 { + return []*knowledgev1.QAPlanStage{} } - return &knowledgev1.DecayTask{ - TaskId: task.UUID.String(), - SpaceId: task.SpaceUUID.String(), - Category: task.Category, - Severity: task.Severity, - Status: task.Status, - DetectedAt: timestampValue(task.DetectedAt), - SlaDueAt: timestampValue(task.SLADueAt), - FalsePositive: task.FalsePositive, + out := make([]*knowledgev1.QAPlanStage, 0, len(items)) + for _, item := range items { + out = append(out, &knowledgev1.QAPlanStage{ + Name: item.Name, + CandidateCount: int32(item.CandidateCount), + LatencyMs: int32(item.LatencyMs), + DegradeReason: item.DegradeReason, + }) } + return out } func protoDeploymentState(state string) 
knowledgev1.FusionStrategy_DeploymentState { @@ -604,6 +465,8 @@ func mapDeltaError(err error) error { switch { case errors.Is(err, ksdelta.ErrInvalidInput), errors.Is(err, ksdelta.ErrUnknownSource): return status.Error(codes.InvalidArgument, err.Error()) + case errors.Is(err, ksdelta.ErrJobConflict): + return status.Error(codes.Aborted, err.Error()) case errors.Is(err, ksdelta.ErrSpaceNotFound), errors.Is(err, ksdelta.ErrJobNotFound): return status.Error(codes.NotFound, err.Error()) case errors.Is(err, ksdelta.ErrPartialReleaseDenied): @@ -613,51 +476,6 @@ func mapDeltaError(err error) error { } } -func mapDecayError(err error) error { - switch { - case errors.Is(err, decay_guard.ErrInvalidInput): - return status.Error(codes.InvalidArgument, err.Error()) - case errors.Is(err, decay_guard.ErrTaskNotFound), errors.Is(err, gorm.ErrRecordNotFound): - return status.Error(codes.NotFound, err.Error()) - default: - return status.Error(codes.Internal, err.Error()) - } -} - -func mapReleaseError(err error) error { - switch { - case errors.Is(err, tenant_release.ErrInvalidInput): - return status.Error(codes.InvalidArgument, err.Error()) - case errors.Is(err, tenant_release.ErrPolicyNotFound), errors.Is(err, tenant_release.ErrBatchNotFound): - return status.Error(codes.NotFound, err.Error()) - case errors.Is(err, tenant_release.ErrBatchPaused): - return status.Error(codes.Aborted, err.Error()) - default: - return status.Error(codes.Internal, err.Error()) - } -} - -func fromProtoBatches(items []*knowledgev1.ReleaseBatch) []tenant_release.BatchSpec { - if len(items) == 0 { - return nil - } - result := make([]tenant_release.BatchSpec, 0, len(items)) - for _, item := range items { - if item == nil { - continue - } - result = append(result, tenant_release.BatchSpec{ - Name: item.GetName(), - Tenants: item.GetTenants(), - }) - } - return result -} - -func parsePolicyID(raw string) (uint64, error) { - return strconv.ParseUint(strings.TrimSpace(raw), 10, 64) -} - func 
toProtoDeltaJob(job *models.DeltaJob) *knowledgev1.DeltaJob { if job == nil { return nil @@ -725,17 +543,6 @@ func toEventInput(eventID, eventType string, payload map[string]string, ts *time } } -func mapFeedbackError(err error) error { - switch { - case errors.Is(err, ksvc.ErrInvalidInput): - return status.Error(codes.InvalidArgument, err.Error()) - case errors.Is(err, ksvc.ErrSpaceNotFound): - return status.Error(codes.NotFound, err.Error()) - default: - return status.Error(codes.Internal, err.Error()) - } -} - func (s *Server) StartDeltaJob(ctx context.Context, req *knowledgev1.StartDeltaJobRequest) (*knowledgev1.StartDeltaJobResponse, error) { if s.delta == nil { return nil, status.Error(codes.Unimplemented, "delta service not available") @@ -870,141 +677,3 @@ func (s *Server) RefreshAgentWeights(ctx context.Context, req *knowledgev1.Refre } return &knowledgev1.RefreshAgentResponse{Status: statusText}, nil } - -func (s *Server) RunDecayScan(ctx context.Context, req *knowledgev1.RunDecayScanRequest) (*knowledgev1.RunDecayScanResponse, error) { - if s.decay == nil { - return nil, status.Error(codes.Unimplemented, "decay service not available") - } - spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) - } - tasks, err := s.decay.RunScan(ctx, spaceID, int(req.GetDetected())) - if err != nil { - return nil, mapDecayError(err) - } - return &knowledgev1.RunDecayScanResponse{Tasks: toProtoDecayTasks(tasks)}, nil -} - -func (s *Server) ListDecayTasks(ctx context.Context, req *knowledgev1.ListDecayTasksRequest) (*knowledgev1.ListDecayTasksResponse, error) { - if s.decay == nil { - return nil, status.Error(codes.Unimplemented, "decay service not available") - } - spaceID, err := uuid.Parse(strings.TrimSpace(req.GetSpaceId())) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid space id: %v", err) - } - tasks, err := s.decay.ListOpen(ctx, 
spaceID) - if err != nil { - return nil, mapDecayError(err) - } - return &knowledgev1.ListDecayTasksResponse{Tasks: toProtoDecayTasks(tasks)}, nil -} - -func (s *Server) RestoreDecayTask(ctx context.Context, req *knowledgev1.RestoreDecayTaskRequest) (*knowledgev1.RestoreDecayTaskResponse, error) { - if s.decay == nil { - return nil, status.Error(codes.Unimplemented, "decay service not available") - } - taskID, err := uuid.Parse(strings.TrimSpace(req.GetTaskId())) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid task id: %v", err) - } - task, err := s.decay.Restore(ctx, taskID, req.GetNotes(), req.GetFalsePositive()) - if err != nil { - return nil, mapDecayError(err) - } - return &knowledgev1.RestoreDecayTaskResponse{Task: toProtoDecayTask(task)}, nil -} - -func (s *Server) UpsertReleasePolicy(ctx context.Context, req *knowledgev1.UpsertReleasePolicyRequest) (*knowledgev1.UpsertReleasePolicyResponse, error) { - if s.release == nil { - return nil, status.Error(codes.Unimplemented, "release service not available") - } - policy, err := s.release.UpsertPolicy(ctx, tenant_release.UpsertPolicyInput{ - MatrixVersion: req.GetMatrixVersion(), - PilotTenants: req.GetPilotTenants(), - Batches: fromProtoBatches(req.GetBatches()), - Guardrails: req.GetGuardrails(), - ApprovedBy: req.GetApprovedBy(), - CreatedBy: req.GetCreatedBy(), - }) - if err != nil { - return nil, mapReleaseError(err) - } - return &knowledgev1.UpsertReleasePolicyResponse{ - PolicyId: fmt.Sprintf("%d", policy.ID), - Status: policy.Status, - }, nil -} - -func (s *Server) PublishRelease(ctx context.Context, req *knowledgev1.PublishReleaseRequest) (*knowledgev1.PublishReleaseResponse, error) { - if s.release == nil { - return nil, status.Error(codes.Unimplemented, "release service not available") - } - policyID, err := parsePolicyID(req.GetPolicyId()) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid policy id: %v", err) - } - res, err := 
s.release.Publish(ctx, tenant_release.PublishInput{ - PolicyID: policyID, - VersionID: req.GetVersionId(), - RequestedBy: req.GetRequestedBy(), - }) - if err != nil { - return nil, mapReleaseError(err) - } - return &knowledgev1.PublishReleaseResponse{ - ReleaseId: res.ReleaseID, - VersionId: res.VersionID, - BatchToken: res.BatchToken, - BatchIndex: int32(res.BatchIndex), - Tenants: res.Tenants, - }, nil -} - -func (s *Server) PromoteRelease(ctx context.Context, req *knowledgev1.PromoteReleaseRequest) (*knowledgev1.PromoteReleaseResponse, error) { - if s.release == nil { - return nil, status.Error(codes.Unimplemented, "release service not available") - } - policyID, err := parsePolicyID(req.GetPolicyId()) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid policy id: %v", err) - } - result, serr := s.release.Promote(ctx, tenant_release.PromoteInput{ - PolicyID: policyID, - VersionID: req.GetVersionId(), - BatchToken: req.GetBatchToken(), - Alerts: req.GetAlerts(), - RequestedBy: req.GetRequestedBy(), - }) - if serr != nil && !errors.Is(serr, tenant_release.ErrBatchPaused) { - return nil, mapReleaseError(serr) - } - return &knowledgev1.PromoteReleaseResponse{ - NextBatchToken: result.BatchToken, - BatchIndex: int32(result.BatchIndex), - Tenants: result.Tenants, - State: result.State, - TenantCoverage: result.TenantCoverage, - }, nil -} - -func (s *Server) RollbackRelease(ctx context.Context, req *knowledgev1.RollbackReleaseRequest) (*knowledgev1.RollbackReleaseResponse, error) { - if s.release == nil { - return nil, status.Error(codes.Unimplemented, "release service not available") - } - policyID, err := parsePolicyID(req.GetPolicyId()) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid policy id: %v", err) - } - res, err := s.release.Rollback(ctx, tenant_release.RollbackInput{ - PolicyID: policyID, - VersionID: req.GetVersionId(), - Reason: req.GetReason(), - RequestedBy: req.GetRequestedBy(), - }) - if err != 
nil { - return nil, mapReleaseError(err) - } - return &knowledgev1.RollbackReleaseResponse{Status: res.Status}, nil -} diff --git a/backend/internal/transport/grpc/knowledge_space/tenant_release_service.go b/backend/internal/transport/grpc/knowledge_space/tenant_release_service.go new file mode 100644 index 00000000..683a308f --- /dev/null +++ b/backend/internal/transport/grpc/knowledge_space/tenant_release_service.go @@ -0,0 +1,142 @@ +package knowledge_space + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + + knowledgev1 "github.com/ArtisanCloud/PowerX/api/grpc/gen/go/powerx/knowledge/v1" + tenant_release "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space/tenant_release" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func mapReleaseError(err error) error { + switch { + case errors.Is(err, tenant_release.ErrInvalidInput): + return status.Error(codes.InvalidArgument, err.Error()) + case errors.Is(err, tenant_release.ErrPolicyNotFound), errors.Is(err, tenant_release.ErrBatchNotFound): + return status.Error(codes.NotFound, err.Error()) + case errors.Is(err, tenant_release.ErrBatchPaused): + return status.Error(codes.Aborted, err.Error()) + default: + return status.Error(codes.Internal, err.Error()) + } +} + +func fromProtoBatches(items []*knowledgev1.ReleaseBatch) []tenant_release.BatchSpec { + if len(items) == 0 { + return nil + } + result := make([]tenant_release.BatchSpec, 0, len(items)) + for _, item := range items { + if item == nil { + continue + } + result = append(result, tenant_release.BatchSpec{ + Name: item.GetName(), + Tenants: item.GetTenants(), + }) + } + return result +} + +func parsePolicyID(raw string) (uint64, error) { + return strconv.ParseUint(strings.TrimSpace(raw), 10, 64) +} + +func (s *Server) UpsertReleasePolicy(ctx context.Context, req *knowledgev1.UpsertReleasePolicyRequest) (*knowledgev1.UpsertReleasePolicyResponse, error) { + if s.release == nil { + return nil, 
status.Error(codes.Unimplemented, "release service not available") + } + policy, err := s.release.UpsertPolicy(ctx, tenant_release.UpsertPolicyInput{ + MatrixVersion: req.GetMatrixVersion(), + PilotTenants: req.GetPilotTenants(), + Batches: fromProtoBatches(req.GetBatches()), + Guardrails: req.GetGuardrails(), + ApprovedBy: req.GetApprovedBy(), + CreatedBy: req.GetCreatedBy(), + }) + if err != nil { + return nil, mapReleaseError(err) + } + return &knowledgev1.UpsertReleasePolicyResponse{ + PolicyId: fmt.Sprintf("%d", policy.ID), + Status: policy.Status, + }, nil +} + +func (s *Server) PublishRelease(ctx context.Context, req *knowledgev1.PublishReleaseRequest) (*knowledgev1.PublishReleaseResponse, error) { + if s.release == nil { + return nil, status.Error(codes.Unimplemented, "release service not available") + } + policyID, err := parsePolicyID(req.GetPolicyId()) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid policy id: %v", err) + } + res, err := s.release.Publish(ctx, tenant_release.PublishInput{ + PolicyID: policyID, + VersionID: req.GetVersionId(), + RequestedBy: req.GetRequestedBy(), + }) + if err != nil { + return nil, mapReleaseError(err) + } + return &knowledgev1.PublishReleaseResponse{ + ReleaseId: res.ReleaseID, + VersionId: res.VersionID, + BatchToken: res.BatchToken, + BatchIndex: int32(res.BatchIndex), + Tenants: res.Tenants, + }, nil +} + +func (s *Server) PromoteRelease(ctx context.Context, req *knowledgev1.PromoteReleaseRequest) (*knowledgev1.PromoteReleaseResponse, error) { + if s.release == nil { + return nil, status.Error(codes.Unimplemented, "release service not available") + } + policyID, err := parsePolicyID(req.GetPolicyId()) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid policy id: %v", err) + } + result, serr := s.release.Promote(ctx, tenant_release.PromoteInput{ + PolicyID: policyID, + VersionID: req.GetVersionId(), + BatchToken: req.GetBatchToken(), + Alerts: req.GetAlerts(), + 
RequestedBy: req.GetRequestedBy(), + }) + if serr != nil && !errors.Is(serr, tenant_release.ErrBatchPaused) { + return nil, mapReleaseError(serr) + } + return &knowledgev1.PromoteReleaseResponse{ + NextBatchToken: result.BatchToken, + BatchIndex: int32(result.BatchIndex), + Tenants: result.Tenants, + State: result.State, + TenantCoverage: result.TenantCoverage, + }, nil +} + +func (s *Server) RollbackRelease(ctx context.Context, req *knowledgev1.RollbackReleaseRequest) (*knowledgev1.RollbackReleaseResponse, error) { + if s.release == nil { + return nil, status.Error(codes.Unimplemented, "release service not available") + } + policyID, err := parsePolicyID(req.GetPolicyId()) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid policy id: %v", err) + } + res, err := s.release.Rollback(ctx, tenant_release.RollbackInput{ + PolicyID: policyID, + VersionID: req.GetVersionId(), + Reason: req.GetReason(), + RequestedBy: req.GetRequestedBy(), + }) + if err != nil { + return nil, mapReleaseError(err) + } + return &knowledgev1.RollbackReleaseResponse{Status: res.Status}, nil +} + diff --git a/backend/internal/transport/grpc/plugin_release/server.go b/backend/internal/transport/grpc/plugin_release/server.go index abe874d0..e3b3c1ae 100644 --- a/backend/internal/transport/grpc/plugin_release/server.go +++ b/backend/internal/transport/grpc/plugin_release/server.go @@ -474,17 +474,6 @@ func (s *server) resolveTenantUUID(ctx context.Context, payload string) (string, } return canonical, nil } - if md, ok := metadata.FromIncomingContext(ctx); ok { - for _, key := range []string{"x-tenant-uuid", "tenant-uuid", "x-powerx-tenant-uuid"} { - if vals := md.Get(key); len(vals) > 0 { - if canonical, err := s.optionalTenantUUID(ctx, vals[0]); err == nil && canonical != "" { - return canonical, nil - } else if err != nil { - return "", err - } - } - } - } return "", errors.New("tenant uuid required") } diff --git 
a/backend/internal/transport/http/admin/agent/agent_handler.go b/backend/internal/transport/http/admin/agent/agent_handler.go index 2cc4f889..2c95746d 100644 --- a/backend/internal/transport/http/admin/agent/agent_handler.go +++ b/backend/internal/transport/http/admin/agent/agent_handler.go @@ -10,6 +10,7 @@ import ( "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" dtoRequest "github.com/ArtisanCloud/PowerX/pkg/dto" "github.com/ArtisanCloud/PowerX/pkg/utils" + "github.com/google/uuid" "gorm.io/datatypes" "strings" "time" @@ -30,7 +31,9 @@ func NewAgentHandler(dep *shared.Deps) *AgentHandler { } type AgentStatusRequest struct { - AgentID string `form:"agent_id" json:"agent_id,omitempty"` // GET 用 form/query + // 兼容字段:历史实现要求 agent_id(运行期 manager key),但前端不会传。 + AgentID string `form:"agent_id" json:"agent_id,omitempty"` + AgentUUID string `form:"agent_uuid" json:"agent_uuid,omitempty"` } type AgentStatusResponse struct { @@ -55,35 +58,16 @@ func (h *AgentHandler) Status(c *gin.Context) { dtoRequest.ResponseValidationError(c, err) return } - if strings.TrimSpace(req.AgentID) == "" { - dtoRequest.ResponseError(c, 400, "agent_id 不能为空", nil) - return - } - - mgr := agent.GetAgentManager() - sysAg, _, rt, err := mgr.Get(req.AgentID) - if err != nil { - // Not found 更合适 - dtoRequest.ResponseError(c, 404, "未找到指定的 Agent", err) - return - } + // 该接口用于前端“启动阶段探活”,必须允许无 agent_id/agent_uuid 的调用。 + dtoRequest.ResponseSuccess(c, gin.H{ + "status": "ok", + "message": "success", + }) +} - resp := &AgentStatusResponse{ - AgentInfo: &agentschema.AgentInfo{ - AgentID: sysAg.AgentID, - Name: sysAg.Name, - Description: sysAg.Description, - Status: string(sysAg.Status), - Config: sysAg.Config, - CreatedAt: sysAg.CreatedAt, - UpdatedAt: sysAg.UpdatedAt, - LastBeatAt: sysAg.LastBeatAt, - Runtime: rt, - Extras: sysAg.Extras, - }, - } - dtoRequest.ResponseSuccess(c, resp) - return +func parseAgentUUIDParam(c *gin.Context) (uuid.UUID, error) { + raw := 
strings.TrimSpace(utils.FirstNonEmpty(c.Param("uuid"), c.Param("id"))) + return uuid.Parse(raw) } // /api/agents/intent 支持单意图(默认) 或 多任务(?multi=1) @@ -294,12 +278,12 @@ func (h *AgentHandler) GetAgent(c *gin.Context) { return } tenantRef := tenantCtx.UUIDPtr() - agentID, err := utils.ParseUintID(c.Param("id")) + agentUUID, err := parseAgentUUIDParam(c) if err != nil { - dtoRequest.ResponseError(c, 400, "id 非法", nil) + dtoRequest.ResponseError(c, 400, "uuid 非法", nil) return } - out, err := h.srv.Get(c.Request.Context(), env, tenantRef, agentID) + out, err := h.srv.GetByUUID(c.Request.Context(), env, tenantRef, agentUUID) if err != nil { dtoRequest.ResponseError(c, 404, "未找到", err) return @@ -320,9 +304,14 @@ func (h *AgentHandler) UpdateAgent(c *gin.Context) { return } tenantRef := tenantCtx.UUIDPtr() - agentID, err := utils.ParseUintID(c.Param("id")) + agentUUID, err := parseAgentUUIDParam(c) + if err != nil { + dtoRequest.ResponseError(c, 400, "uuid 非法", nil) + return + } + exist, err := h.srv.GetByUUID(c.Request.Context(), env, tenantRef, agentUUID) if err != nil { - dtoRequest.ResponseError(c, 400, "id 非法", nil) + dtoRequest.ResponseError(c, 404, "未找到", err) return } @@ -339,7 +328,7 @@ func (h *AgentHandler) UpdateAgent(c *gin.Context) { KBStrategy: req.KBStrategy, Meta: req.Meta, } - out, err := h.srv.Update(c.Request.Context(), env, tenantRef, agentID, patch) + out, err := h.srv.Update(c.Request.Context(), env, tenantRef, exist.ID, patch) if err != nil { dtoRequest.ResponseError(c, 400, err.Error(), nil) return @@ -357,12 +346,17 @@ func (h *AgentHandler) setAgentStatus(c *gin.Context, status string) { return } tenantRef := tenantCtx.UUIDPtr() - agentID, err := utils.ParseUintID(c.Param("id")) + agentUUID, err := parseAgentUUIDParam(c) if err != nil { - dtoRequest.ResponseError(c, 400, "id 非法", nil) + dtoRequest.ResponseError(c, 400, "uuid 非法", nil) + return + } + exist, err := h.srv.GetByUUID(c.Request.Context(), env, tenantRef, agentUUID) + if err != nil { + 
dtoRequest.ResponseError(c, 404, "未找到", err) return } - if err := h.srv.SetStatus(c.Request.Context(), env, tenantRef, agentID, status); err != nil { + if err := h.srv.SetStatus(c.Request.Context(), env, tenantRef, exist.ID, status); err != nil { dtoRequest.ResponseError(c, 400, err.Error(), nil) return } @@ -377,12 +371,17 @@ func (h *AgentHandler) DeleteAgent(c *gin.Context) { return } tenantRef := tenantCtx.UUIDPtr() - agentID, err := utils.ParseUintID(c.Param("id")) + agentUUID, err := parseAgentUUIDParam(c) if err != nil { - dtoRequest.ResponseError(c, 400, "id 非法", nil) + dtoRequest.ResponseError(c, 400, "uuid 非法", nil) return } - if err := h.srv.Delete(c.Request.Context(), env, tenantRef, agentID); err != nil { + exist, err := h.srv.GetByUUID(c.Request.Context(), env, tenantRef, agentUUID) + if err != nil { + dtoRequest.ResponseError(c, 404, "未找到", err) + return + } + if err := h.srv.Delete(c.Request.Context(), env, tenantRef, exist.ID); err != nil { dtoRequest.ResponseError(c, 400, err.Error(), nil) return } @@ -408,12 +407,17 @@ func (h *AgentHandler) GetAgentAISetting(c *gin.Context) { return } tenantRef := tenantCtx.UUIDPtr() - agentID, err := utils.ParseUintID(c.Param("id")) + agentUUID, err := parseAgentUUIDParam(c) + if err != nil { + dtoRequest.ResponseError(c, 400, "uuid 非法", nil) + return + } + exist, err := h.srv.GetByUUID(c.Request.Context(), env, tenantRef, agentUUID) if err != nil { - dtoRequest.ResponseError(c, 400, "id 非法", nil) + dtoRequest.ResponseError(c, 404, "未找到", err) return } - setting, err := h.srv.GetAgentAISetting(c.Request.Context(), env, tenantRef, agentID) + setting, err := h.srv.GetAgentAISetting(c.Request.Context(), env, tenantRef, exist.ID) if err != nil { dtoRequest.ResponseError(c, 404, "未找到", err) return @@ -433,15 +437,20 @@ func (h *AgentHandler) UpsertAgentAISetting(c *gin.Context) { return } tenantRef := tenantCtx.UUIDPtr() - agentID, err := utils.ParseUintID(c.Param("id")) + agentUUID, err := parseAgentUUIDParam(c) + 
if err != nil { + dtoRequest.ResponseError(c, 400, "uuid 非法", nil) + return + } + exist, err := h.srv.GetByUUID(c.Request.Context(), req.Env, tenantRef, agentUUID) if err != nil { - dtoRequest.ResponseError(c, 400, "id 非法", nil) + dtoRequest.ResponseError(c, 404, "未找到", err) return } in := &dbmodel.AgentSetting{ Env: req.Env, - AgentID: agentID, + AgentID: exist.ID, Provider: strings.TrimSpace(req.Provider), Model: strings.TrimSpace(req.Model), Params: req.Params, @@ -466,12 +475,17 @@ func (h *AgentHandler) DeleteAgentAISetting(c *gin.Context) { return } tenantRef := tenantCtx.UUIDPtr() - agentID, err := utils.ParseUintID(c.Param("id")) + agentUUID, err := parseAgentUUIDParam(c) + if err != nil { + dtoRequest.ResponseError(c, 400, "uuid 非法", nil) + return + } + exist, err := h.srv.GetByUUID(c.Request.Context(), env, tenantRef, agentUUID) if err != nil { - dtoRequest.ResponseError(c, 400, "id 非法", nil) + dtoRequest.ResponseError(c, 404, "未找到", err) return } - if err := h.srv.DeleteAgentAISetting(c.Request.Context(), env, tenantRef, agentID); err != nil { + if err := h.srv.DeleteAgentAISetting(c.Request.Context(), env, tenantRef, exist.ID); err != nil { dtoRequest.ResponseError(c, 400, err.Error(), nil) return } @@ -486,12 +500,17 @@ func (h *AgentHandler) AgentHealthCheck(c *gin.Context) { return } tenantRef := tenantCtx.UUIDPtr() - agentID, err := utils.ParseUintID(c.Param("id")) + agentUUID, err := parseAgentUUIDParam(c) if err != nil { - dtoRequest.ResponseError(c, 400, "id 非法", nil) + dtoRequest.ResponseError(c, 400, "uuid 非法", nil) + return + } + exist, err := h.srv.GetByUUID(c.Request.Context(), env, tenantRef, agentUUID) + if err != nil { + dtoRequest.ResponseError(c, 404, "未找到", err) return } - info, err := h.srv.HealthCheck(c.Request.Context(), env, tenantRef, agentID) + info, err := h.srv.HealthCheck(c.Request.Context(), env, tenantRef, exist.ID) if err != nil { dtoRequest.ResponseError(c, 400, "检查失败", err) return diff --git 
a/backend/internal/transport/http/admin/agent/agent_session_handler.go b/backend/internal/transport/http/admin/agent/agent_session_handler.go index 109a174a..e21ddbe6 100644 --- a/backend/internal/transport/http/admin/agent/agent_session_handler.go +++ b/backend/internal/transport/http/admin/agent/agent_session_handler.go @@ -8,27 +8,34 @@ import ( dbmodel "github.com/ArtisanCloud/PowerX/internal/server/agent/persistence/model" agentSvc "github.com/ArtisanCloud/PowerX/internal/service/agent" dto "github.com/ArtisanCloud/PowerX/pkg/dto" + "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" "github.com/ArtisanCloud/PowerX/pkg/utils" "github.com/gin-gonic/gin" + "github.com/google/uuid" "gorm.io/datatypes" ) // ===== Service holder ===== type AgentSessionHandler struct { - his *agentSvc.ChatHistoryService + his *agentSvc.ChatHistoryService + ag *agentSvc.AgentService + settings *agentSvc.AgentSettingService } func NewAgentSessionHandler(dep *shared.Deps) *AgentSessionHandler { return &AgentSessionHandler{ - his: agentSvc.NewChatHistoryService(dep.DB), + his: agentSvc.NewChatHistoryService(dep.DB), + ag: agentSvc.NewAgentService(dep.DB), + settings: agentSvc.NewAgentSettingService(dep.DB), } } // ====== Requests/Responses ====== type createSessionReq struct { - Env string `json:"env" validate:"required"` - AgentID uint64 `json:"agentId" validate:"required"` + Env string `json:"env"` + AgentID uint64 `json:"agentId"` + AgentUUID string `json:"agentUuid"` Title string `json:"title"` UserID uint64 `json:"userId"` // 可选;没有就由后端取鉴权上下文(此处留空也行) Singleton *bool `json:"singleton,omitempty"` // 不传就按 Agent 策略;这里只作直传 @@ -46,7 +53,7 @@ type updateSessionReq struct { } type appendMsgReq struct { - Env string `json:"env" validate:"required"` + Env string `json:"env"` SessionID uint64 `json:"sessionId" validate:"required"` AgentID uint64 `json:"agentId" validate:"required"` Role string `json:"role" validate:"required"` // user|assistant|system|tool|summary @@ -67,12 +74,42 @@ func (h 
*AgentSessionHandler) CreateSession(c *gin.Context) { dto.ResponseValidationError(c, err) return } + env, err := resolveAgentEnv(c, h.settings) + if err != nil { + dto.ResponseError(c, 400, err.Error(), nil) + return + } + if strings.TrimSpace(req.Env) == "" || strings.EqualFold(strings.TrimSpace(req.Env), "default") { + req.Env = env + } tenantCtx, err := requireTenantContext(c) if err != nil { dto.ResponseError(c, 400, err.Error(), nil) return } tenantRef := tenantCtx.UUIDPtr() + agentID := req.AgentID + if strings.TrimSpace(req.AgentUUID) != "" { + agentUUID, err := uuid.Parse(strings.TrimSpace(req.AgentUUID)) + if err != nil { + dto.ResponseError(c, 400, "agentUuid 非法", err) + return + } + exist, err := h.ag.GetByUUID(c.Request.Context(), req.Env, tenantRef, agentUUID) + if err != nil { + dto.ResponseError(c, 404, "未找到指定的 Agent", err) + return + } + agentID = exist.ID + } + if agentID == 0 { + dto.ResponseError(c, 400, "agentId/agentUuid 必填", nil) + return + } + userID := req.UserID + if userID == 0 { + userID = reqctx.GetUserID(c.Request.Context()) + } // 单例标志:如果没传,默认 false(可在上层读取 Agent 配置再传入) singleton := false @@ -88,7 +125,7 @@ func (h *AgentSessionHandler) CreateSession(c *gin.Context) { Meta: req.Meta, } - out, err := h.his.GetOrCreateSession(c.Request.Context(), req.Env, tenantRef, req.AgentID, req.UserID, singleton, &def) + out, err := h.his.GetOrCreateSession(c.Request.Context(), req.Env, tenantRef, agentID, userID, singleton, &def) if err != nil { dto.ResponseError(c, 400, err.Error(), nil) return @@ -98,17 +135,37 @@ func (h *AgentSessionHandler) CreateSession(c *gin.Context) { // GET /agents/sessions?env=...&agent_id=1&status=active,archived&limit=50&offset=0 func (h *AgentSessionHandler) ListSessions(c *gin.Context) { - env := c.DefaultQuery("env", "default") + env, err := resolveAgentEnv(c, h.settings) + if err != nil { + dto.ResponseError(c, 400, err.Error(), nil) + return + } tenantCtx, err := requireTenantContext(c) if err != nil { 
dto.ResponseError(c, 400, err.Error(), nil) return } tenantRef := tenantCtx.UUIDPtr() - agentID, err := utils.ParseUintID(c.DefaultQuery("agent_id", "0")) - if err != nil || agentID == 0 { - dto.ResponseError(c, 400, "agent_id 必填", nil) - return + var agentID uint64 + if agentUUIDStr := strings.TrimSpace(c.Query("agent_uuid")); agentUUIDStr != "" { + agentUUID, err := uuid.Parse(agentUUIDStr) + if err != nil { + dto.ResponseError(c, 400, "agent_uuid 非法", err) + return + } + exist, err := h.ag.GetByUUID(c.Request.Context(), env, tenantRef, agentUUID) + if err != nil { + dto.ResponseError(c, 404, "未找到指定的 Agent", err) + return + } + agentID = exist.ID + } else { + id, err := utils.ParseUintID(c.DefaultQuery("agent_id", "0")) + if err != nil || id == 0 { + dto.ResponseError(c, 400, "agent_uuid/agent_id 必填", nil) + return + } + agentID = id } var statuses []string @@ -132,20 +189,24 @@ func (h *AgentSessionHandler) ListSessions(c *gin.Context) { // GET /agents/sessions/:id func (h *AgentSessionHandler) GetSession(c *gin.Context) { - env := c.DefaultQuery("env", "default") - tenantCtx, err := requireTenantContext(c) + env, err := resolveAgentEnv(c, h.settings) if err != nil { dto.ResponseError(c, 400, err.Error(), nil) return } - tenantRef := tenantCtx.UUIDPtr() - sid, err := utils.ParseUintID(c.Param("id")) + tenantCtx, err := requireTenantContext(c) if err != nil { - dto.ResponseError(c, 400, "id 非法", nil) + dto.ResponseError(c, 400, err.Error(), nil) return } - - out, err := h.his.FindSessionByID(c.Request.Context(), env, tenantRef, sid) + tenantRef := tenantCtx.UUIDPtr() + idParam := strings.TrimSpace(c.Param("id")) + var out *dbmodel.AgentChatSession + if id, parseErr := utils.ParseUintID(idParam); parseErr == nil && id > 0 { + out, err = h.his.FindSessionByID(c.Request.Context(), env, tenantRef, id) + } else { + out, err = h.his.FindSessionByUUID(c.Request.Context(), env, tenantRef, idParam) + } if err != nil { dto.ResponseError(c, 404, "未找到", err) return @@ 
-160,15 +221,27 @@ func (h *AgentSessionHandler) UpdateSession(c *gin.Context) { dto.ResponseValidationError(c, err) return } - env := c.DefaultQuery("env", "default") + env, err := resolveAgentEnv(c, h.settings) + if err != nil { + dto.ResponseError(c, 400, err.Error(), nil) + return + } tenantCtx, err := requireTenantContext(c) if err != nil { dto.ResponseError(c, 400, err.Error(), nil) return } tenantRef := tenantCtx.UUIDPtr() - sid, err := utils.ParseUintID(c.Param("id")) - if err != nil { + idParam := strings.TrimSpace(c.Param("id")) + var sid uint64 + if id, parseErr := utils.ParseUintID(idParam); parseErr == nil && id > 0 { + sid = id + } else { + if sess, findErr := h.his.FindSessionByUUID(c.Request.Context(), env, tenantRef, idParam); findErr == nil && sess != nil { + sid = sess.ID + } + } + if sid == 0 { dto.ResponseError(c, 400, "id 非法", nil) return } @@ -182,15 +255,27 @@ func (h *AgentSessionHandler) UpdateSession(c *gin.Context) { // POST /agents/sessions/:id/archive func (h *AgentSessionHandler) ArchiveSession(c *gin.Context) { - env := c.DefaultQuery("env", "default") + env, err := resolveAgentEnv(c, h.settings) + if err != nil { + dto.ResponseError(c, 400, err.Error(), nil) + return + } tenantCtx, err := requireTenantContext(c) if err != nil { dto.ResponseError(c, 400, err.Error(), nil) return } tenantRef := tenantCtx.UUIDPtr() - sid, err := utils.ParseUintID(c.Param("id")) - if err != nil { + idParam := strings.TrimSpace(c.Param("id")) + var sid uint64 + if id, parseErr := utils.ParseUintID(idParam); parseErr == nil && id > 0 { + sid = id + } else { + if sess, findErr := h.his.FindSessionByUUID(c.Request.Context(), env, tenantRef, idParam); findErr == nil && sess != nil { + sid = sess.ID + } + } + if sid == 0 { dto.ResponseError(c, 400, "id 非法", nil) return } @@ -204,19 +289,31 @@ func (h *AgentSessionHandler) ArchiveSession(c *gin.Context) { // DELETE /agents/sessions/:id func (h *AgentSessionHandler) DeleteSession(c *gin.Context) { - // 软删 - 
sid, err := utils.ParseUintID(c.Param("id")) + env, err := resolveAgentEnv(c, h.settings) if err != nil { - dto.ResponseError(c, 400, "id 非法", nil) + dto.ResponseError(c, 400, err.Error(), nil) return } - env := c.DefaultQuery("env", "default") tenantCtx, err := requireTenantContext(c) if err != nil { dto.ResponseError(c, 400, err.Error(), nil) return } tenantRef := tenantCtx.UUIDPtr() + // 软删 + idParam := strings.TrimSpace(c.Param("id")) + var sid uint64 + if id, parseErr := utils.ParseUintID(idParam); parseErr == nil && id > 0 { + sid = id + } else { + if sess, findErr := h.his.FindSessionByUUID(c.Request.Context(), env, tenantRef, idParam); findErr == nil && sess != nil { + sid = sess.ID + } + } + if sid == 0 { + dto.ResponseError(c, 400, "id 非法", nil) + return + } if err := h.his.DeleteSession(c.Request.Context(), env, tenantRef, sid); err != nil { dto.ResponseError(c, 400, err.Error(), nil) return @@ -226,15 +323,27 @@ func (h *AgentSessionHandler) DeleteSession(c *gin.Context) { // GET /agents/sessions/:id/messages?env=...&after_id=0&limit=200 func (h *AgentSessionHandler) ListMessages(c *gin.Context) { - env := c.DefaultQuery("env", "default") + env, err := resolveAgentEnv(c, h.settings) + if err != nil { + dto.ResponseError(c, 400, err.Error(), nil) + return + } tenantCtx, err := requireTenantContext(c) if err != nil { dto.ResponseError(c, 400, err.Error(), nil) return } tenantRef := tenantCtx.UUIDPtr() - sid, err := utils.ParseUintID(c.Param("id")) - if err != nil { + idParam := strings.TrimSpace(c.Param("id")) + var sid uint64 + if id, parseErr := utils.ParseUintID(idParam); parseErr == nil && id > 0 { + sid = id + } else { + if sess, findErr := h.his.FindSessionByUUID(c.Request.Context(), env, tenantRef, idParam); findErr == nil && sess != nil { + sid = sess.ID + } + } + if sid == 0 { dto.ResponseError(c, 400, "id 非法", nil) return } @@ -257,6 +366,14 @@ func (h *AgentSessionHandler) AppendMessage(c *gin.Context) { dto.ResponseValidationError(c, err) 
return } + env, err := resolveAgentEnv(c, h.settings) + if err != nil { + dto.ResponseError(c, 400, err.Error(), nil) + return + } + if strings.TrimSpace(req.Env) == "" || strings.EqualFold(strings.TrimSpace(req.Env), "default") { + req.Env = env + } tenantCtx, err := requireTenantContext(c) if err != nil { dto.ResponseError(c, 400, err.Error(), nil) @@ -282,19 +399,24 @@ func (h *AgentSessionHandler) AppendMessage(c *gin.Context) { // (可选)触发一次“超限检查+摘要” func (h *AgentSessionHandler) SummarizeIfNeeded(c *gin.Context) { - env := c.DefaultQuery("env", "default") - tenantCtx, err := requireTenantContext(c) + env, err := resolveAgentEnv(c, h.settings) if err != nil { dto.ResponseError(c, 400, err.Error(), nil) return } - tenantRef := tenantCtx.UUIDPtr() - sid, err := utils.ParseUintID(c.Param("id")) + tenantCtx, err := requireTenantContext(c) if err != nil { - dto.ResponseError(c, 400, "id 非法", nil) + dto.ResponseError(c, 400, err.Error(), nil) return } - sess, err := h.his.FindSessionByID(c.Request.Context(), env, tenantRef, sid) + tenantRef := tenantCtx.UUIDPtr() + idParam := strings.TrimSpace(c.Param("id")) + var sess *dbmodel.AgentChatSession + if id, parseErr := utils.ParseUintID(idParam); parseErr == nil && id > 0 { + sess, err = h.his.FindSessionByID(c.Request.Context(), env, tenantRef, id) + } else { + sess, err = h.his.FindSessionByUUID(c.Request.Context(), env, tenantRef, idParam) + } if err != nil { dto.ResponseError(c, 404, "未找到", err) return diff --git a/backend/internal/transport/http/admin/agent/api.go b/backend/internal/transport/http/admin/agent/api.go index 59ac5826..187e5cbc 100644 --- a/backend/internal/transport/http/admin/agent/api.go +++ b/backend/internal/transport/http/admin/agent/api.go @@ -29,6 +29,8 @@ func RegisterAPIRoutes(publicGroup *gin.RouterGroup, protectedGroup *gin.RouterG // 新增:POST 普通 Chat(非流) agentGroup.POST("/invoke", chatH.Invoke) + agentGroup.POST("/sessions/:id/invoke", chatH.InvokeSession) + 
agentGroup.GET("/sessions/:id/stream/sse", chatH.StreamSessionSSE) agentGroup.POST("/sessions", sessionH.CreateSession) agentGroup.GET("/sessions", sessionH.ListSessions) @@ -58,20 +60,20 @@ func RegisterAPIRoutes(publicGroup *gin.RouterGroup, protectedGroup *gin.RouterG // 智能体 CRUD agentAdminGroup.POST("", agentH.CreateAgent) agentAdminGroup.GET("", agentH.ListAgents) - agentAdminGroup.GET("/:id", agentH.GetAgent) - agentAdminGroup.PATCH("/:id", agentH.UpdateAgent) - agentAdminGroup.POST("/:id/enable", agentH.EnableAgent) - agentAdminGroup.POST("/:id/disable", agentH.DisableAgent) + agentAdminGroup.GET("/:uuid", agentH.GetAgent) + agentAdminGroup.PATCH("/:uuid", agentH.UpdateAgent) + agentAdminGroup.POST("/:uuid/enable", agentH.EnableAgent) + agentAdminGroup.POST("/:uuid/disable", agentH.DisableAgent) - agentAdminGroup.POST("/:id/shares", shareH.CreateShare) + agentAdminGroup.POST("/:uuid/shares", shareH.CreateShare) agentAdminGroup.POST("/shares/:share_id/revoke", shareH.RevokeShare) - agentAdminGroup.DELETE("/:id", agentH.DeleteAgent) + agentAdminGroup.DELETE("/:uuid", agentH.DeleteAgent) // 智能体 AI 配置 - agentAdminGroup.GET("/:id/ai-setting", agentH.GetAgentAISetting) - agentAdminGroup.PUT("/:id/ai-setting", agentH.UpsertAgentAISetting) - agentAdminGroup.DELETE("/:id/ai-setting", agentH.DeleteAgentAISetting) - agentAdminGroup.POST("/:id/health-check", agentH.AgentHealthCheck) + agentAdminGroup.GET("/:uuid/ai-setting", agentH.GetAgentAISetting) + agentAdminGroup.PUT("/:uuid/ai-setting", agentH.UpsertAgentAISetting) + agentAdminGroup.DELETE("/:uuid/ai-setting", agentH.DeleteAgentAISetting) + agentAdminGroup.POST("/:uuid/health-check", agentH.AgentHealthCheck) tenantFormsGroup := agentAdminGroup.Group("/tenant/forms") { diff --git a/backend/internal/transport/http/admin/agent/chat_handler.go b/backend/internal/transport/http/admin/agent/chat_handler.go index 8903df6e..54daf6d8 100644 --- a/backend/internal/transport/http/admin/agent/chat_handler.go +++ 
b/backend/internal/transport/http/admin/agent/chat_handler.go @@ -5,12 +5,13 @@ import ( "context" "encoding/json" "fmt" - "github.com/ArtisanCloud/PowerX/internal/server/agent/runtime" "strings" "time" "github.com/ArtisanCloud/PowerX/internal/app/shared" + capservice "github.com/ArtisanCloud/PowerX/internal/service/capability_registry" "github.com/ArtisanCloud/PowerX/internal/server/agent" + "github.com/ArtisanCloud/PowerX/internal/server/agent/runtime" dbmodel "github.com/ArtisanCloud/PowerX/internal/server/agent/persistence/model" agentschema "github.com/ArtisanCloud/PowerX/internal/server/agent/schemas" agentSvc "github.com/ArtisanCloud/PowerX/internal/service/agent" @@ -19,17 +20,74 @@ import ( dto "github.com/ArtisanCloud/PowerX/pkg/dto" "github.com/ArtisanCloud/PowerX/pkg/utils" "github.com/gin-gonic/gin" + "github.com/google/uuid" ) type AgentChatHandler struct { his *agentSvc.ChatHistoryService cfgResolver *agentSvc.ChatConfigResolver + ag *agentSvc.AgentService + audit *capservice.AuditService + settings *agentSvc.AgentSettingService +} + +type agentInvokeRequest struct { + AgentID string `json:"agent_id"` + SessionID string `json:"session_id,omitempty"` + Message string `json:"message"` + Meta map[string]interface{} `json:"meta,omitempty"` +} + +type agentInvokeSink struct { + buf strings.Builder + final string +} + +func (s *agentInvokeSink) Emit(event string, payload any) error { + switch event { + case dto.EventToken: + if m, ok := payload.(map[string]any); ok { + if d, ok := m["delta"].(string); ok && d != "" { + s.buf.WriteString(d) + } + } + case dto.EventFinal: + if text := extractInvokeAssistantText(payload); strings.TrimSpace(text) != "" { + s.final = text + } + } + return nil +} + +func (s *agentInvokeSink) Reply() string { + if strings.TrimSpace(s.final) != "" { + return s.final + } + return s.buf.String() +} + +func extractInvokeAssistantText(payload any) string { + switch m := payload.(type) { + case map[string]any: + if d, ok := 
m["data"].(map[string]any); ok { + if s, ok := d["content"].(string); ok { + return s + } + } + if s, ok := m["content"].(string); ok { + return s + } + } + return "" } func NewAgentChatHandler(dep *shared.Deps) *AgentChatHandler { return &AgentChatHandler{ his: agentSvc.NewChatHistoryService(dep.DB), cfgResolver: agentSvc.NewChatConfigResolver(dep.DB), + ag: agentSvc.NewAgentService(dep.DB), + audit: dep.CapabilityRegistryAudit, + settings: agentSvc.NewAgentSettingService(dep.DB), } } @@ -146,42 +204,88 @@ func (h *AgentChatHandler) StreamSSE(c *gin.Context) { return } // 2) 解析入参 & 会话(保持你现有逻辑) - env := c.DefaultQuery("env", "default") - q := strings.TrimSpace(utils.FirstNonEmpty(c.Query("q"), c.Query("message"))) + env, err := resolveAgentEnv(c, h.settings) + if err != nil { + dto.ResponseError(c, 400, err.Error(), nil) + return + } + getParam := func(key string) string { + if v, ok := c.Get(key); ok { + if s, ok := v.(string); ok && strings.TrimSpace(s) != "" { + return strings.TrimSpace(s) + } + } + return strings.TrimSpace(c.Query(key)) + } + q := strings.TrimSpace(utils.FirstNonEmpty(getParam("q"), getParam("message"))) regenFromID, _ := utils.ParseUintID(strings.TrimSpace(c.Query("regen_from_message_id"))) if q == "" && regenFromID == 0 { dto.ResponseError(c, 400, "缺少 q(消息内容)", nil) return } - agentID, _ := utils.ParseUintID(strings.TrimSpace(c.Query("agent_id"))) tenantCtx, err := requireTenantContext(c) if err != nil { dto.ResponseError(c, 400, err.Error(), nil) return } tenantRef := tenantCtx.UUIDPtr() - uid := reqctx.GetUserID(c.Request.Context()) - - // 会话:session_id 优先,否则 sticky - var sess *dbmodel.AgentChatSession - if sidStr := strings.TrimSpace(c.Query("session_id")); sidStr != "" { - if sid, err := utils.ParseUintID(sidStr); err == nil && sid > 0 { - sess, _ = h.his.FindSessionByID(c, env, tenantRef, sid) + var agentID uint64 + if agentUUIDStr := getParam("agent_uuid"); agentUUIDStr != "" { + agentUUID, err := uuid.Parse(agentUUIDStr) + if err != 
nil { + dto.ResponseError(c, 400, "agent_uuid 非法", err) + return } - } - if sess == nil { - var err error - sess, err = h.his.GetOrCreateSession(c, env, tenantRef, agentID, uid, false, nil) + exist, err := h.ag.GetByUUID(c.Request.Context(), env, tenantRef, agentUUID) if err != nil { - dto.ResponseError(c, 500, "创建会话失败", err) + dto.ResponseError(c, 404, "未找到指定的 Agent", err) return } + agentID = exist.ID + } else { + id, _ := utils.ParseUintID(getParam("agent_id")) + agentID = id + } + // 会话:要求显式传入 session_id/session_uuid + var sess *dbmodel.AgentChatSession + if sidStr := getParam("session_id"); sidStr != "" { + sess, _ = h.resolveSessionByParam(c, env, tenantRef, sidStr) + } + if sess == nil { + if sidStr := getParam("session_uuid"); sidStr != "" { + sess, _ = h.resolveSessionByParam(c, env, tenantRef, sidStr) + } + } + if sess == nil { + dto.ResponseError(c, 400, "session_id 必填,请先创建会话", nil) + return + } + if agentID == 0 { + agentID = sess.AgentID + } + if agentID == 0 { + dto.ResponseError(c, 400, "agent_uuid/agent_id 必填", nil) + return + } + if _, err := h.ag.Get(c.Request.Context(), env, tenantRef, agentID); err != nil { + dto.ResponseError(c, 404, "未找到指定的 Agent", err) + return + } + // 若会话标题为空,则用首个问题生成标题(ChatGPT 风格) + if sess != nil && strings.TrimSpace(sess.Title) == "" && strings.TrimSpace(q) != "" { + title := runtime.MakeDefaultSessionTitle(q, 24) + _ = h.his.RenameSession(c, env, tenantRef, sess.ID, title) } // 3) 适配器 + Engine runtime.SetSSEHeaders(c) baseSink := runtime.NewSSESink(c) histSink := runtime.NewHistorySink(baseSink, h.his, c, env, tenantRef, sess, agentID, true) + startedAt := time.Now() + traceID := strings.TrimSpace(reqctx.GetTraceID(c.Request.Context())) + if traceID == "" { + traceID = uuid.NewString() + } // 支持“从某条 user 消息重新生成”:裁剪后续消息并以该消息内容作为 prompt if regenFromID > 0 { @@ -210,14 +314,15 @@ func (h *AgentChatHandler) StreamSSE(c *gin.Context) { clientMsgID := strings.TrimSpace(c.Query("client_msg_id")) userMsg, _ := 
h.his.AppendMessage(c, env, tenantRef, sess.ID, agentID, "user", q, "text", 0, 0, false, nil) if userMsg != nil { - _ = histSink.Emit(dto.EventMeta, map[string]any{ - "session_id": sess.ID, - "agent_id": agentID, - "user_message_id": userMsg.ID, - "client_msg_id": clientMsgID, - "user_message_role": "user", - }) - } + _ = histSink.Emit(dto.EventMeta, map[string]any{ + "session_id": sess.UUID.String(), + "session_id_num": sess.ID, + "agent_id": agentID, + "user_message_id": userMsg.ID, + "client_msg_id": clientMsgID, + "user_message_role": "user", + }) + } } cfg, cfgErr := h.cfgResolver.ResolveForAgentChat(c.Request.Context(), env, tenantRef, agentID, nil) @@ -226,8 +331,19 @@ func (h *AgentChatHandler) StreamSSE(c *gin.Context) { _ = histSink.Emit(dto.EventEnd, map[string]any{"success": false}) return } + // 让前端/排障能看到“实际执行用的 provider/model”,避免把模型自报当成事实。 + _ = histSink.Emit(dto.EventMeta, map[string]any{ + "env": env, + "llm_provider": strings.TrimSpace(cfg.Provider), + "llm_model": strings.TrimSpace(cfg.ModelName), + }) - _ = runtime.NewEngine().Run(c.Request.Context(), q, cfg, "", histSink) // explicitFlow 传空,交给意图/plan 选择 + err = runtime.NewEngine().Run(c.Request.Context(), q, cfg, "", histSink) // explicitFlow 传空,交给意图/plan 选择 + status := "completed" + if err != nil { + status = "failed" + } + h.recordAgentInvocation(c.Request.Context(), agentStreamCapability, tenantCtx.UUID(), agentID, sess.ID, q, traceID, "rest", status, err, time.Since(startedAt)) } // ---- 核心 ---- @@ -251,6 +367,16 @@ func (h *AgentChatHandler) streamCore(c *gin.Context, req dto.StreamChatRequest) tenantUUID := tenantCtx.UUID() userID := reqctx.GetUserID(c.Request.Context()) agentID, _ := utils.AsUint64(req.Context["agent_id"]) + if agentID == 0 { + c.SSEvent(dto.EventError, gin.H{"message": "agent_id 缺失"}) + c.SSEvent(dto.EventEnd, gin.H{"ok": false}) + return + } + if _, err := h.ag.Get(ctx, env, tenantRef, agentID); err != nil { + c.SSEvent(dto.EventError, gin.H{"message": "未找到指定的 Agent", 
"detail": err.Error()}) + c.SSEvent(dto.EventEnd, gin.H{"ok": false}) + return + } // 会话:优先 session_id -> 否则 sticky(env, tenant, agent, user) var sess *dbmodel.AgentChatSession @@ -367,6 +493,22 @@ func (h *AgentChatHandler) streamCore(c *gin.Context, req dto.StreamChatRequest) // Tap:增量与最终回写 var buf strings.Builder + startedAt := time.Now() + traceID := strings.TrimSpace(reqctx.GetTraceID(ctx)) + if traceID == "" { + traceID = strings.TrimSpace(meta.TraceID) + } + if traceID == "" { + traceID = uuid.NewString() + } + recorded := false + recordOnce := func(status string, err error) { + if recorded { + return + } + recorded = true + h.recordAgentInvocation(ctx, agentStreamCapability, tenantUUID, agentID, sess.ID, msg, traceID, "rest", status, err, time.Since(startedAt)) + } hooks := dto.SSEHooks{ HeartbeatInterval: 25 * time.Second, OnStart: func(fid, eid string) { @@ -383,6 +525,10 @@ func (h *AgentChatHandler) streamCore(c *gin.Context, req dto.StreamChatRequest) _, _ = h.his.AppendMessage(c.Request.Context(), env, tenantRef, sess.ID, agentID, "assistant", text, "text", 0, 0, false, nil) } _, _ = h.his.SummarizeIfNeeded(c.Request.Context(), env, tenantRef, sess) + recordOnce("completed", nil) + }, + OnError: func(err error) { + recordOnce("failed", err) }, } _ = dto.WriteToSSEWithTap(c, flowID, execID, sr, hooks) @@ -390,24 +536,186 @@ func (h *AgentChatHandler) streamCore(c *gin.Context, req dto.StreamChatRequest) // 非流式(保留) func (h *AgentChatHandler) Invoke(c *gin.Context) { - var req dto.ChatRequest + var req agentInvokeRequest if err := dto.ValidateRequestWithContext(c, &req); err != nil { dto.ResponseValidationError(c, err) return } - if req.Config != nil && req.Config.EnableStream { - dto.ResponseError(c, 400, "该接口不支持流式,请改用 /agents/stream/sse 或 /agents/stream/ws", nil) + h.invokeWithSession(c, req, "") +} + +// POST /agents/sessions/:id/invoke +func (h *AgentChatHandler) InvokeSession(c *gin.Context) { + var req agentInvokeRequest + if err := 
dto.ValidateRequestWithContext(c, &req); err != nil { + dto.ResponseValidationError(c, err) return } - reply := "(非流式)已收到:" + strings.TrimSpace(req.Message) - dto.ResponseSuccess(c, dto.ChatData{ - Content: reply, - Role: "assistant", - Metadata: map[string]any{"framework": "eino"}, - Timestamp: time.Now().Unix(), + h.invokeWithSession(c, req, strings.TrimSpace(c.Param("id"))) +} + +// GET /agents/sessions/:id/stream/sse?q=...&env=... +func (h *AgentChatHandler) StreamSessionSSE(c *gin.Context) { + env, err := resolveAgentEnv(c, h.settings) + if err != nil { + dto.ResponseError(c, 400, err.Error(), nil) + return + } + tenantCtx, err := requireTenantContext(c) + if err != nil { + dto.ResponseError(c, 400, err.Error(), nil) + return + } + tenantRef := tenantCtx.UUIDPtr() + sess, err := h.resolveSessionByParam(c, env, tenantRef, strings.TrimSpace(c.Param("id"))) + if err != nil || sess == nil { + dto.ResponseError(c, 404, "未找到指定的 Session", err) + return + } + + c.Set("agent_id", fmt.Sprintf("%d", sess.AgentID)) + c.Set("session_id", fmt.Sprintf("%d", sess.ID)) + c.Set("session_uuid", sess.UUID.String()) + + h.StreamSSE(c) +} + +func (h *AgentChatHandler) invokeWithSession(c *gin.Context, req agentInvokeRequest, sessionParam string) { + msg := strings.TrimSpace(req.Message) + if msg == "" { + dto.ResponseError(c, 400, "message 不能为空", nil) + return + } + + tenantCtx, err := requireTenantContext(c) + if err != nil { + dto.ResponseError(c, 400, err.Error(), nil) + return + } + tenantRef := tenantCtx.UUIDPtr() + tenantUUID := tenantCtx.UUID() + env, err := resolveAgentEnv(c, h.settings) + if err != nil { + dto.ResponseError(c, 400, err.Error(), nil) + return + } + var agentID uint64 + agentIDStr := strings.TrimSpace(req.AgentID) + if agentIDStr != "" { + if agentUUID, err := uuid.Parse(agentIDStr); err == nil { + exist, err := h.ag.GetByUUID(c.Request.Context(), env, tenantRef, agentUUID) + if err != nil { + dto.ResponseError(c, 404, "未找到指定的 Agent", err) + return + } + 
agentID = exist.ID + } else { + id, _ := utils.ParseUintID(agentIDStr) + agentID = id + } + } + + // 支持 session_id(body)或 path :id(uuid/数字) + var sess *dbmodel.AgentChatSession + if sessionParam != "" { + sess, _ = h.resolveSessionByParam(c, env, tenantRef, sessionParam) + if sess != nil { + agentID = sess.AgentID + } + } + if sess == nil && strings.TrimSpace(req.SessionID) != "" { + sess, _ = h.resolveSessionByParam(c, env, tenantRef, strings.TrimSpace(req.SessionID)) + } + if sess == nil { + dto.ResponseError(c, 400, "session_id 必填,请先创建会话", nil) + return + } + agentID = sess.AgentID + if _, err := h.ag.Get(c.Request.Context(), env, tenantRef, agentID); err != nil { + dto.ResponseError(c, 404, "未找到指定的 Agent", err) + return + } + if sess != nil && strings.TrimSpace(sess.Title) == "" { + title := runtime.MakeDefaultSessionTitle(msg, 24) + _ = h.his.RenameSession(c, env, tenantRef, sess.ID, title) + } + + _, _ = h.his.AppendMessage(c.Request.Context(), env, tenantRef, sess.ID, agentID, "user", msg, "text", 0, 0, false, nil) + + cfg, cfgErr := h.cfgResolver.ResolveForAgentChat(c.Request.Context(), env, tenantRef, agentID, nil) + if cfgErr != nil { + dto.ResponseError(c, 400, cfgErr.Error(), nil) + return + } + + startedAt := time.Now() + traceID := strings.TrimSpace(reqctx.GetTraceID(c.Request.Context())) + if traceID == "" { + traceID = uuid.NewString() + } + baseSink := &agentInvokeSink{} + histSink := runtime.NewHistorySink(baseSink, h.his, c, env, tenantRef, sess, agentID, true) + err = runtime.NewEngine().Run(c.Request.Context(), msg, cfg, "", histSink) + status := "completed" + if err != nil { + status = "failed" + } + h.recordAgentInvocation(c.Request.Context(), agentInvokeCapability, tenantUUID, agentID, sess.ID, msg, traceID, "rest", status, err, time.Since(startedAt)) + if err != nil { + dto.ResponseError(c, 502, "agent invoke failed", err) + return + } + + reply := baseSink.Reply() + dto.ResponseSuccess(c, gin.H{ + "session_id": sess.UUID.String(), + 
"agent_id": req.AgentID, + "reply": reply, }) } +func (h *AgentChatHandler) resolveSessionByParam(c *gin.Context, env string, tenantRef *string, idParam string) (*dbmodel.AgentChatSession, error) { + idParam = strings.TrimSpace(idParam) + if idParam == "" { + return nil, nil + } + if id, parseErr := utils.ParseUintID(idParam); parseErr == nil && id > 0 { + return h.his.FindSessionByID(c.Request.Context(), env, tenantRef, id) + } + return h.his.FindSessionByUUID(c.Request.Context(), env, tenantRef, idParam) +} +func resolveAgentEnv(c *gin.Context, settings *agentSvc.AgentSettingService) (string, error) { + if c == nil { + return "", fmt.Errorf("env missing") + } + env := strings.TrimSpace(reqctx.GetEnv(c.Request.Context())) + if strings.EqualFold(env, "default") { + env = "" + } + if env == "" { + if v := strings.TrimSpace(c.Query("env")); v != "" && !strings.EqualFold(v, "default") { + env = v + } + } + if env == "" { + if v := strings.TrimSpace(c.GetHeader("X-PowerX-Env")); v != "" && !strings.EqualFold(v, "default") { + env = v + } + } + if env == "" { + tenantCtx, err := requireTenantContext(c) + if err == nil && settings != nil { + if v, ok, _ := settings.GetTenantCurrentAIEnv(c.Request.Context(), tenantCtx.UUID()); ok { + env = v + } + } + } + if strings.TrimSpace(env) == "" { + return "", fmt.Errorf("env missing") + } + return env, nil +} + /* ---------------- helpers ---------------- */ func setSSEHeaders(c *gin.Context) { @@ -416,3 +724,44 @@ func setSSEHeaders(c *gin.Context) { c.Header("Connection", "keep-alive") c.Header("X-Accel-Buffering", "no") } + +const ( + agentInvokeCapability = "com.corex.agent.invoke" + agentStreamCapability = "com.corex.agent.stream" + platformPluginID = "corex.platform" +) + +func (h *AgentChatHandler) recordAgentInvocation(ctx context.Context, capabilityID, tenantUUID string, agentID, sessionID uint64, message, traceID, protocol, status string, err error, latency time.Duration) { + if h == nil || h.audit == nil { + return + } 
+ if strings.TrimSpace(tenantUUID) == "" { + return + } + payload := map[string]interface{}{ + "agent_id": agentID, + "session_id": sessionID, + "message": message, + } + response := map[string]interface{}{ + "status": status, + } + errorSummary := "" + if err != nil { + errorSummary = err.Error() + } + h.audit.RecordInvocation(ctx, capservice.InvocationAuditInput{ + TraceID: traceID, + TenantUUID: tenantUUID, + PluginID: platformPluginID, + CapabilityID: capabilityID, + PreferredProtocol: protocol, + ProtocolUsed: protocol, + FallbackUsed: false, + Status: status, + RequestPayload: payload, + ResponsePayload: response, + ErrorSummary: errorSummary, + Latency: latency, + }) +} diff --git a/backend/internal/transport/http/admin/agent/setting_handler.go b/backend/internal/transport/http/admin/agent/setting_handler.go index 07a77d3b..23f60f2f 100644 --- a/backend/internal/transport/http/admin/agent/setting_handler.go +++ b/backend/internal/transport/http/admin/agent/setting_handler.go @@ -1,20 +1,26 @@ package agent import ( + "context" "errors" "fmt" "net/http" + "strconv" "strings" "time" + "github.com/ArtisanCloud/PowerX/config" "github.com/ArtisanCloud/PowerX/internal/app/shared" "github.com/ArtisanCloud/PowerX/internal/server/agent/catalog" "github.com/ArtisanCloud/PowerX/internal/server/agent/contract" dbmodel "github.com/ArtisanCloud/PowerX/internal/server/agent/persistence/model" agentSvc "github.com/ArtisanCloud/PowerX/internal/service/agent" auditsvc "github.com/ArtisanCloud/PowerX/pkg/corex/audit" + "github.com/ArtisanCloud/PowerX/pkg/corex/db/migration" dbmaudit "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/audit" + pgvectorcfg "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/vectorstore/pgvector" "github.com/ArtisanCloud/PowerX/pkg/utils" + "github.com/ArtisanCloud/PowerX/pkg/utils/logger" dtoRequest "github.com/ArtisanCloud/PowerX/pkg/dto" "github.com/gin-gonic/gin" @@ -46,6 +52,7 @@ func NewAgentSettingHandler(deps *shared.Deps) 
*AgentSettingHandler { type baseConn struct { Name string `form:"name"` Provider string `json:"provider" validate:"required"` + App string `json:"app"` Model string `json:"model" validate:"required"` AuthMode string `json:"authMode"` APIKey string `json:"apiKey"` @@ -217,8 +224,12 @@ func (h *AgentSettingHandler) listProviders(c *gin.Context) { } type providerView struct { - ID string `json:"ID"` - Name string `json:"Name"` + ID string `json:"ID"` + Name string `json:"Name"` + Apps []struct { + ID string `json:"id"` + Name string `json:"name"` + } `json:"apps,omitempty"` Configured bool `json:"configured"` Health *agentSvc.ProviderHealthRecord `json:"health,omitempty"` Auth *struct { @@ -307,9 +318,20 @@ func (h *AgentSettingHandler) listProviders(c *gin.Context) { }(), } } + apps := make([]struct { + ID string `json:"id"` + Name string `json:"name"` + }, 0, len(it.Apps)) + for _, a := range it.Apps { + apps = append(apps, struct { + ID string `json:"id"` + Name string `json:"name"` + }{ID: a.ID, Name: a.Name}) + } out = append(out, providerView{ ID: it.ID, Name: it.Name, + Apps: apps, Configured: ok, Health: hr, Auth: authView, @@ -328,6 +350,7 @@ func (h *AgentSettingHandler) listModels(c *gin.Context) { env := c.DefaultQuery("env", "dev") mod := c.Query("modality") prov := c.Query("provider") + app := c.Query("app") tenantCtx, err := requireTenantContext(c) if err != nil { @@ -335,7 +358,7 @@ func (h *AgentSettingHandler) listModels(c *gin.Context) { return } - models, err := h.svc.ModelsForTenant(c.Request.Context(), env, tenantCtx.UUIDPtr(), mod, prov) + models, err := h.svc.ModelsForTenant(c.Request.Context(), env, tenantCtx.UUIDPtr(), mod, prov, app) if err != nil { dtoRequest.ResponseError(c, http.StatusBadRequest, err.Error(), nil) return @@ -343,6 +366,18 @@ func (h *AgentSettingHandler) listModels(c *gin.Context) { dtoRequest.ResponseSuccess(c, gin.H{"models": models}) } +func applyAppToModel(app, model string) string { + a := strings.TrimSpace(app) + m 
:= strings.TrimSpace(model) + if a == "" || m == "" { + return m + } + if strings.Contains(m, ":") { + return m + } + return a + ":" + m +} + // ---------- Settings ---------- func (h *AgentSettingHandler) saveSettings(c *gin.Context) { @@ -351,6 +386,31 @@ func (h *AgentSettingHandler) saveSettings(c *gin.Context) { dtoRequest.ResponseValidationError(c, err) return } + // Normalize app:model if app provided + if req.LLM != nil { + req.LLM.Model = applyAppToModel(req.LLM.App, req.LLM.Model) + } + if req.Image != nil { + req.Image.Model = applyAppToModel(req.Image.App, req.Image.Model) + } + if req.Embedding != nil { + req.Embedding.Model = applyAppToModel(req.Embedding.App, req.Embedding.Model) + } + if req.Video != nil { + req.Video.Model = applyAppToModel(req.Video.App, req.Video.Model) + } + if req.Model3D != nil { + req.Model3D.Model = applyAppToModel(req.Model3D.App, req.Model3D.Model) + } + if req.AudioTTS != nil { + req.AudioTTS.Model = applyAppToModel(req.AudioTTS.App, req.AudioTTS.Model) + } + if req.AudioASR != nil { + req.AudioASR.Model = applyAppToModel(req.AudioASR.App, req.AudioASR.Model) + } + if req.Rerank != nil { + req.Rerank.Model = applyAppToModel(req.Rerank.App, req.Rerank.Model) + } tenantCtx, err := requireTenantContext(c) if err != nil { dtoRequest.ResponseError(c, http.StatusBadRequest, err.Error(), nil) @@ -449,15 +509,36 @@ func (h *AgentSettingHandler) saveSettings(c *gin.Context) { } return scheme }(), - Data: credData, + Data: credData, + } + if req.Modality == contract.ModEmbed && req.Embedding != nil && prof != nil { + if existing, err := h.svc.GetProfile(c.Request.Context(), req.Env, tenantRef, "embedding", prof.Provider, prof.Model); err == nil && existing != nil { + if prof.Defaults == nil { + prof.Defaults = datatypes.JSONMap{} + } + if existing.Defaults != nil { + if _, ok := prof.Defaults["dimensions"]; !ok { + if dim, ok := existing.Defaults["dimensions"]; ok { + prof.Defaults["dimensions"] = dim + } + } + } + // 
保留测试探针结果(cap_cache),避免保存配置把 probed_at 清空。 + if existing.CapCache != nil && (prof.CapCache == nil || len(prof.CapCache) == 0) { + prof.CapCache = existing.CapCache + } + } } if err := h.svc.SaveCredentialAndProfile(c.Request.Context(), req.Env, tenantRef, cred, prof, true); err != nil { dtoRequest.ResponseError(c, http.StatusInternalServerError, "保存失败", err) return } - // ✅ 产品语义:当 LLM 配置保存成功,更新“租户当前 AI 环境” + // ✅ 产品语义:任意模态保存成功后,更新“租户当前 AI 环境” + if strings.TrimSpace(req.Env) != "" { + _ = h.svc.SetTenantCurrentAIEnv(c.Request.Context(), tenantUUID, req.Env) + } + // 保存成功代表连通性校验通过:同步写入“可用 Provider”缓存(仅 LLM 维持原语义) if req.Modality == contract.ModLLM { - // 保存成功代表连通性校验通过:同步写入“可用 Provider”缓存 _ = h.svc.UpsertTenantProviderHealth( c.Request.Context(), tenantUUID, @@ -467,7 +548,6 @@ func (h *AgentSettingHandler) saveSettings(c *gin.Context) { "healthy", "ok", ) - _ = h.svc.SetTenantCurrentAIEnv(c.Request.Context(), tenantUUID, req.Env) } dtoRequest.ResponseSuccess(c, gin.H{"ok": true, "tenant_uuid": tenantUUID}) } @@ -480,6 +560,32 @@ func (h *AgentSettingHandler) testConnection(c *gin.Context) { dtoRequest.ResponseValidationError(c, err) return } + logger.InfoF(c.Request.Context(), "[agent_setting] test_connection enter path=%s", c.FullPath()) + // Normalize app:model if app provided + if req.LLM != nil { + req.LLM.Model = applyAppToModel(req.LLM.App, req.LLM.Model) + } + if req.Image != nil { + req.Image.Model = applyAppToModel(req.Image.App, req.Image.Model) + } + if req.Embedding != nil { + req.Embedding.Model = applyAppToModel(req.Embedding.App, req.Embedding.Model) + } + if req.Video != nil { + req.Video.Model = applyAppToModel(req.Video.App, req.Video.Model) + } + if req.Model3D != nil { + req.Model3D.Model = applyAppToModel(req.Model3D.App, req.Model3D.Model) + } + if req.AudioTTS != nil { + req.AudioTTS.Model = applyAppToModel(req.AudioTTS.App, req.AudioTTS.Model) + } + if req.AudioASR != nil { + req.AudioASR.Model = applyAppToModel(req.AudioASR.App, 
req.AudioASR.Model) + } + if req.Rerank != nil { + req.Rerank.Model = applyAppToModel(req.Rerank.App, req.Rerank.Model) + } tenantCtx, err := requireTenantContext(c) if err != nil { dtoRequest.ResponseError(c, http.StatusBadRequest, err.Error(), nil) @@ -549,7 +655,13 @@ func (h *AgentSettingHandler) testConnection(c *gin.Context) { dtoRequest.ResponseError(c, http.StatusBadRequest, "image 配置不能为空", nil) return } - if err := h.svc.PingGeneric(c.Request.Context(), req.Env, tenantRef, req.Modality, req.Image.Provider, req.Image.Model, req.Image.BaseURL, req.Image.APIKey); err != nil { + if err := h.svc.PingImage( + c.Request.Context(), + req.Env, tenantRef, + req.Image.Provider, req.Image.Model, + req.Image.BaseURL, req.Image.APIKey, req.Image.SecretID, req.Image.SecretKey, + req.Image.Region, req.Image.Organization, + ); err != nil { _ = h.svc.UpsertTenantProviderHealth(c.Request.Context(), tenantUUID, req.Env, string(req.Modality), req.Image.Provider, "unhealthy", err.Error()) h.emitAuditEvent(c, tenantUUID, req.Env, auditOpTestConnection, req.Modality, req.Image.Provider, req.Image.Model, false, err.Error()) dtoRequest.ResponseError(c, http.StatusBadRequest, err.Error(), nil) @@ -564,16 +676,28 @@ func (h *AgentSettingHandler) testConnection(c *gin.Context) { dtoRequest.ResponseError(c, http.StatusBadRequest, "embedding 配置不能为空", nil) return } - if err := h.svc.PingGeneric(c.Request.Context(), req.Env, tenantRef, req.Modality, req.Embedding.Provider, req.Embedding.Model, req.Embedding.BaseURL, req.Embedding.APIKey); err != nil { + dim, err := h.svc.ProbeEmbeddingDimensionsPreferInput( + c.Request.Context(), + req.Env, tenantRef, + req.Embedding.Provider, req.Embedding.Model, + req.Embedding.BaseURL, req.Embedding.APIKey, + ) + if err != nil { _ = h.svc.UpsertTenantProviderHealth(c.Request.Context(), tenantUUID, req.Env, string(req.Modality), req.Embedding.Provider, "unhealthy", err.Error()) h.emitAuditEvent(c, tenantUUID, req.Env, auditOpTestConnection, 
req.Modality, req.Embedding.Provider, req.Embedding.Model, false, err.Error()) dtoRequest.ResponseError(c, http.StatusBadRequest, err.Error(), nil) return } + if err := h.ensureEmbeddingVectorTable(c.Request.Context(), dim); err != nil { + _ = h.svc.UpsertTenantProviderHealth(c.Request.Context(), tenantUUID, req.Env, string(req.Modality), req.Embedding.Provider, "unhealthy", err.Error()) + h.emitAuditEvent(c, tenantUUID, req.Env, auditOpTestConnection, req.Modality, req.Embedding.Provider, req.Embedding.Model, false, err.Error()) + dtoRequest.ResponseError(c, http.StatusInternalServerError, "embedding 向量表创建失败", err) + return + } _ = saveVerifiedCredential(req.Embedding.Provider, req.Embedding.APIKey, "", "", req.Embedding.BaseURL, req.Embedding.Region, req.Embedding.Organization, req.Embedding.AzureDeployment, req.Embedding.AuthMode) _ = h.svc.UpsertTenantProviderHealth(c.Request.Context(), tenantUUID, req.Env, string(req.Modality), req.Embedding.Provider, "healthy", "ok") h.emitAuditEvent(c, tenantUUID, req.Env, auditOpTestConnection, req.Modality, req.Embedding.Provider, req.Embedding.Model, true, "ok") - dtoRequest.ResponseSuccess(c, gin.H{"ok": true}) + dtoRequest.ResponseSuccess(c, gin.H{"ok": true, "dimensions": dim}) case contract.ModVideo: if req.Video == nil { dtoRequest.ResponseError(c, http.StatusBadRequest, "video 配置不能为空", nil) @@ -660,6 +784,31 @@ func (h *AgentSettingHandler) testQuickCall(c *gin.Context) { dtoRequest.ResponseValidationError(c, err) return } + // Normalize app:model if app provided + if req.LLM != nil { + req.LLM.Model = applyAppToModel(req.LLM.App, req.LLM.Model) + } + if req.Image != nil { + req.Image.Model = applyAppToModel(req.Image.App, req.Image.Model) + } + if req.Embedding != nil { + req.Embedding.Model = applyAppToModel(req.Embedding.App, req.Embedding.Model) + } + if req.Video != nil { + req.Video.Model = applyAppToModel(req.Video.App, req.Video.Model) + } + if req.Model3D != nil { + req.Model3D.Model = 
applyAppToModel(req.Model3D.App, req.Model3D.Model) + } + if req.AudioTTS != nil { + req.AudioTTS.Model = applyAppToModel(req.AudioTTS.App, req.AudioTTS.Model) + } + if req.AudioASR != nil { + req.AudioASR.Model = applyAppToModel(req.AudioASR.App, req.AudioASR.Model) + } + if req.Rerank != nil { + req.Rerank.Model = applyAppToModel(req.Rerank.App, req.Rerank.Model) + } tenantCtx, err := requireTenantContext(c) if err != nil { dtoRequest.ResponseError(c, http.StatusBadRequest, err.Error(), nil) @@ -897,6 +1046,9 @@ func buildEntitiesFromPayload(req *saveSettingsReq, tenantUUID *string) (credNam }, Tags: []string{"embedding"}, } + if req.Embedding.Dimensions <= 0 { + delete(prof.Defaults, "dimensions") + } case contract.ModVideo: if req.Video == nil { @@ -1112,6 +1264,57 @@ func describeRerankQuickCall(m *modRerank) string { m.Provider, m.Model, m.TopK, m.ReturnDocuments, m.MaxChunksPerDoc) } +func (h *AgentSettingHandler) ensureEmbeddingVectorTable(ctx context.Context, dim int) error { + if dim <= 0 { + return nil + } + cfg := config.GetGlobalConfig() + if cfg == nil { + return fmt.Errorf("global config unavailable") + } + driver := strings.TrimSpace(cfg.KnowledgeSpace.VectorStore.Driver) + if driver != "" && !strings.EqualFold(driver, "pgvector") { + return nil + } + pgCfg := pgvectorcfg.Config{ + DSN: strings.TrimSpace(cfg.KnowledgeSpace.VectorStore.PgVector.DSN), + Schema: strings.TrimSpace(cfg.KnowledgeSpace.VectorStore.PgVector.Schema), + Lists: cfg.KnowledgeSpace.VectorStore.PgVector.Lists, + }.WithDefaults() + + dsn := pgCfg.DSN + if dsn == "" { + dsn = strings.TrimSpace(cfg.Database.DSN) + } + if dsn == "" && strings.TrimSpace(cfg.Database.Host) != "" { + sslmode := strings.TrimSpace(cfg.Database.SSLMode) + if sslmode == "" { + sslmode = "disable" + } + tz := strings.TrimSpace(cfg.Database.Timezone) + if tz == "" { + tz = "UTC" + } + dsn = "host=" + strings.TrimSpace(cfg.Database.Host) + + " port=" + strconv.Itoa(cfg.Database.Port) + + " user=" + 
strings.TrimSpace(cfg.Database.UserName) + + " password=" + strings.TrimSpace(cfg.Database.Password) + + " dbname=" + strings.TrimSpace(cfg.Database.Database) + + " sslmode=" + sslmode + + " TimeZone=" + tz + } + if dsn == "" { + return fmt.Errorf("pgvector dsn is empty (configure knowledge_space.vector_store.pgvector.dsn or database.dsn)") + } + tableName := fmt.Sprintf("knowledge_vectors_v1_%d", dim) + logger.InfoF(ctx, "[agent_setting] ensure embedding vector table schema=%s table=%s dim=%d", pgCfg.Schema, tableName, dim) + if err := migration.EnsureKnowledgeVectorsPGVectorTable(ctx, dsn, pgCfg.Schema, tableName, dim, pgCfg.Lists); err != nil { + return err + } + logger.InfoF(ctx, "[agent_setting] embedding vector table ready schema=%s table=%s dim=%d", pgCfg.Schema, tableName, dim) + return nil +} + func snippet(s string, limit int) string { txt := strings.TrimSpace(s) if txt == "" { diff --git a/backend/internal/transport/http/admin/agent/tenant_handlers.go b/backend/internal/transport/http/admin/agent/tenant_handlers.go index 4d608cca..e8b8bd40 100644 --- a/backend/internal/transport/http/admin/agent/tenant_handlers.go +++ b/backend/internal/transport/http/admin/agent/tenant_handlers.go @@ -50,9 +50,9 @@ func (h *TenantAgentFormHandler) ListTenantForms(c *gin.Context) { dto.ResponseError(c, http.StatusServiceUnavailable, "agent lifecycle service not available", nil) return } - rawUUID := strings.TrimSpace(c.Query("tenant_uuid")) + rawUUID := strings.TrimSpace(reqctx.TenantUUIDFromGin(c)) if rawUUID == "" { - dto.ResponseValidationError(c, errors.New("tenant_uuid is required")) + dto.ResponseError(c, http.StatusUnauthorized, "tenant context missing", nil) return } tenantUUID, err := reqctx.CanonicalTenantUUID(rawUUID) diff --git a/backend/internal/transport/http/admin/auth/api.go b/backend/internal/transport/http/admin/auth/api.go index 65346220..b43afbad 100644 --- a/backend/internal/transport/http/admin/auth/api.go +++ 
b/backend/internal/transport/http/admin/auth/api.go @@ -22,11 +22,22 @@ func RegisterAPIRoutes( authProtectedGroup.POST("/logout", hAuthUser.LogoutHandler(deps.AuthUser)) } + // Compatibility: frontend uses `/api/v1/admin/user/auth/logout` (same prefix as login/register/refresh). + // Keep `/user/auth/logout` for legacy callers. + authProtectedAdminGroup := protectedGroup.Group("/admin/user/auth") + { + authProtectedAdminGroup.POST("/logout", hAuthUser.LogoutHandler(deps.AuthUser)) + } + hMeContext := NewMeContextHandler(deps) // 根据你的中间件实际名称绑定,确保需要登录 gMeContext := protectedGroup.Group("/admin/auth") { gMeContext.GET("/me/context", hMeContext.GetMeContext) + hMeExtra := NewMeExtraHandler(deps) + gMeContext.POST("/me/switch-tenant", hMeExtra.SwitchTenant) + gMeContext.GET("/me/tenants", hMeExtra.ListTenants) + gMeContext.GET("/me/departments", hMeExtra.ListDepartments) } } diff --git a/backend/internal/transport/http/admin/auth/auth_handler.go b/backend/internal/transport/http/admin/auth/auth_handler.go index 6887e4fc..8f056221 100644 --- a/backend/internal/transport/http/admin/auth/auth_handler.go +++ b/backend/internal/transport/http/admin/auth/auth_handler.go @@ -2,6 +2,8 @@ package auth import ( + "errors" + "io" "net/http" "strings" @@ -167,11 +169,20 @@ func (h *AuthUserHandler) RefreshHandler(s *authsvc.AuthService) gin.HandlerFunc func (h *AuthUserHandler) LogoutHandler(s *authsvc.AuthService) gin.HandlerFunc { return func(c *gin.Context) { - var req RefreshReq // 用 refresh_token 注销 - if err := dtoRequest.ValidateRequestWithContext(c, &req); err != nil { + var req RefreshReq // 用 refresh_token 注销(前端应提供;为空则 best-effort 清理本地会话) + if err := c.ShouldBindJSON(&req); err != nil { + // Empty body is common for clients that only want to clear local session. 
+ if errors.Is(err, io.EOF) { + dtoRequest.ResponseSuccess(c, gin.H{"ok": true, "noop": true}) + return + } dtoRequest.ResponseValidationError(c, err) return } + if strings.TrimSpace(req.RefreshToken) == "" { + dtoRequest.ResponseSuccess(c, gin.H{"ok": true, "noop": true}) + return + } if err := s.Logout(c.Request.Context(), req.RefreshToken); err != nil { dtoRequest.ResponseError(c, http.StatusUnauthorized, err.Error(), nil) return diff --git a/backend/internal/transport/http/admin/auth/me_extra_handler.go b/backend/internal/transport/http/admin/auth/me_extra_handler.go new file mode 100644 index 00000000..a59307c1 --- /dev/null +++ b/backend/internal/transport/http/admin/auth/me_extra_handler.go @@ -0,0 +1,129 @@ +package auth + +import ( + "net/http" + "strings" + + "github.com/gin-gonic/gin" + + "github.com/ArtisanCloud/PowerX/internal/app/shared" + authsvc "github.com/ArtisanCloud/PowerX/internal/service/auth" + iamsvc "github.com/ArtisanCloud/PowerX/internal/service/iam" + m "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/iam" + "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" + dto "github.com/ArtisanCloud/PowerX/pkg/dto" +) + +type MeExtraHandler struct { + me *authsvc.MeService + org *iamsvc.OrgService +} + +func NewMeExtraHandler(deps *shared.Deps) *MeExtraHandler { + return &MeExtraHandler{ + me: deps.MeService, + org: iamsvc.NewOrgService(deps.DB), + } +} + +type switchTenantReq struct { + TenantUUID string `json:"tenant_uuid"` +} + +// POST /api/v1/admin/auth/me/switch-tenant +func (h *MeExtraHandler) SwitchTenant(c *gin.Context) { + var req switchTenantReq + if err := dto.ValidateRequestWithContext(c, &req); err != nil { + dto.ResponseValidationError(c, err) + return + } + + target := strings.TrimSpace(req.TenantUUID) + if target == "" { + dto.ResponseError(c, http.StatusBadRequest, "tenant_uuid required", nil) + return + } + + // 先用当前 ctx 拉一份 members,校验用户确实属于该租户(避免 root 误切到不存在成员的租户导致 member_id 语义混乱) + baseCtx := 
c.Request.Context() + meCtx, err := h.me.GetMeContext(baseCtx) + if err != nil { + dto.ResponseError(c, http.StatusInternalServerError, "获取上下文失败", err) + return + } + + allowed := false + for _, m := range meCtx.Members { + if strings.EqualFold(strings.TrimSpace(m.TenantUUID), target) { + allowed = true + break + } + } + if !allowed { + dto.ResponseError(c, http.StatusForbidden, "no membership in tenant", nil) + return + } + + // 用目标 tenantUUID 重建一个“视图上下文”,让后续依赖 reqctx.GetTenantUUID 的逻辑对齐 + nextCtx := reqctx.WithTenantUUID(baseCtx, target) + c.Request = c.Request.WithContext(nextCtx) + + resp, err := h.me.GetMeContext(nextCtx) + if err != nil { + dto.ResponseError(c, http.StatusInternalServerError, "获取上下文失败", err) + return + } + dto.ResponseSuccess(c, resp) +} + +// GET /api/v1/admin/auth/me/tenants +func (h *MeExtraHandler) ListTenants(c *gin.Context) { + resp, err := h.me.GetMeContext(c.Request.Context()) + if err != nil { + dto.ResponseError(c, http.StatusInternalServerError, "获取上下文失败", err) + return + } + dto.ResponseSuccess(c, resp.Members) +} + +// GET /api/v1/admin/auth/me/departments +func (h *MeExtraHandler) ListDepartments(c *gin.Context) { + ctx := c.Request.Context() + tenantUUID, err := reqctx.RequireTenantUUID(ctx) + if err != nil { + dto.ResponseError(c, http.StatusBadRequest, "tenant uuid required", err) + return + } + + tree, err := h.org.GetDepartmentTree(ctx, tenantUUID) + if err != nil { + dto.ResponseError(c, http.StatusInternalServerError, "获取部门失败", err) + return + } + + type deptView struct { + ID uint64 `json:"id"` + Name string `json:"name"` + Code string `json:"code"` + ParentID *uint64 `json:"parent_id,omitempty"` + } + + out := make([]deptView, 0, 32) + + var walk func(nodes []*m.Department) + walk = func(nodes []*m.Department) { + for _, n := range nodes { + out = append(out, deptView{ + ID: n.ID, + Name: n.Name, + Code: n.Key, + ParentID: n.ParentID, + }) + if len(n.Children) > 0 { + walk(n.Children) + } + } + } + walk(tree) + 
dto.ResponseSuccess(c, out) +} diff --git a/backend/internal/transport/http/admin/capability_registry/api.go b/backend/internal/transport/http/admin/capability_registry/api.go index 896ef16c..d820f1ea 100644 --- a/backend/internal/transport/http/admin/capability_registry/api.go +++ b/backend/internal/transport/http/admin/capability_registry/api.go @@ -14,6 +14,7 @@ func RegisterAPIRoutes(publicGroup *gin.RouterGroup, protectedGroup *gin.RouterG if protectedGroup != nil && deps.CapabilityCatalogSvc != nil { if handler := newCatalogHandler(deps.CapabilityCatalogSvc); handler != nil { adminCapabilities := protectedGroup.Group("/admin/capabilities") + adminCapabilities.GET("/sources", handler.ListSources) adminCapabilities.GET("", handler.ListCapabilities) adminCapabilities.GET("/:capabilityId", handler.GetCapability) protectedGroup.GET("/admin/capability-sync/jobs", handler.ListSyncJobs) diff --git a/backend/internal/transport/http/admin/capability_registry/catalog_handler.go b/backend/internal/transport/http/admin/capability_registry/catalog_handler.go index 100862be..b3ce56f6 100644 --- a/backend/internal/transport/http/admin/capability_registry/catalog_handler.go +++ b/backend/internal/transport/http/admin/capability_registry/catalog_handler.go @@ -8,6 +8,7 @@ import ( capabilitycatalog "github.com/ArtisanCloud/PowerX/internal/service/capability_registry" capability_registrydto "github.com/ArtisanCloud/PowerX/internal/transport/http/admin/capability_registry/dto" repo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/capability_registry" + "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" "github.com/ArtisanCloud/PowerX/pkg/dto" "github.com/gin-gonic/gin" ) @@ -24,6 +25,41 @@ func newCatalogHandler(svc *capabilitycatalog.RegistryService) *catalogHandler { return &catalogHandler{svc: svc} } +// ListSources handles GET /admin/capabilities/sources. 
+func (h *catalogHandler) ListSources(c *gin.Context) { + if h == nil || h.svc == nil { + capability_registrydto.RespondError(c, capability_registrydto.ErrUnavailable, nil) + return + } + + dto.ResponseSuccess(c, gin.H{ + "default": "all", + "note": "该接口仅返回 source 枚举与别名定义,不执行 capability 查询过滤。请使用 /admin/capabilities?source=... 获取实际数据。", + "sources": []gin.H{ + { + "id": "all", + "label": "all", + "description": "查询全部来源(不传 source 或 source=all)", + }, + { + "id": capabilitycatalog.CapabilitySourceCoreX, + "label": "corex", + "description": "PowerX 底座能力", + }, + { + "id": capabilitycatalog.CapabilitySourcePlugin, + "label": "plugin", + "description": "插件/租户注册能力", + }, + }, + "aliases": gin.H{ + "all": "all", + "any": "all", + "platform": capabilitycatalog.CapabilitySourceCoreX, + }, + }) +} + // ListCapabilities handles GET /admin/capabilities. func (h *catalogHandler) ListCapabilities(c *gin.Context) { if h == nil || h.svc == nil { @@ -67,7 +103,7 @@ func (h *catalogHandler) ListCapabilities(c *gin.Context) { IncludeTotal: true, Status: statusFilter, } - if tenant := strings.TrimSpace(c.Query("tenant_uuid")); tenant != "" { + if tenant := strings.TrimSpace(reqctx.TenantUUIDFromGin(c)); tenant != "" { opts.TenantUUID = tenant } diff --git a/backend/internal/transport/http/admin/capability_registry/dto/dto.go b/backend/internal/transport/http/admin/capability_registry/dto/dto.go index 4404b308..8c242ace 100644 --- a/backend/internal/transport/http/admin/capability_registry/dto/dto.go +++ b/backend/internal/transport/http/admin/capability_registry/dto/dto.go @@ -497,5 +497,8 @@ func buildTenantInvocationCurl(body map[string]interface{}) string { if err != nil { return "" } - return fmt.Sprintf("curl -X POST \"$POWERX_BASE_URL/tenant/invocations\" \\\n -H \"Authorization: Bearer $TENANT_TOKEN\" \\\n -H \"X-PowerX-Tenant: $TENANT_UUID\" \\\n -H \"Content-Type: application/json\" \\\n -d '%s'", string(raw)) + return fmt.Sprintf(`curl -X POST 
"$POWERX_HTTP_BASE/tenant/invocations" \ + -H "Authorization: Bearer $TENANT_TOKEN" \ + -H "Content-Type: application/json" \ + -d '%s'`, string(raw)) } diff --git a/backend/internal/transport/http/admin/capability_registry/platform_handler.go b/backend/internal/transport/http/admin/capability_registry/platform_handler.go index 8fb8119d..d7f56d3f 100644 --- a/backend/internal/transport/http/admin/capability_registry/platform_handler.go +++ b/backend/internal/transport/http/admin/capability_registry/platform_handler.go @@ -26,14 +26,22 @@ func newPlatformHandler(svc *capabilitycatalog.RegistryService) *platformCapabil } func (h *platformCapabilityHandler) ListModules(c *gin.Context) { - h.handleResponse(c, capability_registrydto.NormalizePlatformModuleKey(c.Query("module")), false) + page := parsePositiveInt(c.DefaultQuery("page", "1"), 1) + pageSize := parsePositiveInt(c.DefaultQuery("page_size", "20"), 20) + if page <= 0 { + page = 1 + } + if pageSize <= 0 { + pageSize = 20 + } + h.handleResponse(c, capability_registrydto.NormalizePlatformModuleKey(c.Query("module")), false, page, pageSize) } func (h *platformCapabilityHandler) GetModule(c *gin.Context) { - h.handleResponse(c, capability_registrydto.NormalizePlatformModuleKey(c.Param("moduleKey")), true) + h.handleResponse(c, capability_registrydto.NormalizePlatformModuleKey(c.Param("moduleKey")), true, 1, 1) } -func (h *platformCapabilityHandler) handleResponse(c *gin.Context, moduleFilter string, single bool) { +func (h *platformCapabilityHandler) handleResponse(c *gin.Context, moduleFilter string, single bool, page int, pageSize int) { if !reqctx.IsRoot(c.Request.Context()) { capability_registrydto.RespondError(c, capability_registrydto.ErrCapabilityForbidden.WithHint("仅 Root 管理员可查看平台能力"), nil) return @@ -55,11 +63,31 @@ func (h *platformCapabilityHandler) handleResponse(c *gin.Context, moduleFilter }) return } + + totalModules := len(modules) + if pageSize <= 0 { + pageSize = totalModules + } + start := (page 
- 1) * pageSize + if start < 0 { + start = 0 + } + if start > totalModules { + start = totalModules + } + end := start + pageSize + if end > totalModules { + end = totalModules + } + pagedModules := modules[start:end] + dto.ResponseSuccess(c, gin.H{ "generated_at": generatedAt, - "total_modules": len(modules), + "total_modules": totalModules, "total_capabilities": totalCapabilities, - "modules": modules, + "page": page, + "page_size": pageSize, + "modules": pagedModules, }) } diff --git a/backend/internal/transport/http/admin/dev_hotload/routes.go b/backend/internal/transport/http/admin/dev_hotload/routes.go index e202ead6..7f161e18 100644 --- a/backend/internal/transport/http/admin/dev_hotload/routes.go +++ b/backend/internal/transport/http/admin/dev_hotload/routes.go @@ -176,15 +176,12 @@ func (h *apiHandler) stream(c *gin.Context) { func (h *apiHandler) listSessions(c *gin.Context) { pluginID := c.Query("pluginId") - rawTenant := c.Query("tenant_uuid") - if rawTenant == "" { - rawTenant = c.Query("tenantUuid") - } - tenantUUID, err := optionalTenantUUIDFilter(rawTenant) + tenantUUID, err := reqctx.RequireTenantUUIDFromGin(c) if err != nil { - dto.ResponseError(c, http.StatusBadRequest, "invalid tenant_uuid", err) + dto.ResponseError(c, http.StatusUnauthorized, "tenant context missing", err) return } + tenantFilter := &tenantUUID statuses := normalizeSessionStatuses(c.QueryArray("status")) limit, err := parseLimit(c.Query("limit")) if err != nil { @@ -197,7 +194,7 @@ func (h *apiHandler) listSessions(c *gin.Context) { return } - sessions, err := h.svc.ListSessions(c.Request.Context(), pluginID, tenantUUID, statuses, limit, offset) + sessions, err := h.svc.ListSessions(c.Request.Context(), pluginID, tenantFilter, statuses, limit, offset) if err != nil { h.writeError(c, err) return @@ -215,15 +212,12 @@ func (h *apiHandler) listSessions(c *gin.Context) { func (h *apiHandler) clearSessions(c *gin.Context) { pluginID := c.Query("pluginId") - rawTenant := 
c.Query("tenant_uuid") - if rawTenant == "" { - rawTenant = c.Query("tenantUuid") - } - tenantUUID, err := optionalTenantUUIDFilter(rawTenant) + tenantUUID, err := reqctx.RequireTenantUUIDFromGin(c) if err != nil { - dto.ResponseError(c, http.StatusBadRequest, "invalid tenant_uuid", err) + dto.ResponseError(c, http.StatusUnauthorized, "tenant context missing", err) return } + tenantFilter := &tenantUUID statuses := normalizeSessionStatuses(c.QueryArray("status")) if len(statuses) == 0 { statuses = []string{model.DevHotloadSessionStatusTerminated} @@ -242,7 +236,7 @@ func (h *apiHandler) clearSessions(c *gin.Context) { confirm = true } - ids, err := h.svc.DeleteSessions(c.Request.Context(), pluginID, tenantUUID, statuses, force, confirm) + ids, err := h.svc.DeleteSessions(c.Request.Context(), pluginID, tenantFilter, statuses, force, confirm) if err != nil { h.writeError(c, err) return diff --git a/backend/internal/transport/http/admin/event_fabric/acl_handler.go b/backend/internal/transport/http/admin/event_fabric/acl_handler.go index 7dba3f7e..3de54e1c 100644 --- a/backend/internal/transport/http/admin/event_fabric/acl_handler.go +++ b/backend/internal/transport/http/admin/event_fabric/acl_handler.go @@ -7,9 +7,11 @@ import ( "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/acl" directory "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/directory" + eventfabricmodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/event_fabric" "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" "github.com/ArtisanCloud/PowerX/pkg/dto" "github.com/gin-gonic/gin" + "github.com/google/uuid" ) type AdminACLHandlerOptions struct { @@ -64,12 +66,16 @@ func (h *AdminACLHandler) UpsertBindings(c *gin.Context) { dto.RespondErrorFrom(c, dto.NewBadRequest("invalid topic", err)) return } - if tenantKey != "" && !strings.EqualFold(tenantKey, tenantUUID) { + if tenantKey != "" && !strings.EqualFold(tenantKey, tenantUUID) && 
!isSharedTopicTenant(tenantKey) { dto.RespondErrorFrom(c, dto.NewForbidden("tenant scope mismatch", nil)) return } + lookupTenantKey := strings.TrimSpace(tenantUUID) + if strings.TrimSpace(tenantKey) != "" { + lookupTenantKey = strings.TrimSpace(tenantKey) + } - topic, err := h.directory.FindTopicByFullName(c.Request.Context(), tenantUUID, namespace, name) + topic, err := h.directory.FindTopicByFullName(c.Request.Context(), lookupTenantKey, namespace, name) if err != nil { dto.RespondErrorFrom(c, dto.NewInternal("lookup topic failed", err)) return @@ -155,11 +161,15 @@ func (h *AdminACLHandler) ListBindings(c *gin.Context) { dto.RespondErrorFrom(c, dto.NewBadRequest("invalid topic", err)) return } - if tenantKey != "" && !strings.EqualFold(tenantKey, tenantUUID) { + if tenantKey != "" && !strings.EqualFold(tenantKey, tenantUUID) && !isSharedTopicTenant(tenantKey) { dto.RespondErrorFrom(c, dto.NewForbidden("tenant scope mismatch", nil)) return } - topic, err := h.directory.FindTopicByFullName(c.Request.Context(), tenantUUID, namespace, name) + lookupTenantKey := strings.TrimSpace(tenantUUID) + if strings.TrimSpace(tenantKey) != "" { + lookupTenantKey = strings.TrimSpace(tenantKey) + } + topic, err := h.directory.FindTopicByFullName(c.Request.Context(), lookupTenantKey, namespace, name) if err != nil { dto.RespondErrorFrom(c, dto.NewInternal("lookup topic failed", err)) return @@ -185,6 +195,160 @@ func (h *AdminACLHandler) ListBindings(c *gin.Context) { }) } +func (h *AdminACLHandler) ListTopicRoleMatrix(c *gin.Context) { + if h.service == nil || h.directory == nil { + dto.RespondErrorFrom(c, dto.NewInternal("service unavailable", nil)) + return + } + tenantUUID, err := reqctx.RequireTenantUUIDFromGin(c) + if err != nil { + dto.RespondErrorFrom(c, dto.NewUnauthorized("tenant context missing", err)) + return + } + + namespace := strings.TrimSpace(c.Query("namespace")) + if namespace == "" { + namespace = "knowledge.space.feedback" + } + name := 
strings.TrimSpace(c.Query("name")) + if name == "" { + name = "reprocess" + } + + topic, err := h.findTopicWithFallback(c, tenantUUID, namespace, name) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("lookup topic failed", err)) + return + } + if topic == nil { + dto.RespondErrorFrom(c, dto.NewNotFound("topic not found", nil)) + return + } + + bindings, err := h.service.ListBindings(c.Request.Context(), acl.ListRequest{ + TenantUUID: tenantUUID, + TopicUUID: topic.UUID.String(), + }) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("list acl failed", err)) + return + } + + actionSet := map[string]struct{}{} + principalMap := map[string]map[string]bool{} + for _, b := range bindings { + if b == nil { + continue + } + action := strings.TrimSpace(string(b.Action)) + if action == "" { + continue + } + actionSet[action] = struct{}{} + pid := strings.TrimSpace(b.PrincipalID) + if pid == "" { + continue + } + if _, ok := principalMap[pid]; !ok { + principalMap[pid] = map[string]bool{} + } + principalMap[pid][action] = true + } + + actions := make([]string, 0, len(actionSet)) + for action := range actionSet { + actions = append(actions, action) + } + if len(actions) == 0 { + actions = []string{"publish", "subscribe", "replay"} + } + + principals := make([]map[string]interface{}, 0, len(principalMap)) + for principalID, permits := range principalMap { + principals = append(principals, map[string]interface{}{ + "principal_id": principalID, + "actions": permits, + }) + } + + dto.ResponseSuccess(c, map[string]interface{}{ + "topic": map[string]interface{}{ + "topic_uuid": topic.UUID.String(), + "tenant_key": topic.TenantKey, + "full_topic": topic.FullTopic, + "namespace": topic.Namespace, + "name": topic.Name, + }, + "actions": actions, + "principals": principals, + }) +} + +func (h *AdminACLHandler) ListPrincipalTopicMatrix(c *gin.Context) { + if h.service == nil || h.directory == nil { + dto.RespondErrorFrom(c, dto.NewInternal("service unavailable", 
nil)) + return + } + tenantUUID, err := reqctx.RequireTenantUUIDFromGin(c) + if err != nil { + dto.RespondErrorFrom(c, dto.NewUnauthorized("tenant context missing", err)) + return + } + + principalID := strings.TrimSpace(c.Query("principal_id")) + if principalID == "" { + dto.RespondErrorFrom(c, dto.NewBadRequest("principal_id is required", nil)) + return + } + + namespace := strings.TrimSpace(c.Query("namespace")) + if namespace == "" { + namespace = "knowledge.space.feedback" + } + name := strings.TrimSpace(c.Query("name")) + if name == "" { + name = "reprocess" + } + + topic, err := h.findTopicWithFallback(c, tenantUUID, namespace, name) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("lookup topic failed", err)) + return + } + if topic == nil { + dto.RespondErrorFrom(c, dto.NewNotFound("topic not found", nil)) + return + } + + bindings, err := h.service.ListBindings(c.Request.Context(), acl.ListRequest{ + TenantUUID: tenantUUID, + TopicUUID: topic.UUID.String(), + }) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("list acl failed", err)) + return + } + + actions := make([]string, 0, 3) + for _, b := range bindings { + if b == nil || strings.TrimSpace(b.PrincipalID) != principalID { + continue + } + actions = append(actions, strings.TrimSpace(string(b.Action))) + } + + dto.ResponseSuccess(c, map[string]interface{}{ + "principal_id": principalID, + "topics": []map[string]interface{}{ + { + "topic_uuid": topic.UUID.String(), + "topic": topic.FullTopic, + "actions": actions, + }, + }, + }) +} + func splitFullTopic(full string) (tenant, namespace, name string, err error) { parts := strings.Split(strings.TrimSpace(full), ".") if len(parts) < 3 { @@ -209,3 +373,66 @@ func parseTimeRef(value string) (*time.Time, error) { } return &ts, nil } + +func isSharedTopicTenant(tenantKey string) bool { + key := strings.ToLower(strings.TrimSpace(tenantKey)) + return key == "global" || key == "system" +} + +func normalizeTopicFullName(input string, 
fallbackTenant string) (string, error) { + if strings.TrimSpace(input) == "" { + return "", fmt.Errorf("topic required") + } + tenant, namespace, name, err := splitFullTopic(input) + if err == nil { + if tenant == "" { + tenant = strings.TrimSpace(fallbackTenant) + } + if tenant == "" { + tenant = "global" + } + return fmt.Sprintf("%s.%s.%s", tenant, namespace, name), nil + } + parts := strings.Split(strings.TrimSpace(input), ".") + if len(parts) < 2 { + return "", err + } + namespace = strings.Join(parts[:len(parts)-1], ".") + name = parts[len(parts)-1] + tenant = strings.TrimSpace(fallbackTenant) + if tenant == "" { + tenant = "global" + } + return fmt.Sprintf("%s.%s.%s", tenant, strings.TrimSpace(namespace), strings.TrimSpace(name)), nil +} + +func parseTopicUUID(value string) (uuid.UUID, error) { + id := strings.TrimSpace(value) + if id == "" { + return uuid.Nil, fmt.Errorf("topic_uuid is required") + } + return uuid.Parse(id) +} + +func (h *AdminACLHandler) findTopicWithFallback(c *gin.Context, tenantUUID, namespace, name string) (*eventfabricmodel.TopicDefinition, error) { + lookupOrder := []string{strings.TrimSpace(tenantUUID), "global", "system"} + seen := map[string]struct{}{} + for _, tenantKey := range lookupOrder { + key := strings.TrimSpace(tenantKey) + if key == "" { + continue + } + if _, ok := seen[key]; ok { + continue + } + seen[key] = struct{}{} + topic, err := h.directory.FindTopicByFullName(c.Request.Context(), key, namespace, name) + if err != nil { + return nil, err + } + if topic != nil { + return topic, nil + } + } + return nil, nil +} diff --git a/backend/internal/transport/http/admin/event_fabric/cron_handler.go b/backend/internal/transport/http/admin/event_fabric/cron_handler.go new file mode 100644 index 00000000..b9a3fc66 --- /dev/null +++ b/backend/internal/transport/http/admin/event_fabric/cron_handler.go @@ -0,0 +1,229 @@ +package eventfabric + +import ( + "strings" + "time" + + workers 
"github.com/ArtisanCloud/PowerX/internal/app/shared/workers" + "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" + "github.com/ArtisanCloud/PowerX/pkg/dto" + "github.com/gin-gonic/gin" +) + +const ( + cronJobRetryDispatch = "event_fabric.retry_dispatch" + cronJobAuthorizationChallengeTask = "event_fabric.authorization_challenge_timeout" + cronJobStateRunning = "running" + cronJobStatePaused = "paused" + cronJobStateUnavailable = "unavailable" +) + +type AdminCronHandlerOptions struct { + RetryWorker *workers.EventFabricRetryWorker + AuthorizationWorker *workers.EventFabricAuthorizationTimeoutTaskWorker +} + +type AdminCronHandler struct { + retryWorker *workers.EventFabricRetryWorker + authorizationWorker *workers.EventFabricAuthorizationTimeoutTaskWorker +} + +type cronJobDTO struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Status string `json:"status"` + Kind string `json:"kind"` + IntervalSec int64 `json:"interval_sec,omitempty"` + BatchSize int `json:"batch_size,omitempty"` + SubscriberID string `json:"subscriber_id,omitempty"` + TenantKey string `json:"tenant_key,omitempty"` + NextRunAt string `json:"next_run_at,omitempty"` + SupportsPause bool `json:"supports_pause"` + SupportsRunNow bool `json:"supports_run_now"` +} + +func NewAdminCronHandler(opts AdminCronHandlerOptions) *AdminCronHandler { + return &AdminCronHandler{ + retryWorker: opts.RetryWorker, + authorizationWorker: opts.AuthorizationWorker, + } +} + +func (h *AdminCronHandler) ListJobs(c *gin.Context) { + if _, err := reqctx.RequireTenantUUIDFromGin(c); err != nil { + dto.RespondErrorFrom(c, dto.NewUnauthorized("tenant context missing", err)) + return + } + + now := time.Now().UTC() + items := make([]cronJobDTO, 0, 2) + + items = append(items, h.retryDispatchJob(now)) + items = append(items, h.authorizationTimeoutJob(now)) + + dto.ResponseSuccess(c, gin.H{ + "items": items, + "now": now.Format(time.RFC3339), + }) +} + +func (h 
*AdminCronHandler) PauseJob(c *gin.Context) { + if _, err := reqctx.RequireTenantUUIDFromGin(c); err != nil { + dto.RespondErrorFrom(c, dto.NewUnauthorized("tenant context missing", err)) + return + } + + jobID := strings.TrimSpace(c.Param("job_id")) + switch jobID { + case cronJobRetryDispatch: + if h.retryWorker == nil { + dto.RespondErrorFrom(c, dto.NewNotFound("cron job not found", nil)) + return + } + h.retryWorker.Pause() + dto.ResponseSuccess(c, h.retryDispatchJob(time.Now().UTC())) + return + case cronJobAuthorizationChallengeTask: + if h.authorizationWorker == nil { + dto.RespondErrorFrom(c, dto.NewNotFound("cron job not found", nil)) + return + } + h.authorizationWorker.Pause() + dto.ResponseSuccess(c, h.authorizationTimeoutJob(time.Now().UTC())) + return + default: + dto.RespondErrorFrom(c, dto.NewNotFound("cron job not found", nil)) + return + } +} + +func (h *AdminCronHandler) ResumeJob(c *gin.Context) { + if _, err := reqctx.RequireTenantUUIDFromGin(c); err != nil { + dto.RespondErrorFrom(c, dto.NewUnauthorized("tenant context missing", err)) + return + } + + jobID := strings.TrimSpace(c.Param("job_id")) + switch jobID { + case cronJobRetryDispatch: + if h.retryWorker == nil { + dto.RespondErrorFrom(c, dto.NewNotFound("cron job not found", nil)) + return + } + h.retryWorker.Resume() + dto.ResponseSuccess(c, h.retryDispatchJob(time.Now().UTC())) + return + case cronJobAuthorizationChallengeTask: + if h.authorizationWorker == nil { + dto.RespondErrorFrom(c, dto.NewNotFound("cron job not found", nil)) + return + } + h.authorizationWorker.Resume() + dto.ResponseSuccess(c, h.authorizationTimeoutJob(time.Now().UTC())) + return + default: + dto.RespondErrorFrom(c, dto.NewNotFound("cron job not found", nil)) + return + } +} + +func (h *AdminCronHandler) RunNow(c *gin.Context) { + if _, err := reqctx.RequireTenantUUIDFromGin(c); err != nil { + dto.RespondErrorFrom(c, dto.NewUnauthorized("tenant context missing", err)) + return + } + + jobID := 
strings.TrimSpace(c.Param("job_id")) + switch jobID { + case cronJobRetryDispatch: + if h.retryWorker == nil { + dto.RespondErrorFrom(c, dto.NewNotFound("cron job not found", nil)) + return + } + h.retryWorker.TriggerNow(c.Request.Context()) + dto.ResponseSuccess(c, h.retryDispatchJob(time.Now().UTC())) + return + case cronJobAuthorizationChallengeTask: + if h.authorizationWorker == nil { + dto.RespondErrorFrom(c, dto.NewNotFound("cron job not found", nil)) + return + } + h.authorizationWorker.TriggerNow(c.Request.Context()) + dto.ResponseSuccess(c, h.authorizationTimeoutJob(time.Now().UTC())) + return + default: + dto.RespondErrorFrom(c, dto.NewNotFound("cron job not found", nil)) + return + } +} + +func (h *AdminCronHandler) retryDispatchJob(now time.Time) cronJobDTO { + if h.retryWorker == nil { + return cronJobDTO{ + ID: cronJobRetryDispatch, + Name: "Event Fabric Retry Dispatch", + Description: "扫描到期重试任务并发布到统一事件总线", + Status: cronJobStateUnavailable, + Kind: "interval", + SupportsPause: false, + SupportsRunNow: false, + } + } + status := cronJobStateRunning + if h.retryWorker.IsPaused() { + status = cronJobStatePaused + } + interval := h.retryWorker.Interval() + nextRunAt := "" + if status == cronJobStateRunning && interval > 0 { + nextRunAt = now.Add(interval).Format(time.RFC3339) + } + return cronJobDTO{ + ID: cronJobRetryDispatch, + Name: "Event Fabric Retry Dispatch", + Description: "扫描到期重试任务并发布到统一事件总线", + Status: status, + Kind: "interval", + IntervalSec: int64(interval / time.Second), + BatchSize: h.retryWorker.BatchSize(), + NextRunAt: nextRunAt, + SupportsPause: true, + SupportsRunNow: true, + } +} + +func (h *AdminCronHandler) authorizationTimeoutJob(now time.Time) cronJobDTO { + if h.authorizationWorker == nil { + return cronJobDTO{ + ID: cronJobAuthorizationChallengeTask, + Name: "Authorization Challenge Timeout", + Description: "消费授权超时任务并执行过期处理", + Status: cronJobStateUnavailable, + Kind: "queue", + SupportsPause: false, + SupportsRunNow: false, + 
} + } + status := cronJobStateRunning + if h.authorizationWorker.IsPaused() { + status = cronJobStatePaused + } + nextRunAt := "" + if status == cronJobStateRunning { + nextRunAt = now.Add(h.authorizationWorker.WaitTimeout()).Format(time.RFC3339) + } + return cronJobDTO{ + ID: cronJobAuthorizationChallengeTask, + Name: "Authorization Challenge Timeout", + Description: "消费授权超时任务并执行过期处理", + Status: status, + Kind: "queue", + BatchSize: h.authorizationWorker.BatchSize(), + SubscriberID: h.authorizationWorker.SubscriberID(), + TenantKey: h.authorizationWorker.TenantKey(), + NextRunAt: nextRunAt, + SupportsPause: true, + SupportsRunNow: true, + } +} diff --git a/backend/internal/transport/http/admin/event_fabric/delivery_handler.go b/backend/internal/transport/http/admin/event_fabric/delivery_handler.go index 3b5e881e..dfc689f6 100644 --- a/backend/internal/transport/http/admin/event_fabric/delivery_handler.go +++ b/backend/internal/transport/http/admin/event_fabric/delivery_handler.go @@ -98,6 +98,10 @@ func mapDeliveryError(err error) error { case errors.Is(err, sharedsvc.ErrRetryExhausted): return dto.WithCode(dto.NewConflict("retry exhausted", err), sharedsvc.ErrorCodeRetryExhausted) default: + errMsg := strings.ToLower(strings.TrimSpace(err.Error())) + if strings.Contains(errMsg, "topic") && strings.Contains(errMsg, "not found") { + return dto.NewNotFound("topic 未注册或当前环境不可用", err) + } return dto.WithCode(dto.NewInternal("delivery internal error", err), sharedsvc.ErrorNamespace+".internal_error") } } diff --git a/backend/internal/transport/http/admin/event_fabric/directory_handler.go b/backend/internal/transport/http/admin/event_fabric/directory_handler.go index 4db357b5..5f1158f5 100644 --- a/backend/internal/transport/http/admin/event_fabric/directory_handler.go +++ b/backend/internal/transport/http/admin/event_fabric/directory_handler.go @@ -107,12 +107,15 @@ func (h *AdminDirectoryHandler) ListTopics(c *gin.Context) { return } + filter := repository.TopicFilter{ 
+ Namespace: c.Query("namespace"), + Lifecycle: lifecycles, + } + filter.TenantID = tenantUUID + filter.IncludeShared = true + list, total, err := h.service.ListTopics(c.Request.Context(), repository.QueryContext{ - Filter: repository.TopicFilter{ - TenantID: tenantUUID, - Namespace: c.Query("namespace"), - Lifecycle: lifecycles, - }, + Filter: filter, Page: repository.PageOptions{ Limit: pageSize, Offset: (page - 1) * pageSize, diff --git a/backend/internal/transport/http/admin/event_fabric/overview_handler.go b/backend/internal/transport/http/admin/event_fabric/overview_handler.go new file mode 100644 index 00000000..674c7b97 --- /dev/null +++ b/backend/internal/transport/http/admin/event_fabric/overview_handler.go @@ -0,0 +1,936 @@ +package eventfabric + +import ( + "encoding/json" + "fmt" + "sort" + "strconv" + "strings" + "time" + + eventbus "github.com/ArtisanCloud/PowerX/internal/event_bus" + "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/directory" + eventfabricmodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/event_fabric" + eventfabrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/event_fabric" + "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" + "github.com/ArtisanCloud/PowerX/pkg/dto" + "github.com/ArtisanCloud/PowerX/pkg/event_bus" + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/redis/go-redis/v9" + "gorm.io/gorm" +) + +type AdminOverviewHandlerOptions struct { + DB *gorm.DB + Directory *directory.DirectoryService + Redis *redis.Client + Enabled bool +} + +type AdminOverviewHandler struct { + db *gorm.DB + directory *directory.DirectoryService + redis *redis.Client + history *eventfabrepo.TaskHistoryRepository + enabled bool +} + +func NewAdminOverviewHandler(opts AdminOverviewHandlerOptions) *AdminOverviewHandler { + return &AdminOverviewHandler{ + db: opts.DB, + directory: opts.Directory, + redis: opts.Redis, + history: func() *eventfabrepo.TaskHistoryRepository { + 
if opts.DB == nil { + return nil + } + return eventfabrepo.NewTaskHistoryRepository(opts.DB) + }(), + enabled: opts.Enabled, + } +} + +type taskQueueSubscriberStats struct { + SubscriberID string `json:"subscriber_id"` + TenantKey string `json:"tenant_key"` + Pending int64 `json:"pending"` + Deferred int64 `json:"deferred"` + Processing int64 `json:"processing"` + Inflight int64 `json:"inflight"` + TotalTasks int64 `json:"total_tasks"` +} + +type taskQueueMessageDTO struct { + ID string `json:"id"` + Topic string `json:"topic"` + TraceID string `json:"trace_id,omitempty"` + Attempt int `json:"attempt"` + Visible string `json:"visible_at,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` +} + +type taskHistoryDTO struct { + TaskID string `json:"task_id"` + TenantKey string `json:"tenant_key"` + Subscriber string `json:"subscriber_id"` + Topic string `json:"topic"` + Kind string `json:"kind"` + Status string `json:"status"` + TraceID string `json:"trace_id,omitempty"` + Attempt int `json:"attempt"` + Source string `json:"source"` + SubmittedAt string `json:"submitted_at,omitempty"` + CompletedAt string `json:"completed_at,omitempty"` + LastSeenAt string `json:"last_seen_at,omitempty"` +} + +type topicDTO struct { + ID string `json:"id"` + UUID string `json:"uuid"` + FullTopic string `json:"full_topic"` + Namespace string `json:"namespace"` + Name string `json:"name"` + Lifecycle string `json:"lifecycle"` + PayloadFormat string `json:"payload_format"` + MaxRetry int `json:"max_retry"` + AckTimeout int `json:"ack_timeout_sec"` + VersionMode string `json:"versioning_mode"` + DeprecatedAt string `json:"deprecated_at,omitempty"` +} + +type groupedCount struct { + TopicUUID string `json:"topic_uuid"` + FullTopic string `json:"full_topic,omitempty"` + ByStatus map[string]int64 `json:"by_status"` + Total int64 `json:"total"` +} + +type overviewReplayTaskDTO struct { + ID string `json:"id"` + TopicUUID string `json:"topic_uuid"` + FullTopic string 
`json:"full_topic,omitempty"` + TraceID string `json:"trace_id,omitempty"` + Status string `json:"status"` + Shadow bool `json:"shadow"` + RequestedBy string `json:"requested_by,omitempty"` + SubmittedAt string `json:"submitted_at"` + CompletedAt string `json:"completed_at,omitempty"` + FailureReason string `json:"failure_reason,omitempty"` + ResultCount int `json:"result_count"` +} + +func (h *AdminOverviewHandler) GetOverview(c *gin.Context) { + if h == nil || h.db == nil { + dto.RespondErrorFrom(c, dto.NewInternal("event_fabric overview unavailable", nil)) + return + } + if !h.enabled { + dto.RespondErrorFrom(c, dto.NewError(503, "event_fabric 未启用", nil)) + return + } + + tenantUUID, err := reqctx.RequireTenantUUIDFromGin(c) + if err != nil { + dto.RespondErrorFrom(c, dto.NewUnauthorized("tenant context missing", err)) + return + } + tenantUUID, err = reqctx.CanonicalTenantUUID(tenantUUID) + if err != nil { + dto.RespondErrorFrom(c, dto.NewBadRequest("invalid tenant uuid", err)) + return + } + + namespace := strings.TrimSpace(c.Query("namespace")) + name := strings.TrimSpace(c.Query("name")) + subscriberID := strings.TrimSpace(c.Query("subscriber_id")) + if subscriberID == "" { + subscriberID = eventbus.SubscriberKnowledgeSpaceReprocess + } + subscriberID = canonicalSubscriberID(subscriberID) + limit, _ := strconv.Atoi(c.DefaultQuery("limit", "20")) + if limit <= 0 { + limit = 20 + } + if limit > 200 { + limit = 200 + } + + topics, topicMap, err := h.listTopics(c, tenantUUID, namespace, name) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("list topics failed", err)) + return + } + + dlqStats, err := h.queryDLQCounts(c, tenantUUID, topicMap) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("query dlq stats failed", err)) + return + } + + attemptStats, err := h.queryAttemptCounts(c, tenantUUID, subscriberID, topicMap) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("query delivery attempt stats failed", err)) + return + } + + 
replayTasks, err := h.queryReplayTasks(c, tenantUUID, topicMap, limit) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("query replay tasks failed", err)) + return + } + + taskQueueStats, err := h.queryTaskQueueStats(c, tenantUUID, subscriberID) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("query task queue stats failed", err)) + return + } + + dto.ResponseSuccess(c, gin.H{ + "now": time.Now().UTC().Format(time.RFC3339), + "tenant_uuid": tenantUUID, + "filters": gin.H{ + "namespace": namespace, + "name": name, + "subscriber_id": subscriberID, + "replay_task_max": limit, + }, + "topics": topics, + "stats": gin.H{ + "dlq": gin.H{ + "by_topic": dlqStats, + "total": sumGrouped(dlqStats), + }, + "delivery_attempts": gin.H{ + "subscriber_id": subscriberID, + "by_topic": attemptStats, + "total": sumGrouped(attemptStats), + }, + "replay_tasks": gin.H{ + "recent": replayTasks, + }, + "task_queue": taskQueueStats, + }, + }) +} + +func (h *AdminOverviewHandler) GetTaskQueueStats(c *gin.Context) { + if h == nil || h.db == nil { + dto.RespondErrorFrom(c, dto.NewInternal("event_fabric overview unavailable", nil)) + return + } + if !h.enabled { + dto.RespondErrorFrom(c, dto.NewError(503, "event_fabric 未启用", nil)) + return + } + + tenantUUID, err := reqctx.RequireTenantUUIDFromGin(c) + if err != nil { + dto.RespondErrorFrom(c, dto.NewUnauthorized("tenant context missing", err)) + return + } + tenantUUID, err = reqctx.CanonicalTenantUUID(tenantUUID) + if err != nil { + dto.RespondErrorFrom(c, dto.NewBadRequest("invalid tenant uuid", err)) + return + } + + subscriberID := strings.TrimSpace(c.Query("subscriber_id")) + if subscriberID == "" { + subscriberID = eventbus.SubscriberKnowledgeSpaceReprocess + } + subscriberID = canonicalSubscriberID(subscriberID) + + taskQueueStats, err := h.queryTaskQueueStats(c, tenantUUID, subscriberID) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("query task queue stats failed", err)) + return + } + + 
dto.ResponseSuccess(c, gin.H{ + "now": time.Now().UTC().Format(time.RFC3339), + "tenant_uuid": tenantUUID, + "subscriber_id": subscriberID, + "task_queue": taskQueueStats, + }) +} + +func (h *AdminOverviewHandler) GetTaskQueueMessages(c *gin.Context) { + if h == nil || h.db == nil { + dto.RespondErrorFrom(c, dto.NewInternal("event_fabric overview unavailable", nil)) + return + } + if !h.enabled { + dto.RespondErrorFrom(c, dto.NewError(503, "event_fabric 未启用", nil)) + return + } + if h.redis == nil { + dto.ResponseSuccess(c, gin.H{ + "now": time.Now().UTC().Format(time.RFC3339), + "messages": gin.H{"pending": []taskQueueMessageDTO{}, "deferred": []taskQueueMessageDTO{}, "processing": []taskQueueMessageDTO{}, "inflight": []taskQueueMessageDTO{}}, + }) + return + } + + tenantUUID, err := reqctx.RequireTenantUUIDFromGin(c) + if err != nil { + dto.RespondErrorFrom(c, dto.NewUnauthorized("tenant context missing", err)) + return + } + tenantUUID, err = reqctx.CanonicalTenantUUID(tenantUUID) + if err != nil { + dto.RespondErrorFrom(c, dto.NewBadRequest("invalid tenant uuid", err)) + return + } + + tenantKey := strings.TrimSpace(c.Query("tenant_key")) + if tenantKey == "" { + tenantKey = tenantUUID + } + subscriberID := canonicalSubscriberID(strings.TrimSpace(c.Query("subscriber_id"))) + if subscriberID == "" { + subscriberID = eventbus.SubscriberKnowledgeSpaceReprocess + } + + limit, _ := strconv.Atoi(c.DefaultQuery("limit", "20")) + if limit <= 0 { + limit = 20 + } + if limit > 100 { + limit = 100 + } + + pending, err := h.readTaskQueueState(c, "q", tenantKey, subscriberID, limit) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("read pending queue failed", err)) + return + } + deferred, err := h.readTaskQueueState(c, "d", tenantKey, subscriberID, limit) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("read deferred queue failed", err)) + return + } + processing, err := h.readTaskQueueState(c, "p", tenantKey, subscriberID, limit) + if err != nil { + 
dto.RespondErrorFrom(c, dto.NewInternal("read processing queue failed", err)) + return + } + inflight, err := h.readTaskQueueState(c, "i", tenantKey, subscriberID, limit) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("read inflight queue failed", err)) + return + } + history, err := h.queryTaskHistory(c, tenantKey, subscriberID, limit) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("read task history failed", err)) + return + } + + dto.ResponseSuccess(c, gin.H{ + "now": time.Now().UTC().Format(time.RFC3339), + "tenant_uuid": tenantUUID, + "tenant_key": tenantKey, + "subscriber_id": subscriberID, + "limit": limit, + "messages": gin.H{ + "pending": pending, + "deferred": deferred, + "processing": processing, + "inflight": inflight, + }, + "history": history, + }) +} + +func (h *AdminOverviewHandler) queryTaskQueueStats(c *gin.Context, tenantUUID, subscriberID string) (gin.H, error) { + if h.redis == nil { + return gin.H{ + "pending": int64(0), + "deferred": int64(0), + "processing": int64(0), + "inflight": int64(0), + "by_subscriber": []taskQueueSubscriberStats{}, + }, nil + } + + prefix := "event_fabric:task" + ordered := []taskQueueSubscriberStats{} + type pair struct { + TenantKey string + SubscriberID string + } + pairs := make([]pair, 0, 32) + + appendStats := func(tenantKey, subscriber string) error { + tenantKey = strings.TrimSpace(tenantKey) + subscriber = canonicalSubscriberID(subscriber) + if tenantKey == "" || subscriber == "" { + return nil + } + qKey := prefix + ":q:" + tenantKey + ":" + subscriber + dKey := prefix + ":d:" + tenantKey + ":" + subscriber + pKey := prefix + ":p:" + tenantKey + ":" + subscriber + iKey := prefix + ":i:" + tenantKey + ":" + subscriber + + pending, err := h.redis.LLen(c.Request.Context(), qKey).Result() + if err != nil { + return err + } + deferred, err := h.redis.ZCard(c.Request.Context(), dKey).Result() + if err != nil { + return err + } + processing, err := h.redis.LLen(c.Request.Context(), 
pKey).Result() + if err != nil { + return err + } + inflight, err := h.redis.HLen(c.Request.Context(), iKey).Result() + if err != nil { + return err + } + + ordered = append(ordered, taskQueueSubscriberStats{ + SubscriberID: subscriber, + TenantKey: tenantKey, + Pending: pending, + Deferred: deferred, + Processing: processing, + Inflight: inflight, + TotalTasks: pending + deferred + processing + inflight, + }) + return nil + } + + seen := map[string]struct{}{} + addSubscriber := func(tenantKey, subscriber string) { + key := strings.TrimSpace(tenantKey) + "::" + strings.TrimSpace(subscriber) + if key == "::" { + return + } + if _, ok := seen[key]; ok { + return + } + seen[key] = struct{}{} + pairs = append(pairs, pair{ + TenantKey: strings.TrimSpace(tenantKey), + SubscriberID: canonicalSubscriberID(strings.TrimSpace(subscriber)), + }) + } + + // 基础订阅者(保障无任务时也可见) + addSubscriber(tenantUUID, subscriberID) + addSubscriber(tenantUUID, eventbus.SubscriberEventFabricReplay) + addSubscriber(tenantUUID, eventbus.SubscriberKnowledgeSpaceCorpusCheck) + addSubscriber("global", eventbus.SubscriberSystemNotificationDispatch) + addSubscriber("global", eventbus.SubscriberAuthorizationChallengeTime) + + // 从 Redis 实时队列发现全部 tenant+subscriber 分片 + states := []string{"q", "d", "p", "i"} + for _, state := range states { + pattern := fmt.Sprintf("%s:%s:*:*", prefix, state) + var cursor uint64 + for { + keys, next, err := h.redis.Scan(c.Request.Context(), cursor, pattern, 200).Result() + if err != nil { + return nil, err + } + for _, key := range keys { + parts := strings.Split(key, ":") + if len(parts) < 5 { + continue + } + tenantKey := strings.TrimSpace(parts[3]) + subscriber := strings.TrimSpace(strings.Join(parts[4:], ":")) + if tenantKey == "" || subscriber == "" { + continue + } + if subscriberID != "" && !strings.EqualFold(subscriber, canonicalSubscriberID(subscriberID)) { + continue + } + addSubscriber(tenantKey, subscriber) + } + cursor = next + if cursor == 0 { + break + } + } 
+ } + + // 从历史账本补齐“已消费完成但运行态为空”的分片 + if h.db != nil { + type historyPair struct { + TenantKey string `gorm:"column:tenant_key"` + SubscriberID string `gorm:"column:subscriber_id"` + } + rows := make([]historyPair, 0, 32) + query := h.db.Model(&eventfabricmodel.TaskHistory{}).Select("tenant_key, subscriber_id").Distinct() + if subscriberID != "" { + query = query.Where("subscriber_id = ?", canonicalSubscriberID(subscriberID)) + } + if err := query.Find(&rows).Error; err != nil { + return nil, err + } + for _, row := range rows { + addSubscriber(row.TenantKey, row.SubscriberID) + } + } + + sort.Slice(pairs, func(i, j int) bool { + if pairs[i].TenantKey == pairs[j].TenantKey { + return pairs[i].SubscriberID < pairs[j].SubscriberID + } + if pairs[i].TenantKey == "global" { + return false + } + if pairs[j].TenantKey == "global" { + return true + } + return pairs[i].TenantKey < pairs[j].TenantKey + }) + for _, item := range pairs { + if err := appendStats(item.TenantKey, item.SubscriberID); err != nil { + return nil, err + } + } + + for idx := range ordered { + var historyCount int64 + if err := h.db.Model(&eventfabricmodel.TaskHistory{}). + Where("tenant_key = ? AND subscriber_id = ?", ordered[idx].TenantKey, ordered[idx].SubscriberID). 
+ Count(&historyCount).Error; err != nil { + return nil, err + } + if historyCount > ordered[idx].TotalTasks { + ordered[idx].TotalTasks = historyCount + } + } + + var pending, deferred, processing, inflight int64 + for _, item := range ordered { + pending += item.Pending + deferred += item.Deferred + processing += item.Processing + inflight += item.Inflight + } + + return gin.H{ + "pending": pending, + "deferred": deferred, + "processing": processing, + "inflight": inflight, + "by_subscriber": ordered, + }, nil +} + +func canonicalSubscriberID(subscriber string) string { + switch strings.TrimSpace(subscriber) { + case "core.knowledge_space.reprocess": + return eventbus.SubscriberKnowledgeSpaceReprocess + case "core.knowledge_space.corpus_check": + return eventbus.SubscriberKnowledgeSpaceCorpusCheck + case "core.authorization.challenge_timeout": + return eventbus.SubscriberAuthorizationChallengeTime + case "core.system.notification_dispatch": + return eventbus.SubscriberSystemNotificationDispatch + default: + return strings.TrimSpace(subscriber) + } +} + +func (h *AdminOverviewHandler) readTaskQueueState(c *gin.Context, state, tenantKey, subscriberID string, limit int) ([]taskQueueMessageDTO, error) { + if h == nil || h.redis == nil { + return []taskQueueMessageDTO{}, nil + } + if limit <= 0 { + limit = 20 + } + key := "event_fabric:task:" + state + ":" + strings.TrimSpace(tenantKey) + ":" + strings.TrimSpace(subscriberID) + ctx := c.Request.Context() + + var raws []string + switch state { + case "q", "p": + items, err := h.redis.LRange(ctx, key, 0, int64(limit-1)).Result() + if err != nil && err != redis.Nil { + return nil, err + } + raws = items + case "d": + items, err := h.redis.ZRange(ctx, key, 0, int64(limit-1)).Result() + if err != nil && err != redis.Nil { + return nil, err + } + raws = items + case "i": + items, err := h.redis.HVals(ctx, key).Result() + if err != nil && err != redis.Nil { + return nil, err + } + if len(items) > limit { + items = 
items[:limit] + } + raws = items + default: + return []taskQueueMessageDTO{}, nil + } + + out := make([]taskQueueMessageDTO, 0, len(raws)) + for _, raw := range raws { + var msg event_bus.TaskMessage + if err := json.Unmarshal([]byte(raw), &msg); err != nil { + continue + } + row := taskQueueMessageDTO{ + ID: strings.TrimSpace(msg.ID), + Topic: strings.TrimSpace(msg.Topic), + TraceID: strings.TrimSpace(msg.TraceID), + Attempt: msg.Attempt, + Metadata: msg.Metadata, + } + if !msg.VisibleAt.IsZero() { + row.Visible = msg.VisibleAt.UTC().Format(time.RFC3339) + } + out = append(out, row) + } + return out, nil +} + +func (h *AdminOverviewHandler) queryTaskHistory(c *gin.Context, tenantKey, subscriberID string, limit int) ([]taskHistoryDTO, error) { + if h == nil || h.history == nil { + return []taskHistoryDTO{}, nil + } + records, err := h.history.ListRecent(c.Request.Context(), strings.TrimSpace(tenantKey), strings.TrimSpace(subscriberID), limit) + if err != nil { + return nil, err + } + out := make([]taskHistoryDTO, 0, len(records)) + for _, item := range records { + if item == nil { + continue + } + out = append(out, taskHistoryDTO{ + TaskID: strings.TrimSpace(item.TaskID), + TenantKey: strings.TrimSpace(item.TenantKey), + Subscriber: strings.TrimSpace(item.SubscriberID), + Topic: strings.TrimSpace(item.Topic), + Kind: strings.TrimSpace(item.Kind), + Status: strings.TrimSpace(item.Status), + TraceID: strings.TrimSpace(item.TraceID), + Attempt: item.Attempt, + Source: strings.TrimSpace(item.Source), + SubmittedAt: toRFC3339Ptr(item.SubmittedAt), + CompletedAt: toRFC3339Ptr(item.CompletedAt), + LastSeenAt: item.LastSeenAt.UTC().Format(time.RFC3339), + }) + } + return out, nil +} + +func toRFC3339Ptr(ts *time.Time) string { + if ts == nil || ts.IsZero() { + return "" + } + return ts.UTC().Format(time.RFC3339) +} + +func (h *AdminOverviewHandler) listTopics(c *gin.Context, tenantUUID, namespace, name string) ([]topicDTO, map[string]string, error) { + ctx := 
c.Request.Context() + + typeRow := func(id, fullTopic, ns, nm, lifecycle, payloadFormat string, maxRetry, ackTimeout int, versionMode string, deprecatedAt *time.Time) topicDTO { + out := topicDTO{ + ID: id, + UUID: id, + FullTopic: fullTopic, + Namespace: ns, + Name: nm, + Lifecycle: lifecycle, + PayloadFormat: payloadFormat, + MaxRetry: maxRetry, + AckTimeout: ackTimeout, + VersionMode: versionMode, + } + if deprecatedAt != nil { + out.DeprecatedAt = deprecatedAt.UTC().Format(time.RFC3339) + } + return out + } + + if h.directory != nil { + list, _, err := h.directory.ListTopics(ctx, repositoryQueryForTopics(tenantUUID, namespace)) + if err != nil { + return nil, nil, err + } + filtered := make([]topicDTO, 0, len(list)) + topicMap := make(map[string]string, len(list)) + for _, t := range list { + if t == nil { + continue + } + if name != "" && !strings.EqualFold(strings.TrimSpace(t.Name), name) { + continue + } + id := strings.TrimSpace(t.ID) + if id == "" { + continue + } + row := typeRow( + id, + strings.TrimSpace(t.FullTopic), + strings.TrimSpace(t.Namespace), + strings.TrimSpace(t.Name), + string(t.Lifecycle), + strings.TrimSpace(t.PayloadFormat), + int(t.MaxRetry), + int(t.AckTimeoutSec), + strings.TrimSpace(t.VersioningMode), + t.DeprecatedAt, + ) + filtered = append(filtered, row) + topicMap[id] = row.FullTopic + } + sort.Slice(filtered, func(i, j int) bool { return filtered[i].FullTopic < filtered[j].FullTopic }) + return filtered, topicMap, nil + } + + var items []*eventfabricmodel.TopicDefinition + query := h.db.WithContext(ctx).Model(&eventfabricmodel.TopicDefinition{}).Where("tenant_key = ?", tenantUUID) + if namespace != "" { + query = query.Where("namespace = ?", namespace) + } + if err := query.Order("created_at DESC").Limit(200).Find(&items).Error; err != nil { + return nil, nil, err + } + + filtered := make([]topicDTO, 0, len(items)) + topicMap := make(map[string]string, len(items)) + for _, t := range items { + if t == nil { + continue + } + if 
name != "" && !strings.EqualFold(strings.TrimSpace(t.Name), name) { + continue + } + id := t.UUID.String() + row := typeRow( + id, + strings.TrimSpace(t.FullTopic), + strings.TrimSpace(t.Namespace), + strings.TrimSpace(t.Name), + string(t.Lifecycle), + strings.TrimSpace(t.PayloadFormat), + t.MaxRetry, + t.AckTimeoutSec, + strings.TrimSpace(t.VersioningMode), + t.DeprecatedAt, + ) + filtered = append(filtered, row) + topicMap[id] = row.FullTopic + } + sort.Slice(filtered, func(i, j int) bool { return filtered[i].FullTopic < filtered[j].FullTopic }) + return filtered, topicMap, nil +} + +func repositoryQueryForTopics(tenantUUID, namespace string) eventfabrepo.QueryContext { + return eventfabrepo.QueryContext{ + Filter: eventfabrepo.TopicFilter{ + TenantID: tenantUUID, + Namespace: namespace, + IncludeShared: true, + }, + Page: eventfabrepo.PageOptions{ + Limit: 200, + Offset: 0, + }, + Sort: eventfabrepo.SortOption{ + Field: "created_at", + Desc: true, + }, + } +} + +type topicStatusCountRow struct { + TopicUUID uuid.UUID + Status string + Count int64 +} + +func (h *AdminOverviewHandler) queryDLQCounts(c *gin.Context, tenantUUID string, topicMap map[string]string) ([]groupedCount, error) { + if len(topicMap) == 0 { + return []groupedCount{}, nil + } + ctx := c.Request.Context() + + topicUUIDs := keysUUID(topicMap) + + var rows []topicStatusCountRow + if err := h.db.WithContext(ctx). + Model(&eventfabricmodel.DlqMessage{}). + Select("topic_uuid as topic_uuid, status, count(*) as count"). + Where("tenant_key = ? AND topic_uuid IN ?", tenantUUID, topicUUIDs). + Group("topic_uuid, status"). 
+ Scan(&rows).Error; err != nil { + return nil, err + } + + return toGrouped(rows, topicMap), nil +} + +func (h *AdminOverviewHandler) queryAttemptCounts(c *gin.Context, tenantUUID, subscriberID string, topicMap map[string]string) ([]groupedCount, error) { + if subscriberID == "" || len(topicMap) == 0 { + return []groupedCount{}, nil + } + ctx := c.Request.Context() + + topicUUIDs := keysUUID(topicMap) + attemptTable := (&eventfabricmodel.DeliveryAttempt{}).TableName() + envelopeTable := (&eventfabricmodel.EventEnvelope{}).TableName() + + var rows []topicStatusCountRow + if err := h.db.WithContext(ctx). + Table(attemptTable+" as a"). + Select("e.topic_uuid as topic_uuid, a.status, count(*) as count"). + Joins("JOIN "+envelopeTable+" as e ON e.uuid = a.envelope_uuid"). + Where("a.tenant_key = ? AND a.subscriber_id = ? AND e.topic_uuid IN ?", tenantUUID, subscriberID, topicUUIDs). + Group("e.topic_uuid, a.status"). + Scan(&rows).Error; err != nil { + return nil, err + } + return toGrouped(rows, topicMap), nil +} + +func (h *AdminOverviewHandler) queryReplayTasks(c *gin.Context, tenantUUID string, topicMap map[string]string, limit int) ([]overviewReplayTaskDTO, error) { + ctx := c.Request.Context() + + replayTable := (&eventfabricmodel.ReplayRequest{}).TableName() + topicTable := (&eventfabricmodel.TopicDefinition{}).TableName() + + query := h.db.WithContext(ctx). + Table(replayTable+" as r"). + Select(strings.Join([]string{ + "r.uuid as id", + "r.topic_uuid as topic_uuid", + "t.full_topic as full_topic", + "r.trace_id as trace_id", + "r.status as status", + "r.shadow as shadow", + "r.issued_by as requested_by", + "r.submitted_at as submitted_at", + "r.completed_at as completed_at", + "r.failure_reason as failure_reason", + "r.result_count as result_count", + }, ", ")). + Joins("LEFT JOIN "+topicTable+" as t ON t.uuid = r.topic_uuid"). + Where("r.tenant_key = ?", tenantUUID). + Order("r.submitted_at DESC"). 
+ Limit(limit) + + if len(topicMap) > 0 { + query = query.Where("r.topic_uuid IN ?", keysUUID(topicMap)) + } + + type row struct { + ID string + TopicUUID uuid.UUID + FullTopic string + TraceID string + Status string + Shadow bool + RequestedBy string + SubmittedAt time.Time + CompletedAt *time.Time + FailureReason string + ResultCount int + } + var rows []row + if err := query.Scan(&rows).Error; err != nil { + return nil, err + } + + out := make([]overviewReplayTaskDTO, 0, len(rows)) + for _, r := range rows { + topicID := r.TopicUUID.String() + item := overviewReplayTaskDTO{ + ID: r.ID, + TopicUUID: topicID, + FullTopic: strings.TrimSpace(r.FullTopic), + TraceID: strings.TrimSpace(r.TraceID), + Status: r.Status, + Shadow: r.Shadow, + RequestedBy: strings.TrimSpace(r.RequestedBy), + SubmittedAt: r.SubmittedAt.UTC().Format(time.RFC3339), + FailureReason: strings.TrimSpace(r.FailureReason), + ResultCount: r.ResultCount, + } + if item.FullTopic == "" { + item.FullTopic = topicMap[topicID] + } + if r.CompletedAt != nil { + item.CompletedAt = r.CompletedAt.UTC().Format(time.RFC3339) + } + out = append(out, item) + } + return out, nil +} + +func keys(m map[string]string) []string { + out := make([]string, 0, len(m)) + for k := range m { + out = append(out, k) + } + sort.Strings(out) + return out +} + +func keysUUID(m map[string]string) []uuid.UUID { + out := make([]uuid.UUID, 0, len(m)) + for k := range m { + if id, err := uuid.Parse(strings.TrimSpace(k)); err == nil { + out = append(out, id) + } + } + sort.Slice(out, func(i, j int) bool { return out[i].String() < out[j].String() }) + return out +} + +func toGrouped(rows []topicStatusCountRow, topicMap map[string]string) []groupedCount { + by := map[string]*groupedCount{} + for _, r := range rows { + id := r.TopicUUID.String() + if id == uuid.Nil.String() { + continue + } + if id == "" { + continue + } + g, ok := by[id] + if !ok { + g = &groupedCount{ + TopicUUID: id, + FullTopic: topicMap[id], + ByStatus: 
map[string]int64{}, + } + by[id] = g + } + st := strings.ToLower(strings.TrimSpace(r.Status)) + if st == "" { + st = "unknown" + } + g.ByStatus[st] += r.Count + g.Total += r.Count + } + + out := make([]groupedCount, 0, len(by)) + for _, v := range by { + out = append(out, *v) + } + sort.Slice(out, func(i, j int) bool { return out[i].FullTopic < out[j].FullTopic }) + return out +} + +func sumGrouped(items []groupedCount) int64 { + var total int64 + for _, it := range items { + total += it.Total + } + return total +} diff --git a/backend/internal/transport/http/admin/event_fabric/replay_handler.go b/backend/internal/transport/http/admin/event_fabric/replay_handler.go index c16422b2..5b1dee0d 100644 --- a/backend/internal/transport/http/admin/event_fabric/replay_handler.go +++ b/backend/internal/transport/http/admin/event_fabric/replay_handler.go @@ -2,11 +2,14 @@ package eventfabric import ( "context" + "errors" + "fmt" "net/http" "strings" "time" "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/replay" + sharedsvc "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/shared" "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" "github.com/ArtisanCloud/PowerX/pkg/dto" "github.com/gin-gonic/gin" @@ -74,18 +77,21 @@ func (h *AdminReplayHandler) CreateTask(c *gin.Context) { return } + normalizedTenant := strings.TrimSpace(tenantUUID) + normalizedTopic := strings.TrimSpace(req.Topic) + operatorID := resolveReplayOperator(c.Request.Context(), strings.TrimSpace(req.OperatorID)) task, err := h.service.CreateTask(c.Request.Context(), replay.CreateTaskInput{ - TenantKey: strings.TrimSpace(tenantUUID), - Topic: strings.TrimSpace(req.Topic), + TenantKey: normalizedTenant, + Topic: normalizedTopic, TraceID: strings.TrimSpace(req.TraceID), WindowStart: start, WindowEnd: end, Reason: strings.TrimSpace(req.Reason), - Operator: strings.TrimSpace(req.OperatorID), + Operator: operatorID, Shadow: req.Shadow, }) if err != nil { - dto.RespondErrorFrom(c, 
dto.NewInternal("create replay task failed", err)) + dto.RespondErrorFrom(c, mapReplayCreateTaskError(err, normalizedTenant, normalizedTopic)) return } dto.ResponseSuccess(c, taskToDTO(task)) @@ -143,6 +149,52 @@ type replayTaskDTO struct { ResultCount int `json:"result_count"` } +func mapReplayCreateTaskError(err error, tenantUUID string, topic string) error { + if err == nil { + return nil + } + errMsg := strings.ToLower(strings.TrimSpace(err.Error())) + if strings.Contains(errMsg, "topic tenant mismatch") { + wrapped := fmt.Errorf("jwt_tenant=%s, topic=%s: %w", tenantUUID, topic, err) + return dto.WithCode(dto.NewBadRequest("topic 租户与当前登录租户不一致", wrapped), sharedsvc.ErrorCodeTenantMismatch) + } + if errors.Is(err, sharedsvc.ErrUnauthorized) { + return dto.WithCode(dto.NewForbidden("unauthorized", err), sharedsvc.ErrorCodeUnauthorized) + } + if strings.Contains(errMsg, "topic") && strings.Contains(errMsg, "not found") { + wrapped := fmt.Errorf("jwt_tenant=%s, topic=%s: %w", tenantUUID, topic, err) + return dto.NewNotFound("topic 未注册或当前环境不可用", wrapped) + } + return dto.NewInternal("create replay task failed", err) +} + +func resolveReplayOperator(ctx context.Context, requested string) string { + requested = strings.TrimSpace(requested) + if requested != "" && strings.Contains(requested, ":") { + return requested + } + claims := reqctx.GetClaims(ctx) + if claims != nil { + for _, role := range claims.Roles { + role = strings.TrimSpace(role) + if role == "" { + continue + } + if strings.HasPrefix(role, "role:") { + return role + } + return "role:" + role + } + } + if requested != "" { + return requested + } + if reqctx.IsRoot(ctx) { + return "role:role_admin" + } + return strings.TrimSpace(reqctx.GetSubject(ctx)) +} + func taskToDTO(task *replay.Task) replayTaskDTO { if task == nil { return replayTaskDTO{} diff --git a/backend/internal/transport/http/admin/event_fabric/retry_handler.go b/backend/internal/transport/http/admin/event_fabric/retry_handler.go new file 
mode 100644 index 00000000..492d25bc --- /dev/null +++ b/backend/internal/transport/http/admin/event_fabric/retry_handler.go @@ -0,0 +1,241 @@ +package eventfabric + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/ArtisanCloud/PowerX/internal/app/shared" + internalbus "github.com/ArtisanCloud/PowerX/internal/event_bus" + deliverysvc "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/delivery" + eventfabricmodel "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/event_fabric" + "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" + "github.com/ArtisanCloud/PowerX/pkg/dto" + "github.com/gin-gonic/gin" + "github.com/google/uuid" +) + +type AdminRetryTaskHandler struct { + deps *shared.Deps +} + +type createRetryTaskRequest struct { + Topic string `json:"topic"` + SubscriberID string `json:"subscriber_id"` + Reason string `json:"reason"` + Immediate *bool `json:"immediate"` + Payload map[string]any `json:"payload"` +} + +func NewAdminRetryTaskHandler(deps *shared.Deps) *AdminRetryTaskHandler { + return &AdminRetryTaskHandler{deps: deps} +} + +type retryTaskStatusDTO struct { + DeliveryID string `json:"delivery_id"` + EventID string `json:"event_id"` + Topic string `json:"topic"` + SubscriberID string `json:"subscriber_id"` + TenantKey string `json:"tenant_key"` + Status string `json:"status"` + LastErrorCode string `json:"last_error_code"` + NackReason string `json:"nack_reason"` + ScheduledAt *time.Time `json:"scheduled_at,omitempty"` + LastAttemptAt *time.Time `json:"last_attempt_at,omitempty"` + AckedAt *time.Time `json:"acked_at,omitempty"` +} + +func (h *AdminRetryTaskHandler) CreateTask(c *gin.Context) { + if h == nil || h.deps == nil || h.deps.EventFabric == nil || h.deps.EventFabric.Delivery == nil || h.deps.DB == nil { + dto.RespondErrorFrom(c, dto.NewInternal("retry service unavailable", nil)) + return + } + tenantUUID, err := reqctx.RequireTenantUUIDFromGin(c) + if err != nil { + dto.RespondErrorFrom(c, 
dto.NewUnauthorized("tenant context missing", err)) + return + } + + var req createRetryTaskRequest + if err := c.ShouldBindJSON(&req); err != nil { + dto.RespondErrorFrom(c, dto.NewBadRequest("invalid request payload", err)) + return + } + + topic := strings.TrimSpace(req.Topic) + if topic == "" { + topic = internalbus.TopicSystemNotification + } + reason := strings.TrimSpace(req.Reason) + if reason == "" { + reason = "manual retry sample" + } + + eventID := fmt.Sprintf("retry.seed.%s.%d", strings.TrimSpace(tenantUUID), time.Now().UTC().UnixMilli()) + traceID := strings.TrimSpace(reqctx.GetTraceID(c.Request.Context())) + payload := req.Payload + if payload == nil { + payload = map[string]any{ + "source": "monitor.retry-seed", + "ts": time.Now().UTC().Format(time.RFC3339), + } + } + payloadBytes, marshalErr := json.Marshal(payload) + if marshalErr != nil { + dto.RespondErrorFrom(c, dto.NewBadRequest("invalid payload", marshalErr)) + return + } + + publishErr := h.deps.EventFabric.Delivery.Publish(c.Request.Context(), deliverysvc.PublishRequest{ + TenantUUID: tenantUUID, + Topic: topic, + EventID: eventID, + TraceID: traceID, + Version: "v1", + Payload: payloadBytes, + PayloadFormat: "json", + Attributes: map[string]string{ + "source": "admin.event_fabric.retry.seed", + }, + }) + if publishErr != nil { + dto.RespondErrorFrom(c, dto.NewInternal("publish retry seed failed", publishErr)) + return + } + + var attempts []eventfabricmodel.DeliveryAttempt + if err := h.deps.DB.WithContext(c.Request.Context()). + Where("tenant_key = ? AND event_id = ?", strings.TrimSpace(tenantUUID), eventID). + Order("created_at ASC"). 
+ Find(&attempts).Error; err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("query delivery attempts failed", err)) + return + } + if len(attempts) == 0 { + dto.RespondErrorFrom(c, dto.NewNotFound("no delivery attempts generated for retry seed", nil)) + return + } + + targetSubscriber := strings.TrimSpace(req.SubscriberID) + if targetSubscriber == "" { + targetSubscriber = attempts[0].SubscriberID + } + + var targetAttempt *eventfabricmodel.DeliveryAttempt + for idx := range attempts { + if strings.TrimSpace(attempts[idx].SubscriberID) == targetSubscriber { + targetAttempt = &attempts[idx] + break + } + } + if targetAttempt == nil { + dto.RespondErrorFrom(c, dto.NewBadRequest("subscriber not found in generated attempts", nil)) + return + } + + plan, nackErr := h.deps.EventFabric.Delivery.Nack( + c.Request.Context(), + targetAttempt.UUID.String(), + targetAttempt.SubscriberID, + reason, + ) + if nackErr != nil { + dto.RespondErrorFrom(c, dto.NewInternal("create retry seed failed", nackErr)) + return + } + + immediate := false + if req.Immediate != nil { + immediate = *req.Immediate + } + retryAt := time.Now().UTC().Add(plan.NextDelay) + if immediate { + if h.deps.EventFabric != nil && h.deps.EventFabric.Scheduler != nil { + _, scheduleErr := h.deps.EventFabric.Scheduler.Schedule(c.Request.Context(), deliverysvc.ScheduleOptions{ + TenantKey: targetAttempt.TenantKey, + SubscriberID: targetAttempt.SubscriberID, + EventID: targetAttempt.EventID, + EnvelopeUUID: targetAttempt.EnvelopeUUID.String(), + Attempt: targetAttempt.DeliveryNo, + BaseDelay: 0, + Metadata: map[string]string{ + "attempt_uuid": targetAttempt.UUID.String(), + }, + }) + if scheduleErr != nil { + dto.RespondErrorFrom(c, dto.NewInternal("schedule retry seed immediate failed", scheduleErr)) + return + } + } + retryAt = time.Now().UTC() + } + + dto.ResponseSuccess(c, gin.H{ + "event_id": eventID, + "delivery_id": targetAttempt.UUID.String(), + "topic": topic, + "subscriber_id": 
targetAttempt.SubscriberID, + "tenant_key": targetAttempt.TenantKey, + "retry_after_seconds": int(plan.NextDelay.Seconds()), + "retry_at": retryAt.Format(time.RFC3339), + "immediate": immediate, + "remaining_attempts": plan.RemainingAttempts, + "max_attempts": plan.MaxAttempts, + "trace_id": traceID, + }) +} + +func (h *AdminRetryTaskHandler) GetTask(c *gin.Context) { + if h == nil || h.deps == nil || h.deps.DB == nil { + dto.RespondErrorFrom(c, dto.NewInternal("retry service unavailable", nil)) + return + } + deliveryID := strings.TrimSpace(c.Param("delivery_id")) + if deliveryID == "" { + dto.RespondErrorFrom(c, dto.NewBadRequest("delivery_id required", nil)) + return + } + deliveryUUID, err := uuid.Parse(deliveryID) + if err != nil { + dto.RespondErrorFrom(c, dto.NewBadRequest("invalid delivery_id", err)) + return + } + + var attempt eventfabricmodel.DeliveryAttempt + if err := h.deps.DB.WithContext(c.Request.Context()). + Where("uuid = ?", deliveryUUID). + First(&attempt).Error; err != nil { + dto.RespondErrorFrom(c, dto.NewNotFound("retry delivery not found", err)) + return + } + + var envelope eventfabricmodel.EventEnvelope + _ = h.deps.DB.WithContext(c.Request.Context()). + Where("uuid = ?", attempt.EnvelopeUUID). + First(&envelope).Error + + var topic eventfabricmodel.TopicDefinition + topicFullName := "" + if envelope.TopicUUID != uuid.Nil { + if err := h.deps.DB.WithContext(c.Request.Context()). + Where("uuid = ?", envelope.TopicUUID). 
+ First(&topic).Error; err == nil { + topicFullName = strings.TrimSpace(topic.FullTopic) + } + } + + dto.ResponseSuccess(c, retryTaskStatusDTO{ + DeliveryID: attempt.UUID.String(), + EventID: attempt.EventID, + Topic: topicFullName, + SubscriberID: attempt.SubscriberID, + TenantKey: attempt.TenantKey, + Status: attempt.Status, + LastErrorCode: attempt.LastErrorCode, + NackReason: attempt.NackReason, + ScheduledAt: attempt.ScheduledAt, + LastAttemptAt: attempt.LastAttemptAt, + AckedAt: attempt.AckedAt, + }) +} diff --git a/backend/internal/transport/http/admin/event_fabric/routes.go b/backend/internal/transport/http/admin/event_fabric/routes.go index 4ce6aeef..cd51bd4b 100644 --- a/backend/internal/transport/http/admin/event_fabric/routes.go +++ b/backend/internal/transport/http/admin/event_fabric/routes.go @@ -2,42 +2,80 @@ package eventfabric import ( "github.com/ArtisanCloud/PowerX/internal/app/shared" + workers "github.com/ArtisanCloud/PowerX/internal/app/shared/workers" + "github.com/ArtisanCloud/PowerX/internal/service/event_fabric/directory" + adminnotifications "github.com/ArtisanCloud/PowerX/internal/transport/http/admin/notifications" "github.com/gin-gonic/gin" + "github.com/redis/go-redis/v9" ) // RegisterAPIRoutes 注册事件骨干相关 Admin API。 func RegisterAPIRoutes(_ *gin.RouterGroup, protected *gin.RouterGroup, deps *shared.Deps) { - if deps == nil || deps.EventFabric == nil { + if deps == nil { return } + // 兼容两套路由: + // 1) 历史/插件侧:/event-fabric/*(可选签名校验) + // 2) Web Admin:/admin/event-fabric/*(仅走 Bearer 鉴权 + Root 可见菜单) group := protected.Group("/event-fabric") - if deps.EventFabric.Security != nil { + if deps.EventFabric != nil && deps.EventFabric.Security != nil { group.Use(deps.EventFabric.Security.GinMiddleware()) } + adminGroup := protected.Group("/admin/event-fabric") - if deps.EventFabric.Directory != nil { + if deps.EventFabric != nil && deps.EventFabric.Directory != nil { dirHandler := NewAdminDirectoryHandler(AdminDirectoryHandlerOptions{Service: 
deps.EventFabric.Directory}) group.POST("/topics", dirHandler.CreateTopic) group.GET("/topics", dirHandler.ListTopics) group.PATCH("/topics/:topic_id/lifecycle", dirHandler.UpdateLifecycle) + + // Web Admin:Topic 管理(CRUD)+ 列表筛选 + adminGroup.POST("/topics", dirHandler.CreateTopic) + adminGroup.GET("/topics", dirHandler.ListTopics) + adminGroup.PATCH("/topics/:topic_id/lifecycle", dirHandler.UpdateLifecycle) } - if deps.EventFabric.ACL != nil && deps.EventFabric.Directory != nil { + if deps.EventFabric != nil && deps.EventFabric.ACL != nil && deps.EventFabric.Directory != nil { aclHandler := NewAdminACLHandler(AdminACLHandlerOptions{ Service: deps.EventFabric.ACL, Directory: deps.EventFabric.Directory, }) group.POST("/acl", aclHandler.UpsertBindings) group.GET("/acl", aclHandler.ListBindings) + adminGroup.POST("/acl", aclHandler.UpsertBindings) + adminGroup.GET("/acl", aclHandler.ListBindings) + adminGroup.GET("/acl/topic-matrix", aclHandler.ListTopicRoleMatrix) + adminGroup.GET("/acl/principal-matrix", aclHandler.ListPrincipalTopicMatrix) } - if deps.EventFabric.Delivery != nil { + if deps.EventFabric != nil && deps.EventFabric.Delivery != nil { deliveryHandler := NewAdminDeliveryHandler(AdminDeliveryHandlerOptions{Service: deps.EventFabric.Delivery}) group.POST("/events:publish", deliveryHandler.PublishEvent) } - if deps.EventFabric.Authorization != nil && deps.EventFabric.Authorization.Service != nil { + overviewHandler := NewAdminOverviewHandler(AdminOverviewHandlerOptions{ + DB: deps.DB, + Directory: func() *directory.DirectoryService { + if deps.EventFabric != nil { + return deps.EventFabric.Directory + } + return nil + }(), + Redis: func() *redis.Client { + if deps.EventFabric != nil { + return deps.EventFabric.RedisClient + } + return nil + }(), + Enabled: deps.EventFabric != nil, + }) + group.GET("/overview", overviewHandler.GetOverview) + adminGroup.GET("/overview", overviewHandler.GetOverview) + adminGroup.GET("/task-queue/stats", 
overviewHandler.GetTaskQueueStats) + adminGroup.GET("/task-queue/messages", overviewHandler.GetTaskQueueMessages) + + if deps.EventFabric != nil && deps.EventFabric.Authorization != nil && deps.EventFabric.Authorization.Service != nil { authHandler := NewAuthorizationHandler(AuthorizationHandlerOptions{ Service: deps.EventFabric.Authorization.Service, Templates: deps.EventFabric.Authorization.Templates, @@ -59,17 +97,51 @@ func RegisterAPIRoutes(_ *gin.RouterGroup, protected *gin.RouterGroup, deps *sha group.POST("/grant-templates/:templateId/apply", authHandler.ApplyTemplate) } - if deps.EventFabric.DLQ != nil { + if deps.EventFabric != nil && deps.EventFabric.DLQ != nil { dlqHandler := NewAdminDLQHandler(AdminDLQHandlerOptions{Service: deps.EventFabric.DLQ}) group.GET("/dlq/messages", dlqHandler.ListMessages) group.POST("/dlq/messages:replay", dlqHandler.ReplayMessages) group.DELETE("/dlq/messages", dlqHandler.PurgeMessages) + + // Web Admin 只暴露 DLQ 列表 + replay(不暴露 purge) + adminGroup.GET("/dlq/messages", dlqHandler.ListMessages) + adminGroup.POST("/dlq/messages:replay", dlqHandler.ReplayMessages) } - if deps.EventFabric.Replay != nil { + if deps.EventFabric != nil && deps.EventFabric.Replay != nil { replayHandler := NewAdminReplayHandler(AdminReplayHandlerOptions{Service: deps.EventFabric.Replay}) group.POST("/replay/tasks", replayHandler.CreateTask) group.GET("/replay/tasks/:task_id", replayHandler.GetTask) group.POST("/replay/tasks/:task_id/cancel", replayHandler.CancelTask) + + adminGroup.POST("/replay/tasks", replayHandler.CreateTask) + adminGroup.GET("/replay/tasks/:task_id", replayHandler.GetTask) + adminGroup.POST("/replay/tasks/:task_id/cancel", replayHandler.CancelTask) + } + + if deps.EventFabric != nil && deps.EventFabric.TaskDriver != nil { + notificationsHandler := adminnotifications.NewHandler(deps) + adminGroup.POST("/pipeline/tasks", notificationsHandler.PushTestNotificationQueue) + retryTaskHandler := NewAdminRetryTaskHandler(deps) + 
adminGroup.POST("/retry/tasks", retryTaskHandler.CreateTask) + adminGroup.GET("/retry/tasks/:delivery_id", retryTaskHandler.GetTask) + } + + if deps.EventFabric != nil { + var retryWorker *workers.EventFabricRetryWorker + var authWorker *workers.EventFabricAuthorizationTimeoutTaskWorker + retryWorker = deps.EventFabric.RetryWorker + if deps.EventFabric.Authorization != nil { + authWorker = deps.EventFabric.Authorization.TimeoutTaskWorker + } + + cronHandler := NewAdminCronHandler(AdminCronHandlerOptions{ + RetryWorker: retryWorker, + AuthorizationWorker: authWorker, + }) + adminGroup.GET("/cron/jobs", cronHandler.ListJobs) + adminGroup.POST("/cron/jobs/:job_id/run-now", cronHandler.RunNow) + adminGroup.POST("/cron/jobs/:job_id/pause", cronHandler.PauseJob) + adminGroup.POST("/cron/jobs/:job_id/resume", cronHandler.ResumeJob) } } diff --git a/backend/internal/transport/http/admin/iam/department_handler.go b/backend/internal/transport/http/admin/iam/department_handler.go index ea8ed74a..f0e8298a 100644 --- a/backend/internal/transport/http/admin/iam/department_handler.go +++ b/backend/internal/transport/http/admin/iam/department_handler.go @@ -154,6 +154,9 @@ func (h *DepartmentHandler) Tree(c *gin.Context) { dto.ResponseError(c, http.StatusInternalServerError, "查询部门树失败", err) return } + if nodes == nil { + nodes = []*m.Department{} + } dto.ResponseSuccess(c, nodes) } diff --git a/backend/internal/transport/http/admin/iam/permission_handler.go b/backend/internal/transport/http/admin/iam/permission_handler.go index 4216380b..61e9a146 100644 --- a/backend/internal/transport/http/admin/iam/permission_handler.go +++ b/backend/internal/transport/http/admin/iam/permission_handler.go @@ -37,15 +37,16 @@ func (h *PermissionHandler) Register(c *gin.Context) { } type PermissionListQuery struct { - Plugin string `form:"plugin"` - Resource string `form:"resource"` - Module string `form:"module"` - Type string `form:"type"` - Keyword string `form:"keyword"` - Status string 
`form:"status"` - Page int `form:"page,default=1"` - Size int `form:"size,default=50"` - Sort string `form:"sort"` // "plugin asc, resource asc" + Module string `form:"module"` + Plugin string `form:"plugin"` + Resource string `form:"resource"` + MetaModule string `form:"meta_module"` + Type string `form:"type"` + Keyword string `form:"keyword"` + Status string `form:"status"` + Page int `form:"page,default=1"` + Size int `form:"size,default=50"` + Sort string `form:"sort"` // "plugin asc, resource asc" } func (h *PermissionHandler) List(c *gin.Context) { @@ -56,12 +57,13 @@ func (h *PermissionHandler) List(c *gin.Context) { } filter := map[string]string{ - "plugin": strings.TrimSpace(q.Plugin), - "resource": strings.TrimSpace(q.Resource), - "module": strings.TrimSpace(q.Module), - "type": strings.TrimSpace(q.Type), - "status": strings.TrimSpace(q.Status), - "keyword": strings.TrimSpace(q.Keyword), + "module": strings.TrimSpace(q.Module), + "plugin": strings.TrimSpace(q.Plugin), + "resource": strings.TrimSpace(q.Resource), + "meta_module": strings.TrimSpace(q.MetaModule), + "type": strings.TrimSpace(q.Type), + "status": strings.TrimSpace(q.Status), + "keyword": strings.TrimSpace(q.Keyword), } rows, total, err := h.svc.ListPermissions(c.Request.Context(), filter, q.Page, q.Size, q.Sort) diff --git a/backend/internal/transport/http/admin/iam/rbac_handler.go b/backend/internal/transport/http/admin/iam/rbac_handler.go index 98ac4850..68c4da9f 100644 --- a/backend/internal/transport/http/admin/iam/rbac_handler.go +++ b/backend/internal/transport/http/admin/iam/rbac_handler.go @@ -20,7 +20,8 @@ type grantIDsReq struct { } type grantTriplesReq struct { Items []struct { - Plugin string `json:"plugin" binding:"required"` + Module string `json:"module"` + Plugin string `json:"plugin"` Resource string `json:"resource" binding:"required"` Action string `json:"action" binding:"required"` } `json:"items" binding:"required,min=1"` @@ -83,7 +84,7 @@ func (h *RBACHandler) 
GrantByIDs(c *gin.Context) { dto.ResponseSuccess(c, gin.H{"granted": len(req.PermIDs)}) } -// 角色授予权限(通过 triples:plugin/resource/action) +// 角色授予权限(通过 triples:module/resource/action) // POST /api/v1/admin/iam/roles/:id/permissions/grant func (h *RBACHandler) GrantByTriples(c *gin.Context) { roleID, _ := strconv.ParseUint(c.Param("id"), 10, 64) @@ -98,7 +99,15 @@ func (h *RBACHandler) GrantByTriples(c *gin.Context) { } ts := make([]iamsvc.PermTriple, 0, len(req.Items)) for _, it := range req.Items { - ts = append(ts, iamsvc.PermTriple{Plugin: it.Plugin, Resource: it.Resource, Action: it.Action}) + module := strings.TrimSpace(it.Module) + if module == "" { + module = strings.TrimSpace(it.Plugin) + } + if module == "" { + dto.ResponseError(c, http.StatusBadRequest, "参数绑定失败", errors.New("module is required")) + return + } + ts = append(ts, iamsvc.PermTriple{Module: module, Resource: it.Resource, Action: it.Action}) } if err := h.svc.GrantPermsByTriples(c.Request.Context(), actor, roleID, ts); err != nil { dto.ResponseError(c, http.StatusBadRequest, "授予权限失败", err) @@ -193,20 +202,9 @@ func (h *RBACHandler) CheckPermission(c *gin.Context) { if !ok { return } - tenantUUID := strings.TrimSpace(c.Query("tenant_uuid")) - if tenantUUID == "" { - var okTenant bool - tenantUUID, okTenant = requireTenantUUIDFromContext(c) - if !okTenant { - return - } - } else { - canonical, err := reqctx.CanonicalTenantUUID(tenantUUID) - if err != nil { - dto.ResponseError(c, http.StatusBadRequest, "tenant_uuid must be valid", err) - return - } - tenantUUID = canonical + tenantUUID, ok := requireTenantUUIDFromContext(c) + if !ok { + return } memberID, _ := strconv.ParseUint(c.Query("member_id"), 10, 64) // 可选,不传用上下文 diff --git a/backend/internal/transport/http/admin/integration_gateway/apikey_handler.go b/backend/internal/transport/http/admin/integration_gateway/apikey_handler.go new file mode 100644 index 00000000..e1bef032 --- /dev/null +++ 
b/backend/internal/transport/http/admin/integration_gateway/apikey_handler.go @@ -0,0 +1,1225 @@ +package integration_gateway + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "net/http" + "sort" + "strconv" + "strings" + "time" + + apikeycache "github.com/ArtisanCloud/PowerX/internal/service/integration_gateway/apikeycache" + apikeypermissions "github.com/ArtisanCloud/PowerX/internal/service/integration_gateway/apikeypermissions" + iamrepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/iam" + repo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/integration_gateway" + "github.com/ArtisanCloud/PowerX/pkg/utils" + "gorm.io/gorm" + + modelsiam "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/iam" + models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/integration_gateway" + "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" + "github.com/ArtisanCloud/PowerX/pkg/dto" + "github.com/ArtisanCloud/PowerX/pkg/utils/logger" + "github.com/gin-gonic/gin" + "github.com/google/uuid" +) + +type APIKeyAdminHandler struct { + db *gorm.DB + keys *repo.IntegrationGatewayAPIKeyRepository + perms *repo.IntegrationGatewayAPIKeyPermissionRepository + iamKeys *iamrepo.APIKeyRepository + profiles *iamrepo.APIKeyProfileRepository + permRepo *iamrepo.PermissionRepository + profPerm *iamrepo.APIKeyProfilePermissionRepository +} + +func NewAPIKeyAdminHandler(db *gorm.DB) *APIKeyAdminHandler { + return &APIKeyAdminHandler{ + db: db, + keys: repo.NewIntegrationGatewayAPIKeyRepository(db), + perms: repo.NewIntegrationGatewayAPIKeyPermissionRepository(db), + iamKeys: iamrepo.NewAPIKeyRepository(db), + profiles: iamrepo.NewAPIKeyProfileRepository(db), + permRepo: iamrepo.NewPermissionRepository(db), + profPerm: iamrepo.NewAPIKeyProfilePermissionRepository(db), + } +} + +type createAPIKeyRequest struct { + TenantUUID string `json:"tenant_uuid"` + ProfileID uint64 
`json:"profile_id" binding:"required"` + Name string `json:"name" binding:"required"` + Description string `json:"description"` + ExpiresAt string `json:"expires_at"` +} + +type revokeAPIKeyRequest struct { + TenantUUID string `json:"tenant_uuid"` +} + +type rotateAPIKeyRequest struct { + TenantUUID string `json:"tenant_uuid"` + Name string `json:"name"` + Description string `json:"description"` + ExpiresAt string `json:"expires_at"` +} + +type setAPIKeyProfilePermissionsRequest struct { + PermissionIDs []uint64 `json:"permission_ids"` +} + +type apiKeyPermissionRequest struct { + Scope string `json:"scope"` + Action string `json:"action"` + ResourceType string `json:"resource_type"` + ResourcePattern string `json:"resource_pattern"` + PluginID string `json:"plugin_id"` + Effect string `json:"effect"` +} + +type apiKeyPermissionResponse struct { + Scope string `json:"scope"` + Action string `json:"action"` + ResourceType string `json:"resource_type"` + ResourcePattern string `json:"resource_pattern"` + PluginID string `json:"plugin_id,omitempty"` + Effect string `json:"effect"` +} + +type apiKeyResponse struct { + KeyID string `json:"key_id"` + TenantUUID string `json:"tenant_uuid"` + ProfileID uint64 `json:"profile_id"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + KeyPrefix string `json:"key_prefix"` + Status string `json:"status"` + ExpiresAt *string `json:"expires_at,omitempty"` + LastUsedAt *string `json:"last_used_at,omitempty"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Permissions []apiKeyPermissionResponse `json:"permissions,omitempty"` +} + +type apiKeyProfileResponse struct { + ID uint64 `json:"id"` + TenantUUID string `json:"tenant_uuid"` + OwnerMemberID *uint64 `json:"owner_member_id,omitempty"` + Key string `json:"key"` + Name string `json:"name"` + Status int16 `json:"status"` +} + +type createAPIKeyProfileRequest struct { + TenantUUID string `json:"tenant_uuid"` + Key string 
`json:"key"` + Name string `json:"name"` +} + +type updateAPIKeyProfileRequest struct { + TenantUUID string `json:"tenant_uuid"` + Name string `json:"name"` + Status *int16 `json:"status"` +} + +func (h *APIKeyAdminHandler) CreateAPIKey(c *gin.Context) { + var req createAPIKeyRequest + if err := c.ShouldBindJSON(&req); err != nil { + dto.ResponseValidationError(c, err) + return + } + tenantUUID, err := h.resolveTenantScope(c, "") + if err != nil { + dto.RespondErrorFrom(c, err) + return + } + profile, err := h.profiles.GetById(c.Request.Context(), req.ProfileID, nil) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("load api key profile failed", err)) + return + } + if profile == nil || profile.Status != 1 || !strings.EqualFold(strings.TrimSpace(profile.TenantUUID), tenantUUID) { + dto.RespondErrorFrom(c, dto.NewBadRequest("profile_id invalid or disabled", nil)) + return + } + + expiresAt, err := parseOptionalTime(req.ExpiresAt) + if err != nil { + dto.RespondErrorFrom(c, dto.NewBadRequest("expires_at must be RFC3339", err)) + return + } + + plain, keyHash, keyPrefix := generateAPIKeyMaterial() + perms, err := h.resolveProfileAPIKeyPermissions(c.Request.Context(), req.ProfileID) + if err != nil { + dto.RespondErrorFrom(c, dto.NewBadRequest("profile permissions invalid", err)) + return + } + actor := actorFromHeader(c) + + var created *models.IntegrationGatewayAPIKey + err = h.db.WithContext(c.Request.Context()).Transaction(func(tx *gorm.DB) error { + keyRepo := repo.NewIntegrationGatewayAPIKeyRepository(tx) + permRepo := repo.NewIntegrationGatewayAPIKeyPermissionRepository(tx) + iamKeyRepo := iamrepo.NewAPIKeyRepository(tx) + + item := &models.IntegrationGatewayAPIKey{ + TenantUUID: tenantUUID, + ProfileID: req.ProfileID, + Name: strings.TrimSpace(req.Name), + Description: strings.TrimSpace(req.Description), + KeyPrefix: keyPrefix, + KeyHash: keyHash, + Status: "active", + ExpiresAt: expiresAt, + CreatedBy: actor, + UpdatedBy: actor, + } + var e error + 
created, e = keyRepo.Create(c.Request.Context(), item) + if e != nil { + return e + } + + permissionModels := buildPermissionModels(created.UUID, perms) + if e = permRepo.ReplaceAll(c.Request.Context(), created.UUID, permissionModels); e != nil { + return e + } + + e = iamKeyRepo.Create(c.Request.Context(), &modelsiam.APIKey{ + TenantUUID: tenantUUID, + ProfileID: req.ProfileID, + KeyHash: keyHash, + CreatedAtMs: time.Now().UnixMilli(), + }) + if e != nil { + return e + } + return nil + }) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("create api key failed", err)) + return + } + + permResps := toPermissionResponses(buildPermissionModels(created.UUID, perms)) + resp := toAPIKeyResponse(*created, permResps) + _ = apikeycache.InvalidateAll(c.Request.Context()) + dto.ResponseSuccessWithStatus(c, http.StatusCreated, gin.H{ + "api_key": resp, + "plain_key": plain, + }) +} + +func (h *APIKeyAdminHandler) ListAPIKeys(c *gin.Context) { + canonical, err := h.resolveTenantScope(c, "") + if err != nil { + dto.RespondErrorFrom(c, err) + return + } + page, _ := strconv.Atoi(c.DefaultQuery("page", "1")) + pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "20")) + if page <= 0 { + page = 1 + } + if pageSize <= 0 { + pageSize = 20 + } + + items, total, err := h.keys.ListByTenant(c.Request.Context(), canonical, (page-1)*pageSize, pageSize) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("list api keys failed", err)) + return + } + + respItems := make([]apiKeyResponse, 0, len(items)) + for i := range items { + perms, _ := h.perms.ListByAPIKeyUUID(c.Request.Context(), items[i].UUID) + respItems = append(respItems, toAPIKeyResponse(items[i], toPermissionResponses(perms))) + } + dto.ResponseSuccess(c, dto.ListResponse{ + Items: respItems, + Pagination: &dto.PaginationResponse{ + Total: total, + Page: page, + PageSize: pageSize, + }, + }) +} + +func (h *APIKeyAdminHandler) GetAPIKey(c *gin.Context) { + keyID, err := 
uuid.Parse(strings.TrimSpace(c.Param("key_id"))) + if err != nil { + dto.RespondErrorFrom(c, dto.NewBadRequest("invalid key_id", err)) + return + } + item, err := h.keys.GetByUUID(c.Request.Context(), keyID) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("get api key failed", err)) + return + } + if item == nil { + dto.RespondErrorFrom(c, dto.NewNotFound("api key not found", nil)) + return + } + canonical, err := h.resolveTenantScope(c, item.TenantUUID) + if err != nil || !strings.EqualFold(strings.TrimSpace(item.TenantUUID), canonical) { + dto.RespondErrorFrom(c, dto.NewNotFound("api key not found", nil)) + return + } + perms, _ := h.perms.ListByAPIKeyUUID(c.Request.Context(), item.UUID) + dto.ResponseSuccess(c, toAPIKeyResponse(*item, toPermissionResponses(perms))) +} + +func (h *APIKeyAdminHandler) RevokeAPIKey(c *gin.Context) { + keyID, err := uuid.Parse(strings.TrimSpace(c.Param("key_id"))) + if err != nil { + dto.RespondErrorFrom(c, dto.NewBadRequest("invalid key_id", err)) + return + } + var req revokeAPIKeyRequest + if err = c.ShouldBindJSON(&req); err != nil { + dto.ResponseValidationError(c, err) + return + } + canonical, err := h.resolveTenantScope(c, "") + if err != nil { + dto.RespondErrorFrom(c, err) + return + } + item, err := h.keys.GetByUUID(c.Request.Context(), keyID) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("load api key failed", err)) + return + } + if item == nil || !strings.EqualFold(strings.TrimSpace(item.TenantUUID), canonical) { + dto.RespondErrorFrom(c, dto.NewNotFound("api key not found", nil)) + return + } + + err = h.db.WithContext(c.Request.Context()).Transaction(func(tx *gorm.DB) error { + keyRepo := repo.NewIntegrationGatewayAPIKeyRepository(tx) + iamKeyRepo := iamrepo.NewAPIKeyRepository(tx) + if e := keyRepo.UpdateStatus(c.Request.Context(), item.UUID, "revoked", actorFromHeader(c)); e != nil { + return e + } + if legacy, e := iamKeyRepo.FindByHash(c.Request.Context(), item.KeyHash); e == nil && 
legacy != nil { + return iamKeyRepo.Revoke(c.Request.Context(), legacy.ID, time.Now().UnixMilli()) + } + return nil + }) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("revoke api key failed", err)) + return + } + _ = apikeycache.InvalidateAll(c.Request.Context()) + dto.ResponseSuccess(c, gin.H{"status": "revoked", "key_id": keyID.String()}) +} + +func (h *APIKeyAdminHandler) DeleteAPIKey(c *gin.Context) { + keyID, err := uuid.Parse(strings.TrimSpace(c.Param("key_id"))) + if err != nil { + dto.RespondErrorFrom(c, dto.NewBadRequest("invalid key_id", err)) + return + } + canonical, err := h.resolveTenantScope(c, "") + if err != nil { + dto.RespondErrorFrom(c, err) + return + } + item, err := h.keys.GetByUUID(c.Request.Context(), keyID) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("load api key failed", err)) + return + } + if item == nil || !strings.EqualFold(strings.TrimSpace(item.TenantUUID), canonical) { + dto.RespondErrorFrom(c, dto.NewNotFound("api key not found", nil)) + return + } + + err = h.db.WithContext(c.Request.Context()).Transaction(func(tx *gorm.DB) error { + keyRepo := repo.NewIntegrationGatewayAPIKeyRepository(tx) + iamKeyRepo := iamrepo.NewAPIKeyRepository(tx) + if e := keyRepo.UpdateStatus(c.Request.Context(), item.UUID, "deleted", actorFromHeader(c)); e != nil { + return e + } + if legacy, e := iamKeyRepo.FindByHash(c.Request.Context(), item.KeyHash); e == nil && legacy != nil { + if e = iamKeyRepo.Revoke(c.Request.Context(), legacy.ID, time.Now().UnixMilli()); e != nil { + return e + } + } + return nil + }) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("delete api key failed", err)) + return + } + _ = apikeycache.InvalidateAll(c.Request.Context()) + dto.ResponseSuccess(c, gin.H{"status": "deleted", "key_id": keyID.String()}) +} + +func (h *APIKeyAdminHandler) RotateAPIKey(c *gin.Context) { + keyID, err := uuid.Parse(strings.TrimSpace(c.Param("key_id"))) + if err != nil { + dto.RespondErrorFrom(c, 
dto.NewBadRequest("invalid key_id", err)) + return + } + var req rotateAPIKeyRequest + if err = c.ShouldBindJSON(&req); err != nil { + dto.ResponseValidationError(c, err) + return + } + canonical, err := h.resolveTenantScope(c, "") + if err != nil { + dto.RespondErrorFrom(c, err) + return + } + oldItem, err := h.keys.GetByUUID(c.Request.Context(), keyID) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("load api key failed", err)) + return + } + if oldItem == nil || !strings.EqualFold(strings.TrimSpace(oldItem.TenantUUID), canonical) { + dto.RespondErrorFrom(c, dto.NewNotFound("api key not found", nil)) + return + } + + expiresAt, err := parseOptionalTime(req.ExpiresAt) + if err != nil { + dto.RespondErrorFrom(c, dto.NewBadRequest("expires_at must be RFC3339", err)) + return + } + plain, keyHash, keyPrefix := generateAPIKeyMaterial() + actor := actorFromHeader(c) + perms, err := h.resolveProfileAPIKeyPermissions(c.Request.Context(), oldItem.ProfileID) + if err != nil { + dto.RespondErrorFrom(c, dto.NewBadRequest("profile permissions invalid", err)) + return + } + + var created *models.IntegrationGatewayAPIKey + err = h.db.WithContext(c.Request.Context()).Transaction(func(tx *gorm.DB) error { + keyRepo := repo.NewIntegrationGatewayAPIKeyRepository(tx) + permRepo := repo.NewIntegrationGatewayAPIKeyPermissionRepository(tx) + iamKeyRepo := iamrepo.NewAPIKeyRepository(tx) + + if e := keyRepo.UpdateStatus(c.Request.Context(), oldItem.UUID, "revoked", actor); e != nil { + return e + } + if legacy, e := iamKeyRepo.FindByHash(c.Request.Context(), oldItem.KeyHash); e == nil && legacy != nil { + if e = iamKeyRepo.Revoke(c.Request.Context(), legacy.ID, time.Now().UnixMilli()); e != nil { + return e + } + } + + name := strings.TrimSpace(req.Name) + if name == "" { + name = oldItem.Name + } + desc := strings.TrimSpace(req.Description) + if desc == "" { + desc = oldItem.Description + } + item := &models.IntegrationGatewayAPIKey{ + TenantUUID: oldItem.TenantUUID, + 
ProfileID: oldItem.ProfileID, + Name: name, + Description: desc, + KeyPrefix: keyPrefix, + KeyHash: keyHash, + Status: "active", + ExpiresAt: expiresAt, + CreatedBy: actor, + UpdatedBy: actor, + } + var e error + created, e = keyRepo.Create(c.Request.Context(), item) + if e != nil { + return e + } + if e = permRepo.ReplaceAll(c.Request.Context(), created.UUID, buildPermissionModels(created.UUID, perms)); e != nil { + return e + } + if e = iamKeyRepo.Create(c.Request.Context(), &modelsiam.APIKey{ + TenantUUID: created.TenantUUID, + ProfileID: created.ProfileID, + KeyHash: created.KeyHash, + CreatedAtMs: time.Now().UnixMilli(), + }); e != nil { + return e + } + return nil + }) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("rotate api key failed", err)) + return + } + + permModels := buildPermissionModels(created.UUID, perms) + _ = apikeycache.InvalidateAll(c.Request.Context()) + dto.ResponseSuccess(c, gin.H{ + "api_key": toAPIKeyResponse(*created, toPermissionResponses(permModels)), + "plain_key": plain, + "rotated": keyID.String(), + }) +} + +func (h *APIKeyAdminHandler) ListAPIKeyProfiles(c *gin.Context) { + canonical, err := h.resolveTenantScope(c, "") + if err != nil { + dto.RespondErrorFrom(c, err) + return + } + items, err := h.profiles.ListByTenant(c.Request.Context(), canonical) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("list api key profiles failed", err)) + return + } + resp := make([]apiKeyProfileResponse, 0, len(items)) + for i := range items { + resp = append(resp, apiKeyProfileResponse{ + ID: items[i].ID, + TenantUUID: items[i].TenantUUID, + OwnerMemberID: items[i].OwnerMemberID, + Key: items[i].Key, + Name: items[i].Name, + Status: items[i].Status, + }) + } + dto.ResponseSuccess(c, gin.H{"items": resp}) +} + +func (h *APIKeyAdminHandler) CreateAPIKeyProfile(c *gin.Context) { + var req createAPIKeyProfileRequest + if err := c.ShouldBindJSON(&req); err != nil { + dto.ResponseValidationError(c, err) + return + } + 
canonical, err := h.resolveTenantScope(c, "") + if err != nil { + dto.RespondErrorFrom(c, err) + return + } + var ownerMemberID *uint64 + if memberID := reqctx.GetMemberID(c.Request.Context()); memberID > 0 { + value := memberID + ownerMemberID = &value + } + + const defaultAPIKeyProfilePrefix = "integration.default." + defaultAPIKeyProfileKey := apikeypermissions.DefaultAPIKeyProfileKey + defaultAPIKeyProfileName := apikeypermissions.DefaultAPIKeyProfileName + accountKey := strings.TrimSpace(req.Key) + if accountKey == "" { + accountKey = defaultAPIKeyProfileKey + } + accountName := strings.TrimSpace(req.Name) + if accountName == "" { + accountName = defaultAPIKeyProfileName + } + + if accountKey == defaultAPIKeyProfileKey { + items, listErr := h.profiles.ListByTenant(c.Request.Context(), canonical) + if listErr != nil { + dto.RespondErrorFrom(c, dto.NewInternal("list api key profiles failed", listErr)) + return + } + for i := range items { + currentKey := strings.TrimSpace(items[i].Key) + if currentKey != defaultAPIKeyProfileKey && !strings.HasPrefix(currentKey, defaultAPIKeyProfilePrefix) { + continue + } + changed := false + if items[i].Status != 1 { + items[i].Status = 1 + changed = true + } + if accountName != "" && items[i].Name != accountName { + items[i].Name = accountName + changed = true + } + if items[i].OwnerMemberID == nil && ownerMemberID != nil && *ownerMemberID > 0 { + value := *ownerMemberID + items[i].OwnerMemberID = &value + changed = true + } + if changed { + updated, updateErr := h.profiles.Update(c.Request.Context(), &items[i]) + if updateErr != nil { + dto.RespondErrorFrom(c, dto.NewInternal("enable api key profile failed", updateErr)) + return + } + dto.ResponseSuccess(c, apiKeyProfileResponse{ + ID: updated.ID, + TenantUUID: updated.TenantUUID, + OwnerMemberID: updated.OwnerMemberID, + Key: updated.Key, + Name: updated.Name, + Status: updated.Status, + }) + _ = apikeycache.InvalidateAll(c.Request.Context()) + return + } + 
dto.ResponseSuccess(c, apiKeyProfileResponse{ + ID: items[i].ID, + TenantUUID: items[i].TenantUUID, + OwnerMemberID: items[i].OwnerMemberID, + Key: items[i].Key, + Name: items[i].Name, + Status: items[i].Status, + }) + return + } + } + + existed, err := h.profiles.FindByKey(c.Request.Context(), canonical, accountKey) + if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { + dto.RespondErrorFrom(c, dto.NewInternal("load api key profile failed", err)) + return + } + if existed != nil { + if existed.Status != 1 { + existed.Status = 1 + existed.Name = accountName + if existed.OwnerMemberID == nil && ownerMemberID != nil && *ownerMemberID > 0 { + value := *ownerMemberID + existed.OwnerMemberID = &value + } + updated, updateErr := h.profiles.Update(c.Request.Context(), existed) + if updateErr != nil { + dto.RespondErrorFrom(c, dto.NewInternal("enable api key profile failed", updateErr)) + return + } + dto.ResponseSuccessWithStatus(c, http.StatusCreated, apiKeyProfileResponse{ + ID: updated.ID, + TenantUUID: updated.TenantUUID, + OwnerMemberID: updated.OwnerMemberID, + Key: updated.Key, + Name: updated.Name, + Status: updated.Status, + }) + _ = apikeycache.InvalidateAll(c.Request.Context()) + return + } + dto.ResponseSuccess(c, apiKeyProfileResponse{ + ID: existed.ID, + TenantUUID: existed.TenantUUID, + OwnerMemberID: existed.OwnerMemberID, + Key: existed.Key, + Name: existed.Name, + Status: existed.Status, + }) + return + } + + created, err := h.profiles.Create(c.Request.Context(), &modelsiam.APIKeyProfile{ + TenantUUID: canonical, + OwnerMemberID: ownerMemberID, + Key: accountKey, + Name: accountName, + Status: 1, + }) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("create api key profile failed", err)) + return + } + dto.ResponseSuccessWithStatus(c, http.StatusCreated, apiKeyProfileResponse{ + ID: created.ID, + TenantUUID: created.TenantUUID, + OwnerMemberID: created.OwnerMemberID, + Key: created.Key, + Name: created.Name, + Status: created.Status, + 
}) + _ = apikeycache.InvalidateAll(c.Request.Context()) +} + +func (h *APIKeyAdminHandler) UpdateAPIKeyProfile(c *gin.Context) { + profileID, err := strconv.ParseUint(strings.TrimSpace(c.Param("profile_id")), 10, 64) + if err != nil || profileID == 0 { + dto.RespondErrorFrom(c, dto.NewBadRequest("invalid profile_id", err)) + return + } + var req updateAPIKeyProfileRequest + if err := c.ShouldBindJSON(&req); err != nil { + dto.ResponseValidationError(c, err) + return + } + canonical, err := h.resolveTenantScope(c, "") + if err != nil { + dto.RespondErrorFrom(c, err) + return + } + + item, err := h.profiles.GetById(c.Request.Context(), profileID, nil) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("load api key profile failed", err)) + return + } + if item == nil || !strings.EqualFold(strings.TrimSpace(item.TenantUUID), canonical) { + dto.RespondErrorFrom(c, dto.NewNotFound("api key profile not found", nil)) + return + } + + updated := false + trimmedName := strings.TrimSpace(req.Name) + if trimmedName != "" && trimmedName != item.Name { + item.Name = trimmedName + updated = true + } + if req.Status != nil { + if *req.Status != 0 && *req.Status != 1 { + dto.RespondErrorFrom(c, dto.NewBadRequest("status must be 0 or 1", nil)) + return + } + if item.Status != *req.Status { + item.Status = *req.Status + updated = true + } + } + if !updated { + dto.ResponseSuccess(c, apiKeyProfileResponse{ + ID: item.ID, + TenantUUID: item.TenantUUID, + OwnerMemberID: item.OwnerMemberID, + Key: item.Key, + Name: item.Name, + Status: item.Status, + }) + return + } + + result, err := h.profiles.Update(c.Request.Context(), item) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("update api key profile failed", err)) + return + } + _ = apikeycache.InvalidateAll(c.Request.Context()) + dto.ResponseSuccess(c, apiKeyProfileResponse{ + ID: result.ID, + TenantUUID: result.TenantUUID, + OwnerMemberID: result.OwnerMemberID, + Key: result.Key, + Name: result.Name, + Status: 
result.Status, + }) +} + +type apiKeyPermissionCatalogItem struct { + ID uint64 `json:"id"` + Module string `json:"module"` + Resource string `json:"resource"` + Action string `json:"action"` + Description string `json:"description"` + Status string `json:"status"` + Meta map[string]any `json:"meta,omitempty"` +} + +func (h *APIKeyAdminHandler) ListAPIKeyPermissionCatalog(c *gin.Context) { + if err := h.ensureAPIKeyPermissionTemplates(c.Request.Context()); err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("ensure api key permissions failed", err)) + return + } + rows, _, err := h.permRepo.List(c.Request.Context(), map[string]string{ + "status": string(modelsiam.PermissionStatusActive), + "allow_api_key": "true", + }, 0, 1000, "module ASC, resource ASC, action ASC") + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("list permissions failed", err)) + return + } + items := make([]apiKeyPermissionCatalogItem, 0, len(rows)) + for i := range rows { + meta := parsePermissionMeta(rows[i].Meta) + if _, ok := toAPIKeyPermissionFromPermission(rows[i]); !ok { + continue + } + items = append(items, apiKeyPermissionCatalogItem{ + ID: rows[i].ID, + Module: rows[i].Module, + Resource: rows[i].Resource, + Action: rows[i].Action, + Description: rows[i].Description, + Status: string(rows[i].Status), + Meta: meta, + }) + } + dto.ResponseSuccess(c, gin.H{"items": items}) +} + +func (h *APIKeyAdminHandler) GetAPIKeyProfilePermissions(c *gin.Context) { + profileID, err := strconv.ParseUint(strings.TrimSpace(c.Param("profile_id")), 10, 64) + if err != nil || profileID == 0 { + dto.RespondErrorFrom(c, dto.NewBadRequest("invalid profile_id", err)) + return + } + canonical, err := h.resolveTenantScope(c, "") + if err != nil { + dto.RespondErrorFrom(c, err) + return + } + profile, err := h.profiles.GetById(c.Request.Context(), profileID, nil) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("load api key profile failed", err)) + return + } + if profile == nil || 
!strings.EqualFold(strings.TrimSpace(profile.TenantUUID), canonical) { + dto.RespondErrorFrom(c, dto.NewNotFound("api key profile not found", nil)) + return + } + permissionIDs, err := h.profPerm.ListPermissionIDsOfProfile(c.Request.Context(), profileID) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("list profile permissions failed", err)) + return + } + permissionRows, err := h.permRepo.FindByIDs(c.Request.Context(), permissionIDs) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("load permissions failed", err)) + return + } + items := make([]apiKeyPermissionCatalogItem, 0, len(permissionRows)) + for i := range permissionRows { + if permissionRows[i] == nil { + continue + } + items = append(items, apiKeyPermissionCatalogItem{ + ID: permissionRows[i].ID, + Module: permissionRows[i].Module, + Resource: permissionRows[i].Resource, + Action: permissionRows[i].Action, + Description: permissionRows[i].Description, + Status: string(permissionRows[i].Status), + Meta: parsePermissionMeta(permissionRows[i].Meta), + }) + } + dto.ResponseSuccess(c, gin.H{ + "profile_id": profileID, + "permission_ids": permissionIDs, + "permission_rows": items, + }) +} + +func (h *APIKeyAdminHandler) SetAPIKeyProfilePermissions(c *gin.Context) { + profileID, err := strconv.ParseUint(strings.TrimSpace(c.Param("profile_id")), 10, 64) + if err != nil || profileID == 0 { + dto.RespondErrorFrom(c, dto.NewBadRequest("invalid profile_id", err)) + return + } + var req setAPIKeyProfilePermissionsRequest + if err := c.ShouldBindJSON(&req); err != nil { + dto.ResponseValidationError(c, err) + return + } + canonical, err := h.resolveTenantScope(c, "") + if err != nil { + dto.RespondErrorFrom(c, err) + return + } + profile, err := h.profiles.GetById(c.Request.Context(), profileID, nil) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("load api key profile failed", err)) + return + } + if profile == nil || !strings.EqualFold(strings.TrimSpace(profile.TenantUUID), 
canonical) { + dto.RespondErrorFrom(c, dto.NewNotFound("api key profile not found", nil)) + return + } + if err := h.ensureAPIKeyPermissionTemplates(c.Request.Context()); err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("ensure api key permissions failed", err)) + return + } + + wantIDs := uniqueUint64(req.PermissionIDs) + permissionRows, err := h.permRepo.FindByIDs(c.Request.Context(), wantIDs) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("load permissions failed", err)) + return + } + validMap := make(map[uint64]struct{}, len(permissionRows)) + for i := range permissionRows { + if permissionRows[i] == nil || permissionRows[i].Status != modelsiam.PermissionStatusActive || !permissionRows[i].AllowAPIKey { + continue + } + if _, ok := toAPIKeyPermissionFromPermission(*permissionRows[i]); !ok { + continue + } + validMap[permissionRows[i].ID] = struct{}{} + } + validIDs := make([]uint64, 0, len(validMap)) + invalidIDs := make([]uint64, 0) + for _, id := range wantIDs { + if _, ok := validMap[id]; ok { + validIDs = append(validIDs, id) + } else { + invalidIDs = append(invalidIDs, id) + } + } + sort.Slice(validIDs, func(i, j int) bool { return validIDs[i] < validIDs[j] }) + sort.Slice(invalidIDs, func(i, j int) bool { return invalidIDs[i] < invalidIDs[j] }) + if len(invalidIDs) > 0 { + dto.RespondErrorFrom(c, dto.WithDetails( + dto.NewBadRequest("contains invalid permission_ids", nil), + map[string]interface{}{"invalid_ids": invalidIDs}, + )) + return + } + + currentIDs, err := h.profPerm.ListPermissionIDsOfProfile(c.Request.Context(), profileID) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("list current profile permissions failed", err)) + return + } + toAdd, toRemove := diffUint64(currentIDs, validIDs) + syncedKeys := 0 + syncedPermissions := 0 + err = h.db.WithContext(c.Request.Context()).Transaction(func(tx *gorm.DB) error { + profPermRepo := iamrepo.NewAPIKeyProfilePermissionRepository(tx) + if err := 
profPermRepo.RevokeByIDsTx(tx, profileID, toRemove); err != nil { + return err + } + if err := profPermRepo.GrantByIDsTx(tx, profileID, toAdd); err != nil { + return err + } + syncedKeys, syncedPermissions, err = h.syncActiveAPIKeySnapshotsForProfileTx(c.Request.Context(), tx, canonical, profileID) + return err + }) + if err != nil { + dto.RespondErrorFrom(c, dto.NewInternal("save profile permissions failed", err)) + return + } + logger.InfoF( + c.Request.Context(), + "[integration_gateway.apikey] profile permissions synced tenant=%s profile_id=%d active_keys=%d snapshot_permissions=%d", + canonical, + profileID, + syncedKeys, + syncedPermissions, + ) + + _ = apikeycache.InvalidateAll(c.Request.Context()) + dto.ResponseSuccess(c, gin.H{ + "profile_id": profileID, + "permission_ids": validIDs, + "added": toAdd, + "removed": toRemove, + "synced_keys": syncedKeys, + "synced_perms": syncedPermissions, + }) +} + +func (h *APIKeyAdminHandler) resolveProfileAPIKeyPermissions(ctx context.Context, profileID uint64) ([]apiKeyPermissionRequest, error) { + permissionIDs, err := h.profPerm.ListPermissionIDsOfProfile(ctx, profileID) + if err != nil { + return nil, err + } + if len(permissionIDs) == 0 { + return []apiKeyPermissionRequest{}, nil + } + permissionRows, err := h.permRepo.FindByIDs(ctx, permissionIDs) + if err != nil { + return nil, err + } + out := make([]apiKeyPermissionRequest, 0, len(permissionRows)) + for i := range permissionRows { + if permissionRows[i] == nil || permissionRows[i].Status != modelsiam.PermissionStatusActive || !permissionRows[i].AllowAPIKey { + continue + } + item, ok := toAPIKeyPermissionFromPermission(*permissionRows[i]) + if !ok { + continue + } + out = append(out, item) + } + return out, nil +} + +func (h *APIKeyAdminHandler) syncActiveAPIKeySnapshotsForProfileTx(ctx context.Context, tx *gorm.DB, tenantUUID string, profileID uint64) (int, int, error) { + if tx == nil { + return 0, 0, fmt.Errorf("tx is nil") + } + profPermRepo := 
iamrepo.NewAPIKeyProfilePermissionRepository(tx) + permRepo := iamrepo.NewPermissionRepository(tx) + keyRepo := repo.NewIntegrationGatewayAPIKeyRepository(tx) + keyPermRepo := repo.NewIntegrationGatewayAPIKeyPermissionRepository(tx) + + permissionIDs, err := profPermRepo.ListPermissionIDsOfProfile(ctx, profileID) + if err != nil { + return 0, 0, err + } + permissionRows, err := permRepo.FindByIDs(ctx, permissionIDs) + if err != nil { + return 0, 0, err + } + permissionRequests := make([]apiKeyPermissionRequest, 0, len(permissionRows)) + for i := range permissionRows { + if permissionRows[i] == nil || permissionRows[i].Status != modelsiam.PermissionStatusActive || !permissionRows[i].AllowAPIKey { + continue + } + item, ok := toAPIKeyPermissionFromPermission(*permissionRows[i]) + if !ok { + continue + } + permissionRequests = append(permissionRequests, item) + } + + keys, err := keyRepo.ListActiveByProfile(ctx, tenantUUID, profileID) + if err != nil { + return 0, 0, err + } + for i := range keys { + if err := keyPermRepo.ReplaceAll(ctx, keys[i].UUID, buildPermissionModels(keys[i].UUID, permissionRequests)); err != nil { + return 0, 0, err + } + } + return len(keys), len(permissionRequests), nil +} + +func (h *APIKeyAdminHandler) ensureAPIKeyPermissionTemplates(ctx context.Context) error { + return apikeypermissions.EnsureTemplatePermissions(ctx, h.permRepo) +} + +type apiKeyPermissionMeta struct { + Scope string + Action string + ResourceType string + ResourcePattern string + PluginID string + Effect string +} + +func parsePermissionMeta(raw []byte) map[string]any { + if len(raw) == 0 { + return nil + } + out := map[string]any{} + if err := json.Unmarshal(raw, &out); err != nil { + return nil + } + return out +} + +func extractAPIKeyPermissionMeta(meta map[string]any) (apiKeyPermissionMeta, bool) { + raw, ok := meta["api_key"] + if !ok { + return apiKeyPermissionMeta{}, false + } + data, ok := raw.(map[string]any) + if !ok { + return apiKeyPermissionMeta{}, false + } 
+ item := apiKeyPermissionMeta{ + Scope: strings.TrimSpace(anyToString(data["scope"])), + Action: strings.TrimSpace(anyToString(data["action"])), + ResourceType: strings.TrimSpace(anyToString(data["resource_type"])), + ResourcePattern: strings.TrimSpace(anyToString(data["resource_pattern"])), + PluginID: strings.TrimSpace(anyToString(data["plugin_id"])), + Effect: strings.TrimSpace(anyToString(data["effect"])), + } + if item.Scope == "" || item.Action == "" || item.ResourceType == "" || item.ResourcePattern == "" { + return apiKeyPermissionMeta{}, false + } + if item.Effect == "" { + item.Effect = "allow" + } + return item, true +} + +func toAPIKeyPermissionFromPermission(permission modelsiam.Permission) (apiKeyPermissionRequest, bool) { + resolved, ok := apikeypermissions.ResolvePermission(permission) + if !ok { + return apiKeyPermissionRequest{}, false + } + return apiKeyPermissionRequest{ + Scope: resolved.Scope, + Action: resolved.Action, + ResourceType: resolved.ResourceType, + ResourcePattern: resolved.ResourcePattern, + PluginID: resolved.PluginID, + Effect: resolved.Effect, + }, true +} + +func diffUint64(oldIDs []uint64, newIDs []uint64) (toAdd []uint64, toRemove []uint64) { + oldSet := make(map[uint64]struct{}, len(oldIDs)) + newSet := make(map[uint64]struct{}, len(newIDs)) + for _, id := range oldIDs { + oldSet[id] = struct{}{} + } + for _, id := range newIDs { + newSet[id] = struct{}{} + } + for _, id := range newIDs { + if _, ok := oldSet[id]; !ok { + toAdd = append(toAdd, id) + } + } + for _, id := range oldIDs { + if _, ok := newSet[id]; !ok { + toRemove = append(toRemove, id) + } + } + sort.Slice(toAdd, func(i, j int) bool { return toAdd[i] < toAdd[j] }) + sort.Slice(toRemove, func(i, j int) bool { return toRemove[i] < toRemove[j] }) + return +} + +func uniqueUint64(items []uint64) []uint64 { + if len(items) == 0 { + return nil + } + set := make(map[uint64]struct{}, len(items)) + out := make([]uint64, 0, len(items)) + for _, id := range items { + if 
id == 0 { + continue + } + if _, ok := set[id]; ok { + continue + } + set[id] = struct{}{} + out = append(out, id) + } + sort.Slice(out, func(i, j int) bool { return out[i] < out[j] }) + return out +} + +func anyToString(v any) string { + switch val := v.(type) { + case string: + return val + case []byte: + return string(val) + case nil: + return "" + default: + return fmt.Sprint(v) + } +} + +func generateAPIKeyMaterial() (plain string, hash string, prefix string) { + plain = "pxk_" + utils.RandomString(48) + sum := sha256.Sum256([]byte(plain)) + hash = hex.EncodeToString(sum[:]) + if len(plain) > 12 { + prefix = plain[:12] + } else { + prefix = plain + } + return plain, hash, prefix +} + +func parseOptionalTime(raw string) (*time.Time, error) { + trimmed := strings.TrimSpace(raw) + if trimmed == "" { + return nil, nil + } + v, err := time.Parse(time.RFC3339, trimmed) + if err != nil { + return nil, err + } + out := v.UTC() + return &out, nil +} + +func normalizePermissionInputs(items []apiKeyPermissionRequest) []apiKeyPermissionRequest { + if len(items) == 0 { + return nil + } + out := make([]apiKeyPermissionRequest, 0, len(items)) + for i := range items { + scope := strings.TrimSpace(items[i].Scope) + action := strings.TrimSpace(items[i].Action) + resourceType := strings.TrimSpace(items[i].ResourceType) + resourcePattern := strings.TrimSpace(items[i].ResourcePattern) + if scope == "" || action == "" || resourceType == "" || resourcePattern == "" { + continue + } + effect := strings.ToLower(strings.TrimSpace(items[i].Effect)) + if effect == "" { + effect = "allow" + } + out = append(out, apiKeyPermissionRequest{ + Scope: scope, + Action: action, + ResourceType: resourceType, + ResourcePattern: resourcePattern, + PluginID: strings.TrimSpace(items[i].PluginID), + Effect: effect, + }) + } + return out +} + +func buildPermissionModels(keyUUID uuid.UUID, items []apiKeyPermissionRequest) []models.IntegrationGatewayAPIKeyPermission { + out := 
make([]models.IntegrationGatewayAPIKeyPermission, 0, len(items)) + for i := range items { + out = append(out, models.IntegrationGatewayAPIKeyPermission{ + APIKeyUUID: keyUUID, + Scope: items[i].Scope, + Action: items[i].Action, + ResourceType: items[i].ResourceType, + ResourcePattern: items[i].ResourcePattern, + PluginID: items[i].PluginID, + Effect: items[i].Effect, + }) + } + return out +} + +func toPermissionResponses(items []models.IntegrationGatewayAPIKeyPermission) []apiKeyPermissionResponse { + out := make([]apiKeyPermissionResponse, 0, len(items)) + for i := range items { + out = append(out, apiKeyPermissionResponse{ + Scope: items[i].Scope, + Action: items[i].Action, + ResourceType: items[i].ResourceType, + ResourcePattern: items[i].ResourcePattern, + PluginID: items[i].PluginID, + Effect: items[i].Effect, + }) + } + return out +} + +func toAPIKeyResponse(item models.IntegrationGatewayAPIKey, perms []apiKeyPermissionResponse) apiKeyResponse { + resp := apiKeyResponse{ + KeyID: item.UUID.String(), + TenantUUID: item.TenantUUID, + ProfileID: item.ProfileID, + Name: item.Name, + Description: item.Description, + KeyPrefix: item.KeyPrefix, + Status: item.Status, + CreatedAt: item.CreatedAt.Format(time.RFC3339), + UpdatedAt: item.UpdatedAt.Format(time.RFC3339), + Permissions: perms, + } + if item.ExpiresAt != nil { + v := item.ExpiresAt.UTC().Format(time.RFC3339) + resp.ExpiresAt = &v + } + if item.LastUsedAt != nil { + v := item.LastUsedAt.UTC().Format(time.RFC3339) + resp.LastUsedAt = &v + } + return resp +} + +func (h *APIKeyAdminHandler) resolveTenantScope(c *gin.Context, raw string) (string, error) { + currentTenantRaw, err := reqctx.RequireTenantUUID(c.Request.Context()) + if err != nil { + return "", dto.NewUnauthorized("tenant context missing", err) + } + currentTenant, err := reqctx.CanonicalTenantUUID(strings.TrimSpace(currentTenantRaw)) + if err != nil { + return "", dto.NewUnauthorized("tenant context invalid", err) + } + _ = raw + return 
currentTenant, nil +} diff --git a/backend/internal/transport/http/admin/integration_gateway/handler.go b/backend/internal/transport/http/admin/integration_gateway/handler.go index a23a3e32..92d1123e 100644 --- a/backend/internal/transport/http/admin/integration_gateway/handler.go +++ b/backend/internal/transport/http/admin/integration_gateway/handler.go @@ -19,13 +19,14 @@ import ( // RegisterAPIRoutes 注册集成网关管理端接口。 func RegisterAPIRoutes(_ *gin.RouterGroup, protected *gin.RouterGroup, deps *shared.Deps) { - if deps == nil || deps.IntegrationGateway == nil || deps.IntegrationGateway.Manager == nil { + if deps == nil || deps.IntegrationGateway == nil || deps.IntegrationGateway.Manager == nil || deps.DB == nil { return } handler := &AdminHandler{svc: deps.IntegrationGateway.Manager} group := protected.Group("/admin/integration") + protected.GET("/admin/gateway/meta", handleGatewayMeta) group.POST("/routes", handler.CreateRoute) group.GET("/routes", handler.ListRoutes) group.GET("/routes/:route_id", handler.GetRoute) @@ -34,6 +35,95 @@ func RegisterAPIRoutes(_ *gin.RouterGroup, protected *gin.RouterGroup, deps *sha group.POST("/routes/:route_id/resume", handler.ResumeRoute) group.POST("/routes/:route_id/retire", handler.RetireRoute) group.GET("/routes/:route_id/versions", handler.ListVersions) + + apiKeyHandler := NewAPIKeyAdminHandler(deps.DB) + group.POST("/api-key-profiles", apiKeyHandler.CreateAPIKeyProfile) + group.GET("/api-key-profiles", apiKeyHandler.ListAPIKeyProfiles) + group.PATCH("/api-key-profiles/:profile_id", apiKeyHandler.UpdateAPIKeyProfile) + group.GET("/api-key-profiles/:profile_id/permissions", apiKeyHandler.GetAPIKeyProfilePermissions) + group.PUT("/api-key-profiles/:profile_id/permissions", apiKeyHandler.SetAPIKeyProfilePermissions) + group.GET("/permissions/catalog", apiKeyHandler.ListAPIKeyPermissionCatalog) + group.POST("/api-keys", apiKeyHandler.CreateAPIKey) + group.GET("/api-keys", apiKeyHandler.ListAPIKeys) + group.GET("/api-keys/:key_id", 
apiKeyHandler.GetAPIKey) + group.POST("/api-keys/:key_id/revoke", apiKeyHandler.RevokeAPIKey) + group.POST("/api-keys/:key_id/rotate", apiKeyHandler.RotateAPIKey) + group.DELETE("/api-keys/:key_id", apiKeyHandler.DeleteAPIKey) +} + +func handleGatewayMeta(c *gin.Context) { + apiPrefix := inferAPIPrefix(c) + baseURL := requestBaseURL(c) + + dto.ResponseSuccess(c, gin.H{ + "base_url": baseURL, + "api_prefix": apiPrefix, + "http_base": strings.TrimRight(baseURL, "/") + apiPrefix, + "ws": gin.H{"bus_endpoint": "/api/ws"}, + "default_scheme": "apikey", + "auth": gin.H{ + "schemes": []gin.H{ + { + "id": "apikey", + "header": "Authorization", + "format": "ApiKey ", + }, + { + "id": "bearer", + "header": "Authorization", + "format": "Bearer ", + }, + }, + "single_credential_per_request": true, + "conflict_policy": "prefer_header_scheme", + }, + "examples": gin.H{ + "llm_invoke_path": apiPrefix + "/ai/llm/invoke", + "tenant_capability_invoke_path": apiPrefix + "/tenant/invocations", + "integration_route_invoke_path_tmpl": apiPrefix + "/tenant/integration/routes/{route_slug}/invoke", + "cap_list_path": apiPrefix + "/admin/capabilities", + }, + }) +} + +func inferAPIPrefix(c *gin.Context) string { + const marker = "/admin/gateway/meta" + path := strings.TrimSpace(c.Request.URL.Path) + if idx := strings.Index(path, marker); idx >= 0 { + prefix := strings.TrimSpace(path[:idx]) + if prefix == "" { + return "/api/v1" + } + if !strings.HasPrefix(prefix, "/") { + prefix = "/" + prefix + } + return strings.TrimRight(prefix, "/") + } + return "/api/v1" +} + +func requestBaseURL(c *gin.Context) string { + scheme := "http" + if c.Request.TLS != nil { + scheme = "https" + } + if forwarded := strings.TrimSpace(c.GetHeader("X-Forwarded-Proto")); forwarded != "" { + if i := strings.Index(forwarded, ","); i >= 0 { + forwarded = forwarded[:i] + } + forwarded = strings.TrimSpace(forwarded) + if forwarded != "" { + scheme = forwarded + } + } + host := 
strings.TrimSpace(c.GetHeader("X-Forwarded-Host")) + if host == "" { + host = strings.TrimSpace(c.Request.Host) + } + if host == "" { + return "" + } + return scheme + "://" + host } // AdminHandler 负责管理端 HTTP 请求。 @@ -42,7 +132,7 @@ type AdminHandler struct { } type createRouteRequest struct { - TenantUUID string `json:"tenant_uuid" binding:"required,uuid4"` + TenantUUID string `json:"tenant_uuid,omitempty"` RouteSlug string `json:"route_slug" binding:"required"` CapabilityID string `json:"capability_id" binding:"required"` ToolGrantIDs []string `json:"tool_grant_ids"` @@ -53,7 +143,7 @@ type createRouteRequest struct { } type updateRouteRequest struct { - TenantUUID string `json:"tenant_uuid" binding:"required,uuid4"` + TenantUUID string `json:"tenant_uuid,omitempty"` CapabilityID string `json:"capability_id"` ToolGrantIDs []string `json:"tool_grant_ids"` Channels []string `json:"channels"` @@ -64,7 +154,7 @@ type updateRouteRequest struct { } type lifecycleRequest struct { - TenantUUID string `json:"tenant_uuid" binding:"required,uuid4"` + TenantUUID string `json:"tenant_uuid,omitempty"` Reason string `json:"reason"` } @@ -122,10 +212,9 @@ func (h *AdminHandler) CreateRoute(c *gin.Context) { return } - tenantUUID := strings.TrimSpace(req.TenantUUID) - canonical, err := reqctx.CanonicalTenantUUID(tenantUUID) + canonical, err := reqctx.RequireTenantUUID(c.Request.Context()) if err != nil { - dto.RespondErrorFrom(c, dto.NewBadRequest("tenant_uuid must be a valid UUID", err)) + dto.RespondErrorFrom(c, dto.NewUnauthorized("tenant context missing", err)) return } @@ -150,14 +239,9 @@ func (h *AdminHandler) CreateRoute(c *gin.Context) { } func (h *AdminHandler) ListRoutes(c *gin.Context) { - tenantUUID := strings.TrimSpace(c.Query("tenant_uuid")) - if tenantUUID == "" { - dto.RespondErrorFrom(c, dto.NewBadRequest("tenant_uuid is required", nil)) - return - } - canonical, err := reqctx.CanonicalTenantUUID(tenantUUID) + canonical, err := 
reqctx.RequireTenantUUID(c.Request.Context()) if err != nil { - dto.RespondErrorFrom(c, dto.NewBadRequest("tenant_uuid must be a valid UUID", err)) + dto.RespondErrorFrom(c, dto.NewUnauthorized("tenant context missing", err)) return } capabilityID := strings.TrimSpace(c.Query("capability_id")) @@ -228,10 +312,9 @@ func (h *AdminHandler) UpdateRoute(c *gin.Context) { return } - tenantUUID := strings.TrimSpace(req.TenantUUID) - canonical, err := reqctx.CanonicalTenantUUID(tenantUUID) + canonical, err := reqctx.RequireTenantUUID(c.Request.Context()) if err != nil { - dto.RespondErrorFrom(c, dto.NewBadRequest("tenant_uuid must be a valid UUID", err)) + dto.RespondErrorFrom(c, dto.NewUnauthorized("tenant context missing", err)) return } @@ -279,14 +362,9 @@ func (h *AdminHandler) lifecycle(c *gin.Context, action string) { dto.ResponseValidationError(c, err) return } - tenantUUID := strings.TrimSpace(req.TenantUUID) - if tenantUUID == "" { - dto.RespondErrorFrom(c, dto.NewBadRequest("tenant_uuid is required", nil)) - return - } - canonical, err := reqctx.CanonicalTenantUUID(tenantUUID) + canonical, err := reqctx.RequireTenantUUID(c.Request.Context()) if err != nil { - dto.RespondErrorFrom(c, dto.NewBadRequest("tenant_uuid must be a valid UUID", err)) + dto.RespondErrorFrom(c, dto.NewUnauthorized("tenant context missing", err)) return } diff --git a/backend/internal/transport/http/admin/knowledge_space/corpus_check_handlers.go b/backend/internal/transport/http/admin/knowledge_space/corpus_check_handlers.go new file mode 100644 index 00000000..536ae0f1 --- /dev/null +++ b/backend/internal/transport/http/admin/knowledge_space/corpus_check_handlers.go @@ -0,0 +1,84 @@ +package knowledge_space + +import ( + "errors" + "net/http" + "strings" + + "github.com/gin-gonic/gin" + "github.com/google/uuid" + + ksvc "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space" + "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" + "github.com/ArtisanCloud/PowerX/pkg/dto" +) + 
+type corpusCheckHandler struct { + svc *ksvc.CorpusCheckService +} + +func newCorpusCheckHandler(svc *ksvc.CorpusCheckService) *corpusCheckHandler { + if svc == nil { + return nil + } + return &corpusCheckHandler{svc: svc} +} + +type corpusCheckStartRequest struct { + RequestedBy string `json:"requestedBy"` +} + +func (h *corpusCheckHandler) Start(c *gin.Context) { + spaceID, err := uuid.Parse(strings.TrimSpace(c.Param("spaceId"))) + if err != nil { + dto.ResponseError(c, http.StatusBadRequest, "无效的空间 ID", err) + return + } + var req corpusCheckStartRequest + _ = c.ShouldBindJSON(&req) + tenantUUID, err := reqctx.RequireTenantUUIDFromGin(c) + if err != nil { + dto.ResponseError(c, http.StatusUnauthorized, "缺少租户上下文", err) + return + } + tenantUUID = strings.ToLower(strings.TrimSpace(tenantUUID)) + job, err := h.svc.Start(c.Request.Context(), tenantUUID, spaceID, req.RequestedBy) + if err != nil { + if errors.Is(err, ksvc.ErrInvalidInput) { + dto.ResponseError(c, http.StatusBadRequest, "入参不合法", err) + return + } + // Best-effort: even if scheduling fails (e.g. Event Fabric misconfig), the job record is created and updated to failed. + // Return 202 so the UI can continue (space creation should not be blocked by corpus-check). 
+ if job != nil { + dto.ResponseSuccessWithStatus(c, http.StatusAccepted, job) + return + } + dto.ResponseError(c, http.StatusInternalServerError, "启动失败", err) + return + } + dto.ResponseSuccessWithStatus(c, http.StatusAccepted, job) +} + +func (h *corpusCheckHandler) Get(c *gin.Context) { + spaceID, err := uuid.Parse(strings.TrimSpace(c.Param("spaceId"))) + if err != nil { + dto.ResponseError(c, http.StatusBadRequest, "无效的空间 ID", err) + return + } + jobID, err := uuid.Parse(strings.TrimSpace(c.Param("jobId"))) + if err != nil { + dto.ResponseError(c, http.StatusBadRequest, "无效的 job ID", err) + return + } + job, err := h.svc.Get(c.Request.Context(), jobID) + if err != nil { + dto.ResponseError(c, http.StatusInternalServerError, "查询失败", err) + return + } + if job == nil || job.SpaceUUID != spaceID { + dto.ResponseError(c, http.StatusNotFound, "job 不存在", nil) + return + } + dto.ResponseSuccess(c, job) +} diff --git a/backend/internal/transport/http/admin/knowledge_space/decay_handlers.go b/backend/internal/transport/http/admin/knowledge_space/decay_handlers.go index 9de28184..924244a3 100644 --- a/backend/internal/transport/http/admin/knowledge_space/decay_handlers.go +++ b/backend/internal/transport/http/admin/knowledge_space/decay_handlers.go @@ -3,6 +3,8 @@ package knowledge_space import ( "errors" "net/http" + "os" + "strconv" "strings" "time" @@ -29,24 +31,46 @@ func NewDecayHandler(deps *shared.Deps) *DecayHandler { } type decayScanRequest struct { - SpaceID string `json:"spaceId" binding:"required,uuid4"` - Detected int `json:"detected" binding:"required,min=1"` + SpaceID string `json:"spaceId" binding:"required,uuid4"` + Detected int `json:"detected" binding:"required,min=1"` + Category string `json:"category"` + Severity string `json:"severity"` + Reason string `json:"reason"` + AssignedTo string `json:"assignedTo"` + RequestedBy string `json:"requestedBy"` } type decayRestoreRequest struct { TaskID string `json:"taskId" binding:"required,uuid4"` Notes 
string `json:"notes"` FalsePositive bool `json:"falsePositive"` + ApprovedBy string `json:"approvedBy"` + Reason string `json:"reason"` } func (h *DecayHandler) Scan(c *gin.Context) { + if !flagEnabled("PX_KNOWLEDGE_DECAY_GUARD") { + dto.ResponseError(c, http.StatusForbidden, "decay guard disabled", nil) + return + } var req decayScanRequest if err := c.ShouldBindJSON(&req); err != nil { dto.ResponseValidationError(c, err) return } + if _, ok := tenantUUIDFromContext(c); !ok { + return + } spaceID, _ := uuid.Parse(req.SpaceID) - tasks, err := h.svc.RunScan(c.Request.Context(), spaceID, req.Detected) + tasks, err := h.svc.RunScanWithInput(c.Request.Context(), decay.RunScanInput{ + SpaceID: spaceID, + Detected: req.Detected, + Category: req.Category, + Severity: req.Severity, + Reason: req.Reason, + AssignedTo: req.AssignedTo, + RequestedBy: req.RequestedBy, + }) if err != nil { h.handleError(c, err) return @@ -54,18 +78,38 @@ func (h *DecayHandler) Scan(c *gin.Context) { now := time.Now() dto.ResponseSuccessWithStatus(c, http.StatusCreated, gin.H{ "tasks": toDecayTaskDTO(tasks, now), - "metrics": gin.H{"knowledge.decay.detected": len(tasks)}, + "metrics": gin.H{ + "knowledge.decay.detected": len(tasks), + "knowledge.gap.backlog": len(tasks), + }, }) } func (h *DecayHandler) Restore(c *gin.Context) { + if !flagEnabled("PX_KNOWLEDGE_RESTORE_FLOW") { + dto.ResponseError(c, http.StatusForbidden, "restore flow disabled", nil) + return + } var req decayRestoreRequest if err := c.ShouldBindJSON(&req); err != nil { dto.ResponseValidationError(c, err) return } + if _, ok := tenantUUIDFromContext(c); !ok { + return + } + if req.FalsePositive && strings.TrimSpace(req.Reason) == "" && strings.TrimSpace(req.Notes) == "" { + dto.ResponseError(c, http.StatusBadRequest, "误判恢复需要提供 reason 或 notes", decay.ErrInvalidInput) + return + } taskID, _ := uuid.Parse(req.TaskID) - task, err := h.svc.Restore(c.Request.Context(), taskID, req.Notes, req.FalsePositive) + task, err := 
h.svc.RestoreWithInput(c.Request.Context(), decay.RestoreInput{ + TaskID: taskID, + Notes: req.Notes, + FalsePositive: req.FalsePositive, + ApprovedBy: req.ApprovedBy, + Reason: req.Reason, + }) if err != nil { h.handleError(c, err) return @@ -74,19 +118,38 @@ func (h *DecayHandler) Restore(c *gin.Context) { } func (h *DecayHandler) Status(c *gin.Context) { - spaceParam := strings.TrimSpace(c.Query("spaceId")) - spaceID, err := uuid.Parse(spaceParam) - if err != nil { - dto.ResponseError(c, http.StatusBadRequest, "invalid spaceId", err) + severityFilter := strings.ToLower(strings.TrimSpace(c.Query("severity"))) + exportFormat := strings.ToLower(strings.TrimSpace(c.Query("export"))) + tenantUUID, ok := tenantUUIDFromContext(c) + if !ok { return } - tasks, err := h.svc.ListOpen(c.Request.Context(), spaceID) + + spaceParam := strings.TrimSpace(c.Query("spaceId")) + var ( + filtered []*model.DecayTask + err error + ) + if spaceParam == "" { + filtered, err = h.svc.ListOpenByTenant(c.Request.Context(), tenantUUID, severityFilter) + } else { + spaceID, perr := uuid.Parse(spaceParam) + if perr != nil { + dto.ResponseError(c, http.StatusBadRequest, "invalid spaceId", perr) + return + } + tasks, serr := h.svc.ListOpen(c.Request.Context(), spaceID) + if serr != nil { + dto.ResponseError(c, http.StatusInternalServerError, serr.Error(), serr) + return + } + filtered = filterTasksBySeverity(tasks, severityFilter) + err = nil + } if err != nil { dto.ResponseError(c, http.StatusInternalServerError, err.Error(), err) return } - severityFilter := strings.ToLower(strings.TrimSpace(c.Query("severity"))) - filtered := filterTasksBySeverity(tasks, severityFilter) now := time.Now() summary := gin.H{ "knowledge.gap.backlog": len(filtered), @@ -95,6 +158,10 @@ func (h *DecayHandler) Status(c *gin.Context) { if severityFilter != "" { summary["filter"] = severityFilter } + if exportFormat == "csv" { + writeDecayCSV(c, filtered, now) + return + } dto.ResponseSuccess(c, gin.H{ "tasks": 
toDecayTaskDTO(filtered, now), "metrics": summary, @@ -196,3 +263,62 @@ func slaRemainingMinutes(now time.Time, due time.Time) int64 { } return int64(mins) } + +func flagEnabled(flag string) bool { + flag = strings.TrimSpace(flag) + if flag == "" { + return true + } + value := strings.TrimSpace(os.Getenv(flag)) + if value == "" { + return true + } + value = strings.ToLower(value) + return value == "1" || value == "true" || value == "enabled" || value == "on" || value == "yes" +} + +func writeDecayCSV(c *gin.Context, tasks []*model.DecayTask, now time.Time) { + var b strings.Builder + b.WriteString("task_id,space_id,category,severity,status,detected_at,sla_due_at,sla_remaining_minutes,false_positive\n") + for _, task := range tasks { + if task == nil { + continue + } + b.WriteString(task.UUID.String()) + b.WriteByte(',') + b.WriteString(task.SpaceUUID.String()) + b.WriteByte(',') + b.WriteString(sanitizeCSV(task.Category)) + b.WriteByte(',') + b.WriteString(sanitizeCSV(task.Severity)) + b.WriteByte(',') + b.WriteString(sanitizeCSV(task.Status)) + b.WriteByte(',') + b.WriteString(task.DetectedAt.UTC().Format(time.RFC3339Nano)) + b.WriteByte(',') + b.WriteString(task.SLADueAt.UTC().Format(time.RFC3339Nano)) + b.WriteByte(',') + b.WriteString(int64ToString(slaRemainingMinutes(now, task.SLADueAt))) + b.WriteByte(',') + if task.FalsePositive { + b.WriteString("true") + } else { + b.WriteString("false") + } + b.WriteByte('\n') + } + c.Header("Content-Type", "text/csv; charset=utf-8") + c.String(http.StatusOK, b.String()) +} + +func sanitizeCSV(input string) string { + input = strings.ReplaceAll(input, "\"", "\"\"") + if strings.ContainsAny(input, ",\n\r") { + return "\"" + input + "\"" + } + return input +} + +func int64ToString(v int64) string { + return strconv.FormatInt(v, 10) +} diff --git a/backend/internal/transport/http/admin/knowledge_space/delta_handlers.go b/backend/internal/transport/http/admin/knowledge_space/delta_handlers.go index 8afb9753..3f29e5fb 100644 
--- a/backend/internal/transport/http/admin/knowledge_space/delta_handlers.go +++ b/backend/internal/transport/http/admin/knowledge_space/delta_handlers.go @@ -134,6 +134,8 @@ func (h *DeltaHandler) handleError(c *gin.Context, err error) { switch { case errors.Is(err, ksdelta.ErrInvalidInput), errors.Is(err, ksdelta.ErrUnknownSource): dto.ResponseError(c, http.StatusBadRequest, err.Error(), err) + case errors.Is(err, ksdelta.ErrJobConflict): + dto.ResponseError(c, http.StatusConflict, err.Error(), err) case errors.Is(err, ksdelta.ErrSpaceNotFound), errors.Is(err, ksdelta.ErrJobNotFound): dto.ResponseError(c, http.StatusNotFound, err.Error(), err) case errors.Is(err, ksdelta.ErrPartialReleaseDenied): diff --git a/backend/internal/transport/http/admin/knowledge_space/dto.go b/backend/internal/transport/http/admin/knowledge_space/dto.go index 2f62a86a..b953df4f 100644 --- a/backend/internal/transport/http/admin/knowledge_space/dto.go +++ b/backend/internal/transport/http/admin/knowledge_space/dto.go @@ -24,6 +24,9 @@ type createSpaceRequest struct { DepartmentCode string `json:"departmentCode" binding:"required"` Quotas quotaPayload `json:"quotas" binding:"required"` PolicyTemplateVersionID string `json:"policyTemplateVersionId" binding:"required"` + IngestionProfileKey string `json:"ingestionProfileKey"` + IndexProfileKey string `json:"indexProfileKey"` + RAGProfileKey string `json:"ragProfileKey"` FeatureFlags []string `json:"featureFlags"` RequestedBy string `json:"requestedBy"` } @@ -31,6 +34,9 @@ type createSpaceRequest struct { type updateSpaceRequest struct { Quotas *quotaPayload `json:"quotas"` PolicyTemplateVersionID string `json:"policyTemplateVersionId"` + IngestionProfileKey string `json:"ingestionProfileKey"` + IndexProfileKey string `json:"indexProfileKey"` + RAGProfileKey string `json:"ragProfileKey"` FeatureFlags []string `json:"featureFlags"` Status string `json:"status"` UpdatedBy string `json:"updatedBy"` @@ -39,20 +45,26 @@ type updateSpaceRequest 
struct { type retireSpaceRequest struct { Reason string `json:"reason"` RequestedBy string `json:"requestedBy"` + DropVectors bool `json:"dropVectors"` } type knowledgeSpaceResponse struct { - SpaceID string `json:"spaceId"` - TenantUUID string `json:"tenant_uuid"` - SpaceName string `json:"spaceName"` - DepartmentCode string `json:"departmentCode"` - Status string `json:"status"` - PolicyTemplateID string `json:"policyTemplateVersionId"` - FeatureFlags []string `json:"featureFlags"` - AuditToken string `json:"auditToken"` - RetentionExpiresAt *time.Time `json:"retentionExpiresAt,omitempty"` - Quotas quotaPayload `json:"quotas"` - IAMStatus string `json:"iamStatus"` + SpaceID string `json:"spaceId"` + TenantUUID string `json:"tenant_uuid"` + SpaceName string `json:"spaceName"` + DepartmentCode string `json:"departmentCode"` + Status string `json:"status"` + PolicyTemplateID string `json:"policyTemplateVersionId"` + IngestionProfileKey string `json:"ingestionProfileKey"` + IndexProfileKey string `json:"indexProfileKey"` + RAGProfileKey string `json:"ragProfileKey"` + EmbeddingProfileKey string `json:"embeddingProfileKey"` + ActiveVectorIndexKey string `json:"activeVectorIndexKey"` + FeatureFlags []string `json:"featureFlags"` + AuditToken string `json:"auditToken"` + RetentionExpiresAt *time.Time `json:"retentionExpiresAt,omitempty"` + Quotas quotaPayload `json:"quotas"` + IAMStatus string `json:"iamStatus"` } func toResponse(space *models.KnowledgeSpace) knowledgeSpaceResponse { @@ -61,15 +73,20 @@ func toResponse(space *models.KnowledgeSpace) knowledgeSpaceResponse { } flags := ksvc.FeatureFlagsFromJSON(space.FeatureFlags) return knowledgeSpaceResponse{ - SpaceID: space.UUID.String(), - TenantUUID: space.TenantUUID, - SpaceName: space.SpaceName, - DepartmentCode: space.DepartmentCode, - Status: space.Status, - PolicyTemplateID: ksvc.PolicyIDString(space.PolicyTemplateVersionID), - FeatureFlags: flags, - AuditToken: space.AuditToken, - RetentionExpiresAt: 
space.RetentionExpiresAt, + SpaceID: space.UUID.String(), + TenantUUID: space.TenantUUID, + SpaceName: space.SpaceName, + DepartmentCode: space.DepartmentCode, + Status: space.Status, + PolicyTemplateID: ksvc.PolicyIDString(space.PolicyTemplateVersionID), + IngestionProfileKey: strings.TrimSpace(space.IngestionProfileKey), + IndexProfileKey: strings.TrimSpace(space.IndexProfileKey), + RAGProfileKey: strings.TrimSpace(space.RAGProfileKey), + EmbeddingProfileKey: strings.TrimSpace(space.EmbeddingProfileKey), + ActiveVectorIndexKey: strings.TrimSpace(space.ActiveVectorIndexKey), + FeatureFlags: flags, + AuditToken: space.AuditToken, + RetentionExpiresAt: space.RetentionExpiresAt, Quotas: quotaPayload{ CPUCores: space.QuotaCPU, StorageGB: space.QuotaStorageGB, @@ -91,36 +108,197 @@ func parseUUID(id string) (uuid.UUID, error) { } type ingestionJobRequest struct { - SourceType string `json:"sourceType" binding:"required,oneof=pdf markdown table api"` - SourceURI string `json:"sourceUri" binding:"required"` - MaskingProfile string `json:"maskingProfile"` - Priority string `json:"priority" binding:"omitempty,oneof=normal high"` - RequestedBy string `json:"requestedBy"` + // Format is the preferred field; sourceType is kept for compatibility. + Format string `json:"format" binding:"omitempty,oneof=pdf docx xlsx csv markdown html sql image table api"` + SourceType string `json:"sourceType" binding:"omitempty,oneof=pdf docx xlsx csv markdown html sql image table api"` + SourceURI string `json:"sourceUri" binding:"required"` + DocUUID string `json:"docUuid"` + IngestionProfile string `json:"ingestionProfile"` + ProcessorProfile string `json:"processorProfile"` + OCRRequired bool `json:"ocrRequired"` + MaskingProfile string `json:"maskingProfile"` + Priority string `json:"priority" binding:"omitempty,oneof=normal high"` + RequestedBy string `json:"requestedBy"` + // L1/L2/L3 selection snapshot (for audit / mapping / future profiles). 
+ RagSceneKey string `json:"ragSceneKey"` + RagBundleKey string `json:"ragBundleKey"` + RagPrimary string `json:"ragPrimary"` + // Chunking controls (optional). When set, they are applied on top of processor output. + SegmentMode string `json:"segmentMode" binding:"omitempty,oneof=unit heading clause semantic table_row code_block conversation"` + ChunkSize int `json:"chunkSize" binding:"omitempty,min=0,max=20000"` + ChunkOverlap int `json:"chunkOverlap" binding:"omitempty,min=0,max=5000"` + // SegmentSizePolicy controls how chunkSize is applied: cap | target. + SegmentSizePolicy string `json:"segmentSizePolicy" binding:"omitempty,oneof=cap target"` + // SegmentOrder defines the execution order of chunking steps. + // Supported steps: page | size | segment | separator + SegmentOrder []string `json:"segmentOrder" binding:"omitempty,dive,oneof=page size segment separator"` + // Separators are preferred boundaries applied before windowing; supports punctuation and newline tokens. + Separators []string `json:"separators" binding:"omitempty,dive,max=16"` + // PagePriority: prefer page boundary before other segmentation (PDF only). + PagePriority bool `json:"pagePriority"` + // Anchors: included in chunk metadata (best-effort). 
+ AnchorHeadingPath bool `json:"anchorHeadingPath"` + AnchorClauseID bool `json:"anchorClauseId"` + AnchorRowNumber bool `json:"anchorRowNumber"` + AnchorSpeaker bool `json:"anchorSpeaker"` + AnchorSentenceIndex bool `json:"anchorSentenceIndex"` } type ingestionJobView struct { JobID string `json:"jobId"` Status string `json:"status"` + RetryCount int `json:"retryCount"` + ErrorCode string `json:"errorCode,omitempty"` + Reason string `json:"reason,omitempty"` ChunkTotal int `json:"chunkTotal"` ChunkCoveragePct float64 `json:"chunkCoveragePct"` EmbeddingSuccessPct float64 `json:"embeddingSuccessPct"` + EmbeddingMaxInputTokens int `json:"embeddingMaxInputTokens,omitempty"` + EmbeddingProvider string `json:"embeddingProvider,omitempty"` + EmbeddingModel string `json:"embeddingModel,omitempty"` MaskingCoveragePct float64 `json:"maskingCoveragePct"` + SegmentMode string `json:"segmentMode,omitempty"` + ChunkSize int `json:"chunkSize,omitempty"` + ChunkOverlap int `json:"chunkOverlap,omitempty"` + SegmentSizePolicy string `json:"segmentSizePolicy,omitempty"` + Separators []string `json:"separators,omitempty"` + PagePriority bool `json:"pagePriority,omitempty"` + SegmentOrder []string `json:"segmentOrder,omitempty"` + ChunkAnchors map[string]bool `json:"chunkAnchors,omitempty"` } func toIngestionJobView(job *models.IngestionJob) ingestionJobView { if job == nil { return ingestionJobView{} } + var snap map[string]any + if len(job.MetricsSnapshot) > 0 { + _ = json.Unmarshal(job.MetricsSnapshot, &snap) + } + segmentMode := readStringSnap(snap, "segment_mode") + chunkSize := readIntSnap(snap, "chunk_size") + chunkOverlap := readIntSnap(snap, "chunk_overlap") + segmentSizePolicy := readStringSnap(snap, "segment_size_policy") + embeddingMaxInputTokens := readIntSnap(snap, "embedding_max_input_tokens") + embeddingProvider := readStringSnap(snap, "embedding_provider") + embeddingModel := readStringSnap(snap, "embedding_model") + separators := readStringSliceSnap(snap, 
"separators") + pagePriority := readBoolSnap(snap, "page_priority") + segmentOrder := readStringSliceSnap(snap, "segment_order") + anchors := readBoolMapSnap(snap, "chunk_anchors") return ingestionJobView{ JobID: job.UUID.String(), Status: job.Status, + RetryCount: job.RetryCount, + ErrorCode: job.ErrorCode, + Reason: job.BlockedReason, ChunkTotal: job.ChunkTotal, ChunkCoveragePct: job.ChunkCoveredPct, EmbeddingSuccessPct: job.EmbeddingSuccessPct, + EmbeddingMaxInputTokens: embeddingMaxInputTokens, + EmbeddingProvider: embeddingProvider, + EmbeddingModel: embeddingModel, MaskingCoveragePct: job.MaskingCoveragePct, + SegmentMode: segmentMode, + ChunkSize: chunkSize, + ChunkOverlap: chunkOverlap, + SegmentSizePolicy: segmentSizePolicy, + Separators: separators, + PagePriority: pagePriority, + SegmentOrder: segmentOrder, + ChunkAnchors: anchors, + } +} + +func readStringSnap(snap map[string]any, key string) string { + if snap == nil { + return "" + } + if v, ok := snap[key]; ok { + if s, ok := v.(string); ok { + return strings.TrimSpace(s) + } + } + return "" +} + +func readIntSnap(snap map[string]any, key string) int { + if snap == nil { + return 0 + } + switch v := snap[key].(type) { + case float64: + return int(v) + case int: + return v + case int64: + return int(v) + case string: + n, _ := strconv.Atoi(strings.TrimSpace(v)) + return n + default: + return 0 } } +func readBoolSnap(snap map[string]any, key string) bool { + if snap == nil { + return false + } + if v, ok := snap[key].(bool); ok { + return v + } + if v, ok := snap[key].(string); ok { + return strings.EqualFold(strings.TrimSpace(v), "true") + } + return false +} + +func readStringSliceSnap(snap map[string]any, key string) []string { + if snap == nil { + return nil + } + raw, ok := snap[key] + if !ok || raw == nil { + return nil + } + arr, ok := raw.([]any) + if !ok { + return nil + } + out := make([]string, 0, len(arr)) + for _, v := range arr { + if s, ok := v.(string); ok && strings.TrimSpace(s) != "" 
{ + out = append(out, strings.TrimSpace(s)) + } + } + return out +} + +func readBoolMapSnap(snap map[string]any, key string) map[string]bool { + if snap == nil { + return nil + } + raw, ok := snap[key] + if !ok { + return nil + } + m, ok := raw.(map[string]any) + if !ok { + return nil + } + out := make(map[string]bool, len(m)) + for k, v := range m { + switch vv := v.(type) { + case bool: + out[k] = vv + case string: + out[k] = strings.EqualFold(strings.TrimSpace(vv), "true") + } + } + return out +} + type feedbackRequest struct { Severity string `json:"severity" binding:"required,oneof=low medium high critical"` IssueType string `json:"issueType" binding:"required,oneof=accuracy freshness compliance"` @@ -130,6 +308,12 @@ type feedbackRequest struct { ReportedBy string `json:"reportedBy"` } +type feedbackCaseActionRequest struct { + RequestedBy string `json:"requestedBy"` + ResolutionNotes string `json:"resolutionNotes"` + Reason string `json:"reason"` +} + type feedbackResponse struct { CaseID string `json:"caseId"` Status string `json:"status"` @@ -140,7 +324,11 @@ type feedbackResponse struct { SlaDueAt *time.Time `json:"slaDueAt,omitempty"` QualityScore float64 `json:"qualityScore"` ReprocessJob string `json:"reprocessJobId,omitempty"` + TraceID string `json:"traceId,omitempty"` ToolTraceRef string `json:"toolTraceRef,omitempty"` + EscalatedAt *time.Time `json:"escalatedAt,omitempty"` + ClosedAt *time.Time `json:"closedAt,omitempty"` + Resolution string `json:"resolutionNotes,omitempty"` CreatedAt time.Time `json:"createdAt"` LastUpdatedAt time.Time `json:"updatedAt"` } @@ -159,7 +347,11 @@ func toFeedbackResponse(caseModel *models.FeedbackCase) feedbackResponse { ReportedBy: caseModel.ReportedBy, SlaDueAt: caseModel.SLADueAt, QualityScore: caseModel.QualityScore, + TraceID: caseModel.ToolTraceRef, ToolTraceRef: caseModel.ToolTraceRef, + EscalatedAt: caseModel.EscalatedAt, + ClosedAt: caseModel.ClosedAt, + Resolution: caseModel.ResolutionNotes, CreatedAt: 
caseModel.CreatedAt, LastUpdatedAt: caseModel.UpdatedAt, } @@ -200,6 +392,8 @@ type fusionStrategyResponse struct { RerankerModel string `json:"rerankerModel"` ConflictPolicy string `json:"conflictPolicy"` DeploymentState string `json:"deploymentState"` + Degraded bool `json:"degraded"` + DegradeReasons []string `json:"degradeReasons,omitempty"` PublishedAt *time.Time `json:"publishedAt,omitempty"` } @@ -281,6 +475,13 @@ func toFusionStrategyResponse(strategy *models.FusionStrategyVersion) fusionStra if strategy == nil { return fusionStrategyResponse{} } + var snap struct { + DegradeReasons []string `json:"degrade_reasons"` + } + if len(strategy.BenchmarkMetrics) > 0 { + _ = json.Unmarshal(strategy.BenchmarkMetrics, &snap) + } + degraded := len(snap.DegradeReasons) > 0 return fusionStrategyResponse{ StrategyID: strategyIDString(strategy.ID), SpaceID: strategy.SpaceUUID.String(), @@ -291,6 +492,8 @@ func toFusionStrategyResponse(strategy *models.FusionStrategyVersion) fusionStra RerankerModel: strategy.RerankerModel, ConflictPolicy: strategy.ConflictPolicy, DeploymentState: strategy.DeploymentState, + Degraded: degraded, + DegradeReasons: snap.DegradeReasons, PublishedAt: strategy.PublishedAt, } } diff --git a/backend/internal/transport/http/admin/knowledge_space/event_handlers.go b/backend/internal/transport/http/admin/knowledge_space/event_handlers.go index fff9d563..415baf07 100644 --- a/backend/internal/transport/http/admin/knowledge_space/event_handlers.go +++ b/backend/internal/transport/http/admin/knowledge_space/event_handlers.go @@ -1,12 +1,19 @@ package knowledge_space import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" "errors" "net/http" + "os" + "strconv" "strings" "time" "github.com/gin-gonic/gin" + "github.com/google/uuid" "github.com/ArtisanCloud/PowerX/internal/app/shared" event_hotfix "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space/event_hotfix" @@ -16,13 +23,38 @@ import ( // EventHandler exposes HTTP endpoints 
for event hotfix orchestration. type EventHandler struct { svc *event_hotfix.Service + + signatureSecret string + signatureHeader string + timestampHeader string + allowedSkew time.Duration } func NewEventHandler(deps *shared.Deps) *EventHandler { if deps == nil || deps.KnowledgeSpace == nil || deps.KnowledgeSpace.EventHotfix == nil { return nil } - return &EventHandler{svc: deps.KnowledgeSpace.EventHotfix} + header := strings.TrimSpace(os.Getenv("PX_KNOWLEDGE_EVENT_SIGNATURE_HEADER")) + if header == "" { + header = "X-PowerX-Signature" + } + tsHeader := strings.TrimSpace(os.Getenv("PX_KNOWLEDGE_EVENT_TIMESTAMP_HEADER")) + if tsHeader == "" { + tsHeader = "X-PowerX-Timestamp" + } + skewSec := 300 + if raw := strings.TrimSpace(os.Getenv("PX_KNOWLEDGE_EVENT_ALLOWED_SKEW_SECONDS")); raw != "" { + if v, err := strconv.Atoi(raw); err == nil && v > 0 { + skewSec = v + } + } + return &EventHandler{ + svc: deps.KnowledgeSpace.EventHotfix, + signatureSecret: strings.TrimSpace(os.Getenv("PX_KNOWLEDGE_EVENT_SIGNATURE_SECRET")), + signatureHeader: header, + timestampHeader: tsHeader, + allowedSkew: time.Duration(skewSec) * time.Second, + } } func (h *EventHandler) Apply(c *gin.Context) { @@ -30,6 +62,12 @@ func (h *EventHandler) Apply(c *gin.Context) { dto.ResponseError(c, http.StatusServiceUnavailable, "event hotfix unavailable", nil) return } + raw, _ := c.GetRawData() + if !h.verifySignature(c, raw) { + dto.ResponseError(c, http.StatusUnauthorized, "签名校验失败", nil) + return + } + c.Request.Body = ioNopCloser(bytes.NewReader(raw)) var req eventApplyRequest if err := c.ShouldBindJSON(&req); err != nil { dto.ResponseValidationError(c, err) @@ -48,6 +86,12 @@ func (h *EventHandler) Retry(c *gin.Context) { dto.ResponseError(c, http.StatusServiceUnavailable, "event hotfix unavailable", nil) return } + raw, _ := c.GetRawData() + if !h.verifySignature(c, raw) { + dto.ResponseError(c, http.StatusUnauthorized, "签名校验失败", nil) + return + } + c.Request.Body = 
ioNopCloser(bytes.NewReader(raw)) var req eventApplyRequest if err := c.ShouldBindJSON(&req); err != nil { dto.ResponseValidationError(c, err) @@ -62,11 +106,91 @@ func (h *EventHandler) Retry(c *gin.Context) { } func (h *EventHandler) HotUpdate(c *gin.Context) { - dto.ResponseSuccess(c, gin.H{"status": "enqueued"}) + if h == nil || h.svc == nil { + dto.ResponseError(c, http.StatusServiceUnavailable, "event hotfix unavailable", nil) + return + } + raw, _ := c.GetRawData() + if !h.verifySignature(c, raw) { + dto.ResponseError(c, http.StatusUnauthorized, "签名校验失败", nil) + return + } + c.Request.Body = ioNopCloser(bytes.NewReader(raw)) + var req struct { + EventID string `json:"eventId"` + SpaceID string `json:"spaceId" binding:"required,uuid4"` + Payload map[string]any `json:"payload"` + RetryCount int `json:"retryCount"` + } + if err := c.ShouldBindJSON(&req); err != nil { + dto.ResponseValidationError(c, err) + return + } + eventID := strings.TrimSpace(req.EventID) + if eventID == "" { + eventID = "hot-update:" + uuid.NewString() + } + payload := req.Payload + if payload == nil { + payload = make(map[string]any) + } + payload["spaceId"] = strings.TrimSpace(req.SpaceID) + result, err := h.svc.Apply(c.Request.Context(), event_hotfix.ApplyInput{ + EventID: eventID, + EventType: "index.hot_update", + Payload: payload, + ReceivedAt: time.Now().UTC(), + RetryCount: req.RetryCount, + }) + if err != nil { + h.handleError(c, err) + return + } + dto.ResponseSuccess(c, result) } func (h *EventHandler) RefreshAgent(c *gin.Context) { - dto.ResponseSuccess(c, gin.H{"status": "refreshing"}) + if h == nil || h.svc == nil { + dto.ResponseError(c, http.StatusServiceUnavailable, "event hotfix unavailable", nil) + return + } + raw, _ := c.GetRawData() + if !h.verifySignature(c, raw) { + dto.ResponseError(c, http.StatusUnauthorized, "签名校验失败", nil) + return + } + c.Request.Body = ioNopCloser(bytes.NewReader(raw)) + var req struct { + EventID string `json:"eventId"` + TargetEventType 
string `json:"targetEventType"` + RetryCount int `json:"retryCount"` + } + _ = c.ShouldBindJSON(&req) + eventID := strings.TrimSpace(req.EventID) + if eventID == "" { + eventID = "agent-refresh:" + uuid.NewString() + } + payload := map[string]any{ + "targetEventType": strings.TrimSpace(req.TargetEventType), + } + result, err := h.svc.Apply(c.Request.Context(), event_hotfix.ApplyInput{ + EventID: eventID, + EventType: "agent.weight.refresh", + Payload: payload, + ReceivedAt: time.Now().UTC(), + RetryCount: req.RetryCount, + }) + if err != nil && !errors.Is(err, event_hotfix.ErrDuplicateEvent) { + h.handleError(c, err) + return + } + statusText := "ok" + if err != nil { + statusText = "duplicate" + } else if result != nil { + statusText = result.Status + } + dto.ResponseSuccess(c, gin.H{"status": statusText}) } func (h *EventHandler) handleError(c *gin.Context, err error) { @@ -101,3 +225,52 @@ func (req eventApplyRequest) toInput() event_hotfix.ApplyInput { RetryCount: req.RetryCount, } } + +func (h *EventHandler) verifySignature(c *gin.Context, rawBody []byte) bool { + if h == nil || strings.TrimSpace(h.signatureSecret) == "" { + return true + } + sig := strings.TrimSpace(c.GetHeader(h.signatureHeader)) + tsRaw := strings.TrimSpace(c.GetHeader(h.timestampHeader)) + if sig == "" || tsRaw == "" { + return false + } + ts, ok := parseSignatureTimestamp(tsRaw) + if !ok { + return false + } + now := time.Now().UTC() + if h.allowedSkew > 0 && now.Sub(ts) > h.allowedSkew { + return false + } + mac := hmac.New(sha256.New, []byte(h.signatureSecret)) + _, _ = mac.Write([]byte(tsRaw)) + _, _ = mac.Write([]byte("\n")) + _, _ = mac.Write(rawBody) + expected := hex.EncodeToString(mac.Sum(nil)) + return hmac.Equal([]byte(strings.ToLower(expected)), []byte(strings.ToLower(sig))) +} + +func parseSignatureTimestamp(raw string) (time.Time, bool) { + raw = strings.TrimSpace(raw) + if raw == "" { + return time.Time{}, false + } + if i, err := strconv.ParseInt(raw, 10, 64); err == nil { 
+ // Support seconds or milliseconds. + if i > 1_000_000_000_000 { + return time.UnixMilli(i).UTC(), true + } + return time.Unix(i, 0).UTC(), true + } + if ts, err := time.Parse(time.RFC3339, raw); err == nil { + return ts.UTC(), true + } + return time.Time{}, false +} + +type nopCloser struct{ *bytes.Reader } + +func ioNopCloser(r *bytes.Reader) *nopCloser { return &nopCloser{Reader: r} } + +func (c *nopCloser) Close() error { return nil } diff --git a/backend/internal/transport/http/admin/knowledge_space/feedback_handlers.go b/backend/internal/transport/http/admin/knowledge_space/feedback_handlers.go index b10f433d..1a76feb9 100644 --- a/backend/internal/transport/http/admin/knowledge_space/feedback_handlers.go +++ b/backend/internal/transport/http/admin/knowledge_space/feedback_handlers.go @@ -2,6 +2,7 @@ package knowledge_space import ( "net/http" + "strconv" "github.com/gin-gonic/gin" "github.com/google/uuid" @@ -57,7 +58,7 @@ func (h *FeedbackHandler) Submit(c *gin.Context) { case err == ksvc.ErrInvalidInput: dto.ResponseError(c, http.StatusBadRequest, "参数不合法", err) case err == ksvc.ErrSpaceNotFound: - dto.ResponseError(c, http.StatusGone, "知识空间已删除或退役", err) + dto.ResponseError(c, http.StatusGone, "知识空间已删除或退役,请迁移至新的知识空间", err) default: dto.ResponseError(c, http.StatusInternalServerError, "提交反馈失败", err) } @@ -76,7 +77,17 @@ func (h *FeedbackHandler) List(c *gin.Context) { dto.ResponseError(c, http.StatusBadRequest, "空间 ID 无效", err) return } - cases, err := h.svc.ListCases(c.Request.Context(), spaceID, 50) + limit := 50 + if raw := c.Query("limit"); raw != "" { + if parsed, err := strconv.Atoi(raw); err == nil { + limit = parsed + } + } + cases, err := h.svc.ListCasesFiltered(c.Request.Context(), spaceID, ksvc.ListFeedbackFilter{ + Status: c.Query("status"), + Severity: c.Query("severity"), + Limit: limit, + }) if err != nil { dto.ResponseError(c, http.StatusInternalServerError, "查询反馈失败", err) return @@ -88,6 +99,179 @@ func (h *FeedbackHandler) List(c 
*gin.Context) { dto.ResponseSuccess(c, resp) } +func (h *FeedbackHandler) Close(c *gin.Context) { + if h == nil || h.svc == nil { + c.Status(http.StatusNotImplemented) + return + } + spaceID, err := uuid.Parse(c.Param("spaceId")) + if err != nil { + dto.ResponseError(c, http.StatusBadRequest, "空间 ID 无效", err) + return + } + caseID, err := uuid.Parse(c.Param("caseId")) + if err != nil { + dto.ResponseError(c, http.StatusBadRequest, "案例 ID 无效", err) + return + } + var req feedbackCaseActionRequest + _ = c.ShouldBindJSON(&req) + caseModel, err := h.svc.CloseCase(c.Request.Context(), ksvc.FeedbackCaseUpdateInput{ + SpaceID: spaceID, + CaseID: caseID, + Actor: req.RequestedBy, + Notes: req.ResolutionNotes, + }) + if err != nil { + switch { + case err == ksvc.ErrInvalidInput: + dto.ResponseError(c, http.StatusBadRequest, "参数不合法", err) + case err == ksvc.ErrSpaceNotFound: + dto.ResponseError(c, http.StatusGone, "知识空间已删除或退役,请迁移至新的知识空间", err) + default: + dto.ResponseError(c, http.StatusInternalServerError, "关闭反馈失败", err) + } + return + } + dto.ResponseSuccess(c, toFeedbackResponse(caseModel)) +} + +func (h *FeedbackHandler) Escalate(c *gin.Context) { + if h == nil || h.svc == nil { + c.Status(http.StatusNotImplemented) + return + } + spaceID, err := uuid.Parse(c.Param("spaceId")) + if err != nil { + dto.ResponseError(c, http.StatusBadRequest, "空间 ID 无效", err) + return + } + caseID, err := uuid.Parse(c.Param("caseId")) + if err != nil { + dto.ResponseError(c, http.StatusBadRequest, "案例 ID 无效", err) + return + } + var req feedbackCaseActionRequest + _ = c.ShouldBindJSON(&req) + caseModel, err := h.svc.EscalateCase(c.Request.Context(), ksvc.FeedbackCaseUpdateInput{ + SpaceID: spaceID, + CaseID: caseID, + Actor: req.RequestedBy, + Notes: req.Reason, + }) + if err != nil { + switch { + case err == ksvc.ErrInvalidInput: + dto.ResponseError(c, http.StatusBadRequest, "参数不合法", err) + case err == ksvc.ErrSpaceNotFound: + dto.ResponseError(c, http.StatusGone, "知识空间已删除或退役,请迁移至新的知识空间", 
err) + default: + dto.ResponseError(c, http.StatusInternalServerError, "升级反馈失败", err) + } + return + } + dto.ResponseSuccess(c, toFeedbackResponse(caseModel)) +} + +func (h *FeedbackHandler) Reprocess(c *gin.Context) { + if h == nil || h.svc == nil { + c.Status(http.StatusNotImplemented) + return + } + spaceID, err := uuid.Parse(c.Param("spaceId")) + if err != nil { + dto.ResponseError(c, http.StatusBadRequest, "空间 ID 无效", err) + return + } + caseID, err := uuid.Parse(c.Param("caseId")) + if err != nil { + dto.ResponseError(c, http.StatusBadRequest, "案例 ID 无效", err) + return + } + var req feedbackCaseActionRequest + _ = c.ShouldBindJSON(&req) + caseModel, err := h.svc.ReprocessCase(c.Request.Context(), spaceID, caseID, req.RequestedBy) + if err != nil { + switch { + case err == ksvc.ErrInvalidInput: + dto.ResponseError(c, http.StatusBadRequest, "参数不合法", err) + case err == ksvc.ErrSpaceNotFound: + dto.ResponseError(c, http.StatusGone, "知识空间已删除或退役,请迁移至新的知识空间", err) + default: + dto.ResponseError(c, http.StatusInternalServerError, "触发再加工失败", err) + } + return + } + dto.ResponseSuccessWithStatus(c, http.StatusAccepted, toFeedbackResponse(caseModel)) +} + +func (h *FeedbackHandler) Rollback(c *gin.Context) { + if h == nil || h.svc == nil { + c.Status(http.StatusNotImplemented) + return + } + spaceID, err := uuid.Parse(c.Param("spaceId")) + if err != nil { + dto.ResponseError(c, http.StatusBadRequest, "空间 ID 无效", err) + return + } + caseID, err := uuid.Parse(c.Param("caseId")) + if err != nil { + dto.ResponseError(c, http.StatusBadRequest, "案例 ID 无效", err) + return + } + var req feedbackCaseActionRequest + _ = c.ShouldBindJSON(&req) + caseModel, err := h.svc.RollbackCase(c.Request.Context(), spaceID, caseID, req.RequestedBy, req.Reason) + if err != nil { + switch { + case err == ksvc.ErrInvalidInput: + dto.ResponseError(c, http.StatusBadRequest, "参数不合法", err) + case err == ksvc.ErrSpaceNotFound: + dto.ResponseError(c, http.StatusGone, "知识空间已删除或退役,请迁移至新的知识空间", err) + 
default: + dto.ResponseError(c, http.StatusInternalServerError, "回滚失败", err) + } + return + } + dto.ResponseSuccess(c, toFeedbackResponse(caseModel)) +} + +func (h *FeedbackHandler) Export(c *gin.Context) { + if h == nil || h.svc == nil { + c.Status(http.StatusNotImplemented) + return + } + spaceID, err := uuid.Parse(c.Param("spaceId")) + if err != nil { + dto.ResponseError(c, http.StatusBadRequest, "空间 ID 无效", err) + return + } + limit := 200 + if raw := c.Query("limit"); raw != "" { + if parsed, err := strconv.Atoi(raw); err == nil { + limit = parsed + } + } + export, err := h.svc.ExportCases(c.Request.Context(), spaceID, ksvc.ListFeedbackFilter{ + Status: c.Query("status"), + Severity: c.Query("severity"), + Limit: limit, + }) + if err != nil { + switch { + case err == ksvc.ErrInvalidInput: + dto.ResponseError(c, http.StatusBadRequest, "参数不合法", err) + case err == ksvc.ErrSpaceNotFound: + dto.ResponseError(c, http.StatusGone, "知识空间已删除或退役,请迁移至新的知识空间", err) + default: + dto.ResponseError(c, http.StatusInternalServerError, "导出反馈失败", err) + } + return + } + dto.ResponseSuccess(c, export) +} + func parseChunkIDs(ids []string) ([]uuid.UUID, error) { out := make([]uuid.UUID, 0, len(ids)) for _, id := range ids { diff --git a/backend/internal/transport/http/admin/knowledge_space/handlers.go b/backend/internal/transport/http/admin/knowledge_space/handlers.go index 5d23936c..b5d46ba0 100644 --- a/backend/internal/transport/http/admin/knowledge_space/handlers.go +++ b/backend/internal/transport/http/admin/knowledge_space/handlers.go @@ -1,16 +1,23 @@ package knowledge_space import ( + "crypto/sha256" + "encoding/hex" "errors" "net/http" "strconv" "strings" + "time" "github.com/gin-gonic/gin" "github.com/google/uuid" + "gorm.io/datatypes" "github.com/ArtisanCloud/PowerX/internal/app/shared" ksvc "github.com/ArtisanCloud/PowerX/internal/service/knowledge_space" + models "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/model/knowledge" + coreRepo 
"github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository" + knowledgeRepo "github.com/ArtisanCloud/PowerX/pkg/corex/db/persistence/repository/knowledge" "github.com/ArtisanCloud/PowerX/pkg/corex/iam/reqctx" "github.com/ArtisanCloud/PowerX/pkg/dto" ) @@ -20,7 +27,14 @@ func RegisterAPIRoutes(public, protected *gin.RouterGroup, deps *shared.Deps) { if protected == nil || deps == nil || deps.KnowledgeSpace == nil || deps.KnowledgeSpace.Service == nil { return } - handler := &Handler{svc: deps.KnowledgeSpace.Service} + handler := &Handler{ + svc: deps.KnowledgeSpace.Service, + spaces: knowledgeRepo.NewKnowledgeSpaceRepository(deps.DB), + policies: knowledgeRepo.NewPolicyTemplateRepository(deps.DB), + } + profileHandler := newProfileHandler(deps.DB) + corpusCheckHandler := newCorpusCheckHandler(deps.KnowledgeSpace.CorpusCheck) + playgroundHandler := newPlaygroundHandler(deps.DB, deps.KnowledgeSpace.VectorStore) ingestionHandler := NewIngestionHandler(deps) fusionHandler := NewFusionHandler(deps) feedbackHandler := NewFeedbackHandler(deps) @@ -28,12 +42,38 @@ func RegisterAPIRoutes(public, protected *gin.RouterGroup, deps *shared.Deps) { eventHandler := NewEventHandler(deps) decayHandler := NewDecayHandler(deps) releaseHandler := NewReleaseHandler(deps) + strategyHandler := NewStrategyHandler(deps) + sourceHandler := NewSourceHandler(deps) + vectorIndexHandler := NewVectorIndexHandler(deps) group := protected.Group("/admin/knowledge-spaces") { + group.GET("", handler.list) + group.GET("/:spaceId", handler.get) group.POST("", handler.create) group.PATCH("/:spaceId", handler.update) group.POST("/:spaceId/retire", handler.retire) + if strategyHandler != nil { + group.POST("/strategy/validate", strategyHandler.Validate) + group.GET("/:spaceId/strategy/validate", strategyHandler.ValidateForSpace) + } + if vectorIndexHandler != nil { + group.GET("/:spaceId/vector-index", vectorIndexHandler.GetStatus) + group.POST("/:spaceId/vector-index/activate", 
vectorIndexHandler.Activate) + } + if corpusCheckHandler != nil { + group.POST("/:spaceId/corpus-check/jobs", corpusCheckHandler.Start) + group.GET("/:spaceId/corpus-check/jobs/:jobId", corpusCheckHandler.Get) + } + if playgroundHandler != nil { + group.POST("/:spaceId/playground/retrieval", playgroundHandler.Retrieve) + } if ingestionHandler != nil { + group.GET("/:spaceId/ingestion-jobs", ingestionHandler.List) + group.GET("/:spaceId/ingestion-jobs/:jobId", ingestionHandler.Get) + group.DELETE("/:spaceId/ingestion-jobs/:jobId", ingestionHandler.DeleteJob) + group.GET("/:spaceId/ingestion-jobs/:jobId/chunks", ingestionHandler.Chunks) + group.GET("/:spaceId/ingestion-jobs/:jobId/pages/:pageNumber/image", ingestionHandler.PageImage) + group.PATCH("/:spaceId/ingestion-jobs/:jobId/chunks/:chunkId", ingestionHandler.UpdateChunk) group.POST("/:spaceId/ingestion-jobs", ingestionHandler.Trigger) } if fusionHandler != nil { @@ -44,6 +84,34 @@ func RegisterAPIRoutes(public, protected *gin.RouterGroup, deps *shared.Deps) { if feedbackHandler != nil { group.GET("/:spaceId/feedback", feedbackHandler.List) group.POST("/:spaceId/feedback", feedbackHandler.Submit) + group.POST("/:spaceId/feedback/:caseId/close", feedbackHandler.Close) + group.POST("/:spaceId/feedback/:caseId/escalate", feedbackHandler.Escalate) + group.POST("/:spaceId/feedback/:caseId/reprocess", feedbackHandler.Reprocess) + group.POST("/:spaceId/feedback/:caseId/rollback", feedbackHandler.Rollback) + group.GET("/:spaceId/feedback/export", feedbackHandler.Export) + } + if sourceHandler != nil { + group.GET("/:spaceId/sources", sourceHandler.ListSpaceSources) + group.POST("/:spaceId/sources/sync-jobs", sourceHandler.CreateSyncJob) + group.POST("/:spaceId/sources/sync-jobs/:jobId/pause", sourceHandler.PauseSyncJob) + group.POST("/:spaceId/sources/sync-jobs/:jobId/run", sourceHandler.RunSyncJob) + group.GET("/:spaceId/sources/sync-jobs/:jobId", sourceHandler.GetSyncJob) + } + } + if profileHandler != nil { + 
profileGroup := protected.Group("/admin/knowledge/profiles") + profileHandler.routes(profileGroup) + } + if sourceHandler != nil { + sourceGroup := protected.Group("/admin/knowledge-sources") + { + sourceGroup.GET("/credentials", sourceHandler.ListCredentials) + sourceGroup.POST("/credentials", sourceHandler.CreateCredential) + sourceGroup.GET("/connectors", sourceHandler.ListConnectors) + sourceGroup.POST("/connectors", sourceHandler.CreateConnector) + sourceGroup.GET("/connectors/:connectorId", sourceHandler.GetConnector) + sourceGroup.PATCH("/connectors/:connectorId", sourceHandler.UpdateConnector) + sourceGroup.POST("/connectors/:connectorId/pause", sourceHandler.PauseConnector) } } if deltaHandler != nil { @@ -75,6 +143,8 @@ func RegisterAPIRoutes(public, protected *gin.RouterGroup, deps *shared.Deps) { if releaseHandler != nil { releaseGroup := protected.Group("/knowledge/release") { + releaseGroup.GET("/policies", releaseHandler.ListPolicies) + releaseGroup.GET("/status", releaseHandler.Status) releaseGroup.POST("/policies", releaseHandler.UpsertPolicy) releaseGroup.POST("/publish", releaseHandler.Publish) releaseGroup.POST("/promote", releaseHandler.Promote) @@ -85,7 +155,66 @@ func RegisterAPIRoutes(public, protected *gin.RouterGroup, deps *shared.Deps) { // Handler exposes provisioning handlers. 
type Handler struct { - svc *ksvc.Service + svc *ksvc.Service + spaces *knowledgeRepo.KnowledgeSpaceRepository + policies *knowledgeRepo.PolicyTemplateRepository +} + +func (h *Handler) list(c *gin.Context) { + tenantUUID, ok := tenantUUIDFromContext(c) + if !ok { + return + } + limit := 20 + if raw := strings.TrimSpace(c.Query("limit")); raw != "" { + if n, err := strconv.Atoi(raw); err == nil { + limit = n + } + } + status := strings.TrimSpace(c.Query("status")) + + if h.spaces == nil { + dto.ResponseError(c, http.StatusInternalServerError, "服务异常", errors.New("spaces repository unavailable")) + return + } + items, err := h.spaces.ListByTenant(c.Request.Context(), tenantUUID.String(), status, limit) + if err != nil { + dto.ResponseError(c, http.StatusInternalServerError, "服务异常", err) + return + } + + out := make([]knowledgeSpaceResponse, 0, len(items)) + for i := range items { + out = append(out, toResponse(&items[i])) + } + dto.ResponseSuccess(c, out) +} + +func (h *Handler) get(c *gin.Context) { + tenantUUID, ok := tenantUUIDFromContext(c) + if !ok { + return + } + spaceID, err := uuid.Parse(c.Param("spaceId")) + if err != nil { + dto.ResponseError(c, http.StatusBadRequest, "无效的空间 ID", err) + return + } + + if h.spaces == nil { + dto.ResponseError(c, http.StatusInternalServerError, "服务异常", errors.New("spaces repository unavailable")) + return + } + space, err := h.spaces.FindByUUID(c.Request.Context(), spaceID) + if err != nil { + dto.ResponseError(c, http.StatusInternalServerError, "服务异常", err) + return + } + if space == nil || strings.ToLower(strings.TrimSpace(space.TenantUUID)) != strings.ToLower(strings.TrimSpace(tenantUUID.String())) { + dto.ResponseError(c, http.StatusNotFound, "知识空间不存在", errors.New("not found")) + return + } + dto.ResponseSuccess(c, toResponse(space)) } func (h *Handler) create(c *gin.Context) { @@ -98,21 +227,24 @@ func (h *Handler) create(c *gin.Context) { if !ok { return } - policyID, err := 
strconv.ParseUint(strings.TrimSpace(req.PolicyTemplateVersionID), 10, 64) + policyID, err := h.resolvePolicyTemplateVersionID(c, req.PolicyTemplateVersionID) if err != nil { dto.ResponseError(c, http.StatusBadRequest, "无效的策略模版 ID", err) return } flags := ksvc.EncodeConcurrencyFlag(req.FeatureFlags, req.Quotas.IngestionConcurrency) space, err := h.svc.CreateSpace(c.Request.Context(), ksvc.CreateSpaceInput{ - TenantUUID: tenantUUID.String(), - SpaceName: req.SpaceName, - DepartmentCode: req.DepartmentCode, - QuotaCPU: req.Quotas.CPUCores, - QuotaStorageGB: req.Quotas.StorageGB, - PolicyVersion: policyID, - FeatureFlags: flags, - RequestedBy: req.RequestedBy, + TenantUUID: tenantUUID.String(), + SpaceName: req.SpaceName, + DepartmentCode: req.DepartmentCode, + QuotaCPU: req.Quotas.CPUCores, + QuotaStorageGB: req.Quotas.StorageGB, + PolicyVersion: policyID, + IngestionProfileKey: strings.TrimSpace(req.IngestionProfileKey), + IndexProfileKey: strings.TrimSpace(req.IndexProfileKey), + RAGProfileKey: strings.TrimSpace(req.RAGProfileKey), + FeatureFlags: flags, + RequestedBy: req.RequestedBy, }) if err != nil { h.handleError(c, err) @@ -134,14 +266,16 @@ func (h *Handler) update(c *gin.Context) { } var policy uint64 if strings.TrimSpace(req.PolicyTemplateVersionID) != "" { - if policy, err = strconv.ParseUint(strings.TrimSpace(req.PolicyTemplateVersionID), 10, 64); err != nil { + if policy, err = h.resolvePolicyTemplateVersionID(c, req.PolicyTemplateVersionID); err != nil { dto.ResponseError(c, http.StatusBadRequest, "无效的策略模版 ID", err) return } } var flags []string - if req.Quotas != nil || len(req.FeatureFlags) > 0 { + if req.Quotas != nil { flags = ksvc.EncodeConcurrencyFlag(req.FeatureFlags, targetConcurrency(req.Quotas)) + } else if len(req.FeatureFlags) > 0 { + flags = req.FeatureFlags } in := ksvc.UpdateSpaceInput{ SpaceID: spaceID, @@ -177,6 +311,7 @@ func (h *Handler) retire(c *gin.Context) { SpaceID: spaceID, Reason: req.Reason, RequestedBy: req.RequestedBy, + 
DropVectors: req.DropVectors, }) if err != nil { h.handleError(c, err) @@ -185,7 +320,104 @@ func (h *Handler) retire(c *gin.Context) { dto.ResponseSuccessWithStatus(c, http.StatusAccepted, toResponse(space)) } +func (h *Handler) resolvePolicyTemplateVersionID(c *gin.Context, raw string) (uint64, error) { + raw = strings.TrimSpace(raw) + if raw == "" { + return 0, errors.New("missing policy template version id") + } + + // 1) 优先支持数值 ID(兼容现有 handler 行为) + if id, err := strconv.ParseUint(raw, 10, 64); err == nil && id > 0 { + return id, nil + } + + // 2) 兼容 "default-v1" / "default/v1" + if h.policies == nil { + return 0, errors.New("policy repository unavailable") + } + + sepIdx := strings.LastIndex(raw, "-") + sep := "-" + if strings.Contains(raw, "/") { + sepIdx = strings.LastIndex(raw, "/") + sep = "/" + } + if sepIdx <= 0 || sepIdx >= len(raw)-1 { + return 0, errors.New("policy template version id must be numeric or in '