Skip to content
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
117 changes: 87 additions & 30 deletions cmd/testdata/stores.yaml
Original file line number Diff line number Diff line change
@@ -1,35 +1,92 @@
stores:
- name: git
kind:
name: atest-store-git
enabled: true
url: xxx
readonly: false
disabled: false
- name: ai
kind:
name: atest-ext-ai
enabled: true
url: ""
readonly: false
disabled: false
properties:
- name: git
kind:
name: atest-store-git
dependencies: []
url: "unix:///tmp/atest-store-git.sock"
params: []
link: ""
enabled: true
categories: []
description: ""
url: xxx
username: ""
password: ""
readonly: false
disabled: false
properties: {}
- name: ai
kind:
name: atest-ext-ai
dependencies: [] # no dependencies
url: "unix:///tmp/atest-ext-ai.sock"
params:
- key: "provider"
description: "AI provider (local, openai, claude)"
defaultValue: "local"
- key: "model"
description: "AI model name"
defaultValue: "codellama"
description: "AI provider (ollama, openai, deepseek)"
defaultValue: "ollama"
- key: "endpoint"
description: "AI service endpoint"
description: "AI service endpoint URL"
defaultValue: "http://localhost:11434"
plugins:
- name: atest-store-git
url: unix:///tmp/atest-store-git.sock
enabled: true
- name: atest-ext-ai
url: unix:///tmp/atest-ext-ai.sock
- key: "api_key"
description: "API key for OpenAI/Deepseek providers"
defaultValue: ""
- key: "model"
description: "AI model name (auto-discovered for ollama)"
defaultValue: ""
- key: "max_tokens"
description: "Maximum tokens for AI generation"
defaultValue: "4096"
- key: "temperature"
description: "Generation temperature (0.0-2.0)"
defaultValue: "0.7"
- key: "timeout"
description: "Request timeout duration"
defaultValue: "30s"
link: "https://github.com/LinuxSuRen/atest-ext-ai"
enabled: true
description: "AI Extension Plugin for intelligent SQL generation and execution"
version: "latest"
registry: "ghcr.io/linuxsuren/atest-ext-ai"
categories: ["ai", "sql-generation"]
description: "AI Extension Plugin for natural language to SQL conversion"
url: "unix:///tmp/atest-ext-ai.sock"
username: ""
password: ""
readonly: false
disabled: false
properties:
provider: "ollama"
endpoint: "http://localhost:11434"
api_key: ""
model: ""
max_tokens: "4096"
temperature: "0.7"
timeout: "30s"

plugins:
- name: atest-store-git
dependencies: []
url: "unix:///tmp/atest-store-git.sock"
params: []
link: ""
enabled: true
categories: []
- name: atest-ext-ai
dependencies: []
url: "unix:///tmp/atest-ext-ai.sock"
params:
- key: "provider"
description: "AI provider (ollama, openai, deepseek)"
defaultValue: "ollama"
- key: "endpoint"
description: "AI service endpoint"
defaultValue: "http://localhost:11434"
- key: "api_key"
description: "API key for external AI services"
defaultValue: ""
- key: "model"
description: "AI model name (auto-discovered for ollama)"
defaultValue: ""
link: "https://github.com/LinuxSuRen/atest-ext-ai"
enabled: true
categories: ["ai", "sql-generation"]
description: "AI Extension Plugin for natural language to SQL conversion"
version: "v0.1.0"
registry: "ghcr.io/linuxsuren/atest-ext-ai"
2 changes: 1 addition & 1 deletion console/atest-ui/src/App.vue
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ API.GetMenus((menus) => {
<WelcomePage v-else-if="panelName === 'welcome' || panelName === ''" />

<span v-for="menu in extensionMenus" :key="menu.index" :index="menu.index">
<Extension v-if="panelName === menu.index" :name="menu.name" />
<Extension v-if="panelName === menu.index" :name="menu.index" />
</span>
</el-main>

Expand Down
46 changes: 34 additions & 12 deletions console/atest-ui/src/views/Extension.vue
Original file line number Diff line number Diff line change
Expand Up @@ -9,33 +9,55 @@ const props = defineProps<Props>()
const loading = ref(true)
const loadPlugin = async (): Promise<void> => {
try {
// First load CSS
API.GetPageOfCSS(props.name, (d) => {
const style = document.createElement('style');
style.textContent = d.message;
document.head.appendChild(style);
});

// Then load JS and mount plugin
API.GetPageOfJS(props.name, (d) => {
const script = document.createElement('script');
script.type = 'text/javascript';
script.textContent = d.message;
document.head.appendChild(script);

const plugin = window.ATestPlugin;

if (plugin && plugin.mount) {
console.log('extension load success');
const container = document.getElementById("plugin-container");
if (container) {
container.innerHTML = ''; // Clear previous content
plugin.mount(container);
// Implement retry mechanism with exponential backoff
const checkPluginLoad = (retries = 0, maxRetries = 10) => {
const plugin = (window as any).ATestPlugin;

console.log(`Plugin load attempt ${retries + 1}/${maxRetries + 1}`);

if (plugin && plugin.mount) {
console.log('extension load success');
const container = document.getElementById("plugin-container");
if (container) {
container.innerHTML = ''; // Clear previous content
plugin.mount(container);
loading.value = false;
} else {
console.error('Plugin container not found');
loading.value = false;
}
} else if (retries < maxRetries) {
// Incremental retry mechanism: 50ms, 100ms, 150ms...
const delay = 50 + retries * 50;
console.log(`ATestPlugin not ready, retrying in ${delay}ms (attempt ${retries + 1}/${maxRetries + 1})`);
setTimeout(() => checkPluginLoad(retries + 1, maxRetries), delay);
} else {
console.error('ATestPlugin not found or missing mount method after max retries');
console.error('Window.ATestPlugin value:', (window as any).ATestPlugin);
loading.value = false;
}
}
};

// Start the retry mechanism
checkPluginLoad();
});
} catch (error) {
console.log(`extension load error: ${(error as Error).message}`)
} finally {
console.log('extension load finally');
console.log(`extension load error: ${(error as Error).message}`);
loading.value = false; // Set loading to false on error
}
};
try {
Expand Down
2 changes: 1 addition & 1 deletion console/atest-ui/vite.config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ export default defineConfig({
vue({
template: {
compilerOptions: {
nodeTransforms: true ? [removeDataTestAttrs] : [],
nodeTransforms: process.env.NODE_ENV === 'production' ? [removeDataTestAttrs] : [],
},
},
}),
Expand Down
70 changes: 70 additions & 0 deletions pkg/testing/remote/grpc_store.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,11 @@ package remote

import (
"context"
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
"time"

"github.com/linuxsuren/api-testing/pkg/logging"
Expand Down Expand Up @@ -316,6 +318,12 @@ func (g *gRPCLoader) PProf(name string) []byte {
}

func (g *gRPCLoader) Query(query map[string]string) (result testing.DataResult, err error) {
// Detect AI method calls
if method := query["method"]; strings.HasPrefix(method, "ai.") {
return g.handleAIQuery(query)
}

// Original standard query logic
var dataResult *server.DataQueryResult
offset, _ := strconv.ParseInt(query["offset"], 10, 64)
limit, _ := strconv.ParseInt(query["limit"], 10, 64)
Expand Down Expand Up @@ -444,3 +452,65 @@ func (g *gRPCLoader) Close() {
g.conn.Close()
}
}

// handleAIQuery handles AI-specific queries
func (g *gRPCLoader) handleAIQuery(query map[string]string) (testing.DataResult, error) {
method := query["method"]

var dataQuery *server.DataQuery
switch method {
case "ai.generate":
dataQuery = &server.DataQuery{
Type: "ai",
Key: "generate",
Sql: g.encodeAIGenerateParams(query),
}
case "ai.capabilities":
dataQuery = &server.DataQuery{
Type: "ai",
Key: "capabilities",
Sql: "", // No additional parameters needed
}
default:
return testing.DataResult{}, fmt.Errorf("unsupported AI method: %s", method)
}

// Call existing gRPC Query
dataResult, err := g.client.Query(g.ctx, dataQuery)
if err != nil {
return testing.DataResult{}, err
}

// Convert response to testing.DataResult format
return g.convertAIResponse(dataResult), nil
}

// encodeAIGenerateParams encodes AI generation parameters into SQL field
func (g *gRPCLoader) encodeAIGenerateParams(query map[string]string) string {
params := map[string]string{
"model": query["model"],
"prompt": query["prompt"],
"config": query["config"],
}
data, _ := json.Marshal(params)
return string(data)
}

// convertAIResponse converts AI response to standard format
func (g *gRPCLoader) convertAIResponse(dataResult *server.DataQueryResult) testing.DataResult {
result := testing.DataResult{
Pairs: pairToMap(dataResult.Data),
}

// Map AI-specific response fields
if content := result.Pairs["generated_sql"]; content != "" {
result.Pairs["content"] = content // Follow AI interface standard
}
if result.Pairs["error"] != "" {
result.Pairs["success"] = "false"
} else {
result.Pairs["success"] = "true"
}

return result
}
Loading