Skip to content

Commit 037bab8

Browse files
committed
refactor: 拆分 command_system 的测试
1 parent d2125d7 commit 037bab8

File tree

2 files changed

+148
-195
lines changed

2 files changed

+148
-195
lines changed

test/plugin_system/command_system/test_advanced_parser.py renamed to test/plugin_system/command_system/test_parser_advanced.py

Lines changed: 3 additions & 195 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,4 @@
1-
"""高级命令解析器测试
2-
3-
测试 AdvancedCommandParser 的功能,包括:
4-
- 选项解析(布尔状态)
5-
- 命名参数解析(键值对)
6-
- Element 解析(未解析内容)
7-
- 位置保持
8-
- 与 StringTokenizer 的组合使用
9-
"""
1+
"""高级命令解析器高级和集成测试"""
102

113
from ncatbot.core.service.builtin.unified_registry.command_system.lexer import (
124
StringTokenizer,
@@ -16,143 +8,8 @@
168
)
179

1810

19-
class TestAdvancedCommandParser:
20-
"""高级命令解析器测试类"""
21-
22-
def test_options_parsing(self):
23-
"""测试选项解析"""
24-
parser = AdvancedCommandParser()
25-
26-
# 短选项
27-
tokenizer = StringTokenizer("-v")
28-
tokens = tokenizer.tokenize()
29-
result = parser.parse(tokens)
30-
31-
assert result.options == {"v": True}
32-
assert result.named_params == {}
33-
assert result.elements == []
34-
35-
# 组合短选项
36-
tokenizer = StringTokenizer("-xvf")
37-
tokens = tokenizer.tokenize()
38-
result = parser.parse(tokens)
39-
40-
assert result.options == {"x": True, "v": True, "f": True}
41-
assert result.named_params == {}
42-
assert result.elements == []
43-
44-
# 长选项
45-
tokenizer = StringTokenizer("--verbose --debug")
46-
tokens = tokenizer.tokenize()
47-
result = parser.parse(tokens)
48-
49-
assert result.options == {"verbose": True, "debug": True}
50-
assert result.named_params == {}
51-
assert result.elements == []
52-
53-
# 混合选项
54-
tokenizer = StringTokenizer("-v --debug -xf --help")
55-
tokens = tokenizer.tokenize()
56-
result = parser.parse(tokens)
57-
58-
expected_options = {
59-
"v": True,
60-
"debug": True,
61-
"x": True,
62-
"f": True,
63-
"help": True,
64-
}
65-
assert result.options == expected_options
66-
assert result.named_params == {}
67-
assert result.elements == []
68-
69-
def test_named_params_parsing(self):
70-
"""测试命名参数解析"""
71-
parser = AdvancedCommandParser()
72-
73-
# 短选项赋值
74-
tokenizer = StringTokenizer("-p=8080")
75-
tokens = tokenizer.tokenize()
76-
result = parser.parse(tokens)
77-
78-
assert result.options == {}
79-
assert result.named_params == {"p": "8080"}
80-
assert result.elements == []
81-
82-
# 长选项赋值
83-
tokenizer = StringTokenizer("--port=8080 --host=localhost")
84-
tokens = tokenizer.tokenize()
85-
result = parser.parse(tokens)
86-
87-
assert result.options == {}
88-
assert result.named_params == {"port": "8080", "host": "localhost"}
89-
assert result.elements == []
90-
91-
# 引用字符串赋值
92-
tokenizer = StringTokenizer('--message="hello world" -c="gzip"')
93-
tokens = tokenizer.tokenize()
94-
result = parser.parse(tokens)
95-
96-
assert result.options == {}
97-
assert result.named_params == {"message": "hello world", "c": "gzip"}
98-
assert result.elements == []
99-
100-
# 复杂值
101-
tokenizer = StringTokenizer(
102-
'--env="NODE_ENV=production" --config="/path/to/config.json"'
103-
)
104-
tokens = tokenizer.tokenize()
105-
result = parser.parse(tokens)
106-
107-
expected_params = {
108-
"env": "NODE_ENV=production",
109-
"config": "/path/to/config.json",
110-
}
111-
assert result.named_params == expected_params
112-
113-
def test_elements_parsing(self):
114-
"""测试 Element 解析"""
115-
parser = AdvancedCommandParser()
116-
117-
# 基本元素
118-
tokenizer = StringTokenizer('backup "my files" document.txt')
119-
tokens = tokenizer.tokenize()
120-
result = parser.parse(tokens)
121-
122-
assert result.options == {}
123-
assert result.named_params == {}
124-
assert len(result.elements) == 3
125-
126-
# 检查元素内容和位置
127-
assert result.elements[0].type == "text"
128-
assert result.elements[0].content == "backup"
129-
assert result.elements[0].position == 0
130-
131-
assert result.elements[1].type == "text"
132-
assert result.elements[1].content == "my files"
133-
assert result.elements[1].position == 1
134-
135-
assert result.elements[2].type == "text"
136-
assert result.elements[2].content == "document.txt"
137-
assert result.elements[2].position == 2
138-
139-
# 只有引用字符串
140-
tokenizer = StringTokenizer('"first string" "second string"')
141-
tokens = tokenizer.tokenize()
142-
result = parser.parse(tokens)
143-
144-
assert len(result.elements) == 2
145-
assert result.elements[0].content == "first string"
146-
assert result.elements[1].content == "second string"
147-
148-
# 混合元素类型
149-
tokenizer = StringTokenizer('command "quoted arg" normal_arg')
150-
tokens = tokenizer.tokenize()
151-
result = parser.parse(tokens)
152-
153-
assert len(result.elements) == 3
154-
contents = [e.content for e in result.elements]
155-
assert contents == ["command", "quoted arg", "normal_arg"]
11+
class TestAdvancedCommandParserAdvanced:
12+
"""高级功能测试"""
15613

15714
def test_mixed_parsing(self):
15815
"""测试混合解析"""
@@ -385,52 +242,3 @@ def test_integration_with_string_tokenizer():
385242
assert actual_elements == case["expected_elements"], (
386243
f"Elements mismatch for: {case['input']}"
387244
)
388-
389-
390-
if __name__ == "__main__":
391-
print("运行高级命令解析器测试...")
392-
393-
# 创建测试实例
394-
test_instance = TestAdvancedCommandParser()
395-
396-
# 运行所有测试方法
397-
test_methods = [
398-
("选项解析", test_instance.test_options_parsing),
399-
("命名参数解析", test_instance.test_named_params_parsing),
400-
("Element 解析", test_instance.test_elements_parsing),
401-
("混合解析", test_instance.test_mixed_parsing),
402-
("位置保持", test_instance.test_position_preservation),
403-
("复杂场景", test_instance.test_complex_scenarios),
404-
("边界情况", test_instance.test_edge_cases),
405-
("原始 Token 保存", test_instance.test_raw_tokens_preservation),
406-
("错误恢复", test_instance.test_error_resilience),
407-
]
408-
409-
for test_name, test_method in test_methods:
410-
try:
411-
test_method()
412-
print(f"✓ {test_name}测试通过")
413-
except Exception as e:
414-
print(f"✗ {test_name}测试失败: {e}")
415-
416-
# 集成测试
417-
try:
418-
test_integration_with_string_tokenizer()
419-
print("✓ 与字符串分词器集成测试通过")
420-
except Exception as e:
421-
print(f"✗ 集成测试失败: {e}")
422-
423-
print("\n高级命令解析器所有测试完成!✨")
424-
425-
# 演示用法
426-
print("\n演示完整用法:")
427-
command = 'backup "important files" --dest=/backup -xvf --compress=gzip --verbose'
428-
tokenizer = StringTokenizer(command)
429-
tokens = tokenizer.tokenize()
430-
parser = AdvancedCommandParser()
431-
result = parser.parse(tokens)
432-
433-
print(f"输入: {command}")
434-
print(f"选项: {result.options}")
435-
print(f"命名参数: {result.named_params}")
436-
print(f"元素: {[e.content for e in result.elements]}")
Lines changed: 145 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,145 @@
1+
"""高级命令解析器基础测试"""
2+
3+
from ncatbot.core.service.builtin.unified_registry.command_system.lexer import (
4+
StringTokenizer,
5+
AdvancedCommandParser,
6+
)
7+
8+
9+
class TestAdvancedCommandParserBasics:
    """Basic behaviour of AdvancedCommandParser.

    Covers the three result channels produced by ``parse``:
    boolean options, key/value named parameters, and positional
    elements (content, type and position index).
    """

    @staticmethod
    def _run(parser, text):
        """Tokenize *text* with StringTokenizer and parse the tokens."""
        return parser.parse(StringTokenizer(text).tokenize())

    def test_options_parsing(self):
        """Short, bundled, long and mixed boolean options."""
        parser = AdvancedCommandParser()

        # Single short option.
        parsed = self._run(parser, "-v")
        assert parsed.options == {"v": True}
        assert parsed.named_params == {}
        assert parsed.elements == []

        # Bundled short options expand to one flag per letter.
        parsed = self._run(parser, "-xvf")
        assert parsed.options == {"x": True, "v": True, "f": True}
        assert parsed.named_params == {}
        assert parsed.elements == []

        # Long options.
        parsed = self._run(parser, "--verbose --debug")
        assert parsed.options == {"verbose": True, "debug": True}
        assert parsed.named_params == {}
        assert parsed.elements == []

        # Short, bundled and long options in one command line.
        parsed = self._run(parser, "-v --debug -xf --help")
        assert parsed.options == {
            "v": True,
            "debug": True,
            "x": True,
            "f": True,
            "help": True,
        }
        assert parsed.named_params == {}
        assert parsed.elements == []

    def test_named_params_parsing(self):
        """Key/value parameters via ``-k=v`` and ``--key=value`` forms."""
        parser = AdvancedCommandParser()

        # Short-option assignment.
        parsed = self._run(parser, "-p=8080")
        assert parsed.options == {}
        assert parsed.named_params == {"p": "8080"}
        assert parsed.elements == []

        # Long-option assignment.
        parsed = self._run(parser, "--port=8080 --host=localhost")
        assert parsed.options == {}
        assert parsed.named_params == {"port": "8080", "host": "localhost"}
        assert parsed.elements == []

        # Quoted values keep their inner spaces; quotes are stripped.
        parsed = self._run(parser, '--message="hello world" -c="gzip"')
        assert parsed.options == {}
        assert parsed.named_params == {"message": "hello world", "c": "gzip"}
        assert parsed.elements == []

        # Values containing '=' or path separators are taken verbatim.
        parsed = self._run(
            parser, '--env="NODE_ENV=production" --config="/path/to/config.json"'
        )
        assert parsed.named_params == {
            "env": "NODE_ENV=production",
            "config": "/path/to/config.json",
        }

    def test_elements_parsing(self):
        """Positional elements: content, type and position index."""
        parser = AdvancedCommandParser()

        # Plain words mixed with a quoted string.
        parsed = self._run(parser, 'backup "my files" document.txt')
        assert parsed.options == {}
        assert parsed.named_params == {}
        assert len(parsed.elements) == 3
        # Each element keeps its text content and zero-based position.
        for idx, expected in enumerate(["backup", "my files", "document.txt"]):
            element = parsed.elements[idx]
            assert element.type == "text"
            assert element.content == expected
            assert element.position == idx

        # Quoted strings only.
        parsed = self._run(parser, '"first string" "second string"')
        assert [e.content for e in parsed.elements] == [
            "first string",
            "second string",
        ]

        # Mixed element kinds preserve command-line order.
        parsed = self._run(parser, 'command "quoted arg" normal_arg')
        assert [e.content for e in parsed.elements] == [
            "command",
            "quoted arg",
            "normal_arg",
        ]

0 commit comments

Comments
 (0)