
Commit 19f8941

cameronr authored and sudo-tee committed
refactor(api)!: new format for run/run_new args
As discussed in #84, the previous cmd refactor had broken argument handling for run/run_new. The old format had an ambiguity between prompt and context args. This change is breaking because we no longer support the old format. The new format is:

:OpencodeRun [agent=<name>] [model=<model>] [context=<overrides>] <prompt>
:OpencodeRunNewSession [agent=<name>] [model=<model>] [context=<overrides>] <prompt>
1 parent ccbc79c · commit 19f8941
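For illustration, a complete invocation in the new format, using argument values taken from the test cases added in this commit, could look like this:

:OpencodeRun agent=plan model=openai/gpt-4 context=current_file.enabled=false analyze this code

Parsing stops at the first token that is not an agent=, model=, or context= prefix; that token and everything after it are joined into the prompt ('analyze this code' here).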

File tree: 4 files changed, +176 −4 lines changed


lua/opencode/api.lua

Lines changed: 4 additions & 4 deletions
@@ -958,24 +958,24 @@ M.commands = {
   run = {
     desc = 'Run prompt in current session',
     fn = function(args)
-      local prompt = table.concat(args, ' ')
+      local opts, prompt = util.parse_run_args(args)
       if prompt == '' then
         vim.notify('Prompt required', vim.log.levels.ERROR)
         return
       end
-      M.run(prompt)
+      M.run(prompt, opts)
     end,
   },

   run_new = {
     desc = 'Run prompt in new session',
     fn = function(args)
-      local prompt = table.concat(args, ' ')
+      local opts, prompt = util.parse_run_args(args)
       if prompt == '' then
         vim.notify('Prompt required', vim.log.levels.ERROR)
         return
       end
-      M.run_new_session(prompt)
+      M.run_new_session(prompt, opts)
     end,
   },
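Since M.run and M.run_new_session now accept an opts table as a second argument, callers using the Lua API can pass options directly and skip the string parsing. A minimal sketch, assuming the module is required as 'opencode.api' (inferred from the file path) and using illustrative option values:

-- Direct Lua API calls; the opts table mirrors what util.parse_run_args produces
local api = require('opencode.api')
api.run('analyze this code', { agent = 'plan', model = 'openai/gpt-4' })
api.run_new_session('start a fresh review', { context = { current_file = { enabled = false } } })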

lua/opencode/util.lua

Lines changed: 34 additions & 0 deletions
@@ -420,4 +420,38 @@ function M.strdisplaywidth(str)
   return vim.fn.strdisplaywidth(str)
 end

+--- Parse run command arguments with optional agent, model, and context prefixes.
+--- Returns opts table and remaining prompt string.
+--- Format: [agent=<name>] [model=<model>] [context=<key=value,...>] <prompt>
+--- @param args string[]
+--- @return table opts, string prompt
+function M.parse_run_args(args)
+  local opts = {}
+  local prompt_start_idx = 1
+
+  for i, token in ipairs(args) do
+    local agent = token:match('^agent=(.+)$')
+    local model = token:match('^model=(.+)$')
+    local context = token:match('^context=(.+)$')
+
+    if agent then
+      opts.agent = agent
+      prompt_start_idx = i + 1
+    elseif model then
+      opts.model = model
+      prompt_start_idx = i + 1
+    elseif context then
+      opts.context = M.parse_dot_args(context:gsub(',', ' '))
+      prompt_start_idx = i + 1
+    else
+      break
+    end
+  end
+
+  local prompt_tokens = vim.list_slice(args, prompt_start_idx)
+  local prompt = table.concat(prompt_tokens, ' ')
+
+  return opts, prompt
+end
+
 return M
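The context= prefix reuses the existing parse_dot_args helper: commas are rewritten to spaces so that each dotted key=value override is parsed into a nested table. A small sketch of the expected behavior, assuming the module is required as 'opencode.util' (inferred from the file path); the values mirror the unit tests below:

local util = require('opencode.util')
local opts, prompt = util.parse_run_args({
  'context=current_file.enabled=false,selection.enabled=true',
  'review', 'this',
})
-- opts   == { context = { current_file = { enabled = false }, selection = { enabled = true } } }
-- prompt == 'review this'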

tests/unit/api_spec.lua

Lines changed: 73 additions & 0 deletions
@@ -141,4 +141,77 @@ describe('opencode.api', function()
       })
     end)
   end)
+
+  describe('run command argument parsing', function()
+    it('parses agent prefix and passes to send_message', function()
+      api.commands.run.fn({ 'agent=plan', 'analyze', 'this', 'code' })
+      assert.stub(core.send_message).was_called()
+      assert.stub(core.send_message).was_called_with('analyze this code', {
+        new_session = false,
+        focus = 'output',
+        agent = 'plan',
+      })
+    end)
+
+    it('parses model prefix and passes to send_message', function()
+      api.commands.run.fn({ 'model=openai/gpt-4', 'test', 'prompt' })
+      assert.stub(core.send_message).was_called()
+      assert.stub(core.send_message).was_called_with('test prompt', {
+        new_session = false,
+        focus = 'output',
+        model = 'openai/gpt-4',
+      })
+    end)
+
+    it('parses context prefix and passes to send_message', function()
+      api.commands.run.fn({ 'context=current_file.enabled=false', 'test' })
+      assert.stub(core.send_message).was_called()
+      assert.stub(core.send_message).was_called_with('test', {
+        new_session = false,
+        focus = 'output',
+        context = { current_file = { enabled = false } },
+      })
+    end)
+
+    it('parses multiple prefixes and passes all to send_message', function()
+      api.commands.run.fn({ 'agent=plan', 'model=openai/gpt-4', 'context=current_file.enabled=false', 'analyze', 'code' })
+      assert.stub(core.send_message).was_called()
+      assert.stub(core.send_message).was_called_with('analyze code', {
+        new_session = false,
+        focus = 'output',
+        agent = 'plan',
+        model = 'openai/gpt-4',
+        context = { current_file = { enabled = false } },
+      })
+    end)
+
+    it('works with run_new command', function()
+      api.commands.run_new.fn({ 'agent=plan', 'model=openai/gpt-4', 'new', 'session', 'prompt' })
+      assert.stub(core.send_message).was_called()
+      assert.stub(core.send_message).was_called_with('new session prompt', {
+        new_session = true,
+        focus = 'output',
+        agent = 'plan',
+        model = 'openai/gpt-4',
+      })
+    end)
+
+    it('requires a prompt after prefixes', function()
+      local notify_stub = stub(vim, 'notify')
+      api.commands.run.fn({ 'agent=plan' })
+      assert.stub(notify_stub).was_called_with('Prompt required', vim.log.levels.ERROR)
+      notify_stub:revert()
+    end)
+
+    it('Lua API accepts opts directly without parsing', function()
+      api.run('test prompt', { agent = 'plan', model = 'openai/gpt-4' })
+      assert.stub(core.send_message).was_called()
+      assert.stub(core.send_message).was_called_with('test prompt', {
+        new_session = false,
+        focus = 'output',
+        agent = 'plan',
+        model = 'openai/gpt-4',
+      })
+    end)
+  end)
 end)

tests/unit/util_spec.lua

Lines changed: 65 additions & 0 deletions
@@ -26,3 +26,68 @@ describe('util.parse_dot_args', function()
     assert.are.same({}, args)
   end)
 end)
+
+describe('util.parse_run_args', function()
+  it('parses no prefixes', function()
+    local opts, prompt = util.parse_run_args({ 'just', 'a', 'regular', 'prompt' })
+    assert.are.same({}, opts)
+    assert.equals('just a regular prompt', prompt)
+  end)
+
+  it('parses single agent prefix', function()
+    local opts, prompt = util.parse_run_args({ 'agent=plan', 'hello', 'world' })
+    assert.are.same({ agent = 'plan' }, opts)
+    assert.equals('hello world', prompt)
+  end)
+
+  it('parses single model prefix', function()
+    local opts, prompt = util.parse_run_args({ 'model=openai/gpt-4', 'analyze', 'this' })
+    assert.are.same({ model = 'openai/gpt-4' }, opts)
+    assert.equals('analyze this', prompt)
+  end)
+
+  it('parses single context prefix', function()
+    local opts, prompt = util.parse_run_args({ 'context=current_file.enabled=false', 'test' })
+    assert.are.same({ context = { current_file = { enabled = false } } }, opts)
+    assert.equals('test', prompt)
+  end)
+
+  it('parses multiple prefixes in order', function()
+    local opts, prompt = util.parse_run_args({ 'agent=plan', 'model=openai/gpt-4', 'context=current_file.enabled=false', 'prompt', 'here' })
+    assert.are.same({
+      agent = 'plan',
+      model = 'openai/gpt-4',
+      context = { current_file = { enabled = false } }
+    }, opts)
+    assert.equals('prompt here', prompt)
+  end)
+
+  it('parses context with multiple comma-delimited values', function()
+    local opts, prompt = util.parse_run_args({ 'context=current_file.enabled=false,selection.enabled=true', 'test' })
+    assert.are.same({
+      context = {
+        current_file = { enabled = false },
+        selection = { enabled = true }
+      }
+    }, opts)
+    assert.equals('test', prompt)
+  end)
+
+  it('handles empty prompt after prefixes', function()
+    local opts, prompt = util.parse_run_args({ 'agent=plan' })
+    assert.are.same({ agent = 'plan' }, opts)
+    assert.equals('', prompt)
+  end)
+
+  it('handles empty string', function()
+    local opts, prompt = util.parse_run_args({})
+    assert.are.same({}, opts)
+    assert.equals('', prompt)
+  end)
+
+  it('stops parsing at first non-prefix token', function()
+    local opts, prompt = util.parse_run_args({ 'agent=plan', 'some', 'prompt', 'model=openai/gpt-4' })
+    assert.are.same({ agent = 'plan' }, opts)
+    assert.equals('some prompt model=openai/gpt-4', prompt)
+  end)
+end)
