@@ -4,8 +4,9 @@ import chalk from "chalk";
import clipboardy from "clipboardy";
import { Command } from "commander";

-import type { LLMResponse } from "@bashbuddy/validators";
-import { processPrompt } from "@bashbuddy/agent";
+import type { LLMMessage } from "@bashbuddy/agent";
+import type { LLMContext, LLMResponse } from "@bashbuddy/validators";
+import { processPrompt, yamlPrompt } from "@bashbuddy/agent";
import { SITE_URLS } from "@bashbuddy/consts";

import { LocalLLM } from "../llms/localllm";
@@ -22,8 +23,14 @@ import { runCommandWithStream } from "../utils/runner";
export function createAskCommand(): Command {
  const askCommand = new Command("ask")
    .description("Ask a question to the AI")
-    .argument("<question...>", "The question to ask the AI")
-    .action((questionParts: string[]) => {
+    .argument("[question...]", "The question to ask the AI")
+    .action((questionParts: string[] = []) => {
+      // If no question parts, prompt the user
+      if (questionParts.length === 0) {
+        promptForQuestion().catch(console.error);
+        return;
+      }
+
      // Join all parts of the question with spaces
      const question = questionParts.join(" ");
      execute(question).catch(console.error);
@@ -32,6 +39,34 @@ export function createAskCommand(): Command {
  return askCommand;
}

+/**
+ * Prompt the user for a question if none was provided
+ */
+async function promptForQuestion() {
+  p.intro("BashBuddy");
+
+  const question = await p.text({
+    message: "What would you like to ask?",
+    placeholder: "Ask for a command",
+  });
+
+  if (p.isCancel(question) || !question) {
+    p.cancel("Operation cancelled");
+    return;
+  }
+
+  await execute(question);
+}
+
+interface ConversationState {
+  messages: LLMMessage[];
+  context: LLMContext;
+  chatId: string;
+  llm?: LocalLLM;
+  isCloudMode: boolean;
+  revisionCount: number;
+}
+
async function execute(question: string) {
  p.intro("BashBuddy");
@@ -42,6 +77,7 @@ async function execute(question: string) {
  ]);

  let commandToRun: string | undefined;
+  let conversationState: ConversationState;

  switch (mode) {
    case LOCAL_MODE: {
@@ -61,15 +97,26 @@ async function execute(question: string) {
      await llm.init();
      modelSpinner.stop("Model loaded!");

-      const createNewOutputStream = (newUserInput: string) =>
-        Promise.resolve(processPrompt(llm, context, newUserInput, true));
-
-      commandToRun = await cliInfer(
-        await createNewOutputStream(question),
-        createNewOutputStream,
-        1,
-        false,
-      );
+      conversationState = {
+        messages: [
+          {
+            role: "system",
+            content: yamlPrompt(context),
+          },
+          {
+            role: "user",
+            content: question,
+          },
+        ],
+        context,
+        chatId: "local",
+        llm,
+        isCloudMode: false,
+        revisionCount: 1,
+      };
+
+      const stream = processPrompt(llm, conversationState.messages);
+      commandToRun = await handleInference(stream, conversationState);

      await llm.dispose();
@@ -79,20 +126,27 @@ async function execute(question: string) {
      try {
        const chatId = await trpc.chat.createChat.mutate();

-        const createNewOutputStream = (newUserInput: string) =>
-          trpc.chat.ask.mutate({
-            input: newUserInput,
-            context,
-            chatId,
-            useYaml: true,
-          });
-
-        commandToRun = await cliInfer(
-          await createNewOutputStream(question),
-          createNewOutputStream,
-          1,
-          true,
-        );
+        conversationState = {
+          messages: [
+            {
+              role: "user",
+              content: question,
+            },
+          ],
+          context,
+          chatId,
+          isCloudMode: true,
+          revisionCount: 1,
+        };
+
+        const stream = await trpc.chat.ask.mutate({
+          input: question,
+          context,
+          chatId,
+          useYaml: true,
+        });
+
+        commandToRun = await handleInference(stream, conversationState);
      } catch (err) {
        if (err instanceof TRPCClientError) {
          // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
@@ -140,24 +194,32 @@ async function execute(question: string) {
  }
}

-async function cliInfer(
+/**
+ * Process LLM inference and return the parsed response
+ */
+async function processInference(
  outputStream: AsyncIterable<string>,
-  createNewOutputStream: (
-    newUserInput: string,
-  ) => Promise<AsyncIterable<string>>,
-  revisionCount = 1,
-  isCloudMode = false,
-): Promise<string | undefined> {
+  state: ConversationState,
+): Promise<LLMResponse | undefined> {
  const llmSpinner = p.spinner();
  llmSpinner.start("Processing...");

  let finalResponse: LLMResponse;

  try {
-    finalResponse = await parseYamlResponse(outputStream, (response) => {
-      if (response.command) {
-        llmSpinner.message(response.command);
-      }
+    const { parsed, raw } = await parseYamlResponse(
+      outputStream,
+      (response) => {
+        if (response.command) {
+          llmSpinner.message(response.command);
+        }
+      },
+    );
+
+    finalResponse = parsed;
+    state.messages.push({
+      role: "model",
+      content: raw,
    });
  } catch (err) {
    if (err instanceof ResponseParseError) {
@@ -171,25 +233,70 @@ async function cliInfer(
  }

  llmSpinner.stop(finalResponse.command);
+  return finalResponse;
+}

-  if (finalResponse.wrong) {
+/**
+ * Display command information to the user
+ */
+function displayCommandInfo(response: LLMResponse): void {
+  if (response.wrong) {
    p.log.message(chalk.red("Please, limit yourself to ask for commands. "));
-
    return;
  }

-  if (finalResponse.explanation) {
-    p.log.message(chalk.dim(`Explanation: ${finalResponse.explanation}`));
+  if (response.explanation) {
+    p.log.message(chalk.dim(`Explanation: ${response.explanation}`));
  }

-  if (finalResponse.dangerous) {
+  if (response.dangerous) {
    p.log.message(
      chalk.red(
        `⚠️ Be careful, buddy has marked this command as dangerous. Make sure to know what it does.`,
      ),
    );
  }
+}
+
+/**
+ * Generate a new inference stream based on user suggestion
+ */
+async function generateNewStream(
+  suggestion: string,
+  state: ConversationState,
+): Promise<AsyncIterable<string>> {
+  // Add the suggestion to the messages
+  state.messages.push({
+    role: "user",
+    content: suggestion,
+  });
+
+  // Increment revision count
+  state.revisionCount += 1;

+  // Generate a new stream based on mode
+  if (state.isCloudMode) {
+    return trpc.chat.ask.mutate({
+      input: suggestion,
+      context: state.context,
+      chatId: state.chatId,
+      useYaml: true,
+    });
+  } else {
+    if (!state.llm) {
+      throw new Error("LLM not initialized");
+    }
+    return processPrompt(state.llm, state.messages);
+  }
+}
+
+/**
+ * Handle user action on the command
+ */
+async function handleUserAction(
+  response: LLMResponse,
+  state: ConversationState,
+): Promise<string | undefined> {
  // Options for the select component
  const options = [
    { value: "copyAndRun", label: "Copy & Run" },
@@ -198,9 +305,9 @@ async function cliInfer(
  ];

  // Only add the suggest option if we haven't reached the revision limit in cloud mode
-  if (!isCloudMode || revisionCount < 5) {
+  if (!state.isCloudMode || state.revisionCount < 5) {
    options.push({ value: "suggest", label: "Suggest changes" });
-  } else if (revisionCount >= 5) {
+  } else if (state.revisionCount >= 5) {
    p.log.message(
      chalk.yellow("You've reached the maximum of 5 revisions in cloud mode."),
    );
@@ -221,19 +328,19 @@

  switch (action) {
    case "run":
-      return finalResponse.command;
+      return response.command;
    case "copy": {
      // Copy the command to clipboard
      try {
-        await clipboardy.write(finalResponse.command);
+        await clipboardy.write(response.command);
        p.log.success("Command copied to clipboard");
      } catch {
        p.log.error("Failed to copy command to clipboard");
      }

      p.log.message(
        chalk.dim(
-          `Feel free to paste the command into your terminal: ${finalResponse.command}`,
+          `Feel free to paste the command into your terminal: ${response.command}`,
        ),
      );
@@ -242,18 +349,18 @@ async function cliInfer(
    case "copyAndRun": {
      // Copy the command to clipboard and run it
      try {
-        await clipboardy.write(finalResponse.command);
+        await clipboardy.write(response.command);
        p.log.success("Command copied to clipboard");
      } catch {
        p.log.error(
-          `Failed to copy command to clipboard, but will still run. Feel free to copy it: ${finalResponse.command}`,
+          `Failed to copy command to clipboard, but will still run. Feel free to copy it: ${response.command}`,
        );
      }

-      return finalResponse.command;
+      return response.command;
    }
    case "suggest": {
-      // Allow user to suggest changes (original behavior when typing)
+      // Allow user to suggest changes
      const suggestion = await p.text({
        message: "What changes would you like to suggest?",
        placeholder: "Type your suggestion here",
@@ -265,16 +372,33 @@ async function cliInfer(
      }

      if (suggestion) {
-        return cliInfer(
-          await createNewOutputStream(suggestion),
-          createNewOutputStream,
-          revisionCount + 1,
-          isCloudMode,
-        );
+        const newStream = await generateNewStream(suggestion, state);
+        return handleInference(newStream, state);
      }
      return undefined;
    }
    default:
      return undefined;
  }
}
+
+/**
+ * Handle the entire inference process
+ */
+async function handleInference(
+  outputStream: AsyncIterable<string>,
+  state: ConversationState,
+): Promise<string | undefined> {
+  // Process the inference
+  const finalResponse = await processInference(outputStream, state);
+
+  if (!finalResponse) {
+    return undefined;
+  }
+
+  // Display command information
+  displayCommandInfo(finalResponse);
+
+  // Handle user action
+  return handleUserAction(finalResponse, state);
+}