Commit 88f1180

Merge pull request #36 from wosherco/pol/w-52-add-gemma-3
feat: add Gemma 3 and better command and LLM structure
2 parents 6e216ce + 4e423dc

15 files changed: +469 −268 lines

.changeset/stale-buckets-roll.md

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+---
+"@bashbuddy/cli": patch
+---
+
+Added gemma 3, and better command structure

apps/cli/package.json

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@
     "@trpc/client": "catalog:",
     "clipboardy": "^4.0.0",
     "commander": "^13.1.0",
-    "node-llama-cpp": "^3.6.0",
+    "node-llama-cpp": "^3.7.0",
     "superjson": "catalog:",
     "yaml": "^2.7.0",
     "zod": "catalog:"

apps/cli/src/commands/ask.ts

Lines changed: 182 additions & 58 deletions
@@ -4,8 +4,9 @@ import chalk from "chalk";
 import clipboardy from "clipboardy";
 import { Command } from "commander";
 
-import type { LLMResponse } from "@bashbuddy/validators";
-import { processPrompt } from "@bashbuddy/agent";
+import type { LLMMessage } from "@bashbuddy/agent";
+import type { LLMContext, LLMResponse } from "@bashbuddy/validators";
+import { processPrompt, yamlPrompt } from "@bashbuddy/agent";
 import { SITE_URLS } from "@bashbuddy/consts";
 
 import { LocalLLM } from "../llms/localllm";
@@ -22,8 +23,14 @@ import { runCommandWithStream } from "../utils/runner";
 export function createAskCommand(): Command {
   const askCommand = new Command("ask")
     .description("Ask a question to the AI")
-    .argument("<question...>", "The question to ask the AI")
-    .action((questionParts: string[]) => {
+    .argument("[question...]", "The question to ask the AI")
+    .action((questionParts: string[] = []) => {
+      // If no question parts, prompt the user
+      if (questionParts.length === 0) {
+        promptForQuestion().catch(console.error);
+        return;
+      }
+
       // Join all parts of the question with spaces
       const question = questionParts.join(" ");
       execute(question).catch(console.error);
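
Note: switching the argument from <question...> (required) to [question...] (optional variadic) is what lets a bare `bashbuddy ask` fall through to the new interactive prompt instead of failing with a missing-argument error. A self-contained sketch of that behavior under commander v13 (program and output values are illustrative):

import { Command } from "commander";

const program = new Command();

program
  .command("ask")
  .argument("[question...]", "optional variadic question")
  .action((parts: string[] = []) => {
    // With "<question...>" commander exits with "missing required argument"
    // before this callback ever runs.
    console.log(parts.length === 0 ? "no question: prompt interactively" : parts.join(" "));
  });

program.parse(["node", "cli", "ask"]); // prints: no question: prompt interactively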
@@ -32,6 +39,34 @@ export function createAskCommand(): Command {
   return askCommand;
 }
 
+/**
+ * Prompt the user for a question if none was provided
+ */
+async function promptForQuestion() {
+  p.intro("BashBuddy");
+
+  const question = await p.text({
+    message: "What would you like to ask?",
+    placeholder: "Ask for a command",
+  });
+
+  if (p.isCancel(question) || !question) {
+    p.cancel("Operation cancelled");
+    return;
+  }
+
+  await execute(question);
+}
+
+interface ConversationState {
+  messages: LLMMessage[];
+  context: LLMContext;
+  chatId: string;
+  llm?: LocalLLM;
+  isCloudMode: boolean;
+  revisionCount: number;
+}
+
 async function execute(question: string) {
   p.intro("BashBuddy");
 
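
Note: ConversationState is the core of this refactor — instead of cliInfer recursively passing a stream factory around, every step now appends to one shared messages array. A sketch of how the transcript is expected to grow across one suggest/revise cycle (types re-declared locally for illustration; the real ones come from @bashbuddy/agent and @bashbuddy/validators, and the message contents are invented):

type Role = "system" | "user" | "model";
interface Message { role: Role; content: string }

const messages: Message[] = [
  { role: "system", content: "<yamlPrompt(context) output>" }, // local mode seeds a system prompt
  { role: "user", content: "show disk usage" },                // the initial question
];

// processInference() records the raw model output:
messages.push({ role: "model", content: "command: df -h\nexplanation: ..." });

// generateNewStream() records the user's revision before re-running the prompt:
messages.push({ role: "user", content: "sort by size" });

console.log(messages.map((m) => m.role).join(" -> ")); // system -> user -> model -> user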

@@ -42,6 +77,7 @@ async function execute(question: string) {
   ]);
 
   let commandToRun: string | undefined;
+  let conversationState: ConversationState;
 
   switch (mode) {
     case LOCAL_MODE: {
@@ -61,15 +97,26 @@ async function execute(question: string) {
       await llm.init();
       modelSpinner.stop("Model loaded!");
 
-      const createNewOutputStream = (newUserInput: string) =>
-        Promise.resolve(processPrompt(llm, context, newUserInput, true));
-
-      commandToRun = await cliInfer(
-        await createNewOutputStream(question),
-        createNewOutputStream,
-        1,
-        false,
-      );
+      conversationState = {
+        messages: [
+          {
+            role: "system",
+            content: yamlPrompt(context),
+          },
+          {
+            role: "user",
+            content: question,
+          },
+        ],
+        context,
+        chatId: "local",
+        llm,
+        isCloudMode: false,
+        revisionCount: 1,
+      };
+
+      const stream = processPrompt(llm, conversationState.messages);
+      commandToRun = await handleInference(stream, conversationState);
 
       await llm.dispose();
 
@@ -79,20 +126,27 @@ async function execute(question: string) {
       try {
         const chatId = await trpc.chat.createChat.mutate();
 
-        const createNewOutputStream = (newUserInput: string) =>
-          trpc.chat.ask.mutate({
-            input: newUserInput,
-            context,
-            chatId,
-            useYaml: true,
-          });
-
-        commandToRun = await cliInfer(
-          await createNewOutputStream(question),
-          createNewOutputStream,
-          1,
-          true,
-        );
+        conversationState = {
+          messages: [
+            {
+              role: "user",
+              content: question,
+            },
+          ],
+          context,
+          chatId,
+          isCloudMode: true,
+          revisionCount: 1,
+        };
+
+        const stream = await trpc.chat.ask.mutate({
+          input: question,
+          context,
+          chatId,
+          useYaml: true,
+        });
+
+        commandToRun = await handleInference(stream, conversationState);
       } catch (err) {
         if (err instanceof TRPCClientError) {
           // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
@@ -140,24 +194,32 @@ async function execute(question: string) {
   }
 }
 
-async function cliInfer(
+/**
+ * Process LLM inference and return the parsed response
+ */
+async function processInference(
   outputStream: AsyncIterable<string>,
-  createNewOutputStream: (
-    newUserInput: string,
-  ) => Promise<AsyncIterable<string>>,
-  revisionCount = 1,
-  isCloudMode = false,
-): Promise<string | undefined> {
+  state: ConversationState,
+): Promise<LLMResponse | undefined> {
   const llmSpinner = p.spinner();
   llmSpinner.start("Processing...");
 
   let finalResponse: LLMResponse;
 
   try {
-    finalResponse = await parseYamlResponse(outputStream, (response) => {
-      if (response.command) {
-        llmSpinner.message(response.command);
-      }
+    const { parsed, raw } = await parseYamlResponse(
+      outputStream,
+      (response) => {
+        if (response.command) {
+          llmSpinner.message(response.command);
+        }
+      },
+    );
+
+    finalResponse = parsed;
+    state.messages.push({
+      role: "model",
+      content: raw,
     });
   } catch (err) {
     if (err instanceof ResponseParseError) {
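
Note: parseYamlResponse now returns both the parsed object and the raw text, so the untouched YAML can be appended to the transcript as a "model" message. Its implementation is not part of this diff; what follows is a hedged mini-version of the contract the new call site implies, using the yaml package already in the CLI's dependencies:

import { parse } from "yaml";

interface MiniResponse { command?: string; explanation?: string }

// Accumulate streamed tokens, surface best-effort partial parses through the
// callback (used above to live-update the spinner), and return both the final
// parsed object and the raw text.
async function miniParseYamlResponse(
  stream: AsyncIterable<string>,
  onUpdate: (partial: MiniResponse) => void,
): Promise<{ parsed: MiniResponse; raw: string }> {
  let raw = "";
  for await (const chunk of stream) {
    raw += chunk;
    try {
      onUpdate(parse(raw) as MiniResponse);
    } catch {
      // incomplete YAML mid-stream; keep accumulating
    }
  }
  return { parsed: parse(raw) as MiniResponse, raw };
}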
@@ -171,25 +233,70 @@ async function cliInfer(
   }
 
   llmSpinner.stop(finalResponse.command);
+  return finalResponse;
+}
 
-  if (finalResponse.wrong) {
+/**
+ * Display command information to the user
+ */
+function displayCommandInfo(response: LLMResponse): void {
+  if (response.wrong) {
     p.log.message(chalk.red("Please, limit yourself to ask for commands. "));
-
     return;
   }
 
-  if (finalResponse.explanation) {
-    p.log.message(chalk.dim(`Explanation: ${finalResponse.explanation}`));
+  if (response.explanation) {
+    p.log.message(chalk.dim(`Explanation: ${response.explanation}`));
   }
 
-  if (finalResponse.dangerous) {
+  if (response.dangerous) {
     p.log.message(
       chalk.red(
         `⚠️ Be careful, buddy has marked this command as dangerous. Make sure to know what it does.`,
       ),
     );
   }
+}
+
+/**
+ * Generate a new inference stream based on user suggestion
+ */
+async function generateNewStream(
+  suggestion: string,
+  state: ConversationState,
+): Promise<AsyncIterable<string>> {
+  // Add the suggestion to the messages
+  state.messages.push({
+    role: "user",
+    content: suggestion,
+  });
+
+  // Increment revision count
+  state.revisionCount += 1;
 
+  // Generate a new stream based on mode
+  if (state.isCloudMode) {
+    return trpc.chat.ask.mutate({
+      input: suggestion,
+      context: state.context,
+      chatId: state.chatId,
+      useYaml: true,
+    });
+  } else {
+    if (!state.llm) {
+      throw new Error("LLM not initialized");
+    }
+    return processPrompt(state.llm, state.messages);
+  }
+}
+
+/**
+ * Handle user action on the command
+ */
+async function handleUserAction(
+  response: LLMResponse,
+  state: ConversationState,
+): Promise<string | undefined> {
   // Options for the select component
   const options = [
     { value: "copyAndRun", label: "Copy & Run" },
@@ -198,9 +305,9 @@
   ];
 
   // Only add the suggest option if we haven't reached the revision limit in cloud mode
-  if (!isCloudMode || revisionCount < 5) {
+  if (!state.isCloudMode || state.revisionCount < 5) {
     options.push({ value: "suggest", label: "Suggest changes" });
-  } else if (revisionCount >= 5) {
+  } else if (state.revisionCount >= 5) {
     p.log.message(
       chalk.yellow("You've reached the maximum of 5 revisions in cloud mode."),
     );
@@ -221,19 +328,19 @@
 
   switch (action) {
     case "run":
-      return finalResponse.command;
+      return response.command;
    case "copy": {
      // Copy the command to clipboard
      try {
-        await clipboardy.write(finalResponse.command);
+        await clipboardy.write(response.command);
        p.log.success("Command copied to clipboard");
      } catch {
        p.log.error("Failed to copy command to clipboard");
      }
 
      p.log.message(
        chalk.dim(
-          `Feel free to paste the command into your terminal: ${finalResponse.command}`,
+          `Feel free to paste the command into your terminal: ${response.command}`,
        ),
      );
 
@@ -242,18 +349,18 @@
     case "copyAndRun": {
       // Copy the command to clipboard and run it
       try {
-        await clipboardy.write(finalResponse.command);
+        await clipboardy.write(response.command);
         p.log.success("Command copied to clipboard");
       } catch {
         p.log.error(
-          `Failed to copy command to clipboard, but will still run. Feel free to copy it: ${finalResponse.command}`,
+          `Failed to copy command to clipboard, but will still run. Feel free to copy it: ${response.command}`,
         );
       }
 
-      return finalResponse.command;
+      return response.command;
     }
     case "suggest": {
-      // Allow user to suggest changes (original behavior when typing)
+      // Allow user to suggest changes
       const suggestion = await p.text({
         message: "What changes would you like to suggest?",
         placeholder: "Type your suggestion here",
@@ -265,16 +372,33 @@
      }
 
      if (suggestion) {
-        return cliInfer(
-          await createNewOutputStream(suggestion),
-          createNewOutputStream,
-          revisionCount + 1,
-          isCloudMode,
-        );
+        const newStream = await generateNewStream(suggestion, state);
+        return handleInference(newStream, state);
      }
      return undefined;
    }
    default:
      return undefined;
  }
 }
+
+/**
+ * Handle the entire inference process
+ */
+async function handleInference(
+  outputStream: AsyncIterable<string>,
+  state: ConversationState,
+): Promise<string | undefined> {
+  // Process the inference
+  const finalResponse = await processInference(outputStream, state);
+
+  if (!finalResponse) {
+    return undefined;
+  }
+
+  // Display command information
+  displayCommandInfo(finalResponse);
+
+  // Handle user action
+  return handleUserAction(finalResponse, state);
+}
