Skip to content
This repository was archived by the owner on Jul 4, 2025. It is now read-only.

Commit efa55f1

Browse files
authored
fix: start and run models are not outputting last error logs (#951)
1 parent c9df2ac commit efa55f1

File tree

3 files changed

+16
-55
lines changed

3 files changed

+16
-55
lines changed

cortex-js/src/infrastructure/commanders/models/model-start.command.ts

Lines changed: 10 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ import { exit } from 'node:process';
44
import { CortexUsecases } from '@/usecases/cortex/cortex.usecases';
55
import { SetCommandContext } from '../decorators/CommandContext';
66
import { ContextService } from '@/infrastructure/services/context/context.service';
7-
import { createReadStream, existsSync, statSync, watchFile } from 'node:fs';
7+
import { existsSync } from 'node:fs';
88
import { FileManagerService } from '@/infrastructure/services/file-manager/file-manager.service';
99
import { join } from 'node:path';
1010
import { Engines } from '../types/engine.interface';
@@ -14,11 +14,12 @@ import { isRemoteEngine } from '@/utils/normalize-model-id';
1414
import { downloadProgress } from '@/utils/download-progress';
1515
import { CortexClient } from '../services/cortex.client';
1616
import { DownloadType } from '@/domain/models/download.interface';
17+
import { printLastErrorLines } from '@/utils/logs';
1718

1819
type ModelStartOptions = {
19-
attach: boolean;
2020
preset?: string;
2121
};
22+
2223
@SubCommand({
2324
name: 'start',
2425
description: 'Start a model by ID.',
@@ -77,16 +78,17 @@ export class ModelStartCommand extends BaseCommand {
7778
await downloadProgress(this.cortex, undefined, DownloadType.Engine);
7879
}
7980

80-
// Attached - stdout logs
81-
if (options.attach) {
82-
this.attachLogWatch();
83-
}
84-
8581
const parsedPreset = await this.fileService.getPreset(options.preset);
8682

83+
const startingSpinner = ora('Loading model...').start();
84+
8785
await this.cortex.models
8886
.start(modelId, parsedPreset)
89-
.then(() => options.attach && ora('Model is running...').start());
87+
.then(() => startingSpinner.succeed('Model loaded'))
88+
.catch(async (error) => {
89+
startingSpinner.fail(error.message ?? error);
90+
printLastErrorLines(await this.fileService.getLogPath());
91+
});
9092
}
9193

9294
modelInquiry = async () => {
@@ -104,55 +106,11 @@ export class ModelStartCommand extends BaseCommand {
104106
return model;
105107
};
106108

107-
@Option({
108-
flags: '-a, --attach',
109-
description: 'Attach to interactive chat session',
110-
defaultValue: false,
111-
name: 'attach',
112-
})
113-
parseAttach() {
114-
return true;
115-
}
116-
117109
@Option({
118110
flags: '-p, --preset <preset>',
119111
description: 'Apply a chat preset to the chat session',
120112
})
121113
parseTemplate(value: string) {
122114
return value;
123115
}
124-
125-
/**
126-
* Attach to the log file and watch for changes
127-
*/
128-
private async attachLogWatch() {
129-
const logPath = await this.fileService.getLogPath();
130-
const initialSize = statSync(logPath).size;
131-
const logStream = createReadStream(logPath, {
132-
start: initialSize,
133-
encoding: 'utf-8',
134-
autoClose: false,
135-
});
136-
logStream.on('data', (chunk) => {
137-
console.log(chunk);
138-
});
139-
watchFile(logPath, (curr, prev) => {
140-
// Check if the file size has increased
141-
if (curr.size > prev.size) {
142-
// Calculate the position to start reading from
143-
const position = prev.size;
144-
145-
// Create a new read stream from the updated position
146-
const updateStream = createReadStream(logPath, {
147-
encoding: 'utf8',
148-
start: position,
149-
});
150-
151-
// Read the newly written content
152-
updateStream.on('data', (chunk) => {
153-
console.log(chunk);
154-
});
155-
}
156-
});
157-
}
158116
}

cortex-js/src/infrastructure/commanders/run.command.ts

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ import { CortexClient } from './services/cortex.client';
1515
import { DownloadType } from '@/domain/models/download.interface';
1616
import { isLocalFile } from '@/utils/urls';
1717
import { parse } from 'node:path';
18+
import { printLastErrorLines } from '@/utils/logs';
1819

1920
type RunOptions = {
2021
threadId?: string;
@@ -109,8 +110,10 @@ export class RunCommand extends BaseCommand {
109110
if (options.chat) this.chatClient.chat(modelId, options.threadId);
110111
else console.log("To start a chat session, use the '--chat' flag");
111112
})
112-
.catch((e) => {
113+
.catch(async (e) => {
113114
startingSpinner.fail(e.message ?? e);
115+
116+
printLastErrorLines(await this.fileService.getLogPath());
114117
});
115118
}
116119

cortex-js/src/utils/huggingface.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -225,9 +225,9 @@ export async function getHFModelMetadata(
225225
const stopWord: string = metadata['tokenizer.ggml.tokens'][index] ?? '';
226226
const name = metadata['general.name'];
227227
const contextLength = metadata['llama.context_length'] ?? 4096;
228-
const ngl = (metadata['llama.block_count'] ?? 32) + 1
228+
const ngl = (metadata['llama.block_count'] ?? 32) + 1;
229229
const version: number = metadata['version'];
230-
230+
231231
return {
232232
contextLength,
233233
ngl,

0 commit comments

Comments (0)