2 changes: 1 addition & 1 deletion package.json
@@ -1,6 +1,6 @@
 {
     "name": "sre",
-    "version": "0.1.0",
+    "version": "0.1.1",
     "description": "",
     "author": "Alaa-eddine KADDOURI",
     "license": "MIT",
2 changes: 1 addition & 1 deletion packages/core/package.json
@@ -1,6 +1,6 @@
 {
     "name": "@smythos/sre",
-    "version": "1.5.20",
+    "version": "1.5.21",
     "description": "Smyth Runtime Environment",
     "author": "Alaa-eddine KADDOURI",
     "license": "MIT",
128 changes: 128 additions & 0 deletions packages/core/src/Components/MultimodalLLM.class.ts
@@ -0,0 +1,128 @@
import Joi from 'joi';
import { Component } from './Component.class';
import { LLMInference } from '@sre/LLMManager/LLM.inference';
import { TemplateString } from '@sre/helpers/TemplateString.helper';

import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';

export class MultimodalLLM extends Component {
    protected configSchema = Joi.object({
        prompt: Joi.string().required().max(8_000_000).label('Prompt'), // 2M tokens is around 8M characters
        maxTokens: Joi.number().min(1).label('Maximum Tokens'),
        model: Joi.string().max(200).required(),
        passthrough: Joi.boolean().optional().label('Passthrough'),
    });

    constructor() {
        super();
    }

    init() {}

    async process(input, config, agent) {
        await super.process(input, config, agent);

        const logger = this.createComponentLogger(agent, config);

        logger.debug(`=== Multimodal LLM Log ===`);

        try {
            const passThrough: boolean = config.data.passthrough || false;
            const model: string = config.data.model || 'gpt-4o-mini';
            const llmInference: LLMInference = await LLMInference.getInstance(model, AccessCandidate.agent(agent.id));

            if (!llmInference.connector) {
                return {
                    _error: `The model '${model}' is not available. Please try a different one.`,
                    _debug: logger.output,
                };
            }

            const modelId = await agent.modelsProvider.getModelId(model);
            logger.debug(` Model : ${modelId || model}`);

            let prompt: any = TemplateString(config.data.prompt).parse(input).result;

            logger.debug(` Prompt\n`, prompt, '\n');

            const outputs = {};
            for (let con of config.outputs) {
                if (con.default) continue;
                outputs[con.name] = con?.description ? `<${con?.description}>` : '';
            }

            const excludedKeys = ['_debug', '_error'];
            const outputKeys = Object.keys(outputs).filter((key) => !excludedKeys.includes(key));

            if (outputKeys.length > 0) {
                const outputFormat = {};
                outputKeys.forEach((key) => (outputFormat[key] = '<value>'));

                prompt +=
                    '\n\nExpected output format = ' +
                    JSON.stringify(outputFormat) +
                    '\n\n The output JSON should only use the entries from the output format.';

                logger.debug(`[Component enhanced prompt]\n${prompt}\n\n`);
            }

            const files = Array.isArray(input.Input) ? input.Input : [input.Input];

            let response: any;
            if (passThrough) {
                const contentPromise = new Promise(async (resolve, reject) => {
                    let _content = '';
                    const eventEmitter: any = await llmInference.multimodalStreamRequestLegacy(prompt, files, config, agent).catch((error) => {
                        console.error('Error on multimodalStreamRequest: ', error);
                        reject(error);
                    });
                    eventEmitter.on('content', (content) => {
                        if (typeof agent.callback === 'function') {
                            agent.callback({ content });
                        }
                        agent.sse.send('llm/passthrough/content', content);
                        _content += content;
                    });
                    eventEmitter.on('thinking', (thinking) => {
                        if (typeof agent.callback === 'function') {
                            agent.callback({ thinking });
                        }
                        agent.sse.send('llm/passthrough/thinking', thinking);
                    });
                    eventEmitter.on('end', () => {
                        console.log('end');
                        resolve(_content);
                    });
                });
                response = await contentPromise;
            } else {
                response = await llmInference.prompt({ query: prompt, files, params: { ...config, agentId: agent.id } });
            }

            // in case we have a response but it's an empty string, undefined or null
            if (!response) {
                return { _error: ' LLM Error = Empty Response!', _debug: logger.output };
            }

            if (response?.error) {
                const error = response?.error + ' ' + (response?.details || '');
                logger.error(` LLM Error=`, error);
                return { Reply: response?.data, _error: error, _debug: logger.output };
            }

            logger.debug(' Response \n', response);

            const result = { Reply: response };
            result['_debug'] = logger.output;

            return result;
        } catch (error: any) {
            const _error = `${error?.error || ''} ${error?.details || ''}`.trim() || error?.message || 'Something went wrong!';
            logger.error(`Error processing File(s)!`, _error);
            return {
                _error,
                _debug: logger.output,
            };
        }
    }
}
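
The prompt-shaping step in MultimodalLLM.process (collecting non-default outputs from config.outputs and appending an expected-output-format hint) can be tried in isolation. A minimal sketch; the OutputDef shape and the enhancePrompt helper are assumptions made for this example, not SRE exports:

// Illustrative sketch only: mirrors the output-format prompt enhancement from MultimodalLLM.process().
type OutputDef = { name: string; description?: string; default?: boolean };

function enhancePrompt(prompt: string, outputs: OutputDef[]): string {
    // Skip default connectors and the internal _debug/_error outputs, as the component does.
    const excludedKeys = ['_debug', '_error'];
    const outputKeys = outputs
        .filter((o) => !o.default)
        .map((o) => o.name)
        .filter((name) => !excludedKeys.includes(name));

    if (outputKeys.length === 0) return prompt;

    const outputFormat: Record<string, string> = {};
    outputKeys.forEach((key) => (outputFormat[key] = '<value>'));

    return (
        prompt +
        '\n\nExpected output format = ' +
        JSON.stringify(outputFormat) +
        '\n\n The output JSON should only use the entries from the output format.'
    );
}

// Example: a component with a default Reply output plus Summary and Tags outputs.
console.log(enhancePrompt('Describe the attached files.', [{ name: 'Reply', default: true }, { name: 'Summary' }, { name: 'Tags' }]));
// Appends: Expected output format = {"Summary":"<value>","Tags":"<value>"} ...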
72 changes: 72 additions & 0 deletions packages/core/src/Components/OpenAPI.class.ts
@@ -0,0 +1,72 @@
import Joi from 'joi';

import { Agent } from '@sre/AgentManager/Agent.class';
import { Conversation } from '@sre/helpers/Conversation.helper';
import { TemplateString } from '@sre/helpers/TemplateString.helper';

import { Component } from './Component.class';

export class OpenAPI extends Component {
    protected configSchema = Joi.object({
        model: Joi.string().optional(),
        openAiModel: Joi.string().optional(), // for backward compatibility
        specUrl: Joi.string().max(2048).uri().required().description('URL of the OpenAPI specification'),
        descForModel: Joi.string().max(5000).required().allow('').label('Description for Model'),
        name: Joi.string().max(500).required().allow(''),
        desc: Joi.string().max(5000).required().allow('').label('Description'),
        logoUrl: Joi.string().max(8192).allow(''),
        id: Joi.string().max(200),
        version: Joi.string().max(100).allow(''),
        domain: Joi.string().max(253).allow(''),
    });

    constructor() {
        super();
    }

    init() {}

    async process(input, config, agent: Agent) {
        await super.process(input, config, agent);
        const logger = this.createComponentLogger(agent, config);

        logger.debug(`=== Open API Log ===`);

        try {
            const specUrl = config?.data?.specUrl;

            if (!specUrl) {
                return { _error: 'Please provide an OpenAPI Specification URL!', _debug: logger.output };
            }

            const model = config?.data?.model || config?.data?.openAiModel;
            const descForModel = TemplateString(config?.data?.descForModel).parse(input).result;
            let prompt = '';

            if (input?.Prompt) {
                prompt = typeof input?.Prompt === 'string' ? input?.Prompt : JSON.stringify(input?.Prompt);
            } else if (input?.Query) {
                prompt = typeof input?.Query === 'string' ? input?.Query : JSON.stringify(input?.Query);
            }

            if (!prompt) {
                return { _error: 'Please provide a prompt', _debug: logger.output };
            }

            // TODO [Forhad]: Need to check and validate input prompt token

            const conv = new Conversation(model, specUrl, { systemPrompt: descForModel, agentId: agent?.id });

            const result = await conv.prompt(prompt);

            logger.debug(`Response:\n`, result, '\n');

            return { Output: result, _debug: logger.output };
        } catch (error: any) {
            console.error('Error on running Open API: ', error);
            return { _error: `Error on running Open API!\n${error?.message || JSON.stringify(error)}`, _debug: logger.output };
        }
    }
}

export default OpenAPI;
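
The Joi configSchema above fully determines what a valid OpenAPI component configuration looks like. A minimal validation sketch against an equivalent schema; the sample values (spec URL, names) are placeholders for illustration, not a real service:

import Joi from 'joi';

// Illustrative copy of the OpenAPI component's configSchema from the diff above.
const configSchema = Joi.object({
    model: Joi.string().optional(),
    openAiModel: Joi.string().optional(),
    specUrl: Joi.string().max(2048).uri().required().description('URL of the OpenAPI specification'),
    descForModel: Joi.string().max(5000).required().allow('').label('Description for Model'),
    name: Joi.string().max(500).required().allow(''),
    desc: Joi.string().max(5000).required().allow('').label('Description'),
    logoUrl: Joi.string().max(8192).allow(''),
    id: Joi.string().max(200),
    version: Joi.string().max(100).allow(''),
    domain: Joi.string().max(253).allow(''),
});

// Hypothetical config: URL and names are placeholders.
const { error, value } = configSchema.validate({
    model: 'gpt-4o-mini',
    specUrl: 'https://example.com/openapi.json',
    descForModel: 'Use this API to look up weather data.',
    name: 'Weather API',
    desc: 'Example OpenAPI integration',
    logoUrl: '',
    version: '1.0.0',
    domain: '',
});

console.log(error ? `Invalid config: ${error.message}` : 'Config OK', value);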
104 changes: 104 additions & 0 deletions packages/core/src/Components/VisionLLM.class.ts
@@ -0,0 +1,104 @@
import Joi from 'joi';

import { TemplateString } from '@sre/helpers/TemplateString.helper';
import { Component } from './Component.class';
import { LLMInference } from '@sre/LLMManager/LLM.inference';
import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';

export class VisionLLM extends Component {
    protected configSchema = Joi.object({
        prompt: Joi.string().required().max(8_000_000).label('Prompt'), // 2M tokens is around 8M characters
        maxTokens: Joi.number().min(1).label('Maximum Tokens'),
        model: Joi.string().max(200).required(),
        passthrough: Joi.boolean().optional().label('Passthrough'),
    });

    constructor() {
        super();
    }

    init() {}

    async process(input, config, agent) {
        await super.process(input, config, agent);

        const logger = this.createComponentLogger(agent, config);
        try {
            logger.debug(`=== Vision LLM Log ===`);

            const passThrough: boolean = config.data.passthrough || false;
            const model: string = config.data?.model;

            const llmInference: LLMInference = await LLMInference.getInstance(model, AccessCandidate.agent(agent.id));
            // if the llm is undefined, then it means we removed the model from our system
            if (!llmInference.connector) {
                return {
                    _error: `The model '${model}' is not available. Please try a different one.`,
                    _debug: logger.output,
                };
            }

            const modelId = await agent.modelsProvider.getModelId(model);
            logger.debug(` Model : ${modelId || model}`);

            let prompt: any = TemplateString(config.data.prompt).parse(input).result;

            logger.debug(` Prompt\n`, prompt, '\n');

            const files = Array.isArray(input.Images) ? input.Images : [input.Images];

            let response: any;
            if (passThrough) {
                const contentPromise = new Promise(async (resolve, reject) => {
                    let _content = '';
                    const eventEmitter: any = await llmInference.multimodalStreamRequestLegacy(prompt, files, config, agent).catch((error) => {
                        console.error('Error on multimodalStreamRequest: ', error);
                        reject(error);
                    });
                    eventEmitter.on('content', (content) => {
                        if (typeof agent.callback === 'function') {
                            agent.callback({ content });
                        }
                        agent.sse.send('llm/passthrough/content', content);
                        _content += content;
                    });
                    eventEmitter.on('thinking', (thinking) => {
                        if (typeof agent.callback === 'function') {
                            agent.callback({ thinking });
                        }
                        agent.sse.send('llm/passthrough/thinking', thinking);
                    });
                    eventEmitter.on('end', () => {
                        console.log('end');
                        resolve(_content);
                    });
                });
                response = await contentPromise;
            } else {
                response = await llmInference.prompt({ query: prompt, files, params: { ...config, agentId: agent.id } });
            }

            // in case we have a response but it's an empty string, undefined or null
            if (!response) {
                return { _error: ' LLM Error = Empty Response!', _debug: logger.output };
            }

            if (response?.error) {
                const error = response?.error + ' ' + (response?.details || '');
                logger.error(` LLM Error=`, error);

                return { Reply: response?.data, _error: error, _debug: logger.output };
            }

            logger.debug(' Response \n', response);

            const result = { Reply: response };

            result['_debug'] = logger.output;

            return result;
        } catch (error: any) {
            return { _error: error.message, _debug: logger.output };
        }
    }
}
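
VisionLLM and MultimodalLLM share the same passthrough pattern: wrap the stream in a Promise, forward 'content' and 'thinking' events, and resolve the accumulated text on 'end'. A minimal sketch of that accumulation, using a plain Node EventEmitter as a stand-in for the emitter returned by multimodalStreamRequestLegacy (the fake stream below is an assumption for illustration, not the real SRE API):

import { EventEmitter } from 'events';

// Stand-in for the emitter returned by llmInference.multimodalStreamRequestLegacy().
function fakeLLMStream(): EventEmitter {
    const emitter = new EventEmitter();
    setImmediate(() => {
        emitter.emit('thinking', 'analyzing the images...');
        emitter.emit('content', 'The image shows ');
        emitter.emit('content', 'a cat on a sofa.');
        emitter.emit('end');
    });
    return emitter;
}

// Same accumulation pattern as the passthrough branch: buffer 'content', resolve on 'end'.
function collectStream(emitter: EventEmitter): Promise<string> {
    return new Promise((resolve) => {
        let _content = '';
        emitter.on('content', (chunk: string) => (_content += chunk));
        emitter.on('end', () => resolve(_content));
    });
}

collectStream(fakeLLMStream()).then((reply) => console.log(reply));
// => The image shows a cat on a sofa.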