Skip to content

Commit

Permalink
feat: add Segment AI requests events
Browse files Browse the repository at this point in the history
  • Loading branch information
abuyak-vf committed Oct 6, 2023
1 parent 08f2d56 commit b61452f
Show file tree
Hide file tree
Showing 3 changed files with 201 additions and 14 deletions.
133 changes: 123 additions & 10 deletions lib/controllers/test/index.ts
Original file line number Diff line number Diff line change
@@ -1,19 +1,20 @@
import { Validator } from '@voiceflow/backend-utils';
import { BaseUtils } from '@voiceflow/base-types';
import { BaseModels, BaseUtils } from '@voiceflow/base-types';
import { KnowledgeBaseSettings } from '@voiceflow/base-types/build/cjs/models/project/knowledgeBase';
import VError from '@voiceflow/verror';
import _merge from 'lodash/merge';

import AI from '@/lib/clients/ai';
import { getAPIBlockHandlerOptions } from '@/lib/services/runtime/handlers/api';
import { fetchKnowledgeBase, promptSynthesis } from '@/lib/services/runtime/handlers/utils/knowledgeBase';
import { answerSynthesis } from '@/lib/services/runtime/handlers/utils/knowledgeBase/answer';
import { AiRequestActionType, SegmentEventType } from '@/lib/services/runtime/types';
import log from '@/logger';
import { callAPI } from '@/runtime/lib/Handlers/api/utils';
import { ivmExecute } from '@/runtime/lib/Handlers/code/utils';
import { Request, Response } from '@/types';

import { QuotaName } from '../../services/billing';
import { fetchPrompt } from '../../services/runtime/handlers/utils/ai';
import { AIResponse, fetchPrompt } from '../../services/runtime/handlers/utils/ai';
import { validate } from '../../utils';
import { AbstractController } from '../utils';
import { TestFunctionBody, TestFunctionParams, TestFunctionResponse, TestFunctionStatus } from './interface';
Expand Down Expand Up @@ -56,6 +57,68 @@ class TestController extends AbstractController {
}
}

/**
 * Sends an "AI Request" Segment analytics event describing a preview-time AI call.
 *
 * Fire-and-forget: tracking failures are logged and never propagated to the caller,
 * so analytics can never break the test endpoints that invoke this.
 */
testSendSegmentEvent = async ({
  req,
  answer,
  startTime,
  settings,
  project,
  action_type,
  isKB,
}: {
  req: Request;
  answer: AIResponse | null;
  // performance.now() timestamp captured just before the AI call; used to compute `runtime`
  startTime: number;
  settings: KnowledgeBaseSettings;
  project: BaseModels.Project.Model<any, any>;
  action_type: AiRequestActionType;
  isKB: boolean;
}) => {
  const responseTokens = answer?.answerTokens ?? 0;
  const queryTokens = answer?.queryTokens ?? 0;
  const responseOutput = answer?.output;
  // date-only ISO string (YYYY-MM-DD) reported as last_product_activity
  const currentDate = new Date().toISOString().slice(0, 10);

  const properties = {
    KB: isKB,
    action_type,
    source: 'Runtime',
    method: 'Preview',
    runtime: performance.now() - startTime,
    response_tokens: responseTokens,
    response_content: responseOutput,
    total_tokens: queryTokens + responseTokens,
    workspace_id: req.params.workspaceID,
    organization_id: project?.teamID,
    project_id: project?._id,
    project_platform: project?.platform,
    project_type: project?.type,
    prompt_type: settings?.summarization?.mode,
    model: settings?.summarization?.model,
    temperature: settings?.summarization?.temperature,
    max_tokens: settings?.summarization?.maxTokens,
    system_prompt: settings?.summarization?.prompt,
    prompt_tokens: queryTokens,
    prompt_content: req.body.prompt,
    // an empty/missing output is treated as a failed request
    success: !!answer?.output,
    http_return_code: answer?.output ? 200 : 500,
  };

  const analyticsPlatformClient = await this.services.analyticsPlatform.getClient();

  if (analyticsPlatformClient) {
    try {
      analyticsPlatformClient.track({
        identity: { userID: project.creatorID },
        name: SegmentEventType.AI_REQUEST,
        properties: { ...properties, last_product_activity: currentDate },
      });
    } catch (error: unknown) {
      // fix: original message misspelled "Segment" as "Segement"
      log.error(`[analytics] test KB failed Segment track ${log.vars({ projectID: project?._id, error })}`);
    }
  }
};

async testKnowledgeBasePrompt(req: Request) {
const api = await this.services.dataAPI.get();

Expand All @@ -64,9 +127,22 @@ class TestController extends AbstractController {
const settings = _merge({}, project.knowledgeBase?.settings, req.body.settings);

const { prompt } = req.body;
const startTime = performance.now();

const answer = await promptSynthesis(project._id, project.teamID, { ...settings.summarization, prompt }, {});

const segmentSourceInfo = {
req,
answer,
startTime,
settings,
project,
action_type: AiRequestActionType.AI_RESPONSE_STEP,
isKB: true,
};

this.testSendSegmentEvent(segmentSourceInfo);

if (!answer?.output) return { output: null };

if (typeof answer.tokens === 'number' && answer.tokens > 0) {
Expand Down Expand Up @@ -113,9 +189,24 @@ class TestController extends AbstractController {
}));

if (!synthesis) return { output: null, chunks };
const startTime = performance.now();

const answer = await answerSynthesis({ question, data, options: settings?.summarization });

if (!(req.headers.authorization && req.headers.authorization.startsWith('ApiKey '))) {
const segmentSourceInfo = {
req,
answer,
startTime,
settings,
project,
action_type: AiRequestActionType.KB_PAGE,
isKB: true,
};

this.testSendSegmentEvent(segmentSourceInfo);
}

if (!answer?.output) return { output: null, chunks };

// do this async to not block the response
Expand All @@ -137,18 +228,40 @@ class TestController extends AbstractController {
}

async testCompletion(
req: Request<BaseUtils.ai.AIModelParams & BaseUtils.ai.AIContextParams & { workspaceID: string }>
req: Request<BaseUtils.ai.AIModelParams & BaseUtils.ai.AIContextParams & { workspaceID: string; identity: object }>
) {
const ai = AI.get(req.body.model);

if (!ai) throw new VError('invalid model', VError.HTTP_STATUS.BAD_REQUEST);
if (typeof req.body.prompt !== 'string') throw new VError('invalid prompt', VError.HTTP_STATUS.BAD_REQUEST);

const { output, tokens } = await fetchPrompt(req.body);
const startTime = performance.now();

const answer = await fetchPrompt(req.body);
const { output } = answer;

const segmentSourceInfo = {
req,
answer,
startTime,
settings: {
summarization: {
mode: req.body?.mode,
model: req.body?.model,
temperature: req.body?.temperature,
maxTokens: req.body?.maxTokens,
prompt: req.body?.prompt,
},
},
project: {},
action_type:
req.body.type && req.body.type === 'ai_set'
? AiRequestActionType.AI_SET_STEP
: AiRequestActionType.AI_RESPONSE_STEP,
isKB: true,
};
this.testSendSegmentEvent(segmentSourceInfo);

if (typeof tokens === 'number' && tokens > 0) {
if (typeof answer.tokens === 'number' && answer.tokens > 0) {
await this.services.billing
.consumeQuota(req.params.workspaceID, QuotaName.OPEN_API_TOKENS, tokens)
.consumeQuota(req.params.workspaceID, QuotaName.OPEN_API_TOKENS, answer.tokens)
.catch((err: Error) =>
log.warn(
`[Completion Test] Error consuming quota for workspace ${req.params.workspaceID}: ${log.vars({ err })}`
Expand Down
70 changes: 66 additions & 4 deletions lib/services/runtime/handlers/noMatch.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,14 @@ import { VoiceflowConstants, VoiceflowNode } from '@voiceflow/voiceflow-types';
import _ from 'lodash';

import AI from '@/lib/clients/ai';
import { Runtime, Store } from '@/runtime';

import { isPrompt, NoMatchCounterStorage, Output, StorageType } from '../types';
import {
AiRequestActionType,
isPrompt,
NoMatchCounterStorage,
Output,
SegmentEventType,
StorageType,
} from '@/lib/services/runtime/types';
import {
addButtonsIfExists,
addOutputTrace,
Expand All @@ -14,7 +19,11 @@ import {
isPromptContentEmpty,
isPromptContentInitialized,
removeEmptyPrompts,
} from '../utils';
slateToPlaintext,
} from '@/lib/services/runtime/utils';
import log from '@/logger';
import { Runtime, Store } from '@/runtime';

import { addNoReplyTimeoutIfExists } from './noReply';
import { checkTokens, consumeResources } from './utils/ai';
import { generateNoMatch } from './utils/generativeNoMatch';
Expand All @@ -40,6 +49,48 @@ export const convertDeprecatedNoMatch = ({ noMatch, elseId, noMatches, randomize
...node,
} as NoMatchNode);

const sendSegmentEvent = async (
runtime: Runtime,
action_type: AiRequestActionType,
startTime: any,
result: { output?: Output; tokens: number; queryTokens: number; answerTokens: number } | null
): Promise<void | null> => {
const responseTokens = result?.answerTokens ?? 0;
const queryTokens = result?.queryTokens ?? 0;
const currentDate = new Date().toISOString().slice(0, 10);

const properties = {
action_type,
source: 'Runtime',
response_tokens: responseTokens,
response_content: slateToPlaintext(result?.output as BaseText.SlateTextValue),
runtime: performance.now() - startTime,
total_tokens: queryTokens + responseTokens,
workspace_id: runtime?.project?.teamID,
organiztion_id: runtime?.project?.teamID,
project_id: runtime?.project?._id,
project_platform: runtime?.project?.platform,
project_type: runtime?.project?.type,
model: runtime?.project?.knowledgeBase?.settings?.summarization.model,
temperature: runtime?.project?.knowledgeBase?.settings?.summarization.temperature,
max_tokens: runtime?.project?.knowledgeBase?.settings?.summarization.maxTokens,
system_prompt: runtime?.project?.knowledgeBase?.settings?.summarization.prompt,
prompt_tokens: queryTokens,
success: !!result?.output,
http_return_code: !result?.output ? 500 : 200,
};

const analyticsPlatformClient = await runtime.services.analyticsPlatform.getClient();

if (analyticsPlatformClient) {
analyticsPlatformClient.track({
identity: { userID: Number(runtime?.project?.creatorID.toString()) },
name: SegmentEventType.AI_REQUEST,
properties: { ...properties, last_product_activity: currentDate },
});
}
};

const removeEmptyNoMatches = (node: NoMatchNode) => {
const prompts: Array<BaseText.SlateTextValue | string> = node.noMatch?.prompts ?? [];

Expand Down Expand Up @@ -78,14 +129,25 @@ const getOutput = async (
// use knowledge base if it exists
let result: { output?: Output; tokens: number; queryTokens: number; answerTokens: number } | null = null;
if (Object.values(runtime.project?.knowledgeBase?.documents || {}).length > 0) {
const startTime = performance.now();
result = await knowledgeBaseNoMatch(runtime);
const action_type = AiRequestActionType.KB_FALLBACK;

sendSegmentEvent(runtime, action_type, startTime, result).catch(() => null);

const model = AI.get(runtime.project?.knowledgeBase?.settings?.summarization.model);
await consumeResources('KB Fallback', runtime, model, result);
}

// hit global no match if KB wasn't successful
if (!result?.output && globalNoMatch?.type === BaseVersion.GlobalNoMatchType.GENERATIVE) {
const startTime = performance.now();
result = await generateNoMatch(runtime, globalNoMatch.prompt);

const action_type = AiRequestActionType.AI_GLOBAL_NO_MATCH;

sendSegmentEvent(runtime, action_type, startTime, result).catch(() => null);

const model = AI.get(globalNoMatch.prompt.model);
await consumeResources('Generative No Match', runtime, model, result);
}
Expand Down
12 changes: 12 additions & 0 deletions lib/services/runtime/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,18 @@ export enum StreamAudioDirective {
REPLACE_ALL = 'REPLACE_ALL',
}

/**
 * Which AI-powered step/flow produced an AI request, reported as the
 * `action_type` property of the "AI Request" Segment event.
 */
export enum AiRequestActionType {
  AI_RESPONSE_STEP = 'AI Response Step',
  AI_SET_STEP = 'AI Set Step',
  KB_PAGE = 'KB Page',
  KB_FALLBACK = 'KB Fallback',
  AI_GLOBAL_NO_MATCH = 'AI Global No Match',
}

/** Event names sent to the Segment analytics platform (`track({ name: ... })`). */
export enum SegmentEventType {
  AI_REQUEST = 'AI Request',
}

export interface StreamPlayStorage {
src: string;
loop: boolean;
Expand Down

0 comments on commit b61452f

Please sign in to comment.