Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions src/Elastic.Documentation.Site/Assets/eui-icons-cache.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@ import { icon as EuiIconCopy } from '@elastic/eui/es/components/icon/assets/copy
import { icon as EuiIconCopyClipboard } from '@elastic/eui/es/components/icon/assets/copy_clipboard'
import { icon as EuiIconCross } from '@elastic/eui/es/components/icon/assets/cross'
import { icon as EuiIconDocument } from '@elastic/eui/es/components/icon/assets/document'
import { icon as EuiIconDot } from '@elastic/eui/es/components/icon/assets/dot'
import { icon as EuiIconEmpty } from '@elastic/eui/es/components/icon/assets/empty'
import { icon as EuiIconError } from '@elastic/eui/es/components/icon/assets/error'
import { icon as EuiIconFaceHappy } from '@elastic/eui/es/components/icon/assets/face_happy'
import { icon as EuiIconFaceSad } from '@elastic/eui/es/components/icon/assets/face_sad'
Expand All @@ -32,6 +34,8 @@ appendIconComponentCache({
arrowLeft: EuiIconArrowLeft,
arrowRight: EuiIconArrowRight,
document: EuiIconDocument,
dot: EuiIconDot,
empty: EuiIconEmpty,
search: EuiIconSearch,
trash: EuiIconTrash,
user: EuiIconUser,
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
/** @jsxImportSource @emotion/react */
import { useAiProviderStore } from './aiProviderStore'
import { EuiRadioGroup } from '@elastic/eui'
import type { EuiRadioGroupOption } from '@elastic/eui'
import { css } from '@emotion/react'

// Layout for the selector row: padded and horizontally centered so the
// radio group sits beneath the assistant intro text.
const containerStyles = css`
	padding: 1rem;
	display: flex;
	justify-content: center;
`

// Radio options for the two supported AI backends. The `id` values are
// cast to the 'AgentBuilder' | 'LlmGateway' union in AiProviderSelector's
// onChange handler, so they must stay in sync with that union.
const options: EuiRadioGroupOption[] = [
	{
		id: 'LlmGateway',
		label: 'LLM Gateway',
	},
	{
		id: 'AgentBuilder',
		label: 'Agent Builder',
	},
]

/**
 * Radio-group selector that lets the user pick which backend answers
 * Ask-AI questions. The current choice is read from, and written back
 * to, the shared aiProviderStore.
 */
export const AiProviderSelector = () => {
	const { provider, setProvider } = useAiProviderStore()

	// EuiRadioGroup reports the selected option's id; narrow it back to
	// the provider union the store accepts.
	const handleChange = (id: string) => {
		setProvider(id as 'AgentBuilder' | 'LlmGateway')
	}

	return (
		<div css={containerStyles}>
			<EuiRadioGroup
				options={options}
				idSelected={provider}
				onChange={handleChange}
				name="aiProvider"
				legend={{ children: 'AI Provider', display: 'visible' }}
			/>
		</div>
	)
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
// Canonical AskAI event types - matches backend AskAiEvent records
import * as z from 'zod'

// Event type constants for type-safe referencing
// Event type constants for type-safe referencing.
// The string values are the `type` discriminator tags used by the zod
// schemas below; per the file header they match the backend AskAiEvent
// records, so changing a value here breaks wire compatibility.
export const EventTypes = {
	CONVERSATION_START: 'conversation_start',
	CHUNK: 'chunk',
	CHUNK_COMPLETE: 'chunk_complete',
	SEARCH_TOOL_CALL: 'search_tool_call',
	TOOL_CALL: 'tool_call',
	TOOL_RESULT: 'tool_result',
	REASONING: 'reasoning',
	CONVERSATION_END: 'conversation_end',
	ERROR: 'error',
} as const

// Individual event schemas.
// Every event carries the same envelope (`id`, `timestamp`), so that
// shape is factored into a base schema and extended per event type
// instead of being repeated in each declaration.
const BaseAskAiEventSchema = z.object({
	id: z.string(),
	timestamp: z.number(),
})

export const ConversationStartEventSchema = BaseAskAiEventSchema.extend({
	type: z.literal(EventTypes.CONVERSATION_START),
	conversationId: z.string(),
})

export const ChunkEventSchema = BaseAskAiEventSchema.extend({
	type: z.literal(EventTypes.CHUNK),
	content: z.string(),
})

export const ChunkCompleteEventSchema = BaseAskAiEventSchema.extend({
	type: z.literal(EventTypes.CHUNK_COMPLETE),
	fullContent: z.string(),
})

export const SearchToolCallEventSchema = BaseAskAiEventSchema.extend({
	type: z.literal(EventTypes.SEARCH_TOOL_CALL),
	toolCallId: z.string(),
	searchQuery: z.string(),
})

export const ToolCallEventSchema = BaseAskAiEventSchema.extend({
	type: z.literal(EventTypes.TOOL_CALL),
	toolCallId: z.string(),
	toolName: z.string(),
	// JSON-encoded tool arguments; parsed lazily by consumers.
	arguments: z.string(),
})

export const ToolResultEventSchema = BaseAskAiEventSchema.extend({
	type: z.literal(EventTypes.TOOL_RESULT),
	toolCallId: z.string(),
	result: z.string(),
})

export const ReasoningEventSchema = BaseAskAiEventSchema.extend({
	type: z.literal(EventTypes.REASONING),
	// Reasoning updates may arrive without a displayable message.
	message: z.string().nullable(),
})

export const ConversationEndEventSchema = BaseAskAiEventSchema.extend({
	type: z.literal(EventTypes.CONVERSATION_END),
})

export const ErrorEventSchema = BaseAskAiEventSchema.extend({
	type: z.literal(EventTypes.ERROR),
	message: z.string(),
})

// Discriminated union of all event types, keyed on the `type` tag.
export const AskAiEventSchema = z.discriminatedUnion('type', [
	ConversationStartEventSchema,
	ChunkEventSchema,
	ChunkCompleteEventSchema,
	SearchToolCallEventSchema,
	ToolCallEventSchema,
	ToolResultEventSchema,
	ReasoningEventSchema,
	ConversationEndEventSchema,
	ErrorEventSchema,
])

// Infer TypeScript types from the schemas so the static types can never
// drift from the runtime validators.
export type ConversationStartEvent = z.infer<
	typeof ConversationStartEventSchema
>
export type ChunkEvent = z.infer<typeof ChunkEventSchema>
export type ChunkCompleteEvent = z.infer<typeof ChunkCompleteEventSchema>
export type SearchToolCallEvent = z.infer<typeof SearchToolCallEventSchema>
export type ToolCallEvent = z.infer<typeof ToolCallEventSchema>
export type ToolResultEvent = z.infer<typeof ToolResultEventSchema>
export type ReasoningEvent = z.infer<typeof ReasoningEventSchema>
export type ConversationEndEvent = z.infer<typeof ConversationEndEventSchema>
export type ErrorEvent = z.infer<typeof ErrorEventSchema>
// Union of every event variant, discriminated on `type`.
export type AskAiEvent = z.infer<typeof AskAiEventSchema>
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
/** @jsxImportSource @emotion/react */
import { AiProviderSelector } from './AiProviderSelector'
import { AskAiSuggestions } from './AskAiSuggestions'
import { ChatMessageList } from './ChatMessageList'
import { useChatActions, useChatMessages } from './chat.store'
Expand Down Expand Up @@ -137,12 +138,17 @@ export const Chat = () => {
<h2>Hi! I'm the Elastic Docs AI Assistant</h2>
}
body={
<p>
I can help answer your questions about
Elastic documentation. <br />
Ask me anything about Elasticsearch, Kibana,
Observability, Security, and more.
</p>
<>
<p>
I can help answer your questions about
Elastic documentation. <br />
Ask me anything about Elasticsearch,
Kibana, Observability, Security, and
more.
</p>
<EuiSpacer size="m" />
<AiProviderSelector />
</>
}
footer={
<>
Expand Down
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
import { initCopyButton } from '../../../copybutton'
import { hljs } from '../../../hljs'
import { AskAiEvent, EventTypes } from './AskAiEvent'
import { GeneratingStatus } from './GeneratingStatus'
import { References } from './RelatedResources'
import { ChatMessage as ChatMessageType } from './chat.store'
import { LlmGatewayMessage } from './useLlmGateway'
import { useStatusMinDisplay } from './useStatusMinDisplay'
import {
EuiButtonIcon,
EuiCallOut,
Expand Down Expand Up @@ -56,16 +57,16 @@ const markedInstance = createMarkedInstance()

interface ChatMessageProps {
	/** The chat-store message this bubble renders. */
	message: ChatMessageType
	/** Streamed AskAI events backing an in-flight AI message. */
	events?: AskAiEvent[]
	/** Partial markdown accumulated so far while streaming. */
	streamingContent?: string
	/** Stream/transport error, if any; renders the error state. */
	error?: Error | null
	/** Invoked when the user asks to retry a failed message. */
	onRetry?: () => void
}

const getAccumulatedContent = (messages: LlmGatewayMessage[]) => {
const getAccumulatedContent = (messages: AskAiEvent[]) => {
return messages
.filter((m) => m.type === 'ai_message_chunk')
.map((m) => m.data.content)
.filter((m) => m.type === 'chunk')
.map((m) => m.content)
.join('')
}

Expand Down Expand Up @@ -100,57 +101,86 @@ const getMessageState = (message: ChatMessageType) => ({
hasError: message.status === 'error',
})

// Helper functions for computing AI status
const getToolCallSearchQuery = (
messages: LlmGatewayMessage[]
): string | null => {
const toolCallMessage = messages.find((m) => m.type === 'tool_call')
if (!toolCallMessage) return null
// Status message constants displayed while an AI reply is streaming.
// Specific statuses (e.g. "Searching for ...") are built inline where
// they need event data; these are the fixed fallbacks.
const STATUS_MESSAGES = {
	THINKING: 'Thinking',
	ANALYZING: 'Analyzing results',
	GATHERING: 'Gathering resources',
	GENERATING: 'Generating',
} as const

// Helper to extract search query from tool call arguments
const tryParseSearchQuery = (argsJson: string): string | null => {
try {
const toolCalls = toolCallMessage.data?.toolCalls
if (toolCalls && toolCalls.length > 0) {
const firstToolCall = toolCalls[0]
return firstToolCall.args?.searchQuery || null
}
} catch (e) {
console.error('Error extracting search query from tool call:', e)
const args = JSON.parse(argsJson)
return args.searchQuery || args.query || null
} catch {
return null
}

return null
}

const hasContentStarted = (messages: LlmGatewayMessage[]): boolean => {
return messages.some((m) => m.type === 'ai_message_chunk' && m.data.content)
}
// Helper to get tool call status message
const getToolCallStatus = (event: AskAiEvent): string => {
if (event.type !== EventTypes.TOOL_CALL) {
return STATUS_MESSAGES.THINKING
}

const hasReachedReferences = (messages: LlmGatewayMessage[]): boolean => {
const accumulatedContent = messages
.filter((m) => m.type === 'ai_message_chunk')
.map((m) => m.data.content)
.join('')
return accumulatedContent.includes('<!--REFERENCES')
const query = tryParseSearchQuery(event.arguments)
return query ? `Searching for "${query}"` : `Using ${event.toolName}`
}

// Helper function for computing the AI status line - time-based latest
// status. Returns null once the reply is complete (no status shown).
const computeAiStatus = (
	events: AskAiEvent[],
	isComplete: boolean
): string | null => {
	if (isComplete) return null

	// Only some event kinds carry status information; order them by
	// timestamp so the most recent one wins.
	const statusEvents = events
		.filter(
			(m) =>
				m.type === EventTypes.REASONING ||
				m.type === EventTypes.SEARCH_TOOL_CALL ||
				m.type === EventTypes.TOOL_CALL ||
				m.type === EventTypes.TOOL_RESULT ||
				m.type === EventTypes.CHUNK
		)
		.sort((a, b) => a.timestamp - b.timestamp)

	// Most recent status-worthy event; undefined when none arrived yet.
	const latestEvent = statusEvents[statusEvents.length - 1]
	if (!latestEvent) return STATUS_MESSAGES.THINKING

	switch (latestEvent.type) {
		case EventTypes.REASONING:
			// Reasoning messages are nullable; fall back to the default.
			return latestEvent.message || STATUS_MESSAGES.THINKING

		case EventTypes.SEARCH_TOOL_CALL:
			return `Searching Elastic's Docs for "${latestEvent.searchQuery}"`

		case EventTypes.TOOL_CALL:
			return getToolCallStatus(latestEvent)

		case EventTypes.TOOL_RESULT:
			return STATUS_MESSAGES.ANALYZING

		case EventTypes.CHUNK: {
			// Once the references marker appears in the accumulated
			// markdown, the model has moved on to emitting sources.
			const allContent = events
				.filter((m) => m.type === EventTypes.CHUNK)
				.map((m) => m.content)
				.join('')

			return allContent.includes('<!--REFERENCES')
				? STATUS_MESSAGES.GATHERING
				: STATUS_MESSAGES.GENERATING
		}

		default:
			return STATUS_MESSAGES.THINKING
	}
}

// Action bar for complete AI messages
Expand Down Expand Up @@ -215,7 +245,7 @@ const ActionBar = ({

export const ChatMessage = ({
message,
llmMessages = [],
events = [],
streamingContent,
error,
onRetry,
Expand Down Expand Up @@ -251,9 +281,7 @@ export const ChatMessage = ({

const content =
streamingContent ||
(llmMessages.length > 0
? getAccumulatedContent(llmMessages)
: message.content)
(events.length > 0 ? getAccumulatedContent(events) : message.content)

const hasError = message.status === 'error' || !!error

Expand All @@ -279,11 +307,14 @@ export const ChatMessage = ({
return DOMPurify.sanitize(html)
}, [mainContent])

const aiStatus = useMemo(
() => computeAiStatus(llmMessages, isComplete),
[llmMessages, isComplete]
const rawAiStatus = useMemo(
() => computeAiStatus(events, isComplete),
[events, isComplete]
)

// Apply minimum display time to prevent status flickering
const aiStatus = useStatusMinDisplay(rawAiStatus)

const ref = React.useRef<HTMLDivElement>(null)

useEffect(() => {
Expand Down
Loading
Loading