diff --git a/.github/workflows/deploy-docs-staging.yaml b/.github/workflows/deploy-docs-staging.yaml index 27dc69c9..d9a3c30b 100644 --- a/.github/workflows/deploy-docs-staging.yaml +++ b/.github/workflows/deploy-docs-staging.yaml @@ -49,8 +49,8 @@ jobs: - name: Deploy to S3 run: | - aws s3 sync ./site s3://openobserve-website-staging/docs --exclude=".git/*" + aws s3 sync ./site s3://openobserve-website-staging/docs --exclude=".git/*" --delete - name: Invalidate CloudFront cache run: | - aws cloudfront create-invalidation --distribution-id E2GZJM0TJIDFRM --paths "/docs/*" \ No newline at end of file + aws cloudfront create-invalidation --distribution-id E2GZJM0TJIDFRM --paths "/docs/*" diff --git a/docs/assets/feedback.svg b/docs/assets/feedback.svg new file mode 100644 index 00000000..16b0d145 --- /dev/null +++ b/docs/assets/feedback.svg @@ -0,0 +1,3 @@ + + + diff --git a/docs/images/action-as-destination.png b/docs/images/action-as-destination.png index e489938a..b62ec696 100644 Binary files a/docs/images/action-as-destination.png and b/docs/images/action-as-destination.png differ diff --git a/docs/images/add-regex-pattern.png b/docs/images/add-regex-pattern.png new file mode 100644 index 00000000..48ed42ff Binary files /dev/null and b/docs/images/add-regex-pattern.png differ diff --git a/docs/images/alert-email-destination.png b/docs/images/alert-email-destination.png index 676035b8..99b9640b 100644 Binary files a/docs/images/alert-email-destination.png and b/docs/images/alert-email-destination.png differ diff --git a/docs/images/audit-trail.png b/docs/images/audit-trail.png new file mode 100644 index 00000000..6677a9de Binary files /dev/null and b/docs/images/audit-trail.png differ diff --git a/docs/images/cipher-keys-akeyless.png b/docs/images/cipher-keys-akeyless.png index 0687b6e0..d8b7cfe2 100644 Binary files a/docs/images/cipher-keys-akeyless.png and b/docs/images/cipher-keys-akeyless.png differ diff --git a/docs/images/cipher-keys-encryption-mechanism.png b/docs/images/cipher-keys-encryption-mechanism.png index d9563985..eaf208ea 100644 Binary files a/docs/images/cipher-keys-encryption-mechanism.png and b/docs/images/cipher-keys-encryption-mechanism.png differ diff --git a/docs/images/cipher-keys-o2local.png b/docs/images/cipher-keys-o2local.png index 29ecc7b8..2fcce25b 100644 Binary files a/docs/images/cipher-keys-o2local.png and b/docs/images/cipher-keys-o2local.png differ diff --git a/docs/images/cipher-keys-page.png b/docs/images/cipher-keys-page.png new file mode 100644 index 00000000..bd30e665 Binary files /dev/null and b/docs/images/cipher-keys-page.png differ diff --git a/docs/images/cipher-keys.png b/docs/images/cipher-keys.png index 21d81faf..2d8a5b7f 100644 Binary files a/docs/images/cipher-keys.png and b/docs/images/cipher-keys.png differ diff --git a/docs/images/config-custom-trace-id.png b/docs/images/config-custom-trace-id.png new file mode 100644 index 00000000..088a675b Binary files /dev/null and b/docs/images/config-custom-trace-id.png differ diff --git a/docs/images/config-traces-in-org-parameter.png b/docs/images/config-traces-in-org-parameter.png new file mode 100644 index 00000000..e12b08bf Binary files /dev/null and b/docs/images/config-traces-in-org-parameter.png differ diff --git a/docs/images/create-regex-pattern.png b/docs/images/create-regex-pattern.png index 36edfdbc..51bcf325 100644 Binary files a/docs/images/create-regex-pattern.png and b/docs/images/create-regex-pattern.png differ diff --git a/docs/images/current-cluster-query-result.png 
b/docs/images/current-cluster-query-result.png new file mode 100644 index 00000000..752c7e96 Binary files /dev/null and b/docs/images/current-cluster-query-result.png differ diff --git a/docs/images/current-cluster-query.png b/docs/images/current-cluster-query.png new file mode 100644 index 00000000..64137626 Binary files /dev/null and b/docs/images/current-cluster-query.png differ diff --git a/docs/images/detailed-span-view.png b/docs/images/detailed-span-view.png new file mode 100644 index 00000000..d8db9aa2 Binary files /dev/null and b/docs/images/detailed-span-view.png differ diff --git a/docs/images/drop-at-ingestion-time-result.png b/docs/images/drop-at-ingestion-time-result.png new file mode 100644 index 00000000..489d5111 Binary files /dev/null and b/docs/images/drop-at-ingestion-time-result.png differ diff --git a/docs/images/drop-at-ingestion-time-test-config.png b/docs/images/drop-at-ingestion-time-test-config.png new file mode 100644 index 00000000..4eab6e3c Binary files /dev/null and b/docs/images/drop-at-ingestion-time-test-config.png differ diff --git a/docs/images/drop-at-query-time-result.png b/docs/images/drop-at-query-time-result.png new file mode 100644 index 00000000..1cdeaebf Binary files /dev/null and b/docs/images/drop-at-query-time-result.png differ diff --git a/docs/images/drop-at-query-time-test-config.png b/docs/images/drop-at-query-time-test-config.png new file mode 100644 index 00000000..7793bdb9 Binary files /dev/null and b/docs/images/drop-at-query-time-test-config.png differ diff --git a/docs/images/email-regex.png b/docs/images/email-regex.png index 7a5056be..10803873 100644 Binary files a/docs/images/email-regex.png and b/docs/images/email-regex.png differ diff --git a/docs/images/error-badge-spans.png b/docs/images/error-badge-spans.png new file mode 100644 index 00000000..591c0021 Binary files /dev/null and b/docs/images/error-badge-spans.png differ diff --git a/docs/images/explore-logs-openobserve-plugin.png b/docs/images/explore-logs-openobserve-plugin.png new file mode 100644 index 00000000..139c0839 Binary files /dev/null and b/docs/images/explore-logs-openobserve-plugin.png differ diff --git a/docs/images/federated-search-multi-select.png b/docs/images/federated-search-multi-select.png new file mode 100644 index 00000000..13dd22a5 Binary files /dev/null and b/docs/images/federated-search-multi-select.png differ diff --git a/docs/images/federated-search-result.png b/docs/images/federated-search-result.png new file mode 100644 index 00000000..be0e0867 Binary files /dev/null and b/docs/images/federated-search-result.png differ diff --git a/docs/images/federated-search.png b/docs/images/federated-search.png new file mode 100644 index 00000000..0fd3bc4e Binary files /dev/null and b/docs/images/federated-search.png differ diff --git a/docs/images/full-name-regex.png b/docs/images/full-name-regex.png index 38628e94..ab4a0e49 100644 Binary files a/docs/images/full-name-regex.png and b/docs/images/full-name-regex.png differ diff --git a/docs/images/navigate-to-traces.png b/docs/images/navigate-to-traces.png new file mode 100644 index 00000000..b5a5abfd Binary files /dev/null and b/docs/images/navigate-to-traces.png differ diff --git a/docs/images/openobserve-data-source-config.png b/docs/images/openobserve-data-source-config.png new file mode 100644 index 00000000..19232389 Binary files /dev/null and b/docs/images/openobserve-data-source-config.png differ diff --git a/docs/images/openobserve-plugin.png b/docs/images/openobserve-plugin.png new file mode 
100644 index 00000000..db7253f7 Binary files /dev/null and b/docs/images/openobserve-plugin.png differ diff --git a/docs/images/opentelemetry-collector-for-traces.png b/docs/images/opentelemetry-collector-for-traces.png new file mode 100644 index 00000000..5dd5ef07 Binary files /dev/null and b/docs/images/opentelemetry-collector-for-traces.png differ diff --git a/docs/images/redact-at-ingestion-time-result.png b/docs/images/redact-at-ingestion-time-result.png new file mode 100644 index 00000000..aecca830 Binary files /dev/null and b/docs/images/redact-at-ingestion-time-result.png differ diff --git a/docs/images/redact-at-ingestion-time-test-config.png b/docs/images/redact-at-ingestion-time-test-config.png new file mode 100644 index 00000000..23c0618d Binary files /dev/null and b/docs/images/redact-at-ingestion-time-test-config.png differ diff --git a/docs/images/redact-at-query-test-config.png b/docs/images/redact-at-query-test-config.png new file mode 100644 index 00000000..05bd3d07 Binary files /dev/null and b/docs/images/redact-at-query-test-config.png differ diff --git a/docs/images/redact-at-query-time-result.png b/docs/images/redact-at-query-time-result.png new file mode 100644 index 00000000..e1d6b6e2 Binary files /dev/null and b/docs/images/redact-at-query-time-result.png differ diff --git a/docs/images/redact-or-drop-during-regex-pattern-execution.png b/docs/images/redact-or-drop-during-regex-pattern-execution.png new file mode 100644 index 00000000..739200ff Binary files /dev/null and b/docs/images/redact-or-drop-during-regex-pattern-execution.png differ diff --git a/docs/images/regex-pattern-execution-time.png b/docs/images/regex-pattern-execution-time.png new file mode 100644 index 00000000..3b3846bd Binary files /dev/null and b/docs/images/regex-pattern-execution-time.png differ diff --git a/docs/images/regex-patterns-drop.png b/docs/images/regex-patterns-drop.png new file mode 100644 index 00000000..9cf00284 Binary files /dev/null and b/docs/images/regex-patterns-drop.png differ diff --git a/docs/images/regex-patterns-redact.png b/docs/images/regex-patterns-redact.png new file mode 100644 index 00000000..729ae702 Binary files /dev/null and b/docs/images/regex-patterns-redact.png differ diff --git a/docs/images/sensitive-data-redaction.png b/docs/images/sensitive-data-redaction.png new file mode 100644 index 00000000..5f557840 Binary files /dev/null and b/docs/images/sensitive-data-redaction.png differ diff --git a/docs/images/service-map.png b/docs/images/service-map.png new file mode 100644 index 00000000..79685fcb Binary files /dev/null and b/docs/images/service-map.png differ diff --git a/docs/images/span-details.png b/docs/images/span-details.png new file mode 100644 index 00000000..5553b046 Binary files /dev/null and b/docs/images/span-details.png differ diff --git a/docs/images/span-view.png b/docs/images/span-view.png new file mode 100644 index 00000000..908e1000 Binary files /dev/null and b/docs/images/span-view.png differ diff --git a/docs/images/stream-settings-add-regex.png b/docs/images/stream-settings-add-regex.png index 25afda4b..d388a78b 100644 Binary files a/docs/images/stream-settings-add-regex.png and b/docs/images/stream-settings-add-regex.png differ diff --git a/docs/images/stream-settings-sensitive-fields.png b/docs/images/stream-settings-sensitive-fields.png new file mode 100644 index 00000000..966d02ed Binary files /dev/null and b/docs/images/stream-settings-sensitive-fields.png differ diff --git a/docs/images/timeline.png b/docs/images/timeline.png new 
file mode 100644 index 00000000..78a8c44b Binary files /dev/null and b/docs/images/timeline.png differ diff --git a/docs/images/trace-list.png b/docs/images/trace-list.png new file mode 100644 index 00000000..7e45d423 Binary files /dev/null and b/docs/images/trace-list.png differ diff --git a/docs/images/trace-timeline.png b/docs/images/trace-timeline.png new file mode 100644 index 00000000..c10f9a0c Binary files /dev/null and b/docs/images/trace-timeline.png differ diff --git a/docs/images/traces-to-logs.png b/docs/images/traces-to-logs.png new file mode 100644 index 00000000..49d9c838 Binary files /dev/null and b/docs/images/traces-to-logs.png differ diff --git a/docs/images/traces.png b/docs/images/traces.png new file mode 100644 index 00000000..9d99fd80 Binary files /dev/null and b/docs/images/traces.png differ diff --git a/docs/images/update-regex-patterns.png b/docs/images/update-regex-patterns.png new file mode 100644 index 00000000..e51ce6ba Binary files /dev/null and b/docs/images/update-regex-patterns.png differ diff --git a/docs/images/view-trace-from-logs.png b/docs/images/view-trace-from-logs.png new file mode 100644 index 00000000..390502ac Binary files /dev/null and b/docs/images/view-trace-from-logs.png differ diff --git a/docs/images/warning-spans.png b/docs/images/warning-spans.png new file mode 100644 index 00000000..77c6f73c Binary files /dev/null and b/docs/images/warning-spans.png differ diff --git a/docs/images/webhook-template-for-alert.png b/docs/images/webhook-template-for-alert.png new file mode 100644 index 00000000..bbb24af1 Binary files /dev/null and b/docs/images/webhook-template-for-alert.png differ diff --git a/docs/images/webook-as-alert-destination.png b/docs/images/webook-as-alert-destination.png index ad9b2c4c..bdd76976 100644 Binary files a/docs/images/webook-as-alert-destination.png and b/docs/images/webook-as-alert-destination.png differ diff --git a/docs/js/feedback.js b/docs/js/feedback.js new file mode 100644 index 00000000..97e86b00 --- /dev/null +++ b/docs/js/feedback.js @@ -0,0 +1,226 @@ +// feedback.js + +function feedbackModalInit() { + // Remove any previous event listeners by replacing the button and modal with clones + const oldBtn = document.querySelector("#feedbackButton"); + const oldModal = document.querySelector("#feedbackModal"); + if (oldBtn) { + const newBtn = oldBtn.cloneNode(true); + oldBtn.parentNode.replaceChild(newBtn, oldBtn); + } + if (oldModal) { + const newModal = oldModal.cloneNode(true); + oldModal.parentNode.replaceChild(newModal, oldModal); + } + + // Now re-select after replacement + const feedbackButton = document.querySelector("#feedbackButton"); + const modal = document.querySelector("#feedbackModal"); + + if (!feedbackButton || !modal) { + return; + } + + const form = modal.querySelector("form"); + const successView = modal.querySelector(".success-view"); + const formView = modal.querySelector(".form-view"); + const errorView = modal.querySelector(".error-view"); + const tabs = modal.querySelectorAll(".feedback-tab"); + let lastActiveElement = null; + + // Ensure the form exists before touching it + if (!form) { + return; + } + + // ensure there's an input[name=type] for the form (hidden) so the submit code can read it + let typeInput = form.querySelector("input[name=type]"); + if (!typeInput) { + typeInput = document.createElement("input"); + typeInput.type = "hidden"; + typeInput.name = "type"; + typeInput.value = "Issue"; + form.appendChild(typeInput); + } + + function openModal() { + lastActiveElement = 
document.activeElement; + modal.classList.remove("tw-hidden"); + calculatePosition(); + const ta = modal.querySelector("textarea"); + if (ta) ta.focus(); + } + + function closeModal() { + modal.classList.add("tw-hidden"); + form.reset(); + errorView.classList.add("tw-hidden"); + successView.classList.add("tw-hidden"); + successView.classList.remove("tw-flex"); + try { + modal.style.top = ""; + modal.style.bottom = ""; + } catch (e) {} + formView.classList.remove("tw-hidden"); + try { + if (lastActiveElement && typeof lastActiveElement.focus === "function") { + lastActiveElement.focus(); + } + } catch (e) {} + } + + function calculatePosition() { + try { + const btnRect = feedbackButton.getBoundingClientRect(); + const screenHeight = window.innerHeight; + const buttonCenter = btnRect.top + btnRect.height / 2; + const placeAbove = buttonCenter > screenHeight / 2; + modal.classList.remove( + "tw-top-full", + "tw-bottom-full", + "tw-mt-4", + "tw-mb-4" + ); + if (placeAbove) { + modal.classList.add("tw-bottom-full", "tw-mb-4"); + modal.style.bottom = "100%"; + modal.style.top = ""; + } else { + modal.classList.add("tw-top-full", "tw-mt-4"); + modal.style.top = "100%"; + modal.style.bottom = ""; + } + if (!modal.classList.contains("tw-right-0")) + modal.classList.add("tw-right-0"); + } catch (err) {} + } + + if (tabs && tabs.length) { + const setActiveTab = (index) => { + tabs.forEach((tb, i) => { + const selected = i === index; + tb.classList.toggle("tw-bg-white", selected); + tb.classList.toggle("tw-text-gray-900", selected); + tb.classList.toggle("tw-shadow-sm", selected); + tb.setAttribute("aria-selected", selected ? "true" : "false"); + if (selected) { + const type = tb.getAttribute("data-type") || tb.textContent.trim(); + typeInput.value = type; + const ta = modal.querySelector("textarea"); + if (ta) ta.placeholder = `Type your ${type.toLowerCase()} here...`; + } + }); + }; + + tabs.forEach((t, idx) => { + t.addEventListener("click", () => { + setActiveTab(idx); + t.focus(); + }); + + t.addEventListener("keydown", (ev) => { + const key = ev.key; + let newIndex = null; + if (key === "ArrowRight") newIndex = (idx + 1) % tabs.length; + else if (key === "ArrowLeft") + newIndex = (idx - 1 + tabs.length) % tabs.length; + else if (key === "Home") newIndex = 0; + else if (key === "End") newIndex = tabs.length - 1; + + if (newIndex !== null) { + ev.preventDefault(); + setActiveTab(newIndex); + tabs[newIndex].focus(); + } + }); + }); + + setActiveTab(0); + } + + feedbackButton.addEventListener("click", () => { + try { + if (modal.classList.contains("tw-hidden")) { + openModal(); + } else { + closeModal(); + } + } catch (err) {} + }); + + document.addEventListener("keydown", (e) => { + if (e.key === "Escape") closeModal(); + }); + + document.addEventListener("mousedown", (e) => { + if (!modal.contains(e.target) && !feedbackButton.contains(e.target)) { + closeModal(); + } + }); + + window.addEventListener("resize", calculatePosition); + + form.addEventListener("submit", (e) => { + e.preventDefault(); + if (typeof form.reportValidity === "function") { + const ok = form.reportValidity(); + if (!ok) { + return; + } + } + try { + errorView.classList.add("tw-hidden"); + } catch (err) {} + const ta = + form.querySelector("textarea") || modal.querySelector("textarea"); + const message = (ta && ta.value && ta.value.trim()) || ""; + const data = { + type: (typeInput && typeInput.value) || "Issue", + message: message, + currentUrl: window.location.href, + userAgent: navigator.userAgent, + source: 
"feedback_form", + }; + if (typeof window.trackFeedback === "function") { + try { + window.trackFeedback(data); + } catch (e) {} + } + formView.classList.add("tw-hidden"); + successView.classList.add("tw-flex"); + successView.classList.remove("tw-hidden"); + setTimeout(closeModal, 1500); + fetch( + "https://script.google.com/macros/s/AKfycby5A7NSQCmG4KIBdM0HkRP-5zpRPy8aTrQHiQoe9uG_c_rv1VCiAnnZE8co7-kofgw-hg/exec", + { + method: "POST", + mode: "no-cors", + body: JSON.stringify(data), + headers: { "Content-Type": "application/json" }, + } + ).catch(() => { + try { + successView.classList.add("tw-hidden"); + successView.classList.remove("tw-flex"); + formView.classList.remove("tw-hidden"); + if (errorView) { + errorView.textContent = + "Failed to submit feedback. Please try again."; + errorView.classList.remove("tw-hidden"); + } + if (ta && typeof ta.focus === "function") ta.focus(); + } catch (err) {} + }); + }); +} + +// Run on DOMContentLoaded and MkDocs instant navigation +if (typeof window.document$ !== "undefined") { + window.document$.subscribe(() => { + setTimeout(feedbackModalInit, 0); + }); +} +// Always run on DOMContentLoaded (for initial load) +document.addEventListener("DOMContentLoaded", () => { + setTimeout(feedbackModalInit, 0); +}); diff --git a/docs/js/search-tracking.js b/docs/js/search-tracking.js index e29bdd1f..f2b6a78c 100644 --- a/docs/js/search-tracking.js +++ b/docs/js/search-tracking.js @@ -101,9 +101,7 @@ // pageType, // docs / blog / marketing (uncomment if needed) ...payload, }), - }); }); - } catch (error) { console.warn("Analytics API error:", error); } diff --git a/docs/js/segment.js b/docs/js/segment.js new file mode 100644 index 00000000..46a671d0 --- /dev/null +++ b/docs/js/segment.js @@ -0,0 +1,39 @@ +function getAnonymousId() { + let anonId = localStorage.getItem("segment_anonymous_id"); + if (!anonId) { + anonId = + "anon_" + Math.random().toString(36).substr(2, 9) + "_" + Date.now(); + localStorage.setItem("segment_anonymous_id", anonId); + } + return anonId; +} + +function getSegmentProxyUrl() { + // Set your Segment proxy URL here or from a global variable + return "https://swisspipe.dev.zinclabs.dev/api/v1/4e5cac41-4d34-46f9-b862-e7ac551b5a8f/trigger"; // e.g., set in your template +} + +function trackFeedback(feedbackData) { + const proxyUrl = getSegmentProxyUrl(); + if (!proxyUrl) { + return; + } + const message = { + user: { anonymousId: getAnonymousId() }, + event: "O2 Website Docs Feedback", + properties: feedbackData, + timestamp: new Date().toISOString(), + type: "track", + }; + fetch(proxyUrl, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(message), + }) + .then((res) => { + return res.text().then((text) => {}); + }) + .catch((e) => {}); +} + +window.trackFeedback = trackFeedback; diff --git a/docs/js/theme.js b/docs/js/theme.js new file mode 100644 index 00000000..06641bd4 --- /dev/null +++ b/docs/js/theme.js @@ -0,0 +1,106 @@ +// Simple theme persistence - leverages MkDocs Material's built-in theme system +(function () { + // Apply saved theme immediately to prevent flash + function applySavedTheme() { + const savedScheme = localStorage.getItem("theme-preference"); + if (savedScheme) { + document.documentElement.setAttribute( + "data-md-color-scheme", + savedScheme + ); + // Set the correct radio button as checked + var paletteForm = document.querySelector( + 'form[data-md-component="palette"]' + ); + if (paletteForm) { + var inputs = paletteForm.querySelectorAll('input[name="__palette"]'); + 
inputs.forEach(function (input) { + if (input.getAttribute("data-md-color-scheme") === savedScheme) { + input.checked = true; + } else { + input.checked = false; + } + }); + } + } + } + + // Save theme preference when changed + function attachPaletteListeners() { + var paletteForm = document.querySelector( + 'form[data-md-component="palette"]' + ); + if (!paletteForm) return false; + // avoid attaching twice to the same form + if (paletteForm.getAttribute("data-theme-listeners") === "1") return true; + var inputs = paletteForm.querySelectorAll('input[name="__palette"]'); + inputs.forEach(function (input) { + input.addEventListener("change", function () { + if (this.checked) { + var scheme = this.getAttribute("data-md-color-scheme"); + document.documentElement.setAttribute("data-md-color-scheme", scheme); + localStorage.setItem("theme-preference", scheme); + } + }); + }); + paletteForm.setAttribute("data-theme-listeners", "1"); + return true; + } + + // Observe changes to the documentElement attribute for scheme changes + function observeSchemeAttribute() { + const observer = new MutationObserver(function (mutations) { + mutations.forEach(function (mutation) { + if (mutation.attributeName === "data-md-color-scheme") { + const scheme = document.documentElement.getAttribute( + "data-md-color-scheme" + ); + if (scheme) { + localStorage.setItem("theme-preference", scheme); + } + } + }); + }); + observer.observe(document.documentElement, { + attributes: true, + attributeFilter: ["data-md-color-scheme"], + }); + } + + // Watch the body for insertions/replacements of the palette form and re-attach listeners + function observeBodyForPalette() { + // Try attach immediately in case it's already present + attachPaletteListeners(); + + const bodyObserver = new MutationObserver(function (mutations) { + // If nodes are added/removed we attempt to (re)attach listeners + for (var i = 0; i < mutations.length; i++) { + var mutation = mutations[i]; + if ( + mutation.type === "childList" && + (mutation.addedNodes.length || mutation.removedNodes.length) + ) { + // small debounce: try attach; attachPaletteListeners is idempotent + attachPaletteListeners(); + } + } + }); + bodyObserver.observe(document.body || document.documentElement, { + childList: true, + subtree: true, + }); + } + + // Setup theme persistence (apply + observe) + function setupThemePersistence() { + applySavedTheme(); + observeSchemeAttribute(); + observeBodyForPalette(); + } + + // Initial setup + setupThemePersistence(); + + // Re-apply on every DOMContentLoaded (instant navigation) + document.addEventListener("DOMContentLoaded", setupThemePersistence); +})(); diff --git a/docs/js/zinc.js b/docs/js/zinc.js index fc140536..68485666 100644 --- a/docs/js/zinc.js +++ b/docs/js/zinc.js @@ -150,70 +150,3 @@ handleTracking(); })(); -// Simple theme persistence - leverages MkDocs Material's built-in theme system -(function() { - // Apply saved theme immediately to prevent flash - function applySavedTheme() { - const savedScheme = localStorage.getItem('theme-preference'); - if (savedScheme) { - document.documentElement.setAttribute('data-md-color-scheme', savedScheme); - // Set the correct radio button as checked - var paletteForm = document.querySelector('form[data-md-component="palette"]'); - if (paletteForm) { - var inputs = paletteForm.querySelectorAll('input[name="__palette"]'); - inputs.forEach(function(input) { - if (input.getAttribute('data-md-color-scheme') === savedScheme) { - input.checked = true; - } else { - input.checked = false; - } - 
}); - } - } - } - - // Save theme preference when changed - function observeThemeChanges() { - // Attach change event to theme radio buttons - var paletteForm = document.querySelector('form[data-md-component="palette"]'); - if (paletteForm) { - var inputs = paletteForm.querySelectorAll('input[name="__palette"]'); - inputs.forEach(function(input) { - input.addEventListener('change', function() { - if (this.checked) { - var scheme = this.getAttribute('data-md-color-scheme'); - document.documentElement.setAttribute('data-md-color-scheme', scheme); - localStorage.setItem('theme-preference', scheme); - } - }); - }); - } - // Also keep the MutationObserver for manual changes - const observer = new MutationObserver(function(mutations) { - mutations.forEach(function(mutation) { - if (mutation.attributeName === 'data-md-color-scheme') { - const scheme = document.documentElement.getAttribute('data-md-color-scheme'); - if (scheme) { - localStorage.setItem('theme-preference', scheme); - } - } - }); - }); - observer.observe(document.documentElement, { - attributes: true, - attributeFilter: ['data-md-color-scheme'] - }); - } - - // Setup theme persistence (apply + observe) - function setupThemePersistence() { - applySavedTheme(); - observeThemeChanges(); - } - - // Initial setup - setupThemePersistence(); - - // Re-apply on every DOMContentLoaded (instant navigation) - document.addEventListener('DOMContentLoaded', setupThemePersistence); -})(); diff --git a/docs/operator-guide/.pages b/docs/operator-guide/.pages index f45b4a6a..e3b58f20 100644 --- a/docs/operator-guide/.pages +++ b/docs/operator-guide/.pages @@ -3,7 +3,7 @@ nav: - Systemd: systemd.md - SIMD: simd.md - Mimalloc: mimalloc.md - - Grafana plugin: grafana_plugin.md + - Grafana plugin: openobserve-plugin-for-grafana.md - Etcd maintenance: etcd.md - Etcd restore: etcd_restore.md - Nginx proxy: nginx_proxy.md diff --git a/docs/operator-guide/grafana_plugin.md b/docs/operator-guide/grafana_plugin.md deleted file mode 100644 index 80a01083..00000000 --- a/docs/operator-guide/grafana_plugin.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: OpenObserve Grafana Plugin -weight: 4450 -description: >- - Use the OpenObserve Grafana plugin to visualize logs and metrics if you're - already using Grafana. Includes setup steps for Kubernetes and non-K8s - installs. ---- -# OpenObserve Grafana Plugin - -## What is Grafana? - -Grafana is a popular open-source dashboarding and visualization platform. Originally designed for time series data, it has evolved into a comprehensive tool that can pull data from multiple sources and create unified dashboards for logs, metrics, and traces. It's widely used for monitoring and observability across organizations. - -## Do You Need Grafana with OpenObserve? - -**Short Answer: No, but you might want it.** - -OpenObserve comes with a powerful built-in GUI that handles all your visualization needs, including: - -- [Logs analysis and search](../features/logs.md) -- [Metrics monitoring](../features/metrics.md) -- [Distributed tracing](../features/distributed-tracing.md) -- [Frontend monitoring](../features/frontend.md) -- [Interactive dashboards](../user-guide/dashboards/dashboards-in-openobserve.md) -- [Alerting and notifications](../user-guide/alerts/alerts.md) - -**When to Use the Grafana Plugin:** - -You should consider using OpenObserve's Grafana plugin if you: - -1. **Already use Grafana** for other monitoring needs (e.g., Prometheus metrics) -2. **Have existing Grafana dashboards** you want to keep -3. 
**Need to consolidate** OpenObserve data with other data sources in a single Grafana instance - -!!! warning "Plugin Maintenance Status" - This Grafana plugin is not actively maintained. It may work with current Grafana and OpenObserve versions, but compatibility isn’t guaranteed. Test thoroughly before production use. For best results, use OpenObserve’s built-in visualizations. - -## Getting Started - -The following guide will walk you through installing and configuring the plugin in a Kubernetes environment. The steps can be adapted for non-Kubernetes deployments. - -**Quick Start:** If you are already familiar with grafana plugin installation, you can download the plugin from [here](https://zincsearch-releases.s3.us-west-2.amazonaws.com/zo_gp/zo_gp.tar.gz) and get started. Feel free to skip the configuration section, you can directly jump [here](#using-grafana-plugin) - -## Install Grafana - -Grafana requires a persistent store to store its data and configuration. While configuration can be stored in a configmap or secret, data needs to be stored in a database. Grafana supports sqlite, mysql and postgres. Most installations I have seen in the wild use a single node grafana installation using sqlite. I have also seen that many of these use a kubernetes `deployment` . - -If you are using a single node grafana installation using sqlite then you should use `statefulset` instead of `deployment` so you do not lose your data when the pod restarts. If you are using mysql/postgres then you can use `deployment` as the data is stored in the database. - -You would also need a `grafana.ini` config file to configure grafana. You can use the below minimalistic working grafana.ini file to start. You can add more configuration as needed. - - -### Configuration - -```ini title="grafana.ini" linenums="1" hl_lines="4 9" -[date_formats] -default_timezone = UTC -[server] -root_url = https://grafana.yourdomain.com - -[plugins] -enable_alpha = true -app_tls_skip_verify_insecure = false -allow_loading_unsigned_plugins = zinclabs_openobserve -``` - -`Line 4` should be updated with the root url of your grafana installation. This is the url that you will use to access grafana. e.g. `https://grafana.yourdomain.com` - -`Line 9` is the one that is important where we specify that grafana should use the unsigned plugin `zinclabs_openobserve`. This is the plugin that we will install using the init container in the statefulset. - -Once you have created the file, you can create a kubernetes secret using the below command. - - -```bash linenums="1" -kubectl create secret generic grafana-config --from-file=grafana.ini -``` - - -### Deployment - -Now let's install grafana. 
- -```yaml title="grafana_statefulset.yaml" linenums="1" -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: grafana -spec: - serviceName: "grafana" - replicas: 1 - selector: - matchLabels: - app: grafana - template: - metadata: - labels: - app: grafana - spec: - securityContext: - fsGroup: 2000 - runAsUser: 10000 - runAsGroup: 3000 - initContainers: - - name: openobserve-plugin-loader - image: wbitt/network-multitool - imagePullPolicy: IfNotPresent - command: - [ - "sh", - "-c", - "curl -o /var/lib/grafana/plugins/zo_gp.tar.gz https://zincsearch-releases.s3.us-west-2.amazonaws.com/zo_gp/zo_gp.tar.gz && cd /var/lib/grafana/plugins && tar -zxvf zo_gp.tar.gz", - ] - volumeMounts: - - name: grafana-base - mountPath: /var/lib/grafana - - name: grafana-plugins - mountPath: /var/lib/grafana/plugins - containers: - - name: grafana - image: grafana/grafana:latest - ports: - - containerPort: 3000 - name: grafana - volumeMounts: - - name: grafana-base - mountPath: /var/lib/grafana - - name: grafana-plugins - mountPath: /var/lib/grafana/plugins - - name: grafana-config - mountPath: /etc/grafana - volumes: - - name: grafana-base - persistentVolumeClaim: - claimName: grafana-base - - name: grafana-plugins - persistentVolumeClaim: - claimName: grafana-plugins - - name: grafana-config - secret: - defaultMode: 420 - secretName: grafana-config - volumeClaimTemplates: - - metadata: - name: grafana-base - spec: - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 1Gi - - metadata: - name: grafana-plugins - spec: - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 1Gi ---- -apiVersion: v1 -kind: Service -metadata: - name: grafana -spec: - ports: - - port: 3000 - targetPort: 3000 - selector: - app: grafana - -``` - -We are using an `init container` in this case to download and configure openobserve plugin for the grafana container. - - -Now let's install the statefulset using the below command. - -```bash linenums="1" -kubectl apply -f grafana_statefulset.yaml -``` - -## Using Grafana plugin - - -### Set up data source - -Once grafana starts you could go to the data sources section and search for `OpenObserve` - - - -Next let's add OpenObserve data source server details. - -You need to do following: - -1. Update URL -1. Enable Basic Auth -1. Provide user id and password for basic auth details. - -Once you have updated the above, click on `Save and Test` button. If everything is correct you should see a success message. - - - - -### Explore logs - -Now let's explore some logs. Click on Explore menu item on the left and select OpenObserve as the data source. - - - - - -Select appropriate: -1. organization -1. stream -1. time range - -and click on `Run Query` button. You should see the logs from the stream. - - - -You should now be able to see the results. - - - -If you want to explore metrics from OpenObserve in Grafana, you can set up OpenObserve as a Prometheus-compatible data source using an endpoint like https://api.openobserve.ai/api/org_name/prometheus. You do not need the plugin for this, as Grafana supports Prometheus natively. 
-
-
diff --git a/docs/operator-guide/index.md b/docs/operator-guide/index.md
index a73e4d2a..444a386f 100644
--- a/docs/operator-guide/index.md
+++ b/docs/operator-guide/index.md
@@ -3,7 +3,7 @@ Learn more:
 - [Systemd](systemd)
 - [SIMD](simd)
 - [Mimalloc](mimalloc)
-- [Grafana plugin](grafana_plugin)
+- [Grafana plugin](openobserve-plugin-for-grafana)
 - [Etcd maintenance](etcd)
 - [Etcd restore](etcd_restore)
 - [Nginx proxy](nginx_proxy)
diff --git a/docs/operator-guide/openobserve-plugin-for-grafana.md b/docs/operator-guide/openobserve-plugin-for-grafana.md
new file mode 100644
index 00000000..4fcc512e
--- /dev/null
+++ b/docs/operator-guide/openobserve-plugin-for-grafana.md
@@ -0,0 +1,211 @@
+---
+title: OpenObserve Plugin for Grafana
+weight: 4450
+description: >-
+  Use the OpenObserve plugin in Grafana to visualize logs and metrics if you are
+  already using Grafana. Includes setup steps for Kubernetes and non-K8s
+  installs.
+---
+# OpenObserve Plugin for Grafana
+This guide walks you through installing and configuring the OpenObserve plugin in Grafana within a Kubernetes environment. If you are not using Kubernetes, you can adapt these steps for your environment.
+
+## Overview
+Grafana is an open-source platform for creating dashboards and visualizations.
+OpenObserve provides its own interface for [logs](../features/logs.md), [metrics](../features/metrics.md), [tracing](../features/distributed-tracing.md), [frontend monitoring](../features/frontend.md), [dashboards](../user-guide/dashboards/dashboards-in-openobserve.md), and [alerting and notifications](../user-guide/alerts/alerts.md).
+
+The OpenObserve plugin for Grafana is an optional integration. It enables OpenObserve to be added as a Grafana data source, making it possible to use existing Grafana dashboards or combine OpenObserve data with other monitoring systems.
+
+!!! note "Plugin maintenance status"
+    OpenObserve actively maintains this Grafana plugin.
+
+!!! note "Quick start"
+    If you are already familiar with Grafana plugin installation, download the plugin from [here](https://zincsearch-releases.s3.us-west-2.amazonaws.com/zo_gp/zo_gp.tar.gz) and skip ahead to [Use the OpenObserve plugin in Grafana](#use-the-openobserve-plugin-in-grafana).
+
+
+## Install the OpenObserve plugin in Grafana
+
+??? "Prerequisite"
+    **Storage Requirements**
+ Grafana requires persistent storage for two things: configuration and data. + + - Configuration can be stored in a ConfigMap or Secret. + - Data must be stored in a database. Grafana supports SQLite, MySQL, and PostgreSQL. + Most Grafana installations run on a single node with SQLite. + + + **Deployment method**
+ In Kubernetes, you deploy applications using either a Deployment or StatefulSet. Since Grafana needs persistent storage for its data, your choice depends on which database you use: + + - **SQLite** stores data files inside the pod itself. Use a `StatefulSet` to preserve this data when the pod restarts. + - **MySQL or PostgreSQL** store data in an external database. Use a `Deployment` since the data persists outside the pod. + + !!! note "Note" + This guide uses a StatefulSet with SQLite. + +??? "Step 1: Create the configuration file" + Create a `grafana.ini` file with the following configurations. + + ```ini title="grafana.ini" linenums="1" hl_lines="4 9" + [date_formats] + default_timezone = UTC + [server] + root_url = https://grafana.yourdomain.com + + [plugins] + enable_alpha = true + app_tls_skip_verify_insecure = false + allow_loading_unsigned_plugins = openobserve + ``` + + **Important**: + + - In `Line 4`, you must update the `root_url` with the root URL of your Grafana installation. This is the URL that you will use to access grafana. For example, `https://grafana.yourdomain.com`. + - In `Line 9`, you must specify that Grafana should use the unsigned plugin `openobserve`.You will install this plugin using the `init container` in the `statefulset`. + + +??? "Step 2: Create a Kubernetes secret" + Once you have created the file, you can create a Kubernetes secret using the below command. + + ```bash linenums="1" + kubectl create secret generic grafana-config --from-file=grafana.ini + ``` + This secret is mounted into the Grafana container in the following StatefulSet YAML. + +??? "Step 3: Create `grafana_statefulset.yaml`" + This file includes an init container that downloads and configures the `openobserve` plugin for the Grafana container. 
+ + ```yaml title="grafana_statefulset.yaml" linenums="1" + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: grafana + spec: + serviceName: "grafana" + replicas: 1 + selector: + matchLabels: + app: grafana + template: + metadata: + labels: + app: grafana + spec: + securityContext: + fsGroup: 2000 + runAsUser: 10000 + runAsGroup: 3000 + initContainers: + - name: openobserve-plugin-loader + image: wbitt/network-multitool + imagePullPolicy: IfNotPresent + command: + [ + "sh", + "-c", + "curl -o /var/lib/grafana/plugins/zo_gp.tar.gz https://zincsearch-releases.s3.us-west-2.amazonaws.com/zo_gp/zo_gp.tar.gz && cd /var/lib/grafana/plugins && tar -zxvf zo_gp.tar.gz", + ] + volumeMounts: + - name: grafana-base + mountPath: /var/lib/grafana + - name: grafana-plugins + mountPath: /var/lib/grafana/plugins + containers: + - name: grafana + image: grafana/grafana:latest + ports: + - containerPort: 3000 + name: grafana + volumeMounts: + - name: grafana-base + mountPath: /var/lib/grafana + - name: grafana-plugins + mountPath: /var/lib/grafana/plugins + - name: grafana-config + mountPath: /etc/grafana + volumes: + - name: grafana-base + persistentVolumeClaim: + claimName: grafana-base + - name: grafana-plugins + persistentVolumeClaim: + claimName: grafana-plugins + - name: grafana-config + secret: + defaultMode: 420 + secretName: grafana-config + volumeClaimTemplates: + - metadata: + name: grafana-base + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + - metadata: + name: grafana-plugins + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + --- + apiVersion: v1 + kind: Service + metadata: + name: grafana + spec: + ports: + - port: 3000 + targetPort: 3000 + selector: + app: grafana + + ``` +??? "Step 4: Deploy the `openobserve` plugin" + Run the following `kubectl apply` command to deploy the plugin: + ```bash linenums="1" + kubectl apply -f grafana_statefulset.yaml + ``` + +## Use the OpenObserve plugin in Grafana + +??? "Step 1: Set up data source" + From the Grafana interface, go to the **Data Sources** section and search for `openobserve`. + ![openobserve-plugin](../images/openobserve-plugin.png) + +??? "Step 2: Add OpenObserve data source server details" + 1. In the data source configuration screen, update the following fields: + + - In the **HTTP** section, add the **URL**. + - In the **Auth** section, enable **Basic Auth** toggle. + - Under **Basic Auth Details**, add the User ID and password. + 2. Click **Save and Test** to save the changes.
+    ![openobserve-data-source-config](../images/openobserve-data-source-config.png)
+    If everything is correct, you should see a success message.
+
+??? "Step 3: Explore logs"
+    1. Click the **Explore** menu and select `openobserve` as the data source.
+    2. Select the appropriate organization, stream, and time range.
+    3. Click **Run Query**.
+ You should now be able to see the results. + ![alt text](../images/explore-logs-openobserve-plugin.png) + + +!!! note "Note" + If you want to explore metrics from OpenObserve in Grafana, you can set up OpenObserve as a Prometheus-compatible data source using an endpoint like [https://api.openobserve.ai/api/org_name/prometheus](https://api.openobserve.ai/api/org_name/prometheus). You do not need the plugin for this, as Grafana supports Prometheus natively. + + + + + + + + + + + + + + + + diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css index 8d9e5924..5575b368 100644 --- a/docs/stylesheets/extra.css +++ b/docs/stylesheets/extra.css @@ -2,18 +2,6 @@ .md-search__close { display: none !important; } -/* === FINAL SECONDARY SIDEBAR POSITIONING OVERRIDE === */ -.md-sidebar--secondary, -.md-sidebar--secondary[data-md-component="sidebar"], -.md-sidebar--secondary[data-md-state], -.md-sidebar--secondary[data-md-state="lock"] { - position: sticky !important; - top: 8rem !important; - transform: none !important; - transition: none !important; - will-change: auto !important; - z-index: 200 !important; -} /* STRONG FINAL MOBILE SCROLL FIX (highest priority): Lock the sidebar inner to the viewport and force the scrollwrap to handle @@ -786,6 +774,64 @@ html .md-sidebar--primary { display: none !important; } +/* ------------------------------------------------------------------ */ +/* Mirror primary sidebar height/scroll behavior for secondary only */ +/* Only height/scroll/positioning properties are applied to avoid */ +/* changing colors, spacing, or layout in other views. */ +/* ------------------------------------------------------------------ */ +.md-sidebar--secondary, +.md-sidebar--secondary[data-md-component="sidebar"], +.md-sidebar--secondary[data-md-state], +.md-sidebar--secondary[data-md-state="lock"] { + /* Match primary's sticky positioning and footer clearance */ + position: sticky !important; + top: 8rem !important; + transform: none !important; + transition: none !important; + will-change: auto !important; + z-index: 200 !important; + + /* Content-based height with same footer clearance as primary */ + height: auto !important; + min-height: auto !important; + max-height: calc(100vh - 9rem) !important; + overflow: hidden !important; + display: block !important; +} + +/* Ensure inner container is content-based like primary */ +.md-sidebar--secondary .md-sidebar__inner { + height: auto !important; + min-height: auto !important; + display: block !important; + overflow: hidden !important; +} + +/* Provide a scrollwrap that matches primary's behavior and clearance */ +.md-sidebar--secondary .md-sidebar__scrollwrap { + overflow-y: auto !important; + padding: 0.25rem 0 1rem !important; + overscroll-behavior: contain !important; + width: 100% !important; + padding-right: 0 !important; + margin-right: 0 !important; + box-sizing: border-box !important; + /* Allow content-based height but limit to viewport minus footer/header */ + height: auto !important; + max-height: calc(100vh - 12rem) !important; +} + +/* Remove potential masking pseudo-elements that could hide final items */ +.md-sidebar--secondary .md-sidebar__scrollwrap:after { + display: none !important; +} +.md-sidebar--secondary:before, +.md-sidebar--secondary:after, +.md-sidebar--secondary .md-sidebar__inner:before, +.md-sidebar--secondary .md-sidebar__inner:after { + display: none !important; +} + .md-header { position: fixed !important; top: 70px !important; @@ -1457,7 +1503,7 @@ nav.md-grid { width: 100%; 
margin-left: auto; margin-right: auto; - padding-left: 1rem; /* px-4 */ + padding-left: 1rem; /* px-4 */ padding-right: 1rem; display: flex; align-items: center; @@ -1467,7 +1513,7 @@ nav.md-grid { @media (min-width: 640px) { .md-main__inner.md-grid { max-width: 640px; - padding-left: 1.5rem; /* sm:px-6 */ + padding-left: 1.5rem; /* sm:px-6 */ padding-right: 1.5rem; } } @@ -1483,7 +1529,7 @@ nav.md-grid { @media (min-width: 1024px) { .md-main__inner.md-grid { max-width: 1024px; - padding-left: 2rem; /* lg:px-8 */ + padding-left: 2rem; /* lg:px-8 */ padding-right: 2rem; justify-content: space-between; /* lg:justify-between */ } @@ -1493,7 +1539,7 @@ nav.md-grid { @media (min-width: 1280px) { .md-main__inner.md-grid { max-width: 1480px; - padding-left: 2.5rem; /* xl:px-10 */ + padding-left: 2.5rem; /* xl:px-10 */ padding-right: 2.5rem; } } @@ -1501,6 +1547,30 @@ nav.md-grid { /* 2xl (≥1536px) */ @media (min-width: 1536px) { .md-main__inner.md-grid { - max-width: 1736px; + max-width: 70rem; + } +} + +/* Strong mobile hide: ensure secondary sidebar stays hidden on small screens + This rule targets the same selector variants used elsewhere and is placed + at the end of the stylesheet with !important to avoid accidental overrides. */ +@media (max-width: 76.24em) { + .md-sidebar--secondary, + .md-sidebar--secondary[data-md-component="sidebar"], + .md-sidebar--secondary[data-md-state], + .md-sidebar--secondary[data-md-state="lock"] { + display: none !important; + visibility: hidden !important; + pointer-events: none !important; + transform: none !important; /* neutralize any transforms */ + } + + /* Also make sure secondary inner containers don't show up */ + .md-sidebar--secondary .md-sidebar__inner, + .md-sidebar--secondary .md-sidebar__scrollwrap, + .md-nav--secondary { + display: none !important; + visibility: hidden !important; + pointer-events: none !important; } } diff --git a/docs/user-guide/.pages b/docs/user-guide/.pages index 7867266c..acb26453 100644 --- a/docs/user-guide/.pages +++ b/docs/user-guide/.pages @@ -5,6 +5,7 @@ nav: - Streams: streams - Ingestion: ingestion - Pipelines: pipelines + - Traces: traces - Alerts: alerts - Dashboards: dashboards - Actions: actions @@ -15,6 +16,7 @@ nav: - Management: management - Profile: profile - Performance: performance + - Federated Search: federated-search - Best Practices: best-practices - Migration: migration diff --git a/docs/user-guide/federated-search/.pages b/docs/user-guide/federated-search/.pages new file mode 100644 index 00000000..30ec3685 --- /dev/null +++ b/docs/user-guide/federated-search/.pages @@ -0,0 +1,5 @@ +nav: + +- Federated Search Overview: index.md +- How to Use Federated Search: how-to-use-federated-search.md +- Federated Search Architecture: federated-search-architecture.md diff --git a/docs/user-guide/federated-search/federated-search-architecture.md b/docs/user-guide/federated-search/federated-search-architecture.md new file mode 100644 index 00000000..e0aad3bc --- /dev/null +++ b/docs/user-guide/federated-search/federated-search-architecture.md @@ -0,0 +1,146 @@ +--- +title: Federated Search in OpenObserve - Architecture +description: Technical explanation of OpenObserve deployment modes, normal cluster query execution, and how federated search works across single and multiple clusters. +--- +This document explains the technical architecture of OpenObserve deployments, how queries execute in normal clusters, and how [federated search](../) coordinates queries across clusters in a supercluster. 
+
+> This feature is available in Enterprise Edition.
+
+## Understanding OpenObserve deployments
+Before diving into how federated search works, you need to understand how OpenObserve can be deployed. OpenObserve scales from a single machine to a globally distributed infrastructure.
+
+## Single node deployment
+The simplest deployment: one instance of OpenObserve runs all functions on one machine. Data is stored locally, and the node processes queries directly. This works for testing or small deployments.
+
+## Single cluster deployment
+When you need scale, multiple specialized nodes work together as a cluster. Each node type has a specific role:
+
+- **Router**: Entry point that forwards queries to queriers
+- **Querier**: Processes queries in parallel with other queriers
+- **Ingester**: Receives and stores data in object storage
+- **Compactor**: Optimizes files and enforces retention
+- **Alertmanager**: Executes alerts and sends notifications
+
+A single cluster handles more data and provides higher availability than a single node.
+
+## Supercluster deployment
+When you need to operate across multiple geographical regions, multiple clusters connect as a supercluster. This is where federated search becomes relevant.
+
+!!! note "Key point"
+    Each cluster in a supercluster operates independently with its own data storage. Data ingested into one cluster stays in that cluster. However, configuration metadata synchronizes across all clusters, allowing unified management.
+
+## Region and cluster hierarchy
+In a supercluster, regions organize clusters geographically. A region may contain one or more clusters.
+
+**Example:** +
+
+```bash
+Region: us-test-3
+  ├─ Cluster: dev3
+  └─ Cluster: dev3-backup
+
+Region: us-test-4
+  └─ Cluster: dev4
+```
+Each cluster has independent data storage. Data stays where it was ingested.
+
+## How queries execute
+Understanding query execution helps you understand how federated search works, whether you are querying one cluster or many.
+
+### Normal cluster query execution
+This section explains how any OpenObserve cluster processes queries internally, regardless of whether it is a standalone cluster or part of a supercluster. Understanding this internal process is essential because:
+
+- This is how standalone clusters work
+- This is what happens when you query your current cluster in a supercluster without federated search coordination
+- During federated search, each individual cluster uses this same internal process to search its own data
+
+When a cluster receives a query:
+
+1. The router forwards the query to an available querier.
+2. That querier becomes the leader querier.
+3. The leader querier parses the SQL, identifies the relevant data files, and creates an execution plan.
+4. The leader querier distributes the work among the available queriers. These queriers become worker queriers.
+5. All worker queriers search their assigned files in parallel.
+6. The worker queriers send their results to the leader querier.
+7. The leader querier merges the results and returns the final answer.
+
+### Query execution for your current cluster in a supercluster
+Your current cluster is the cluster you are logged into. Selecting your current cluster from the Region dropdown is not federated search.
+
+For example, if you are logged into Cluster A and you select Cluster A from the Region dropdown, the query executes using the normal cluster query execution process described above. No cross-cluster communication occurs, and no federated search coordination is needed.
+
+### Federated search for one different cluster in a supercluster
+When you select a different cluster from the Region dropdown, that is, a cluster other than the one you are logged into, federated search coordination is used:
+
+ +**Step 1: Coordination setup** +
+Your current cluster becomes the leader cluster. +
+ +**Step 2: Query distribution** +
+The leader cluster sends the query to the selected cluster via gRPC.
+
+ +**Step 3: Query processing** +
+The selected cluster processes the query using its normal cluster query execution process. +
+ +**Step 4: Result return** +
+The selected cluster sends its results back to the leader cluster. +
+ +**Step 5: Result presentation** +
+The leader cluster displays the results. + +### Federated search for multiple clusters in a supercluster + +When you select no cluster or multiple clusters from the Region dropdown, federated search extends the query across all selected clusters: +
+ +**Step 1: Coordination setup** +
+Your current cluster becomes the leader cluster. The leader cluster identifies which of the selected clusters, or all clusters if none are selected, contain data for the queried stream. These clusters become the worker clusters.
+
+ +**Step 2: Query distribution** +
+The leader cluster sends the query to all worker clusters via gRPC. All clusters now have the same query to execute. +
+ +**Step 3: Parallel processing** +
+Each cluster processes the query using its normal cluster query execution process. The leader cluster searches its own data if it contains data for that stream. Worker clusters search their own data. All processing happens simultaneously. +
+ +**Step 4: Result aggregation** +
+Each cluster aggregates its own results internally using its leader querier and worker queriers. Worker clusters send their aggregated results to the leader cluster. The leader cluster merges all results from all clusters and returns the unified response.
+
+## Metadata synchronization
+In a supercluster, clusters share configuration and schema information in real time while keeping the actual data separate. This synchronization happens via NATS, a messaging system that coordinates communication between clusters.
+
+While stream schemas are synchronized across all clusters in real time, the actual data for a stream only exists in the cluster or clusters where it was ingested.
+
+| **Synchronized across clusters** | **NOT synchronized (stays local)** |
+|----------------------------------|-----------------------------------|
+| Schema definitions | Log data |
+| User-defined functions | Metric data |
+| Dashboards and folders | Trace data |
+| Alerts and notifications | Raw ingested data |
+| Scheduled tasks and reports | Parquet files and WAL files |
+| User and organization settings | Search indices |
+| System configurations | |
+| Job metadata | |
+| Enrichment metadata | |
+
+This design maintains data residency compliance while enabling unified configuration management.
+
+## Limitations
+
+**No cluster identification in results:** Query results do not indicate which cluster provided specific data. To identify the source, query each cluster individually.
\ No newline at end of file
diff --git a/docs/user-guide/federated-search/how-to-use-federated-search.md b/docs/user-guide/federated-search/how-to-use-federated-search.md
new file mode 100644
index 00000000..437181fb
--- /dev/null
+++ b/docs/user-guide/federated-search/how-to-use-federated-search.md
@@ -0,0 +1,76 @@
+---
+title: Federated Search in OpenObserve - How-to Guide
+description: Step-by-step instructions for querying your current cluster and performing federated searches across one or more clusters in a supercluster setup.
+---
+This document explains how to query your current cluster and how to perform [federated searches](../) across one or more different clusters in a supercluster setup.
+
+> This feature is available in Enterprise Edition.
+
+## How to query your current cluster in a supercluster
+
+Query your current cluster when you know the data is in your cluster or when you need the fastest query performance.
+
+!!! note "What you need to know"
+
+    - This is not federated search.
+    - You are querying the current cluster.
+    - No cross-cluster communication occurs.
+    - Results will include data from the current cluster only.
+
+**Steps:**
+![current-cluster-query](../../images/current-cluster-query.png)
+
+1. Navigate to the **Logs** page.
+2. Enter your query in the SQL Query Editor.
+3. Select a time range.
+4. Select your current cluster from the **Region** dropdown.
+5. Select **Run query**.
+
+> For a detailed explanation, see **Normal cluster query execution** on the [Federated Search Architecture](../federated-search-architecture/) page.
+
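+If you prefer to run the same query against your current cluster through the API, the sketch below uses OpenObserve's `_search` endpoint. The host, organization, credentials, and stream name are placeholder assumptions; verify the details, including the `type=logs` parameter, against your deployment. A plain `_search` call like this involves no cross-cluster coordination.
+
+```bash
+# Search the "default" log stream on the cluster you are logged into.
+# start_time and end_time are Unix epoch timestamps in microseconds.
+curl -s -u 'root@example.com:password' \
+  -H 'Content-Type: application/json' \
+  'https://your-cluster.example.com/api/your_org/_search?type=logs' \
+  -d '{
+    "query": {
+      "sql": "SELECT * FROM \"default\" ORDER BY _timestamp DESC",
+      "start_time": 1700000000000000,
+      "end_time": 1700003600000000,
+      "from": 0,
+      "size": 10
+    }
+  }'
+```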
+ +**Result**
+Data from the selected cluster only.
+![current-cluster-query-result](../../images/current-cluster-query-result.png)
+
+
+## How to query one or more different clusters in a supercluster
+
+Use federated search when you need data from one or more clusters other than your current cluster.
+
+!!! note "What you need to know"
+
+    - Multiple clusters will process your query simultaneously.
+    - Results will combine data from all selected clusters.
+
+**Steps:**
+
+![federated-search](../../images/federated-search.png)
+
+1. Navigate to the **Logs** page.
+2. Enter your query in the SQL Query Editor.
+3. Select a time range.
+4. Leave the **Region** dropdown unselected, or select multiple clusters.
+5. Select **Run query**.
+
+> For a detailed explanation, see **Federated search for one different cluster** and **Federated search for multiple clusters** in the [Federated Search Architecture](../federated-search-architecture/) page.
+
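+For example, an aggregation such as the following returns counts computed over the data of every selected cluster; the `default` stream and `level` field are illustrative:
+
+```sql
+SELECT level, COUNT(*) AS total FROM "default" GROUP BY level
+```
+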
+ +**Result**
+Combined data from all selected clusters.
+![federated-search-result](../../images/federated-search-result.png)
+
+## Region selection reference
+
+Use this quick reference to understand how region selection affects query execution:
+
+| **Region/Cluster Selection** | **Behavior** | **Query Type** | **Communication** |
+|------------------------------|--------------|----------------|-------------------|
+| None selected | Queries all clusters | Federated search | Cross-cluster via gRPC |
+| Your current cluster selected | Queries only your current cluster | Normal cluster query (NOT federated) | Internal only, no cross-cluster |
+| One different cluster selected (same region) | Queries only that cluster | Federated search | Cross-cluster via gRPC |
+| One different cluster selected (different region) | Queries only that cluster | Federated search | Cross-cluster via gRPC |
+| Multiple clusters selected | Queries all selected clusters | Federated search | Cross-cluster via gRPC |
+
+
+**Next step**
+
+- [Federated Search Architecture](../federated-search-architecture/)
\ No newline at end of file
diff --git a/docs/user-guide/federated-search/index.md b/docs/user-guide/federated-search/index.md
new file mode 100644
index 00000000..9b80aae6
--- /dev/null
+++ b/docs/user-guide/federated-search/index.md
@@ -0,0 +1,64 @@
+---
+title: Federated Search in OpenObserve - Overview
+description: Learn what federated search is, key concepts, prerequisites, and when to use it.
+---
+This document provides an overview of federated search in OpenObserve.
+
+> This feature is available in Enterprise Edition.
+
+## What is federated search?
+
+Federated search enables querying across multiple OpenObserve clusters that are connected as a supercluster, all from one interface.
+
+ +Without federated search, investigating issues across regions requires logging into each cluster separately, running the same query multiple times, and manually combining results. This wastes time during critical incidents. +With federated search, you query once and receive unified results from all clusters. + +!!! note "Prerequisites" + + - OpenObserve Enterprise edition + - Multiple clusters configured as a supercluster + +## How to verify if your environment is in a supercluster +Check whether the Region dropdown appears on the Logs page. If visible, your clusters are configured as a supercluster. +![federated-search](../../images/federated-search.png) + +## Key concepts in federated search + +Before using federated search, understand these core concepts: + +- **Node:** A single instance of OpenObserve running on one machine or server. +- **Cluster:** A group of OpenObserve nodes working together to handle data ingestion, storage, and querying. Each cluster has its own data storage. +- **Region:** A geographical location that contains one or more clusters. For example, Region us-east may contain cluster prod-east-1 and cluster prod-east-2. +- **Supercluster:** Multiple OpenObserve clusters across different geographical regions connected to work as a unified system. This enables federated search capability. +- **Data distribution:** Data ingested into a specific cluster stays in that cluster's storage. It is not replicated to other clusters. This ensures data residency compliance. +- **Metadata synchronization:** Configuration information such as schemas, dashboards, and alerts synchronize across all clusters in a supercluster. This allows unified management while keeping data distributed. +- **Federated search:** The capability to query data across different clusters in a supercluster. Federated search activates when you: + + - Select one or more different clusters, meaning clusters other than your current cluster: The selected clusters' data is searched via federated coordination. + - Select none: All clusters search simultaneously via federated coordination and results are combined. + +> **Important**: Querying your current cluster uses normal cluster query execution, not federated search architecture. + +> For detailed technical explanations of deployment modes, architecture, and how queries execute, see the [Federated Search Architecture](../federated-search-architecture/) page. 
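+To tie these concepts together, a supercluster might be laid out as follows; the region and cluster names are purely illustrative:
+
+```json
+{
+  "us-east": ["prod-east-1", "prod-east-2"],
+  "eu-west": ["prod-west-1"]
+}
+```
+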
+ +## When to use federated search + +| **Use case** | **Cluster selection** | **Reason** | +|--------------|----------------------|------------| +| Data is in one specific different cluster | Select that different cluster | Access only that cluster's data via federated search | +| Multi-region deployments | Select none or multiple clusters | Query all regions at once via federated search | +| Centralized search across teams | Select none or multiple clusters | Unified visibility across all clusters via federated search | + + +## When not to use federated search + +| **Use case** | **Cluster selection** | **Reason** | +|--------------|----------------------|------------| +| Data is in your current cluster | Select your current cluster | Uses normal cluster query without cross-cluster communication | + + +**Next steps** + +- [How to Use Federated Search](../how-to-use-federated-search/) +- [Federated Search Architecture](../federated-search-architecture/) \ No newline at end of file diff --git a/docs/user-guide/management/.pages b/docs/user-guide/management/.pages index 527fc58f..28307121 100644 --- a/docs/user-guide/management/.pages +++ b/docs/user-guide/management/.pages @@ -9,4 +9,4 @@ nav: - Cipher Keys: cipher-keys.md - Nodes in OpenObserve: nodes.md - SSO Domain Restrictions: sso-domain-restrictions.md - - Regex Patterns: regex-patterns.md \ No newline at end of file + - Sensitive Data Redaction: sensitive-data-redaction.md \ No newline at end of file diff --git a/docs/user-guide/management/alert-destinations.md b/docs/user-guide/management/alert-destinations.md index a37cde9a..1c5ab221 100644 --- a/docs/user-guide/management/alert-destinations.md +++ b/docs/user-guide/management/alert-destinations.md @@ -9,6 +9,7 @@ In OpenObserve, **Destinations** define how and where alert notifications are de The **Destinations** section provides three configuration options. Select a destination type to view configuration instructions. === "Webhook" + ## Webhook When the alert gets triggered, OpenObserve will send alert data to an external system using an HTTP POST request. Use this option to integrate with services that support webhook ingestion. You can customize the request payload using templates to match the format expected by the target system. ??? "View use cases." @@ -16,14 +17,35 @@ The **Destinations** section provides three configuration options. Select a dest - Create incident tickets in **Jira Service Management** or **ServiceNow**. - Send notifications to **Slack** or **Microsoft Teams channels**. + + ### Prerequisites + ??? "Set up an alert template." + This allows you to define the content and layout of the alert message. + + 1. In OpenObserve, go to **Management > Templates**. + 2. Click **Add Template** to create a new template. + 3. In the template creation screen: + ![Alert Destination -Webhook](../../images/webhook-template-for-alert.png) + + - Enter a name for the template. + - Select **Webhook** as the template type. + - Fill in the **Body** fields. + + Body Example: + ``` + { + "text": "{alert_name} is active" + } + + ``` - **Steps to Configure Webhooks as Alert Destination:** + **Steps to configure webhooks as alert destination** 1. Go to **Management > Alert Destinations**. 2. In the **Add Destination** page, click **Webhook**. 3. Fill in the following details: ![Add Destinations](../../images/webook-as-alert-destination.png) - + - **Name**: Enter a descriptive name for the Webhook destination. For example, SlackNotifications. 
Note that characters such as `,`, `:`, `?`, `/`, `#`, and `spaces` are not allowed. - **Template**: Choose a predefined alert message template from the dropdown menu. - **URL**: Specify the Webhook endpoint URL where notifications will be sent. For example, `https://hooks.slack.com/services/T02QBH105PF/B04C7NLLLRE/HY3fXf123` @@ -37,6 +59,7 @@ The **Destinations** section provides three configuration options. Select a dest 6. Click **Save** to create the Webhook destination. === "Email" + ## Email When the alert gets triggered, OpenObserve will send alert notifications to one or more email addresses using SMTP. Use this option when email is the preferred channel for receiving alerts. You must configure the email delivery settings under your OpenObserve SMTP setup. The alert payload can be formatted using a predefined template. ??? "View use cases." @@ -45,7 +68,7 @@ The **Destinations** section provides three configuration options. Select a dest - Notify on-call team members or distribution lists. - Route alerts to incident mailboxes used by helpdesk. - **Prerequisites:** + ### Prerequisites ??? "1. Set up an email account for sending alerts." To send email alerts using Gmail SMTP, you must create an App Password. Follow these steps: @@ -123,9 +146,9 @@ The **Destinations** section provides three configuration options. Select a dest 4. Click **Save**. After the user is added, they become eligible to receive email alerts. - **Steps to Configure Emails as Alert Destination:**
+    ### Steps to configure emails as alert destination
    ![Alert Destination](../../images/alert-email-destination.png)
-    
+
    1. Go to **Management** > **Alert Destinations**.
    2. In the **Add Destination** page, click **Email**.
    3. Enter a name for the destination.
@@ -136,20 +159,46 @@ The **Destinations** section provides three configuration options. Select a dest
    This creates the email as alert destination.
=== "Actions"
-    When an alert gets triggered, OpenObserve executes a Real-time Action script. Use this destination type when the alert data needs to be processed or routed using custom logic.Action scripts are stateful. They can retain values across executions, enabling more advanced workflows than webhook or email destinations.
+    ## Actions
+    When an alert gets triggered, OpenObserve executes a Real-time Action script. Use this destination type when the alert data needs to be processed or routed using custom logic. Action scripts are stateful. They can retain values across executions, enabling more advanced workflows than webhook or email destinations.
    ??? "View use cases."
-        For example, you can use this destination to:
-
-        - Send the alert to Slack, and also ingest a structured copy of the alert into a custom stream in your organization
-        - Track how often a specific alert has triggered, then write aggregated metrics, such as trigger count per hour, to a stream for trend analysis.
+        For example, you can use this destination to:
+
+        - Send the alert to Slack, and also ingest a structured copy of the alert into a custom stream in your organization
+        - Track how often a specific alert has triggered, then write aggregated metrics, such as trigger count per hour, to a stream for trend analysis.

-    **Prerequisites:**
+    ### Prerequisites

-    1. Create the real-time action script as per your requirement. For more details, visit the [Create and Use Real-time Actions](../../actions/create-and-use-real-time-actions/) page.
-    2. Create the alert template.
-
-    **Steps to Configure Actions as Alert Destination:**
+    ??? "1. Create the real-time action"
+        Create the real-time action script as per your requirement. For more details, visit the [Create and Use Real-time Actions](../../actions/create-and-use-real-time-actions/) page.
+    ??? "2. Create the alert template"
+        When you configure an Action as the alert destination, OpenObserve passes the alert data to your Real-time Action script through a template.
+        The template defines the structure of the alert payload that your Python script will receive as the `data` argument in its `main()` function.
+
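+        For orientation, the following is a minimal, hypothetical sketch of such a script. It assumes `data` carries the JSON template fields described next; the exact shape of `data` may vary by deployment:
+
+        ```python
+        import json
+
+        # Hypothetical sketch: reads fields from the alert template payload.
+        def main(data):
+            # Accept either a parsed dict or a raw JSON string.
+            alert = json.loads(data) if isinstance(data, str) else data
+            # Apply custom routing or processing logic here.
+            return f"{alert.get('alert_name')} fired on {alert.get('stream_name')}"
+        ```
+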
+        **Note:** Unlike email or webhook templates (which are meant for human-readable or HTTP payload formatting), this template is meant to be machine-readable.
+        Hence, it should be a valid JSON object that matches how your script expects to read alert fields.
+
+        You can create the template from **Management > Templates > Add Template > Webhook** (because Action templates also use JSON structure).
+
+        Use the following structure as an example:
+
+        {
+            "alert_name": "{alert_name}",
+            "alert_type": "{alert_type}",
+            "stream_name": "{stream_name}",
+            "org_name": "{org_name}",
+            "alert_period": "{alert_period}",
+            "alert_operator": "{alert_operator}",
+            "alert_threshold": "{alert_threshold}",
+            "alert_start_time": "{alert_start_time}",
+            "alert_end_time": "{alert_end_time}",
+            "alert_trigger_time": "{alert_trigger_time}"
+        }
+        This ensures that when the alert triggers, your real-time Action script receives this JSON as `data`.
+        Your Python script can then parse these fields directly.
+
+    ### Steps to configure actions as alert destination
    ![Action Destination](../../images/action-as-destination.png)

    1. Go to **Management > Alert Destinations**.
diff --git a/docs/user-guide/management/audit-trail.md b/docs/user-guide/management/audit-trail.md
index 3aedcef5..f83c2934 100644
--- a/docs/user-guide/management/audit-trail.md
+++ b/docs/user-guide/management/audit-trail.md
@@ -11,40 +11,29 @@ description: >-
## What is Audit Trail
Audit Trail records user actions across all organizations in OpenObserve. It captures non-ingestion API calls and helps you monitor activity and improve security.
-## Who can access it
-All Enterprise Edition users with access to the `_meta` organization can use Audit Trail.
+!!! note "Who can access"
+    All Enterprise Edition users with access to the `_meta` organization can use Audit Trail.
-## Where to find it
-Audit events are published into the `audit` stream under the `_meta` organization.
+!!! note "Where to find it"
+    Audit events are published into the `audit` stream under the `_meta` organization.
## Configuration
-To enable and configure Audit Trail, set the following environment variables:
+| Environment Variable | Description | Default |
+| --------------------------- | ---------------------------------------------------------------- | ------- |
+| `O2_AUDIT_ENABLED` | Enables audit logging | `false` |
+| `O2_AUDIT_BATCH_SIZE` | Number of audit records to batch before publishing | `500` |
+| `O2_AUDIT_PUBLISH_INTERVAL` | Interval in seconds after which unpublished audits are published | `600` |
-1. `O2_AUDIT_ENABLED`:
-
-    - **Description:** Enables audit logging
-    - **Default:** `false`
-
-2. `O2_AUDIT_BATCH_SIZE`:
-
-    - **Description:** Number of audit records to batch before publishing
-    - **Default:** `500`
-
-3. `O2_AUDIT_PUBLISH_INTERVAL`:
-
-    - **Description:** Interval in seconds after which unpublished audits are published
-    - **Default:** `600`
## How it works
-When Audit Trail is enabled, OpenObserve collects details of every non-ingestion API call made by users across all organizations. These events are stored temporarily in memory. Once the number of events reaches the batch size or the publish interval is reached, they are sent to the `audit` stream in the `_meta` organization. From there, you can view, query, or use them in dashboards and alerts.
- -## Example -The following example shows a captured audit event from the `audit` stream: -![audit trail screenshot](../../images/audit.webp) - -## Use case +When audit logging is enabled using the `O2_AUDIT_ENABLED` environment variable, OpenObserve collects details of every non-ingestion API call made by users across all organizations. These events are stored temporarily in memory. Once the number of events reaches the batch size or the publish interval is reached, they are sent to the `audit` stream in the `_meta` organization. From there, you can view, query, or use them in dashboards and alerts. -Because audit events are stored in a log stream, you can: +!!! note "Example" + The following example shows a captured audit event from the `audit` stream: + ![audit-trail](../../images/audit-trail.png) + +!!! note "Use cases" + Because audit events are stored in a log stream, you can: -- Build dashboards to track user activity -- Configure alerts to detect unusual trends \ No newline at end of file + - Build dashboards to track user activity + - Configure alerts to detect unusual trends \ No newline at end of file diff --git a/docs/user-guide/management/cipher-keys.md b/docs/user-guide/management/cipher-keys.md index f55107bd..0690d786 100644 --- a/docs/user-guide/management/cipher-keys.md +++ b/docs/user-guide/management/cipher-keys.md @@ -13,12 +13,13 @@ The **Cipher Keys** feature is essential for handling sensitive data stored in e Follow these steps to create and configure **Cipher Keys** in OpenObserve: -??? "Step 1: Navigate to the Cipher Keys Section" +??? "Step 1: Navigate to the Cipher Keys section" 1. From the top navigation bar, click the gear icon to open the **Management** page. 2. Select **Cipher Keys**. ![cipher-keys](../../images/cipher-keys.png) + ??? "Step 2: Create a New Cipher Key" @@ -47,163 +48,162 @@ Follow these steps to create and configure **Cipher Keys** in OpenObserve: ![Cipher keys encryption mechanism](../../images/cipher-keys-encryption-mechanism.png) After you have filled in all the details, click **Save**. Your new **Cipher Key** is now available to use in OpenObserve. + ![cipher-keys-page](../../images/cipher-keys-page.png) --- ## Query Encrypted Logs with `decrypt` and `decrypt_path` -You can retrieve original values from encrypted logs using the `decrypt()` and `decrypt_path()` functions. These functions operate at query time and do not write decrypted data to disk. +To retrieve original values from encrypted logs, use the `decrypt()` and `decrypt_path()` functions. These functions operate at query time and do not write decrypted data to disk. +??? "Use the `decrypt` function" + ### Use the `decrypt` function + The `decrypt()` function performs brute-force decryption. It attempts to decrypt any sub-string in the input that appears to be **base64-encoded**. If the decryption is successful, the sub-string is replaced with the decrypted output. If not, the sub-string is retained unchanged. -### Use the `decrypt` function -The `decrypt()` function performs brute-force decryption. It attempts to decrypt any sub-string in the input that appears to be base64-encoded. If the decryption is successful, the sub-string is replaced with the decrypted output. If not, the sub-string is retained unchanged. + !!! warning + The `decrypt()` function can be slower on larger input fields due to its brute-force behavior. For inputs smaller than 500 characters, performance degradation is typically around 10 percent. 
However, for inputs exceeding 10,000 characters, the slowdown can increase to 50 to 100 percent. Avoid using this function on unnecessarily large fields unless required.

-!!! warning
-    The `decrypt()` function can be slower on larger input fields due to its brute-force behavior. For inputs smaller than 500 characters, performance degradation is typically around 10 percent. However, for inputs exceeding 10,000 characters, the slowdown can increase to 50 to 100 percent. Avoid using this function on unnecessarily large fields unless required.

+    !!! note "Syntax"
+        ```sql
+        decrypt(encrypted_field, 'cipher_key_name')
+        ```
+        Here:

-**Syntax**
-```sql -decrypt(encrypted_field, 'cipher_key_name') -``` -Here: - -- `encrypted_field`: The field containing the encrypted value. -- `cipher_key_name`: The name of the **Cipher Key** used during encryption. + - `encrypted_field`: The field containing the encrypted value. + - `cipher_key_name`: The name of the **Cipher Key** used during encryption. -**Sample Encrypted Dataset**
-The following log entries exist in the `customer_feedback_encrypt` stream: -
-![Cipher keys encrypted data](../../images/cipher-keys-encrypted-data.png) + **Example**
+ **Sample Encrypted Dataset**: The following log entries exist in the `customer_feedback_encrypt` stream: + ![Cipher keys encrypted data](../../images/cipher-keys-encrypted-data.png) -**1. Decrypt a single field**
+ **1. Decrypt a single field**
-```sql -SELECT - decrypt(name_encrypted, 'customer_feedback_key') AS name -FROM "customer_feedback_encrypt" -``` -This returns the decrypted value of the `name_encrypted` field. -
-![decrypt a single field](../../images/decrypt-single-field.png) - -**2. Decrypt multiple fields**
-```sql -SELECT - decrypt(name_encrypted, 'customer_feedback_key') AS name, - decrypt(feedback_encrypted, 'customer_feedback_key') AS feedback_text -FROM "customer_feedback_encrypt" -``` -This returns decrypted values for both `name_encrypted` and `feedback_encrypted`. -
-![Decrypt multiple fields](../../images/decrypt-multiple-fields.png) + ```sql + SELECT + decrypt(name_encrypted, 'customer_feedback_key') AS name + FROM "customer_feedback_encrypt" + ``` + This returns the decrypted value of the `name_encrypted` field. +
+ ![decrypt a single field](../../images/decrypt-single-field.png) + + **2. Decrypt multiple fields**
+ ```sql + SELECT + decrypt(name_encrypted, 'customer_feedback_key') AS name, + decrypt(feedback_encrypted, 'customer_feedback_key') AS feedback_text + FROM "customer_feedback_encrypt" + ``` + This returns decrypted values for both `name_encrypted` and `feedback_encrypted`. +
+    ![Decrypt multiple fields](../../images/decrypt-multiple-fields.png)

---
+??? "Use the `decrypt_path` function"
+    ### Use the `decrypt_path` function
+    Use `decrypt_path()` to decrypt a specific value nested inside a JSON field. The field itself must be a valid JSON object. The path specifies the location of the encrypted value inside that object. The function will apply decryption only at that path.

+    !!! note "Syntax"
+        ```sql
+        decrypt_path(encrypted_field, 'cipher_key_name', 'path')
+        ```
+        Here:

-**Syntax**
-```sql -decrypt_path(encrypted_field, 'cipher_key_name', 'path') -``` -Here: + - `encrypted_field`: A JSON object or nested data structure that contains encrypted values at various paths. + - `cipher_key_name`: The name of the Cipher Key used during encryption. + - `path`: A dot-delimited string that specifies the location of the encrypted value within the nested JSON structure. Use '.' to return the entire decrypted object. -- `encrypted_field`: A JSON object or nested data structure that contains encrypted values at various paths. -- `cipher_key_name`: The name of the Cipher Key used during encryption. -- `path`: A dot-delimited string that specifies the location of the encrypted value within the nested JSON structure. Use '.' to return the entire decrypted object. + **Example:** -Example: + If a log entry from the `encrypted_logs` stream contains: -If a log entry from the `encrypted_logs` stream contains: - -```json -{ - log: { - "a": { - "b": { - "c": "ENCRYPTED_STRING" + ```json + { + log: { + "a": { + "b": { + "c": "ENCRYPTED_STRING" + } + } } } - } -} -``` -And `ENCRYPTED_STRING` decrypts to `xyz` using the cipher key `my_key`. Then the following SQL query: + ``` + And `ENCRYPTED_STRING` decrypts to `xyz` using the cipher key `my_key`. Then the following SQL query: -```sql -SELECT decrypt_path(log, 'my_key', 'a.b.c') AS decrypted_value FROM "encrypted_logs" -``` -returns: + ```sql + SELECT decrypt_path(log, 'my_key', 'a.b.c') AS decrypted_value FROM "encrypted_logs" + ``` + returns: -```json -{ -decrypted_value: xyz -} -``` + ```json + { + decrypted_value: xyz + } + ``` -**Sample Encrypted Dataset**
-The following records exist in encrypted format in the `customer_feedback_nested` stream. Each record has multiple encrypted fields at different nesting levels: -
-![Nested Dataset](../../images/nested-dataset.png) - -**1. Decrypt a top-level field** -The `feedback_text` field contains an encrypted plain-text value. To retrieve its original value, use `decrypt_path()` with the path set to '.'. -```sql -SELECT - decrypt_path(feedback_text, 'customer_feedback_key', '.') AS feedback_text_decrypted -FROM "customer_feedback_nested" -``` -
+ **Sample Encrypted Dataset**: The following records exist in encrypted format in the `customer_feedback_nested` stream. Each record has multiple encrypted fields at different nesting levels: +
+ ![Nested Dataset](../../images/nested-dataset.png) + + **1. Decrypt a top-level field** + The `feedback_text` field contains an encrypted plain-text value. To retrieve its original value, use `decrypt_path()` with the path set to '.'. + ```sql + SELECT + decrypt_path(feedback_text, 'customer_feedback_key', '.') AS feedback_text_decrypted + FROM "customer_feedback_nested" + ``` +
-![Decrypt a top-level field](../../images/decrypt-path-top-level.png) + ![Decrypt a top-level field](../../images/decrypt-path-top-level.png) -**2. Decrypt a nested field** -To retrieve the decrypted value of `name`, you can write the query as shown below: -```sql -SELECT - decrypt_path(metadata, 'customer_feedback_key', 'user.name') AS name -FROM "customer_feedback_nested" -``` -
+ **2. Decrypt a nested field** + To retrieve the decrypted value of `name`, you can write the query as shown below: + ```sql + SELECT + decrypt_path(metadata, 'customer_feedback_key', 'user.name') AS name + FROM "customer_feedback_nested" + ``` +
-![Decrypt a nested field](../../images/decrypt-a-nested-field.png) + ![Decrypt a nested field](../../images/decrypt-a-nested-field.png) -!!! Info "Path Examples for `decrypt_path()`" - The `decrypt_path()` function allows you to specify exact JSON paths to selectively decrypt fields.
- The following examples show how to define the path in the decrypt_path() function for different JSON structures, including nested arrays and objects. + !!! Info "Path Examples for `decrypt_path()`" + The `decrypt_path()` function allows you to specify exact JSON paths to selectively decrypt fields.
+ The following examples show how to define the path in the decrypt_path() function for different JSON structures, including nested arrays and objects. - - Decrypt tokens in an array of sessions - **Input:** + - Decrypt tokens in an array of sessions + **Input:** - ```json + ```json - { - "sessions": [ - { "id": "1", "token": "ENC(...)" }, - { "id": "2", "token": "ENC(...)" } - ] - } - ``` - **Path**: `sessions.*.token`
- This path decrypts the token field in each object within the sessions array. + { + "sessions": [ + { "id": "1", "token": "ENC(...)" }, + { "id": "2", "token": "ENC(...)" } + ] + } + ``` + **Path**: `sessions.*.token`
+ This path decrypts the token field in each object within the sessions array. - - Decrypt nested fields in a multi-level array - **Input:** + - Decrypt nested fields in a multi-level array + **Input:** - ```json - { - "data": [ + ```json { - "details": [ - { "secure": "ENC(...)" }, - { "secure": "ENC(...)" } + "data": [ + { + "details": [ + { "secure": "ENC(...)" }, + { "secure": "ENC(...)" } + ] + } ] } - ] - } - ``` - **Path:** `data.*.details.*.secure`
- This path decrypts every secure field in each nested details array within the data array. + ``` + **Path:** `data.*.details.*.secure`
+ This path decrypts every secure field in each nested details array within the data array. --- diff --git a/docs/user-guide/management/index.md b/docs/user-guide/management/index.md index 1a86cb94..7628b847 100644 --- a/docs/user-guide/management/index.md +++ b/docs/user-guide/management/index.md @@ -1,16 +1,16 @@ The Management section includes tools for maintaining and securing OpenObserve operations. It allows users to manage saved queries, configure alert destinations, create reusable templates, review audit trails, handle encryption keys, and monitor node-level performance and resource metrics. -Learn more: +!!! note "Learn more" -- [Streaming Search](../management/streaming-search/) -- [Streaming Aggregation](../management/aggregation-cache/) -- [Query Management](../management/query-management/) -- [Alert Destinations](../management/alert-destinations/) -- [Templates](../management/templates/) -- [Audit trail](../management/audit-trail/) -- [Cipher Keys](../management/cipher-keys/) -- [Nodes in OpenObserve](../management/nodes/) -- [SSO Domain Restrictions](../management/sso-domain-restrictions/) -- [Regex Patterns](../management/regex-patterns/) + - ### [Streaming Search](../management/streaming-search/) + - ### [Streaming Aggregation](../management/aggregation-cache/) + - ### [Query Management](../management/query-management/) + - ### [Alert Destinations](../management/alert-destinations/) + - ### [Templates](../management/templates/) + - ### [Audit trail](../management/audit-trail/) + - ### [Cipher Keys](../management/cipher-keys/) + - ### [Nodes in OpenObserve](../management/nodes/) + - ### [SSO Domain Restrictions](../management/sso-domain-restrictions/) + - ### [Sensitive Data Redaction](../management/sensitive-data-redaction/) diff --git a/docs/user-guide/management/nodes.md b/docs/user-guide/management/nodes.md index 0926d3e0..f3e2059e 100644 --- a/docs/user-guide/management/nodes.md +++ b/docs/user-guide/management/nodes.md @@ -1,8 +1,12 @@ --- +title: Monitor Node Health and Performance description: >- Monitor node health and performance in OpenObserve clusters, including CPU, memory, and TCP connections across ingesters, queriers, and other node types. --- +This document explains how to monitor the health and performance of nodes in OpenObserve clusters. + +## Overview In OpenObserve, a node is an instance of the OpenObserve application. Each node performs one or more specific tasks, such as collecting incoming data, answering search queries, storing data efficiently, or managing alert rules. A group of such nodes working together is called a cluster. Clusters allow OpenObserve to distribute work across nodes, helping the system scale, stay responsive, and remain available even if one node goes offline. Depending on how OpenObserve is deployed, this cluster can be: @@ -12,21 +16,22 @@ A group of such nodes working together is called a cluster. Clusters allow OpenO This guide explains how to monitor the health and performance of each node in your OpenObserve deployment. -### Availability -The **Nodes** page is available only when OpenObserve is running in a clustered deployment, either single-cluster or super-cluster. +!!! note "Where to find" + The **Nodes** page is available only when OpenObserve is running in a clustered deployment, either single-cluster or super-cluster. -### Access -In the OpenObserve UI, select `_meta` organization and go to **Management > Nodes** from the top navigation menu. +!!! 
note "Who can access" + In the OpenObserve UI, select `_meta` organization and go to **Management > Nodes** from the top navigation menu. -By default, `root` users and `Admins` with access to the `_meta` organization can access the **Nodes** page. RBAC cannot grant access to other user roles. + By default, `root` users and `Admins` with access to the `_meta` organization can access the **Nodes** page. RBAC cannot grant access to other user roles. +## Nodes interface **For Single-Cluster Setup**
![nodes-single-cluster](../../images/nodes-single-cluster.png) **For Super-Cluster Setup**
![nodes-multi-cluster](../../images/nodes-super-cluster.png)

-## Node Types
+## Node types

- **Ingester**: Handles incoming data and writes it to storage.
- **Querier**: Responds to user queries and retrieves data from storage.
@@ -34,7 +39,7 @@ By default, `root` users and `Admins` with access to the `_meta` organization ca
- **Router**: Routes API requests and serves the OpenObserve user interface.
- **Alert Manager**: Manages alerting rules and runs scheduled tasks.

-## View Key Metrics
+## View key metrics

Each node displays the following metrics in a tabular format with progress bars and color indicators:
@@ -59,7 +64,7 @@ Each node displays the following metrics in a tabular format with progress bars
- **Online** (Green): Node is healthy and active.
![nodes-status-online](../../images/nodes-status-online.png)

-## Use Refresh, Search, and Filters
+## Use refresh, search, and filters

- **Refresh:** Click the **Refresh** button in the top-right corner of the page to update the list with the latest node data.
- **Search Bar:** Use the search bar to locate nodes by names and version.
@@ -74,7 +79,7 @@ Each node displays the following metrics in a tabular format with progress bars
![nodes-search-filter](../../images/nodes-search-filter.png)

-## Monitor and Troubleshoot Node Health
+## Monitor and troubleshoot node health
Use the following signals to monitor node health and take action when needed:

- **CPU and Memory Usage**: If you notice sustained usage above 70%, consider scaling out your deployment or investigating the workload on that node. Note that this percentage may vary depending upon the use case.
diff --git a/docs/user-guide/management/regex-patterns.md b/docs/user-guide/management/regex-patterns.md
deleted file mode 100644
index 491a2f71..00000000
--- a/docs/user-guide/management/regex-patterns.md
+++ /dev/null
@@ -1,197 +0,0 @@
----
-title: Regex Pattern Redaction in OpenObserve Enterprise
-description: Learn how to create and apply regex patterns to redact or drop sensitive data during log ingestion in OpenObserve Enterprise Edition.
----
-
-This document helps users understand, create, and use regex patterns to automatically redact or drop sensitive data and Personally Identifiable Information (PII) during log ingestion.
-> Note: This feature is applicable to the OpenObserve [Enterprise Edition](../../../openobserve-enterprise-edition-installation-guide/).
-
-## Overview
-The **Regex Pattern** feature in OpenObserve allows you to automatically detect and redact or drop sensitive data during log ingestion using regular expressions. This ensures field-level control over PII, authentication details, financial data, and other confidential values before they are stored or indexed.
-
-!!! note "Where to find"
-    To access the **Regex Pattern** interface:
-
-    1. Select the appropriate organization from the dropdown in the top-right corner.
-    2. Select **Management** > **Regex Pattern**.
-
-    This opens the regex pattern management interface, where you can view, create, and manage regex patterns available to the selected organization.
-
-!!! note "Who can access"
-    Access to **Regex Pattern** is controlled via the **Regexp Patterns** module in the **IAM** settings, using [role-based access control (RBAC)](https://openobserve.ai/docs/user-guide/identity-and-access-management/role-based-access-control/).
-
-    - `Root` users have full access by default.
-    - Other user permissions must be assigned access through **Roles** in **IAM**.
- - You can control access at both the module level (all regex patterns) and the individual pattern level. This allows precise control over which users can view, create, edit, or delete specific regex patterns. - -!!! warning "Important Note" - - Regex patterns can only be applied to fields with UTF8 data type. - - The stream must have ingested data before you can apply regex patterns. Empty streams will not show field options for pattern association. - -## Create Regex Patterns - -**To create a regex pattern:** -??? note "Step 1: Discover Sensitive Data" - Identify which fields may contain sensitive or Personally Identifiable Information (PII). - - 1. From the left-hand menu, select **Logs**. - 2. In the stream selection dropdown, select the stream. - 3. Select an appropriate time range and click **Run Query**. - This shows the records for the selected time range. -
- - ![Identify PII for regex application](../../images/identify-pii-for-regex-application.png) - - **Look for common sensitive patterns.** - - | Sensitive Data Category | Examples | Common Fields | - |-----------|----------|---------------| - | **Personal Information** | Names, emails, phone numbers | `message`, `user_info`, `contact` | - | **Financial Data** | Credit cards, SSNs, bank accounts | `payment_info`, `transaction_data` | - | **Authentication** | API keys, tokens, passwords | `headers`, `auth_data`, `debug_info` | - | **Network Data** | IP addresses, MAC addresses | `client_ip`, `network_info` | - - **Example Sensitive Data in Logs:**
- ```json - { - "message": "User John Doe with email john.doe@company.com logged in from IP 192.168.1.100. SSN: 123-45-6789. Credit Card: 4111-1111-1111-1111", - "timestamp": "2025-07-30T10:30:00Z" - } - ``` -??? note "Step 2: Create and Test Regex Patterns" - To create regex patterns, naviagte to **Management** > **Regex Pattern** > **Create Pattern**. - ![Create regex](../../images/create-regex-pattern.png) - - In the pattern creation form, enter the following details: - - 1. **Name**: Enter a clear, descriptive name. For example, Email Detection. - 2. **Description:** (Optional) Explain what the pattern is intended to detect. - 3. **Regex Pattern**: Paste or write the regular expression you want to use. Refer to the following **common patterns**. - 4. **Test Pattern**: Provide a sample input to validate that the regex works as expected. - 5. Click the **Create and Close** button to save the pattern. - - **Common Patterns** - - | Type | Pattern | Example | - |------|---------|---------| - | **Email** | `\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z\|a-z]{2,}\b` | `user@company.com` | - | **Full Name** | `\b[A-Z][a-z]+ [A-Z][a-z]+\b` | `John Doe` | - | **Phone (US)** | `\+?1?[-.\s]?\(?[0-9]{3}\)?[-.\s]?[0-9]{3}[-.\s]?[0-9]{4}` | `+1-555-123-4567` | - | **Credit Card** | `\b(?:\d{4}[-\s]?){3}\d{4}\b` | `4111-1111-1111-1111` | - | **SSN (US)** | `\b\d{3}-?\d{2}-?\d{4}\b` | `123-45-6789` | - | **API Key** | `\b[A-Za-z0-9]{32,}\b` | `sk_live_1234567890abcdef` | - | **IP Address** | `\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b` | `192.168.1.100` | - | **Password Field** | `(?i)password[\"':\s=]+[\"']?([^\"'\s,}]+)` | `password: "secret123"` | - - **Example**
- The following screenshots illustrate the pattern creation process: - - 1. Reviewing a log that includes PII -
- The log message in the `pii_test_stream` contains names, email addresses, IP addresses, SSNs, and credit card numbers. -
- ![Stream with PII](../../images/stream-with-pii.png) - 2. Creating and testing the regex patterns -
- **Full Name**: - ![Full name regex](../../images/full-name-regex.png) -
- **Email Addresses**: - ![Email regex](../../images/email-regex.png) - 3. Adding more patterns -
- Continue creating patterns for additional sensitive fields such as IP addresses, SSNs, and credit card numbers. - ![Regex example](../../images/all-regex-patterns.png) - - -## Apply Regex Patterns -Once your regex patterns are created and tested, you can apply them to specific fields in a stream to redact or drop sensitive data during ingestion. - -**To apply a pattern to a field:** - -??? "Step 1: Go to the Stream Field" - 1. From the left-hand menu, go to **Streams**. - 2. Locate the stream where you want to apply regex patterns and select **Stream Details** from the **Actions** column. - 3. In the **Stream Settings** tab, locate the field that contains sensitive data. -
- -??? "Step 2: Add Pattern" - 1. Select **Add Pattern** for that field. This opens the pattern panel, where you can view Applied Patterns (if any) and browse all available patterns.
- ![Add regex pattern](../../images/stream-settings-add-regex.png) - - 2. Select a pattern to apply. - ![Select regex pattern](../../images/select-regex.png) -
- After you select a pattern, a detail view appears, showing the following: - ![Regex selection](../../images/regex-selection-view.png) - - - **Pattern Name** and **Description** - - The **Regex Pattern** - - A **Test Input** area (optional) - - Options for how to handle matches. - -??? "Step 3: Choose Whether to Redact or Drop" - When applying a regex pattern, you must choose one of the following actions. - - **Redact**: - - - Replaces only the matching portion of the field value with `[REDACTED]`, while preserving the rest of the field. - - Use this when the field contains both sensitive and non-sensitive information and you want to retain the overall context. - - **Drop**: - - - Removes the entire field from the log record if the regex pattern matches. - - Use this when the entire field should be excluded from storage or analysis. - - Select the appropriate action (Redact or Drop) and click **Add Pattern**. - -??? "Step 4: Apply Multiple Patterns (Optional)" - You can apply multiple patterns to the same field. - All applied patterns will appear in the left-hand panel with check marks. - ![Multiple regex selection](../../images/multiple-regex-selection.png) - -??? "Step 4: Save Configuration" - When finished, click **Update Changes** to save the configuration. This activates the regex rules for the selected field. - ![Regex applied](../../images/regex-applied.png) - -## Test Redaction and Drop -Once regex patterns are applied to a stream, you can verify the results by inspecting incoming log records. - -**Note:** These tests assume that data ingestion is active for the target stream. If ingestion is paused, ensure that you select the correct time range while running search queries to include previously ingested test data. - -??? "To test redaction and drop" - - 1. Go to **Logs** in the left-hand menu. - 2. Select the stream where you applied the redaction pattern. - 3. Set an appropriate time range and select **Run Query**. - - **For redaction**:
- When the pattern is configured to **Redact**, matched values will appear as `[REDACTED]` in the field. - ![redact-test-regex](../../images/redact-test-regex.png) - - **For drop**:
- To demonstrate drop behavior, the pattern configuration for the `message` field was updated by changing the match action in one of the applied patterns (for example, the credit card pattern) from **Redact** to **Drop**.
- ![Drop regex test](../../images/regex-drop-test.png) -
- When the pattern is configured to **Drop**, the target field, such as `message`, will not appear in the ingested record. - ![regex-drop-result](../../images/regex-drop-result.png) - -## Limitations - -- **Pattern Matching Engine**: OpenObserve uses the Intel Hyperscan library for regex evaluation. All Hyperscan limitations apply to pattern syntax and matching behavior. -- **Field Type Restrictions**: Regex patterns can only be applied to fields with a UTF8 data type. Other field types are not supported. -- **Data Requirements**: Patterns can only be applied after the stream has ingested data. Empty streams will not show any fields in the Stream Settings tab for pattern association. -- **Performance**: Complex patterns may impact ingestion speed, but overall performance remains faster than VRL-based redaction. -- **Data Impact**: Regex patterns apply only to newly ingested data. Existing log data is not modified. - -## Troubleshooting - -**Problem**: **Add Pattern** not visible in **Stream Details**.
-**Cause**: The field is not of UTF8 type.
-**Solution**: Check the field type in the Stream Details view. Only UTF8 fields support regex patterns.
- -**Problem**: Pattern does not apply.
-**Cause**: Configuration changes were not saved.
-**Solution**: Ensure that you selected **Update Changes** after applying the pattern. - - diff --git a/docs/user-guide/management/sensitive-data-redaction.md b/docs/user-guide/management/sensitive-data-redaction.md new file mode 100644 index 00000000..0190e1e2 --- /dev/null +++ b/docs/user-guide/management/sensitive-data-redaction.md @@ -0,0 +1,338 @@ +--- +title: Sensitive Data Redaction in OpenObserve Enterprise +description: Learn how to redact or drop sensitive data using regex patterns during log ingestion or query time in OpenObserve Enterprise Edition. +--- + +This document explains how to configure and manage regex patterns for redacting or dropping sensitive data in OpenObserve. +> Note: This feature is applicable to the OpenObserve [Enterprise Edition](../../../openobserve-enterprise-edition-installation-guide/). + +## Overview +The **Sensitive Data Redaction** feature helps prevent accidental exposure of sensitive data by applying regex-based detection to values ingested into streams and to values already stored in streams. Based on this detection, sensitive values can be either **redacted** or **dropped**. This ensures data is protected before it is stored and hidden when displayed in query results. You can configure these actions to run at ingestion time or at query time. + +**Ingestion time** + +> **Note**: Use ingestion time redaction or drop when you want to ensure sensitive data is never stored on disk. This is the most secure option for compliance requirements, as the original sensitive data cannot be recovered once it's redacted or dropped during ingestion. + +- **Redaction**: Sensitive data is masked before being stored on disk. +- **Drop**: Sensitive data is removed before being stored on disk. + +**Query time** +> **Note**: If you have already ingested sensitive data and it's stored on disk, you can use query time redaction or drop to protect it. This allows you to apply sensitive data redaction to existing data. + +- **Redaction**: Sensitive data is read from disk but masked before results are displayed. +- **Drop**: Sensitive data is read from disk but excluded from the query results. + +!!! note "Where to find" + To access the **Sensitive Data Redaction** interface: + + 1. Select the appropriate organization from the dropdown in the top-right corner. + 2. Select **Management** > **Sensitive Data Redaction**. + + ![Sensitive Data Redaction](../../images/sensitive-data-redaction.png) + + This opens the Sensitive Data Redaction interface, where you can view, create, and manage regex patterns available to the selected organization. + +!!! note "Who can access" + `Root` users have full access to both pattern creation and pattern association by default. For other users, permissions are controlled via the **Regexp Patterns** and **Streams** module in the **IAM** settings, using [role-based access control (RBAC)](https://openobserve.ai/docs/user-guide/identity-and-access-management/role-based-access-control/). + + **Pattern Creation:** + + - Users need permissions on the **Regexp Patterns** module to create, view, edit, or delete regex patterns. + - You can control access at both the module level (all regex patterns) and the individual pattern level for precise control. + + **Pattern Association:** + + - To associate patterns with stream fields, users need List permission on **Regexp Patterns** AND edit permission on **Streams** modules. + +!!! warning "Important Note" + - Regex patterns can only be applied to fields with UTF8 data type. 
+ - The stream must have ingested data before you can apply regex patterns. Empty streams will not show field options for pattern association. + +## Create Regex Patterns + +**To create a regex pattern:** + +??? "Step 1: Discover sensitive data" + ### Step 1: Discover sensitive data + Identify which fields may contain sensitive data. + + 1. From the left-hand menu, select **Logs**. + 2. In the stream selection dropdown, select the stream. + 3. Select an appropriate time range and click **Run Query**. + This shows the records for the selected time range. +
+ ![Identify PII for regex application](../../images/identify-pii-for-regex-application.png) + + **Look for common sensitive patterns.** + + | Sensitive Data Category | Examples | Common Fields | + |-----------|----------|---------------| + | **Personal Information** | Names, emails, phone numbers | `message`, `user_info`, `contact` | + | **Financial Data** | Credit cards, SSNs, bank accounts | `payment_info`, `transaction_data` | + | **Authentication** | API keys, tokens, passwords | `headers`, `auth_data`, `debug_info` | + | **Network Data** | IP addresses, MAC addresses | `client_ip`, `network_info` | + + **Example Sensitive Data in Logs:**
+    ```json
+    {
+      "message": "User John Doe with email john.doe@company.com logged in from IP 192.168.1.100. SSN: 123-45-6789. Credit Card: 4111-1111-1111-1111",
+      "timestamp": "2025-07-30T10:30:00Z"
+    }
+    ```
+??? "Step 2: Create and test regex patterns"
+    ### Step 2: Create and test regex patterns
+    To create regex patterns, navigate to **Management** > **Sensitive Data Redaction** > **Create Pattern**.
+
+    ![Create regex](../../images/create-regex-pattern.png)
+
+    In the pattern creation form, enter the following details:
+
+    1. **Name**: Enter a clear, descriptive name. For example, Email Detection.
+    2. **Description:** (Optional) Explain what the pattern is intended to detect.
+    3. **Regex Pattern**: Paste or write the regular expression you want to use. Refer to the following **common patterns**.
+    4. **Test Pattern**: Provide a sample input to validate that the regex works as expected.
+    5. Click the **Create and Close** button to save the pattern.
+
+    **Common Patterns**
+
+    | Type | Pattern | Example |
+    |------|---------|---------|
+    | **Email** | `\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z\|a-z]{2,}\b` | `user@company.com` |
+    | **Full Name** | `\b[A-Z][a-z]+ [A-Z][a-z]+\b` | `John Doe` |
+    | **Phone (US)** | `\+?1?[-.\s]?\(?[0-9]{3}\)?[-.\s]?[0-9]{3}[-.\s]?[0-9]{4}` | `+1-555-123-4567` |
+    | **Credit Card** | `\b(?:\d{4}[-\s]?){3}\d{4}\b` | `4111-1111-1111-1111` |
+    | **SSN (US)** | `\b\d{3}-?\d{2}-?\d{4}\b` | `123-45-6789` |
+    | **API Key** | `\b[A-Za-z0-9]{32,}\b` | `sk_live_1234567890abcdef` |
+    | **IP Address** | `\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b` | `192.168.1.100` |
+    | **Password Field** | `(?i)password[\"':\s=]+[\"']?([^\"'\s,}]+)` | `password: "secret123"` |
+
+    **Example**
+    The following screenshots illustrate the pattern creation process:
+
+    1. Review the logs that include PII.
+
+ The `message` field in the `pii_test_stream` contains names, email addresses, IP addresses, SSNs, and credit card numbers. +
+ ![Stream with PII](../../images/stream-with-pii.png) + 2. Create and test the regex patterns. +
+ **Full Name**: + ![Full name regex](../../images/full-name-regex.png) +
+ **Email Addresses**: + ![Email regex](../../images/email-regex.png) + +## Apply Regex Patterns +Once your patterns are created and tested, you can apply them to specific fields in a stream to redact or drop sensitive data during ingestion or at query time. +
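+For orientation, here is an illustrative before-and-after for a `message` value when the **Redact** action matches a full name and an email address; the sample text is hypothetical:
+
+```json
+{
+  "before": "User John Doe with email john.doe@company.com logged in",
+  "after": "User [REDACTED] with email [REDACTED] logged in"
+}
+```
+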
+**To apply a pattern to a field:**
+
+??? "Step 1: Go to the stream field"
+    ### Step 1: Go to the stream field
+    1. From the left-hand menu, go to **Streams**.
+    2. Locate the stream where you want to apply regex patterns and select **Stream Details** from the **Actions** column.
+    3. In the **Stream Settings** tab, locate the field that contains sensitive data.
+
+    ![Field with sensitive data](../../images/stream-settings-sensitive-fields.png)
+
+??? "Step 2: Add pattern"
+    ### Step 2: Add pattern
+    1. Select **Add Pattern** for the target field. This opens the pattern panel, where you can view already applied patterns and add new ones.
+    ![Add regex pattern](../../images/stream-settings-add-regex.png)
+    2. From the **All Patterns** section, select a pattern you want to apply.
+    ![Select regex pattern](../../images/select-regex.png)
+
+    After selecting a pattern, a detail view appears.
+    ![Regex selection](../../images/regex-selection-view.png)
+
+??? "Step 3: Choose whether to Redact or Drop"
+    ### Step 3: Choose whether to Redact or Drop
+    ![Regex pattern execution action- redact or drop](../../images/redact-or-drop-during-regex-pattern-execution.png)
+
+    When applying a regex pattern, you must choose one of the following actions in the pattern details screen:
+
+    **Redact**:
+
+    - Replaces only the matching portion of the field value with `[REDACTED]`, while preserving the rest of the field.
+    - Use this when the field contains both sensitive and non-sensitive information and you want to retain the overall context.
+
+    **Drop**:
+
+    - Removes the entire field from the log record if the regex pattern matches.
+    - Use this when the entire field should be excluded from storage or analysis.
+
+    Select the appropriate action, either Redact or Drop.
+
+??? "Step 4: Choose when the action needs to be executed"
+    ### Step 4: Choose when the action needs to be executed
+    In the pattern details screen, select when the chosen action (redact or drop) should be executed: at ingestion time, at query time, or both.
+
+    ![Regex pattern execution time](../../images/regex-pattern-execution-time.png)
+
+    **Ingestion**:
+
+    - The data is redacted or dropped before it is written to disk.
+    - This ensures that sensitive information is never stored in OpenObserve.
+    - Example: If an email address is redacted at ingestion, only the masked value `[REDACTED]` will be stored in the logs.
+
+    **Query**:
+
+    - The data is stored in its original form but is redacted or dropped only when queried.
+    - This allows administrators to preserve the original data while preventing exposure of sensitive values during searches.
+    - Example: An email address stored in raw form will be hidden as `[REDACTED]` in query results.
+
+    You can select one or both options depending on your security and compliance requirements.
+    **If neither ingestion time nor query time is selected, no redaction or drop is applied.**
+
+??? "Step 5: Add pattern and update changes"
+    ### Step 5: Add pattern and update changes
+
+    1. To add the regex pattern to Applied Patterns, click **Add Pattern**.
+    ![Add regex pattern](../../images/add-regex-pattern.png)
+    2. Select **Update Changes**.
+    ![Update regex patterns](../../images/update-regex-patterns.png)
+
+??? "Step 6: (Optional) Apply multiple patterns"
+    ### Step 6: (Optional) Apply multiple patterns
+    You can apply multiple patterns to the same field, as shown below:
+    Configure a regex pattern to detect emails:
+    ![regex-patterns-redact](../../images/regex-patterns-redact.png)
+    Configure a regex pattern to detect IP addresses:
+    ![regex-patterns-drop](../../images/regex-patterns-drop.png)
+    All applied patterns will appear in the left-hand panel with check marks.
+
+??? "Step 7: Save configuration"
+    ### Step 7: Save configuration
+    When finished, click **Update Changes** to save the configuration. This activates the regex rules for the selected field.
+
+
+## Test Redaction and Drop Operations
+
+The following regex patterns are applied to the `message` field of the `pii_test` stream:
+
+| Pattern Name | Action | Timing | Description |
+|--------------|--------|--------|-------------|
+| Full Name | Redact | Ingestion | Masks names like "John Doe" |
+| Email | Redact | Query | Masks email addresses at query time |
+| IP Address | Drop | Ingestion | Removes IP addresses before storage |
+| Credit Card | Drop | Query | Excludes credit card numbers from results |
+
+??? 
"Test 1: Redact at ingestion time" + ### Test 1: Redact at ingestion time + **Pattern Configuration**: + ![redact-at-ingestion-time-test-config](../../images/redact-at-ingestion-time-test-config.png) + + **Test Steps**: + + 1. From the left-hand menu, select **Logs**. + 2. Select the `pii_test` stream from the dropdown. + 3. Ingest a log entry containing a full name in the message field. + ```bash + $ curl -u root@example.com:FNIB8MWspXZRkRgS -k https://dev2.internal.zinclabs.dev/api/default/pii_test/_json -d '[{"level":"info","job":"test","message":"User John Doe logged in successfully"}]' + {"code":200,"status":[{"name":"pii_test","successful":1,"failed":0}]} + ``` + 4. Set the time range to include the test data. + 5. Click **Run Query**. + 6. Verify results: + ![redact-at-ingestion-time-result](../../images/redact-at-ingestion-time-result.png) + + **Key points:** + + - The name "John Doe" is replaced with [REDACTED]. + - The rest of the message field remains intact. + - This is the actual stored value on disk. + + +??? "Test 2: Drop at ingestion time" + ### Test 2: Drop at ingestion time + **Pattern Configuration**: + ![drop-at-query-time-test-config](../../images/drop-at-ingestion-time-test-config.png) + + **Test Steps:** + + 1. From the left-hand menu, select **Logs**. + 2. Select the `pii_test` stream from the dropdown. + 3. Ingest a log entry containing a IP address in the message field. + ```bash + $ curl -u root@example.com:FNIB8MWspXZRkRgS -k https://dev2.internal.zinclabs.dev/api/default/pii_test/_json -d '[{"level":"info","job":"test","message":"Connection from IP 192.168.1.100 established"}]' + {"code":200,"status":[{"name":"pii_test","successful":1,"failed":0}]} + ``` + 4. Set the time range to include the test data. + 5. Click **Run Query**. + 6. Verify results: + ![drop-at-ingestion-time-result](../../images/drop-at-ingestion-time-result.png) + + **Key points:** + + - The entire message field is missing from the stored record. + - Other fields remain intact. + - This demonstrates field-level drop at ingestion. + +??? "Test 3: Redact at query time" + ### Test 3: Redact at query time + **Pattern Configuration**: + ![redact-at-query-test-config](../../images/redact-at-query-test-config.png) + + **Test Steps:** + + 1. From the left-hand menu, select **Logs**. + 2. Select the `pii_test` stream from the dropdown. + 3. Ingest a log entry containing a email addresses in the message field. + ```bash + $ curl -u root@example.com:FNIB8MWspXZRkRgS -k https://dev2.internal.zinclabs.dev/api/default/pii_test/_json -d '[{"level":"info","job":"test","message":"Password reset requested for john.doe@company.com"}]' + {"code":200,"status":[{"name":"pii_test","successful":1,"failed":0}]} + + ``` + 4. Set the time range to include the test data. + 5. Click **Run Query**. + 6. Verify results: + ![redact-at-query-time-result](../../images/redact-at-query-time-result.png) + + **Key points:** + + - Original data is preserved on disk. + - Email address appears as [REDACTED] in query results. + - Useful for compliance while maintaining data for authorized access. + + +??? "Test 4: Drop at query time" + ### Test 4: Drop at query time + **Pattern Configuration**: + ![Drop at Query Time- Test Config](../../images/drop-at-query-time-test-config.png) + + **Test Steps:** + + 1. From the left-hand menu, select **Logs**. + 2. Select the `pii_test` stream from the dropdown. + 3. Ingest a log entry containing credit card details in the message field. 
+ ```bash + $ curl -u root@example.com:FNIB8MWspXZRkRgS -k https://dev2.internal.zinclabs.dev/api/default/pii_test/_json -d '[{"level":"info","job":"test","message":"Payment processed with card 4111-1111-1111-1111"}]' + {"code":200,"status":[{"name":"pii_test","successful":1,"failed":0}]} + ``` + 4. Set the time range to include the test data. + 5. Click **Run Query**. + 6. Verify results: + ![Drop at Query Time](../../images/drop-at-query-time-result.png) + + **Key points:** + + - Original data is preserved on disk. + - The `message` field containing the credit card number is dropped from query results. + - This demonstrates field-level drop at query time. + + +## Limitations + +- **Pattern Matching Engine**: OpenObserve uses the Intel Hyperscan library for regex evaluation. All Hyperscan limitations apply to pattern syntax and matching behavior. +- **Field Type Restrictions**: Regex patterns can only be applied to fields with a UTF8 data type. Other field types are not supported. +- **Data Requirements**: Patterns can only be applied after the stream has ingested data. Empty streams will not show any fields in the Stream Settings tab for pattern association. +- **Performance**: Complex patterns may impact ingestion speed, but overall performance remains faster than VRL-based redaction. + +## Troubleshooting +| **Issue** | **Cause** | **Solution** | +| --- | --- | --- | +| The **Add Pattern** option is not visible in Stream Details. | The field is not of UTF8 type. | Check the field type in the Stream Details view. Only UTF8 fields support regex patterns. | +| Pattern does not apply. | Configuration changes were not saved. | Ensure that you selected **Update Changes** after applying the pattern. | + + + diff --git a/docs/user-guide/management/sso-domain-restrictions.md b/docs/user-guide/management/sso-domain-restrictions.md index f93d1b3c..582fc8b9 100644 --- a/docs/user-guide/management/sso-domain-restrictions.md +++ b/docs/user-guide/management/sso-domain-restrictions.md @@ -7,15 +7,16 @@ description: Learn how to configure SSO domain restrictions in OpenObserve Enter This user guide provides step-by-step instructions for configuring and managing **SSO Domain Restrictions** in OpenObserve.
This feature allows you to control which users can log in to OpenObserve using Single Sign-On (SSO) providers. You can allow access to specific domains or even individual users from those domains. -!!! note "Where to Find" +!!! note "Where to find" The **SSO Domain Restrictions** page is available in the `_meta` org under **Management**. -!!! note "Who Can Access" +!!! note "Who can access" `Root` user and any other user who has access to the `_meta` org can access the **SSO Domain Restrictions** page. ## Add Domain Restrictions -??? "Step 1: Add a New Domain" +??? "Step 1: Add a new domain" + ### Step 1: Add a new domain 1. In the **Domain and allowed users** section, enter the domain name in the text field. > Enter only the domain name, for example, `example.com` and do not include the `@` symbol. 2. Click the **Add Domain** button. @@ -45,19 +46,20 @@ This feature allows you to control which users can log in to OpenObserve using S 4. Repeat for additional users. 5. Use the **X** button next to any email to remove it. -??? "Step 3: Save Configuration" +??? "Step 3: Save configuration" + ### Step 3: Save configuration 1. Review your domain restrictions. 2. Click **Save Changes** to apply the configuration. 3. Click **Cancel** to discard changes. -## Domain Limits +## Domain limits There is no limit on the number of domains you can configure. Add as many domains and specific users as needed for your organization. -## Error Messages +## Error messages When **SSO Domain Restrictions** are configured, any user attempting to log in from domains or email addresses that are **NOT** in the allowed list will see an `unauthorized` error during SSO login. -## Supported SSO Login Options +## Supported SSO login options OpenObserve allows users to log in through the following Single Sign-On options, and domain restrictions apply to all of them: - GitHub @@ -68,10 +70,10 @@ OpenObserve allows users to log in through the following Single Sign-On options, Domain restrictions will be enforced when users attempt to log in using any of these SSO options. ## Troubleshooting -**Problem**: SSO Domain Restrictions menu not visible.
-**Solution**: Verify you are in the `_meta` organization.
+| **Problem** | **Solution** | +| ----------------------------------------- | -------------------------------------------------------------------- | +| SSO Domain Restrictions menu not visible. | Verify that you are in the `_meta` organization. | +| Changes not taking effect. | Ensure that you clicked **Save Changes** and refresh the login page. | -**Problem**: Changes not taking effect.
-**Solution**: Ensure you clicked **Save Changes** and refresh the login page.
diff --git a/docs/user-guide/management/streaming-search.md b/docs/user-guide/management/streaming-search.md index 4ae5692d..04faab65 100644 --- a/docs/user-guide/management/streaming-search.md +++ b/docs/user-guide/management/streaming-search.md @@ -16,6 +16,7 @@ Streaming Search allows OpenObserve to return query results through a single, pe
![User Access](../../images/streaming-search-access.png) + ## Concepts ### Partition diff --git a/docs/user-guide/pipelines/use-pipelines.md b/docs/user-guide/pipelines/use-pipelines.md index 5f2a0988..6b66f850 100644 --- a/docs/user-guide/pipelines/use-pipelines.md +++ b/docs/user-guide/pipelines/use-pipelines.md @@ -53,9 +53,10 @@ This opens up the pipeline editor. ![condition in realtime pipeline](../../images/pipeline3_transform_using_condition.png) - - **For a Function node**: In the **Associate Function** form, select an existing function or create a new function to associate with the pipeline. + - **For a Function node**: In the **Associate Function** form, select an existing function or create a new function to associate with the pipeline.
![function in realtime pipeline](../../images/pipeline-new1-associate-function.png) -
To create a new function: +
+ To create a new function: 1. In the **Associate Function** form, enable the **Create new function** toggle. ![function in realtime pipeline](../../images/Pipeline-new1-add-function.png) @@ -81,6 +82,16 @@ This opens up the pipeline editor. ### Step 5: Edit the Destination node +!!! warning "Important" + If you create a route with a condition or filter that forwards events to a new destination, only the matching events go there.
+ Events that do not match the condition are dropped. They are not stored in the source stream unless you explicitly add a destination node that points to the source stream. + + **Suggested pattern**: Always add two or more routes in your real-time pipeline. + + - Route A: Add a catch-all route without a filter. Point it back to the same source stream to prevent data loss. + - Route B: Apply the first filter and forward matching events to destination 1. + - Route C: Apply the second filter and forward matching events to destination 2. + 1. Drag a **Stream** node into the editor. 2. Click the edit icon in the destination **Stream** node. 3. In the **Associate Stream** form: @@ -93,8 +104,8 @@ This opens up the pipeline editor. ![destination stream](../../images/pipeline6_destination_stream.png) ### Step 6: Connect the Source, Transform, and Destination nodes to complete the data flow order -- Use the **remove icon** (![remove icon](../../images/pipeline10_remove.png)) to remove any incorrect connection. -- Use the **connection icon** (![connection icon](../../images/pipelines11_connect_icon.png)) to build a connection between two nodes. +- ![remove icon](../../images/pipeline10_remove.png): Use the **remove icon** to remove any incorrect connection. +- ![connection icon](../../images/pipelines11_connect_icon.png): Use the **connection icon** to build a connection between two nodes. ![realtime pipeline node connection](../../images/pipeline-new1-connect-nodes.png) @@ -105,6 +116,7 @@ After you click Save, it gets activated automatically. Learn how to [manage pipe ![active pipeline](../../images/pipeline8_save_pipeline.png) + ## Use the Pipeline ### Prerequisite diff --git a/docs/user-guide/traces/.pages b/docs/user-guide/traces/.pages new file mode 100644 index 00000000..1f9899fb --- /dev/null +++ b/docs/user-guide/traces/.pages @@ -0,0 +1,5 @@ +nav: + +- Traces Overview: index.md +- Traces in OpenObserve: traces.md + diff --git a/docs/user-guide/traces/index.md b/docs/user-guide/traces/index.md new file mode 100644 index 00000000..5d385032 --- /dev/null +++ b/docs/user-guide/traces/index.md @@ -0,0 +1,5 @@ +Traces help you understand how requests flow across services, identify performance bottlenecks, and troubleshoot errors in distributed systems. + +!!! note "Learn more" + - ### [Traces in OpenObserve](../traces/traces/) + diff --git a/docs/user-guide/traces/traces.md b/docs/user-guide/traces/traces.md new file mode 100644 index 00000000..62402a52 --- /dev/null +++ b/docs/user-guide/traces/traces.md @@ -0,0 +1,293 @@ +--- +title: View and Configure Traces in OpenObserve +description: Configure tracing, correlate logs, and analyze performance in OpenObserve. +--- +This document explains how to use OpenObserve to collect, view, and analyze distributed traces. It covers how to configure tracing for self-monitoring and external applications, correlate traces with logs, and explore trace data in the UI to identify performance bottlenecks and errors. +=== "Overview" + !!! note "Who can use it" + All OpenObserve users can view traces. + + !!! note "Where to find it" + Select **Traces** from the left navigation in the OpenObserve UI. + ![Traces](../../images/traces.png) + + ## Why traces matter + When an application serves a request, such as placing an order, it often calls many different services behind the scenes. If something slows down or fails, it can be difficult to know where the problem happened. Traces solve this by recording the journey of a single request as it flows through your system. 
+ + ## What is a trace? + A trace is the complete story of one request. It begins when the request enters your system and ends when the final response is sent back. + A trace is made up of smaller pieces called spans. Each span represents one operation in the request journey. Examples of operations include verifying a user’s login, reserving inventory, or charging a payment card. + + - Every trace has a `trace_id`, which is the unique identifier that ties all its spans together. + - Every span has its own `span_id`, which shows its place in the request journey. + - Parent and child relationships between spans make it clear which operation triggered which. + + ## How OpenObserve helps + OpenObserve collects and stores spans from your services. It then reconstructs the request flow in the Traces UI, where you can see how the request moved across different services, how long each step took, and where issues may have occurred. +
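+ For illustration, the sketch below shows two spans from the same request as they might be stored. The field names and values are simplified placeholders; real spans follow OTLP and carry many more attributes:
+
+ ```bash
+ # Root span: the entry point of the request; it has no parent.
+ {"trace_id":"0af7651916cd43dd8448eb211c80319c","span_id":"b7ad6b7169203331","parent_span_id":"","service_name":"orders-service","operation_name":"POST /checkout"}
+
+ # Child span: its parent_span_id points at the root span's span_id.
+ {"trace_id":"0af7651916cd43dd8448eb211c80319c","span_id":"00f067aa0ba902b7","parent_span_id":"b7ad6b7169203331","service_name":"auth-service","operation_name":"verify token"}
+ ```
+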

+ **Example: Retail Application**
+ Imagine a customer clicks **place order** in your online shop. That single action triggers work across several services: + + ```bash + Browser + ├─ API Gateway + | + orders-service • operation: POST /checkout + ├─ auth-service • operation: verify token + ├─ inventory-service • operation: reserve items + ├─ payments-service • operation: charge card + │ └─ payments database • operation: SQL query + └─ email-service • operation: send receipt + ``` + ## How this appears as a trace + **Root span:** orders-service POST /checkout starts when the API receives the request and ends when the response is sent. +
+ + **Child spans:** + + - auth-service verify token. + - inventory-service reserve items. + - payments-service charge card. + - payments database SQL query. + - email-service send receipt. + + !!! note "Note" + - All spans share one `trace_id`. Each span has a unique `span_id`. The parent and child links show who called whom and in what order. + - The application component that performs an operation is called a service. For example, auth-service, inventory-service, and payments-service are all services involved in processing the order. + - The specific action performed by the service is called an operation. For example, the auth-service runs verify token, the inventory-service runs reserve items, and the payments-service runs charge card. + + ## Views available in the UI + **Trace list:** Displays recent traces for the selected time range and selected stream. + + Each row shows: + + - The service and operation name, taken from the first span in the trace + - The number of spans + - The request timestamp + - The total duration + - Badges for the services involved + + ![trace-list](../../images/trace-list.png) + + **Timeline:** Shows spans as color-coded horizontal bars. Parent spans contain child spans. Parent time includes the time of child spans, which may run in parallel. + ![Timeline](../../images/timeline.png) + **Service map:** Shows all services involved in the trace and how long each took. + ![Service map](../../images/service-map.png) + **Span details:** Provides metadata such as file path, code line, service version, thread ID, and additional attributes. Events and error messages appear when available. + ![Span details](../../images/span-details.png) +=== "Configure" +
+ The following configuration steps show how to set up OpenObserve for self-monitoring and for collecting traces from external applications. + + ## Configure for self-monitoring + + **Steps**
+ ??? "Step 1: Navigate to Data Sources and collect authorization details" + Go to **Data sources** > **Custom** > **Traces**. + ![opentelemetry-collector-for-traces](../../images/opentelemetry-collector-for-traces.png) + Note the following values: + + - HTTP Endpoint under OTLP HTTP + - Authorization header value + + ![Navigate to traces](../../images/navigate-to-traces.png) + + ??? "Step 2: Configure the environment variables" + Set the following environment variables on the OpenObserve deployment: + ``` + ZO_TRACING_ENABLED=true + ZO_TRACING_SEARCH_ENABLED=true + OTEL_OTLP_HTTP_ENDPOINT=https:///api//v1/traces + ZO_TRACING_HEADER_KEY=Authorization + ZO_TRACING_HEADER_VALUE= + ``` + + Replace `` and `` with the values you saw in the UI. Keep /v1/traces. + This indicates that from the instance + + ??? "Step 4: Apply changes" + Apply the environment variable changes and restart the service. + + ??? "Verify and view your first trace" + + 1. From the left navigation menu, select **Logs**. + 2. Select a stream and time range, and run a simple log search query. + 3. From the left navigation menu, go to **Traces**. + 4. Select the trace stream. + 5. Set the time range to the last few minutes, and open the newest trace. + 6. You should see spans from services such as querier, ingester, or alertmanager. + + ## Configure to monitor an external application + You can send traces directly from an external application to OpenObserve. + + ??? "Step 1: Navigate to Data Sources and collect authorization details" + In your OpenObserve application, go to Data sources > Custom > Traces. + ![opentelemetry-collector-for-traces](../../images/opentelemetry-collector-for-traces.png) + From the **OTLP HTTP** section, copy the following data: + + - HTTP Endpoint + - Authorization header value + + From the **OTLP gRPC** section, copy the following data: + + - Endpoint + - Headers: Authorization, organization, stream-name + - TLS setting insecure or secure + + ??? "Step 2: Choose how you want to send the data" + ![opentelemetry-collector-for-traces](../../images/opentelemetry-collector-for-traces.png) + + === "OpenTelemetry SDK" + **You can send the data directly from the application using an OpenTelemetry SDK** +
+ + + In this method, add the copied authorization values in the exporter setup within your code: + + - Set the **endpoint** to the HTTP Endpoint you copied. It ends with /v1/traces. + - Add the **headers** exactly as shown in the UI. + - For **HTTP**, add Authorization. + - For **gRPC**, add Authorization, organization, and stream-name. + - Set **TLS** to secure or insecure to match the UI if you use gRPC. + === "OpenTelemetry Collector" + You can also send data through the **OpenTelemetry Collector**.
+ To configure this: + + - Open your team's Collector config file. + - In the exporters section, paste the exporter block exactly as shown in **Data sources** > **Custom** > **Traces** > **OTEL Collector** for the protocol you use. + + ??? "Verify and view your first trace" + 1. In the external application, perform an operation. + 2. In your OpenObserve instance, go to **Traces**. + 3. Select the trace stream. + 4. Set the time range to the last few minutes, and open the newest trace. + 5. You should see spans from the services of your external application. + + ## Configure and view log and trace correlation + Configure correlation to navigate between traces and their related log records using shared `trace_id` and `span_id` fields. + + ### Understanding log and trace correlation + + - **Logs and Traces Correlation:** Logs become easy to join with traces when each log record includes the active TraceId and SpanId. The OpenTelemetry logs spec calls this out so that a UI can jump from a span to its related logs and back. Field names can be `trace_id` and `span_id` or equivalents that you choose. + - **Trace context:** A small set of identifiers that travels with each request so that tracing systems can follow the same request across services. In OpenTelemetry this context is carried in a SpanContext that contains a TraceId, a SpanId, and flags. + - **The `traceparent` header:** An HTTP header that passes the current trace context between services. It has four parts in this order: version, trace-id, parent-id, and trace-flags. Example: + `traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01` + + ### Requirements + - Logs and traces must come from the same application run. + - Your application must write trace context fields into every log record. + - OpenObserve must know the exact field names you used in your logs. + + ### What makes logs correlatable + The spans are generated by the system or application that you are monitoring. They are created by its tracing library or agent, not by OpenObserve. +
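+ The end result is a log record that carries both identifiers alongside the message. As a rough sketch, the record below is written to a hypothetical `app_logs` stream; the host and credentials are placeholders, and the identifier values must come from the span that was active when the record was logged:
+
+ ```bash
+ # Hypothetical example: host, credentials, and stream name are placeholders.
+ curl -u root@example.com:password -k https://<openobserve-host>/api/default/app_logs/_json \
+   -d '[{"level":"error","message":"payment declined","trace_id":"0af7651916cd43dd8448eb211c80319c","span_id":"b7ad6b7169203331"}]'
+ ```
+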
+ A span is active while your code is handling an operation. When a span is active, the logging component can read the following identifiers from it and add them to each log line: + + - `trace_id` + - `span_id` + + ??? "Step 1: Add trace identifiers to your logs in the application" + 1. Ensure your tracing library starts a span for each request and keeps the span active during request processing. Many libraries do this automatically once enabled. + 2. When you write a log record during that time, include the current span identifiers. Add `trace_id` and `span_id` to the log record. + 3. Choose log field names that fit your logging format. Examples include `trace_id` and `span_id` or `traceId` and `spanId`. Make a note of the exact names you used. You will map these field names in OpenObserve in the next step. + + ??? "Step 2: Tell OpenObserve how your log fields are named" + ![config-custom-trace-id](../../images/config-custom-trace-id.png) + 1. Open the OpenObserve UI. + 2. Go to **Management** > **Organization Parameters**. + 3. Under **Log** details: + - Set **Trace ID** field name to match your log field name. Example: `trace_id` or `traceId`. + - Set **Span ID** field name to match your log field name. Example: `span_id` or `spanId`. + 4. Save the settings. + + !!! note "Why this matters" + Traces in OpenObserve always use `trace_id` and `span_id` as defined by OTLP. + + Your logs can use different names. The mapping you set here allows the UI to build the correct filter when you open related logs from a span. + + ??? "Step 3: View the correlation in the UI" + + === "From Traces to Logs" + + 1. Open **Traces**. + 2. Select the trace stream. + 3. Choose the time range and select **Run query**. + 4. Open a trace to view its timeline. + 5. Now choose any of these ways to reach the related logs: + ![traces-to-logs](../../images/traces-to-logs.png) + **A. Use the header stream selector**: + + - In the trace header, open the stream dropdown next to **View Logs**. + - Pick the log stream that holds your application logs. + - Select **View Logs**. The Logs page opens, filtered to the current `trace_id` and `span_id`. + + **B. Use the magnifier on a span row**: + + - Hover over any span in **Operation Name**. + - Select the magnifier icon. + - This opens the correlated log stream and shows the log records linked to that trace ID and span ID. + + **C. Use the span details panel**: + + - Select a span to open the detailed span view. + - Select **View Logs** next to the Span Id. + - This opens the correlated log stream and shows the log records linked to that trace ID and span ID. + + === "From Logs to Trace" + + 1. Open **Logs**. + 2. Select the appropriate log stream. + 3. Select the time range and select **Run query**. + 4. Expand a log record. + 5. Select **View Trace**. + ![view-trace-from-logs](../../images/view-trace-from-logs.png) + 6. The system shows the trace that matches the `trace_id`. + + **Note**: If no results appear after selecting **View Trace**, confirm that in your log stream the trace ID and span ID are available in the `trace_id` and `span_id` fields, respectively. If not, ensure the custom trace ID and span ID fields are configured in the **Organization Parameters** section under **Management**. + ![config-traces-in-org-parameter](../../images/config-traces-in-org-parameter.png) +=== "How to" + + ## View a trace + 1. Go to **Traces**. + 2. Select the trace stream. + 3. Select a time range. + 4. Click **Run query**. + 5. From the query result, select the desired trace. 
+ + ## Use Trace Timeline and Service Map + Use Trace Timeline and Service Map to inspect performance and find slow spans: + ![trace-timeline](../../images/trace-timeline.png) + 1. Go to **Traces**. + 2. From the stream selector, select the trace stream. + 3. Set a time range. + 4. Select **Run query**. + 5. From the list of traces, open a trace. + 6. In the trace view, select the **Timeline** toggle at the top right corner. + 7. In **Operation Name**, expand the tree and look for the longest bars. The longest bar indicates the most time-consuming span. The duration is shown at the right edge of each bar. + 8. Switch to the **Service Map** toggle next to **Timeline** at the top right corner. + 9. The nodes show which services participated and their time contribution for this trace. + + ## Use filters to find the related logs + 1. Go to **Traces**. + 2. From the stream selector, select the trace stream. + 3. Add a filter in the non-SQL editor to find spans with errors, for example, `span_status = "ERROR"`. + 4. Set a time range. + 5. Select **Run query**. + 6. From the list of traces, open a trace. Matching traces show an error badge. + ![error-badge-spans](../../images/error-badge-spans.png) + 7. In the span tree, click the span marked with a red warning sign. + ![warning-spans](../../images/warning-spans.png) + 8. The detailed span view opens, showing the complete context for that span. + ![span-view](../../images/span-view.png) + 9. If log and trace correlation is configured, select **View Logs** in the span details panel to open the related logs for further investigation. + + ## Explore the detailed span view + ![detailed-span-view](../../images/detailed-span-view.png) + + | Tab | What it shows | When to use | + |-----|---------------|-------------| + | Tags | Key/value attributes on the span (code location, service identity, status, parent links, etc.). | Identify where the work ran, whether it showed an error, and the parent/child relationship. | + | Process | Compact identity of the emitting process (service, instance, version). | Confirm the exact instance/version for rollbacks or targeted restarts. | + | Events | Time-ordered records emitted inside the span (INFO/ERROR messages with context). | Read the span's timeline narrative and error messages without leaving the trace. | + | Attributes | The full JSON document stored for this span. | Copy/paste into tickets, verify raw values, or see fields not surfaced elsewhere. 
| \ No newline at end of file diff --git a/docs/versions.json b/docs/versions.json deleted file mode 100644 index 544b7b4d..00000000 --- a/docs/versions.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - -} \ No newline at end of file diff --git a/input.css b/input.css index 2fe164b6..050698a2 100644 --- a/input.css +++ b/input.css @@ -109,3 +109,7 @@ main { -webkit-text-fill-color: transparent; background-clip: text; } + +.header-list-style { + list-style: none; +} diff --git a/mkdocs.yml b/mkdocs.yml index 43e84106..950c2736 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -23,11 +23,14 @@ extra_javascript: # - js/vector_co.js - js/search-close-minimal.js - js/search-tracking.js + - js/theme.js # - js/toc-highlight.js - https://buttons.github.io/buttons.js # - js/reo.js - js/landing-individual-card-ms-tracking.js - js/google-tag-manager.js + - js/feedback.js + - js/segment.js extra: # social: # - icon: fontawesome/brands/linkedin @@ -47,8 +50,8 @@ extra: # lang: zh generator: false default_color_scheme: default - version: - provider: mike + # version: + # provider: mike analytics: provider: google property: G-3383ZJ2HH7 diff --git a/overrides/css/output.css b/overrides/css/output.css index b43d224a..e86118ca 100644 --- a/overrides/css/output.css +++ b/overrides/css/output.css @@ -457,9 +457,10 @@ legend { padding: 0; } +ol, ul, menu { - list-style: none; + /* list-style: none; */ margin: 0; padding: 0; } @@ -631,6 +632,10 @@ video { left: 50%; } +.tw-right-0 { + right: 0px; +} + .tw-right-2 { right: 0.5rem; } @@ -647,6 +652,10 @@ video { top: 100%; } +.tw-z-10 { + z-index: 10; +} + .tw-z-50 { z-index: 50; } @@ -656,6 +665,11 @@ video { margin-right: auto; } +.tw-my-2 { + margin-top: 0.5rem; + margin-bottom: 0.5rem; +} + .tw-mb-2 { margin-bottom: 0.5rem; } @@ -688,6 +702,10 @@ video { margin-top: 0.5rem; } +.tw-mt-4 { + margin-top: 1rem; +} + .tw-mt-5 { margin-top: 1.25rem; } @@ -720,10 +738,18 @@ video { height: 0.125rem; } +.tw-h-10 { + height: 2.5rem; +} + .tw-h-12 { height: 3rem; } +.tw-h-16 { + height: 4rem; +} + .tw-h-4 { height: 1rem; } @@ -740,6 +766,14 @@ video { height: 1.75rem; } +.tw-h-8 { + height: 2rem; +} + +.tw-h-9 { + height: 2.25rem; +} + .tw-h-\[calc\(100vh-120px\)\] { height: calc(100vh - 120px); } @@ -748,10 +782,18 @@ video { height: 100%; } +.tw-w-10 { + width: 2.5rem; +} + .tw-w-12 { width: 3rem; } +.tw-w-16 { + width: 4rem; +} + .tw-w-4 { width: 1rem; } @@ -764,10 +806,22 @@ video { width: 1.25rem; } +.tw-w-6 { + width: 1.5rem; +} + .tw-w-7 { width: 1.75rem; } +.tw-w-8 { + width: 2rem; +} + +.tw-w-9 { + width: 2.25rem; +} + .tw-w-\[430px\] { width: 430px; } @@ -784,6 +838,10 @@ video { width: 100%; } +.tw-max-w-\[70rem\] { + max-width: 70rem; +} + .tw-flex-1 { flex: 1 1 0%; } @@ -806,14 +864,34 @@ video { transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)); } +.tw-scale-90 { + --tw-scale-x: .9; + --tw-scale-y: .9; + transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)); +} + .tw-transform { transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)); } +@keyframes tw-spin { + to { + transform: rotate(360deg); + } +} + +.tw-animate-spin { + animation: tw-spin 1s linear infinite; 
+} + .tw-cursor-pointer { cursor: pointer; } +.tw-resize-none { + resize: none; +} + .tw-grid-cols-2 { grid-template-columns: repeat(2, minmax(0, 1fr)); } @@ -854,6 +932,10 @@ video { gap: 0.625rem; } +.tw-gap-3 { + gap: 0.75rem; +} + .tw-gap-4 { gap: 1rem; } @@ -862,6 +944,10 @@ video { gap: 1.5rem; } +.tw-gap-y-2 { + row-gap: 0.5rem; +} + .tw-space-x-0\.5 > :not([hidden]) ~ :not([hidden]) { --tw-space-x-reverse: 0; margin-right: calc(0.125rem * var(--tw-space-x-reverse)); @@ -922,6 +1008,10 @@ video { border-radius: 0.375rem; } +.tw-rounded-xl { + border-radius: 0.75rem; +} + .tw-border { border-width: 1px; } @@ -930,6 +1020,10 @@ video { border-width: 2px; } +.tw-border-4 { + border-width: 4px; +} + .tw-border-t { border-top-width: 1px; } @@ -949,16 +1043,39 @@ video { border-color: rgb(0 0 0 / var(--tw-border-opacity, 1)); } +.tw-border-gray-200 { + --tw-border-opacity: 1; + border-color: rgb(229 231 235 / var(--tw-border-opacity, 1)); +} + .tw-border-gray-300 { --tw-border-opacity: 1; border-color: rgb(209 213 219 / var(--tw-border-opacity, 1)); } +.tw-border-gray-400 { + --tw-border-opacity: 1; + border-color: rgb(156 163 175 / var(--tw-border-opacity, 1)); +} + .tw-border-gray-700 { --tw-border-opacity: 1; border-color: rgb(55 65 81 / var(--tw-border-opacity, 1)); } +.tw-border-white { + --tw-border-opacity: 1; + border-color: rgb(255 255 255 / var(--tw-border-opacity, 1)); +} + +.tw-border-t-transparent { + border-top-color: transparent; +} + +.tw-border-opacity-80 { + --tw-border-opacity: 0.8; +} + .tw-bg-\[\#121212\] { --tw-bg-opacity: 1; background-color: rgb(18 18 18 / var(--tw-bg-opacity, 1)); @@ -969,6 +1086,16 @@ video { background-color: rgb(119 130 255 / var(--tw-bg-opacity, 1)); } +.tw-bg-black { + --tw-bg-opacity: 1; + background-color: rgb(0 0 0 / var(--tw-bg-opacity, 1)); +} + +.tw-bg-gray-200 { + --tw-bg-opacity: 1; + background-color: rgb(229 231 235 / var(--tw-bg-opacity, 1)); +} + .tw-bg-transparent { background-color: transparent; } @@ -978,11 +1105,33 @@ video { background-color: rgb(255 255 255 / var(--tw-bg-opacity, 1)); } +.tw-bg-opacity-60 { + --tw-bg-opacity: 0.6; +} + +.tw-bg-gradient-to-r { + background-image: linear-gradient(to right, var(--tw-gradient-stops)); +} + +.tw-from-blue-500 { + --tw-gradient-from: #3b82f6 var(--tw-gradient-from-position); + --tw-gradient-to: rgb(59 130 246 / 0) var(--tw-gradient-to-position); + --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to); +} + +.tw-to-purple-600 { + --tw-gradient-to: #9333ea var(--tw-gradient-to-position); +} + .tw-object-contain { -o-object-fit: contain; object-fit: contain; } +.tw-p-0\.5 { + padding: 0.125rem; +} + .tw-p-2 { padding: 0.5rem; } @@ -1048,6 +1197,11 @@ video { padding-bottom: 0.5rem; } +.tw-py-3 { + padding-top: 0.75rem; + padding-bottom: 0.75rem; +} + .tw-pt-4 { padding-top: 1rem; } @@ -1064,6 +1218,14 @@ video { text-align: center; } +.tw-font-sans { + font-family: ui-sans-serif, system-ui, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"; +} + +.tw-text-\[12px\] { + font-size: 12px; +} + .tw-text-\[14px\] { font-size: 14px; } @@ -1091,6 +1253,11 @@ video { line-height: 1.25rem; } +.tw-text-xl { + font-size: 1.25rem; + line-height: 1.75rem; +} + .tw-text-xs { font-size: 0.75rem; line-height: 1rem; @@ -1112,6 +1279,10 @@ video { text-transform: capitalize; } +.tw-leading-relaxed { + line-height: 1.625; +} + .\!tw-text-white { --tw-text-opacity: 1 !important; color: rgb(255 255 255 / var(--tw-text-opacity, 1)) !important; @@ 
-1141,11 +1312,31 @@ video { color: rgb(107 114 128 / var(--tw-text-opacity, 1)); } +.tw-text-gray-600 { + --tw-text-opacity: 1; + color: rgb(75 85 99 / var(--tw-text-opacity, 1)); +} + +.tw-text-gray-900 { + --tw-text-opacity: 1; + color: rgb(17 24 39 / var(--tw-text-opacity, 1)); +} + .tw-text-red-400 { --tw-text-opacity: 1; color: rgb(248 113 113 / var(--tw-text-opacity, 1)); } +.tw-text-red-500 { + --tw-text-opacity: 1; + color: rgb(239 68 68 / var(--tw-text-opacity, 1)); +} + +.tw-text-teal-500 { + --tw-text-opacity: 1; + color: rgb(20 184 166 / var(--tw-text-opacity, 1)); +} + .tw-text-white { --tw-text-opacity: 1; color: rgb(255 255 255 / var(--tw-text-opacity, 1)); @@ -1172,6 +1363,16 @@ video { color: rgba(255,255,255,0.6); } +.tw-placeholder-gray-400::-moz-placeholder { + --tw-placeholder-opacity: 1; + color: rgb(156 163 175 / var(--tw-placeholder-opacity, 1)); +} + +.tw-placeholder-gray-400::placeholder { + --tw-placeholder-opacity: 1; + color: rgb(156 163 175 / var(--tw-placeholder-opacity, 1)); +} + .tw-caret-white { caret-color: #fff; } @@ -1186,6 +1387,18 @@ video { box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow); } +.tw-shadow-md { + --tw-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1); + --tw-shadow-colored: 0 4px 6px -1px var(--tw-shadow-color), 0 2px 4px -2px var(--tw-shadow-color); + box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow); +} + +.tw-shadow-sm { + --tw-shadow: 0 1px 2px 0 rgb(0 0 0 / 0.05); + --tw-shadow-colored: 0 1px 2px 0 var(--tw-shadow-color); + box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow); +} + .tw-outline-none { outline: 2px solid transparent; outline-offset: 2px; @@ -1346,6 +1559,15 @@ main { background-clip: text; } +.header-list-style { + list-style: none; +} + +.hover\:tw-border-gray-200:hover { + --tw-border-opacity: 1; + border-color: rgb(229 231 235 / var(--tw-border-opacity, 1)); +} + .hover\:tw-bg-\[\#6672fa\]:hover { --tw-bg-opacity: 1; background-color: rgb(102 114 250 / var(--tw-bg-opacity, 1)); @@ -1370,6 +1592,16 @@ main { --tw-bg-opacity: 0.1; } +.hover\:tw-from-blue-600:hover { + --tw-gradient-from: #2563eb var(--tw-gradient-from-position); + --tw-gradient-to: rgb(37 99 235 / 0) var(--tw-gradient-to-position); + --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to); +} + +.hover\:tw-to-purple-700:hover { + --tw-gradient-to: #7e22ce var(--tw-gradient-to-position); +} + .hover\:tw-text-\[\#6b76e3\]:hover { --tw-text-opacity: 1; color: rgb(107 118 227 / var(--tw-text-opacity, 1)); @@ -1385,12 +1617,37 @@ main { color: rgb(0 0 0 / var(--tw-text-opacity, 1)); } +.hover\:tw-text-gray-900:hover { + --tw-text-opacity: 1; + color: rgb(17 24 39 / var(--tw-text-opacity, 1)); +} + .hover\:tw-shadow-xl:hover { --tw-shadow: 0 20px 25px -5px rgb(0 0 0 / 0.1), 0 8px 10px -6px rgb(0 0 0 / 0.1); --tw-shadow-colored: 0 20px 25px -5px var(--tw-shadow-color), 0 8px 10px -6px var(--tw-shadow-color); box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow); } +.focus\:tw-border-transparent:focus { + border-color: transparent; +} + +.focus\:tw-outline-none:focus { + outline: 2px solid transparent; + outline-offset: 2px; +} + +.focus\:tw-ring-2:focus { + --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); + --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 
calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color); + box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); +} + +.focus\:tw-ring-blue-500:focus { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(59 130 246 / var(--tw-ring-opacity, 1)); +} + .focus-visible\:tw-outline-none:focus-visible { outline: 2px solid transparent; outline-offset: 2px; diff --git a/overrides/partials/footer.html b/overrides/partials/footer.html index 04982aa1..ec5d3f51 100644 --- a/overrides/partials/footer.html +++ b/overrides/partials/footer.html @@ -85,7 +85,13 @@
[Hunk bodies for overrides/partials/footer.html (@@ -85,7 +85,13 @@ above, plus @@ -118,7 +124,10 @@, @@ -138,91 +168,443 @@, @@ -231,7 +613,7 @@, and @@ -382,18 +764,158 @@) lost their HTML markup during extraction and are not reproduced here. The surviving text shows a feedback form added near the newsletter error message, the address "3000 Sand Hill Rd Building 1, Suite 260, Menlo Park, CA 94025", and a rebuilt footer navigation: an OpenObserve logo link, Get Demo and GitHub Star buttons, a Platform menu (Pipelines, Alerts, Visualization & Dashboards), a Solutions menu (GCP Monitoring, Kubernetes Monitoring, Database Monitoring, OpenTelemetry), Pricing and Resources links, and an added {% include "partials/navbar.html" %}.]
diff --git a/overrides/partials/navbar.html b/overrides/partials/navbar.html index c232bad3..1748a3fb 100644 --- a/overrides/partials/navbar.html +++ b/overrides/partials/navbar.html @@ -29,7 +29,7 @@