From 32228a7de86bb21a0eb13e1aa957b37cd120b54a Mon Sep 17 00:00:00 2001
From: hieu-jan <150573299+hieu-jan@users.noreply.github.com>
Date: Fri, 8 Dec 2023 01:18:26 +0900
Subject: [PATCH 1/7] feat: improve SEO keywords

---
 docs/docs/examples/jan.md           | 1 +
 docs/docs/examples/openai-node.md   | 1 +
 docs/docs/examples/openai-python.md | 1 +
 docs/docs/examples/palchat.md       | 1 +
 docs/docs/features/chat.md          | 1 +
 docs/docs/features/cont-batch.md    | 1 +
 docs/docs/features/embed.md         | 1 +
 docs/docs/features/feat.md          | 1 +
 docs/docs/features/load-unload.md   | 1 +
 docs/docs/features/multi-thread.md  | 1 +
 docs/docs/features/prompt.md        | 1 +
 docs/docs/features/warmup.md        | 1 +
 docs/docs/new/about.md              | 1 +
 docs/docs/new/architecture.md       | 1 +
 docs/docs/new/build-source.md       | 1 +
 docs/docs/new/faq.md                | 1 +
 docs/docs/new/install.md            | 1 +
 docs/docs/new/model-cycle.md        | 1 +
 docs/docs/new/quickstart.md         | 1 +
 docs/docusaurus.config.js           | 6 +++---
 20 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/docs/docs/examples/jan.md b/docs/docs/examples/jan.md
index 365050737..d4b6fd670 100644
--- a/docs/docs/examples/jan.md
+++ b/docs/docs/examples/jan.md
@@ -1,6 +1,7 @@
 ---
 title: Nitro with Jan
 description: Nitro integrates with Jan to enable a ChatGPT-like functional app, optimized for local AI.
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 You can effortlessly utilize Nitro through [Jan](https://jan.ai/), as it is fully integrated with all its functions. With Jan, using Nitro becomes straightforward without the need for any coding.
diff --git a/docs/docs/examples/openai-node.md b/docs/docs/examples/openai-node.md
index f12539e0f..dfa515bdf 100644
--- a/docs/docs/examples/openai-node.md
+++ b/docs/docs/examples/openai-node.md
@@ -1,6 +1,7 @@
 ---
 title: Nitro with openai-node
 description: Nitro intergration guide for Node.js.
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 You can migrate from OAI API or Azure OpenAI to Nitro using your existing NodeJS code quickly
diff --git a/docs/docs/examples/openai-python.md b/docs/docs/examples/openai-python.md
index be36d6d43..6fb54c2e8 100644
--- a/docs/docs/examples/openai-python.md
+++ b/docs/docs/examples/openai-python.md
@@ -1,6 +1,7 @@
 ---
 title: Nitro with openai-python
 description: Nitro intergration guide for Python.
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 
diff --git a/docs/docs/examples/palchat.md b/docs/docs/examples/palchat.md
index fd675eb81..598a18104 100644
--- a/docs/docs/examples/palchat.md
+++ b/docs/docs/examples/palchat.md
@@ -1,6 +1,7 @@
 ---
 title: Nitro with Pal Chat
 description: Nitro intergration guide for mobile device usage.
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 This guide demonstrates how to use Nitro with Pal Chat, enabling local AI chat capabilities on mobile devices.
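Aside: Docusaurus reads the `keywords` front matter added above and emits it as a `<meta name="keywords">` tag on each generated page. A small Node.js script along the following lines can list any doc that still lacks the field. This is a sketch only; the file name, location, and approach are assumptions and not part of this patch.

// check-keywords.js: hypothetical helper, not part of this patch series.
// Lists Markdown files under docs/docs whose front matter has no `keywords:` entry.
const fs = require('fs');
const path = require('path');

// Recursively collect .md files.
function walk(dir) {
  return fs.readdirSync(dir, { withFileTypes: true }).flatMap((entry) => {
    const full = path.join(dir, entry.name);
    if (entry.isDirectory()) return walk(full);
    return full.endsWith('.md') ? [full] : [];
  });
}

for (const file of walk('docs/docs')) {
  const text = fs.readFileSync(file, 'utf8');
  // Front matter is the block between the opening and closing '---' lines.
  const match = text.match(/^---\r?\n([\s\S]*?)\r?\n---/);
  const frontMatter = match ? match[1] : '';
  if (!/^keywords:/m.test(frontMatter)) {
    console.log(`missing keywords: ${file}`);
  }
}

Run it from the repository root with `node check-keywords.js`.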
diff --git a/docs/docs/features/chat.md b/docs/docs/features/chat.md
index 229fb8b0e..939c46b07 100644
--- a/docs/docs/features/chat.md
+++ b/docs/docs/features/chat.md
@@ -1,6 +1,7 @@
 ---
 title: Chat Completion
 description: Inference engine for chat completion, the same as OpenAI's
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 The Chat Completion feature in Nitro provides a flexible way to interact with any local Large Language Model (LLM).
diff --git a/docs/docs/features/cont-batch.md b/docs/docs/features/cont-batch.md
index 65a5f950f..d853db933 100644
--- a/docs/docs/features/cont-batch.md
+++ b/docs/docs/features/cont-batch.md
@@ -1,6 +1,7 @@
 ---
 title: Continuous Batching
 description: Nitro's continuous batching combines multiple requests, enhancing throughput.
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 Continuous batching boosts throughput and minimizes latency in large language model (LLM) inference. This technique groups multiple inference requests, significantly improving GPU utilization.
diff --git a/docs/docs/features/embed.md b/docs/docs/features/embed.md
index 9e19cd125..77f610981 100644
--- a/docs/docs/features/embed.md
+++ b/docs/docs/features/embed.md
@@ -1,6 +1,7 @@
 ---
 title: Embedding
 description: Inference engine for embedding, the same as OpenAI's
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 Embeddings are lists of numbers (floats). To find how similar two embeddings are, we measure the [distance](https://en.wikipedia.org/wiki/Cosine_similarity) between them.
diff --git a/docs/docs/features/feat.md b/docs/docs/features/feat.md
index 51a526331..bc091a547 100644
--- a/docs/docs/features/feat.md
+++ b/docs/docs/features/feat.md
@@ -1,6 +1,7 @@
 ---
 title: Nitro Features
 description: What Nitro supports
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 Nitro enhances the `llama.cpp` research base, optimizing it for production environments with advanced features:
diff --git a/docs/docs/features/load-unload.md b/docs/docs/features/load-unload.md
index ca3980069..6c4cd5ff0 100644
--- a/docs/docs/features/load-unload.md
+++ b/docs/docs/features/load-unload.md
@@ -1,6 +1,7 @@
 ---
 title: Load and Unload models
 description: Nitro loads and unloads local AI models (local LLMs).
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 ## Load model
diff --git a/docs/docs/features/multi-thread.md b/docs/docs/features/multi-thread.md
index a2ba2583b..2fe9b23d9 100644
--- a/docs/docs/features/multi-thread.md
+++ b/docs/docs/features/multi-thread.md
@@ -1,6 +1,7 @@
 ---
 title: Multithreading
 description: Nitro utilizes multithreading to optimize hardware usage.
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 Multithreading in programming allows concurrent task execution, improving efficiency and responsiveness. It's key for optimizing hardware and application performance.
diff --git a/docs/docs/features/prompt.md b/docs/docs/features/prompt.md
index 99418f8ac..28c498671 100644
--- a/docs/docs/features/prompt.md
+++ b/docs/docs/features/prompt.md
@@ -1,6 +1,7 @@
 ---
 title: Prompt Role Support
 description: Setting up Nitro prompts to build an AI assistant.
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 System, user, and assistant prompt is crucial for effectively utilizing the Large Language Model. These prompts work together to create a coherent and functional conversational flow.
diff --git a/docs/docs/features/warmup.md b/docs/docs/features/warmup.md
index b709cfd7f..cebf61069 100644
--- a/docs/docs/features/warmup.md
+++ b/docs/docs/features/warmup.md
@@ -1,6 +1,7 @@
 ---
 title: Warming Up Model
 description: Nitro warms up the model to optimize delays.
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 Model warming up involves pre-running requests through an AI model to fine-tune its components for production. This step minimizes delays during initial inferences, ensuring readiness for immediate use.
diff --git a/docs/docs/new/about.md b/docs/docs/new/about.md
index 202336b1e..b49c834a4 100644
--- a/docs/docs/new/about.md
+++ b/docs/docs/new/about.md
@@ -2,6 +2,7 @@
 title: About Nitro
 slug: /docs
 description: Efficient LLM inference engine for edge computing
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 Nitro is a high-efficiency C++ inference engine for edge computing, powering [Jan](https://jan.ai/). It is lightweight and embeddable, ideal for product integration.
diff --git a/docs/docs/new/architecture.md b/docs/docs/new/architecture.md
index e6aae0bd2..f23657465 100644
--- a/docs/docs/new/architecture.md
+++ b/docs/docs/new/architecture.md
@@ -1,6 +1,7 @@
 ---
 title: Architecture
 slug: /achitecture
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 ![Nitro Architecture](img/architecture.drawio.png)
diff --git a/docs/docs/new/build-source.md b/docs/docs/new/build-source.md
index 62e4e55b2..046fd7189 100644
--- a/docs/docs/new/build-source.md
+++ b/docs/docs/new/build-source.md
@@ -2,6 +2,7 @@
 title: Build From Source
 slug: /build-source
 description: Install Nitro manually
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 This guide provides step-by-step instructions for building Nitro from source on Linux, macOS, and Windows systems.
diff --git a/docs/docs/new/faq.md b/docs/docs/new/faq.md
index 0bd25f1a8..c4250cd91 100644
--- a/docs/docs/new/faq.md
+++ b/docs/docs/new/faq.md
@@ -2,6 +2,7 @@
 title: FAQs
 slug: /faq
 description: Frequently Asked Questions about Nitro
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
diff --git a/docs/docs/new/install.md b/docs/docs/new/install.md
index 4b737c9dd..0a323a57a 100644
--- a/docs/docs/new/install.md
+++ b/docs/docs/new/install.md
@@ -2,6 +2,7 @@
 title: Installation
 slug: /install
 description: How to install Nitro
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 # Nitro Installation Guide
diff --git a/docs/docs/new/model-cycle.md b/docs/docs/new/model-cycle.md
index 06f9b3214..d06ff8dd0 100644
--- a/docs/docs/new/model-cycle.md
+++ b/docs/docs/new/model-cycle.md
@@ -1,6 +1,7 @@
 ---
 title: Model Life Cycle
 slug: /model-cycle
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 ## Load model
diff --git a/docs/docs/new/quickstart.md b/docs/docs/new/quickstart.md
index ecc192733..30326b0c4 100644
--- a/docs/docs/new/quickstart.md
+++ b/docs/docs/new/quickstart.md
@@ -2,6 +2,7 @@
 title: Quickstart
 slug: /quickstart
 description: How to use Nitro
+keywords: [Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama]
 ---
 
 ## Step 1: Install Nitro
diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index 3ef4a1b8f..c58793c1d 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -126,12 +126,12 @@ const config = {
       },
      metadata: [
        { name: 'description', content: 'Nitro is a high-efficiency Large Language Model inference engine for edge computing.'},
-        { name: 'keywords', content: 'Nitro, OpenAI compatible, fast inference, local AI, llm, small AI, free, open source, production ready' },
-        { property: 'og:title', content: 'Embeddable AI | Nitro' },
+        { name: 'keywords', content: 'Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama' },
+        { property: 'og:title', content: 'Fast inference engine | Nitro' },
        { property: 'og:description', content: 'Nitro is a high-efficiency Large Language Model inference engine for edge computing.' },
        { property: 'twitter:card', content: 'summary_large_image' },
        { property: 'twitter:site', content: '@janhq_' },
-        { property: 'twitter:title', content: 'Embeddable AI | Nitro' },
+        { property: 'twitter:title', content: 'Fast inference engine | Nitro' },
        { property: 'twitter:description', content: 'Nitro is a high-efficiency Large Language Model inference engine for edge computing.' },
      ],
      headTags: [

From 0b9701e2a3073368895191c321325f499fdfd445 Mon Sep 17 00:00:00 2001
From: hieu-jan <150573299+hieu-jan@users.noreply.github.com>
Date: Fri, 8 Dec 2023 01:22:50 +0900
Subject: [PATCH 2/7] add missing Meta Open Graph properties

---
 docs/docusaurus.config.js | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index c58793c1d..fe02fdd74 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -129,6 +129,7 @@ const config = {
        { name: 'keywords', content: 'Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama' },
        { property: 'og:title', content: 'Fast inference engine | Nitro' },
        { property: 'og:description', content: 'Nitro is a high-efficiency Large Language Model inference engine for edge computing.' },
+        { property: 'og:type', content: 'website'},
        { property: 'twitter:card', content: 'summary_large_image' },
        { property: 'twitter:site', content: '@janhq_' },
        { property: 'twitter:title', content: 'Fast inference engine | Nitro' },

From b015b60341cf469e1d98ae4fdf2f04c532b0f15c Mon Sep 17 00:00:00 2001
From: hieu-jan <150573299+hieu-jan@users.noreply.github.com>
Date: Fri, 8 Dec 2023 01:26:40 +0900
Subject: [PATCH 3/7] improve robots index

---
 docs/docusaurus.config.js | 3 +++
 docs/static/robots.txt    | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index fe02fdd74..56a9a589b 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -37,6 +37,8 @@ const config = {
     mermaid: true,
   },
 
+  noIndex: false,
+
   // Plugins we added
   plugins: [
     "docusaurus-plugin-sass",
@@ -127,6 +129,7 @@ const config = {
      metadata: [
        { name: 'description', content: 'Nitro is a high-efficiency Large Language Model inference engine for edge computing.'},
        { name: 'keywords', content: 'Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama' },
+        { name: 'robots', content: 'index, follow' },
        { property: 'og:title', content: 'Fast inference engine | Nitro' },
        { property: 'og:description', content: 'Nitro is a high-efficiency Large Language Model inference engine for edge computing.' },
        { property: 'og:type', content: 'website'},
diff --git a/docs/static/robots.txt b/docs/static/robots.txt
index 6f27bb66a..14267e903 100644
--- a/docs/static/robots.txt
+++ b/docs/static/robots.txt
@@ -1,2 +1,2 @@
 User-agent: *
-Disallow:
\ No newline at end of file
+Allow: /
\ No newline at end of file

From 3f29ccf5bfe0750e6ffbf0b42a6c34bd7f7c8b7d Mon Sep 17 00:00:00 2001
From: hieu-jan <150573299+hieu-jan@users.noreply.github.com>
Date: Fri, 8 Dec 2023 01:41:52 +0900
Subject: [PATCH 4/7] change robots tag

---
 docs/docusaurus.config.js | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index 56a9a589b..75e53b993 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -126,10 +126,13 @@ const config = {
      liveCodeBlock: {
        playgroundPosition: "bottom",
      },
+      meta: [
+        { name: 'robots', content: 'index, follow' },
+      ],
      metadata: [
        { name: 'description', content: 'Nitro is a high-efficiency Large Language Model inference engine for edge computing.'},
        { name: 'keywords', content: 'Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama' },
-        { name: 'robots', content: 'index, follow' },
+
        { property: 'og:title', content: 'Fast inference engine | Nitro' },
        { property: 'og:description', content: 'Nitro is a high-efficiency Large Language Model inference engine for edge computing.' },
        { property: 'og:type', content: 'website'},

From ee48ad3fcc26a907b2e4edb1f26d3db0ce2f7a84 Mon Sep 17 00:00:00 2001
From: hieu-jan <150573299+hieu-jan@users.noreply.github.com>
Date: Fri, 8 Dec 2023 01:56:57 +0900
Subject: [PATCH 5/7] update docusaurus.config.js

---
 docs/docusaurus.config.js | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index 75e53b993..045577a60 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -36,9 +36,6 @@ const config = {
   markdown: {
     mermaid: true,
   },
-
-  noIndex: false,
-
   // Plugins we added
   plugins: [
     "docusaurus-plugin-sass",
@@ -123,13 +120,10 @@ const config = {
      liveCodeBlock: {
        playgroundPosition: "bottom",
      },
-      meta: [
-        { name: 'robots', content: 'index, follow' },
-      ],
      metadata: [
        { name: 'description', content: 'Nitro is a high-efficiency Large Language Model inference engine for edge computing.'},
        { name: 'keywords', content: 'Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama' },
-
+        { name: "robots", content: "all" },
        { property: 'og:title', content: 'Fast inference engine | Nitro' },
        { property: 'og:description', content: 'Nitro is a high-efficiency Large Language Model inference engine for edge computing.' },
        { property: 'og:type', content: 'website'},

From c7abe2e49a27a8494d429fa1f1feecb0acaa4336 Mon Sep 17 00:00:00 2001
From: hieu-jan <150573299+hieu-jan@users.noreply.github.com>
Date: Fri, 8 Dec 2023 04:34:29 +0900
Subject: [PATCH 6/7] update docusaurus.config.js

---
 docs/docusaurus.config.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index 045577a60..53c8ef117 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -126,7 +126,7 @@ const config = {
      metadata: [
        { name: 'description', content: 'Nitro is a high-efficiency Large Language Model inference engine for edge computing.'},
        { name: 'keywords', content: 'Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama' },
-        { name: "robots", content: "all" },
+        { name: "robots", content: "index, follow" },
        { property: 'og:title', content: 'Fast inference engine | Nitro' },
        { property: 'og:description', content: 'Nitro is a high-efficiency Large Language Model inference engine for edge computing.' },
        { property: 'og:type', content: 'website'},

From 06540d6120869c0ef27eb0aa67f0359c26c09bb5 Mon Sep 17 00:00:00 2001
From: Hieu <150573299+hieu-jan@users.noreply.github.com>
Date: Mon, 11 Dec 2023 11:19:40 +0900
Subject: [PATCH 7/7] add canonical URL

---
 docs/docusaurus.config.js | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index 53c8ef117..4df697ad5 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -124,12 +124,22 @@ const config = {
        playgroundPosition: "bottom",
      },
      metadata: [
+
        { name: 'description', content: 'Nitro is a high-efficiency Large Language Model inference engine for edge computing.'},
        { name: 'keywords', content: 'Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama' },
+
+        // Canonical URL
+        { name: 'canonical', content: 'https://nitro.jan.ai/' },
+
+        // Robots tags
        { name: "robots", content: "index, follow" },
+
+        // Open Graph tags
        { property: 'og:title', content: 'Fast inference engine | Nitro' },
        { property: 'og:description', content: 'Nitro is a high-efficiency Large Language Model inference engine for edge computing.' },
        { property: 'og:type', content: 'website'},
+
+        // Twitter card tags
        { property: 'twitter:card', content: 'summary_large_image' },
        { property: 'twitter:site', content: '@janhq_' },
        { property: 'twitter:title', content: 'Fast inference engine | Nitro' },
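Aside: with the full series applied, the head metadata ends up roughly as sketched below. Docusaurus renders each `themeConfig.metadata` entry as a `<meta>` tag, so the `canonical` entry added in PATCH 7/7 produces `<meta name="canonical" ...>` rather than the conventional `<link rel="canonical">`; if a real canonical link element is wanted, the existing `headTags` option is the usual place for it. The snippet is an illustrative excerpt, not the actual docusaurus.config.js, and the `headTags` entry in particular is an assumption that is not part of these patches.

// metadata-excerpt.js: illustrative sketch of the head metadata after PATCH 7/7.
// Values are copied from the diffs above; this is not the real config file.
const metadata = [
  { name: 'description', content: 'Nitro is a high-efficiency Large Language Model inference engine for edge computing.' },
  { name: 'keywords', content: 'Nitro, Jan, fast inference, inference server, local AI, large language model, OpenAI compatible, open source, llama' },
  // Rendered as <meta name="canonical" ...>, not <link rel="canonical">.
  { name: 'canonical', content: 'https://nitro.jan.ai/' },
  { name: 'robots', content: 'index, follow' },
  { property: 'og:title', content: 'Fast inference engine | Nitro' },
  { property: 'og:type', content: 'website' },
  { property: 'twitter:card', content: 'summary_large_image' },
];

// Hypothetical alternative (not in this series): emit a true canonical link
// element through Docusaurus's top-level `headTags` option instead.
const headTags = [
  {
    tagName: 'link',
    attributes: { rel: 'canonical', href: 'https://nitro.jan.ai/' },
  },
];

module.exports = { metadata, headTags };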