diff --git a/examples/customize-models/cats-vs-dogs-identification-from-images.mdx b/examples/customize-models/cats-vs-dogs-identification-from-images.mdx
new file mode 100644
index 0000000..1ea3e3f
--- /dev/null
+++ b/examples/customize-models/cats-vs-dogs-identification-from-images.mdx
@@ -0,0 +1,38 @@
+# Cats vs dogs identification from images
+
+[](https://github.com/Paulescu/image-classification-with-local-vlms/)
+
+In this example, you will learn how to fine-tune a Liquid Foundation Model to identify cats and dogs from images.
+
+## Environment setup
+
+You will need:
+
+- [uv](https://docs.astral.sh/uv/) to manage Python dependencies and run the application efficiently without creating virtual environments manually.
+
+## Step 1. Establishing baseline accuracy
+
+Go ahead and clone the repository:
+```sh
+git clone https://github.com/Paulescu/image-classification-with-local-vlms.git
+cd image-classification-with-local-vlms
+```
+
+Then, install the dependencies into a virtual environment using uv:
+```sh
+uv sync
+```
+
+Evaluation (and especially fine-tuning) scripts typically contain many parameters that need to be set. It is best practice to extract
+them into separate configuration files using whatever format you prefer. In this project we use YAML files, which you
+place under the `configs` directory.
+
+Fine-tuning scripts typically contain many parameters that need to be set. It is best practice to extract
+them into separate configuration files using whatever format you prefer. In this project we use YAML files, which you
+place under the `configs` directory, and load them into our Python script using
+
+```python
+
+```
+
+## Step 2. Fine tuning
diff --git a/examples/customize-models/index.md b/examples/customize-models/index.md
new file mode 100644
index 0000000..45c776d
--- /dev/null
+++ b/examples/customize-models/index.md
@@ -0,0 +1,14 @@
+# Getting started
+
+
+
+ Cats vs dogs identification from images
+
+
+ Learn how to fine-tune a Liquid Foundation Model to identify cats and dogs from images.
+
+ Fine-tuning
+ VLM
+
+
+
diff --git a/examples/deploy-models-on-ios/index.md b/examples/deploy-models-on-ios/index.md
new file mode 100644
index 0000000..2f5563a
--- /dev/null
+++ b/examples/deploy-models-on-ios/index.md
@@ -0,0 +1,14 @@
+# Getting started
+
+
+
+ Slogan Generator iOS app
+
+
+ A simple iOS app that generates creative slogans using local AI models, with no internet connection required.
+
+ iOS
+ LEAP SDK
+
+
+
diff --git a/examples/deploy-models-on-ios/slogan-generator-app.mdx b/examples/deploy-models-on-ios/slogan-generator-app.mdx
new file mode 100644
index 0000000..c453a1e
--- /dev/null
+++ b/examples/deploy-models-on-ios/slogan-generator-app.mdx
@@ -0,0 +1,530 @@
+# A Slogan Generator iOS app
+
+[](https://github.com/Liquid4All/LeapSDK-Examples/tree/main/iOS/LeapSloganExample)
+
+A simple iOS app that generates creative slogans using local AI models, with no internet connection required.
+
+## This is what you will learn
+
+By the end of this guide, you'll understand:
+
+- How to integrate the LeapSDK into your iOS project
+- How to load and run AI models locally on an iPhone or iPad
+- How to implement real-time streaming text generation
+
+## Understanding the Architecture
+
+Before we write code, let's understand what we're building. The LeapSlogan app has a clean, three-layer architecture:
+
+```
+┌─────────────────────────────────┐
+│ SwiftUI View Layer │ ← User Interface
+│ (ContentView, UI Components) │
+└────────────┬────────────────────┘
+ │
+┌────────────▼────────────────────┐
+│ ViewModel Layer │ ← Business Logic
+│ (SloganViewModel, @Observable) │
+└────────────┬────────────────────┘
+ │
+┌────────────▼────────────────────┐
+│ LeapSDK Layer │ ← AI Inference
+│ (ModelRunner, Conversation) │
+└─────────────────────────────────┘
+```
+
+Let's trace what happens when a user generates a slogan:
+
+```
+1. User enters "coffee shop" and taps Generate
+ ↓
+2. UI disables input and shows "Generating..."
+ ↓
+3. ViewModel creates prompt with business type
+ ↓
+4. ChatMessage is sent to Conversation
+ ↓
+5. LeapSDK starts model inference
+ ↓
+6. Tokens stream back one-by-one
+ ├─ "Wake" → UI updates
+ ├─ " up" → UI updates
+ ├─ " to" → UI updates
+ ├─ " flavor" → UI updates
+ └─ "!" → UI updates
+ ↓
+7. .complete event fires
+ ↓
+8. UI re-enables input, shows final slogan
+```
+
+Let's start building the app!
+
+## Environment setup
+
+You will need:
+
+- **Xcode 15.0+** with Swift 5.9 or later
+- **iOS 15.0+** deployment target
+- A **physical iOS device** (iPhone or iPad) for best performance
+ - *The iOS Simulator works but will be significantly slower*
+- Basic familiarity with **SwiftUI** and Swift's async/await syntax
+
+## Step 1: Create a New Xcode Project
+
+1. Open Xcode and create a new iOS App
+2. Choose **SwiftUI** for the interface
+3. Set minimum deployment target to **iOS 15.0**
+
+## Step 2: Add LeapSDK via Swift Package Manager
+
+LeapSDK is distributed as a Swift Package, making integration straightforward:
+
+1. In Xcode, go to **File → Add Package Dependencies**
+2. Enter the repository URL:
+ ```
+ https://github.com/Liquid4All/leap-ios.git
+ ```
+3. Select the latest version (0.6.0 or newer)
+4. Add **both** products to your target:
+ - ✅ `LeapSDK`
+ - ✅ `LeapSDKTypes`
+
+> **Important**: Starting with version 0.5.0, you must add both `LeapSDK` and `LeapSDKTypes` for proper runtime linking.
+
+## Step 3: Download a Model Bundle
+
+Now we need an AI model. LeapSDK uses **model bundles** - packaged files containing the model and its configuration:
+
+1. Visit the [Leap Model Library](https://leap.liquid.ai/models)
+2. For this tutorial, download a small model like **LFM2-350M** (great for mobile, ~500MB)
+3. Download the `.bundle` file for your chosen model
+4. Drag the `.bundle` file into your Xcode project
+5. ✅ Make sure "Add to target" is checked
+
+Your project structure should now look like:
+```
+YourApp/
+├── YourApp.swift
+├── ContentView.swift
+├── Models/
+│ └── LFM2-350M-8da4w_output_8da8w-seq_4096.bundle ← Your model
+└── Assets.xcassets
+```
+
+## Step 4: Building the ViewModel
+
+The ViewModel is the heart of our app. It manages the model lifecycle and handles generation. Let's build it step by step.
+
+### Step 4.1: Create the Basic Structure
+
+Create a new Swift file called `SloganViewModel.swift`:
+
+```swift
+import Foundation
+import SwiftUI
+import LeapSDK
+import Observation
+
+@Observable
+class SloganViewModel {
+ // MARK: - Published State
+ var isModelLoading = true
+ var isGenerating = false
+ var generatedSlogan = ""
+ var errorMessage: String?
+
+ // MARK: - Private Properties
+ private var modelRunner: ModelRunner?
+ private var conversation: Conversation?
+
+ // MARK: - Initialization
+ init() {
+ // Model will be loaded when view appears
+ }
+}
+```
+
+**What's happening here?**
+- `@Observable` is Swift's new observation macro (iOS 17+, but works great on iOS 15 with backports)
+- We track four pieces of UI state: loading, generating, the slogan text, and any errors
+- `ModelRunner` and `Conversation` are private—these are our LeapSDK objects
+
+### Step 4.2: Implement Model Loading
+
+Add the model loading function:
+
+```swift
+// MARK: - Model Management
+@MainActor
+func setupModel() async {
+ isModelLoading = true
+ errorMessage = nil
+
+ do {
+ // 1. Get the model bundle URL from app bundle
+ guard let modelURL = Bundle.main.url(
+            forResource: "LFM2-350M-8da4w_output_8da8w-seq_4096", // Change to match your bundle name
+ withExtension: "bundle"
+ ) else {
+ errorMessage = "Model bundle not found in app bundle"
+ isModelLoading = false
+ return
+ }
+
+ // 2. Load the model using LeapSDK
+ print("Loading model from: \(modelURL.path)")
+ modelRunner = try await Leap.load(url: modelURL)
+
+ // 3. Create an initial conversation
+ conversation = Conversation(
+ modelRunner: modelRunner!,
+ history: []
+ )
+
+ isModelLoading = false
+ print("Model loaded successfully!")
+
+ } catch {
+ errorMessage = "Failed to load model: \(error.localizedDescription)"
+ isModelLoading = false
+ print("Error loading model: \(error)")
+ }
+}
+```
+
+**Understanding the code:**
+
+1. **Bundle lookup**: We find the model bundle in our app's resources
+2. **Async loading**: `Leap.load()` is async because loading models takes time (1-5 seconds)
+3. **Conversation creation**: Every generation needs a `Conversation` object that tracks history
+4. **Error handling**: We catch and display any loading failures
+
+> **💡 Pro Tip**: Model loading is the slowest part. In production apps, show a nice loading screen!
+
+### Step 4.3: Implement Slogan Generation
+
+Now for the exciting part—generating slogans! Add this function:
+
+```swift
+// MARK: - Generation
+@MainActor
+func generateSlogan(for businessType: String) async {
+ // Guard against invalid states
+ guard let conversation = conversation,
+ !isGenerating else { return }
+
+ isGenerating = true
+ generatedSlogan = "" // Clear previous slogan
+ errorMessage = nil
+
+ // 1. Create the prompt
+ let prompt = """
+ Create a catchy, memorable slogan for a \(businessType) business. \
+ Make it creative, concise, and impactful. \
+ Return only the slogan, nothing else.
+ """
+
+ // 2. Create a chat message
+ let userMessage = ChatMessage(
+ role: .user,
+ content: [.text(prompt)]
+ )
+
+ // 3. Generate response with streaming
+ let stream = conversation.generateResponse(message: userMessage)
+
+ // 4. Process the stream
+ do {
+ for await response in stream {
+ switch response {
+ case .chunk(let text):
+ // Append each text chunk as it arrives
+ generatedSlogan += text
+
+ case .reasoningChunk(let reasoning):
+ // Some models output reasoning - we can log it
+ print("Reasoning: \(reasoning)")
+
+ case .complete(let usage, let completeInfo):
+ // Generation finished!
+ print("✅ Generation complete!")
+ print("Tokens used: \(usage.totalTokens)")
+ print("Speed: \(completeInfo.stats?.tokenPerSecond ?? 0) tokens/sec")
+ isGenerating = false
+ }
+ }
+ } catch {
+ errorMessage = "Generation failed: \(error.localizedDescription)"
+ isGenerating = false
+ }
+}
+```
+
+**Breaking down the streaming API:**
+
+The `generateResponse()` method returns an **AsyncStream** that emits three types of events:
+
+1. **`.chunk(text)`**: Each piece of generated text arrives here
+ - This is what makes the UI feel responsive!
+ - Text appears word-by-word, just like ChatGPT
+
+2. **`.reasoningChunk(reasoning)`**: Some models show their "thinking"
+ - Advanced feature for models that explain their reasoning
+
+3. **`.complete(usage, info)`**: The final event when generation finishes
+ - Contains token usage statistics
+ - Includes performance metrics (tokens/second)
+
+## Step 5: Building the User Interface
+
+Now let's create a beautiful, interactive UI. Create or modify `ContentView.swift`:
+
+```swift
+import SwiftUI
+
+struct ContentView: View {
+ @State private var viewModel = SloganViewModel()
+ @State private var businessType = ""
+
+ var body: some View {
+ NavigationStack {
+ ZStack {
+ // Background gradient
+ LinearGradient(
+ colors: [.blue.opacity(0.1), .purple.opacity(0.1)],
+ startPoint: .topLeading,
+ endPoint: .bottomTrailing
+ )
+ .ignoresSafeArea()
+
+ VStack(spacing: 24) {
+ if viewModel.isModelLoading {
+ modelLoadingView
+ } else {
+ mainContentView
+ }
+ }
+ .padding()
+ }
+ .navigationTitle("AI Slogan Generator")
+ .navigationBarTitleDisplayMode(.large)
+ }
+ .task {
+ // Load model when view appears
+ await viewModel.setupModel()
+ }
+ }
+
+ // MARK: - Subviews
+
+ private var modelLoadingView: some View {
+ VStack(spacing: 20) {
+ ProgressView()
+ .scaleEffect(1.5)
+ Text("Loading AI Model...")
+ .font(.headline)
+ Text("This may take a few seconds")
+ .font(.caption)
+ .foregroundColor(.secondary)
+ }
+ }
+
+ private var mainContentView: some View {
+ VStack(spacing: 24) {
+ // Error message if any
+ if let error = viewModel.errorMessage {
+ errorBanner(error)
+ }
+
+ // Instructions
+ instructionsCard
+
+ // Input field
+ businessTypeInput
+
+ // Generate button
+ generateButton
+
+ // Generated slogan display
+ if !viewModel.generatedSlogan.isEmpty {
+ sloganResultCard
+ }
+
+ Spacer()
+ }
+ }
+
+ private var instructionsCard: some View {
+ VStack(alignment: .leading, spacing: 12) {
+ Label("How it works", systemImage: "lightbulb.fill")
+ .font(.headline)
+ .foregroundColor(.blue)
+
+ Text("Enter a business type and I'll generate a creative slogan using AI—completely on your device!")
+ .font(.subheadline)
+ .foregroundColor(.secondary)
+ }
+ .padding()
+ .frame(maxWidth: .infinity, alignment: .leading)
+ .background(Color.blue.opacity(0.1))
+ .cornerRadius(12)
+ }
+
+ private var businessTypeInput: some View {
+ VStack(alignment: .leading, spacing: 8) {
+ Text("Business Type")
+ .font(.subheadline)
+ .fontWeight(.semibold)
+
+ TextField("e.g., coffee shop, tech startup, bakery", text: $businessType)
+ .textFieldStyle(.roundedBorder)
+ .autocapitalization(.none)
+ .disabled(viewModel.isGenerating)
+ }
+ }
+
+ private var generateButton: some View {
+ Button(action: {
+ Task {
+ await viewModel.generateSlogan(for: businessType)
+ }
+ }) {
+ HStack {
+ if viewModel.isGenerating {
+ ProgressView()
+ .tint(.white)
+ } else {
+ Image(systemName: "sparkles")
+ }
+
+ Text(viewModel.isGenerating ? "Generating..." : "Generate Slogan")
+ .fontWeight(.semibold)
+ }
+ .frame(maxWidth: .infinity)
+ .padding()
+ .background(
+ businessType.isEmpty || viewModel.isGenerating
+ ? Color.gray
+ : Color.blue
+ )
+ .foregroundColor(.white)
+ .cornerRadius(12)
+ }
+ .disabled(businessType.isEmpty || viewModel.isGenerating)
+ }
+
+ private var sloganResultCard: some View {
+ VStack(alignment: .leading, spacing: 12) {
+ HStack {
+ Label("Your Slogan", systemImage: "quote.bubble.fill")
+ .font(.headline)
+ .foregroundColor(.purple)
+
+ Spacer()
+
+ // Copy button
+ Button(action: {
+ UIPasteboard.general.string = viewModel.generatedSlogan
+ }) {
+ Image(systemName: "doc.on.doc")
+ .foregroundColor(.blue)
+ }
+ }
+
+ Text(viewModel.generatedSlogan)
+ .font(.title3)
+ .fontWeight(.medium)
+ .foregroundColor(.primary)
+ .padding()
+ .frame(maxWidth: .infinity, alignment: .leading)
+ .background(Color.purple.opacity(0.1))
+ .cornerRadius(8)
+ }
+ .padding()
+ .background(Color.white)
+ .cornerRadius(12)
+ .shadow(color: .black.opacity(0.1), radius: 5, y: 2)
+ }
+
+ private func errorBanner(_ message: String) -> some View {
+ HStack {
+ Image(systemName: "exclamationmark.triangle.fill")
+ Text(message)
+ .font(.caption)
+ Spacer()
+ }
+ .padding()
+ .background(Color.red.opacity(0.1))
+ .foregroundColor(.red)
+ .cornerRadius(8)
+ }
+}
+
+#Preview {
+ ContentView()
+}
+```
+
+**UI Design Highlights:**
+
+1. **Progressive disclosure**: Loading screen → Main interface
+2. **Clear visual feedback**: Loading states, disabled states, animations
+3. **Helpful instructions**: Users understand what to do immediately
+4. **Polished details**: Gradient background, shadows, rounded corners
+5. **Copy functionality**: Users can easily copy the generated slogan
+
+## Troubleshooting Common Issues
+
+### Issue 1: "Model bundle not found"
+
+**Solution**:
+- Check that `.bundle` file is in Xcode project
+- Verify "Target Membership" is checked
+- Ensure bundle name in code matches actual filename
+
+### Issue 2: "Failed to load model"
+
+**Solution**:
+- Test on a physical device (Simulator is unreliable)
+- Ensure iOS version is 15.0+
+- Check device has enough free storage (~2-3x model size)
+- Try a smaller model first
+
+### Issue 3: Slow generation speed
+
+**Solution**:
+- Use a physical device (10-100x faster than Simulator)
+- Choose a smaller model (350M-1B)
+- Lower `maxTokens` in GenerationOptions
+- Reduce temperature for faster but less creative output
+
+### Issue 4: App crashes on launch
+
+**Solution**:
+- Ensure both `LeapSDK` and `LeapSDKTypes` are added
+- Check frameworks are set to "Embed & Sign"
+- Clean build folder (Cmd+Shift+K)
+- Restart Xcode
+
+## Next Steps
+
+Congratulations! 🎉 You've built a fully functional on-device AI app. Here are some ideas to expand your skills:
+
+### Immediate Next Projects
+
+1. **LeapChat**: Build a full chat interface with history
+ - Check out the [LeapChatExample](https://github.com/Liquid4All/LeapSDK-Examples/tree/main/iOS/LeapChatExample)
+
+2. **Add Structured Output**: Use `@Generatable` macros
+ - Generate JSON data structures
+ - Validate output format at compile-time
+
+3. **Implement Function Calling**: Let AI call your functions
+ - Weather lookup, calculations, database queries
+ - See [Function Calling Guide](https://leap.liquid.ai/docs/edge-sdk/ios/function-calling)
+
+## Need help?
+
+Join the [Liquid AI Discord Community](https://discord.gg/DFU3WQeaYD) and ask.
+[](https://discord.gg/DFU3WQeaYD)
diff --git a/examples/index.md b/examples/index.md
index 24ca404..6a14d92 100644
--- a/examples/index.md
+++ b/examples/index.md
@@ -1,3 +1,21 @@
-# Introduction
+# What do you want to learn?
-Coming soon...
+Choose your path based on what you'd like to accomplish with Liquid AI models:
+
+- **[Laptop examples](/examples/laptop-examples)** - Learn how to build agentic workflows that run entirely on your local machine: no API keys, no cloud costs, no private data shared with third-parties.
+
+- **[iOS examples](/examples/deploy-models-on-ios)** - Discover how to integrate pre-trained Liquid AI models into your iOS applications using the LEAP SDK. Ideal for building on-device AI features in iPhone and iPad apps.
+
+- **[Customize a model](/examples/customize-models)** - Learn how to fine-tune and adapt Liquid AI models for your specific use cases. Perfect for developers who want to create specialized AI solutions.
+
+
+
+## Cannot find what you are looking for?
+
+Join the [Liquid AI Community Server](https://discord.gg/DFU3WQeaYD) and let us know.
+
+[](https://discord.gg/DFU3WQeaYD)
diff --git a/examples/laptop-examples/index.md b/examples/laptop-examples/index.md
new file mode 100644
index 0000000..4aa6211
--- /dev/null
+++ b/examples/laptop-examples/index.md
@@ -0,0 +1,14 @@
+# Getting started
+
+
+
+ Invoice extractor tool
+
+
+ A Python CLI that monitors a folder for new invoice files and automatically extracts key information from them using two Liquid Foundational Models.
+
+ VLM
+ Nanos
+
+
+
diff --git a/examples/laptop-examples/invoice-extractor-tool-with-liquid-nanos.mdx b/examples/laptop-examples/invoice-extractor-tool-with-liquid-nanos.mdx
new file mode 100644
index 0000000..cfadc21
--- /dev/null
+++ b/examples/laptop-examples/invoice-extractor-tool-with-liquid-nanos.mdx
@@ -0,0 +1,174 @@
+# Invoice extractor tool
+
+[](https://github.com/Liquid4All/cookbook/tree/main/examples/invoice-parser)
+
+A Python CLI that monitors a folder for new invoice files and automatically extracts key information from them using
+two Liquid Foundational Models.
+
+This is a practical example of building agentic workflows that run entirely on your local machine: no API keys, no cloud costs, no private data shared with third-parties.
+
+## What's inside?
+
+In this example, you will learn how to:
+
+- **Chain multiple Liquid Foundational Models** to build a complete workflow that processes visual data (invoice images) and extracts structured information
+- **Set up local AI inference** using Ollama to run Liquid models entirely on your machine without requiring cloud services or API keys
+- **Build a file monitoring system** that automatically processes new files dropped into a directory
+- **Extract text from images** using the LFM2-VL-3B vision-language model for optical character recognition
+- **Transform unstructured text into structured data** using the LFM2-1.2B-Extract model for information extraction
+- **Create agentic workflows** that combine multiple AI models to solve real-world business problems while keeping your data private and secure
+
+## Understanding the architecture
+
+When you drop an invoice photo into a watched directory, the tool uses a chain with 2 Liquid Foundational Models:
+
+1. [LFM2-VL-3B](https://huggingface.co/LiquidAI/LFM2-VL-3B) extracts a raw textual description from an invoice picture.
+2. [LFM2-1.2B-Extract](https://huggingface.co/LiquidAI/LFM2-1.2B-Extract) transforms the raw textual description into a structured record. This record is appended to a CSV file.
+
+
+
+## Environment setup
+
+You will need:
+
+- [Ollama](https://ollama.com/) to serve the Language Models locally.
+- [uv](https://docs.astral.sh/uv/) to manage Python dependencies and run the application efficiently without creating virtual environments manually.
+
+### Install Ollama
+
+
+Click to see installation instructions for your platform
+
+**macOS:**
+```bash
+# Download and install from the website
+# Visit: https://ollama.ai/download
+
+# Or use Homebrew
+brew install ollama
+```
+
+**Linux:**
+```bash
+curl -fsSL https://ollama.ai/install.sh | sh
+```
+
+**Windows:**
+Download the installer from [https://ollama.ai/download](https://ollama.ai/download)
+
+
+
+### Install UV
+
+
+Click to see installation instructions for your platform
+
+**macOS/Linux:**
+```bash
+curl -LsSf https://astral.sh/uv/install.sh | sh
+```
+
+**Windows:**
+```powershell
+powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
+```
+
+
+
+## How to run it?
+
+Let's start by cloning the repository:
+
+```sh
+git clone https://github.com/Liquid4All/cookbook.git
+cd cookbook/examples/invoice-parser
+```
+
+Then, run the application using the invoices that are already in the repository:
+```sh
+uv run python src/invoice_parser/main.py \
+ --dir invoices/ \
+ --image-model hf.co/LiquidAI/LFM2-VL-3B-GGUF:F16 \
+ --extractor-model hf.co/LiquidAI/LFM2-1.2B-Extract-GGUF:F16 \
+ --process-existing
+```
+
+Feel free to modify the path to the invoices directory and the model IDs to suit your needs.
+
+:::tip Example: Using different model sizes
+For example, you can use the 1.6B version of the VLM model and the 350M version of the extractor model as follows:
+
+```sh
+uv run python src/invoice_parser/main.py \
+ --dir invoices/ \
+ --image-model hf.co/LiquidAI/LFM2-VL-1.6B-GGUF:F16 \
+ --extractor-model hf.co/LiquidAI/LFM2-350M-Extract-GGUF:F16 \
+ --process-existing
+```
+:::
+
+If you have `make` installed, you can run the application with the following command:
+```sh
+make run
+```
+
+The data extracted from the invoices is saved in the same directory as the invoices, in a file called `bills.csv`.
+If you open the file, you will see the following data:
+
+| processed_at | file_path | utility | amount | currency |
+|--------------|-----------|---------|--------|----------|
+| 2025-10-31 11:25:47 | invoices/water_australia.png | electricity | 68.46 | AUD |
+| 2025-10-31 11:26:00 | invoices/Sample-electric-Bill-2023.jpg | electricity | 28.32 | USD |
+| 2025-10-31 11:26:09 | invoices/british_gas.png | electricity | 81.31 | GBP |
+| 2025-10-31 11:42:35 | invoices/castlewater1.png | electricity | 150.0 | USD |
+
+Observations:
+- The first 3 invoices are properly extracted, with the correct amount and currency.
+- The fourth invoice is not properly extracted: both the amount and the currency are incorrect.
+
+## How to improve it?
+
+We have a tool that works well 75% of the time on our sample of invoices, which is
+
+- good enough for a demo
+- not good enough for a production-ready application
+
+To improve the tool you need to:
+
+- Collect more invoices
+- Collect model input and outputs at each step of the pipeline, including
+ - the VLM model input and output
+ - the extractor model input and output
+- Flag and correct (input, output) pairs that are not properly extracted
+- Fine-tune the model(s) on the corrected (input, output) pairs
+
+:::tip
+Our tool uses two Liquid Foundational Models:
+- LFM2-VL-3B for vision-language understanding
+- LFM2-1.2B-Extract for information extraction
+
+LFM2-1.2B-Extract is a highly specialized model for structured data extraction from text. So the problem is likely not here.
+On the other hand, LFM2-VL-3B is a more general-purpose model for vision-language understanding,
+which has not necessarily been trained on the task of invoice extraction. This is the first place to look for improvements.
+:::
+
+If you are interested in learning more about model customization for Vision Language Models,
+we recommend you to check out the following examples:
+
+
+
+ Cats vs dogs identification from images
+
+
+ Learn how to fine-tune a Liquid Foundation Model to identify cats and dogs from images.
+
+ Fine-tuning
+ VLM
+
+
+
+
+## Need help?
+
+Join the [Liquid AI Discord Community](https://discord.gg/DFU3WQeaYD) and ask.
+[](https://discord.gg/DFU3WQeaYD)
diff --git a/sidebarsExamples.ts b/sidebarsExamples.ts
index 6a8c54a..ca1d790 100644
--- a/sidebarsExamples.ts
+++ b/sidebarsExamples.ts
@@ -1,7 +1,24 @@
import type { SidebarsConfig } from '@docusaurus/plugin-content-docs';
const sidebars: SidebarsConfig = {
- examples: ['index'],
+ examples: [
+ 'index',
+ {
+ type: 'category',
+ label: 'Laptop examples',
+ items: ['laptop-examples/index', 'laptop-examples/invoice-extractor-tool-with-liquid-nanos'],
+ },
+ {
+ type: 'category',
+ label: 'iOS examples',
+ items: ['deploy-models-on-ios/index', 'deploy-models-on-ios/slogan-generator-app'],
+ },
+ {
+ type: 'category',
+      label: 'Customize models',
+ items: ['customize-models/index', 'customize-models/cats-vs-dogs-identification-from-images'],
+ },
+ ],
};
export default sidebars;
diff --git a/static/img/examples/invoice-extractor-tool-with-liquid-nanos/chain_diagram.gif b/static/img/examples/invoice-extractor-tool-with-liquid-nanos/chain_diagram.gif
new file mode 100644
index 0000000..f968448
Binary files /dev/null and b/static/img/examples/invoice-extractor-tool-with-liquid-nanos/chain_diagram.gif differ
diff --git a/static/img/examples/invoice-extractor-tool-with-liquid-nanos/water_bill.png b/static/img/examples/invoice-extractor-tool-with-liquid-nanos/water_bill.png
new file mode 100644
index 0000000..0b4c1e0
Binary files /dev/null and b/static/img/examples/invoice-extractor-tool-with-liquid-nanos/water_bill.png differ