diff --git a/.gitignore b/.gitignore
index fe4ded99..57dc24d9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,4 +13,9 @@ public/
# nodejs
package-lock.json
-node_modules/
\ No newline at end of file
+node_modules/
+
+# AI DEV Tools
+*.mcp
+.mcp.json
+.env
\ No newline at end of file
diff --git a/content/en/docs/porch/config-as-data.md b/content/en/docs/porch/config-as-data.md
index 0b882807..c91cc8e1 100644
--- a/content/en/docs/porch/config-as-data.md
+++ b/content/en/docs/porch/config-as-data.md
@@ -1,156 +1,168 @@
---
-title: "Configuration as Data"
+title: "Configuration as Data (CaD)"
type: docs
weight: 1
description:
---
-## Why
+This document provides the background context for Package Orchestration, which is further
+elaborated in a dedicated [document](package-orchestration.md).
-This document provides background context for Package Orchestration, which is further elaborated in a dedicated
-[document](package-orchestration.md).
+## Configuration as data (CaD)
-## Configuration as Data
+CaD is an approach to the management of configuration. It includes the configuration of
+infrastructure, policy, services, applications, and so on. CaD performs the following actions:
-Configuration as Data is an approach to management of configuration (incl.
-configuration of infrastructure, policy, services, applications, etc.) which:
-
-* makes configuration data the source of truth, stored separately from the live
- state
-* uses a uniform, serializable data model to represent configuration
-* separates code that acts on the configuration from the data and from packages
- / bundles of the data
-* abstracts configuration file structure and storage from operations that act
- upon the configuration data; clients manipulating configuration data don’t
- need to directly interact with storage (git, container images)
+* Making configuration data the source of truth, stored separately from the live state.
+* Using a uniform, serializable data model to represent the configuration.
+* Separating the code that acts on the configuration from the data and from packages/bundles of
+ data.
+* Abstracting the configuration file structure and storage from the operations that act on the
+ configuration data. Clients manipulating the configuration data do not need to interact directly
+ with the storage (such as git, container images, and so on).

-## Key Principles
+## Key principles
A system based on CaD should observe the following key principles:
-* secrets should be stored separately, in a secret-focused storage
-system ([example](https://cert-manager.io/))
-* stores a versioned history of configuration changes by change sets to bundles
- of related configuration data
-* relies on uniformity and consistency of the configuration format, including
- type metadata, to enable pattern-based operations on the configuration data,
- along the lines of duck typing
-* separates schemas for the configuration data from the data, and relies on
- schema information for strongly typed operations and to disambiguate data
- structures and other variations within the model
-* decouples abstractions of configuration from collections of configuration data
-* represents abstractions of configuration generators as data with schemas, like
- other configuration data
-* finds, filters / queries / selects, and/or validates configuration data that
- can be operated on by given code (functions)
-* finds and/or filters / queries / selects code (functions) that can operate on
- resource types contained within a body of configuration data
-* actuation (reconciliation of configuration data with live state) is separate
- from transformation of configuration data, and is driven by the declarative
- data model
-* transformations, particularly value propagation, are preferable to wholesale
- configuration generation except when the expansion is dramatic (say, >10x)
-* transformation input generation should usually be decoupled from propagation
-* deployment context inputs should be taken from well defined “provider context”
- objects
-* identifiers and references should be declarative
-* live state should be linked back to sources of truth (configuration)
-
-## KRM CaD
+* Separate handling of secrets in a secret-focused storage system
+  (for example, [cert-manager](https://cert-manager.io/)).
+* Storage of a versioned history of configuration changes by change sets to bundles of related
+ configuration data.
+* Reliance on the uniformity and consistency of the configuration format, including type metadata,
+ to enable pattern-based operations on the configuration data, along the lines of duck typing.
+* Separation of the configuration data from its schemas, and reliance on the schema information for
+ strongly typed operations and disambiguation of data structures and other variations within the
+ model.
+* Decoupling of abstractions of configuration from collections of configuration data.
+* Representation of abstractions of configuration generators as data with schemas, as with other
+ configuration data.
+* Finding, filtering, querying, selecting, and/or validating of configuration data that can be
+ operated on by given code (functions).
+* Finding and/or filtering, querying, and selecting of code (functions) that can operate on
+ resource types contained within a body of configuration data.
+* Actuation (reconciliation of configuration data with live state) that is separate from the
+ transformation of the configuration data, and is driven by the declarative data model.
+* Transformations. Transformations, particularly value propagation, are preferable to wholesale
+ configuration generation, except when the expansion is dramatic (for example, >10x).
+* Transformation input generation: this should usually be decoupled from propagation.
+* Deployment context inputs: these should be taken from well-defined “provider context” objects.
+* Identifiers and references: these should be declarative.
+* Live state: this should be linked back to sources of truth (configuration).
+
+## Kubernetes Resource Model configuration as data (KRM CaD)
Our implementation of the Configuration as Data approach (
[kpt](https://kpt.dev),
[Config Sync](https://cloud.google.com/anthos-config-management/docs/config-sync-overview),
and [Package Orchestration](https://github.com/nephio-project/porch))
-is built on the foundation of
+is built on the foundation of the
[Kubernetes Resource Model](https://github.com/kubernetes/design-proposals-archive/blob/main/architecture/resource-management.md)
(KRM).
{{% alert title="Note" color="primary" %}}
-Even though KRM is not a requirement of Config as Data (just like
-Python or Go templates or Jinja are not specifically
-requirements for [IaC](https://en.wikipedia.org/wiki/Infrastructure_as_code)), the choice of
-another foundational config representation format would necessitate
-implementing adapters for all types of infrastructure and applications
-configured, including Kubernetes, CRDs, GCP resources and more. Likewise, choice
-of another configuration format would require redesign of a number of the
-configuration management mechanisms that have already been designed for KRM,
-such as 3-way merge, structural merge patch, schema descriptions, resource
-metadata, references, status conventions, etc.
+Even though KRM is not a requirement of CaD (just as Python or Go templates, or Jinja, are not
+specifically requirements for [IaC](https://en.wikipedia.org/wiki/Infrastructure_as_code)), the
+choice of another foundational configuration representation format would necessitate the
+implementation of adapters for all types of infrastructure and applications configured, including
+Kubernetes, CRDs, GCP resources, and more. Likewise, choosing another configuration format would
+require the redesign of several of the configuration management mechanisms that have already been
+designed for KRM, such as three-way merge, structural merge patch, schema descriptions, resource
+metadata, references, status conventions, and so on.
{{% /alert %}}
-**KRM CaD** is therefore a specific approach to implementing *Configuration as Data* which:
-
-* uses [KRM](https://github.com/kubernetes/design-proposals-archive/blob/main/architecture/resource-management.md)
- as the configuration serialization data model
-* uses [Kptfile](https://kpt.dev/reference/schema/kptfile/) to store package metadata
-* uses [ResourceList](https://kpt.dev/reference/schema/resource-list/) as a serialized package wire-format
-* uses a function `ResourceList → ResultList` (*kpt* function) as the foundational, composable unit of
- package-manipulation code (note that other forms of code can manipulate packages as well, i.e. UIs, custom algorithms
- not necessarily packaged and used as kpt functions)
-
-and provides the following basic functionality:
-
-* load a serialized package from a repository (as ResourceList) (examples of repository may be one or more of: local
- HDD, Git repository, OCI, Cloud Storage, etc.)
-* save a serialized package (as ResourceList) to a package repository
-* evaluate a function on a serialized package (ResourceList)
-* [render](https://kpt.dev/book/04-using-functions/01-declarative-function-execution) a package (evaluate functions
- declared within the package itself)
-* create a new (empty) package
-* fork (or clone) an existing package from one package repository (called upstream) to another (called downstream)
-* delete a package from a repository
-* associate a version with the package; guarantee immutability of packages with an assigned version
-* incorporate changes from the new version of an upstream package into a new version of a downstream package (3 way merge)
-* revert to a prior version of a package
-
-## Value
-
-The Config as Data approach enables some key value which is available in other
-configuration management approaches to a lesser extent or is not available
-at all.
-
-* simplified authoring of configuration using a variety of methods and sources
-* WYSIWYG interaction with configuration using a simple data serialization formation rather than a code-like format
-* layering of interoperable interface surfaces (notably GUI) over declarative configuration mechanisms rather than
- forcing choices between exclusive alternatives (exclusively UI/CLI or IaC initially followed by exclusively
- UI/CLI or exclusively IaC)
-* the ability to apply UX techniques to simplify configuration authoring and viewing
-* compared to imperative tools (e.g., UI, CLI) that directly modify the live state via APIs, CaD enables versioning,
- undo, audits of configuration history, review/approval, pre-deployment preview, validation, safety checks,
- constraint-based policy enforcement, and disaster recovery
-* bulk changes to configuration data in their sources of truth
-* injection of configuration to address horizontal concerns
-* merging of multiple sources of truth
-* state export to reusable blueprints without manual templatization
-* cooperative editing of configuration by humans and automation, such as for security remediation (which is usually
- implemented against live-state APIs)
-* reusability of configuration transformation code across multiple bodies of configuration data containing the same
- resource types, amortizing the effort of writing, testing, documenting the code
-* combination of independent configuration transformations
-* implementation of config transformations using the languages of choice, including both programming and scripting
- approaches
-* reducing the frequency of changes to existing transformation code
-* separation of roles between developer and non-developer configuration users
-* defragmenting the configuration transformation ecosystem
-* admission control and invariant enforcement on sources of truth
-* maintaining variants of configuration blueprints without one-size-fits-all full struct-constructor-style
- parameterization and without manually constructing and maintaining patches
-* drift detection and remediation for most of the desired state via continuous reconciliation using apply and/or for
- specific attributes via targeted mutation of the sources of truth
-
-## Related Articles
-
-For more information about Configuration as Data and Kubernetes Resource Model,
-visit the following links:
+**KRM CaD** is, therefore, a specific approach to implementing *Configuration as Data* which uses
+the following:
+
+* [KRM](https://github.com/kubernetes/design-proposals-archive/blob/main/architecture/resource-management.md)
+ as the configuration serialization data model.
+* [Kptfile](https://kpt.dev/reference/schema/kptfile/) to store package metadata.
+* [ResourceList](https://kpt.dev/reference/schema/resource-list/) as a serialized package wire
+ format.
+* A function `ResourceList → ResultList` (*kpt* function) as the foundational, composable unit of
+ package manipulation code.
+
+ {{% alert title="Note" color="primary" %}}
+
+ Other forms of code can also manipulate packages, such as UIs and custom algorithms not
+ necessarily packaged and used as kpt functions.
+
+ {{% /alert %}}
+
+
+**KRM CaD** provides the following basic functionalities:
+
+* Loading a serialized package from a repository (as a ResourceList). Examples of a repository may
+ be one or more of the following:
+ * Local HDD
+ * Git repository
+ * OCI
+ * Cloud storage
+* Saving a serialized package (as a ResourceList) to a package repository.
+* Evaluating a function on a serialized package (ResourceList).
+* [Rendering](https://kpt.dev/book/04-using-functions/01-declarative-function-execution) a package
+ (evaluating the functions declared within the package itself).
+* Creating a new (empty) package.
+* Forking (or cloning) an existing package from one package repository (called upstream) to another
+ (called downstream).
+* Deleting a package from a repository.
+* Associating a version with the package and guaranteeing the immutability of packages with an
+ assigned version.
+* Incorporating changes from the new version of an upstream package into a new version of a
+ downstream package (three-way merge).
+* Reverting to a prior version of a package.
+
+## Configuration values
+
+The configuration as data approach enables some key values which are available in other
+configuration management approaches to a lesser extent or not at all.
+
+The values enabled by the configuration as data approach are as follows:
+
+* Simplified authoring of the configuration using a variety of methods and sources.
+* What-you-see-is-what-you-get (WYSIWYG) interaction with the configuration using a simple data
+ serialization formation, rather than a code-like format.
+* Layering of interoperable interface surfaces (notably GUIs) over declarative configuration
+  mechanisms, rather than forcing choices between exclusive alternatives (exclusively UI/CLI or
+  IaC initially, followed by exclusively UI/CLI or exclusively IaC).
+* The ability to apply UX techniques to simplify configuration authoring and viewing.
+* Compared to imperative tools, such as UI and CLI, that directly modify the live state via APIs,
+ CaD enables versioning, undo, audits of configuration history, review/approval, predeployment
+ preview, validation, safety checks, constraint-based policy enforcement, and disaster recovery.
+* Bulk changes to configuration data in their sources of truth.
+* Injection of configuration to address horizontal concerns.
+* Merging of multiple sources of truth.
+* State export to reusable blueprints without manual templatization.
+* Cooperative editing of configurations by humans and automation, such as for security remediation,
+ which is usually implemented against live-state APIs.
+* Reusability of the configuration transformation code across multiple bodies of configuration data
+ containing the same resource types, amortizing the effort of writing, testing, and documenting
+ the code.
+* A combination of independent configuration transformations.
+* Implementation of configuration transformations using the languages of choice, including both
+ programming and scripting approaches.
+* Reducing the frequency of changes to the existing transformation code.
+* Separation of roles between developer and non-developer configuration users.
+* Defragmenting the configuration transformation ecosystem.
+* Admission control and invariant enforcement on sources of truth.
+* Maintaining variants of configuration blueprints without one-size-fits-all full
+ struct-constructor-style parameterization and without manually constructing and maintaining
+ patches.
+* Drift detection and remediation, via continuous reconciliation using apply for most of the
+  desired state, and/or via targeted mutation of the sources of truth for specific attributes.
+
+## Related articles
+
+For more information about configuration as data and the Kubernetes Resource Model, visit the
+following links:
* [Rationale for kpt](https://kpt.dev/guides/rationale)
* [Understanding Configuration as Data](https://cloud.google.com/blog/products/containers-kubernetes/understanding-configuration-as-data-in-kubernetes)
- blog post.
+ blog post
* [Kubernetes Resource Model](https://cloud.google.com/blog/topics/developers-practitioners/build-platform-krm-part-1-whats-platform)
blog post series
diff --git a/content/en/docs/porch/package-orchestration.md b/content/en/docs/porch/package-orchestration.md
index 9e899713..608675f6 100644
--- a/content/en/docs/porch/package-orchestration.md
+++ b/content/en/docs/porch/package-orchestration.md
@@ -5,393 +5,452 @@ weight: 2
description:
---
-## Why
+Customers who want to take advantage of the benefits of [Configuration as Data](config-as-data.md)
+can do so today using the [kpt](https://kpt.dev) CLI and the kpt function ecosystem, including its
+[functions catalog](https://catalog.kpt.dev/). Package authoring is possible using a variety of
+editors with [YAML](https://yaml.org/) support. That said, a UI experience of
+what-you-see-is-what-you-get (WYSIWYG) package authoring which supports a broader package lifecycle,
+including package authoring with *guardrails*, approval workflows, package deployment, and more, is
+not yet available.
-People who want to take advantage of the benefits of [Configuration as Data](config-as-data.md) can do so today using
-a [kpt](https://kpt.dev) CLI and the kpt function ecosystem, including its [functions catalog](https://catalog.kpt.dev/).
-Package authoring is possible using a variety of editors with [YAML](https://yaml.org/) support. That said, a delightful
-UI experience of WYSIWYG package authoring which supports broader package lifecycle, including package authoring with
-*guardrails*, approval workflow, package deployment, and more, is not yet available.
+The *Package Orchestration* (Porch) service is a part of the Nephio implementation of the
+Configuration as Data approach. It offers an API and a CLI that enable you to build the UI
+experience for supporting the configuration lifecycle.
-Porch *Package Orchestration* (Porch) is part of the Nephio implementation of a Configuration as Data approach. It offers an API and
-a CLI that enables building that delightful UI experience for supporting the configuration lifecycle.
-
-## Core Concepts
+## Core concepts
This section briefly describes core concepts of package orchestration:
-***Package***: Package is a collection of related configuration files containing configuration of [KRM][krm]
-**resources**. Specifically, configuration packages are [kpt packages](https://kpt.dev/).
-
-***Repository***: Repositories store packages. For example [git][] or [OCI][oci]. ([more details](#repositories))
-
-Packages are sequentially ***versioned***; multiple versions of the same package may exist in a repository.
-([more details](#package-versioning))
-
-A package may have a link (URL) to an ***upstream package*** (a specific version) from which it was cloned.
-([more details](#package-relationships))
+***Package***: A package is a collection of related configuration files containing configurations
+of [KRM][krm] **resources**. Specifically, configuration packages are [kpt packages](https://kpt.dev/book/02-concepts/01-packages).
+Packages are sequentially ***versioned***. Multiple versions of the same package may exist in a
+repository ([more details](#package-versioning)). A package may have a link (URL) to an
+***upstream package*** (a specific version) from which it was cloned ([more details](#package-relationships)). Packages go through three lifecycle stages: ***Draft***, ***Proposed***, and ***Published***:
-Package may be in one of several lifecycle stages:
+ * ***Draft***: The package is being created or edited. The contents of the package can be
+ modified; however, the package is not ready to be used (or deployed).
+ * ***Proposed***: The author of the package has proposed that the package be published.
+ * ***Published***: The changes to the package have been approved and the package is ready to be
+ used. Published packages can be deployed or cloned.
-* ***Draft*** - package is being created or edited. The package contents can be modified but package is not ready to be
- used (i.e. deployed)
-* ***Proposed*** - author of the package proposed that the package be published
-* ***Published*** - the changes to the package have been approved and the package is ready to be used. Published
- packages can be deployed or cloned
+***Repository***: The repository stores packages. [git][] and [OCI][oci] are two examples of a
+repository ([more details](#repositories)). A repository can be designated as a
+***deployment repository***. *Published* packages in a deployment repository are considered to be
+deployment-ready ([more details](#deployment)).
+***Functions***: Functions (specifically, [KRM functions][krm functions]) can be applied to
+packages to mutate or validate the resources within them. Functions can be applied to a
+package to create specific package mutations while editing a package draft. Functions can be added
+to a package's Kptfile [pipeline][].
-***Functions*** (specifically, [KRM functions][krm functions]) can be applied to packages to mutate or validate resources
-within them. Functions can be applied to a package to create specific package mutation while editing a package draft,
-functions can be added to package's Kptfile [pipeline][].
+## Core components of the Configuration as Data (CaD) implementation
-A repository can be designated as ***deployment repository***. *Published* packages in a deployment repository are
-considered deployment-ready. ([more details](#deployment))
+The core implementation of Configuration as Data, or *CaD Core*, is a set of components and APIs
+which collectively enable the following:
-## Core Components of Configuration as Data Implementation
+* Registration of the repositories (Git, OCI) containing kpt packages and the discovery of packages.
+* Management of package lifecycles. This includes the authoring, versioning, deletion, creation,
+and mutations of a package draft, the process of proposing the package draft, and the publishing of
+the approved package.
+* Package lifecycle operations, such as the following:
-The Core implementation of Configuration as Data, *CaD Core*, is a set of components and APIs which collectively enable:
+ * The assisted or automated rollout of a package upgrade when a new version of the upstream
+ package version becomes available (the three-way merge).
+ * The rollback of a package to its previous version.
-* Registration of repositories (Git, OCI) containing kpt packages and the discovery of packages
-* Management of package lifecycles, including authoring, versioning, deletion, creation and mutations of a package draft,
- process of proposing the package draft, and publishing of the approved package
-* Package lifecycle operations such as:
+* The deployment of the packages from the deployment repositories, and the observability of their
+deployment status.
+* A permission model that allows role-based access control (RBAC).
- * assisted or automated rollout of package upgrade when a new version of the upstream package version becomes
- available (3 way merge)
- * rollback of a package to previous version
-* Deployment of packages from deployment repositories and observability of their deployment status
-* Permission model that allows role-based access control
+### High-level architecture
-### High-Level Architecture
+At the high level, the Core CaD functionality consists of the following components:
-At the high level, the Core CaD functionality comprises:
-
-* a generic (i.e. not task-specific) package orchestration service implementing
+* A generic (that is, not task-specific) package orchestration service implementing the following:
* package repository management
- * package discovery, authoring and lifecycle management
+ * package discovery, authoring, and lifecycle management
-* [porchctl](user-guides/porchctl-cli-guide.md) - a Git-native, schema-aware, extensible client-side tool for managing KRM packages
-* a GitOps-based deployment mechanism (for example [configsync][]), which distributes and deploys configuration, and
- provides observability of the status of deployed resources
-* a task-specific UI supporting repository management, package discovery, authoring, and lifecycle
+* The Porch CLI tool [porchctl](user-guides/porchctl-cli-guide.md): this is a Git-native,
+schema-aware, extensible client-side tool for managing KRM packages.
+* A GitOps-based deployment mechanism (for example [configsync][]), which distributes and deploys
+configurations, and provides observability of the status of the deployed resources.
+* A task-specific UI supporting repository management, package discovery, authoring, and lifecycle.

-## CaD Concepts Elaborated
+## CaD concepts elaborated
-Concepts briefly introduced above are elaborated in more detail in this section.
+The concepts that were briefly introduced in **High-level architecture** are elaborated in more
+detail in this section.
### Repositories
-Porch and [configsync][] currently integrate with [git][] repositories, and there is an existing design to add OCI
-support to kpt. Initially, the Package Orchestration service will prioritize integration with [git][], and support for
-additional repository types may be added in the future as required.
+Porch and [configsync][] currently integrate with [git][] repositories. There is an existing design
+that adds OCI support to kpt. Initially, the Package Orchestration service will prioritize
+integration with [git][]. Support for additional repository types may be added in the future, as
+required.
-Requirements applicable to all repositories include: ability to store packages, their versions, and sufficient metadata
-associated with package to capture:
+Requirements applicable to all repositories include the ability to store the packages and their
+versions, and sufficient metadata associated with the packages to capture the following:
* package dependency relationships (upstream - downstream)
* package lifecycle state (draft, proposed, published)
* package purpose (base package)
-* (optionally) customer-defined attributes
+* customer-defined attributes (optional)
-At repository registration, customers must be able to specify details needed to store packages in appropriate locations
-in the repository. For example, registration of a Git repository must accept a branch and a directory.
+At repository registration, the customers must be able to specify the details needed to store the
+packages in appropriate locations in the repository. For example, registration of a Git repository
+must accept a branch and a directory.
{{% alert title="Note" color="primary" %}}
-A user role with sufficient permissions can register a package or function repository, including repositories
-containing functions authored by the customer, or other providers. Since the functions in the registered repositories
-become discoverable, customers must be aware of the implications of registering function repositories and trust the
-contents thereof.
+A user role with sufficient permissions can register a package or a function repository, including
+repositories containing functions authored by the customer, or by other providers. Since the
+functions in the registered repositories become discoverable, customers must be aware of the
+implications of registering function repositories and trust the contents thereof.
{{% /alert %}}
-### Package Versioning
+### Package versioning
-Packages are sequentially versioned. The important requirements are:
+Packages are versioned sequentially. The requirements are as follows:
-* ability to compare any 2 versions of a package to be either "newer than", equal, or "older than" relationship
-* ability to support automatic assignment of versions
-* ability to support [optimistic concurrency][optimistic-concurrency] of package changes via version numbers
-* a simple model which easily supports automation
+* The ability to compare any two versions of a package as "newer than", "equal to", or "older than"
+ the other.
+* The ability to support the automatic assignment of versions.
+* The ability to support the [optimistic concurrency][optimistic-concurrency] of package changes
+ via version numbers.
+* A simple model that easily supports automation.
-We use a simple integer sequence to represent package versions.
+A simple integer sequence is used to represent the package versions.
-### Package Relationships
+### Package relationships
-Kpt packages support the concept of ***upstream***. When a package is cloned from another, the new package
-(called the ***downstream*** package) maintains an upstream link to the specific version of the package from which it was
-cloned. If a new version of the upstream package becomes available, the upstream link can be used to update the downstream package.
+The Kpt packages support the concept of ***upstream***. When one package is cloned from another,
+the new package, known as the ***downstream*** package, maintains an upstream link to the version
+of the package from which it was cloned. If a new version of the upstream package becomes available,
+then the upstream link can be used to update the downstream package.
### Deployment
-The deployment mechanism is responsible for deploying configuration packages from a repository and affecting the live
-state. Because the configuration is stored in standard repositories (Git, and in the future OCI), the deployment
-component is pluggable. By default, [configsync][] is the deployment mechanism used by CaD Core implementation but
-others can be used as well.
-
-Here we highlight some key attributes of the deployment mechanism and its integration within the CaD Core:
-
-* _Published_ packages in a deployment repository are considered ready to be deployed
-* configsync supports deploying individual packages and whole repositories. For Git specifically that translates to a
- requirement to be able to specify repository, branch/tag/ref, and directory when instructing configsync to deploy a
+The deployment mechanism is responsible for deploying the configuration packages from a repository
+and affecting the live state. Because the configuration is stored in standard repositories (Git,
+and in the future OCI), the deployment component is pluggable. By default, [configsync][] is the
+deployment mechanism used by the CaD Core implementation. However, other deployment mechanisms can
+also be used.
+
+Some of the key attributes of the deployment mechanism and its integration within the CaD Core are
+highlighted here:
+
+* _Published_ packages in a deployment repository are considered to be ready to be deployed.
+* configsync supports the deployment of individual packages and whole repositories. For Git
+ specifically, that translates to a requirement to be able to specify the repository,
+ branch/tag/ref, and directory when instructing configsync to deploy a package.
+* _Draft_ packages need to be identified in such a way that configsync can easily avoid deploying
+ them.
+* configsync needs to be able to pin to specific versions of deployable packages, in order to
+ orchestrate rollouts and rollbacks. This means it must be possible to get a specific version of a
package.
-* _Draft_ packages need to be identified in such a way that configsync can easily avoid deploying them.
-* configsync needs to be able to pin to specific versions of deployable packages in order to orchestrate rollouts and
- rollbacks. This means it must be possible to GET a specific version of a package.
* configsync needs to be able to discover when new versions are available for deployment.
-## Package Orchestration - Porch
+## Package Orchestration (Porch)
-Having established the context of the CaD Core components and the overall architecture, the remainder of the document
-will focus on **Porch** - Package Orchestration service.
+Having established the context of the CaD Core components and the overall architecture, the
+remainder of the document will focus on the Package Orchestration service, or **Porch** for short.
-To reiterate the role of Package Orchestration service among the CaD Core components, it is:
+The role of the Package Orchestration service among the CaD Core components covers the following
+areas:
* [Repository Management](#repository-management)
* [Package Discovery](#package-discovery)
* [Package Authoring](#package-authoring) and Lifecycle
-In the following section we'll expand more on each of these areas. The term _client_ used in these sections can be
-either a person interacting with the UI such as a web application or a command-line tool, or an automated agent or
-process.
+In the next sections we will expand on each of these areas. The term _client_ used in these
+sections can be either a person interacting with the user interface, such as a web application or a
+command-line tool, or an automated agent or process.
-### Repository Management
+### Repository management
-The repository management functionality of Package Orchestration service enables the client to:
+The repository management functionality of the Package Orchestration service enables the client to
+do the following:
-* register, unregister, update registration of repositories, and discover registered repositories. Git repository
- integration will be available first, with OCI and possibly more delivered in the subsequent releases.
-* manage repository-wide upstream/downstream relationships, i.e. designate default upstream repository from which
- packages will be cloned.
-* annotate repository with metadata such as whether repository contains deployment ready packages or not; metadata can
- be application or customer specific
+* Register, unregister, and update the registration of the repositories, and discover registered
+ repositories. Git repository integration will be available first, with OCI and possibly more
+ delivered in the subsequent releases.
+* Manage repository-wide upstream/downstream relationships, that is, designate the default upstream
+ repositories from which the packages will be cloned.
+* Annotate the repositories with metadata, such as whether or not each repository contains
+ deployment-ready packages. Metadata can be application- or customer-specific.
-### Package Discovery
+### Package discovery
-The package discovery functionality of Package Orchestration service enables the client to:
+The package discovery functionality of the Package Orchestration service enables the client to do
+the following:
-* browse packages in a repository
-* discover configuration packages in registered repositories and sort/filter based on the repository containing the
- package, package metadata, version, package lifecycle stage (draft, proposed, published)
-* retrieve resources and metadata of an individual package, including latest version or any specific version or draft
- of a package, for the purpose of introspection of a single package or for comparison of contents of multiple
- versions of a package, or related packages
-* enumerate _upstream_ packages available for creating (cloning) a _downstream_ package
-* identify downstream packages that need to be upgraded after a change is made to an upstream package
-* identify all deployment-ready packages in a deployment repository that are ready to be synced to a deployment target
- by configsync
-* identify new versions of packages in a deployment repository that can be rolled out to a deployment target by configsync
+* Browse the packages in a repository.
+* Discover the configuration packages in the registered repositories, and sort and/or filter them
+ based on the repository containing the package, package metadata, version, and package lifecycle
+ stage (draft, proposed, and published).
+* Retrieve the resources and metadata of an individual package, including the latest version, or
+ any specific version or draft of a package, for the purpose of introspection of a single package,
+ or for comparison of the contents of multiple versions of a package or related packages.
+* Enumerate the _upstream_ packages that are available for creating (cloning) a _downstream_
+ package.
+* Identify the downstream packages that need to be upgraded after a change has been made to an
+ upstream package.
+* Identify all the deployment-ready packages in a deployment repository that are ready to be synced
+ to a deployment target by configsync.
+* Identify new versions of packages in a deployment repository that can be rolled out to a
+ deployment target by configsync.
-### Package Authoring
+### Package authoring
-The package authoring and lifecycle functionality of the package Orchestration service enables the client to:
+The package authoring and lifecycle functionality of the package Orchestration service enables the
+client to do the following:
* Create a package _draft_ via one of the following means:
- * an empty draft 'from scratch' (`porchctl rpkg init`)
- * clone of an upstream package (`porchctl rpkg clone`) from either a
- registered upstream repository or from another accessible, unregistered, repository
- * edit an existing package (`porchctl rpkg pull`)
- * roll back / restore a package to any of its previous versions
- (`porchctl rpkg pull` of a previous version)
-
-* Push changes to a package _draft_. In general, mutations include adding/modifying/deleting any part of the package's
- contents. Some specific examples include:
-
- * add/change/delete package metadata (i.e. some properties in the `Kptfile`)
- * add/change/delete resources in the package
- * add function mutators/validators to the package's pipeline
- * add/change/delete sub-package
- * retrieve the contents of the package for arbitrary client-side mutations (`porchctl rpkg pull`)
- * update/replace the package contents with new contents, for example results of a client-side mutations by a UI
- (`porchctl rpkg push`)
-
-* Rebase a package onto another upstream base package or onto a newer version of the same package (to
- aid with conflict resolution during the process of publishing a draft package)
-
-* Get feedback during package authoring, and assistance in recovery from merge conflicts, invalid package changes, guardrail violations
-
-* Propose for a _draft_ package be _published_.
-* Apply an arbitrary decision criteria, and by a manual or automated action, approve (or reject) proposal of a _draft_
- package to be _published_.
-* Perform bulk operations such as:
-
- * Assisted/automated update (upgrade, rollback) of groups of packages matching specific criteria (i.e. base package
- has new version or specific base package version has a vulnerability and should be rolled back)
- * Proposed change validation (pre-validating change that adds a validator function to a base package)
+ * An empty draft from scratch (`porchctl rpkg init`).
+ * A clone of an upstream package (`porchctl rpkg clone`) from a registered upstream repository or
+ from another accessible, unregistered repository.
+ * Editing an existing package (`porchctl rpkg pull`).
+ * Rolling back or restoring a package to any of its previous versions
+ (`porchctl rpkg pull` of a previous version).
+
+* Push changes to a package _draft_. In general, mutations include adding, modifying, and deleting
+ any part of the package's contents. Specific examples include the following:
+
+ * Adding, changing, or deleting package metadata (that is, some properties in the `Kptfile`).
+ * Adding, changing, or deleting resources in the package.
+ * Adding function mutators/validators to the package's pipeline.
+ * Adding, changing, or deleting sub-packages.
+ * Retrieving the contents of the package for arbitrary client-side mutations
+ (`porchctl rpkg pull`).
+ * Updating or replacing the package contents with new contents, for example, the results of
+ client-side mutations by a UI (`porchctl rpkg push`).
+
+* Rebase a package onto another upstream base package or onto a newer version of the same package
+ (to assist with conflict resolution during the process of publishing a draft package).
+
+* Get feedback during package authoring, and assistance in recovery from merge conflicts, invalid
+ package changes, or guardrail violations.
+
+* Propose that a _draft_ package be _published_.
+* Apply arbitrary decision criteria, and by a manual or an automated action, approve or reject a
+  proposal for a _draft_ package to be _published_.
+* Perform bulk operations, such as the following:
+
+ * Assisted/automated updates (upgrades and rollbacks) of groups of packages matching specific
+  criteria (for example, if a base package has a new version or a specific base package version has
+ a vulnerability and needs to be rolled back).
+ * Proposed change validation (prevalidating changes that add a validator function to a base
+ package).
* Delete an existing package.
-#### Authoring & Latency
+#### Authoring and latency
-An important goal of the Package Orchestration service is to support building of task-specific UIs. In order to deliver
-low latency user experience acceptable to UI interactions, the innermost authoring loop (depicted below) will require:
+An important aim of the Package Orchestration service is to support the building of task-specific
+UIs. To deliver a low-latency user experience that is acceptable to UI interactions, the innermost
+authoring loop depicted below requires the following:
-* high performance access to the package store (load/save package) with caching
-* low latency execution of mutations and transformations on the package contents
-* low latency [KRM function][krm functions] evaluation and package rendering (evaluation of package's function
- pipelines)
+* high-performance access to the package store (loading or saving a package) with caching
+* low-latency execution of mutations and transformations of the package contents
+* low-latency [KRM function][krm functions] evaluation and package rendering (evaluation of a
+ package's function pipelines)

-#### Authoring & Access Control
+#### Authoring and access control
-A client can assign actors (persons, service accounts) to roles that determine which operations they are allowed to
-perform in order to satisfy requirements of the basic roles. For example, only permitted roles can:
+A client can assign actors (for example, persons, service accounts, and so on) to roles that
+determine which operations they are allowed to perform, in order to satisfy the requirements of the
+basic roles. For example, only permitted roles can do the following:
-* manipulate repository registration, enforcement of repository-wide invariants and guardrails
-* create a draft of a package and propose the draft be published
-* approve (or reject) the proposal to publish a draft package
-* clone a package from a specific upstream repository
-* perform bulk operations such as rollout upgrade of downstream packages, including rollouts across multiple downstream
- repositories
-* etc.
+* Manipulate repository registration, and enforcement of repository-wide invariants and guardrails.
+* Create a draft of a package and propose that the draft be published.
+* Approve or reject a proposal to publish a draft package.
+* Clone a package from a specific upstream repository.
+* Perform bulk operations, such as rollout upgrade of downstream packages, including rollouts
+ across multiple downstream repositories.
-### Porch Architecture
+### Porch architecture
-The Package Orchestration service, **Porch** is designed to be hosted in a [Kubernetes](https://kubernetes.io/) cluster.
+The Package Orchestration (**Porch**) service is designed to be hosted in a
+[Kubernetes](https://kubernetes.io/) cluster.
-The overall architecture is shown below, and includes also existing components (k8s apiserver and configsync).
+The overall architecture is shown in the following figure. It also includes existing components,
+such as the k8s apiserver and configsync.

-In addition to satisfying requirements highlighted above, the focus of the architecture was to:
+In addition to satisfying the requirements highlighted above, the focus of the architecture was to
+do the following:
-* establish clear components and interfaces
-* support a low-latency package authoring experience required by the UIs
+* Establish clear components and interfaces.
+* Support a low-latency package authoring experience required by the UIs.
-The Porch components are:
+The Porch architecture comprises three components:
-#### Porch Server
+* the Porch server
+* the function runner
+* the CaD Library
-The Porch server is implemented as [Kubernetes extension API server][apiserver]. The benefits of using Kubernetes
-extension API server are:
+#### Porch server
-* well-defined and familiar API style
-* availability of generated clients
-* integration with existing Kubernetes ecosystem and tools such as `kubectl` CLI,
- [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
-* avoids requirement to open another network port to access a separate endpoint running inside k8s cluster (this is a
- distinct advantage over GRPC which we considered as an alternative approach)
+The Porch server is implemented as a [Kubernetes extension API server][apiserver]. The benefits of
+using the Kubernetes extension API server are as follows:
-Resources implemented by Porch include:
+* A well-defined and familiar API style.
+* The availability of generated clients.
+* Integration with the existing Kubernetes ecosystem and tools, such as the `kubectl` CLI,
+ [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/).
+* The Kubernetes extension API server removes the need to open another network port to access a
+ separate endpoint running inside the k8s cluster. This is a clear advantage over Google Remote
+ Procedure Calls (GRPC), which was considered as an alternative approach.
-* `PackageRevision` - represents the _metadata_ of the configuration package revision stored in a _package_ repository.
-* `PackageRevisionResources` - represents the _contents_ of the package revision
+The resources implemented by Porch include the following:
-Note that each configuration package revision is represented by a _pair_ of resources which each present a different
-view (or [representation][] of the same underlying package revision.
+* `PackageRevision`: This represents the _metadata_ of the configuration package revision stored in
+ a _package_ repository.
+* `PackageRevisionResources`: This represents the _contents_ of the package revision.
-Repository registration is supported by a `Repository` [custom resource][crds].
+{{% alert title="Note" color="primary"%}}
-**Porch server** itself comprises several key components, including:
-
-* The *Porch aggregated apiserver* which implements the integration into the main Kubernetes apiserver, and directly
- serves API requests for the `PackageRevision`, `PackageRevisionResources` resources.
-* Package orchestration *engine* which implements the package lifecycle operations, and package mutation workflows
-* *CaD Library* which implements specific package manipulation algorithms such as package rendering (evaluation of
- package's function *pipeline*), initialization of a new package, etc. The CaD Library is shared with `kpt`
- where it likewise provides the core package manipulation algorithms.
-* *Package cache* which enables both local caching, as well as abstract manipulation of packages and their contents
- irrespectively of the underlying storage mechanism (Git, or OCI)
-* *Repository adapters* for Git and OCI which implement the specific logic of interacting with those types of package
- repositories.
-* *Function runtime* which implements support for evaluating [kpt functions][functions] and multi-tier cache of
- functions to support low latency function evaluation
+Each configuration package revision is represented by a _pair_ of resources, each of which presents
+a different view, or a [representation][] of the same underlying package revision.
-#### Function Runner
-
-**Function runner** is a separate service responsible for evaluating [kpt functions][functions]. Function runner exposes
-a [GRPC](https://grpc.io/) endpoint which enables evaluating a kpt function on the provided configuration package.
-
-The GRPC technology was chosen for the function runner service because the [requirements](#grpc-api) that informed
-choice of KRM API for the Package Orchestration service do not apply. The function runner is an internal microservice,
-an implementation detail not exposed to external callers. This makes GRPC perfectly suitable.
-
-The function runner also maintains a cache of functions to support low latency function evaluation. It achieves this through
-two mechanisms available to it for evaluation of a function
+{{% /alert %}}
-**Executable Evaluation** approach executes the function within the Pod runtime through shell based invocation of function
-binary; for which function binaries are bundled inside the function runner image itself
+Repository registration is supported by a `Repository` [custom resource][crds].
-**Pod Evaluation** approach is utilized when invoked function is not available via Executable Evaluation approach wherein
-function runner pod starts the function pod corresponding to invoked function along with a front-end service. Once
-the pod and service are up and running, it's exposed GRPC endpoint is invoked for function evaluation, passing the input
-package. For this mechanism, function runner reads the list of functions and their images supplied via a config
-file at startup, and spawns function pods, along with a corresponding front-end service for each configured function.
-These function pods/services are terminated after a pre-configured period of inactivity (default 30 minutes) by function
-runner and recreated on the next invocation.
+The **Porch server** itself comprises several key components, including the following:
+
+* The *Porch aggregated apiserver*
+ The *Porch aggregated apiserver* implements the integration into the main Kubernetes apiserver,
+ and directly serves the API requests for the `PackageRevision`, `PackageRevisionResources`
+ resources.
+* The Package Orchestration *engine*
+ The Package Orchestration *engine* implements the package lifecycle operations, and the package
+ mutation workflows.
+* The *CaD Library*
+ The *CaD Library* implements specific package manipulation algorithms, such as package rendering
+ (the evaluation of a package's function *pipeline*), the initialization of a new package, and so
+ on. The CaD Library is shared with `kpt`, where it likewise provides the core package
+ manipulation algorithms.
+* The *package cache*
+ The *package cache* enables both local caching, as well as the abstract manipulation of packages
+ and their contents, irrespective of the underlying storage mechanism, such as Git, or OCI.
+* The *repository adapters* for Git and OCI
+ The *repository adapters* for Git and OCI implement the specific logic of interacting with those types of package
+ repositories.
+* The *function runtime*
+ The *function runtime* implements support for evaluating the [kpt functions][functions] and the
+ multitier cache of functions to support low-latency function evaluation.
+
+#### Function runner
+
+The **function runner** is a separate service that is responsible for evaluating the
+[kpt functions][functions]. The function runner exposes a Google Remote Procedure Calls
+([GRPC](https://grpc.io/)) endpoint, which enables the evaluation of a kpt function on the provided
+configuration package.
+
+The GRPC technology was chosen for the function runner service because the
+[requirements](#grpc-api) that informed the choice of the KRM API for the Package Orchestration
+service do not apply. The function runner is an internal microservice, an implementation detail not
+exposed to external callers. This makes GRPC particularly suitable.
+
+The function runner also maintains a cache of functions to support low-latency function evaluation.
+It achieves this through two mechanisms that are available for the evaluation of a function.
+
+The **Executable Evaluation** approach executes the function within the pod runtime through a
+shell-based invocation of the function binary, for which the function binaries are bundled inside
+the function runner image itself.
+
+The **Pod Evaluation** approach is used when the invoked function is not available via the
+Executable Evaluation approach, wherein the function runner pod starts the function pod that
+corresponds to the invoked function, along with a front-end service. Once the pod and the service
+are up and running, its exposed GRPC endpoint is invoked for function evaluation, passing the input
+package. For this mechanism, the function runner reads the list of functions and their images
+supplied via a configuration file at startup, and spawns function pods, along with a corresponding
+front-end service for each configured function. These function pods and services are terminated
+after a preconfigured period of inactivity (the default is 30 minutes) by the function runner and
+are recreated on the next invocation.
#### CaD Library
-The [kpt](https://kpt.dev/) CLI already implements foundational package manipulation algorithms in order to provide the
-command line user experience, including:
-
-* [kpt pkg init](https://kpt.dev/reference/cli/pkg/init/) - create an empty, valid, KRM package
-* [kpt pkg get](https://kpt.dev/reference/cli/pkg/get/) - create a downstream package by cloning an upstream package;
- set up the upstream reference of the downstream package
-* [kpt pkg update](https://kpt.dev/reference/cli/pkg/update/) - update the downstream package with changes from new
- version of upstream, 3-way merge
-* [kpt fn eval](https://kpt.dev/reference/cli/fn/eval/) - evaluate a kpt function on a package
-* [kpt fn render](https://kpt.dev/reference/cli/fn/render/) - render the package by executing the function pipeline of
- the package and its nested packages
-* [kpt fn source](https://kpt.dev/reference/cli/fn/source/) and [kpt fn sink](https://kpt.dev/reference/cli/fn/sink/) -
- read package from local disk as a `ResourceList` and write package represented as `ResourcesList` into local disk
-
-The same set of primitives form the foundational building blocks of the package orchestration service. Further, the
-package orchestration service combines these primitives into higher-level operations (for example, package orchestrator
-renders packages automatically on changes, future versions will support bulk operations such as upgrade of multiple
-packages, etc).
-
-The implementation of the package manipulation primitives in kpt was refactored (with initial refactoring completed, and
-more to be performed as needed) in order to:
-
-* create a reusable CaD library, usable by both kpt CLI and Package Orchestration service
-* create abstractions for dependencies which differ between CLI and Porch, most notable are dependency on Docker for
- function evaluation, and dependency on the local file system for package rendering.
-
-Over time, the CaD Library will provide the package manipulation primitives:
-
-* create a valid empty package (init)
-* update package upstream pointers (get)
-* perform 3-way merge (update)
-* render - core package rendering algorithm using a pluggable function evaluator to support:
-
- * function evaluation via Docker (used by kpt CLI)
- * function evaluation via an RPC to a service or appropriate function sandbox
- * high-performance evaluation of trusted, built-in, functions without sandbox
-
-* heal configuration (restore comments after lossy transformation)
-
-and both kpt CLI and Porch will consume the library. This approach will allow leveraging the investment already made
-into the high quality package manipulation primitives, and enable functional parity between Kpt CLI and Package
-Orchestration service.
+The [kpt](https://kpt.dev/) CLI already implements foundational package manipulation algorithms, in
+order to provide the command line user experience, including the following:
+
+* [kpt pkg init](https://kpt.dev/reference/cli/pkg/init/): this creates an empty, valid KRM package.
+* [kpt pkg get](https://kpt.dev/reference/cli/pkg/get/): this creates a downstream package by
+ cloning an upstream package. It sets up the upstream reference of the downstream package.
+* [kpt pkg update](https://kpt.dev/reference/cli/pkg/update/): this updates the downstream package
+  with changes from the new version of the upstream, using a three-way merge.
+* [kpt fn eval](https://kpt.dev/reference/cli/fn/eval/): this evaluates a kpt function on a package.
+* [kpt fn render](https://kpt.dev/reference/cli/fn/render/): this renders the package by executing
+ the function pipeline of the package and its nested packages.
+* [kpt fn source](https://kpt.dev/reference/cli/fn/source/) and
+ [kpt fn sink](https://kpt.dev/reference/cli/fn/sink/): these read packages from a local disk as
+ a `ResourceList` and write the packages represented as a `ResourcesList` into the local disk.
+
+The same set of primitives form the building blocks of the package orchestration service. Further,
+the Package Orchestration service combines these primitives into higher-level operations (for
+example, the package orchestrator renders the packages automatically on changes. Future versions will
+support bulk operations, such as the upgrade of multiple packages, and so on).
+
+The implementation of the package manipulation primitives in kpt was refactored (with the
+initial refactoring completed, and more to be performed as needed), in order to do the following:
+
+* Create a reusable CaD library, usable by both the kpt CLI and the Package Orchestration service.
+* Create abstractions for dependencies which differ between the CLI and Porch. Most notable are
+ the dependency on Docker for function evaluation, and the dependency on the local file system for
+ package rendering.
+
+Over time, the CaD Library will provide the package manipulation primitives, to perform the
+following tasks:
+
+* Create a valid empty package (init).
+* Update the package upstream pointers (get).
+* Perform three-way merges (update).
+* Render: using a core package rendering algorithm that uses a pluggable function evaluator, to
+ support the following:
+
+ * Function evaluation via Docker (used by kpt CLI).
+ * Function evaluation via an RPC to a service or an appropriate function sandbox.
+ * High-performance evaluation of trusted, built-in functions without a sandbox.
+
+* Heal the configuration (restore comments after lossy transformation).
+
+Both the kpt CLI and Porch will consume the library. This approach will allow the leveraging of the
+investment already made into the high-quality package manipulation primitives, and enable
+functional parity between the kpt CLI and the Package Orchestration service.
## User Guide
-Find the Porch User Guide in a dedicated
+The Porch User Guide can be found in a dedicated document, via this link:
[document](https://github.com/kptdev/kpt/blob/main/site/guides/porch-user-guide.md).
-## Open Issues/Questions
+## Open issues and questions
-### Deployment Rollouts & Orchestration
+### Deployment rollouts and orchestration
__Not Yet Resolved__
-Cross-cluster rollouts and orchestration of deployment activity. For example, package deployed by configsync in cluster
-A, and only on success, the same (or a different) package deployed by configsync in cluster B.
+Cross-cluster rollouts and orchestration of deployment activity. For example, a package deployed by
+configsync in cluster A, and only on success, the same (or a different) package deployed by
+configsync in cluster B.
-## Alternatives Considered
+## Alternatives considered
### GRPC API
-We considered the use of [GRPC]() for the Porch API. The primary advantages of implementing Porch as an extension
-Kubernetes apiserver are:
+The use of Google Remote Procedure Calls ([GRPC]()) was considered for the Porch API. The primary
+advantages of implementing Porch as an extension of the Kubernetes apiserver are as follows:
-* customers won't have to open another port to their Kubernetes cluster and can reuse their existing infrastructure
-* customers can likewise reuse existing, familiar, Kubernetes tooling ecosystem
+* Customers would not have to open another port to their Kubernetes cluster and would be able to
+ reuse their existing infrastructure.
+* Customers could likewise reuse the existing Kubernetes tooling ecosystem.
[krm]: https://github.com/kubernetes/design-proposals-archive/blob/main/architecture/resource-management.md
diff --git a/go.mod b/go.mod
index 261ca841..84b0af24 100644
--- a/go.mod
+++ b/go.mod
@@ -1,8 +1,8 @@
module github.com/nephio-project/docs
-go 1.18
+go 1.24
require (
- github.com/google/docsy v0.10.0 // indirect
+ github.com/google/docsy v0.12.0 // indirect
github.com/google/docsy/dependencies v0.7.2 // indirect
)
diff --git a/go.sum b/go.sum
index 51663a2a..e69de29b 100644
--- a/go.sum
+++ b/go.sum
@@ -1,11 +0,0 @@
-github.com/FortAwesome/Font-Awesome v0.0.0-20230327165841-0698449d50f2/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo=
-github.com/FortAwesome/Font-Awesome v0.0.0-20240402185447-c0f460dca7f7/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo=
-github.com/google/docsy v0.7.1 h1:DUriA7Nr3lJjNi9Ulev1SfiG1sUYmvyDeU4nTp7uDxY=
-github.com/google/docsy v0.7.1/go.mod h1:JCmE+c+izhE0Rvzv3y+AzHhz1KdwlA9Oj5YBMklJcfc=
-github.com/google/docsy v0.10.0 h1:6tMDacPwAyRWNCfvsn/9qGOZDQ8b0aRzjRZvnZPY5dg=
-github.com/google/docsy v0.10.0/go.mod h1:c0nIAqmRTOuJ01F85U/wJPQtc3Zj9N58Kea9bOT2AJc=
-github.com/google/docsy/dependencies v0.7.1/go.mod h1:gihhs5gmgeO+wuoay4FwOzob+jYJVyQbNaQOh788lD4=
-github.com/google/docsy/dependencies v0.7.2 h1:+t5ufoADQAj4XneFphz4A+UU0ICAxmNaRHVWtMYXPSI=
-github.com/google/docsy/dependencies v0.7.2/go.mod h1:gihhs5gmgeO+wuoay4FwOzob+jYJVyQbNaQOh788lD4=
-github.com/twbs/bootstrap v5.2.3+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0=
-github.com/twbs/bootstrap v5.3.3+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0=
diff --git a/layouts/_default/_markup/render-heading.html b/layouts/_default/_markup/render-heading.html
index d8326e6a..611e120a 100644
--- a/layouts/_default/_markup/render-heading.html
+++ b/layouts/_default/_markup/render-heading.html
@@ -1 +1 @@
-{{ template "_default/_markup/td-render-heading.html" . }}
\ No newline at end of file
+{{ .Text | safeHTML }}
\ No newline at end of file
diff --git a/netlify.toml b/netlify.toml
index 2a8b500f..d769258b 100644
--- a/netlify.toml
+++ b/netlify.toml
@@ -6,8 +6,8 @@ command = "hugo"
publish = "public"
[build.environment]
-GO_VERSION = "1.18.1"
-HUGO_VERSION = "v0.120.4"
+GO_VERSION = "1.24.5"
+HUGO_VERSION = "v0.148.2"
HUGO_ENV = "production"
[context.production]
diff --git a/package.json b/package.json
index b7b26227..cd769bae 100644
--- a/package.json
+++ b/package.json
@@ -1,18 +1,21 @@
{
- "name": "nephio-rpoject-docs",
+ "name": "nephio-project-docs",
"version": "1.0.0",
- "description": "",
+ "description": "Documentation website for Nephio - Kubernetes-based cloud-native network automation platform",
"main": "postcss.config.js",
"scripts": {
+ "build": "hugo --gc --minify",
+ "serve": "hugo server -D",
+ "lint": "echo \"No linting configured\" && exit 0",
"test": "echo \"Error: no test specified\" && exit 1"
},
"repository": {
"type": "git",
"url": "git+https://github.com/nephio-project/docs.git"
},
- "keywords": [],
+ "keywords": ["nephio", "kubernetes", "documentation", "cloud-native", "network-automation", "5g", "hugo", "docsy"],
"author": "Gergely Csatari ",
- "license": "APACHE2",
+ "license": "CC-BY-4.0",
"bugs": {
"url": "https://github.com/nephio-project/docs/issues"
},