diff --git a/.chloggen/kubeletstats-memory-utilization.yaml b/.chloggen/kubeletstats-memory-utilization.yaml new file mode 100755 index 0000000000000..89e9788dce28a --- /dev/null +++ b/.chloggen/kubeletstats-memory-utilization.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: kubeletstatsreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Adds new `k8s.pod.memory.utilization` and `container.memory.utilization` metrics that represent the ratio of memory used vs limits set. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [25894] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/kubeletstats-percentage-metrics.yaml b/.chloggen/kubeletstats-percentage-metrics.yaml new file mode 100755 index 0000000000000..7b8f846878ab2 --- /dev/null +++ b/.chloggen/kubeletstats-percentage-metrics.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. 
+ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: kubeletstatsreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add new metrics for representing CPU and memory consumption of pods and containers as a percentage of the defined resource limits. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [25835] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: These metrics represent how much of your resource limits a container or pod is consuming. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/mdatagen-skip-metric-unit.yaml b/.chloggen/mdatagen-skip-metric-unit.yaml new file mode 100755 index 0000000000000..9e4d93843a642 --- /dev/null +++ b/.chloggen/mdatagen-skip-metric-unit.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: mdatagen + +# A brief description of the change. 
Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "allow setting empty metric units" + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [27089] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [api] diff --git a/.github/workflows/generate-weekly-report.yml b/.github/workflows/generate-weekly-report.yml new file mode 100644 index 0000000000000..5660fa6cf261a --- /dev/null +++ b/.github/workflows/generate-weekly-report.yml @@ -0,0 +1,24 @@ +# This action generates a weekly report as a github issue +# More details in https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/24672 + +name: 'Generate Weekly Report' +on: + workflow_dispatch: + schedule: + # run every tuesday at 1am UTC + - cron: "0 1 * * 2" + +jobs: + get_issues: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - run: npm install js-yaml + working-directory: ./.github/workflows/scripts + - uses: actions/github-script@v6 + id: get-issues + with: + retries: 3 + script: | + const script = require('.github/workflows/scripts/generate-weekly-report.js') + await script({github, context}) diff --git a/.github/workflows/scripts/generate-weekly-report.js b/.github/workflows/scripts/generate-weekly-report.js new file mode 100644 index 
0000000000000..7b870a4b740cb --- /dev/null +++ b/.github/workflows/scripts/generate-weekly-report.js @@ -0,0 +1,439 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +const fs = require('fs'); +const path = require('path'); +const yaml = require('js-yaml'); + + +const REPO_NAME = "opentelemetry-collector-contrib" +const REPO_OWNER = "open-telemetry" + +function debug(msg) { + console.log(JSON.stringify(msg, null, 2)) +} + +async function getIssues(octokit, queryParams, filterPrs = true) { + let allIssues = []; + try { + while (true) { + const response = await octokit.issues.listForRepo(queryParams); + // filter out pull requests + const issues = filterPrs ? response.data.filter(issue => !issue.pull_request) : response.data; + allIssues = allIssues.concat(issues); + + // Check the 'link' header to see if there are more pages + const linkHeader = response.headers.link; + if (!linkHeader || !linkHeader.includes('rel="next"')) { + break; + } + + queryParams.page++; + } + return allIssues; + } catch (error) { + console.error('Error fetching issues:', error); + return []; + } +} + +function genLookbackDates() { + const now = new Date() + const midnightYesterday = new Date( + Date.UTC( + now.getUTCFullYear(), + now.getUTCMonth(), + now.getUTCDate(), + 0, 0, 0, 0 + ) + ); + const sevenDaysAgo = new Date(midnightYesterday); + sevenDaysAgo.setDate(midnightYesterday.getDate() - 7); + return { sevenDaysAgo, midnightYesterday }; +} + +function filterOnDateRange({ issue, sevenDaysAgo, midnightYesterday }) { + const createdAt = new Date(issue.created_at); + return createdAt >= sevenDaysAgo && createdAt <= midnightYesterday; +} + +async function getNewIssues({octokit, context}) { + const { sevenDaysAgo, midnightYesterday } = genLookbackDates(); + const queryParams = { + owner: REPO_OWNER, + repo: REPO_NAME, + state: 'all', // To get both open and closed issues + per_page: 100, // Number of items per page (maximum allowed) + page: 1, // Start with 
page 1 + since: sevenDaysAgo.toISOString(), + }; + + try { + const allIssues = await getIssues(octokit, queryParams) + const filteredIssues = allIssues.filter(issue => filterOnDateRange({ issue, sevenDaysAgo, midnightYesterday })); + return filteredIssues; + } catch (error) { + console.error('Error fetching issues:', error); + return []; + } +} + +async function getTargetLabelIssues({octokit, labels, filterPrs, context}) { + const queryParams = { + owner: REPO_OWNER, + repo: REPO_NAME, + state: 'open', + per_page: 100, // Number of items per page (maximum allowed) + page: 1, // Start with page 1 + labels + }; + debug({msg: "fetching issues", queryParams}) + try { + const allIssues = await getIssues(octokit, queryParams, filterPrs) + return allIssues; + } catch (error) { + console.error('Error fetching issues:', error); + return []; + } +} + +/** + * Get data required for issues report + */ +async function getIssuesData({octokit, context}) { + const targetLabels = { + "needs triage": { + filterPrs: true, + alias: "issuesTriage", + }, + "ready to merge": { + filterPrs: false, + alias: "issuesReadyToMerge", + }, + "Sponsor Needed": { + filterPrs: true, + alias: "issuesSponsorNeeded", + }, + }; + + const issuesNew = await getNewIssues({octokit, context}); + const issuesWithLabels = {}; + for (const lbl of Object.keys(targetLabels)) { + const filterPrs = targetLabels[lbl].filterPrs; + const resp = await getTargetLabelIssues({octokit, labels: lbl, filterPrs, context}); + issuesWithLabels[lbl] = resp; + } + + // tally results + const stats = { + issuesNew: { + title: "New issues", + count: 0, + data: [] + }, + issuesTriage: { + title: "Issues needing triage", + count: 0, + data: [] + }, + issuesReadyToMerge: { + title: "Issues ready to merge", + count: 0, + data: [] + }, + issuesSponsorNeeded: { + title: "Issues needing sponsorship", + count: 0, + data: [] + }, + issuesNewSponsorNeeded: { + title: "New issues needing sponsorship", + count: 0, + data: [] + }, + } + + // 
add new issues + issuesNew.forEach(issue => { + stats.issuesNew.count++; + const { html_url: url, title, number } = issue; + stats.issuesNew.data.push({ url, title, number }); + }); + + // add issues with labels + for (const lbl of Object.keys(targetLabels)) { + const alias = targetLabels[lbl].alias; + stats[alias].count = issuesWithLabels[lbl].length; + stats[alias].data = issuesWithLabels[lbl].map(issue => { + const { html_url: url, title, number } = issue; + return { url, title, number }; + }) + } + + // add new issues with sponsor needed label + const { sevenDaysAgo, midnightYesterday } = genLookbackDates(); + const sponsorNeededIssues = issuesWithLabels["Sponsor Needed"].filter(issue => filterOnDateRange({ issue, sevenDaysAgo, midnightYesterday })); + sponsorNeededIssues.forEach(issue => { + stats.issuesNewSponsorNeeded.count++; + const { html_url: url, title, number } = issue; + stats.issuesNewSponsorNeeded.data.push({ url, title, number }); + }); + return stats +} + +function generateReport({ issuesData, previousReport, componentData }) { + const out = [ + `## Format`, + "- `{CATEGORY}: {COUNT} ({CHANGE_FROM_PREVIOUS_WEEK})`", + "## Issues Report", + ''); + + // generate report for components + out.push('\n## Components Report', ''); + + // add json data + out.push('\n ## JSON Data'); + out.push(''); + out.push(`
+Expand +
+{
+  "issuesData": ${JSON.stringify(issuesData, null, 2)},
+  "componentData": ${JSON.stringify(componentData, null, 2)}
+}
+
+
`); + const report = out.join('\n'); + return report; +} + +async function createIssue({ octokit, lookbackData, report, context }) { + const title = `Weekly Report: ${lookbackData.sevenDaysAgo.toISOString().slice(0, 10)} - ${lookbackData.midnightYesterday.toISOString().slice(0, 10)}`; + return octokit.issues.create({ + // NOTE: we use the owner from the context because folks forking this repo might not have permission to (nor should they when developing) + // create issues in the upstream repository + owner: context.payload.repository.owner.login, + repo: REPO_NAME, + title, + body: report, + labels: ["report"] + }) +} + +async function getLastWeeksReport({ octokit, since, context }) { + const issues = await octokit.issues.listForRepo({ + + owner: context.payload.repository.owner.login, + repo: REPO_NAME, + state: 'all', // To get both open and closed issues + labels: ["report"], + since: since.toISOString(), + per_page: 1, + sort: "created", + direction: "asc" + }); + if (issues.data.length === 0) { + return null; + } + // grab earliest issue if multiple + return issues.data[0]; +} + +function parseJsonFromText(text) { + // Use regex to find the JSON data within the
 tags
+  const regex = /
\s*([\s\S]*?)\s*<\/pre>/;
+  const match = text.match(regex);
+
+  if (match && match[1]) {
+    // Parse the found string to JSON
+    return JSON.parse(match[1]);
+  } else {
+    throw new Error("JSON data not found");
+  }
+}
+
+async function processIssues({ octokit, context, lookbackData }) {
+  const issuesData = await getIssuesData({octokit, context});
+
+  const prevReportLookback = new Date(lookbackData.sevenDaysAgo)
+  prevReportLookback.setDate(prevReportLookback.getDate() - 7)
+  const previousReportIssue = await getLastWeeksReport({octokit, since: prevReportLookback, context});
+  // initialize to zeros
+  let previousReport = null;
+
+  if (previousReportIssue !== null) {
+    const {created_at, id, url, title} = previousReportIssue;
+    debug({ msg: "previous issue", created_at, id, url, title })
+    previousReport = parseJsonFromText(previousReportIssue.body)
+  }
+
+  return {issuesData, previousReport}
+
+
+}
+
+const findFilesByName = (startPath, filter) => {
+  let results = [];
+
+  // Check if directory exists
+  let files;
+  try {
+      files = fs.readdirSync(startPath);
+  } catch (error) {
+      console.error("Error reading directory: ", startPath, error);
+      return [];
+  }
+
+  for (let i = 0; i < files.length; i++) {
+      const filename = path.join(startPath, files[i]);
+      let stat;
+      try {
+          stat = fs.lstatSync(filename);
+      } catch (error) {
+          console.error("Error stating file: ", filename, error);
+          continue;
+      }
+      
+      if (stat.isDirectory()) {
+          const innerResults = findFilesByName(filename, filter); // Recursive call
+          results = results.concat(innerResults);
+      } else if (path.basename(filename) == filter) {
+          results.push(filename);
+      }
+  }
+  return results;
+};
+
+function processFiles(files) {
+  const results = {};
+
+  for (let filePath of files) {
+      const name = path.basename(path.dirname(filePath));                      // Directory of the file
+      const fileData = fs.readFileSync(filePath, 'utf8');                   // Read the file as a string
+
+      let data;
+      try {
+          data = yaml.load(fileData);  // Parse YAML
+      } catch (err) {
+          console.error(`Error parsing YAML for file ${filePath}:`, err);
+          continue;  // Skip this file if there's an error in parsing
+      }
+
+      let component = path.basename(path.dirname(path.dirname(filePath)));
+      try {
+        // if component is defined in metadata status, prefer to use that
+        component = data.status.class;
+      } catch(err) {
+      }
+
+      if (!results[component]) {
+          results[component] = {};
+      }
+
+      results[component][name] = {
+          path: filePath,
+          data
+      };
+  }
+
+  return results;
+}
+
+const processStatusResults = (results) => {
+  const filteredResults = {};
+
+  for (const component in results) {
+      for (const name in results[component]) {
+          const { path, data } = results[component][name];
+
+          if (data && data.status && data.status.stability) {
+              const { stability } = data.status;
+              const statuses = ['unmaintained', 'deprecated'];
+
+              for (const status of statuses) {
+                  if (stability[status] && stability[status].length > 0) {
+                      if (!filteredResults[status]) {
+                          filteredResults[status] = {};
+                      }
+                      filteredResults[status][name] = { path, stability: data.status.stability, component };
+                  }
+              }
+          }
+      }
+  }
+
+  return filteredResults;
+};
+
+async function processComponents() {
+  const results = findFilesByName(`.`, 'metadata.yaml');
+  const resultsClean = processFiles(results)
+  const resultsWithStability = processStatusResults(resultsClean)
+  return resultsWithStability
+
+}
+
+async function main({ github, context }) {
+  debug({msg: "running..."})
+  const octokit = github.rest;
+  const lookbackData = genLookbackDates();
+  const {issuesData, previousReport} = await processIssues({ octokit, context, lookbackData })
+  const componentData = await processComponents()
+
+  const report = generateReport({ issuesData, previousReport, componentData })
+
+  await createIssue({octokit, lookbackData, report, context});
+}
+
+module.exports = async ({ github, context }) => {
+  await main({ github, context })
+}
diff --git a/cmd/mdatagen/documentation.md b/cmd/mdatagen/documentation.md
index 4c649fea83202..b1ddc15ff4353 100644
--- a/cmd/mdatagen/documentation.md
+++ b/cmd/mdatagen/documentation.md
@@ -67,6 +67,21 @@ metrics:
 | string_attr | Attribute with any string value. | Any Str |
 | boolean_attr | Attribute with a boolean value. | Any Bool |
 
+### optional.metric.empty_unit
+
+[DEPRECATED] Gauge double metric disabled by default.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+|  | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| string_attr | Attribute with any string value. | Any Str |
+| boolean_attr | Attribute with a boolean value. | Any Bool |
+
 ## Resource Attributes
 
 | Name | Description | Values | Enabled |
diff --git a/cmd/mdatagen/internal/metadata/generated_config.go b/cmd/mdatagen/internal/metadata/generated_config.go
index 973c02069f4ac..6d1df6b6cb329 100644
--- a/cmd/mdatagen/internal/metadata/generated_config.go
+++ b/cmd/mdatagen/internal/metadata/generated_config.go
@@ -28,6 +28,7 @@ type MetricsConfig struct {
 	DefaultMetric            MetricConfig `mapstructure:"default.metric"`
 	DefaultMetricToBeRemoved MetricConfig `mapstructure:"default.metric.to_be_removed"`
 	OptionalMetric           MetricConfig `mapstructure:"optional.metric"`
+	OptionalMetricEmptyUnit  MetricConfig `mapstructure:"optional.metric.empty_unit"`
 }
 
 func DefaultMetricsConfig() MetricsConfig {
@@ -41,6 +42,9 @@ func DefaultMetricsConfig() MetricsConfig {
 		OptionalMetric: MetricConfig{
 			Enabled: false,
 		},
+		OptionalMetricEmptyUnit: MetricConfig{
+			Enabled: false,
+		},
 	}
 }
 
diff --git a/cmd/mdatagen/internal/metadata/generated_config_test.go b/cmd/mdatagen/internal/metadata/generated_config_test.go
index c00d9fcfb48d2..7a5c4c8efeac2 100644
--- a/cmd/mdatagen/internal/metadata/generated_config_test.go
+++ b/cmd/mdatagen/internal/metadata/generated_config_test.go
@@ -29,6 +29,7 @@ func TestMetricsBuilderConfig(t *testing.T) {
 					DefaultMetric:            MetricConfig{Enabled: true},
 					DefaultMetricToBeRemoved: MetricConfig{Enabled: true},
 					OptionalMetric:           MetricConfig{Enabled: true},
+					OptionalMetricEmptyUnit:  MetricConfig{Enabled: true},
 				},
 				ResourceAttributes: ResourceAttributesConfig{
 					MapResourceAttr:        ResourceAttributeConfig{Enabled: true},
@@ -46,6 +47,7 @@ func TestMetricsBuilderConfig(t *testing.T) {
 					DefaultMetric:            MetricConfig{Enabled: false},
 					DefaultMetricToBeRemoved: MetricConfig{Enabled: false},
 					OptionalMetric:           MetricConfig{Enabled: false},
+					OptionalMetricEmptyUnit:  MetricConfig{Enabled: false},
 				},
 				ResourceAttributes: ResourceAttributesConfig{
 					MapResourceAttr:        ResourceAttributeConfig{Enabled: false},
diff --git a/cmd/mdatagen/internal/metadata/generated_metrics.go b/cmd/mdatagen/internal/metadata/generated_metrics.go
index 3b17836621d5d..ef3861c971516 100644
--- a/cmd/mdatagen/internal/metadata/generated_metrics.go
+++ b/cmd/mdatagen/internal/metadata/generated_metrics.go
@@ -202,6 +202,58 @@ func newMetricOptionalMetric(cfg MetricConfig) metricOptionalMetric {
 	return m
 }
 
+type metricOptionalMetricEmptyUnit struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills optional.metric.empty_unit metric with initial data.
+func (m *metricOptionalMetricEmptyUnit) init() {
+	m.data.SetName("optional.metric.empty_unit")
+	m.data.SetDescription("[DEPRECATED] Gauge double metric disabled by default.")
+	m.data.SetUnit("")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricOptionalMetricEmptyUnit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, stringAttrAttributeValue string, booleanAttrAttributeValue bool) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleValue(val)
+	dp.Attributes().PutStr("string_attr", stringAttrAttributeValue)
+	dp.Attributes().PutBool("boolean_attr", booleanAttrAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricOptionalMetricEmptyUnit) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricOptionalMetricEmptyUnit) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricOptionalMetricEmptyUnit(cfg MetricConfig) metricOptionalMetricEmptyUnit {
+	m := metricOptionalMetricEmptyUnit{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
 // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
 // required to produce metric representation defined in metadata and user config.
 type MetricsBuilder struct {
@@ -213,6 +265,7 @@ type MetricsBuilder struct {
 	metricDefaultMetric            metricDefaultMetric
 	metricDefaultMetricToBeRemoved metricDefaultMetricToBeRemoved
 	metricOptionalMetric           metricOptionalMetric
+	metricOptionalMetricEmptyUnit  metricOptionalMetricEmptyUnit
 }
 
 // metricBuilderOption applies changes to default metrics builder.
@@ -235,6 +288,9 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting
 	if mbc.Metrics.OptionalMetric.enabledSetByUser {
 		settings.Logger.Warn("[WARNING] `optional.metric` should not be configured: This metric is deprecated and will be removed soon.")
 	}
+	if mbc.Metrics.OptionalMetricEmptyUnit.enabledSetByUser {
+		settings.Logger.Warn("[WARNING] `optional.metric.empty_unit` should not be configured: This metric is deprecated and will be removed soon.")
+	}
 	mb := &MetricsBuilder{
 		config:                         mbc,
 		startTime:                      pcommon.NewTimestampFromTime(time.Now()),
@@ -243,6 +299,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting
 		metricDefaultMetric:            newMetricDefaultMetric(mbc.Metrics.DefaultMetric),
 		metricDefaultMetricToBeRemoved: newMetricDefaultMetricToBeRemoved(mbc.Metrics.DefaultMetricToBeRemoved),
 		metricOptionalMetric:           newMetricOptionalMetric(mbc.Metrics.OptionalMetric),
+		metricOptionalMetricEmptyUnit:  newMetricOptionalMetricEmptyUnit(mbc.Metrics.OptionalMetricEmptyUnit),
 	}
 	for _, op := range options {
 		op(mb)
@@ -308,6 +365,7 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) {
 	mb.metricDefaultMetric.emit(ils.Metrics())
 	mb.metricDefaultMetricToBeRemoved.emit(ils.Metrics())
 	mb.metricOptionalMetric.emit(ils.Metrics())
+	mb.metricOptionalMetricEmptyUnit.emit(ils.Metrics())
 
 	for _, op := range rmo {
 		op(rm)
@@ -343,6 +401,11 @@ func (mb *MetricsBuilder) RecordOptionalMetricDataPoint(ts pcommon.Timestamp, va
 	mb.metricOptionalMetric.recordDataPoint(mb.startTime, ts, val, stringAttrAttributeValue, booleanAttrAttributeValue)
 }
 
+// RecordOptionalMetricEmptyUnitDataPoint adds a data point to optional.metric.empty_unit metric.
+func (mb *MetricsBuilder) RecordOptionalMetricEmptyUnitDataPoint(ts pcommon.Timestamp, val float64, stringAttrAttributeValue string, booleanAttrAttributeValue bool) {
+	mb.metricOptionalMetricEmptyUnit.recordDataPoint(mb.startTime, ts, val, stringAttrAttributeValue, booleanAttrAttributeValue)
+}
+
 // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
 // and metrics builder should update its startTime and reset it's internal state accordingly.
 func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
diff --git a/cmd/mdatagen/internal/metadata/generated_metrics_test.go b/cmd/mdatagen/internal/metadata/generated_metrics_test.go
index 3080bfb5ed88b..a092330ae5070 100644
--- a/cmd/mdatagen/internal/metadata/generated_metrics_test.go
+++ b/cmd/mdatagen/internal/metadata/generated_metrics_test.go
@@ -61,6 +61,10 @@ func TestMetricsBuilder(t *testing.T) {
 				assert.Equal(t, "[WARNING] `optional.metric` should not be configured: This metric is deprecated and will be removed soon.", observedLogs.All()[expectedWarnings].Message)
 				expectedWarnings++
 			}
+			if test.configSet == testSetAll || test.configSet == testSetNone {
+				assert.Equal(t, "[WARNING] `optional.metric.empty_unit` should not be configured: This metric is deprecated and will be removed soon.", observedLogs.All()[expectedWarnings].Message)
+				expectedWarnings++
+			}
 			assert.Equal(t, expectedWarnings, observedLogs.Len())
 
 			defaultMetricsCount := 0
@@ -77,6 +81,9 @@ func TestMetricsBuilder(t *testing.T) {
 			allMetricsCount++
 			mb.RecordOptionalMetricDataPoint(ts, 1, "string_attr-val", true)
 
+			allMetricsCount++
+			mb.RecordOptionalMetricEmptyUnitDataPoint(ts, 1, "string_attr-val", true)
+
 			rb := mb.NewResourceBuilder()
 			rb.SetMapResourceAttr(map[string]any{"key1": "map.resource.attr-val1", "key2": "map.resource.attr-val2"})
 			rb.SetOptionalResourceAttr("optional.resource.attr-val")
@@ -166,6 +173,24 @@ func TestMetricsBuilder(t *testing.T) {
 					attrVal, ok = dp.Attributes().Get("boolean_attr")
 					assert.True(t, ok)
 					assert.EqualValues(t, true, attrVal.Bool())
+				case "optional.metric.empty_unit":
+					assert.False(t, validatedMetrics["optional.metric.empty_unit"], "Found a duplicate in the metrics slice: optional.metric.empty_unit")
+					validatedMetrics["optional.metric.empty_unit"] = true
+					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+					assert.Equal(t, "[DEPRECATED] Gauge double metric disabled by default.", ms.At(i).Description())
+					assert.Equal(t, "", ms.At(i).Unit())
+					dp := ms.At(i).Gauge().DataPoints().At(0)
+					assert.Equal(t, start, dp.StartTimestamp())
+					assert.Equal(t, ts, dp.Timestamp())
+					assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
+					assert.Equal(t, float64(1), dp.DoubleValue())
+					attrVal, ok := dp.Attributes().Get("string_attr")
+					assert.True(t, ok)
+					assert.EqualValues(t, "string_attr-val", attrVal.Str())
+					attrVal, ok = dp.Attributes().Get("boolean_attr")
+					assert.True(t, ok)
+					assert.EqualValues(t, true, attrVal.Bool())
 				}
 			}
 		})
diff --git a/cmd/mdatagen/internal/metadata/testdata/config.yaml b/cmd/mdatagen/internal/metadata/testdata/config.yaml
index 1d61404b8f1fb..8fde806bad8fe 100644
--- a/cmd/mdatagen/internal/metadata/testdata/config.yaml
+++ b/cmd/mdatagen/internal/metadata/testdata/config.yaml
@@ -7,6 +7,8 @@ all_set:
       enabled: true
     optional.metric:
       enabled: true
+    optional.metric.empty_unit:
+      enabled: true
   resource_attributes:
     map.resource.attr:
       enabled: true
@@ -26,6 +28,8 @@ none_set:
       enabled: false
     optional.metric:
       enabled: false
+    optional.metric.empty_unit:
+      enabled: false
   resource_attributes:
     map.resource.attr:
       enabled: false
diff --git a/cmd/mdatagen/loader.go b/cmd/mdatagen/loader.go
index d01cb96675edf..24a10fe51f393 100644
--- a/cmd/mdatagen/loader.go
+++ b/cmd/mdatagen/loader.go
@@ -109,7 +109,7 @@ type metric struct {
 	ExtendedDocumentation string `mapstructure:"extended_documentation"`
 
 	// Unit of the metric.
-	Unit string `mapstructure:"unit"`
+	Unit *string `mapstructure:"unit"`
 
 	// Sum stores metadata for sum metric type
 	Sum *sum `mapstructure:"sum,omitempty"`
diff --git a/cmd/mdatagen/loader_test.go b/cmd/mdatagen/loader_test.go
index 93d9e515c9fed..2d8b277571570 100644
--- a/cmd/mdatagen/loader_test.go
+++ b/cmd/mdatagen/loader_test.go
@@ -131,7 +131,7 @@ func Test_loadMetadata(t *testing.T) {
 						Warnings: warnings{
 							IfEnabledNotSet: "This metric will be disabled by default soon.",
 						},
-						Unit: "s",
+						Unit: strPtr("s"),
 						Sum: &sum{
 							MetricValueType:        MetricValueType{pmetric.NumberDataPointValueTypeInt},
 							AggregationTemporality: AggregationTemporality{Aggregation: pmetric.AggregationTemporalityCumulative},
@@ -145,12 +145,25 @@ func Test_loadMetadata(t *testing.T) {
 						Warnings: warnings{
 							IfConfigured: "This metric is deprecated and will be removed soon.",
 						},
-						Unit: "1",
+						Unit: strPtr("1"),
 						Gauge: &gauge{
 							MetricValueType: MetricValueType{pmetric.NumberDataPointValueTypeDouble},
 						},
 						Attributes: []attributeName{"string_attr", "boolean_attr"},
 					},
+					"optional.metric.empty_unit": {
+						Enabled:     false,
+						Description: "[DEPRECATED] Gauge double metric disabled by default.",
+						Warnings: warnings{
+							IfConfigured: "This metric is deprecated and will be removed soon.",
+						},
+						Unit: strPtr(""),
+						Gauge: &gauge{
+							MetricValueType: MetricValueType{pmetric.NumberDataPointValueTypeDouble},
+						},
+						Attributes: []attributeName{"string_attr", "boolean_attr"},
+					},
+
 					"default.metric.to_be_removed": {
 						Enabled:               true,
 						Description:           "[DEPRECATED] Non-monotonic delta sum double metric enabled by default.",
@@ -158,7 +171,7 @@ func Test_loadMetadata(t *testing.T) {
 						Warnings: warnings{
 							IfEnabled: "This metric is deprecated and will be removed soon.",
 						},
-						Unit: "s",
+						Unit: strPtr("s"),
 						Sum: &sum{
 							MetricValueType:        MetricValueType{pmetric.NumberDataPointValueTypeDouble},
 							AggregationTemporality: AggregationTemporality{Aggregation: pmetric.AggregationTemporalityDelta},
@@ -228,3 +241,7 @@ func Test_loadMetadata(t *testing.T) {
 		})
 	}
 }
+
+func strPtr(s string) *string {
+	return &s
+}
diff --git a/cmd/mdatagen/metadata-sample.yaml b/cmd/mdatagen/metadata-sample.yaml
index 55b996739f9c1..79112733d7ff8 100644
--- a/cmd/mdatagen/metadata-sample.yaml
+++ b/cmd/mdatagen/metadata-sample.yaml
@@ -92,6 +92,16 @@ metrics:
     warnings:
       if_configured: This metric is deprecated and will be removed soon.
 
+  optional.metric.empty_unit:
+    enabled: false
+    description: "[DEPRECATED] Gauge double metric disabled by default."
+    unit: ""
+    gauge:
+      value_type: double
+    attributes: [string_attr, boolean_attr]
+    warnings:
+      if_configured: This metric is deprecated and will be removed soon.
+
   default.metric.to_be_removed:
     enabled: true
     description: "[DEPRECATED] Non-monotonic delta sum double metric enabled by default."
diff --git a/cmd/mdatagen/validate.go b/cmd/mdatagen/validate.go
index 75bb09271e925..e1e7a8ec121f5 100644
--- a/cmd/mdatagen/validate.go
+++ b/cmd/mdatagen/validate.go
@@ -152,7 +152,7 @@ func (m *metric) validate() error {
 	if m.Description == "" {
 		errs = multierr.Append(errs, errors.New(`missing metric description`))
 	}
-	if m.Unit == "" {
+	if m.Unit == nil {
 		errs = multierr.Append(errs, errors.New(`missing metric unit`))
 	}
 	if m.Sum != nil {
diff --git a/examples/kubernetes/otel-collector-config.yml b/examples/kubernetes/otel-collector-config.yml
index 28b2adc07f97b..bca7592f5eb57 100644
--- a/examples/kubernetes/otel-collector-config.yml
+++ b/examples/kubernetes/otel-collector-config.yml
@@ -5,7 +5,7 @@ receivers:
     exclude:
       # Exclude logs from all containers named otel-collector
       - /var/log/pods/*/otel-collector/*.log
-    start_at: beginning
+    start_at: end
     include_file_path: true
     include_file_name: false
     operators:
diff --git a/examples/kubernetes/otel-collector.yaml b/examples/kubernetes/otel-collector.yaml
index 1de279cbf5ce6..528a327c70409 100644
--- a/examples/kubernetes/otel-collector.yaml
+++ b/examples/kubernetes/otel-collector.yaml
@@ -12,7 +12,7 @@ data:
         exclude:
           # Exclude logs from all containers named otel-collector
           - /var/log/pods/*/otel-collector/*.log
-        start_at: beginning
+        start_at: end
         include_file_path: true
         include_file_name: false
         operators:
diff --git a/exporter/kineticaexporter/README.md b/exporter/kineticaexporter/README.md
index dd39979687253..7ffd005803fe5 100644
--- a/exporter/kineticaexporter/README.md
+++ b/exporter/kineticaexporter/README.md
@@ -4,6 +4,7 @@
 | Stability     | [development]: metrics, traces, logs   |
 | Distributions | [] |
 | Issues        | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fkinetica%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fkinetica) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fkinetica%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fkinetica) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner)    | [@am-kinetica](https://www.github.com/am-kinetica), [@TylerHelmuth](https://www.github.com/TylerHelmuth) |
 
 [development]: https://github.com/open-telemetry/opentelemetry-collector#development
 
diff --git a/exporter/kineticaexporter/metadata.yaml b/exporter/kineticaexporter/metadata.yaml
index 73e544576c441..63137132f7e3e 100644
--- a/exporter/kineticaexporter/metadata.yaml
+++ b/exporter/kineticaexporter/metadata.yaml
@@ -5,3 +5,5 @@ status:
   stability:
     development: [metrics, traces, logs]
   distributions: []
+  codeowners:
+    active: [am-kinetica, TylerHelmuth]
diff --git a/extension/encoding/jaegerencodingextension/extension.go b/extension/encoding/jaegerencodingextension/extension.go
index 8479660a5a85b..8fb2466fd89fb 100644
--- a/extension/encoding/jaegerencodingextension/extension.go
+++ b/extension/encoding/jaegerencodingextension/extension.go
@@ -27,7 +27,7 @@ func (e *jaegerExtension) Start(_ context.Context, _ component.Host) error {
 	case JaegerProtocolProtobuf:
 		e.unmarshaler = jaegerProtobufTrace{}
 	default:
-		return fmt.Errorf("unsupported protocol: %s", e.config.Protocol)
+		return fmt.Errorf("unsupported protocol: %q", e.config.Protocol)
 	}
 	return nil
 }
diff --git a/pkg/stanza/docs/operators/json_parser.md b/pkg/stanza/docs/operators/json_parser.md
index 0043a2c4996a8..f173c337ed15e 100644
--- a/pkg/stanza/docs/operators/json_parser.md
+++ b/pkg/stanza/docs/operators/json_parser.md
@@ -108,7 +108,7 @@ Configuration:
 Configuration:
 ```yaml
 - type: json_parser
-  if: '$matches "^{.*}$"'
+  if: 'body matches "^{.*}$"'
 ```
 
 
diff --git a/pkg/stanza/docs/operators/noop.md b/pkg/stanza/docs/operators/noop.md
index 40c1e9bd78fa7..1714f6a5becae 100644
--- a/pkg/stanza/docs/operators/noop.md
+++ b/pkg/stanza/docs/operators/noop.md
@@ -1,6 +1,6 @@
 ## `noop` operator
 
-The `noop` operator makes no changes to a entry. It is sometimes useful as a terminal node in [non-linear pipelines](../types/pipeline.md#non-linear-pipelines).
+The `noop` operator makes no changes to an entry. It is sometimes useful as a terminal node in [non-linear pipelines](../types/operators.md#non-linear-sequences).
 
 ### Configuration Fields
 
diff --git a/pkg/stanza/docs/types/operators.md b/pkg/stanza/docs/types/operators.md
new file mode 100644
index 0000000000000..a86795bea18f0
--- /dev/null
+++ b/pkg/stanza/docs/types/operators.md
@@ -0,0 +1,130 @@
+# Operator Sequences
+
+An operator sequence is made up of [operators](../operators/README.md) and defines how logs should be parsed and filtered before being emitted from a receiver.
+
+## Linear Sequences
+
+In a linear sequence of operators, logs flow from one operator to the next according to the order in which they are defined.
+
+For example, the following sequence reads logs from a file, parses them as `json`, removes a particular attribute, and adds another.
+
+```yaml
+receivers:
+  filelog:
+    include: my-log.json
+    operators:
+      - type: json_parser
+      - type: remove
+        field: attributes.foo
+      - type: add
+        key: attributes.bar
+        value: baz
+```
+
+Notice that every operator has a `type` field. The `type` of operator must always be specified.
+
+## `id` and `output`
+
+Linear sequences are sufficient for many use cases, but it is also possible to define non-linear sequences as well. In order to use non-linear sequences, the `id` and `output` fields must be understood. Let's take a close look at these.
+
+Each operator has a unique `id`. By default, `id` will take the same value as `type`. Alternately, you can specify a custom `id` for any operator.
+
+All operators support an `output` field which refers to the `id` of another operator. By default, the output field takes the value of the next operator's `id`.
+
+_The final operator in a sequence automatically emits logs from the receiver._
+
+Let's look at how these default values work together by considering the linear sequence shown above. The following pipeline would be exactly the same (although more verbosely defined):
+
+```yaml
+receivers:
+  filelog:
+    include: my-log.json
+    operators:
+      - type: json_parser
+        id: json_parser
+        output: remove
+      - type: remove
+        id: remove
+        field: attributes.foo
+        output: add
+      - type: add
+        id: add
+        key: attributes.bar
+        value: baz
+        # the last operator automatically outputs from the receiver
+```
+
+Additionally, we could accomplish the same task using custom `id`'s.
+
+```yaml
+receivers:
+  filelog:
+    include: my-log.json
+    operators:
+      - type: json_parser
+        id: my_json_parser
+        output: my_remove
+      - type: remove
+        id: my_remove
+        field: attributes.foo
+        output: my_add
+      - type: add
+        id: my_add
+        key: attributes.bar
+        value: baz
+        # the last operator automatically outputs from the receiver
+```
+
+## Non-Linear Sequences
+
+Now that we understand how `id` and `output` work together, we can configure more complex sequences. Technically, we are only limited in that the relationship between operators must be a [directed, acyclic, graph](https://en.wikipedia.org/wiki/Directed_acyclic_graph).
+
+Here's a scenario where we read from a file that contains logs with two different formats which must be parsed differently:
+
+```yaml
+receivers:
+  filelog:
+    include: my-log.json
+    operators:
+      - type: router
+        routes:
+          - expr: 'body matches "^{.*}$"'
+            output: json_parser
+          - expr: 'body startsWith "ERROR"'
+            output: error_parser
+      - type: json_parser
+        output: remove # send from here directly to the 'remove' operator
+      - type: regex_parser
+        id: error_parser
+        regex: ... # regex appropriate to parsing error logs
+      - type: remove
+        field: attributes.foo
+      - type: add
+        key: attributes.bar
+        value: baz
+```
+
+### Emitting from a receiver
+
+By default, the last operator in a sequence will emit logs from the receiver.
+
+However, in some non-linear sequences, you may not want all logs to flow through the last operator. In such cases, you can add a `noop` operator to the end of the sequence which will have no effect on the logs, but _will_ emit them from the receiver.
+
+```yaml
+receivers:
+  filelog:
+    include: my-log.json
+    operators:
+      - type: router
+        routes:
+          - expr: 'body matches "^{.*}$"'
+            output: json_parser
+          - expr: 'body startsWith "ERROR"'
+            output: error_parser
+      - type: json_parser
+        output: noop # send from here directly to the end of the sequence
+      - type: regex_parser
+        id: error_parser
+        regex: ... # regex appropriate to parsing error logs
+      - type: noop
+```
diff --git a/pkg/stanza/docs/types/pipeline.md b/pkg/stanza/docs/types/pipeline.md
deleted file mode 100644
index 11e7891a0e4d2..0000000000000
--- a/pkg/stanza/docs/types/pipeline.md
+++ /dev/null
@@ -1,166 +0,0 @@
-# Pipeline
-
-A pipeline is made up of [operators](../operators/README.md). The pipeline defines how stanza should input, process, and output logs.
-
-
-## Linear Pipelines
-
-Many stanza pipelines are a linear sequence of operators. Logs flow from one operator to the next, according to the order in which they are defined.
-
-For example, the following pipeline will read logs from a file, parse them as `json`, and print them to `stdout`:
-```yaml
-pipeline:
-  - type: file_input
-    include:
-      - my-log.json
-  - type: json_parser
-  - type: stdout
-```
-
-Notice that every operator has a `type` field. The `type` of operator must always be specified.
-
-
-## `id` and `output`
-
-Linear pipelines are sufficient for many use cases, but stanza is also capabile of processing non-linear pipelines as well. In order to use non-linear pipelines, the `id` and `output` fields must be understood. Let's take a close look at these.
-
-Each operator in a pipeline has a unique `id`. By default, `id` will take the same value as `type`. Alternately, you can specify an `id` for any operator. If your pipeline contains multiple operators of the same `type`, then the `id` field must be used.
-
-All operators (except output operators) support an `output` field. By default, the output field takes the value of the next operator's `id`.
-
-Let's look at how these default values work together by considering the linear pipeline shown above. The following pipeline would be exactly the same (although much more verbosely defined):
-
-```yaml
-pipeline:
-  - type: file_input
-    id: file_input
-    include:
-      - my-log.json
-    output: json_parser
-  - type: json_parser
-    id: json_parser
-    output: stdout
-  - type: stdout
-    id: stdout
-```
-
-Additionally, we could accomplish the same task using custom `id`'s.
-
-```yaml
-pipeline:
-  - type: file_input
-    id: my_file
-    include:
-      - my-log.json
-    output: my_parser
-  - type: json_parser
-    id: my_parser
-    output: my_out
-  - type: stdout
-    id: my_out
-```
-
-We could even shuffle the order of operators, so long as we're explicitly declaring each output. This is a little counterintuitive, so it isn't recommended. However, it is shown here to highlight the fact that operators in a pipeline are ultimately connected via `output`'s and `id`'s.
-
-```yaml
-pipeline:
-  - type: stdout      # 3rd operator
-    id: my_out
-  - type: json_parser # 2nd operator
-    id: my_parser
-    output: my_out
-  - type: file_input  # 1st operator
-    id: my_file
-    include:
-      - my-log.json
-    output: my_parser
-```
-
-Finally, we could even remove some of the `id`'s and `output`'s, and depend on the default values. This is even less readable, so again would not be recommended. However, it is provided here to demonstrate that default values can be depended upon.
-
-```yaml
-pipeline:
-  - type: json_parser # 2nd operator
-  - type: stdout      # 3rd operator
-  - type: file_input  # 1st operator
-    include:
-      - my-log.json
-    output: json_parser
-```
-
-## Non-Linear Pipelines
-
-Now that we understand how `id` and `output` work together, we can configure stanza to run more complex pipelines. Technically, the structure of a stanza pipeline is limited only in that it must be a [directed, acyclic, graph](https://en.wikipedia.org/wiki/Directed_acyclic_graph).
-
-Let's consider a pipeline with two inputs and one output:
-```yaml
-pipeline:
-  - type: file_input
-    include:
-      - my-log.json
-    output: stdout # flow directly to stdout
-
-  - type: windows_eventlog_input
-    channel: security
-    # implicitly flow to stdout
-
-  - type: stdout
-```
-
-Here's another, where we read from two files that should be parsed differently:
-```yaml
-pipeline:
-  # Read and parse a JSON file
-  - type: file_input
-    id: file_input_one
-    include:
-      - my-log.json
-  - type: json_parser
-    output: stdout # flow directly to stdout
-
-  # Read and parse a text file
-  - type: file_input
-    id: file_input_two
-    include:
-      - my-other-log.txt
-  - type: regex_parser
-    regex: ... # regex appropriate to file format
-    # implicitly flow to stdout
-
-  # Print
-  - type: stdout
-```
-
-Finally, in some cases, you might expect multiple log formats to come from a single input. This solution uses the [router](../operators/router.md) operator. The `router` operator allows one to define multiple "routes", each of which has an `output`.
-
-
-```yaml
-pipeline:
-  # Read log file
-  - type: file_input
-    include:
-      - my-log.txt
-
-  # Route based on log type
-  - type: router
-    routes:
-      - expr: 'body startsWith "ERROR"'
-        output: error_parser
-      - expr: 'body startsWith "INFO"'
-        output: info_parser
-
-  # Parse logs with format one
-  - type: regex_parser
-    id: error_parser
-    regex: ... # regex appropriate to parsing error logs
-    output: stdout # flow directly to stdout
-
-  # Parse logs with format two
-  - type: regex_parser
-    id: info_parser
-    regex: ... # regex appropriate to parsing info logs
-    output: stdout # flow directly to stdout
-
-  # Print
-  - type: stdout
-```
\ No newline at end of file
diff --git a/pkg/stanza/fileconsumer/internal/splitter/factory_test.go b/pkg/stanza/fileconsumer/internal/splitter/factory_test.go
index 91f994b1f49dc..6ca822c7f1a0d 100644
--- a/pkg/stanza/fileconsumer/internal/splitter/factory_test.go
+++ b/pkg/stanza/fileconsumer/internal/splitter/factory_test.go
@@ -8,115 +8,75 @@ import (
 	"testing"
 	"time"
 
-	"github.com/stretchr/testify/assert"
-
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/split/splittest"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/trim"
 )
 
-func TestFactory(t *testing.T) {
-	factory := NewFactory(bufio.ScanLines, trim.Nop, 0)
-	splitFunc := factory.SplitFunc()
-	assert.NotNil(t, splitFunc)
-
-	input := []byte(" hello \n world \n extra ")
-
-	advance, token, err := splitFunc(input, false)
-	assert.NoError(t, err)
-	assert.Equal(t, 8, advance)
-	assert.Equal(t, []byte(" hello "), token)
-
-	advance, token, err = splitFunc(input[8:], false)
-	assert.NoError(t, err)
-	assert.Equal(t, 8, advance)
-	assert.Equal(t, []byte(" world "), token)
-
-	advance, token, err = splitFunc(input[16:], false)
-	assert.NoError(t, err)
-	assert.Equal(t, 0, advance)
-	assert.Nil(t, token)
-}
-
-func TestCustomWithTrim(t *testing.T) {
-	factory := NewFactory(bufio.ScanLines, trim.Whitespace, 0)
-	splitFunc := factory.SplitFunc()
-	assert.NotNil(t, splitFunc)
-
-	input := []byte(" hello \n world \n extra ")
-
-	advance, token, err := splitFunc(input, false)
-	assert.NoError(t, err)
-	assert.Equal(t, 8, advance)
-	assert.Equal(t, []byte("hello"), token)
-
-	advance, token, err = splitFunc(input[8:], false)
-	assert.NoError(t, err)
-	assert.Equal(t, 8, advance)
-	assert.Equal(t, []byte("world"), token)
-
-	advance, token, err = splitFunc(input[16:], false)
-	assert.NoError(t, err)
-	assert.Equal(t, 0, advance)
-	assert.Nil(t, token)
-}
-
-func TestCustomWithFlush(t *testing.T) {
-	flushPeriod := 100 * time.Millisecond
-	factory := NewFactory(bufio.ScanLines, trim.Nop, flushPeriod)
-	splitFunc := factory.SplitFunc()
-	assert.NotNil(t, splitFunc)
-
-	input := []byte(" hello \n world \n extra ")
-
-	advance, token, err := splitFunc(input, false)
-	assert.NoError(t, err)
-	assert.Equal(t, 8, advance)
-	assert.Equal(t, []byte(" hello "), token)
-
-	advance, token, err = splitFunc(input[8:], false)
-	assert.NoError(t, err)
-	assert.Equal(t, 8, advance)
-	assert.Equal(t, []byte(" world "), token)
-
-	advance, token, err = splitFunc(input[16:], false)
-	assert.NoError(t, err)
-	assert.Equal(t, 0, advance)
-	assert.Nil(t, token)
-
-	time.Sleep(2 * flushPeriod)
-
-	advance, token, err = splitFunc(input[16:], false)
-	assert.NoError(t, err)
-	assert.Equal(t, 7, advance)
-	assert.Equal(t, []byte(" extra "), token)
-}
-
-func TestCustomWithFlushTrim(t *testing.T) {
-	flushPeriod := 100 * time.Millisecond
-	factory := NewFactory(bufio.ScanLines, trim.Whitespace, flushPeriod)
-	splitFunc := factory.SplitFunc()
-	assert.NotNil(t, splitFunc)
-
-	input := []byte(" hello \n world \n extra ")
-
-	advance, token, err := splitFunc(input, false)
-	assert.NoError(t, err)
-	assert.Equal(t, 8, advance)
-	assert.Equal(t, []byte("hello"), token)
-
-	advance, token, err = splitFunc(input[8:], false)
-	assert.NoError(t, err)
-	assert.Equal(t, 8, advance)
-	assert.Equal(t, []byte("world"), token)
-
-	advance, token, err = splitFunc(input[16:], false)
-	assert.NoError(t, err)
-	assert.Equal(t, 0, advance)
-	assert.Nil(t, token)
-
-	time.Sleep(2 * flushPeriod)
-
-	advance, token, err = splitFunc(input[16:], false)
-	assert.NoError(t, err)
-	assert.Equal(t, 7, advance)
-	assert.Equal(t, []byte("extra"), token) // Ensure trim applies to flushed token
+func TestFactorySplitFunc(t *testing.T) {
+	testCases := []struct {
+		name        string
+		baseFunc    bufio.SplitFunc
+		trimFunc    trim.Func
+		flushPeriod time.Duration
+		input       []byte
+		steps       []splittest.Step
+	}{
+		{
+			name:        "ScanLinesStrict",
+			input:       []byte(" hello \n world \n extra "),
+			baseFunc:    splittest.ScanLinesStrict,
+			trimFunc:    trim.Nop,
+			flushPeriod: 0,
+			steps: []splittest.Step{
+				splittest.ExpectAdvanceToken(len(" hello \n"), " hello "),
+				splittest.ExpectAdvanceToken(len(" world \n"), " world "),
+			},
+		},
+		{
+			name:        "ScanLinesStrictWithTrim",
+			input:       []byte(" hello \n world \n extra "),
+			baseFunc:    splittest.ScanLinesStrict,
+			trimFunc:    trim.Whitespace,
+			flushPeriod: 0,
+			steps: []splittest.Step{
+				splittest.ExpectAdvanceToken(len(" hello \n"), "hello"),
+				splittest.ExpectAdvanceToken(len(" world \n"), "world"),
+			},
+		},
+		{
+			name:        "ScanLinesStrictWithFlush",
+			input:       []byte(" hello \n world \n extra "),
+			baseFunc:    splittest.ScanLinesStrict,
+			trimFunc:    trim.Nop,
+			flushPeriod: 100 * time.Millisecond,
+			steps: []splittest.Step{
+				splittest.ExpectAdvanceToken(len(" hello \n"), " hello "),
+				splittest.ExpectAdvanceToken(len(" world \n"), " world "),
+				splittest.ExpectReadMore(),
+				splittest.Eventually(
+					splittest.ExpectAdvanceToken(len(" extra "), " extra "), 200*time.Millisecond, 10*time.Millisecond,
+				),
+			},
+		},
+		{
+			name:        "ScanLinesStrictWithTrimAndFlush",
+			input:       []byte(" hello \n world \n extra "),
+			baseFunc:    splittest.ScanLinesStrict,
+			trimFunc:    trim.Whitespace,
+			flushPeriod: 100 * time.Millisecond,
+			steps: []splittest.Step{
+				splittest.ExpectAdvanceToken(len(" hello \n"), "hello"),
+				splittest.ExpectAdvanceToken(len(" world \n"), "world"),
+				splittest.ExpectReadMore(),
+				splittest.Eventually(
+					splittest.ExpectAdvanceToken(len(" extra "), "extra"), 200*time.Millisecond, 10*time.Millisecond,
+				),
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		factory := NewFactory(tc.baseFunc, tc.trimFunc, tc.flushPeriod)
+		t.Run(tc.name, splittest.New(factory.SplitFunc(), tc.input, tc.steps...))
+	}
 }
diff --git a/pkg/stanza/flush/flush_test.go b/pkg/stanza/flush/flush_test.go
index b40c829e93924..97bcbb2930d0d 100644
--- a/pkg/stanza/flush/flush_test.go
+++ b/pkg/stanza/flush/flush_test.go
@@ -22,7 +22,7 @@ func TestNewlineSplitFunc(t *testing.T) {
 		{
 			name:     "FlushNoPeriod",
 			input:    []byte("complete line\nincomplete"),
-			baseFunc: scanLinesStrict,
+			baseFunc: splittest.ScanLinesStrict,
 			steps: []splittest.Step{
 				splittest.ExpectAdvanceToken(len("complete line\n"), "complete line"),
 			},
@@ -30,7 +30,7 @@ func TestNewlineSplitFunc(t *testing.T) {
 		{
 			name:        "FlushIncompleteLineAfterPeriod",
 			input:       []byte("complete line\nincomplete"),
-			baseFunc:    scanLinesStrict,
+			baseFunc:    splittest.ScanLinesStrict,
 			flushPeriod: 100 * time.Millisecond,
 			steps: []splittest.Step{
 				splittest.ExpectAdvanceToken(len("complete line\n"), "complete line"),
@@ -45,11 +45,3 @@ func TestNewlineSplitFunc(t *testing.T) {
 		t.Run(tc.name, splittest.New(splitFunc, tc.input, tc.steps...))
 	}
 }
-
-func scanLinesStrict(data []byte, atEOF bool) (advance int, token []byte, err error) {
-	advance, token, err = bufio.ScanLines(data, atEOF)
-	if advance == len(token) {
-		return 0, nil, nil
-	}
-	return
-}
diff --git a/pkg/stanza/split/splittest/splittest_detailed.go b/pkg/stanza/split/splittest/splittest_detailed.go
index 07960bd253f61..117a6c79cb9d9 100644
--- a/pkg/stanza/split/splittest/splittest_detailed.go
+++ b/pkg/stanza/split/splittest/splittest_detailed.go
@@ -109,3 +109,12 @@ func New(splitFunc bufio.SplitFunc, input []byte, steps ...Step) func(*testing.T
 func needMoreData(advance int, token []byte, err error) bool {
 	return advance == 0 && token == nil && err == nil
 }
+
+// ScanLinesStrict behaves like bufio.ScanLines except EOF is not considered a line ending.
+func ScanLinesStrict(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	advance, token, err = bufio.ScanLines(data, atEOF)
+	if advance == len(token) {
+		return 0, nil, nil
+	}
+	return
+}
diff --git a/pkg/stanza/split/splittest/splittest_test.go b/pkg/stanza/split/splittest/splittest_test.go
index 0d3854201f44d..1fe2688bd3789 100644
--- a/pkg/stanza/split/splittest/splittest_test.go
+++ b/pkg/stanza/split/splittest/splittest_test.go
@@ -96,7 +96,7 @@ func TestNew(t *testing.T) {
 		},
 		{
 			name:      "ScanLinesNoEOF",
-			splitFunc: scanLinesStrict,
+			splitFunc: ScanLinesStrict,
 			input:     []byte("foo bar.\nhello world!\nincomplete line"),
 			steps: []Step{
 				ExpectAdvanceToken(len("foo bar.\n"), "foo bar."),
@@ -130,14 +130,6 @@ func TestNew(t *testing.T) {
 	}
 }
 
-func scanLinesStrict(data []byte, atEOF bool) (advance int, token []byte, err error) {
-	advance, token, err = bufio.ScanLines(data, atEOF)
-	if advance == len(token) {
-		return 0, nil, nil
-	}
-	return
-}
-
 func scanLinesError(data []byte, atEOF bool) (advance int, token []byte, err error) {
 	advance, token, err = bufio.ScanLines(data, atEOF)
 	if strings.Contains(string(token), "error") {
@@ -149,7 +141,7 @@ func scanLinesError(data []byte, atEOF bool) (advance int, token []byte, err err
 func scanLinesStrictWithFlush(flushPeriod time.Duration) bufio.SplitFunc {
 	now := time.Now()
 	return func(data []byte, atEOF bool) (advance int, token []byte, err error) {
-		advance, token, err = scanLinesStrict(data, atEOF)
+		advance, token, err = ScanLinesStrict(data, atEOF)
 		if advance > 0 || token != nil || err != nil {
 			return
 		}
diff --git a/pkg/stanza/trim/trim_test.go b/pkg/stanza/trim/trim_test.go
index 5db6cf44f77f7..469af2718ee16 100644
--- a/pkg/stanza/trim/trim_test.go
+++ b/pkg/stanza/trim/trim_test.go
@@ -8,6 +8,8 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/split/splittest"
 )
 
 func TestTrim(t *testing.T) {
@@ -75,49 +77,40 @@ func TestTrim(t *testing.T) {
 }
 
 func TestWithFunc(t *testing.T) {
-	scanAndTrimLines := WithFunc(bufio.ScanLines, Config{
-		PreserveLeading:  false,
-		PreserveTrailing: false,
-	}.Func())
-
-	input := []byte(" hello \n world \n extra ")
-
-	advance, token, err := scanAndTrimLines(input, false)
-	assert.NoError(t, err)
-	assert.Equal(t, 8, advance)
-	assert.Equal(t, []byte("hello"), token)
-
-	advance, token, err = scanAndTrimLines(input[8:], false)
-	assert.NoError(t, err)
-	assert.Equal(t, 8, advance)
-	assert.Equal(t, []byte("world"), token)
-
-	advance, token, err = scanAndTrimLines(input[16:], false)
-	assert.NoError(t, err)
-	assert.Equal(t, 0, advance)
-	assert.Nil(t, token)
-}
-
-func TestWithNilTrimFunc(t *testing.T) {
-	// Same test as above, but pass nil instead of a trim func
-	// In other words, we should expect exactly the behavior of bufio.ScanLines.
-
-	scanLines := WithFunc(bufio.ScanLines, nil)
-
-	input := []byte(" hello \n world \n extra ")
-
-	advance, token, err := scanLines(input, false)
-	assert.NoError(t, err)
-	assert.Equal(t, 8, advance)
-	assert.Equal(t, []byte(" hello "), token)
-
-	advance, token, err = scanLines(input[8:], false)
-	assert.NoError(t, err)
-	assert.Equal(t, 8, advance)
-	assert.Equal(t, []byte(" world "), token)
+	testCases := []struct {
+		name     string
+		baseFunc bufio.SplitFunc
+		trimFunc Func
+		input    []byte
+		steps    []splittest.Step
+	}{
+		{
+			// nil trim func should return original split func
+			name:     "NilTrimFunc",
+			input:    []byte(" hello \n world \n extra "),
+			baseFunc: bufio.ScanLines,
+			trimFunc: nil,
+			steps: []splittest.Step{
+				splittest.ExpectAdvanceToken(len(" hello \n"), " hello "),
+				splittest.ExpectAdvanceToken(len(" world \n"), " world "),
+				splittest.ExpectAdvanceToken(len(" extra "), " extra "),
+			},
+		},
+		{
+			name:     "ScanLinesStrictWithTrim",
+			input:    []byte(" hello \n world \n extra "),
+			baseFunc: bufio.ScanLines,
+			trimFunc: Whitespace,
+			steps: []splittest.Step{
+				splittest.ExpectAdvanceToken(len(" hello \n"), "hello"),
+				splittest.ExpectAdvanceToken(len(" world \n"), "world"),
+				splittest.ExpectAdvanceToken(len(" extra "), "extra"),
+			},
+		},
+	}
 
-	advance, token, err = scanLines(input[16:], false)
-	assert.NoError(t, err)
-	assert.Equal(t, 0, advance)
-	assert.Nil(t, token)
+	for _, tc := range testCases {
+		splitFunc := WithFunc(tc.baseFunc, tc.trimFunc)
+		t.Run(tc.name, splittest.New(splitFunc, tc.input, tc.steps...))
+	}
 }
diff --git a/receiver/kubeletstatsreceiver/documentation.md b/receiver/kubeletstatsreceiver/documentation.md
index 97f5c83f90e45..bd2aede3283fa 100644
--- a/receiver/kubeletstatsreceiver/documentation.md
+++ b/receiver/kubeletstatsreceiver/documentation.md
@@ -394,6 +394,22 @@ The time since the container started
 | ---- | ----------- | ---------- | ----------------------- | --------- |
 | s | Sum | Int | Cumulative | true |
 
+### k8s.container.memory_limit_utilization
+
+Container memory utilization as a ratio of the container's limits
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Double |
+
+### k8s.container.memory_request_utilization
+
+Container memory utilization as a ratio of the container's requests
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Double |
+
 ### k8s.node.uptime
 
 The time since the node started
@@ -402,6 +418,22 @@ The time since the node started
 | ---- | ----------- | ---------- | ----------------------- | --------- |
 | s | Sum | Int | Cumulative | true |
 
+### k8s.pod.memory_limit_utilization
+
+Pod memory utilization as a ratio of the pod's total container limits. If any container is missing a limit the metric is not emitted.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Double |
+
+### k8s.pod.memory_request_utilization
+
+Pod memory utilization as a ratio of the pod's total container requests. If any container is missing a request the metric is not emitted.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Double |
+
 ### k8s.pod.uptime
 
 The time since the pod started
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
index 858b969900ccf..53d206b737104 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go
@@ -57,7 +57,7 @@ func (a *metricDataAccumulator) nodeStats(s stats.NodeStats) {
 	currentTime := pcommon.NewTimestampFromTime(a.time)
 	addUptimeMetric(a.mbs.NodeMetricsBuilder, metadata.NodeUptimeMetrics.Uptime, s.StartTime, currentTime)
 	addCPUMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeCPUMetrics, s.CPU, currentTime)
-	addMemoryMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeMemoryMetrics, s.Memory, currentTime)
+	addMemoryMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeMemoryMetrics, s.Memory, currentTime, resources{})
 	addFilesystemMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeFilesystemMetrics, s.Fs, currentTime)
 	addNetworkMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeNetworkMetrics, s.Network, currentTime)
 	// todo s.Runtime.ImageFs
@@ -77,7 +77,7 @@ func (a *metricDataAccumulator) podStats(s stats.PodStats) {
 	currentTime := pcommon.NewTimestampFromTime(a.time)
 	addUptimeMetric(a.mbs.PodMetricsBuilder, metadata.PodUptimeMetrics.Uptime, s.StartTime, currentTime)
 	addCPUMetrics(a.mbs.PodMetricsBuilder, metadata.PodCPUMetrics, s.CPU, currentTime)
-	addMemoryMetrics(a.mbs.PodMetricsBuilder, metadata.PodMemoryMetrics, s.Memory, currentTime)
+	addMemoryMetrics(a.mbs.PodMetricsBuilder, metadata.PodMemoryMetrics, s.Memory, currentTime, a.metadata.podResources[s.PodRef.UID])
 	addFilesystemMetrics(a.mbs.PodMetricsBuilder, metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime)
 	addNetworkMetrics(a.mbs.PodMetricsBuilder, metadata.PodNetworkMetrics, s.Network, currentTime)
 
@@ -110,7 +110,7 @@ func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.Cont
 	currentTime := pcommon.NewTimestampFromTime(a.time)
 	addUptimeMetric(a.mbs.ContainerMetricsBuilder, metadata.ContainerUptimeMetrics.Uptime, s.StartTime, currentTime)
 	addCPUMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerCPUMetrics, s.CPU, currentTime)
-	addMemoryMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerMemoryMetrics, s.Memory, currentTime)
+	addMemoryMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerMemoryMetrics, s.Memory, currentTime, a.metadata.containerResources[sPod.PodRef.UID+s.Name])
 	addFilesystemMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerFilesystemMetrics, s.Rootfs, currentTime)
 
 	a.m = append(a.m, a.mbs.ContainerMetricsBuilder.Emit(
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
index 5068e0f23ffa3..fd18bfd270966 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go
@@ -44,12 +44,10 @@ func TestMetadataErrorCases(t *testing.T) {
 						ObjectMeta: metav1.ObjectMeta{
 							UID: "pod-uid-123",
 						},
-						Status: v1.PodStatus{
-							ContainerStatuses: []v1.ContainerStatus{
+						Spec: v1.PodSpec{
+							Containers: []v1.Container{
 								{
-									// different container name
-									Name:        "container2",
-									ContainerID: "test-container",
+									Name: "container2",
 								},
 							},
 						},
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/mem.go b/receiver/kubeletstatsreceiver/internal/kubelet/mem.go
index 977f21b8bb8f0..85713e5234c06 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/mem.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/mem.go
@@ -10,7 +10,7 @@ import (
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
 )
 
-func addMemoryMetrics(mb *metadata.MetricsBuilder, memoryMetrics metadata.MemoryMetrics, s *stats.MemoryStats, currentTime pcommon.Timestamp) {
+func addMemoryMetrics(mb *metadata.MetricsBuilder, memoryMetrics metadata.MemoryMetrics, s *stats.MemoryStats, currentTime pcommon.Timestamp, r resources) {
 	if s == nil {
 		return
 	}
@@ -21,4 +21,13 @@ func addMemoryMetrics(mb *metadata.MetricsBuilder, memoryMetrics metadata.Memory
 	recordIntDataPoint(mb, memoryMetrics.WorkingSet, s.WorkingSetBytes, currentTime)
 	recordIntDataPoint(mb, memoryMetrics.PageFaults, s.PageFaults, currentTime)
 	recordIntDataPoint(mb, memoryMetrics.MajorPageFaults, s.MajorPageFaults, currentTime)
+
+	if s.UsageBytes != nil {
+		if r.memoryLimit > 0 {
+			memoryMetrics.LimitUtilization(mb, currentTime, float64(*s.UsageBytes)/float64(r.memoryLimit))
+		}
+		if r.memoryRequest > 0 {
+			memoryMetrics.RequestUtilization(mb, currentTime, float64(*s.UsageBytes)/float64(r.memoryRequest))
+		}
+	}
 }
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go
index 5ee2e99b88ce5..f8c68b4cee49f 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go
@@ -25,51 +25,55 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error {
 
 // MetricsConfig provides config for kubeletstats metrics.
 type MetricsConfig struct {
-	ContainerCPUTime               MetricConfig `mapstructure:"container.cpu.time"`
-	ContainerCPUUtilization        MetricConfig `mapstructure:"container.cpu.utilization"`
-	ContainerFilesystemAvailable   MetricConfig `mapstructure:"container.filesystem.available"`
-	ContainerFilesystemCapacity    MetricConfig `mapstructure:"container.filesystem.capacity"`
-	ContainerFilesystemUsage       MetricConfig `mapstructure:"container.filesystem.usage"`
-	ContainerMemoryAvailable       MetricConfig `mapstructure:"container.memory.available"`
-	ContainerMemoryMajorPageFaults MetricConfig `mapstructure:"container.memory.major_page_faults"`
-	ContainerMemoryPageFaults      MetricConfig `mapstructure:"container.memory.page_faults"`
-	ContainerMemoryRss             MetricConfig `mapstructure:"container.memory.rss"`
-	ContainerMemoryUsage           MetricConfig `mapstructure:"container.memory.usage"`
-	ContainerMemoryWorkingSet      MetricConfig `mapstructure:"container.memory.working_set"`
-	ContainerUptime                MetricConfig `mapstructure:"container.uptime"`
-	K8sNodeCPUTime                 MetricConfig `mapstructure:"k8s.node.cpu.time"`
-	K8sNodeCPUUtilization          MetricConfig `mapstructure:"k8s.node.cpu.utilization"`
-	K8sNodeFilesystemAvailable     MetricConfig `mapstructure:"k8s.node.filesystem.available"`
-	K8sNodeFilesystemCapacity      MetricConfig `mapstructure:"k8s.node.filesystem.capacity"`
-	K8sNodeFilesystemUsage         MetricConfig `mapstructure:"k8s.node.filesystem.usage"`
-	K8sNodeMemoryAvailable         MetricConfig `mapstructure:"k8s.node.memory.available"`
-	K8sNodeMemoryMajorPageFaults   MetricConfig `mapstructure:"k8s.node.memory.major_page_faults"`
-	K8sNodeMemoryPageFaults        MetricConfig `mapstructure:"k8s.node.memory.page_faults"`
-	K8sNodeMemoryRss               MetricConfig `mapstructure:"k8s.node.memory.rss"`
-	K8sNodeMemoryUsage             MetricConfig `mapstructure:"k8s.node.memory.usage"`
-	K8sNodeMemoryWorkingSet        MetricConfig `mapstructure:"k8s.node.memory.working_set"`
-	K8sNodeNetworkErrors           MetricConfig `mapstructure:"k8s.node.network.errors"`
-	K8sNodeNetworkIo               MetricConfig `mapstructure:"k8s.node.network.io"`
-	K8sNodeUptime                  MetricConfig `mapstructure:"k8s.node.uptime"`
-	K8sPodCPUTime                  MetricConfig `mapstructure:"k8s.pod.cpu.time"`
-	K8sPodCPUUtilization           MetricConfig `mapstructure:"k8s.pod.cpu.utilization"`
-	K8sPodFilesystemAvailable      MetricConfig `mapstructure:"k8s.pod.filesystem.available"`
-	K8sPodFilesystemCapacity       MetricConfig `mapstructure:"k8s.pod.filesystem.capacity"`
-	K8sPodFilesystemUsage          MetricConfig `mapstructure:"k8s.pod.filesystem.usage"`
-	K8sPodMemoryAvailable          MetricConfig `mapstructure:"k8s.pod.memory.available"`
-	K8sPodMemoryMajorPageFaults    MetricConfig `mapstructure:"k8s.pod.memory.major_page_faults"`
-	K8sPodMemoryPageFaults         MetricConfig `mapstructure:"k8s.pod.memory.page_faults"`
-	K8sPodMemoryRss                MetricConfig `mapstructure:"k8s.pod.memory.rss"`
-	K8sPodMemoryUsage              MetricConfig `mapstructure:"k8s.pod.memory.usage"`
-	K8sPodMemoryWorkingSet         MetricConfig `mapstructure:"k8s.pod.memory.working_set"`
-	K8sPodNetworkErrors            MetricConfig `mapstructure:"k8s.pod.network.errors"`
-	K8sPodNetworkIo                MetricConfig `mapstructure:"k8s.pod.network.io"`
-	K8sPodUptime                   MetricConfig `mapstructure:"k8s.pod.uptime"`
-	K8sVolumeAvailable             MetricConfig `mapstructure:"k8s.volume.available"`
-	K8sVolumeCapacity              MetricConfig `mapstructure:"k8s.volume.capacity"`
-	K8sVolumeInodes                MetricConfig `mapstructure:"k8s.volume.inodes"`
-	K8sVolumeInodesFree            MetricConfig `mapstructure:"k8s.volume.inodes.free"`
-	K8sVolumeInodesUsed            MetricConfig `mapstructure:"k8s.volume.inodes.used"`
+	ContainerCPUTime                     MetricConfig `mapstructure:"container.cpu.time"`
+	ContainerCPUUtilization              MetricConfig `mapstructure:"container.cpu.utilization"`
+	ContainerFilesystemAvailable         MetricConfig `mapstructure:"container.filesystem.available"`
+	ContainerFilesystemCapacity          MetricConfig `mapstructure:"container.filesystem.capacity"`
+	ContainerFilesystemUsage             MetricConfig `mapstructure:"container.filesystem.usage"`
+	ContainerMemoryAvailable             MetricConfig `mapstructure:"container.memory.available"`
+	ContainerMemoryMajorPageFaults       MetricConfig `mapstructure:"container.memory.major_page_faults"`
+	ContainerMemoryPageFaults            MetricConfig `mapstructure:"container.memory.page_faults"`
+	ContainerMemoryRss                   MetricConfig `mapstructure:"container.memory.rss"`
+	ContainerMemoryUsage                 MetricConfig `mapstructure:"container.memory.usage"`
+	ContainerMemoryWorkingSet            MetricConfig `mapstructure:"container.memory.working_set"`
+	ContainerUptime                      MetricConfig `mapstructure:"container.uptime"`
+	K8sContainerMemoryLimitUtilization   MetricConfig `mapstructure:"k8s.container.memory_limit_utilization"`
+	K8sContainerMemoryRequestUtilization MetricConfig `mapstructure:"k8s.container.memory_request_utilization"`
+	K8sNodeCPUTime                       MetricConfig `mapstructure:"k8s.node.cpu.time"`
+	K8sNodeCPUUtilization                MetricConfig `mapstructure:"k8s.node.cpu.utilization"`
+	K8sNodeFilesystemAvailable           MetricConfig `mapstructure:"k8s.node.filesystem.available"`
+	K8sNodeFilesystemCapacity            MetricConfig `mapstructure:"k8s.node.filesystem.capacity"`
+	K8sNodeFilesystemUsage               MetricConfig `mapstructure:"k8s.node.filesystem.usage"`
+	K8sNodeMemoryAvailable               MetricConfig `mapstructure:"k8s.node.memory.available"`
+	K8sNodeMemoryMajorPageFaults         MetricConfig `mapstructure:"k8s.node.memory.major_page_faults"`
+	K8sNodeMemoryPageFaults              MetricConfig `mapstructure:"k8s.node.memory.page_faults"`
+	K8sNodeMemoryRss                     MetricConfig `mapstructure:"k8s.node.memory.rss"`
+	K8sNodeMemoryUsage                   MetricConfig `mapstructure:"k8s.node.memory.usage"`
+	K8sNodeMemoryWorkingSet              MetricConfig `mapstructure:"k8s.node.memory.working_set"`
+	K8sNodeNetworkErrors                 MetricConfig `mapstructure:"k8s.node.network.errors"`
+	K8sNodeNetworkIo                     MetricConfig `mapstructure:"k8s.node.network.io"`
+	K8sNodeUptime                        MetricConfig `mapstructure:"k8s.node.uptime"`
+	K8sPodCPUTime                        MetricConfig `mapstructure:"k8s.pod.cpu.time"`
+	K8sPodCPUUtilization                 MetricConfig `mapstructure:"k8s.pod.cpu.utilization"`
+	K8sPodFilesystemAvailable            MetricConfig `mapstructure:"k8s.pod.filesystem.available"`
+	K8sPodFilesystemCapacity             MetricConfig `mapstructure:"k8s.pod.filesystem.capacity"`
+	K8sPodFilesystemUsage                MetricConfig `mapstructure:"k8s.pod.filesystem.usage"`
+	K8sPodMemoryAvailable                MetricConfig `mapstructure:"k8s.pod.memory.available"`
+	K8sPodMemoryMajorPageFaults          MetricConfig `mapstructure:"k8s.pod.memory.major_page_faults"`
+	K8sPodMemoryPageFaults               MetricConfig `mapstructure:"k8s.pod.memory.page_faults"`
+	K8sPodMemoryRss                      MetricConfig `mapstructure:"k8s.pod.memory.rss"`
+	K8sPodMemoryUsage                    MetricConfig `mapstructure:"k8s.pod.memory.usage"`
+	K8sPodMemoryWorkingSet               MetricConfig `mapstructure:"k8s.pod.memory.working_set"`
+	K8sPodMemoryLimitUtilization         MetricConfig `mapstructure:"k8s.pod.memory_limit_utilization"`
+	K8sPodMemoryRequestUtilization       MetricConfig `mapstructure:"k8s.pod.memory_request_utilization"`
+	K8sPodNetworkErrors                  MetricConfig `mapstructure:"k8s.pod.network.errors"`
+	K8sPodNetworkIo                      MetricConfig `mapstructure:"k8s.pod.network.io"`
+	K8sPodUptime                         MetricConfig `mapstructure:"k8s.pod.uptime"`
+	K8sVolumeAvailable                   MetricConfig `mapstructure:"k8s.volume.available"`
+	K8sVolumeCapacity                    MetricConfig `mapstructure:"k8s.volume.capacity"`
+	K8sVolumeInodes                      MetricConfig `mapstructure:"k8s.volume.inodes"`
+	K8sVolumeInodesFree                  MetricConfig `mapstructure:"k8s.volume.inodes.free"`
+	K8sVolumeInodesUsed                  MetricConfig `mapstructure:"k8s.volume.inodes.used"`
 }
 
 func DefaultMetricsConfig() MetricsConfig {
@@ -110,6 +114,12 @@ func DefaultMetricsConfig() MetricsConfig {
 		ContainerUptime: MetricConfig{
 			Enabled: false,
 		},
+		K8sContainerMemoryLimitUtilization: MetricConfig{
+			Enabled: false,
+		},
+		K8sContainerMemoryRequestUtilization: MetricConfig{
+			Enabled: false,
+		},
 		K8sNodeCPUTime: MetricConfig{
 			Enabled: true,
 		},
@@ -185,6 +195,12 @@ func DefaultMetricsConfig() MetricsConfig {
 		K8sPodMemoryWorkingSet: MetricConfig{
 			Enabled: true,
 		},
+		K8sPodMemoryLimitUtilization: MetricConfig{
+			Enabled: false,
+		},
+		K8sPodMemoryRequestUtilization: MetricConfig{
+			Enabled: false,
+		},
 		K8sPodNetworkErrors: MetricConfig{
 			Enabled: true,
 		},
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go
index c54a8f7879358..149c19b8adcfb 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go
@@ -26,51 +26,55 @@ func TestMetricsBuilderConfig(t *testing.T) {
 			name: "all_set",
 			want: MetricsBuilderConfig{
 				Metrics: MetricsConfig{
-					ContainerCPUTime:               MetricConfig{Enabled: true},
-					ContainerCPUUtilization:        MetricConfig{Enabled: true},
-					ContainerFilesystemAvailable:   MetricConfig{Enabled: true},
-					ContainerFilesystemCapacity:    MetricConfig{Enabled: true},
-					ContainerFilesystemUsage:       MetricConfig{Enabled: true},
-					ContainerMemoryAvailable:       MetricConfig{Enabled: true},
-					ContainerMemoryMajorPageFaults: MetricConfig{Enabled: true},
-					ContainerMemoryPageFaults:      MetricConfig{Enabled: true},
-					ContainerMemoryRss:             MetricConfig{Enabled: true},
-					ContainerMemoryUsage:           MetricConfig{Enabled: true},
-					ContainerMemoryWorkingSet:      MetricConfig{Enabled: true},
-					ContainerUptime:                MetricConfig{Enabled: true},
-					K8sNodeCPUTime:                 MetricConfig{Enabled: true},
-					K8sNodeCPUUtilization:          MetricConfig{Enabled: true},
-					K8sNodeFilesystemAvailable:     MetricConfig{Enabled: true},
-					K8sNodeFilesystemCapacity:      MetricConfig{Enabled: true},
-					K8sNodeFilesystemUsage:         MetricConfig{Enabled: true},
-					K8sNodeMemoryAvailable:         MetricConfig{Enabled: true},
-					K8sNodeMemoryMajorPageFaults:   MetricConfig{Enabled: true},
-					K8sNodeMemoryPageFaults:        MetricConfig{Enabled: true},
-					K8sNodeMemoryRss:               MetricConfig{Enabled: true},
-					K8sNodeMemoryUsage:             MetricConfig{Enabled: true},
-					K8sNodeMemoryWorkingSet:        MetricConfig{Enabled: true},
-					K8sNodeNetworkErrors:           MetricConfig{Enabled: true},
-					K8sNodeNetworkIo:               MetricConfig{Enabled: true},
-					K8sNodeUptime:                  MetricConfig{Enabled: true},
-					K8sPodCPUTime:                  MetricConfig{Enabled: true},
-					K8sPodCPUUtilization:           MetricConfig{Enabled: true},
-					K8sPodFilesystemAvailable:      MetricConfig{Enabled: true},
-					K8sPodFilesystemCapacity:       MetricConfig{Enabled: true},
-					K8sPodFilesystemUsage:          MetricConfig{Enabled: true},
-					K8sPodMemoryAvailable:          MetricConfig{Enabled: true},
-					K8sPodMemoryMajorPageFaults:    MetricConfig{Enabled: true},
-					K8sPodMemoryPageFaults:         MetricConfig{Enabled: true},
-					K8sPodMemoryRss:                MetricConfig{Enabled: true},
-					K8sPodMemoryUsage:              MetricConfig{Enabled: true},
-					K8sPodMemoryWorkingSet:         MetricConfig{Enabled: true},
-					K8sPodNetworkErrors:            MetricConfig{Enabled: true},
-					K8sPodNetworkIo:                MetricConfig{Enabled: true},
-					K8sPodUptime:                   MetricConfig{Enabled: true},
-					K8sVolumeAvailable:             MetricConfig{Enabled: true},
-					K8sVolumeCapacity:              MetricConfig{Enabled: true},
-					K8sVolumeInodes:                MetricConfig{Enabled: true},
-					K8sVolumeInodesFree:            MetricConfig{Enabled: true},
-					K8sVolumeInodesUsed:            MetricConfig{Enabled: true},
+					ContainerCPUTime:                     MetricConfig{Enabled: true},
+					ContainerCPUUtilization:              MetricConfig{Enabled: true},
+					ContainerFilesystemAvailable:         MetricConfig{Enabled: true},
+					ContainerFilesystemCapacity:          MetricConfig{Enabled: true},
+					ContainerFilesystemUsage:             MetricConfig{Enabled: true},
+					ContainerMemoryAvailable:             MetricConfig{Enabled: true},
+					ContainerMemoryMajorPageFaults:       MetricConfig{Enabled: true},
+					ContainerMemoryPageFaults:            MetricConfig{Enabled: true},
+					ContainerMemoryRss:                   MetricConfig{Enabled: true},
+					ContainerMemoryUsage:                 MetricConfig{Enabled: true},
+					ContainerMemoryWorkingSet:            MetricConfig{Enabled: true},
+					ContainerUptime:                      MetricConfig{Enabled: true},
+					K8sContainerMemoryLimitUtilization:   MetricConfig{Enabled: true},
+					K8sContainerMemoryRequestUtilization: MetricConfig{Enabled: true},
+					K8sNodeCPUTime:                       MetricConfig{Enabled: true},
+					K8sNodeCPUUtilization:                MetricConfig{Enabled: true},
+					K8sNodeFilesystemAvailable:           MetricConfig{Enabled: true},
+					K8sNodeFilesystemCapacity:            MetricConfig{Enabled: true},
+					K8sNodeFilesystemUsage:               MetricConfig{Enabled: true},
+					K8sNodeMemoryAvailable:               MetricConfig{Enabled: true},
+					K8sNodeMemoryMajorPageFaults:         MetricConfig{Enabled: true},
+					K8sNodeMemoryPageFaults:              MetricConfig{Enabled: true},
+					K8sNodeMemoryRss:                     MetricConfig{Enabled: true},
+					K8sNodeMemoryUsage:                   MetricConfig{Enabled: true},
+					K8sNodeMemoryWorkingSet:              MetricConfig{Enabled: true},
+					K8sNodeNetworkErrors:                 MetricConfig{Enabled: true},
+					K8sNodeNetworkIo:                     MetricConfig{Enabled: true},
+					K8sNodeUptime:                        MetricConfig{Enabled: true},
+					K8sPodCPUTime:                        MetricConfig{Enabled: true},
+					K8sPodCPUUtilization:                 MetricConfig{Enabled: true},
+					K8sPodFilesystemAvailable:            MetricConfig{Enabled: true},
+					K8sPodFilesystemCapacity:             MetricConfig{Enabled: true},
+					K8sPodFilesystemUsage:                MetricConfig{Enabled: true},
+					K8sPodMemoryAvailable:                MetricConfig{Enabled: true},
+					K8sPodMemoryMajorPageFaults:          MetricConfig{Enabled: true},
+					K8sPodMemoryPageFaults:               MetricConfig{Enabled: true},
+					K8sPodMemoryRss:                      MetricConfig{Enabled: true},
+					K8sPodMemoryUsage:                    MetricConfig{Enabled: true},
+					K8sPodMemoryWorkingSet:               MetricConfig{Enabled: true},
+					K8sPodMemoryLimitUtilization:         MetricConfig{Enabled: true},
+					K8sPodMemoryRequestUtilization:       MetricConfig{Enabled: true},
+					K8sPodNetworkErrors:                  MetricConfig{Enabled: true},
+					K8sPodNetworkIo:                      MetricConfig{Enabled: true},
+					K8sPodUptime:                         MetricConfig{Enabled: true},
+					K8sVolumeAvailable:                   MetricConfig{Enabled: true},
+					K8sVolumeCapacity:                    MetricConfig{Enabled: true},
+					K8sVolumeInodes:                      MetricConfig{Enabled: true},
+					K8sVolumeInodesFree:                  MetricConfig{Enabled: true},
+					K8sVolumeInodesUsed:                  MetricConfig{Enabled: true},
 				},
 				ResourceAttributes: ResourceAttributesConfig{
 					AwsVolumeID:                  ResourceAttributeConfig{Enabled: true},
@@ -95,51 +99,55 @@ func TestMetricsBuilderConfig(t *testing.T) {
 			name: "none_set",
 			want: MetricsBuilderConfig{
 				Metrics: MetricsConfig{
-					ContainerCPUTime:               MetricConfig{Enabled: false},
-					ContainerCPUUtilization:        MetricConfig{Enabled: false},
-					ContainerFilesystemAvailable:   MetricConfig{Enabled: false},
-					ContainerFilesystemCapacity:    MetricConfig{Enabled: false},
-					ContainerFilesystemUsage:       MetricConfig{Enabled: false},
-					ContainerMemoryAvailable:       MetricConfig{Enabled: false},
-					ContainerMemoryMajorPageFaults: MetricConfig{Enabled: false},
-					ContainerMemoryPageFaults:      MetricConfig{Enabled: false},
-					ContainerMemoryRss:             MetricConfig{Enabled: false},
-					ContainerMemoryUsage:           MetricConfig{Enabled: false},
-					ContainerMemoryWorkingSet:      MetricConfig{Enabled: false},
-					ContainerUptime:                MetricConfig{Enabled: false},
-					K8sNodeCPUTime:                 MetricConfig{Enabled: false},
-					K8sNodeCPUUtilization:          MetricConfig{Enabled: false},
-					K8sNodeFilesystemAvailable:     MetricConfig{Enabled: false},
-					K8sNodeFilesystemCapacity:      MetricConfig{Enabled: false},
-					K8sNodeFilesystemUsage:         MetricConfig{Enabled: false},
-					K8sNodeMemoryAvailable:         MetricConfig{Enabled: false},
-					K8sNodeMemoryMajorPageFaults:   MetricConfig{Enabled: false},
-					K8sNodeMemoryPageFaults:        MetricConfig{Enabled: false},
-					K8sNodeMemoryRss:               MetricConfig{Enabled: false},
-					K8sNodeMemoryUsage:             MetricConfig{Enabled: false},
-					K8sNodeMemoryWorkingSet:        MetricConfig{Enabled: false},
-					K8sNodeNetworkErrors:           MetricConfig{Enabled: false},
-					K8sNodeNetworkIo:               MetricConfig{Enabled: false},
-					K8sNodeUptime:                  MetricConfig{Enabled: false},
-					K8sPodCPUTime:                  MetricConfig{Enabled: false},
-					K8sPodCPUUtilization:           MetricConfig{Enabled: false},
-					K8sPodFilesystemAvailable:      MetricConfig{Enabled: false},
-					K8sPodFilesystemCapacity:       MetricConfig{Enabled: false},
-					K8sPodFilesystemUsage:          MetricConfig{Enabled: false},
-					K8sPodMemoryAvailable:          MetricConfig{Enabled: false},
-					K8sPodMemoryMajorPageFaults:    MetricConfig{Enabled: false},
-					K8sPodMemoryPageFaults:         MetricConfig{Enabled: false},
-					K8sPodMemoryRss:                MetricConfig{Enabled: false},
-					K8sPodMemoryUsage:              MetricConfig{Enabled: false},
-					K8sPodMemoryWorkingSet:         MetricConfig{Enabled: false},
-					K8sPodNetworkErrors:            MetricConfig{Enabled: false},
-					K8sPodNetworkIo:                MetricConfig{Enabled: false},
-					K8sPodUptime:                   MetricConfig{Enabled: false},
-					K8sVolumeAvailable:             MetricConfig{Enabled: false},
-					K8sVolumeCapacity:              MetricConfig{Enabled: false},
-					K8sVolumeInodes:                MetricConfig{Enabled: false},
-					K8sVolumeInodesFree:            MetricConfig{Enabled: false},
-					K8sVolumeInodesUsed:            MetricConfig{Enabled: false},
+					ContainerCPUTime:                     MetricConfig{Enabled: false},
+					ContainerCPUUtilization:              MetricConfig{Enabled: false},
+					ContainerFilesystemAvailable:         MetricConfig{Enabled: false},
+					ContainerFilesystemCapacity:          MetricConfig{Enabled: false},
+					ContainerFilesystemUsage:             MetricConfig{Enabled: false},
+					ContainerMemoryAvailable:             MetricConfig{Enabled: false},
+					ContainerMemoryMajorPageFaults:       MetricConfig{Enabled: false},
+					ContainerMemoryPageFaults:            MetricConfig{Enabled: false},
+					ContainerMemoryRss:                   MetricConfig{Enabled: false},
+					ContainerMemoryUsage:                 MetricConfig{Enabled: false},
+					ContainerMemoryWorkingSet:            MetricConfig{Enabled: false},
+					ContainerUptime:                      MetricConfig{Enabled: false},
+					K8sContainerMemoryLimitUtilization:   MetricConfig{Enabled: false},
+					K8sContainerMemoryRequestUtilization: MetricConfig{Enabled: false},
+					K8sNodeCPUTime:                       MetricConfig{Enabled: false},
+					K8sNodeCPUUtilization:                MetricConfig{Enabled: false},
+					K8sNodeFilesystemAvailable:           MetricConfig{Enabled: false},
+					K8sNodeFilesystemCapacity:            MetricConfig{Enabled: false},
+					K8sNodeFilesystemUsage:               MetricConfig{Enabled: false},
+					K8sNodeMemoryAvailable:               MetricConfig{Enabled: false},
+					K8sNodeMemoryMajorPageFaults:         MetricConfig{Enabled: false},
+					K8sNodeMemoryPageFaults:              MetricConfig{Enabled: false},
+					K8sNodeMemoryRss:                     MetricConfig{Enabled: false},
+					K8sNodeMemoryUsage:                   MetricConfig{Enabled: false},
+					K8sNodeMemoryWorkingSet:              MetricConfig{Enabled: false},
+					K8sNodeNetworkErrors:                 MetricConfig{Enabled: false},
+					K8sNodeNetworkIo:                     MetricConfig{Enabled: false},
+					K8sNodeUptime:                        MetricConfig{Enabled: false},
+					K8sPodCPUTime:                        MetricConfig{Enabled: false},
+					K8sPodCPUUtilization:                 MetricConfig{Enabled: false},
+					K8sPodFilesystemAvailable:            MetricConfig{Enabled: false},
+					K8sPodFilesystemCapacity:             MetricConfig{Enabled: false},
+					K8sPodFilesystemUsage:                MetricConfig{Enabled: false},
+					K8sPodMemoryAvailable:                MetricConfig{Enabled: false},
+					K8sPodMemoryMajorPageFaults:          MetricConfig{Enabled: false},
+					K8sPodMemoryPageFaults:               MetricConfig{Enabled: false},
+					K8sPodMemoryRss:                      MetricConfig{Enabled: false},
+					K8sPodMemoryUsage:                    MetricConfig{Enabled: false},
+					K8sPodMemoryWorkingSet:               MetricConfig{Enabled: false},
+					K8sPodMemoryLimitUtilization:         MetricConfig{Enabled: false},
+					K8sPodMemoryRequestUtilization:       MetricConfig{Enabled: false},
+					K8sPodNetworkErrors:                  MetricConfig{Enabled: false},
+					K8sPodNetworkIo:                      MetricConfig{Enabled: false},
+					K8sPodUptime:                         MetricConfig{Enabled: false},
+					K8sVolumeAvailable:                   MetricConfig{Enabled: false},
+					K8sVolumeCapacity:                    MetricConfig{Enabled: false},
+					K8sVolumeInodes:                      MetricConfig{Enabled: false},
+					K8sVolumeInodesFree:                  MetricConfig{Enabled: false},
+					K8sVolumeInodesUsed:                  MetricConfig{Enabled: false},
 				},
 				ResourceAttributes: ResourceAttributesConfig{
 					AwsVolumeID:                  ResourceAttributeConfig{Enabled: false},
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go
index 1c5511b47c627..aa72c769e7d38 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go
@@ -629,6 +629,104 @@ func newMetricContainerUptime(cfg MetricConfig) metricContainerUptime {
 	return m
 }
 
+type metricK8sContainerMemoryLimitUtilization struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills k8s.container.memory_limit_utilization metric with initial data.
+func (m *metricK8sContainerMemoryLimitUtilization) init() {
+	m.data.SetName("k8s.container.memory_limit_utilization")
+	m.data.SetDescription("Container memory utilization as a ratio of the container's limits")
+	m.data.SetUnit("1")
+	m.data.SetEmptyGauge()
+}
+
+func (m *metricK8sContainerMemoryLimitUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sContainerMemoryLimitUtilization) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sContainerMemoryLimitUtilization) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricK8sContainerMemoryLimitUtilization(cfg MetricConfig) metricK8sContainerMemoryLimitUtilization {
+	m := metricK8sContainerMemoryLimitUtilization{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricK8sContainerMemoryRequestUtilization struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills k8s.container.memory_request_utilization metric with initial data.
+func (m *metricK8sContainerMemoryRequestUtilization) init() {
+	m.data.SetName("k8s.container.memory_request_utilization")
+	m.data.SetDescription("Container memory utilization as a ratio of the container's requests")
+	m.data.SetUnit("1")
+	m.data.SetEmptyGauge()
+}
+
+func (m *metricK8sContainerMemoryRequestUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sContainerMemoryRequestUtilization) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sContainerMemoryRequestUtilization) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricK8sContainerMemoryRequestUtilization(cfg MetricConfig) metricK8sContainerMemoryRequestUtilization {
+	m := metricK8sContainerMemoryRequestUtilization{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
 type metricK8sNodeCPUTime struct {
 	data     pmetric.Metric // data buffer for generated metric.
 	config   MetricConfig   // metric config provided by user.
@@ -1870,6 +1968,104 @@ func newMetricK8sPodMemoryWorkingSet(cfg MetricConfig) metricK8sPodMemoryWorking
 	return m
 }
 
+type metricK8sPodMemoryLimitUtilization struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.memory_limit_utilization metric with initial data.
+func (m *metricK8sPodMemoryLimitUtilization) init() {
+	m.data.SetName("k8s.pod.memory_limit_utilization")
+	m.data.SetDescription("Pod memory utilization as a ratio of the pod's total container limits. If any container is missing a limit the metric is not emitted.")
+	m.data.SetUnit("1")
+	m.data.SetEmptyGauge()
+}
+
+func (m *metricK8sPodMemoryLimitUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodMemoryLimitUtilization) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodMemoryLimitUtilization) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricK8sPodMemoryLimitUtilization(cfg MetricConfig) metricK8sPodMemoryLimitUtilization {
+	m := metricK8sPodMemoryLimitUtilization{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricK8sPodMemoryRequestUtilization struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills k8s.pod.memory_request_utilization metric with initial data.
+func (m *metricK8sPodMemoryRequestUtilization) init() {
+	m.data.SetName("k8s.pod.memory_request_utilization")
+	m.data.SetDescription("Pod memory utilization as a ratio of the pod's total container requests. If any container is missing a request the metric is not emitted.")
+	m.data.SetUnit("1")
+	m.data.SetEmptyGauge()
+}
+
+func (m *metricK8sPodMemoryRequestUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricK8sPodMemoryRequestUtilization) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricK8sPodMemoryRequestUtilization) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricK8sPodMemoryRequestUtilization(cfg MetricConfig) metricK8sPodMemoryRequestUtilization {
+	m := metricK8sPodMemoryRequestUtilization{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
 type metricK8sPodNetworkErrors struct {
 	data     pmetric.Metric // data buffer for generated metric.
 	config   MetricConfig   // metric config provided by user.
@@ -2277,56 +2473,60 @@ func newMetricK8sVolumeInodesUsed(cfg MetricConfig) metricK8sVolumeInodesUsed {
 // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
 // required to produce metric representation defined in metadata and user config.
 type MetricsBuilder struct {
-	config                               MetricsBuilderConfig // config of the metrics builder.
-	startTime                            pcommon.Timestamp    // start time that will be applied to all recorded data points.
-	metricsCapacity                      int                  // maximum observed number of metrics per resource.
-	metricsBuffer                        pmetric.Metrics      // accumulates metrics data before emitting.
-	buildInfo                            component.BuildInfo  // contains version information.
-	metricContainerCPUTime               metricContainerCPUTime
-	metricContainerCPUUtilization        metricContainerCPUUtilization
-	metricContainerFilesystemAvailable   metricContainerFilesystemAvailable
-	metricContainerFilesystemCapacity    metricContainerFilesystemCapacity
-	metricContainerFilesystemUsage       metricContainerFilesystemUsage
-	metricContainerMemoryAvailable       metricContainerMemoryAvailable
-	metricContainerMemoryMajorPageFaults metricContainerMemoryMajorPageFaults
-	metricContainerMemoryPageFaults      metricContainerMemoryPageFaults
-	metricContainerMemoryRss             metricContainerMemoryRss
-	metricContainerMemoryUsage           metricContainerMemoryUsage
-	metricContainerMemoryWorkingSet      metricContainerMemoryWorkingSet
-	metricContainerUptime                metricContainerUptime
-	metricK8sNodeCPUTime                 metricK8sNodeCPUTime
-	metricK8sNodeCPUUtilization          metricK8sNodeCPUUtilization
-	metricK8sNodeFilesystemAvailable     metricK8sNodeFilesystemAvailable
-	metricK8sNodeFilesystemCapacity      metricK8sNodeFilesystemCapacity
-	metricK8sNodeFilesystemUsage         metricK8sNodeFilesystemUsage
-	metricK8sNodeMemoryAvailable         metricK8sNodeMemoryAvailable
-	metricK8sNodeMemoryMajorPageFaults   metricK8sNodeMemoryMajorPageFaults
-	metricK8sNodeMemoryPageFaults        metricK8sNodeMemoryPageFaults
-	metricK8sNodeMemoryRss               metricK8sNodeMemoryRss
-	metricK8sNodeMemoryUsage             metricK8sNodeMemoryUsage
-	metricK8sNodeMemoryWorkingSet        metricK8sNodeMemoryWorkingSet
-	metricK8sNodeNetworkErrors           metricK8sNodeNetworkErrors
-	metricK8sNodeNetworkIo               metricK8sNodeNetworkIo
-	metricK8sNodeUptime                  metricK8sNodeUptime
-	metricK8sPodCPUTime                  metricK8sPodCPUTime
-	metricK8sPodCPUUtilization           metricK8sPodCPUUtilization
-	metricK8sPodFilesystemAvailable      metricK8sPodFilesystemAvailable
-	metricK8sPodFilesystemCapacity       metricK8sPodFilesystemCapacity
-	metricK8sPodFilesystemUsage          metricK8sPodFilesystemUsage
-	metricK8sPodMemoryAvailable          metricK8sPodMemoryAvailable
-	metricK8sPodMemoryMajorPageFaults    metricK8sPodMemoryMajorPageFaults
-	metricK8sPodMemoryPageFaults         metricK8sPodMemoryPageFaults
-	metricK8sPodMemoryRss                metricK8sPodMemoryRss
-	metricK8sPodMemoryUsage              metricK8sPodMemoryUsage
-	metricK8sPodMemoryWorkingSet         metricK8sPodMemoryWorkingSet
-	metricK8sPodNetworkErrors            metricK8sPodNetworkErrors
-	metricK8sPodNetworkIo                metricK8sPodNetworkIo
-	metricK8sPodUptime                   metricK8sPodUptime
-	metricK8sVolumeAvailable             metricK8sVolumeAvailable
-	metricK8sVolumeCapacity              metricK8sVolumeCapacity
-	metricK8sVolumeInodes                metricK8sVolumeInodes
-	metricK8sVolumeInodesFree            metricK8sVolumeInodesFree
-	metricK8sVolumeInodesUsed            metricK8sVolumeInodesUsed
+	config                                     MetricsBuilderConfig // config of the metrics builder.
+	startTime                                  pcommon.Timestamp    // start time that will be applied to all recorded data points.
+	metricsCapacity                            int                  // maximum observed number of metrics per resource.
+	metricsBuffer                              pmetric.Metrics      // accumulates metrics data before emitting.
+	buildInfo                                  component.BuildInfo  // contains version information.
+	metricContainerCPUTime                     metricContainerCPUTime
+	metricContainerCPUUtilization              metricContainerCPUUtilization
+	metricContainerFilesystemAvailable         metricContainerFilesystemAvailable
+	metricContainerFilesystemCapacity          metricContainerFilesystemCapacity
+	metricContainerFilesystemUsage             metricContainerFilesystemUsage
+	metricContainerMemoryAvailable             metricContainerMemoryAvailable
+	metricContainerMemoryMajorPageFaults       metricContainerMemoryMajorPageFaults
+	metricContainerMemoryPageFaults            metricContainerMemoryPageFaults
+	metricContainerMemoryRss                   metricContainerMemoryRss
+	metricContainerMemoryUsage                 metricContainerMemoryUsage
+	metricContainerMemoryWorkingSet            metricContainerMemoryWorkingSet
+	metricContainerUptime                      metricContainerUptime
+	metricK8sContainerMemoryLimitUtilization   metricK8sContainerMemoryLimitUtilization
+	metricK8sContainerMemoryRequestUtilization metricK8sContainerMemoryRequestUtilization
+	metricK8sNodeCPUTime                       metricK8sNodeCPUTime
+	metricK8sNodeCPUUtilization                metricK8sNodeCPUUtilization
+	metricK8sNodeFilesystemAvailable           metricK8sNodeFilesystemAvailable
+	metricK8sNodeFilesystemCapacity            metricK8sNodeFilesystemCapacity
+	metricK8sNodeFilesystemUsage               metricK8sNodeFilesystemUsage
+	metricK8sNodeMemoryAvailable               metricK8sNodeMemoryAvailable
+	metricK8sNodeMemoryMajorPageFaults         metricK8sNodeMemoryMajorPageFaults
+	metricK8sNodeMemoryPageFaults              metricK8sNodeMemoryPageFaults
+	metricK8sNodeMemoryRss                     metricK8sNodeMemoryRss
+	metricK8sNodeMemoryUsage                   metricK8sNodeMemoryUsage
+	metricK8sNodeMemoryWorkingSet              metricK8sNodeMemoryWorkingSet
+	metricK8sNodeNetworkErrors                 metricK8sNodeNetworkErrors
+	metricK8sNodeNetworkIo                     metricK8sNodeNetworkIo
+	metricK8sNodeUptime                        metricK8sNodeUptime
+	metricK8sPodCPUTime                        metricK8sPodCPUTime
+	metricK8sPodCPUUtilization                 metricK8sPodCPUUtilization
+	metricK8sPodFilesystemAvailable            metricK8sPodFilesystemAvailable
+	metricK8sPodFilesystemCapacity             metricK8sPodFilesystemCapacity
+	metricK8sPodFilesystemUsage                metricK8sPodFilesystemUsage
+	metricK8sPodMemoryAvailable                metricK8sPodMemoryAvailable
+	metricK8sPodMemoryMajorPageFaults          metricK8sPodMemoryMajorPageFaults
+	metricK8sPodMemoryPageFaults               metricK8sPodMemoryPageFaults
+	metricK8sPodMemoryRss                      metricK8sPodMemoryRss
+	metricK8sPodMemoryUsage                    metricK8sPodMemoryUsage
+	metricK8sPodMemoryWorkingSet               metricK8sPodMemoryWorkingSet
+	metricK8sPodMemoryLimitUtilization         metricK8sPodMemoryLimitUtilization
+	metricK8sPodMemoryRequestUtilization       metricK8sPodMemoryRequestUtilization
+	metricK8sPodNetworkErrors                  metricK8sPodNetworkErrors
+	metricK8sPodNetworkIo                      metricK8sPodNetworkIo
+	metricK8sPodUptime                         metricK8sPodUptime
+	metricK8sVolumeAvailable                   metricK8sVolumeAvailable
+	metricK8sVolumeCapacity                    metricK8sVolumeCapacity
+	metricK8sVolumeInodes                      metricK8sVolumeInodes
+	metricK8sVolumeInodesFree                  metricK8sVolumeInodesFree
+	metricK8sVolumeInodesUsed                  metricK8sVolumeInodesUsed
 }
 
 // metricBuilderOption applies changes to default metrics builder.
@@ -2341,55 +2541,59 @@ func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption {
 
 func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder {
 	mb := &MetricsBuilder{
-		config:                               mbc,
-		startTime:                            pcommon.NewTimestampFromTime(time.Now()),
-		metricsBuffer:                        pmetric.NewMetrics(),
-		buildInfo:                            settings.BuildInfo,
-		metricContainerCPUTime:               newMetricContainerCPUTime(mbc.Metrics.ContainerCPUTime),
-		metricContainerCPUUtilization:        newMetricContainerCPUUtilization(mbc.Metrics.ContainerCPUUtilization),
-		metricContainerFilesystemAvailable:   newMetricContainerFilesystemAvailable(mbc.Metrics.ContainerFilesystemAvailable),
-		metricContainerFilesystemCapacity:    newMetricContainerFilesystemCapacity(mbc.Metrics.ContainerFilesystemCapacity),
-		metricContainerFilesystemUsage:       newMetricContainerFilesystemUsage(mbc.Metrics.ContainerFilesystemUsage),
-		metricContainerMemoryAvailable:       newMetricContainerMemoryAvailable(mbc.Metrics.ContainerMemoryAvailable),
-		metricContainerMemoryMajorPageFaults: newMetricContainerMemoryMajorPageFaults(mbc.Metrics.ContainerMemoryMajorPageFaults),
-		metricContainerMemoryPageFaults:      newMetricContainerMemoryPageFaults(mbc.Metrics.ContainerMemoryPageFaults),
-		metricContainerMemoryRss:             newMetricContainerMemoryRss(mbc.Metrics.ContainerMemoryRss),
-		metricContainerMemoryUsage:           newMetricContainerMemoryUsage(mbc.Metrics.ContainerMemoryUsage),
-		metricContainerMemoryWorkingSet:      newMetricContainerMemoryWorkingSet(mbc.Metrics.ContainerMemoryWorkingSet),
-		metricContainerUptime:                newMetricContainerUptime(mbc.Metrics.ContainerUptime),
-		metricK8sNodeCPUTime:                 newMetricK8sNodeCPUTime(mbc.Metrics.K8sNodeCPUTime),
-		metricK8sNodeCPUUtilization:          newMetricK8sNodeCPUUtilization(mbc.Metrics.K8sNodeCPUUtilization),
-		metricK8sNodeFilesystemAvailable:     newMetricK8sNodeFilesystemAvailable(mbc.Metrics.K8sNodeFilesystemAvailable),
-		metricK8sNodeFilesystemCapacity:      newMetricK8sNodeFilesystemCapacity(mbc.Metrics.K8sNodeFilesystemCapacity),
-		metricK8sNodeFilesystemUsage:         newMetricK8sNodeFilesystemUsage(mbc.Metrics.K8sNodeFilesystemUsage),
-		metricK8sNodeMemoryAvailable:         newMetricK8sNodeMemoryAvailable(mbc.Metrics.K8sNodeMemoryAvailable),
-		metricK8sNodeMemoryMajorPageFaults:   newMetricK8sNodeMemoryMajorPageFaults(mbc.Metrics.K8sNodeMemoryMajorPageFaults),
-		metricK8sNodeMemoryPageFaults:        newMetricK8sNodeMemoryPageFaults(mbc.Metrics.K8sNodeMemoryPageFaults),
-		metricK8sNodeMemoryRss:               newMetricK8sNodeMemoryRss(mbc.Metrics.K8sNodeMemoryRss),
-		metricK8sNodeMemoryUsage:             newMetricK8sNodeMemoryUsage(mbc.Metrics.K8sNodeMemoryUsage),
-		metricK8sNodeMemoryWorkingSet:        newMetricK8sNodeMemoryWorkingSet(mbc.Metrics.K8sNodeMemoryWorkingSet),
-		metricK8sNodeNetworkErrors:           newMetricK8sNodeNetworkErrors(mbc.Metrics.K8sNodeNetworkErrors),
-		metricK8sNodeNetworkIo:               newMetricK8sNodeNetworkIo(mbc.Metrics.K8sNodeNetworkIo),
-		metricK8sNodeUptime:                  newMetricK8sNodeUptime(mbc.Metrics.K8sNodeUptime),
-		metricK8sPodCPUTime:                  newMetricK8sPodCPUTime(mbc.Metrics.K8sPodCPUTime),
-		metricK8sPodCPUUtilization:           newMetricK8sPodCPUUtilization(mbc.Metrics.K8sPodCPUUtilization),
-		metricK8sPodFilesystemAvailable:      newMetricK8sPodFilesystemAvailable(mbc.Metrics.K8sPodFilesystemAvailable),
-		metricK8sPodFilesystemCapacity:       newMetricK8sPodFilesystemCapacity(mbc.Metrics.K8sPodFilesystemCapacity),
-		metricK8sPodFilesystemUsage:          newMetricK8sPodFilesystemUsage(mbc.Metrics.K8sPodFilesystemUsage),
-		metricK8sPodMemoryAvailable:          newMetricK8sPodMemoryAvailable(mbc.Metrics.K8sPodMemoryAvailable),
-		metricK8sPodMemoryMajorPageFaults:    newMetricK8sPodMemoryMajorPageFaults(mbc.Metrics.K8sPodMemoryMajorPageFaults),
-		metricK8sPodMemoryPageFaults:         newMetricK8sPodMemoryPageFaults(mbc.Metrics.K8sPodMemoryPageFaults),
-		metricK8sPodMemoryRss:                newMetricK8sPodMemoryRss(mbc.Metrics.K8sPodMemoryRss),
-		metricK8sPodMemoryUsage:              newMetricK8sPodMemoryUsage(mbc.Metrics.K8sPodMemoryUsage),
-		metricK8sPodMemoryWorkingSet:         newMetricK8sPodMemoryWorkingSet(mbc.Metrics.K8sPodMemoryWorkingSet),
-		metricK8sPodNetworkErrors:            newMetricK8sPodNetworkErrors(mbc.Metrics.K8sPodNetworkErrors),
-		metricK8sPodNetworkIo:                newMetricK8sPodNetworkIo(mbc.Metrics.K8sPodNetworkIo),
-		metricK8sPodUptime:                   newMetricK8sPodUptime(mbc.Metrics.K8sPodUptime),
-		metricK8sVolumeAvailable:             newMetricK8sVolumeAvailable(mbc.Metrics.K8sVolumeAvailable),
-		metricK8sVolumeCapacity:              newMetricK8sVolumeCapacity(mbc.Metrics.K8sVolumeCapacity),
-		metricK8sVolumeInodes:                newMetricK8sVolumeInodes(mbc.Metrics.K8sVolumeInodes),
-		metricK8sVolumeInodesFree:            newMetricK8sVolumeInodesFree(mbc.Metrics.K8sVolumeInodesFree),
-		metricK8sVolumeInodesUsed:            newMetricK8sVolumeInodesUsed(mbc.Metrics.K8sVolumeInodesUsed),
+		config:                                     mbc,
+		startTime:                                  pcommon.NewTimestampFromTime(time.Now()),
+		metricsBuffer:                              pmetric.NewMetrics(),
+		buildInfo:                                  settings.BuildInfo,
+		metricContainerCPUTime:                     newMetricContainerCPUTime(mbc.Metrics.ContainerCPUTime),
+		metricContainerCPUUtilization:              newMetricContainerCPUUtilization(mbc.Metrics.ContainerCPUUtilization),
+		metricContainerFilesystemAvailable:         newMetricContainerFilesystemAvailable(mbc.Metrics.ContainerFilesystemAvailable),
+		metricContainerFilesystemCapacity:          newMetricContainerFilesystemCapacity(mbc.Metrics.ContainerFilesystemCapacity),
+		metricContainerFilesystemUsage:             newMetricContainerFilesystemUsage(mbc.Metrics.ContainerFilesystemUsage),
+		metricContainerMemoryAvailable:             newMetricContainerMemoryAvailable(mbc.Metrics.ContainerMemoryAvailable),
+		metricContainerMemoryMajorPageFaults:       newMetricContainerMemoryMajorPageFaults(mbc.Metrics.ContainerMemoryMajorPageFaults),
+		metricContainerMemoryPageFaults:            newMetricContainerMemoryPageFaults(mbc.Metrics.ContainerMemoryPageFaults),
+		metricContainerMemoryRss:                   newMetricContainerMemoryRss(mbc.Metrics.ContainerMemoryRss),
+		metricContainerMemoryUsage:                 newMetricContainerMemoryUsage(mbc.Metrics.ContainerMemoryUsage),
+		metricContainerMemoryWorkingSet:            newMetricContainerMemoryWorkingSet(mbc.Metrics.ContainerMemoryWorkingSet),
+		metricContainerUptime:                      newMetricContainerUptime(mbc.Metrics.ContainerUptime),
+		metricK8sContainerMemoryLimitUtilization:   newMetricK8sContainerMemoryLimitUtilization(mbc.Metrics.K8sContainerMemoryLimitUtilization),
+		metricK8sContainerMemoryRequestUtilization: newMetricK8sContainerMemoryRequestUtilization(mbc.Metrics.K8sContainerMemoryRequestUtilization),
+		metricK8sNodeCPUTime:                       newMetricK8sNodeCPUTime(mbc.Metrics.K8sNodeCPUTime),
+		metricK8sNodeCPUUtilization:                newMetricK8sNodeCPUUtilization(mbc.Metrics.K8sNodeCPUUtilization),
+		metricK8sNodeFilesystemAvailable:           newMetricK8sNodeFilesystemAvailable(mbc.Metrics.K8sNodeFilesystemAvailable),
+		metricK8sNodeFilesystemCapacity:            newMetricK8sNodeFilesystemCapacity(mbc.Metrics.K8sNodeFilesystemCapacity),
+		metricK8sNodeFilesystemUsage:               newMetricK8sNodeFilesystemUsage(mbc.Metrics.K8sNodeFilesystemUsage),
+		metricK8sNodeMemoryAvailable:               newMetricK8sNodeMemoryAvailable(mbc.Metrics.K8sNodeMemoryAvailable),
+		metricK8sNodeMemoryMajorPageFaults:         newMetricK8sNodeMemoryMajorPageFaults(mbc.Metrics.K8sNodeMemoryMajorPageFaults),
+		metricK8sNodeMemoryPageFaults:              newMetricK8sNodeMemoryPageFaults(mbc.Metrics.K8sNodeMemoryPageFaults),
+		metricK8sNodeMemoryRss:                     newMetricK8sNodeMemoryRss(mbc.Metrics.K8sNodeMemoryRss),
+		metricK8sNodeMemoryUsage:                   newMetricK8sNodeMemoryUsage(mbc.Metrics.K8sNodeMemoryUsage),
+		metricK8sNodeMemoryWorkingSet:              newMetricK8sNodeMemoryWorkingSet(mbc.Metrics.K8sNodeMemoryWorkingSet),
+		metricK8sNodeNetworkErrors:                 newMetricK8sNodeNetworkErrors(mbc.Metrics.K8sNodeNetworkErrors),
+		metricK8sNodeNetworkIo:                     newMetricK8sNodeNetworkIo(mbc.Metrics.K8sNodeNetworkIo),
+		metricK8sNodeUptime:                        newMetricK8sNodeUptime(mbc.Metrics.K8sNodeUptime),
+		metricK8sPodCPUTime:                        newMetricK8sPodCPUTime(mbc.Metrics.K8sPodCPUTime),
+		metricK8sPodCPUUtilization:                 newMetricK8sPodCPUUtilization(mbc.Metrics.K8sPodCPUUtilization),
+		metricK8sPodFilesystemAvailable:            newMetricK8sPodFilesystemAvailable(mbc.Metrics.K8sPodFilesystemAvailable),
+		metricK8sPodFilesystemCapacity:             newMetricK8sPodFilesystemCapacity(mbc.Metrics.K8sPodFilesystemCapacity),
+		metricK8sPodFilesystemUsage:                newMetricK8sPodFilesystemUsage(mbc.Metrics.K8sPodFilesystemUsage),
+		metricK8sPodMemoryAvailable:                newMetricK8sPodMemoryAvailable(mbc.Metrics.K8sPodMemoryAvailable),
+		metricK8sPodMemoryMajorPageFaults:          newMetricK8sPodMemoryMajorPageFaults(mbc.Metrics.K8sPodMemoryMajorPageFaults),
+		metricK8sPodMemoryPageFaults:               newMetricK8sPodMemoryPageFaults(mbc.Metrics.K8sPodMemoryPageFaults),
+		metricK8sPodMemoryRss:                      newMetricK8sPodMemoryRss(mbc.Metrics.K8sPodMemoryRss),
+		metricK8sPodMemoryUsage:                    newMetricK8sPodMemoryUsage(mbc.Metrics.K8sPodMemoryUsage),
+		metricK8sPodMemoryWorkingSet:               newMetricK8sPodMemoryWorkingSet(mbc.Metrics.K8sPodMemoryWorkingSet),
+		metricK8sPodMemoryLimitUtilization:         newMetricK8sPodMemoryLimitUtilization(mbc.Metrics.K8sPodMemoryLimitUtilization),
+		metricK8sPodMemoryRequestUtilization:       newMetricK8sPodMemoryRequestUtilization(mbc.Metrics.K8sPodMemoryRequestUtilization),
+		metricK8sPodNetworkErrors:                  newMetricK8sPodNetworkErrors(mbc.Metrics.K8sPodNetworkErrors),
+		metricK8sPodNetworkIo:                      newMetricK8sPodNetworkIo(mbc.Metrics.K8sPodNetworkIo),
+		metricK8sPodUptime:                         newMetricK8sPodUptime(mbc.Metrics.K8sPodUptime),
+		metricK8sVolumeAvailable:                   newMetricK8sVolumeAvailable(mbc.Metrics.K8sVolumeAvailable),
+		metricK8sVolumeCapacity:                    newMetricK8sVolumeCapacity(mbc.Metrics.K8sVolumeCapacity),
+		metricK8sVolumeInodes:                      newMetricK8sVolumeInodes(mbc.Metrics.K8sVolumeInodes),
+		metricK8sVolumeInodesFree:                  newMetricK8sVolumeInodesFree(mbc.Metrics.K8sVolumeInodesFree),
+		metricK8sVolumeInodesUsed:                  newMetricK8sVolumeInodesUsed(mbc.Metrics.K8sVolumeInodesUsed),
 	}
 	for _, op := range options {
 		op(mb)
@@ -2463,6 +2667,8 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) {
 	mb.metricContainerMemoryUsage.emit(ils.Metrics())
 	mb.metricContainerMemoryWorkingSet.emit(ils.Metrics())
 	mb.metricContainerUptime.emit(ils.Metrics())
+	mb.metricK8sContainerMemoryLimitUtilization.emit(ils.Metrics())
+	mb.metricK8sContainerMemoryRequestUtilization.emit(ils.Metrics())
 	mb.metricK8sNodeCPUTime.emit(ils.Metrics())
 	mb.metricK8sNodeCPUUtilization.emit(ils.Metrics())
 	mb.metricK8sNodeFilesystemAvailable.emit(ils.Metrics())
@@ -2488,6 +2694,8 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) {
 	mb.metricK8sPodMemoryRss.emit(ils.Metrics())
 	mb.metricK8sPodMemoryUsage.emit(ils.Metrics())
 	mb.metricK8sPodMemoryWorkingSet.emit(ils.Metrics())
+	mb.metricK8sPodMemoryLimitUtilization.emit(ils.Metrics())
+	mb.metricK8sPodMemoryRequestUtilization.emit(ils.Metrics())
 	mb.metricK8sPodNetworkErrors.emit(ils.Metrics())
 	mb.metricK8sPodNetworkIo.emit(ils.Metrics())
 	mb.metricK8sPodUptime.emit(ils.Metrics())
@@ -2576,6 +2784,16 @@ func (mb *MetricsBuilder) RecordContainerUptimeDataPoint(ts pcommon.Timestamp, v
 	mb.metricContainerUptime.recordDataPoint(mb.startTime, ts, val)
 }
 
+// RecordK8sContainerMemoryLimitUtilizationDataPoint adds a data point to k8s.container.memory_limit_utilization metric.
+func (mb *MetricsBuilder) RecordK8sContainerMemoryLimitUtilizationDataPoint(ts pcommon.Timestamp, val float64) {
+	mb.metricK8sContainerMemoryLimitUtilization.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sContainerMemoryRequestUtilizationDataPoint adds a data point to k8s.container.memory_request_utilization metric.
+func (mb *MetricsBuilder) RecordK8sContainerMemoryRequestUtilizationDataPoint(ts pcommon.Timestamp, val float64) {
+	mb.metricK8sContainerMemoryRequestUtilization.recordDataPoint(mb.startTime, ts, val)
+}
+
 // RecordK8sNodeCPUTimeDataPoint adds a data point to k8s.node.cpu.time metric.
 func (mb *MetricsBuilder) RecordK8sNodeCPUTimeDataPoint(ts pcommon.Timestamp, val float64) {
 	mb.metricK8sNodeCPUTime.recordDataPoint(mb.startTime, ts, val)
@@ -2701,6 +2919,16 @@ func (mb *MetricsBuilder) RecordK8sPodMemoryWorkingSetDataPoint(ts pcommon.Times
 	mb.metricK8sPodMemoryWorkingSet.recordDataPoint(mb.startTime, ts, val)
 }
 
+// RecordK8sPodMemoryLimitUtilizationDataPoint adds a data point to k8s.pod.memory_limit_utilization metric.
+func (mb *MetricsBuilder) RecordK8sPodMemoryLimitUtilizationDataPoint(ts pcommon.Timestamp, val float64) {
+	mb.metricK8sPodMemoryLimitUtilization.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sPodMemoryRequestUtilizationDataPoint adds a data point to k8s.pod.memory_request_utilization metric.
+func (mb *MetricsBuilder) RecordK8sPodMemoryRequestUtilizationDataPoint(ts pcommon.Timestamp, val float64) {
+	mb.metricK8sPodMemoryRequestUtilization.recordDataPoint(mb.startTime, ts, val)
+}
+
 // RecordK8sPodNetworkErrorsDataPoint adds a data point to k8s.pod.network.errors metric.
 func (mb *MetricsBuilder) RecordK8sPodNetworkErrorsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string, directionAttributeValue AttributeDirection) {
 	mb.metricK8sPodNetworkErrors.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue, directionAttributeValue.String())
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go
index 9dc39efa019b9..11cb737ec6d2f 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go
@@ -101,6 +101,12 @@ func TestMetricsBuilder(t *testing.T) {
 			allMetricsCount++
 			mb.RecordContainerUptimeDataPoint(ts, 1)
 
+			allMetricsCount++
+			mb.RecordK8sContainerMemoryLimitUtilizationDataPoint(ts, 1)
+
+			allMetricsCount++
+			mb.RecordK8sContainerMemoryRequestUtilizationDataPoint(ts, 1)
+
 			defaultMetricsCount++
 			allMetricsCount++
 			mb.RecordK8sNodeCPUTimeDataPoint(ts, 1)
@@ -200,6 +206,12 @@ func TestMetricsBuilder(t *testing.T) {
 			allMetricsCount++
 			mb.RecordK8sPodMemoryWorkingSetDataPoint(ts, 1)
 
+			allMetricsCount++
+			mb.RecordK8sPodMemoryLimitUtilizationDataPoint(ts, 1)
+
+			allMetricsCount++
+			mb.RecordK8sPodMemoryRequestUtilizationDataPoint(ts, 1)
+
 			defaultMetricsCount++
 			allMetricsCount++
 			mb.RecordK8sPodNetworkErrorsDataPoint(ts, 1, "interface-val", AttributeDirectionReceive)
@@ -417,6 +429,30 @@ func TestMetricsBuilder(t *testing.T) {
 					assert.Equal(t, ts, dp.Timestamp())
 					assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
 					assert.Equal(t, int64(1), dp.IntValue())
+				case "k8s.container.memory_limit_utilization":
+					assert.False(t, validatedMetrics["k8s.container.memory_limit_utilization"], "Found a duplicate in the metrics slice: k8s.container.memory_limit_utilization")
+					validatedMetrics["k8s.container.memory_limit_utilization"] = true
+					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+					assert.Equal(t, "Container memory utilization as a ratio of the container's limits", ms.At(i).Description())
+					assert.Equal(t, "1", ms.At(i).Unit())
+					dp := ms.At(i).Gauge().DataPoints().At(0)
+					assert.Equal(t, start, dp.StartTimestamp())
+					assert.Equal(t, ts, dp.Timestamp())
+					assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
+					assert.Equal(t, float64(1), dp.DoubleValue())
+				case "k8s.container.memory_request_utilization":
+					assert.False(t, validatedMetrics["k8s.container.memory_request_utilization"], "Found a duplicate in the metrics slice: k8s.container.memory_request_utilization")
+					validatedMetrics["k8s.container.memory_request_utilization"] = true
+					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+					assert.Equal(t, "Container memory utilization as a ratio of the container's requests", ms.At(i).Description())
+					assert.Equal(t, "1", ms.At(i).Unit())
+					dp := ms.At(i).Gauge().DataPoints().At(0)
+					assert.Equal(t, start, dp.StartTimestamp())
+					assert.Equal(t, ts, dp.Timestamp())
+					assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
+					assert.Equal(t, float64(1), dp.DoubleValue())
 				case "k8s.node.cpu.time":
 					assert.False(t, validatedMetrics["k8s.node.cpu.time"], "Found a duplicate in the metrics slice: k8s.node.cpu.time")
 					validatedMetrics["k8s.node.cpu.time"] = true
@@ -739,6 +775,30 @@ func TestMetricsBuilder(t *testing.T) {
 					assert.Equal(t, ts, dp.Timestamp())
 					assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
 					assert.Equal(t, int64(1), dp.IntValue())
+				case "k8s.pod.memory_limit_utilization":
+					assert.False(t, validatedMetrics["k8s.pod.memory_limit_utilization"], "Found a duplicate in the metrics slice: k8s.pod.memory_limit_utilization")
+					validatedMetrics["k8s.pod.memory_limit_utilization"] = true
+					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+					assert.Equal(t, "Pod memory utilization as a ratio of the pod's total container limits. If any container is missing a limit the metric is not emitted.", ms.At(i).Description())
+					assert.Equal(t, "1", ms.At(i).Unit())
+					dp := ms.At(i).Gauge().DataPoints().At(0)
+					assert.Equal(t, start, dp.StartTimestamp())
+					assert.Equal(t, ts, dp.Timestamp())
+					assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
+					assert.Equal(t, float64(1), dp.DoubleValue())
+				case "k8s.pod.memory_request_utilization":
+					assert.False(t, validatedMetrics["k8s.pod.memory_request_utilization"], "Found a duplicate in the metrics slice: k8s.pod.memory_request_utilization")
+					validatedMetrics["k8s.pod.memory_request_utilization"] = true
+					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+					assert.Equal(t, "Pod memory utilization as a ratio of the pod's total container requests. If any container is missing a request the metric is not emitted.", ms.At(i).Description())
+					assert.Equal(t, "1", ms.At(i).Unit())
+					dp := ms.At(i).Gauge().DataPoints().At(0)
+					assert.Equal(t, start, dp.StartTimestamp())
+					assert.Equal(t, ts, dp.Timestamp())
+					assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
+					assert.Equal(t, float64(1), dp.DoubleValue())
 				case "k8s.pod.network.errors":
 					assert.False(t, validatedMetrics["k8s.pod.network.errors"], "Found a duplicate in the metrics slice: k8s.pod.network.errors")
 					validatedMetrics["k8s.pod.network.errors"] = true
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
index 838a53efd3ba2..196accd9411d8 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go
@@ -19,8 +19,9 @@ type MetricsBuilders struct {
 }
 
 type CPUMetrics struct {
-	Time        RecordDoubleDataPointFunc
-	Utilization RecordDoubleDataPointFunc
+	Time         RecordDoubleDataPointFunc
+	Utilization  RecordDoubleDataPointFunc
+	UsagePercent RecordDoubleDataPointFunc
 }
 
 var NodeCPUMetrics = CPUMetrics{
@@ -39,12 +40,14 @@ var ContainerCPUMetrics = CPUMetrics{
 }
 
 type MemoryMetrics struct {
-	Available       RecordIntDataPointFunc
-	Usage           RecordIntDataPointFunc
-	Rss             RecordIntDataPointFunc
-	WorkingSet      RecordIntDataPointFunc
-	PageFaults      RecordIntDataPointFunc
-	MajorPageFaults RecordIntDataPointFunc
+	Available          RecordIntDataPointFunc
+	Usage              RecordIntDataPointFunc
+	LimitUtilization   RecordDoubleDataPointFunc
+	RequestUtilization RecordDoubleDataPointFunc
+	Rss                RecordIntDataPointFunc
+	WorkingSet         RecordIntDataPointFunc
+	PageFaults         RecordIntDataPointFunc
+	MajorPageFaults    RecordIntDataPointFunc
 }
 
 var NodeMemoryMetrics = MemoryMetrics{
@@ -57,21 +60,25 @@ var NodeMemoryMetrics = MemoryMetrics{
 }
 
 var PodMemoryMetrics = MemoryMetrics{
-	Available:       (*MetricsBuilder).RecordK8sPodMemoryAvailableDataPoint,
-	Usage:           (*MetricsBuilder).RecordK8sPodMemoryUsageDataPoint,
-	Rss:             (*MetricsBuilder).RecordK8sPodMemoryRssDataPoint,
-	WorkingSet:      (*MetricsBuilder).RecordK8sPodMemoryWorkingSetDataPoint,
-	PageFaults:      (*MetricsBuilder).RecordK8sPodMemoryPageFaultsDataPoint,
-	MajorPageFaults: (*MetricsBuilder).RecordK8sPodMemoryMajorPageFaultsDataPoint,
+	Available:          (*MetricsBuilder).RecordK8sPodMemoryAvailableDataPoint,
+	Usage:              (*MetricsBuilder).RecordK8sPodMemoryUsageDataPoint,
+	LimitUtilization:   (*MetricsBuilder).RecordK8sPodMemoryLimitUtilizationDataPoint,
+	RequestUtilization: (*MetricsBuilder).RecordK8sPodMemoryRequestUtilizationDataPoint,
+	Rss:                (*MetricsBuilder).RecordK8sPodMemoryRssDataPoint,
+	WorkingSet:         (*MetricsBuilder).RecordK8sPodMemoryWorkingSetDataPoint,
+	PageFaults:         (*MetricsBuilder).RecordK8sPodMemoryPageFaultsDataPoint,
+	MajorPageFaults:    (*MetricsBuilder).RecordK8sPodMemoryMajorPageFaultsDataPoint,
 }
 
 var ContainerMemoryMetrics = MemoryMetrics{
-	Available:       (*MetricsBuilder).RecordContainerMemoryAvailableDataPoint,
-	Usage:           (*MetricsBuilder).RecordContainerMemoryUsageDataPoint,
-	Rss:             (*MetricsBuilder).RecordContainerMemoryRssDataPoint,
-	WorkingSet:      (*MetricsBuilder).RecordContainerMemoryWorkingSetDataPoint,
-	PageFaults:      (*MetricsBuilder).RecordContainerMemoryPageFaultsDataPoint,
-	MajorPageFaults: (*MetricsBuilder).RecordContainerMemoryMajorPageFaultsDataPoint,
+	Available:          (*MetricsBuilder).RecordContainerMemoryAvailableDataPoint,
+	Usage:              (*MetricsBuilder).RecordContainerMemoryUsageDataPoint,
+	LimitUtilization:   (*MetricsBuilder).RecordK8sContainerMemoryLimitUtilizationDataPoint,
+	RequestUtilization: (*MetricsBuilder).RecordK8sContainerMemoryRequestUtilizationDataPoint,
+	Rss:                (*MetricsBuilder).RecordContainerMemoryRssDataPoint,
+	WorkingSet:         (*MetricsBuilder).RecordContainerMemoryWorkingSetDataPoint,
+	PageFaults:         (*MetricsBuilder).RecordContainerMemoryPageFaultsDataPoint,
+	MajorPageFaults:    (*MetricsBuilder).RecordContainerMemoryMajorPageFaultsDataPoint,
 }
 
 type FilesystemMetrics struct {
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml b/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml
index e79fe3db7171c..35c5011d0d9b1 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml
+++ b/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml
@@ -25,6 +25,10 @@ all_set:
       enabled: true
     container.uptime:
       enabled: true
+    k8s.container.memory_limit_utilization:
+      enabled: true
+    k8s.container.memory_request_utilization:
+      enabled: true
     k8s.node.cpu.time:
       enabled: true
     k8s.node.cpu.utilization:
@@ -75,6 +79,10 @@ all_set:
       enabled: true
     k8s.pod.memory.working_set:
       enabled: true
+    k8s.pod.memory_limit_utilization:
+      enabled: true
+    k8s.pod.memory_request_utilization:
+      enabled: true
     k8s.pod.network.errors:
       enabled: true
     k8s.pod.network.io:
@@ -148,6 +156,10 @@ none_set:
       enabled: false
     container.uptime:
       enabled: false
+    k8s.container.memory_limit_utilization:
+      enabled: false
+    k8s.container.memory_request_utilization:
+      enabled: false
     k8s.node.cpu.time:
       enabled: false
     k8s.node.cpu.utilization:
@@ -198,6 +210,10 @@ none_set:
       enabled: false
     k8s.pod.memory.working_set:
       enabled: false
+    k8s.pod.memory_limit_utilization:
+      enabled: false
+    k8s.pod.memory_request_utilization:
+      enabled: false
     k8s.pod.network.errors:
       enabled: false
     k8s.pod.network.io:
diff --git a/receiver/kubeletstatsreceiver/metadata.yaml b/receiver/kubeletstatsreceiver/metadata.yaml
index 19169c2d465c8..56fc768934193 100644
--- a/receiver/kubeletstatsreceiver/metadata.yaml
+++ b/receiver/kubeletstatsreceiver/metadata.yaml
@@ -217,6 +217,20 @@ metrics:
     gauge:
       value_type: int
     attributes: []
+  k8s.pod.memory_limit_utilization:
+    enabled: false
+    description: "Pod memory utilization as a ratio of the pod's total container limits. If any container is missing a limit the metric is not emitted."
+    unit: 1
+    gauge:
+      value_type: double
+    attributes: []
+  k8s.pod.memory_request_utilization:
+    enabled: false
+    description: "Pod memory utilization as a ratio of the pod's total container requests. If any container is missing a request the metric is not emitted."
+    unit: 1
+    gauge:
+      value_type: double
+    attributes: []
   k8s.pod.memory.rss:
     enabled: true
     description: "Pod memory rss"
@@ -323,6 +337,20 @@ metrics:
     gauge:
       value_type: int
     attributes: []
+  k8s.container.memory_limit_utilization:
+    enabled: false
+    description: "Container memory utilization as a ratio of the container's limits"
+    unit: 1
+    gauge:
+      value_type: double
+    attributes: []
+  k8s.container.memory_request_utilization:
+    enabled: false
+    description: "Container memory utilization as a ratio of the container's requests"
+    unit: 1
+    gauge:
+      value_type: double
+    attributes: []
   container.memory.rss:
     enabled: true
     description: "Container memory rss"
diff --git a/receiver/kubeletstatsreceiver/scraper.go b/receiver/kubeletstatsreceiver/scraper.go
index 1c386ea18d865..580bac72fa005 100644
--- a/receiver/kubeletstatsreceiver/scraper.go
+++ b/receiver/kubeletstatsreceiver/scraper.go
@@ -36,6 +36,7 @@ type kubletScraper struct {
 	k8sAPIClient          kubernetes.Interface
 	cachedVolumeSource    map[string]v1.PersistentVolumeSource
 	mbs                   *metadata.MetricsBuilders
+	needsResources        bool
 }
 
 func newKubletScraper(
@@ -58,6 +59,10 @@ func newKubletScraper(
 			ContainerMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig, set),
 			OtherMetricsBuilder:     metadata.NewMetricsBuilder(metricsConfig, set),
 		},
+		needsResources: metricsConfig.Metrics.K8sPodMemoryLimitUtilization.Enabled ||
+			metricsConfig.Metrics.K8sPodMemoryRequestUtilization.Enabled ||
+			metricsConfig.Metrics.K8sContainerMemoryLimitUtilization.Enabled ||
+			metricsConfig.Metrics.K8sContainerMemoryRequestUtilization.Enabled,
 	}
 	return scraperhelper.NewScraper(metadata.Type, ks.scrape)
 }
@@ -71,7 +76,7 @@ func (r *kubletScraper) scrape(context.Context) (pmetric.Metrics, error) {
 
 	var podsMetadata *v1.PodList
 	// fetch metadata only when extra metadata labels are needed
-	if len(r.extraMetadataLabels) > 0 {
+	if len(r.extraMetadataLabels) > 0 || r.needsResources {
 		podsMetadata, err = r.metadataProvider.Pods()
 		if err != nil {
 			r.logger.Error("call to /pods endpoint failed", zap.Error(err))
@@ -80,6 +85,6 @@ func (r *kubletScraper) scrape(context.Context) (pmetric.Metrics, error) {
 	}
 
 	metadata := kubelet.NewMetadata(r.extraMetadataLabels, podsMetadata, r.detailedPVCLabelsSetter())
 	mds := kubelet.MetricsData(r.logger, summary, metadata, r.metricGroupsToCollect, r.mbs)
 	md := pmetric.NewMetrics()
 	for i := range mds {
diff --git a/receiver/kubeletstatsreceiver/scraper_test.go b/receiver/kubeletstatsreceiver/scraper_test.go
index d8a9307665f28..06f3c5fc720d8 100644
--- a/receiver/kubeletstatsreceiver/scraper_test.go
+++ b/receiver/kubeletstatsreceiver/scraper_test.go
@@ -10,6 +10,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"go.opentelemetry.io/collector/pdata/pcommon"
 	"go.opentelemetry.io/collector/receiver/receivertest"
@@ -130,6 +131,182 @@ func TestScraperWithMetadata(t *testing.T) {
 	}
 }
 
+func TestScraperWithPercentMetrics(t *testing.T) {
+	options := &scraperOptions{
+		metricGroupsToCollect: map[kubelet.MetricGroup]bool{
+			kubelet.ContainerMetricGroup: true,
+			kubelet.PodMetricGroup:       true,
+		},
+	}
+	metricsConfig := metadata.MetricsBuilderConfig{
+		Metrics: metadata.MetricsConfig{
+			ContainerCPUTime: metadata.MetricConfig{
+				Enabled: false,
+			},
+			ContainerCPUUtilization: metadata.MetricConfig{
+				Enabled: false,
+			},
+			ContainerFilesystemAvailable: metadata.MetricConfig{
+				Enabled: false,
+			},
+			ContainerFilesystemCapacity: metadata.MetricConfig{
+				Enabled: false,
+			},
+			ContainerFilesystemUsage: metadata.MetricConfig{
+				Enabled: false,
+			},
+			ContainerMemoryAvailable: metadata.MetricConfig{
+				Enabled: false,
+			},
+			ContainerMemoryMajorPageFaults: metadata.MetricConfig{
+				Enabled: false,
+			},
+			ContainerMemoryPageFaults: metadata.MetricConfig{
+				Enabled: false,
+			},
+			ContainerMemoryRss: metadata.MetricConfig{
+				Enabled: false,
+			},
+			ContainerMemoryUsage: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sContainerMemoryLimitUtilization: metadata.MetricConfig{
+				Enabled: true,
+			},
+			K8sContainerMemoryRequestUtilization: metadata.MetricConfig{
+				Enabled: true,
+			},
+			ContainerMemoryWorkingSet: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sNodeCPUTime: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sNodeCPUUtilization: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sNodeFilesystemAvailable: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sNodeFilesystemCapacity: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sNodeFilesystemUsage: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sNodeMemoryAvailable: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sNodeMemoryMajorPageFaults: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sNodeMemoryPageFaults: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sNodeMemoryRss: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sNodeMemoryUsage: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sNodeMemoryWorkingSet: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sNodeNetworkErrors: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sNodeNetworkIo: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sPodCPUTime: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sPodCPUUtilization: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sPodFilesystemAvailable: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sPodFilesystemCapacity: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sPodFilesystemUsage: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sPodMemoryAvailable: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sPodMemoryMajorPageFaults: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sPodMemoryPageFaults: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sPodMemoryRss: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sPodMemoryUsage: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sPodMemoryLimitUtilization: metadata.MetricConfig{
+				Enabled: true,
+			},
+			K8sPodMemoryRequestUtilization: metadata.MetricConfig{
+				Enabled: true,
+			},
+			K8sPodMemoryWorkingSet: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sPodNetworkErrors: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sPodNetworkIo: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sVolumeAvailable: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sVolumeCapacity: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sVolumeInodes: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sVolumeInodesFree: metadata.MetricConfig{
+				Enabled: false,
+			},
+			K8sVolumeInodesUsed: metadata.MetricConfig{
+				Enabled: false,
+			},
+		},
+		ResourceAttributes: metadata.DefaultResourceAttributesConfig(),
+	}
+	r, err := newKubletScraper(
+		&fakeRestClient{},
+		receivertest.NewNopCreateSettings(),
+		options,
+		metricsConfig,
+	)
+	require.NoError(t, err)
+
+	md, err := r.Scrape(context.Background())
+	require.NoError(t, err)
+	require.Equal(t, 4, md.DataPointCount())
+
+	assert.Equal(t, "k8s.pod.memory_limit_utilization", md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Name())
+	assert.True(t, md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).DoubleValue() <= 1)
+	assert.True(t, md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).DoubleValue() >= 0)
+	assert.Equal(t, "k8s.pod.memory_request_utilization", md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Name())
+	assert.True(t, md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Gauge().DataPoints().At(0).DoubleValue() > 1)
+
+	assert.Equal(t, "k8s.container.memory_limit_utilization", md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(0).Name())
+	assert.True(t, md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).DoubleValue() <= 1)
+	assert.True(t, md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).DoubleValue() >= 0)
+	assert.Equal(t, "k8s.container.memory_request_utilization", md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(1).Name())
+	assert.True(t, md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(1).Gauge().DataPoints().At(0).DoubleValue() > 1)
+
+}
+
 func TestScraperWithMetricGroups(t *testing.T) {
 	tests := []struct {
 		name         string
diff --git a/receiver/kubeletstatsreceiver/testdata/pods.json b/receiver/kubeletstatsreceiver/testdata/pods.json
index 30153050316d8..070f8146b370e 100644
--- a/receiver/kubeletstatsreceiver/testdata/pods.json
+++ b/receiver/kubeletstatsreceiver/testdata/pods.json
@@ -5,6 +5,23 @@
         "name": "kube-scheduler-minikube",
         "uid": "5795d0c442cb997ff93c49feeb9f6386"
       },
+      "spec": {
+        "containers": [
+          {
+            "name": "kube-scheduler",
+            "resources": {
+              "requests": {
+                "cpu": "50m",
+                "memory": "10M"
+              },
+              "limits": {
+                "cpu": "100m",
+                "memory": "100M"
+              }
+            }
+          }
+        ]
+      },
       "status": {
         "containerStatuses": [
           {