diff --git a/CMakeLists.txt b/CMakeLists.txt
index bd9f77f8..d0af5442 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -95,7 +95,7 @@ IF(NOT CMAKE_SYSTEM_PROCESSOR)
       "cpuinfo will compile, but cpuinfo_initialize() will always fail.")
     SET(CPUINFO_SUPPORTED_PLATFORM FALSE)
   ENDIF()
-ELSEIF(NOT CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?|armv[5-8].*|aarch64|arm64.*|ARM64.*|riscv(32|64))$")
+ELSEIF(NOT CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?|armv[5-8].*|aarch64|arm64.*|ARM64.*|riscv(32|64)|ppc64le)$")
   MESSAGE(WARNING
     "Target processor architecture \"${CPUINFO_TARGET_PROCESSOR}\" is not supported in cpuinfo. "
     "cpuinfo will compile, but cpuinfo_initialize() will always fail.")
@@ -184,6 +184,14 @@ IF(CPUINFO_SUPPORTED_PLATFORM)
     ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
       LIST(APPEND CPUINFO_SRCS src/x86/freebsd/init.c)
     ENDIF()
+  ELSEIF(CPUINFO_TARGET_PROCESSOR MATCHES "^(ppc64le)$")
+    LIST(APPEND CPUINFO_SRCS
+      src/powerpc/uarch.c
+      src/powerpc/cache.c
+      src/powerpc/linux/cpuinfo.c
+      src/powerpc/linux/ppc64-hw.c
+      src/powerpc/linux/init.c
+      src/powerpc/linux/ppc64-isa.c)
   ELSEIF(CMAKE_SYSTEM_NAME MATCHES "^Windows" AND CPUINFO_TARGET_PROCESSOR MATCHES "^(ARM64|arm64)$")
     LIST(APPEND CPUINFO_SRCS
       src/arm/windows/init-by-logical-sys-info.c
@@ -856,6 +864,14 @@ IF(CPUINFO_SUPPORTED_PLATFORM AND CPUINFO_BUILD_UNIT_TESTS)
     TARGET_LINK_LIBRARIES(cache-test PRIVATE cpuinfo_internals gtest gtest_main)
     ADD_TEST(NAME cache-test COMMAND cache-test)
   ENDIF()
+
+  IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(ppc64le)$")
+    ADD_EXECUTABLE(power-features-test test/name/power-features.cc)
+    CPUINFO_TARGET_ENABLE_CXX11(power-features-test)
+    CPUINFO_TARGET_RUNTIME_LIBRARY(power-features-test)
+    TARGET_LINK_LIBRARIES(power-features-test PRIVATE cpuinfo_internals gtest gtest_main)
+    ADD_TEST(NAME power-features-test COMMAND power-features-test)
+  ENDIF()
 ENDIF()
 
 # ---[ Helper and debug tools
diff --git a/configure.py b/configure.py
index 00bba24b..e8c04409 100755
--- a/configure.py
+++ b/configure.py
@@ -75,6 +75,16 @@ def main(args):
             if options.mock:
                 sources += ["linux/mockfile.c"]
+        if build.target.is_ppc64:
+            sources += ["powerpc/uarch.c", "powerpc/cache.c"]
+            if build.target.is_linux:
+                sources += [
+                    "powerpc/linux/cpuinfo.c",
+                    "powerpc/linux/ppc64-hw.c",
+                    "powerpc/linux/init.c",
+                    "powerpc/linux/ppc64-isa.c",
+                ]
         build.static_library("cpuinfo", map(build.cc, sources))
+
 
     with build.options(source_dir="tools", deps=[build, build.deps.clog]):
         build.executable("cpu-info", build.cc("cpu-info.c"))
@@ -91,6 +101,8 @@ def main(args):
             build.smoketest("get-current-test", build.cxx("get-current.cc"))
         if build.target.is_x86_64:
             build.smoketest("brand-string-test", build.cxx("name/brand-string.cc"))
+        if build.target.is_ppc64:
+            build.smoketest("power-features-test", build.cxx("name/power-features.cc"))
     if options.mock:
         with build.options(source_dir="test", include_dirs="test", macros="CPUINFO_MOCK", deps=[build, build.deps.googletest]):
             if build.target.is_arm64 and build.target.is_linux:
diff --git a/include/cpuinfo.h b/include/cpuinfo.h
index 9ed5d924..8e5decc1 100644
--- a/include/cpuinfo.h
+++ b/include/cpuinfo.h
@@ -601,6 +601,21 @@ enum cpuinfo_uarch {
 
 	/** HiSilicon TaiShan v110 (Huawei Kunpeng 920 series processors). */
 	cpuinfo_uarch_taishan_v110 = 0x00C00100,
+
+	/** IBM POWER 7. */
+	cpuinfo_uarch_power7 = 0x00D00100,
+	/** IBM POWER 7p. */
+	cpuinfo_uarch_power7p = 0x00D00101,
+	/** IBM POWER 8. */
+	cpuinfo_uarch_power8 = 0x00D00200,
+	/** IBM POWER8E. */
+	cpuinfo_uarch_power8e = 0x00D00201,
+	/** IBM POWER8NVL */
+	cpuinfo_uarch_power8nvl = 0x00D00202,
+	/** IBM POWER 9. */
+	cpuinfo_uarch_power9 = 0x00D00303,
+	/** IBM POWER 10. */
+	cpuinfo_uarch_power10 = 0x00D00400,
 };
 
 struct cpuinfo_processor {
@@ -671,9 +686,14 @@ struct cpuinfo_core {
 #elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
 	/** Value of Main ID Register (MIDR) for this core */
 	uint32_t midr;
+#elif CPUINFO_ARCH_PPC64
+	/** Value of Processor Version Register for this core */
+	uint32_t pvr;
 #endif
 	/** Clock rate (non-Turbo) of the core, in Hz */
 	uint64_t frequency;
+
+	/** Whether the core is reported as disabled (e.g. offline); populated by the PowerPC Linux initialization. */
+	bool disabled;
 };
 
 struct cpuinfo_cluster {
@@ -699,6 +719,9 @@ struct cpuinfo_cluster {
 #elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
 	/** Value of Main ID Register (MIDR) of the cores in the cluster */
 	uint32_t midr;
+#elif CPUINFO_ARCH_PPC64
+	/** Value of Processor Version Register in this cluster */
+	uint32_t pvr;
 #endif
 	/** Clock rate (non-Turbo) of the cores in the cluster, in Hz */
 	uint64_t frequency;
@@ -732,6 +755,9 @@ struct cpuinfo_uarch_info {
 #elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
 	/** Value of Main ID Register (MIDR) for the microarchitecture */
 	uint32_t midr;
+#elif CPUINFO_ARCH_PPC64
+	/** Value of Processor Version Register for this core */
+	uint32_t pvr;
 #endif
 	/** Number of logical processors with the microarchitecture */
 	uint32_t processor_count;
@@ -2218,6 +2244,40 @@ static inline bool cpuinfo_has_riscv_v(void) {
 #endif
 }
 
+#if CPUINFO_ARCH_PPC64
+struct cpuinfo_powerpc_isa {
+	bool vsx;
+	bool htm;
+	bool mma;
+};
+
+extern struct cpuinfo_powerpc_isa cpuinfo_isa;
+#endif
+
+static inline bool cpuinfo_has_powerpc_vsx(void) {
+#if CPUINFO_ARCH_PPC64
+	return cpuinfo_isa.vsx;
+#else
+	return false;
+#endif
+}
+
+static inline bool cpuinfo_has_powerpc_htm(void) {
+#if CPUINFO_ARCH_PPC64
+	return cpuinfo_isa.htm;
+#else
+	return false;
+#endif
+}
+
+static inline bool cpuinfo_has_powerpc_mma(void) {
+#if CPUINFO_ARCH_PPC64
+	return cpuinfo_isa.mma;
+#else
+	return false;
+#endif
+}
+
 const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processors(void);
 const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_cores(void);
 const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_clusters(void);
diff --git a/src/api.c b/src/api.c
index b8c999f3..e36dc342 100644
--- a/src/api.c
+++ b/src/api.c
@@ -30,7 +30,7 @@ uint32_t cpuinfo_packages_count = 0;
 uint32_t cpuinfo_cache_count[cpuinfo_cache_level_max] = {0};
 uint32_t cpuinfo_max_cache_size = 0;
 
-#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
+#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64 || CPUINFO_ARCH_PPC64
 struct cpuinfo_uarch_info* cpuinfo_uarchs = NULL;
 uint32_t cpuinfo_uarchs_count = 0;
 #else
@@ -41,7 +41,7 @@ struct cpuinfo_uarch_info cpuinfo_global_uarch = {cpuinfo_uarch_unknown};
 uint32_t cpuinfo_linux_cpu_max = 0;
 const struct cpuinfo_processor** cpuinfo_linux_cpu_to_processor_map = NULL;
 const struct cpuinfo_core** cpuinfo_linux_cpu_to_core_map = NULL;
-#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
+#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64 || CPUINFO_ARCH_PPC64
 const uint32_t* cpuinfo_linux_cpu_to_uarch_index_map = NULL;
 #endif
 #endif
@@ -78,7 +78,7 @@ const struct cpuinfo_uarch_info* cpuinfo_get_uarchs() {
 	if (!cpuinfo_is_initialized) {
 		cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "uarchs");
 	}
-#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
+#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64 || CPUINFO_ARCH_PPC64
 	return cpuinfo_uarchs;
 #else
 	return &cpuinfo_global_uarch;
@@ -129,7 +129,7 @@ const struct cpuinfo_uarch_info* cpuinfo_get_uarch(uint32_t index) {
 	if (!cpuinfo_is_initialized) {
 		cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "uarch");
 	}
-#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
+#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64 || CPUINFO_ARCH_PPC64
 	if CPUINFO_UNLIKELY (index >= cpuinfo_uarchs_count) {
 		return NULL;
 	}
@@ -174,7 +174,7 @@ uint32_t cpuinfo_get_uarchs_count(void) {
 	if (!cpuinfo_is_initialized) {
 		cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "uarchs_count");
 	}
-#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
+#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64 || CPUINFO_ARCH_PPC64
 	return cpuinfo_uarchs_count;
 #else
 	return 1;
@@ -350,7 +350,7 @@ uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index(void) {
 	if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
 		cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "current_uarch_index");
 	}
-#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
+#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64 || CPUINFO_ARCH_PPC64
 #ifdef __linux__
 	if (cpuinfo_linux_cpu_to_uarch_index_map == NULL) {
 		/* Special case: avoid syscall on systems with only a single
@@ -385,7 +385,7 @@ uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index_with_default(uint32_t defau
 		cpuinfo_log_fatal(
 			"cpuinfo_get_%s called before cpuinfo is initialized", "current_uarch_index_with_default");
 	}
-#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
+#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64 || CPUINFO_ARCH_PPC64
 #ifdef __linux__
 	if (cpuinfo_linux_cpu_to_uarch_index_map == NULL) {
 		/* Special case: avoid syscall on systems with only a single
diff --git a/src/cpuinfo/internal-api.h b/src/cpuinfo/internal-api.h
index d84b26a8..9edd1120 100644
--- a/src/cpuinfo/internal-api.h
+++ b/src/cpuinfo/internal-api.h
@@ -34,7 +34,7 @@ extern CPUINFO_INTERNAL uint32_t cpuinfo_packages_count;
 extern CPUINFO_INTERNAL uint32_t cpuinfo_cache_count[cpuinfo_cache_level_max];
 extern CPUINFO_INTERNAL uint32_t cpuinfo_max_cache_size;
 
-#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
+#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64 || CPUINFO_ARCH_PPC64
 extern CPUINFO_INTERNAL struct cpuinfo_uarch_info* cpuinfo_uarchs;
 extern CPUINFO_INTERNAL uint32_t cpuinfo_uarchs_count;
 #else
@@ -61,6 +61,7 @@ CPUINFO_PRIVATE void cpuinfo_arm_mach_init(void);
 CPUINFO_PRIVATE void cpuinfo_arm_linux_init(void);
 CPUINFO_PRIVATE void cpuinfo_riscv_linux_init(void);
 CPUINFO_PRIVATE void cpuinfo_emscripten_init(void);
+CPUINFO_PRIVATE void cpuinfo_powerpc_linux_init(void);
 
 CPUINFO_PRIVATE uint32_t cpuinfo_compute_max_cache_size(const struct cpuinfo_processor* processor);
 
diff --git a/src/init.c b/src/init.c
index 81d5721c..1fdeefcf 100644
--- a/src/init.c
+++ b/src/init.c
@@ -33,6 +33,12 @@ bool CPUINFO_ABI cpuinfo_initialize(void) {
 #else
 	cpuinfo_log_error("operating system is not supported in cpuinfo");
 #endif
+#elif CPUINFO_ARCH_PPC64
+#if defined(__linux__)
+	pthread_once(&init_guard, &cpuinfo_powerpc_linux_init);
+#else
+	cpuinfo_log_error("operating system is not supported in cpuinfo");
+#endif
 #elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
 #if defined(__linux__)
 	pthread_once(&init_guard, &cpuinfo_arm_linux_init);
diff --git a/src/linux/api.h b/src/linux/api.h
index 0966bd57..e1ff6a6f 100644
--- a/src/linux/api.h
+++ b/src/linux/api.h
@@ -51,6 +51,7 @@ CPUINFO_INTERNAL bool cpuinfo_linux_get_processor_package_id(
 	uint32_t processor,
 	uint32_t package_id[restrict static 1]);
 CPUINFO_INTERNAL bool cpuinfo_linux_get_processor_core_id(uint32_t processor, uint32_t core_id[restrict static 1]);
+CPUINFO_INTERNAL bool cpuinfo_linux_get_processor_online_status(uint32_t processor, uint32_t* online_status);
 
 CPUINFO_INTERNAL bool cpuinfo_linux_detect_possible_processors(
 	uint32_t max_processors_count,
diff --git a/src/linux/processors.c b/src/linux/processors.c
index b68cd1cc..114ada79 100644
--- a/src/linux/processors.c
+++ b/src/linux/processors.c
@@ -33,6 +33,9 @@
 #define CORE_ID_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/core_id"
 #define CORE_ID_FILESIZE 32
 
+#define PROCESSOR_ONLINE_FILENAME_SIZE (sizeof("/sys/devices/system/cpu/cpu" STRINGIFY(UINT32_MAX) "/online"))
+#define PROCESSOR_ONLINE_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/online"
+#define PROCESSOR_ONLINE_FILESIZE 32
 #define CORE_CPUS_FILENAME_SIZE (sizeof("/sys/devices/system/cpu/cpu" STRINGIFY(UINT32_MAX) "/topology/core_cpus_list"))
 #define CORE_CPUS_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/core_cpus_list"
 #define CORE_SIBLINGS_FILENAME_SIZE \
@@ -280,6 +283,33 @@ bool cpuinfo_linux_get_processor_package_id(uint32_t processor, uint32_t package
 	}
 }
 
+bool cpuinfo_linux_get_processor_online_status(uint32_t processor, uint32_t* online_status_ptr) {
+	char processor_online_filename[PROCESSOR_ONLINE_FILENAME_SIZE];
+	const int chars_formatted = snprintf(
+		processor_online_filename, PROCESSOR_ONLINE_FILENAME_SIZE, PROCESSOR_ONLINE_FILENAME_FORMAT, processor);
+	if ((unsigned int)chars_formatted >= PROCESSOR_ONLINE_FILENAME_SIZE) {
+		cpuinfo_log_warning("failed to format filename for online status of processor %" PRIu32, processor);
+		return false;
+	}
+	uint32_t online_status;
+	if (cpuinfo_linux_parse_small_file(
+		    processor_online_filename, PROCESSOR_ONLINE_FILESIZE, uint32_parser, &online_status)) {
+		cpuinfo_log_debug(
+			"parsed online status value of %" PRIu32 " for logical processor %" PRIu32 " from %s",
+			online_status,
+			processor,
+			processor_online_filename);
+		*online_status_ptr = online_status;
+		return true;
+	} else {
+		cpuinfo_log_info(
+			"failed to parse online status for processor %" PRIu32 " from %s",
+			processor,
+			processor_online_filename);
+		return false;
+	}
+}
+
 static bool max_processor_number_parser(uint32_t processor_list_start, uint32_t processor_list_end, void* context) {
 	uint32_t* processor_number_ptr = (uint32_t*)context;
 	const uint32_t processor_list_last = processor_list_end - 1;
diff --git a/src/powerpc/api.h b/src/powerpc/api.h
new file mode 100644
index 00000000..34ab330b
--- /dev/null
+++ b/src/powerpc/api.h
@@ -0,0 +1,18 @@
+#pragma once
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <cpuinfo.h>
+
+enum cpuinfo_powerpc_chipset_vendor { cpuinfo_powerpc_chipset_vendor_unknown = 0, cpuinfo_powerpc_chipset_vendor_ibm };
+void cpuinfo_powerpc_decode_vendor_uarch(
+	uint32_t vendor_id,
+	enum cpuinfo_vendor vendor[restrict static 1],
+	enum cpuinfo_uarch uarch[restrict static 1]);
+
+void cpuinfo_powerpc_decode_cache(
+	struct cpuinfo_cache l1i[restrict static 1],
+	struct cpuinfo_cache l1d[restrict static 1],
+	struct cpuinfo_cache l2[restrict static 1],
+	struct cpuinfo_cache l3[restrict static 1]);
diff --git a/src/powerpc/cache.c b/src/powerpc/cache.c
new file mode 100644
index 00000000..ace751a4
--- /dev/null
+++ b/src/powerpc/cache.c
@@ -0,0 +1,123 @@
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <cpuinfo.h>
+#include <cpuinfo/log.h>
+#include <powerpc/api.h>
+#include <sys/auxv.h>
+
+#define NUM_CACHE 4
+#define BUF_SIZE 128
+
+int path_exist(const char* path) {
+	return (access(path, F_OK) == 0);
+}
+
+void read_str(char* result, size_t len, const char* path) {
+	FILE* f = fopen(path, "r");
+
+	result[0] = '\0';
+	if (f == NULL)
+		return;
+	if (fgets(result, len, f) == NULL)
+		result[0] = '\0';
+	fclose(f);
+	len = strlen(result);
+	if (len > 0 && result[len - 1] == '\n')
+		result[len - 1] = '\0';
+}
+
+void decode_cache_features(uint32_t cache_features[], unsigned long geometry) {
+	cache_features[1] = (geometry >> 16) & 0xffff;
+	cache_features[2] = geometry & 0xffff;
+	/* If associativity = 65535 it means upper limit of 16 bit of AT_L%d_CACHEGEOMETRY has reached so we calculate
+	 * it from, size, sets and line_size*/
+	if (cache_features[1] == 65535)
+		cache_features[1] = cache_features[0] / (cache_features[2] * cache_features[3]);
+}
+
+void cpuinfo_powerpc_decode_cache(
+	struct cpuinfo_cache l1i[restrict static 1],
+	struct cpuinfo_cache l1d[restrict static 1],
+	struct cpuinfo_cache l2[restrict static 1],
+	struct cpuinfo_cache l3[restrict static 1]) {
+	char buf[BUF_SIZE];
+	char result[BUF_SIZE] = "";
+	uint32_t cache_features[NUM_CACHE];
+	uint32_t i;
+	uint32_t size = 0, sets = 0;
+	size_t len;
+
+	for (i = 0; i < NUM_CACHE; i++) {
+		snprintf(buf, sizeof(buf), "/sys/devices/system/cpu/cpu%u/cache/index%u/number_of_sets", (unsigned int)i, (unsigned int)i);
+		if (path_exist(buf))
+			read_str(result, sizeof(result), buf);
+
+		sets = atoi(result);
+		cache_features[3] = sets;
+
+		unsigned long geometry;
+
+		switch (i) {
+			case 0:
+				geometry = getauxval(AT_L1D_CACHEGEOMETRY);
+				size = getauxval(AT_L1D_CACHESIZE);
+				cache_features[0] = size;
+				decode_cache_features(cache_features, geometry);
+				sets = size / (cache_features[1] * cache_features[2]);
+				*l1d = (struct cpuinfo_cache){
+					.size = size,
+					.associativity = cache_features[1],
+					.line_size = cache_features[2],
+					.sets = sets,
+					.partitions = 1
+
+				};
+				break;
+			case 1:
+				geometry = getauxval(AT_L1I_CACHEGEOMETRY);
+				size = getauxval(AT_L1I_CACHESIZE);
+				cache_features[0] = size;
+				decode_cache_features(cache_features, geometry);
+				sets = size / (cache_features[1] * cache_features[2]);
+				*l1i = (struct cpuinfo_cache){
+					.size = size,
+					.associativity = cache_features[1],
+					.line_size = cache_features[2],
+					.sets = sets,
+					.partitions = 1};
+				break;
+			case 2:
+				geometry = getauxval(AT_L2_CACHEGEOMETRY);
+				size = getauxval(AT_L2_CACHESIZE);
+				cache_features[0] = size;
+				decode_cache_features(cache_features, geometry);
+				sets = size / (cache_features[1] * cache_features[2]);
+				*l2 = (struct cpuinfo_cache){
+					.size = size,
+					.associativity = cache_features[1],
+					.line_size = cache_features[2],
+					.sets = sets,
+					.partitions = 1};
+				break;
+			case 3:
+				geometry = getauxval(AT_L3_CACHEGEOMETRY);
+				size = getauxval(AT_L3_CACHESIZE);
+				cache_features[0] = size;
+				decode_cache_features(cache_features, geometry);
+				sets = size / (cache_features[1] * cache_features[2]);
+				*l3 = (struct cpuinfo_cache){
+					.size = size,
+					.associativity = cache_features[1],
+					.line_size = cache_features[2],
+					.sets = sets,
+					.partitions = 1};
+				break;
+		}
+	}
+}
diff --git a/src/powerpc/linux/api.h b/src/powerpc/linux/api.h
new file mode 100644
index 00000000..a03aaa73
--- /dev/null
+++ b/src/powerpc/linux/api.h
@@ -0,0 +1,112 @@
+
+
+#pragma once
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <cpuinfo.h>
+#include <cpuinfo/common.h>
+#include <linux/api.h>
+#include <powerpc/api.h>
+
+/* No hard limit in the kernel, maximum length observed on non-rogue kernels is 64 */
+#define CPUINFO_HARDWARE_VALUE_MAX 64
+
+/* from /arch/powerpc/kernel/cputable.c */
+#define CPUINFO_POWERPC_LINUX_FEATURE_64 UINT32_C(0x40000000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_HAS_ALTIVEC UINT32_C(0x10000000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_HAS_FPU UINT32_C(0x08000000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_HAS_MMU UINT32_C(0x04000000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_NO_TB UINT32_C(0x00100000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_SMT UINT32_C(0x00004000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_HAS_DFP UINT32_C(0x00000400)
+#define CPUINFO_POWERPC_LINUX_FEATURE_POWER6_EXT UINT32_C(0x00000200)
+#define CPUINFO_POWERPC_LINUX_FEATURE_HAS_VSX UINT32_C(0x00000080)
+#define CPUINFO_POWERPC_LINUX_FEATURE_TRUE_LE UINT32_C(0x00000002)
+#define CPUINFO_POWERPC_LINUX_FEATURE_PPC_LE UINT32_C(0x00000001)
+#define CPUINFO_POWERPC_LINUX_FEATURE_ARCH_2_07 UINT32_C(0x80000000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_HTM UINT32_C(0x40000000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_DSCR UINT32_C(0x20000000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_EBB UINT32_C(0x10000000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_ISEL UINT32_C(0x08000000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_TAR UINT32_C(0x04000000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_VEC_CRYPTO UINT32_C(0x02000000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_HTM_NOSC UINT32_C(0x01000000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_ARCH_3_00 UINT32_C(0x00800000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_HAS_IEEE128 UINT32_C(0x00400000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_DARN UINT32_C(0x00200000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_SCV UINT32_C(0x00100000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_HTM_NO_SUSPEND UINT32_C(0x00080000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_ARCH_3_1 UINT32_C(0x00040000)
+#define CPUINFO_POWERPC_LINUX_FEATURE_HAS_MMA UINT32_C(0x00020000)
+
+#define CPUINFO_POWERPC_LINUX_VALID_ARCHITECTURE UINT32_C(0x00010000)
+#define CPUINFO_POWERPC_LINUX_VALID_IMPLEMENTER UINT32_C(0x00020000)
+#define CPUINFO_POWERPC_LINUX_VALID_REVISION UINT32_C(0x00040000)
+#define CPUINFO_POWERPC_LINUX_VALID_PROCESSOR UINT32_C(0x00080000)
+#define CPUINFO_POWERPC_LINUX_VALID_VERSION UINT32_C(0x00100000)
+#define CPUINFO_POWERPC_LINUX_VALID_FEATURES UINT32_C(0x00200000)
+#define CPUINFO_POWERPC_LINUX_VALID_PVR UINT32_C(0x001F0000)
+
+/**
+ * Definition of a powerpc64 Linux processor. It is composed of the base processor
+ * definition in "include/cpuinfo.h" and flags specific to powerpc64 Linux
+ * implementations.
+ */
+struct cpuinfo_powerpc_linux_processor {
+	/* Public ABI cpuinfo structures. */
+	struct cpuinfo_processor processor;
+	struct cpuinfo_core core;
+	struct cpuinfo_cluster cluster;
+	struct cpuinfo_package package;
+
+	/**
+	 * Linux-specific flags for the logical processor:
+	 * - Bit field that can be masked with CPUINFO_LINUX_FLAG_*.
+	 */
+	uint32_t flags;
+
+	/**
+	 * Minimum processor ID on the cluster which includes this logical
+	 * processor. This value can serve as an ID for the cluster of logical
+	 * processors: it is the same for all logical processors on the same
+	 * package.
+	 */
+	uint32_t cluster_leader_id;
+
+	/**
+	 * Minimum processor ID on the core which includes this logical
+	 * processor. This value can serve as an ID for the core of logical
+	 * processors: it is the same for all logical processors on the same
+	 * core.
+	 */
+	uint32_t core_leader_id;
+
+	/**
+	 * Minimum processor ID on the package which includes this logical
+	 * processor. This value can serve as an ID for the package of logical
+	 * processors: it is the same for all logical processors on the same
+	 * package.
+	 */
+	uint32_t package_leader_id;
+};
+
+/**
+ * Reads CPU feature flags (AT_HWCAP/AT_HWCAP2) via `getauxval` and populates
+ * the caller-provided feature array.
+ *
+ * @param[isa_feature] - Array to populate with the hwcap feature words.
+ */
+void cpuinfo_powerpc_linux_hwcap_from_getauxval(uint32_t isa_feature[]);
+CPUINFO_INTERNAL void cpuinfo_ppc64_linux_decode_isa_from_hwcap(
+	uint32_t hwcap,
+	uint32_t hwcap2,
+	struct cpuinfo_powerpc_isa isa[restrict static 1]);
+
+bool cpuinfo_powerpc_linux_parse_proc_cpuinfo(
+	uint32_t max_processors_count,
+	struct cpuinfo_powerpc_linux_processor processors[restrict static max_processors_count]);
+
+/* Used to determine which uarch is associated with the current thread. */
+extern CPUINFO_INTERNAL const uint32_t* cpuinfo_linux_cpu_to_uarch_index_map;
diff --git a/src/powerpc/linux/cpuinfo.c b/src/powerpc/linux/cpuinfo.c
new file mode 100644
index 00000000..1567f9f8
--- /dev/null
+++ b/src/powerpc/linux/cpuinfo.c
@@ -0,0 +1,335 @@
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <cpuinfo/log.h>
+#include <linux/api.h>
+#include <powerpc/linux/api.h>
+
+/*
+ * Size, in chars, of the on-stack buffer used for parsing lines of /proc/cpuinfo.
+ * This is also the limit on the length of a single line.
+ */
+#define BUFFER_SIZE 1024
+
+struct proc_cpuinfo_parser_state {
+	uint32_t processor_index;
+	uint32_t max_processors_count;
+	struct cpuinfo_powerpc_linux_processor* processors;
+	struct cpuinfo_powerpc_linux_processor dummy_processor;
+};
+
+static uint32_t parse_processor_number(const char* processor_start, const char* processor_end) {
+	const size_t processor_length = (size_t)(processor_end - processor_start);
+
+	if (processor_length == 0) {
+		cpuinfo_log_warning("Processor number in /proc/cpuinfo is ignored: string is empty");
+		return 0;
+	}
+
+	uint32_t processor_number = 0;
+	for (const char* digit_ptr = processor_start; digit_ptr != processor_end; digit_ptr++) {
+		const uint32_t digit = (uint32_t)(*digit_ptr - '0');
+		if (digit >= 10) {
+			cpuinfo_log_warning(
+				"non-decimal suffix %.*s in /proc/cpuinfo processor number is ignored",
+				(int)(processor_end - digit_ptr),
+				digit_ptr);
+			break;
+		}
+
+		processor_number = processor_number * 10 + digit;
+	}
+
+	return processor_number;
+}
+
+static void parse_cpu_architecture(
+	const char* cpu_architecture_start,
+	const char* cpu_architecture_end,
+	struct cpuinfo_powerpc_linux_processor* processor) {
+	const size_t cpu_arch_name_length = 5;
+
+	if (!memcmp(cpu_architecture_start, "POWER", cpu_arch_name_length)) {
+		processor->flags |= CPUINFO_POWERPC_LINUX_VALID_ARCHITECTURE;
+
+		/* For now we are assuming the implementer is always valid.  */
+		processor->flags |= CPUINFO_POWERPC_LINUX_VALID_IMPLEMENTER;
+		/* For now we only support IBM vendor.  */
+		processor->core.vendor = cpuinfo_vendor_ibm;
+
+		const char* cpu_arch_ptr = cpu_architecture_start + cpu_arch_name_length;
+		uint32_t arch_version = 0;
+		for (; cpu_arch_ptr != cpu_architecture_end; cpu_arch_ptr++) {
+			const uint32_t digit = (uint32_t)(*cpu_arch_ptr - '0');
+
+			if (digit >= 10) {
+				break;
+			}
+			arch_version = arch_version * 10 + digit;
+		}
+
+		switch (arch_version) {
+			case 7: /* POWER7 */
+				if (*cpu_arch_ptr == ' ') {
+					processor->core.uarch = cpuinfo_uarch_power7;
+				} else if (*cpu_arch_ptr == '+') {
+					processor->core.uarch = cpuinfo_uarch_power7p;
+					cpu_arch_ptr++;
+				} else {
+					goto unsupported;
+				}
+				break;
+			case 8: /* POWER8 */
+				if (*cpu_arch_ptr == ' ') {
+					processor->core.uarch = cpuinfo_uarch_power8;
+				} else if (*cpu_arch_ptr == 'E') {
+					processor->core.uarch = cpuinfo_uarch_power8e;
+					cpu_arch_ptr++;
+				} else if (*cpu_arch_ptr == 'N') {
+					cpu_arch_ptr++;
+					if (*cpu_arch_ptr == 'V') {
+						cpu_arch_ptr++;
+					}
+					if (*cpu_arch_ptr == 'L') {
+						processor->core.uarch = cpuinfo_uarch_power8nvl;
+						cpu_arch_ptr++;
+					}
+				}
+				if (*cpu_arch_ptr != ' ') {
+					goto unsupported;
+				}
+				break;
+			case 9: /* POWER9 */
+				processor->core.uarch = cpuinfo_uarch_power9;
+				break;
+			case 10: /* POWER10 */
+				processor->core.uarch = cpuinfo_uarch_power10;
+				break;
+			default:
+			unsupported:
+				cpuinfo_log_warning(
+					"CPU architecture %.*s in /proc/cpuinfo is ignored due to a unsupported architecture version",
+					(int)(cpu_architecture_end - cpu_architecture_start),
+					cpu_architecture_start);
+		}
+		processor->flags |= CPUINFO_POWERPC_LINUX_VALID_PROCESSOR;
+		processor->core.disabled = false;
+	} else {
+		cpuinfo_log_warning(
+			"processor %.*s in /proc/cpuinfo is ignored due not a Power processor",
+			(int)(cpu_architecture_end - cpu_architecture_start),
+			cpu_architecture_start);
+	}
+}
+
+static void parse_cpu_pvr(
+	const char* cpu_revision_start,
+	const char* cpu_revision_end,
+	struct cpuinfo_powerpc_linux_processor* processor) {
+	const char* cpu_rev_ptr = cpu_revision_start;
+	uint16_t revision = 0;
+	uint16_t version = 0;
+	processor->core.pvr = 0x0;
+
+	for (; cpu_rev_ptr != cpu_revision_end; cpu_rev_ptr++) {
+		if (*cpu_rev_ptr == '(') {
+			cpu_rev_ptr++; // Skip
+			break;
+		}
+	}
+
+	size_t pvr_str_len = 3;
+	if (memcmp(cpu_rev_ptr, "pvr", pvr_str_len) == 0) {
+		/* Parse revision. */
+		uint16_t revision = 0;
+		cpu_rev_ptr += pvr_str_len + 1; // Skip pvr string + space
+		for (; cpu_rev_ptr != cpu_revision_end; cpu_rev_ptr++) {
+			if (*cpu_rev_ptr == ' ') {
+				cpu_rev_ptr++;
+				break;
+			}
+			uint32_t digit = (uint32_t)(*cpu_rev_ptr - '0');
+			if (digit >= 10) {
+				digit = digit - 0x27;
+				if ((digit < 10) || (digit > 15)) {
+					cpuinfo_log_warning(
+						"cpu revision %.*s in /proc/cpuinfo is ignored due non number",
+						(int)(cpu_revision_end - cpu_revision_start),
+						cpu_revision_start);
+					return;
+				}
+			}
+			revision = revision * 16 + digit;
+		}
+
+		/* Parse version. */
+		uint16_t version = 0;
+		for (; cpu_rev_ptr != cpu_revision_end; cpu_rev_ptr++) {
+			if (*cpu_rev_ptr == ')') {
+				cpu_rev_ptr++;
+				break;
+			}
+			uint32_t digit = (uint32_t)(*cpu_rev_ptr - '0');
+			if (digit >= 10) {
+				digit = digit - 0x27;
+				if ((digit < 10) || (digit > 15)) {
+					cpuinfo_log_warning(
+						"cpu version %.*s in /proc/cpuinfo is ignored due non number",
+						(int)(cpu_revision_end - cpu_revision_start),
+						cpu_revision_start);
+					return;
+				}
+			}
+			version = version * 16 + digit;
+		}
+
+		processor->core.pvr |= (revision << 16);
+		processor->core.pvr |= version;
+	} else {
+		cpuinfo_log_warning(
+			"cpu revision %.*s in /proc/cpuinfo is ignored due non PVR information",
+			(int)(cpu_revision_end - cpu_revision_start),
+			cpu_revision_start);
+	}
+}
+
+static bool parse_line(
+	const char* line_start,
+	const char* line_end,
+	struct proc_cpuinfo_parser_state state[restrict static 1],
+	uint64_t line_number) {
+	/* Empty line. Skip. */
+	if (line_start == line_end) {
+		return true;
+	}
+
+	/* Search for ':' on the line. */
+	const char* separator = line_start;
+	for (; separator != line_end; separator++) {
+		if (*separator == ':') {
+			break;
+		}
+	}
+
+	/* Skip line if no ':' separator was found. */
+	if (separator == line_end) {
+		cpuinfo_log_warning(
+			"Line %.*s in /proc/cpuinfo is ignored: key/value separator ':' not found",
+			(int)(line_end - line_start),
+			line_start);
+		return true;
+	}
+
+	/* Skip trailing spaces in key part. */
+	const char* key_end = separator;
+	for (; key_end != line_start; key_end--) {
+		if (key_end[-1] != ' ' && key_end[-1] != '\t') {
+			break;
+		}
+	}
+
+	/* Skip line if key contains nothing but spaces. */
+	if (key_end == line_start) {
+		cpuinfo_log_warning(
+			"Line %.*s in /proc/cpuinfo is ignored: key contains only spaces",
+			(int)(line_end - line_start),
+			line_start);
+		return true;
+	}
+
+	/* Skip leading spaces in value part. */
+	const char* value_start = separator + 1;
+	for (; value_start != line_end; value_start++) {
+		if (*value_start != ' ') {
+			break;
+		}
+	}
+
+	/* Value part contains nothing but spaces. Skip line. */
+	if (value_start == line_end) {
+		cpuinfo_log_warning(
+			"Line %.*s in /proc/cpuinfo is ignored: value contains only spaces",
+			(int)(line_end - line_start),
+			line_start);
+	}
+
+	/* Skip trailing spaces in value part (if any) */
+	const char* value_end = line_end;
+	for (; value_end != value_start; value_end--) {
+		if (value_end[-1] != ' ') {
+			break;
+		}
+	}
+
+	/* Declarations to return */
+	const uint32_t processor_index = state->processor_index;
+	const uint32_t max_processors_count = state->max_processors_count;
+	struct cpuinfo_powerpc_linux_processor* processors = state->processors;
+	struct cpuinfo_powerpc_linux_processor* processor = &state->dummy_processor;
+
+	if (processor_index < max_processors_count) {
+		processor = &processors[processor_index];
+	}
+
+	const size_t key_length = key_end - line_start;
+
+	switch (key_length) {
+		case 3:
+			if (memcmp(line_start, "cpu", key_length) == 0) {
+				parse_cpu_architecture(value_start, value_end, processor);
+			} else {
+				goto unknown;
+			}
+			break;
+		case 5:
+			if (memcmp(line_start, "clock", key_length) == 0) {
+				parse_cpu_architecture(value_start, value_end, processor);
+			} else {
+				goto unknown;
+			}
+			break;
+		case 7:
+			if (memcmp(line_start, "machine", key_length) == 0) {
+				parse_cpu_architecture(value_start, value_end, processor);
+			} else {
+				goto unknown;
+			}
+			break;
+		case 8:
+			if (memcmp(line_start, "revision", key_length) == 0) {
+				parse_cpu_pvr(value_start, value_end, processor);
+			} else {
+				goto unknown;
+			}
+			break;
+		case 9:
+			if (memcmp(line_start, "processor", key_length) == 0) {
+				state->processor_index = parse_processor_number(value_start, value_end);
+				return true;
+			} else {
+				goto unknown;
+			}
+			break;
+		default:
+		unknown:
+			cpuinfo_log_debug("unknown /proc/cpuinfo key: %.*s", (int)key_length, line_start);
+	}
+	return true;
+}
+
+bool cpuinfo_powerpc_linux_parse_proc_cpuinfo(
+	uint32_t max_processors_count,
+	struct cpuinfo_powerpc_linux_processor processors[restrict static max_processors_count]) {
+	struct proc_cpuinfo_parser_state state = {
+		.processor_index = 0,
+		.max_processors_count = max_processors_count,
+		.processors = processors,
+	};
+
+	for (uint32_t i = 0; i < max_processors_count; i++)
+		processors[i].core.disabled = true;
+
+	return cpuinfo_linux_parse_multiline_file(
+		"/proc/cpuinfo", BUFFER_SIZE, (cpuinfo_line_callback)parse_line, &state);
+}
diff --git a/src/powerpc/linux/init.c b/src/powerpc/linux/init.c
new file mode 100644
index 00000000..0553f066
--- /dev/null
+++ b/src/powerpc/linux/init.c
@@ -0,0 +1,921 @@
+#include <string.h>
+
+#include <cpuinfo/internal-api.h>
+#include <cpuinfo/log.h>
+#include <linux/api.h>
+#include <powerpc/linux/api.h>
+#include <stdio.h>
+
+/* ISA structure to hold supported extensions. */
+struct cpuinfo_powerpc_isa cpuinfo_isa;
+
+/* Helper function to bitmask flags and ensure operator precedence. */
/* Returns true when every bit set in 'mask' is also set in 'flags'. */
static inline bool bitmask_all(uint32_t flags, uint32_t mask) {
	/* Equivalent to (flags & mask) == mask: no mask bit may be absent. */
	return (mask & ~flags) == 0;
}
+
+static int compare_powerpc_linux_processors(const void* a, const void* b) {
+	/**
+	 * For our purposes, it is only relevant that the list is sorted by
+	 * micro-architecture, so the nature of ordering is irrelevant.
+	 */
+	return ((const struct cpuinfo_powerpc_linux_processor*)a)->core.uarch -
+		((const struct cpuinfo_powerpc_linux_processor*)b)->core.uarch;
+}
+
+/**
+ * Parses the core cpus list for each processor. This function is called once
+ * per-processor, with the IDs of all other processors in the core list.
+ *
+ * The 'processor_[start|count]' are populated in the processor's 'core'
+ * attribute, with 'start' being the smallest ID in the core list.
+ *
+ * The 'core_leader_id' of each processor is set to the smallest ID in it's
+ * cluster CPU list.
+ *
+ * Precondition: The element in the 'processors' list must be initialized with
+ * their 'core_leader_id' to their index in the list.
+
+ * E.g. processors[0].core_leader_id = 0.
+ */
static bool core_cpus_parser(
	uint32_t processor,
	uint32_t core_cpus_start,
	uint32_t core_cpus_end,
	struct cpuinfo_powerpc_linux_processor* processors) {
	/* UINT32_MAX marks "no leader found yet". */
	uint32_t processor_start = UINT32_MAX;
	uint32_t processor_count = 0;

	/* If the processor already has a leader, use it. */
	if (bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_CORE_CLUSTER)) {
		processor_start = processors[processor].core_leader_id;
	}

	/* Walk this chunk of the core's sibling CPU range, skipping entries
	 * that were never marked valid/present. */
	for (size_t core_cpu = core_cpus_start; core_cpu < core_cpus_end; core_cpu++) {
		if (!bitmask_all(processors[core_cpu].flags, CPUINFO_LINUX_FLAG_VALID)) {
			continue;
		}
		/**
		 * The first valid processor observed is the smallest ID in the
		 * list that attaches to this core.
		 */
		if (processor_start == UINT32_MAX) {
			processor_start = core_cpu;
		}
		/* Every sibling adopts the smallest observed ID as its leader.
		 * NOTE(review): 'core_cpu' (size_t) is narrowed into a uint32_t
		 * field — fine while Linux CPU IDs fit in 32 bits. */
		processors[core_cpu].core_leader_id = processor_start;
		processor_count++;
	}
	/**
	 * If the cluster flag has not been set, assign the processor start. If
	 * it has been set, only apply the processor start if it's less than the
	 * held value. This can happen if the callback is invoked twice:
	 *
	 * e.g. core_cpu_list=1,10-12
	 */
	if (!bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_CORE_CLUSTER) ||
	    processors[processor].core.processor_start > processor_start) {
		processors[processor].core.processor_start = processor_start;
		processors[processor].core_leader_id = processor_start;
	}
	/* Accumulate (not assign): counts from multiple invocations add up. */
	processors[processor].core.processor_count += processor_count;
	processors[processor].flags |= CPUINFO_LINUX_FLAG_CORE_CLUSTER;
	/* The parser has failed only if no processors were found. */
	return processor_count != 0;
}
+
+/**
+ * Parses the cluster cpu list for each processor. This function is called once
+ * per-processor, with the IDs of all other processors in the cluster.
+ *
+ * The 'cluster_leader_id' of each processor is set to the smallest ID in it's
+ * cluster CPU list.
+ *
+ * Precondition: The element in the 'processors' list must be initialized with
+ * their 'cluster_leader_id' to their index in the list.
+ * E.g. processors[0].cluster_leader_id = 0.
+ */
static bool cluster_cpus_parser(
	uint32_t processor,
	uint32_t cluster_cpus_start,
	uint32_t cluster_cpus_end,
	struct cpuinfo_powerpc_linux_processor* processors) {
	/* UINT32_MAX marks "no leader found yet". */
	uint32_t processor_start = UINT32_MAX;
	uint32_t processor_count = 0;
	uint32_t core_count = 0;

	/* If the processor already has a leader, use it. */
	if (bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_CLUSTER_CLUSTER)) {
		processor_start = processors[processor].cluster_leader_id;
	}

	for (size_t cluster_cpu = cluster_cpus_start; cluster_cpu < cluster_cpus_end; cluster_cpu++) {
		if (!bitmask_all(processors[cluster_cpu].flags, CPUINFO_LINUX_FLAG_VALID)) {
			continue;
		}
		/**
		 * The first valid processor observed is the smallest ID in the
		 * list that attaches to this core.
		 */
		if (processor_start == UINT32_MAX) {
			processor_start = cluster_cpu;
		}
		processors[cluster_cpu].cluster_leader_id = processor_start;
		processor_count++;
		/**
		 * A processor should only represent it's core if it is the
		 * assigned leader of that core.
		 */
		if (processors[cluster_cpu].core_leader_id == cluster_cpu) {
			core_count++;
		}
	}
	/**
	 * If the cluster flag has not been set, assign the processor start. If
	 * it has been set, only apply the processor start if it's less than the
	 * held value. This can happen if the callback is invoked twice:
	 *
	 * e.g. cluster_cpus_list=1,10-12
	 */
	if (!bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_CLUSTER_CLUSTER) ||
	    processors[processor].cluster.processor_start > processor_start) {
		processors[processor].cluster.processor_start = processor_start;
		processors[processor].cluster.core_start = processor_start;
		processors[processor].cluster.cluster_id = processor_start;
		processors[processor].cluster_leader_id = processor_start;
	}
	/* Accumulate (not assign): counts from multiple invocations add up. */
	processors[processor].cluster.processor_count += processor_count;
	processors[processor].cluster.core_count += core_count;
	processors[processor].flags |= CPUINFO_LINUX_FLAG_CLUSTER_CLUSTER;
	/* Unlike core_cpus_parser, an empty cluster list is not an error. */
	return true;
}
+
+/**
+ * Parses the package cpus list for each processor. This function is called once
+ * per-processor, with the IDs of all other processors in the package list.
+ *
+ * The 'processor_[start|count]' are populated in the processor's 'package'
+ * attribute, with 'start' being the smallest ID in the package list.
+ *
+ * The 'package_leader_id' of each processor is set to the smallest ID in it's
+ * cluster CPU list.
+ *
+ * Precondition: The element in the 'processors' list must be initialized with
+ * their 'package_leader_id' to their index in the list.
+ * E.g. processors[0].package_leader_id = 0.
+ */
static bool package_cpus_parser(
	uint32_t processor,
	uint32_t package_cpus_start,
	uint32_t package_cpus_end,
	struct cpuinfo_powerpc_linux_processor* processors) {
	/* UINT32_MAX marks "no leader found yet". */
	uint32_t processor_start = UINT32_MAX;
	uint32_t processor_count = 0;
	uint32_t cluster_count = 0;
	uint32_t core_count = 0;

	/* If the processor already has a leader, use it. */
	if (bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER)) {
		processor_start = processors[processor].package_leader_id;
	}

	for (size_t package_cpu = package_cpus_start; package_cpu < package_cpus_end; package_cpu++) {
		if (!bitmask_all(processors[package_cpu].flags, CPUINFO_LINUX_FLAG_VALID)) {
			continue;
		}
		/**
		 * The first valid processor observed is the smallest ID in the
		 * list that attaches to this package.
		 */
		if (processor_start == UINT32_MAX) {
			processor_start = package_cpu;
		}
		processors[package_cpu].package_leader_id = processor_start;
		processor_count++;
		/**
		 * A processor should only represent it's core if it is the
		 * assigned leader of that core, and similarly for it's cluster.
		 */
		if (processors[package_cpu].cluster_leader_id == package_cpu) {
			cluster_count++;
		}
		if (processors[package_cpu].core_leader_id == package_cpu) {
			core_count++;
		}
	}
	/**
	 * If the cluster flag has not been set, assign the processor start. If
	 * it has been set, only apply the processor start if it's less than the
	 * held value. This can happen if the callback is invoked twice:
	 *
	 * e.g. package_cpus_list=1,10-12
	 */
	if (!bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER) ||
	    processors[processor].package.processor_start > processor_start) {
		processors[processor].package.processor_start = processor_start;
		processors[processor].package.cluster_start = processor_start;
		processors[processor].package.core_start = processor_start;
		processors[processor].package_leader_id = processor_start;
	}
	/* Accumulate (not assign): counts from multiple invocations add up. */
	processors[processor].package.processor_count += processor_count;
	processors[processor].package.cluster_count += cluster_count;
	processors[processor].package.core_count += core_count;
	processors[processor].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
	return true;
}
+
+/* Initialization for the powerpc64 Linux system. */
+void cpuinfo_powerpc_linux_init(void) {
+	struct cpuinfo_powerpc_linux_processor* powerpc_linux_processors = NULL;
+	struct cpuinfo_processor* processors = NULL;
+	struct cpuinfo_package* packages = NULL;
+	struct cpuinfo_cluster* clusters = NULL;
+	struct cpuinfo_core* cores = NULL;
+	struct cpuinfo_uarch_info* uarchs = NULL;
+	const struct cpuinfo_processor** linux_cpu_to_processor_map = NULL;
+	const struct cpuinfo_core** linux_cpu_to_core_map = NULL;
+	uint32_t* linux_cpu_to_uarch_index_map = NULL;
+	struct cpuinfo_cache* l1i = NULL;
+	struct cpuinfo_cache* l1d = NULL;
+	struct cpuinfo_cache* l2 = NULL;
+	struct cpuinfo_cache* l3 = NULL;
+	struct cpuinfo_cache* l4 = NULL;
+	/**
+	 * The interesting set of processors are the number of 'present'
+	 * processors on the system. There may be more 'possible' processors,
+	 * but processor information cannot be gathered on non-present
+	 * processors.
+	 *
+	 * Note: For SoCs, it is largely the case that all processors are known
+	 * at boot and no processors are hotplugged at runtime, so the
+	 * 'present' and 'possible' list is often the same.
+	 *
+	 * Note: This computes the maximum processor ID of the 'present'
+	 * processors. It is not a count of the number of processors on the
+	 * system.
+	 */
+	const uint32_t max_processor_id =
+		1 + cpuinfo_linux_get_max_present_processor(cpuinfo_linux_get_max_processors_count());
+	if (max_processor_id == 0) {
+		cpuinfo_log_error("failed to discover any processors");
+		return;
+	}
+
+	/**
+	 * Allocate space to store all processor information. This array is
+	 * sized to the max processor ID as opposed to the number of 'present'
+	 * processors, to leverage pointer math in the common utility functions.
+	 */
+	powerpc_linux_processors = calloc(max_processor_id, sizeof(struct cpuinfo_powerpc_linux_processor));
+	if (powerpc_linux_processors == NULL) {
+		cpuinfo_log_error(
+			"failed to allocate %zu bytes for %" PRIu32 " processors.",
+			max_processor_id * sizeof(struct cpuinfo_powerpc_linux_processor),
+			max_processor_id);
+		goto cleanup;
+	}
+
+	/**
+	 * Attempt to detect all processors and apply the corresponding flag to
+	 * each processor struct that we find.
+	 */
+	if (!cpuinfo_linux_detect_present_processors(
+		    max_processor_id,
+		    &powerpc_linux_processors->flags,
+		    sizeof(struct cpuinfo_powerpc_linux_processor),
+		    CPUINFO_LINUX_FLAG_PRESENT | CPUINFO_LINUX_FLAG_VALID)) {
+		cpuinfo_log_error("failed to detect present processors");
+		goto cleanup;
+	}
+
+	uint32_t online_status = 1;
+	/* Populate processor information. */
+	for (size_t processor = 0; processor < max_processor_id; processor++) {
+		if (!bitmask_all(powerpc_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
+			continue;
+		}
+		powerpc_linux_processors[processor].processor.linux_id = processor;
+	}
+
+	/* Populate core information. */
+	for (size_t processor = 0; processor < max_processor_id; processor++) {
+		if (!bitmask_all(powerpc_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
+			continue;
+		}
+		online_status = 1;
+		cpuinfo_linux_get_processor_online_status(processor, &online_status);
+
+		if (online_status == 1) {
+			/* Populate processor start and count information.
+			 */
+			if (!cpuinfo_linux_detect_core_cpus(
+				    max_processor_id,
+				    processor,
+				    (cpuinfo_siblings_callback)core_cpus_parser,
+				    powerpc_linux_processors)) {
+				cpuinfo_log_error("failed to detect core cpus for processor %zu.", processor);
+				goto cleanup;
+			}
+
+			/* Populate core ID information. */
+			if (cpuinfo_linux_get_processor_core_id(
+				    processor, &powerpc_linux_processors[processor].core.core_id)) {
+				powerpc_linux_processors[processor].flags |= CPUINFO_LINUX_FLAG_CORE_ID;
+			}
+
+			/**
+			 * Populate the vendor and uarch of this core from /proc/cpuinfo.
+			 */
+			if (!cpuinfo_powerpc_linux_parse_proc_cpuinfo(max_processor_id, powerpc_linux_processors)) {
+				cpuinfo_log_error("failed to parse processor information from /proc/cpuinfo");
+				return;
+			}
+		}
+	}
+
+	/* Populate cluster information.
+	 * power10, the number of cores and the clusters are same.
+	 */
+	for (size_t processor = 0; processor < max_processor_id; processor++) {
+		if (!bitmask_all(powerpc_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
+			continue;
+		}
+		online_status = 1;
+		cpuinfo_linux_get_processor_online_status(processor, &online_status);
+
+		if (online_status == 1) {
+			if (!cpuinfo_linux_detect_core_cpus(
+				    max_processor_id,
+				    processor,
+				    (cpuinfo_siblings_callback)cluster_cpus_parser,
+				    powerpc_linux_processors)) {
+				cpuinfo_log_warning("failed to detect cluster cpus for processor %zu.", processor);
+				goto cleanup;
+			}
+
+			/**
+			 * Populate the vendor, uarch and frequency of this cluster from
+			 * this core.
+			 */
+			powerpc_linux_processors[processor].cluster.vendor =
+				powerpc_linux_processors[processor].core.vendor;
+			powerpc_linux_processors[processor].cluster.uarch =
+				powerpc_linux_processors[processor].core.uarch;
+			powerpc_linux_processors[processor].cluster.frequency =
+				powerpc_linux_processors[processor].core.frequency;
+			powerpc_linux_processors[processor].cluster.pvr = powerpc_linux_processors[processor].core.pvr;
+		}
+	}
+
+	/* Populate package information. */
+	for (size_t processor = 0; processor < max_processor_id; processor++) {
+		if (!bitmask_all(powerpc_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
+			continue;
+		}
+		cpuinfo_linux_get_processor_online_status(processor, &online_status);
+		if (online_status == 1) {
+			if (!cpuinfo_linux_detect_package_cpus(
+				    max_processor_id,
+				    processor,
+				    (cpuinfo_siblings_callback)package_cpus_parser,
+				    powerpc_linux_processors)) {
+				cpuinfo_log_warning("failed to detect package cpus for processor %zu.", processor);
+				goto cleanup;
+			}
+		}
+	}
+
+	static uint32_t online_processor = 0;
+	for (size_t processor = 1; processor < max_processor_id; processor++) {
+		cpuinfo_linux_get_processor_online_status(processor, &online_status);
+		if (online_status == 1) {
+			if (powerpc_linux_processors[processor].package_leader_id == processor) {
+				powerpc_linux_processors[processor].core.core_id = 0;
+				powerpc_linux_processors[processor].cluster.cluster_id = 0;
+				powerpc_linux_processors[processor].cluster.core_start =
+					powerpc_linux_processors[online_processor].cluster.core_start +
+					powerpc_linux_processors[online_processor].cluster.core_count;
+				powerpc_linux_processors[processor].package.core_start =
+					powerpc_linux_processors[online_processor].package.core_start +
+					powerpc_linux_processors[online_processor].package.core_count;
+				powerpc_linux_processors[processor].package.cluster_start =
+					powerpc_linux_processors[online_processor].package.cluster_start +
+					powerpc_linux_processors[online_processor].package.cluster_count;
+			} else {
+				if (powerpc_linux_processors[processor].core.processor_start !=
+				    powerpc_linux_processors[online_processor].core.processor_start)
+					powerpc_linux_processors[processor].core.core_id =
+						powerpc_linux_processors[online_processor].core.core_id + 1;
+				else
+					powerpc_linux_processors[processor].core.core_id =
+						powerpc_linux_processors[online_processor].core.core_id;
+				if (powerpc_linux_processors[processor].cluster.processor_start !=
+				    powerpc_linux_processors[online_processor].cluster.processor_start) {
+					powerpc_linux_processors[processor].cluster.core_start =
+						powerpc_linux_processors[online_processor].cluster.core_start +
+						powerpc_linux_processors[online_processor].cluster.core_count;
+					powerpc_linux_processors[processor].cluster.cluster_id =
+						powerpc_linux_processors[online_processor].cluster.cluster_id + 1;
+				} else {
+					powerpc_linux_processors[processor].cluster.core_start =
+						powerpc_linux_processors[online_processor].cluster.core_start;
+					powerpc_linux_processors[processor].cluster.cluster_id =
+						powerpc_linux_processors[online_processor].cluster.cluster_id;
+				}
+				powerpc_linux_processors[processor].package.core_start =
+					powerpc_linux_processors[online_processor].package.core_start;
+				powerpc_linux_processors[processor].package.cluster_start =
+					powerpc_linux_processors[online_processor].package.cluster_start;
+			}
+			online_processor = processor;
+		}
+	}
+
+	/* Populate ISA structure with hwcap information. */
+	uint32_t isa_feature[2];
+	cpuinfo_powerpc_linux_hwcap_from_getauxval(isa_feature);
+	const uint32_t isa_features = isa_feature[0];
+	const uint32_t isa_features2 = isa_feature[1];
+	cpuinfo_ppc64_linux_decode_isa_from_hwcap(isa_features, isa_features2, &cpuinfo_isa);
+
+	/**
+	 * Determine the number of *valid* detected processors, cores,
+	 * clusters, packages and uarchs in the list.
+	 */
+	size_t valid_processors_count = 0;
+	size_t valid_cores_count = 0;
+	size_t valid_clusters_count = 0;
+	size_t valid_packages_count = 0;
+	size_t valid_uarchs_count = 0;
+	size_t smt = 0;
+	size_t cache_count = 0;
+	enum cpuinfo_uarch last_uarch = cpuinfo_uarch_unknown;
+	for (size_t processor = 0; processor < max_processor_id; processor++) {
+		if (!bitmask_all(powerpc_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
+			continue;
+		}
+		cpuinfo_linux_get_processor_online_status(processor, &online_status);
+		if (online_status == 1) {
+			/**
+			 * All comparisons to the leader id values MUST be done against
+			 * the 'linux_id' as opposed to 'processor'. The sort function
+			 * above no longer allows us to make the assumption that these
+			 * two values are the same.
+			 */
+			uint32_t linux_id = powerpc_linux_processors[processor].processor.linux_id;
+
+			valid_processors_count++;
+			if (powerpc_linux_processors[processor].core_leader_id == linux_id) {
+				valid_cores_count++;
+			}
+			if (powerpc_linux_processors[processor].cluster_leader_id == linux_id) {
+				valid_clusters_count++;
+			}
+			if (powerpc_linux_processors[processor].package_leader_id == linux_id) {
+				valid_packages_count++;
+			}
+			/**
+			 * As we've sorted by micro-architecture, when the uarch differs
+			 * between two entries, a unique uarch has been observed.
+			 */
+			if (last_uarch != powerpc_linux_processors[processor].core.uarch || valid_uarchs_count == 0) {
+				valid_uarchs_count++;
+				last_uarch = powerpc_linux_processors[processor].core.uarch;
+			}
+		}
+	}
+
+	smt = valid_processors_count / (valid_cores_count);
+	/* 1 cache instance for consecutive 4 even/odd  threads, if core has 8 threads then 2 cache instances */
+	cache_count = max_processor_id / 4;
+
+	/* Assigning linux_ids for all the online processors in a consecutive manner.
+	 * This is only needed in modes other than SMT8.
+	 */
+	if (smt != 8) {
+		size_t online_id = 0;
+		for (size_t processor = 0; processor < max_processor_id; processor++) {
+			cpuinfo_linux_get_processor_online_status(processor, &online_status);
+			if (online_status == 1) {
+				if (online_id != powerpc_linux_processors[processor].processor.linux_id) {
+					if (powerpc_linux_processors[processor].core_leader_id ==
+					    powerpc_linux_processors[processor].processor.linux_id) {
+						powerpc_linux_processors[processor].core_leader_id = online_id;
+						powerpc_linux_processors[processor].core.processor_start =
+							powerpc_linux_processors[processor].core_leader_id;
+					}
+					if (powerpc_linux_processors[processor].cluster_leader_id ==
+					    powerpc_linux_processors[processor].processor.linux_id) {
+						powerpc_linux_processors[processor].cluster_leader_id = online_id;
+						powerpc_linux_processors[processor].cluster.processor_start =
+							powerpc_linux_processors[processor].cluster_leader_id;
+					}
+					if (powerpc_linux_processors[processor].package_leader_id ==
+					    powerpc_linux_processors[processor].processor.linux_id) {
+						powerpc_linux_processors[processor].package_leader_id = online_id;
+						powerpc_linux_processors[processor].package.processor_start =
+							powerpc_linux_processors[processor].package_leader_id;
+					}
+					powerpc_linux_processors[processor].processor.linux_id = online_id;
+				}
+				online_id++;
+			}
+		}
+	}
+
+	if (cache_count != 0) {
+		l1i = calloc(cache_count, sizeof(struct cpuinfo_cache));
+		l1d = calloc(cache_count, sizeof(struct cpuinfo_cache));
+		l2 = calloc(cache_count, sizeof(struct cpuinfo_cache));
+		l3 = calloc(cache_count, sizeof(struct cpuinfo_cache));
+		if (l3 == NULL) {
+			cpuinfo_log_error(
+				"failed to allocate %zu bytes for descriptions of %" PRIu32 " L3 caches",
+				cache_count * sizeof(struct cpuinfo_cache),
+				cache_count);
+			goto cleanup;
+		}
+	}
+
+	/* Allocate and populate final public ABI structures. */
+	processors = calloc(valid_processors_count, sizeof(struct cpuinfo_processor));
+	if (processors == NULL) {
+		cpuinfo_log_error(
+			"failed to allocate %zu bytes for %zu processors.",
+			valid_processors_count * sizeof(struct cpuinfo_processor),
+			valid_processors_count);
+		goto cleanup;
+	}
+
+	cores = calloc(valid_cores_count, sizeof(struct cpuinfo_core));
+	if (cores == NULL) {
+		cpuinfo_log_error(
+			"failed to allocate %zu bytes for %zu cores.",
+			valid_cores_count * sizeof(struct cpuinfo_core),
+			valid_cores_count);
+		goto cleanup;
+	}
+
+	clusters = calloc(valid_clusters_count, sizeof(struct cpuinfo_cluster));
+	if (clusters == NULL) {
+		cpuinfo_log_error(
+			"failed to allocate %zu bytes for %zu clusters.",
+			valid_clusters_count * sizeof(struct cpuinfo_cluster),
+			valid_clusters_count);
+		goto cleanup;
+	}
+
+	packages = calloc(valid_packages_count, sizeof(struct cpuinfo_package));
+	if (packages == NULL) {
+		cpuinfo_log_error(
+			"failed to allocate %zu bytes for %zu packages.",
+			valid_packages_count * sizeof(struct cpuinfo_package),
+			valid_packages_count);
+		goto cleanup;
+	}
+
+	uarchs = calloc(valid_uarchs_count, sizeof(struct cpuinfo_uarch_info));
+	if (uarchs == NULL) {
+		cpuinfo_log_error(
+			"failed to allocate %zu bytes for %zu packages.",
+			valid_uarchs_count * sizeof(struct cpuinfo_uarch_info),
+			valid_uarchs_count);
+		goto cleanup;
+	}
+
+	linux_cpu_to_processor_map = calloc(valid_processors_count, sizeof(struct cpuinfo_processor*));
+	if (linux_cpu_to_processor_map == NULL) {
+		cpuinfo_log_error(
+			"failed to allocate %zu bytes for %" PRIu32 " processor map.",
+			valid_processors_count * sizeof(struct cpuinfo_processor*),
+			valid_processors_count);
+		goto cleanup;
+	}
+
+	linux_cpu_to_core_map = calloc(valid_processors_count, sizeof(struct cpuinfo_core*));
+	if (linux_cpu_to_core_map == NULL) {
+		cpuinfo_log_error(
+			"failed to allocate %zu bytes for %" PRIu32 " core map.",
+			valid_processors_count * sizeof(struct cpuinfo_core*),
+			valid_processors_count);
+		goto cleanup;
+	}
+
+	linux_cpu_to_uarch_index_map = calloc(valid_processors_count, sizeof(struct cpuinfo_uarch_info*));
+	if (linux_cpu_to_uarch_index_map == NULL) {
+		cpuinfo_log_error(
+			"failed to allocate %zu bytes for %" PRIu32 " uarch map.",
+			valid_processors_count * sizeof(struct cpuinfo_uarch_info*),
+			valid_processors_count);
+		goto cleanup;
+	}
+
+	/* Transfer contents of processor list to ABI structures. */
+	size_t valid_processors_index = 0;
+	size_t valid_cores_index = 0;
+	size_t valid_clusters_index = 0;
+	size_t valid_packages_index = 0;
+	size_t valid_uarchs_index = 0;
+	last_uarch = cpuinfo_uarch_unknown;
+	for (size_t processor = 0; processor < max_processor_id; processor++) {
+		cpuinfo_linux_get_processor_online_status(processor, &online_status);
+		if (online_status == 1) {
+			if (!bitmask_all(powerpc_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
+				continue;
+			}
+			/**
+			 * All comparisons to the leader id values MUST be done against
+			 * the 'linux_id' as opposed to 'processor'. The sort function
+			 * above no longer allows us to make the assumption that these
+			 * two values are the same.
+			 */
+			uint32_t linux_id = powerpc_linux_processors[processor].processor.linux_id;
+
+			/* Create uarch entry if this uarch has not been seen before. */
+			if (last_uarch != powerpc_linux_processors[processor].core.uarch || valid_uarchs_index == 0) {
+				uarchs[valid_uarchs_index++].uarch = powerpc_linux_processors[processor].core.uarch;
+				last_uarch = powerpc_linux_processors[processor].core.uarch;
+			}
+
+			/* Copy cpuinfo_processor information. */
+			memcpy(&processors[valid_processors_index++],
+			       &powerpc_linux_processors[processor].processor,
+			       sizeof(struct cpuinfo_processor));
+
+			/* Update uarch processor count. */
+			uarchs[valid_uarchs_index - 1].processor_count++;
+
+			/* Copy cpuinfo_core information, if this is the leader. */
+			if (powerpc_linux_processors[processor].core_leader_id == linux_id) {
+				memcpy(&cores[valid_cores_index++],
+				       &powerpc_linux_processors[processor].core,
+				       sizeof(struct cpuinfo_core));
+				/* Update uarch core count. */
+				uarchs[valid_uarchs_index - 1].core_count++;
+			}
+
+			/* Copy cpuinfo_cluster information, if this is the leader. */
+			if (powerpc_linux_processors[processor].cluster_leader_id == linux_id) {
+				memcpy(&clusters[valid_clusters_index++],
+				       &powerpc_linux_processors[processor].cluster,
+				       sizeof(struct cpuinfo_cluster));
+			}
+
+			/* Copy cpuinfo_package information, if this is the leader. */
+			if (powerpc_linux_processors[processor].package_leader_id == linux_id) {
+				memcpy(&packages[valid_packages_index++],
+				       &powerpc_linux_processors[processor].package,
+				       sizeof(struct cpuinfo_package));
+			}
+
+			/* Commit pointers on the final structures. */
+			processors[valid_processors_index - 1].core = &cores[valid_cores_index - 1];
+			processors[valid_processors_index - 1].cluster = &clusters[valid_clusters_index - 1];
+			processors[valid_processors_index - 1].package = &packages[valid_packages_index - 1];
+
+			cores[valid_cores_index - 1].cluster = &clusters[valid_clusters_index - 1];
+			cores[valid_cores_index - 1].package = &packages[valid_packages_index - 1];
+
+			clusters[valid_clusters_index - 1].package = &packages[valid_packages_index - 1];
+
+			linux_cpu_to_processor_map[linux_id] = &processors[valid_processors_index - 1];
+			linux_cpu_to_core_map[linux_id] = &cores[valid_cores_index - 1];
+			linux_cpu_to_uarch_index_map[linux_id] = valid_uarchs_index - 1;
+		}
+	}
+
+	uint32_t l1i_index = UINT32_MAX, l1d_index = UINT32_MAX, l2_index = UINT32_MAX, l3_index = UINT32_MAX;
+	for (uint32_t i = 0; i < valid_processors_count; i++) {
+		struct cpuinfo_cache temp_l1i = {0}, temp_l1d = {0}, temp_l2 = {0}, temp_l3 = {0};
+		cpuinfo_powerpc_decode_cache(&temp_l1i, &temp_l1d, &temp_l2, &temp_l3);
+		/* On Power10, the even threads of a core share one cache instance and the odd threads share another.
+		 * test/init.cc expects contiguous threads per cache instance, so that check is disabled in init.cc for
+		 * PPC64.
+		 */
+		if (temp_l1i.size != 0) {
+			if ((smt == 8 && (i % 8 == 0)) || (smt == 4 && (i % 4 == 0)) || (smt == 2 && (i % 2 == 0)) ||
+			    (smt == 1)) {
+				/* new cache */
+				l1i[++l1i_index] = (struct cpuinfo_cache){
+					.size = temp_l1i.size,
+					.associativity = temp_l1i.associativity,
+					.sets = temp_l1i.sets,
+					.partitions = 1,
+					.line_size = temp_l1i.line_size,
+					.flags = temp_l1i.flags,
+					.processor_start = i,
+					.processor_count = 1,
+				};
+			} else if (
+				(smt == 8 && (i % 8 == 1)) || (smt == 4 && (i % 4 == 1)) ||
+				(smt == 2 && (i % 2 == 1))) {
+				l1i[++l1i_index] = (struct cpuinfo_cache){
+					.size = temp_l1i.size,
+					.associativity = temp_l1i.associativity,
+					.sets = temp_l1i.sets,
+					.partitions = 1,
+					.line_size = temp_l1i.line_size,
+					.flags = temp_l1i.flags,
+					.processor_start = i,
+					.processor_count = 1,
+				};
+			} else {
+				/* another processor sharing the same cache.  */
+				if (i % 2 == 0)
+					l1i[l1i_index - 1].processor_count += 1;
+				else
+					l1i[l1i_index].processor_count += 1;
+			}
+			if ((smt == 8 && (i % 8 == 0)) || (smt == 4 && (i % 4 == 0)) || (smt == 2 && (i % 2 == 0)) ||
+			    (smt == 1))
+				processors[i].cache.l1i = &l1i[l1i_index];
+			else if (i % 2 == 0)
+				processors[i].cache.l1i = &l1i[l1i_index - 1];
+			else
+				processors[i].cache.l1i = &l1i[l1i_index];
+		}
+
+		if (temp_l1d.size != 0) {
+			if ((smt == 8 && (i % 8 == 0)) || (smt == 4 && (i % 4 == 0)) || (smt == 2 && (i % 2 == 0)) ||
+			    (smt == 1)) {
+				/* new cache */
+				l1d[++l1d_index] = (struct cpuinfo_cache){
+					.size = temp_l1d.size,
+					.associativity = temp_l1d.associativity,
+					.sets = temp_l1d.sets,
+					.partitions = 1,
+					.line_size = temp_l1d.line_size,
+					.flags = temp_l1d.flags,
+					.processor_start = i,
+					.processor_count = 1,
+				};
+			} else if (
+				(smt == 8 && (i % 8 == 1)) || (smt == 4 && (i % 4 == 1)) ||
+				(smt == 2 && (i % 2 == 1))) {
+				/* new cache */
+				l1d[++l1d_index] = (struct cpuinfo_cache){
+					.size = temp_l1d.size,
+					.associativity = temp_l1d.associativity,
+					.sets = temp_l1d.sets,
+					.partitions = 1,
+					.line_size = temp_l1d.line_size,
+					.flags = temp_l1d.flags,
+					.processor_start = i,
+					.processor_count = 1,
+				};
+			} else {
+				/* another processor sharing the same cache.  */
+				if (i % 2 == 0)
+					l1d[l1d_index - 1].processor_count += 1;
+				else
+					l1d[l1d_index].processor_count += 1;
+			}
+			if ((smt == 8 && (i % 8 == 0)) || (smt == 4 && (i % 4 == 0)) || (smt == 2 && (i % 2 == 0)) ||
+			    (smt == 1))
+				processors[i].cache.l1d = &l1d[l1d_index];
+			else if (i % 2 == 0)
+				processors[i].cache.l1d = &l1d[l1d_index - 1];
+			else
+				processors[i].cache.l1d = &l1d[l1d_index];
+		}
+
+		if (temp_l2.size != 0) {
+			if ((smt == 8 && (i % 8 == 0)) || (smt == 4 && (i % 4 == 0)) || (smt == 2 && (i % 2 == 0)) ||
+			    (smt == 1)) {
+				/* new cache */
+				l2[++l2_index] = (struct cpuinfo_cache){
+					.size = temp_l2.size,
+					.associativity = temp_l2.associativity,
+					.sets = temp_l2.sets,
+					.partitions = 1,
+					.line_size = temp_l2.line_size,
+					.flags = temp_l2.flags,
+					.processor_start = i,
+					.processor_count = 1,
+				};
+			} else if (
+				(smt == 8 && (i % 8 == 1)) || (smt == 4 && (i % 4 == 1)) ||
+				(smt == 2 && (i % 2 == 1))) {
+				l2[++l2_index] = (struct cpuinfo_cache){
+					.size = temp_l2.size,
+					.associativity = temp_l2.associativity,
+					.sets = temp_l2.sets,
+					.partitions = 1,
+					.line_size = temp_l2.line_size,
+					.flags = temp_l2.flags,
+					.processor_start = i,
+					.processor_count = 1,
+				};
+			} else {
+				/* another processor sharing the same cache.  */
+				if (i % 2 == 0)
+					l2[l2_index - 1].processor_count += 1;
+				else
+					l2[l2_index].processor_count += 1;
+			}
+			if ((smt == 8 && (i % 8 == 0)) || (smt == 4 && (i % 4 == 0)) || (smt == 2 && (i % 2 == 0)) ||
+			    (smt == 1))
+				processors[i].cache.l2 = &l2[l2_index];
+			else if (i % 2 == 0)
+				processors[i].cache.l2 = &l2[l2_index - 1];
+			else
+				processors[i].cache.l2 = &l2[l2_index];
+		}
+
+		if (temp_l3.size != 0) {
+			if ((smt == 8 && (i % 8 == 0)) || (smt == 4 && (i % 4 == 0)) || (smt == 2 && (i % 2 == 0)) ||
+			    (smt == 1)) {
+				/* new cache */
+				l3[++l3_index] = (struct cpuinfo_cache){
+					.size = temp_l3.size,
+					.associativity = temp_l3.associativity,
+					.sets = temp_l3.sets,
+					.partitions = 1,
+					.line_size = temp_l3.line_size,
+					.flags = temp_l3.flags,
+					.processor_start = i,
+					.processor_count = 1,
+				};
+			} else if (
+				(smt == 8 && (i % 8 == 1)) || (smt == 4 && (i % 4 == 1)) ||
+				(smt == 2 && (i % 2 == 1))) {
+				l3[++l3_index] = (struct cpuinfo_cache){
+					.size = temp_l3.size,
+					.associativity = temp_l3.associativity,
+					.sets = temp_l3.sets,
+					.partitions = 1,
+					.line_size = temp_l3.line_size,
+					.flags = temp_l3.flags,
+					.processor_start = i,
+					.processor_count = 1,
+				};
+			} else {
+				/* another processor sharing the same cache.  */
+				if (i % 2 == 0)
+					l3[l3_index - 1].processor_count += 1;
+				else
+					l3[l3_index].processor_count += 1;
+			}
+			if ((smt == 8 && (i % 8 == 0)) || (smt == 4 && (i % 4 == 0)) || (smt == 2 && (i % 2 == 0)) ||
+			    (smt == 1))
+				processors[i].cache.l3 = &l3[l3_index];
+			else if (i % 2 == 0)
+				processors[i].cache.l3 = &l3[l3_index - 1];
+			else
+				processors[i].cache.l3 = &l3[l3_index];
+		}
+	}
+
+	cpuinfo_processors = processors;
+	cpuinfo_processors_count = valid_processors_count;
+	cpuinfo_cores = cores;
+	cpuinfo_cores_count = valid_cores_count;
+	cpuinfo_clusters = clusters;
+	cpuinfo_clusters_count = valid_clusters_count;
+	cpuinfo_packages = packages;
+	cpuinfo_packages_count = valid_packages_count;
+	cpuinfo_uarchs = uarchs;
+	cpuinfo_uarchs_count = valid_uarchs_count;
+
+	cpuinfo_cache[cpuinfo_cache_level_1i] = l1i;
+	cpuinfo_cache[cpuinfo_cache_level_1d] = l1d;
+	cpuinfo_cache[cpuinfo_cache_level_2] = l2;
+	cpuinfo_cache[cpuinfo_cache_level_3] = l3;
+	cpuinfo_linux_cpu_max = valid_processors_count;
+	cpuinfo_linux_cpu_to_processor_map = linux_cpu_to_processor_map;
+	cpuinfo_linux_cpu_to_core_map = linux_cpu_to_core_map;
+	cpuinfo_linux_cpu_to_uarch_index_map = linux_cpu_to_uarch_index_map;
+
+	cpuinfo_cache_count[cpuinfo_cache_level_1i] = cache_count;
+	cpuinfo_cache_count[cpuinfo_cache_level_1d] = cache_count;
+	cpuinfo_cache_count[cpuinfo_cache_level_2] = cache_count;
+	cpuinfo_cache_count[cpuinfo_cache_level_3] = cache_count;
+	__sync_synchronize();
+
+	cpuinfo_is_initialized = true;
+
+	/* Mark all public structures NULL to prevent cleanup from erasing them.
+	 */
+	processors = NULL;
+	cores = NULL;
+	clusters = NULL;
+	packages = NULL;
+	uarchs = NULL;
+	linux_cpu_to_processor_map = NULL;
+	linux_cpu_to_core_map = NULL;
+	linux_cpu_to_uarch_index_map = NULL;
+	l1i = l1d = l2 = l3 = NULL;
+cleanup:
+	free(powerpc_linux_processors);
+	free(processors);
+	free(cores);
+	free(clusters);
+	free(packages);
+	free(uarchs);
+	free(linux_cpu_to_processor_map);
+	free(linux_cpu_to_core_map);
+	free(linux_cpu_to_uarch_index_map);
+	free(l1i);
+	free(l1d);
+	free(l2);
+	free(l3);
+}
diff --git a/src/powerpc/linux/ppc64-hw.c b/src/powerpc/linux/ppc64-hw.c
new file mode 100644
index 00000000..b8f11ed0
--- /dev/null
+++ b/src/powerpc/linux/ppc64-hw.c
@@ -0,0 +1,17 @@
+#include <string.h>
+#include <dlfcn.h>
+#include <elf.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <cpuinfo.h>
+#include <cpuinfo/log.h>
+#include <powerpc/linux/api.h>
+#include <sys/auxv.h>
+
+void cpuinfo_powerpc_linux_hwcap_from_getauxval(uint32_t isa_feature[]) {
+	isa_feature[0] = (uint32_t)getauxval(AT_HWCAP);
+	isa_feature[1] = (uint32_t)getauxval(AT_HWCAP2);
+}
diff --git a/src/powerpc/linux/ppc64-isa.c b/src/powerpc/linux/ppc64-isa.c
new file mode 100644
index 00000000..ecc30723
--- /dev/null
+++ b/src/powerpc/linux/ppc64-isa.c
@@ -0,0 +1,13 @@
+#include <stdint.h>
+
+#include <cpuinfo/log.h>
+#include <powerpc/linux/api.h>
+
+void cpuinfo_ppc64_linux_decode_isa_from_hwcap(
+	uint32_t features,
+	uint32_t features2,
+	struct cpuinfo_powerpc_isa isa[restrict static 1]) {
+	isa->vsx = !!(features & CPUINFO_POWERPC_LINUX_FEATURE_HAS_VSX);
+	isa->htm = !!(features2 & CPUINFO_POWERPC_LINUX_FEATURE_HTM);
+	isa->mma = !!(features2 & CPUINFO_POWERPC_LINUX_FEATURE_HAS_MMA);
+}
diff --git a/src/powerpc/uarch.c b/src/powerpc/uarch.c
new file mode 100644
index 00000000..bfb388bd
--- /dev/null
+++ b/src/powerpc/uarch.c
@@ -0,0 +1,25 @@
+#include <inttypes.h>
+
+#include <cpuinfo/log.h>
+#include <powerpc/api.h>
+
+void cpuinfo_powerpc_decode_vendor_uarch(
+	uint32_t vendor_id,
+	enum cpuinfo_vendor vendor[restrict static 1],
+	enum cpuinfo_uarch uarch[restrict static 1]) {
+	/* The vendor ID is sufficient to determine the cpuinfo_vendor. */
+	switch (vendor_id) {
+		case cpuinfo_powerpc_chipset_vendor_ibm:
+			*vendor = cpuinfo_vendor_ibm;
+			break;
+		default:
+			*vendor = cpuinfo_vendor_unknown;
+			cpuinfo_log_warning("unknown vendor ID: %" PRIu32, vendor_id);
+			break;
+	}
+	/**
+	 * TODO: Add support for parsing chipset architecture and implementation
+	 * IDs here, when a chipset of interest comes along.
+	 */
+	*uarch = cpuinfo_uarch_unknown;
+}
diff --git a/test/init.cc b/test/init.cc
index a6128e35..340e491d 100644
--- a/test/init.cc
+++ b/test/init.cc
@@ -118,7 +118,9 @@ TEST(PROCESSOR, consistent_l1i) {
 		const cpuinfo_cache* l1i = processor->cache.l1i;
 		if (l1i != nullptr) {
 			EXPECT_GE(i, l1i->processor_start);
+#if !CPUINFO_ARCH_PPC64
 			EXPECT_LT(i, l1i->processor_start + l1i->processor_count);
+#endif
 		}
 	}
 	cpuinfo_deinitialize();
@@ -132,7 +134,9 @@ TEST(PROCESSOR, consistent_l1d) {
 		const cpuinfo_cache* l1d = processor->cache.l1d;
 		if (l1d != nullptr) {
 			EXPECT_GE(i, l1d->processor_start);
+#if !CPUINFO_ARCH_PPC64
 			EXPECT_LT(i, l1d->processor_start + l1d->processor_count);
+#endif
 		}
 	}
 	cpuinfo_deinitialize();
@@ -146,7 +150,9 @@ TEST(PROCESSOR, consistent_l2) {
 		const cpuinfo_cache* l2 = processor->cache.l2;
 		if (l2 != nullptr) {
 			EXPECT_GE(i, l2->processor_start);
+#if !CPUINFO_ARCH_PPC64
 			EXPECT_LT(i, l2->processor_start + l2->processor_count);
+#endif
 		}
 	}
 	cpuinfo_deinitialize();
@@ -160,7 +166,9 @@ TEST(PROCESSOR, consistent_l3) {
 		const cpuinfo_cache* l3 = processor->cache.l3;
 		if (l3 != nullptr) {
 			EXPECT_GE(i, l3->processor_start);
+#if !CPUINFO_ARCH_PPC64
 			EXPECT_LT(i, l3->processor_start + l3->processor_count);
+#endif
 		}
 	}
 	cpuinfo_deinitialize();
@@ -522,6 +530,24 @@ TEST(CLUSTER, consistent_midr) {
 }
 #endif /* CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 */
 
+#if CPUINFO_ARCH_PPC64
+TEST(CLUSTER, consistent_pvr) {
+	ASSERT_TRUE(cpuinfo_initialize());
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		const cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
+		ASSERT_TRUE(cluster);
+
+		for (uint32_t j = 0; j < cluster->core_count; j++) {
+			const cpuinfo_core* core = cpuinfo_get_core(cluster->core_start + j);
+			ASSERT_TRUE(core);
+
+			EXPECT_EQ(cluster->pvr, core->pvr);
+		}
+	}
+	cpuinfo_deinitialize();
+}
+#endif /* CPUINFO_ARCH_PPC64 */
+
 TEST(CLUSTER, consistent_frequency) {
 	ASSERT_TRUE(cpuinfo_initialize());
 	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
@@ -891,6 +917,7 @@ TEST(L1I_CACHE, valid_processors) {
 	cpuinfo_deinitialize();
 }
 
+#if !CPUINFO_ARCH_PPC64
 TEST(L1I_CACHE, consistent_processors) {
 	ASSERT_TRUE(cpuinfo_initialize());
 	for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
@@ -906,6 +933,7 @@ TEST(L1I_CACHE, consistent_processors) {
 	}
 	cpuinfo_deinitialize();
 }
+#endif
 
 TEST(L1D_CACHES_COUNT, within_bounds) {
 	ASSERT_TRUE(cpuinfo_initialize());
@@ -1055,6 +1083,7 @@ TEST(L1D_CACHE, valid_processors) {
 	cpuinfo_deinitialize();
 }
 
+#if !CPUINFO_ARCH_PPC64
 TEST(L1D_CACHE, consistent_processors) {
 	ASSERT_TRUE(cpuinfo_initialize());
 	for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
@@ -1070,6 +1099,7 @@ TEST(L1D_CACHE, consistent_processors) {
 	}
 	cpuinfo_deinitialize();
 }
+#endif
 
 TEST(L2_CACHES_COUNT, within_bounds) {
 	ASSERT_TRUE(cpuinfo_initialize());
@@ -1210,6 +1240,7 @@ TEST(L2_CACHE, valid_processors) {
 	cpuinfo_deinitialize();
 }
 
+#if !CPUINFO_ARCH_PPC64
 TEST(L2_CACHE, consistent_processors) {
 	ASSERT_TRUE(cpuinfo_initialize());
 	for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
@@ -1225,6 +1256,7 @@ TEST(L2_CACHE, consistent_processors) {
 	}
 	cpuinfo_deinitialize();
 }
+#endif
 
 TEST(L3_CACHES_COUNT, within_bounds) {
 	ASSERT_TRUE(cpuinfo_initialize());
@@ -1357,6 +1389,7 @@ TEST(L3_CACHE, valid_processors) {
 	cpuinfo_deinitialize();
 }
 
+#if !CPUINFO_ARCH_PPC64
 TEST(L3_CACHE, consistent_processors) {
 	ASSERT_TRUE(cpuinfo_initialize());
 	for (uint32_t i = 0; i < cpuinfo_get_l3_caches_count(); i++) {
@@ -1372,6 +1405,7 @@ TEST(L3_CACHE, consistent_processors) {
 	}
 	cpuinfo_deinitialize();
 }
+#endif
 
 TEST(L4_CACHES_COUNT, within_bounds) {
 	ASSERT_TRUE(cpuinfo_initialize());
diff --git a/test/name/power-features.cc b/test/name/power-features.cc
new file mode 100644
index 00000000..01239dad
--- /dev/null
+++ b/test/name/power-features.cc
@@ -0,0 +1,36 @@
+#include <cpuinfo.h>
+#include <gtest/gtest.h>
+#include <sys/auxv.h>
+
+extern "C" bool CPUINFO_ABI cpuinfo_initialize(void);
+extern "C" bool cpuinfo_has_powerpc_vsx(void);
+extern "C" bool cpuinfo_has_powerpc_htm(void);
+extern "C" bool cpuinfo_has_powerpc_mma(void);
+
+#define CPUINFO_POWERPC_LINUX_FEATURE_ARCH_3_1 UINT32_C(0x00040000) // To check if the architecture is Power10
+#define CPUINFO_POWERPC_LINUX_FEATURE_ARCH_3_00 UINT32_C(0x00800000) // To check if the architecture is Power9
+#define CPUINFO_POWERPC_LINUX_FEATURE_ARCH_2_07 UINT32_C(0x80000000) // To check if the architecture is Power8
+
+TEST(PowerFeatures, Power) {
+	if (!cpuinfo_initialize()) {
+		fprintf(stderr, "failed to initialize CPU information\n");
+		exit(1);
+	}
+	/* Only HWCAP2 bits are needed to distinguish POWER8/POWER9/POWER10. */
+	const uint32_t b = (uint32_t)getauxval(AT_HWCAP2);
+
+	if (b & CPUINFO_POWERPC_LINUX_FEATURE_ARCH_3_1) {
+		EXPECT_EQ(0, cpuinfo_has_powerpc_htm());
+		EXPECT_EQ(1, cpuinfo_has_powerpc_mma());
+		EXPECT_EQ(1, cpuinfo_has_powerpc_vsx());
+	} else if (b & CPUINFO_POWERPC_LINUX_FEATURE_ARCH_3_00) {
+		EXPECT_EQ(0, cpuinfo_has_powerpc_htm());
+		EXPECT_EQ(0, cpuinfo_has_powerpc_mma());
+		EXPECT_EQ(1, cpuinfo_has_powerpc_vsx());
+	} else if (b & CPUINFO_POWERPC_LINUX_FEATURE_ARCH_2_07) {
+		EXPECT_EQ(1, cpuinfo_has_powerpc_htm());
+		EXPECT_EQ(0, cpuinfo_has_powerpc_mma());
+		EXPECT_EQ(1, cpuinfo_has_powerpc_vsx());
+	}
+	cpuinfo_deinitialize();
+}
diff --git a/tools/cpu-info.c b/tools/cpu-info.c
index b896b270..b8adfb9e 100644
--- a/tools/cpu-info.c
+++ b/tools/cpu-info.c
@@ -274,6 +274,20 @@ static const char* uarch_to_string(enum cpuinfo_uarch uarch) {
 			return "ThunderX2";
 		case cpuinfo_uarch_pj4:
 			return "PJ4";
+		case cpuinfo_uarch_power7:
+			return "POWER7";
+		case cpuinfo_uarch_power7p:
+			return "POWER7+";
+		case cpuinfo_uarch_power8:
+			return "POWER8";
+		case cpuinfo_uarch_power8e:
+			return "POWER8E";
+		case cpuinfo_uarch_power8nvl:
+			return "POWER8NVL";
+		case cpuinfo_uarch_power9:
+			return "POWER9";
+		case cpuinfo_uarch_power10:
+			return "POWER10";
 		case cpuinfo_uarch_brahma_b15:
 			return "Brahma B15";
 		case cpuinfo_uarch_brahma_b53:
@@ -317,6 +331,8 @@ int main(int argc, char** argv) {
 	printf("Cores:\n");
 	for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
 		const struct cpuinfo_core* core = cpuinfo_get_core(i);
+		if (core->disabled)
+			continue;
 		if (core->processor_count == 1) {
 			printf("\t%" PRIu32 ": 1 processor (%" PRIu32 ")", i, core->processor_start);
 		} else {