From 388b29c3f7f950b3a6a428b0c7d8b5a79581d400 Mon Sep 17 00:00:00 2001
From: Brice Goglin
Date: Fri, 27 Mar 2020 11:30:17 +0100
Subject: [PATCH] linux: fix the discovery of hugepages on btrfs fsroot

btrfs always returns 1 in directory stat.st_nlink. It breaks make check
in tests/hwloc/linux/ because the hugepages discovery uses st_nlink to
allocate the memory page_types array.

Always allocate at least 3 page_types slots (enough for all known cases,
1 for normal, 2 for huge pages) and realloc later if needed.

Thanks to Ludovic Courtes for the report.

Signed-off-by: Brice Goglin
(cherry picked from commit 7f351cec9bfe54a031f35ad16c9cfb99784d76dc)
---
 hwloc/topology-linux.c | 33 +++++++++++++++++++++++++++++----
 1 file changed, 29 insertions(+), 4 deletions(-)

diff --git a/hwloc/topology-linux.c b/hwloc/topology-linux.c
index 56f6f8fb71..75c01dffd7 100644
--- a/hwloc/topology-linux.c
+++ b/hwloc/topology-linux.c
@@ -2407,6 +2407,7 @@ static void
 hwloc_parse_hugepages_info(struct hwloc_linux_backend_data_s *data,
                            const char *dirpath,
                            struct hwloc_numanode_attr_s *memory,
+                           unsigned allocated_page_types,
                            uint64_t *remaining_local_memory)
 {
   DIR *dir;
@@ -2421,6 +2422,14 @@ hwloc_parse_hugepages_info(struct hwloc_linux_backend_data_s *data,
       int err;
       if (strncmp(dirent->d_name, "hugepages-", 10))
         continue;
+      if (index_ >= allocated_page_types) {
+        /* we must increase the page_types array */
+        struct hwloc_memory_page_type_s *tmp = realloc(memory->page_types, allocated_page_types * 2 * sizeof(*tmp));
+        if (!tmp)
+          break;
+        memory->page_types = tmp;
+        allocated_page_types *= 2;
+      }
       memory->page_types[index_].size = strtoul(dirent->d_name+10, NULL, 0) * 1024ULL;
       err = snprintf(path, sizeof(path), "%s/%s/nr_hugepages", dirpath, dirent->d_name);
       if ((size_t) err < sizeof(path)
@@ -2448,7 +2457,14 @@ hwloc_get_machine_meminfo(struct hwloc_linux_backend_data_s *data,
 
   err = hwloc_stat("/sys/kernel/mm/hugepages", &st, data->root_fd);
   if (!err) {
-    types = 1 + st.st_nlink-2;
+    types = 1 /* normal non-huge size */ + st.st_nlink - 2 /* ignore . and .. */;
+    if (types < 3)
+      /* some buggy filesystems (e.g. btrfs when reading from fsroot)
+       * return wrong st_nlink for directories (always 1 for btrfs).
+       * use 3 as a sane default (default page + 2 huge sizes).
+       * hwloc_parse_hugepages_info() will extend it if needed.
+       */
+      types = 3;
     has_sysfs_hugepages = 1;
   }
 
@@ -2466,7 +2482,8 @@ hwloc_get_machine_meminfo(struct hwloc_linux_backend_data_s *data,
 
   if (has_sysfs_hugepages) {
     /* read from node%d/hugepages/hugepages-%skB/nr_hugepages */
-    hwloc_parse_hugepages_info(data, "/sys/kernel/mm/hugepages", memory, &remaining_local_memory);
+    hwloc_parse_hugepages_info(data, "/sys/kernel/mm/hugepages", memory, types, &remaining_local_memory);
+    /* memory->page_types_len may have changed */
   }
 
   /* use remaining memory as normal pages */
@@ -2490,7 +2507,14 @@ hwloc_get_sysfs_node_meminfo(struct hwloc_linux_backend_data_s *data,
   sprintf(path, "%s/node%d/hugepages", syspath, node);
   err = hwloc_stat(path, &st, data->root_fd);
   if (!err) {
-    types = 1 + st.st_nlink-2;
+    types = 1 /* normal non-huge size */ + st.st_nlink - 2 /* ignore . and .. */;
+    if (types < 3)
+      /* some buggy filesystems (e.g. btrfs when reading from fsroot)
+       * return wrong st_nlink for directories (always 1 for btrfs).
+       * use 3 as a sane default (default page + 2 huge sizes).
+       * hwloc_parse_hugepages_info() will extend it if needed.
+       */
+      types = 3;
     has_sysfs_hugepages = 1;
   }
 
@@ -2509,7 +2533,8 @@ hwloc_get_sysfs_node_meminfo(struct hwloc_linux_backend_data_s *data,
 
   if (has_sysfs_hugepages) {
     /* read from node%d/hugepages/hugepages-%skB/nr_hugepages */
-    hwloc_parse_hugepages_info(data, path, memory, &remaining_local_memory);
+    hwloc_parse_hugepages_info(data, path, memory, types, &remaining_local_memory);
+    /* memory->page_types_len may have changed */
   }
 
   /* use remaining memory as normal pages */
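
Note (not part of the patch): a minimal standalone sketch of the underlying issue, assuming only a POSIX system. It prints a directory's st_nlink and the subdirectory count that the old code effectively derived from it (st_nlink - 2). On most filesystems that estimate is correct; on btrfs st_nlink is always 1 for directories, so the estimate collapses to -1 and the page_types array ended up undersized. The default path below is just an example argument, not something the patch requires.

/* illustration only: show why sizing an array from a directory's st_nlink is unreliable */
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char *argv[])
{
  struct stat st;
  const char *dir = argc > 1 ? argv[1] : "/sys/kernel/mm/hugepages"; /* example path */

  if (stat(dir, &st) < 0) {
    perror(dir);
    return 1;
  }

  /* On ext4/xfs a directory's link count is 2 + number of subdirectories;
   * on btrfs it is always 1, so the derived count below is wrong there. */
  printf("%s: st_nlink = %lu, estimated subdirectories = %ld\n",
         dir, (unsigned long) st.st_nlink, (long) st.st_nlink - 2);
  return 0;
}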