Hash table charts (#15323)
thiagoftsm committed Jul 20, 2023
1 parent 29d0210 commit b762b05
Showing 17 changed files with 355 additions and 172 deletions.
167 changes: 165 additions & 2 deletions collectors/ebpf.plugin/ebpf.c
@@ -60,7 +60,7 @@ ebpf_module_t ebpf_modules[] = {
NETDATA_V5_14,
.load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
.thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0 },
{ .thread_name = "socket", .config_name = "socket", .thread_description = NETDATA_EBPF_SOCKET_MODULE_DESC,
{ .thread_name = "socket", .config_name = "socket", .thread_description = NETDATA_EBPF_SOCKET_MODULE_DESC,
.enabled = 0, .start_routine = ebpf_socket_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
@@ -1229,7 +1229,7 @@ void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist,
*/
int ebpf_statistic_create_aral_chart(char *name, ebpf_module_t *em)
{
static int priority = 140100;
static int priority = NETATA_EBPF_ORDER_STAT_ARAL_BEGIN;
char *mem = { NETDATA_EBPF_STAT_DIMENSION_MEMORY };
char *aral = { NETDATA_EBPF_STAT_DIMENSION_ARAL };

@@ -1325,6 +1325,49 @@ void ebpf_send_data_aral_chart(ARAL *memory, ebpf_module_t *em)
write_end_chart();
}

/*****************************************************************
*
* FUNCTIONS TO READ GLOBAL HASH TABLES
*
*****************************************************************/

/**
* Read Global Table Stats
*
* Read data from the specified table (map_fd) using the array allocated inside the thread
* (values) and store the totals in the stats vector, starting from its first position.
*
* For PID tables it is recommended to use a dedicated function to parse the specific data.
*
* @param stats         vector used to store the data read.
* @param values        helper array used to read the per-core values from the hash table.
* @param map_fd        file descriptor of the table that has the data.
* @param maps_per_core when set, data must be read and summed from all cores.
* @param begin         first key queried in the hash table.
* @param end           first key that will not be read (exclusive upper bound).
*/
void ebpf_read_global_table_stats(netdata_idx_t *stats,
netdata_idx_t *values,
int map_fd,
int maps_per_core,
uint32_t begin,
uint32_t end)
{
uint32_t idx, order;

for (idx = begin, order = 0; idx < end; idx++, order++) {
if (!bpf_map_lookup_elem(map_fd, &idx, values)) {
int i;
int before = (maps_per_core) ? ebpf_nprocs: 1;
netdata_idx_t total = 0;
for (i = 0; i < before; i++)
total += values[i];

stats[order] = total;
}
}
}
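
/*
* Usage sketch (illustrative; names prefixed with MY_/my_ are hypothetical): a collector
* thread that keeps a global stats table and a control table could aggregate both with
* the helper above, reusing a single per-core scratch buffer:
*
*     netdata_idx_t my_hash_values[MY_GLOBAL_END];                            // totals kept by the thread
*     netdata_idx_t *my_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t)); // per-core scratch buffer
*
*     // Sum the per-core counters of the global table into my_hash_values, starting at index 0
*     ebpf_read_global_table_stats(my_hash_values, my_values,
*                                  my_maps[MY_GLOBAL_STATS].map_fd, maps_per_core,
*                                  MY_FIRST_KEY, MY_GLOBAL_END);
*
*     // Sum the controller counters (PID table insertions/removals) into em->hash_table_stats
*     ebpf_read_global_table_stats(em->hash_table_stats, my_values,
*                                  my_maps[MY_CTRL].map_fd, maps_per_core,
*                                  NETDATA_CONTROLLER_PID_TABLE_ADD, NETDATA_CONTROLLER_END);
*/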

/*****************************************************************
*
* FUNCTIONS TO DEFINE OPTIONS
@@ -2453,6 +2496,47 @@ static char *memlock_stat = {"memory_locked"};
static char *hash_table_stat = {"hash_table"};
static char *hash_table_core[NETDATA_EBPF_LOAD_STAT_END] = {"per_core", "unique"};

/**
* Send Hash Table PID data
*
* Send all information associated with a specific pid table.
*
* @param chart chart id
* @param idx index position in hash_table_stats
*/
static inline void ebpf_send_hash_table_pid_data(char *chart, uint32_t idx)
{
int i;
write_begin_chart(NETDATA_MONITORING_FAMILY, chart);
for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
ebpf_module_t *wem = &ebpf_modules[i];
if (wem->apps_routine)
write_chart_dimension((char *)wem->thread_name,
(wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ?
wem->hash_table_stats[idx]:
0);
}
write_end_chart();
}

/**
* Send Global Hash Table data
*
* Send the number of elements stored inside each global hash table.
*/
static inline void ebpf_send_global_hash_table_data()
{
int i;
write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS);
for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
ebpf_module_t *wem = &ebpf_modules[i];
write_chart_dimension((char *)wem->thread_name,
(wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? NETDATA_CONTROLLER_END: 0);
}
write_end_chart();
}
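
/*
* Output sketch (values illustrative): every update interval the two helpers above print one
* BEGIN/SET/END block per chart to standard output, assuming NETDATA_MONITORING_FAMILY
* expands to "netdata":
*
*     BEGIN netdata.ebpf_hash_tables_global_elements
*     SET process = 6
*     SET socket = 6
*     ...
*     END
*
*     BEGIN netdata.ebpf_hash_tables_insert_pid_elements
*     SET process = 1234
*     ...
*     END
*
* Dimension names come from ebpf_modules[i].thread_name; threads that are stopping or
* stopped report 0.
*/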

/**
* Send Statistic Data
*
@@ -2500,6 +2584,11 @@ void ebpf_send_statistic_data()
write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_PER_CORE], (long long)plugin_statistics.hash_percpu);
write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_UNIQUE], (long long)plugin_statistics.hash_unique);
write_end_chart();

ebpf_send_global_hash_table_data();

ebpf_send_hash_table_pid_data(NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS, NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_ADD);
ebpf_send_hash_table_pid_data(NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS, NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_DEL);
}

/**
@@ -2681,6 +2770,66 @@ static inline void ebpf_create_statistic_hash_per_core(int update_every)
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
}

/**
* Hash table global elements
*
* Write to standard output the chart definition used to monitor the number of elements
* inside the global hash tables.
*
* @param update_every time used to update charts
*/
static void ebpf_create_statistic_hash_global_elements(int update_every)
{
ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS,
"Controllers inside global table",
"rows",
NETDATA_EBPF_FAMILY,
NETDATA_EBPF_CHART_TYPE_LINE,
NULL,
NETDATA_EBPF_ORDER_STAT_HASH_GLOBAL_TABLE_TOTAL,
update_every,
NETDATA_EBPF_MODULE_NAME_PROCESS);

int i;
for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
ebpf_write_global_dimension((char *)ebpf_modules[i].thread_name,
(char *)ebpf_modules[i].thread_name,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
}
}

/**
* Hash table PID elements
*
* Write to standard output the chart definition used to monitor the number of elements
* inserted into or removed from the PID hash tables.
*
* @param update_every time used to update charts
* @param id           chart id
* @param title        chart title
* @param order        order in which the chart will be shown on the dashboard.
*/
static void ebpf_create_statistic_hash_pid_table(int update_every, char *id, char *title, int order)
{
ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
id,
title,
"rows",
NETDATA_EBPF_FAMILY,
NETDATA_EBPF_CHART_TYPE_LINE,
NULL,
order,
update_every,
NETDATA_EBPF_MODULE_NAME_PROCESS);

int i;
for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
ebpf_module_t *wem = &ebpf_modules[i];
if (wem->apps_routine)
ebpf_write_global_dimension((char *)wem->thread_name,
(char *)wem->thread_name,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
}
}
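
/*
* Definition sketch (approximate; field order follows the external plugin CHART/DIMENSION
* protocol, and the family, priority and plugin fields depend on how the macros above
* expand): the charts created by these two helpers are expected to look roughly like
*
*     CHART netdata.ebpf_hash_tables_global_elements '' 'Controllers inside global table' 'rows' 'ebpf' '' 'line' <order> <update_every> '' 'ebpf.plugin' 'process'
*     DIMENSION process process absolute 1 1
*     DIMENSION socket socket absolute 1 1
*     ...
*
* The PID-table charts follow the same layout, but only threads with an apps_routine get a
* dimension, and those dimensions use the incremental algorithm instead of absolute.
*/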

/**
* Create Statistics Charts
@@ -2718,6 +2867,20 @@ static void ebpf_create_statistic_charts(int update_every)
ebpf_create_statistic_hash_tables(update_every);

ebpf_create_statistic_hash_per_core(update_every);

ebpf_create_statistic_hash_global_elements(update_every);

ebpf_create_statistic_hash_pid_table(update_every,
NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS,
"Elements inserted into PID table",
NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_ADDED);

ebpf_create_statistic_hash_pid_table(update_every,
NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS,
"Elements removed from PID table",
NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_REMOVED);

fflush(stdout);
}

/*****************************************************************
8 changes: 6 additions & 2 deletions collectors/ebpf.plugin/ebpf.h
@@ -69,8 +69,6 @@ typedef struct netdata_syscall_stat {
struct netdata_syscall_stat *next; // Link list
} netdata_syscall_stat_t;

typedef uint64_t netdata_idx_t;

typedef struct netdata_publish_syscall {
char *dimension;
char *name;
@@ -169,6 +167,9 @@ typedef struct ebpf_tracepoint {
#define NETDATA_EBPF_KERNEL_MEMORY "ebpf_kernel_memory"
#define NETDATA_EBPF_HASH_TABLES_LOADED "ebpf_hash_tables_count"
#define NETDATA_EBPF_HASH_TABLES_PER_CORE "ebpf_hash_tables_per_core"
#define NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS "ebpf_hash_tables_global_elements"
#define NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS "ebpf_hash_tables_insert_pid_elements"
#define NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS "ebpf_hash_tables_remove_pid_elements"

// Log file
#define NETDATA_DEVELOPER_LOG_FILE "developer.log"
@@ -319,6 +320,9 @@ void ebpf_update_disabled_plugin_stats(ebpf_module_t *em);
ARAL *ebpf_allocate_pid_aral(char *name, size_t size);
void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link **probe_links);

void ebpf_read_global_table_stats(netdata_idx_t *stats, netdata_idx_t *values, int map_fd,
int maps_per_core, uint32_t begin, uint32_t end);

extern ebpf_filesystem_partitions_t localfs[];
extern ebpf_sync_syscalls_t local_syscalls[];
extern int ebpf_exit_plugin;
36 changes: 18 additions & 18 deletions collectors/ebpf.plugin/ebpf_cachestat.c
@@ -854,26 +854,24 @@ void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *ptr)
*
* Read the table with the number of calls for all functions
*
* @param stats         vector used to store data read from the control table.
* @param maps_per_core whether data must be read from all cores.
*/
static void ebpf_cachestat_read_global_table(int maps_per_core)
static void ebpf_cachestat_read_global_tables(netdata_idx_t *stats, int maps_per_core)
{
uint32_t idx;
netdata_idx_t *val = cachestat_hash_values;
netdata_idx_t *stored = cachestat_values;
int fd = cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS].map_fd;

for (idx = NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU; idx < NETDATA_CACHESTAT_END; idx++) {
if (!bpf_map_lookup_elem(fd, &idx, stored)) {
int i;
int end = (maps_per_core) ? ebpf_nprocs: 1;
netdata_idx_t total = 0;
for (i = 0; i < end; i++)
total += stored[i];

val[idx] = total;
}
}
ebpf_read_global_table_stats(cachestat_hash_values,
cachestat_values,
cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS].map_fd,
maps_per_core,
NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU,
NETDATA_CACHESTAT_END);

ebpf_read_global_table_stats(stats,
cachestat_values,
cachestat_maps[NETDATA_CACHESTAT_CTRL].map_fd,
maps_per_core,
NETDATA_CONTROLLER_PID_TABLE_ADD,
NETDATA_CONTROLLER_END);
}

/**
@@ -1288,6 +1286,8 @@ static void cachestat_collector(ebpf_module_t *em)
//This will be cancelled by its parent
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
netdata_idx_t *stats = em->hash_table_stats;
memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);

@@ -1296,7 +1296,7 @@ static void cachestat_collector(ebpf_module_t *em)

counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
ebpf_cachestat_read_global_table(maps_per_core);
ebpf_cachestat_read_global_tables(stats, maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
ebpf_read_cachestat_apps_table(maps_per_core);
36 changes: 18 additions & 18 deletions collectors/ebpf.plugin/ebpf_dcstat.c
@@ -690,26 +690,24 @@ static void ebpf_update_dc_cgroup(int maps_per_core)
*
* Read the table with the number of calls for all functions
*
* @param stats         vector used to store data read from the control table.
* @param maps_per_core whether data must be read from all cores.
*/
static void ebpf_dc_read_global_table(int maps_per_core)
static void ebpf_dc_read_global_tables(netdata_idx_t *stats, int maps_per_core)
{
uint32_t idx;
netdata_idx_t *val = dcstat_hash_values;
netdata_idx_t *stored = dcstat_values;
int fd = dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS].map_fd;

for (idx = NETDATA_KEY_DC_REFERENCE; idx < NETDATA_DIRECTORY_CACHE_END; idx++) {
if (!bpf_map_lookup_elem(fd, &idx, stored)) {
int i;
int end = (maps_per_core) ? ebpf_nprocs: 1;
netdata_idx_t total = 0;
for (i = 0; i < end; i++)
total += stored[i];

val[idx] = total;
}
}
ebpf_read_global_table_stats(dcstat_hash_values,
dcstat_values,
dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS].map_fd,
maps_per_core,
NETDATA_KEY_DC_REFERENCE,
NETDATA_DIRECTORY_CACHE_END);

ebpf_read_global_table_stats(stats,
dcstat_values,
dcstat_maps[NETDATA_DCSTAT_CTRL].map_fd,
maps_per_core,
NETDATA_CONTROLLER_PID_TABLE_ADD,
NETDATA_CONTROLLER_END);
}

/**
@@ -1169,6 +1167,8 @@ static void dcstat_collector(ebpf_module_t *em)
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
netdata_idx_t *stats = em->hash_table_stats;
memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);

@@ -1177,7 +1177,7 @@ static void dcstat_collector(ebpf_module_t *em)

counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
ebpf_dc_read_global_table(maps_per_core);
ebpf_dc_read_global_tables(stats, maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
read_dc_apps_table(maps_per_core);
