Skip to content

Commit

Permalink
Comments improved and code cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
samuelesabella committed Mar 10, 2019
1 parent bbabbd4 commit fba9db9
Show file tree
Hide file tree
Showing 6 changed files with 94 additions and 53 deletions.
35 changes: 18 additions & 17 deletions docker_api.cpp
Expand Up @@ -36,7 +36,7 @@
// Docker daemon query url: %s is replaced with the container ID
const char* query_from_id = "http://localhost/containers/%s/json";
// Cache where to store the queries results, keyed by cgroup/container ID.
// Initialized lazily; nullptr means the cache has not been created yet.
std::unordered_map<std::string, struct cache_entry*> *gQueryCache = nullptr;


/* ************************************************* */
Expand All @@ -47,7 +47,7 @@ void docker_api_init () {
}

void clean_cache_entry (struct cache_entry *e) {
if (e->value != nullptr)
if (e->value != NULL)
free(e->value);

free(e);
Expand All @@ -56,13 +56,13 @@ void clean_cache_entry (struct cache_entry *e) {
/*
 * docker_api_clean - releases every cached entry and destroys the cache.
 * Safe to call when the cache was never initialized (gQueryCache == nullptr).
 */
void docker_api_clean () {
  std::unordered_map<std::string, struct cache_entry*>::iterator it;

  if (gQueryCache == nullptr) return;

  // Free each entry's payload before destroying the map itself
  for (it = gQueryCache->begin(); it != gQueryCache->end(); it++)
    clean_cache_entry(it->second);

  delete gQueryCache;
  gQueryCache = nullptr; // mark as uninitialized so the cache can be re-created
}


Expand Down Expand Up @@ -176,14 +176,14 @@ int update_query_cache (char* t_cgroupid, struct cache_entry **t_dqr) {
CURL *curl_handle;
CURLcode res;
char url[101];
struct ResponseBuffer chunk;
cache_entry *dqr;
std::string cgroupid(t_cgroupid);
// Crafting query
snprintf(url, sizeof(url), query_from_id, t_cgroupid);

// Performing query ----- //
// Initializing memory buffer
struct ResponseBuffer chunk;
// Initializing memory buffer
chunk.memory = (char*) malloc(1);
chunk.size = 0;
// Preparing libcurl
Expand Down Expand Up @@ -230,12 +230,13 @@ int update_query_cache (char* t_cgroupid, struct cache_entry **t_dqr) {
* returns -1 if the query has not been cached 0 if some info are available
* 1 for dummy keys
*/
int docker_id_cached (std::string t_cgroupid, struct cache_entry **t_dqs) {
int docker_id_cached (char* t_cgroupid, struct cache_entry **t_dqs) {
std::string cgroupid(t_cgroupid);
std::unordered_map<std::string, struct cache_entry*>::iterator res;
res = gQueryCache->find(t_cgroupid);
res = gQueryCache->find(cgroupid);

if (res != gQueryCache->end()) {
if (res->second->value != nullptr) {
if (res->second->value != NULL) {
res->second->accessed += 1;
*t_dqs = res->second;
return 0;
Expand All @@ -247,16 +248,17 @@ int docker_id_cached (std::string t_cgroupid, struct cache_entry **t_dqs) {


void clean_cache () {
struct cache_entry *entry;
std::vector<std::string> markedentries;
std::vector<std::string>::iterator marked_it;
std::unordered_map<std::string, struct cache_entry*>::iterator it;

if (gQueryCache==nullptr) return;
if (gQueryCache==NULL) return;

// Getting entries accessed less than MINTOCLEAN times
for (it=gQueryCache->begin(); it!=gQueryCache->end(); it++) {
struct cache_entry *entry = it->second;
if (entry->accessed < MINTOCLEAN || entry->value==nullptr) {
entry = it->second;
if (entry->accessed < MINTOCLEAN || entry->value==NULL) {
markedentries.push_back(it->first);
clean_cache_entry(it->second);
}
Expand All @@ -276,11 +278,10 @@ void clean_cache () {
/* ******************************* */
int docker_id_get (char* t_cgroupid, docker_api **t_dqr) {
cache_entry* qr;
std::string cgroupid(t_cgroupid);
int res;

static time_t last = time(nullptr);
time_t now = time(nullptr);
static time_t last = time(NULL);
time_t now = time(NULL);

if (difftime(now, last) > CLEAN_INTERVAL /* Seconds */ ) {
clean_cache();
last = now;
Expand All @@ -290,7 +291,7 @@ int docker_id_get (char* t_cgroupid, docker_api **t_dqr) {
return -1;
}

res = docker_id_cached(cgroupid, &qr);
res = docker_id_cached(t_cgroupid, &qr);
if (res == 1) {
return -1;
}
Expand Down
26 changes: 17 additions & 9 deletions docker_api.h
Expand Up @@ -30,7 +30,7 @@
// Every time the cache is cleaned, every entry
// with less than MINTOCLEAN accesses will be removed
#define MINTOCLEAN 50
// Cache clean interval in seconds
// Cache cleaning interval in seconds
#define CLEAN_INTERVAL 30


Expand Down Expand Up @@ -98,24 +98,32 @@ int update_query_cache (char* t_cgroupid, struct cache_entry **t_dqr);
/* *********************************** */
/*
* docker_id_cached - check if containers info have been cached
* returns -1 if the query has not been cached 0 otherwise
* and if some info are available stores them in *t_dqs
* @t_cgroupid: docker container ID
* @t_dqs: will point to the cache entry if no error occurs (returns != -1)
* returns 0 if the cache contains information concerning the container
 * -1 if there is no entry corresponding to the ID provided. 1 if
 * there is an entry associated with the ID but no
 * information is available
*/
int docker_id_cached (std::string t_cgroupid, struct cache_entry **t_dqs);
int docker_id_cached (char *t_cgroupid, struct cache_entry **t_dqs);

/*
 * Clean the cache from queries far back in time, removing entries
 * accessed fewer than MINTOCLEAN times and dummy entries left by failed queries
*/
void clean_cache ();

/* ******************************* */
// ===== ===== QUERIES ===== ===== //
/* ******************************* */
/*
* docker_id_getname - set a pointer to the associated information
* @t_cgroupid: full length container identifier
* @t_buff: t_dqr docker api query data structure
* returns 1 if kubernetes informations have been found, -1 if no
* information has been found, otherwise zero
* docker_id_get - sets a pointer to the associated information
* @t_cgroupid: docker container ID
 * @t_dqs: will point to the container information if no error occurs (returns != -1)
 * returns >=0 if some information has been found, 1 if
 * Kubernetes information has been gathered. Returns -1 if no
 * information is available
*/
int docker_id_get (char* t_cgroupid, docker_api **t_dqr);

Expand Down
28 changes: 15 additions & 13 deletions ebpf_flow.cpp
Expand Up @@ -200,7 +200,7 @@ extern "C" {
}

// opening output buffer ----- //
open_res = bpf->open_perf_buffer("ebpf_events", ebpfHandler, nullptr, (void*)priv_ptr);
open_res = bpf->open_perf_buffer("ebpf_events", ebpfHandler, NULL, (void*)priv_ptr);
if(open_res.code() != 0) { *rc = ebpf_events_open_error; goto init_failed; }

*rc = ebpf_no_error;
Expand Down Expand Up @@ -241,9 +241,12 @@ extern "C" {
/* ******************************************* */

void ebpf_preprocess_event(eBPFevent *event, bool docker_flag) {
struct docker_api *container_info;
struct dockerInfo *dinfo;
struct kubeInfo *kinfo;
int id_get_res, l;
char what[256], sym[256] = { '\0' };
char fwhat[256], fsym[256] = { '\0' };
int l;

gettimeofday(&event->event_time, NULL);
check_pid(&event->proc), check_pid(&event->father);
Expand All @@ -270,18 +273,17 @@ extern "C" {
event->docker = NULL, event->kube = NULL;

if(docker_flag) {
struct docker_api *container_info;
int res = docker_id_get(event->cgroup_id, &container_info);
if(res >= 0) /* Docker info available */ {
struct dockerInfo *d = (struct dockerInfo*) malloc(sizeof(struct dockerInfo));
strcpy(d->dname, container_info->docker_name);
event->docker = d;
id_get_res = docker_id_get(event->cgroup_id, &container_info);
if(id_get_res >= 0) /* Docker info available */ {
dinfo = (struct dockerInfo*) malloc(sizeof(struct dockerInfo));
strcpy(dinfo->dname, container_info->docker_name);
event->docker = dinfo;
}
if(res >= 1) /* Kubernetes info available */ {
struct kubeInfo *k = (struct kubeInfo*) malloc(sizeof(struct kubeInfo));
strcpy(k->pod, container_info->kube_pod);
strcpy(k->ns, container_info->kube_namespace);
event->kube = k;
if(id_get_res >= 1) /* Kubernetes info available */ {
kinfo = (struct kubeInfo*) malloc(sizeof(struct kubeInfo));
strcpy(kinfo->pod, container_info->kube_pod);
strcpy(kinfo->ns, container_info->kube_namespace);
event->kube = kinfo;
}
}
}
Expand Down
46 changes: 37 additions & 9 deletions ebpf_flow.h
Expand Up @@ -29,7 +29,7 @@

/* ******************************************* */

#define COMMAND_LEN 17
#define COMMAND_LEN 16
#define CGROUP_ID_LEN 64

/*
Expand All @@ -40,6 +40,7 @@
* II_digit (=0): tcp events
* (=1): udp events
* III_digit: discriminate the single event
* The type is reported in eBPFevent->etype
*/
typedef enum {
eTCP_ACPT = 100,
Expand Down Expand Up @@ -70,20 +71,20 @@ struct ipv6_kernel_data {
};

/* Container information gathered from the Docker daemon */
struct dockerInfo {
  char dname[100]; // Docker container name
};

/* Kubernetes information associated with a container */
struct kubeInfo {
  char pod[60]; // Kubernetes pod
  char ns[60];  // Kubernetes namespace
};

typedef struct {
__u64 ktime;
char ifname[IFNAMSIZ];
struct timeval event_time;
__u64 ktime; // Absolute kernel time
char ifname[IFNAMSIZ]; // net-dev name
struct timeval event_time; // Event time, filled during event preprocessing
__u8 ip_version:4, sent_packet:4;
__u16 etype;
__u16 etype; // event type, supported events are listed in event_type enum

union {
struct ipv4_kernel_data v4;
Expand All @@ -97,7 +98,8 @@ typedef struct {

struct taskInfo proc, father;

char cgroup_id[CGROUP_ID_LEN];
char cgroup_id[CGROUP_ID_LEN]; // Docker identifier
// Both next fields are initializated to NULL and populated only during preprocessing
struct dockerInfo *docker;
struct kubeInfo *kube;
} eBPFevent;
Expand All @@ -111,6 +113,10 @@ typedef enum {
ebpf_events_open_error,
} ebpfRetCode;

/*
 * Supported flags to filter events when initializing libebpfflow.
 * Combinations of these flags allow capturing only subsets of events
 */
typedef enum {
LIBEBPF_TCP = 1 << 0,
LIBEBPF_UDP = 1 << 1,
Expand All @@ -128,13 +134,35 @@ extern "C" {
#endif // __cplusplus

typedef void (*eBPFHandler)(void* t_bpfctx, void* t_data, int t_datasize);


/*
* init_ebpf_flow - Initializes the library with a target event handler
* @flags: restrict the number of events to generate by
* not tracing certain functions. Use default (i.e. 0xFFFF) to capture
* all events. Supported events are combinations of libebpflow_flag enum type
 * returns a pointer to an ebpf::BPF object upon success, NULL otherwise
*/
void* init_ebpf_flow(void *priv_ptr, eBPFHandler ebpfHandler,
ebpfRetCode *rc, u_int16_t flags=0xffff /* Use 0xFFFF for default */);
/*
* term_ebpf_flow - Cleans the resources used by the library
* @ebpfHook: a pointer to an ebpf::BPF, that is the one returned by init_ebpf_flow
*/
void term_ebpf_flow(void *ebpfHook);
/*
* ebpf_poll_event - Pools an event from an ebpf::BPF object
*/
void ebpf_poll_event(void *ebpfHook, u_int ms_timeout);
/*
* Collect further information wrt the one contained in an eBPF event
* @docker_flag: if 1 docker daemon will be queried to gather information
* concerning containers
*/
void ebpf_preprocess_event(eBPFevent *event, bool docker_flag);
const char* ebpf_print_error(ebpfRetCode rc);
/*
* Cleans the resources used by an eBPFevent data structure
*/
void ebpf_free_event(eBPFevent *event);
const char* ebpf_flow_version();

Expand Down
2 changes: 1 addition & 1 deletion ebpflow.ebpf
Expand Up @@ -50,7 +50,7 @@ BPF_LRU_HASH3(retr_table, u32, struct retr_entry);

#define CGROUP_ID_LEN 64 // max is in limits.h -> NAME_MAX

#define COMMAND_LEN TASK_COMM_LEN+1 // 16, defined in sched.h
#define COMMAND_LEN 16 // defined in sched.h

/*
* Events types are forged as follows:
Expand Down
10 changes: 6 additions & 4 deletions toolebpflow.cpp
Expand Up @@ -72,15 +72,15 @@ static const struct option long_opts[] = {
};

int main(int argc, char **argv) {
int ch;
short flags = 0;
ebpfRetCode rc;
void *ebpf;
void (*handler)(void*, void*, int) = ebpfHandler;

signal(SIGINT, handleTermination);

// Argument Parsing ----- //
int ch;
short flags = 0;
gDOCKER_ENABLE=1;
while ((ch = getopt_long(argc, argv,
"rcutiodvh",
Expand Down Expand Up @@ -252,10 +252,12 @@ void event_summary (eBPFevent* e, char* t_buffer, int t_size) {

static void verboseHandleEvent(void* t_bpfctx, void* t_data, int t_datasize) {
char event_type_str[17];
struct ipv4_kernel_data *ipv4_event;
struct ipv6_kernel_data *ipv6_event;
eBPFevent *e = (eBPFevent*)t_data;
eBPFevent event;

// Preprocessing event ----- //
eBPFevent event;
// Copy needed as ebpf_preprocess_event will modify the memory
memcpy(&event, e, sizeof(eBPFevent));
ebpf_preprocess_event(&event, gDOCKER_ENABLE);
Expand All @@ -281,7 +283,7 @@ static void verboseHandleEvent(void* t_bpfctx, void* t_data, int t_datasize) {
event_summary(&event, event_type_str, sizeof(event_type_str));
if (event.ip_version == 4) {
// IPv4 Event type
struct ipv4_kernel_data *ipv4_event = &event.event.v4;
ipv4_event = &event.event.v4;
// IPv4 Network info
char buf1[32], buf2[32];

Expand Down

0 comments on commit fba9db9

Please sign in to comment.