/
non-cluster.zeek
84 lines (72 loc) · 1.81 KB
/
non-cluster.zeek
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
@load ./main
module SumStats;
event SumStats::process_epoch_result(ss: SumStat, now: time, data: ResultTable)
	{
	# Drain the epoch's results in chunks so that a very large result
	# table doesn't stall the event loop in a single callback.
	# TODO: is this the right processing group size?
	local remaining = 50;
	local handled: vector of SumStats::Key = vector();

	for ( key, res in data )
		{
		ss$epoch_result(now, key, res);
		handled += key;

		remaining -= 1;
		if ( remaining == 0 )
			break;
		}

	# Remove the keys we just processed. Deleting inside the loop
	# above would modify the table while iterating over it.
	for ( idx in handled )
		delete data[handled[idx]];

	if ( |data| == 0 )
		{
		if ( ss?$epoch_finished )
			ss$epoch_finished(now);
		}
	else
		# Still more keys left: come back shortly for the next chunk.
		# TODO: is this the right interval?
		schedule 0.01 secs { SumStats::process_epoch_result(ss, now, data) };
	}
event SumStats::finish_epoch(ss: SumStat)
	{
	if ( ss$name in result_store )
		{
		if ( ss?$epoch_result )
			{
			local now = network_time();
			local data = result_store[ss$name];

			if ( zeek_is_terminating() )
				{
				# Shutting down: deliver everything synchronously
				# since scheduled events won't fire anymore.
				for ( key, val in data )
					ss$epoch_result(now, key, val);

				if ( ss?$epoch_finished )
					ss$epoch_finished(now);
				}
			else if ( |data| == 0 )
				{
				# Nothing collected this epoch; just signal completion.
				if ( ss?$epoch_finished )
					ss$epoch_finished(now);
				}
			else
				# Hand a copy off for incremental, chunked delivery.
				event SumStats::process_epoch_result(ss, now, copy(data));
			}

		# We can reset here because we know that the reference
		# to the data will be maintained by the process_epoch_result
		# event.
		reset(ss);
		}

	# Re-arm the epoch timer unless epochs are disabled.
	if ( ss$epoch != 0secs )
		schedule ss$epoch { SumStats::finish_epoch(ss) };
	}
function data_added(ss: SumStat, key: Key, result: Result)
	{
	# A new observation arrived for this key; check whether it now
	# crosses a threshold. The full result is locally available in
	# non-cluster mode, so the modification factor is 1.0.
	if ( ! check_thresholds(ss, key, result, 1.0) )
		return;

	threshold_crossed(ss, key, result);
	}
function request_key(ss_name: string, key: Key): Result
	{
	# This only needs to be implemented this way for cluster compatibility:
	# the cluster version is asynchronous, so we keep the same when-based
	# shape here even though the lookup is immediate.
	return when [ss_name, key] ( T )
		{
		if ( ss_name !in result_store || key !in result_store[ss_name] )
			return table();

		return result_store[ss_name][key];
		}
	}