Skip to content

Commit

Permalink
Merge PR #1507 (Release 2023-06: Davion) into master
Browse files Browse the repository at this point in the history
  • Loading branch information
eugeneia committed Jun 26, 2023
2 parents c7bbb51 + 5fc329b commit de5aaa3
Show file tree
Hide file tree
Showing 11 changed files with 457 additions and 60 deletions.
2 changes: 2 additions & 0 deletions src/apps/ipfix/ipfix-information-elements-local.inc
Expand Up @@ -13,3 +13,5 @@ this is the name of the first non-CNAME answer record or the name of the last CN
2946:110,dnsAnswerRdata,octetArray,"On-the-wire encoding of the answer record's rdata section. For well-known record types,
compressed domain names have been replaced with their uncompressed counterparts",,,,,,,,
2946:111,dnsAnswerRdataLen,unsigned16,,,,,,,,,
39499:338,tlsSNI,string,DNS name from the TLS Server Name Indication extension,,,,,,,,
39499:339,tlsSNILength,unsigned16,Length of tlsSNI in bytes,,,,,,,,
51 changes: 36 additions & 15 deletions src/apps/ipfix/ipfix.lua
Expand Up @@ -120,6 +120,17 @@ end

FlowSet = {}

-- Instantiate every map listed in template.require_maps and attach it
-- to template.maps.  maps_in maps a map name to its source file; it is
-- an error (assert) for a required map to be absent from maps_in.
local function create_maps(template, maps_in)
   for _, map_name in ipairs(template.require_maps) do
      local map_file = maps_in[map_name]
      assert(map_file,
             string.format("Template #%d: required map %s "
                              .."not configured", template.id, map_name))
      template.maps[map_name] = maps.mk_map(map_name, map_file,
                                            nil, template.maps_log_fh,
                                            template.logger)
   end
end

function FlowSet:new (spec, args)
local t = {}
for s in spec:split(':') do
Expand All @@ -143,13 +154,9 @@ function FlowSet:new (spec, args)
.." IPFIX template #"..template.id })
template.name = template_name
template.maps = {}
for _, name in ipairs(template.require_maps) do
assert(args.maps[name],
string.format("Template #%d: required map %s "
.."not configured", template.id, name))
template.maps[name] = maps.mk_map(name, args.maps[name],
nil, args.maps_log_fh)
end
template.maps_log_fh = args.maps_logfile and
assert(io.open(args.maps_logfile, "a")) or nil
create_maps(template, args.maps)

assert(args.active_timeout > args.scan_time,
string.format("Template #%d: active timeout (%d) "
Expand Down Expand Up @@ -437,8 +444,8 @@ function FlowSet:suppress_flow(flow_entry, timestamp)
local fps = aggr.flow_count/interval
local drop_interval = (timestamp - aggr.tstamp_drop_start)/1000
if (fps >= config.threshold_rate) then
local aggr_ppf = aggr.packets/aggr.flow_count
local aggr_bpp = aggr.octets/aggr.packets
local aggr_ppf = aggr.packets/aggr.flow_count
local aggr_bpp = aggr.octets/aggr.packets
if aggr.suppress == 0 then
self.template.logger:log(
string.format("Flow rate threshold exceeded from %s: "..
Expand Down Expand Up @@ -481,9 +488,9 @@ function FlowSet:suppress_flow(flow_entry, timestamp)
flow_entry.value.octetDeltaCount
end
if config.drop and aggr.suppress == 1 then
-- NB: this rate-limiter applies to flows from *all*
-- aggregates, while the threshold rate applies to each
-- aggregate individually.
-- NB: this rate-limiter applies to flows from *all*
-- aggregates, while the threshold rate applies to each
-- aggregate individually.
if self.sp.export_rate_tb:take(1) then
aggr.exports = aggr.exports + 1
return false
Expand Down Expand Up @@ -712,16 +719,27 @@ function IPFIX:reconfig(config)
flush_timeout = config.flush_timeout,
parent = self,
maps = config.maps,
maps_log_fh = config.maps_logfile and
assert(io.open(config.maps_logfile, "a")) or nil,
maps_logfile = config.maps_logfile,
instance = config.instance,
log_date = config.log_date }

-- Eventually, we'd like to perform reconfiguration with as little
-- impact as possible. In particular, we want to avoid
-- re-allocation of the flow table unnecessarily. For now, we only
-- deal with the case when any of the mapping files changes since
-- this is fairly common and is easy to do on-the-fly.
local flow_set_args_changed = not lib.equal(self.flow_set_args, flow_set_args)
local flow_set_args_changed_basic = flow_set_args_changed
if self.flow_set_args and flow_set_args_changed then
local save = flow_set_args.maps
flow_set_args.maps = self.flow_set_args.maps
flow_set_args_changed_basic = not lib.equal(self.flow_set_args, flow_set_args)
flow_set_args.maps = save
end
self.flow_set_args = flow_set_args

for i, template in ipairs(self.templates) do
if template ~= config.templates[i] or flow_set_args_changed then
if template ~= config.templates[i] or flow_set_args_changed_basic then
self.flow_sets[i] = nil
end
end
Expand All @@ -733,6 +751,9 @@ function IPFIX:reconfig(config)
else
self.logger:log("Added template "..self.flow_sets[i]:id())
end
elseif flow_set_args_changed then
create_maps(self.flow_sets[i].template, config.maps)
self.logger:log("Updated maps for template "..self.flow_sets[i]:id())
else
self.logger:log("Kept template "..self.flow_sets[i]:id())
end
Expand Down
89 changes: 59 additions & 30 deletions src/apps/ipfix/maps.lua
Expand Up @@ -8,6 +8,7 @@ local ipv4 = require("lib.protocol.ipv4")
local ipv6 = require("lib.protocol.ipv6")
local poptrie = require("lib.poptrie")
local logger = require("lib.logger")
local S = require("syscall")

-- Map MAC addresses to peer AS number
--
Expand All @@ -17,7 +18,7 @@ local logger = require("lib.logger")
local mac_to_as_key_t = ffi.typeof("uint8_t[6]")
local mac_to_as_value_t = ffi.typeof("uint32_t")

local function make_mac_to_as_map(name)
local function make_mac_to_as_map(name, template_logger)
local table = ctable.new({ key_type = mac_to_as_key_t,
value_type = mac_to_as_value_t,
initial_size = 15000,
Expand All @@ -26,16 +27,19 @@ local function make_mac_to_as_map(name)
local value = mac_to_as_value_t()
for line in assert(io.lines(name)) do
local as, mac = line:match("^%s*(%d*)-([0-9a-fA-F:]*)")
assert(as and mac, "MAC-to-AS map: invalid line: "..line)
local key, value = ethernet:pton(mac), tonumber(as)
local result = table:lookup_ptr(key)
if result then
if result.value ~= value then
print("MAC-to-AS map: amibguous mapping: "
..ethernet:ntop(key)..": "..result.value..", "..value)
end
if not (as and mac) then
template_logger:log("MAC-to-AS map: invalid line: "..line)
else
local key, value = ethernet:pton(mac), tonumber(as)
local result = table:lookup_ptr(key)
if result then
if result.value ~= value then
template_logger:log("MAC-to-AS map: amibguous mapping: "
..ethernet:ntop(key)..": "..result.value..", "..value)
end
end
table:add(key, value, true)
end
table:add(key, value, true)
end
return table
end
Expand All @@ -52,16 +56,18 @@ end
-- elements is relevant, depending on the direction of the flow. File
-- format:
-- <TAG>-<ingress>-<egress>
local function make_vlan_to_ifindex_map(name)
-- Build a VLAN-to-ifIndex mapping from the file <name>.  Each line has
-- the form <TAG>-<ingress>-<egress>; the result maps tonumber(TAG) to
-- a { ingress = ..., egress = ... } pair.  Malformed lines are logged
-- through template_logger and skipped.
local function make_vlan_to_ifindex_map(name, template_logger)
   local map = {}
   for line in assert(io.lines(name)) do
      local vlan, ingress, egress = line:match("^(%d+)-(%d+)-(%d+)$")
      if vlan and ingress and egress then
         map[tonumber(vlan)] = {
            ingress = tonumber(ingress),
            egress = tonumber(egress)
         }
      else
         template_logger:log("VLAN-to-IFIndex map: invalid line: "..line)
      end
   end
   return map
end
Expand All @@ -74,28 +80,40 @@ end
-- authoritative data from the RIRs. This parser supports the format
-- used by the Geo2Lite database provided by MaxMind:
-- http://geolite.maxmind.com/download/geoip/database/GeoLite2-ASN-CSV.zip
local function make_pfx_to_as_map(name, proto)
local function make_pfx_to_as_map(name, proto, template_logger)
local table = { pt = poptrie.new{direct_pointing=true,
leaf_t=ffi.typeof("uint32_t")} }
local max_plen
if proto == ipv4 then
function table:search_bytes (a)
return self.pt:lookup32(a)
end
max_plen = 32
elseif proto == ipv6 then
function table:search_bytes (a)
return self.pt:lookup128(a)
end
max_plen = 128
else
error("Proto must be ipv4 or ipv6")
end
for line in assert(io.lines(name)) do
if not line:match("^network") then
local cidr, asn = line:match("([^,]*),(%d+),")
asn = tonumber(asn)
assert(cidr and asn, "Prefix-to-AS map: invalid line: "..line)
assert(asn > 0 and asn < 2^32, "Prefix-to-AS map: asn out of range: "..asn)
local pfx, len = proto:pton_cidr(cidr)
table.pt:add(pfx, len, asn)
if not (cidr and asn) then
print(cidr, asn)
template_logger:log("Prefix-to-AS map: invalid line: "..line)
elseif not (asn > 0 and asn < 2^32) then
template_logger:log("Prefix-to-AS map: asn out of range: "..line)
else
local pfx, len = proto:pton_cidr(cidr)
if pfx and len <= max_plen then
table.pt:add(pfx, len, asn)
else
template_logger:log("Prefix-to-AS map: invalid address: "..line)
end
end
end
end
table.pt:build()
Expand All @@ -112,25 +130,36 @@ local map_info = {
logger_module = 'VLAN to ifIndex mapper'
},
pfx4_to_as = {
create_fn = function (name) return make_pfx_to_as_map(name, ipv4) end,
create_fn = function (name, tmpl_logger)
return make_pfx_to_as_map(name, ipv4, tmpl_logger)
end,
logger_module = 'IPv4 prefix to AS mapper'
},
pfx6_to_as = {
create_fn = function (name) return make_pfx_to_as_map(name, ipv6) end,
create_fn = function (name, tmpl_logger)
return make_pfx_to_as_map(name, ipv6, tmpl_logger)
end,
logger_module = 'IPv6 prefix to AS mapper'
}
}

local maps = {}

function mk_map(name, file, log_rate, log_fh)
function mk_map(name, file, log_rate, log_fh, template_logger)
local info = assert(map_info[name])
local map = maps[name]
if not map then
map = info.create_fn(file)
maps[name] = map
local stat = assert(S.stat(file))
local map_cache = maps[name]
if not map_cache or map_cache.ctime ~= stat.ctime then
map_cache = {
map = info.create_fn(file, template_logger),
ctime = stat.ctime
}
maps[name] = map_cache
template_logger:log("Created "..name.." map from "..file)
else
template_logger:log("Using cache for map "..name)
end
local map = { map = map }
local map = { map = map_cache.map }
if log_fh then
map.logger = logger.new({ rate = log_rate or 0.05,
fh = log_fh,
Expand Down
56 changes: 56 additions & 0 deletions src/apps/ipfix/template.lua
Expand Up @@ -15,6 +15,7 @@ local ipv6 = require("lib.protocol.ipv6")
local metadata = require("apps.rss.metadata")
local strings = require("apps.ipfix.strings")
local dns = require("apps.ipfix.dns")
local tls = require("apps.ipfix.tls")
local S = require("syscall")

local ntohs = lib.ntohs
Expand Down Expand Up @@ -498,6 +499,21 @@ local function HTTP_accumulate(self, dst, new, pkt)
end
end

-- Return a fresh table of HTTPS-specific statistics counters for a
-- flow set, all initialized to zero.
local function HTTPS_counters()
   local counters = {}
   counters.HTTPS_client_hellos = 0
   counters.HTTPS_extensions_present = 0
   counters.HTTPS_snis = 0
   return counters
end

-- Fold a new partial flow record into an existing HTTPS flow entry:
-- merge the generic per-flow counters and the reduced TCP flags, then
-- pass the packet to the TLS module.  NOTE(review): tls.accumulate
-- presumably scans for a TLS client hello, records the SNI in
-- dst.value and updates the HTTPS_* statistics counters on self --
-- confirm against apps.ipfix.tls.
local function HTTPS_accumulate(self, dst, new, pkt)
   accumulate_generic(dst, new)
   accumulate_tcp_flags_reduced(dst, new)
   tls.accumulate(self, dst.value, pkt)
end

local function DNS_extract(self, pkt, timestamp, entry, extract_addr_fn)
local md = metadata_get(pkt)
extract_5_tuple(pkt, timestamp, entry, md, extract_addr_fn)
Expand Down Expand Up @@ -720,6 +736,26 @@ templates = {
end,
accumulate = DNS_accumulate
},
-- IPv4 HTTPS/TLS flow template, keyed on the 5-tuple.  Besides the
-- usual flow counters it exports the TLS Server Name Indication of the
-- session and its length.  NOTE(review): "tlsSNI=64" presumably fixes
-- the exported field at 64 bytes -- confirm against the template field
-- parser.
v4_HTTPS = {
id = 259,
-- Standard (443) and common alternative (8443) HTTPS ports.
filter = "ip and tcp and (dst port 443 or dst port 8443)",
aggregation_type = 'v4',
keys = { "sourceIPv4Address",
"destinationIPv4Address",
"protocolIdentifier",
"sourceTransportPort",
"destinationTransportPort" },
values = { "flowStartMilliseconds",
"flowEndMilliseconds",
"packetDeltaCount",
"octetDeltaCount",
"tcpControlBitsReduced",
"tlsSNI=64",
"tlsSNILength"},
counters = HTTPS_counters(),
extract = v4_extract,
accumulate = HTTPS_accumulate,
},
v4_extended = {
id = 1256,
filter = "ip",
Expand Down Expand Up @@ -839,6 +875,26 @@ templates = {
end,
accumulate = DNS_accumulate
},
-- IPv6 HTTPS/TLS flow template, keyed on the 5-tuple; IPv6 counterpart
-- of the v4 HTTPS template, exporting the TLS Server Name Indication
-- and its length.  NOTE(review): "tlsSNI=64" presumably fixes the
-- exported field at 64 bytes -- confirm against the template field
-- parser.
v6_HTTPS = {
id = 515,
-- Standard (443) and common alternative (8443) HTTPS ports.
filter = "ip6 and tcp and (dst port 443 or dst port 8443)",
aggregation_type = 'v6',
keys = { "sourceIPv6Address",
"destinationIPv6Address",
"protocolIdentifier",
"sourceTransportPort",
"destinationTransportPort" },
values = { "flowStartMilliseconds",
"flowEndMilliseconds",
"packetDeltaCount",
"octetDeltaCount",
"tcpControlBitsReduced",
"tlsSNI=64",
"tlsSNILength"},
counters = HTTPS_counters(),
extract = v6_extract,
accumulate = HTTPS_accumulate,
},
v6_extended = {
id = 1512,
filter = "ip6",
Expand Down

0 comments on commit de5aaa3

Please sign in to comment.