From 55470602d1459ae6312fdec26b85c1c4d8da8d6c Mon Sep 17 00:00:00 2001
From: Steffen Siering
Date: Thu, 3 Nov 2016 14:19:16 +0100
Subject: [PATCH] Simple Packetbeat only lint cleanups (#2928)

* lint rename packetbeat/config
* lint rename protos/mysql
* lint rename protos/amqp
* lint rename protos/applayer
* lint rename protos/cassandra
* lint rename protos/http
* lint rename protos/icmp
* lint rename protos/mongodb
* lint rename protos/pgsql
* lint rename protos/redis
* lint rename protos/thrift
* lint rename protocol plugin methods
* lint rename packetbeat/decoder
* lint rename packetbeat/procs
* lint rename packetbeat/sniffer
* lint rename protos plugin types
* lint rename protos/udp
* lint rename protos/tcp
* lint rename protos/memcache
* lint rename protos/nfs
* lint rename protos/dns
* lint rename protos.go
* replace `+= 1` with `++`
* More minor packetbeat lint cleanups
---
 packetbeat/beater/packetbeat.go | 8 +-
 packetbeat/config/config.go | 22 +-
 packetbeat/decoder/decoder_test.go | 30 +-
 packetbeat/procs/procs.go | 111 ++++----
 packetbeat/procs/procs_test.go | 55 ++--
 packetbeat/protos/amqp/amqp.go | 71 +++--
 packetbeat/protos/amqp/amqp_fields.go | 18 +-
 packetbeat/protos/amqp/amqp_parser.go | 77 +++--
 packetbeat/protos/amqp/amqp_structs.go | 14 +-
 packetbeat/protos/amqp/amqp_test.go | 32 +--
 packetbeat/protos/applayer/applayer.go | 10 +-
 packetbeat/protos/cassandra/cassandra.go | 11 +-
 packetbeat/protos/cassandra/config.go | 15 +-
 .../protos/cassandra/internal/gocql/frame.go | 4 +-
 .../cassandra/internal/gocql/marshal.go | 8 +-
 .../internal/gocql/stream_decoder.go | 6 +-
 packetbeat/protos/cassandra/parser.go | 14 +-
 packetbeat/protos/cassandra/trans.go | 2 +-
 packetbeat/protos/dns/config.go | 4 +-
 packetbeat/protos/dns/dns.go | 213 +++++++-------
 packetbeat/protos/dns/dns_tcp.go | 82 +++---
 packetbeat/protos/dns/dns_tcp_test.go | 262 +++++++++---------
 packetbeat/protos/dns/dns_test.go | 44 +--
 packetbeat/protos/dns/dns_udp.go | 14 +-
 packetbeat/protos/dns/dns_udp_test.go | 162 +++++------
 packetbeat/protos/dns/errors.go | 4 +-
 packetbeat/protos/dns/names.go | 3 +-
 packetbeat/protos/dns/names_test.go | 18 +-
 packetbeat/protos/http/config.go | 16 +-
 packetbeat/protos/http/http.go | 26 +-
 packetbeat/protos/http/http_test.go | 22 +-
 packetbeat/protos/icmp/icmp.go | 28 +-
 packetbeat/protos/icmp/icmp_test.go | 12 +-
 packetbeat/protos/icmp/message.go | 6 +-
 packetbeat/protos/icmp/message_test.go | 16 +-
 packetbeat/protos/icmp/transaction.go | 3 +-
 packetbeat/protos/icmp/tuple.go | 20 +-
 packetbeat/protos/icmp/tuple_test.go | 36 +--
 packetbeat/protos/memcache/binary.go | 22 +-
 packetbeat/protos/memcache/config.go | 4 +-
 packetbeat/protos/memcache/errors.go | 2 +-
 packetbeat/protos/memcache/memcache.go | 65 ++---
 packetbeat/protos/memcache/memcache_test.go | 4 +-
 packetbeat/protos/memcache/parse.go | 9 +-
 packetbeat/protos/memcache/parse_test.go | 2 +-
 packetbeat/protos/memcache/plugin_tcp.go | 11 +-
 packetbeat/protos/memcache/plugin_udp.go | 68 ++---
 packetbeat/protos/memcache/plugin_udp_test.go | 14 +-
 packetbeat/protos/memcache/text.go | 49 ++--
 packetbeat/protos/mongodb/mongodb.go | 32 +--
 packetbeat/protos/mongodb/mongodb_parser.go | 8 +-
 .../protos/mongodb/mongodb_parser_test.go | 2 +-
 packetbeat/protos/mongodb/mongodb_structs.go | 4 +-
 packetbeat/protos/mongodb/mongodb_test.go | 30 +-
 packetbeat/protos/mysql/mysql.go | 261 +++++++++--------
 packetbeat/protos/mysql/mysql_test.go | 44 +--
 packetbeat/protos/nfs/nfs.go | 10 +-
 packetbeat/protos/nfs/nfs3.go | 11 +-
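Most of the churn in this patch applies the standard Go lint naming rules: initialisms such as IP, ID, TCP, UDP, HTTP and DNS keep a uniform case, and underscores are dropped from Go identifiers. A minimal sketch of the resulting style, using the SocketInfo struct from packetbeat/procs as it reads after the rename; the countOpen helper is invented here purely to illustrate the `+= 1` to `++` item from the commit message and is not part of the patch:

package procs // illustrative sketch only; mirrors packetbeat/procs after the rename

import "net"

// SocketInfo fields were previously named Src_ip, Dst_ip, Src_port, Dst_port
// and Uid; golint asks for SrcIP, DstIP, SrcPort, DstPort and UID instead.
type SocketInfo struct {
	SrcIP, DstIP     net.IP
	SrcPort, DstPort uint16
	UID              uint32
	Inode            uint64
}

// countOpen is a made-up helper showing the increment-statement style:
// `n++` replaces the `n += 1` form flagged by the linter.
func countOpen(socks []*SocketInfo) int {
	n := 0
	for range socks {
		n++
	}
	return n
}

The same pattern accounts for most of the renames in the diffstat: Tcp becomes TCP, Udp becomes UDP, Dns becomes DNS, Id becomes ID, and so on, with no behavioral change intended.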
packetbeat/protos/nfs/nfs4.go | 242 ++++++++-------- packetbeat/protos/nfs/nfs_status.go | 2 +- packetbeat/protos/nfs/request_handler.go | 47 ++-- packetbeat/protos/nfs/rpc.go | 62 ++--- packetbeat/protos/nfs/xdr_test.go | 6 +- packetbeat/protos/pgsql/parse.go | 36 +-- packetbeat/protos/pgsql/pgsql.go | 55 ++-- packetbeat/protos/pgsql/pgsql_test.go | 18 +- packetbeat/protos/protos.go | 70 ++--- packetbeat/protos/protos_test.go | 78 +++--- packetbeat/protos/redis/redis.go | 14 +- packetbeat/protos/redis/redis_parse.go | 18 +- packetbeat/protos/registry.go | 8 +- packetbeat/protos/tcp/tcp.go | 70 ++--- packetbeat/protos/tcp/tcp_test.go | 52 ++-- packetbeat/protos/thrift/thrift.go | 96 +++---- packetbeat/protos/thrift/thrift_idl.go | 22 +- packetbeat/protos/thrift/thrift_idl_test.go | 6 +- packetbeat/protos/thrift/thrift_test.go | 184 ++++++------ packetbeat/protos/udp/udp.go | 26 +- packetbeat/protos/udp/udp_test.go | 38 +-- packetbeat/publish/publish.go | 52 ++-- packetbeat/sniffer/afpacket_nonlinux.go | 2 +- packetbeat/sniffer/sniffer.go | 48 ++-- packetbeat/sniffer/sniffer_test.go | 36 +-- 83 files changed, 1714 insertions(+), 1725 deletions(-) diff --git a/packetbeat/beater/packetbeat.go b/packetbeat/beater/packetbeat.go index 4454974473c..cd9f33b041b 100644 --- a/packetbeat/beater/packetbeat.go +++ b/packetbeat/beater/packetbeat.go @@ -189,10 +189,10 @@ func (pb *Packetbeat) Stop() { func (pb *Packetbeat) setupSniffer() error { config := &pb.Config - withVlans := config.Interfaces.With_vlans + withVlans := config.Interfaces.WithVlans withICMP := config.Protocols["icmp"].Enabled() - filter := config.Interfaces.Bpf_filter + filter := config.Interfaces.BpfFilter if filter == "" && !config.Flows.IsEnabled() { filter = protos.Protos.BpfFilter(withVlans, withICMP) } @@ -225,12 +225,12 @@ func (pb *Packetbeat) createWorker(dl layers.LinkType) (sniffer.Worker, error) { icmp6 = icmp } - tcp, err := tcp.NewTcp(&protos.Protos) + tcp, err := tcp.NewTCP(&protos.Protos) if err != nil { return nil, err } - udp, err := udp.NewUdp(&protos.Protos) + udp, err := udp.NewUDP(&protos.Protos) if err != nil { return nil, err } diff --git a/packetbeat/config/config.go b/packetbeat/config/config.go index 12c7f120d39..977e4e1a621 100644 --- a/packetbeat/config/config.go +++ b/packetbeat/config/config.go @@ -18,17 +18,17 @@ type Config struct { } type InterfacesConfig struct { - Device string - Type string - File string - With_vlans bool - Bpf_filter string - Snaplen int - Buffer_size_mb int - TopSpeed bool - Dumpfile string - OneAtATime bool - Loop int + Device string + Type string + File string + WithVlans bool + BpfFilter string + Snaplen int + BufferSizeMb int + TopSpeed bool + Dumpfile string + OneAtATime bool + Loop int } type Flows struct { diff --git a/packetbeat/decoder/decoder_test.go b/packetbeat/decoder/decoder_test.go index 546a0895ab4..6021e5ccdc6 100644 --- a/packetbeat/decoder/decoder_test.go +++ b/packetbeat/decoder/decoder_test.go @@ -35,26 +35,26 @@ func (l *TestIcmp6Processor) ProcessICMPv6(id *flows.FlowID, icmp6 *layers.ICMPv l.pkt = pkt } -type TestTcpProcessor struct { +type TestTCPProcessor struct { tcphdr *layers.TCP pkt *protos.Packet } -func (l *TestTcpProcessor) Process(id *flows.FlowID, tcphdr *layers.TCP, pkt *protos.Packet) { +func (l *TestTCPProcessor) Process(id *flows.FlowID, tcphdr *layers.TCP, pkt *protos.Packet) { l.tcphdr = tcphdr l.pkt = pkt } -type TestUdpProcessor struct { +type TestUDPProcessor struct { pkt *protos.Packet } -func (l *TestUdpProcessor) Process(id 
*flows.FlowID, pkt *protos.Packet) { +func (l *TestUDPProcessor) Process(id *flows.FlowID, pkt *protos.Packet) { l.pkt = pkt } // 172.16.16.164:1108 172.16.16.139:53 DNS 87 Standard query 0x0007 AXFR contoso.local -var ipv4TcpDns = []byte{ +var ipv4TcpDNS = []byte{ 0x00, 0x0c, 0x29, 0xce, 0xd1, 0x9e, 0x00, 0x0c, 0x29, 0x7e, 0xec, 0xa4, 0x08, 0x00, 0x45, 0x00, 0x00, 0x49, 0x46, 0x54, 0x40, 0x00, 0x80, 0x06, 0x3b, 0x0b, 0xac, 0x10, 0x10, 0xa4, 0xac, 0x10, 0x10, 0x8b, 0x04, 0x54, 0x00, 0x35, 0x5d, 0x9f, 0x0c, 0x90, 0x1a, 0xef, 0x6f, 0x43, 0x50, 0x18, @@ -69,7 +69,7 @@ func TestDecodePacketData_ipv4Tcp(t *testing.T) { logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"decoder"}) } - p := gopacket.NewPacket(ipv4TcpDns, layers.LinkTypeEthernet, gopacket.Default) + p := gopacket.NewPacket(ipv4TcpDNS, layers.LinkTypeEthernet, gopacket.Default) if p.ErrorLayer() != nil { t.Error("Failed to decode packet:", p.ErrorLayer().Error()) } @@ -85,7 +85,7 @@ func TestDecodePacketData_ipv4Tcp(t *testing.T) { } // 192.168.170.8:32795 192.168.170.20:53 DNS 74 Standard query 0x75c0 A www.netbsd.org -var ipv4UdpDns = []byte{ +var ipv4UdpDNS = []byte{ 0x00, 0xc0, 0x9f, 0x32, 0x41, 0x8c, 0x00, 0xe0, 0x18, 0xb1, 0x0c, 0xad, 0x08, 0x00, 0x45, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, 0x65, 0x43, 0xc0, 0xa8, 0xaa, 0x08, 0xc0, 0xa8, 0xaa, 0x14, 0x80, 0x1b, 0x00, 0x35, 0x00, 0x28, 0xaf, 0x61, 0x75, 0xc0, 0x01, 0x00, 0x00, 0x01, @@ -95,7 +95,7 @@ var ipv4UdpDns = []byte{ // Test that DecodePacket decodes and IPv4/UDP packet and invokes the UDP processor. func TestDecodePacketData_ipv4Udp(t *testing.T) { - p := gopacket.NewPacket(ipv4UdpDns, layers.LinkTypeEthernet, gopacket.Default) + p := gopacket.NewPacket(ipv4UdpDNS, layers.LinkTypeEthernet, gopacket.Default) if p.ErrorLayer() != nil { t.Error("Failed to decode packet:", p.ErrorLayer().Error()) } @@ -111,7 +111,7 @@ func TestDecodePacketData_ipv4Udp(t *testing.T) { } // IP6 2001:6f8:102d::2d0:9ff:fee3:e8de.59201 > 2001:6f8:900:7c0::2.80 -var ipv6TcpHttpGet = []byte{ +var ipv6TcpHTTPGet = []byte{ 0x00, 0x11, 0x25, 0x82, 0x95, 0xb5, 0x00, 0xd0, 0x09, 0xe3, 0xe8, 0xde, 0x86, 0xdd, 0x60, 0x00, 0x00, 0x00, 0x01, 0x04, 0x06, 0x40, 0x20, 0x01, 0x06, 0xf8, 0x10, 0x2d, 0x00, 0x00, 0x02, 0xd0, 0x09, 0xff, 0xfe, 0xe3, 0xe8, 0xde, 0x20, 0x01, 0x06, 0xf8, 0x09, 0x00, 0x07, 0xc0, 0x00, 0x00, @@ -136,7 +136,7 @@ var ipv6TcpHttpGet = []byte{ // Test that DecodePacket decodes and IPv6/TCP packet and invokes the TCP processor. func TestDecodePacketData_ipv6Tcp(t *testing.T) { - p := gopacket.NewPacket(ipv6TcpHttpGet, layers.LinkTypeEthernet, gopacket.Default) + p := gopacket.NewPacket(ipv6TcpHTTPGet, layers.LinkTypeEthernet, gopacket.Default) if p.ErrorLayer() != nil { t.Error("Failed to decode packet: ", p.ErrorLayer().Error()) } @@ -152,7 +152,7 @@ func TestDecodePacketData_ipv6Tcp(t *testing.T) { } // 3ffe:507:0:1:200:86ff:fe05:80da.2415 > 3ffe:501:4819::42.53 -var ipv6UdpDns = []byte{ +var ipv6UdpDNS = []byte{ 0x00, 0x60, 0x97, 0x07, 0x69, 0xea, 0x00, 0x00, 0x86, 0x05, 0x80, 0xda, 0x86, 0xdd, 0x60, 0x00, 0x00, 0x00, 0x00, 0x61, 0x11, 0x40, 0x3f, 0xfe, 0x05, 0x07, 0x00, 0x00, 0x00, 0x01, 0x02, 0x00, 0x86, 0xff, 0xfe, 0x05, 0x80, 0xda, 0x3f, 0xfe, 0x05, 0x01, 0x48, 0x19, 0x00, 0x00, 0x00, 0x00, @@ -167,7 +167,7 @@ var ipv6UdpDns = []byte{ // Test that DecodePacket decodes and IPv6/UDP packet and invokes the UDP processor. 
func TestDecodePacketData_ipv6Udp(t *testing.T) { - p := gopacket.NewPacket(ipv6UdpDns, layers.LinkTypeEthernet, gopacket.Default) + p := gopacket.NewPacket(ipv6UdpDNS, layers.LinkTypeEthernet, gopacket.Default) if p.ErrorLayer() != nil { t.Error("Failed to decode packet:", p.ErrorLayer().Error()) } @@ -183,11 +183,11 @@ func TestDecodePacketData_ipv6Udp(t *testing.T) { } // Creates a new TestDecoder that handles ethernet packets. -func newTestDecoder(t *testing.T) (*DecoderStruct, *TestTcpProcessor, *TestUdpProcessor) { +func newTestDecoder(t *testing.T) (*DecoderStruct, *TestTCPProcessor, *TestUDPProcessor) { icmp4Layer := &TestIcmp4Processor{} icmp6Layer := &TestIcmp6Processor{} - tcpLayer := &TestTcpProcessor{} - udpLayer := &TestUdpProcessor{} + tcpLayer := &TestTCPProcessor{} + udpLayer := &TestUDPProcessor{} d, err := NewDecoder(nil, layers.LinkTypeEthernet, icmp4Layer, icmp6Layer, tcpLayer, udpLayer) if err != nil { t.Fatalf("Error creating decoder %v", err) diff --git a/packetbeat/procs/procs.go b/packetbeat/procs/procs.go index 2f2695b9df9..a45e58e6ada 100644 --- a/packetbeat/procs/procs.go +++ b/packetbeat/procs/procs.go @@ -20,10 +20,10 @@ import ( ) type SocketInfo struct { - Src_ip, Dst_ip net.IP - Src_port, Dst_port uint16 + SrcIP, DstIP net.IP + SrcPort, DstPort uint16 - Uid uint32 + UID uint32 Inode uint64 } @@ -55,27 +55,27 @@ type ProcessesWatcher struct { RefreshPidsFreq time.Duration // test helpers - proc_prefix string + procPrefix string TestSignals *chan bool } type ProcsConfig struct { - Enabled bool `config:"enabled"` - Max_proc_read_freq time.Duration `config:"max_proc_read_freq"` - Monitored []ProcConfig `config:"monitored"` - Refresh_pids_freq time.Duration `config:"refresh_pids_freq"` + Enabled bool `config:"enabled"` + MaxProcReadFreq time.Duration `config:"max_proc_read_freq"` + Monitored []ProcConfig `config:"monitored"` + RefreshPidsFreq time.Duration `config:"refresh_pids_freq"` } type ProcConfig struct { - Process string - Cmdline_grep string + Process string + CmdlineGrep string } var ProcWatcher ProcessesWatcher func (proc *ProcessesWatcher) Init(config ProcsConfig) error { - proc.proc_prefix = "" + proc.procPrefix = "" proc.PortProcMap = make(map[uint16]PortProcMapping) proc.LastMapUpdate = time.Now() @@ -91,16 +91,16 @@ func (proc *ProcessesWatcher) Init(config ProcsConfig) error { logp.Info("Process matching disabled") } - if config.Max_proc_read_freq == 0 { + if config.MaxProcReadFreq == 0 { proc.MaxReadFreq = 10 * time.Millisecond } else { - proc.MaxReadFreq = config.Max_proc_read_freq + proc.MaxReadFreq = config.MaxProcReadFreq } - if config.Refresh_pids_freq == 0 { + if config.RefreshPidsFreq == 0 { proc.RefreshPidsFreq = 1 * time.Second } else { - proc.RefreshPidsFreq = config.Refresh_pids_freq + proc.RefreshPidsFreq = config.RefreshPidsFreq } // Read the local IP addresses @@ -114,7 +114,7 @@ func (proc *ProcessesWatcher) Init(config ProcsConfig) error { if proc.ReadFromProc { for _, procConfig := range config.Monitored { - grepper := procConfig.Cmdline_grep + grepper := procConfig.CmdlineGrep if len(grepper) == 0 { grepper = procConfig.Process } @@ -148,7 +148,7 @@ func (p *Process) RefreshPids() { for range p.RefreshPidsTimer { logp.Debug("procs", "In RefreshPids tick") var err error - p.Pids, err = FindPidsByCmdlineGrep(p.proc.proc_prefix, p.Grepper) + p.Pids, err = FindPidsByCmdlineGrep(p.proc.procPrefix, p.Grepper) if err != nil { logp.Err("Error finding PID files for %s: %s", p.Name, err) } @@ -194,26 +194,26 @@ func 
FindPidsByCmdlineGrep(prefix string, process string) ([]int, error) { return pids, nil } -func (proc *ProcessesWatcher) FindProcessesTuple(tuple *common.IPPortTuple) (proc_tuple *common.CmdlineTuple) { - proc_tuple = &common.CmdlineTuple{} +func (proc *ProcessesWatcher) FindProcessesTuple(tuple *common.IPPortTuple) (procTuple *common.CmdlineTuple) { + procTuple = &common.CmdlineTuple{} if !proc.ReadFromProc { return } - if proc.IsLocalIp(tuple.SrcIP) { + if proc.IsLocalIP(tuple.SrcIP) { logp.Debug("procs", "Looking for port %d", tuple.SrcPort) - proc_tuple.Src = []byte(proc.FindProc(tuple.SrcPort)) - if len(proc_tuple.Src) > 0 { - logp.Debug("procs", "Found device %s for port %d", proc_tuple.Src, tuple.SrcPort) + procTuple.Src = []byte(proc.FindProc(tuple.SrcPort)) + if len(procTuple.Src) > 0 { + logp.Debug("procs", "Found device %s for port %d", procTuple.Src, tuple.SrcPort) } } - if proc.IsLocalIp(tuple.DstIP) { + if proc.IsLocalIP(tuple.DstIP) { logp.Debug("procs", "Looking for port %d", tuple.DstPort) - proc_tuple.Dst = []byte(proc.FindProc(tuple.DstPort)) - if len(proc_tuple.Dst) > 0 { - logp.Debug("procs", "Found device %s for port %d", proc_tuple.Dst, tuple.DstPort) + procTuple.Dst = []byte(proc.FindProc(tuple.DstPort)) + if len(procTuple.Dst) > 0 { + logp.Debug("procs", "Found device %s for port %d", procTuple.Dst, tuple.DstPort) } } @@ -245,7 +245,7 @@ func (proc *ProcessesWatcher) FindProc(port uint16) (procname string) { return "" } -func hex_to_ipv4(word string) (net.IP, error) { +func hexToIpv4(word string) (net.IP, error) { ip, err := strconv.ParseInt(word, 16, 64) if err != nil { return nil, err @@ -253,7 +253,7 @@ func hex_to_ipv4(word string) (net.IP, error) { return net.IPv4(byte(ip), byte(ip>>8), byte(ip>>16), byte(ip>>24)), nil } -func hex_to_ipv6(word string) (net.IP, error) { +func hexToIpv6(word string) (net.IP, error) { p := make(net.IP, net.IPv6len) for i := 0; i < 4; i++ { part, err := strconv.ParseInt(word[i*8:(i+1)*8], 16, 32) @@ -268,21 +268,20 @@ func hex_to_ipv6(word string) (net.IP, error) { return p, nil } -func hex_to_ip(word string, ipv6 bool) (net.IP, error) { +func hexToIP(word string, ipv6 bool) (net.IP, error) { if ipv6 { - return hex_to_ipv6(word) - } else { - return hex_to_ipv4(word) + return hexToIpv6(word) } + return hexToIpv4(word) } -func hex_to_ip_port(str []byte, ipv6 bool) (net.IP, uint16, error) { +func hexToIPPort(str []byte, ipv6 bool) (net.IP, uint16, error) { words := bytes.Split(str, []byte(":")) if len(words) < 2 { return nil, 0, errors.New("Didn't find ':' as a separator") } - ip, err := hex_to_ip(string(words[0]), ipv6) + ip, err := hexToIP(string(words[0]), ipv6) if err != nil { return nil, 0, err } @@ -298,36 +297,36 @@ func hex_to_ip_port(str []byte, ipv6 bool) (net.IP, uint16, error) { func (proc *ProcessesWatcher) UpdateMap() { logp.Debug("procs", "UpdateMap()") - ipv4socks, err := sockets_From_Proc("/proc/net/tcp", false) + ipv4socks, err := socketsFromProc("/proc/net/tcp", false) if err != nil { logp.Err("Parse_Proc_Net_Tcp: %s", err) return } - ipv6socks, err := sockets_From_Proc("/proc/net/tcp6", true) + ipv6socks, err := socketsFromProc("/proc/net/tcp6", true) if err != nil { logp.Err("Parse_Proc_Net_Tcp ipv6: %s", err) return } - socks_map := map[uint64]*SocketInfo{} + socksMap := map[uint64]*SocketInfo{} for _, s := range ipv4socks { - socks_map[s.Inode] = s + socksMap[s.Inode] = s } for _, s := range ipv6socks { - socks_map[s.Inode] = s + socksMap[s.Inode] = s } for _, p := range proc.Processes { for _, pid := range 
p.Pids { - inodes, err := FindSocketsOfPid(proc.proc_prefix, pid) + inodes, err := FindSocketsOfPid(proc.procPrefix, pid) if err != nil { logp.Err("FindSocketsOfPid: %s", err) continue } for _, inode := range inodes { - sockInfo, exists := socks_map[inode] + sockInfo, exists := socksMap[inode] if exists { - proc.UpdateMappingEntry(sockInfo.Src_port, pid, p) + proc.UpdateMappingEntry(sockInfo.SrcPort, pid, p) } } @@ -336,21 +335,21 @@ func (proc *ProcessesWatcher) UpdateMap() { } -func sockets_From_Proc(filename string, ipv6 bool) ([]*SocketInfo, error) { +func socketsFromProc(filename string, ipv6 bool) ([]*SocketInfo, error) { file, err := os.Open("/proc/net/tcp") if err != nil { return nil, err } defer file.Close() - return Parse_Proc_Net_Tcp(file, false) + return ParseProcNetTCP(file, false) } // Parses the /proc/net/tcp file -func Parse_Proc_Net_Tcp(input io.Reader, ipv6 bool) ([]*SocketInfo, error) { +func ParseProcNetTCP(input io.Reader, ipv6 bool) ([]*SocketInfo, error) { buf := bufio.NewReader(input) sockets := []*SocketInfo{} - var err error = nil + var err error var line []byte for err != io.EOF { line, err = buf.ReadBytes('\n') @@ -365,22 +364,22 @@ func Parse_Proc_Net_Tcp(input io.Reader, ipv6 bool) ([]*SocketInfo, error) { } var sock SocketInfo - var err_ error + var err error - sock.Src_ip, sock.Src_port, err_ = hex_to_ip_port(words[1], ipv6) - if err_ != nil { - logp.Debug("procs", "Error parsing IP and port: %s", err_) + sock.SrcIP, sock.SrcPort, err = hexToIPPort(words[1], ipv6) + if err != nil { + logp.Debug("procs", "Error parsing IP and port: %s", err) continue } - sock.Dst_ip, sock.Dst_port, err_ = hex_to_ip_port(words[2], ipv6) - if err_ != nil { - logp.Debug("procs", "Error parsing IP and port: %s", err_) + sock.DstIP, sock.DstPort, err = hexToIPPort(words[2], ipv6) + if err != nil { + logp.Debug("procs", "Error parsing IP and port: %s", err) continue } uid, _ := strconv.Atoi(string(words[7])) - sock.Uid = uint32(uid) + sock.UID = uint32(uid) inode, _ := strconv.Atoi(string(words[9])) sock.Inode = uint64(inode) @@ -435,7 +434,7 @@ func FindSocketsOfPid(prefix string, pid int) (inodes []uint64, err error) { return inodes, nil } -func (proc *ProcessesWatcher) IsLocalIp(ip net.IP) bool { +func (proc *ProcessesWatcher) IsLocalIP(ip net.IP) bool { if ip.IsLoopback() { return true diff --git a/packetbeat/procs/procs_test.go b/packetbeat/procs/procs_test.go index e37c7aac0e6..85eed28d756 100644 --- a/packetbeat/procs/procs_test.go +++ b/packetbeat/procs/procs_test.go @@ -3,7 +3,6 @@ package procs import ( - "fmt" "io/ioutil" "os" "path/filepath" @@ -56,7 +55,7 @@ func AssertIntArraysAreEqual(t *testing.T, expected []int, result []int) bool { } } if !found { - t.Error(fmt.Sprintf("Expected array %v but got %v", expected, result)) + t.Errorf("Expected array %v but got %v", expected, result) return false } } @@ -73,7 +72,7 @@ func AssertUint64ArraysAreEqual(t *testing.T, expected []uint64, result []uint64 } } if !found { - t.Error(fmt.Sprintf("Expected array %v but got %v", expected, result)) + t.Errorf("Expected array %v but got %v", expected, result) return false } } @@ -95,20 +94,20 @@ func TestFindPidsByCmdlineGrep(t *testing.T) { } // Create fake proc file system - path_prefix, err := ioutil.TempDir("/tmp", "") + pathPrefix, err := ioutil.TempDir("/tmp", "") if err != nil { t.Error("TempDir failed:", err) return } - defer os.RemoveAll(path_prefix) + defer os.RemoveAll(pathPrefix) - err = CreateFakeDirectoryStructure(path_prefix, proc) + err = 
CreateFakeDirectoryStructure(pathPrefix, proc) if err != nil { t.Error("CreateFakeDirectoryStructure failed:", err) return } - pids, err := FindPidsByCmdlineGrep(path_prefix, "nginx") + pids, err := FindPidsByCmdlineGrep(pathPrefix, "nginx") if err != nil { t.Error("FindPidsByCmdline:", err) return @@ -130,23 +129,25 @@ func TestRefreshPids(t *testing.T) { } // Create fake proc file system - path_prefix, err := ioutil.TempDir("/tmp", "") + pathPrefix, err := ioutil.TempDir("/tmp", "") if err != nil { t.Error("TempDir failed:", err) return } - defer os.RemoveAll(path_prefix) + defer os.RemoveAll(pathPrefix) - err = CreateFakeDirectoryStructure(path_prefix, proc) + err = CreateFakeDirectoryStructure(pathPrefix, proc) if err != nil { t.Error("CreateFakeDirectoryStructure failed:", err) return } testSignals := make(chan bool) - var procs ProcessesWatcher = ProcessesWatcher{proc_prefix: path_prefix, - TestSignals: &testSignals} - var ch chan time.Time = make(chan time.Time) + procs := ProcessesWatcher{ + procPrefix: pathPrefix, + TestSignals: &testSignals, + } + ch := make(chan time.Time) p, err := NewProcess(&procs, "nginx", "nginx", (<-chan time.Time)(ch)) if err != nil { @@ -160,8 +161,8 @@ func TestRefreshPids(t *testing.T) { AssertIntArraysAreEqual(t, []int{766, 768, 769}, p.Pids) // Add new process - os.MkdirAll(filepath.Join(path_prefix, "/proc/780"), 0755) - ioutil.WriteFile(filepath.Join(path_prefix, "/proc/780/cmdline"), + os.MkdirAll(filepath.Join(pathPrefix, "/proc/780"), 0755) + ioutil.WriteFile(filepath.Join(pathPrefix, "/proc/780/cmdline"), []byte("nginx whatever"), 0644) ch <- time.Now() @@ -186,20 +187,20 @@ func TestFindSocketsOfPid(t *testing.T) { } // Create fake proc file system - path_prefix, err := ioutil.TempDir("/tmp", "") + pathPrefix, err := ioutil.TempDir("/tmp", "") if err != nil { t.Error("TempDir failed:", err) return } - defer os.RemoveAll(path_prefix) + defer os.RemoveAll(pathPrefix) - err = CreateFakeDirectoryStructure(path_prefix, proc) + err = CreateFakeDirectoryStructure(pathPrefix, proc) if err != nil { t.Error("CreateFakeDirectoryStructure failed:", err) return } - inodes, err := FindSocketsOfPid(path_prefix, 766) + inodes, err := FindSocketsOfPid(pathPrefix, 766) if err != nil { t.Fatalf("FindSocketsOfPid: %s", err) } @@ -212,17 +213,17 @@ func TestParse_Proc_Net_Tcp(t *testing.T) { if err != nil { t.Fatalf("Opening ../tests/files/proc_net_tcp.txt: %s", err) } - socketInfo, err := Parse_Proc_Net_Tcp(file, false) + socketInfo, err := ParseProcNetTCP(file, false) if err != nil { t.Fatalf("Parse_Proc_Net_Tcp: %s", err) } if len(socketInfo) != 32 { t.Error("expected socket information on 32 sockets but got", len(socketInfo)) } - if socketInfo[31].Src_ip.String() != "192.168.2.243" { + if socketInfo[31].SrcIP.String() != "192.168.2.243" { t.Error("Failed to parse source IP address 192.168.2.243") } - if socketInfo[31].Src_port != 41622 { + if socketInfo[31].SrcPort != 41622 { t.Error("Failed to parse source port 41622") } } @@ -232,18 +233,18 @@ func TestParse_Proc_Net_Tcp6(t *testing.T) { if err != nil { t.Fatalf("Opening ../tests/files/proc_net_tcp6.txt: %s", err) } - socketInfo, err := Parse_Proc_Net_Tcp(file, true) + socketInfo, err := ParseProcNetTCP(file, true) if err != nil { t.Fatalf("Parse_Proc_Net_Tcp: %s", err) } if len(socketInfo) != 6 { t.Error("expected socket information on 6 sockets but got", len(socketInfo)) } - if socketInfo[5].Src_ip.String() != "::" { - t.Error("Failed to parse source IP address ::, got instead", 
socketInfo[5].Src_ip.String()) + if socketInfo[5].SrcIP.String() != "::" { + t.Error("Failed to parse source IP address ::, got instead", socketInfo[5].SrcIP.String()) } // TODO add an example of a 'real' IPv6 address - if socketInfo[5].Src_port != 59497 { - t.Error("Failed to parse source port 59497, got instead", socketInfo[5].Src_port) + if socketInfo[5].SrcPort != 59497 { + t.Error("Failed to parse source port 59497, got instead", socketInfo[5].SrcPort) } } diff --git a/packetbeat/protos/amqp/amqp.go b/packetbeat/protos/amqp/amqp.go index b066d8daa09..b0a7cafbc90 100644 --- a/packetbeat/protos/amqp/amqp.go +++ b/packetbeat/protos/amqp/amqp.go @@ -195,7 +195,7 @@ func (amqp *Amqp) Parse(pkt *protos.Packet, tcptuple *common.TCPTuple, } else { // concatenate databytes priv.Data[dir].data = append(priv.Data[dir].data, pkt.Payload...) - if len(priv.Data[dir].data) > tcp.TCP_MAX_DATA_IN_STREAM { + if len(priv.Data[dir].data) > tcp.TCPMaxDataInStream { debugf("Stream data too large, dropping TCP stream") priv.Data[dir] = nil return priv @@ -237,7 +237,7 @@ func (amqp *Amqp) ReceivedFin(tcptuple *common.TCPTuple, dir uint8, func (amqp *Amqp) handleAmqpRequest(msg *AmqpMessage) { // Add it to the HT - tuple := msg.TcpTuple + tuple := msg.TCPTuple trans := amqp.getTransaction(tuple.Hashable()) if trans != nil { @@ -254,16 +254,16 @@ func (amqp *Amqp) handleAmqpRequest(msg *AmqpMessage) { trans.Ts = trans.ts.UnixNano() / 1000 trans.JsTs = msg.Ts trans.Src = common.Endpoint{ - IP: msg.TcpTuple.SrcIP.String(), - Port: msg.TcpTuple.SrcPort, + IP: msg.TCPTuple.SrcIP.String(), + Port: msg.TCPTuple.SrcPort, Proc: string(msg.CmdlineTuple.Src), } trans.Dst = common.Endpoint{ - IP: msg.TcpTuple.DstIP.String(), - Port: msg.TcpTuple.DstPort, + IP: msg.TCPTuple.DstIP.String(), + Port: msg.TCPTuple.DstPort, Proc: string(msg.CmdlineTuple.Dst), } - if msg.Direction == tcp.TcpDirectionReverse { + if msg.Direction == tcp.TCPDirectionReverse { trans.Src, trans.Dst = trans.Dst, trans.Src } @@ -275,7 +275,7 @@ func (amqp *Amqp) handleAmqpRequest(msg *AmqpMessage) { trans.Request = msg.Method } //length = message + 4 bytes header + frame end octet - trans.BytesIn = msg.Body_size + 12 + trans.BytesIn = msg.BodySize + 12 if msg.Fields != nil { trans.Amqp = msg.Fields } else { @@ -299,7 +299,7 @@ func (amqp *Amqp) handleAmqpRequest(msg *AmqpMessage) { } func (amqp *Amqp) handleAmqpResponse(msg *AmqpMessage) { - tuple := msg.TcpTuple + tuple := msg.TCPTuple trans := amqp.getTransaction(tuple.Hashable()) if trans == nil || trans.Amqp == nil { debugf("Response from unknown transaction. 
Ignoring.") @@ -308,7 +308,7 @@ func (amqp *Amqp) handleAmqpResponse(msg *AmqpMessage) { } //length = message + 4 bytes class/method + frame end octet + header - trans.BytesOut = msg.Body_size + 12 + trans.BytesOut = msg.BodySize + 12 //merge the both fields from request and response trans.Amqp.Update(msg.Fields) trans.Response = common.OK_STATUS @@ -348,34 +348,34 @@ func (amqp *Amqp) expireTransaction(trans *AmqpTransaction) { //process, the method, header and body frames are regrouped in one transaction func (amqp *Amqp) handlePublishing(client *AmqpMessage) { - tuple := client.TcpTuple + tuple := client.TCPTuple trans := amqp.getTransaction(tuple.Hashable()) if trans == nil { trans = &AmqpTransaction{Type: "amqp", tuple: tuple} - amqp.transactions.Put(client.TcpTuple.Hashable(), trans) + amqp.transactions.Put(client.TCPTuple.Hashable(), trans) } trans.ts = client.Ts trans.Ts = client.Ts.UnixNano() / 1000 trans.JsTs = client.Ts trans.Src = common.Endpoint{ - IP: client.TcpTuple.SrcIP.String(), - Port: client.TcpTuple.SrcPort, + IP: client.TCPTuple.SrcIP.String(), + Port: client.TCPTuple.SrcPort, Proc: string(client.CmdlineTuple.Src), } trans.Dst = common.Endpoint{ - IP: client.TcpTuple.DstIP.String(), - Port: client.TcpTuple.DstPort, + IP: client.TCPTuple.DstIP.String(), + Port: client.TCPTuple.DstPort, Proc: string(client.CmdlineTuple.Dst), } trans.Method = client.Method //for publishing and delivering, bytes in and out represent the length of the //message itself - trans.BytesIn = client.Body_size + trans.BytesIn = client.BodySize - if client.Body_size > uint64(amqp.MaxBodyLength) { + if client.BodySize > uint64(amqp.MaxBodyLength) { trans.Body = client.Body[:amqp.MaxBodyLength] } else { trans.Body = client.Body @@ -395,33 +395,33 @@ func (amqp *Amqp) handlePublishing(client *AmqpMessage) { //body frames are regrouped in one transaction func (amqp *Amqp) handleDelivering(server *AmqpMessage) { - tuple := server.TcpTuple + tuple := server.TCPTuple trans := amqp.getTransaction(tuple.Hashable()) if trans == nil { trans = &AmqpTransaction{Type: "amqp", tuple: tuple} - amqp.transactions.Put(server.TcpTuple.Hashable(), trans) + amqp.transactions.Put(server.TCPTuple.Hashable(), trans) } trans.ts = server.Ts trans.Ts = server.Ts.UnixNano() / 1000 trans.JsTs = server.Ts trans.Src = common.Endpoint{ - IP: server.TcpTuple.SrcIP.String(), - Port: server.TcpTuple.SrcPort, + IP: server.TCPTuple.SrcIP.String(), + Port: server.TCPTuple.SrcPort, Proc: string(server.CmdlineTuple.Src), } trans.Dst = common.Endpoint{ - IP: server.TcpTuple.DstIP.String(), - Port: server.TcpTuple.DstPort, + IP: server.TCPTuple.DstIP.String(), + Port: server.TCPTuple.DstPort, Proc: string(server.CmdlineTuple.Dst), } //for publishing and delivering, bytes in and out represent the length of the //message itself - trans.BytesOut = server.Body_size + trans.BytesOut = server.BodySize - if server.Body_size > uint64(amqp.MaxBodyLength) { + if server.BodySize > uint64(amqp.MaxBodyLength) { trans.Body = server.Body[:amqp.MaxBodyLength] } else { trans.Body = server.Body @@ -514,17 +514,16 @@ func (amqp *Amqp) publishTransaction(t *AmqpTransaction) { func isAsynchronous(trans *AmqpTransaction) bool { if val, ok := trans.Amqp["no-wait"]; ok && val == true { return true - } else { - return trans.Method == "basic.reject" || - trans.Method == "basic.ack" || - trans.Method == "basic.nack" } + + return trans.Method == "basic.reject" || + trans.Method == "basic.ack" || + trans.Method == "basic.nack" } //function to convert a body slice into 
a readable format func bodyToString(data []byte) string { - var ret []string = make([]string, len(data)) - + ret := make([]string, len(data)) for i, c := range data { ret[i] = strconv.Itoa(int(c)) } @@ -565,10 +564,6 @@ func isCloseError(t *AmqpTransaction) bool { } func getReplyCode(m common.MapStr) uint16 { - code, ok := m["reply-code"].(uint16) - if !ok { - return 0 - } else { - return code - } + code, _ := m["reply-code"].(uint16) + return code } diff --git a/packetbeat/protos/amqp/amqp_fields.go b/packetbeat/protos/amqp/amqp_fields.go index 8ab4034a5d2..42222b882e3 100644 --- a/packetbeat/protos/amqp/amqp_fields.go +++ b/packetbeat/protos/amqp/amqp_fields.go @@ -2,12 +2,13 @@ package amqp import ( "encoding/binary" - "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/libbeat/logp" "math" "strconv" "strings" "time" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" ) func getTable(fields common.MapStr, data []byte, offset uint32) (next uint32, err bool, exists bool) { @@ -71,16 +72,16 @@ func fieldUnmarshal(table common.MapStr, data []byte, offset uint32, length uint //get name of the field. If it's an array, it will be the index parameter as a //string. If it's a table, it will be the name of the field. if index < 0 { - field_name, offset_temp, err := getShortString(data, offset+1, uint32(data[offset])) + fieldName, offsetTemp, err := getShortString(data, offset+1, uint32(data[offset])) if err { logp.Warn("Failed to get short string in table") return true } - name = field_name - offset = offset_temp + name = fieldName + offset = offsetTemp } else { name = strconv.Itoa(index) - index += 1 + index++ } switch data[offset] { @@ -175,7 +176,7 @@ func fieldUnmarshal(table common.MapStr, data []byte, offset uint32, length uint offset = next case noField: table[name] = nil - offset += 1 + offset++ case byteArray: size := binary.BigEndian.Uint32(data[offset+1 : offset+5]) table[name] = bodyToByteArray(data[offset+1+size : offset+5+size]) @@ -190,8 +191,7 @@ func fieldUnmarshal(table common.MapStr, data []byte, offset uint32, length uint // function to convert a body slice into a byte array func bodyToByteArray(data []byte) string { - var ret []string = make([]string, len(data)) - + ret := make([]string, len(data)) for i, c := range data { ret[i] = strconv.Itoa(int(c)) } diff --git a/packetbeat/protos/amqp/amqp_parser.go b/packetbeat/protos/amqp/amqp_parser.go index ee1e6c5028b..bf7c1ec043b 100644 --- a/packetbeat/protos/amqp/amqp_parser.go +++ b/packetbeat/protos/amqp/amqp_parser.go @@ -58,8 +58,8 @@ func (amqp *Amqp) amqpMessageParser(s *AmqpStream) (ok bool, complete bool) { return ok, complete } -func (stream *AmqpStream) PrepareForNewMessage() { - stream.message = nil +func (s *AmqpStream) PrepareForNewMessage() { + s.message = nil } func isProtocolHeader(data []byte) (isHeader bool, version string) { @@ -103,25 +103,26 @@ The Method Payload, according to official doc : short short ... 
*/ -func (amqp *Amqp) decodeMethodFrame(s *AmqpStream, m_data []byte) (bool, bool) { - if len(m_data) < 4 { +func (amqp *Amqp) decodeMethodFrame(s *AmqpStream, buf []byte) (bool, bool) { + if len(buf) < 4 { logp.Warn("Method frame too small, waiting for more data") return true, false } - class := codeClass(binary.BigEndian.Uint16(m_data[0:2])) - method := codeMethod(binary.BigEndian.Uint16(m_data[2:4])) - arguments := m_data[4:] + class := codeClass(binary.BigEndian.Uint16(buf[0:2])) + method := codeMethod(binary.BigEndian.Uint16(buf[2:4])) + arguments := buf[4:] s.message.ParseArguments = amqp.ParseArguments - s.message.Body_size = uint64(len(m_data[4:])) + s.message.BodySize = uint64(len(buf[4:])) debugf("Received frame of class %d and method %d", class, method) - if function, exists := amqp.MethodMap[class][method]; exists { - return function(s.message, arguments) - } else { + fn, exists := amqp.MethodMap[class][method] + if !exists { logp.Debug("amqpdetailed", "Received unknown or not supported method") return false, false } + + return fn(s.message, arguments) } /* @@ -133,16 +134,16 @@ Structure of a content header, according to official doc : short short long long short remainder... */ -func (amqp *Amqp) decodeHeaderFrame(s *AmqpStream, h_data []byte) bool { - if len(h_data) < 14 { +func (amqp *Amqp) decodeHeaderFrame(s *AmqpStream, buf []byte) bool { + if len(buf) < 14 { logp.Warn("Header frame too small, waiting for mode data") return true } - s.message.Body_size = binary.BigEndian.Uint64(h_data[4:12]) - debugf("Received Header frame. A message of %d bytes is expected", s.message.Body_size) + s.message.BodySize = binary.BigEndian.Uint64(buf[4:12]) + debugf("Received Header frame. A message of %d bytes is expected", s.message.BodySize) if amqp.ParseHeaders == true { - err := getMessageProperties(s, h_data[12:]) + err := getMessageProperties(s, buf[12:]) if err { return false } @@ -157,17 +158,15 @@ Structure of a body frame, according to official doc : +-----------------------+ +-----------+ */ -func (s *AmqpStream) decodeBodyFrame(b_data []byte) (ok bool, complete bool) { - s.message.Body = append(s.message.Body, b_data...) +func (s *AmqpStream) decodeBodyFrame(buf []byte) (ok bool, complete bool) { + s.message.Body = append(s.message.Body, buf...) debugf("A body frame of %d bytes long has been transmitted", - len(b_data)) + len(buf)) //is the message complete ? 
If yes, let's publish it - if uint64(len(s.message.Body)) < s.message.Body_size { - return true, false - } else { - return true, true - } + + complete = uint64(len(s.message.Body)) >= s.message.BodySize + return true, complete } func hasProperty(prop, flag byte) bool { @@ -227,21 +226,21 @@ func getMessageProperties(s *AmqpStream, data []byte) bool { } else if data[offset] == 2 { m.Fields["delivery-mode"] = "persistent" } - offset += 1 + offset++ } if hasProperty(prop1, priorityProp) { m.Fields["priority"] = data[offset] - offset += 1 + offset++ } - if hasProperty(prop1, correlationIdProp) { - correlationId, next, err := getShortString(data, offset+1, uint32(data[offset])) + if hasProperty(prop1, correlationIDProp) { + correlationID, next, err := getShortString(data, offset+1, uint32(data[offset])) if err { logp.Warn("Failed to get correlation-id in header frame") return true } - m.Fields["correlation-id"] = correlationId + m.Fields["correlation-id"] = correlationID offset = next } @@ -265,13 +264,13 @@ func getMessageProperties(s *AmqpStream, data []byte) bool { offset = next } - if hasProperty(prop2, messageIdProp) { - messageId, next, err := getShortString(data, offset+1, uint32(data[offset])) + if hasProperty(prop2, messageIDProp) { + messageID, next, err := getShortString(data, offset+1, uint32(data[offset])) if err { logp.Warn("Failed to get message id in header frame") return true } - m.Fields["message-id"] = messageId + m.Fields["message-id"] = messageID offset = next } @@ -291,23 +290,23 @@ func getMessageProperties(s *AmqpStream, data []byte) bool { offset = next } - if hasProperty(prop2, userIdProp) { - userId, next, err := getShortString(data, offset+1, uint32(data[offset])) + if hasProperty(prop2, userIDProp) { + userID, next, err := getShortString(data, offset+1, uint32(data[offset])) if err { logp.Warn("Failed to get user id in header frame") return true } - m.Fields["user-id"] = userId + m.Fields["user-id"] = userID offset = next } - if hasProperty(prop2, appIdProp) { - appId, _, err := getShortString(data, offset+1, uint32(data[offset])) + if hasProperty(prop2, appIDProp) { + appID, _, err := getShortString(data, offset+1, uint32(data[offset])) if err { logp.Warn("Failed to get app-id in header frame") return true } - m.Fields["app-id"] = appId + m.Fields["app-id"] = appID } return false } @@ -317,7 +316,7 @@ func (amqp *Amqp) handleAmqp(m *AmqpMessage, tcptuple *common.TCPTuple, dir uint return } debugf("A message is ready to be handled") - m.TcpTuple = *tcptuple + m.TCPTuple = *tcptuple m.Direction = dir m.CmdlineTuple = procs.ProcWatcher.FindProcessesTuple(tcptuple.IPPort()) diff --git a/packetbeat/protos/amqp/amqp_structs.go b/packetbeat/protos/amqp/amqp_structs.go index a2dc56a153f..b6ba043852a 100644 --- a/packetbeat/protos/amqp/amqp_structs.go +++ b/packetbeat/protos/amqp/amqp_structs.go @@ -120,7 +120,7 @@ const ( const ( expirationProp byte = 1 replyToProp byte = 2 - correlationIdProp byte = 4 + correlationIDProp byte = 4 priorityProp byte = 8 deliveryModeProp byte = 16 headersProp byte = 32 @@ -131,11 +131,11 @@ const ( //Message properties codes for byte prop2 in getMessageProperties const ( - appIdProp byte = 8 - userIdProp byte = 16 + appIDProp byte = 8 + userIDProp byte = 16 typeProp byte = 32 timestampProp byte = 64 - messageIdProp byte = 128 + messageIDProp byte = 128 ) //table types @@ -174,7 +174,7 @@ type AmqpFrame struct { type AmqpMessage struct { Ts time.Time - TcpTuple common.TCPTuple + TCPTuple common.TCPTuple CmdlineTuple *common.CmdlineTuple 
Method string IsRequest bool @@ -185,8 +185,8 @@ type AmqpMessage struct { //mapstr containing all the options for the methods and header fields Fields common.MapStr - Body []byte - Body_size uint64 + Body []byte + BodySize uint64 Notes []string } diff --git a/packetbeat/protos/amqp/amqp_test.go b/packetbeat/protos/amqp/amqp_test.go index 723218339b0..adba57f5d37 100644 --- a/packetbeat/protos/amqp/amqp_test.go +++ b/packetbeat/protos/amqp/amqp_test.go @@ -20,7 +20,7 @@ func AmqpModForTests() *Amqp { return &amqp } -func testTcpTuple() *common.TCPTuple { +func testTCPTuple() *common.TCPTuple { t := &common.TCPTuple{ IPLength: 4, SrcIP: net.IPv4(192, 168, 0, 1), DstIP: net.IPv4(192, 168, 0, 2), @@ -285,7 +285,7 @@ func TestAmqp_ExchangeUnbindTransaction(t *testing.T) { data2, err := hex.DecodeString("0100010000000400280033ce") assert.Nil(t, err) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := protos.Packet{Payload: data} private := protos.ProtocolData(new(amqpPrivateData)) @@ -327,7 +327,7 @@ func TestAmqp_PublishMessage(t *testing.T) { "2049276d20686f6d6520616761696ece") assert.Nil(t, err) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := protos.Packet{Payload: data} private := protos.ProtocolData(new(amqpPrivateData)) @@ -378,7 +378,7 @@ func TestAmqp_DeliverMessage(t *testing.T) { data3, err := hex.DecodeString("030001000000056b696b6f6fce") assert.Nil(t, err) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := protos.Packet{Payload: data} private := protos.ProtocolData(new(amqpPrivateData)) @@ -516,7 +516,7 @@ func TestAmqp_NoWaitQueueDeleteMethod(t *testing.T) { "6f6d617304ce") assert.Nil(t, err) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := protos.Packet{Payload: data} private := protos.ProtocolData(new(amqpPrivateData)) @@ -549,7 +549,7 @@ func TestAmqp_RejectMessage(t *testing.T) { data, err := hex.DecodeString("0100010000000d003c005a000000000000000101ce") assert.Nil(t, err) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := protos.Packet{Payload: data} private := protos.ProtocolData(new(amqpPrivateData)) @@ -584,7 +584,7 @@ func TestAmqp_GetEmptyMethod(t *testing.T) { data2, err := hex.DecodeString("01000100000005003c004800ce") assert.Nil(t, err) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := protos.Packet{Payload: data} private := protos.ProtocolData(new(amqpPrivateData)) @@ -617,7 +617,7 @@ func TestAmqp_GetMethod(t *testing.T) { "f752064617265ce") assert.Nil(t, err) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := protos.Packet{Payload: data} private := protos.ProtocolData(new(amqpPrivateData)) @@ -647,7 +647,7 @@ func TestAmqp_MaxBodyLength(t *testing.T) { "0300010000001649276d2061207665727920626967206d657373616765ce") assert.Nil(t, err) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := protos.Packet{Payload: data} private := protos.ProtocolData(new(amqpPrivateData)) @@ -678,7 +678,7 @@ func TestAmqp_MaxBodyLength(t *testing.T) { "414141414141414141414141414141414141414141414141414141414141ce") assert.Nil(t, err) - tcptuple = testTcpTuple() + tcptuple = testTCPTuple() req = protos.Packet{Payload: data} private = protos.ProtocolData(new(amqpPrivateData)) @@ -719,7 +719,7 @@ func TestAmqp_HideArguments(t *testing.T) { "6572180000003704626f6f6c74010362697462050568656c6c6f530000001f4869206461" + "726c696e6720c3aac3aac3aac3aac3aac3aac3aae697a5e69cacce") assert.Nil(t, err) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := protos.Packet{Payload: 
data} private := protos.ProtocolData(new(amqpPrivateData)) private = amqp.Parse(&req, tcptuple, 0, private) @@ -745,7 +745,7 @@ func TestAmqp_HideArguments(t *testing.T) { "e02060a656c206d656e73616a65ce0300010000001a54657374206865616465722066696" + "56c647320666f7265766572ce") assert.Nil(t, err) - tcptuple = testTcpTuple() + tcptuple = testTCPTuple() req = protos.Packet{Payload: data} private = protos.ProtocolData(new(amqpPrivateData)) amqp.Parse(&req, tcptuple, 0, private) @@ -780,7 +780,7 @@ func TestAmqp_RecoverMethod(t *testing.T) { data2, err := hex.DecodeString("01000100000004003c006fce") assert.Nil(t, err) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := protos.Packet{Payload: data} private := protos.ProtocolData(new(amqpPrivateData)) @@ -1106,7 +1106,7 @@ func TestAmqp_ChannelCloseErrorMethod(t *testing.T) { data2, err := hex.DecodeString("0100010000000400280033ce") assert.Nil(t, err) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := protos.Packet{Payload: data} private := protos.ProtocolData(new(amqpPrivateData)) @@ -1134,7 +1134,7 @@ func TestAmqp_ConnectionCloseNoError(t *testing.T) { data2, err := hex.DecodeString("01000000000004000a0033ce") assert.Nil(t, err) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := protos.Packet{Payload: data} private := protos.ProtocolData(new(amqpPrivateData)) @@ -1170,7 +1170,7 @@ func TestAmqp_MultipleBodyFrames(t *testing.T) { data2, err := hex.DecodeString("0300010000000a657373616765732a2a2ace") assert.Nil(t, err) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := protos.Packet{Payload: data} private := protos.ProtocolData(new(amqpPrivateData)) private = amqp.Parse(&req, tcptuple, 0, private) diff --git a/packetbeat/protos/applayer/applayer.go b/packetbeat/protos/applayer/applayer.go index d451c3c462c..edc26d9d382 100644 --- a/packetbeat/protos/applayer/applayer.go +++ b/packetbeat/protos/applayer/applayer.go @@ -1,4 +1,4 @@ -// The applayer module provides common definitions with common fields +// Package applayer provides common definitions with common fields // for use with application layer protocols among beats. package applayer @@ -25,16 +25,16 @@ const ( type Transport uint8 const ( - TransportUdp Transport = iota - TransportTcp + TransportUDP Transport = iota + TransportTCP ) // String returns the transport type its textual representation. func (t Transport) String() string { switch t { - case TransportUdp: + case TransportUDP: return "udp" - case TransportTcp: + case TransportTCP: return "tcp" default: return "invalid" diff --git a/packetbeat/protos/cassandra/cassandra.go b/packetbeat/protos/cassandra/cassandra.go index 0a9858bec13..ef2f45f5b7d 100644 --- a/packetbeat/protos/cassandra/cassandra.go +++ b/packetbeat/protos/cassandra/cassandra.go @@ -7,9 +7,10 @@ import ( "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/packetbeat/protos" - . 
"github.com/elastic/beats/packetbeat/protos/cassandra/internal/gocql" "github.com/elastic/beats/packetbeat/protos/tcp" "github.com/elastic/beats/packetbeat/publish" + + gocql "github.com/elastic/beats/packetbeat/protos/cassandra/internal/gocql" ) // cassandra application level protocol analyzer plugin @@ -76,18 +77,18 @@ func (cassandra *cassandra) setFromConfig(config *cassandraConfig) error { // set parser configuration parser := &cassandra.parserConfig - parser.maxBytes = tcp.TCP_MAX_DATA_IN_STREAM + parser.maxBytes = tcp.TCPMaxDataInStream // set parser's compressor, only `snappy` supported right now - if config.Compressor == Snappy { - parser.compressor = SnappyCompressor{} + if config.Compressor == gocql.Snappy { + parser.compressor = gocql.SnappyCompressor{} } else { parser.compressor = nil } // parsed ignored ops if len(config.OPsIgnored) > 0 { - maps := map[FrameOp]bool{} + maps := map[gocql.FrameOp]bool{} for _, op := range config.OPsIgnored { maps[op] = true } diff --git a/packetbeat/protos/cassandra/config.go b/packetbeat/protos/cassandra/config.go index 43e1f693e36..d4408a49c69 100644 --- a/packetbeat/protos/cassandra/config.go +++ b/packetbeat/protos/cassandra/config.go @@ -2,18 +2,19 @@ package cassandra import ( "fmt" + "github.com/elastic/beats/packetbeat/config" "github.com/elastic/beats/packetbeat/protos" - . "github.com/elastic/beats/packetbeat/protos/cassandra/internal/gocql" - "github.com/pkg/errors" + + gocql "github.com/elastic/beats/packetbeat/protos/cassandra/internal/gocql" ) type cassandraConfig struct { config.ProtocolCommon `config:",inline"` - SendRequestHeader bool `config:"send_request_header"` - SendResponseHeader bool `config:"send_response_header"` - Compressor string `config:"compressor"` - OPsIgnored []FrameOp `config:"ignored_ops"` + SendRequestHeader bool `config:"send_request_header"` + SendResponseHeader bool `config:"send_response_header"` + Compressor string `config:"compressor"` + OPsIgnored []gocql.FrameOp `config:"ignored_ops"` } var ( @@ -30,7 +31,7 @@ var ( func (c *cassandraConfig) Validate() error { if !(c.Compressor == "" || c.Compressor == "snappy") { - return errors.New(fmt.Sprintf("invalid compressor config: %s, only snappy supported", c.Compressor)) + return fmt.Errorf("invalid compressor config: %s, only snappy supported", c.Compressor) } return nil } diff --git a/packetbeat/protos/cassandra/internal/gocql/frame.go b/packetbeat/protos/cassandra/internal/gocql/frame.go index cb9b6e356a3..c14bc903815 100644 --- a/packetbeat/protos/cassandra/internal/gocql/frame.go +++ b/packetbeat/protos/cassandra/internal/gocql/frame.go @@ -310,8 +310,8 @@ func (f *Framer) parseErrorFrame() (data map[string]interface{}) { detail["table"] = table case errUnprepared: - stmtId := decoder.ReadShortBytes() - detail["stmt_id"] = stmtId + stmtID := decoder.ReadShortBytes() + detail["stmt_id"] = stmtID case errReadFailure: detail["read_consistency"] = decoder.ReadConsistency().String() diff --git a/packetbeat/protos/cassandra/internal/gocql/marshal.go b/packetbeat/protos/cassandra/internal/gocql/marshal.go index ac2627343c7..a8d3f460502 100644 --- a/packetbeat/protos/cassandra/internal/gocql/marshal.go +++ b/packetbeat/protos/cassandra/internal/gocql/marshal.go @@ -67,7 +67,7 @@ type CollectionType struct { func goType(t TypeInfo) reflect.Type { switch t.Type() { - case TypeVarchar, TypeAscii, TypeInet, TypeText: + case TypeVarchar, TypeASCII, TypeInet, TypeText: return reflect.TypeOf(*new(string)) case TypeBigInt, TypeCounter: return 
reflect.TypeOf(*new(int64)) @@ -172,7 +172,7 @@ type Type int const ( TypeCustom Type = 0x0000 - TypeAscii Type = 0x0001 + TypeASCII Type = 0x0001 TypeBigInt Type = 0x0002 TypeBlob Type = 0x0003 TypeBoolean Type = 0x0004 @@ -204,7 +204,7 @@ func (t Type) String() string { switch t { case TypeCustom: return "custom" - case TypeAscii: + case TypeASCII: return "ascii" case TypeBigInt: return "bigint" @@ -265,7 +265,7 @@ const ( func getApacheCassandraType(class string) Type { switch strings.TrimPrefix(class, apacheCassandraTypePrefix) { case "AsciiType": - return TypeAscii + return TypeASCII case "LongType": return TypeBigInt case "BytesType": diff --git a/packetbeat/protos/cassandra/internal/gocql/stream_decoder.go b/packetbeat/protos/cassandra/internal/gocql/stream_decoder.go index 711539c3f66..c4925aa0ffc 100644 --- a/packetbeat/protos/cassandra/internal/gocql/stream_decoder.go +++ b/packetbeat/protos/cassandra/internal/gocql/stream_decoder.go @@ -2,9 +2,9 @@ package cassandra import ( "fmt" - "github.com/elastic/beats/libbeat/common/streambuf" - "github.com/pkg/errors" "net" + + "github.com/elastic/beats/libbeat/common/streambuf" ) type StreamDecoder struct { @@ -66,7 +66,7 @@ func (f StreamDecoder) ReadLongString() (s string) { size := f.ReadInt() if !f.r.Avail(size) { - panic(errors.New(fmt.Sprintf("not enough buf to readLongString,need:%d,actual:%d", size, f.r.Len()))) + panic(fmt.Errorf("not enough buf to readLongString,need:%d,actual:%d", size, f.r.Len())) } str := make([]byte, size) _, err := f.r.Read(str) diff --git a/packetbeat/protos/cassandra/parser.go b/packetbeat/protos/cassandra/parser.go index eed70874092..917e86db337 100644 --- a/packetbeat/protos/cassandra/parser.go +++ b/packetbeat/protos/cassandra/parser.go @@ -2,25 +2,27 @@ package cassandra import ( "errors" + "time" + "github.com/elastic/beats/libbeat/common/streambuf" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/packetbeat/protos/applayer" - . 
"github.com/elastic/beats/packetbeat/protos/cassandra/internal/gocql" - "time" + + gocql "github.com/elastic/beats/packetbeat/protos/cassandra/internal/gocql" ) type parser struct { buf streambuf.Buffer config *parserConfig - framer *Framer + framer *gocql.Framer message *message onMessage func(m *message) error } type parserConfig struct { maxBytes int - compressor Compressor - ignoredOps map[FrameOp]bool + compressor gocql.Compressor + ignoredOps map[gocql.FrameOp]bool } // check whether this ops is enabled or not @@ -195,7 +197,7 @@ func (p *parser) parse() (*message, error) { if isDebug { debugf("start new framer") } - p.framer = NewFramer(&p.buf, p.config.compressor) + p.framer = gocql.NewFramer(&p.buf, p.config.compressor) } // check if the frame header were parsed or not diff --git a/packetbeat/protos/cassandra/trans.go b/packetbeat/protos/cassandra/trans.go index 4c829a1da57..9549c1984c6 100644 --- a/packetbeat/protos/cassandra/trans.go +++ b/packetbeat/protos/cassandra/trans.go @@ -41,7 +41,7 @@ func (trans *transactions) onMessage( ) error { var err error msg.Tuple = *tuple - msg.Transport = applayer.TransportTcp + msg.Transport = applayer.TransportTCP msg.CmdlineTuple = procs.ProcWatcher.FindProcessesTuple(&msg.Tuple) if msg.IsRequest { diff --git a/packetbeat/protos/dns/config.go b/packetbeat/protos/dns/config.go index 347c3ff6b97..2f7629feb84 100644 --- a/packetbeat/protos/dns/config.go +++ b/packetbeat/protos/dns/config.go @@ -7,8 +7,8 @@ import ( type dnsConfig struct { config.ProtocolCommon `config:",inline"` - Include_authorities bool `config:"include_authorities"` - Include_additionals bool `config:"include_additionals"` + IncludeAuthorities bool `config:"include_authorities"` + IncludeAdditionals bool `config:"include_additionals"` } var ( diff --git a/packetbeat/protos/dns/dns.go b/packetbeat/protos/dns/dns.go index b16a0dbcdcf..4d7254973ca 100644 --- a/packetbeat/protos/dns/dns.go +++ b/packetbeat/protos/dns/dns.go @@ -4,7 +4,6 @@ // RFC 4035 (DNS Security Extensions), but since those specifications only // add backwards compatible features there will be no issues handling the // messages. - package dns import ( @@ -31,7 +30,7 @@ var ( debugf = logp.MakeDebug("dns") ) -const MaxDnsTupleRawSize = 16 + 16 + 2 + 2 + 4 + 1 +const MaxDNSTupleRawSize = 16 + 16 + 2 + 2 + 4 + 1 // Constants used to associate the DNS QR flag with a meaningful value. const ( @@ -48,8 +47,8 @@ var ( ) const ( - TransportTcp = iota - TransportUdp + TransportTCP = iota + TransportUDP ) var TransportNames = []string{ @@ -64,10 +63,10 @@ func (t Transport) String() string { return TransportNames[t] } -type HashableDnsTuple [MaxDnsTupleRawSize]byte +type HashableDNSTuple [MaxDNSTupleRawSize]byte // DnsMessage contains a single DNS message. -type DnsMessage struct { +type DNSMessage struct { Ts time.Time // Time when the message was received. Tuple common.IPPortTuple // Source and destination addresses of packet. CmdlineTuple *common.CmdlineTuple @@ -77,91 +76,91 @@ type DnsMessage struct { // DnsTuple contains source IP/port, destination IP/port, transport protocol, // and DNS ID. 
-type DnsTuple struct { - Ip_length int - Src_ip, Dst_ip net.IP - Src_port, Dst_port uint16 - Transport Transport - Id uint16 - - raw HashableDnsTuple // Src_ip:Src_port:Dst_ip:Dst_port:Transport:Id - revRaw HashableDnsTuple // Dst_ip:Dst_port:Src_ip:Src_port:Transport:Id -} - -func DnsTupleFromIpPort(t *common.IPPortTuple, trans Transport, id uint16) DnsTuple { - tuple := DnsTuple{ - Ip_length: t.IPLength, - Src_ip: t.SrcIP, - Dst_ip: t.DstIP, - Src_port: t.SrcPort, - Dst_port: t.DstPort, +type DNSTuple struct { + IPLength int + SrcIP, DstIP net.IP + SrcPort, DstPort uint16 + Transport Transport + ID uint16 + + raw HashableDNSTuple // Src_ip:Src_port:Dst_ip:Dst_port:Transport:Id + revRaw HashableDNSTuple // Dst_ip:Dst_port:Src_ip:Src_port:Transport:Id +} + +func DNSTupleFromIPPort(t *common.IPPortTuple, trans Transport, id uint16) DNSTuple { + tuple := DNSTuple{ + IPLength: t.IPLength, + SrcIP: t.SrcIP, + DstIP: t.DstIP, + SrcPort: t.SrcPort, + DstPort: t.DstPort, Transport: trans, - Id: id, + ID: id, } tuple.ComputeHashebles() return tuple } -func (t DnsTuple) Reverse() DnsTuple { - return DnsTuple{ - Ip_length: t.Ip_length, - Src_ip: t.Dst_ip, - Dst_ip: t.Src_ip, - Src_port: t.Dst_port, - Dst_port: t.Src_port, +func (t DNSTuple) Reverse() DNSTuple { + return DNSTuple{ + IPLength: t.IPLength, + SrcIP: t.DstIP, + DstIP: t.SrcIP, + SrcPort: t.DstPort, + DstPort: t.SrcPort, Transport: t.Transport, - Id: t.Id, + ID: t.ID, raw: t.revRaw, revRaw: t.raw, } } -func (t *DnsTuple) ComputeHashebles() { - copy(t.raw[0:16], t.Src_ip) - copy(t.raw[16:18], []byte{byte(t.Src_port >> 8), byte(t.Src_port)}) - copy(t.raw[18:34], t.Dst_ip) - copy(t.raw[34:36], []byte{byte(t.Dst_port >> 8), byte(t.Dst_port)}) - copy(t.raw[36:38], []byte{byte(t.Id >> 8), byte(t.Id)}) +func (t *DNSTuple) ComputeHashebles() { + copy(t.raw[0:16], t.SrcIP) + copy(t.raw[16:18], []byte{byte(t.SrcPort >> 8), byte(t.SrcPort)}) + copy(t.raw[18:34], t.DstIP) + copy(t.raw[34:36], []byte{byte(t.DstPort >> 8), byte(t.DstPort)}) + copy(t.raw[36:38], []byte{byte(t.ID >> 8), byte(t.ID)}) t.raw[39] = byte(t.Transport) - copy(t.revRaw[0:16], t.Dst_ip) - copy(t.revRaw[16:18], []byte{byte(t.Dst_port >> 8), byte(t.Dst_port)}) - copy(t.revRaw[18:34], t.Src_ip) - copy(t.revRaw[34:36], []byte{byte(t.Src_port >> 8), byte(t.Src_port)}) - copy(t.revRaw[36:38], []byte{byte(t.Id >> 8), byte(t.Id)}) + copy(t.revRaw[0:16], t.DstIP) + copy(t.revRaw[16:18], []byte{byte(t.DstPort >> 8), byte(t.DstPort)}) + copy(t.revRaw[18:34], t.SrcIP) + copy(t.revRaw[34:36], []byte{byte(t.SrcPort >> 8), byte(t.SrcPort)}) + copy(t.revRaw[36:38], []byte{byte(t.ID >> 8), byte(t.ID)}) t.revRaw[39] = byte(t.Transport) } -func (t *DnsTuple) String() string { +func (t *DNSTuple) String() string { return fmt.Sprintf("DnsTuple src[%s:%d] dst[%s:%d] transport[%s] id[%d]", - t.Src_ip.String(), - t.Src_port, - t.Dst_ip.String(), - t.Dst_port, + t.SrcIP.String(), + t.SrcPort, + t.DstIP.String(), + t.DstPort, t.Transport, - t.Id) + t.ID) } // Hashable returns a hashable value that uniquely identifies // the DNS tuple. -func (t *DnsTuple) Hashable() HashableDnsTuple { +func (t *DNSTuple) Hashable() HashableDNSTuple { return t.raw } // Hashable returns a hashable value that uniquely identifies // the DNS tuple after swapping the source and destination. -func (t *DnsTuple) RevHashable() HashableDnsTuple { +func (t *DNSTuple) RevHashable() HashableDNSTuple { return t.revRaw } -type Dns struct { +type DNS struct { // Configuration data. 
- Ports []int - Send_request bool - Send_response bool - Include_authorities bool - Include_additionals bool + Ports []int + SendRequest bool + SendResponse bool + IncludeAuthorities bool + IncludeAdditionals bool // Cache of active DNS transactions. The map key is the HashableDnsTuple // associated with the request. @@ -175,25 +174,25 @@ type Dns struct { // HashableDnsTuple. The lookup key should be the HashableDnsTuple associated // with the request (src is the requestor). Nil is returned if the entry // does not exist. -func (dns *Dns) getTransaction(k HashableDnsTuple) *DnsTransaction { +func (dns *DNS) getTransaction(k HashableDNSTuple) *DNSTransaction { v := dns.transactions.Get(k) if v != nil { - return v.(*DnsTransaction) + return v.(*DNSTransaction) } return nil } -type DnsTransaction struct { +type DNSTransaction struct { ts time.Time // Time when the request was received. - tuple DnsTuple // Key used to track this transaction in the transactionsMap. + tuple DNSTuple // Key used to track this transaction in the transactionsMap. ResponseTime int32 // Elapsed time in milliseconds between the request and response. Src common.Endpoint Dst common.Endpoint Transport Transport Notes []string - Request *DnsMessage - Response *DnsMessage + Request *DNSMessage + Response *DNSMessage } func init() { @@ -205,7 +204,7 @@ func New( results publish.Transactions, cfg *common.Config, ) (protos.Plugin, error) { - p := &Dns{} + p := &DNS{} config := defaultConfig if !testMode { if err := cfg.Unpack(&config); err != nil { @@ -219,13 +218,13 @@ func New( return p, nil } -func (dns *Dns) init(results publish.Transactions, config *dnsConfig) error { +func (dns *DNS) init(results publish.Transactions, config *dnsConfig) error { dns.setFromConfig(config) dns.transactions = common.NewCacheWithRemovalListener( dns.transactionTimeout, protos.DefaultTransactionHashSize, func(k common.Key, v common.Value) { - trans, ok := v.(*DnsTransaction) + trans, ok := v.(*DNSTransaction) if !ok { logp.Err("Expired value is not a *DnsTransaction.") return @@ -239,30 +238,30 @@ func (dns *Dns) init(results publish.Transactions, config *dnsConfig) error { return nil } -func (dns *Dns) setFromConfig(config *dnsConfig) error { +func (dns *DNS) setFromConfig(config *dnsConfig) error { dns.Ports = config.Ports - dns.Send_request = config.SendRequest - dns.Send_response = config.SendResponse - dns.Include_authorities = config.Include_authorities - dns.Include_additionals = config.Include_additionals + dns.SendRequest = config.SendRequest + dns.SendResponse = config.SendResponse + dns.IncludeAuthorities = config.IncludeAuthorities + dns.IncludeAdditionals = config.IncludeAdditionals dns.transactionTimeout = config.TransactionTimeout return nil } -func newTransaction(ts time.Time, tuple DnsTuple, cmd common.CmdlineTuple) *DnsTransaction { - trans := &DnsTransaction{ +func newTransaction(ts time.Time, tuple DNSTuple, cmd common.CmdlineTuple) *DNSTransaction { + trans := &DNSTransaction{ Transport: tuple.Transport, ts: ts, tuple: tuple, } trans.Src = common.Endpoint{ - IP: tuple.Src_ip.String(), - Port: tuple.Src_port, + IP: tuple.SrcIP.String(), + Port: tuple.SrcPort, Proc: string(cmd.Src), } trans.Dst = common.Endpoint{ - IP: tuple.Dst_ip.String(), - Port: tuple.Dst_port, + IP: tuple.DstIP.String(), + Port: tuple.DstPort, Proc: string(cmd.Dst), } return trans @@ -270,23 +269,23 @@ func newTransaction(ts time.Time, tuple DnsTuple, cmd common.CmdlineTuple) *DnsT // deleteTransaction deletes an entry from the transaction map and 
returns // the deleted element. If the key does not exist then nil is returned. -func (dns *Dns) deleteTransaction(k HashableDnsTuple) *DnsTransaction { +func (dns *DNS) deleteTransaction(k HashableDNSTuple) *DNSTransaction { v := dns.transactions.Delete(k) if v != nil { - return v.(*DnsTransaction) + return v.(*DNSTransaction) } return nil } -func (dns *Dns) GetPorts() []int { +func (dns *DNS) GetPorts() []int { return dns.Ports } -func (dns *Dns) ConnectionTimeout() time.Duration { +func (dns *DNS) ConnectionTimeout() time.Duration { return dns.transactionTimeout } -func (dns *Dns) receivedDnsRequest(tuple *DnsTuple, msg *DnsMessage) { +func (dns *DNS) receivedDNSRequest(tuple *DNSTuple, msg *DNSMessage) { debugf("Processing query. %s", tuple.String()) trans := dns.deleteTransaction(tuple.Hashable()) @@ -301,16 +300,16 @@ func (dns *Dns) receivedDnsRequest(tuple *DnsTuple, msg *DnsMessage) { trans = newTransaction(msg.Ts, *tuple, *msg.CmdlineTuple) - if tuple.Transport == TransportUdp && (msg.Data.IsEdns0() != nil) && msg.Length > MaxDnsPacketSize { - trans.Notes = append(trans.Notes, UdpPacketTooLarge.Error()) - debugf("%s", UdpPacketTooLarge.Error()) + if tuple.Transport == TransportUDP && (msg.Data.IsEdns0() != nil) && msg.Length > MaxDNSPacketSize { + trans.Notes = append(trans.Notes, UDPPacketTooLarge.Error()) + debugf("%s", UDPPacketTooLarge.Error()) } dns.transactions.Put(tuple.Hashable(), trans) trans.Request = msg } -func (dns *Dns) receivedDnsResponse(tuple *DnsTuple, msg *DnsMessage) { +func (dns *DNS) receivedDNSResponse(tuple *DNSTuple, msg *DNSMessage) { debugf("Processing response. %s", tuple.String()) trans := dns.getTransaction(tuple.RevHashable()) @@ -324,11 +323,11 @@ func (dns *Dns) receivedDnsResponse(tuple *DnsTuple, msg *DnsMessage) { trans.Response = msg - if tuple.Transport == TransportUdp { + if tuple.Transport == TransportUDP { respIsEdns := msg.Data.IsEdns0() != nil - if !respIsEdns && msg.Length > MaxDnsPacketSize { - trans.Notes = append(trans.Notes, UdpPacketTooLarge.ResponseError()) - debugf("%s", UdpPacketTooLarge.ResponseError()) + if !respIsEdns && msg.Length > MaxDNSPacketSize { + trans.Notes = append(trans.Notes, UDPPacketTooLarge.ResponseError()) + debugf("%s", UDPPacketTooLarge.ResponseError()) } request := trans.Request @@ -350,7 +349,7 @@ func (dns *Dns) receivedDnsResponse(tuple *DnsTuple, msg *DnsMessage) { dns.deleteTransaction(trans.tuple.Hashable()) } -func (dns *Dns) publishTransaction(t *DnsTransaction) { +func (dns *DNS) publishTransaction(t *DNSTransaction) { if dns.results == nil { return } @@ -382,17 +381,17 @@ func (dns *Dns) publishTransaction(t *DnsTransaction) { event["query"] = dnsQuestionToString(t.Request.Data.Question[0]) event["resource"] = t.Request.Data.Question[0].Name } - addDnsToMapStr(dnsEvent, t.Response.Data, dns.Include_authorities, - dns.Include_additionals) + addDNSToMapStr(dnsEvent, t.Response.Data, dns.IncludeAuthorities, + dns.IncludeAdditionals) if t.Response.Data.Rcode == 0 { event["status"] = common.OK_STATUS } - if dns.Send_request { + if dns.SendRequest { event["request"] = dnsToString(t.Request.Data) } - if dns.Send_response { + if dns.SendResponse { event["response"] = dnsToString(t.Response.Data) } } else if t.Request != nil { @@ -402,10 +401,10 @@ func (dns *Dns) publishTransaction(t *DnsTransaction) { event["query"] = dnsQuestionToString(t.Request.Data.Question[0]) event["resource"] = t.Request.Data.Question[0].Name } - addDnsToMapStr(dnsEvent, t.Request.Data, dns.Include_authorities, - 
dns.Include_additionals) + addDNSToMapStr(dnsEvent, t.Request.Data, dns.IncludeAuthorities, + dns.IncludeAdditionals) - if dns.Send_request { + if dns.SendRequest { event["request"] = dnsToString(t.Request.Data) } } else if t.Response != nil { @@ -415,9 +414,9 @@ func (dns *Dns) publishTransaction(t *DnsTransaction) { event["query"] = dnsQuestionToString(t.Response.Data.Question[0]) event["resource"] = t.Response.Data.Question[0].Name } - addDnsToMapStr(dnsEvent, t.Response.Data, dns.Include_authorities, - dns.Include_additionals) - if dns.Send_response { + addDNSToMapStr(dnsEvent, t.Response.Data, dns.IncludeAuthorities, + dns.IncludeAdditionals) + if dns.SendResponse { event["response"] = dnsToString(t.Response.Data) } } @@ -425,7 +424,7 @@ func (dns *Dns) publishTransaction(t *DnsTransaction) { dns.results.PublishTransaction(event) } -func (dns *Dns) expireTransaction(t *DnsTransaction) { +func (dns *DNS) expireTransaction(t *DNSTransaction) { t.Notes = append(t.Notes, NoResponse.Error()) debugf("%s %s", NoResponse.Error(), t.tuple.String()) dns.publishTransaction(t) @@ -433,7 +432,7 @@ func (dns *Dns) expireTransaction(t *DnsTransaction) { } // Adds the DNS message data to the supplied MapStr. -func addDnsToMapStr(m common.MapStr, dns *mkdns.Msg, authority bool, additional bool) { +func addDNSToMapStr(m common.MapStr, dns *mkdns.Msg, authority bool, additional bool) { m["id"] = dns.Id m["op_code"] = dnsOpCodeToString(dns.Opcode) @@ -771,9 +770,9 @@ func dnsToString(dns *mkdns.Msg) string { // then the returned dns pointer will be nil. This method recovers from panics // and is concurrency-safe. // We do not handle Unpack ErrTruncated for now. See https://github.com/miekg/dns/pull/281 -func decodeDnsData(transport Transport, rawData []byte) (dns *mkdns.Msg, err error) { +func decodeDNSData(transport Transport, rawData []byte) (dns *mkdns.Msg, err error) { var offset int - if transport == TransportTcp { + if transport == TransportTCP { offset = DecodeOffset } @@ -792,7 +791,7 @@ func decodeDnsData(transport Transport, rawData []byte) (dns *mkdns.Msg, err err // We use this check because Unpack does not return an error for some unvalid messages. // TODO: can a better solution be found? if msg.Len() <= 12 || err != nil { - return nil, NonDnsMsg + return nil, NonDNSMsg } return msg, nil } diff --git a/packetbeat/protos/dns/dns_tcp.go b/packetbeat/protos/dns/dns_tcp.go index bb470ba384f..06addf1cc60 100644 --- a/packetbeat/protos/dns/dns_tcp.go +++ b/packetbeat/protos/dns/dns_tcp.go @@ -13,7 +13,7 @@ import ( mkdns "github.com/miekg/dns" ) -const MaxDnsMessageSize = (1 << 16) - 1 +const MaxDNSMessageSize = (1 << 16) - 1 // RFC 1035 // The 2 first bytes contain the length of the message @@ -21,11 +21,11 @@ const DecodeOffset = 2 // DnsStream contains DNS data from one side of a TCP transmission. A pair // of DnsStream's are used to represent the full conversation. -type DnsStream struct { +type DNSStream struct { tcpTuple *common.TCPTuple rawData []byte parseOffset int - message *DnsMessage + message *DNSMessage } // dnsConnectionData contains two DnsStream's that hold data from a complete TCP @@ -33,17 +33,17 @@ type DnsStream struct { // the request data. 
// prevRequest (previous Request) is used to add Notes to a transaction when a failing answer is encountered type dnsConnectionData struct { - Data [2]*DnsStream - prevRequest *DnsMessage + Data [2]*DNSStream + prevRequest *DNSMessage } -func (dns *Dns) Parse(pkt *protos.Packet, tcpTuple *common.TCPTuple, dir uint8, private protos.ProtocolData) protos.ProtocolData { +func (dns *DNS) Parse(pkt *protos.Packet, tcpTuple *common.TCPTuple, dir uint8, private protos.ProtocolData) protos.ProtocolData { defer logp.Recover("Dns ParseTcp") debugf("Parsing packet addressed with %s of length %d.", pkt.Tuple.String(), len(pkt.Payload)) - conn := ensureDnsConnection(private) + conn := ensureDNSConnection(private) conn = dns.doParse(conn, pkt, tcpTuple, dir) if conn == nil { @@ -53,7 +53,7 @@ func (dns *Dns) Parse(pkt *protos.Packet, tcpTuple *common.TCPTuple, dir uint8, return conn } -func ensureDnsConnection(private protos.ProtocolData) *dnsConnectionData { +func ensureDNSConnection(private protos.ProtocolData) *dnsConnectionData { if private == nil { return &dnsConnectionData{} } @@ -71,7 +71,7 @@ func ensureDnsConnection(private protos.ProtocolData) *dnsConnectionData { return conn } -func (dns *Dns) doParse(conn *dnsConnectionData, pkt *protos.Packet, tcpTuple *common.TCPTuple, dir uint8) *dnsConnectionData { +func (dns *DNS) doParse(conn *dnsConnectionData, pkt *protos.Packet, tcpTuple *common.TCPTuple, dir uint8) *dnsConnectionData { stream := conn.Data[dir] payload := pkt.Payload @@ -80,17 +80,17 @@ func (dns *Dns) doParse(conn *dnsConnectionData, pkt *protos.Packet, tcpTuple *c conn.Data[dir] = stream } else { if stream.message == nil { // nth message of the same stream - stream.message = &DnsMessage{Ts: pkt.Ts, Tuple: pkt.Tuple} + stream.message = &DNSMessage{Ts: pkt.Ts, Tuple: pkt.Tuple} } stream.rawData = append(stream.rawData, payload...) 
- if len(stream.rawData) > tcp.TCP_MAX_DATA_IN_STREAM { + if len(stream.rawData) > tcp.TCPMaxDataInStream { debugf("Stream data too large, dropping DNS stream") conn.Data[dir] = nil return conn } } - decodedData, err := stream.handleTcpRawData() + decodedData, err := stream.handleTCPRawData() if err != nil { @@ -99,7 +99,7 @@ func (dns *Dns) doParse(conn *dnsConnectionData, pkt *protos.Packet, tcpTuple *c return conn } - if dir == tcp.TcpDirectionReverse { + if dir == tcp.TCPDirectionReverse { dns.publishResponseError(conn, err) } @@ -117,42 +117,42 @@ func (dns *Dns) doParse(conn *dnsConnectionData, pkt *protos.Packet, tcpTuple *c return conn } -func newStream(pkt *protos.Packet, tcpTuple *common.TCPTuple) *DnsStream { - return &DnsStream{ +func newStream(pkt *protos.Packet, tcpTuple *common.TCPTuple) *DNSStream { + return &DNSStream{ tcpTuple: tcpTuple, rawData: pkt.Payload, - message: &DnsMessage{Ts: pkt.Ts, Tuple: pkt.Tuple}, + message: &DNSMessage{Ts: pkt.Ts, Tuple: pkt.Tuple}, } } -func (dns *Dns) messageComplete(conn *dnsConnectionData, tcpTuple *common.TCPTuple, dir uint8, decodedData *mkdns.Msg) { - dns.handleDns(conn, tcpTuple, decodedData, dir) +func (dns *DNS) messageComplete(conn *dnsConnectionData, tcpTuple *common.TCPTuple, dir uint8, decodedData *mkdns.Msg) { + dns.handleDNS(conn, tcpTuple, decodedData, dir) } -func (dns *Dns) handleDns(conn *dnsConnectionData, tcpTuple *common.TCPTuple, decodedData *mkdns.Msg, dir uint8) { +func (dns *DNS) handleDNS(conn *dnsConnectionData, tcpTuple *common.TCPTuple, decodedData *mkdns.Msg, dir uint8) { message := conn.Data[dir].message - dnsTuple := DnsTupleFromIpPort(&message.Tuple, TransportTcp, decodedData.Id) + dnsTuple := DNSTupleFromIPPort(&message.Tuple, TransportTCP, decodedData.Id) message.CmdlineTuple = procs.ProcWatcher.FindProcessesTuple(tcpTuple.IPPort()) message.Data = decodedData message.Length += DecodeOffset if decodedData.Response { - dns.receivedDnsResponse(&dnsTuple, message) + dns.receivedDNSResponse(&dnsTuple, message) conn.prevRequest = nil } else /* Query */ { - dns.receivedDnsRequest(&dnsTuple, message) + dns.receivedDNSRequest(&dnsTuple, message) conn.prevRequest = message } } -func (stream *DnsStream) PrepareForNewMessage() { +func (stream *DNSStream) PrepareForNewMessage() { stream.rawData = stream.rawData[stream.parseOffset:] stream.message = nil stream.parseOffset = 0 } -func (dns *Dns) ReceivedFin(tcpTuple *common.TCPTuple, dir uint8, private protos.ProtocolData) protos.ProtocolData { +func (dns *DNS) ReceivedFin(tcpTuple *common.TCPTuple, dir uint8, private protos.ProtocolData) protos.ProtocolData { if private == nil { return nil } @@ -166,14 +166,14 @@ func (dns *Dns) ReceivedFin(tcpTuple *common.TCPTuple, dir uint8, private protos return conn } - decodedData, err := stream.handleTcpRawData() + decodedData, err := stream.handleTCPRawData() if err == nil { dns.messageComplete(conn, tcpTuple, dir, decodedData) return conn } - if dir == tcp.TcpDirectionReverse { + if dir == tcp.TCPDirectionReverse { dns.publishResponseError(conn, err) } @@ -183,7 +183,7 @@ func (dns *Dns) ReceivedFin(tcpTuple *common.TCPTuple, dir uint8, private protos return conn } -func (dns *Dns) GapInStream(tcpTuple *common.TCPTuple, dir uint8, nbytes int, private protos.ProtocolData) (priv protos.ProtocolData, drop bool) { +func (dns *DNS) GapInStream(tcpTuple *common.TCPTuple, dir uint8, nbytes int, private protos.ProtocolData) (priv protos.ProtocolData, drop bool) { if private == nil { return private, true } @@ -197,14 +197,14 @@ func 
(dns *Dns) GapInStream(tcpTuple *common.TCPTuple, dir uint8, nbytes int, pr return private, false } - decodedData, err := stream.handleTcpRawData() + decodedData, err := stream.handleTCPRawData() if err == nil { dns.messageComplete(conn, tcpTuple, dir, decodedData) return private, true } - if dir == tcp.TcpDirectionReverse { + if dir == tcp.TCPDirectionReverse { dns.publishResponseError(conn, err) } @@ -218,40 +218,40 @@ func (dns *Dns) GapInStream(tcpTuple *common.TCPTuple, dir uint8, nbytes int, pr // Add Notes to the transaction about a failure in the response // Publish and remove the transaction -func (dns *Dns) publishResponseError(conn *dnsConnectionData, err error) { - streamOrigin := conn.Data[tcp.TcpDirectionOriginal] - streamReverse := conn.Data[tcp.TcpDirectionReverse] +func (dns *DNS) publishResponseError(conn *dnsConnectionData, err error) { + streamOrigin := conn.Data[tcp.TCPDirectionOriginal] + streamReverse := conn.Data[tcp.TCPDirectionReverse] if streamOrigin == nil || conn.prevRequest == nil || streamReverse == nil { return } dataOrigin := conn.prevRequest.Data - dnsTupleOrigin := DnsTupleFromIpPort(&conn.prevRequest.Tuple, TransportTcp, dataOrigin.Id) - hashDnsTupleOrigin := (&dnsTupleOrigin).Hashable() + dnsTupleOrigin := DNSTupleFromIPPort(&conn.prevRequest.Tuple, TransportTCP, dataOrigin.Id) + hashDNSTupleOrigin := (&dnsTupleOrigin).Hashable() - trans := dns.deleteTransaction(hashDnsTupleOrigin) + trans := dns.deleteTransaction(hashDNSTupleOrigin) if trans == nil { // happens if Parse, Gap or Fin already published the response error return } - errDns, ok := err.(*DNSError) + errDNS, ok := err.(*DNSError) if !ok { return } - trans.Notes = append(trans.Notes, errDns.ResponseError()) + trans.Notes = append(trans.Notes, errDNS.ResponseError()) // Should we publish the length (bytes_out) of the failed Response? //streamReverse.message.Length = len(streamReverse.rawData) //trans.Response = streamReverse.message dns.publishTransaction(trans) - dns.deleteTransaction(hashDnsTupleOrigin) + dns.deleteTransaction(hashDNSTupleOrigin) } // Manages data length prior to decoding the data and manages errors after decoding -func (stream *DnsStream) handleTcpRawData() (*mkdns.Msg, error) { +func (stream *DNSStream) handleTCPRawData() (*mkdns.Msg, error) { rawData := stream.rawData messageLength := len(rawData) @@ -270,7 +270,7 @@ func (stream *DnsStream) handleTcpRawData() (*mkdns.Msg, error) { if messageLength <= 0 { return nil, ZeroLengthMsg } - if messageLength > MaxDnsMessageSize { // Should never be true though ... + if messageLength > MaxDNSMessageSize { // Should never be true though ... return nil, UnexpectedLengthMsg } } @@ -279,7 +279,7 @@ func (stream *DnsStream) handleTcpRawData() (*mkdns.Msg, error) { return nil, IncompleteMsg } - decodedData, err := decodeDnsData(TransportTcp, rawData[:stream.parseOffset]) + decodedData, err := decodeDNSData(TransportTCP, rawData[:stream.parseOffset]) if err != nil { return nil, err diff --git a/packetbeat/protos/dns/dns_tcp_test.go b/packetbeat/protos/dns/dns_tcp_test.go index e3ca5a9ba7a..649cb7e0f57 100644 --- a/packetbeat/protos/dns/dns_tcp_test.go +++ b/packetbeat/protos/dns/dns_tcp_test.go @@ -25,25 +25,25 @@ import ( ) // Verify that the interface TCP has been satisfied. 
-var _ protos.TcpPlugin = &Dns{} +var _ protos.TCPPlugin = &DNS{} var ( - messagesTcp = []DnsTestMessage{ + messagesTCP = []DNSTestMessage{ elasticATcp, - zoneAxfrTcp, - githubPtrTcp, - sophosTxtTcp, + zoneAxfrTCP, + githubPtrTCP, + sophosTxtTCP, } - elasticATcp = DnsTestMessage{ + elasticATcp = DNSTestMessage{ id: 11674, opcode: "QUERY", flags: []string{"rd", "ra"}, rcode: "NOERROR", - q_class: "IN", - q_type: "A", - q_name: "elastic.co.", - q_etld: "elastic.co.", + qClass: "IN", + qType: "A", + qName: "elastic.co.", + qEtld: "elastic.co.", answers: []string{"54.201.204.244", "54.200.185.88"}, authorities: []string{"NS-835.AWSDNS-40.NET.", "NS-1183.AWSDNS-19.ORG.", "NS-2007.AWSDNS-58.CO.UK.", "NS-66.AWSDNS-08.COM."}, request: []byte{ @@ -67,14 +67,14 @@ var ( }, } - zoneAxfrTcp = DnsTestMessage{ + zoneAxfrTCP = DNSTestMessage{ id: 0, opcode: "QUERY", rcode: "NOERROR", - q_class: "IN", - q_type: "AXFR", - q_name: "etas.com.", - q_etld: "etas.com.", + qClass: "IN", + qType: "AXFR", + qName: "etas.com.", + qEtld: "etas.com.", answers: []string{"training2003p.", "training2003p.", "1.1.1.1", "training2003p."}, request: []byte{ 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x65, @@ -97,15 +97,15 @@ var ( }, } - githubPtrTcp = DnsTestMessage{ + githubPtrTCP = DNSTestMessage{ id: 6766, opcode: "QUERY", flags: []string{"rd", "ra"}, rcode: "NOERROR", - q_class: "IN", - q_type: "PTR", - q_name: "131.252.30.192.in-addr.arpa.", - q_etld: "192.in-addr.arpa.", + qClass: "IN", + qType: "PTR", + qName: "131.252.30.192.in-addr.arpa.", + qEtld: "192.in-addr.arpa.", answers: []string{"github.com."}, authorities: []string{"ns1.p16.dynect.net.", "ns3.p16.dynect.net.", "ns4.p16.dynect.net.", "ns2.p16.dynect.net."}, request: []byte{ @@ -127,17 +127,17 @@ var ( }, } - sophosTxtTcp = DnsTestMessage{ - id: 35009, - opcode: "QUERY", - flags: []string{"rd", "ra"}, - rcode: "NXDOMAIN", - q_class: "IN", - q_type: "TXT", - q_name: "3.1o19ss00s2s17s4qp375sp49r830n2n4n923s8839052s7p7768s53365226pp3.659p1r741os37393" + + sophosTxtTCP = DNSTestMessage{ + id: 35009, + opcode: "QUERY", + flags: []string{"rd", "ra"}, + rcode: "NXDOMAIN", + qClass: "IN", + qType: "TXT", + qName: "3.1o19ss00s2s17s4qp375sp49r830n2n4n923s8839052s7p7768s53365226pp3.659p1r741os37393" + "648s2348o762q1066q53rq5p4614r1q4781qpr16n809qp4.879o3o734q9sns005o3pp76q83.2q65qns3spns" + "1081s5rn5sr74opqrqnpq6rn3ro5.i.00.mac.sophosxl.net.", - q_etld: "sophosxl.net.", + qEtld: "sophosxl.net.", request: []byte{ 0x00, 0xed, 0x88, 0xc1, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x33, 0x3f, 0x31, 0x6f, 0x31, 0x39, 0x73, 0x73, 0x30, 0x30, 0x73, 0x32, 0x73, 0x31, 0x37, 0x73, 0x34, @@ -175,7 +175,7 @@ var ( } ) -func testTcpTuple() *common.TCPTuple { +func testTCPTuple() *common.TCPTuple { t := &common.TCPTuple{ IPLength: 4, SrcIP: net.IPv4(192, 168, 0, 1), DstIP: net.IPv4(192, 168, 0, 2), @@ -188,36 +188,36 @@ func testTcpTuple() *common.TCPTuple { func TestDecodeTcp_nonDnsMsgRequest(t *testing.T) { rawData := []byte{0, 2, 1, 2} - _, err := decodeDnsData(TransportTcp, rawData) - assert.Equal(t, err, NonDnsMsg) + _, err := decodeDNSData(TransportTCP, rawData) + assert.Equal(t, err, NonDNSMsg) } // Verify that the split lone request packet is decoded. 
func TestDecodeTcp_splitRequest(t *testing.T) { - stream := &DnsStream{rawData: sophosTxtTcp.request[:10], message: new(DnsMessage)} - _, err := decodeDnsData(TransportTcp, stream.rawData) + stream := &DNSStream{rawData: sophosTxtTCP.request[:10], message: new(DNSMessage)} + _, err := decodeDNSData(TransportTCP, stream.rawData) assert.NotNil(t, err, "Not expecting a complete message yet") - stream.rawData = append(stream.rawData, sophosTxtTcp.request[10:]...) - _, err = decodeDnsData(TransportTcp, stream.rawData) + stream.rawData = append(stream.rawData, sophosTxtTCP.request[10:]...) + _, err = decodeDNSData(TransportTCP, stream.rawData) assert.Nil(t, err, "Message should be complete") } func TestParseTcp_errorNonDnsMsgResponse(t *testing.T) { var private protos.ProtocolData - dns := newDns(testing.Verbose()) - tcptuple := testTcpTuple() + dns := newDNS(testing.Verbose()) + tcptuple := testTCPTuple() q := elasticATcp packet := newPacket(forward, q.request) - private = dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + private = dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") r := []byte{0, 2, 1, 2} packet = newPacket(reverse, r) - dns.Parse(packet, tcptuple, tcp.TcpDirectionReverse, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionReverse, private) assert.Empty(t, dns.transactions.Size(), "There should be no transaction.") m := expectResult(t, dns) @@ -226,17 +226,17 @@ func TestParseTcp_errorNonDnsMsgResponse(t *testing.T) { assert.Equal(t, len(q.request), mapValue(t, m, "bytes_in")) assert.Nil(t, mapValue(t, m, "bytes_out")) assert.Equal(t, common.ERROR_STATUS, mapValue(t, m, "status")) - assert.Equal(t, NonDnsMsg.ResponseError(), mapValue(t, m, "notes")) + assert.Equal(t, NonDNSMsg.ResponseError(), mapValue(t, m, "notes")) } // Verify that a request message with length (first two bytes value) of zero is not published func TestParseTcp_zeroLengthMsgRequest(t *testing.T) { var private protos.ProtocolData - dns := newDns(testing.Verbose()) - tcptuple := testTcpTuple() + dns := newDNS(testing.Verbose()) + tcptuple := testTCPTuple() packet := newPacket(forward, []byte{0, 0, 1, 2}) - dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) assert.Empty(t, dns.transactions.Size(), "There should be no transactions.") client := dns.results.(*publish.ChanTransactions) close(client.Channel) @@ -246,17 +246,17 @@ func TestParseTcp_zeroLengthMsgRequest(t *testing.T) { // Verify that a response message with length (first two bytes value) of zero is published with the corresponding Notes func TestParseTcp_errorZeroLengthMsgResponse(t *testing.T) { var private protos.ProtocolData - dns := newDns(testing.Verbose()) - tcptuple := testTcpTuple() + dns := newDNS(testing.Verbose()) + tcptuple := testTCPTuple() q := elasticATcp packet := newPacket(forward, q.request) - private = dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + private = dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") r := []byte{0, 0, 1, 2} packet = newPacket(reverse, r) - dns.Parse(packet, tcptuple, tcp.TcpDirectionReverse, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionReverse, private) assert.Empty(t, dns.transactions.Size(), "There should be no transaction.") m := expectResult(t, dns) @@ -271,11 +271,11 @@ func 
TestParseTcp_errorZeroLengthMsgResponse(t *testing.T) { // Verify that an empty packet is safely handled (no panics). func TestParseTcp_emptyPacket(t *testing.T) { var private protos.ProtocolData - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) packet := newPacket(forward, []byte{}) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() - dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) assert.Empty(t, dns.transactions.Size(), "There should be no transactions.") client := dns.results.(*publish.ChanTransactions) close(client.Channel) @@ -285,23 +285,23 @@ func TestParseTcp_emptyPacket(t *testing.T) { // Verify that a malformed packet is safely handled (no panics). func TestParseTcp_malformedPacket(t *testing.T) { var private protos.ProtocolData - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) garbage := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13} - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() packet := newPacket(forward, garbage) - dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) assert.Empty(t, dns.transactions.Size(), "There should be no transactions.") } // Verify that the lone request packet is parsed. func TestParseTcp_requestPacket(t *testing.T) { var private protos.ProtocolData - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) packet := newPacket(forward, elasticATcp.request) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() - dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") client := dns.results.(*publish.ChanTransactions) close(client.Channel) @@ -312,12 +312,12 @@ func TestParseTcp_requestPacket(t *testing.T) { // result is published. func TestParseTcp_errorResponseOnly(t *testing.T) { var private protos.ProtocolData - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) q := elasticATcp packet := newPacket(reverse, q.response) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() - dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) m := expectResult(t, dns) assert.Equal(t, "tcp", mapValue(t, m, "transport")) assert.Nil(t, mapValue(t, m, "bytes_in")) @@ -333,15 +333,15 @@ func TestParseTcp_errorResponseOnly(t *testing.T) { // map awaiting a response. 
func TestParseTcp_errorDuplicateRequests(t *testing.T) { var private protos.ProtocolData - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) q := elasticATcp packet := newPacket(forward, q.request) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() - dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") - dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) // The first request is published and this one becomes a transaction assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") @@ -359,15 +359,15 @@ func TestParseTcp_errorDuplicateRequests(t *testing.T) { // Checks that PrepareNewMessage and Parse can manage two messages on the same stream, in different packets func TestParseTcp_errorDuplicateRequestsOneStream(t *testing.T) { var private protos.ProtocolData - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) q := elasticATcp packet := newPacket(forward, q.request) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() - private = dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + private = dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") - dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) // The first query is published and this one becomes a transaction assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") @@ -385,19 +385,19 @@ func TestParseTcp_errorDuplicateRequestsOneStream(t *testing.T) { // It typically happens when a SOA is followed by AXFR func TestParseTcp_errorDuplicateRequestsOnePacket(t *testing.T) { var private protos.ProtocolData - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) q := elasticATcp offset := 4 concatRequest := append(q.request, q.request[:offset]...) 
packet := newPacket(forward, concatRequest) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() - private = dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + private = dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") packet = newPacket(forward, q.request[offset:]) - dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") m := expectResult(t, dns) @@ -413,28 +413,28 @@ func TestParseTcp_errorDuplicateRequestsOnePacket(t *testing.T) { // Verify that a split response packet is parsed and published func TestParseTcp_splitResponse(t *testing.T) { var private protos.ProtocolData - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) tcpQuery := elasticATcp q := tcpQuery.request r0 := tcpQuery.response[:1] r1 := tcpQuery.response[1:10] r2 := tcpQuery.response[10:] - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() packet := newPacket(forward, q) - private = dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + private = dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") packet = newPacket(reverse, r0) - private = dns.Parse(packet, tcptuple, tcp.TcpDirectionReverse, private) + private = dns.Parse(packet, tcptuple, tcp.TCPDirectionReverse, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") packet = newPacket(reverse, r1) - dns.Parse(packet, tcptuple, tcp.TcpDirectionReverse, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionReverse, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") packet = newPacket(reverse, r2) - dns.Parse(packet, tcptuple, tcp.TcpDirectionReverse, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionReverse, private) assert.Empty(t, dns.transactions.Size(), "There should be no transaction.") m := expectResult(t, dns) @@ -449,18 +449,18 @@ func TestParseTcp_splitResponse(t *testing.T) { func TestGap_requestDrop(t *testing.T) { var private protos.ProtocolData - dns := newDns(testing.Verbose()) - q := sophosTxtTcp.request[:10] + dns := newDNS(testing.Verbose()) + q := sophosTxtTCP.request[:10] packet := newPacket(forward, q) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() - private = dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + private = dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) - private, drop := dns.GapInStream(tcptuple, tcp.TcpDirectionOriginal, 10, private) + private, drop := dns.GapInStream(tcptuple, tcp.TCPDirectionOriginal, 10, private) assert.Equal(t, true, drop) - dns.ReceivedFin(tcptuple, tcp.TcpDirectionOriginal, private) + dns.ReceivedFin(tcptuple, tcp.TCPDirectionOriginal, private) client := dns.results.(*publish.ChanTransactions) close(client.Channel) @@ -471,26 +471,26 @@ func TestGap_requestDrop(t *testing.T) { // Verify that a gap during the response publish the request with Notes func TestGap_errorResponse(t *testing.T) { var private protos.ProtocolData - dns := newDns(testing.Verbose()) - q := sophosTxtTcp.request - r := sophosTxtTcp.response[:10] - tcptuple := testTcpTuple() + dns := newDNS(testing.Verbose()) + q := sophosTxtTCP.request + r := sophosTxtTCP.response[:10] + tcptuple := testTCPTuple() packet := newPacket(forward, q) - private = 
dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + private = dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") packet = newPacket(reverse, r) - private = dns.Parse(packet, tcptuple, tcp.TcpDirectionReverse, private) + private = dns.Parse(packet, tcptuple, tcp.TCPDirectionReverse, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") - private, drop := dns.GapInStream(tcptuple, tcp.TcpDirectionReverse, 10, private) + private, drop := dns.GapInStream(tcptuple, tcp.TCPDirectionReverse, 10, private) assert.Equal(t, true, drop) - dns.ReceivedFin(tcptuple, tcp.TcpDirectionReverse, private) + dns.ReceivedFin(tcptuple, tcp.TCPDirectionReverse, private) m := expectResult(t, dns) - assertRequest(t, m, sophosTxtTcp) + assertRequest(t, m, sophosTxtTCP) assert.Equal(t, IncompleteMsg.ResponseError(), mapValue(t, m, "notes")) assert.Nil(t, mapValue(t, m, "answers")) } @@ -498,18 +498,18 @@ func TestGap_errorResponse(t *testing.T) { // Verify that a gap/fin happening after a valid query create only one tansaction func TestGapFin_validMessage(t *testing.T) { var private protos.ProtocolData - dns := newDns(testing.Verbose()) - q := sophosTxtTcp.request - tcptuple := testTcpTuple() + dns := newDNS(testing.Verbose()) + q := sophosTxtTCP.request + tcptuple := testTCPTuple() packet := newPacket(forward, q) - private = dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + private = dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") - private, drop := dns.GapInStream(tcptuple, tcp.TcpDirectionOriginal, 10, private) + private, drop := dns.GapInStream(tcptuple, tcp.TCPDirectionOriginal, 10, private) assert.Equal(t, false, drop) - dns.ReceivedFin(tcptuple, tcp.TcpDirectionReverse, private) + dns.ReceivedFin(tcptuple, tcp.TCPDirectionReverse, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") client := dns.results.(*publish.ChanTransactions) @@ -522,37 +522,37 @@ func TestGapFin_validMessage(t *testing.T) { // Verify that a Fin during the response publish the request with Notes func TestFin_errorResponse(t *testing.T) { var private protos.ProtocolData - dns := newDns(testing.Verbose()) - q := zoneAxfrTcp.request - r := zoneAxfrTcp.response[:10] - tcptuple := testTcpTuple() + dns := newDNS(testing.Verbose()) + q := zoneAxfrTCP.request + r := zoneAxfrTCP.response[:10] + tcptuple := testTCPTuple() packet := newPacket(forward, q) - private = dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + private = dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") packet = newPacket(reverse, r) - private = dns.Parse(packet, tcptuple, tcp.TcpDirectionReverse, private) + private = dns.Parse(packet, tcptuple, tcp.TCPDirectionReverse, private) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") - dns.ReceivedFin(tcptuple, tcp.TcpDirectionReverse, private) + dns.ReceivedFin(tcptuple, tcp.TCPDirectionReverse, private) m := expectResult(t, dns) - assertRequest(t, m, zoneAxfrTcp) + assertRequest(t, m, zoneAxfrTCP) assert.Equal(t, IncompleteMsg.ResponseError(), mapValue(t, m, "notes")) assert.Nil(t, mapValue(t, m, "answers")) } // parseTcpRequestResponse parses a request then a response packet and validates // the published result. 
-func parseTcpRequestResponse(t testing.TB, dns *Dns, q DnsTestMessage) { +func parseTCPRequestResponse(t testing.TB, dns *DNS, q DNSTestMessage) { var private protos.ProtocolData packet := newPacket(forward, q.request) - tcptuple := testTcpTuple() - private = dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + tcptuple := testTCPTuple() + private = dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) packet = newPacket(reverse, q.response) - dns.Parse(packet, tcptuple, tcp.TcpDirectionReverse, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionReverse, private) assert.Empty(t, dns.transactions.Size(), "There should be no transactions.") @@ -575,29 +575,29 @@ func parseTcpRequestResponse(t testing.TB, dns *Dns, q DnsTestMessage) { // Verify that the request/response pair are parsed and that a result // is published. func TestParseTcp_requestResponse(t *testing.T) { - parseTcpRequestResponse(t, newDns(testing.Verbose()), elasticATcp) + parseTCPRequestResponse(t, newDNS(testing.Verbose()), elasticATcp) } // Verify all DNS TCP test messages are parsed correctly. func TestParseTcp_allTestMessages(t *testing.T) { - dns := newDns(testing.Verbose()) - for _, q := range messagesTcp { - t.Logf("Testing with query for %s", q.q_name) - parseTcpRequestResponse(t, dns, q) + dns := newDNS(testing.Verbose()) + for _, q := range messagesTCP { + t.Logf("Testing with query for %s", q.qName) + parseTCPRequestResponse(t, dns, q) } } // Benchmarks TCP parsing for the given test message. -func benchmarkTcp(b *testing.B, q DnsTestMessage) { - dns := newDns(false) +func benchmarkTCP(b *testing.B, q DNSTestMessage) { + dns := newDNS(false) for i := 0; i < b.N; i++ { var private protos.ProtocolData packet := newPacket(forward, q.request) - tcptuple := testTcpTuple() - private = dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + tcptuple := testTCPTuple() + private = dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) packet = newPacket(reverse, q.response) - dns.Parse(packet, tcptuple, tcp.TcpDirectionReverse, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionReverse, private) client := dns.results.(*publish.ChanTransactions) <-client.Channel @@ -605,18 +605,18 @@ func benchmarkTcp(b *testing.B, q DnsTestMessage) { } // Benchmark Tcp parsing against each test message. -func BenchmarkTcpElasticA(b *testing.B) { benchmarkTcp(b, elasticATcp) } -func BenchmarkTcpZoneIxfr(b *testing.B) { benchmarkTcp(b, zoneAxfrTcp) } -func BenchmarkTcpGithubPtr(b *testing.B) { benchmarkTcp(b, githubPtrTcp) } -func BenchmarkTcpSophosTxt(b *testing.B) { benchmarkTcp(b, sophosTxtTcp) } +func BenchmarkTcpElasticA(b *testing.B) { benchmarkTCP(b, elasticATcp) } +func BenchmarkTcpZoneIxfr(b *testing.B) { benchmarkTCP(b, zoneAxfrTCP) } +func BenchmarkTcpGithubPtr(b *testing.B) { benchmarkTCP(b, githubPtrTCP) } +func BenchmarkTcpSophosTxt(b *testing.B) { benchmarkTCP(b, sophosTxtTCP) } // Benchmark that runs with parallelism to help find concurrency related // issues. To run with parallelism, the 'go test' cpu flag must be set // greater than 1, otherwise it just runs concurrently but not in parallel. func BenchmarkParallelTcpParse(b *testing.B) { rand.Seed(22) - numMessages := len(messagesTcp) - dns := newDns(false) + numMessages := len(messagesTCP) + dns := newDNS(false) client := dns.results.(*publish.ChanTransactions) // Drain the results channel while the test is running. 
@@ -634,19 +634,19 @@ func BenchmarkParallelTcpParse(b *testing.B) { // Each iteration parses one message, either a request or a response. // The request and response could be parsed on different goroutines. for pb.Next() { - q := messagesTcp[rand.Intn(numMessages)] + q := messagesTCP[rand.Intn(numMessages)] var packet *protos.Packet var tcptuple *common.TCPTuple var private protos.ProtocolData if rand.Intn(2) == 0 { packet = newPacket(forward, q.request) - tcptuple = testTcpTuple() + tcptuple = testTCPTuple() } else { packet = newPacket(reverse, q.response) - tcptuple = testTcpTuple() + tcptuple = testTCPTuple() } - dns.Parse(packet, tcptuple, tcp.TcpDirectionOriginal, private) + dns.Parse(packet, tcptuple, tcp.TCPDirectionOriginal, private) } }) diff --git a/packetbeat/protos/dns/dns_test.go b/packetbeat/protos/dns/dns_test.go index 5e5902bf865..4ae2e0029f3 100644 --- a/packetbeat/protos/dns/dns_test.go +++ b/packetbeat/protos/dns/dns_test.go @@ -22,23 +22,23 @@ import ( // Test Constants const ( - ServerIp = "192.168.0.1" + ServerIP = "192.168.0.1" ServerPort = 53 - ClientIp = "10.0.0.1" + ClientIP = "10.0.0.1" ClientPort = 34898 ) // DnsTestMessage holds the data that is expected to be returned when parsing // the raw DNS layer payloads for the request and response packet. -type DnsTestMessage struct { +type DNSTestMessage struct { id uint16 opcode string flags []string rcode string - q_class string - q_type string - q_name string - q_etld string + qClass string + qType string + qName string + qEtld string answers []string authorities []string additionals []string @@ -49,14 +49,14 @@ type DnsTestMessage struct { // Request and response addresses. var ( forward = common.NewIPPortTuple(4, - net.ParseIP(ServerIp), ServerPort, - net.ParseIP(ClientIp), ClientPort) + net.ParseIP(ServerIP), ServerPort, + net.ParseIP(ClientIP), ClientPort) reverse = common.NewIPPortTuple(4, - net.ParseIP(ClientIp), ClientPort, - net.ParseIP(ServerIp), ServerPort) + net.ParseIP(ClientIP), ClientPort, + net.ParseIP(ServerIP), ServerPort) ) -func newDns(verbose bool) *Dns { +func newDNS(verbose bool) *DNS { if verbose { logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"dns"}) } else { @@ -76,7 +76,7 @@ func newDns(verbose bool) *Dns { panic(err) } - return dns.(*Dns) + return dns.(*DNS) } func newPacket(t common.IPPortTuple, payload []byte) *protos.Packet { @@ -89,7 +89,7 @@ func newPacket(t common.IPPortTuple, payload []byte) *protos.Packet { // expectResult returns one MapStr result from the Dns results channel. If // no result is available then the test fails. 
-func expectResult(t testing.TB, dns *Dns) common.MapStr { +func expectResult(t testing.TB, dns *DNS) common.MapStr { client := dns.results.(*publish.ChanTransactions) select { case result := <-client.Channel: @@ -158,7 +158,7 @@ func mapValueHelper(t testing.TB, m common.MapStr, keys []string) interface{} { // dns.authorities // dns.additionals_count // dns.additionals -func assertMapStrData(t testing.TB, m common.MapStr, q DnsTestMessage) { +func assertMapStrData(t testing.TB, m common.MapStr, q DNSTestMessage) { assertRequest(t, m, q) // Answers @@ -204,20 +204,20 @@ func assertMapStrData(t testing.TB, m common.MapStr, q DnsTestMessage) { } } -func assertRequest(t testing.TB, m common.MapStr, q DnsTestMessage) { +func assertRequest(t testing.TB, m common.MapStr, q DNSTestMessage) { assert.Equal(t, "dns", mapValue(t, m, "type")) assertAddress(t, forward, mapValue(t, m, "src")) assertAddress(t, reverse, mapValue(t, m, "dst")) - assert.Equal(t, fmt.Sprintf("class %s, type %s, %s", q.q_class, q.q_type, q.q_name), + assert.Equal(t, fmt.Sprintf("class %s, type %s, %s", q.qClass, q.qType, q.qName), mapValue(t, m, "query")) - assert.Equal(t, q.q_name, mapValue(t, m, "resource")) + assert.Equal(t, q.qName, mapValue(t, m, "resource")) assert.Equal(t, q.opcode, mapValue(t, m, "method")) assert.Equal(t, q.id, mapValue(t, m, "dns.id")) assert.Equal(t, q.opcode, mapValue(t, m, "dns.op_code")) - assert.Equal(t, q.q_class, mapValue(t, m, "dns.question.class")) - assert.Equal(t, q.q_type, mapValue(t, m, "dns.question.type")) - assert.Equal(t, q.q_name, mapValue(t, m, "dns.question.name")) - assert.Equal(t, q.q_etld, mapValue(t, m, "dns.question.etld_plus_one")) + assert.Equal(t, q.qClass, mapValue(t, m, "dns.question.class")) + assert.Equal(t, q.qType, mapValue(t, m, "dns.question.type")) + assert.Equal(t, q.qName, mapValue(t, m, "dns.question.name")) + assert.Equal(t, q.qEtld, mapValue(t, m, "dns.question.etld_plus_one")) } // Assert that the specified flags are set. diff --git a/packetbeat/protos/dns/dns_udp.go b/packetbeat/protos/dns/dns_udp.go index 72a2f6db41d..70f76f8272b 100644 --- a/packetbeat/protos/dns/dns_udp.go +++ b/packetbeat/protos/dns/dns_udp.go @@ -8,16 +8,16 @@ import ( ) // Only EDNS packets should have their size beyond this value -const MaxDnsPacketSize = (1 << 9) // 512 (bytes) +const MaxDNSPacketSize = (1 << 9) // 512 (bytes) -func (dns *Dns) ParseUdp(pkt *protos.Packet) { +func (dns *DNS) ParseUDP(pkt *protos.Packet) { defer logp.Recover("Dns ParseUdp") packetSize := len(pkt.Payload) debugf("Parsing packet addressed with %s of length %d.", pkt.Tuple.String(), packetSize) - dnsPkt, err := decodeDnsData(TransportUdp, pkt.Payload) + dnsPkt, err := decodeDNSData(TransportUDP, pkt.Payload) if err != nil { // This means that malformed requests or responses are being sent or // that someone is attempting to the DNS port for non-DNS traffic. 
Both @@ -26,8 +26,8 @@ func (dns *Dns) ParseUdp(pkt *protos.Packet) { return } - dnsTuple := DnsTupleFromIpPort(&pkt.Tuple, TransportUdp, dnsPkt.Id) - dnsMsg := &DnsMessage{ + dnsTuple := DNSTupleFromIPPort(&pkt.Tuple, TransportUDP, dnsPkt.Id) + dnsMsg := &DNSMessage{ Ts: pkt.Ts, Tuple: pkt.Tuple, CmdlineTuple: procs.ProcWatcher.FindProcessesTuple(&pkt.Tuple), @@ -36,8 +36,8 @@ func (dns *Dns) ParseUdp(pkt *protos.Packet) { } if dnsMsg.Data.Response { - dns.receivedDnsResponse(&dnsTuple, dnsMsg) + dns.receivedDNSResponse(&dnsTuple, dnsMsg) } else /* Query */ { - dns.receivedDnsRequest(&dnsTuple, dnsMsg) + dns.receivedDNSRequest(&dnsTuple, dnsMsg) } } diff --git a/packetbeat/protos/dns/dns_udp_test.go b/packetbeat/protos/dns/dns_udp_test.go index 9d513087066..57bf9a90d8b 100644 --- a/packetbeat/protos/dns/dns_udp_test.go +++ b/packetbeat/protos/dns/dns_udp_test.go @@ -33,13 +33,13 @@ import ( ) // Verify that the interface for UDP has been satisfied. -var _ protos.UdpPlugin = &Dns{} +var _ protos.UDPPlugin = &DNS{} // DNS messages for testing. When adding a new test message, add it to the // messages array and create a new benchmark test for the message. var ( // An array of all test messages. - messages = []DnsTestMessage{ + messages = []DNSTestMessage{ elasticA, zoneIxfr, githubPtr, @@ -47,15 +47,15 @@ var ( ednsSecA, } - elasticA = DnsTestMessage{ + elasticA = DNSTestMessage{ id: 8529, opcode: "QUERY", flags: []string{"rd", "ra"}, rcode: "NOERROR", - q_class: "IN", - q_type: "A", - q_name: "elastic.co.", - q_etld: "elastic.co.", + qClass: "IN", + qType: "A", + qName: "elastic.co.", + qEtld: "elastic.co.", answers: []string{"54.148.130.30", "54.69.104.66"}, request: []byte{ 0x21, 0x51, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x65, 0x6c, 0x61, @@ -69,15 +69,15 @@ var ( }, } - zoneIxfr = DnsTestMessage{ - id: 16384, - opcode: "QUERY", - flags: []string{"ra"}, - rcode: "NOERROR", - q_class: "IN", - q_type: "IXFR", - q_name: "etas.com.", - q_etld: "etas.com.", + zoneIxfr = DNSTestMessage{ + id: 16384, + opcode: "QUERY", + flags: []string{"ra"}, + rcode: "NOERROR", + qClass: "IN", + qType: "IXFR", + qName: "etas.com.", + qEtld: "etas.com.", answers: []string{"training2003p.", "training2003p.", "training2003p.", "training2003p.", "1.1.1.100"}, request: []byte{ @@ -106,15 +106,15 @@ var ( }, } - githubPtr = DnsTestMessage{ + githubPtr = DNSTestMessage{ id: 344, opcode: "QUERY", flags: []string{"rd", "ra"}, rcode: "NOERROR", - q_class: "IN", - q_type: "PTR", - q_name: "131.252.30.192.in-addr.arpa.", - q_etld: "192.in-addr.arpa.", + qClass: "IN", + qType: "PTR", + qName: "131.252.30.192.in-addr.arpa.", + qEtld: "192.in-addr.arpa.", answers: []string{"github.com."}, authorities: []string{"a.root-servers.net.", "b.root-servers.net.", "c.root-servers.net.", "d.root-servers.net.", "e.root-servers.net.", "f.root-servers.net.", "g.root-servers.net.", @@ -147,17 +147,17 @@ var ( }, } - sophosTxt = DnsTestMessage{ - id: 8238, - opcode: "QUERY", - flags: []string{"rd", "ra"}, - rcode: "NXDOMAIN", - q_class: "IN", - q_type: "TXT", - q_name: "3.1o19ss00s2s17s4qp375sp49r830n2n4n923s8839052s7p7768s53365226pp3.659p1r741os37393" + + sophosTxt = DNSTestMessage{ + id: 8238, + opcode: "QUERY", + flags: []string{"rd", "ra"}, + rcode: "NXDOMAIN", + qClass: "IN", + qType: "TXT", + qName: "3.1o19ss00s2s17s4qp375sp49r830n2n4n923s8839052s7p7768s53365226pp3.659p1r741os37393" + "648s2348o762q1066q53rq5p4614r1q4781qpr16n809qp4.879o3o734q9sns005o3pp76q83.2q65qns3spns" + 
"1081s5rn5sr74opqrqnpq6rn3ro5.i.00.mac.sophosxl.net.", - q_etld: "sophosxl.net.", + qEtld: "sophosxl.net.", request: []byte{ 0x20, 0x2e, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x33, 0x3f, 0x31, 0x6f, 0x31, 0x39, 0x73, 0x73, 0x30, 0x30, 0x73, 0x32, 0x73, 0x31, 0x37, 0x73, 0x34, 0x71, 0x70, @@ -194,15 +194,15 @@ var ( }, } - ednsSecA = DnsTestMessage{ + ednsSecA = DNSTestMessage{ id: 20498, opcode: "QUERY", flags: []string{"rd", "ad", "ra"}, rcode: "NOERROR", - q_class: "IN", - q_type: "A", - q_name: "www.ietf.org.", - q_etld: "ietf.org.", + qClass: "IN", + qType: "A", + qName: "www.ietf.org.", + qEtld: "ietf.org.", answers: []string{"64.170.98.30", "iDA8bJnrAEz3jgYnyFRm567a76qlv1V0CqxOSd/o9nvnN0GlZLaVoDmuXpaIaoypbGxwzwgK/LY6CV2k6SWKwicBmpENL26hwyjkFzPDW8kX3ibFhtfsOb8pYe7nBj326actp/7iG+DRuDmPnkYBja+wDYk61doTtkqZg57fn3iS97tjNPCC9C9knRAuDYUG+dVxalazSwYrpvY97dUC1H2spD0g4UdDyCbGA46mouZ4GPzNMewgf948qxrnU8pWPk3nQW5TgLVkGoWgco2owfLElBqf6rJ4LOswuhaw8IpTtmw3FsixxTLQvKOE5nftd1nMhQQd9CaHjoKNAUEz5Q=="}, request: []byte{ 0x50, 0x12, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03, 0x77, 0x77, 0x77, @@ -239,9 +239,9 @@ var ( // Verify that an empty packet is safely handled (no panics). func TestParseUdp_emptyPacket(t *testing.T) { - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) packet := newPacket(forward, []byte{}) - dns.ParseUdp(packet) + dns.ParseUDP(packet) assert.Empty(t, dns.transactions.Size(), "There should be no transactions.") client := dns.results.(*publish.ChanTransactions) close(client.Channel) @@ -250,10 +250,10 @@ func TestParseUdp_emptyPacket(t *testing.T) { // Verify that a malformed packet is safely handled (no panics). func TestParseUdp_malformedPacket(t *testing.T) { - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) garbage := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13} packet := newPacket(forward, garbage) - dns.ParseUdp(packet) + dns.ParseUDP(packet) assert.Empty(t, dns.transactions.Size(), "There should be no transactions.") // As a future addition, a malformed message should publish a result. @@ -261,9 +261,9 @@ func TestParseUdp_malformedPacket(t *testing.T) { // Verify that the lone request packet is parsed. func TestParseUdp_requestPacket(t *testing.T) { - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) packet := newPacket(forward, elasticA.request) - dns.ParseUdp(packet) + dns.ParseUDP(packet) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") client := dns.results.(*publish.ChanTransactions) close(client.Channel) @@ -273,10 +273,10 @@ func TestParseUdp_requestPacket(t *testing.T) { // Verify that the lone response packet is parsed and that an error // result is published. func TestParseUdp_responseOnly(t *testing.T) { - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) q := elasticA packet := newPacket(reverse, q.response) - dns.ParseUdp(packet) + dns.ParseUDP(packet) m := expectResult(t, dns) assert.Equal(t, "udp", mapValue(t, m, "transport")) @@ -292,13 +292,13 @@ func TestParseUdp_responseOnly(t *testing.T) { // the status is error. This second packet will remain in the transaction // map awaiting a response. 
func TestParseUdp_duplicateRequests(t *testing.T) { - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) q := elasticA packet := newPacket(forward, q.request) - dns.ParseUdp(packet) + dns.ParseUDP(packet) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") packet = newPacket(forward, q.request) - dns.ParseUdp(packet) + dns.ParseUDP(packet) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") m := expectResult(t, dns) @@ -313,25 +313,25 @@ func TestParseUdp_duplicateRequests(t *testing.T) { // Verify that the request/response pair are parsed and that a result // is published. func TestParseUdp_requestResponse(t *testing.T) { - parseUdpRequestResponse(t, newDns(testing.Verbose()), elasticA) + parseUDPRequestResponse(t, newDNS(testing.Verbose()), elasticA) } // Verify all DNS test messages are parsed correctly. func TestParseUdp_allTestMessages(t *testing.T) { - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) for _, q := range messages { - t.Logf("Testing with query for %s", q.q_name) - parseUdpRequestResponse(t, dns, q) + t.Logf("Testing with query for %s", q.qName) + parseUDPRequestResponse(t, dns, q) } } // Verify that expireTransaction publishes an event with an error status // and note. func TestExpireTransaction(t *testing.T) { - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) - trans := newTransaction(time.Now(), DnsTuple{}, common.CmdlineTuple{}) - trans.Request = &DnsMessage{ + trans := newTransaction(time.Now(), DNSTuple{}, common.CmdlineTuple{}) + trans.Request = &DNSMessage{ Data: &mkdns.Msg{ Question: []mkdns.Question{{}}, }, @@ -347,10 +347,10 @@ func TestExpireTransaction(t *testing.T) { // Verify that an empty DNS request packet can be published. func TestPublishTransaction_emptyDnsRequest(t *testing.T) { - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) - trans := newTransaction(time.Now(), DnsTuple{}, common.CmdlineTuple{}) - trans.Request = &DnsMessage{ + trans := newTransaction(time.Now(), DNSTuple{}, common.CmdlineTuple{}) + trans.Request = &DNSMessage{ Data: &mkdns.Msg{}, } dns.publishTransaction(trans) @@ -361,10 +361,10 @@ func TestPublishTransaction_emptyDnsRequest(t *testing.T) { // Verify that an empty DNS response packet can be published. func TestPublishTransaction_emptyDnsResponse(t *testing.T) { - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) - trans := newTransaction(time.Now(), DnsTuple{}, common.CmdlineTuple{}) - trans.Response = &DnsMessage{ + trans := newTransaction(time.Now(), DNSTuple{}, common.CmdlineTuple{}) + trans.Response = &DNSMessage{ Data: &mkdns.Msg{}, } dns.publishTransaction(trans) @@ -374,13 +374,13 @@ func TestPublishTransaction_emptyDnsResponse(t *testing.T) { } func TestPublishTransaction_edns(t *testing.T) { - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) q := ednsSecA packet := newPacket(forward, q.request) - dns.ParseUdp(packet) + dns.ParseUDP(packet) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") packet = newPacket(reverse, q.response) - dns.ParseUdp(packet) + dns.ParseUDP(packet) assert.Empty(t, dns.transactions.Size(), "There should be no transactions.") m := expectResult(t, dns) @@ -395,15 +395,15 @@ func TestPublishTransaction_edns(t *testing.T) { // Verify that a non-edns answer to a edns query publishes Notes. 
func TestPublishTransaction_respEdnsNoSupport(t *testing.T) { - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) q := ednsSecA q.response = q.response[:len(q.response)-11] // Remove OPT RR packet := newPacket(forward, q.request) - dns.ParseUdp(packet) + dns.ParseUDP(packet) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") packet = newPacket(reverse, q.response) - dns.ParseUdp(packet) + dns.ParseUDP(packet) assert.Empty(t, dns.transactions.Size(), "There should be no transactions.") m := expectResult(t, dns) @@ -418,15 +418,15 @@ func TestPublishTransaction_respEdnsNoSupport(t *testing.T) { // Verify that a edns response to a non-edns query publishes Notes. func TestPublishTransaction_respEdnsUnexpected(t *testing.T) { - dns := newDns(testing.Verbose()) + dns := newDNS(testing.Verbose()) q := ednsSecA q.request = q.request[:len(q.request)-11] // Remove OPT RR packet := newPacket(forward, q.request) - dns.ParseUdp(packet) + dns.ParseUDP(packet) assert.Equal(t, 1, dns.transactions.Size(), "There should be one transaction.") packet = newPacket(reverse, q.response) - dns.ParseUdp(packet) + dns.ParseUDP(packet) assert.Empty(t, dns.transactions.Size(), "There should be no transactions.") m := expectResult(t, dns) @@ -440,13 +440,13 @@ func TestPublishTransaction_respEdnsUnexpected(t *testing.T) { } // Benchmarks UDP parsing for the given test message. -func benchmarkUdp(b *testing.B, q DnsTestMessage) { - dns := newDns(false) +func benchmarkUDP(b *testing.B, q DNSTestMessage) { + dns := newDNS(false) for i := 0; i < b.N; i++ { packet := newPacket(forward, q.request) - dns.ParseUdp(packet) + dns.ParseUDP(packet) packet = newPacket(reverse, q.response) - dns.ParseUdp(packet) + dns.ParseUDP(packet) client := dns.results.(*publish.ChanTransactions) <-client.Channel @@ -454,10 +454,10 @@ func benchmarkUdp(b *testing.B, q DnsTestMessage) { } // Benchmark UDP parsing against each test message. -func BenchmarkUdpElasticA(b *testing.B) { benchmarkUdp(b, elasticA) } -func BenchmarkUdpZoneIxfr(b *testing.B) { benchmarkUdp(b, zoneIxfr) } -func BenchmarkUdpGithubPtr(b *testing.B) { benchmarkUdp(b, githubPtr) } -func BenchmarkUdpSophosTxt(b *testing.B) { benchmarkUdp(b, sophosTxt) } +func BenchmarkUdpElasticA(b *testing.B) { benchmarkUDP(b, elasticA) } +func BenchmarkUdpZoneIxfr(b *testing.B) { benchmarkUDP(b, zoneIxfr) } +func BenchmarkUdpGithubPtr(b *testing.B) { benchmarkUDP(b, githubPtr) } +func BenchmarkUdpSophosTxt(b *testing.B) { benchmarkUDP(b, sophosTxt) } // Benchmark that runs with parallelism to help find concurrency related // issues. To run with parallelism, the 'go test' cpu flag must be set @@ -465,7 +465,7 @@ func BenchmarkUdpSophosTxt(b *testing.B) { benchmarkUdp(b, sophosTxt) } func BenchmarkParallelUdpParse(b *testing.B) { rand.Seed(22) numMessages := len(messages) - dns := newDns(false) + dns := newDNS(false) client := dns.results.(*publish.ChanTransactions) // Drain the results channal while the test is running. @@ -490,7 +490,7 @@ func BenchmarkParallelUdpParse(b *testing.B) { } else { packet = newPacket(reverse, q.response) } - dns.ParseUdp(packet) + dns.ParseUDP(packet) } }) @@ -499,11 +499,11 @@ func BenchmarkParallelUdpParse(b *testing.B) { // parseUdpRequestResponse parses a request then a response packet and validates // the published result. 
-func parseUdpRequestResponse(t testing.TB, dns *Dns, q DnsTestMessage) { +func parseUDPRequestResponse(t testing.TB, dns *DNS, q DNSTestMessage) { packet := newPacket(forward, q.request) - dns.ParseUdp(packet) + dns.ParseUDP(packet) packet = newPacket(reverse, q.response) - dns.ParseUdp(packet) + dns.ParseUDP(packet) assert.Empty(t, dns.transactions.Size(), "There should be no transactions.") m := expectResult(t, dns) diff --git a/packetbeat/protos/dns/errors.go b/packetbeat/protos/dns/errors.go index 117034d980f..9a7c1d22d03 100644 --- a/packetbeat/protos/dns/errors.go +++ b/packetbeat/protos/dns/errors.go @@ -28,7 +28,7 @@ func (e *DNSError) ResponseError() string { // Common var ( - NonDnsMsg = &DNSError{Err: "Message's data could not be decoded as DNS"} + NonDNSMsg = &DNSError{Err: "Message's data could not be decoded as DNS"} DuplicateQueryMsg = &DNSError{Err: "Another query with the same DNS ID from this client " + "was received so this query was closed without receiving a response"} NoResponse = &DNSError{Err: "No response to this query was received"} @@ -37,7 +37,7 @@ var ( // EDNS var ( - UdpPacketTooLarge = &DNSError{Err: fmt.Sprintf("Non-EDNS packet has size greater than %d", MaxDnsPacketSize)} + UDPPacketTooLarge = &DNSError{Err: fmt.Sprintf("Non-EDNS packet has size greater than %d", MaxDNSPacketSize)} RespEdnsNoSupport = &DNSError{Err: "Responder does not support EDNS"} RespEdnsUnexpected = &DNSError{Err: "Unexpected EDNS answer"} ) diff --git a/packetbeat/protos/dns/names.go b/packetbeat/protos/dns/names.go index b3e41935422..5a13da12ec5 100644 --- a/packetbeat/protos/dns/names.go +++ b/packetbeat/protos/dns/names.go @@ -1,6 +1,7 @@ +package dns + // This file contains the name mapping data used to convert various DNS IDs to // their string values. -package dns import ( "encoding/hex" diff --git a/packetbeat/protos/dns/names_test.go b/packetbeat/protos/dns/names_test.go index b5745d87391..de4fb6a439f 100644 --- a/packetbeat/protos/dns/names_test.go +++ b/packetbeat/protos/dns/names_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/assert" ) -type DnsTestMsg struct { +type DNSTestMsg struct { rawData []byte question common.MapStr answers []common.MapStr @@ -29,13 +29,13 @@ type DnsTestMsg struct { // DNS messages for testing. var ( // An array of all test messages. 
- dnsTestRRs = []DnsTestMsg{ + dnsTestRRs = []DNSTestMsg{ unhandledRR, unknownRR, opt, } - unhandledRR = DnsTestMsg{ // RR specified in a RFC but not implemented in the package dns + unhandledRR = DNSTestMsg{ // RR specified in a RFC but not implemented in the package dns rawData: []byte{ 0x21, 0x51, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x02, 0x63, 0x6f, 0x00, 0x00, 0x1e, 0x00, 0x01, @@ -46,7 +46,7 @@ var ( }, } - unknownRR = DnsTestMsg{ // RR unspecified in any known RFC + unknownRR = DNSTestMsg{ // RR unspecified in any known RFC rawData: []byte{ 0x21, 0x51, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x02, 0x63, 0x6f, 0x00, 0xff, 0x00, 0x00, 0x01, @@ -57,7 +57,7 @@ var ( }, } - opt = DnsTestMsg{ + opt = DNSTestMsg{ rawData: []byte{ 0x50, 0x12, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03, 0x77, 0x77, 0x77, 0x04, 0x69, 0x65, 0x74, 0x66, 0x03, 0x6f, 0x72, 0x67, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, @@ -86,14 +86,14 @@ func assertRRs(t testing.TB, oracleRRs []common.MapStr, rrs []common.MapStr) { } } -func assertDnsMessage(t testing.TB, q DnsTestMsg) { - dns, err := decodeDnsData(TransportUdp, q.rawData) +func assertDNSMessage(t testing.TB, q DNSTestMsg) { + dns, err := decodeDNSData(TransportUDP, q.rawData) if err != nil { t.Error("failed to decode dns data") } mapStr := common.MapStr{} - addDnsToMapStr(mapStr, dns, true, true) + addDNSToMapStr(mapStr, dns, true, true) if q.question != nil { for k, v := range q.question { assert.NotNil(t, mapStr["question"].(common.MapStr)[k]) @@ -119,6 +119,6 @@ func assertDnsMessage(t testing.TB, q DnsTestMsg) { func TestAllRR(t *testing.T) { for _, q := range dnsTestRRs { - assertDnsMessage(t, q) + assertDNSMessage(t, q) } } diff --git a/packetbeat/protos/http/config.go b/packetbeat/protos/http/config.go index fa1973bce38..00acf70e4d3 100644 --- a/packetbeat/protos/http/config.go +++ b/packetbeat/protos/http/config.go @@ -8,13 +8,13 @@ import ( type httpConfig struct { config.ProtocolCommon `config:",inline"` - Send_all_headers bool `config:"send_all_headers"` - Send_headers []string `config:"send_headers"` - Split_cookie bool `config:"split_cookie"` - Real_ip_header string `config:"real_ip_header"` - Include_body_for []string `config:"include_body_for"` - Hide_keywords []string `config:"hide_keywords"` - Redact_authorization bool `config:"redact_authorization"` + SendAllHeaders bool `config:"send_all_headers"` + SendHeaders []string `config:"send_headers"` + SplitCookie bool `config:"split_cookie"` + RealIPHeader string `config:"real_ip_header"` + IncludeBodyFor []string `config:"include_body_for"` + HideKeywords []string `config:"hide_keywords"` + RedactAuthorization bool `config:"redact_authorization"` MaxMessageSize int `config:"max_message_size"` } @@ -23,6 +23,6 @@ var ( ProtocolCommon: config.ProtocolCommon{ TransactionTimeout: protos.DefaultTransactionExpiration, }, - MaxMessageSize: tcp.TCP_MAX_DATA_IN_STREAM, + MaxMessageSize: tcp.TCPMaxDataInStream, } ) diff --git a/packetbeat/protos/http/http.go b/packetbeat/protos/http/http.go index 57c0b90943f..7343339b2db 100644 --- a/packetbeat/protos/http/http.go +++ b/packetbeat/protos/http/http.go @@ -119,23 +119,23 @@ func (http *HTTP) setFromConfig(config *httpConfig) { http.Ports = config.Ports http.SendRequest = config.SendRequest http.SendResponse = config.SendResponse - http.HideKeywords = config.Hide_keywords - http.RedactAuthorization = 
config.Redact_authorization - http.SplitCookie = config.Split_cookie - http.parserConfig.RealIPHeader = strings.ToLower(config.Real_ip_header) + http.HideKeywords = config.HideKeywords + http.RedactAuthorization = config.RedactAuthorization + http.SplitCookie = config.SplitCookie + http.parserConfig.RealIPHeader = strings.ToLower(config.RealIPHeader) http.transactionTimeout = config.TransactionTimeout - http.IncludeBodyFor = config.Include_body_for + http.IncludeBodyFor = config.IncludeBodyFor http.MaxMessageSize = config.MaxMessageSize - if config.Send_all_headers { + if config.SendAllHeaders { http.parserConfig.SendHeaders = true http.parserConfig.SendAllHeaders = true } else { - if len(config.Send_headers) > 0 { + if len(config.SendHeaders) > 0 { http.parserConfig.SendHeaders = true http.parserConfig.HeadersWhitelist = map[string]bool{} - for _, hdr := range config.Send_headers { + for _, hdr := range config.SendHeaders { http.parserConfig.HeadersWhitelist[strings.ToLower(hdr)] = true } } @@ -461,11 +461,11 @@ func (http *HTTP) newTransaction(requ, resp *message) common.MapStr { Port: requ.TCPTuple.DstPort, Proc: string(requ.CmdlineTuple.Dst), } - if requ.Direction == tcp.TcpDirectionReverse { + if requ.Direction == tcp.TCPDirectionReverse { src, dst = dst, src } - http_details := common.MapStr{ + httpDetails := common.MapStr{ "request": common.MapStr{ "params": params, "headers": http.collectHeaders(requ), @@ -477,8 +477,8 @@ func (http *HTTP) newTransaction(requ, resp *message) common.MapStr { }, } - http.setBody(http_details["request"].(common.MapStr), requ) - http.setBody(http_details["response"].(common.MapStr), resp) + http.setBody(httpDetails["request"].(common.MapStr), requ) + http.setBody(httpDetails["response"].(common.MapStr), resp) event := common.MapStr{ "@timestamp": common.Time(requ.Ts), @@ -488,7 +488,7 @@ func (http *HTTP) newTransaction(requ, resp *message) common.MapStr { "method": requ.Method, "path": path, "query": fmt.Sprintf("%s %s", requ.Method, path), - "http": http_details, + "http": httpDetails, "bytes_out": resp.Size, "bytes_in": requ.Size, "src": &src, diff --git a/packetbeat/protos/http/http_test.go b/packetbeat/protos/http/http_test.go index ecb6a336be3..377a0841d4f 100644 --- a/packetbeat/protos/http/http_test.go +++ b/packetbeat/protos/http/http_test.go @@ -1124,11 +1124,11 @@ func TestHttp_configsSettingAll(t *testing.T) { config.SendRequest = true config.SendResponse = true - config.Hide_keywords = []string{"a", "b"} - config.Redact_authorization = true - config.Send_all_headers = true - config.Split_cookie = true - config.Real_ip_header = "X-Forwarded-For" + config.HideKeywords = []string{"a", "b"} + config.RedactAuthorization = true + config.SendAllHeaders = true + config.SplitCookie = true + config.RealIPHeader = "X-Forwarded-For" // Set config http.setFromConfig(&config) @@ -1138,12 +1138,12 @@ func TestHttp_configsSettingAll(t *testing.T) { assert.Equal(t, config.Ports, http.GetPorts()) assert.Equal(t, config.SendRequest, http.SendRequest) assert.Equal(t, config.SendResponse, http.SendResponse) - assert.Equal(t, config.Hide_keywords, http.HideKeywords) - assert.Equal(t, config.Redact_authorization, http.RedactAuthorization) + assert.Equal(t, config.HideKeywords, http.HideKeywords) + assert.Equal(t, config.RedactAuthorization, http.RedactAuthorization) assert.True(t, http.parserConfig.SendHeaders) assert.True(t, http.parserConfig.SendAllHeaders) - assert.Equal(t, config.Split_cookie, http.SplitCookie) - assert.Equal(t, 
strings.ToLower(config.Real_ip_header), http.parserConfig.RealIPHeader) + assert.Equal(t, config.SplitCookie, http.SplitCookie) + assert.Equal(t, strings.ToLower(config.RealIPHeader), http.parserConfig.RealIPHeader) } func TestHttp_configsSettingHeaders(t *testing.T) { @@ -1152,14 +1152,14 @@ func TestHttp_configsSettingHeaders(t *testing.T) { config := defaultConfig // Assign config vars - config.Send_headers = []string{"a", "b", "c"} + config.SendHeaders = []string{"a", "b", "c"} // Set config http.setFromConfig(&config) // Check if http config is set correctly assert.True(t, http.parserConfig.SendHeaders) - assert.Equal(t, len(config.Send_headers), len(http.parserConfig.HeadersWhitelist)) + assert.Equal(t, len(config.SendHeaders), len(http.parserConfig.HeadersWhitelist)) for _, val := range http.parserConfig.HeadersWhitelist { assert.True(t, val) diff --git a/packetbeat/protos/icmp/icmp.go b/packetbeat/protos/icmp/icmp.go index 85bc23b6316..623fb84a6cc 100644 --- a/packetbeat/protos/icmp/icmp.go +++ b/packetbeat/protos/icmp/icmp.go @@ -115,9 +115,9 @@ func (icmp *Icmp) ProcessICMPv4( tuple := &icmpTuple{ IcmpVersion: 4, - SrcIp: pkt.Tuple.SrcIP, - DstIp: pkt.Tuple.DstIP, - Id: id, + SrcIP: pkt.Tuple.SrcIP, + DstIP: pkt.Tuple.DstIP, + ID: id, Seq: seq, } msg := &icmpMessage{ @@ -150,9 +150,9 @@ func (icmp *Icmp) ProcessICMPv6( id, seq := extractTrackingData(6, typ, &icmp6.BaseLayer) tuple := &icmpTuple{ IcmpVersion: 6, - SrcIp: pkt.Tuple.SrcIP, - DstIp: pkt.Tuple.DstIP, - Id: id, + SrcIP: pkt.Tuple.SrcIP, + DstIP: pkt.Tuple.DstIP, + ID: id, Seq: seq, } msg := &icmpMessage{ @@ -213,22 +213,22 @@ func (icmp *Icmp) processResponse(tuple *icmpTuple, msg *icmpMessage) { } func (icmp *Icmp) direction(t *icmpTransaction) uint8 { - if !icmp.isLocalIp(t.Tuple.SrcIp) { + if !icmp.isLocalIP(t.Tuple.SrcIP) { return directionFromOutside } - if !icmp.isLocalIp(t.Tuple.DstIp) { + if !icmp.isLocalIP(t.Tuple.DstIP) { return directionFromInside } return directionLocalOnly } -func (icmp *Icmp) isLocalIp(ip net.IP) bool { +func (icmp *Icmp) isLocalIP(ip net.IP) bool { if ip.IsLoopback() { return true } - for _, localIp := range icmp.localIps { - if ip.Equal(localIp) { + for _, localIP := range icmp.localIps { + if ip.Equal(localIP) { return true } } @@ -269,13 +269,13 @@ func (icmp *Icmp) publishTransaction(trans *icmpTransaction) { event := common.MapStr{} // common fields - group "env" - event["client_ip"] = trans.Tuple.SrcIp - event["ip"] = trans.Tuple.DstIp + event["client_ip"] = trans.Tuple.SrcIP + event["ip"] = trans.Tuple.DstIP // common fields - group "event" event["@timestamp"] = common.Time(trans.Ts) // timestamp of the first packet event["type"] = "icmp" // protocol name - event["path"] = trans.Tuple.DstIp // what is requested (dst ip) + event["path"] = trans.Tuple.DstIP // what is requested (dst ip) if trans.HasError() { event["status"] = common.ERROR_STATUS } else { diff --git a/packetbeat/protos/icmp/icmp_test.go b/packetbeat/protos/icmp/icmp_test.go index 7409d2e9442..def0ac6faae 100644 --- a/packetbeat/protos/icmp/icmp_test.go +++ b/packetbeat/protos/icmp/icmp_test.go @@ -22,21 +22,21 @@ import ( func TestIcmpIsLocalIp(t *testing.T) { icmp := Icmp{localIps: []net.IP{net.IPv4(192, 168, 0, 1), net.IPv4(192, 168, 0, 2)}} - assert.True(t, icmp.isLocalIp(net.IPv4(127, 0, 0, 1)), "loopback IP") - assert.True(t, icmp.isLocalIp(net.IPv4(192, 168, 0, 1)), "local IP") - assert.False(t, icmp.isLocalIp(net.IPv4(10, 0, 0, 1)), "remote IP") + assert.True(t, icmp.isLocalIP(net.IPv4(127, 0, 0, 1)), 
"loopback IP") + assert.True(t, icmp.isLocalIP(net.IPv4(192, 168, 0, 1)), "local IP") + assert.False(t, icmp.isLocalIP(net.IPv4(10, 0, 0, 1)), "remote IP") } func TestIcmpDirection(t *testing.T) { icmp := Icmp{} - trans1 := &icmpTransaction{Tuple: icmpTuple{SrcIp: net.IPv4(127, 0, 0, 1), DstIp: net.IPv4(127, 0, 0, 1)}} + trans1 := &icmpTransaction{Tuple: icmpTuple{SrcIP: net.IPv4(127, 0, 0, 1), DstIP: net.IPv4(127, 0, 0, 1)}} assert.Equal(t, uint8(directionLocalOnly), icmp.direction(trans1), "local communication") - trans2 := &icmpTransaction{Tuple: icmpTuple{SrcIp: net.IPv4(10, 0, 0, 1), DstIp: net.IPv4(127, 0, 0, 1)}} + trans2 := &icmpTransaction{Tuple: icmpTuple{SrcIP: net.IPv4(10, 0, 0, 1), DstIP: net.IPv4(127, 0, 0, 1)}} assert.Equal(t, uint8(directionFromOutside), icmp.direction(trans2), "client to server") - trans3 := &icmpTransaction{Tuple: icmpTuple{SrcIp: net.IPv4(127, 0, 0, 1), DstIp: net.IPv4(10, 0, 0, 1)}} + trans3 := &icmpTransaction{Tuple: icmpTuple{SrcIP: net.IPv4(127, 0, 0, 1), DstIP: net.IPv4(10, 0, 0, 1)}} assert.Equal(t, uint8(directionFromInside), icmp.direction(trans3), "server to client") } diff --git a/packetbeat/protos/icmp/message.go b/packetbeat/protos/icmp/message.go index fb69540ee70..4476eec09d7 100644 --- a/packetbeat/protos/icmp/message.go +++ b/packetbeat/protos/icmp/message.go @@ -104,18 +104,16 @@ func extractTrackingData(icmpVersion uint8, msgType uint8, baseLayer *layers.Bas id := binary.BigEndian.Uint16(baseLayer.Contents[4:6]) seq := binary.BigEndian.Uint16(baseLayer.Contents[6:8]) return id, seq - } else { - return 0, 0 } + return 0, 0 } if icmpVersion == 6 { if icmp6PairTypes[msgType] { id := binary.BigEndian.Uint16(baseLayer.Contents[4:6]) seq := binary.BigEndian.Uint16(baseLayer.Contents[6:8]) return id, seq - } else { - return 0, 0 } + return 0, 0 } logp.WTF("icmp", "Invalid ICMP version[%d]", icmpVersion) return 0, 0 diff --git a/packetbeat/protos/icmp/message_test.go b/packetbeat/protos/icmp/message_test.go index 36e13a79063..f07e04e0f1b 100644 --- a/packetbeat/protos/icmp/message_test.go +++ b/packetbeat/protos/icmp/message_test.go @@ -56,15 +56,15 @@ func TestIcmpMessageExtractTrackingDataICMPv4(t *testing.T) { baseLayer := &layers.BaseLayer{Contents: []byte{0x0, 0x0, 0x0, 0x0, 0xff, 0x1, 0x0, 0x2}} // pair type - actualId, actualSeq := extractTrackingData(4, layers.ICMPv4TypeEchoRequest, baseLayer) + actualID, actualSeq := extractTrackingData(4, layers.ICMPv4TypeEchoRequest, baseLayer) - assert.Equal(t, uint16(65281), actualId) + assert.Equal(t, uint16(65281), actualID) assert.Equal(t, uint16(2), actualSeq) // non-pair type - actualId, actualSeq = extractTrackingData(4, layers.ICMPv4TypeDestinationUnreachable, baseLayer) + actualID, actualSeq = extractTrackingData(4, layers.ICMPv4TypeDestinationUnreachable, baseLayer) - assert.Equal(t, uint16(0), actualId) + assert.Equal(t, uint16(0), actualID) assert.Equal(t, uint16(0), actualSeq) } @@ -72,15 +72,15 @@ func TestIcmpMessageExtractTrackingDataICMPv6(t *testing.T) { baseLayer := &layers.BaseLayer{Contents: []byte{0x0, 0x0, 0x0, 0x0, 0xff, 0x1, 0x0, 0x2}} // pair type - actualId, actualSeq := extractTrackingData(6, layers.ICMPv6TypeEchoRequest, baseLayer) + actualID, actualSeq := extractTrackingData(6, layers.ICMPv6TypeEchoRequest, baseLayer) - assert.Equal(t, uint16(65281), actualId) + assert.Equal(t, uint16(65281), actualID) assert.Equal(t, uint16(2), actualSeq) // non-pair type - actualId, actualSeq = extractTrackingData(6, layers.ICMPv6TypeDestinationUnreachable, baseLayer) + actualID, 
actualSeq = extractTrackingData(6, layers.ICMPv6TypeDestinationUnreachable, baseLayer) - assert.Equal(t, uint16(0), actualId) + assert.Equal(t, uint16(0), actualID) assert.Equal(t, uint16(0), actualSeq) } diff --git a/packetbeat/protos/icmp/transaction.go b/packetbeat/protos/icmp/transaction.go index de485397506..85c2a6265d2 100644 --- a/packetbeat/protos/icmp/transaction.go +++ b/packetbeat/protos/icmp/transaction.go @@ -21,7 +21,6 @@ func (t *icmpTransaction) HasError() bool { func (t *icmpTransaction) ResponseTimeMillis() (int32, bool) { if t.Request != nil && t.Response != nil { return int32(t.Response.Ts.Sub(t.Request.Ts).Nanoseconds() / 1e6), true - } else { - return 0, false } + return 0, false } diff --git a/packetbeat/protos/icmp/tuple.go b/packetbeat/protos/icmp/tuple.go index 83f2bc8f656..a9fed350fc2 100644 --- a/packetbeat/protos/icmp/tuple.go +++ b/packetbeat/protos/icmp/tuple.go @@ -16,31 +16,31 @@ type hashableIcmpTuple [maxIcmpTupleRawSize]byte type icmpTuple struct { IcmpVersion uint8 - SrcIp net.IP - DstIp net.IP - Id uint16 + SrcIP net.IP + DstIP net.IP + ID uint16 Seq uint16 } func (t *icmpTuple) Reverse() icmpTuple { return icmpTuple{ IcmpVersion: t.IcmpVersion, - SrcIp: t.DstIp, - DstIp: t.SrcIp, - Id: t.Id, + SrcIP: t.DstIP, + DstIP: t.SrcIP, + ID: t.ID, Seq: t.Seq, } } func (t *icmpTuple) Hashable() hashableIcmpTuple { var hash hashableIcmpTuple - copy(hash[0:16], t.SrcIp) - copy(hash[16:32], t.DstIp) - copy(hash[32:37], []byte{byte(t.Id >> 8), byte(t.Id), byte(t.Seq >> 8), byte(t.Seq), t.IcmpVersion}) + copy(hash[0:16], t.SrcIP) + copy(hash[16:32], t.DstIP) + copy(hash[32:37], []byte{byte(t.ID >> 8), byte(t.ID), byte(t.Seq >> 8), byte(t.Seq), t.IcmpVersion}) return hash } func (t *icmpTuple) String() string { return fmt.Sprintf("icmpTuple version[%d] src[%s] dst[%s] id[%d] seq[%d]", - t.IcmpVersion, t.SrcIp, t.DstIp, t.Id, t.Seq) + t.IcmpVersion, t.SrcIP, t.DstIP, t.ID, t.Seq) } diff --git a/packetbeat/protos/icmp/tuple_test.go b/packetbeat/protos/icmp/tuple_test.go index 03f10c7a7a7..75a402ea003 100644 --- a/packetbeat/protos/icmp/tuple_test.go +++ b/packetbeat/protos/icmp/tuple_test.go @@ -12,18 +12,18 @@ import ( func TestIcmpTupleReverse(t *testing.T) { tuple := icmpTuple{ IcmpVersion: 4, - SrcIp: net.IPv4(192, 168, 0, 1), - DstIp: net.IPv4(192, 168, 0, 2), - Id: 256, + SrcIP: net.IPv4(192, 168, 0, 1), + DstIP: net.IPv4(192, 168, 0, 2), + ID: 256, Seq: 1, } actualReverse := tuple.Reverse() expectedReverse := icmpTuple{ IcmpVersion: 4, - SrcIp: net.IPv4(192, 168, 0, 2), - DstIp: net.IPv4(192, 168, 0, 1), - Id: 256, + SrcIP: net.IPv4(192, 168, 0, 2), + DstIP: net.IPv4(192, 168, 0, 1), + ID: 256, Seq: 1, } @@ -33,9 +33,9 @@ func TestIcmpTupleReverse(t *testing.T) { func BenchmarkIcmpTupleReverse(b *testing.B) { tuple := icmpTuple{ IcmpVersion: 4, - SrcIp: net.IPv4(192, 168, 0, 1), - DstIp: net.IPv4(192, 168, 0, 2), - Id: 256, + SrcIP: net.IPv4(192, 168, 0, 1), + DstIP: net.IPv4(192, 168, 0, 2), + ID: 256, Seq: 1, } @@ -49,9 +49,9 @@ func BenchmarkIcmpTupleReverse(b *testing.B) { func TestIcmpTupleHashable(t *testing.T) { tuple := icmpTuple{ IcmpVersion: 4, - SrcIp: net.IPv4(192, 168, 0, 1), - DstIp: net.IPv4(192, 168, 0, 2), - Id: 256, + SrcIP: net.IPv4(192, 168, 0, 1), + DstIP: net.IPv4(192, 168, 0, 2), + ID: 256, Seq: 1, } @@ -69,9 +69,9 @@ func TestIcmpTupleHashable(t *testing.T) { func BenchmarkIcmpTupleHashable(b *testing.B) { tuple := icmpTuple{ IcmpVersion: 4, - SrcIp: net.IPv4(192, 168, 0, 1), - DstIp: net.IPv4(192, 168, 0, 2), - Id: 256, + SrcIP: 
net.IPv4(192, 168, 0, 1), + DstIP: net.IPv4(192, 168, 0, 2), + ID: 256, Seq: 1, } @@ -85,9 +85,9 @@ func BenchmarkIcmpTupleHashable(b *testing.B) { func TestIcmpTupleToString(t *testing.T) { tuple := icmpTuple{ IcmpVersion: 4, - SrcIp: net.IPv4(192, 168, 0, 1), - DstIp: net.IPv4(192, 168, 0, 2), - Id: 256, + SrcIP: net.IPv4(192, 168, 0, 1), + DstIP: net.IPv4(192, 168, 0, 2), + ID: 256, Seq: 1, } diff --git a/packetbeat/protos/memcache/binary.go b/packetbeat/protos/memcache/binary.go index 257d0cf56e9..deb297a84fd 100644 --- a/packetbeat/protos/memcache/binary.go +++ b/packetbeat/protos/memcache/binary.go @@ -370,7 +370,7 @@ func parseDataBinary(parser *parser, buf *streambuf.Buffer) parseResult { debug("found data message") if msg.bytesLost > 0 { - msg.count_values++ + msg.countValues++ } else { parser.appendMessageData(data) } @@ -453,8 +453,8 @@ func makeSerializeBinary( if msg.isCas { serializeCas(msg, event) } - if msg.count_values > 0 { - event["count_values"] = msg.count_values + if msg.countValues > 0 { + event["count_values"] = msg.countValues if len(msg.values) > 0 { event["values"] = msg.values } @@ -468,16 +468,16 @@ func makeSerializeBinary( event["quiet"] = msg.isQuiet event["vbucket"] = msg.vbucket return serializeArgs(msg, event, requestArgs) - } else { - status := memcacheStatusCode(msg.status) - event["status"] = status.String() - event["status_code"] = status + } - if typ == MemcacheCounterMsg { - event["value"] = msg.value - } - return serializeArgs(msg, event, responseArgs) + status := memcacheStatusCode(msg.status) + event["status"] = status.String() + event["status_code"] = status + + if typ == MemcacheCounterMsg { + event["value"] = msg.value } + return serializeArgs(msg, event, responseArgs) } } diff --git a/packetbeat/protos/memcache/config.go b/packetbeat/protos/memcache/config.go index edd6169a62f..cfb94f60ebe 100644 --- a/packetbeat/protos/memcache/config.go +++ b/packetbeat/protos/memcache/config.go @@ -11,7 +11,7 @@ type memcacheConfig struct { config.ProtocolCommon `config:",inline"` MaxValues int MaxBytesPerValue int - UdpTransactionTimeout time.Duration + UDPTransactionTimeout time.Duration ParseUnknown bool } @@ -21,6 +21,6 @@ var ( Ports: []int{11211}, TransactionTimeout: protos.DefaultTransactionExpiration, }, - UdpTransactionTimeout: protos.DefaultTransactionExpiration, + UDPTransactionTimeout: protos.DefaultTransactionExpiration, } ) diff --git a/packetbeat/protos/memcache/errors.go b/packetbeat/protos/memcache/errors.go index ce34227d80e..d8715b46d0b 100644 --- a/packetbeat/protos/memcache/errors.go +++ b/packetbeat/protos/memcache/errors.go @@ -29,7 +29,7 @@ var ( // memcache UDP errors var ( - ErrUdpIncompleteMessage = errors.New("attempt to parse incomplete message failed") + ErrUDPIncompleteMessage = errors.New("attempt to parse incomplete message failed") ) // memcache transaction/message errors diff --git a/packetbeat/protos/memcache/memcache.go b/packetbeat/protos/memcache/memcache.go index 3a69d23b7e6..7abc65e5dcc 100644 --- a/packetbeat/protos/memcache/memcache.go +++ b/packetbeat/protos/memcache/memcache.go @@ -59,19 +59,19 @@ type message struct { isQuiet bool // values - keys []memcacheString - flags uint32 - exptime uint32 - value uint64 - value2 uint64 - ivalue int64 - ivalue2 int64 - str memcacheString - data memcacheData - bytes uint - bytesLost uint - values []memcacheData - count_values uint32 + keys []memcacheString + flags uint32 + exptime uint32 + value uint64 + value2 uint64 + ivalue int64 + ivalue2 int64 + str memcacheString + 
data memcacheData + bytes uint + bytesLost uint + values []memcacheData + countValues uint32 stats []memcacheStat } @@ -157,11 +157,11 @@ func (mc *Memcache) setFromConfig(config *memcacheConfig) error { mc.config.parseUnkown = config.ParseUnknown - mc.udpConfig.transTimeout = config.UdpTransactionTimeout + mc.udpConfig.transTimeout = config.UDPTransactionTimeout mc.tcpConfig.tcpTransTimeout = config.TransactionTimeout debug("transaction timeout: %v", config.TransactionTimeout) - debug("udp transaction timeout: %v", config.UdpTransactionTimeout) + debug("udp transaction timeout: %v", config.UDPTransactionTimeout) debug("maxValues = %v", mc.config.maxValues) debug("maxBytesPerValue = %v", mc.config.maxBytesPerValue) @@ -209,9 +209,10 @@ func (m *message) SubEvent( if m == nil { return nil, nil } - msg_event := common.MapStr{} - event[name] = msg_event - return msg_event, m.Event(msg_event) + + msgEvent := common.MapStr{} + event[name] = msgEvent + return msgEvent, m.Event(msgEvent) } func tryMergeResponses(mc *Memcache, prev, msg *message) (bool, error) { @@ -236,17 +237,17 @@ func tryMergeResponses(mc *Memcache, prev, msg *message) (bool, error) { } return false, nil - } else { - // merge binary protocol stats messages - if prev.opcode != opcodeStat || msg.opcode != opcodeStat { - return false, nil - } - if prev.opaque != msg.opaque { - return false, nil - } + } - return mergeStatsMessages(mc, prev, msg) + // merge binary protocol stats messages + if prev.opcode != opcodeStat || msg.opcode != opcodeStat { + return false, nil } + if prev.opaque != msg.opaque { + return false, nil + } + + return mergeStatsMessages(mc, prev, msg) } func mergeValueMessages(mc *Memcache, prev, msg *message) (bool, error) { @@ -265,7 +266,7 @@ func mergeValueMessages(mc *Memcache, prev, msg *message) (bool, error) { prev.bytes += msg.bytes prev.keys = append(prev.keys, msg.keys...) prev.AddNotes(msg.Notes...) 
- prev.count_values += msg.count_values + prev.countValues += msg.countValues if msg.command.code == MemcacheResValue { delta := 0 if mc.config.maxValues < 0 { @@ -309,11 +310,11 @@ func checkResponseComplete(msg *message) bool { return true } return len(msg.keys) == 0 - } else { - cont := msg.command.code == MemcacheResValue || - msg.command.code == MemcacheResStat - return !cont } + + cont := msg.command.code == MemcacheResValue || + msg.command.code == MemcacheResStat + return !cont } func newTransaction(requ, resp *message) *transaction { diff --git a/packetbeat/protos/memcache/memcache_test.go b/packetbeat/protos/memcache/memcache_test.go index f76b3215f09..95a78a28c84 100644 --- a/packetbeat/protos/memcache/memcache_test.go +++ b/packetbeat/protos/memcache/memcache_test.go @@ -170,7 +170,7 @@ func Test_MergeTextValueResponsesNoLimits(t *testing.T) { msg := msg1 assert.Equal(t, "k1", msg.keys[0].String()) assert.Equal(t, "k2", msg.keys[1].String()) - assert.Equal(t, uint32(2), msg.count_values) + assert.Equal(t, uint32(2), msg.countValues) assert.Equal(t, "value1", msg.values[0].String()) assert.Equal(t, "value2", msg.values[1].String()) } @@ -197,7 +197,7 @@ func Test_MergeTextValueResponsesWithLimits(t *testing.T) { msg := msg1 assert.Equal(t, "k1", msg.keys[0].String()) assert.Equal(t, "k2", msg.keys[1].String()) - assert.Equal(t, uint32(2), msg.count_values) + assert.Equal(t, uint32(2), msg.countValues) assert.Equal(t, 1, len(msg.values)) assert.Equal(t, "value1", msg.values[0].String()) } diff --git a/packetbeat/protos/memcache/parse.go b/packetbeat/protos/memcache/parse.go index 01694c8de76..ade38d2c18f 100644 --- a/packetbeat/protos/memcache/parse.go +++ b/packetbeat/protos/memcache/parse.go @@ -145,7 +145,7 @@ func (p *parser) appendMessageData(data []byte) { } msg.values = append(msg.values, msg.data) } - msg.count_values++ + msg.countValues++ } func parseFailing(parser *parser, buf *streambuf.Buffer) parseResult { @@ -161,12 +161,11 @@ func doParseCommand(parser *parser, buf *streambuf.Buffer) parseResult { return parser.needMore() } magic := buf.Bytes()[0] - is_binary := magic == MemcacheMagicRequest || magic == MemcacheMagicResponse - if is_binary { + isBinary := magic == MemcacheMagicRequest || magic == MemcacheMagicResponse + if isBinary { return parser.contWith(buf, parseStateBinaryCommand) - } else { - return parser.contWith(buf, parseStateTextCommand) } + return parser.contWith(buf, parseStateTextCommand) } func argparseNoop(p *parser, h, b *streambuf.Buffer) error { diff --git a/packetbeat/protos/memcache/parse_test.go b/packetbeat/protos/memcache/parse_test.go index a3fcf989d64..b0ea5c83e90 100644 --- a/packetbeat/protos/memcache/parse_test.go +++ b/packetbeat/protos/memcache/parse_test.go @@ -179,7 +179,7 @@ func extras(es ...extraFn) []extraFn { func (b *offsetBinWriter) WriteNetUint8(u uint8) error { err := b.WriteNetUint8At(u, 0) - b.offset += 1 + b.offset++ return err } diff --git a/packetbeat/protos/memcache/plugin_tcp.go b/packetbeat/protos/memcache/plugin_tcp.go index 24ca39f5031..18d96c35417 100644 --- a/packetbeat/protos/memcache/plugin_tcp.go +++ b/packetbeat/protos/memcache/plugin_tcp.go @@ -98,7 +98,7 @@ func (mc *Memcache) Parse( func (mc *Memcache) newStream(tcptuple *common.TCPTuple) *stream { s := &stream{} s.parser.init(&mc.config) - s.Stream.Init(tcp.TCP_MAX_DATA_IN_STREAM) + s.Stream.Init(tcp.TCPMaxDataInStream) return s } @@ -173,14 +173,13 @@ func (mc *Memcache) onTCPMessage( msg *message, ) error { msg.Tuple = *tuple - msg.Transport = 
applayer.TransportTcp + msg.Transport = applayer.TransportTCP msg.CmdlineTuple = procs.ProcWatcher.FindProcessesTuple(tuple) if msg.IsRequest { return mc.onTCPRequest(conn, tuple, dir, msg) - } else { - return mc.onTCPResponse(conn, tuple, dir, msg) } + return mc.onTCPResponse(conn, tuple, dir, msg) } func (mc *Memcache) onTCPRequest( @@ -189,7 +188,7 @@ func (mc *Memcache) onTCPRequest( dir uint8, msg *message, ) error { - requestSeenFirst := dir == tcp.TcpDirectionOriginal + requestSeenFirst := dir == tcp.TCPDirectionOriginal if requestSeenFirst { msg.Direction = applayer.NetOriginalDirection } else { @@ -217,7 +216,7 @@ func (mc *Memcache) onTCPResponse( dir uint8, msg *message, ) error { - requestSeenFirst := dir == tcp.TcpDirectionReverse + requestSeenFirst := dir == tcp.TCPDirectionReverse if requestSeenFirst { msg.Direction = applayer.NetOriginalDirection } else { diff --git a/packetbeat/protos/memcache/plugin_udp.go b/packetbeat/protos/memcache/plugin_udp.go index 80a3f9c3d7d..a4f007f3549 100644 --- a/packetbeat/protos/memcache/plugin_udp.go +++ b/packetbeat/protos/memcache/plugin_udp.go @@ -25,8 +25,8 @@ type udpConfig struct { transTimeout time.Duration } -type mcUdpHeader struct { - requestId uint16 +type mcUDPHeader struct { + requestID uint16 seqNumber uint16 numDatagrams uint16 } @@ -38,7 +38,7 @@ type udpConnection struct { } type udpTransaction struct { - requestId uint16 + requestID uint16 timer *time.Timer next *udpTransaction @@ -66,27 +66,27 @@ type udpMessage struct { datagrams [][]byte } -func (mc *Memcache) ParseUdp(pkt *protos.Packet) { +func (mc *Memcache) ParseUDP(pkt *protos.Packet) { defer logp.Recover("ParseMemcache(UDP) exception") buffer := streambuf.NewFixed(pkt.Payload) - header, err := parseUdpHeader(buffer) + header, err := parseUDPHeader(buffer) if err != nil { debug("parsing memcache udp header failed") return } debug("new udp datagram requestId=%v, seqNumber=%v, numDatagrams=%v", - header.requestId, header.seqNumber, header.numDatagrams) + header.requestID, header.seqNumber, header.numDatagrams) // find connection object based on ips and ports (forward->reverse connection) - connection, dir := mc.getUdpConnection(&pkt.Tuple) + connection, dir := mc.getUDPConnection(&pkt.Tuple) debug("udp connection: %p", connection) // get udp transaction combining forward/reverse direction 'streams' // for current requestId - trans := connection.udpTransactionForId(header.requestId) - debug("udp transaction (id=%v): %p", header.requestId, trans) + trans := connection.udpTransactionForID(header.requestID) + debug("udp transaction (id=%v): %p", header.requestID, trans) // Clean old transaction. 
We do the cleaning after potentially adding a new // transaction to the connection object, so connection object will not be @@ -111,7 +111,7 @@ func (mc *Memcache) ParseUdp(pkt *protos.Packet) { done := false if payload != nil { // parse memcached message - msg, err := parseUdp(&mc.config, pkt.Ts, payload) + msg, err := parseUDP(&mc.config, pkt.Ts, payload) if err != nil { logp.Warn("failed to parse memcached(UDP) message: %s", err) connection.killTransaction(trans) @@ -119,7 +119,7 @@ func (mc *Memcache) ParseUdp(pkt *protos.Packet) { } // apply memcached to transaction - done, err = mc.onUdpMessage(trans, &pkt.Tuple, dir, msg) + done, err = mc.onUDPMessage(trans, &pkt.Tuple, dir, msg) if err != nil { logp.Warn("error processing memcache message: %s", err) connection.killTransaction(trans) @@ -129,13 +129,13 @@ func (mc *Memcache) ParseUdp(pkt *protos.Packet) { if !done { trans.timer = time.AfterFunc(mc.udpConfig.transTimeout, func() { debug("transaction timeout -> forward") - mc.onUdpTrans(trans) + mc.onUDPTrans(trans) mc.udpExpTrans.push(trans) }) } } -func (mc *Memcache) getUdpConnection( +func (mc *Memcache) getUDPConnection( tuple *common.IPPortTuple, ) (*udpConnection, applayer.NetDirection) { connection := mc.udpConnections[tuple.Hashable()] @@ -147,12 +147,12 @@ func (mc *Memcache) getUdpConnection( return connection, applayer.NetReverseDirection } - connection = newUdpConnection(mc, tuple) + connection = newUDPConnection(mc, tuple) mc.udpConnections[tuple.Hashable()] = connection return connection, applayer.NetOriginalDirection } -func (mc *Memcache) onUdpMessage( +func (mc *Memcache) onUDPMessage( trans *udpTransaction, tuple *common.IPPortTuple, dir applayer.NetDirection, @@ -166,7 +166,7 @@ func (mc *Memcache) onUdpMessage( msg.Direction = applayer.NetReverseDirection } msg.Tuple = *tuple - msg.Transport = applayer.TransportUdp + msg.Transport = applayer.TransportUDP msg.CmdlineTuple = procs.ProcWatcher.FindProcessesTuple(tuple) done := false @@ -185,20 +185,20 @@ func (mc *Memcache) onUdpMessage( done = done || (trans.request != nil && trans.response != nil) if done { - err = mc.onUdpTrans(trans) + err = mc.onUDPTrans(trans) trans.connection.killTransaction(trans) } return done, err } -func (mc *Memcache) onUdpTrans(udp *udpTransaction) error { +func (mc *Memcache) onUDPTrans(udp *udpTransaction) error { debug("received memcache(udp) transaction") trans := newTransaction(udp.request, udp.response) return mc.finishTransaction(trans) } -func newUdpConnection(mc *Memcache, tuple *common.IPPortTuple) *udpConnection { +func newUDPConnection(mc *Memcache, tuple *common.IPPortTuple) *udpConnection { c := &udpConnection{ tuple: *tuple, memcache: mc, @@ -207,8 +207,8 @@ func newUdpConnection(mc *Memcache, tuple *common.IPPortTuple) *udpConnection { return c } -func (c *udpConnection) udpTransactionForId(requestId uint16) *udpTransaction { - trans := c.transactions[requestId] +func (c *udpConnection) udpTransactionForID(requestID uint16) *udpTransaction { + trans := c.transactions[requestID] if trans != nil && trans.timer != nil { stopped := trans.timer.Stop() if !stopped { @@ -218,10 +218,10 @@ func (c *udpConnection) udpTransactionForId(requestId uint16) *udpTransaction { } if trans == nil { trans = &udpTransaction{ - requestId: requestId, + requestID: requestID, connection: c, } - c.transactions[requestId] = trans + c.transactions[requestID] = trans } else { trans.timer = nil } @@ -234,12 +234,12 @@ func (c *udpConnection) killTransaction(t *udpTransaction) { t.timer.Stop() } - if 
c.transactions[t.requestId] != t { + if c.transactions[t.requestID] != t { // transaction was already replaced return } - delete(c.transactions, t.requestId) + delete(c.transactions, t.requestID) if len(c.transactions) == 0 { delete(c.memcache.udpConnections, c.tuple.Hashable()) } @@ -261,18 +261,18 @@ func (lst *udpExpTransList) steal() *udpTransaction { } func (t *udpTransaction) udpMessageForDir( - header *mcUdpHeader, + header *mcUDPHeader, dir applayer.NetDirection, ) *udpMessage { udpMsg := t.messages[dir] if udpMsg == nil { - udpMsg = newUdpMessage(header) + udpMsg = newUDPMessage(header) t.messages[dir] = udpMsg } return udpMsg } -func newUdpMessage(header *mcUdpHeader) *udpMessage { +func newUDPMessage(header *mcUDPHeader) *udpMessage { udpMsg := &udpMessage{ numDatagrams: header.numDatagrams, count: 0, @@ -284,7 +284,7 @@ func newUdpMessage(header *mcUdpHeader) *udpMessage { } func (msg *udpMessage) addDatagram( - header *mcUdpHeader, + header *mcUDPHeader, data []byte, ) *streambuf.Buffer { if msg.isComplete { @@ -318,16 +318,16 @@ func (msg *udpMessage) addDatagram( return buffer } -func parseUdpHeader(buf *streambuf.Buffer) (mcUdpHeader, error) { - var h mcUdpHeader - h.requestId, _ = buf.ReadNetUint16() +func parseUDPHeader(buf *streambuf.Buffer) (mcUDPHeader, error) { + var h mcUDPHeader + h.requestID, _ = buf.ReadNetUint16() h.seqNumber, _ = buf.ReadNetUint16() h.numDatagrams, _ = buf.ReadNetUint16() buf.Advance(2) // ignore reserved return h, buf.Err() } -func parseUdp( +func parseUDP( config *parserConfig, ts time.Time, buf *streambuf.Buffer, @@ -335,7 +335,7 @@ func parseUdp( parser := newParser(config) msg, err := parser.parse(buf, ts) if err != nil && msg == nil { - err = ErrUdpIncompleteMessage + err = ErrUDPIncompleteMessage } return msg, err } diff --git a/packetbeat/protos/memcache/plugin_udp_test.go b/packetbeat/protos/memcache/plugin_udp_test.go index dc09689dd8f..a1082a9b4f5 100644 --- a/packetbeat/protos/memcache/plugin_udp_test.go +++ b/packetbeat/protos/memcache/plugin_udp_test.go @@ -10,21 +10,21 @@ import ( func Test_UdpDatagramAddOnCompleteMessage(t *testing.T) { msg := &udpMessage{isComplete: true} - buf := msg.addDatagram(&mcUdpHeader{}, []byte{1, 2, 3, 4}) + buf := msg.addDatagram(&mcUDPHeader{}, []byte{1, 2, 3, 4}) assert.Nil(t, buf) } func Test_UdpDatagramAddSingleDatagram(t *testing.T) { - hdr := &mcUdpHeader{requestId: 10, seqNumber: 0, numDatagrams: 1} - msg := newUdpMessage(hdr) + hdr := &mcUDPHeader{requestID: 10, seqNumber: 0, numDatagrams: 1} + msg := newUDPMessage(hdr) buf := msg.addDatagram(hdr, []byte{1, 2, 3, 4}) assert.Equal(t, 4, buf.Len()) assert.Equal(t, []byte{1, 2, 3, 4}, buf.Bytes()) } func Test_UdpDatagramMultiple(t *testing.T) { - hdr := &mcUdpHeader{requestId: 10, seqNumber: 0, numDatagrams: 4} - msg := newUdpMessage(hdr) + hdr := &mcUDPHeader{requestID: 10, seqNumber: 0, numDatagrams: 4} + msg := newUDPMessage(hdr) buf := msg.addDatagram(hdr, []byte{1, 2}) assert.Nil(t, buf) @@ -47,8 +47,8 @@ func Test_UdpDatagramMultiple(t *testing.T) { } func Test_UdpDatagramMultipleDups(t *testing.T) { - hdr := &mcUdpHeader{requestId: 10, seqNumber: 0, numDatagrams: 4} - msg := newUdpMessage(hdr) + hdr := &mcUDPHeader{requestID: 10, seqNumber: 0, numDatagrams: 4} + msg := newUDPMessage(hdr) buf := msg.addDatagram(hdr, []byte{1, 2}) assert.Nil(t, buf) diff --git a/packetbeat/protos/memcache/text.go b/packetbeat/protos/memcache/text.go index 5ad80bd26d3..0389c36584c 100644 --- a/packetbeat/protos/memcache/text.go +++ 
b/packetbeat/protos/memcache/text.go @@ -42,15 +42,15 @@ var argMultiKeys = argDef{ msg := parser.message rest := buf.Bytes() buf.Advance(len(rest)) - raw_keys := bytes.FieldsFunc(rest, func(b rune) bool { + rawKeys := bytes.FieldsFunc(rest, func(b rune) bool { return b == ' ' }) - if len(raw_keys) == 0 { + if len(rawKeys) == 0 { return ErrExpectedKeys } - msg.keys = make([]memcacheString, len(raw_keys)) - for i, raw_key := range raw_keys { - msg.keys[i] = memcacheString{raw_key} + msg.keys = make([]memcacheString, len(rawKeys)) + for i, rawKey := range rawKeys { + msg.keys[i] = memcacheString{rawKey} } return nil }, @@ -262,10 +262,10 @@ func defTextMessage( } func makeDefTextDataMessage( - is_request bool, + isRequest bool, ) func(string, commandTypeCode, commandCode, ...argDef) textCommandType { serialize := serializeDataResponse - if is_request { + if isRequest { serialize = serializeDataRequest } return func( @@ -504,7 +504,7 @@ func parseData(parser *parser, buf *streambuf.Buffer) parseResult { debug("found message data") if msg.bytesLost > 0 { - msg.count_values++ + msg.countValues++ } else { parser.appendMessageData(data) } @@ -526,8 +526,7 @@ func parseStatLine(parser *parser, hdr, buf *streambuf.Buffer) error { return nil } -func parseTextArgs(parser *parser, args []argDef) error { - var err error = nil +func parseTextArgs(parser *parser, args []argDef) (err error) { buf := streambuf.NewFixed(parser.message.rawArgs) for _, arg := range args { debug("args rest: %s", buf.Bytes()) @@ -536,21 +535,21 @@ func parseTextArgs(parser *parser, args []argDef) error { break } } - return err + return } func splitCommandAndArgs(line []byte) ([]byte, []byte, error) { - command_line := streambuf.NewFixed(line) - command, err := parseStringArg(command_line) + commandLine := streambuf.NewFixed(line) + command, err := parseStringArg(commandLine) if err != nil { return nil, nil, err } var args []byte - if command_line.Len() > 0 { - command_line.Advance(1) - args = command_line.Bytes() + if commandLine.Len() > 0 { + commandLine.Advance(1) + args = commandLine.Bytes() } - return command, args, command_line.Err() + return command, args, commandLine.Err() } func parseStringArg(buf *streambuf.Buffer) ([]byte, error) { @@ -684,12 +683,12 @@ func serializeDataRequest( args ...argDef, ) eventFn { command := code.String() - event_type := typ.String() + eventType := typ.String() return func(msg *message, event common.MapStr) error { event["command"] = command - event["type"] = event_type - event["count_values"] = msg.count_values - if msg.count_values != 0 && msg.data.IsSet() { + event["type"] = eventType + event["count_values"] = msg.countValues + if msg.countValues != 0 && msg.data.IsSet() { event["values"] = msg.data } return serializeArgs(msg, event, args) @@ -702,12 +701,12 @@ func serializeDataResponse( args ...argDef, ) eventFn { response := code.String() - event_type := typ.String() + eventType := typ.String() return func(msg *message, event common.MapStr) error { event["command"] = response - event["type"] = event_type - event["count_values"] = msg.count_values - if msg.count_values != 0 && len(msg.values) > 0 { + event["type"] = eventType + event["count_values"] = msg.countValues + if msg.countValues != 0 && len(msg.values) > 0 { event["values"] = msg.values } return serializeArgs(msg, event, args) diff --git a/packetbeat/protos/mongodb/mongodb.go b/packetbeat/protos/mongodb/mongodb.go index 5b79608fa66..a00d94d5861 100644 --- a/packetbeat/protos/mongodb/mongodb.go +++ 
b/packetbeat/protos/mongodb/mongodb.go @@ -146,7 +146,7 @@ func (mongodb *Mongodb) doParse( } else { // concatenate bytes st.data = append(st.data, pkt.Payload...) - if len(st.data) > tcp.TCP_MAX_DATA_IN_STREAM { + if len(st.data) > tcp.TCPMaxDataInStream { debugf("Stream data too large, dropping TCP stream") conn.Streams[dir] = nil return conn @@ -198,7 +198,7 @@ func (mongodb *Mongodb) handleMongodb( dir uint8, ) { - m.TcpTuple = *tcptuple + m.TCPTuple = *tcptuple m.Direction = dir m.CmdlineTuple = procs.ProcWatcher.FindProcessesTuple(tcptuple.IPPort()) @@ -218,8 +218,8 @@ func (mongodb *Mongodb) onRequest(conn *mongodbConnectionData, msg *mongodbMessa return } - id := msg.requestId - key := transactionKey{tcp: msg.TcpTuple.Hashable(), id: id} + id := msg.requestID + key := transactionKey{tcp: msg.TCPTuple.Hashable(), id: id} // try to find matching response potentially inserted before if v := mongodb.responses.Delete(key); v != nil { @@ -238,7 +238,7 @@ func (mongodb *Mongodb) onRequest(conn *mongodbConnectionData, msg *mongodbMessa func (mongodb *Mongodb) onResponse(conn *mongodbConnectionData, msg *mongodbMessage) { id := msg.responseTo - key := transactionKey{tcp: msg.TcpTuple.Hashable(), id: id} + key := transactionKey{tcp: msg.TCPTuple.Hashable(), id: id} // try to find matching request if v := mongodb.requests.Delete(key); v != nil { @@ -263,7 +263,7 @@ func newTransaction(requ, resp *mongodbMessage) *transaction { // fill request if requ != nil { - trans.tuple = requ.TcpTuple + trans.tuple = requ.TCPTuple trans.Mongodb = common.MapStr{} trans.event = requ.event @@ -274,16 +274,16 @@ func newTransaction(requ, resp *mongodbMessage) *transaction { trans.Ts = int64(trans.ts.UnixNano() / 1000) // transactions have microseconds resolution trans.JsTs = requ.Ts trans.Src = common.Endpoint{ - IP: requ.TcpTuple.SrcIP.String(), - Port: requ.TcpTuple.SrcPort, + IP: requ.TCPTuple.SrcIP.String(), + Port: requ.TCPTuple.SrcPort, Proc: string(requ.CmdlineTuple.Src), } trans.Dst = common.Endpoint{ - IP: requ.TcpTuple.DstIP.String(), - Port: requ.TcpTuple.DstPort, + IP: requ.TCPTuple.DstIP.String(), + Port: requ.TCPTuple.DstPort, Proc: string(requ.CmdlineTuple.Dst), } - if requ.Direction == tcp.TcpDirectionReverse { + if requ.Direction == tcp.TCPDirectionReverse { trans.Src, trans.Dst = trans.Dst, trans.Src } trans.params = requ.params @@ -295,7 +295,7 @@ func newTransaction(requ, resp *mongodbMessage) *transaction { if resp != nil { if requ == nil { // TODO: reverse tuple? - trans.tuple = resp.TcpTuple + trans.tuple = resp.TCPTuple } for k, v := range resp.event { @@ -323,7 +323,7 @@ func (mongodb *Mongodb) ReceivedFin(tcptuple *common.TCPTuple, dir uint8, return private } -func copy_map_without_key(d map[string]interface{}, key string) map[string]interface{} { +func copyMapWithoutKey(d map[string]interface{}, key string) map[string]interface{} { res := map[string]interface{}{} for k, v := range d { if k != key { @@ -342,11 +342,11 @@ func reconstructQuery(t *transaction, full bool) (query string) { // remove the actual data. 
// TODO: review if we need to add other commands here if t.method == "insert" { - params, err = doc2str(copy_map_without_key(t.params, "documents")) + params, err = doc2str(copyMapWithoutKey(t.params, "documents")) } else if t.method == "update" { - params, err = doc2str(copy_map_without_key(t.params, "updates")) + params, err = doc2str(copyMapWithoutKey(t.params, "updates")) } else if t.method == "findandmodify" { - params, err = doc2str(copy_map_without_key(t.params, "update")) + params, err = doc2str(copyMapWithoutKey(t.params, "update")) } } else { params, err = doc2str(t.params) diff --git a/packetbeat/protos/mongodb/mongodb_parser.go b/packetbeat/protos/mongodb/mongodb_parser.go index f791279caac..056ace10650 100644 --- a/packetbeat/protos/mongodb/mongodb_parser.go +++ b/packetbeat/protos/mongodb/mongodb_parser.go @@ -32,7 +32,7 @@ func mongodbMessageParser(s *stream) (bool, bool) { // see http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#standard-message-header s.message.messageLength = length - s.message.requestId, _ = d.readInt32() + s.message.requestID, _ = d.readInt32() s.message.responseTo, _ = d.readInt32() code, _ := d.readInt32() @@ -162,13 +162,13 @@ func opInsertParse(d *decoder, m *mongodbMessage) (bool, bool) { return true, true } -func extract_documents(query map[string]interface{}) []interface{} { - docs_vi, present := query["documents"] +func extractDocuments(query map[string]interface{}) []interface{} { + docsVi, present := query["documents"] if !present { return []interface{}{} } - docs, ok := docs_vi.([]interface{}) + docs, ok := docsVi.([]interface{}) if !ok { return []interface{}{} } diff --git a/packetbeat/protos/mongodb/mongodb_parser_test.go b/packetbeat/protos/mongodb/mongodb_parser_test.go index ac1479b3897..c93b767a648 100644 --- a/packetbeat/protos/mongodb/mongodb_parser_test.go +++ b/packetbeat/protos/mongodb/mongodb_parser_test.go @@ -119,7 +119,7 @@ func Test_extract_documents(t *testing.T) { } for _, test := range tests { - assert.Equal(t, test.Output, extract_documents(test.Input)) + assert.Equal(t, test.Output, extractDocuments(test.Input)) } } diff --git a/packetbeat/protos/mongodb/mongodb_structs.go b/packetbeat/protos/mongodb/mongodb_structs.go index 210275878ff..da0bad81b48 100644 --- a/packetbeat/protos/mongodb/mongodb_structs.go +++ b/packetbeat/protos/mongodb/mongodb_structs.go @@ -10,7 +10,7 @@ import ( type mongodbMessage struct { Ts time.Time - TcpTuple common.TCPTuple + TCPTuple common.TCPTuple CmdlineTuple *common.CmdlineTuple Direction uint8 @@ -20,7 +20,7 @@ type mongodbMessage struct { // Standard message header fields from mongodb wire protocol // see http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#standard-message-header messageLength int - requestId int + requestID int responseTo int opCode opCode diff --git a/packetbeat/protos/mongodb/mongodb_test.go b/packetbeat/protos/mongodb/mongodb_test.go index 261f26566f2..2aa2f0d89b3 100644 --- a/packetbeat/protos/mongodb/mongodb_test.go +++ b/packetbeat/protos/mongodb/mongodb_test.go @@ -25,7 +25,7 @@ func MongodbModForTests() *Mongodb { } // Helper function that returns an example TcpTuple -func testTcpTuple() *common.TCPTuple { +func testTCPTuple() *common.TCPTuple { t := &common.TCPTuple{ IPLength: 4, SrcIP: net.IPv4(192, 168, 0, 1), DstIP: net.IPv4(192, 168, 0, 2), @@ -57,13 +57,13 @@ func TestSimpleFindLimit1(t *testing.T) { mongodb := MongodbModForTests() // request and response from tests/pcaps/mongo_one_row.pcap - req_data, err := 
hex.DecodeString( + reqData, err := hex.DecodeString( "320000000a000000ffffffffd4070000" + "00000000746573742e72667374617572" + "616e7473000000000001000000050000" + "0000") assert.Nil(t, err) - resp_data, err := hex.DecodeString( + respData, err := hex.DecodeString( "020200004a0000000a00000001000000" + "08000000000000000000000000000000" + "01000000de010000075f696400558beb" + @@ -99,9 +99,9 @@ func TestSimpleFindLimit1(t *testing.T) { "0000") assert.Nil(t, err) - tcptuple := testTcpTuple() - req := protos.Packet{Payload: req_data} - resp := protos.Packet{Payload: resp_data} + tcptuple := testTCPTuple() + req := protos.Packet{Payload: reqData} + resp := protos.Packet{Payload: respData} private := protos.ProtocolData(new(mongodbConnectionData)) @@ -128,13 +128,13 @@ func TestSimpleFindLimit1_split(t *testing.T) { mongodb.SendResponse = true // request and response from tests/pcaps/mongo_one_row.pcap - req_data, err := hex.DecodeString( + reqData, err := hex.DecodeString( "320000000a000000ffffffffd4070000" + "00000000746573742e72667374617572" + "616e7473000000000001000000050000" + "0000") assert.Nil(t, err) - resp_data1, err := hex.DecodeString( + respData1, err := hex.DecodeString( "020200004a0000000a00000001000000" + "08000000000000000000000000000000" + "01000000de010000075f696400558beb" + @@ -145,7 +145,7 @@ func TestSimpleFindLimit1_split(t *testing.T) { "3100d5b14ae9996c4440000273747265" + "657400100000004d6f72726973205061") - resp_data2, err := hex.DecodeString( + respData2, err := hex.DecodeString( "726b2041766500027a6970636f646500" + "060000003130343632000002626f726f" + "756768000600000042726f6e78000263" + @@ -158,7 +158,7 @@ func TestSimpleFindLimit1_split(t *testing.T) { "00026772616465000200000041001073" + "636f72650006000000000332002b0000") - resp_data3, err := hex.DecodeString( + respData3, err := hex.DecodeString( "00096461746500009cda693c01000002" + "6772616465000200000041001073636f" + "7265000a000000000333002b00000009" + @@ -174,20 +174,20 @@ func TestSimpleFindLimit1_split(t *testing.T) { "0000") assert.Nil(t, err) - tcptuple := testTcpTuple() - req := protos.Packet{Payload: req_data} + tcptuple := testTCPTuple() + req := protos.Packet{Payload: reqData} private := protos.ProtocolData(new(mongodbConnectionData)) private = mongodb.Parse(&req, tcptuple, 0, private) - resp1 := protos.Packet{Payload: resp_data1} + resp1 := protos.Packet{Payload: respData1} private = mongodb.Parse(&resp1, tcptuple, 1, private) - resp2 := protos.Packet{Payload: resp_data2} + resp2 := protos.Packet{Payload: respData2} private = mongodb.Parse(&resp2, tcptuple, 1, private) - resp3 := protos.Packet{Payload: resp_data3} + resp3 := protos.Packet{Payload: respData3} mongodb.Parse(&resp3, tcptuple, 1, private) trans := expectTransaction(t, mongodb) diff --git a/packetbeat/protos/mysql/mysql.go b/packetbeat/protos/mysql/mysql.go index 06d9ac69790..8f9f9e3f398 100644 --- a/packetbeat/protos/mysql/mysql.go +++ b/packetbeat/protos/mysql/mysql.go @@ -18,10 +18,10 @@ import ( // Packet types const ( - MYSQL_CMD_QUERY = 3 + MysqlCmdQuery = 3 ) -const MAX_PAYLOAD_SIZE = 100 * 1024 +const MaxPayloadSize = 100 * 1024 var ( unmatchedRequests = expvar.NewInt("mysql.unmatched_requests") @@ -45,7 +45,7 @@ type MysqlMessage struct { Tables string IsOK bool AffectedRows uint64 - InsertId uint64 + InsertID uint64 IsError bool ErrorCode uint16 ErrorInfo string @@ -54,7 +54,7 @@ type MysqlMessage struct { Direction uint8 IsTruncated bool - TcpTuple common.TCPTuple + TCPTuple common.TCPTuple CmdlineTuple *common.CmdlineTuple 
Raw []byte Notes []string @@ -78,8 +78,8 @@ type MysqlTransaction struct { Mysql common.MapStr - Request_raw string - Response_raw string + RequestRaw string + ResponseRaw string } type MysqlStream struct { @@ -105,7 +105,7 @@ const ( MysqlStateMax ) -var stateStrings []string = []string{ +var stateStrings = []string{ "Start", "EatMessage", "EatFields", @@ -119,11 +119,11 @@ func (state parseState) String() string { type Mysql struct { // config - Ports []int - maxStoreRows int - maxRowLength int - Send_request bool - Send_response bool + Ports []int + maxStoreRows int + maxRowLength int + SendRequest bool + SendResponse bool transactions *common.Cache transactionTimeout time.Duration @@ -175,8 +175,8 @@ func (mysql *Mysql) setFromConfig(config *mysqlConfig) { mysql.Ports = config.Ports mysql.maxRowLength = config.MaxRowLength mysql.maxStoreRows = config.MaxRows - mysql.Send_request = config.SendRequest - mysql.Send_response = config.SendResponse + mysql.SendRequest = config.SendRequest + mysql.SendResponse = config.SendResponse mysql.transactionTimeout = config.TransactionTimeout } @@ -223,7 +223,7 @@ func mysqlMessageParser(s *MysqlStream) (bool, bool) { if m.Seq == 0 { // starts Command Phase - if m.Typ == MYSQL_CMD_QUERY { + if m.Typ == MysqlCmdQuery { // parse request m.IsRequest = true m.start = s.parseOffset @@ -270,54 +270,54 @@ func mysqlMessageParser(s *MysqlStream) (bool, bool) { logp.Debug("mysql", "Unexpected MySQL message of type %d received.", m.Typ) return false, false } - break case mysqlStateEatMessage: - if len(s.data[s.parseOffset:]) >= int(m.PacketLength)+4 { - s.parseOffset += 4 //header - s.parseOffset += int(m.PacketLength) - m.end = s.parseOffset - if m.IsRequest { - m.Query = string(s.data[m.start+5 : m.end]) - } else if m.IsOK { - // affected rows - affectedRows, off, complete, err := read_linteger(s.data, m.start+5) - if !complete { - return true, false - } - if err != nil { - logp.Debug("mysql", "Error on read_linteger: %s", err) - return false, false - } - m.AffectedRows = affectedRows + if len(s.data[s.parseOffset:]) < int(m.PacketLength)+4 { + // wait for more data + return true, false + } - // last insert id - insertId, _, complete, err := read_linteger(s.data, off) - if !complete { - return true, false - } - if err != nil { - logp.Debug("mysql", "Error on read_linteger: %s", err) - return false, false - } - m.InsertId = insertId - } else if m.IsError { - // int<1>header (0xff) - // int<2>error code - // string[1] sql state marker - // string[5] sql state - // string error message - m.ErrorCode = uint16(s.data[m.start+6])<<8 | uint16(s.data[m.start+5]) - - m.ErrorInfo = string(s.data[m.start+8:m.start+13]) + ": " + string(s.data[m.start+13:]) + s.parseOffset += 4 //header + s.parseOffset += int(m.PacketLength) + m.end = s.parseOffset + if m.IsRequest { + m.Query = string(s.data[m.start+5 : m.end]) + } else if m.IsOK { + // affected rows + affectedRows, off, complete, err := readLinteger(s.data, m.start+5) + if !complete { + return true, false } - m.Size = uint64(m.end - m.start) - logp.Debug("mysqldetailed", "Message complete. 
remaining=%d", len(s.data[s.parseOffset:])) - return true, true - } else { - // wait for more - return true, false + if err != nil { + logp.Debug("mysql", "Error on read_linteger: %s", err) + return false, false + } + m.AffectedRows = affectedRows + + // last insert id + insertID, _, complete, err := readLinteger(s.data, off) + if !complete { + return true, false + } + if err != nil { + logp.Debug("mysql", "Error on read_linteger: %s", err) + return false, false + } + m.InsertID = insertID + } else if m.IsError { + // int<1>header (0xff) + // int<2>error code + // string[1] sql state marker + // string[5] sql state + // string error message + m.ErrorCode = uint16(s.data[m.start+6])<<8 | uint16(s.data[m.start+5]) + + m.ErrorInfo = string(s.data[m.start+8:m.start+13]) + ": " + string(s.data[m.start+13:]) } + m.Size = uint64(m.end - m.start) + logp.Debug("mysqldetailed", "Message complete. remaining=%d", + len(s.data[s.parseOffset:])) + return true, true case mysqlStateEatFields: if len(s.data[s.parseOffset:]) < 4 { @@ -340,7 +340,7 @@ func mysqlMessageParser(s *MysqlStream) (bool, bool) { s.parseState = mysqlStateEatRows } else { - _ /* catalog */, off, complete, err := read_lstring(s.data, s.parseOffset) + _ /* catalog */, off, complete, err := readLstring(s.data, s.parseOffset) if !complete { return true, false } @@ -348,7 +348,7 @@ func mysqlMessageParser(s *MysqlStream) (bool, bool) { logp.Debug("mysql", "Error on read_lstring: %s", err) return false, false } - db /*schema */, off, complete, err := read_lstring(s.data, off) + db /*schema */, off, complete, err := readLstring(s.data, off) if !complete { return true, false } @@ -356,7 +356,7 @@ func mysqlMessageParser(s *MysqlStream) (bool, bool) { logp.Debug("mysql", "Error on read_lstring: %s", err) return false, false } - table /* table */, _ /*off*/, complete, err := read_lstring(s.data, off) + table /* table */, _ /*off*/, complete, err := readLstring(s.data, off) if !complete { return true, false } @@ -365,12 +365,12 @@ func mysqlMessageParser(s *MysqlStream) (bool, bool) { return false, false } - db_table := string(db) + "." + string(table) + dbTable := string(db) + "." 
+ string(table) if len(m.Tables) == 0 { - m.Tables = db_table - } else if !strings.Contains(m.Tables, db_table) { - m.Tables = m.Tables + ", " + db_table + m.Tables = dbTable + } else if !strings.Contains(m.Tables, dbTable) { + m.Tables = m.Tables + ", " + dbTable } logp.Debug("mysqldetailed", "db=%s, table=%s", db, table) s.parseOffset += int(m.PacketLength) @@ -380,7 +380,6 @@ func mysqlMessageParser(s *MysqlStream) (bool, bool) { // wait for more return true, false } - break case mysqlStateEatRows: if len(s.data[s.parseOffset:]) < 4 { @@ -393,40 +392,38 @@ func mysqlMessageParser(s *MysqlStream) (bool, bool) { logp.Debug("mysqldetailed", "Rows: packet length %d, packet number %d", m.PacketLength, m.Seq) - if len(s.data[s.parseOffset:]) >= int(m.PacketLength)+4 { - s.parseOffset += 4 //header + if len(s.data[s.parseOffset:]) < int(m.PacketLength)+4 { + // wait for more + return true, false + } - if uint8(s.data[s.parseOffset]) == 0xfe { - logp.Debug("mysqldetailed", "Received EOF packet") - // EOF marker - s.parseOffset += int(m.PacketLength) + s.parseOffset += 4 //header - if m.end == 0 { - m.end = s.parseOffset - } else { - m.IsTruncated = true - } - if !m.IsError { - // in case the response was sent successfully - m.IsOK = true - } - m.Size = uint64(m.end - m.start) - return true, true + if uint8(s.data[s.parseOffset]) == 0xfe { + logp.Debug("mysqldetailed", "Received EOF packet") + // EOF marker + s.parseOffset += int(m.PacketLength) + + if m.end == 0 { + m.end = s.parseOffset } else { - s.parseOffset += int(m.PacketLength) - if m.end == 0 && s.parseOffset > MAX_PAYLOAD_SIZE { - // only send up to here, but read until the end - m.end = s.parseOffset - } - m.NumberOfRows += 1 - // go to next row + m.IsTruncated = true } - } else { - // wait for more - return true, false + if !m.IsError { + // in case the response was sent successfully + m.IsOK = true + } + m.Size = uint64(m.end - m.start) + return true, true } - break + s.parseOffset += int(m.PacketLength) + if m.end == 0 && s.parseOffset > MaxPayloadSize { + // only send up to here, but read until the end + m.end = s.parseOffset + } + m.NumberOfRows++ + // go to next row } } @@ -501,7 +498,7 @@ func (mysql *Mysql) Parse(pkt *protos.Packet, tcptuple *common.TCPTuple, } else { // concatenate bytes priv.Data[dir].data = append(priv.Data[dir].data, pkt.Payload...) 
- if len(priv.Data[dir].data) > tcp.TCP_MAX_DATA_IN_STREAM { + if len(priv.Data[dir].data) > tcp.TCPMaxDataInStream { logp.Debug("mysql", "Stream data too large, dropping TCP stream") priv.Data[dir] = nil return priv @@ -572,12 +569,12 @@ func (mysql *Mysql) ReceivedFin(tcptuple *common.TCPTuple, dir uint8, } func handleMysql(mysql *Mysql, m *MysqlMessage, tcptuple *common.TCPTuple, - dir uint8, raw_msg []byte) { + dir uint8, rawMsg []byte) { - m.TcpTuple = *tcptuple + m.TCPTuple = *tcptuple m.Direction = dir m.CmdlineTuple = procs.ProcWatcher.FindProcessesTuple(tcptuple.IPPort()) - m.Raw = raw_msg + m.Raw = rawMsg if m.IsRequest { mysql.receivedMysqlRequest(m) @@ -587,7 +584,7 @@ func handleMysql(mysql *Mysql, m *MysqlMessage, tcptuple *common.TCPTuple, } func (mysql *Mysql) receivedMysqlRequest(msg *MysqlMessage) { - tuple := msg.TcpTuple + tuple := msg.TCPTuple trans := mysql.getTransaction(tuple.Hashable()) if trans != nil { if trans.Mysql != nil { @@ -603,16 +600,16 @@ func (mysql *Mysql) receivedMysqlRequest(msg *MysqlMessage) { trans.Ts = int64(trans.ts.UnixNano() / 1000) // transactions have microseconds resolution trans.JsTs = msg.Ts trans.Src = common.Endpoint{ - IP: msg.TcpTuple.SrcIP.String(), - Port: msg.TcpTuple.SrcPort, + IP: msg.TCPTuple.SrcIP.String(), + Port: msg.TCPTuple.SrcPort, Proc: string(msg.CmdlineTuple.Src), } trans.Dst = common.Endpoint{ - IP: msg.TcpTuple.DstIP.String(), - Port: msg.TcpTuple.DstPort, + IP: msg.TCPTuple.DstIP.String(), + Port: msg.TCPTuple.DstPort, Proc: string(msg.CmdlineTuple.Dst), } - if msg.Direction == tcp.TcpDirectionReverse { + if msg.Direction == tcp.TCPDirectionReverse { trans.Src, trans.Dst = trans.Dst, trans.Src } @@ -635,12 +632,12 @@ func (mysql *Mysql) receivedMysqlRequest(msg *MysqlMessage) { trans.Notes = msg.Notes // save Raw message - trans.Request_raw = msg.Query + trans.RequestRaw = msg.Query trans.BytesIn = msg.Size } func (mysql *Mysql) receivedMysqlResponse(msg *MysqlMessage) { - trans := mysql.getTransaction(msg.TcpTuple.Hashable()) + trans := mysql.getTransaction(msg.TCPTuple.Hashable()) if trans == nil { logp.Debug("mysql", "Response from unknown transaction. Ignoring.") unmatchedResponses.Add(1) @@ -656,7 +653,7 @@ func (mysql *Mysql) receivedMysqlResponse(msg *MysqlMessage) { // save json details trans.Mysql.Update(common.MapStr{ "affected_rows": msg.AffectedRows, - "insert_id": msg.InsertId, + "insert_id": msg.InsertID, "num_rows": msg.NumberOfRows, "num_fields": msg.NumberOfFields, "iserror": msg.IsError, @@ -672,7 +669,7 @@ func (mysql *Mysql) receivedMysqlResponse(msg *MysqlMessage) { if len(msg.Raw) > 0 { fields, rows := mysql.parseMysqlResponse(msg.Raw) - trans.Response_raw = common.DumpInCSVFormat(fields, rows) + trans.ResponseRaw = common.DumpInCSVFormat(fields, rows) } trans.Notes = append(trans.Notes, msg.Notes...) 
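A note for readers tracing the readLength, readLinteger and readLstring helpers renamed in the hunks around here: MySQL frames every packet with a 3-byte little-endian payload length plus a 1-byte sequence id (hence the recurring s.parseOffset += 4 //header), and encodes field and row lengths as "length-encoded integers". The sketch below is illustrative only and is not part of this patch; decodeLenencInt is a hypothetical helper, but the byte layout follows the documented MySQL client/server protocol.

package main

import (
	"encoding/binary"
	"fmt"
)

// decodeLenencInt decodes a MySQL length-encoded integer starting at data[off].
// It returns the value, the offset just past it, and whether the column was NULL.
func decodeLenencInt(data []byte, off int) (val uint64, next int, isNull bool) {
	switch b := data[off]; {
	case b < 0xfb: // one-byte value
		return uint64(b), off + 1, false
	case b == 0xfb: // NULL column in a text-protocol resultset row
		return 0, off + 1, true
	case b == 0xfc: // 2-byte little-endian value follows
		return uint64(binary.LittleEndian.Uint16(data[off+1:])), off + 3, false
	case b == 0xfd: // 3-byte little-endian value follows
		v := uint64(data[off+1]) | uint64(data[off+2])<<8 | uint64(data[off+3])<<16
		return v, off + 4, false
	default: // 0xfe: 8-byte little-endian value follows
		return binary.LittleEndian.Uint64(data[off+1:]), off + 9, false
	}
}

func main() {
	// 0xfc 0x2a 0x01 encodes 298 in two little-endian bytes.
	v, next, null := decodeLenencInt([]byte{0xfc, 0x2a, 0x01}, 0)
	fmt.Println(v, next, null) // 298 3 false
}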
@@ -681,12 +678,12 @@ func (mysql *Mysql) receivedMysqlResponse(msg *MysqlMessage) { mysql.transactions.Delete(trans.tuple.Hashable()) logp.Debug("mysql", "Mysql transaction completed: %s", trans.Mysql) - logp.Debug("mysql", "%s", trans.Response_raw) + logp.Debug("mysql", "%s", trans.ResponseRaw) } func (mysql *Mysql) parseMysqlResponse(data []byte) ([]string, [][]string) { - length, err := read_length(data, 0) + length, err := readLength(data, 0) if err != nil { logp.Warn("Invalid response: %v", err) return []string{}, [][]string{} @@ -715,7 +712,7 @@ func (mysql *Mysql) parseMysqlResponse(data []byte) ([]string, [][]string) { // Read fields for { - length, err = read_length(data, offset) + length, err = readLength(data, offset) if err != nil { logp.Warn("Invalid response: %v", err) return []string{}, [][]string{} @@ -732,32 +729,32 @@ func (mysql *Mysql) parseMysqlResponse(data []byte) ([]string, [][]string) { break } - _ /* catalog */, off, complete, err := read_lstring(data, offset+4) + _ /* catalog */, off, complete, err := readLstring(data, offset+4) if err != nil || !complete { logp.Debug("mysql", "Reading field: %v %v", err, complete) return fields, rows } - _ /*database*/, off, complete, err = read_lstring(data, off) + _ /*database*/, off, complete, err = readLstring(data, off) if err != nil || !complete { logp.Debug("mysql", "Reading field: %v %v", err, complete) return fields, rows } - _ /*table*/, off, complete, err = read_lstring(data, off) + _ /*table*/, off, complete, err = readLstring(data, off) if err != nil || !complete { logp.Debug("mysql", "Reading field: %v %v", err, complete) return fields, rows } - _ /*org table*/, off, complete, err = read_lstring(data, off) + _ /*org table*/, off, complete, err = readLstring(data, off) if err != nil || !complete { logp.Debug("mysql", "Reading field: %v %v", err, complete) return fields, rows } - name, off, complete, err := read_lstring(data, off) + name, off, complete, err := readLstring(data, off) if err != nil || !complete { logp.Debug("mysql", "Reading field: %v %v", err, complete) return fields, rows } - _ /* org name */, _ /*off*/, complete, err = read_lstring(data, off) + _ /* org name */, _ /*off*/, complete, err = readLstring(data, off) if err != nil || !complete { logp.Debug("mysql", "Reading field: %v %v", err, complete) return fields, rows @@ -775,7 +772,7 @@ func (mysql *Mysql) parseMysqlResponse(data []byte) ([]string, [][]string) { // Read rows for offset < len(data) { var row []string - var row_len int + var rowLen int if len(data[offset:]) < 5 { logp.Warn("Invalid response.") @@ -788,7 +785,7 @@ func (mysql *Mysql) parseMysqlResponse(data []byte) ([]string, [][]string) { break } - length, err = read_length(data, offset) + length, err = readLength(data, offset) if err != nil { logp.Warn("Invalid response: %v", err) break @@ -804,7 +801,7 @@ func (mysql *Mysql) parseMysqlResponse(data []byte) ([]string, [][]string) { } else { var err error var complete bool - text, off, complete, err = read_lstring(data, off) + text, off, complete, err = readLstring(data, off) if err != nil || !complete { logp.Debug("mysql", "Error parsing rows: %s %b", err, complete) // nevertheless, return what we have so far @@ -812,12 +809,12 @@ func (mysql *Mysql) parseMysqlResponse(data []byte) ([]string, [][]string) { } } - if row_len < mysql.maxRowLength { - if row_len+len(text) > mysql.maxRowLength { - text = text[:mysql.maxRowLength-row_len] + if rowLen < mysql.maxRowLength { + if rowLen+len(text) > mysql.maxRowLength { + text = 
text[:mysql.maxRowLength-rowLen] } row = append(row, string(text)) - row_len += len(text) + rowLen += len(text) } } @@ -852,11 +849,11 @@ func (mysql *Mysql) publishTransaction(t *MysqlTransaction) { } event["responsetime"] = t.ResponseTime - if mysql.Send_request { - event["request"] = t.Request_raw + if mysql.SendRequest { + event["request"] = t.RequestRaw } - if mysql.Send_response { - event["response"] = t.Response_raw + if mysql.SendResponse { + event["response"] = t.ResponseRaw } event["method"] = t.Method event["query"] = t.Query @@ -876,8 +873,8 @@ func (mysql *Mysql) publishTransaction(t *MysqlTransaction) { mysql.results.PublishTransaction(event) } -func read_lstring(data []byte, offset int) ([]byte, int, bool, error) { - length, off, complete, err := read_linteger(data, offset) +func readLstring(data []byte, offset int) ([]byte, int, bool, error) { + length, off, complete, err := readLinteger(data, offset) if err != nil { return nil, 0, false, err } @@ -887,7 +884,7 @@ func read_lstring(data []byte, offset int) ([]byte, int, bool, error) { return data[off : off+int(length)], off + int(length), true, nil } -func read_linteger(data []byte, offset int) (uint64, int, bool, error) { +func readLinteger(data []byte, offset int) (uint64, int, bool, error) { if len(data) < offset+1 { return 0, 0, false, nil } @@ -922,7 +919,7 @@ func read_linteger(data []byte, offset int) (uint64, int, bool, error) { } // Read a mysql length field (3 bytes LE) -func read_length(data []byte, offset int) (int, error) { +func readLength(data []byte, offset int) (int, error) { if len(data[offset:]) < 3 { return 0, errors.New("Data too small to contain a valid length") } diff --git a/packetbeat/protos/mysql/mysql_test.go b/packetbeat/protos/mysql/mysql_test.go index 53e4bec182e..723fd85b118 100644 --- a/packetbeat/protos/mysql/mysql_test.go +++ b/packetbeat/protos/mysql/mysql_test.go @@ -99,7 +99,7 @@ func TestMySQLParser_OKResponse(t *testing.T) { if stream.message.AffectedRows != 1 { t.Errorf("Failed to parse affected rows") } - if stream.message.InsertId != 4 { + if stream.message.InsertID != 4 { t.Errorf("Failed to parse last INSERT id") } if stream.message.Size != 11 { @@ -333,17 +333,17 @@ func TestParseMySQL_simpleUpdateResponse(t *testing.T) { var tuple common.TCPTuple var private mysqlPrivateData - var count_handleMysql = 0 + var countHandleMysql = 0 mysql.handleMysql = func(mysql *Mysql, m *MysqlMessage, tcp *common.TCPTuple, dir uint8, raw_msg []byte) { - count_handleMysql += 1 + countHandleMysql++ } mysql.Parse(&pkt, &tuple, 1, private) - if count_handleMysql != 1 { + if countHandleMysql != 1 { t.Errorf("handleMysql not called") } } @@ -376,17 +376,17 @@ func TestParseMySQL_threeResponses(t *testing.T) { var tuple common.TCPTuple var private mysqlPrivateData - var count_handleMysql = 0 + var countHandleMysql = 0 mysql.handleMysql = func(mysql *Mysql, m *MysqlMessage, tcptuple *common.TCPTuple, dir uint8, raw_msg []byte) { - count_handleMysql += 1 + countHandleMysql++ } mysql.Parse(&pkt, &tuple, 1, private) - if count_handleMysql != 3 { + if countHandleMysql != 3 { t.Errorf("handleMysql not called three times") } } @@ -420,16 +420,16 @@ func TestParseMySQL_splitResponse(t *testing.T) { var tuple common.TCPTuple var private mysqlPrivateData - var count_handleMysql = 0 + var countHandleMysql = 0 mysql.handleMysql = func(mysql *Mysql, m *MysqlMessage, tcptuple *common.TCPTuple, dir uint8, raw_msg []byte) { - count_handleMysql += 1 + countHandleMysql++ } private = mysql.Parse(&pkt, &tuple, 1, 
private).(mysqlPrivateData) - if count_handleMysql != 0 { + if countHandleMysql != 0 { t.Errorf("handleMysql called on first run") } @@ -453,12 +453,12 @@ func TestParseMySQL_splitResponse(t *testing.T) { } mysql.Parse(&pkt, &tuple, 1, private) - if count_handleMysql != 1 { + if countHandleMysql != 1 { t.Errorf("handleMysql not called on the second run") } } -func testTcpTuple() *common.TCPTuple { +func testTCPTuple() *common.TCPTuple { t := &common.TCPTuple{ IPLength: 4, SrcIP: net.IPv4(192, 168, 0, 1), DstIP: net.IPv4(192, 168, 0, 2), @@ -491,11 +491,11 @@ func Test_gap_in_response(t *testing.T) { // request and response from tests/pcaps/mysql_result_long.pcap // select * from test - req_data, err := hex.DecodeString( + reqData, err := hex.DecodeString( "130000000373656c656374202a20" + "66726f6d2074657374") assert.Nil(t, err) - resp_data, err := hex.DecodeString( + respData, err := hex.DecodeString( "0100000103240000020364656604" + "74657374047465737404746573740161" + "01610c3f000b00000003000000000024" + @@ -514,9 +514,9 @@ func Test_gap_in_response(t *testing.T) { "696e6475737472792e204c6f72656d20") assert.Nil(t, err) - tcptuple := testTcpTuple() - req := protos.Packet{Payload: req_data} - resp := protos.Packet{Payload: resp_data} + tcptuple := testTCPTuple() + req := protos.Packet{Payload: reqData} + resp := protos.Packet{Payload: respData} private := protos.ProtocolData(new(mysqlPrivateData)) @@ -544,12 +544,12 @@ func Test_gap_in_eat_message(t *testing.T) { // request from tests/pcaps/mysql_result_long.pcap // "select * from test". Last byte missing. - req_data, err := hex.DecodeString( + reqData, err := hex.DecodeString( "130000000373656c656374202a20" + "66726f6d20746573") assert.Nil(t, err) - stream := &MysqlStream{data: req_data, message: new(MysqlMessage)} + stream := &MysqlStream{data: reqData, message: new(MysqlMessage)} ok, complete := mysqlMessageParser(stream) assert.Equal(t, true, ok) assert.Equal(t, false, complete) @@ -566,13 +566,13 @@ func Test_read_length(t *testing.T) { var err error var length int - _, err = read_length([]byte{}, 0) + _, err = readLength([]byte{}, 0) assert.NotNil(t, err) - _, err = read_length([]byte{0x00, 0x00}, 0) + _, err = readLength([]byte{0x00, 0x00}, 0) assert.NotNil(t, err) - length, err = read_length([]byte{0x01, 0x00, 0x00}, 0) + length, err = readLength([]byte{0x01, 0x00, 0x00}, 0) assert.Nil(t, err) assert.Equal(t, length, 1) } diff --git a/packetbeat/protos/nfs/nfs.go b/packetbeat/protos/nfs/nfs.go index 310f2903760..7847b052f9c 100644 --- a/packetbeat/protos/nfs/nfs.go +++ b/packetbeat/protos/nfs/nfs.go @@ -4,13 +4,13 @@ import ( "github.com/elastic/beats/libbeat/common" ) -type Nfs struct { +type NFS struct { vers uint32 proc uint32 event common.MapStr } -func (nfs *Nfs) getRequestInfo(xdr *Xdr) common.MapStr { +func (nfs *NFS) getRequestInfo(xdr *Xdr) common.MapStr { nfsInfo := common.MapStr{} nfsInfo["version"] = nfs.vers @@ -32,12 +32,12 @@ func (nfs *Nfs) getRequestInfo(xdr *Xdr) common.MapStr { return nfsInfo } -func (nfs *Nfs) getNFSReplyStatus(xdr *Xdr) string { +func (nfs *NFS) getNFSReplyStatus(xdr *Xdr) string { switch nfs.proc { case 0: - return NFS_STATUS[0] + return NFSStatus[0] default: stat := int(xdr.getUInt()) - return NFS_STATUS[stat] + return NFSStatus[stat] } } diff --git a/packetbeat/protos/nfs/nfs3.go b/packetbeat/protos/nfs/nfs3.go index 9bcfba4a631..4cb2e12b914 100644 --- a/packetbeat/protos/nfs/nfs3.go +++ b/packetbeat/protos/nfs/nfs3.go @@ -1,6 +1,6 @@ package nfs -var nfs_opnum3 = [...]string{ +var nfsOpnum3 
= [...]string{ "NULL", "GETATTR", "SETATTR", @@ -25,10 +25,9 @@ var nfs_opnum3 = [...]string{ "COMMIT", } -func (nfs *Nfs) getV3Opcode(proc int) string { - if proc < len(nfs_opnum3) { - return nfs_opnum3[proc] - } else { - return "ILLEGAL" +func (nfs *NFS) getV3Opcode(proc int) string { + if proc < len(nfsOpnum3) { + return nfsOpnum3[proc] } + return "ILLEGAL" } diff --git a/packetbeat/protos/nfs/nfs4.go b/packetbeat/protos/nfs/nfs4.go index faabfa107bb..32516623b59 100644 --- a/packetbeat/protos/nfs/nfs4.go +++ b/packetbeat/protos/nfs/nfs4.go @@ -3,66 +3,66 @@ package nfs import "fmt" const ( - OP_ACCESS = 3 - OP_CLOSE = 4 - OP_COMMIT = 5 - OP_CREATE = 6 - OP_DELEGPURGE = 7 - OP_DELEGRETURN = 8 - OP_GETATTR = 9 - OP_GETFH = 10 - OP_LINK = 11 - OP_LOCK = 12 - OP_LOCKT = 13 - OP_LOCKU = 14 - OP_LOOKUP = 15 - OP_LOOKUPP = 16 - OP_NVERIFY = 17 - OP_OPEN = 18 - OP_OPENATTR = 19 - OP_OPEN_CONFIRM = 20 - OP_OPEN_DOWNGRADE = 21 - OP_PUTFH = 22 - OP_PUTPUBFH = 23 - OP_PUTROOTFH = 24 - OP_READ = 25 - OP_READDIR = 26 - OP_READLINK = 27 - OP_REMOVE = 28 - OP_RENAME = 29 - OP_RENEW = 30 - OP_RESTOREFH = 31 - OP_SAVEFH = 32 - OP_SECINFO = 33 - OP_SETATTR = 34 - OP_SETCLIENTID = 35 - OP_SETCLIENTID_CONFIRM = 36 - OP_VERIFY = 37 - OP_WRITE = 38 - OP_RELEASE_LOCKOWNER = 39 - OP_BACKCHANNEL_CTL = 40 - OP_BIND_CONN_TO_SESSION = 41 - OP_EXCHANGE_ID = 42 - OP_CREATE_SESSION = 43 - OP_DESTROY_SESSION = 44 - OP_FREE_STATEID = 45 - OP_GET_DIR_DELEGATION = 46 - OP_GETDEVICEINFO = 47 - OP_GETDEVICELIST = 48 - OP_LAYOUTCOMMIT = 49 - OP_LAYOUTGET = 50 - OP_LAYOUTRETURN = 51 - OP_SECINFO_NO_NAME = 52 - OP_SEQUENCE = 53 - OP_SET_SSV = 54 - OP_TEST_STATEID = 55 - OP_WANT_DELEGATION = 56 - OP_DESTROY_CLIENTID = 57 - OP_RECLAIM_COMPLETE = 58 - OP_ILLEGAL = 10044 + OpAccess = 3 + OpClose = 4 + OpCommit = 5 + OpCreate = 6 + OpDelegpurge = 7 + OpDelegreturn = 8 + OpGetattr = 9 + OpGetfh = 10 + OpLink = 11 + OpLock = 12 + OpLockt = 13 + OpLocku = 14 + OpLookup = 15 + OpLookupp = 16 + OpNverify = 17 + OpOpen = 18 + OpOpenattr = 19 + OpOpenConfirm = 20 + OpOpenDowngrade = 21 + OpPutfh = 22 + OpPutpubfh = 23 + OpPutrootfh = 24 + OpRead = 25 + OpReaddir = 26 + OpReadlink = 27 + OpRemove = 28 + OpRename = 29 + OpRenew = 30 + OpRestorefh = 31 + OpSavefh = 32 + OpSecinfo = 33 + OpSetattr = 34 + OpSetclientid = 35 + OpSetclientidConfirm = 36 + OpVerify = 37 + OpWrite = 38 + OpReleaseLockowner = 39 + OpBackchannelCtl = 40 + OpBindConnToSession = 41 + OpExchangeID = 42 + OpCreateSession = 43 + OpDestroySession = 44 + OpFreeStateid = 45 + OpGetDirDelegation = 46 + OpGetdeviceinfo = 47 + OpGetdevicelist = 48 + OpLayoutcommit = 49 + OpLayoutget = 50 + OpLayoutreturn = 51 + OpSecinfoNoName = 52 + OpSequence = 53 + OpSetSsv = 54 + OpTestStateid = 55 + OpWantDelegation = 56 + OpDestroyClientid = 57 + OpReclaimComplete = 58 + OpIllegal = 10044 ) -var nfs_opnum4 = map[int]string{ +var nfsOpnum4 = map[int]string{ 3: "ACCESS", 4: "CLOSE", 5: "COMMIT", @@ -122,40 +122,40 @@ var nfs_opnum4 = map[int]string{ 10044: "ILLEGAL", } -func (nfs *Nfs) eatData(op int, xdr *Xdr) { +func (nfs *NFS) eatData(op int, xdr *Xdr) { switch op { - case OP_GETATTR: + case OpGetattr: xdr.getUIntVector() - case OP_GETFH: + case OpGetfh: // nothing to eat - case OP_LOOKUP: + case OpLookup: xdr.getDynamicOpaque() - case OP_LOOKUPP: + case OpLookupp: // nothing to eat - case OP_NVERIFY: + case OpNverify: xdr.getUIntVector() xdr.getDynamicOpaque() - case OP_PUTFH: + case OpPutfh: xdr.getDynamicOpaque() - case OP_PUTPUBFH: + case OpPutpubfh: // nothing to eat - case 
OP_PUTROOTFH: + case OpPutrootfh: // nothing to eat - case OP_READLINK: + case OpReadlink: // nothing to eat - case OP_RENEW: + case OpRenew: xdr.getUHyper() - case OP_RESTOREFH: + case OpRestorefh: // nothing to eat - case OP_SAVEFH: + case OpSavefh: // nothing to eat - case OP_SECINFO: + case OpSecinfo: xdr.getDynamicOpaque() - case OP_VERIFY: + case OpVerify: xdr.getUIntVector() xdr.getDynamicOpaque() - case OP_SEQUENCE: + case OpSequence: xdr.getOpaque(16) xdr.getUInt() xdr.getUInt() @@ -179,23 +179,23 @@ func (nfs *Nfs) eatData(op int, xdr *Xdr) { // PUTFH + GETATTR // // GETATTR is the main operation. -func (nfs *Nfs) findV4MainOpcode(xdr *Xdr) string { +func (nfs *NFS) findV4MainOpcode(xdr *Xdr) string { // did we find a main operation opcode? found := false // default op code - current_opname := "ILLEGAL" + currentOpname := "ILLEGAL" opcount := int(xdr.getUInt()) for i := 0; !found && i < opcount; i++ { op := int(xdr.getUInt()) - opname, ok := nfs_opnum4[op] + opname, ok := nfsOpnum4[op] if !ok { return fmt.Sprintf("ILLEGAL (%d)", op) } - current_opname = opname + currentOpname = opname switch op { // First class ops @@ -205,53 +205,53 @@ func (nfs *Nfs) findV4MainOpcode(xdr *Xdr) string { // first class ops are used, like OPEN->LOCK->WRITE->LOCKU->CLOSE, // but such construnction are not used in the practice. case - OP_ACCESS, - OP_BACKCHANNEL_CTL, - OP_BIND_CONN_TO_SESSION, - OP_CLOSE, - OP_COMMIT, - OP_CREATE, - OP_CREATE_SESSION, - OP_DELEGPURGE, - OP_DELEGRETURN, - OP_DESTROY_CLIENTID, - OP_DESTROY_SESSION, - OP_EXCHANGE_ID, - OP_FREE_STATEID, - OP_GETDEVICEINFO, - OP_GETDEVICELIST, - OP_GET_DIR_DELEGATION, - OP_LAYOUTCOMMIT, - OP_LAYOUTGET, - OP_LAYOUTRETURN, - OP_LINK, - OP_LOCK, - OP_LOCKT, - OP_LOCKU, - OP_OPEN, - OP_OPENATTR, - OP_OPEN_CONFIRM, - OP_OPEN_DOWNGRADE, - OP_READ, - OP_READDIR, - OP_READLINK, - OP_RECLAIM_COMPLETE, - OP_RELEASE_LOCKOWNER, - OP_REMOVE, - OP_RENAME, - OP_SECINFO_NO_NAME, - OP_SETATTR, - OP_SETCLIENTID, - OP_SETCLIENTID_CONFIRM, - OP_SET_SSV, - OP_TEST_STATEID, - OP_WANT_DELEGATION, - OP_WRITE: + OpAccess, + OpBackchannelCtl, + OpBindConnToSession, + OpClose, + OpCommit, + OpCreate, + OpCreateSession, + OpDelegpurge, + OpDelegreturn, + OpDestroyClientid, + OpDestroySession, + OpExchangeID, + OpFreeStateid, + OpGetdeviceinfo, + OpGetdevicelist, + OpGetDirDelegation, + OpLayoutcommit, + OpLayoutget, + OpLayoutreturn, + OpLink, + OpLock, + OpLockt, + OpLocku, + OpOpen, + OpOpenattr, + OpOpenConfirm, + OpOpenDowngrade, + OpRead, + OpReaddir, + OpReadlink, + OpReclaimComplete, + OpReleaseLockowner, + OpRemove, + OpRename, + OpSecinfoNoName, + OpSetattr, + OpSetclientid, + OpSetclientidConfirm, + OpSetSsv, + OpTestStateid, + OpWantDelegation, + OpWrite: found = true default: nfs.eatData(op, xdr) } } - return current_opname + return currentOpname } diff --git a/packetbeat/protos/nfs/nfs_status.go b/packetbeat/protos/nfs/nfs_status.go index 08746d84666..0e2c611e048 100644 --- a/packetbeat/protos/nfs/nfs_status.go +++ b/packetbeat/protos/nfs/nfs_status.go @@ -1,6 +1,6 @@ package nfs -var NFS_STATUS = map[int]string{ +var NFSStatus = map[int]string{ 0: "NFS_OK", 1: "NFSERR_PERM", 2: "NFSERR_NOENT", diff --git a/packetbeat/protos/nfs/request_handler.go b/packetbeat/protos/nfs/request_handler.go index 3db509f3e89..bc82e15020f 100644 --- a/packetbeat/protos/nfs/request_handler.go +++ b/packetbeat/protos/nfs/request_handler.go @@ -1,6 +1,7 @@ -// This file contains methods process RPC calls package nfs +// This file contains methods process RPC calls + import 
( "expvar" "fmt" @@ -10,9 +11,9 @@ import ( "github.com/elastic/beats/packetbeat/protos/tcp" ) -const NFS_PROGRAM_NUMBER = 100003 +const NFSProgramNumber = 100003 -var ACCEPT_STATUS = [...]string{ +var AcceptStatus = [...]string{ "success", "prog_unavail", "prog_mismatch", @@ -26,19 +27,19 @@ var ( ) // called by Cache, when re reply seen within expected time window -func (rpc *Rpc) handleExpiredPacket(nfs *Nfs) { +func (rpc *RPC) handleExpiredPacket(nfs *NFS) { nfs.event["status"] = "NO_REPLY" rpc.results.PublishTransaction(nfs.event) unmatchedRequests.Add(1) } // called when we process a RPC call -func (rpc *Rpc) handleCall(xid string, xdr *Xdr, ts time.Time, tcptuple *common.TCPTuple, dir uint8) { +func (rpc *RPC) handleCall(xid string, xdr *Xdr, ts time.Time, tcptuple *common.TCPTuple, dir uint8) { // eat rpc version number xdr.getUInt() rpcProg := xdr.getUInt() - if rpcProg != NFS_PROGRAM_NUMBER { + if rpcProg != NFSProgramNumber { // not a NFS request return } @@ -54,7 +55,7 @@ func (rpc *Rpc) handleCall(xid string, xdr *Xdr, ts time.Time, tcptuple *common. // The direction of the stream is based in the direction of first packet seen. // if we have stored stream in reverse order, swap src and dst - if dir == tcp.TcpDirectionReverse { + if dir == tcp.TCPDirectionReverse { src, dst = dst, src } @@ -72,15 +73,15 @@ func (rpc *Rpc) handleCall(xid string, xdr *Xdr, ts time.Time, tcptuple *common. rpcInfo["xid"] = xid rpcInfo["call_size"] = xdr.size() - auth_flavor := xdr.getUInt() - auth_opaque := xdr.getDynamicOpaque() - switch auth_flavor { + authFlavor := xdr.getUInt() + authOpaque := xdr.getDynamicOpaque() + switch authFlavor { case 0: rpcInfo["auth_flavor"] = "none" case 1: rpcInfo["auth_flavor"] = "unix" cred := common.MapStr{} - credXdr := Xdr{data: auth_opaque, offset: 0} + credXdr := Xdr{data: authOpaque, offset: 0} cred["stamp"] = credXdr.getUInt() machine := credXdr.getString() if machine == "" { @@ -94,7 +95,7 @@ func (rpc *Rpc) handleCall(xid string, xdr *Xdr, ts time.Time, tcptuple *common. case 6: rpcInfo["auth_flavor"] = "rpcsec_gss" default: - rpcInfo["auth_flavor"] = fmt.Sprintf("unknown (%d)", auth_flavor) + rpcInfo["auth_flavor"] = fmt.Sprintf("unknown (%d)", authFlavor) } // eat auth verifier @@ -103,18 +104,18 @@ func (rpc *Rpc) handleCall(xid string, xdr *Xdr, ts time.Time, tcptuple *common. event["type"] = "nfs" event["rpc"] = rpcInfo - nfs := Nfs{vers: nfsVers, proc: nfsProc, event: event} + nfs := NFS{vers: nfsVers, proc: nfsProc, event: event} event["nfs"] = nfs.getRequestInfo(xdr) // use xid+src ip to uniquely identify request - reqId := xid + tcptuple.SrcIP.String() + reqID := xid + tcptuple.SrcIP.String() // populate cache to trace request reply - rpc.callsSeen.Put(reqId, &nfs) + rpc.callsSeen.Put(reqID, &nfs) } // called when we process a RPC reply -func (rpc *Rpc) handleReply(xid string, xdr *Xdr, ts time.Time, tcptuple *common.TCPTuple, dir uint8) { +func (rpc *RPC) handleReply(xid string, xdr *Xdr, ts time.Time, tcptuple *common.TCPTuple, dir uint8) { replyStatus := xdr.getUInt() // we are interested only in accepted rpc reply if replyStatus != 0 { @@ -126,19 +127,19 @@ func (rpc *Rpc) handleReply(xid string, xdr *Xdr, ts time.Time, tcptuple *common xdr.getDynamicOpaque() // xid+src ip is used to uniquely identify request. 
- var reqId string - if dir == tcp.TcpDirectionReverse { + var reqID string + if dir == tcp.TCPDirectionReverse { // stream in correct order: Src points to a client - reqId = xid + tcptuple.SrcIP.String() + reqID = xid + tcptuple.SrcIP.String() } else { // stream in reverse order: Dst points to a client - reqId = xid + tcptuple.DstIP.String() + reqID = xid + tcptuple.DstIP.String() } // get cached request - v := rpc.callsSeen.Delete(reqId) + v := rpc.callsSeen.Delete(reqID) if v != nil { - nfs := v.(*Nfs) + nfs := v.(*NFS) event := nfs.event rpcInfo := event["rpc"].(common.MapStr) rpcInfo["reply_size"] = xdr.size() @@ -147,7 +148,7 @@ func (rpc *Rpc) handleReply(xid string, xdr *Xdr, ts time.Time, tcptuple *common // the same in human readable form rpcInfo["time_str"] = fmt.Sprintf("%v", rpcTime) acceptStatus := int(xdr.getUInt()) - rpcInfo["status"] = ACCEPT_STATUS[acceptStatus] + rpcInfo["status"] = AcceptStatus[acceptStatus] // populate nfs info for successfully executed requests if acceptStatus == 0 { diff --git a/packetbeat/protos/nfs/rpc.go b/packetbeat/protos/nfs/rpc.go index 16c75b6fa61..49d99a78d2a 100644 --- a/packetbeat/protos/nfs/rpc.go +++ b/packetbeat/protos/nfs/rpc.go @@ -21,25 +21,25 @@ import ( var debugf = logp.MakeDebug("rpc") const ( - RPC_LAST_FRAG = 0x80000000 - RPC_SIZE_MASK = 0x7fffffff + RPCLastFrag = 0x80000000 + RPCSizeMask = 0x7fffffff ) const ( - RPC_CALL = 0 - RPC_REPLY = 1 + RPCCall = 0 + RPCReply = 1 ) -type RpcStream struct { +type RPCStream struct { tcpTuple *common.TCPTuple rawData []byte } type rpcConnectionData struct { - Streams [2]*RpcStream + Streams [2]*RPCStream } -type Rpc struct { +type RPC struct { // Configuration data. Ports []int callsSeen *common.Cache @@ -57,7 +57,7 @@ func New( results publish.Transactions, cfg *common.Config, ) (protos.Plugin, error) { - p := &Rpc{} + p := &RPC{} config := defaultConfig if !testMode { if err := cfg.Unpack(&config); err != nil { @@ -73,14 +73,14 @@ func New( return p, nil } -func (rpc *Rpc) init(results publish.Transactions, config *rpcConfig) error { +func (rpc *RPC) init(results publish.Transactions, config *rpcConfig) error { rpc.setFromConfig(config) rpc.results = results rpc.callsSeen = common.NewCacheWithRemovalListener( rpc.transactionTimeout, protos.DefaultTransactionHashSize, func(k common.Key, v common.Value) { - nfs, ok := v.(*Nfs) + nfs, ok := v.(*NFS) if !ok { logp.Err("Expired value is not a MapStr (%T).", v) return @@ -92,18 +92,18 @@ func (rpc *Rpc) init(results publish.Transactions, config *rpcConfig) error { return nil } -func (rpc *Rpc) setFromConfig(config *rpcConfig) error { +func (rpc *RPC) setFromConfig(config *rpcConfig) error { rpc.Ports = config.Ports rpc.transactionTimeout = config.TransactionTimeout return nil } -func (rpc *Rpc) GetPorts() []int { +func (rpc *RPC) GetPorts() []int { return rpc.Ports } // Called when TCP payload data is available for parsing. -func (rpc *Rpc) Parse( +func (rpc *RPC) Parse( pkt *protos.Packet, tcptuple *common.TCPTuple, dir uint8, @@ -112,9 +112,9 @@ func (rpc *Rpc) Parse( defer logp.Recover("ParseRPC exception") - conn := ensureRpcConnection(private) + conn := ensureRPCConnection(private) - conn = rpc.handleRpcFragment(conn, pkt, tcptuple, dir) + conn = rpc.handleRPCFragment(conn, pkt, tcptuple, dir) if conn == nil { return nil } @@ -122,7 +122,7 @@ func (rpc *Rpc) Parse( } // Called when the FIN flag is seen in the TCP stream. 
-func (rpc *Rpc) ReceivedFin(tcptuple *common.TCPTuple, dir uint8, +func (rpc *RPC) ReceivedFin(tcptuple *common.TCPTuple, dir uint8, private protos.ProtocolData) protos.ProtocolData { defer logp.Recover("ReceivedFinRpc exception") @@ -133,7 +133,7 @@ func (rpc *Rpc) ReceivedFin(tcptuple *common.TCPTuple, dir uint8, // Called when a packets are missing from the tcp // stream. -func (rpc *Rpc) GapInStream(tcptuple *common.TCPTuple, dir uint8, +func (rpc *RPC) GapInStream(tcptuple *common.TCPTuple, dir uint8, nbytes int, private protos.ProtocolData) (priv protos.ProtocolData, drop bool) { defer logp.Recover("GapInRpcStream exception") @@ -144,20 +144,20 @@ func (rpc *Rpc) GapInStream(tcptuple *common.TCPTuple, dir uint8, // ConnectionTimeout returns the per stream connection timeout. // Return <=0 to set default tcp module transaction timeout. -func (rpc *Rpc) ConnectionTimeout() time.Duration { +func (rpc *RPC) ConnectionTimeout() time.Duration { // forced by TCP interface return rpc.transactionTimeout } -func ensureRpcConnection(private protos.ProtocolData) *rpcConnectionData { - conn := getRpcConnection(private) +func ensureRPCConnection(private protos.ProtocolData) *rpcConnectionData { + conn := getRPCConnection(private) if conn == nil { conn = &rpcConnectionData{} } return conn } -func getRpcConnection(private protos.ProtocolData) *rpcConnectionData { +func getRPCConnection(private protos.ProtocolData) *rpcConnectionData { if private == nil { return nil } @@ -176,7 +176,7 @@ func getRpcConnection(private protos.ProtocolData) *rpcConnectionData { } // Parse function is used to process TCP payloads. -func (rpc *Rpc) handleRpcFragment( +func (rpc *RPC) handleRPCFragment( conn *rpcConnectionData, pkt *protos.Packet, tcptuple *common.TCPTuple, @@ -190,7 +190,7 @@ func (rpc *Rpc) handleRpcFragment( } else { // concatenate bytes st.rawData = append(st.rawData, pkt.Payload...) 
- if len(st.rawData) > tcp.TCP_MAX_DATA_IN_STREAM { + if len(st.rawData) > tcp.TCPMaxDataInStream { debugf("Stream data too large, dropping TCP stream") conn.Streams[dir] = nil return conn @@ -205,8 +205,8 @@ func (rpc *Rpc) handleRpcFragment( } marker := uint32(binary.BigEndian.Uint32(st.rawData[0:4])) - size := int(marker & RPC_SIZE_MASK) - islast := (marker & RPC_LAST_FRAG) != 0 + size := int(marker & RPCSizeMask) + islast := (marker & RPCLastFrag) != 0 if len(st.rawData)-4 < size { debugf("Wainting for more data") @@ -223,30 +223,30 @@ func (rpc *Rpc) handleRpcFragment( // keep the rest of the next fragment st.rawData = st.rawData[4+size:] - rpc.handleRpcPacket(xdr, pkt.Ts, tcptuple, dir) + rpc.handleRPCPacket(xdr, pkt.Ts, tcptuple, dir) } return conn } -func (rpc *Rpc) handleRpcPacket(xdr *Xdr, ts time.Time, tcptuple *common.TCPTuple, dir uint8) { +func (rpc *RPC) handleRPCPacket(xdr *Xdr, ts time.Time, tcptuple *common.TCPTuple, dir uint8) { xid := fmt.Sprintf("%.8x", xdr.getUInt()) msgType := xdr.getUInt() switch msgType { - case RPC_CALL: + case RPCCall: rpc.handleCall(xid, xdr, ts, tcptuple, dir) - case RPC_REPLY: + case RPCReply: rpc.handleReply(xid, xdr, ts, tcptuple, dir) default: logp.Warn("Bad RPC message") } } -func newStream(pkt *protos.Packet, tcptuple *common.TCPTuple) *RpcStream { - return &RpcStream{ +func newStream(pkt *protos.Packet, tcptuple *common.TCPTuple) *RPCStream { + return &RPCStream{ tcpTuple: tcptuple, rawData: pkt.Payload, } diff --git a/packetbeat/protos/nfs/xdr_test.go b/packetbeat/protos/nfs/xdr_test.go index 9f13ba4f77c..6a76d5303a8 100644 --- a/packetbeat/protos/nfs/xdr_test.go +++ b/packetbeat/protos/nfs/xdr_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/assert" ) -var test_msg = []byte{ +var testMsg = []byte{ 0x80, 0x00, 0x00, 0xe0, 0xb5, 0x49, 0x21, 0xab, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, @@ -17,12 +17,12 @@ var test_msg = []byte{ } func TestXdrDecoding(t *testing.T) { - xdr := Xdr{data: test_msg, offset: 0} + xdr := Xdr{data: testMsg, offset: 0} assert.Equal(t, uint32(0x800000e0), uint32(xdr.getUInt())) assert.Equal(t, uint32(0xb54921ab), uint32(xdr.getUInt())) assert.Equal(t, uint64(2), uint64(xdr.getUHyper())) assert.Equal(t, uint32(4), uint32(xdr.getUInt())) assert.Equal(t, "test string", xdr.getString()) - assert.Equal(t, len(test_msg), xdr.size()) + assert.Equal(t, len(testMsg), xdr.size()) } diff --git a/packetbeat/protos/pgsql/parse.go b/packetbeat/protos/pgsql/parse.go index 0c8c67f071c..ae5cc9a1537 100644 --- a/packetbeat/protos/pgsql/parse.go +++ b/packetbeat/protos/pgsql/parse.go @@ -88,7 +88,7 @@ func (pgsql *Pgsql) parseCommand(s *PgsqlStream) (bool, bool) { // one byte reply to SSLRequest detailedf("Reply for SSLRequest %c", typ) m.start = s.parseOffset - s.parseOffset += 1 + s.parseOffset++ m.end = s.parseOffset m.isSSLResponse = true m.Size = uint64(m.end - m.start) @@ -142,7 +142,7 @@ func (pgsql *Pgsql) parseSimpleQuery(s *PgsqlStream, length int) (bool, bool) { m.start = s.parseOffset m.IsRequest = true - s.parseOffset += 1 //type + s.parseOffset++ //type s.parseOffset += length m.end = s.parseOffset m.Size = uint64(m.end - m.start) @@ -174,7 +174,7 @@ func (pgsql *Pgsql) parseRowDescription(s *PgsqlStream, length int) (bool, bool) } detailedf("Fields: %s", m.Fields) - s.parseOffset += 1 //type + s.parseOffset++ //type s.parseOffset += length //length s.parseState = PgsqlGetDataState return pgsql.parseMessageData(s) @@ -222,7 +222,7 @@ func (pgsql *Pgsql) parseCommandComplete(s *PgsqlStream, length 
int) (bool, bool m.IsOK = true m.toExport = true - s.parseOffset += 1 //type + s.parseOffset++ //type name, err := pgsqlString(s.data[s.parseOffset+4:], length-4) if err != nil { return false, false @@ -244,7 +244,7 @@ func (pgsql *Pgsql) parseReadyForQuery(s *PgsqlStream, length int) (bool, bool) m.start = s.parseOffset m.Size = uint64(m.end - m.start) - s.parseOffset += 1 // type + s.parseOffset++ // type s.parseOffset += length m.end = s.parseOffset @@ -261,7 +261,7 @@ func (pgsql *Pgsql) parseErrorResponse(s *PgsqlStream, length int) (bool, bool) m.IsError = true m.toExport = true - s.parseOffset += 1 //type + s.parseOffset++ //type pgsqlErrorParser(s, s.data[s.parseOffset+4:s.parseOffset+length]) s.parseOffset += length //length @@ -279,7 +279,7 @@ func (pgsql *Pgsql) parseExtReq(s *PgsqlStream, length int) (bool, bool) { m.start = s.parseOffset m.IsRequest = true - s.parseOffset += 1 //type + s.parseOffset++ //type s.parseOffset += length m.end = s.parseOffset m.Size = uint64(m.end - m.start) @@ -311,7 +311,7 @@ func (pgsql *Pgsql) parseExtResp(s *PgsqlStream, length int) (bool, bool) { m.IsOK = true m.toExport = true - s.parseOffset += 1 //type + s.parseOffset++ //type s.parseOffset += length detailedf("Parse completion in an extended query response") s.parseState = PgsqlGetDataState @@ -322,7 +322,7 @@ func (pgsql *Pgsql) parseSkipMessage(s *PgsqlStream, length int) (bool, bool) { // TODO: add info from NoticeResponse in case there are warning messages for a query // ignore command - s.parseOffset += 1 //type + s.parseOffset++ //type s.parseOffset += length m := s.message @@ -360,7 +360,7 @@ func pgsqlFieldsParser(s *PgsqlStream, buf []byte) error { return errNoFieldName } fields = append(fields, fieldName) - m.NumberOfFields += 1 + m.NumberOfFields++ off += len(fieldName) + 1 // read Table OID (int32) @@ -463,13 +463,13 @@ func (pgsql *Pgsql) parseMessageData(s *PgsqlStream) (bool, bool) { if err != nil { return false, false } - s.parseOffset += 1 + s.parseOffset++ s.parseOffset += length case 'C': // CommandComplete // skip type - s.parseOffset += 1 + s.parseOffset++ name, err := pgsqlString(s.data[s.parseOffset+4:], length-4) if err != nil { @@ -490,7 +490,7 @@ func (pgsql *Pgsql) parseMessageData(s *PgsqlStream) (bool, bool) { // Parse completion -> Bind completion for an extended query response // skip type - s.parseOffset += 1 + s.parseOffset++ s.parseOffset += length s.parseState = PgsqlStartState case 'T': @@ -557,7 +557,7 @@ func (pgsql *Pgsql) parseDataRow(s *PgsqlStream, buf []byte) error { return errFieldBufferBig } - m.NumberOfRows += 1 + m.NumberOfRows++ if len(m.Rows) < pgsql.maxStoreRows { m.Rows = append(m.Rows, rows) } @@ -599,28 +599,28 @@ func (pgsql *Pgsql) parseMessageExtendedQuery(s *PgsqlStream) (bool, bool) { // Parse -> Bind // skip type - s.parseOffset += 1 + s.parseOffset++ s.parseOffset += length //TODO: pgsql.parseBind(s) case 'D': // Bind -> Describe // skip type - s.parseOffset += 1 + s.parseOffset++ s.parseOffset += length //TODO: pgsql.parseDescribe(s) case 'E': // Bind(or Describe) -> Execute // skip type - s.parseOffset += 1 + s.parseOffset++ s.parseOffset += length //TODO: pgsql.parseExecute(s) case 'S': // Execute -> Sync // skip type - s.parseOffset += 1 + s.parseOffset++ s.parseOffset += length m.end = s.parseOffset m.Size = uint64(m.end - m.start) diff --git a/packetbeat/protos/pgsql/pgsql.go b/packetbeat/protos/pgsql/pgsql.go index c66c90efe61..86fcbf126bb 100644 --- a/packetbeat/protos/pgsql/pgsql.go +++ b/packetbeat/protos/pgsql/pgsql.go 
@@ -39,7 +39,7 @@ type PgsqlMessage struct { Notes []string Direction uint8 - TcpTuple common.TCPTuple + TCPTuple common.TCPTuple CmdlineTuple *common.CmdlineTuple } @@ -60,8 +60,8 @@ type PgsqlTransaction struct { Pgsql common.MapStr - Request_raw string - Response_raw string + RequestRaw string + ResponseRaw string } type PgsqlStream struct { @@ -105,11 +105,11 @@ var ( type Pgsql struct { // config - Ports []int - maxStoreRows int - maxRowLength int - Send_request bool - Send_response bool + Ports []int + maxStoreRows int + maxRowLength int + SendRequest bool + SendResponse bool transactions *common.Cache transactionTimeout time.Duration @@ -161,8 +161,8 @@ func (pgsql *Pgsql) setFromConfig(config *pgsqlConfig) { pgsql.Ports = config.Ports pgsql.maxRowLength = config.MaxRowLength pgsql.maxStoreRows = config.MaxRows - pgsql.Send_request = config.SendRequest - pgsql.Send_response = config.SendResponse + pgsql.SendRequest = config.SendRequest + pgsql.SendResponse = config.SendResponse pgsql.transactionTimeout = config.TransactionTimeout } @@ -231,7 +231,7 @@ func (pgsql *Pgsql) Parse(pkt *protos.Packet, tcptuple *common.TCPTuple, // concatenate bytes priv.Data[dir].data = append(priv.Data[dir].data, pkt.Payload...) logp.Debug("pgsqldetailed", "Len data: %d cap data: %d", len(priv.Data[dir].data), cap(priv.Data[dir].data)) - if len(priv.Data[dir].data) > tcp.TCP_MAX_DATA_IN_STREAM { + if len(priv.Data[dir].data) > tcp.TCPMaxDataInStream { debugf("Stream data too large, dropping TCP stream") priv.Data[dir] = nil return priv @@ -297,9 +297,8 @@ func messageHasEnoughData(msg *PgsqlMessage) bool { } if msg.IsRequest { return len(msg.Query) > 0 - } else { - return len(msg.Rows) > 0 } + return len(msg.Rows) > 0 } // Called when there's a drop packet @@ -350,7 +349,7 @@ func (pgsql *Pgsql) ReceivedFin(tcptuple *common.TCPTuple, dir uint8, var handlePgsql = func(pgsql *Pgsql, m *PgsqlMessage, tcptuple *common.TCPTuple, dir uint8, raw_msg []byte) { - m.TcpTuple = *tcptuple + m.TCPTuple = *tcptuple m.Direction = dir m.CmdlineTuple = procs.ProcWatcher.FindProcessesTuple(tcptuple.IPPort()) @@ -363,7 +362,7 @@ var handlePgsql = func(pgsql *Pgsql, m *PgsqlMessage, tcptuple *common.TCPTuple, func (pgsql *Pgsql) receivedPgsqlRequest(msg *PgsqlMessage) { - tuple := msg.TcpTuple + tuple := msg.TCPTuple // parse the query, as it might contain a list of pgsql command // separated by ';' @@ -384,16 +383,16 @@ func (pgsql *Pgsql) receivedPgsqlRequest(msg *PgsqlMessage) { trans.Ts = int64(trans.ts.UnixNano() / 1000) // transactions have microseconds resolution trans.JsTs = msg.Ts trans.Src = common.Endpoint{ - IP: msg.TcpTuple.SrcIP.String(), - Port: msg.TcpTuple.SrcPort, + IP: msg.TCPTuple.SrcIP.String(), + Port: msg.TCPTuple.SrcPort, Proc: string(msg.CmdlineTuple.Src), } trans.Dst = common.Endpoint{ - IP: msg.TcpTuple.DstIP.String(), - Port: msg.TcpTuple.DstPort, + IP: msg.TCPTuple.DstIP.String(), + Port: msg.TCPTuple.DstPort, Proc: string(msg.CmdlineTuple.Dst), } - if msg.Direction == tcp.TcpDirectionReverse { + if msg.Direction == tcp.TCPDirectionReverse { trans.Src, trans.Dst = trans.Dst, trans.Src } @@ -404,7 +403,7 @@ func (pgsql *Pgsql) receivedPgsqlRequest(msg *PgsqlMessage) { trans.Notes = msg.Notes - trans.Request_raw = query + trans.RequestRaw = query transList = append(transList, trans) } @@ -413,7 +412,7 @@ func (pgsql *Pgsql) receivedPgsqlRequest(msg *PgsqlMessage) { func (pgsql *Pgsql) receivedPgsqlResponse(msg *PgsqlMessage) { - tuple := msg.TcpTuple + tuple := msg.TCPTuple transList := 
pgsql.getTransaction(tuple.Hashable()) if transList == nil || len(transList) == 0 { debugf("Response from unknown transaction. Ignoring.") @@ -442,13 +441,13 @@ func (pgsql *Pgsql) receivedPgsqlResponse(msg *PgsqlMessage) { trans.BytesOut = msg.Size trans.ResponseTime = int32(msg.Ts.Sub(trans.ts).Nanoseconds() / 1e6) // resp_time in milliseconds - trans.Response_raw = common.DumpInCSVFormat(msg.Fields, msg.Rows) + trans.ResponseRaw = common.DumpInCSVFormat(msg.Fields, msg.Rows) trans.Notes = append(trans.Notes, msg.Notes...) pgsql.publishTransaction(trans) - debugf("Postgres transaction completed: %s\n%s", trans.Pgsql, trans.Response_raw) + debugf("Postgres transaction completed: %s\n%s", trans.Pgsql, trans.ResponseRaw) } func (pgsql *Pgsql) publishTransaction(t *PgsqlTransaction) { @@ -466,11 +465,11 @@ func (pgsql *Pgsql) publishTransaction(t *PgsqlTransaction) { event["status"] = common.OK_STATUS } event["responsetime"] = t.ResponseTime - if pgsql.Send_request { - event["request"] = t.Request_raw + if pgsql.SendRequest { + event["request"] = t.RequestRaw } - if pgsql.Send_response { - event["response"] = t.Response_raw + if pgsql.SendResponse { + event["response"] = t.ResponseRaw } event["query"] = t.Query event["method"] = t.Method diff --git a/packetbeat/protos/pgsql/pgsql_test.go b/packetbeat/protos/pgsql/pgsql_test.go index be3a0229644..5c9c6fff4db 100644 --- a/packetbeat/protos/pgsql/pgsql_test.go +++ b/packetbeat/protos/pgsql/pgsql_test.go @@ -203,17 +203,17 @@ func TestPgsqlParser_threeResponses(t *testing.T) { } var tuple common.TCPTuple var private pgsqlPrivateData - var count_handlePgsql = 0 + var countHandlePgsql = 0 pgsql.handlePgsql = func(pgsql *Pgsql, m *PgsqlMessage, tcptuple *common.TCPTuple, dir uint8, raw_msg []byte) { - count_handlePgsql += 1 + countHandlePgsql++ } pgsql.Parse(&pkt, &tuple, 1, private) - if count_handlePgsql != 3 { + if countHandlePgsql != 3 { t.Error("handlePgsql not called three times") } @@ -291,7 +291,7 @@ func TestPgsqlParser_invalidMessage(t *testing.T) { } } -func testTcpTuple() *common.TCPTuple { +func testTCPTuple() *common.TCPTuple { t := &common.TCPTuple{ IPLength: 4, SrcIP: net.IPv4(192, 168, 0, 1), DstIP: net.IPv4(192, 168, 0, 2), @@ -324,13 +324,13 @@ func Test_gap_in_response(t *testing.T) { // request and response from tests/pcaps/pgsql_request_response.pcap // select * from test - req_data, err := hex.DecodeString( + reqData, err := hex.DecodeString( "510000001873656c656374202a20" + "66726f6d20746573743b00") assert.Nil(t, err) // response is incomplete - resp_data, err := hex.DecodeString( + respData, err := hex.DecodeString( "5400000042000361000000410900" + "0100000413ffffffffffff0000620000" + "004009000200000413ffffffffffff00" + @@ -342,9 +342,9 @@ func Test_gap_in_response(t *testing.T) { "440000001e0003000000046d65613200") assert.Nil(t, err) - tcptuple := testTcpTuple() - req := protos.Packet{Payload: req_data} - resp := protos.Packet{Payload: resp_data} + tcptuple := testTCPTuple() + req := protos.Packet{Payload: reqData} + resp := protos.Packet{Payload: respData} private := protos.ProtocolData(new(pgsqlPrivateData)) diff --git a/packetbeat/protos/protos.go b/packetbeat/protos/protos.go index 0da020d1077..8675364d99f 100644 --- a/packetbeat/protos/protos.go +++ b/packetbeat/protos/protos.go @@ -57,30 +57,30 @@ func validatePorts(ports []int) error { } type Protocols interface { - BpfFilter(with_vlans bool, with_icmp bool) string - GetTcp(proto Protocol) TcpPlugin - GetUdp(proto Protocol) UdpPlugin + BpfFilter(withVlans bool, 
withICMP bool) string + GetTCP(proto Protocol) TCPPlugin + GetUDP(proto Protocol) UDPPlugin GetAll() map[Protocol]Plugin - GetAllTcp() map[Protocol]TcpPlugin - GetAllUdp() map[Protocol]UdpPlugin + GetAllTCP() map[Protocol]TCPPlugin + GetAllUDP() map[Protocol]UDPPlugin // Register(proto Protocol, plugin ProtocolPlugin) } // list of protocol plugins type ProtocolsStruct struct { all map[Protocol]Plugin - tcp map[Protocol]TcpPlugin - udp map[Protocol]UdpPlugin + tcp map[Protocol]TCPPlugin + udp map[Protocol]UDPPlugin } // Singleton of Protocols type. var Protos = ProtocolsStruct{ all: map[Protocol]Plugin{}, - tcp: map[Protocol]TcpPlugin{}, - udp: map[Protocol]UdpPlugin{}, + tcp: map[Protocol]TCPPlugin{}, + udp: map[Protocol]UDPPlugin{}, } -func (protocols ProtocolsStruct) Init( +func (s ProtocolsStruct) Init( testMode bool, results publish.Transactions, configs map[string]*common.Config, @@ -118,14 +118,14 @@ func (protocols ProtocolsStruct) Init( return err } - protocols.register(proto, inst) + s.register(proto, inst) } return nil } -func (protocols ProtocolsStruct) GetTcp(proto Protocol) TcpPlugin { - plugin, exists := protocols.tcp[proto] +func (s ProtocolsStruct) GetTCP(proto Protocol) TCPPlugin { + plugin, exists := s.tcp[proto] if !exists { return nil } @@ -133,8 +133,8 @@ func (protocols ProtocolsStruct) GetTcp(proto Protocol) TcpPlugin { return plugin } -func (protocols ProtocolsStruct) GetUdp(proto Protocol) UdpPlugin { - plugin, exists := protocols.udp[proto] +func (s ProtocolsStruct) GetUDP(proto Protocol) UDPPlugin { + plugin, exists := s.udp[proto] if !exists { return nil } @@ -142,26 +142,26 @@ func (protocols ProtocolsStruct) GetUdp(proto Protocol) UdpPlugin { return plugin } -func (protocols ProtocolsStruct) GetAll() map[Protocol]Plugin { - return protocols.all +func (s ProtocolsStruct) GetAll() map[Protocol]Plugin { + return s.all } -func (protocols ProtocolsStruct) GetAllTcp() map[Protocol]TcpPlugin { - return protocols.tcp +func (s ProtocolsStruct) GetAllTCP() map[Protocol]TCPPlugin { + return s.tcp } -func (protocols ProtocolsStruct) GetAllUdp() map[Protocol]UdpPlugin { - return protocols.udp +func (s ProtocolsStruct) GetAllUDP() map[Protocol]UDPPlugin { + return s.udp } // BpfFilter returns a Berkeley Packer Filter (BFP) expression that // will match against packets for the registered protocols. If with_vlans is // true the filter will match against both IEEE 802.1Q VLAN encapsulated // and unencapsulated packets -func (protocols ProtocolsStruct) BpfFilter(with_vlans bool, with_icmp bool) string { +func (s ProtocolsStruct) BpfFilter(withVlans bool, withICMP bool) string { // Sort the protocol IDs so that the return value is consistent. 
var protos []int - for proto := range protocols.all { + for proto := range s.all { protos = append(protos, int(proto)) } sort.Ints(protos) @@ -169,15 +169,15 @@ func (protocols ProtocolsStruct) BpfFilter(with_vlans bool, with_icmp bool) stri var expressions []string for _, key := range protos { proto := Protocol(key) - plugin := protocols.all[proto] + plugin := s.all[proto] for _, port := range plugin.GetPorts() { hasTCP := false hasUDP := false - if _, present := protocols.tcp[proto]; present { + if _, present := s.tcp[proto]; present { hasTCP = true } - if _, present := protocols.udp[proto]; present { + if _, present := s.udp[proto]; present { hasUDP = true } @@ -194,31 +194,31 @@ func (protocols ProtocolsStruct) BpfFilter(with_vlans bool, with_icmp bool) stri } } - if with_icmp { + if withICMP { expressions = append(expressions, "icmp", "icmp6") } filter := strings.Join(expressions, " or ") - if with_vlans { + if withVlans { filter = fmt.Sprintf("%s or (vlan and (%s))", filter, filter) } return filter } -func (protos ProtocolsStruct) register(proto Protocol, plugin Plugin) { - if _, exists := protos.all[proto]; exists { +func (s ProtocolsStruct) register(proto Protocol, plugin Plugin) { + if _, exists := s.all[proto]; exists { logp.Warn("Protocol (%s) plugin will overwritten by another plugin", proto.String()) } - protos.all[proto] = plugin + s.all[proto] = plugin success := false - if tcp, ok := plugin.(TcpPlugin); ok { - protos.tcp[proto] = tcp + if tcp, ok := plugin.(TCPPlugin); ok { + s.tcp[proto] = tcp success = true } - if udp, ok := plugin.(UdpPlugin); ok { - protos.udp[proto] = udp + if udp, ok := plugin.(UDPPlugin); ok { + s.udp[proto] = udp success = true } if !success { diff --git a/packetbeat/protos/protos_test.go b/packetbeat/protos/protos_test.go index ba7f214fe0f..c0a3f3a3f98 100644 --- a/packetbeat/protos/protos_test.go +++ b/packetbeat/protos/protos_test.go @@ -16,77 +16,77 @@ type TestProtocol struct { Ports []int } -type TcpProtocol TestProtocol +type TCPProtocol TestProtocol -func (proto *TcpProtocol) Init(test_mode bool, results publish.Transactions) error { +func (proto *TCPProtocol) Init(testMode bool, results publish.Transactions) error { return nil } -func (proto *TcpProtocol) GetPorts() []int { +func (proto *TCPProtocol) GetPorts() []int { return proto.Ports } -func (proto *TcpProtocol) Parse(pkt *Packet, tcptuple *common.TCPTuple, +func (proto *TCPProtocol) Parse(pkt *Packet, tcptuple *common.TCPTuple, dir uint8, private ProtocolData) ProtocolData { return private } -func (proto *TcpProtocol) ReceivedFin(tcptuple *common.TCPTuple, dir uint8, +func (proto *TCPProtocol) ReceivedFin(tcptuple *common.TCPTuple, dir uint8, private ProtocolData) ProtocolData { return private } -func (proto *TcpProtocol) GapInStream(tcptuple *common.TCPTuple, dir uint8, +func (proto *TCPProtocol) GapInStream(tcptuple *common.TCPTuple, dir uint8, nbytes int, private ProtocolData) (priv ProtocolData, drop bool) { return private, true } -func (proto *TcpProtocol) ConnectionTimeout() time.Duration { return 0 } +func (proto *TCPProtocol) ConnectionTimeout() time.Duration { return 0 } -type UdpProtocol TestProtocol +type UDPProtocol TestProtocol -func (proto *UdpProtocol) Init(test_mode bool, results publish.Transactions) error { +func (proto *UDPProtocol) Init(testMode bool, results publish.Transactions) error { return nil } -func (proto *UdpProtocol) GetPorts() []int { +func (proto *UDPProtocol) GetPorts() []int { return proto.Ports } -func (proto *UdpProtocol) ParseUdp(pkt *Packet) { 
+func (proto *UDPProtocol) ParseUDP(pkt *Packet) { return } -type TcpUdpProtocol TestProtocol +type TCPUDPProtocol TestProtocol -func (proto *TcpUdpProtocol) Init(test_mode bool, results publish.Transactions) error { +func (proto *TCPUDPProtocol) Init(testMode bool, results publish.Transactions) error { return nil } -func (proto *TcpUdpProtocol) GetPorts() []int { +func (proto *TCPUDPProtocol) GetPorts() []int { return proto.Ports } -func (proto *TcpUdpProtocol) Parse(pkt *Packet, tcptuple *common.TCPTuple, +func (proto *TCPUDPProtocol) Parse(pkt *Packet, tcptuple *common.TCPTuple, dir uint8, private ProtocolData) ProtocolData { return private } -func (proto *TcpUdpProtocol) ReceivedFin(tcptuple *common.TCPTuple, dir uint8, +func (proto *TCPUDPProtocol) ReceivedFin(tcptuple *common.TCPTuple, dir uint8, private ProtocolData) ProtocolData { return private } -func (proto *TcpUdpProtocol) GapInStream(tcptuple *common.TCPTuple, dir uint8, +func (proto *TCPUDPProtocol) GapInStream(tcptuple *common.TCPTuple, dir uint8, nbytes int, private ProtocolData) (priv ProtocolData, drop bool) { return private, true } -func (proto *TcpUdpProtocol) ParseUdp(pkt *Packet) { +func (proto *TCPUDPProtocol) ParseUDP(pkt *Packet) { return } -func (proto *TcpUdpProtocol) ConnectionTimeout() time.Duration { return 0 } +func (proto *TCPUDPProtocol) ConnectionTimeout() time.Duration { return 0 } func TestProtocolNames(t *testing.T) { assert.Equal(t, "unknown", UnknownProtocol.String()) @@ -96,24 +96,24 @@ func TestProtocolNames(t *testing.T) { func newProtocols() Protocols { p := ProtocolsStruct{} p.all = make(map[Protocol]Plugin) - p.tcp = make(map[Protocol]TcpPlugin) - p.udp = make(map[Protocol]UdpPlugin) + p.tcp = make(map[Protocol]TCPPlugin) + p.udp = make(map[Protocol]UDPPlugin) - tcp := &TcpProtocol{Ports: []int{80}} - udp := &UdpProtocol{Ports: []int{5060}} - tcpUdp := &TcpUdpProtocol{Ports: []int{53}} + tcp := &TCPProtocol{Ports: []int{80}} + udp := &UDPProtocol{Ports: []int{5060}} + tcpUDP := &TCPUDPProtocol{Ports: []int{53}} p.register(1, tcp) p.register(2, udp) - p.register(3, tcpUdp) + p.register(3, tcpUDP) return p } func TestBpfFilterWithoutVlanOnlyIcmp(t *testing.T) { p := ProtocolsStruct{} p.all = make(map[Protocol]Plugin) - p.tcp = make(map[Protocol]TcpPlugin) - p.udp = make(map[Protocol]UdpPlugin) + p.tcp = make(map[Protocol]TCPPlugin) + p.udp = make(map[Protocol]UDPPlugin) filter := p.BpfFilter(false, true) assert.Equal(t, "icmp or icmp6", filter) @@ -153,46 +153,46 @@ func TestGetAll(t *testing.T) { assert.NotNil(t, all[3]) } -func TestGetAllTcp(t *testing.T) { +func TestGetAllTCP(t *testing.T) { p := newProtocols() - tcp := p.GetAllTcp() + tcp := p.GetAllTCP() assert.NotNil(t, tcp[1]) assert.Nil(t, tcp[2]) assert.NotNil(t, tcp[3]) } -func TestGetAllUdp(t *testing.T) { +func TestGetAllUDP(t *testing.T) { p := newProtocols() - udp := p.GetAllUdp() + udp := p.GetAllUDP() assert.Nil(t, udp[1]) assert.NotNil(t, udp[2]) assert.NotNil(t, udp[3]) } -func TestGetTcp(t *testing.T) { +func TestGetTCP(t *testing.T) { p := newProtocols() - tcp := p.GetTcp(1) + tcp := p.GetTCP(1) assert.NotNil(t, tcp) assert.Contains(t, tcp.GetPorts(), 80) - tcp = p.GetTcp(2) + tcp = p.GetTCP(2) assert.Nil(t, tcp) - tcp = p.GetTcp(3) + tcp = p.GetTCP(3) assert.NotNil(t, tcp) assert.Contains(t, tcp.GetPorts(), 53) } -func TestGetUdp(t *testing.T) { +func TestGetUDP(t *testing.T) { p := newProtocols() - udp := p.GetUdp(1) + udp := p.GetUDP(1) assert.Nil(t, udp) - udp = p.GetUdp(2) + udp = p.GetUDP(2) assert.NotNil(t, udp) 
assert.Contains(t, udp.GetPorts(), 5060) - udp = p.GetUdp(3) + udp = p.GetUDP(3) assert.NotNil(t, udp) assert.Contains(t, udp.GetPorts(), 53) } diff --git a/packetbeat/protos/redis/redis.go b/packetbeat/protos/redis/redis.go index 23be3745e33..250f6041f58 100644 --- a/packetbeat/protos/redis/redis.go +++ b/packetbeat/protos/redis/redis.go @@ -208,7 +208,7 @@ func newStream(ts time.Time, tcptuple *common.TCPTuple) *stream { tcptuple: tcptuple, } s.parser.message = newMessage(ts) - s.Stream.Init(tcp.TCP_MAX_DATA_IN_STREAM) + s.Stream.Init(tcp.TCPMaxDataInStream) return s } @@ -222,7 +222,7 @@ func (redis *Redis) handleRedis( tcptuple *common.TCPTuple, dir uint8, ) { - m.TcpTuple = *tcptuple + m.TCPTuple = *tcptuple m.Direction = dir m.CmdlineTuple = procs.ProcWatcher.FindProcessesTuple(tcptuple.IPPort()) @@ -275,16 +275,16 @@ func (redis *Redis) newTransaction(requ, resp *redisMessage) common.MapStr { } src := &common.Endpoint{ - IP: requ.TcpTuple.SrcIP.String(), - Port: requ.TcpTuple.SrcPort, + IP: requ.TCPTuple.SrcIP.String(), + Port: requ.TCPTuple.SrcPort, Proc: string(requ.CmdlineTuple.Src), } dst := &common.Endpoint{ - IP: requ.TcpTuple.DstIP.String(), - Port: requ.TcpTuple.DstPort, + IP: requ.TCPTuple.DstIP.String(), + Port: requ.TCPTuple.DstPort, Proc: string(requ.CmdlineTuple.Dst), } - if requ.Direction == tcp.TcpDirectionReverse { + if requ.Direction == tcp.TCPDirectionReverse { src, dst = dst, src } diff --git a/packetbeat/protos/redis/redis_parse.go b/packetbeat/protos/redis/redis_parse.go index e5125067c87..8460b6ba96d 100644 --- a/packetbeat/protos/redis/redis_parse.go +++ b/packetbeat/protos/redis/redis_parse.go @@ -17,7 +17,7 @@ type parser struct { type redisMessage struct { Ts time.Time - TcpTuple common.TCPTuple + TCPTuple common.TCPTuple CmdlineTuple *common.CmdlineTuple Direction uint8 @@ -32,9 +32,9 @@ type redisMessage struct { } const ( - START = iota - BULK_ARRAY - SIMPLE_MESSAGE + Start = iota + BulkArray + SimpleMessage ) var ( @@ -247,10 +247,10 @@ func (p *parser) reset() { p.message = nil } -func (parser *parser) parse(buf *streambuf.Buffer) (bool, bool) { +func (p *parser) parse(buf *streambuf.Buffer) (bool, bool) { snapshot := buf.Snapshot() - content, iserror, ok, complete := parser.dispatch(0, buf) + content, iserror, ok, complete := p.dispatch(0, buf) if !ok || !complete { // on error or incomplete message drop all parsing progress, due to // parse not being statefull among multiple calls @@ -259,9 +259,9 @@ func (parser *parser) parse(buf *streambuf.Buffer) (bool, bool) { return ok, complete } - parser.message.IsError = iserror - parser.message.Size = buf.BufferConsumed() - parser.message.Message = content + p.message.IsError = iserror + p.message.Size = buf.BufferConsumed() + p.message.Message = content return true, true } diff --git a/packetbeat/protos/registry.go b/packetbeat/protos/registry.go index 4ca7b365467..83a53838545 100644 --- a/packetbeat/protos/registry.go +++ b/packetbeat/protos/registry.go @@ -19,7 +19,7 @@ type Plugin interface { GetPorts() []int } -type TcpPlugin interface { +type TCPPlugin interface { Plugin // Called when TCP payload data is available for parsing. @@ -40,11 +40,11 @@ type TcpPlugin interface { ConnectionTimeout() time.Duration } -type UdpPlugin interface { +type UDPPlugin interface { Plugin - // ParseUdp is invoked when UDP payload data is available for parsing. - ParseUdp(pkt *Packet) + // ParseUDP is invoked when UDP payload data is available for parsing. + ParseUDP(pkt *Packet) } // Protocol identifier. 
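To make the renamed BpfFilter behaviour above concrete: with no protocol ports registered only the ICMP clauses survive, and enabling VLAN support wraps the whole expression once more, which is exactly what TestBpfFilterWithoutVlanOnlyIcmp checks. The snippet below is a simplified standalone sketch of just the visible tail of ProtocolsStruct.BpfFilter (the icmp append and the vlan wrap), not the method itself.

package main

import (
	"fmt"
	"strings"
)

// buildFilter mirrors only the final steps of BpfFilter: append the icmp/icmp6
// clauses when requested, join with " or ", then optionally wrap the whole
// expression so 802.1Q VLAN-encapsulated traffic matches as well.
func buildFilter(expressions []string, withVlans, withICMP bool) string {
	if withICMP {
		expressions = append(expressions, "icmp", "icmp6")
	}
	filter := strings.Join(expressions, " or ")
	if withVlans {
		filter = fmt.Sprintf("%s or (vlan and (%s))", filter, filter)
	}
	return filter
}

func main() {
	// With no protocol ports registered, only the ICMP clauses remain.
	fmt.Println(buildFilter(nil, false, true)) // icmp or icmp6
	fmt.Println(buildFilter(nil, true, true))  // icmp or icmp6 or (vlan and (icmp or icmp6))
}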
diff --git a/packetbeat/protos/tcp/tcp.go b/packetbeat/protos/tcp/tcp.go index b7de2247b78..4be70623aa3 100644 --- a/packetbeat/protos/tcp/tcp.go +++ b/packetbeat/protos/tcp/tcp.go @@ -14,14 +14,14 @@ import ( "github.com/tsg/gopacket/layers" ) -const TCP_MAX_DATA_IN_STREAM = 10 * (1 << 20) +const TCPMaxDataInStream = 10 * (1 << 20) const ( - TcpDirectionReverse = 0 - TcpDirectionOriginal = 1 + TCPDirectionReverse = 0 + TCPDirectionOriginal = 1 ) -type Tcp struct { +type TCP struct { id uint32 streams *common.Cache portMap map[uint16]protos.Protocol @@ -49,12 +49,12 @@ var ( isDebug = false ) -func (tcp *Tcp) getId() uint32 { - tcp.id += 1 +func (tcp *TCP) getID() uint32 { + tcp.id++ return tcp.id } -func (tcp *Tcp) decideProtocol(tuple *common.IPPortTuple) protos.Protocol { +func (tcp *TCP) decideProtocol(tuple *common.IPPortTuple) protos.Protocol { protocol, exists := tcp.portMap[tuple.SrcPort] if exists { return protocol @@ -68,20 +68,20 @@ func (tcp *Tcp) decideProtocol(tuple *common.IPPortTuple) protos.Protocol { return protos.UnknownProtocol } -func (tcp *Tcp) findStream(k common.HashableIPPortTuple) *TcpConnection { +func (tcp *TCP) findStream(k common.HashableIPPortTuple) *TCPConnection { v := tcp.streams.Get(k) if v != nil { - return v.(*TcpConnection) + return v.(*TCPConnection) } return nil } -type TcpConnection struct { +type TCPConnection struct { id uint32 tuple *common.IPPortTuple protocol protos.Protocol tcptuple common.TCPTuple - tcp *Tcp + tcp *TCP lastSeq [2]uint32 @@ -89,19 +89,19 @@ type TcpConnection struct { data protos.ProtocolData } -type TcpStream struct { - conn *TcpConnection +type TCPStream struct { + conn *TCPConnection dir uint8 } -func (conn *TcpConnection) String() string { +func (conn *TCPConnection) String() string { return fmt.Sprintf("TcpStream id[%d] tuple[%s] protocol[%s] lastSeq[%d %d]", conn.id, conn.tuple, conn.protocol, conn.lastSeq[0], conn.lastSeq[1]) } -func (stream *TcpStream) addPacket(pkt *protos.Packet, tcphdr *layers.TCP) { +func (stream *TCPStream) addPacket(pkt *protos.Packet, tcphdr *layers.TCP) { conn := stream.conn - mod := conn.tcp.protocols.GetTcp(conn.protocol) + mod := conn.tcp.protocols.GetTCP(conn.protocol) if mod == nil { if isDebug { protocol := conn.protocol @@ -120,14 +120,14 @@ func (stream *TcpStream) addPacket(pkt *protos.Packet, tcphdr *layers.TCP) { } } -func (stream *TcpStream) gapInStream(nbytes int) (drop bool) { +func (stream *TCPStream) gapInStream(nbytes int) (drop bool) { conn := stream.conn - mod := conn.tcp.protocols.GetTcp(conn.protocol) + mod := conn.tcp.protocols.GetTCP(conn.protocol) conn.data, drop = mod.GapInStream(&conn.tcptuple, stream.dir, nbytes, conn.data) return drop } -func (tcp *Tcp) Process(id *flows.FlowID, tcphdr *layers.TCP, pkt *protos.Packet) { +func (tcp *TCP) Process(id *flows.FlowID, tcphdr *layers.TCP, pkt *protos.Packet) { // This Recover should catch all exceptions in // protocol modules. 
defer logp.Recover("Process tcp exception") @@ -186,7 +186,7 @@ func (tcp *Tcp) Process(id *flows.FlowID, tcphdr *layers.TCP, pkt *protos.Packet // drop application layer connection state and // update stream_id for app layer analysers using stream_id for lookups - conn.id = tcp.getId() + conn.id = tcp.getID() conn.data = nil } @@ -208,23 +208,23 @@ func (tcp *Tcp) Process(id *flows.FlowID, tcphdr *layers.TCP, pkt *protos.Packet stream.addPacket(pkt, tcphdr) } -func (tcp *Tcp) getStream(pkt *protos.Packet) (stream TcpStream, created bool) { +func (tcp *TCP) getStream(pkt *protos.Packet) (stream TCPStream, created bool) { if conn := tcp.findStream(pkt.Tuple.Hashable()); conn != nil { - return TcpStream{conn: conn, dir: TcpDirectionOriginal}, false + return TCPStream{conn: conn, dir: TCPDirectionOriginal}, false } if conn := tcp.findStream(pkt.Tuple.RevHashable()); conn != nil { - return TcpStream{conn: conn, dir: TcpDirectionReverse}, false + return TCPStream{conn: conn, dir: TCPDirectionReverse}, false } protocol := tcp.decideProtocol(&pkt.Tuple) if protocol == protos.UnknownProtocol { // don't follow - return TcpStream{}, false + return TCPStream{}, false } var timeout time.Duration - mod := tcp.protocols.GetTcp(protocol) + mod := tcp.protocols.GetTCP(protocol) if mod != nil { timeout = mod.ConnectionTimeout() } @@ -236,14 +236,14 @@ func (tcp *Tcp) getStream(pkt *protos.Packet) (stream TcpStream, created bool) { t.DstIP.String(), t.DstPort) } - conn := &TcpConnection{ - id: tcp.getId(), + conn := &TCPConnection{ + id: tcp.getID(), tuple: &pkt.Tuple, protocol: protocol, tcp: tcp} conn.tcptuple = common.TCPTupleFromIPPort(conn.tuple, conn.id) tcp.streams.PutWithTimeout(pkt.Tuple.Hashable(), conn, timeout) - return TcpStream{conn: conn, dir: TcpDirectionOriginal}, true + return TCPStream{conn: conn, dir: TCPDirectionOriginal}, true } func tcpSeqCompare(seq1, seq2 uint32) seqCompare { @@ -266,18 +266,18 @@ func tcpSeqBeforeEq(seq1 uint32, seq2 uint32) bool { return int32(seq1-seq2) <= 0 } -func buildPortsMap(plugins map[protos.Protocol]protos.TcpPlugin) (map[uint16]protos.Protocol, error) { +func buildPortsMap(plugins map[protos.Protocol]protos.TCPPlugin) (map[uint16]protos.Protocol, error) { var res = map[uint16]protos.Protocol{} for proto, protoPlugin := range plugins { for _, port := range protoPlugin.GetPorts() { - old_proto, exists := res[uint16(port)] + oldProto, exists := res[uint16(port)] if exists { - if old_proto == proto { + if oldProto == proto { continue } return nil, fmt.Errorf("Duplicate port (%d) exists in %s and %s protocols", - port, old_proto, proto) + port, oldProto, proto) } res[uint16(port)] = proto } @@ -287,15 +287,15 @@ func buildPortsMap(plugins map[protos.Protocol]protos.TcpPlugin) (map[uint16]pro } // Creates and returns a new Tcp. 
-func NewTcp(p protos.Protocols) (*Tcp, error) { +func NewTCP(p protos.Protocols) (*TCP, error) { isDebug = logp.IsDebug("tcp") - portMap, err := buildPortsMap(p.GetAllTcp()) + portMap, err := buildPortsMap(p.GetAllTCP()) if err != nil { return nil, err } - tcp := &Tcp{ + tcp := &TCP{ protocols: p, portMap: portMap, streams: common.NewCache( diff --git a/packetbeat/protos/tcp/tcp_test.go b/packetbeat/protos/tcp/tcp_test.go index cc837bcbb37..52cf39f45c4 100644 --- a/packetbeat/protos/tcp/tcp_test.go +++ b/packetbeat/protos/tcp/tcp_test.go @@ -18,9 +18,9 @@ import ( // Test Constants const ( - ServerIp = "192.168.0.1" + ServerIP = "192.168.0.1" ServerPort = 12345 - ClientIp = "10.0.0.1" + ClientIP = "10.0.0.1" ) var ( @@ -63,8 +63,8 @@ var _ protos.Plugin = &TestProtocol{ }, } -func (proto *TestProtocol) Init(test_mode bool, results publish.Transactions) error { - return proto.init(test_mode, results) +func (proto *TestProtocol) Init(testMode bool, results publish.Transactions) error { + return proto.init(testMode, results) } func (proto TestProtocol) GetPorts() []int { @@ -93,13 +93,13 @@ func (proto TestProtocol) ConnectionTimeout() time.Duration { func Test_configToPortsMap(t *testing.T) { type configTest struct { - Input map[protos.Protocol]protos.TcpPlugin + Input map[protos.Protocol]protos.TCPPlugin Output map[uint16]protos.Protocol } - config_tests := []configTest{ + configTests := []configTest{ { - Input: map[protos.Protocol]protos.TcpPlugin{ + Input: map[protos.Protocol]protos.TCPPlugin{ httpProtocol: &TestProtocol{Ports: []int{80, 8080}}, }, Output: map[uint16]protos.Protocol{ @@ -108,7 +108,7 @@ func Test_configToPortsMap(t *testing.T) { }, }, { - Input: map[protos.Protocol]protos.TcpPlugin{ + Input: map[protos.Protocol]protos.TCPPlugin{ httpProtocol: &TestProtocol{Ports: []int{80, 8080}}, mysqlProtocol: &TestProtocol{Ports: []int{3306}}, redisProtocol: &TestProtocol{Ports: []int{6379, 6380}}, @@ -124,7 +124,7 @@ func Test_configToPortsMap(t *testing.T) { // should ignore duplicate ports in the same protocol { - Input: map[protos.Protocol]protos.TcpPlugin{ + Input: map[protos.Protocol]protos.TCPPlugin{ httpProtocol: &TestProtocol{Ports: []int{80, 8080, 8080}}, mysqlProtocol: &TestProtocol{Ports: []int{3306}}, }, @@ -136,7 +136,7 @@ func Test_configToPortsMap(t *testing.T) { }, } - for _, test := range config_tests { + for _, test := range configTests { output, err := buildPortsMap(test.Input) assert.Nil(t, err) assert.Equal(t, test.Output, output) @@ -146,14 +146,14 @@ func Test_configToPortsMap(t *testing.T) { func Test_configToPortsMap_negative(t *testing.T) { type errTest struct { - Input map[protos.Protocol]protos.TcpPlugin + Input map[protos.Protocol]protos.TCPPlugin Err string } tests := []errTest{ { // should raise error on duplicate port - Input: map[protos.Protocol]protos.TcpPlugin{ + Input: map[protos.Protocol]protos.TCPPlugin{ httpProtocol: &TestProtocol{Ports: []int{80, 8080}}, mysqlProtocol: &TestProtocol{Ports: []int{3306}}, redisProtocol: &TestProtocol{Ports: []int{6379, 6380, 3306}}, @@ -171,18 +171,18 @@ func Test_configToPortsMap_negative(t *testing.T) { // Mock protos.Protocols used for testing the tcp package. type protocols struct { - tcp map[protos.Protocol]protos.TcpPlugin + tcp map[protos.Protocol]protos.TCPPlugin } // Verify protocols implements the protos.Protocols interface. 
var _ protos.Protocols = &protocols{} -func (p protocols) BpfFilter(with_vlans bool, with_icmp bool) string { return "" } -func (p protocols) GetTcp(proto protos.Protocol) protos.TcpPlugin { return p.tcp[proto] } -func (p protocols) GetUdp(proto protos.Protocol) protos.UdpPlugin { return nil } +func (p protocols) BpfFilter(withVlans bool, withICMP bool) string { return "" } +func (p protocols) GetTCP(proto protos.Protocol) protos.TCPPlugin { return p.tcp[proto] } +func (p protocols) GetUDP(proto protos.Protocol) protos.UDPPlugin { return nil } func (p protocols) GetAll() map[protos.Protocol]protos.Plugin { return nil } -func (p protocols) GetAllTcp() map[protos.Protocol]protos.TcpPlugin { return p.tcp } -func (p protocols) GetAllUdp() map[protos.Protocol]protos.UdpPlugin { return nil } +func (p protocols) GetAllTCP() map[protos.Protocol]protos.TCPPlugin { return p.tcp } +func (p protocols) GetAllUDP() map[protos.Protocol]protos.UDPPlugin { return nil } func (p protocols) Register(proto protos.Protocol, plugin protos.Plugin) { return } func TestTCSeqPayload(t *testing.T) { @@ -273,8 +273,8 @@ func TestTCSeqPayload(t *testing.T) { gap := 0 var state []byte - tcp, err := NewTcp(protocols{ - tcp: map[protos.Protocol]protos.TcpPlugin{ + tcp, err := NewTCP(protocols{ + tcp: map[protos.Protocol]protos.TCPPlugin{ httpProtocol: &TestProtocol{ Ports: []int{ServerPort}, gap: makeCountGaps(nil, &gap), @@ -287,8 +287,8 @@ func TestTCSeqPayload(t *testing.T) { } addr := common.NewIPPortTuple(4, - net.ParseIP(ServerIp), ServerPort, - net.ParseIP(ClientIp), uint16(rand.Intn(65535))) + net.ParseIP(ServerIP), ServerPort, + net.ParseIP(ClientIP), uint16(rand.Intn(65535))) for _, segment := range test.segments { hdr := &layers.TCP{Seq: segment.seq} @@ -315,9 +315,9 @@ func TestTCSeqPayload(t *testing.T) { func BenchmarkParallelProcess(b *testing.B) { rand.Seed(18) p := protocols{} - p.tcp = make(map[protos.Protocol]protos.TcpPlugin) + p.tcp = make(map[protos.Protocol]protos.TCPPlugin) p.tcp[1] = &TestProtocol{Ports: []int{ServerPort}} - tcp, _ := NewTcp(p) + tcp, _ := NewTCP(p) b.ResetTimer() b.RunParallel(func(pb *testing.PB) { @@ -325,8 +325,8 @@ func BenchmarkParallelProcess(b *testing.B) { pkt := &protos.Packet{ Ts: time.Now(), Tuple: common.NewIPPortTuple(4, - net.ParseIP(ServerIp), ServerPort, - net.ParseIP(ClientIp), uint16(rand.Intn(65535))), + net.ParseIP(ServerIP), ServerPort, + net.ParseIP(ClientIP), uint16(rand.Intn(65535))), Payload: []byte{1, 2, 3, 4}, } tcp.Process(nil, &layers.TCP{}, pkt) diff --git a/packetbeat/protos/thrift/thrift.go b/packetbeat/protos/thrift/thrift.go index 3e9396c6f31..c21ccc4fdd7 100644 --- a/packetbeat/protos/thrift/thrift.go +++ b/packetbeat/protos/thrift/thrift.go @@ -23,7 +23,7 @@ import ( type ThriftMessage struct { Ts time.Time - TcpTuple common.TCPTuple + TCPTuple common.TCPTuple CmdlineTuple *common.CmdlineTuple Direction uint8 @@ -36,7 +36,7 @@ type ThriftMessage struct { Version uint32 Type uint32 Method string - SeqId uint32 + SeqID uint32 Params string ReturnValue string Exceptions string @@ -47,7 +47,7 @@ type ThriftMessage struct { type ThriftField struct { Type byte - Id uint16 + ID uint16 Value string } @@ -144,8 +144,8 @@ type Thrift struct { DropAfterNStructFields int CaptureReply bool ObfuscateStrings bool - Send_request bool - Send_response bool + SendRequest bool + SendResponse bool TransportType byte ProtocolType byte @@ -229,8 +229,8 @@ func (thrift *Thrift) InitDefaults() { thrift.ProtocolType = ThriftTBinary thrift.CaptureReply = true 
thrift.ObfuscateStrings = false - thrift.Send_request = false - thrift.Send_response = false + thrift.SendRequest = false + thrift.SendResponse = false thrift.transactionTimeout = protos.DefaultTransactionExpiration } @@ -238,8 +238,8 @@ func (thrift *Thrift) readConfig(config *thriftConfig) error { var err error thrift.Ports = config.Ports - thrift.Send_request = config.SendRequest - thrift.Send_response = config.SendResponse + thrift.SendRequest = config.SendRequest + thrift.SendResponse = config.SendResponse thrift.StringMaxSize = config.StringMaxSize thrift.CollectionMaxSize = config.CollectionMaxSize @@ -279,7 +279,7 @@ func (thrift *Thrift) GetPorts() []int { func (m *ThriftMessage) String() string { return fmt.Sprintf("IsRequest: %t Type: %d Method: %s SeqId: %d Params: %s ReturnValue: %s Exceptions: %s", - m.IsRequest, m.Type, m.Method, m.SeqId, m.Params, m.ReturnValue, m.Exceptions) + m.IsRequest, m.Type, m.Method, m.SeqID, m.Params, m.ReturnValue, m.Exceptions) } func (thrift *Thrift) readMessageBegin(s *ThriftStream) (bool, bool) { @@ -323,7 +323,7 @@ func (thrift *Thrift) readMessageBegin(s *ThriftStream) (bool, bool) { logp.Debug("thriftdetailed", "Less then 4 bytes remaining") return true, false // ok, not complete } - m.SeqId = common.BytesNtohl(s.data[offset : offset+4]) + m.SeqID = common.BytesNtohl(s.data[offset : offset+4]) s.parseOffset = offset + 4 } else { // no version mode @@ -347,8 +347,8 @@ func (thrift *Thrift) readMessageBegin(s *ThriftStream) (bool, bool) { } m.Type = uint32(s.data[offset]) - offset += 1 - m.SeqId = common.BytesNtohl(s.data[offset : offset+4]) + offset++ + m.SeqID = common.BytesNtohl(s.data[offset : offset+4]) s.parseOffset = offset + 4 } @@ -477,11 +477,11 @@ func (thrift *Thrift) readListOrSet(data []byte) (value string, ok bool, complet if len(data) < 5 { return "", true, false, 0 } - type_ := data[0] + typ := data[0] - funcReader, typeFound := thrift.funcReadersByType(type_) + funcReader, typeFound := thrift.funcReadersByType(typ) if !typeFound { - logp.Debug("thrift", "Field type %d not known", type_) + logp.Debug("thrift", "Field type %d not known", typ) return "", false, false, 0 } @@ -534,18 +534,18 @@ func (thrift *Thrift) readMap(data []byte) (value string, ok bool, complete bool if len(data) < 6 { return "", true, false, 0 } - type_key := data[0] - type_value := data[1] + typeKey := data[0] + typeValue := data[1] - funcReaderKey, typeFound := thrift.funcReadersByType(type_key) + funcReaderKey, typeFound := thrift.funcReadersByType(typeKey) if !typeFound { - logp.Debug("thrift", "Field type %d not known", type_key) + logp.Debug("thrift", "Field type %d not known", typeKey) return "", false, false, 0 } - funcReaderValue, typeFound := thrift.funcReadersByType(type_value) + funcReaderValue, typeFound := thrift.funcReadersByType(typeValue) if !typeFound { - logp.Debug("thrift", "Field type %d not known", type_value) + logp.Debug("thrift", "Field type %d not known", typeValue) return "", false, false, 0 } @@ -609,7 +609,7 @@ func (thrift *Thrift) readStruct(data []byte) (value string, ok bool, complete b } field.Type = byte(data[offset]) - offset += 1 + offset++ if field.Type == ThriftTypeStop { return thrift.formatStruct(fields, false, []*string{}), true, true, offset } @@ -618,7 +618,7 @@ func (thrift *Thrift) readStruct(data []byte) (value string, ok bool, complete b return "", true, false, 0 // not complete } - field.Id = common.BytesNtohs(data[offset : offset+2]) + field.ID = common.BytesNtohs(data[offset : offset+2]) offset += 2 
funcReader, typeFound := thrift.funcReadersByType(field.Type) @@ -640,7 +640,7 @@ func (thrift *Thrift) readStruct(data []byte) (value string, ok bool, complete b } } -func (thrift *Thrift) formatStruct(fields []ThriftField, resolve_names bool, +func (thrift *Thrift) formatStruct(fields []ThriftField, resolveNames bool, fieldnames []*string) string { toJoin := []string{} @@ -649,18 +649,18 @@ func (thrift *Thrift) formatStruct(fields []ThriftField, resolve_names bool, toJoin = append(toJoin, "...") break } - if resolve_names && int(field.Id) < len(fieldnames) && fieldnames[field.Id] != nil { - toJoin = append(toJoin, *fieldnames[field.Id]+": "+field.Value) + if resolveNames && int(field.ID) < len(fieldnames) && fieldnames[field.ID] != nil { + toJoin = append(toJoin, *fieldnames[field.ID]+": "+field.Value) } else { - toJoin = append(toJoin, strconv.Itoa(int(field.Id))+": "+field.Value) + toJoin = append(toJoin, strconv.Itoa(int(field.ID))+": "+field.Value) } } return "(" + strings.Join(toJoin, ", ") + ")" } // Dictionary wrapped in a function to avoid "initialization loop" -func (thrift *Thrift) funcReadersByType(type_ byte) (func_ ThriftFieldReader, exists bool) { - switch type_ { +func (thrift *Thrift) funcReadersByType(typ byte) (fn ThriftFieldReader, exists bool) { + switch typ { case ThriftTypeBool: return thrift.readBool, true case ThriftTypeByte: @@ -707,7 +707,7 @@ func (thrift *Thrift) readField(s *ThriftStream) (ok bool, complete bool, field if len(s.data[offset:]) < 2 { return true, false, nil // ok, not complete } - field.Id = common.BytesNtohs(s.data[offset : offset+2]) + field.ID = common.BytesNtohs(s.data[offset : offset+2]) offset += 2 funcReader, typeFound := thrift.funcReadersByType(field.Type) @@ -775,7 +775,7 @@ func (thrift *Thrift) messageParser(s *ThriftStream) (bool, bool) { } if complete { // done - var method *ThriftIdlMethod = nil + var method *ThriftIdlMethod if thrift.Idl != nil { method = thrift.Idl.FindMethod(m.Method) } @@ -793,7 +793,7 @@ func (thrift *Thrift) messageParser(s *ThriftStream) (bool, bool) { } if len(m.fields) > 0 { field := m.fields[0] - if field.Id == 0 { + if field.ID == 0 { m.ReturnValue = field.Value m.Exceptions = "" } else { @@ -859,15 +859,15 @@ type thriftPrivateData struct { func (thrift *Thrift) messageComplete(tcptuple *common.TCPTuple, dir uint8, stream *ThriftStream, priv *thriftPrivateData) { - var flush bool = false + flush := false if stream.message.IsRequest { logp.Debug("thrift", "Thrift request message: %s", stream.message.Method) if !thrift.CaptureReply { // enable the stream in the other direction to get the reply - stream_rev := priv.Data[1-dir] - if stream_rev != nil { - stream_rev.skipInput = false + streamRev := priv.Data[1-dir] + if streamRev != nil { + streamRev.skipInput = false } } } else { @@ -882,7 +882,7 @@ func (thrift *Thrift) messageComplete(tcptuple *common.TCPTuple, dir uint8, } // all ok, go to next level - stream.message.TcpTuple = *tcptuple + stream.message.TCPTuple = *tcptuple stream.message.Direction = dir stream.message.CmdlineTuple = procs.ProcWatcher.FindProcessesTuple(tcptuple.IPPort()) if stream.message.FrameSize == 0 { @@ -929,7 +929,7 @@ func (thrift *Thrift) Parse(pkt *protos.Packet, tcptuple *common.TCPTuple, dir u } // concatenate bytes stream.data = append(stream.data, pkt.Payload...) 
- if len(stream.data) > tcp.TCP_MAX_DATA_IN_STREAM { + if len(stream.data) > tcp.TCPMaxDataInStream { logp.Debug("thrift", "Stream data too large, dropping TCP stream") priv.Data[dir] = nil return priv @@ -974,7 +974,7 @@ func (thrift *Thrift) handleThrift(msg *ThriftMessage) { } func (thrift *Thrift) receivedRequest(msg *ThriftMessage) { - tuple := msg.TcpTuple + tuple := msg.TCPTuple trans := thrift.getTransaction(tuple.Hashable()) if trans != nil { @@ -993,16 +993,16 @@ func (thrift *Thrift) receivedRequest(msg *ThriftMessage) { trans.Ts = int64(trans.ts.UnixNano() / 1000) trans.JsTs = msg.Ts trans.Src = common.Endpoint{ - IP: msg.TcpTuple.SrcIP.String(), - Port: msg.TcpTuple.SrcPort, + IP: msg.TCPTuple.SrcIP.String(), + Port: msg.TCPTuple.SrcPort, Proc: string(msg.CmdlineTuple.Src), } trans.Dst = common.Endpoint{ - IP: msg.TcpTuple.DstIP.String(), - Port: msg.TcpTuple.DstPort, + IP: msg.TCPTuple.DstIP.String(), + Port: msg.TCPTuple.DstPort, Proc: string(msg.CmdlineTuple.Dst), } - if msg.Direction == tcp.TcpDirectionReverse { + if msg.Direction == tcp.TCPDirectionReverse { trans.Src, trans.Dst = trans.Dst, trans.Src } @@ -1013,7 +1013,7 @@ func (thrift *Thrift) receivedRequest(msg *ThriftMessage) { func (thrift *Thrift) receivedReply(msg *ThriftMessage) { // we need to search the request first. - tuple := msg.TcpTuple + tuple := msg.TCPTuple trans := thrift.getTransaction(tuple.Hashable()) if trans == nil { @@ -1111,7 +1111,7 @@ func (thrift *Thrift) publishTransactions() { thriftmap["service"] = t.Request.Service } - if thrift.Send_request { + if thrift.SendRequest { event["request"] = fmt.Sprintf("%s%s", t.Request.Method, t.Request.Params) } @@ -1124,7 +1124,7 @@ func (thrift *Thrift) publishTransactions() { } event["bytes_out"] = uint64(t.Reply.FrameSize) - if thrift.Send_response { + if thrift.SendResponse { if !t.Reply.HasException { event["response"] = t.Reply.ReturnValue } else { diff --git a/packetbeat/protos/thrift/thrift_idl.go b/packetbeat/protos/thrift/thrift_idl.go index 9abd20ed934..49fbf20b1c4 100644 --- a/packetbeat/protos/thrift/thrift_idl.go +++ b/packetbeat/protos/thrift/thrift_idl.go @@ -20,7 +20,7 @@ type ThriftIdl struct { MethodsByName map[string]*ThriftIdlMethod } -func fieldsToArrayById(fields []*parser.Field) []*string { +func fieldsToArrayByID(fields []*parser.Field) []*string { if len(fields) == 0 { return []*string{} } @@ -43,11 +43,11 @@ func fieldsToArrayById(fields []*parser.Field) []*string { return output } -func BuildMethodsMap(thrift_files map[string]parser.Thrift) map[string]*ThriftIdlMethod { +func BuildMethodsMap(thriftFiles map[string]parser.Thrift) map[string]*ThriftIdlMethod { output := make(map[string]*ThriftIdlMethod) - for _, thrift := range thrift_files { + for _, thrift := range thriftFiles { for _, service := range thrift.Services { for _, method := range service.Methods { if _, exists := output[method.Name]; exists { @@ -57,8 +57,8 @@ func BuildMethodsMap(thrift_files map[string]parser.Thrift) map[string]*ThriftId output[method.Name] = &ThriftIdlMethod{ Service: service, Method: method, - Params: fieldsToArrayById(method.Arguments), - Exceptions: fieldsToArrayById(method.Exceptions), + Params: fieldsToArrayByID(method.Arguments), + Exceptions: fieldsToArrayByID(method.Exceptions), } } } @@ -73,12 +73,12 @@ func ReadFiles(files []string) (map[string]parser.Thrift, error) { thriftParser := parser.Parser{} for _, file := range files { - files_map, _, err := thriftParser.ParseFile(file) + filesMap, _, err := thriftParser.ParseFile(file) if 
err != nil { return output, fmt.Errorf("Error parsing Thrift IDL file %s: %s", file, err) } - for fname, parsedFile := range files_map { + for fname, parsedFile := range filesMap { output[fname] = *parsedFile } } @@ -90,17 +90,17 @@ func (thriftidl *ThriftIdl) FindMethod(name string) *ThriftIdlMethod { return thriftidl.MethodsByName[name] } -func NewThriftIdl(idl_files []string) (*ThriftIdl, error) { +func NewThriftIdl(idlFiles []string) (*ThriftIdl, error) { - if len(idl_files) == 0 { + if len(idlFiles) == 0 { return nil, nil } - thrift_files, err := ReadFiles(idl_files) + thriftFiles, err := ReadFiles(idlFiles) if err != nil { return nil, err } return &ThriftIdl{ - MethodsByName: BuildMethodsMap(thrift_files), + MethodsByName: BuildMethodsMap(thriftFiles), }, nil } diff --git a/packetbeat/protos/thrift/thrift_idl_test.go b/packetbeat/protos/thrift/thrift_idl_test.go index c4297261588..afb383b7ba4 100644 --- a/packetbeat/protos/thrift/thrift_idl_test.go +++ b/packetbeat/protos/thrift/thrift_idl_test.go @@ -36,11 +36,11 @@ service Test { } `) - methods_map := idl.MethodsByName - if len(methods_map) == 0 { + methodsMap := idl.MethodsByName + if len(methodsMap) == 0 { t.Error("Empty methods_map") } - m, exists := methods_map["add"] + m, exists := methodsMap["add"] if !exists || m.Service == nil || m.Method == nil || m.Service.Name != "Test" || m.Method.Name != "add" { diff --git a/packetbeat/protos/thrift/thrift_test.go b/packetbeat/protos/thrift/thrift_test.go index d7357ea9e87..2cc7e16f83a 100644 --- a/packetbeat/protos/thrift/thrift_test.go +++ b/packetbeat/protos/thrift/thrift_test.go @@ -79,8 +79,8 @@ func TestThrift_readMessageBegin(t *testing.T) { t.Errorf("Bad result: %v %v", ok, complete) } if m.Method != "ping" || m.Type != ThriftMsgTypeCall || - m.SeqId != 0 || m.Version != ThriftVersion1 { - t.Errorf("Bad values: %v %v %v %v", m.Method, m.Type, m.SeqId, m.Version) + m.SeqID != 0 || m.Version != ThriftVersion1 { + t.Errorf("Bad values: %v %v %v %v", m.Method, m.Type, m.SeqID, m.Version) } data, _ = hex.DecodeString("800100010000000470696e6700000000") @@ -91,8 +91,8 @@ func TestThrift_readMessageBegin(t *testing.T) { t.Errorf("Bad result: %v %v", ok, complete) } if m.Method != "ping" || m.Type != ThriftMsgTypeCall || - m.SeqId != 0 || m.Version != ThriftVersion1 { - t.Errorf("Bad values: %v %v %v %v", m.Method, m.Type, m.SeqId, m.Version) + m.SeqID != 0 || m.Version != ThriftVersion1 { + t.Errorf("Bad values: %v %v %v %v", m.Method, m.Type, m.SeqID, m.Version) } data, _ = hex.DecodeString("800100010000000470696e6700000001") @@ -103,8 +103,8 @@ func TestThrift_readMessageBegin(t *testing.T) { t.Errorf("Bad result: %v %v", ok, complete) } if m.Method != "ping" || m.Type != ThriftMsgTypeCall || - m.SeqId != 1 || m.Version != ThriftVersion1 { - t.Errorf("Bad values: %v %v %v %v", m.Method, m.Type, m.SeqId, m.Version) + m.SeqID != 1 || m.Version != ThriftVersion1 { + t.Errorf("Bad values: %v %v %v %v", m.Method, m.Type, m.SeqID, m.Version) } data, _ = hex.DecodeString("800100010000000570696e6700000001") @@ -131,8 +131,8 @@ func TestThrift_readMessageBegin(t *testing.T) { t.Errorf("Bad result: %v %v", ok, complete) } if m.Method != "ping" || m.Type != ThriftMsgTypeCall || - m.SeqId != 0 || m.Version != 0 { - t.Errorf("Bad values: %v %v %v %v", m.Method, m.Type, m.SeqId, m.Version) + m.SeqID != 0 || m.Version != 0 { + t.Errorf("Bad values: %v %v %v %v", m.Method, m.Type, m.SeqID, m.Version) } data, _ = hex.DecodeString("0000000570696e670100000000") @@ -166,9 +166,9 @@ func 
TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeI32 || field.Value != "1" || + if field.ID != 1 || field.Type != ThriftTypeI32 || field.Value != "1" || stream.parseOffset != len(stream.data) { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } @@ -178,9 +178,9 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeI16 || field.Value != "1" || + if field.ID != 1 || field.Type != ThriftTypeI16 || field.Value != "1" || stream.parseOffset != len(stream.data) { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } @@ -190,9 +190,9 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeI64 || field.Value != "1" || + if field.ID != 1 || field.Type != ThriftTypeI64 || field.Value != "1" || stream.parseOffset != len(stream.data) { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } @@ -202,9 +202,9 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeDouble || field.Value != "1.2" || + if field.ID != 1 || field.Type != ThriftTypeDouble || field.Value != "1.2" || stream.parseOffset != len(stream.data) { - t.Error("Bad values:", field.Id, field.Type, field.Value, stream.parseOffset) + t.Error("Bad values:", field.ID, field.Type, field.Value, stream.parseOffset) } } @@ -214,9 +214,9 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeBool || field.Value != "true" || + if field.ID != 1 || field.Type != ThriftTypeBool || field.Value != "true" || stream.parseOffset != len(stream.data) { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } @@ -233,8 +233,8 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeString || field.Value != `"hello"` { - t.Error("Bad values:", field.Id, field.Type, field.Value) + if field.ID != 1 || field.Type != ThriftTypeString || field.Value != `"hello"` { + t.Error("Bad values:", field.ID, field.Type, field.Value) } } @@ -245,9 +245,9 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeString || field.Value != `"hel..."` || + if field.ID != 1 || field.Type != ThriftTypeString || field.Value != `"hel..."` || stream.parseOffset != len(stream.data) { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } thrift.StringMaxSize = _old @@ -258,9 +258,9 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || 
field.Type != ThriftTypeList || + if field.ID != 1 || field.Type != ThriftTypeList || field.Value != "[1, 2, 3]" { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } @@ -271,10 +271,10 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeList || + if field.ID != 1 || field.Type != ThriftTypeList || stream.parseOffset != len(stream.data) || field.Value != "[1, ...]" { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } thrift.CollectionMaxSize = _old @@ -285,9 +285,9 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeSet || + if field.ID != 1 || field.Type != ThriftTypeSet || field.Value != "{1, 2, 3}" { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } @@ -298,10 +298,10 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeSet || + if field.ID != 1 || field.Type != ThriftTypeSet || stream.parseOffset != len(stream.data) || field.Value != "{1, 2, ...}" { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } thrift.CollectionMaxSize = _old @@ -312,10 +312,10 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeMap || + if field.ID != 1 || field.Type != ThriftTypeMap || field.Value != `{"a": 1, "c": 3, "b": 2}` || stream.parseOffset != len(stream.data) { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } @@ -326,9 +326,9 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeMap || + if field.ID != 1 || field.Type != ThriftTypeMap || field.Value != `{"a": 1, "c": 3, ...}` { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } thrift.CollectionMaxSize = _old @@ -339,8 +339,8 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeString || field.Value != `"h\x10llo"` { - t.Error("Bad values:", field.Id, field.Type, field.Value) + if field.ID != 1 || field.Type != ThriftTypeString || field.Value != `"h\x10llo"` { + t.Error("Bad values:", field.ID, field.Type, field.Value) } } @@ -350,9 +350,9 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeStruct || + if field.ID != 1 || field.Type != ThriftTypeStruct || field.Value != `(1: 1, 2: 0, 3: 4)` { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } @@ -363,9 +363,9 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete 
|| field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeStruct || + if field.ID != 1 || field.Type != ThriftTypeStruct || field.Value != `(1: 1, 2: 0, ...)` { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } thrift.CollectionMaxSize = _old @@ -376,9 +376,9 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeStruct || + if field.ID != 1 || field.Type != ThriftTypeStruct || field.Value != `(1: 1, 2: "hello")` { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } @@ -388,9 +388,9 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeStruct || + if field.ID != 1 || field.Type != ThriftTypeStruct || field.Value != `(1: 1, 2: (1: 1, 2: "hello"))` { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } @@ -400,9 +400,9 @@ func TestThrift_thriftReadField(t *testing.T) { if !ok || complete || field == nil { t.Error("Bad result:", ok, complete, field) } else { - if field.Id != 1 || field.Type != ThriftTypeStruct || + if field.ID != 1 || field.Type != ThriftTypeStruct || field.Value != `(1: 1, 2: {1, 2, 3})` { - t.Error("Bad values:", field.Id, field.Type, field.Value) + t.Error("Bad values:", field.ID, field.Type, field.Value) } } @@ -430,7 +430,7 @@ func TestThrift_thriftMessageParser(t *testing.T) { t.Error("Bad result:", ok, complete) } if !m.IsRequest || m.Method != "ping" || - m.SeqId != 0 || m.Type != ThriftMsgTypeCall || m.Params != "()" { + m.SeqID != 0 || m.Type != ThriftMsgTypeCall || m.Params != "()" { t.Error("Bad result:", stream.message) } @@ -443,7 +443,7 @@ func TestThrift_thriftMessageParser(t *testing.T) { t.Error("Bad result:", ok, complete) } if !m.IsRequest || m.Method != "add16" || - m.SeqId != 0 || m.Type != ThriftMsgTypeCall || + m.SeqID != 0 || m.Type != ThriftMsgTypeCall || m.Params != "(1: 1, 2: 1)" { t.Error("Bad result:", stream.message) } @@ -457,7 +457,7 @@ func TestThrift_thriftMessageParser(t *testing.T) { t.Error("Bad result:", ok, complete) } if !m.IsRequest || m.Method != "calculate" || - m.SeqId != 0 || m.Type != ThriftMsgTypeCall || + m.SeqID != 0 || m.Type != ThriftMsgTypeCall || m.Params != "(1: 1, 2: (1: 1, 2: 0, 3: 4))" { t.Error("Bad result:", stream.message) } @@ -470,7 +470,7 @@ func TestThrift_thriftMessageParser(t *testing.T) { t.Error("Bad result:", ok, complete) } if m.IsRequest || m.Method != "add16" || - m.SeqId != 0 || m.Type != ThriftMsgTypeReply || + m.SeqID != 0 || m.Type != ThriftMsgTypeReply || m.ReturnValue != "2" { t.Error("Bad result:", stream.message) } @@ -484,7 +484,7 @@ func TestThrift_thriftMessageParser(t *testing.T) { t.Error("Bad result:", ok, complete) } if m.IsRequest || m.Method != "echo_string" || - m.SeqId != 0 || m.Type != ThriftMsgTypeReply || + m.SeqID != 0 || m.Type != ThriftMsgTypeReply || m.ReturnValue != `"hello"` { t.Error("Bad result:", stream.message) } @@ -498,7 +498,7 @@ func TestThrift_thriftMessageParser(t *testing.T) { t.Error("Bad result:", ok, complete) } if m.IsRequest || m.Method != "echo_list" || - m.SeqId != 0 || m.Type != ThriftMsgTypeReply || + m.SeqID != 0 || m.Type != 
ThriftMsgTypeReply || m.ReturnValue != `[1, 2, 3]` { t.Error("Bad result:", stream.message) } @@ -512,7 +512,7 @@ func TestThrift_thriftMessageParser(t *testing.T) { t.Error("Bad result:", ok, complete) } if m.IsRequest || m.Method != "echo_map" || - m.SeqId != 0 || m.Type != ThriftMsgTypeReply || + m.SeqID != 0 || m.Type != ThriftMsgTypeReply || m.ReturnValue != `{"a": 1, "c": 3, "b": 2}` { t.Error("Bad result:", stream.message) } @@ -525,7 +525,7 @@ func TestThrift_thriftMessageParser(t *testing.T) { t.Error("Bad result:", ok, complete) } if m.IsRequest || m.Method != "calculate" || - m.SeqId != 0 || m.Type != ThriftMsgTypeReply || m.HasException || + m.SeqID != 0 || m.Type != ThriftMsgTypeReply || m.HasException || m.ReturnValue != `5` { t.Error("Bad result:", stream.message) } @@ -539,7 +539,7 @@ func TestThrift_thriftMessageParser(t *testing.T) { t.Error("Bad result:", ok, complete) } if m.IsRequest || m.Method != "calculate" || - m.SeqId != 0 || m.Type != ThriftMsgTypeReply || !m.HasException || + m.SeqID != 0 || m.Type != ThriftMsgTypeReply || !m.HasException || m.Exceptions != `(1: (1: 4, 2: "Cannot divide by 0"))` { t.Error("Bad result:", stream.message) } @@ -553,7 +553,7 @@ func TestThrift_thriftMessageParser(t *testing.T) { t.Error("Bad result:", ok, complete) } if m.IsRequest || m.Method != "echo_binary" || - m.SeqId != 0 || m.Type != ThriftMsgTypeReply || m.HasException || + m.SeqID != 0 || m.Type != ThriftMsgTypeReply || m.HasException || m.ReturnValue != `ab0c1d281a000000` { t.Error("Bad result:", stream.message) } @@ -584,7 +584,7 @@ func expectThriftTransaction(t *testing.T, thrift *Thrift) *ThriftTransaction { return nil } -func testTcpTuple() *common.TCPTuple { +func testTCPTuple() *common.TCPTuple { t := &common.TCPTuple{ IPLength: 4, SrcIP: net.IPv4(192, 168, 0, 1), DstIP: net.IPv4(192, 168, 0, 2), @@ -603,7 +603,7 @@ func TestThrift_ParseSimpleTBinary(t *testing.T) { thrift := thriftForTests() thrift.PublishQueue = make(chan *ThriftTransaction, 10) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := createTestPacket(t, "800100010000000470696e670000000000") repl := createTestPacket(t, "800100020000000470696e670000000000") @@ -632,7 +632,7 @@ func TestThrift_ParseSimpleTFramed(t *testing.T) { thrift.TransportType = ThriftTFramed thrift.PublishQueue = make(chan *ThriftTransaction, 10) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := createTestPacket(t, "0000001e8001000100000003616464000000000800010000000108"+ "00020000000100") @@ -663,19 +663,19 @@ func TestThrift_ParseSimpleTFramedSplit(t *testing.T) { thrift.TransportType = ThriftTFramed thrift.PublishQueue = make(chan *ThriftTransaction, 10) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() - req_half1 := createTestPacket(t, "0000001e8001000100") - req_half2 := createTestPacket(t, "000003616464000000000800010000000108"+ + reqHalf1 := createTestPacket(t, "0000001e8001000100") + reqHalf2 := createTestPacket(t, "000003616464000000000800010000000108"+ "00020000000100") - repl_half1 := createTestPacket(t, "000000178001000200000003") - repl_half2 := createTestPacket(t, "616464000000000800000000000200") + replHalf1 := createTestPacket(t, "000000178001000200000003") + replHalf2 := createTestPacket(t, "616464000000000800000000000200") var private thriftPrivateData - private = thrift.Parse(req_half1, tcptuple, 0, private).(thriftPrivateData) - private = thrift.Parse(req_half2, tcptuple, 0, private).(thriftPrivateData) - private = thrift.Parse(repl_half1, tcptuple, 1, 
private).(thriftPrivateData) - thrift.Parse(repl_half2, tcptuple, 1, private) + private = thrift.Parse(reqHalf1, tcptuple, 0, private).(thriftPrivateData) + private = thrift.Parse(reqHalf2, tcptuple, 0, private).(thriftPrivateData) + private = thrift.Parse(replHalf1, tcptuple, 1, private).(thriftPrivateData) + thrift.Parse(replHalf2, tcptuple, 1, private) trans := expectThriftTransaction(t, thrift) if trans.Request.Method != "add" || @@ -697,19 +697,19 @@ func TestThrift_ParseSimpleTFramedSplitInterleaved(t *testing.T) { thrift.TransportType = ThriftTFramed thrift.PublishQueue = make(chan *ThriftTransaction, 10) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() - req_half1 := createTestPacket(t, "0000001e8001000100") - repl_half1 := createTestPacket(t, "000000178001000200000003") - req_half2 := createTestPacket(t, "000003616464000000000800010000000108"+ + reqHalf1 := createTestPacket(t, "0000001e8001000100") + replHalf1 := createTestPacket(t, "000000178001000200000003") + reqHalf2 := createTestPacket(t, "000003616464000000000800010000000108"+ "00020000000100") - repl_half2 := createTestPacket(t, "616464000000000800000000000200") + replHalf2 := createTestPacket(t, "616464000000000800000000000200") var private thriftPrivateData - private = thrift.Parse(req_half1, tcptuple, 0, private).(thriftPrivateData) - private = thrift.Parse(req_half2, tcptuple, 0, private).(thriftPrivateData) - private = thrift.Parse(repl_half1, tcptuple, 1, private).(thriftPrivateData) - thrift.Parse(repl_half2, tcptuple, 1, private) + private = thrift.Parse(reqHalf1, tcptuple, 0, private).(thriftPrivateData) + private = thrift.Parse(reqHalf2, tcptuple, 0, private).(thriftPrivateData) + private = thrift.Parse(replHalf1, tcptuple, 1, private).(thriftPrivateData) + thrift.Parse(replHalf2, tcptuple, 1, private) trans := expectThriftTransaction(t, thrift) if trans.Request.Method != "add" || @@ -730,7 +730,7 @@ func TestThrift_Parse_OneWayCallWithFin(t *testing.T) { thrift.TransportType = ThriftTFramed thrift.PublishQueue = make(chan *ThriftTransaction, 10) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := createTestPacket(t, "0000001080010001000000037a69700000000000") @@ -757,7 +757,7 @@ func TestThrift_Parse_OneWayCall2Requests(t *testing.T) { thrift.TransportType = ThriftTFramed thrift.PublishQueue = make(chan *ThriftTransaction, 10) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() reqzip := createTestPacket(t, "0000001080010001000000037a69700000000000") req := createTestPacket(t, "0000001e8001000100000003616464000000000800010000000108"+ @@ -796,7 +796,7 @@ func TestThrift_Parse_RequestReplyMismatch(t *testing.T) { thrift.TransportType = ThriftTFramed thrift.PublishQueue = make(chan *ThriftTransaction, 10) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() reqzip := createTestPacket(t, "0000001080010001000000037a69700000000000") repladd := createTestPacket(t, "000000178001000200000003616464000000000800000000000200") @@ -825,7 +825,7 @@ func TestThrift_ParseSimpleTFramed_NoReply(t *testing.T) { thrift.CaptureReply = false thrift.PublishQueue = make(chan *ThriftTransaction, 10) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := createTestPacket(t, "0000001e8001000100000003616464000000000800010000000108"+ "00020000000100") @@ -867,7 +867,7 @@ func TestThrift_ParseObfuscateStrings(t *testing.T) { thrift.ObfuscateStrings = true thrift.PublishQueue = make(chan *ThriftTransaction, 10) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := createTestPacket(t, 
"00000024800100010000000b6563686f5f737472696e670000"+ "00000b00010000000568656c6c6f00") @@ -898,13 +898,13 @@ func BenchmarkThrift_ParseSkipReply(b *testing.B) { thrift.PublishQueue = make(chan *ThriftTransaction, 10) thrift.CaptureReply = false - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() - data_req, _ := hex.DecodeString("0000001e8001000100000003616464000000000800010000000108" + + dataReq, _ := hex.DecodeString("0000001e8001000100000003616464000000000800010000000108" + "00020000000100") - req := &protos.Packet{Payload: data_req} - data_repl, _ := hex.DecodeString("000000178001000200000003616464000000000800000000000200") - repl := &protos.Packet{Payload: data_repl} + req := &protos.Packet{Payload: dataReq} + dataRepl, _ := hex.DecodeString("000000178001000200000003616464000000000800000000000200") + repl := &protos.Packet{Payload: dataRepl} var private thriftPrivateData for n := 0; n < b.N; n++ { @@ -942,7 +942,7 @@ func TestThrift_Parse_Exception(t *testing.T) { thrift := thriftForTests() thrift.PublishQueue = make(chan *ThriftTransaction, 10) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := createTestPacket(t, "800100010000000963616c63756c6174650000000008000"+ "1000000010c00020800010000000108000200000000080003000000040000") @@ -979,7 +979,7 @@ func TestThrift_ParametersNames(t *testing.T) { thrift.PublishQueue = make(chan *ThriftTransaction, 10) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := createTestPacket(t, "0000001e8001000100000003616464000000000800010000000108"+ "00020000000100") @@ -1019,7 +1019,7 @@ func TestThrift_ExceptionName(t *testing.T) { thrift.PublishQueue = make(chan *ThriftTransaction, 10) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := createTestPacket(t, "800100010000000963616c63756c6174650000000008000"+ "1000000010c00020800010000000108000200000000080003000000040000") @@ -1061,7 +1061,7 @@ func TestThrift_GapInStream_response(t *testing.T) { thrift.PublishQueue = make(chan *ThriftTransaction, 10) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() req := createTestPacket(t, "800100010000000963616c63756c6174650000000008000"+ "1000000010c00020800010000000108000200000000080003000000040000") @@ -1112,7 +1112,7 @@ func TestThrift_GapInStream_request(t *testing.T) { thrift.PublishQueue = make(chan *ThriftTransaction, 10) - tcptuple := testTcpTuple() + tcptuple := testTCPTuple() // missing bytes from the request req := createTestPacket(t, "800100010000000963616c63756c6174") diff --git a/packetbeat/protos/udp/udp.go b/packetbeat/protos/udp/udp.go index 49db954cfd9..9a117adc7ce 100644 --- a/packetbeat/protos/udp/udp.go +++ b/packetbeat/protos/udp/udp.go @@ -10,7 +10,7 @@ import ( "github.com/elastic/beats/packetbeat/protos" ) -type Udp struct { +type UDP struct { protocols protos.Protocols portMap map[uint16]protos.Protocol } @@ -22,7 +22,7 @@ type Processor interface { // decideProtocol determines the protocol based on the source and destination // ports. If the protocol cannot be determined then protos.UnknownProtocol // is returned. -func (udp *Udp) decideProtocol(tuple *common.IPPortTuple) protos.Protocol { +func (udp *UDP) decideProtocol(tuple *common.IPPortTuple) protos.Protocol { protocol, exists := udp.portMap[tuple.SrcPort] if exists { return protocol @@ -38,16 +38,16 @@ func (udp *Udp) decideProtocol(tuple *common.IPPortTuple) protos.Protocol { // Process handles UDP packets that have been received. 
It attempts to // determine the protocol type and then invokes the associated -// UdpProtocolPlugin's ParseUdp method. If the protocol cannot be determined +// UdpProtocolPlugin's ParseUDP method. If the protocol cannot be determined // or the payload is empty then the method is a noop. -func (udp *Udp) Process(id *flows.FlowID, pkt *protos.Packet) { +func (udp *UDP) Process(id *flows.FlowID, pkt *protos.Packet) { protocol := udp.decideProtocol(&pkt.Tuple) if protocol == protos.UnknownProtocol { logp.Debug("udp", "unknown protocol") return } - plugin := udp.protocols.GetUdp(protocol) + plugin := udp.protocols.GetUDP(protocol) if plugin == nil { logp.Debug("udp", "Ignoring protocol for which we have no module loaded: %s", protocol) return @@ -56,25 +56,25 @@ func (udp *Udp) Process(id *flows.FlowID, pkt *protos.Packet) { if len(pkt.Payload) > 0 { logp.Debug("udp", "Parsing packet from %v of length %d.", pkt.Tuple.String(), len(pkt.Payload)) - plugin.ParseUdp(pkt) + plugin.ParseUDP(pkt) } } // buildPortsMap creates a mapping of port numbers to protocol identifiers. If // any two UdpProtocolPlugins operate on the same port number then an error // will be returned. -func buildPortsMap(plugins map[protos.Protocol]protos.UdpPlugin) (map[uint16]protos.Protocol, error) { +func buildPortsMap(plugins map[protos.Protocol]protos.UDPPlugin) (map[uint16]protos.Protocol, error) { var res = map[uint16]protos.Protocol{} for proto, protoPlugin := range plugins { for _, port := range protoPlugin.GetPorts() { - old_proto, exists := res[uint16(port)] + oldProto, exists := res[uint16(port)] if exists { - if old_proto == proto { + if oldProto == proto { continue } return nil, fmt.Errorf("Duplicate port (%d) exists in %s and %s protocols", - port, old_proto, proto) + port, oldProto, proto) } res[uint16(port)] = proto } @@ -84,13 +84,13 @@ func buildPortsMap(plugins map[protos.Protocol]protos.UdpPlugin) (map[uint16]pro } // NewUdp creates and returns a new Udp. 
-func NewUdp(p protos.Protocols) (*Udp, error) { - portMap, err := buildPortsMap(p.GetAllUdp()) +func NewUDP(p protos.Protocols) (*UDP, error) { + portMap, err := buildPortsMap(p.GetAllUDP()) if err != nil { return nil, err } - udp := &Udp{protocols: p, portMap: portMap} + udp := &UDP{protocols: p, portMap: portMap} logp.Debug("udp", "Port map: %v", portMap) return udp, nil diff --git a/packetbeat/protos/udp/udp_test.go b/packetbeat/protos/udp/udp_test.go index 6d35e377a31..1bc2bea8b00 100644 --- a/packetbeat/protos/udp/udp_test.go +++ b/packetbeat/protos/udp/udp_test.go @@ -33,18 +33,18 @@ var ( ) type TestProtocols struct { - udp map[protos.Protocol]protos.UdpPlugin + udp map[protos.Protocol]protos.UDPPlugin } -func (p TestProtocols) BpfFilter(with_vlans bool, with_icmp bool) string { +func (p TestProtocols) BpfFilter(withVlans bool, withICMP bool) string { return "mock bpf filter" } -func (p TestProtocols) GetTcp(proto protos.Protocol) protos.TcpPlugin { +func (p TestProtocols) GetTCP(proto protos.Protocol) protos.TCPPlugin { return nil } -func (p TestProtocols) GetUdp(proto protos.Protocol) protos.UdpPlugin { +func (p TestProtocols) GetUDP(proto protos.Protocol) protos.UDPPlugin { return p.udp[proto] } @@ -52,11 +52,11 @@ func (p TestProtocols) GetAll() map[protos.Protocol]protos.Plugin { return nil } -func (p TestProtocols) GetAllTcp() map[protos.Protocol]protos.TcpPlugin { +func (p TestProtocols) GetAllTCP() map[protos.Protocol]protos.TCPPlugin { return nil } -func (p TestProtocols) GetAllUdp() map[protos.Protocol]protos.UdpPlugin { +func (p TestProtocols) GetAllUDP() map[protos.Protocol]protos.UDPPlugin { return p.udp } @@ -69,7 +69,7 @@ type TestProtocol struct { pkt *protos.Packet // UDP packet that the plugin was called to process. } -func (proto *TestProtocol) Init(test_mode bool, results publish.Transactions) error { +func (proto *TestProtocol) Init(testMode bool, results publish.Transactions) error { return nil } @@ -77,13 +77,13 @@ func (proto *TestProtocol) GetPorts() []int { return proto.Ports } -func (proto *TestProtocol) ParseUdp(pkt *protos.Packet) { +func (proto *TestProtocol) ParseUDP(pkt *protos.Packet) { proto.pkt = pkt } type TestStruct struct { protocols *TestProtocols - udp *Udp + udp *UDP plugin *TestProtocol } @@ -94,11 +94,11 @@ func testSetup(t *testing.T) *TestStruct { } protocols := &TestProtocols{} - protocols.udp = make(map[protos.Protocol]protos.UdpPlugin) + protocols.udp = make(map[protos.Protocol]protos.UDPPlugin) plugin := &TestProtocol{Ports: []int{PORT}} protocols.udp[PROTO] = plugin - udp, err := NewUdp(protocols) + udp, err := NewUDP(protocols) if err != nil { t.Error("Error creating UDP handler: ", err) } @@ -108,15 +108,15 @@ func testSetup(t *testing.T) *TestStruct { func Test_buildPortsMap(t *testing.T) { type configTest struct { - Input map[protos.Protocol]protos.UdpPlugin + Input map[protos.Protocol]protos.UDPPlugin Output map[uint16]protos.Protocol } // The protocols named here are not necessarily UDP. They are just used // for testing purposes. 
- config_tests := []configTest{ + configTests := []configTest{ { - Input: map[protos.Protocol]protos.UdpPlugin{ + Input: map[protos.Protocol]protos.UDPPlugin{ httpProtocol: &TestProtocol{Ports: []int{80, 8080}}, }, Output: map[uint16]protos.Protocol{ @@ -125,7 +125,7 @@ func Test_buildPortsMap(t *testing.T) { }, }, { - Input: map[protos.Protocol]protos.UdpPlugin{ + Input: map[protos.Protocol]protos.UDPPlugin{ httpProtocol: &TestProtocol{Ports: []int{80, 8080}}, mysqlProtocol: &TestProtocol{Ports: []int{3306}}, redisProtocol: &TestProtocol{Ports: []int{6379, 6380}}, @@ -141,7 +141,7 @@ func Test_buildPortsMap(t *testing.T) { // should ignore duplicate ports in the same protocol { - Input: map[protos.Protocol]protos.UdpPlugin{ + Input: map[protos.Protocol]protos.UDPPlugin{ httpProtocol: &TestProtocol{Ports: []int{80, 8080, 8080}}, mysqlProtocol: &TestProtocol{Ports: []int{3306}}, }, @@ -153,7 +153,7 @@ func Test_buildPortsMap(t *testing.T) { }, } - for _, test := range config_tests { + for _, test := range configTests { output, err := buildPortsMap(test.Input) assert.Nil(t, err) assert.Equal(t, test.Output, output) @@ -164,7 +164,7 @@ func Test_buildPortsMap(t *testing.T) { // for the same port number. func Test_buildPortsMap_portOverlapError(t *testing.T) { type errTest struct { - Input map[protos.Protocol]protos.UdpPlugin + Input map[protos.Protocol]protos.UDPPlugin Err string } @@ -173,7 +173,7 @@ func Test_buildPortsMap_portOverlapError(t *testing.T) { tests := []errTest{ { // Should raise error on duplicate port - Input: map[protos.Protocol]protos.UdpPlugin{ + Input: map[protos.Protocol]protos.UDPPlugin{ httpProtocol: &TestProtocol{Ports: []int{80, 8080}}, mysqlProtocol: &TestProtocol{Ports: []int{3306}}, redisProtocol: &TestProtocol{Ports: []int{6379, 6380, 3306}}, diff --git a/packetbeat/publish/publish.go b/packetbeat/publish/publish.go index 910d53c974d..e4e89cb50ad 100644 --- a/packetbeat/publish/publish.go +++ b/packetbeat/publish/publish.go @@ -76,9 +76,9 @@ func NewPublisher( }, nil } -func (t *PacketbeatPublisher) PublishTransaction(event common.MapStr) bool { +func (p *PacketbeatPublisher) PublishTransaction(event common.MapStr) bool { select { - case t.trans <- event: + case p.trans <- event: return true default: // drop event if queue is full @@ -86,64 +86,64 @@ func (t *PacketbeatPublisher) PublishTransaction(event common.MapStr) bool { } } -func (t *PacketbeatPublisher) PublishFlows(event []common.MapStr) bool { +func (p *PacketbeatPublisher) PublishFlows(event []common.MapStr) bool { select { - case t.flows <- event: + case p.flows <- event: return true - case <-t.done: + case <-p.done: // drop event, if worker has been stopped return false } } -func (t *PacketbeatPublisher) Start() { - t.wg.Add(1) +func (p *PacketbeatPublisher) Start() { + p.wg.Add(1) go func() { - defer t.wg.Done() + defer p.wg.Done() for { select { - case <-t.done: + case <-p.done: return - case event := <-t.trans: - t.onTransaction(event) + case event := <-p.trans: + p.onTransaction(event) } } }() - t.wg.Add(1) + p.wg.Add(1) go func() { - defer t.wg.Done() + defer p.wg.Done() for { select { - case <-t.done: + case <-p.done: return - case events := <-t.flows: - t.onFlow(events) + case events := <-p.flows: + p.onFlow(events) } } }() } -func (t *PacketbeatPublisher) Stop() { - t.client.Close() - close(t.done) - t.wg.Wait() +func (p *PacketbeatPublisher) Stop() { + p.client.Close() + close(p.done) + p.wg.Wait() } -func (t *PacketbeatPublisher) onTransaction(event common.MapStr) { +func (p 
*PacketbeatPublisher) onTransaction(event common.MapStr) { if err := validateEvent(event); err != nil { logp.Warn("Dropping invalid event: %v", err) return } - if !t.normalizeTransAddr(event) { + if !p.normalizeTransAddr(event) { return } - t.client.PublishEvent(event) + p.client.PublishEvent(event) } -func (t *PacketbeatPublisher) onFlow(events []common.MapStr) { +func (p *PacketbeatPublisher) onFlow(events []common.MapStr) { pub := events[:0] for _, event := range events { if err := validateEvent(event); err != nil { @@ -151,14 +151,14 @@ func (t *PacketbeatPublisher) onFlow(events []common.MapStr) { continue } - if !t.addGeoIPToFlow(event) { + if !p.addGeoIPToFlow(event) { continue } pub = append(pub, event) } - t.client.PublishEvents(pub) + p.client.PublishEvents(pub) } // filterEvent validates an event for common required fields with types. diff --git a/packetbeat/sniffer/afpacket_nonlinux.go b/packetbeat/sniffer/afpacket_nonlinux.go index 3d2187361d2..83bde7b5672 100644 --- a/packetbeat/sniffer/afpacket_nonlinux.go +++ b/packetbeat/sniffer/afpacket_nonlinux.go @@ -12,7 +12,7 @@ import ( type AfpacketHandle struct { } -func NewAfpacketHandle(device string, snaplen int, block_size int, num_blocks int, +func NewAfpacketHandle(device string, snaplen int, blockSize int, numBlocks int, timeout time.Duration) (*AfpacketHandle, error) { return nil, fmt.Errorf("Afpacket MMAP sniffing is only available on Linux") diff --git a/packetbeat/sniffer/sniffer.go b/packetbeat/sniffer/sniffer.go index fab6683abd1..8ef80db30a2 100644 --- a/packetbeat/sniffer/sniffer.go +++ b/packetbeat/sniffer/sniffer.go @@ -44,24 +44,24 @@ type WorkerFactory func(layers.LinkType) (Worker, error) // allocated mmap buffer is close to but smaller than target_size_mb. // The restriction is that the block_size must be divisible by both the // frame size and page size. 
-func afpacketComputeSize(target_size_mb int, snaplen int, page_size int) (
-	frame_size int, block_size int, num_blocks int, err error) {
+func afpacketComputeSize(targetSizeMb int, snaplen int, pageSize int) (
+	frameSize int, blockSize int, numBlocks int, err error) {

-	if snaplen < page_size {
-		frame_size = page_size / (page_size / snaplen)
+	if snaplen < pageSize {
+		frameSize = pageSize / (pageSize / snaplen)
 	} else {
-		frame_size = (snaplen/page_size + 1) * page_size
+		frameSize = (snaplen/pageSize + 1) * pageSize
 	}

 	// 128 is the default from the gopacket library so just use that
-	block_size = frame_size * 128
-	num_blocks = (target_size_mb * 1024 * 1024) / block_size
+	blockSize = frameSize * 128
+	numBlocks = (targetSizeMb * 1024 * 1024) / blockSize

-	if num_blocks == 0 {
+	if numBlocks == 0 {
 		return 0, 0, 0, fmt.Errorf("Buffer size too small")
 	}

-	return frame_size, block_size, num_blocks, nil
+	return frameSize, blockSize, numBlocks, nil
 }

 func deviceNameFromIndex(index int, devices []string) (string, error) {
@@ -180,12 +180,12 @@ func (sniffer *SnifferSetup) setFromConfig(config *config.InterfacesConfig) erro
 		sniffer.DataSource = gopacket.PacketDataSource(sniffer.pcapHandle)

 	case "af_packet":
-		if sniffer.config.Buffer_size_mb == 0 {
-			sniffer.config.Buffer_size_mb = 24
+		if sniffer.config.BufferSizeMb == 0 {
+			sniffer.config.BufferSizeMb = 24
 		}

-		frame_size, block_size, num_blocks, err := afpacketComputeSize(
-			sniffer.config.Buffer_size_mb,
+		frameSize, blockSize, numBlocks, err := afpacketComputeSize(
+			sniffer.config.BufferSizeMb,
 			sniffer.config.Snaplen,
 			os.Getpagesize())
 		if err != nil {
@@ -194,9 +194,9 @@ func (sniffer *SnifferSetup) setFromConfig(config *config.InterfacesConfig) erro

 		sniffer.afpacketHandle, err = NewAfpacketHandle(
 			sniffer.config.Device,
-			frame_size,
-			block_size,
-			num_blocks,
+			frameSize,
+			blockSize,
+			numBlocks,
 			500*time.Millisecond)
 		if err != nil {
 			return err
@@ -262,10 +262,10 @@ func (sniffer *SnifferSetup) Datalink() layers.LinkType {
 	return layers.LinkTypeEthernet
 }

-func (sniffer *SnifferSetup) Init(test_mode bool, filter string, factory WorkerFactory, interfaces *config.InterfacesConfig) error {
+func (sniffer *SnifferSetup) Init(testMode bool, filter string, factory WorkerFactory, interfaces *config.InterfacesConfig) error {
 	var err error

-	if !test_mode {
+	if !testMode {
 		sniffer.filter = filter
 		logp.Debug("sniffer", "BPF filter: '%s'", sniffer.filter)
 		err = sniffer.setFromConfig(interfaces)
@@ -307,8 +307,8 @@ func (sniffer *SnifferSetup) Init(test_mode bool, filter string, factory WorkerF
 func (sniffer *SnifferSetup) Run() error {
 	counter := 0
 	loopCount := 1
-	var lastPktTime *time.Time = nil
-	var ret_error error
+	var lastPktTime *time.Time
+	var retError error

 	for sniffer.isAlive {
 		if sniffer.config.OneAtATime {
@@ -325,7 +325,7 @@ func (sniffer *SnifferSetup) Run() error {

 			if err == io.EOF {
 				logp.Debug("sniffer", "End of file")
-				loopCount += 1
+				loopCount++
 				if sniffer.config.Loop > 0 && loopCount > sniffer.config.Loop {
 					// give a bit of time to the publish goroutine
 					// to flush
@@ -337,7 +337,7 @@ func (sniffer *SnifferSetup) Run() error {
 				logp.Debug("sniffer", "Reopening the file")
 				err = sniffer.Reopen()
 				if err != nil {
-					ret_error = fmt.Errorf("Error reopening file: %s", err)
+					retError = fmt.Errorf("Error reopening file: %s", err)
 					sniffer.isAlive = false
 					continue
 				}
@@ -346,7 +346,7 @@ func (sniffer *SnifferSetup) Run() error {
 		}

 		if err != nil {
-			ret_error = fmt.Errorf("Sniffing error: %s", err)
+			retError = fmt.Errorf("Sniffing error: %s", err)
 			sniffer.isAlive = false
 			continue
 		}
@@ -387,7 +387,7 @@ func (sniffer *SnifferSetup) Run() error {
 		sniffer.dumper.Close()
 	}

-	return ret_error
+	return retError
 }

 func (sniffer *SnifferSetup) Close() error {
diff --git a/packetbeat/sniffer/sniffer_test.go b/packetbeat/sniffer/sniffer_test.go
index 572dd461bf0..20181bf3209 100644
--- a/packetbeat/sniffer/sniffer_test.go
+++ b/packetbeat/sniffer/sniffer_test.go
@@ -9,48 +9,48 @@ import (
 )

 func TestSniffer_afpacketComputeSize(t *testing.T) {
-	var frame_size, block_size, num_blocks int
+	var frameSize, blockSize, numBlocks int
 	var err error

-	frame_size, block_size, num_blocks, err = afpacketComputeSize(30, 1514, 4096)
+	frameSize, blockSize, numBlocks, err = afpacketComputeSize(30, 1514, 4096)
 	if err != nil {
 		t.Error(err)
 	}
-	if frame_size != 2048 || block_size != 2048*128 || num_blocks != 120 {
-		t.Error("Bad result", frame_size, block_size, num_blocks)
+	if frameSize != 2048 || blockSize != 2048*128 || numBlocks != 120 {
+		t.Error("Bad result", frameSize, blockSize, numBlocks)
 	}
-	if block_size*num_blocks > 30*1024*1024 {
-		t.Error("Value too big", block_size, num_blocks)
+	if blockSize*numBlocks > 30*1024*1024 {
+		t.Error("Value too big", blockSize, numBlocks)
 	}

-	frame_size, block_size, num_blocks, err = afpacketComputeSize(1, 1514, 4096)
+	frameSize, blockSize, numBlocks, err = afpacketComputeSize(1, 1514, 4096)
 	if err != nil {
 		t.Error(err)
 	}
-	if frame_size != 2048 || block_size != 2048*128 || num_blocks != 4 {
-		t.Error("Bad result", block_size, num_blocks)
+	if frameSize != 2048 || blockSize != 2048*128 || numBlocks != 4 {
+		t.Error("Bad result", blockSize, numBlocks)
 	}
-	if block_size*num_blocks > 1*1024*1024 {
-		t.Error("Value too big", block_size, num_blocks)
+	if blockSize*numBlocks > 1*1024*1024 {
+		t.Error("Value too big", blockSize, numBlocks)
 	}

-	frame_size, block_size, num_blocks, err = afpacketComputeSize(0, 1514, 4096)
+	frameSize, blockSize, numBlocks, err = afpacketComputeSize(0, 1514, 4096)
 	if err == nil {
 		t.Error("Expected an error")
 	}

 	// 16436 is the default MTU size of the loopback interface
-	frame_size, block_size, num_blocks, err = afpacketComputeSize(30, 16436, 4096)
-	if frame_size != 4096*5 || block_size != 4096*5*128 || num_blocks != 12 {
-		t.Error("Bad result", frame_size, block_size, num_blocks)
+	frameSize, blockSize, numBlocks, err = afpacketComputeSize(30, 16436, 4096)
+	if frameSize != 4096*5 || blockSize != 4096*5*128 || numBlocks != 12 {
+		t.Error("Bad result", frameSize, blockSize, numBlocks)
 	}

-	frame_size, block_size, num_blocks, err = afpacketComputeSize(3, 16436, 4096)
+	frameSize, blockSize, numBlocks, err = afpacketComputeSize(3, 16436, 4096)
 	if err != nil {
 		t.Error(err)
 	}
-	if frame_size != 4096*5 || block_size != 4096*5*128 || num_blocks != 1 {
-		t.Error("Bad result", frame_size, block_size, num_blocks)
+	if frameSize != 4096*5 || blockSize != 4096*5*128 || numBlocks != 1 {
+		t.Error("Bad result", frameSize, blockSize, numBlocks)
 	}
 }