From 1d9a8628228a4a3f458a38355d4cf09cb819288a Mon Sep 17 00:00:00 2001 From: Mark Stemm Date: Wed, 14 Oct 2020 18:53:29 -0700 Subject: [PATCH] Add exceptions fields/comps/values to rules files Take advantage of the changes to support exceptions and refactor rules to use them whenever feasible: - Define exceptions for every rule. In cases where no practical exception exists e.g. "K8s Created/Deleted", define an empty exception property just to avoid warnings when loading rules. - Go through all rules and convert macros-used-as-exceptions that matched against 2-3 filter fields into exceptions. In most cases, switching from equality (e.g proc.name=nginx) to in (e.g. proc.name in (nginx)) allowed for better groupings into a smaller set of exception items. - In cases where the exception had complex combinations of fields, keep the macro as is. --- rules/falco_rules.yaml | 629 ++++++++++++++++++++++++------------- rules/k8s_audit_rules.yaml | 167 ++++++++-- 2 files changed, 542 insertions(+), 254 deletions(-) diff --git a/rules/falco_rules.yaml b/rules/falco_rules.yaml index 96d5851e979..813e1f4cece 100644 --- a/rules/falco_rules.yaml +++ b/rules/falco_rules.yaml @@ -214,9 +214,6 @@ - list: openscap_rpm_binaries items: [probe_rpminfo, probe_rpmverify, probe_rpmverifyfile, probe_rpmverifypackage] -- macro: rpm_procs - condition: (proc.name in (rpm_binaries, openscap_rpm_binaries) or proc.name in (salt-minion)) - - list: deb_binaries items: [dpkg, dpkg-preconfigu, dpkg-reconfigur, dpkg-divert, apt, apt-get, aptitude, frontend, preinst, add-apt-reposit, apt-auto-remova, apt-key, @@ -466,6 +463,9 @@ - list: shell_config_directories items: [/etc/zsh] +- list: lvprogs_binaries + items: [dmeventd, lvcreate, pvscan, lvs] + - rule: Modify Shell Configuration File desc: Detect attempt to modify shell configuration files condition: > @@ -525,7 +525,7 @@ consider_all_cron_jobs and not user_known_cron_jobs exceptions: - - name: known_cron_writer + - name: known_cron_writers fields: 
[proc.name, fd.name] comps: [=, contains] output: > @@ -592,16 +592,6 @@ (proc.pcmdline contains /usr/sbin/denyhosts or proc.pcmdline contains /usr/local/bin/denyhosts.py))) -- macro: parent_python_running_sdchecks - condition: > - (proc.pname in (python, python2.7) and - (proc.pcmdline contains /opt/draios/bin/sdchecks)) - -- macro: python_running_sdchecks - condition: > - (proc.name in (python, python2.7) and - (proc.cmdline contains /opt/draios/bin/sdchecks)) - - macro: parent_linux_image_upgrade_script condition: proc.pname startswith linux-image- @@ -726,15 +716,9 @@ - macro: java_running_sdjagent condition: proc.name=java and proc.cmdline contains sdjagent.jar -- macro: kubelet_running_loopback - condition: (proc.pname=kubelet and proc.name=loopback) - - macro: python_mesos_marathon_scripting condition: (proc.pcmdline startswith "python3 /marathon-lb/marathon_lb.py") -- macro: splunk_running_forwarder - condition: (proc.pname=splunkd and proc.cmdline startswith "sh -c /opt/splunkforwarder") - - macro: parent_supervise_running_multilog condition: (proc.name=multilog and proc.pname=supervise) @@ -742,15 +726,6 @@ condition: (proc.cmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager" or proc.pcmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager") -- macro: perl_running_updmap - condition: (proc.cmdline startswith "perl /usr/bin/updmap") - -- macro: perl_running_centrifydc - condition: (proc.cmdline startswith "perl /usr/share/centrifydc") - -- macro: runuser_reading_pam - condition: (proc.name=runuser and fd.directory=/etc/pam.d) - # CIS Linux Benchmark program - macro: linux_bench_reading_etc_shadow condition: ((proc.aname[2]=linux-bench and @@ -779,12 +754,6 @@ - macro: couchdb_writing_conf condition: (proc.name=beam.smp and proc.cmdline contains couchdb and fd.name startswith /etc/couchdb) -- macro: rancher_agent - condition: (proc.name=agent and container.image.repository contains "rancher/agent") - -- macro: rancher_network_manager - 
condition: (proc.name=rancher-bridge and container.image.repository contains "rancher/network-manager") - - macro: sosreport_writing_files condition: > (proc.name=urlgrabber-ext- and proc.aname[3]=sosreport and @@ -892,12 +861,15 @@ desc: an attempt to write to any file below a set of binary directories condition: > bin_dir and evt.dir = < and open_write - and not package_mgmt_procs and not exe_running_docker_save and not python_running_get_pip and not python_running_ms_oms and not user_known_write_below_binary_dir_activities exceptions: + - name: proc_names + fields: proc.name + comps: in + values: [package_mgmt_binaries] - name: known_bin_writers fields: [proc.name, fd.name] comps: [=, contains] @@ -944,12 +916,15 @@ desc: an attempt to write to any file below a set of binary directories condition: > evt.dir = < and open_write and monitored_dir - and not package_mgmt_procs and not exe_running_docker_save and not python_running_get_pip and not python_running_ms_oms and not user_known_write_monitored_dir_conditions exceptions: + - name: proc_names + fields: proc.name + comps: in + values: [package_mgmt_binaries] - name: known_writer_prefix fields: [proc.name, fd.name] comps: [=, startswith] @@ -1095,8 +1070,7 @@ exceptions: - name: proc_names fields: proc.name - values: - - [passwd_binaries, shadowutils_binaries, sysdigcloud_binaries, + values: [passwd_binaries, shadowutils_binaries, sysdigcloud_binaries, package_mgmt_binaries, ssl_mgmt_binaries, dhcp_binaries, dev_creation_binaries, shell_mgmt_binaries, mail_config_binaries, @@ -1135,6 +1109,7 @@ - [[keepalived], [/etc/keepalived/keepalived.conf]] - [[update-haproxy-,haproxy_reload.], [/etc/openvpn/client.map]] - [[start-fluentd], [/etc/fluent/fluent.conf, /etc/td-agent/td-agent.conf]] + - [[run-redis, redis-launcher.], [/etc/redis.conf]] - name: proc_file_prefix fields: [proc.name, fd.name] comps: [in, startswith] @@ -1152,19 +1127,19 @@ - [[prometheus-conf], /etc/prometheus/config_out] - [[oc], /etc/origin/node] 
- [[plesk_binaries], /etc/sw/keys] - - [[supervice,svc], /etc/sb/] + - [[supervise,svc], /etc/sb/] - [[openvpn,openvpn-entrypo], /etc/openvpn] - [[semodule,genhomedircon,sefcontext_comp], /etc/selinux] - - [[dmeventd,lvcreate,pvscan,lvs], /etc/lvm/archive] - - [[dmeventd,lvcreate,pvscan,lvs], /etc/lvm/backup] - - [[dmeventd,lvcreate,pvscan,lvs], /etc/lvm/cache] + - [[lvprogs_binaries], /etc/lvm/archive] + - [[lvprogs_binaries], /etc/lvm/backup] + - [[lvprogs_binaries], /etc/lvm/cache] - [[nginx,nginx-ingress-c,nginx-ingress], /etc/nginx] - [[nginx,nginx-ingress-c,nginx-ingress], /etc/ingress-controller] - [[adjoin,addns], /etc/krb5] - - [[run-redis, redis-launcher.], /etc/redis] + - [[run-redis, redis-launcher.], /etc/redis/] - [[update-haproxy-,haproxy_reload.], /etc/haproxy] - [[start-mysql.sh, run-mysqld], /etc/mysql] - - name: proc_directory + - name: proc_directory fields: [proc.name, fd.directory] comps: [in, in] values: @@ -1180,36 +1155,36 @@ - name: pname_file fields: [proc.pname, fd.name] comps: [in, in] - fields: + values: - [[update-haproxy-,haproxy_reload,haproxy_reload.], [/etc/openvpn/client.map]] - name: pname_file_prefix fields: [proc.pname, fd.name] comps: [in, startswith] - fields: + values: - [[run-openldap.sh], /etc/openldap] - [[start-mysql.sh], /etc/mysql] - [[update-haproxy-,haproxy_reload.], /etc/haproxy] - name: pname_directory fields: [proc.pname, fd.directory] comps: [in, in] - fields: + values: - [[start-mysql.sh], [/etc/my.cnf.d]] - name: pname_prefix_file_prefix fields: [proc.pname, fd.name] comps: [startswith, startswith] - fields: + values: - ["bash /var/lib/waagent/", /etc/azure] - [automount, /etc/mtab] - name: proc_pname_file fields: [proc.name, proc.pname, fd.name] comps: [in, in, startswith] values: - - [[urlgrabber-ext-], [yum, yum-cron, repoquery], /etc/pkt/nssdb or fd.name startswith /etc/pki/nssdb)) + - [[urlgrabber-ext-], [yum, yum-cron, repoquery], /etc/pkt/nssdb or fd.name startswith /etc/pki/nssdb] - [[urlgrabber-ext-], 
[yum, yum-cron, repoquery], /etc/pki/nssdb] - [[trust], [update-ca-trust], /etc/pki] - name: cmdline_file fields: [proc.cmdline, fd.name] - fields: [in, in] + comps: [in, in] values: - [["bash /usr/sbin/start-cron"], [/etc/security/pam_env.conf]] - name: cmdline_file_prefix @@ -1239,7 +1214,7 @@ - name: pcmdline_prefix_file_prefix fields: [proc.pcmdline, fd.name] comps: [startswith, startswith] - fields: + values: - ["bash /var/lib/waagent/", /etc/azure] - ["chef-client /opt/gitlab", /etc/gitlab] - name: proc_container_dir @@ -1302,14 +1277,14 @@ and not user_known_write_below_root_activities exceptions: - name: files - field: fd.name + fields: fd.name values: [known_root_files] - name: dirs - field: fd.directory + fields: fd.directory comps: pmatch values: [known_root_directories] - name: prefixes - field: [fd.name] + fields: [fd.name] comps: [startswith] values: - [/root/orcexec.] @@ -1371,7 +1346,7 @@ fields: [proc.cmdline, fd.name] comps: [in, in] values: - - ["runc:[1:CHILD] init"], [/exec.fifo]] + - [["runc:[1:CHILD] init"], [/exec.fifo]] output: "File below / or /root opened for writing (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent=%proc.pname file=%fd.name program=%proc.name container_id=%container.id image=%container.image.repository)" priority: ERROR tags: [filesystem, mitre_persistence] @@ -1388,6 +1363,10 @@ information) by a trusted program after startup. Trusted programs might read these files at startup to load initial state, but not afterwards. 
condition: sensitive_files and open_read and server_procs and not proc_is_new and proc.name!="sshd" and not user_known_read_sensitive_files_activities +  exceptions: +    - name: known_sensitive_reader +      fields: [proc.name, fd.name] +      comps: [=, contains] output: > Sensitive file opened for reading by trusted program after startup (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent=%proc.pname file=%fd.name parent=%proc.pname gparent=%proc.aname[2] container_id=%container.id image=%container.image.repository) @@ -1428,12 +1407,6 @@ condition: > sensitive_files and open_read and proc_name_exists - and not proc.name in (user_mgmt_binaries, userexec_binaries, package_mgmt_binaries, - cron_binaries, read_sensitive_file_binaries, shell_binaries, hids_binaries, - vpn_binaries, mail_config_binaries, nomachine_binaries, sshkit_script_binaries, - in.proftpd, mandb, salt-minion, postgres_mgmt_binaries, - google_oslogin_ - ) and not cmp_cp_by_passwd and not ansible_running_python and not proc.cmdline contains /usr/bin/mandb @@ -1442,13 +1415,32 @@ and not run_by_google_accounts_daemon and not user_read_sensitive_file_conditions and not perl_running_plesk - and not perl_running_updmap - and not veritas_driver_script - and not perl_running_centrifydc - and not runuser_reading_pam and not linux_bench_reading_etc_shadow and not user_known_read_sensitive_files_activities and not user_read_sensitive_file_containers + exceptions: + - name: proc_names + fields: proc.name + comps: in + values: [user_mgmt_binaries, userexec_binaries, package_mgmt_binaries, + cron_binaries, read_sensitive_file_binaries, shell_binaries, hids_binaries, + vpn_binaries, mail_config_binaries, nomachine_binaries, sshkit_script_binaries, + in.proftpd, mandb, salt-minion, postgres_mgmt_binaries, + google_oslogin_] + - name: cmdline + fields: [proc.cmdline] + comps: [startswith] + values: + - ["perl /usr/bin/updmap"] + - ["perl /opt/VRTSsfmh/bin/mh_driver.pl"] + - ["perl /usr/share/centrifydc"] 
+ - name: proc_directory + fields: [proc.name, fd.directory] + values: + - [runuser, /etc/pam.d] + - name: proc_file_prefix + fields: [proc.name, fd.name] + comps: [in, startswith] output: > Sensitive file opened for reading by non-trusted program (user=%user.name user_loginuid=%user.loginuid program=%proc.name command=%proc.cmdline file=%fd.name parent=%proc.pname gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4] container_id=%container.id image=%container.image.repository) @@ -1469,30 +1461,26 @@ desc: an attempt to write to the rpm database by any non-rpm related program condition: > fd.name startswith /var/lib/rpm and open_write - and not rpm_procs and not ansible_running_python and not python_running_chef and not exe_running_docker_save and not amazon_linux_running_python_yum and not user_known_write_rpm_database_activities + exceptions: + - name: proc_names + fields: proc.name + comps: in + values: [rpm_binaries, openscap_rpm_binaries, salt-minion] + - name: proc_file_prefix + fields: [proc.name, fd.name] + comps: [in, startswith] output: "Rpm database opened for writing by a non-rpm program (command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline container_id=%container.id image=%container.image.repository)" priority: ERROR tags: [filesystem, software_mgmt, mitre_persistence] -- macro: postgres_running_wal_e - condition: (proc.pname=postgres and proc.cmdline startswith "sh -c envdir /etc/wal-e.d/env /usr/local/bin/wal-e") - - macro: redis_running_prepost_scripts condition: (proc.aname[2]=redis-server and (proc.cmdline contains "redis-server.post-up.d" or proc.cmdline contains "redis-server.pre-up.d")) -- macro: rabbitmq_running_scripts - condition: > - (proc.pname=beam.smp and - (proc.cmdline startswith "sh -c exec ps" or - proc.cmdline startswith "sh -c exec inet_gethost" or - proc.cmdline= "sh -s unix:cmd" or - proc.cmdline= "sh -c exec /bin/sh -s unix:cmd 2>&1")) - - macro: rabbitmqctl_running_scripts 
condition: (proc.aname[2]=rabbitmqctl and proc.cmdline startswith "sh -c ") @@ -1510,8 +1498,13 @@ proc.pname in (db_server_binaries) and spawned_process and not proc.name in (db_server_binaries) - and not postgres_running_wal_e and not user_known_db_spawned_processes + exceptions: + - name: pname_cmdline + fields: [proc.pname, proc.cmdline] + comps: [in, startswith] + values: + - [[postgres], "sh -c envdir /etc/wal-e.d/env /usr/local/bin/wal-e"] output: > Database-related program spawned process other than itself (user=%user.name user_loginuid=%user.loginuid program=%proc.cmdline parent=%proc.pname container_id=%container.id image=%container.image.repository) @@ -1523,7 +1516,17 @@ - rule: Modify binary dirs desc: an attempt to modify any file below a set of binary directories. - condition: bin_dir_rename and modify and not package_mgmt_procs and not exe_running_docker_save and not user_known_modify_bin_dir_activities + condition: bin_dir_rename and modify + and not exe_running_docker_save + and not user_known_modify_bin_dir_activities + exceptions: + - name: proc_names + fields: proc.name + comps: in + values: [package_mgmt_binaries] + - name: proc_file_prefix + fields: [proc.name, fd.name] + comps: [in, startswith] output: > File below known binary directory renamed/removed (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline pcmdline=%proc.pcmdline operation=%evt.type file=%fd.name %evt.args container_id=%container.id image=%container.image.repository) @@ -1535,7 +1538,15 @@ - rule: Mkdir binary dirs desc: an attempt to create a directory below a set of binary directories. 
- condition: mkdir and bin_dir_mkdir and not package_mgmt_procs and not user_known_mkdir_bin_dir_activities + condition: mkdir and bin_dir_mkdir and not user_known_mkdir_bin_dir_activities + exceptions: + - name: proc_names + fields: proc.name + comps: in + values: [package_mgmt_binaries] + - name: proc_file_prefix + fields: [proc.name, fd.name] + comps: [in, startswith] output: > Directory below known binary directory created (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline directory=%evt.arg.path container_id=%container.id image=%container.image.repository) @@ -1554,12 +1565,6 @@ - list: network_plugin_binaries items: [aws-cni, azure-vnet] -- macro: calico_node - condition: (container.image.repository endswith calico/node and proc.name=calico-node) - -- macro: weaveworks_scope - condition: (container.image.repository endswith weaveworks/scope and proc.name=scope) - - rule: Change thread namespace desc: > an attempt to change a program/thread\'s namespace (commonly done @@ -1567,20 +1572,44 @@ condition: > evt.type=setns and evt.dir=< and proc_name_exists - and not (container.id=host and proc.name in (docker_binaries, k8s_binaries, lxd_binaries, nsenter)) - and not proc.name in (sysdigcloud_binaries, sysdig, calico, oci-umount, cilium-cni, network_plugin_binaries) - and not proc.name in (user_known_change_thread_namespace_binaries) and not proc.name startswith "runc" and not proc.cmdline startswith "containerd" - and not proc.pname in (sysdigcloud_binaries, hyperkube, kubelet, protokube, dockerd, tini, aws) - and not python_running_sdchecks - and not java_running_sdjagent - and not kubelet_running_loopback - and not rancher_agent - and not rancher_network_manager - and not calico_node - and not weaveworks_scope and not user_known_change_thread_namespace_activities + exceptions: + - name: proc_names + fields: proc.name + comps: in + values: [sysdigcloud_binaries, sysdig, calico, oci-umount, + cilium-cni, network_plugin_binaries, + 
user_known_change_thread_namespace_binaries] + - name: container_proc_name + fields: [container.id, proc.name] + comps: [=, in] + values: + - [host, [docker_binaries, k8s_binaries, lxd_binaries, nsenter]] + - name: proc_pname + fields: proc.pname + comps: in + values: [sysdigcloud_binaries, hyperkube, kubelet, protokube, dockerd, tini, aws] + - name: proc_cmdline + fields: [proc.name, proc.cmdline] + comps: [in, contains] + values: + - [[python, python2.7], /opt/draios/bin/sdchecks] + - [[java], sdjagent.jar] + - name: proc_name_parent_name + fields: [proc.name, proc.pname] + comps: [in, in] + values: + - [[loopback], [kubelet]] + - name: proc_name_image_suffix + fields: [proc.name, container.image.repository] + comps: [in, endswith] + values: + - [[agent], "rancher/agent"] + - [[rancher-bridge], "rancher/network-manager"] + - [[calico-node], "calico/node"] + - [[scope], "weaveworks/scope"] output: > Namespace change (setns) by unexpected program (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent=%proc.pname %container.info container_id=%container.id image=%container.image.repository:%container.image.tag) @@ -1626,24 +1655,6 @@ - macro: parent_java_running_datastax condition: (proc.pname=java and proc.pcmdline contains com.datastax) -- macro: nginx_starting_nginx - condition: (proc.pname=nginx and proc.cmdline contains "/usr/sbin/nginx -c /etc/nginx/nginx.conf") - -- macro: nginx_running_aws_s3_cp - condition: (proc.pname=nginx and proc.cmdline startswith "sh -c /usr/local/bin/aws s3 cp") - -- macro: consul_running_net_scripts - condition: (proc.pname=consul and (proc.cmdline startswith "sh -c curl" or proc.cmdline startswith "sh -c nc")) - -- macro: consul_running_alert_checks - condition: (proc.pname=consul and proc.cmdline startswith "sh -c /bin/consul-alerts") - -- macro: serf_script - condition: (proc.cmdline startswith "sh -c serf") - -- macro: check_process_status - condition: (proc.cmdline startswith "sh -c kill -0 ") - # In some 
cases, you may want to consider node processes run directly # in containers as protected shell spawners. Examples include using # pm2-docker or pm2 start some-app.js --no-daemon-mode as the direct @@ -1702,31 +1713,50 @@ and shell_procs and proc.pname exists and protected_shell_spawner - and not proc.pname in (shell_binaries, gitlab_binaries, cron_binaries, user_known_shell_spawn_binaries, - needrestart_binaries, - mesos_shell_binaries, - erl_child_setup, exechealthz, - PM2, PassengerWatchd, c_rehash, svlogd, logrotate, hhvm, serf, - lb-controller, nvidia-installe, runsv, statsite, erlexec, calico-node, - "puma reactor") - and not proc.cmdline in (known_shell_spawn_cmdlines) and not proc.aname in (unicorn_launche) - and not consul_running_net_scripts - and not consul_running_alert_checks - and not nginx_starting_nginx - and not nginx_running_aws_s3_cp and not run_by_package_mgmt_binaries - and not serf_script - and not check_process_status and not run_by_foreman and not python_mesos_marathon_scripting - and not splunk_running_forwarder - and not postgres_running_wal_e and not redis_running_prepost_scripts - and not rabbitmq_running_scripts and not rabbitmqctl_running_scripts and not run_by_appdynamics and not user_shell_container_exclusions + exceptions: + - name: proc_pname + fields: proc.pname + comps: in + values: [shell_binaries, gitlab_binaries, cron_binaries, user_known_shell_spawn_binaries, + needrestart_binaries, + mesos_shell_binaries, + erl_child_setup, exechealthz, + PM2, PassengerWatchd, c_rehash, svlogd, logrotate, hhvm, serf, + lb-controller, nvidia-installe, runsv, statsite, erlexec, calico-node, + "puma reactor"] + - name: cmdlines + fields: proc.cmdline + comps: in + values: [known_shell_spawn_cmdlines] + - name: cmdline_prefix + fields: [proc.cmdline] + comps: [startswith] + values: + - ["sh -c serf"] + - ["sh -c kill -0 "] + - name: pname_cmdline_prefix + fields: [proc.pname, proc.cmdline] + comps: [in, startswith] + values: + - [[consul], "sh -c 
curl"] + - [[consul], "sh -c nc"] + - [[consul], "sh -c /bin/consul-alerts"] + - [[nginx], "/usr/sbin/nginx -c /etc/nginx/nginx.conf"] + - [[nginx], "sh -c /usr/local/bin/aws s3 cp"] + - [[splunkd], "sh -c /opt/splunkforwarder"] + - [[postgres], "sh -c envdir /etc/wal-e.d/env /usr/local/bin/wal-e"] + - [[beam.smp], "sh -c exec ps"] + - [[beam.smp], "sh -c exec inet_gethost"] + - [[beam.smp], "sh -s unix:cmd"] + - [[beam.smp], "sh -c exec /bin/sh -s unix:cmd 2>&1"] output: > Shell spawned by untrusted binary (user=%user.name user_loginuid=%user.loginuid shell=%proc.name parent=%proc.pname cmdline=%proc.cmdline pcmdline=%proc.pcmdline gparent=%proc.aname[2] ggparent=%proc.aname[3] @@ -1773,27 +1803,6 @@ - list: trusted_images items: [] -# NOTE: This macro is only provided for backwards compatibility with -# older local falco rules files that may have been appending to -# trusted_images. To make customizations, it's better to add containers to -# user_trusted_containers, user_privileged_containers or user_sensitive_mount_containers. -- macro: trusted_containers - condition: (container.image.repository in (trusted_images)) - -# Add conditions to this macro (probably in a separate file, -# overwriting this macro) to specify additional containers that are -# trusted and therefore allowed to run privileged *and* with sensitive -# mounts. -# -# Like trusted_images, this is deprecated in favor of -# user_privileged_containers and user_sensitive_mount_containers and -# is only provided for backwards compatibility. -# -# In this file, it just takes one of the images in trusted_containers -# and repeats it. 
-- macro: user_trusted_containers - condition: (never_true) - - list: sematext_images items: [docker.io/sematext/sematext-agent-docker, docker.io/sematext/agent, docker.io/sematext/logagent, registry.access.redhat.com/sematext/sematext-agent-docker, @@ -1828,14 +1837,6 @@ sematext_images ] -- macro: falco_privileged_containers - condition: (openshift_image or - user_trusted_containers or - container.image.repository in (trusted_images) or - container.image.repository in (falco_privileged_images) or - container.image.repository startswith istio/proxy_ or - container.image.repository startswith quay.io/sysdig/) - # Add conditions to this macro (probably in a separate file, # overwriting this macro) to specify additional containers that are # allowed to run privileged @@ -1864,12 +1865,6 @@ amazon/amazon-ecs-agent ] -- macro: falco_sensitive_mount_containers - condition: (user_trusted_containers or - container.image.repository in (trusted_images) or - container.image.repository in (falco_sensitive_mount_images) or - container.image.repository startswith quay.io/sysdig/) - # These container images are allowed to run with hostnetwork=true - list: falco_hostnetwork_images items: [ @@ -1897,8 +1892,18 @@ condition: > container_started and container and container.privileged=true - and not falco_privileged_containers - and not user_privileged_containers + and not openshift_image + exceptions: + - name: image_repo + fields: container.image.repository + comps: in + values: [trusted_images, falco_privileged_images] + - name: image_repo_prefix + fields: [container.image.repository] + comps: [startswith] + values: + - [istio/proxy_] + - [quay.io/sysdig/] output: Privileged container started (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag) priority: INFO tags: [container, cis, mitre_privilege_escalation, mitre_lateral_movement] @@ -1941,8 +1946,17 @@ condition: > container_started and 
container and sensitive_mount - and not falco_sensitive_mount_containers and not user_sensitive_mount_containers + exceptions: + - name: image_repo + fields: container.image.repository + comps: in + values: [trusted_images, falco_sensitive_mount_images] + - name: image_repo_prefix + fields: [container.image.repository] + comps: [startswith] + values: + - [quay.io/sysdig/] output: Container with sensitive mount started (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag mounts=%container.mounts) priority: INFO tags: [container, cis, mitre_lateral_movement] @@ -1963,6 +1977,10 @@ desc: > Detect the initial process started by a container that is not in a list of allowed containers. condition: container_started and container and not allowed_containers + exceptions: + - name: image_repo + fields: container.image.repository + comps: in output: Container started and not in allowed list (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag) priority: WARNING tags: [container, mitre_lateral_movement] @@ -1978,6 +1996,9 @@ - rule: System user interactive desc: an attempt to run interactive commands by a system (i.e. 
non-login) user condition: spawned_process and system_users and interactive and not user_known_system_user_login + exceptions: + - name: user_proc + fields: [user.name, proc.name] output: "System user ran an interactive command (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline container_id=%container.id image=%container.image.repository)" priority: INFO tags: [users, mitre_remote_access_tools] @@ -1994,6 +2015,10 @@ and shell_procs and proc.tty != 0 and container_entrypoint and not user_expected_terminal_shell_in_container_conditions + exceptions: + - name: proc_name_image_suffix + fields: [proc.name, container.image.repository] + comps: [in, endswith] output: > A shell was spawned in a container with an attached terminal (user=%user.name user_loginuid=%user.loginuid %container.info shell=%proc.name parent=%proc.pname cmdline=%proc.cmdline terminal=%proc.tty container_id=%container.id image=%container.image.repository) @@ -2056,9 +2081,6 @@ - macro: user_shell_container_exclusions condition: (never_true) -- macro: login_doing_dns_lookup - condition: (proc.name=login and fd.l4proto=udp and fd.sport=53) - # sockfamily ip is to exclude certain processes (like 'groups') that communicate on unix-domain sockets # systemd can listen on ports to launch things like sshd on demand - rule: System procs network activity @@ -2066,9 +2088,16 @@ condition: > (fd.sockfamily = ip and (system_procs or proc.name in (shell_binaries))) and (inbound_outbound) - and not proc.name in (known_system_procs_network_activity_binaries) - and not login_doing_dns_lookup and not user_expected_system_procs_network_activity_conditions + exceptions: + - name: proc_names + fields: proc.name + comps: in + values: [known_system_procs_network_activity_binaries] + - name: proc_proto_sport + fields: [proc.name, fd.l4proto, fd.sport] + values: + - [login, udp, 53] output: > Known system binary sent/received network traffic (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline 
connection=%fd.name container_id=%container.id image=%container.image.repository) @@ -2107,6 +2136,10 @@ http_proxy_procs and not allowed_ssh_proxy_env and proc.env icontains HTTP_PROXY + exceptions: + - name: proc_names + fields: proc.name + comps: in output: > Program run with disallowed HTTP_PROXY environment variable (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline env=%proc.env parent=%proc.pname container_id=%container.id image=%container.image.repository) @@ -2130,6 +2163,9 @@ condition: > (inbound and consider_interpreted_inbound and interpreted_procs) + exceptions: + - name: proc_proto_sport + fields: [proc.name, fd.l4proto, fd.sport] output: > Interpreted program received/listened for network traffic (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline connection=%fd.name container_id=%container.id image=%container.image.repository) @@ -2141,6 +2177,9 @@ condition: > (outbound and consider_interpreted_outbound and interpreted_procs) + exceptions: + - name: proc_proto_sport + fields: [proc.name, fd.l4proto, fd.sport] output: > Interpreted program performed outgoing network connection (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline connection=%fd.name container_id=%container.id image=%container.image.repository) @@ -2182,34 +2221,15 @@ - rule: Unexpected UDP Traffic desc: UDP traffic not on port 53 (DNS) or other commonly used ports condition: (inbound_outbound) and do_unexpected_udp_check and fd.l4proto=udp and not expected_udp_traffic + exceptions: + - name: proc_proto_sport + fields: [proc.name, fd.l4proto, fd.sport] output: > Unexpected UDP Traffic Seen (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline connection=%fd.name proto=%fd.l4proto evt=%evt.type %evt.args container_id=%container.id image=%container.image.repository) priority: NOTICE tags: [network, mitre_exfiltration] -# With the current restriction on system calls handled by falco -# (e.g. 
excluding read/write/sendto/recvfrom/etc, this rule won't -# trigger). -# - rule: Ssh error in syslog -# desc: any ssh errors (failed logins, disconnects, ...) sent to syslog -# condition: syslog and ssh_error_message and evt.dir = < -# output: "sshd sent error message to syslog (error=%evt.buffer)" -# priority: WARNING - -- macro: somebody_becoming_themself - condition: ((user.name=nobody and evt.arg.uid=nobody) or - (user.name=www-data and evt.arg.uid=www-data) or - (user.name=_apt and evt.arg.uid=_apt) or - (user.name=postfix and evt.arg.uid=postfix) or - (user.name=pki-agent and evt.arg.uid=pki-agent) or - (user.name=pki-acme and evt.arg.uid=pki-acme) or - (user.name=nfsnobody and evt.arg.uid=nfsnobody) or - (user.name=postgres and evt.arg.uid=postgres)) - -- macro: nrpe_becoming_nagios - condition: (proc.name=nrpe and evt.arg.uid=nagios) - # In containers, the user name might be for a uid that exists in the # container but not on the host. (See # https://github.com/draios/sysdig/issues/954). 
So in that case, allow @@ -2235,13 +2255,34 @@ evt.type=setuid and evt.dir=> and (known_user_in_container or not container) and not user.name=root - and not somebody_becoming_themself - and not proc.name in (known_setuid_binaries, userexec_binaries, mail_binaries, docker_binaries, - nomachine_binaries) and not proc.name startswith "runc:" - and not java_running_sdjagent - and not nrpe_becoming_nagios and not user_known_non_sudo_setuid_conditions + exceptions: + - name: proc_names + fields: proc.name + comps: in + values: [known_setuid_binaries, userexec_binaries, mail_binaries, docker_binaries, + nomachine_binaries] + - name: proc_cmdline + fields: [proc.name, proc.cmdline] + comps: [in, contains] + values: + - [[java], sdjagent.jar] + - name: proc_new_uid + fields: [proc.name, evt.arg.uid] + values: + - [nrpe, nagios] + - name: user_new_uid + fields: [user.name, evt.arg.uid] + values: + - [nobody, nobody] + - [www-data, www-data] + - [_apt, _apt] + - [postfix, postfix] + - [pki-agent, pki-agent] + - [pki-acme, pki-acme] + - [nfsnobody, nfsnobody] + - [postgres, postgres] output: > Unexpected setuid call by non-sudo, non-root program (user=%user.name user_loginuid=%user.loginuid cur_uid=%user.uid parent=%proc.pname command=%proc.cmdline uid=%evt.arg.uid container_id=%container.id image=%container.image.repository) @@ -2259,17 +2300,29 @@ Some innocuous commandlines that don't actually change anything are excluded. 
condition: > spawned_process and proc.name in (user_mgmt_binaries) and - not proc.name in (su, sudo, lastlog, nologin, unix_chkpwd) and not container and - not proc.pname in (cron_binaries, systemd, systemd.postins, udev.postinst, run-parts) and - not proc.cmdline startswith "passwd -S" and - not proc.cmdline startswith "useradd -D" and - not proc.cmdline startswith "systemd --version" and + not container and not run_by_qualys and not run_by_sumologic_securefiles and not run_by_yum and not run_by_ms_oms and not run_by_google_accounts_daemon and not user_known_user_management_activities + exceptions: + - name: proc_names + fields: proc.name + comps: in + values: [su, sudo, lastlog, nologin, unix_chkpwd] + - name: proc_pnames + fields: proc.pname + comps: in + values: [cron_binaries, systemd, systemd.postins, udev.postinst, run-parts] + - name: cmdline_prefixes + fields: [proc.cmdline] + comps: [startswith] + values: + - ["passwd -S"] + - ["useradd -D"] + - ["systemd --version"] output: > User management binary command run outside of container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent=%proc.pname gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4]) @@ -2292,10 +2345,19 @@ condition: > fd.directory = /dev and (evt.type = creat or ((evt.type = open or evt.type = openat) and evt.arg.flags contains O_CREAT)) - and not proc.name in (dev_creation_binaries) - and not fd.name in (allowed_dev_files) and not fd.name startswith /dev/tty and not user_known_create_files_below_dev_activities + exceptions: + - name: proc_names + fields: proc.name + comps: in + values: [dev_creation_binaries] + - name: file_names + fields: fd.name + comps: in + values: [allowed_dev_files] + - name: proc_file + fields: [proc.name, fd.name] output: "File created below /dev by untrusted program (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository)" priority: 
ERROR tags: [filesystem, mitre_persistence] @@ -2318,6 +2380,10 @@ - rule: Contact EC2 Instance Metadata Service From Container desc: Detect attempts to contact the EC2 Instance Metadata Service from a container condition: outbound and fd.sip="169.254.169.254" and container and not ec2_metadata_containers + exceptions: + - name: proc_name_image_suffix + fields: [proc.name, container.image.repository] + comps: [in, endswith] output: Outbound connection to EC2 instance metadata service (command=%proc.cmdline connection=%fd.name %container.info image=%container.image.repository:%container.image.tag) priority: NOTICE tags: [network, aws, container, mitre_discovery] @@ -2337,6 +2403,10 @@ - rule: Contact cloud metadata service from container desc: Detect attempts to contact the Cloud Instance Metadata Service from a container condition: outbound and fd.sip="169.254.169.254" and container and consider_metadata_access and not user_known_metadata_access + exceptions: + - name: proc_name_image_suffix + fields: [proc.name, container.image.repository] + comps: [in, endswith] output: Outbound connection to cloud instance metadata service (command=%proc.cmdline connection=%fd.name %container.info image=%container.image.repository:%container.image.tag) priority: NOTICE tags: [network, container, mitre_discovery] @@ -2368,6 +2438,10 @@ not k8s_containers and k8s_api_server and not user_known_contact_k8s_api_server_activities + exceptions: + - name: proc_name_image_suffix + fields: [proc.name, container.image.repository] + comps: [in, endswith] output: Unexpected connection to K8s API Server from container (command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag connection=%fd.name) priority: NOTICE tags: [network, k8s, container, mitre_discovery] @@ -2384,6 +2458,10 @@ - rule: Unexpected K8s NodePort Connection desc: Detect attempts to use K8s NodePorts from a container condition: (inbound_outbound) and fd.sport >= 30000 and fd.sport <= 32767 
and container and not nodeport_containers + exceptions: + - name: proc_name_image_suffix + fields: [proc.name, container.image.repository] + comps: [in, endswith] output: Unexpected K8s NodePort Connection (command=%proc.cmdline connection=%fd.name container_id=%container.id image=%container.image.repository) priority: NOTICE tags: [network, k8s, container, mitre_port_knocking] @@ -2413,6 +2491,10 @@ and package_mgmt_procs and not package_mgmt_ancestor_procs and not user_known_package_manager_in_container + exceptions: + - name: proc_name_image_suffix + fields: [proc.name, container.image.repository] + comps: [in, endswith] output: > Package management process launched in container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) @@ -2427,6 +2509,10 @@ (proc.name = "ncat" and (proc.args contains "--sh-exec" or proc.args contains "--exec" or proc.args contains "-e " or proc.args contains "-c " or proc.args contains "--lua-exec")) ) + exceptions: + - name: proc_name_image_suffix + fields: [proc.name, container.image.repository] + comps: [in, endswith] output: > Netcat runs inside container that allows remote code execution (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) @@ -2440,6 +2526,10 @@ desc: Detect network tools launched inside container condition: > spawned_process and container and network_tool_procs and not user_known_network_tool_activities + exceptions: + - name: proc_name_image_suffix + fields: [proc.name, container.image.repository] + comps: [in, endswith] output: > Network tool launched in container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent_process=%proc.pname container_id=%container.id container_name=%container.name 
image=%container.image.repository:%container.image.tag) @@ -2460,6 +2550,9 @@ consider_network_tools_on_host and network_tool_procs and not user_known_network_tool_activities + exceptions: + - name: proc_pname + fields: [proc.name, proc.pname] output: > Network tool launched on host (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent_process=%proc.pname) priority: NOTICE @@ -2495,6 +2588,9 @@ ((grep_commands and private_key_or_password) or (proc.name = "find" and (proc.args contains "id_rsa" or proc.args contains "id_dsa"))) ) + exceptions: + - name: proc_pname + fields: [proc.name, proc.pname] output: > Grep private keys or passwords activities found (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline container_id=%container.id container_name=%container.name @@ -2516,20 +2612,25 @@ - macro: allowed_clear_log_files condition: (never_true) -- macro: trusted_logging_images - condition: (container.image.repository endswith "splunk/fluentd-hec" or - container.image.repository endswith "fluent/fluentd-kubernetes-daemonset" or - container.image.repository endswith "openshift3/ose-logging-fluentd" or - container.image.repository endswith "containernetworking/azure-npm") - - rule: Clear Log Activities desc: Detect clearing of critical log files condition: > open_write and access_log_files and evt.arg.flags contains "O_TRUNC" and - not trusted_logging_images and not allowed_clear_log_files + exceptions: + - name: proc_file_prefix + fields: [proc.name, fd.name] + comps: [in, startswith] + - name: image_suffix + fields: [container.image.repository] + comps: [endswith] + values: + - ["splunk/fluentd-hec"] + - ["fluent/fluentd-kubernetes-daemonset"] + - ["openshift3/ose-logging-fluentd"] + - ["containernetworking/azure-npm"] output: > Log files were tampered (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository) priority: @@ -2548,6 +2649,9 @@ - rule: 
Remove Bulk Data from Disk desc: Detect process running to clear bulk data from disk condition: spawned_process and clear_data_procs and not user_known_remove_data_activities + exceptions: + - name: proc_pname + fields: [proc.name, proc.pname] output: > Bulk data has been removed from disk (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository) priority: @@ -2585,8 +2689,15 @@ desc: Detect shell history deletion condition: > (modify_shell_history or truncate_shell_history) and - not var_lib_docker_filepath and - not proc.name in (docker_binaries) + not var_lib_docker_filepath + exceptions: + - name: proc_names + fields: proc.name + comps: in + values: [docker_binaries] + - name: known_shell_history_mods + fields: [proc.name, fd.name] + comps: [=, contains] output: > Shell history had been deleted or renamed (user=%user.name user_loginuid=%user.loginuid type=%evt.type command=%proc.cmdline fd.name=%fd.name name=%evt.arg.name path=%evt.arg.path oldpath=%evt.arg.oldpath %container.info) priority: @@ -2600,6 +2711,12 @@ condition: > ((spawned_process and proc.name in (shred, rm, mv) and proc.args contains "bash_history") or (open_write and fd.name contains "bash_history" and evt.arg.flags contains "O_TRUNC")) + exceptions: + - name: proc_pname + fields: [proc.name, proc.pname] + - name: known_shell_history_deletes + fields: [proc.name, fd.name] + comps: [=, contains] output: > Shell history had been deleted or renamed (user=%user.name user_loginuid=%user.loginuid type=%evt.type command=%proc.cmdline fd.name=%fd.name name=%evt.arg.name path=%evt.arg.path oldpath=%evt.arg.oldpath %container.info) priority: @@ -2625,9 +2742,13 @@ Detect setuid or setgid bits set via chmod condition: > consider_all_chmods and chmod and (evt.arg.mode contains "S_ISUID" or evt.arg.mode contains "S_ISGID") - and not proc.name in (user_known_chmod_applications) and not exe_running_docker_save and not 
user_known_set_setuid_or_setgid_bit_conditions + exceptions: + - name: proc_names + fields: proc.name + comps: in + values: [user_known_chmod_applications] output: > Setuid or setgid bit is set via chmod (fd=%evt.arg.fd filename=%evt.arg.filename mode=%evt.arg.mode user=%user.name user_loginuid=%user.loginuid process=%proc.name command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) @@ -2653,6 +2774,10 @@ (open_write and evt.arg.flags contains "O_CREAT" and fd.name contains "/." and not fd.name pmatch (exclude_hidden_directories))) and consider_hidden_file_creation and not user_known_create_hidden_file_activities + exceptions: + - name: proc_file_prefix + fields: [proc.name, fd.name] + comps: [in, startswith] output: > Hidden file or directory created (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline file=%fd.name newpath=%evt.arg.newpath container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) @@ -2670,6 +2795,9 @@ desc: Detect remote file copy tools launched in container condition: > spawned_process and container and remote_file_copy_procs + exceptions: + - name: proc_name_parent_name + fields: [proc.name, proc.pname] output: > Remote file copy tool launched in container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent_process=%proc.pname container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) @@ -2681,6 +2809,10 @@ condition: > create_symlink and (evt.arg.target in (sensitive_file_names) or evt.arg.target in (sensitive_directory_names)) + exceptions: + - name: proc_file_prefix + fields: [proc.name, evt.arg.target] + comps: [in, startswith] output: > Symlinks created over senstivie files (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline target=%evt.arg.target linkpath=%evt.arg.linkpath 
parent_process=%proc.pname) priority: NOTICE @@ -2777,6 +2909,9 @@ - rule: Detect outbound connections to common miner pool ports desc: Miners typically connect to miner pools on common ports. condition: net_miner_pool and not trusted_images_query_miner_domain_dns + exceptions: + - name: proc_sport_sipname + fields: [proc.name, fd.sport, fd.sip.name] enabled: false output: Outbound connection to IP/Port flagged by cryptoioc.ch (command=%proc.cmdline port=%fd.rport ip=%fd.rip container=%container.info image=%container.image.repository) priority: CRITICAL @@ -2785,6 +2920,10 @@ - rule: Detect crypto miners using the Stratum protocol desc: Miners typically specify the mining pool to connect to with a URI that begins with 'stratum+tcp' condition: spawned_process and proc.cmdline contains "stratum+tcp" + exceptions: + - name: proc_pname_parent_cmdline + fields: [proc.pname, proc.cmdline] + comps: [=, contains] output: Possible miner running (command=%proc.cmdline container=%container.info image=%container.image.repository) priority: CRITICAL tags: [process, mitre_execution] @@ -2805,6 +2944,10 @@ - rule: The docker client is executed in a container desc: Detect a k8s client tool executed inside a container condition: spawned_process and container and not user_known_k8s_client_container_parens and proc.name in (k8s_client_binaries) + exceptions: + - name: image_suffix + fields: [container.image.repository] + comps: [endswith] output: "Docker or kubernetes client executed in container (user=%user.name user_loginuid=%user.loginuid %container.info parent=%proc.pname cmdline=%proc.cmdline image=%container.image.repository:%container.image.tag)" priority: WARNING tags: [container, mitre_execution] @@ -2821,6 +2964,10 @@ - rule: Packet socket created in container desc: Detect new packet socket at the device driver (OSI Layer 2) level in a container. Packet socket could be used for ARP Spoofing and privilege escalation(CVE-2020-14386) by attacker. 
condition: evt.type=socket and evt.arg[0]=AF_PACKET and consider_packet_socket_communication and container and not proc.name in (user_known_packet_socket_binaries) + exceptions: + - name: proc_name_image_suffix + fields: [proc.name, container.image.repository] + comps: [in, endswith] output: Packet socket was created in a container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline socket_info=%evt.args container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) priority: NOTICE tags: [network, mitre_discovery] @@ -2858,6 +3005,9 @@ container and not network_local_subnet and k8s.ns.name in (namespace_scope_network_only_subnet) + exceptions: + - name: proc_proto_sport + fields: [proc.name, fd.l4proto, fd.sport] output: > Network connection outside local subnet (command=%proc.cmdline connection=%fd.name user=%user.name user_loginuid=%user.loginuid container_id=%container.id @@ -2895,9 +3045,18 @@ allowed_port and inbound_outbound and container and - container.image.repository in (allowed_image) and - not proc.name in (authorized_server_binary) and - not fd.sport in (authorized_server_port) + container.image.repository in (allowed_image) + exceptions: + - name: proc_names + fields: proc.name + comps: in + values: [authorized_server_binary] + - name: fd_sports + fields: fd.sport + comps: in + values: [authorized_server_port] + - name: proc_proto_sport + fields: [proc.name, fd.l4proto, fd.sport] output: > Network connection outside authorized port and binary (command=%proc.cmdline connection=%fd.name user=%user.name user_loginuid=%user.loginuid container_id=%container.id @@ -2911,6 +3070,10 @@ - rule: Redirect STDOUT/STDIN to Network Connection in Container desc: Detect redirecting stdout/stdin to network connection in container (potential reverse shell). 
condition: evt.type=dup and evt.dir=> and container and fd.num in (0, 1, 2) and fd.type in ("ipv4", "ipv6") and not user_known_stand_streams_redirect_activities + exceptions: + - name: proc_name_image_suffix + fields: [proc.name, container.image.repository] + comps: [in, endswith] output: > Redirect stdout/stdin to network connection (user=%user.name user_loginuid=%user.loginuid %container.info process=%proc.name parent=%proc.pname cmdline=%proc.cmdline terminal=%proc.tty container_id=%container.id image=%container.image.repository fd.name=%fd.name fd.num=%fd.num fd.type=%fd.type fd.sip=%fd.sip) priority: WARNING @@ -2934,13 +3097,21 @@ chmod and consider_all_chmods and container and - not runc_writing_exec_fifo and not runc_writing_var_lib_docker and not user_known_container_drift_activities and evt.rawres>=0 and ((evt.arg.mode contains "S_IXUSR") or (evt.arg.mode contains "S_IXGRP") or (evt.arg.mode contains "S_IXOTH")) + exceptions: + - name: proc_name_image_suffix + fields: [proc.name, container.image.repository] + comps: [in, endswith] + - name: cmdline_file + fields: [proc.cmdline, fd.name] + comps: [in, in] + values: + - [["runc:[1:CHILD] init"], [/exec.fifo]] output: Drift detected (chmod), new executable created in a container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline filename=%evt.arg.filename name=%evt.arg.name mode=%evt.arg.mode event=%evt.type) priority: ERROR @@ -2953,10 +3124,18 @@ evt.type in (open,openat,creat) and evt.is_open_exec=true and container and - not runc_writing_exec_fifo and not runc_writing_var_lib_docker and not user_known_container_drift_activities and evt.rawres>=0 + exceptions: + - name: proc_name_image_suffix + fields: [proc.name, container.image.repository] + comps: [in, endswith] + - name: cmdline_file + fields: [proc.cmdline, fd.name] + comps: [in, in] + values: + - [["runc:[1:CHILD] init"], [/exec.fifo]] output: Drift detected (open+create), new executable created in a container (user=%user.name 
user_loginuid=%user.loginuid command=%proc.cmdline filename=%evt.arg.filename name=%evt.arg.name mode=%evt.arg.mode event=%evt.type) priority: ERROR diff --git a/rules/k8s_audit_rules.yaml b/rules/k8s_audit_rules.yaml index 67e1b327a67..6cbb2eab468 100644 --- a/rules/k8s_audit_rules.yaml +++ b/rules/k8s_audit_rules.yaml @@ -54,7 +54,12 @@ - rule: Disallowed K8s User desc: Detect any k8s operation by users outside of an allowed set of users. - condition: kevt and non_system_user and not ka.user.name in (allowed_k8s_users) + condition: kevt and non_system_user + exceptions: + - name: user_names + fields: ka.user.name + comps: in + values: [allowed_k8s_users] output: K8s Operation performed by user not in allowed list of users (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code) priority: WARNING source: k8s_audit @@ -121,6 +126,10 @@ desc: > Detect an attempt to start a pod with a container image outside of a list of allowed images. 
condition: kevt and pod and kcreate and not allowed_k8s_containers + exceptions: + - name: image_repos + fields: ka.req.pod.containers.image.repository + comps: in output: Pod started with container not in allowed list (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image) priority: WARNING source: k8s_audit @@ -129,7 +138,12 @@ - rule: Create Privileged Pod desc: > Detect an attempt to start a pod with a privileged container - condition: kevt and pod and kcreate and ka.req.pod.containers.privileged intersects (true) and not ka.req.pod.containers.image.repository in (falco_privileged_images) + condition: kevt and pod and kcreate and ka.req.pod.containers.privileged intersects (true) + exceptions: + - name: image_repos + fields: ka.req.pod.containers.image.repository + comps: in + values: [falco_privileged_images] output: Pod started with privileged container (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image) priority: WARNING source: k8s_audit @@ -143,7 +157,12 @@ desc: > Detect an attempt to start a pod with a volume from a sensitive host directory (i.e. /proc). Exceptions are made for known trusted images. - condition: kevt and pod and kcreate and sensitive_vol_mount and not ka.req.pod.containers.image.repository in (falco_sensitive_mount_images) + condition: kevt and pod and kcreate and sensitive_vol_mount + exceptions: + - name: image_repos + fields: ka.req.pod.containers.image.repository + comps: in + values: [falco_sensitive_mount_images] output: Pod started with sensitive mount (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image volumes=%jevt.value[/requestObject/spec/volumes]) priority: WARNING source: k8s_audit @@ -152,7 +171,12 @@ # Corresponds to K8s CIS Benchmark 1.7.4 - rule: Create HostNetwork Pod desc: Detect an attempt to start a pod using the host network. 
- condition: kevt and pod and kcreate and ka.req.pod.host_network intersects (true) and not ka.req.pod.containers.image.repository in (falco_hostnetwork_images) + condition: kevt and pod and kcreate and ka.req.pod.host_network intersects (true) + exceptions: + - name: image_repos + fields: ka.req.pod.containers.image.repository + comps: in + values: [falco_hostnetwork_images] output: Pod started using host network (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image) priority: WARNING source: k8s_audit @@ -165,6 +189,9 @@ desc: > Detect an attempt to start a service with a NodePort service type condition: kevt and service and kcreate and ka.req.service.type=NodePort and not user_known_node_port_service + exceptions: + - name: services + fields: [ka.target.namespace, ka.target.name] output: NodePort Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace ports=%ka.req.service.ports) priority: WARNING source: k8s_audit @@ -183,6 +210,9 @@ desc: > Detect creating/modifying a configmap containing a private credential (aws key, password, etc.) 
condition: kevt and configmap and kmodify and contains_private_credentials + exceptions: + - name: configmaps + fields: [ka.target.namespace, ka.req.configmap.name] output: K8s configmap with private credential (user=%ka.user.name verb=%ka.verb configmap=%ka.req.configmap.name config=%ka.req.configmap.obj) priority: WARNING source: k8s_audit @@ -193,6 +223,10 @@ desc: > Detect any request made by the anonymous user that was allowed condition: kevt and ka.user.name=system:anonymous and ka.auth.decision="allow" and not health_endpoint + exceptions: + - name: user_names + fields: ka.user.name + comps: in output: Request by anonymous user allowed (user=%ka.user.name verb=%ka.verb uri=%ka.uri reason=%ka.auth.reason)) priority: WARNING source: k8s_audit @@ -206,6 +240,10 @@ # events to be stateful, so it could know if a container named in an # attach request was created privileged or not. For now, we have a # less severe rule that detects attaches/execs to any pod. +# +# For the same reason, you can't use things like image names/prefixes, +# as the event that creates the pod (which has the images) is a +# separate event from the actual exec/attach to the pod. - macro: user_known_exec_pod_activities condition: (k8s_audit_never_true) @@ -214,6 +252,10 @@ desc: > Detect any attempt to attach/exec to a pod condition: kevt_started and pod_subresource and kcreate and ka.target.subresource in (exec,attach) and not user_known_exec_pod_activities + exceptions: + - name: user_names + fields: ka.user.name + comps: in output: Attach/Exec to pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace action=%ka.target.subresource command=%ka.uri.param[command]) priority: NOTICE source: k8s_audit @@ -223,10 +265,14 @@ condition: (k8s_audit_never_true) # Only works when feature gate EphemeralContainers is enabled +# Defining empty exceptions just to avoid warnings. There isn't any +# great exception for this kind of object, as you'd expect the images +# to vary wildly.
- rule: EphemeralContainers Created desc: > Detect any ephemeral container created condition: kevt and pod_subresource and kmodify and ka.target.subresource in (ephemeralcontainers) and not user_known_pod_debug_activities + exceptions: output: Ephemeral container is created in pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace ephemeral_container_name=%jevt.value[/requestObject/ephemeralContainers/0/name] ephemeral_container_image=%jevt.value[/requestObject/ephemeralContainers/0/image]) priority: NOTICE source: k8s_audit @@ -238,7 +284,12 @@ - rule: Create Disallowed Namespace desc: Detect any attempt to create a namespace outside of a set of known namespaces - condition: kevt and namespace and kcreate and not ka.target.name in (allowed_namespaces) + condition: kevt and namespace and kcreate + exceptions: + - name: services + fields: ka.target.name + comps: in + values: [allowed_namespaces] output: Disallowed namespace created (user=%ka.user.name ns=%ka.target.name) priority: WARNING source: k8s_audit @@ -278,15 +329,16 @@ k8s_image_list ] -- macro: allowed_kube_namespace_pods - condition: (ka.req.pod.containers.image.repository in (user_allowed_kube_namespace_image_list) or - ka.req.pod.containers.image.repository in (allowed_kube_namespace_image_list)) - # Detect any new pod created in the kube-system namespace - rule: Pod Created in Kube Namespace desc: Detect any attempt to create a pod in the kube-system or kube-public namespaces - condition: kevt and pod and kcreate and ka.target.namespace in (kube-system, kube-public) and not allowed_kube_namespace_pods + condition: kevt and pod and kcreate and ka.target.namespace in (kube-system, kube-public) output: Pod created in kube namespace (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image) + exceptions: + - name: images + fields: ka.req.pod.containers.image.repository + comps: in + values: [user_allowed_kube_namespace_image_list, 
allowed_kube_namespace_image_list] priority: WARNING source: k8s_audit tags: [k8s] @@ -301,6 +353,9 @@ - rule: Service Account Created in Kube Namespace desc: Detect any attempt to create a serviceaccount in the kube-system or kube-public namespaces condition: kevt and serviceaccount and kcreate and ka.target.namespace in (kube-system, kube-public) and response_successful and not trusted_sa + exceptions: + - name: accounts + fields: [ka.target.namespace, ka.target.name] output: Service account created in kube namespace (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace) priority: WARNING source: k8s_audit @@ -313,6 +368,9 @@ desc: Detect any attempt to modify/delete a ClusterRole/Role starting with system condition: kevt and (role or clusterrole) and (kmodify or kdelete) and (ka.target.name startswith "system:") and not ka.target.name in (system:coredns, system:managed-certificate-controller) + exceptions: + - name: roles + fields: [ka.target.namespace, ka.target.name] output: System ClusterRole/Role modified or deleted (user=%ka.user.name role=%ka.target.name ns=%ka.target.namespace action=%ka.verb) priority: WARNING source: k8s_audit @@ -323,6 +381,10 @@ - rule: Attach to cluster-admin Role desc: Detect any attempt to create a ClusterRoleBinding to the cluster-admin user condition: kevt and clusterrolebinding and kcreate and ka.req.binding.role=cluster-admin + exceptions: + - name: subjects + fields: ka.req.binding.subjects + comps: in output: Cluster Role Binding to cluster-admin role (user=%ka.user.name subject=%ka.req.binding.subjects) priority: WARNING source: k8s_audit @@ -331,6 +393,10 @@ - rule: ClusterRole With Wildcard Created desc: Detect any attempt to create a Role/ClusterRole with wildcard resources or verbs condition: kevt and (role or clusterrole) and kcreate and (ka.req.role.rules.resources intersects ("*") or ka.req.role.rules.verbs intersects ("*")) + exceptions: + - name: roles + fields: ka.target.name + comps: in 
output: Created Role/ClusterRole with wildcard (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules) priority: WARNING source: k8s_audit @@ -343,6 +409,10 @@ - rule: ClusterRole With Write Privileges Created desc: Detect any attempt to create a Role/ClusterRole that can perform write-related actions condition: kevt and (role or clusterrole) and kcreate and writable_verbs + exceptions: + - name: roles + fields: ka.target.name + comps: in output: Created Role/ClusterRole with write privileges (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules) priority: NOTICE source: k8s_audit @@ -351,6 +421,10 @@ - rule: ClusterRole With Pod Exec Created desc: Detect any attempt to create a Role/ClusterRole that can exec to pods condition: kevt and (role or clusterrole) and kcreate and ka.req.role.rules.resources intersects ("pods/exec") + exceptions: + - name: roles + fields: ka.target.name + comps: in output: Created Role/ClusterRole with pod exec privileges (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules) priority: WARNING source: k8s_audit @@ -362,12 +436,16 @@ - macro: consider_activity_events condition: (k8s_audit_always_true) +# Activity events don't have exceptions. They do define an empty +# exceptions property just to avoid warnings when loading rules. 
+ - macro: kactivity condition: (kevt and consider_activity_events) - rule: K8s Deployment Created desc: Detect any attempt to create a deployment condition: (kactivity and kcreate and deployment and response_successful) + exceptions: output: K8s Deployment Created (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ -376,6 +454,7 @@ - rule: K8s Deployment Deleted desc: Detect any attempt to delete a deployment condition: (kactivity and kdelete and deployment and response_successful) + exceptions: output: K8s Deployment Deleted (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ -384,6 +463,7 @@ - rule: K8s Service Created desc: Detect any attempt to create a service condition: (kactivity and kcreate and service and response_successful) + exceptions: output: K8s Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ -392,6 +472,7 @@ - rule: K8s Service Deleted desc: Detect any attempt to delete a service condition: (kactivity and kdelete and service and response_successful) + exceptions: output: K8s Service Deleted (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ -400,6 +481,7 @@ - rule: K8s ConfigMap Created desc: Detect any attempt to create a configmap condition: (kactivity and kcreate and configmap and response_successful) + exceptions: output: K8s ConfigMap Created (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ 
-408,6 +490,7 @@ - rule: K8s ConfigMap Deleted desc: Detect any attempt to delete a configmap condition: (kactivity and kdelete and configmap and response_successful) + exceptions: output: K8s ConfigMap Deleted (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ -416,6 +499,7 @@ - rule: K8s Namespace Created desc: Detect any attempt to create a namespace condition: (kactivity and kcreate and namespace and response_successful) + exceptions: output: K8s Namespace Created (user=%ka.user.name namespace=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ -424,6 +508,7 @@ - rule: K8s Namespace Deleted desc: Detect any attempt to delete a namespace condition: (kactivity and non_system_user and kdelete and namespace and response_successful) + exceptions: output: K8s Namespace Deleted (user=%ka.user.name namespace=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ -432,6 +517,7 @@ - rule: K8s Serviceaccount Created desc: Detect any attempt to create a service account condition: (kactivity and kcreate and serviceaccount and response_successful) + exceptions: output: K8s Serviceaccount Created (user=%ka.user.name user=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ -440,6 +526,7 @@ - rule: K8s Serviceaccount Deleted desc: Detect any attempt to delete a service account condition: (kactivity and kdelete and serviceaccount and response_successful) + exceptions: output: K8s Serviceaccount Deleted (user=%ka.user.name user=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ -448,6 +535,7 @@ - rule: K8s 
Role/Clusterrole Created desc: Detect any attempt to create a cluster role/role condition: (kactivity and kcreate and (clusterrole or role) and response_successful) + exceptions: output: K8s Cluster Role Created (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ -456,6 +544,7 @@ - rule: K8s Role/Clusterrole Deleted desc: Detect any attempt to delete a cluster role/role condition: (kactivity and kdelete and (clusterrole or role) and response_successful) + exceptions: output: K8s Cluster Role Deleted (user=%ka.user.name role=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ -464,6 +553,7 @@ - rule: K8s Role/Clusterrolebinding Created desc: Detect any attempt to create a clusterrolebinding condition: (kactivity and kcreate and clusterrolebinding and response_successful) + exceptions: output: K8s Cluster Role Binding Created (user=%ka.user.name binding=%ka.target.name subjects=%ka.req.binding.subjects role=%ka.req.binding.role resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ -472,6 +562,7 @@ - rule: K8s Role/Clusterrolebinding Deleted desc: Detect any attempt to delete a clusterrolebinding condition: (kactivity and kdelete and clusterrolebinding and response_successful) + exceptions: output: K8s Cluster Role Binding Deleted (user=%ka.user.name binding=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ -480,6 +571,7 @@ - rule: K8s Secret Created desc: Detect any attempt to create a secret. Service account tokens are excluded. 
condition: (kactivity and kcreate and secret and ka.target.namespace!=kube-system and non_system_user and response_successful) + exceptions: output: K8s Secret Created (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ -488,6 +580,7 @@ - rule: K8s Secret Deleted desc: Detect any attempt to delete a secret Service account tokens are excluded. condition: (kactivity and kdelete and secret and ka.target.namespace!=kube-system and non_system_user and response_successful) + exceptions: output: K8s Secret Deleted (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) priority: INFO source: k8s_audit @@ -506,6 +599,7 @@ - rule: All K8s Audit Events desc: Match all K8s Audit Events condition: kall + exceptions: output: K8s Audit Event received (user=%ka.user.name verb=%ka.verb uri=%ka.uri obj=%jevt.obj) priority: DEBUG source: k8s_audit @@ -520,11 +614,11 @@ - list: full_admin_k8s_users items: ["admin", "kubernetes-admin", "kubernetes-admin@kubernetes", "kubernetes-admin@cluster.local", "minikube-user"] -# This rules detect an operation triggered by an user name that is -# included in the list of those that are default administrators upon -# cluster creation. This may signify a permission setting too broader. -# As we can't check for role of the user on a general ka.* event, this -# may or may not be an administrator. Customize the full_admin_k8s_users +# These rules detect an operation triggered by a user name that is +# included in the list of those that are default administrators upon +# cluster creation. This may signify a permission setting that is too broad. +# As we can't check the role of the user on a general ka.* event, this +# may or may not be an administrator. Customize the full_admin_k8s_users # list to your needs, and activate at your discrection.
# # How to test: @@ -534,10 +628,14 @@ - rule: Full K8s Administrative Access desc: Detect any k8s operation by a user name that may be an administrator with full access. condition: > - kevt - and non_system_user - and ka.user.name in (admin_k8s_users) + kevt + and non_system_user + and ka.user.name in (full_admin_k8s_users) and not allowed_full_admin_users + exceptions: + - name: user_names + fields: ka.user.name + comps: in output: K8s Operation performed by full admin user (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code) priority: WARNING source: k8s_audit @@ -571,10 +669,13 @@ desc: Detect any attempt to create an ingress without TLS certification. condition: > (kactivity and kcreate and ingress and response_successful and not ingress_tls) + exceptions: + - name: ingresses + fields: [ka.target.namespace, ka.target.name] output: > K8s Ingress Without TLS Cert Created (user=%ka.user.name ingress=%ka.target.name namespace=%ka.target.namespace) - source: k8s_audit + source: k8s_audit priority: WARNING tags: [k8s, network] @@ -597,11 +698,15 @@ desc: > Detect a node successfully joined the cluster outside of the list of allowed nodes. condition: > - kevt and node - and kcreate - and response_successful - and not allow_all_k8s_nodes - and not ka.target.name in (allowed_k8s_nodes) + kevt and node + and kcreate + and response_successful + and not allow_all_k8s_nodes + exceptions: + - name: nodes + fields: ka.target.name + comps: in + values: [allowed_k8s_nodes] output: Node not in allowed list successfully joined the cluster (user=%ka.user.name node=%ka.target.name) priority: ERROR source: k8s_audit @@ -611,11 +716,15 @@ desc: > Detect an unsuccessful attempt to join the cluster for a node not in the list of allowed nodes. 
condition: > - kevt and node - and kcreate - and not response_successful - and not allow_all_k8s_nodes - and not ka.target.name in (allowed_k8s_nodes) + kevt and node + and kcreate + and not response_successful + and not allow_all_k8s_nodes + exceptions: + - name: nodes + fields: ka.target.name + comps: in + values: [allowed_k8s_nodes] output: Node not in allowed list tried unsuccessfully to join the cluster (user=%ka.user.name node=%ka.target.name reason=%ka.response.reason) priority: WARNING source: k8s_audit