diff --git a/patch/0000-Linux-Kernel-Runtime-Guard.patch b/patch/0000-Linux-Kernel-Runtime-Guard.patch new file mode 100644 index 000000000..7e5e304eb --- /dev/null +++ b/patch/0000-Linux-Kernel-Runtime-Guard.patch @@ -0,0 +1,20597 @@ +commit b3b3365cf3d6fea8f25456fa902859ab82b2203a +Author: RageLtMan +Date: Mon Apr 19 00:01:35 2021 -0400 + + LKRG in-tree @ b913995b + +diff --git a/security/Kconfig b/security/Kconfig +index a921713b76ec..5c9092000247 100644 +--- a/security/Kconfig ++++ b/security/Kconfig +@@ -289,6 +289,7 @@ source security/loadpin/Kconfig + source security/yama/Kconfig + + source security/integrity/Kconfig ++source security/lkrg/Kconfig + + choice + prompt "Default security module" +diff --git a/security/Makefile b/security/Makefile +index 4d2d3782ddef..57a930e8767c 100644 +--- a/security/Makefile ++++ b/security/Makefile +@@ -30,3 +30,7 @@ obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o + # Object integrity file lists + subdir-$(CONFIG_INTEGRITY) += integrity + obj-$(CONFIG_INTEGRITY) += integrity/ ++ ++# LKRG file list ++subdir-$(CONFIG_SECURITY_LKRG) += lkrg ++obj-$(CONFIG_SECURITY_LKRG) += lkrg/ +diff --git a/security/lkrg/Kconfig b/security/lkrg/Kconfig +new file mode 100644 +index 000000000000..afbbab098977 +--- /dev/null ++++ b/security/lkrg/Kconfig +@@ -0,0 +1,10 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++config SECURITY_LKRG ++ tristate "LKRG support" ++ depends on SECURITY && KPROBES && MODULE_UNLOAD && KALLSYMS_ALL ++ default m ++ help ++ This selects LKRG - Linux Kernel Runtime Guard, which provides ++ integrity validation and anti-exploitation functions. ++ ++ If you are unsure how to answer this question, answer M. 
+diff --git a/security/lkrg/Makefile b/security/lkrg/Makefile +new file mode 100644 +index 000000000000..8cf5dbdc5caf +--- /dev/null ++++ b/security/lkrg/Makefile +@@ -0,0 +1,68 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++ ++obj-$(CONFIG_SECURITY_LKRG) := p_lkrg.o ++p_lkrg-objs += modules/ksyms/p_resolve_ksym.o \ ++ modules/hashing/p_lkrg_fast_hash.o \ ++ modules/comm_channel/p_comm_channel.o \ ++ modules/integrity_timer/p_integrity_timer.o \ ++ modules/kmod/p_kmod.o \ ++ modules/database/CPU.o \ ++ modules/database/arch/x86/p_x86_metadata.o \ ++ modules/database/arch/x86/p_switch_idt/p_switch_idt.o \ ++ modules/database/arch/arm64/p_arm64_metadata.o \ ++ modules/database/arch/arm/p_arm_metadata.o \ ++ modules/database/arch/p_arch_metadata.o \ ++ modules/database/JUMP_LABEL/p_arch_jump_label_transform/p_arch_jump_label_transform.o \ ++ modules/database/JUMP_LABEL/p_arch_jump_label_transform_apply/p_arch_jump_label_transform_apply.o \ ++ modules/database/FTRACE/p_ftrace_modify_all_code/p_ftrace_modify_all_code.o \ ++ modules/database/FTRACE/p_ftrace_enable_sysctl/p_ftrace_enable_sysctl.o \ ++ modules/database/p_database.o \ ++ modules/notifiers/p_notifiers.o \ ++ modules/self-defense/hiding/p_hiding.o \ ++ modules/exploit_detection/p_rb_ed_trees/p_rb_ed_pids/p_rb_ed_pids_tree.o \ ++ modules/exploit_detection/syscalls/p_install.o \ ++ modules/exploit_detection/syscalls/exec/p_security_bprm_committing_creds/p_security_bprm_committing_creds.o \ ++ modules/exploit_detection/syscalls/exec/p_security_bprm_committed_creds/p_security_bprm_committed_creds.o \ ++ modules/exploit_detection/syscalls/p_call_usermodehelper/p_call_usermodehelper.o \ ++ modules/exploit_detection/syscalls/p_call_usermodehelper_exec/p_call_usermodehelper_exec.o \ ++ modules/exploit_detection/syscalls/p_do_exit/p_do_exit.o \ ++ modules/exploit_detection/syscalls/p_wake_up_new_task/p_wake_up_new_task.o \ ++ modules/exploit_detection/syscalls/p_sys_setuid/p_sys_setuid.o \ ++ 
modules/exploit_detection/syscalls/p_sys_setreuid/p_sys_setreuid.o \ ++ modules/exploit_detection/syscalls/p_sys_setresuid/p_sys_setresuid.o \ ++ modules/exploit_detection/syscalls/p_sys_setfsuid/p_sys_setfsuid.o \ ++ modules/exploit_detection/syscalls/p_sys_setgid/p_sys_setgid.o \ ++ modules/exploit_detection/syscalls/p_sys_setregid/p_sys_setregid.o \ ++ modules/exploit_detection/syscalls/p_sys_setresgid/p_sys_setresgid.o \ ++ modules/exploit_detection/syscalls/p_sys_setfsgid/p_sys_setfsgid.o \ ++ modules/exploit_detection/syscalls/p_set_current_groups/p_set_current_groups.o \ ++ modules/exploit_detection/syscalls/p_generic_permission/p_generic_permission.o \ ++ modules/exploit_detection/syscalls/p_sel_write_enforce/p_sel_write_enforce.o \ ++ modules/exploit_detection/syscalls/p_seccomp/p_seccomp.o \ ++ modules/exploit_detection/syscalls/p_sys_unshare/p_sys_unshare.o \ ++ modules/exploit_detection/syscalls/p_sys_setns/p_sys_setns.o \ ++ modules/exploit_detection/syscalls/caps/p_sys_capset/p_sys_capset.o \ ++ modules/exploit_detection/syscalls/caps/p_cap_task_prctl/p_cap_task_prctl.o \ ++ modules/exploit_detection/syscalls/keyring/p_key_change_session_keyring/p_key_change_session_keyring.o \ ++ modules/exploit_detection/syscalls/keyring/p_sys_add_key/p_sys_add_key.o \ ++ modules/exploit_detection/syscalls/keyring/p_sys_request_key/p_sys_request_key.o \ ++ modules/exploit_detection/syscalls/keyring/p_sys_keyctl/p_sys_keyctl.o \ ++ modules/exploit_detection/syscalls/p_security_ptrace_access/p_security_ptrace_access.o \ ++ modules/exploit_detection/syscalls/compat/p_compat_sys_keyctl/p_compat_sys_keyctl.o \ ++ modules/exploit_detection/syscalls/compat/p_compat_sys_capset/p_compat_sys_capset.o \ ++ modules/exploit_detection/syscalls/compat/p_compat_sys_add_key/p_compat_sys_add_key.o \ ++ modules/exploit_detection/syscalls/compat/p_compat_sys_request_key/p_compat_sys_request_key.o \ ++ modules/exploit_detection/syscalls/__x32/p_x32_sys_keyctl/p_x32_sys_keyctl.o \ ++ 
modules/exploit_detection/syscalls/override/p_override_creds/p_override_creds.o \ ++ modules/exploit_detection/syscalls/override/p_revert_creds/p_revert_creds.o \ ++ modules/exploit_detection/syscalls/override/overlayfs/p_ovl_create_or_link/p_ovl_create_or_link.o \ ++ modules/exploit_detection/syscalls/pCFI/p_mark_inode_dirty/p_mark_inode_dirty.o \ ++ modules/exploit_detection/syscalls/pCFI/p_schedule/p_schedule.o \ ++ modules/exploit_detection/syscalls/pCFI/p___queue_work/p___queue_work.o \ ++ modules/exploit_detection/syscalls/pCFI/p_lookup_fast/p_lookup_fast.o \ ++ modules/exploit_detection/syscalls/p_capable/p_capable.o \ ++ modules/exploit_detection/syscalls/p_scm_send/p_scm_send.o \ ++ modules/exploit_detection/p_selinux_state.o \ ++ modules/exploit_detection/p_exploit_detection.o \ ++ p_lkrg_main.o ++ +diff --git a/security/lkrg/modules/comm_channel/p_comm_channel.c b/security/lkrg/modules/comm_channel/p_comm_channel.c +new file mode 100644 +index 000000000000..cdb455ae54c9 +--- /dev/null ++++ b/security/lkrg/modules/comm_channel/p_comm_channel.c +@@ -0,0 +1,1320 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Communication channel - sysctl interface ++ * ++ * Notes: ++ * - Allow administrator of the system to interact with LKRG via sysctl interface ++ * ++ * Timeline: ++ * - Created: 26.X.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../p_lkrg_main.h" ++ ++ ++static struct ctl_table_header *p_sysctl_handle; ++ ++static int p_kint_validate_min = 0; ++static int p_kint_validate_max = 3; ++ ++static int p_kint_enforce_min = 0; ++static int p_kint_enforce_max = 2; ++ ++static int p_pint_validate_min = 0; ++static int p_pint_validate_max = 3; ++ ++static int p_pint_enforce_min = 0; ++static int p_pint_enforce_max = 2; ++ ++static int p_interval_min = 5; ++static int p_interval_max = 1800; ++ ++static int p_log_level_min = P_LOG_LEVEL_NONE; ++static int p_log_level_max = 
P_LOG_LEVEL_MAX - 1; ++ ++static int p_block_module_min = 0; ++static int p_block_module_max = 1; ++ ++static int p_trigger_min = 0; ++static int p_trigger_max = 1; ++ ++#ifdef P_LKRG_UNHIDE ++static int p_hide_lkrg_min = 0; ++static int p_hide_lkrg_max = 1; ++#endif ++ ++static int p_heartbeat_min = 0; ++static int p_heartbeat_max = 1; ++ ++#if defined(CONFIG_X86) ++static int p_smep_validate_min = 0; ++static int p_smep_validate_max = 1; ++ ++static int p_smep_enforce_min = 0; ++static int p_smep_enforce_max = 2; ++ ++static int p_smap_validate_min = 0; ++static int p_smap_validate_max = 1; ++ ++static int p_smap_enforce_min = 0; ++static int p_smap_enforce_max = 2; ++#endif ++ ++static int p_umh_validate_min = 0; ++static int p_umh_validate_max = 2; ++ ++static int p_umh_enforce_min = 0; ++static int p_umh_enforce_max = 2; ++ ++static int p_msr_validate_min = 0; ++static int p_msr_validate_max = 1; ++ ++static int p_pcfi_validate_min = 0; ++static int p_pcfi_validate_max = 2; ++ ++static int p_pcfi_enforce_min = 0; ++static int p_pcfi_enforce_max = 2; ++ ++/* Profiles */ ++static int p_profile_validate_min = 0; ++static int p_profile_validate_max = 9; ++ ++static int p_profile_enforce_min = 0; ++static int p_profile_enforce_max = 9; ++ ++ ++static int p_sysctl_kint_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int p_sysctl_kint_enforce(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int p_sysctl_pint_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int p_sysctl_pint_enforce(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int p_sysctl_interval(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int p_sysctl_block_modules(struct ctl_table *p_table, int p_write, ++ 
void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int p_sysctl_log_level(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int p_sysctl_trigger(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++#ifdef P_LKRG_UNHIDE ++static int p_sysctl_hide(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++#endif ++static int p_sysctl_heartbeat(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++#if defined(CONFIG_X86) ++static int p_sysctl_smep_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int p_sysctl_smep_enforce(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int p_sysctl_smap_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int p_sysctl_smap_enforce(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++#endif ++static int p_sysctl_umh_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int p_sysctl_umh_enforce(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int p_sysctl_msr_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int p_sysctl_pcfi_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int p_sysctl_pcfi_enforce(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int p_sysctl_profile_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++static int 
p_sysctl_profile_enforce(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos); ++ ++ ++struct ctl_table p_lkrg_sysctl_base[] = { ++ { ++ .procname = "lkrg", ++ .mode = 0600, ++ .child = p_lkrg_sysctl_table, ++ }, ++ { } ++}; ++ ++struct ctl_table p_lkrg_sysctl_table[] = { ++ { ++ .procname = "kint_validate", ++ .data = &P_CTRL(p_kint_validate), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_kint_validate, ++ .extra1 = &p_kint_validate_min, ++ .extra2 = &p_kint_validate_max, ++ }, ++ { ++ .procname = "kint_enforce", ++ .data = &P_CTRL(p_kint_enforce), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_kint_enforce, ++ .extra1 = &p_kint_enforce_min, ++ .extra2 = &p_kint_enforce_max, ++ }, ++ { ++ .procname = "pint_validate", ++ .data = &P_CTRL(p_pint_validate), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_pint_validate, ++ .extra1 = &p_pint_validate_min, ++ .extra2 = &p_pint_validate_max, ++ }, ++ { ++ .procname = "pint_enforce", ++ .data = &P_CTRL(p_pint_enforce), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_pint_enforce, ++ .extra1 = &p_pint_enforce_min, ++ .extra2 = &p_pint_enforce_max, ++ }, ++ { ++ .procname = "interval", ++ .data = &P_CTRL(p_interval), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_interval, ++ .extra1 = &p_interval_min, ++ .extra2 = &p_interval_max, ++ }, ++ { ++ .procname = "block_modules", ++ .data = &P_CTRL(p_block_modules), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_block_modules, ++ .extra1 = &p_block_module_min, ++ .extra2 = &p_block_module_max, ++ }, ++ { ++ .procname = "log_level", ++ .data = &P_CTRL(p_log_level), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_log_level, ++ .extra1 = &p_log_level_min, ++ .extra2 = &p_log_level_max, ++ }, ++ { ++ .procname = 
"trigger", ++ .data = &P_CTRL(p_trigger), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_trigger, ++ .extra1 = &p_trigger_min, ++ .extra2 = &p_trigger_max, ++ }, ++#ifdef P_LKRG_UNHIDE ++ { ++ .procname = "hide", ++ .data = &P_CTRL(p_hide_lkrg), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_hide, ++ .extra1 = &p_hide_lkrg_min, ++ .extra2 = &p_hide_lkrg_max, ++ }, ++#endif ++ { ++ .procname = "heartbeat", ++ .data = &P_CTRL(p_heartbeat), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_heartbeat, ++ .extra1 = &p_heartbeat_min, ++ .extra2 = &p_heartbeat_max, ++ }, ++#if defined(CONFIG_X86) ++ { ++ .procname = "smep_validate", ++ .data = &P_CTRL(p_smep_validate), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_smep_validate, ++ .extra1 = &p_smep_validate_min, ++ .extra2 = &p_smep_validate_max, ++ }, ++ { ++ .procname = "smep_enforce", ++ .data = &P_CTRL(p_smep_enforce), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_smep_enforce, ++ .extra1 = &p_smep_enforce_min, ++ .extra2 = &p_smep_enforce_max, ++ }, ++ { ++ .procname = "smap_validate", ++ .data = &P_CTRL(p_smap_validate), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_smap_validate, ++ .extra1 = &p_smap_validate_min, ++ .extra2 = &p_smap_validate_max, ++ }, ++ { ++ .procname = "smap_enforce", ++ .data = &P_CTRL(p_smap_enforce), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_smap_enforce, ++ .extra1 = &p_smap_enforce_min, ++ .extra2 = &p_smap_enforce_max, ++ }, ++#endif ++ { ++ .procname = "umh_validate", ++ .data = &P_CTRL(p_umh_validate), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_umh_validate, ++ .extra1 = &p_umh_validate_min, ++ .extra2 = &p_umh_validate_max, ++ }, ++ { ++ .procname = "umh_enforce", ++ .data = &P_CTRL(p_umh_enforce), ++ .maxlen = 
sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_umh_enforce, ++ .extra1 = &p_umh_enforce_min, ++ .extra2 = &p_umh_enforce_max, ++ }, ++ { ++ .procname = "msr_validate", ++ .data = &P_CTRL(p_msr_validate), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_msr_validate, ++ .extra1 = &p_msr_validate_min, ++ .extra2 = &p_msr_validate_max, ++ }, ++ { ++ .procname = "pcfi_validate", ++ .data = &P_CTRL(p_pcfi_validate), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_pcfi_validate, ++ .extra1 = &p_pcfi_validate_min, ++ .extra2 = &p_pcfi_validate_max, ++ }, ++ { ++ .procname = "pcfi_enforce", ++ .data = &P_CTRL(p_pcfi_enforce), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_pcfi_enforce, ++ .extra1 = &p_pcfi_enforce_min, ++ .extra2 = &p_pcfi_enforce_max, ++ }, ++ { ++ .procname = "profile_validate", ++ .data = &P_CTRL(p_profile_validate), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_profile_validate, ++ .extra1 = &p_profile_validate_min, ++ .extra2 = &p_profile_validate_max, ++ }, ++ { ++ .procname = "profile_enforce", ++ .data = &P_CTRL(p_profile_enforce), ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = p_sysctl_profile_enforce, ++ .extra1 = &p_profile_enforce_min, ++ .extra2 = &p_profile_enforce_max, ++ }, ++ { } ++}; ++ ++ ++static int p_sysctl_kint_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ char *p_str[] = { ++ "DISABLED", ++ "MANUAL", ++ "PERIODICALLY", ++ "PERIODICALLY + RANDOM EVENTS" ++ }; ++ ++ p_tmp = P_CTRL(p_kint_validate); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_kint_validate) != p_tmp) { ++ p_print_log(P_LKRG_CRIT, ++ "Changing \"kint_validate\" logic. 
From Old[%d | %s] to new[%d | %s] one.\n", ++ p_tmp, ++ p_str[p_tmp], ++ P_CTRL(p_kint_validate), ++ p_str[P_CTRL(p_kint_validate)] ++ ); ++ P_CTRL(p_profile_validate) = 9; ++ /* Random events */ ++ if (p_tmp < 3 && P_CTRL(p_kint_validate) == 3) { ++ p_register_notifiers(); ++ } else if (p_tmp == 3 && P_CTRL(p_kint_validate) < 3) { ++ p_deregister_notifiers(); ++ } ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++static int p_sysctl_kint_enforce(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ char *p_str[] = { ++ "LOG & ACCEPT", ++#if defined(CONFIG_X86) ++ "LOG ONLY (For SELinux and CR0.WP LOG & RESTORE)", ++#else ++ "LOG ONLY (For SELinux LOG & RESTORE)", ++#endif ++ "PANIC" ++ }; ++ ++ p_tmp = P_CTRL(p_kint_enforce); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_kint_enforce) != p_tmp) { ++ p_print_log(P_LKRG_CRIT, ++ "Changing \"kint_enforce\" logic. From Old[%d | %s] to new[%d | %s] one.\n", ++ p_tmp, ++ p_str[p_tmp], ++ P_CTRL(p_kint_enforce), ++ p_str[P_CTRL(p_kint_enforce)] ++ ); ++ P_CTRL(p_profile_enforce) = 9; ++#if defined(CONFIG_X86) ++ if (P_CTRL(p_kint_enforce)) { ++ P_ENABLE_WP_FLAG(p_pcfi_CPU_flags); ++ } ++#endif ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++static int p_sysctl_pint_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ char *p_str[] = { ++ "DISABLED", ++ "CURRENT", ++ "CURRENT", ++ "ALL TASKS" ++ }; ++ ++ p_tmp = P_CTRL(p_pint_validate); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_pint_validate) != p_tmp) { ++ p_print_log(P_LKRG_CRIT, ++ "Changing \"pint_validate\" logic. 
From Old[%d | %s] to new[%d | %s] one.\n", ++ p_tmp, ++ p_str[p_tmp], ++ P_CTRL(p_pint_validate), ++ p_str[P_CTRL(p_pint_validate)] ++ ); ++ P_CTRL(p_profile_validate) = 9; ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++static int p_sysctl_pint_enforce(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ char *p_str[] = { ++ "LOG & ACCEPT", ++ "KILL TASK", ++ "PANIC" ++ }; ++ ++ p_tmp = P_CTRL(p_pint_enforce); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_pint_enforce) != p_tmp) { ++ p_print_log(P_LKRG_CRIT, ++ "Changing \"pint_enforce\" logic. From Old[%d | %s] to new[%d | %s] one.\n", ++ p_tmp, ++ p_str[p_tmp], ++ P_CTRL(p_pint_enforce), ++ p_str[P_CTRL(p_pint_enforce)] ++ ); ++ P_CTRL(p_profile_enforce) = 9; ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++ ++static int p_sysctl_interval(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ int p_ret; ++ ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ p_print_log(P_LKRG_CRIT, "[kINT] New interval => %d\n", P_CTRL(p_interval)); ++ p_offload_work(0); // run integrity check! 
++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++static int p_sysctl_block_modules(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ ++ p_tmp = P_CTRL(p_block_modules); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_block_modules) && !p_tmp) { ++ p_print_log(P_LKRG_CRIT, ++ "Enabling \"blocking modules\" feature.\n"); ++ } else if (p_tmp && !P_CTRL(p_block_modules)) { ++ p_print_log(P_LKRG_CRIT, ++ "Disabling \"blocking modules\" feature.\n"); ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++static int p_sysctl_log_level(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ int p_ret; ++ static const char * const p_log_level_string[] = { ++ "NONE", ++ "ALIVE", ++ "ERROR", ++ "WARN", ++ "INFO" ++#if defined(P_LKRG_DEBUG) ++ ,"DEBUG", ++ "STRONG_DEBUG" ++#endif ++ }; ++ ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ p_print_log(P_LKRG_CRIT, "New log level => %d (%s)\n", ++ P_CTRL(p_log_level), ++ p_log_level_string[P_CTRL(p_log_level)]); ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++ ++static int p_sysctl_trigger(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_trigger)) { ++ p_manual = 1; ++ p_offload_work(0); // run integrity check! ++ P_CTRL(p_trigger) = 0; // Restore 0 value - user only sees that value! 
++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++#ifdef P_LKRG_UNHIDE ++static int p_sysctl_hide(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ ++ p_tmp = P_CTRL(p_hide_lkrg); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_hide_lkrg)) { ++ P_CTRL(p_hide_lkrg) = p_tmp; // Restore previous state - for sync ++ p_hide_itself(); // hide module! ++ } else { ++ P_CTRL(p_hide_lkrg) = p_tmp; // Restore previous state - for sync ++ p_unhide_itself(); // Unide module! ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++#endif ++ ++static int p_sysctl_heartbeat(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ ++ p_tmp = P_CTRL(p_heartbeat); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_heartbeat) && !p_tmp) { ++ p_print_log(P_LKRG_CRIT, ++ "Enabling heartbeat message.\n"); ++ } else if (p_tmp && !P_CTRL(p_heartbeat)) { ++ p_print_log(P_LKRG_CRIT, ++ "Disabling heartbeat message.\n"); ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++#if defined(CONFIG_X86) ++static int p_sysctl_smep_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ ++ p_tmp = P_CTRL(p_smep_validate); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_smep_validate) && !p_tmp) { ++ if (boot_cpu_has(X86_FEATURE_SMEP)) { ++ p_print_log(P_LKRG_CRIT, ++ "Enabling SMEP validation feature.\n"); ++ P_ENABLE_SMEP_FLAG(p_pcfi_CPU_flags); ++ } else { ++ p_print_log(P_LKRG_ERR, ++ "System does NOT support SMEP. 
LKRG can't enable SMEP validation :(\n"); ++ P_CTRL(p_smep_validate) = 0; ++ P_CTRL(p_smep_enforce) = 0; ++ } ++ P_CTRL(p_profile_validate) = 9; ++ } else if (p_tmp && !P_CTRL(p_smep_validate)) { ++ p_print_log(P_LKRG_CRIT, ++ "Disabling SMEP validation feature.\n"); ++ P_CTRL(p_profile_validate) = 9; ++ } ++ ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++static int p_sysctl_smep_enforce(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ char *p_str[] = { ++ "LOG & ACCEPT", ++ "LOG & RESTORE", ++ "PANIC" ++ }; ++ ++ p_tmp = P_CTRL(p_smep_enforce); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_smep_enforce) != p_tmp) { ++ p_print_log(P_LKRG_CRIT, ++ "Changing \"smep_enforce\" logic. From Old[%d | %s] to new[%d | %s] one.\n", ++ p_tmp, ++ p_str[p_tmp], ++ P_CTRL(p_smep_enforce), ++ p_str[P_CTRL(p_smep_enforce)] ++ ); ++ P_CTRL(p_profile_enforce) = 9; ++ if (boot_cpu_has(X86_FEATURE_SMEP)) { ++ P_ENABLE_SMEP_FLAG(p_pcfi_CPU_flags); ++ } else { ++ p_print_log(P_LKRG_ERR, ++ "System does NOT support SMEP. LKRG's SMEP validation will be disabled :(\n"); ++ P_CTRL(p_smep_enforce) = 0; ++ P_CTRL(p_smep_validate) = 0; ++ } ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++static int p_sysctl_smap_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ ++ p_tmp = P_CTRL(p_smap_validate); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_smap_validate) && !p_tmp) { ++ if (boot_cpu_has(X86_FEATURE_SMAP)) { ++ p_print_log(P_LKRG_CRIT, ++ "Enabling SMAP validation feature.\n"); ++ P_ENABLE_SMAP_FLAG(p_pcfi_CPU_flags); ++ } else { ++ p_print_log(P_LKRG_ERR, ++ "System does NOT support SMAP. 
LKRG can't enable SMAP validation :(\n"); ++ P_CTRL(p_smap_validate) = 0; ++ P_CTRL(p_smap_enforce) = 0; ++ } ++ P_CTRL(p_profile_validate) = 9; ++ } else if (p_tmp && !P_CTRL(p_smap_validate)) { ++ p_print_log(P_LKRG_CRIT, ++ "Disabling SMAP validation feature.\n"); ++ P_CTRL(p_profile_validate) = 9; ++ } ++ ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++static int p_sysctl_smap_enforce(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ char *p_str[] = { ++ "LOG & ACCEPT", ++ "LOG & RESTORE", ++ "PANIC" ++ }; ++ ++ p_tmp = P_CTRL(p_smap_enforce); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_smap_enforce) != p_tmp) { ++ p_print_log(P_LKRG_CRIT, ++ "Changing \"smap_enforce\" logic. From Old[%d | %s] to new[%d | %s] one.\n", ++ p_tmp, ++ p_str[p_tmp], ++ P_CTRL(p_smap_enforce), ++ p_str[P_CTRL(p_smap_enforce)] ++ ); ++ P_CTRL(p_profile_enforce) = 9; ++ if (boot_cpu_has(X86_FEATURE_SMAP)) { ++ P_ENABLE_SMAP_FLAG(p_pcfi_CPU_flags); ++ } else { ++ p_print_log(P_LKRG_ERR, ++ "System does NOT support SMAP. LKRG's SMAP validation will be disabled :(\n"); ++ P_CTRL(p_smap_enforce) = 0; ++ P_CTRL(p_smap_validate) = 0; ++ } ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++#endif ++ ++static int p_sysctl_umh_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ static const char * const p_str[] = { ++ "Disable protection", ++ "Allow specific paths", ++ "Completely block usermodehelper" ++ }; ++ ++ p_tmp = P_CTRL(p_umh_validate); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_umh_validate) != p_tmp) { ++ p_print_log(P_LKRG_CRIT, ++ "Changing \"umh_validate\" logic. 
From Old[%d | %s] to new[%d | %s] one.\n", ++ p_tmp, ++ p_str[p_tmp], ++ P_CTRL(p_umh_validate), ++ p_str[P_CTRL(p_umh_validate)] ++ ); ++ P_CTRL(p_profile_validate) = 9; ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++static int p_sysctl_umh_enforce(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ char *p_str[] = { ++ "LOG ONLY", ++ "PREVENT EXECUTION", ++ "PANIC" ++ }; ++ ++ p_tmp = P_CTRL(p_umh_enforce); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_umh_enforce) != p_tmp) { ++ p_print_log(P_LKRG_CRIT, ++ "Changing \"umh_enforce\" logic. From Old[%d | %s] to new[%d | %s] one.\n", ++ p_tmp, ++ p_str[p_tmp], ++ P_CTRL(p_umh_enforce), ++ p_str[P_CTRL(p_umh_enforce)] ++ ); ++ P_CTRL(p_profile_validate) = 9; ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++static int p_sysctl_msr_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ int p_cpu; ++ unsigned int p_tmp; ++ ++ p_tmp = P_CTRL(p_msr_validate); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_msr_validate) && !p_tmp) { ++ spin_lock(&p_db_lock); ++ memset(p_db.p_CPU_metadata_array,0,sizeof(p_CPU_metadata_hash_mem)*p_db.p_cpu.p_nr_cpu_ids); ++ for_each_present_cpu(p_cpu) { ++ if (cpu_online(p_cpu)) { ++ smp_call_function_single(p_cpu,p_dump_CPU_metadata,p_db.p_CPU_metadata_array,true); ++ } ++ } ++ p_db.p_CPU_metadata_hashes = hash_from_CPU_data(p_db.p_CPU_metadata_array); ++ spin_unlock(&p_db_lock); ++ p_print_log(P_LKRG_CRIT, ++ "Enabling MSRs verification during kernel integrity validation (kINT).\n"); ++ P_CTRL(p_profile_validate) = 9; ++ } else if (p_tmp && !P_CTRL(p_msr_validate)) { ++ spin_lock(&p_db_lock); ++ p_db.p_CPU_metadata_hashes = 
hash_from_CPU_data(p_db.p_CPU_metadata_array); ++ spin_unlock(&p_db_lock); ++ p_print_log(P_LKRG_CRIT, ++ "Disabling MSRs verification during kernel integrity validation (kINT).\n"); ++ P_CTRL(p_profile_validate) = 9; ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++static int p_sysctl_pcfi_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ static const char * const p_str[] = { ++ "Disabled", ++ "No stackwalk (weak)", ++ "Fully enabled" ++ }; ++ ++ p_tmp = P_CTRL(p_pcfi_validate); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_pcfi_validate) != p_tmp) { ++ p_print_log(P_LKRG_CRIT, ++ "Changing \"pcfi_validate\" logic. From Old[%d | %s] to new[%d | %s] one.\n", ++ p_tmp, ++ p_str[p_tmp], ++ P_CTRL(p_pcfi_validate), ++ p_str[P_CTRL(p_pcfi_validate)] ++ ); ++ P_CTRL(p_profile_validate) = 9; ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++static int p_sysctl_pcfi_enforce(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ char *p_str[] = { ++ "LOG ONLY", ++ "KILL TASK", ++ "PANIC" ++ }; ++ ++ p_tmp = P_CTRL(p_pcfi_enforce); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_pcfi_enforce) != p_tmp) { ++ p_print_log(P_LKRG_CRIT, ++ "Changing \"pcfi_enforce\" logic. 
From Old[%d | %s] to new[%d | %s] one.\n", ++ p_tmp, ++ p_str[p_tmp], ++ P_CTRL(p_pcfi_enforce), ++ p_str[P_CTRL(p_pcfi_enforce)] ++ ); ++ P_CTRL(p_profile_enforce) = 9; ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++static int p_sysctl_profile_validate(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ int p_cpu; ++ static const char * const p_str[] = { ++ "Disabled", ++ "Light", ++ "Balanced", ++ "Heavy", ++ "Paranoid" ++ }; ++ ++ p_tmp = P_CTRL(p_profile_validate); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_profile_validate) != p_tmp) { ++ if (P_CTRL(p_profile_validate) > 4 && P_CTRL(p_profile_validate) != 9) { ++ p_print_log(P_LKRG_CRIT, "Invalid \"profile_validate\" value.\n"); ++ P_CTRL(p_profile_validate) = p_tmp; ++ } else { ++ ++ switch (P_CTRL(p_profile_validate)) { ++ ++ case 0: ++ /* kint_validate */ ++ if (P_CTRL(p_kint_validate) == 3) ++ p_deregister_notifiers(); ++ P_CTRL(p_kint_validate) = 0; // Disabled ++ /* pint_validate */ ++ P_CTRL(p_pint_validate) = 0; // Disabled ++ /* pcfi_validate */ ++ P_CTRL(p_pcfi_validate) = 0; // Disabled ++ /* umh_validate */ ++ P_CTRL(p_umh_validate) = 0; // Disabled ++ /* msr_validate */ ++ if (P_CTRL(p_msr_validate)) { ++ spin_lock(&p_db_lock); ++ P_CTRL(p_msr_validate) = 0; // Disable ++ p_db.p_CPU_metadata_hashes = hash_from_CPU_data(p_db.p_CPU_metadata_array); ++ spin_unlock(&p_db_lock); ++ } ++#if defined(CONFIG_X86) ++ /* smep_validate */ ++ P_CTRL(p_smep_validate) = 0; ++ /* smap_validate */ ++ P_CTRL(p_smap_validate) = 0; ++#endif ++ break; ++ ++ case 1: ++ /* kint_validate */ ++ if (P_CTRL(p_kint_validate) == 3) ++ p_deregister_notifiers(); ++ P_CTRL(p_kint_validate) = 1; // Manual trigger only ++ /* pint_validate */ ++ P_CTRL(p_pint_validate) = 1; // Current task only ++ /* pcfi_validate */ ++ 
P_CTRL(p_pcfi_validate) = 1; // Weak pCFI ++ /* umh_validate */ ++ P_CTRL(p_umh_validate) = 1; // Allow specific paths ++ /* msr_validate */ ++ if (P_CTRL(p_msr_validate)) { ++ spin_lock(&p_db_lock); ++ P_CTRL(p_msr_validate) = 0; // Disable ++ p_db.p_CPU_metadata_hashes = hash_from_CPU_data(p_db.p_CPU_metadata_array); ++ spin_unlock(&p_db_lock); ++ } ++#if defined(CONFIG_X86) ++ /* smep_validate */ ++ if (!P_CTRL(p_smep_validate)) { ++ if (boot_cpu_has(X86_FEATURE_SMEP)) { ++ P_ENABLE_SMEP_FLAG(p_pcfi_CPU_flags); ++ P_CTRL(p_smep_validate) = 1; // Enable ++ } else { ++ P_CTRL(p_smep_validate) = 0; ++ P_CTRL(p_smep_enforce) = 0; ++ } ++ } ++ /* smap_validate */ ++ if (!P_CTRL(p_smap_validate)) { ++ if (boot_cpu_has(X86_FEATURE_SMAP)) { ++ P_ENABLE_SMAP_FLAG(p_pcfi_CPU_flags); ++ P_CTRL(p_smap_validate) = 1; // Enable ++ } else { ++ P_CTRL(p_smap_validate) = 0; ++ P_CTRL(p_smap_enforce) = 0; ++ } ++ } ++#endif ++ break; ++ ++ case 2: ++ /* kint_validate */ ++ if (P_CTRL(p_kint_validate) == 3) ++ p_deregister_notifiers(); ++ P_CTRL(p_kint_validate) = 2; // Timer ++ /* pint_validate */ ++ P_CTRL(p_pint_validate) = 1; // Current ++ /* pcfi_validate */ ++ P_CTRL(p_pcfi_validate) = 1; // Weak pCFI ++ /* umh_validate */ ++ P_CTRL(p_umh_validate) = 1; // Allow specific paths ++ /* msr_validate */ ++ if (P_CTRL(p_msr_validate)) { ++ spin_lock(&p_db_lock); ++ P_CTRL(p_msr_validate) = 0; // Disable ++ p_db.p_CPU_metadata_hashes = hash_from_CPU_data(p_db.p_CPU_metadata_array); ++ spin_unlock(&p_db_lock); ++ } ++#if defined(CONFIG_X86) ++ /* smep_validate */ ++ if (!P_CTRL(p_smep_validate)) { ++ if (boot_cpu_has(X86_FEATURE_SMEP)) { ++ P_ENABLE_SMEP_FLAG(p_pcfi_CPU_flags); ++ P_CTRL(p_smep_validate) = 1; // Enable ++ } else { ++ P_CTRL(p_smep_validate) = 0; ++ P_CTRL(p_smep_enforce) = 0; ++ } ++ } ++ /* smap_validate */ ++ if (!P_CTRL(p_smap_validate)) { ++ if (boot_cpu_has(X86_FEATURE_SMAP)) { ++ P_ENABLE_SMAP_FLAG(p_pcfi_CPU_flags); ++ P_CTRL(p_smap_validate) = 1; // Enable 
++ } else { ++ P_CTRL(p_smap_validate) = 0; ++ P_CTRL(p_smap_enforce) = 0; ++ } ++ } ++#endif ++ break; ++ ++ case 3: ++ /* kint_validate */ ++ if (P_CTRL(p_kint_validate) < 3) ++ p_register_notifiers(); ++ P_CTRL(p_kint_validate) = 3; // Timer + random events ++ /* pint_validate */ ++ P_CTRL(p_pint_validate) = 1; // Current ++ /* pcfi_validate */ ++ P_CTRL(p_pcfi_validate) = 2; // Full pCFI ++ /* umh_validate */ ++ P_CTRL(p_umh_validate) = 1; // Allow specific paths ++ /* msr_validate */ ++ if (P_CTRL(p_msr_validate)) { ++ spin_lock(&p_db_lock); ++ P_CTRL(p_msr_validate) = 0; // Disable ++ p_db.p_CPU_metadata_hashes = hash_from_CPU_data(p_db.p_CPU_metadata_array); ++ spin_unlock(&p_db_lock); ++ } ++#if defined(CONFIG_X86) ++ /* smep_validate */ ++ if (!P_CTRL(p_smep_validate)) { ++ if (boot_cpu_has(X86_FEATURE_SMEP)) { ++ P_ENABLE_SMEP_FLAG(p_pcfi_CPU_flags); ++ P_CTRL(p_smep_validate) = 1; // Enable ++ } else { ++ P_CTRL(p_smep_validate) = 0; ++ P_CTRL(p_smep_enforce) = 0; ++ } ++ } ++ /* smap_validate */ ++ if (!P_CTRL(p_smap_validate)) { ++ if (boot_cpu_has(X86_FEATURE_SMAP)) { ++ P_ENABLE_SMAP_FLAG(p_pcfi_CPU_flags); ++ P_CTRL(p_smap_validate) = 1; // Enable ++ } else { ++ P_CTRL(p_smap_validate) = 0; ++ P_CTRL(p_smap_enforce) = 0; ++ } ++ } ++#endif ++ break; ++ ++ case 4: ++ /* kint_validate */ ++ if (P_CTRL(p_kint_validate) < 3) ++ p_register_notifiers(); ++ P_CTRL(p_kint_validate) = 3; // Timer + random events ++ /* pint_validate */ ++ P_CTRL(p_pint_validate) = 3; // Paranoid() ++ /* pcfi_validate */ ++ P_CTRL(p_pcfi_validate) = 2; // Full pCFI ++ /* umh_validate */ ++ P_CTRL(p_umh_validate) = 2; // Full lock-down ++ /* msr_validate */ ++ if (!P_CTRL(p_msr_validate)) { ++ spin_lock(&p_db_lock); ++ P_CTRL(p_msr_validate) = 1; // Enable ++ memset(p_db.p_CPU_metadata_array,0,sizeof(p_CPU_metadata_hash_mem)*p_db.p_cpu.p_nr_cpu_ids); ++ for_each_present_cpu(p_cpu) { ++ if (cpu_online(p_cpu)) { ++ 
smp_call_function_single(p_cpu,p_dump_CPU_metadata,p_db.p_CPU_metadata_array,true); ++ } ++ } ++ p_db.p_CPU_metadata_hashes = hash_from_CPU_data(p_db.p_CPU_metadata_array); ++ spin_unlock(&p_db_lock); ++ } ++#if defined(CONFIG_X86) ++ /* smep_validate */ ++ if (!P_CTRL(p_smep_validate)) { ++ if (boot_cpu_has(X86_FEATURE_SMEP)) { ++ P_ENABLE_SMEP_FLAG(p_pcfi_CPU_flags); ++ P_CTRL(p_smep_validate) = 1; // Enable ++ } else { ++ P_CTRL(p_smep_validate) = 0; ++ P_CTRL(p_smep_enforce) = 0; ++ } ++ } ++ /* smap_validate */ ++ if (!P_CTRL(p_smap_validate)) { ++ if (boot_cpu_has(X86_FEATURE_SMAP)) { ++ P_ENABLE_SMAP_FLAG(p_pcfi_CPU_flags); ++ P_CTRL(p_smap_validate) = 1; // Enable ++ } else { ++ P_CTRL(p_smap_validate) = 0; ++ P_CTRL(p_smap_enforce) = 0; ++ } ++ } ++#endif ++ break; ++ ++ default: ++ break; ++ ++ } ++ ++ p_print_log(P_LKRG_CRIT, ++ "Changing \"profile_validate\" logic. From Old[%d | %s] to new[%d | %s] one.\n", ++ p_tmp, ++ (p_tmp != 9) ? p_str[p_tmp] : "Custom", ++ P_CTRL(p_profile_validate), ++ (P_CTRL(p_profile_validate) != 9) ? 
p_str[P_CTRL(p_profile_validate)] : "Custom" ++ ); ++ } ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++static int p_sysctl_profile_enforce(struct ctl_table *p_table, int p_write, ++ void __user *p_buffer, size_t *p_len, loff_t *p_pos) { ++ ++ int p_ret; ++ unsigned int p_tmp; ++ static const char * const p_str[] = { ++ "Log & Accept", ++ "Selective", ++ "Strict", ++ "Paranoid" ++ }; ++ ++ p_tmp = P_CTRL(p_profile_enforce); ++ p_lkrg_open_rw(); ++ if ( (p_ret = proc_dointvec_minmax(p_table, p_write, p_buffer, p_len, p_pos)) == 0 && p_write) { ++ if (P_CTRL(p_profile_enforce) != p_tmp) { ++ if (P_CTRL(p_profile_enforce) > 3 && P_CTRL(p_profile_enforce) != 9) { ++ p_print_log(P_LKRG_CRIT, "Invalid \"profile_enforce\" value.\n"); ++ P_CTRL(p_profile_enforce) = p_tmp; ++ } else { ++ ++ switch (P_CTRL(p_profile_enforce)) { ++ ++ case 0: ++ /* kint_enforce */ ++ P_CTRL(p_kint_enforce) = 0; // Log & accept ++ /* pint_enforce */ ++ P_CTRL(p_pint_enforce) = 0; // Log & accept ++ /* pcfi_enforce */ ++ P_CTRL(p_pcfi_enforce) = 0; // Log only ++ /* umh_enforce */ ++ P_CTRL(p_umh_enforce) = 0; // Log only ++#if defined(CONFIG_X86) ++ /* smep_enforce */ ++ P_CTRL(p_smep_enforce) = 0; // Log & accept ++ /* smap_enforce */ ++ P_CTRL(p_smap_enforce) = 0; // Log & accept ++#endif ++ break; ++ ++ case 1: ++ /* kint_enforce */ ++ P_CTRL(p_kint_enforce) = 1; // Log only ++ /* pint_enforce */ ++ P_CTRL(p_pint_enforce) = 1; // Kill task ++ /* pcfi_enforce */ ++ P_CTRL(p_pcfi_enforce) = 1; // Kill task ++ /* umh_enforce */ ++ P_CTRL(p_umh_enforce) = 1; // Prevent execution ++#if defined(CONFIG_X86) ++ /* smep_enforce */ ++ if (boot_cpu_has(X86_FEATURE_SMEP)) { ++ P_ENABLE_SMEP_FLAG(p_pcfi_CPU_flags); ++ P_CTRL(p_smep_enforce) = 2; // Panic ++ } else { ++ P_CTRL(p_smep_enforce) = 0; ++ P_CTRL(p_smep_validate) = 0; ++ } ++ /* smap_enforce */ ++ if (boot_cpu_has(X86_FEATURE_SMAP)) { ++ P_ENABLE_SMAP_FLAG(p_pcfi_CPU_flags); ++ P_CTRL(p_smap_enforce) = 2; // Panic ++ } else { ++ 
P_CTRL(p_smap_enforce) = 0; ++ P_CTRL(p_smap_validate) = 0; ++ } ++#endif ++ break; ++ ++ case 2: ++ /* kint_enforce */ ++ P_CTRL(p_kint_enforce) = 2; // Panic ++ /* pint_enforce */ ++ P_CTRL(p_pint_enforce) = 1; // Kill task ++ /* pcfi_enforce */ ++ P_CTRL(p_pcfi_enforce) = 1; // Kill task ++ /* umh_enforce */ ++ P_CTRL(p_umh_enforce) = 1; // Prevent execution ++#if defined(CONFIG_X86) ++ /* smep_enforce */ ++ if (boot_cpu_has(X86_FEATURE_SMEP)) { ++ P_ENABLE_SMEP_FLAG(p_pcfi_CPU_flags); ++ P_CTRL(p_smep_enforce) = 2; // Panic ++ } else { ++ P_CTRL(p_smep_enforce) = 0; ++ P_CTRL(p_smep_validate) = 0; ++ } ++ /* smap_enforce */ ++ if (boot_cpu_has(X86_FEATURE_SMAP)) { ++ P_ENABLE_SMAP_FLAG(p_pcfi_CPU_flags); ++ P_CTRL(p_smap_enforce) = 2; // Panic ++ } else { ++ P_CTRL(p_smap_enforce) = 0; ++ P_CTRL(p_smap_validate) = 0; ++ } ++#endif ++ break; ++ ++ case 3: ++ /* kint_enforce */ ++ P_CTRL(p_kint_enforce) = 2; // Panic ++ /* pint_enforce */ ++ P_CTRL(p_pint_enforce) = 2; // Panic ++ /* pcfi_enforce */ ++ P_CTRL(p_pcfi_enforce) = 2; // Panic ++ /* umh_enforce */ ++ P_CTRL(p_umh_enforce) = 2; // Panic ++#if defined(CONFIG_X86) ++ /* smep_enforce */ ++ if (boot_cpu_has(X86_FEATURE_SMEP)) { ++ P_ENABLE_SMEP_FLAG(p_pcfi_CPU_flags); ++ P_CTRL(p_smep_enforce) = 2; // Panic ++ } else { ++ P_CTRL(p_smep_enforce) = 0; ++ P_CTRL(p_smep_validate) = 0; ++ } ++ /* smap_enforce */ ++ if (boot_cpu_has(X86_FEATURE_SMAP)) { ++ P_ENABLE_SMAP_FLAG(p_pcfi_CPU_flags); ++ P_CTRL(p_smap_enforce) = 2; // Panic ++ } else { ++ P_CTRL(p_smap_enforce) = 0; ++ P_CTRL(p_smap_validate) = 0; ++ } ++#endif ++ break; ++ ++ default: ++ break; ++ ++ } ++ ++ p_print_log(P_LKRG_CRIT, ++ "Changing \"profile_enforce\" logic. From Old[%d | %s] to new[%d | %s] one.\n", ++ p_tmp, ++ (p_tmp != 9) ? p_str[p_tmp] : "Custom", ++ P_CTRL(p_profile_enforce), ++ (P_CTRL(p_profile_enforce) != 9) ? 
p_str[P_CTRL(p_profile_enforce)] : "Custom" ++ ); ++ } ++ } ++ } ++ p_lkrg_close_rw(); ++ ++ return p_ret; ++} ++ ++ ++int p_register_comm_channel(void) { ++ ++ if ( (p_sysctl_handle = register_sysctl_table(p_lkrg_sysctl_base)) == NULL) { ++ p_print_log(P_LKRG_ERR, ++ "Communication channel error! Can't register 'sysctl' table :( Exiting...\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ return P_LKRG_SUCCESS; ++} ++ ++void p_deregister_comm_channel(void) { ++ ++ unregister_sysctl_table(p_sysctl_handle); ++} +diff --git a/security/lkrg/modules/comm_channel/p_comm_channel.h b/security/lkrg/modules/comm_channel/p_comm_channel.h +new file mode 100644 +index 000000000000..85f7b18859ba +--- /dev/null ++++ b/security/lkrg/modules/comm_channel/p_comm_channel.h +@@ -0,0 +1,27 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Communication channel - sysctl interface ++ * ++ * Notes: ++ * - Allow administrator of the system to interact with LKRG via sysctl interface ++ * ++ * Timeline: ++ * - Created: 26.X.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_COMM_CHANNEL_SYSCTL_H ++#define P_LKRG_COMM_CHANNEL_SYSCTL_H ++ ++extern struct ctl_table p_lkrg_sysctl_base[]; ++extern struct ctl_table p_lkrg_sysctl_table[]; ++ ++int p_register_comm_channel(void); ++void p_deregister_comm_channel(void); ++ ++#endif +diff --git a/security/lkrg/modules/database/CPU.c b/security/lkrg/modules/database/CPU.c +new file mode 100644 +index 000000000000..60b3a73796ac +--- /dev/null ++++ b/security/lkrg/modules/database/CPU.c +@@ -0,0 +1,281 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Database module ++ * => submodule for checking system configuration regarding CPUs ++ * ++ * Notes: ++ * - Some of the critical data may exist per logical CPU (core) ++ * and need to be independently verified / checked. ++ * Additionally, it is strongly dependent on the architecture. 
++ * Linux kernel defines different types of CPUs: ++ * => online CPUs ++ * => possible CPUs ++ * => present CPUs ++ * => active CPUs ++ * ++ * This module will keep information about how many 'active CPUs', ++ * 'online CPUs' and 'present CPUs' exist in the current system. ++ * Additionally, Linux kernel exports global CPU id count ('nr_cpu_ids'), ++ * which is initialized per boot time. If over the time any of the ++ * CPU will be hot plugged / activated this information will be ++ * visible for us! ++ * ++ * - x86 (and amd64) arch: the following pieces of information are ++ * critical and need to be verified (checking integrity): ++ * => IDT base and/or entire table ++ * => MSRs ++ * ++ * - Since Linux 4.10 there isn't CPU_[ONLINE/DEAD] notifiers :( ++ * Hot CPU plug[in/out] notification logic has completely changed. ++ * More information can be found here: ++ * => https://patchwork.kernel.org/patch/9448577/ ++ * On new kernel (4.10.+) we use modern hot CPU plug[in/out] logic. ++ * ++ * Timeline: ++ * - Created: 28.XI.2015 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../p_lkrg_main.h" ++ ++/* ++ * #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) ++ * #define put_cpu() preempt_enable() ++ */ ++ ++void p_get_cpus(p_cpu_info *p_arg) { ++ ++ memset(p_arg,0,sizeof(p_cpu_info)); ++ ++ p_arg->online_CPUs = num_online_cpus(); ++ p_arg->possible_CPUs = num_possible_cpus(); ++ p_arg->present_CPUs = num_present_cpus(); ++ p_arg->active_CPUs = num_active_cpus(); ++ ++ p_arg->p_nr_cpu_ids = nr_cpu_ids; ++ ++ p_debug_log(P_LKRG_DBG, ++// p_print_log(P_LKRG_CRIT, ++ " online[%d] possible[%d] present[%d] active[%d] nr_cpu_ids[%d]\n", ++ p_arg->online_CPUs,p_arg->possible_CPUs,p_arg->present_CPUs,p_arg->active_CPUs, ++ p_arg->p_nr_cpu_ids); ++} ++ ++int p_cmp_cpus(p_cpu_info *p_arg1, p_cpu_info *p_arg2) { ++ ++ int p_flag = 0; ++ ++ if (p_arg1->online_CPUs != p_arg2->online_CPUs) { ++ p_print_log(P_LKRG_CRIT, ++ 
"ALERT !!! NUMBER OF ONLINE CPUs IS DIFFERENT !!!\n"); ++ p_flag++; ++ } ++ if (p_arg1->possible_CPUs != p_arg2->possible_CPUs) { ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! NUMBER OF POSSIBLE CPUs IS DIFFERENT !!!\n"); ++ p_flag++; ++ } ++ if (p_arg1->present_CPUs != p_arg2->present_CPUs) { ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! NUMBER OF PRESENT CPUs IS DIFFERENT !!!\n"); ++ p_flag++; ++ } ++ if (p_arg1->active_CPUs != p_arg2->active_CPUs) { ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! NUMBER OF ACTIVE CPUs IS DIFFERENT !!!\n"); ++ p_flag++; ++ } ++ if (p_arg1->p_nr_cpu_ids != p_arg2->p_nr_cpu_ids) { ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! VARIABLE 'nr_cpu_ids' IS DIFFERENT !!!\n"); ++ p_flag++; ++ } ++ ++ return p_flag; ++} ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) ++/* ++ * Notification routine when new CPU is online or become offline. ++ * It may be critical from the security point of view, because new per-CPU ++ * metadata must be set-up. We must write them down and verify it. ++ */ ++int p_cpu_callback(struct notifier_block *p_block, unsigned long p_action, void *p_hcpu) { ++ ++ unsigned int p_cpu = (unsigned long)p_hcpu; ++ ++// TODO: lock db ++// lock is done in the individual action function ++// to reduce locking/starving time ++ ++ switch (p_action) { ++ ++ case CPU_ONLINE: ++ case CPU_ONLINE_FROZEN: ++ p_cpu_online_action(p_cpu); ++ break; ++ ++ case CPU_DEAD: ++ case CPU_DEAD_FROZEN: ++ p_cpu_dead_action(p_cpu); ++ break; ++ } ++ ++// TODO: unlock db ++// lock is done in the individual action function ++// to reduce locking/starving time ++ ++ return NOTIFY_OK; ++} ++#endif ++ ++ ++int p_cpu_online_action(unsigned int p_cpu) { ++ ++ int tmp_online_CPUs = p_db.p_cpu.online_CPUs; ++ ++ p_text_section_lock(); ++ spin_lock(&p_db_lock); ++ ++ smp_call_function_single(p_cpu,p_dump_CPU_metadata,p_db.p_CPU_metadata_array,true); ++ ++ /* Let's play... 
God mode on ;) */ ++// spin_lock_irqsave(&p_db_lock,p_db_flags); ++ ++ p_get_cpus(&p_db.p_cpu); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) ++ p_db.p_cpu.active_CPUs++; ++#endif ++ p_db.p_CPU_metadata_hashes = hash_from_CPU_data(p_db.p_CPU_metadata_array); ++ ++ /* UP kernel became SMP one! we need to do more work ;/ */ ++ if (tmp_online_CPUs == 1 && p_db.p_cpu.online_CPUs > 1) { ++ /* First recalculate _STEXT and other critical kernel's data - now is SMPbooted! */ ++ if (hash_from_ex_table() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "CPU ONLINE ERROR: CANNOT GET HASH FROM EXCEPTION TABLE!\n"); ++ } ++ if (hash_from_kernel_stext() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "CPU ONLINE ERROR: CANNOT GET HASH FROM _STEXT!\n"); ++ } ++ if (hash_from_kernel_rodata() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "CPU ONLINE ERROR: CANNOT GET HASH FROM _RODATA!\n"); ++ } ++ if (hash_from_iommu_table() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "CPU ONLINE ERROR: CANNOT GET HASH FROM IOMMU TABLE!\n"); ++ } ++ /* Now recalculate modules, again some macros are different now ! */ ++ ++ /* OK, now recalculate hashes again! */ ++ while(p_kmod_hash(&p_db.p_module_list_nr,&p_db.p_module_list_array, ++ &p_db.p_module_kobj_nr,&p_db.p_module_kobj_array, 0x2) != P_LKRG_SUCCESS) ++ schedule(); ++ ++ /* Update global module list/kobj hash */ ++ p_db.p_module_list_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_list_array, ++ (unsigned int)p_db.p_module_list_nr * sizeof(p_module_list_mem)); ++ ++ p_db.p_module_kobj_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_kobj_array, ++ (unsigned int)p_db.p_module_kobj_nr * sizeof(p_module_kobj_mem)); ++ ++ p_print_log(P_LKRG_INFO,"Hash from 'module list' => [0x%llx]\n",p_db.p_module_list_hash); ++ p_print_log(P_LKRG_INFO,"Hash from 'module kobj(s)' => [0x%llx]\n",p_db.p_module_kobj_hash); ++ ++ /* We should be fine now! 
*/ ++ } ++ ++ /* God mode off ;) */ ++// spin_unlock_irqrestore(&p_db_lock,p_db_flags); ++ spin_unlock(&p_db_lock); ++ p_text_section_unlock(); ++ ++ return 0; ++} ++ ++int p_cpu_dead_action(unsigned int p_cpu) { ++ ++ int tmp_online_CPUs = p_db.p_cpu.online_CPUs; ++ ++ p_text_section_lock(); ++ spin_lock(&p_db_lock); ++ ++ p_db.p_CPU_metadata_array[p_cpu].p_cpu_online = P_CPU_OFFLINE; ++ ++ /* Update database */ ++ ++ /* Let's play... God mode on ;) */ ++// spin_lock_irqsave(&p_db_lock,p_db_flags); ++ ++ p_get_cpus(&p_db.p_cpu); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) ++ p_db.p_cpu.online_CPUs--; ++#endif ++ p_db.p_CPU_metadata_hashes = hash_from_CPU_data(p_db.p_CPU_metadata_array); ++ ++ /* ++ * SMP kernel might became UP one! Never had a chance to test it ;/ ++ * In case when UP kernel starting to be SMP one, some critical macros ++ * are changed and hashes from TEXT section of kernel core AND modules ++ * are changing so we recalculating them. It is possible we should follow ++ * the same scenario in this situation... ++ */ ++ if (tmp_online_CPUs > 1 && p_db.p_cpu.online_CPUs == 1) { ++ /* First recalculate _STEXT and other critical kernel's data - now is not SMPbooted! */ ++ if (hash_from_ex_table() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "CPU OFFLINE ERROR: CANNOT GET HASH FROM EXCEPTION TABLE!\n"); ++ } ++ if (hash_from_kernel_stext() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "CPU OFFLINE ERROR: CANNOT GET HASH FROM _STEXT!\n"); ++ } ++ if (hash_from_kernel_rodata() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "CPU OFFLINE ERROR: CANNOT GET HASH FROM _RODATA!\n"); ++ } ++ if (hash_from_iommu_table() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "CPU OFFLINE ERROR: CANNOT GET HASH FROM IOMMU TABLE!\n"); ++ } ++ /* Now recalculate modules, again some macros are different now ! */ ++ ++ /* OK, now recalculate hashes again! 
*/ ++ while(p_kmod_hash(&p_db.p_module_list_nr,&p_db.p_module_list_array, ++ &p_db.p_module_kobj_nr,&p_db.p_module_kobj_array, 0x2) != P_LKRG_SUCCESS) ++ schedule(); ++ ++ /* Update global module list/kobj hash */ ++ p_db.p_module_list_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_list_array, ++ (unsigned int)p_db.p_module_list_nr * sizeof(p_module_list_mem)); ++ ++ p_db.p_module_kobj_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_kobj_array, ++ (unsigned int)p_db.p_module_kobj_nr * sizeof(p_module_kobj_mem)); ++ ++ p_print_log(P_LKRG_INFO,"Hash from 'module list' => [0x%llx]\n",p_db.p_module_list_hash); ++ p_print_log(P_LKRG_INFO,"Hash from 'module kobj(s)' => [0x%llx]\n",p_db.p_module_kobj_hash); ++ ++ /* We should be fine now! */ ++ } ++ ++ /* God mode off ;) */ ++// spin_unlock_irqrestore(&p_db_lock,p_db_flags); ++ spin_unlock(&p_db_lock); ++ p_text_section_unlock(); ++ ++ return 0; ++} ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) ++struct notifier_block p_cpu_notifier = ++{ ++ .notifier_call = p_cpu_callback, ++}; ++#endif +diff --git a/security/lkrg/modules/database/FTRACE/p_ftrace_enable_sysctl/p_ftrace_enable_sysctl.c b/security/lkrg/modules/database/FTRACE/p_ftrace_enable_sysctl/p_ftrace_enable_sysctl.c +new file mode 100644 +index 000000000000..9aa343f87602 +--- /dev/null ++++ b/security/lkrg/modules/database/FTRACE/p_ftrace_enable_sysctl/p_ftrace_enable_sysctl.c +@@ -0,0 +1,92 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Handle FTRACE functionality for self-modifying code. ++ * Hook 'ftrace_enable_sysctl' function. ++ * ++ * Notes: ++ * - Linux kernel might be self-modifying using dynamic FTRACE. ++ * Most of the Linux distributions provide kernel with FTRACE enabled. ++ * It can dynamically modify Linux kernel code. It is very troublesome ++ * for this project. We are relying on comparing hashes from the specific ++ * memory regions and by design self-modifications break this functionality. 
++ * - We are hooking into low-level FTRACE functions to be able to monitor ++ * whenever new modification is on the way. ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 18.IX.2020 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++#if defined(P_LKRG_FTRACE_ENABLE_SYSCTL_H) ++ ++char p_ftrace_enable_sysctl_kretprobe_state = 0; ++ ++static struct kretprobe p_ftrace_enable_sysctl_kretprobe = { ++ .kp.symbol_name = "ftrace_enable_sysctl", ++ .handler = p_ftrace_enable_sysctl_ret, ++ .entry_handler = p_ftrace_enable_sysctl_entry, ++ .data_size = sizeof(struct p_ftrace_enable_sysctl_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++notrace int p_ftrace_enable_sysctl_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ p_regs_set_arg2(p_regs, 0x0); ++ ++ return 0; ++} ++ ++ ++notrace int p_ftrace_enable_sysctl_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ return 0; ++} ++ ++ ++int p_install_ftrace_enable_sysctl_hook(void) { ++ ++ int p_tmp; ++ ++ if ( (p_tmp = register_kretprobe(&p_ftrace_enable_sysctl_kretprobe)) != 0) { ++ p_print_log(P_LKRG_ERR, "[kretprobe] register_kretprobe() for <%s> failed! 
[err=%d]\n", ++ p_ftrace_enable_sysctl_kretprobe.kp.symbol_name, ++ p_tmp); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ p_print_log(P_LKRG_INFO, "Planted [kretprobe] <%s> at: 0x%lx\n", ++ p_ftrace_enable_sysctl_kretprobe.kp.symbol_name, ++ (unsigned long)p_ftrace_enable_sysctl_kretprobe.kp.addr); ++ p_ftrace_enable_sysctl_kretprobe_state = 1; ++ ++ return P_LKRG_SUCCESS; ++} ++ ++ ++void p_uninstall_ftrace_enable_sysctl_hook(void) { ++ ++ if (!p_ftrace_enable_sysctl_kretprobe_state) { ++ p_print_log(P_LKRG_INFO, "[kretprobe] <%s> at 0x%lx is NOT installed\n", ++ p_ftrace_enable_sysctl_kretprobe.kp.symbol_name, ++ (unsigned long)p_ftrace_enable_sysctl_kretprobe.kp.addr); ++ } else { ++ unregister_kretprobe(&p_ftrace_enable_sysctl_kretprobe); ++ p_print_log(P_LKRG_INFO, "Removing [kretprobe] <%s> at 0x%lx nmissed[%d]\n", ++ p_ftrace_enable_sysctl_kretprobe.kp.symbol_name, ++ (unsigned long)p_ftrace_enable_sysctl_kretprobe.kp.addr, ++ p_ftrace_enable_sysctl_kretprobe.nmissed); ++ p_ftrace_enable_sysctl_kretprobe_state = 0; ++ } ++} ++ ++#endif +diff --git a/security/lkrg/modules/database/FTRACE/p_ftrace_enable_sysctl/p_ftrace_enable_sysctl.h b/security/lkrg/modules/database/FTRACE/p_ftrace_enable_sysctl/p_ftrace_enable_sysctl.h +new file mode 100644 +index 000000000000..283e2f180340 +--- /dev/null ++++ b/security/lkrg/modules/database/FTRACE/p_ftrace_enable_sysctl/p_ftrace_enable_sysctl.h +@@ -0,0 +1,46 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Handle FTRACE functionality for self-modifying code. ++ * Hook 'ftrace_enable_sysctl' function. ++ * ++ * Notes: ++ * - Linux kernel might be self-modifying using dynamic FTRACE. ++ * Most of the Linux distributions provide kernel with FTRACE enabled. ++ * It can dynamically modify Linux kernel code. It is very troublesome ++ * for this project. We are relying on comparing hashes from the specific ++ * memory regions and by design self-modifications break this functionality. 
++ * - We are hooking into low-level FTRACE functions to be able to monitor ++ * whenever new modification is on the way. ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 18.IX.2020 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#if defined(CONFIG_FUNCTION_TRACER) ++ ++#ifndef P_LKRG_FTRACE_ENABLE_SYSCTL_H ++#define P_LKRG_FTRACE_ENABLE_SYSCTL_H ++ ++/* per-instance private data */ ++struct p_ftrace_enable_sysctl_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_ftrace_enable_sysctl_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_ftrace_enable_sysctl_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_ftrace_enable_sysctl_hook(void); ++void p_uninstall_ftrace_enable_sysctl_hook(void); ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/database/FTRACE/p_ftrace_modify_all_code/p_ftrace_modify_all_code.c b/security/lkrg/modules/database/FTRACE/p_ftrace_modify_all_code/p_ftrace_modify_all_code.c +new file mode 100644 +index 000000000000..4c20f67b55f2 +--- /dev/null ++++ b/security/lkrg/modules/database/FTRACE/p_ftrace_modify_all_code/p_ftrace_modify_all_code.c +@@ -0,0 +1,282 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Handle dynamic FTRACE self-modifying code. ++ * Hook 'ftrace_modify_all_code' function. ++ * ++ * Notes: ++ * - Linux kernel might be self-modifying using dynamic FTRACE. ++ * Most of the Linux distributions provide kernel with FTRACE enabled. ++ * It can dynamically modify Linux kernel code. It is very troublesome ++ * for this project. We are relying on comparing hashes from the specific ++ * memory regions and by design self-modifications break this functionality. ++ * - We are hooking into low-level FTRACE functions to be able to monitor ++ * whenever new modification is on the way. 
++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 18.IX.2020 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++#if defined(P_LKRG_FTRACE_MODIFY_ALL_CODE_H) ++ ++char p_ftrace_modify_all_code_kretprobe_state = 0; ++ ++static struct kretprobe p_ftrace_modify_all_code_kretprobe = { ++ .kp.symbol_name = "ftrace_modify_all_code", ++ .handler = p_ftrace_modify_all_code_ret, ++ .entry_handler = p_ftrace_modify_all_code_entry, ++ .data_size = sizeof(struct p_ftrace_modify_all_code_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++/* ++ * We do not need to protect this variables since ftrace_modify_all_code() is executed ++ * under ftrace log. LKRG is synchronizing with it... ++ * ++ * ... unless I overlooked some code-path... ++ */ ++unsigned long p_ftrace_tmp_text; ++unsigned int p_ftrace_tmp_mod; ++ ++/* ++ * Prototype: ++ * ++ * static int ftrace_modify_all_code(unsigned long pc, unsigned long old, ++ * unsigned long new, bool validate) ++ */ ++notrace int p_ftrace_modify_all_code_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct ftrace_rec_iter *p_iter; ++ struct dyn_ftrace *p_rec; ++ struct module *p_module = NULL; ++ unsigned int p_tmp; ++ int p_command = p_regs_get_arg1(p_regs); ++ ++ if (unlikely(!P_SYM(p_state_init))) ++ return 0; ++ ++ spin_lock(&p_db_lock); ++ p_ftrace_tmp_mod = p_ftrace_tmp_text = 0; ++ /* text_mutex lock should do the sync work here... 
*/ ++ bitmap_zero(p_db.p_jump_label.p_mod_mask, p_db.p_module_list_nr); ++ ++ if (p_command & FTRACE_UPDATE_TRACE_FUNC || ++ p_command & FTRACE_START_FUNC_RET || ++ p_command & FTRACE_STOP_FUNC_RET) { ++ p_ftrace_tmp_text++; ++ } ++ ++ p_for_ftrace_rec_iter(p_iter) { ++ p_rec = P_SYM(p_ftrace_rec_iter_record)(p_iter); ++ ++ if (P_SYM(p_core_kernel_text)(p_rec->ip)) { ++ ++ p_ftrace_tmp_text++; ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0) ++ } else if ( (p_module = P_SYM(p_module_text_address)(p_rec->ip)) != NULL) { ++#else ++ } else if ( (p_module = __module_text_address(p_rec->ip)) != NULL) { ++#endif ++ for (p_tmp = 0; p_tmp < p_db.p_module_list_nr; p_tmp++) { ++ if (p_db.p_module_list_array[p_tmp].p_mod == p_module) { ++ /* ++ * OK, we found this module on our internal tracking list. ++ * Set bit in bitmask ++ */ ++ set_bit(p_tmp, p_db.p_jump_label.p_mod_mask); ++ p_ftrace_tmp_mod++; ++ break; ++ } ++ } ++ ++ } else { ++ /* ++ * FTRACE might generate dynamic trampoline which is not part of .text section. ++ * This is not abnormal situation anymore. ++ */ ++ p_print_log(P_LKRG_INFO, ++ "[FTRACE] Not a .text section! [0x%lx]\n",p_rec->ip); ++ } ++ } ++ ++ return 0; ++} ++ ++ ++notrace int p_ftrace_modify_all_code_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ unsigned int p_tmp,p_tmp2; ++ unsigned char p_flag = 0; ++ struct module *p_module = NULL; ++ ++ /* ++ * Are we initialized? ++ */ ++ if (unlikely(!P_SYM(p_state_init))) ++ return 0; ++ ++ if (p_ftrace_tmp_text) { ++ /* ++ * We do not require to take any locks neither to copy entire .text section to temporary memory ++ * since at this state it is static. Just recompute the hash. 
++ */ ++ p_db.kernel_stext.p_hash = p_lkrg_fast_hash((unsigned char *)p_db.kernel_stext.p_addr, ++ (unsigned int)p_db.kernel_stext.p_size); ++#if defined(P_LKRG_JUMP_LABEL_STEXT_DEBUG) ++ memcpy(p_db.kernel_stext_copy,p_db.kernel_stext.p_addr,p_db.kernel_stext.p_size); ++ p_db.kernel_stext_copy[p_db.kernel_stext.p_size] = 0; ++#endif ++ ++ p_print_log(P_LKRG_INFO, ++ "[FTRACE] Updating kernel core .text section hash!\n"); ++ ++ } ++ ++ if (p_ftrace_tmp_mod) { ++ ++ for (p_tmp = 0; p_tmp < p_db.p_module_list_nr; p_tmp++) { ++ if (test_bit(p_tmp, p_db.p_jump_label.p_mod_mask)) { ++ ++ /* ++ * OK, we found this module on our internal tracking list. ++ * Update it's hash ++ */ ++ p_module = p_db.p_module_list_array[p_tmp].p_mod; ++ ++ p_print_log(P_LKRG_INFO, ++ "[FTRACE] Updating module's core .text section hash module[%s : 0x%lx]!\n", ++ p_db.p_module_list_array[p_tmp].p_name, ++ (unsigned long)p_db.p_module_list_array[p_tmp].p_mod); ++ ++ p_db.p_module_list_array[p_tmp].p_mod_core_text_hash = ++ p_lkrg_fast_hash((unsigned char *)p_db.p_module_list_array[p_tmp].p_module_core, ++ (unsigned int)p_db.p_module_list_array[p_tmp].p_core_text_size); ++ /* ++ * Because we have modified individual module's hash, we need to update ++ * 'global' module's list hash as well ++ */ ++ p_db.p_module_list_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_list_array, ++ (unsigned int)p_db.p_module_list_nr * sizeof(p_module_list_mem)); ++ ++ ++ /* ++ * Because we update module's .text section hash we need to update KOBJs as well. 
++ */ ++ for (p_tmp2 = 0; p_tmp2 < p_db.p_module_kobj_nr; p_tmp2++) { ++ if (p_db.p_module_kobj_array[p_tmp2].p_mod == p_module) { ++ p_db.p_module_kobj_array[p_tmp2].p_mod_core_text_hash = ++ p_db.p_module_list_array[p_tmp].p_mod_core_text_hash; ++ p_flag = 1; ++ break; ++ } ++ } ++ ++ if (!p_flag) { ++ p_print_log(P_LKRG_ERR, ++ "[FTRACE] Updated module's list hash for module[%s] but can't find the same module in KOBJs list!\n", ++ p_db.p_module_list_array[p_tmp].p_name); ++ p_print_log(P_LKRG_INFO,"module[%s : 0x%lx]!\n", ++ p_db.p_module_list_array[p_tmp].p_name, ++ (unsigned long)p_db.p_module_list_array[p_tmp].p_mod); ++ } else { ++ ++ /* ++ * Because we have modified individual module's hash, we need to update ++ * 'global' module's list hash as well ++ */ ++ p_db.p_module_kobj_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_kobj_array, ++ (unsigned int)p_db.p_module_kobj_nr * sizeof(p_module_kobj_mem)); ++ } ++ } ++ } ++ } ++ ++ spin_unlock(&p_db_lock); ++ ++ return 0; ++} ++ ++ ++int p_install_ftrace_modify_all_code_hook(void) { ++ ++ int p_tmp; ++ ++ P_SYM(p_ftrace_lock) = (struct mutex *)P_SYM(p_kallsyms_lookup_name)("ftrace_lock"); ++ ++ if (!P_SYM(p_ftrace_lock)) { ++ p_print_log(P_LKRG_ERR, ++ "[FTRACE] ERROR: Can't find 'ftrace_lock' function :( Exiting...\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ P_SYM(p_ftrace_rec_iter_start) = (struct ftrace_rec_iter *(*)(void)) ++ P_SYM(p_kallsyms_lookup_name)("ftrace_rec_iter_start"); ++ ++ if (!P_SYM(p_ftrace_rec_iter_start)) { ++ p_print_log(P_LKRG_ERR, ++ "[FTRACE] ERROR: Can't find 'ftrace_rec_iter_start' function :( Exiting...\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ P_SYM(p_ftrace_rec_iter_next) = (struct ftrace_rec_iter *(*)(struct ftrace_rec_iter *)) ++ P_SYM(p_kallsyms_lookup_name)("ftrace_rec_iter_next"); ++ ++ if (!P_SYM(p_ftrace_rec_iter_next)) { ++ p_print_log(P_LKRG_ERR, ++ "[FTRACE] ERROR: Can't find 'ftrace_rec_iter_next' function :( Exiting...\n"); ++ return 
P_LKRG_GENERAL_ERROR; ++ } ++ ++ P_SYM(p_ftrace_rec_iter_record) = (struct dyn_ftrace *(*)(struct ftrace_rec_iter *)) ++ P_SYM(p_kallsyms_lookup_name)("ftrace_rec_iter_record"); ++ ++ if (!P_SYM(p_ftrace_rec_iter_record)) { ++ p_print_log(P_LKRG_ERR, ++ "[FTRACE] ERROR: Can't find 'ftrace_rec_iter_record' function :( Exiting...\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ if ( (p_tmp = register_kretprobe(&p_ftrace_modify_all_code_kretprobe)) != 0) { ++ p_print_log(P_LKRG_ERR, "[kretprobe] register_kretprobe() for <%s> failed! [err=%d]\n", ++ p_ftrace_modify_all_code_kretprobe.kp.symbol_name, ++ p_tmp); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ p_print_log(P_LKRG_INFO, "Planted [kretprobe] <%s> at: 0x%lx\n", ++ p_ftrace_modify_all_code_kretprobe.kp.symbol_name, ++ (unsigned long)p_ftrace_modify_all_code_kretprobe.kp.addr); ++ p_ftrace_modify_all_code_kretprobe_state = 1; ++ ++ return P_LKRG_SUCCESS; ++} ++ ++ ++void p_uninstall_ftrace_modify_all_code_hook(void) { ++ ++ if (!p_ftrace_modify_all_code_kretprobe_state) { ++ p_print_log(P_LKRG_INFO, "[kretprobe] <%s> at 0x%lx is NOT installed\n", ++ p_ftrace_modify_all_code_kretprobe.kp.symbol_name, ++ (unsigned long)p_ftrace_modify_all_code_kretprobe.kp.addr); ++ } else { ++ unregister_kretprobe(&p_ftrace_modify_all_code_kretprobe); ++ p_print_log(P_LKRG_INFO, "Removing [kretprobe] <%s> at 0x%lx nmissed[%d]\n", ++ p_ftrace_modify_all_code_kretprobe.kp.symbol_name, ++ (unsigned long)p_ftrace_modify_all_code_kretprobe.kp.addr, ++ p_ftrace_modify_all_code_kretprobe.nmissed); ++ p_ftrace_modify_all_code_kretprobe_state = 0; ++ } ++} ++ ++#endif +diff --git a/security/lkrg/modules/database/FTRACE/p_ftrace_modify_all_code/p_ftrace_modify_all_code.h b/security/lkrg/modules/database/FTRACE/p_ftrace_modify_all_code/p_ftrace_modify_all_code.h +new file mode 100644 +index 000000000000..ea2fde485b4b +--- /dev/null ++++ b/security/lkrg/modules/database/FTRACE/p_ftrace_modify_all_code/p_ftrace_modify_all_code.h +@@ -0,0 +1,51 @@ 
++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Handle dynamic FTRACE self-modifying code. ++ * Hook 'ftrace_modify_all_code' function. ++ * ++ * Notes: ++ * - Linux kernel might be self-modifying using dynamic FTRACE. ++ * Most of the Linux distributions provide kernel with FTRACE enabled. ++ * It can dynamically modify Linux kernel code. It is very troublesome ++ * for this project. We are relying on comparing hashes from the specific ++ * memory regions and by design self-modifications break this functionality. ++ * - We are hooking into low-level FTRACE functions to be able to monitor ++ * whenever new modification is on the way. ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 18.IX.2020 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#if defined(CONFIG_DYNAMIC_FTRACE) ++ ++#ifndef P_LKRG_FTRACE_MODIFY_ALL_CODE_H ++#define P_LKRG_FTRACE_MODIFY_ALL_CODE_H ++ ++/* per-instance private data */ ++struct p_ftrace_modify_all_code_data { ++ ktime_t entry_stamp; ++}; ++ ++#define p_for_ftrace_rec_iter(iter) \ ++ for (iter = P_SYM(p_ftrace_rec_iter_start)(); \ ++ iter; \ ++ iter = P_SYM(p_ftrace_rec_iter_next)(iter)) ++ ++ ++int p_ftrace_modify_all_code_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_ftrace_modify_all_code_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_ftrace_modify_all_code_hook(void); ++void p_uninstall_ftrace_modify_all_code_hook(void); ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/database/JUMP_LABEL/p_arch_jump_label_transform/p_arch_jump_label_transform.c b/security/lkrg/modules/database/JUMP_LABEL/p_arch_jump_label_transform/p_arch_jump_label_transform.c +new file mode 100644 +index 000000000000..fe362546466f +--- /dev/null ++++ b/security/lkrg/modules/database/JUMP_LABEL/p_arch_jump_label_transform/p_arch_jump_label_transform.c +@@ -0,0 +1,232 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * 
Component: ++ * - Handle *_JUMP_LABEL self-modifying code. ++ * Hook 'arch_jump_label_transform' function. ++ * ++ * Notes: ++ * - Linux kernel is heavily consuming *_JUMP_LABEL (if enabled). Most of the ++ * Linux distributions provide kernel with these options compiled. It makes ++ * Linux kernel being self-modifying code. It is very troublesome for this ++ * project. We are relying on comparing hashes from the specific memory ++ * regions and by design self-modifications break this functionality. ++ * - We are hooking into low-level *_JUMP_LABEL functions to be able to ++ * monitor whenever new modification is on the way. ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 28.I.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_arch_jump_label_transform_kretprobe_state = 0; ++p_lkrg_counter_lock p_jl_lock; ++ ++static struct kretprobe p_arch_jump_label_transform_kretprobe = { ++ .kp.symbol_name = "arch_jump_label_transform", ++ .handler = p_arch_jump_label_transform_ret, ++ .entry_handler = p_arch_jump_label_transform_entry, ++ .data_size = sizeof(struct p_arch_jump_label_transform_data), ++ /* Probe up to 40 instances concurrently. 
*/ ++ .maxactive = 40, ++}; ++ ++ ++notrace int p_arch_jump_label_transform_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct jump_entry *p_tmp = (struct jump_entry *)p_regs_get_arg1(p_regs); ++ unsigned long p_addr = p_jump_entry_code(p_tmp); ++ struct module *p_module = NULL; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "p_arch_jump_label_transform_entry: comm[%s] Pid:%d\n",current->comm,current->pid); ++ ++ p_lkrg_counter_lock_lock(&p_jl_lock, &p_flags); ++ p_lkrg_counter_lock_val_inc(&p_jl_lock); ++ p_lkrg_counter_lock_unlock(&p_jl_lock, &p_flags); ++ ++ p_print_log(P_LKRG_INFO, ++ "[JUMP_LABEL] New modification: type[%s] code[0x%lx] target[0x%lx] key[0x%lx]!\n", ++ (p_regs_get_arg2(p_regs) == 1) ? "JUMP_LABEL_JMP" : ++ (p_regs_get_arg2(p_regs) == 0) ? "JUMP_LABEL_NOP" : ++ "UNKNOWN", ++ p_jump_entry_code(p_tmp), ++ p_jump_entry_target(p_tmp), ++ (unsigned long)p_jump_entry_key(p_tmp)); ++ ++ if (P_SYM(p_core_kernel_text)(p_addr)) { ++ /* ++ * OK, *_JUMP_LABEL tries to modify kernel core .text section ++ */ ++ p_db.p_jump_label.p_state = P_JUMP_LABEL_CORE_TEXT; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0) ++ } else if ( (p_module = P_SYM(p_module_text_address)(p_addr)) != NULL) { ++#else ++ } else if ( (p_module = __module_text_address(p_addr)) != NULL) { ++#endif ++ /* ++ * OK, *_JUMP_LABEL tries to modify some module's .text section ++ */ ++ p_db.p_jump_label.p_state = P_JUMP_LABEL_MODULE_TEXT; ++ p_db.p_jump_label.p_mod = p_module; ++ } else { ++ /* ++ * FTRACE might generate dynamic trampoline which is not part of .text section. ++ * This is not abnormal situation anymore. ++ */ ++ p_print_log(P_LKRG_INFO, ++ "[JUMP_LABEL] Not a .text section! 
[0x%lx]\n",p_addr); ++ p_db.p_jump_label.p_state = P_JUMP_LABEL_WTF_STATE; ++ } ++ ++ /* A dump_stack() here will give a stack backtrace */ ++ return 0; ++} ++ ++ ++notrace int p_arch_jump_label_transform_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ unsigned int p_tmp,p_tmp2; ++ unsigned char p_flag = 0; ++ ++ switch (p_db.p_jump_label.p_state) { ++ ++ case P_JUMP_LABEL_CORE_TEXT: ++ ++ /* ++ * We do not require to take any locks neither to copy entire .text section to temporary memory ++ * since at this state it is static. Just recompute the hash. ++ */ ++ p_db.kernel_stext.p_hash = p_lkrg_fast_hash((unsigned char *)p_db.kernel_stext.p_addr, ++ (unsigned int)p_db.kernel_stext.p_size); ++ ++#if defined(P_LKRG_JUMP_LABEL_STEXT_DEBUG) ++ memcpy(p_db.kernel_stext_copy,p_db.kernel_stext.p_addr,p_db.kernel_stext.p_size); ++ p_db.kernel_stext_copy[p_db.kernel_stext.p_size] = 0; ++#endif ++ ++ p_print_log(P_LKRG_INFO, ++ "[JUMP_LABEL] Updating kernel core .text section hash!\n"); ++ ++ break; ++ ++ case P_JUMP_LABEL_MODULE_TEXT: ++ ++ for (p_tmp = 0; p_tmp < p_db.p_module_list_nr; p_tmp++) { ++ if (p_db.p_module_list_array[p_tmp].p_mod == p_db.p_jump_label.p_mod) { ++ /* ++ * OK, we found this module on our internal tracking list. 
++ * Update it's hash ++ */ ++ ++ p_print_log(P_LKRG_INFO, ++ "[JUMP_LABEL] Updating module's core .text section hash module[%s : 0x%lx]!\n", ++ p_db.p_module_list_array[p_tmp].p_name, ++ (unsigned long)p_db.p_module_list_array[p_tmp].p_mod); ++ ++ p_db.p_module_list_array[p_tmp].p_mod_core_text_hash = ++ p_lkrg_fast_hash((unsigned char *)p_db.p_module_list_array[p_tmp].p_module_core, ++ (unsigned int)p_db.p_module_list_array[p_tmp].p_core_text_size); ++ /* ++ * Because we have modified individual module's hash, we need to update ++ * 'global' module's list hash as well ++ */ ++ p_db.p_module_list_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_list_array, ++ (unsigned int)p_db.p_module_list_nr * sizeof(p_module_list_mem)); ++ ++ /* ++ * Because we update module's .text section hash we need to update KOBJs as well. ++ */ ++ for (p_tmp2 = 0; p_tmp2 < p_db.p_module_kobj_nr; p_tmp2++) { ++ if (p_db.p_module_kobj_array[p_tmp2].p_mod == p_db.p_jump_label.p_mod) { ++ p_db.p_module_kobj_array[p_tmp2].p_mod_core_text_hash = ++ p_db.p_module_list_array[p_tmp].p_mod_core_text_hash; ++ p_flag = 1; ++ break; ++ } ++ } ++ ++ if (!p_flag) { ++ p_print_log(P_LKRG_ERR, ++ "[JUMP_LABEL] Updated module's list hash for module[%s] but can't find the same module in KOBJs list!\n", ++ p_db.p_module_list_array[p_tmp].p_name); ++ p_print_log(P_LKRG_INFO,"module[%s : 0x%lx]!\n", ++ p_db.p_module_list_array[p_tmp].p_name, ++ (unsigned long)p_db.p_module_list_array[p_tmp].p_mod); ++ } else { ++ ++ /* ++ * Because we have modified individual module's hash, we need to update ++ * 'global' module's list hash as well ++ */ ++ p_db.p_module_kobj_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_kobj_array, ++ (unsigned int)p_db.p_module_kobj_nr * sizeof(p_module_kobj_mem)); ++ } ++ break; ++ } ++ } ++ break; ++ ++ case P_JUMP_LABEL_WTF_STATE: ++ default: ++ /* ++ * FTRACE might generate dynamic trampoline which is not part of .text section. ++ * This is not abnormal situation anymore. 
++ */ ++ break; ++ } ++ ++ p_db.p_jump_label.p_state = P_JUMP_LABEL_NONE; ++ ++ p_lkrg_counter_lock_val_dec(&p_jl_lock); ++ ++ return 0; ++} ++ ++ ++int p_install_arch_jump_label_transform_hook(void) { ++ ++ int p_tmp; ++ ++ p_lkrg_counter_lock_init(&p_jl_lock); ++ ++ if ( (p_tmp = register_kretprobe(&p_arch_jump_label_transform_kretprobe)) != 0) { ++ p_print_log(P_LKRG_ERR, "[kretprobe] register_kretprobe() for <%s> failed! [err=%d]\n", ++ p_arch_jump_label_transform_kretprobe.kp.symbol_name, ++ p_tmp); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ p_print_log(P_LKRG_INFO, "Planted [kretprobe] <%s> at: 0x%lx\n", ++ p_arch_jump_label_transform_kretprobe.kp.symbol_name, ++ (unsigned long)p_arch_jump_label_transform_kretprobe.kp.addr); ++ p_arch_jump_label_transform_kretprobe_state = 1; ++ ++ return P_LKRG_SUCCESS; ++} ++ ++ ++void p_uninstall_arch_jump_label_transform_hook(void) { ++ ++ if (!p_arch_jump_label_transform_kretprobe_state) { ++ p_print_log(P_LKRG_INFO, "[kretprobe] <%s> at 0x%lx is NOT installed\n", ++ p_arch_jump_label_transform_kretprobe.kp.symbol_name, ++ (unsigned long)p_arch_jump_label_transform_kretprobe.kp.addr); ++ } else { ++ unregister_kretprobe(&p_arch_jump_label_transform_kretprobe); ++ p_print_log(P_LKRG_INFO, "Removing [kretprobe] <%s> at 0x%lx nmissed[%d]\n", ++ p_arch_jump_label_transform_kretprobe.kp.symbol_name, ++ (unsigned long)p_arch_jump_label_transform_kretprobe.kp.addr, ++ p_arch_jump_label_transform_kretprobe.nmissed); ++ p_arch_jump_label_transform_kretprobe_state = 0; ++ } ++} +diff --git a/security/lkrg/modules/database/JUMP_LABEL/p_arch_jump_label_transform/p_arch_jump_label_transform.h b/security/lkrg/modules/database/JUMP_LABEL/p_arch_jump_label_transform/p_arch_jump_label_transform.h +new file mode 100644 +index 000000000000..4473e7cb5c78 +--- /dev/null ++++ b/security/lkrg/modules/database/JUMP_LABEL/p_arch_jump_label_transform/p_arch_jump_label_transform.h +@@ -0,0 +1,43 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * 
Component: ++ * - Handle *_JUMP_LABEL self-modifying code. ++ * Hook 'arch_jump_label_transform' function. ++ * ++ * Notes: ++ * - Linux kernel is heavily consuming *_JUMP_LABEL (if enabled). Most of the ++ * Linux distributions provide kernel with these options compiled. It makes ++ * Linux kernel being self-modifying code. It is very troublesome for this ++ * project. We are relying on comparing hashes from the specific memory ++ * regions and by design self-modifications break this functionality. ++ * - We are hooking into low-level *_JUMP_LABEL functions to be able to ++ * monitor whenever new modification is on the way. ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 28.I.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_CI_ARCH_JUMP_LABEL_TRANSFORM_H ++#define P_LKRG_CI_ARCH_JUMP_LABEL_TRANSFORM_H ++ ++/* per-instance private data */ ++struct p_arch_jump_label_transform_data { ++ ktime_t entry_stamp; ++}; ++ ++extern p_lkrg_counter_lock p_jl_lock; ++ ++int p_arch_jump_label_transform_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_arch_jump_label_transform_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_arch_jump_label_transform_hook(void); ++void p_uninstall_arch_jump_label_transform_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/database/JUMP_LABEL/p_arch_jump_label_transform_apply/p_arch_jump_label_transform_apply.c b/security/lkrg/modules/database/JUMP_LABEL/p_arch_jump_label_transform_apply/p_arch_jump_label_transform_apply.c +new file mode 100644 +index 000000000000..c65e36551dc7 +--- /dev/null ++++ b/security/lkrg/modules/database/JUMP_LABEL/p_arch_jump_label_transform_apply/p_arch_jump_label_transform_apply.c +@@ -0,0 +1,279 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Handle *_JUMP_LABEL self-modifying code. ++ * Hook 'arch_jump_label_transform_apply' function. 
++ * ++ * Notes: ++ * - Linux kernel is heavily consuming *_JUMP_LABEL (if enabled). Most of the ++ * Linux distributions provide kernel with these options compiled. It makes ++ * Linux kernel being self-modifying code. It is very troublesome for this ++ * project. We are relying on comparing hashes from the specific memory ++ * regions and by design self-modifications break this functionality. ++ * - We are hooking into low-level *_JUMP_LABEL functions to be able to ++ * monitor whenever new modification is on the way. ++ * ++ * Caveats: ++ * - Since kernel 5.3 Linux has support for 'batch mode' *_JUMP_LABEL. ++ * Let's handle that as well. ++ * ++ * https://lore.kernel.org/patchwork/patch/1064287/ ++ * ++ * Timeline: ++ * - Created: 31.X.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++#ifdef P_LKRG_CI_ARCH_JUMP_LABEL_TRANSFORM_APPLY_H ++ ++unsigned long p_jl_batch_addr[P_TP_VEC_MAX]; ++unsigned int p_jl_batch_nr; ++ ++char p_arch_jump_label_transform_apply_kretprobe_state = 0; ++ ++static struct kretprobe p_arch_jump_label_transform_apply_kretprobe = { ++ .kp.symbol_name = "arch_jump_label_transform_apply", ++ .handler = p_arch_jump_label_transform_apply_ret, ++ .entry_handler = p_arch_jump_label_transform_apply_entry, ++ .data_size = sizeof(struct p_arch_jump_label_transform_apply_data), ++ /* Probe up to 40 instances concurrently. 
*/ ++ .maxactive = 40, ++}; ++ ++ ++notrace int p_arch_jump_label_transform_apply_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ int p_nr = *P_SYM(p_tp_vec_nr); ++ int p_cnt = 0; ++ p_text_poke_loc *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "p_arch_jump_label_transform_apply_entry: comm[%s] Pid:%d\n",current->comm,current->pid); ++ ++ p_lkrg_counter_lock_lock(&p_jl_lock, &p_flags); ++ p_lkrg_counter_lock_val_inc(&p_jl_lock); ++ p_lkrg_counter_lock_unlock(&p_jl_lock, &p_flags); ++ ++ p_print_log(P_LKRG_INFO, ++ "[JUMP_LABEL ] New modifications => %d\n",p_nr); ++ ++ for (p_jl_batch_nr = 0; p_cnt < p_nr; p_cnt++) { ++ p_tmp = (p_text_poke_loc *)&P_SYM(p_tp_vec)[p_jl_batch_nr*sizeof(p_text_poke_loc)]; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) ++ if ( (p_tmp->opcode == CALL_INSN_OPCODE || p_tmp->opcode == JMP32_INSN_OPCODE) && ++ p_tmp->rel_addr) { ++#else ++ if (p_tmp->len == JUMP_LABEL_NOP_SIZE && ++ p_tmp->addr ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0) || \ ++ (defined(RHEL_RELEASE_CODE) && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3)) ++ && p_tmp->opcode) { ++#else ++ && p_tmp->detour) { ++#endif ++ ++#endif ++ p_jl_batch_addr[p_jl_batch_nr++] = ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) ++ (unsigned long)p_tmp->rel_addr + ++ (unsigned long)p_db.kernel_stext.p_addr; ++#else ++ (unsigned long)p_tmp->addr; ++#endif ++ } ++ } ++ ++ /* A dump_stack() here will give a stack backtrace */ ++ return 0; ++} ++ ++ ++notrace int p_arch_jump_label_transform_apply_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct module *p_module = NULL; ++ unsigned int p_cnt; ++ unsigned int p_tmp,p_tmp2; ++ unsigned char p_flag = 0; ++ unsigned int p_text = 0; ++ unsigned int p_mod = 0; ++// DECLARE_BITMAP(p_mod_mask, p_db.p_module_list_nr); ++ ++ bitmap_zero(p_db.p_jump_label.p_mod_mask, p_db.p_module_list_nr); ++ ++ for (p_cnt = 0; p_cnt < p_jl_batch_nr; p_cnt++) { ++ ++ if 
(P_SYM(p_core_kernel_text)(p_jl_batch_addr[p_cnt])) { ++ ++ p_text++; ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0) ++ } else if ( (p_module = P_SYM(p_module_text_address)(p_jl_batch_addr[p_cnt])) != NULL) { ++#else ++ } else if ( (p_module = __module_text_address(p_jl_batch_addr[p_cnt])) != NULL) { ++#endif ++ ++ for (p_tmp = 0; p_tmp < p_db.p_module_list_nr; p_tmp++) { ++ if (p_db.p_module_list_array[p_tmp].p_mod == p_module) { ++ ++ /* ++ * OK, we found this module on our internal tracking list. ++ * Set bit in bitmask ++ */ ++ set_bit(p_tmp, p_db.p_jump_label.p_mod_mask); ++ p_mod++; ++ break; ++ } ++ } ++ ++ } else { ++ /* ++ * FTRACE might generate dynamic trampoline which is not part of .text section. ++ * This is not abnormal situation anymore. ++ */ ++ p_print_log(P_LKRG_INFO, ++ "[JUMP_LABEL ] Not a .text section! [0x%lx]\n",p_jl_batch_addr[p_cnt]); ++ } ++ } ++ ++ if (p_text) { ++ /* ++ * We do not require to take any locks neither to copy entire .text section to temporary memory ++ * since at this state it is static. Just recompute the hash. ++ */ ++ p_db.kernel_stext.p_hash = p_lkrg_fast_hash((unsigned char *)p_db.kernel_stext.p_addr, ++ (unsigned int)p_db.kernel_stext.p_size); ++ ++#if defined(P_LKRG_JUMP_LABEL_STEXT_DEBUG) ++ memcpy(p_db.kernel_stext_copy,p_db.kernel_stext.p_addr,p_db.kernel_stext.p_size); ++ p_db.kernel_stext_copy[p_db.kernel_stext.p_size] = 0; ++#endif ++ ++ p_print_log(P_LKRG_INFO, ++ "[JUMP_LABEL ] Updating kernel core .text section hash!\n"); ++ } ++ ++ if (p_mod) { ++ for (p_tmp = 0; p_tmp < p_db.p_module_list_nr; p_tmp++) { ++ if (test_bit(p_tmp, p_db.p_jump_label.p_mod_mask)) { ++ ++ /* ++ * OK, we found this module on our internal tracking list. 
++ * Update it's hash ++ */ ++ ++ p_module = p_db.p_module_list_array[p_tmp].p_mod; ++ ++ p_print_log(P_LKRG_INFO, ++ "[JUMP_LABEL ] Updating module's core .text section hash module[%s : 0x%lx]!\n", ++ p_db.p_module_list_array[p_tmp].p_name, ++ (unsigned long)p_db.p_module_list_array[p_tmp].p_mod); ++ ++ p_db.p_module_list_array[p_tmp].p_mod_core_text_hash = ++ p_lkrg_fast_hash((unsigned char *)p_db.p_module_list_array[p_tmp].p_module_core, ++ (unsigned int)p_db.p_module_list_array[p_tmp].p_core_text_size); ++ /* ++ * Because we have modified individual module's hash, we need to update ++ * 'global' module's list hash as well ++ */ ++ p_db.p_module_list_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_list_array, ++ (unsigned int)p_db.p_module_list_nr * sizeof(p_module_list_mem)); ++ ++ /* ++ * Because we update module's .text section hash we need to update KOBJs as well. ++ */ ++ for (p_tmp2 = 0; p_tmp2 < p_db.p_module_kobj_nr; p_tmp2++) { ++ if (p_db.p_module_kobj_array[p_tmp2].p_mod == p_module) { ++ p_db.p_module_kobj_array[p_tmp2].p_mod_core_text_hash = ++ p_db.p_module_list_array[p_tmp].p_mod_core_text_hash; ++ p_flag = 1; ++ break; ++ } ++ } ++ ++ if (!p_flag) { ++ p_print_log(P_LKRG_ERR, ++ "[JUMP_LABEL ] Updated module's list hash for module[%s] but can't find the same module in KOBJs list!\n", ++ p_db.p_module_list_array[p_tmp].p_name); ++ p_print_log(P_LKRG_INFO,"module[%s : 0x%lx]!\n", ++ p_db.p_module_list_array[p_tmp].p_name, ++ (unsigned long)p_db.p_module_list_array[p_tmp].p_mod); ++ } else { ++ ++ /* ++ * Because we have modified individual module's hash, we need to update ++ * 'global' module's list hash as well ++ */ ++ p_db.p_module_kobj_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_kobj_array, ++ (unsigned int)p_db.p_module_kobj_nr * sizeof(p_module_kobj_mem)); ++ } ++ } ++ } ++ } ++ ++ p_db.p_jump_label.p_state = P_JUMP_LABEL_NONE; ++ ++ p_lkrg_counter_lock_val_dec(&p_jl_lock); ++ ++ return 0; ++} ++ ++ ++int 
p_install_arch_jump_label_transform_apply_hook(void) { ++ ++ int p_tmp; ++ ++ P_SYM(p_tp_vec) = (struct text_poke_loc **)P_SYM(p_kallsyms_lookup_name)("tp_vec"); ++ P_SYM(p_tp_vec_nr) = (int *)P_SYM(p_kallsyms_lookup_name)("tp_vec_nr"); ++ ++// DEBUG ++ p_debug_log(P_LKRG_DBG, " " ++ "p_tp_vec[0x%lx] p_tp_vec_nr[0x%lx]\n", ++ (unsigned long)P_SYM(p_tp_vec), ++ (unsigned long)P_SYM(p_tp_vec_nr)); ++ ++ if (!P_SYM(p_tp_vec) || !P_SYM(p_tp_vec_nr)) { ++ p_print_log(P_LKRG_ERR, ++ " " ++ "Can't find 'tp_vec' / 'tp_vec_nr' variable :( Exiting...\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ if ( (p_tmp = register_kretprobe(&p_arch_jump_label_transform_apply_kretprobe)) != 0) { ++ p_print_log(P_LKRG_ERR, "[kretprobe] register_kretprobe() for <%s> failed! [err=%d]\n", ++ p_arch_jump_label_transform_apply_kretprobe.kp.symbol_name, ++ p_tmp); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ p_print_log(P_LKRG_INFO, "Planted [kretprobe] <%s> at: 0x%lx\n", ++ p_arch_jump_label_transform_apply_kretprobe.kp.symbol_name, ++ (unsigned long)p_arch_jump_label_transform_apply_kretprobe.kp.addr); ++ p_arch_jump_label_transform_apply_kretprobe_state = 1; ++ ++ return P_LKRG_SUCCESS; ++} ++ ++ ++void p_uninstall_arch_jump_label_transform_apply_hook(void) { ++ ++ if (!p_arch_jump_label_transform_apply_kretprobe_state) { ++ p_print_log(P_LKRG_INFO, "[kretprobe] <%s> at 0x%lx is NOT installed\n", ++ p_arch_jump_label_transform_apply_kretprobe.kp.symbol_name, ++ (unsigned long)p_arch_jump_label_transform_apply_kretprobe.kp.addr); ++ } else { ++ unregister_kretprobe(&p_arch_jump_label_transform_apply_kretprobe); ++ p_print_log(P_LKRG_INFO, "Removing [kretprobe] <%s> at 0x%lx nmissed[%d]\n", ++ p_arch_jump_label_transform_apply_kretprobe.kp.symbol_name, ++ (unsigned long)p_arch_jump_label_transform_apply_kretprobe.kp.addr, ++ p_arch_jump_label_transform_apply_kretprobe.nmissed); ++ p_arch_jump_label_transform_apply_kretprobe_state = 0; ++ } ++} ++ ++#endif +diff --git 
a/security/lkrg/modules/database/JUMP_LABEL/p_arch_jump_label_transform_apply/p_arch_jump_label_transform_apply.h b/security/lkrg/modules/database/JUMP_LABEL/p_arch_jump_label_transform_apply/p_arch_jump_label_transform_apply.h +new file mode 100644 +index 000000000000..21bc51f0af8b +--- /dev/null ++++ b/security/lkrg/modules/database/JUMP_LABEL/p_arch_jump_label_transform_apply/p_arch_jump_label_transform_apply.h +@@ -0,0 +1,69 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Handle *_JUMP_LABEL self-modifying code. ++ * Hook 'arch_jump_label_transform_apply' function. ++ * ++ * Notes: ++ * - Linux kernel is heavily consuming *_JUMP_LABEL (if enabled). Most of the ++ * Linux distributions provide kernel with these options compiled. It makes ++ * Linux kernel being self-modifying code. It is very troublesome for this ++ * project. We are relying on comparing hashes from the specific memory ++ * regions and by design self-modifications break this functionality. ++ * - We are hooking into low-level *_JUMP_LABEL functions to be able to ++ * monitor whenever new modification is on the way. ++ * ++ * Caveats: ++ * - Since kernel 5.3 Linux has support for 'batch mode' *_JUMP_LABEL. ++ * Let's handle that as well. 
++ * ++ * https://lore.kernel.org/patchwork/patch/1064287/ ++ * ++ * Timeline: ++ * - Created: 31.X.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#if defined(CONFIG_X86) ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,0) || \ ++ (defined(RHEL_RELEASE_CODE) && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2)) ++ ++#ifndef P_LKRG_CI_ARCH_JUMP_LABEL_TRANSFORM_APPLY_H ++#define P_LKRG_CI_ARCH_JUMP_LABEL_TRANSFORM_APPLY_H ++ ++#include ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) ++typedef struct _p_text_poke_loc { ++ s32 rel_addr; /* addr := _stext + rel_addr */ ++ s32 rel32; ++ u8 opcode; ++ const u8 text[POKE_MAX_OPCODE_SIZE]; ++} p_text_poke_loc; ++#else ++typedef struct text_poke_loc p_text_poke_loc; ++#endif ++ ++#define P_TP_VEC_MAX (PAGE_SIZE / sizeof(p_text_poke_loc)) ++ ++/* per-instance private data */ ++struct p_arch_jump_label_transform_apply_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_arch_jump_label_transform_apply_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_arch_jump_label_transform_apply_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_arch_jump_label_transform_apply_hook(void); ++void p_uninstall_arch_jump_label_transform_apply_hook(void); ++ ++#endif ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/database/arch/arm/p_arm_metadata.c b/security/lkrg/modules/database/arch/arm/p_arm_metadata.c +new file mode 100644 +index 000000000000..50209623a2e8 +--- /dev/null ++++ b/security/lkrg/modules/database/arch/arm/p_arm_metadata.c +@@ -0,0 +1,60 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Database module ++ * => submodule for dumping ARM specific metadata ++ * ++ * Notes: ++ * - Metadata can be different per CPU which makes it quite complicated... 
++ * We need to run 'dumping' function on each CPU individually ++ * ++ * - Linux kernel defines different types of CPUs: ++ * => online CPUs ++ * => possible CPUs ++ * => present CPUs ++ * => active CPUs ++ * ++ * We are going to run procedure only on 'active CPUs' and different ++ * procedure is checking if number of active CPUs changes over time... ++ * ++ * Timeline: ++ * - Created: 09.X.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++#if defined(CONFIG_ARM) ++ ++/* ++ * This function is independently executed by each active CPU. ++ */ ++void p_dump_arm_metadata(void *_p_arg) { ++ ++ p_CPU_metadata_hash_mem *p_arg = _p_arg; ++ int p_curr_cpu = 0xFFFFFFFF; ++ ++ /* ++ * Get ID and lock - no preemtion. ++ */ ++// p_curr_cpu = get_cpu(); ++ p_curr_cpu = smp_processor_id(); ++ ++ /* ++ * To avoid multpile access to the same page from all CPUs ++ * memory will be already zero'd ++ */ ++// memset(&p_arg[p_curr_cpu],0,sizeof(p_CPU_metadata_hash_mem)); ++ ++ /* ++ * First fill information about current CPU ++ */ ++ p_arg[p_curr_cpu].p_cpu_id = p_curr_cpu; ++ p_arg[p_curr_cpu].p_cpu_online = P_CPU_ONLINE; ++} ++ ++#endif +diff --git a/security/lkrg/modules/database/arch/arm/p_arm_metadata.h b/security/lkrg/modules/database/arch/arm/p_arm_metadata.h +new file mode 100644 +index 000000000000..43e052106283 +--- /dev/null ++++ b/security/lkrg/modules/database/arch/arm/p_arm_metadata.h +@@ -0,0 +1,42 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Database module ++ * => Submodule - ARM specific metadata ++ * ++ * Notes: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.X.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_ARM_METADATA_H ++#define P_LKRG_ARM_METADATA_H ++ ++/* ++ * Each CPU in the system independently dump own critical data and save it using ++ * following structure - it includes: ++ * - ... 
++ */ ++typedef struct p_CPU_metadata_hash_mem { ++ ++ /* ++ * Some information about CPU to support hot-plug[in/out] ++ */ ++ int p_cpu_id; ++ char p_cpu_online; // 1 - online, 0 - offline ++ ++ char p_MSR_marker; ++ ++} p_CPU_metadata_hash_mem; ++ ++void p_dump_arm_metadata(void *_p_arg); ++//void p_dump_arm_metadata(p_CPU_metadata_hash_mem *p_arg); ++ ++#endif +diff --git a/security/lkrg/modules/database/arch/arm64/p_arm64_metadata.c b/security/lkrg/modules/database/arch/arm64/p_arm64_metadata.c +new file mode 100644 +index 000000000000..a0a52c3eb9c4 +--- /dev/null ++++ b/security/lkrg/modules/database/arch/arm64/p_arm64_metadata.c +@@ -0,0 +1,60 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Database module ++ * => submodule for dumping ARM64 specific metadata ++ * ++ * Notes: ++ * - Metadata can be different per CPU which makes it quite complicated... ++ * We need to run 'dumping' function on each CPU individually ++ * ++ * - Linux kernel defines different types of CPUs: ++ * => online CPUs ++ * => possible CPUs ++ * => present CPUs ++ * => active CPUs ++ * ++ * We are going to run procedure only on 'active CPUs' and different ++ * procedure is checking if number of active CPUs changes over time... ++ * ++ * Timeline: ++ * - Created: 05.IV.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++#if defined(CONFIG_ARM64) ++ ++/* ++ * This function is independently executed by each active CPU. ++ */ ++void p_dump_arm64_metadata(void *_p_arg) { ++ ++ p_CPU_metadata_hash_mem *p_arg = _p_arg; ++ int p_curr_cpu = 0xFFFFFFFF; ++ ++ /* ++ * Get ID and lock - no preemtion. 
++ */ ++// p_curr_cpu = get_cpu(); ++ p_curr_cpu = smp_processor_id(); ++ ++ /* ++ * To avoid multpile access to the same page from all CPUs ++ * memory will be already zero'd ++ */ ++// memset(&p_arg[p_curr_cpu],0,sizeof(p_CPU_metadata_hash_mem)); ++ ++ /* ++ * First fill information about current CPU ++ */ ++ p_arg[p_curr_cpu].p_cpu_id = p_curr_cpu; ++ p_arg[p_curr_cpu].p_cpu_online = P_CPU_ONLINE; ++} ++ ++#endif +diff --git a/security/lkrg/modules/database/arch/arm64/p_arm64_metadata.h b/security/lkrg/modules/database/arch/arm64/p_arm64_metadata.h +new file mode 100644 +index 000000000000..b64ed7506b59 +--- /dev/null ++++ b/security/lkrg/modules/database/arch/arm64/p_arm64_metadata.h +@@ -0,0 +1,42 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Database module ++ * => Submodule - ARM64 specific metadata ++ * ++ * Notes: ++ * - None ++ * ++ * Timeline: ++ * - Created: 05.IV.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_ARM64_METADATA_H ++#define P_LKRG_ARM64_METADATA_H ++ ++/* ++ * Each CPU in the system independently dump own critical data and save it using ++ * following structure - it includes: ++ * - ... 
++ */ ++typedef struct p_CPU_metadata_hash_mem { ++ ++ /* ++ * Some information about CPU to support hot-plug[in/out] ++ */ ++ int p_cpu_id; ++ char p_cpu_online; // 1 - online, 0 - offline ++ ++ char p_MSR_marker; ++ ++} p_CPU_metadata_hash_mem; ++ ++void p_dump_arm64_metadata(void *_p_arg); ++//void p_dump_arm64_metadata(p_CPU_metadata_hash_mem *p_arg); ++ ++#endif +diff --git a/security/lkrg/modules/database/arch/p_arch_metadata.c b/security/lkrg/modules/database/arch/p_arch_metadata.c +new file mode 100644 +index 000000000000..ca9cfd5e8f15 +--- /dev/null ++++ b/security/lkrg/modules/database/arch/p_arch_metadata.c +@@ -0,0 +1,132 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Database submodule - middle layer for arch specific code ++ * ++ * Notes: ++ * - For now, it is only for x86 ++ * ++ * Timeline: ++ * - Created: 26.VIII.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../p_lkrg_main.h" ++ ++ ++void p_dump_CPU_metadata(void *_p_arg) { ++ ++ p_ed_pcfi_cpu(0); ++ ++#ifdef CONFIG_X86 ++ ++ p_dump_x86_metadata(_p_arg); ++ ++#elif defined(CONFIG_ARM64) ++ ++ p_dump_arm64_metadata(_p_arg); ++ ++#elif defined(CONFIG_ARM) ++ ++ p_dump_arm_metadata(_p_arg); ++ ++#endif ++ ++} ++ ++int p_register_arch_metadata(void) { ++ ++ P_SYM(p_core_kernel_text) = (int (*)(unsigned long))P_SYM(p_kallsyms_lookup_name)("core_kernel_text"); ++ ++ if (!P_SYM(p_core_kernel_text)) { ++ p_print_log(P_LKRG_ERR, ++ "[ED] ERROR: Can't find 'core_kernel_text' function :( Exiting...\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++#ifdef P_LKRG_RUNTIME_CODE_INTEGRITY_SWITCH_IDT_H ++ ++ if (p_install_switch_idt_hook()) { ++ p_print_log(P_LKRG_CRIT, ++ "ERROR: Can't hook 'switch_idt' function :( " ++ "It's OK, but tracelogs might be not supported - if enabled, it might generate FP! 
(depends on the kernel version)\n"); ++ // ++ // p_ret = P_LKRG_GENERAL_ERROR; ++ // goto error path ++ // ++ // The reason why we do not stop initialization here (error condition) ++ // is because this can only happen in kernel < 3.10 - which is rare and acceptable. ++ // ++ } ++ ++#endif ++ ++ /* ++ * This is not an arch specific hook, but it's a good place to register it ++ */ ++ if (p_install_arch_jump_label_transform_hook()) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't hook 'arch_jump_label_transform' function :(\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++#ifdef P_LKRG_CI_ARCH_JUMP_LABEL_TRANSFORM_APPLY_H ++ /* ++ * This is not an arch specific hook, but it's a good place to register it ++ */ ++ if (p_install_arch_jump_label_transform_apply_hook()) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't hook 'arch_jump_label_transform_apply' function :(\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++#endif ++ ++#if defined(CONFIG_DYNAMIC_FTRACE) ++ /* ++ * Same for FTRACE ++ */ ++ if (p_install_ftrace_modify_all_code_hook()) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't hook 'ftrace_modify_all_code' function :(\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++#endif ++ ++#if defined(CONFIG_FUNCTION_TRACER) ++ if (p_install_ftrace_enable_sysctl_hook()) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't hook 'ftrace_enable_sysctl' function :(\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++#endif ++ ++ return P_LKRG_SUCCESS; ++} ++ ++ ++int p_unregister_arch_metadata(void) { ++ ++#ifdef P_LKRG_RUNTIME_CODE_INTEGRITY_SWITCH_IDT_H ++ p_uninstall_switch_idt_hook(); ++#endif ++ ++ /* ++ * This is not an arch specific hook, but it's a good place to deregister it ++ */ ++ p_uninstall_arch_jump_label_transform_hook(); ++#ifdef P_LKRG_CI_ARCH_JUMP_LABEL_TRANSFORM_APPLY_H ++ p_uninstall_arch_jump_label_transform_apply_hook(); ++#endif ++#if defined(CONFIG_DYNAMIC_FTRACE) ++ p_uninstall_ftrace_modify_all_code_hook(); ++#endif ++#if defined(CONFIG_FUNCTION_TRACER) ++ 
p_uninstall_ftrace_enable_sysctl_hook(); ++#endif ++ ++ return P_LKRG_SUCCESS; ++} +diff --git a/security/lkrg/modules/database/arch/p_arch_metadata.h b/security/lkrg/modules/database/arch/p_arch_metadata.h +new file mode 100644 +index 000000000000..3ca8b26cb3ec +--- /dev/null ++++ b/security/lkrg/modules/database/arch/p_arch_metadata.h +@@ -0,0 +1,48 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Database submodule - middle layer for arch specific code ++ * ++ * Notes: ++ * - For now, it is only for x86 ++ * ++ * Timeline: ++ * - Created: 26.VIII.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_DATABASE_SUBMODULE_ARCH_H ++#define P_LKRG_DATABASE_SUBMODULE_ARCH_H ++ ++#ifdef CONFIG_X86 ++ ++/* ++ * x86/amd64 CPU specific data ++ */ ++#include "x86/p_x86_metadata.h" ++#include "x86/p_switch_idt/p_switch_idt.h" ++ ++#elif defined(CONFIG_ARM) ++/* ++ * ARM CPU specific data ++ */ ++#include "arm/p_arm_metadata.h" ++ ++#elif defined(CONFIG_ARM64) ++/* ++ * ARM64 CPU specific data ++ */ ++#include "arm64/p_arm64_metadata.h" ++ ++#endif ++ ++extern void p_dump_CPU_metadata(void *_p_arg); ++ ++int p_register_arch_metadata(void); ++int p_unregister_arch_metadata(void); ++ ++#endif +diff --git a/security/lkrg/modules/database/arch/x86/MSR.h b/security/lkrg/modules/database/arch/x86/MSR.h +new file mode 100644 +index 000000000000..efe8e3f8fbcd +--- /dev/null ++++ b/security/lkrg/modules/database/arch/x86/MSR.h +@@ -0,0 +1,41 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Database module ++ * => Submodule - X86/AMD64 MSR specific data ++ * ++ * Notes: ++ * - X86/AMD64 MSR specific data ++ * ++ * Timeline: ++ * - Created: 28.XI.2015 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_MSR_H ++#define P_LKRG_MSR_H ++ ++u64 p_read_msr(/*int p_cpu, */u32 p_arg); ++ ++#define P_MSR_READ_COUNT(x,y,z) \ ++do { \ ++ char p_tmp = x-1; \ 
++ do { \ ++ y = p_read_msr(z); \ ++ } while(!y && p_tmp--); \ ++} while(0) ++ ++ ++#ifdef CONFIG_X86_64 ++ #define P_MSR_ASM_RET(val, low, high) (((u64)(high) << 32) | (low)) ++ #define P_MSR_ASM_READ(val, low, high) "=a" (low), "=d" (high) ++#else ++ #define P_MSR_ASM_RET(val, low, high) (val) ++ #define P_MSR_ASM_READ(val, low, high) "=A" (val) ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/database/arch/x86/p_switch_idt/p_switch_idt.c b/security/lkrg/modules/database/arch/x86/p_switch_idt/p_switch_idt.c +new file mode 100644 +index 000000000000..10ccd28eb826 +--- /dev/null ++++ b/security/lkrg/modules/database/arch/x86/p_switch_idt/p_switch_idt.c +@@ -0,0 +1,99 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'switch_idt' function ++ * ++ * Notes: ++ * - Until kernel 4.14+ Linux kernel is switching IDT ++ * when user enable/disables tracepoints. ++ * If this happens, LKRG needs to rebuild DB with ++ * new CPU metadata. ++ * ++ * Caveats: ++ * - It is only needed for x86 arch ++ * ++ * Timeline: ++ * - Created: 26.VIII.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../p_lkrg_main.h" ++ ++#ifdef P_LKRG_RUNTIME_CODE_INTEGRITY_SWITCH_IDT_H ++ ++char p_switch_idt_kretprobe_state = 0; ++ ++static struct kretprobe p_switch_idt_kretprobe = { ++ .kp.symbol_name = "switch_idt", ++ .handler = p_switch_idt_ret, ++ .entry_handler = p_switch_idt_entry, ++ .data_size = sizeof(struct p_switch_idt_data), ++ /* Probe up to 40 instances concurrently. 
*/ ++ .maxactive = 40, ++}; ++ ++ ++int p_switch_idt_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ spin_lock(&p_db_lock); ++ ++ /* A dump_stack() here will give a stack backtrace */ ++ return 0; ++} ++ ++ ++int p_switch_idt_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++/* ++ on_each_cpu(p_dump_CPU_metadata,p_tmp_cpus,true); ++ p_tmp_hash = hash_from_CPU_data(p_tmp_cpus); ++*/ ++ smp_call_function_single(smp_processor_id(),p_dump_CPU_metadata,p_db.p_CPU_metadata_array,true); ++ p_db.p_CPU_metadata_hashes = hash_from_CPU_data(p_db.p_CPU_metadata_array); ++ ++ spin_unlock(&p_db_lock); ++ ++ return 0; ++} ++ ++ ++int p_install_switch_idt_hook(void) { ++ ++ int p_tmp; ++ ++ if ( (p_tmp = register_kretprobe(&p_switch_idt_kretprobe)) != 0) { ++ p_print_log(P_LKRG_ERR, "[kretprobe] register_kretprobe() for <%s> failed! [err=%d]\n", ++ p_switch_idt_kretprobe.kp.symbol_name, ++ p_tmp); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ p_print_log(P_LKRG_INFO, "Planted [kretprobe] <%s> at: 0x%lx\n", ++ p_switch_idt_kretprobe.kp.symbol_name, ++ (unsigned long)p_switch_idt_kretprobe.kp.addr); ++ p_switch_idt_kretprobe_state = 1; ++ ++ return P_LKRG_SUCCESS; ++} ++ ++ ++void p_uninstall_switch_idt_hook(void) { ++ ++ if (!p_switch_idt_kretprobe_state) { ++ p_print_log(P_LKRG_INFO, "[kretprobe] <%s> at 0x%lx is NOT installed\n", ++ p_switch_idt_kretprobe.kp.symbol_name, ++ (unsigned long)p_switch_idt_kretprobe.kp.addr); ++ } else { ++ unregister_kretprobe(&p_switch_idt_kretprobe); ++ p_print_log(P_LKRG_INFO, "Removing [kretprobe] <%s> at 0x%lx nmissed[%d]\n", ++ p_switch_idt_kretprobe.kp.symbol_name, ++ (unsigned long)p_switch_idt_kretprobe.kp.addr, ++ p_switch_idt_kretprobe.nmissed); ++ p_switch_idt_kretprobe_state = 0; ++ } ++} ++ ++#endif +diff --git a/security/lkrg/modules/database/arch/x86/p_switch_idt/p_switch_idt.h b/security/lkrg/modules/database/arch/x86/p_switch_idt/p_switch_idt.h +new file mode 100644 +index 000000000000..991374bcc6fc 
+--- /dev/null ++++ b/security/lkrg/modules/database/arch/x86/p_switch_idt/p_switch_idt.h +@@ -0,0 +1,46 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'switch_idt' function ++ * ++ * Notes: ++ * - Until kernel 4.14+ Linux kernel is switching IDT ++ * when user enable/disables tracepoints. ++ * If this happens, LKRG needs to rebuild DB with ++ * new CPU metadata. ++ * ++ * Caveats: ++ * - It is only needed for x86 arch ++ * ++ * Timeline: ++ * - Created: 26.VIII.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) ++ ++#ifdef CONFIG_X86 ++ ++#ifndef P_LKRG_RUNTIME_CODE_INTEGRITY_SWITCH_IDT_H ++#define P_LKRG_RUNTIME_CODE_INTEGRITY_SWITCH_IDT_H ++ ++/* per-instance private data */ ++struct p_switch_idt_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_switch_idt_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_switch_idt_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_switch_idt_hook(void); ++void p_uninstall_switch_idt_hook(void); ++ ++#endif ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/database/arch/x86/p_x86_metadata.c b/security/lkrg/modules/database/arch/x86/p_x86_metadata.c +new file mode 100644 +index 000000000000..fd4d3668a4b0 +--- /dev/null ++++ b/security/lkrg/modules/database/arch/x86/p_x86_metadata.c +@@ -0,0 +1,380 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Database module ++ * => submodule for dumping IDT ++ * ++ * Notes: ++ * - IDT can be different per CPU which makes it quite complicated... ++ * We need to run 'dumping' function on each CPU individually ++ * ++ * - Linux kernel defines different types of CPUs: ++ * => online CPUs ++ * => possible CPUs ++ * => present CPUs ++ * => active CPUs ++ * ++ * We are going to run procedure only on 'active CPUs' and different ++ * procedure is checking if number of active CPUs changes over time... 
++ * ++ * Timeline: ++ * - Created: 27.XI.2015 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++#ifdef CONFIG_X86 ++ ++u64 p_read_msr(/*int p_cpu, */u32 p_arg) { ++ ++ u32 p_low; ++ u32 p_high; ++ u64 p_val; ++// int p_err; ++ ++ p_low = p_high = p_val = 0; ++ ++ __asm__("rdmsr": P_MSR_ASM_READ(p_val,p_low,p_high) ++ : "c"(p_arg) ++ : ); ++ ++// Sometime may generate OOPS ;/ ++/* ++ if ( (p_err = rdmsr_safe_on_cpu(p_cpu,p_arg,&p_low,&p_high))) { ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " rdmsr_safe_on_cpu() error! - shouldn't happen [err=0x%x]!\n",p_err); ++ return 0; ++ } ++ p_val = (u64 )p_high << 32 | p_low; ++*/ ++ ++ p_val = P_MSR_ASM_RET(p_val,p_low,p_high); ++ ++// DEBUG ++ p_debug_log(P_LKRG_DBG, ++ " MSR arg[0x%x] value[%llx]\n",smp_processor_id(),p_arg,p_val); ++ ++ return p_val; ++} ++ ++/* ++ * This function is independently executed by each active CPU. ++ * IDT is individual per logical CPU (same as MSRs, etc). ++ */ ++void p_dump_x86_metadata(void *_p_arg) { ++ ++ p_CPU_metadata_hash_mem *p_arg = _p_arg; ++/* ++ * IDTR register ++ */ ++#ifdef CONFIG_X86_64 ++ unsigned char p_idtr[0xA]; ++#else ++ unsigned char p_idtr[0x6]; ++#endif ++ ++ int p_curr_cpu = 0xFFFFFFFF; ++ ++ /* ++ * Get ID and lock - no preemtion. ++ */ ++// p_curr_cpu = get_cpu(); ++ p_curr_cpu = smp_processor_id(); ++ ++ /* ++ * To avoid multpile access to the same page from all CPUs ++ * memory will be already zero'd ++ */ ++// memset(&p_arg[p_curr_cpu],0,sizeof(p_CPU_metadata_hash_mem)); ++ ++ /* ++ * First fill information about current CPU ++ */ ++ p_arg[p_curr_cpu].p_cpu_id = p_curr_cpu; ++ p_arg[p_curr_cpu].p_cpu_online = P_CPU_ONLINE; ++ ++ /* ++ * IDT... 
++ */ ++#ifdef CONFIG_X86_64 ++ __asm__("sidt %0\n" ++ "movq %3, %%rax\n" ++ "movq %%rax,%1\n" ++ "movw %4,%%ax\n" ++ "movw %%ax,%2\n":"=m"(p_idtr),"=m"(p_arg[p_curr_cpu].p_base),"=m"(p_arg[p_curr_cpu].p_size) ++ :"m"(p_idtr[2]),"m"(p_idtr[0]) ++ :"%rax"); ++#else ++ __asm__("sidt %0\n" ++ "movl %3, %%eax\n" ++ "movl %%eax,%1\n" ++ "movw %4,%%ax\n" ++ "movw %%ax,%2\n":"=m"(p_idtr),"=m"(p_arg[p_curr_cpu].p_base),"=m"(p_arg[p_curr_cpu].p_size) ++ :"m"(p_idtr[2]),"m"(p_idtr[0]) ++ :"%eax"); ++#endif ++ ++ /* ++ * On all x86 platforms there's defined maximum P_X86_MAX_IDT vectors. ++ * We can hardcode that size here since some 'weird' modules might ++ * incorrectly set it to MAX_SHORT value. ++ */ ++ p_arg[p_curr_cpu].p_size = P_X86_MAX_IDT; ++ ++ p_arg[p_curr_cpu].p_hash = p_lkrg_fast_hash((unsigned char *)p_arg[p_curr_cpu].p_base, ++ (unsigned int)sizeof(p_idt_descriptor) * P_X86_MAX_IDT); ++ ++// DEBUG ++#ifdef P_LKRG_DEBUG ++ p_debug_log(P_LKRG_DBG, ++ " CPU:[%d] IDT => base[0x%lx] size[0x%x] hash[0x%llx]\n", ++ p_arg[p_curr_cpu].p_cpu_id,p_arg[p_curr_cpu].p_base,p_arg[p_curr_cpu].p_size,p_arg[p_curr_cpu].p_hash); ++ ++ do { ++ p_idt_descriptor *p_test; ++ ++ p_debug_log(P_LKRG_DBG, ++ "Reading IDT 1 to verify data:"); ++ p_test = (p_idt_descriptor *)(p_arg[p_curr_cpu].p_base+(sizeof(p_idt_descriptor)*1)); ++#ifdef CONFIG_X86_64 ++ p_debug_log(P_LKRG_DBG, ++ "off_low[0x%x]" ++ "sel[0x%x]" ++ "none[0x%x]" ++ "flags[0x%x]" ++ "off_midl[0x%x]" ++ "off_high[0x%x]" ++ "padding[0x%x]\n", ++ p_test->off_low, ++ p_test->sel, ++ p_test->none, ++ p_test->flags, ++ p_test->off_midl, ++ p_test->off_high, ++ p_test->padding ++ ); ++#else ++ p_debug_log(P_LKRG_DBG, ++ "off_low[0x%x]" ++ "sel[0x%x]" ++ "none[0x%x]" ++ "flags[0x%x]" ++ "off_high[0x%x]\n", ++ p_test->off_low, ++ p_test->sel, ++ p_test->none, ++ p_test->flags, ++ p_test->off_high ++ ); ++#endif ++ } while(0); ++ ++#endif ++ ++ ++ if (P_CTRL(p_msr_validate)) { ++ ++ /* ++ * Now MSRs... 
++ */ ++ ++ /* MSR_IA32_SYSENTER_CS */ ++ // Try reading at least 3 times before give up in case of error... ++ P_MSR_READ_COUNT(3,p_arg[p_curr_cpu].p_MSR_IA32_SYSENTER_CS,MSR_IA32_SYSENTER_CS); ++// p_arg[p_curr_cpu].p_MSR_IA32_SYSENTER_CS = p_read_msr(p_curr_cpu,MSR_IA32_SYSENTER_CS); ++ ++ if (!p_arg[p_curr_cpu].p_MSR_IA32_SYSENTER_CS) { ++ p_print_log(P_LKRG_INFO, ++ "MSR IA32_SYSENTER_CS offset 0x%x on CPU:[%d] is not set!\n", ++ MSR_IA32_SYSENTER_CS,p_curr_cpu); ++ } ++ ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " CPU:[%d] MSR: MSR_IA32_SYSENTER_CS[0x%llx] address in db[0x%lx]\n", ++ p_curr_cpu,p_arg[p_curr_cpu].p_MSR_IA32_SYSENTER_CS,(unsigned long)&p_arg[p_curr_cpu].p_MSR_IA32_SYSENTER_CS); ++ ++ ++ /* MSR_IA32_SYSENTER_ESP */ ++ // Try reading at least 3 times before give up in case of error... ++ P_MSR_READ_COUNT(3,p_arg[p_curr_cpu].p_MSR_IA32_SYSENTER_ESP,MSR_IA32_SYSENTER_ESP); ++ ++ if (!p_arg[p_curr_cpu].p_MSR_IA32_SYSENTER_ESP) { ++ p_print_log(P_LKRG_INFO, ++ "MSR IA32_SYSENTER_ESP offset 0x%x on CPU:[%d] is not set!\n", ++ MSR_IA32_SYSENTER_ESP,p_curr_cpu); ++ } ++ ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " CPU:[%d] MSR: MSR_IA32_SYSENTER_ESP[0x%llx] address in db[0x%lx]\n", ++ p_curr_cpu,p_arg[p_curr_cpu].p_MSR_IA32_SYSENTER_ESP,(unsigned long)&p_arg[p_curr_cpu].p_MSR_IA32_SYSENTER_ESP); ++ ++ ++ /* MSR_IA32_SYSENTER_EIP */ ++ // Try reading at least 3 times before give up in case of error... 
++ P_MSR_READ_COUNT(3,p_arg[p_curr_cpu].p_MSR_IA32_SYSENTER_EIP,MSR_IA32_SYSENTER_EIP); ++ ++ if (!p_arg[p_curr_cpu].p_MSR_IA32_SYSENTER_EIP) { ++ p_print_log(P_LKRG_INFO, ++ "MSR IA32_SYSENTER_EIP offset 0x%x on CPU:[%d] is not set!\n", ++ MSR_IA32_SYSENTER_EIP,p_curr_cpu); ++ } ++ ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " CPU:[%d] MSR: MSR_IA32_SYSENTER_EIP[0x%llx] address in db[0x%lx]\n", ++ p_curr_cpu,p_arg[p_curr_cpu].p_MSR_IA32_SYSENTER_EIP,(unsigned long)&p_arg[p_curr_cpu].p_MSR_IA32_SYSENTER_EIP); ++ ++ ++ /* MSR_IA32_CR_PAT */ ++ // Try reading at least 3 times before give up in case of error... ++ /* ++ P_MSR_READ_COUNT(3,p_arg[p_curr_cpu].p_MSR_IA32_CR_PAT,MSR_IA32_CR_PAT); ++ ++ if (!p_arg[p_curr_cpu].p_MSR_IA32_CR_PAT) { ++ p_print_log(P_LKRG_INFO, ++ "MSR IA32_CR_PAT offset 0x%x on CPU:[%d] is not set!\n", ++ MSR_IA32_CR_PAT,p_curr_cpu); ++ } ++ ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " CPU:[%d] MSR: MSR_IA32_CR_PAT[0x%llx] address in db[0x%lx]\n", ++ p_curr_cpu,p_arg[p_curr_cpu].p_MSR_IA32_CR_PAT,(unsigned long)&p_arg[p_curr_cpu].p_MSR_IA32_CR_PAT); ++ */ ++ ++ /* MSR_IA32_APICBASE */ ++ // Try reading at least 3 times before give up in case of error... ++ P_MSR_READ_COUNT(3,p_arg[p_curr_cpu].p_MSR_IA32_APICBASE,MSR_IA32_APICBASE); ++ ++ if (!p_arg[p_curr_cpu].p_MSR_IA32_APICBASE) { ++ p_print_log(P_LKRG_INFO, ++ "MSR IA32_APICBASE offset 0x%x on CPU:[%d] is not set!\n", ++ MSR_IA32_APICBASE,p_curr_cpu); ++ } ++ ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " CPU:[%d] MSR: MSR_IA32_APICBASE[0x%llx] address in db[0x%lx]\n", ++ p_curr_cpu,p_arg[p_curr_cpu].p_MSR_IA32_APICBASE,(unsigned long)&p_arg[p_curr_cpu].p_MSR_IA32_APICBASE); ++ ++ ++ /* MSR_EFER */ ++ // Try reading at least 3 times before give up in case of error... 
++ P_MSR_READ_COUNT(3,p_arg[p_curr_cpu].p_MSR_EFER,MSR_EFER); ++ ++ if (!p_arg[p_curr_cpu].p_MSR_EFER) { ++ p_print_log(P_LKRG_INFO, ++ "MSR EFER offset 0x%x on CPU:[%d] is not set!\n", ++ MSR_EFER,p_curr_cpu); ++ } ++ ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " CPU:[%d] MSR: MSR_EFER[0x%llx] address in db[0x%lx]\n", ++ p_curr_cpu,p_arg[p_curr_cpu].p_MSR_EFER,(unsigned long)&p_arg[p_curr_cpu].p_MSR_EFER); ++ ++ ++ /* MSR_STAR */ ++ // Try reading at least 3 times before give up in case of error... ++ P_MSR_READ_COUNT(3,p_arg[p_curr_cpu].p_MSR_STAR,MSR_STAR); ++ ++ if (!p_arg[p_curr_cpu].p_MSR_STAR) { ++ p_print_log(P_LKRG_INFO, ++ "MSR STAR offset 0x%x on CPU:[%d] is not set!\n", ++ MSR_STAR,p_curr_cpu); ++ } ++ ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " CPU:[%d] MSR: MSR_STAR[0x%llx] address in db[0x%lx]\n", ++ p_curr_cpu,p_arg[p_curr_cpu].p_MSR_STAR,(unsigned long)&p_arg[p_curr_cpu].p_MSR_STAR); ++ ++ ++ /* MSR_LSTAR */ ++ // Try reading at least 3 times before give up in case of error... ++ P_MSR_READ_COUNT(3,p_arg[p_curr_cpu].p_MSR_LSTAR,MSR_LSTAR); ++ ++ if (!p_arg[p_curr_cpu].p_MSR_LSTAR) { ++ p_print_log(P_LKRG_INFO, ++ "MSR LSTAR offset 0x%x on CPU:[%d] is not set!\n", ++ MSR_LSTAR,p_curr_cpu); ++ } ++ ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " CPU:[%d] MSR: MSR_LSTAR[0x%llx] address in db[0x%lx]\n", ++ p_curr_cpu,p_arg[p_curr_cpu].p_MSR_LSTAR,(unsigned long)&p_arg[p_curr_cpu].p_MSR_LSTAR); ++ ++ ++ /* MSR_CSTAR */ ++ // Try reading at least 3 times before give up in case of error... 
++ P_MSR_READ_COUNT(3,p_arg[p_curr_cpu].p_MSR_CSTAR,MSR_CSTAR); ++ ++ if (!p_arg[p_curr_cpu].p_MSR_CSTAR) { ++ p_print_log(P_LKRG_INFO, ++ "MSR CSTAR offset 0x%x on CPU:[%d] is not set!\n", ++ MSR_CSTAR,p_curr_cpu); ++ } ++ ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " CPU:[%d] MSR: MSR_CSTAR[0x%llx] address in db[0x%lx]\n", ++ p_curr_cpu,p_arg[p_curr_cpu].p_MSR_CSTAR,(unsigned long)&p_arg[p_curr_cpu].p_MSR_CSTAR); ++ ++ ++ /* MSR_SYSCALL_MASK */ ++ // Try reading at least 3 times before give up in case of error... ++ P_MSR_READ_COUNT(3,p_arg[p_curr_cpu].p_MSR_SYSCALL_MASK,MSR_SYSCALL_MASK); ++ ++ if (!p_arg[p_curr_cpu].p_MSR_SYSCALL_MASK) { ++ p_print_log(P_LKRG_INFO, ++ "MSR SYSCALL_MASK offset 0x%x on CPU:[%d] is not set!\n", ++ MSR_SYSCALL_MASK,p_curr_cpu); ++ } ++ ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " CPU:[%d] MSR: MSR_SYSCALL_MASK[0x%llx] address in db[0x%lx]\n", ++ p_curr_cpu,p_arg[p_curr_cpu].p_MSR_SYSCALL_MASK,(unsigned long)&p_arg[p_curr_cpu].p_MSR_SYSCALL_MASK); ++ ++ ++ /* p_MSR_KERNEL_GS_BASE */ ++ // Try reading at least 3 times before give up in case of error... ++ /* ++ P_MSR_READ_COUNT(3,p_arg[p_curr_cpu].p_MSR_KERNEL_GS_BASE,MSR_KERNEL_GS_BASE); ++ ++ if (!p_arg[p_curr_cpu].p_MSR_KERNEL_GS_BASE) { ++ p_print_log(P_LKRG_INFO, ++ "MSR KERNEL_GS_BASE offset 0x%x on CPU:[%d] is not set!\n", ++ MSR_KERNEL_GS_BASE,p_curr_cpu); ++ } ++ ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " CPU:[%d] MSR: MSR_KERNEL_GS_BASE[0x%llx] address in db[0x%lx]\n", ++ p_curr_cpu,p_arg[p_curr_cpu].p_MSR_KERNEL_GS_BASE,(unsigned long)&p_arg[p_curr_cpu].p_MSR_KERNEL_GS_BASE); ++ */ ++ ++ } ++ ++ /* ++ * Now Control Registers ++ */ ++ ++ // TODO... ++ ++ /* ++ * Unlock preemtion. 
++ */ ++// put_cpu(); ++ ++} ++ ++#endif +diff --git a/security/lkrg/modules/database/arch/x86/p_x86_metadata.h b/security/lkrg/modules/database/arch/x86/p_x86_metadata.h +new file mode 100644 +index 000000000000..ebf75a952ed7 +--- /dev/null ++++ b/security/lkrg/modules/database/arch/x86/p_x86_metadata.h +@@ -0,0 +1,133 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Database module ++ * => Submodule - X86/AMD64 specific structures ++ * ++ * Notes: ++ * - None ++ * ++ * Timeline: ++ * - Created: 28.XI.2015 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_X86_METADATA_H ++#define P_LKRG_X86_METADATA_H ++ ++/* ++ * Submodule for MSRs ++ */ ++#include "MSR.h" ++ ++/* ++ * IDT descriptor ++ */ ++#ifdef CONFIG_X86_64 ++typedef struct p_idt_descriptor { ++ ++ unsigned short off_low; ++ unsigned short sel; ++ unsigned char none, flags; ++ unsigned short off_midl; ++ unsigned int off_high; ++ unsigned int padding; ++ ++} p_idt_descriptor; ++#else ++typedef struct p_idt_descriptor { ++ ++ unsigned short off_low; ++ unsigned short sel; ++ unsigned char none, flags; ++ unsigned short off_high; ++ ++} p_idt_descriptor; ++#endif ++ ++#define P_X86_MAX_IDT 256 ++ ++/* ++ * Each CPU in the system independently dump own critical data and save it using ++ * following structure - it includes: ++ * - IDT base ++ * - IDT size ++ * - hash from the entire IDT ++ * - MSR (Model Specific Registers) ++ */ ++typedef struct p_CPU_metadata_hash_mem { ++ ++ /* ++ * Some information about CPU to support hot-plug[in/out] ++ */ ++ int p_cpu_id; ++ char p_cpu_online; // 1 - online, 0 - offline ++ ++ /* ++ * IDT information ++ */ ++ long p_base; // IDT base from IDTR ++ short p_size; // IDT size from IDTR ++ uint64_t p_hash; // hash from entire IDT table: ++ // p_base * P_X86_MAX_IDT ++ ++ /* ++ * Now MSRs... 
++ */ ++ char p_MSR_marker; ++ ++ /* x86 critical MSRs */ ++ u64 p_MSR_IA32_SYSENTER_CS; // 0x00000174 ++ u64 p_MSR_IA32_SYSENTER_ESP; // 0x00000175 ++ u64 p_MSR_IA32_SYSENTER_EIP; // 0x00000176 ++ ++ /* MSR PAT */ ++// u64 p_MSR_IA32_CR_PAT; // 0x00000277 ++ ++ /* MSR APIC */ ++ u64 p_MSR_IA32_APICBASE; // 0x0000001b ++ ++ /* MSR EFER - extended feature register */ ++ u64 p_MSR_EFER; // 0xc0000080 ++ ++ ++ /* AMD64 critical MSRs */ ++ ++ /* MSR STAR - legacy mode SYSCALL target */ ++ u64 p_MSR_STAR; // 0xc0000081 ++ ++ /* ++ * From: "arch/x86/kernel/cpu/common.c" ++ * ++ * AMD64 syscalls: ++ * wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); ++ * ++ */ ++ /* MSR LSTAR - long mode SYSCALL target */ ++ u64 p_MSR_LSTAR; // 0xc0000082 ++ ++ /* MSR CSTAR - compat mode SYSCALL target */ ++ u64 p_MSR_CSTAR; // 0xc0000083 ++ ++ /* MSR SYSCALL_MASK - EFLAGS mask for syscall */ ++ u64 p_MSR_SYSCALL_MASK; // 0xc0000084 ++ ++ /* MSR KERNEL_GS_BASE - SwapGS GS shadow */ ++// u64 p_MSR_KERNEL_GS_BASE; // 0xc0000102 <- more research needed, ++ // saw some user mode code which might ++ // change that - arch prctl ++ ++ /* ++ * ... MORE MSRs ... 
;) ++ */ ++ ++} p_CPU_metadata_hash_mem; ++ ++void p_dump_x86_metadata(void *_p_arg); ++//void p_dump_x86_metadata(p_CPU_metadata_hash_mem *p_arg); ++ ++#endif +diff --git a/security/lkrg/modules/database/p_database.c b/security/lkrg/modules/database/p_database.c +new file mode 100644 +index 000000000000..af1d0901dc27 +--- /dev/null ++++ b/security/lkrg/modules/database/p_database.c +@@ -0,0 +1,380 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Database module ++ * ++ * Notes: ++ * - Let's create database - calculate hashes ++ * ++ * Timeline: ++ * - Created: 24.XI.2015 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../p_lkrg_main.h" ++ ++p_hash_database p_db; ++ ++int hash_from_ex_table(void) { ++ ++ unsigned long p_tmp = 0; ++ ++ p_db.kernel_ex_table.p_addr = (unsigned long *)P_SYM(p_kallsyms_lookup_name)("__start___ex_table"); ++ p_tmp = (unsigned long)P_SYM(p_kallsyms_lookup_name)("__stop___ex_table"); ++ ++ if (!p_db.kernel_ex_table.p_addr || !p_tmp || p_tmp < (unsigned long)p_db.kernel_ex_table.p_addr) { ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ p_db.kernel_ex_table.p_size = (unsigned long)(p_tmp - (unsigned long)p_db.kernel_ex_table.p_addr); ++ ++ p_db.kernel_ex_table.p_hash = p_lkrg_fast_hash((unsigned char *)p_db.kernel_ex_table.p_addr, ++ (unsigned int)p_db.kernel_ex_table.p_size); ++ ++ p_debug_log(P_LKRG_DBG, ++ "hash [0x%llx] ___ex_table start [0x%lx] size [0x%lx]\n",p_db.kernel_ex_table.p_hash, ++ (long)p_db.kernel_ex_table.p_addr, ++ (long)p_db.kernel_ex_table.p_size); ++ ++ return P_LKRG_SUCCESS; ++} ++ ++int hash_from_kernel_stext(void) { ++ ++ unsigned long p_tmp = 0; ++ ++ p_db.kernel_stext.p_addr = (unsigned long *)P_SYM(p_kallsyms_lookup_name)("_stext"); ++ p_tmp = (unsigned long)P_SYM(p_kallsyms_lookup_name)("_etext"); ++ ++ if (!p_db.kernel_stext.p_addr || !p_tmp || p_tmp < (unsigned long)p_db.kernel_stext.p_addr) { ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ 
p_db.kernel_stext.p_size = (unsigned long)(p_tmp - (unsigned long)p_db.kernel_stext.p_addr); ++ p_db.kernel_stext.p_hash = p_lkrg_fast_hash((unsigned char *)p_db.kernel_stext.p_addr, ++ (unsigned int)p_db.kernel_stext.p_size); ++ ++ ++#if defined(P_LKRG_JUMP_LABEL_STEXT_DEBUG) ++ if (!p_db.kernel_stext_copy) { ++ if ( (p_db.kernel_stext_copy = vmalloc(p_db.kernel_stext.p_size+1)) == NULL) { ++ /* ++ * I should NEVER be here! ++ */ ++ p_print_log(P_LKRG_CRIT, ++ "CREATING DATABASE: kzalloc() error! Can't allocate memory - copy stext ;[\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ } ++ memcpy(p_db.kernel_stext_copy,p_db.kernel_stext.p_addr,p_db.kernel_stext.p_size); ++ p_db.kernel_stext_copy[p_db.kernel_stext.p_size] = 0; ++#endif ++ ++ p_debug_log(P_LKRG_DBG, ++ "hash [0x%llx] _stext start [0x%lx] size [0x%lx]\n",p_db.kernel_stext.p_hash, ++ (long)p_db.kernel_stext.p_addr, ++ (long)p_db.kernel_stext.p_size); ++ return P_LKRG_SUCCESS; ++} ++ ++int hash_from_kernel_rodata(void) { ++ ++ unsigned long p_tmp = 0; ++ ++ p_db.kernel_rodata.p_addr = (unsigned long *)P_SYM(p_kallsyms_lookup_name)("__start_rodata"); ++ p_tmp = (unsigned long)P_SYM(p_kallsyms_lookup_name)("__end_rodata"); ++ ++ if (!p_db.kernel_rodata.p_addr || !p_tmp || p_tmp < (unsigned long)p_db.kernel_rodata.p_addr) { ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ p_db.kernel_rodata.p_size = (unsigned long)(p_tmp - (unsigned long)p_db.kernel_rodata.p_addr); ++ ++#if !defined(CONFIG_GRKERNSEC) ++ ++ p_db.kernel_rodata.p_hash = p_lkrg_fast_hash((unsigned char *)p_db.kernel_rodata.p_addr, ++ (unsigned int)p_db.kernel_rodata.p_size); ++ ++#else ++ ++ p_db.kernel_rodata.p_hash = 0xFFFFFFFF; ++ ++#endif ++ ++ p_debug_log(P_LKRG_DBG, ++ "hash [0x%llx] _rodata start [0x%lx] size [0x%lx]\n",p_db.kernel_rodata.p_hash, ++ (long)p_db.kernel_rodata.p_addr, ++ (long)p_db.kernel_rodata.p_size); ++ return P_LKRG_SUCCESS; ++} ++ ++int hash_from_iommu_table(void) { ++ ++#ifdef CONFIG_X86 ++ unsigned long p_tmp = 0; ++#endif ++ 
++#ifdef CONFIG_X86 ++ ++ p_db.kernel_iommu_table.p_addr = (unsigned long *)P_SYM(p_kallsyms_lookup_name)("__iommu_table"); ++ p_tmp = (unsigned long)P_SYM(p_kallsyms_lookup_name)("__iommu_table_end"); ++ ++ if (!p_db.kernel_iommu_table.p_addr || !p_tmp || p_tmp < (unsigned long)p_db.kernel_iommu_table.p_addr) { ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ p_db.kernel_iommu_table.p_size = (unsigned long)(p_tmp - (unsigned long)p_db.kernel_iommu_table.p_addr); ++ ++ ++#ifdef P_LKRG_IOMMU_HASH_ENABLED ++ p_db.kernel_iommu_table.p_hash = p_lkrg_fast_hash((unsigned char *)p_db.kernel_iommu_table.p_addr, ++ (unsigned int)p_db.kernel_iommu_table.p_size); ++#else ++// Static value - might change in normal system... ++ p_db.kernel_iommu_table.p_hash = 0xFFFFFFFF; ++#endif ++ ++ p_debug_log(P_LKRG_DBG, ++ "hash [0x%llx] __iommu_table start [0x%lx] size [0x%lx]\n",p_db.kernel_iommu_table.p_hash, ++ (long)p_db.kernel_iommu_table.p_addr, ++ (long)p_db.kernel_iommu_table.p_size); ++ ++#else ++ ++// Static value - might change in normal system... ++ p_db.kernel_iommu_table.p_hash = 0xFFFFFFFF; ++ ++#endif ++ ++ return P_LKRG_SUCCESS; ++} ++ ++uint64_t hash_from_CPU_data(p_CPU_metadata_hash_mem *p_arg) { ++ ++ int p_tmp = 0; ++ uint64_t p_hash = 0; ++ ++ for_each_present_cpu(p_tmp) { ++ if (p_arg[p_tmp].p_cpu_online == P_CPU_ONLINE) { ++ if (cpu_online(p_tmp)) { ++ if (P_CTRL(p_msr_validate)) { ++ p_hash ^= p_lkrg_fast_hash((unsigned char *)&p_arg[p_tmp], ++ (unsigned int)sizeof(p_CPU_metadata_hash_mem)); ++ } else { ++ p_hash ^= p_lkrg_fast_hash((unsigned char *)&p_arg[p_tmp], ++ (unsigned int)offsetof(p_CPU_metadata_hash_mem, p_MSR_marker)); ++ } ++ p_debug_log(P_LKRG_DBG, ++ " Hash for cpu id %i total_hash[0x%llx]\n",p_tmp,p_hash); ++ } else { ++ // WTF?! I should never be here ++ p_print_log(P_LKRG_CRIT, ++ "WTF?! 
DB corrupted?"); ++ } ++ } else { ++ // Skip offline CPUs ++ p_debug_log(P_LKRG_DBG, ++ " Offline cpu id %i total_hash[0x%llx]\n",p_tmp,p_hash); ++ } ++ } ++ ++ return p_hash; ++} ++ ++int p_create_database(void) { ++ ++ int p_tmp; ++// int p_tmp_cpu; ++ ++ memset(&p_db,0,sizeof(p_hash_database)); ++ ++ if ( (P_SYM(p_jump_label_mutex) = (struct mutex *)P_SYM(p_kallsyms_lookup_name)("jump_label_mutex")) == NULL) { ++ p_print_log(P_LKRG_ERR, ++ "CREATING DATABASE: error! Can't find 'jump_label_mutex' variable :( Exiting...\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ if ( (P_SYM(p_text_mutex) = (struct mutex *)P_SYM(p_kallsyms_lookup_name)("text_mutex")) == NULL) { ++ p_print_log(P_LKRG_ERR, ++ "CREATING DATABASE: error! Can't find 'text_mutex' variable :( Exiting...\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ /* ++ * First gather information about CPUs in the system - CRITICAL !!! ++ */ ++ p_get_cpus(&p_db.p_cpu); ++ ++ /* ++ * OK, we now know what is the maximum number of supported CPUs ++ * in this kernel, let's allocate data here... ++ */ ++ /* ++ * This is one-shot function not in the time-critical context/section. We can sleep here so ++ * we are allowed to make 'slowpath' memory allocation - don't need to use emergency pools. ++ * ++ * __GFP_NOFAIL flag will always generate slowpath warn because developers ++ * decided to depreciate this flag ;/ ++ */ ++ if ( (p_db.p_CPU_metadata_array = kzalloc(sizeof(p_CPU_metadata_hash_mem)*p_db.p_cpu.p_nr_cpu_ids, ++ GFP_KERNEL | __GFP_REPEAT)) == NULL) { ++ /* ++ * I should NEVER be here! ++ */ ++ p_print_log(P_LKRG_CRIT, ++ "CREATING DATABASE: kzalloc() error! 
Can't allocate memory ;[\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++// STRONG_DEBUG ++ else { ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " p_db.p_CPU_metadata_array[0x%lx] with requested size[%d] " ++ "= sizeof(p_CPU_metadata_hash_mem)[%d] * p_db.p_cpu.p_nr_cpu_ids[%d]\n", ++ (unsigned long)p_db.p_CPU_metadata_array, ++ (int)(sizeof(p_CPU_metadata_hash_mem)*p_db.p_cpu.p_nr_cpu_ids), ++ (int)sizeof(p_CPU_metadata_hash_mem),p_db.p_cpu.p_nr_cpu_ids); ++ } ++ ++ /* ++ * OK, we have prepared all necessary memory. Let's try X86 specific ++ * function ++ */ ++ ++// p_tmp_cpu = get_cpu(); ++ ++ /* ++ * Sometime this function has problems and do not run on every requested CPU: ++ * smp_call_function_many(cpu_present_mask, ...); ++ * ++ * that's why we do it manually now: ++ */ ++ for_each_present_cpu(p_tmp) { ++ if (cpu_online(p_tmp)) { ++// if (p_tmp_cpu != p_tmp) { ++// p_dump_CPU_metadata(p_db.p_CPU_metadata_array); ++ ++ /* ++ * There is an undesirable situation in SMP Linux machines when sending ++ * IPI via the smp_call_function_single() API... ++ * ++ * ... more technical details about it can be found here: ++ * *) http://blog.pi3.com.pl/?p=549 ++ * *) http://lists.openwall.net/linux-kernel/2016/09/21/68 ++ */ ++ smp_call_function_single(p_tmp,p_dump_CPU_metadata,p_db.p_CPU_metadata_array,true); ++// } ++ } else { ++ p_print_log(P_LKRG_WARN, ++ "!!! WARNING !!! CPU ID:%d is offline !!!\n",p_tmp); ++// "Let's try to run on it anyway...",p_tmp); ++// p_dump_CPU_metadata(p_db.p_CPU_metadata_array); ++// smp_call_function_single(p_tmp,p_dump_CPU_metadata,p_db.p_CPU_metadata_array,true); ++ } ++ } ++// put_cpu(); ++// smp_call_function_single(p_tmp_cpu,p_dump_CPU_metadata,p_db.p_CPU_metadata_array,true); ++ ++ p_db.p_CPU_metadata_hashes = hash_from_CPU_data(p_db.p_CPU_metadata_array); ++ ++ /* Some arch needs extra hooks */ ++ if (p_register_arch_metadata() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_ERR, ++ "CREATING DATABASE: error! 
Can't register CPU architecture specific metadata :( Exiting...\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ ++ if (hash_from_ex_table() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "CREATING DATABASE ERROR: EXCEPTION TABLE CAN'T BE FOUND (skipping it)!\n"); ++ p_db.kernel_ex_table.p_hash = p_db.kernel_ex_table.p_size = 0; ++ p_db.kernel_ex_table.p_addr = NULL; ++ } ++ ++ ++ if (hash_from_kernel_rodata() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "CREATING DATABASE ERROR: _RODATA CAN'T BE FOUND (skipping it)!\n"); ++ p_db.kernel_rodata.p_hash = p_db.kernel_rodata.p_size = 0; ++ p_db.kernel_rodata.p_addr = NULL; ++ } ++ ++ if (hash_from_iommu_table() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "CREATING DATABASE ERROR: IOMMU TABLE CAN'T BE FOUND (skipping it)!\n"); ++ p_db.kernel_iommu_table.p_hash = p_db.kernel_iommu_table.p_size = 0; ++ p_db.kernel_iommu_table.p_addr = NULL; ++ } ++ ++ ++#if defined(CONFIG_OPTPROBES) ++ P_SYM(p_wait_for_kprobe_optimizer)(); ++#endif ++ smp_mb(); ++ ++ p_text_section_lock(); ++ ++ /* ++ * Memory allocation may fail... let's loop here! 
++ */ ++ while(p_kmod_hash(&p_db.p_module_list_nr,&p_db.p_module_list_array, ++ &p_db.p_module_kobj_nr,&p_db.p_module_kobj_array, 0x1) != P_LKRG_SUCCESS) ++ schedule(); ++ ++ /* Hash */ ++ p_db.p_module_list_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_list_array, ++ (unsigned int)p_db.p_module_list_nr * sizeof(p_module_list_mem)); ++ p_db.p_module_kobj_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_kobj_array, ++ (unsigned int)p_db.p_module_kobj_nr * sizeof(p_module_kobj_mem)); ++/* ++ ++ p_db.p_module_kobj_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_kobj_array, ++ (unsigned int)p_db.p_module_kobj_nr * sizeof(p_module_kobj_mem)); ++*/ ++ ++ p_text_section_unlock(); ++ ++ /* Register module notification routine - must be outside p_text_section_(un)lock */ ++ p_register_module_notifier(); ++ ++/* ++ if (p_install_arch_jump_label_transform_hook()) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't hook arch_jump_label_transform function :(\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ if (p_install_arch_jump_label_transform_static_hook()) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't hook arch_jump_label_transform_static function :(\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++*/ ++ ++ p_debug_log(P_LKRG_DBG, ++ "p_module_list_hash => [0x%llx]\np_module_kobj_hash => [0x%llx]\n", ++ p_db.p_module_list_hash,p_db.p_module_kobj_hash); ++ ++ P_SYM(p_state_init) = 1; ++#if defined(CONFIG_OPTPROBES) ++ P_SYM(p_wait_for_kprobe_optimizer)(); ++#endif ++ smp_mb(); ++ ++#if !defined(CONFIG_GRKERNSEC) ++ p_text_section_lock(); ++ if (hash_from_kernel_stext() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "CREATING DATABASE ERROR: HASH FROM _STEXT!\n"); ++ p_text_section_unlock(); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ p_text_section_unlock(); ++#endif ++ ++ return P_LKRG_SUCCESS; ++} +diff --git a/security/lkrg/modules/database/p_database.h b/security/lkrg/modules/database/p_database.h +new file mode 100644 +index 000000000000..a6cfcec86c17 +--- 
/dev/null ++++ b/security/lkrg/modules/database/p_database.h +@@ -0,0 +1,222 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Database module ++ * ++ * Notes: ++ * - Let's create database - calculate hashes ++ * ++ * Timeline: ++ * - Created: 24.XI.2015 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_DATABASE_H ++#define P_LKRG_DATABASE_H ++ ++/* ++ * Memory block hash ++ */ ++typedef struct p_hash_mem_block { ++ ++ long *p_addr; ++ unsigned long p_size; ++ uint64_t p_hash; ++ ++} p_hash_mem_block; ++ ++/* ++ * CPU info structure: ++ * ++ * Keep track 'online/possible/present/active' CPUs. ++ * Linux kernel keeps those data in CPU bitmask structure ++ * which is extracet via following function: ++ * ++ * static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) ++ * ++ * That's why all variables have 'int' type ++ */ ++typedef struct p_cpu_info { ++ ++ int online_CPUs; // Might be active (because it's online) but it is NOT ++ // yet, so does NOT execute any task ++ int possible_CPUs; // Physically possible CPUs handled by this kernel ++ int present_CPUs; // Currently available CPUs, but doesn't need to be used ++ // by kernel at this time. Value is dynamically updated ++ // when CPU is hotplug ++ int active_CPUs; // Currently active CPUs - can execute tasks ++ ++/* ++ * "include/linux/cpumask.h" ++ * ... ++ * 34 #if NR_CPUS == 1 ++ * 35 #define nr_cpu_ids 1 ++ * 36 #else ++ * 37 extern int nr_cpu_ids; ++ * 38 #endif ++ * ... ++ */ ++ ++ int p_nr_cpu_ids; // Should be the same as possible_CPUs ++ ++} p_cpu_info; ++ ++#define P_CPU_OFFLINE 0 ++#define P_CPU_ONLINE 1 ++ ++/* ++ * Unique metadata per CPU arch. Currently, we support: ++ * - x86 ++ * - arm64 ++ */ ++#include "arch/p_arch_metadata.h" ++ ++/* ++ * Linux Kernel Module's specific structures... 
++ */ ++#include "../kmod/p_kmod.h" ++ ++/* ++ * Dynamic *_JUMP_LABEL support (arch independent) ++ */ ++#include "JUMP_LABEL/p_arch_jump_label_transform/p_arch_jump_label_transform.h" ++#include "JUMP_LABEL/p_arch_jump_label_transform_apply/p_arch_jump_label_transform_apply.h" ++ ++#if defined(CONFIG_FUNCTION_TRACER) ++/* ++ * FTRACE ++ */ ++#include "FTRACE/p_ftrace_modify_all_code/p_ftrace_modify_all_code.h" ++#include "FTRACE/p_ftrace_enable_sysctl/p_ftrace_enable_sysctl.h" ++#endif ++ ++enum p_jump_label_state { ++ ++ P_JUMP_LABEL_NONE, ++ P_JUMP_LABEL_CORE_TEXT, ++ P_JUMP_LABEL_MODULE_TEXT, ++ P_JUMP_LABEL_WTF_STATE ++ ++}; ++ ++/* ++ * During *_JUMP_LABEL modification, we need to store information about its state. ++ */ ++struct p_jump_label { ++ ++ enum p_jump_label_state p_state; ++ struct module *p_mod; ++ unsigned long *p_mod_mask; ++ ++}; ++ ++/* ++ * Main database structure containing: ++ * - memory hashes ++ * - Critical addresses ++ * - CPU specific information ++ */ ++typedef struct p_hash_database { ++ ++ /* ++ * Information about CPUs in the system - CRITICAL !!! ++ * Should be filled first. ++ */ ++ p_cpu_info p_cpu; ++ ++ /* ++ * Pointer to the dynamically allocated array - we don't know ++ * how much memory do we need until we discover how many CPUs ++ * do we have. ++ * ++ * Btw. our procedure must handle hot CPUs plug[in/out] as well !!! 
++ */ ++ p_CPU_metadata_hash_mem *p_CPU_metadata_array; ++ ++ /* ++ * Hash from the all 'p_CPU_metadata_hash_mem' structures ++ */ ++ uint64_t p_CPU_metadata_hashes; ++ ++ ++ /* ++ * Linux Kernel Modules in the system ++ */ ++ unsigned int p_module_list_nr; // Count via walking through the list first ++ unsigned int p_module_kobj_nr; // Count via walking through the KOBJs first ++ ++ /* ++ * Linux Kernel Modules integrity ++ */ ++ p_module_list_mem *p_module_list_array; ++ uint64_t p_module_list_hash; ++ p_module_kobj_mem *p_module_kobj_array; ++ uint64_t p_module_kobj_hash; ++ ++ p_hash_mem_block kernel_stext; // .text ++#if defined(P_LKRG_JUMP_LABEL_STEXT_DEBUG) ++ char *kernel_stext_copy; // copy of .text ++#endif ++ p_hash_mem_block kernel_rodata; // .rodata ++ p_hash_mem_block kernel_iommu_table; // IOMMU table ++ p_hash_mem_block kernel_ex_table; // Exception tale ++ struct p_jump_label p_jump_label; // *_JUMP_LABEL state during modification ++ ++} p_hash_database; ++ ++ ++ ++extern p_hash_database p_db; ++extern struct notifier_block p_cpu_notifier; ++ ++int hash_from_ex_table(void); ++int hash_from_kernel_stext(void); ++int hash_from_kernel_rodata(void); ++int hash_from_iommu_table(void); ++ ++static inline void p_text_section_lock(void) { ++ ++#if !defined(P_LKRG_DEBUG_BUILD) ++ lockdep_off(); ++#endif ++#if defined(CONFIG_DYNAMIC_FTRACE) ++ mutex_lock(P_SYM(p_ftrace_lock)); ++#endif ++ /* We are heavily consuming module list here - take 'module_mutex' */ ++ mutex_lock(P_SYM(p_module_mutex)); ++ while (mutex_is_locked(P_SYM(p_jump_label_mutex))) ++ schedule(); ++ mutex_lock(P_SYM(p_text_mutex)); ++} ++ ++static inline void p_text_section_unlock(void) { ++ ++ mutex_unlock(P_SYM(p_text_mutex)); ++ /* Release the 'module_mutex' */ ++ mutex_unlock(P_SYM(p_module_mutex)); ++#if defined(CONFIG_DYNAMIC_FTRACE) ++ mutex_unlock(P_SYM(p_ftrace_lock)); ++#endif ++#if !defined(P_LKRG_DEBUG_BUILD) ++ lockdep_on(); ++#endif ++ ++} ++ ++int p_create_database(void); 
++void p_get_cpus(p_cpu_info *p_arg); ++int p_cmp_cpus(p_cpu_info *p_arg1, p_cpu_info *p_arg2); ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) ++int p_cpu_callback(struct notifier_block *p_block, unsigned long p_action, void *p_hcpu); ++#endif ++int p_cpu_online_action(unsigned int p_cpu); ++int p_cpu_dead_action(unsigned int p_cpu); ++uint64_t hash_from_CPU_data(p_CPU_metadata_hash_mem *p_arg); ++ ++ ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/arch/arm/p_ed_arm_arch.h b/security/lkrg/modules/exploit_detection/arch/arm/p_ed_arm_arch.h +new file mode 100644 +index 000000000000..acc45edcd857 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/arch/arm/p_ed_arm_arch.h +@@ -0,0 +1,29 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - ARM specific code for Exploit Detection ++ * ++ * Notes: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.X.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#if defined(CONFIG_ARM) ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_ARM_ARCH_H ++#define P_LKRG_EXPLOIT_DETECTION_ARM_ARCH_H ++ ++/* ++ * TODO ++ */ ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/arch/arm64/p_ed_arm64_arch.h b/security/lkrg/modules/exploit_detection/arch/arm64/p_ed_arm64_arch.h +new file mode 100644 +index 000000000000..def139b6b40b +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/arch/arm64/p_ed_arm64_arch.h +@@ -0,0 +1,29 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - ARM64 specific code for Exploit Detection ++ * ++ * Notes: ++ * - None ++ * ++ * Timeline: ++ * - Created: 18.IV.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#if defined(CONFIG_ARM64) ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_ARM64_ARCH_H ++#define P_LKRG_EXPLOIT_DETECTION_ARM64_ARCH_H ++ ++/* ++ * TODO ++ */ ++ ++#endif ++ ++#endif +diff --git 
a/security/lkrg/modules/exploit_detection/arch/x86/p_ed_x86_arch.h b/security/lkrg/modules/exploit_detection/arch/x86/p_ed_x86_arch.h +new file mode 100644 +index 000000000000..693a184c5d0f +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/arch/x86/p_ed_x86_arch.h +@@ -0,0 +1,236 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - x86/AMD64 specific code for Exploit Detection ++ * ++ * Notes: ++ * - None ++ * ++ * Timeline: ++ * - Created: 18.IV.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifdef CONFIG_X86 ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_X86_ARCH_H ++#define P_LKRG_EXPLOIT_DETECTION_X86_ARCH_H ++ ++/* ++ * pCFI global CPU flags ++ */ ++#define P_PCFI_X86_WP 0x1 ++#define P_PCFI_X86_SMEP 0x2 ++#define P_PCFI_X86_SMAP 0x4 ++ ++#define P_IS_WP_FLAG_ENABLED(x) (x & P_PCFI_X86_WP) ++#define P_IS_SMEP_FLAG_ENABLED(x) (x & P_PCFI_X86_SMEP) ++#define P_IS_SMAP_FLAG_ENABLED(x) (x & P_PCFI_X86_SMAP) ++ ++/* ++ * Known minor bug: these updates are non-atomic, so concurrent changing of ++ * these flags might result in all but one of the changes getting lost. We ++ * may switch to using individual Boolean variables instead of the bitmask. 
++ */ ++#define P_ENABLE_WP_FLAG(x) (x |= P_PCFI_X86_WP) ++#define P_ENABLE_SMEP_FLAG(x) (x |= P_PCFI_X86_SMEP) ++#define P_ENABLE_SMAP_FLAG(x) (x |= P_PCFI_X86_SMAP) ++ ++#define P_DISABLE_WP_FLAG(x) (x &= ~P_PCFI_X86_WP) ++#define P_DISABLE_SMEP_FLAG(x) (x &= ~P_PCFI_X86_SMEP) ++#define P_DISABLE_SMAP_FLAG(x) (x &= ~P_PCFI_X86_SMAP) ++ ++/* ++ * x86 specific functions ++ */ ++static inline void p_write_cr4(unsigned long p_arg) { ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0) ++ P_SYM(p_native_write_cr4)(p_arg); ++#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) ++ write_cr4(p_arg); ++#else ++ __write_cr4(p_arg); ++#endif ++ ++} ++ ++static inline unsigned long p_read_cr4(void) { ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) ++ return read_cr4(); ++#else ++ return __read_cr4(); ++#endif ++ ++} ++ ++static inline unsigned int p_ed_pcfi_x86_validate_wp(unsigned char p_kill) { ++ ++ unsigned int p_bad = 0; ++ register unsigned long p_cr0; ++ ++ if (!P_CTRL(p_kint_validate)) ++ return p_bad; ++ ++ p_cr0 = read_cr0(); ++ ++ if (!(p_cr0 & X86_CR0_WP)) { ++ ++ switch (P_CTRL(p_kint_enforce)) { ++ ++ /* Panic */ ++ case 2: ++ // OK, we need to crash the kernel now ++ panic(P_LKRG_SIGNATURE "Write Protection bit was disabled and 'panic()' is enforced! Killing the kernel...\n"); ++ break; ++ ++ /* Log and restore */ ++ case 1: ++ p_print_log(P_LKRG_CRIT, ++ "Write Protection bit was disabled! Enforcing WP now!\n"); ++ p_cr0 |= X86_CR0_WP; ++ write_cr0(p_cr0); ++ wbinvd(); ++ p_bad++; ++ break; ++ ++ /* Log and accept */ ++ case 0: ++ if (!P_IS_WP_FLAG_ENABLED(p_pcfi_CPU_flags)) ++ return p_bad; ++ p_print_log(P_LKRG_CRIT, ++ "Write Protection bit was disabled! Accepting new WP state.\n"); ++ P_DISABLE_WP_FLAG(p_pcfi_CPU_flags); ++ break; ++ } ++ ++ /* ++ * It is possible to kill an almost arbitrary process here. Such situation is possible ++ * if process which disables SMEP / WP is being preempted before leverages such state. 
++ * Most of the exploits which I tested didn't have such FP, but such possibility of course ++ * exists. However, it is worth the risk. Especially that such changes (SMEP / WP violation) ++ * are not legal and are always the results of malicious activity. ++ */ ++ if (p_kill) { ++ if (P_CTRL(p_pint_enforce)) { ++ p_print_log(P_LKRG_CRIT, "We might be killing arbitrary process here. However, it is worth the risk!\n"); ++ p_ed_kill_task_by_task(current); ++ } ++ } ++ } ++ ++ return p_bad; ++} ++ ++static inline unsigned int p_ed_pcfi_x86_validate_smXp(unsigned char p_kill) { ++ ++ unsigned int p_bad = 0; ++ register unsigned long p_cr4; ++ ++ ++ if (!P_CTRL(p_smep_validate) && !P_CTRL(p_smap_validate)) ++ return p_bad; ++ ++ p_cr4 = p_read_cr4(); ++ ++ if (!(p_cr4 & X86_CR4_SMEP) && P_CTRL(p_smep_validate)) { ++ ++ switch (P_CTRL(p_smep_enforce)) { ++ ++ /* Panic */ ++ case 2: ++ // OK, we need to crash the kernel now ++ panic(P_LKRG_SIGNATURE "SMEP was disabled and 'panic()' is enforced! Killing the kernel...\n"); ++ break; ++ ++ /* Log and restore */ ++ case 1: ++ p_print_log(P_LKRG_CRIT, ++ "SMEP was disabled! Enforcing SMEP now!\n"); ++// cr4_set_bits(X86_CR4_SMEP); ++ p_cr4 |= X86_CR4_SMEP; ++ p_write_cr4(p_cr4); ++ p_bad++; ++ break; ++ ++ /* Log and accept */ ++ case 0: ++ if (!P_IS_SMEP_FLAG_ENABLED(p_pcfi_CPU_flags)) ++ return p_bad; ++ p_print_log(P_LKRG_CRIT, ++ "SMEP was disabled! Accepting new SMEP state.\n"); ++ P_DISABLE_SMEP_FLAG(p_pcfi_CPU_flags); ++ break; ++ } ++ ++ /* ++ * It is possible to kill an almost arbitrary process here. Such situation is possible ++ * if process which disables WP / SMEP / SMAP is being preempted before leverages such state. ++ * Most of the exploits which I tested didn't have such FP, but such possibility of course ++ * exists. However, it is worth the risk. Especially that such changes (WP / SMEP / SMAP violation) ++ * are not legal and are always the results of malicious activity. 
++ */ ++ if (p_kill) { ++ if (P_CTRL(p_pint_enforce)) { ++ p_print_log(P_LKRG_CRIT, "We might be killing arbitrary process here. However, it is worth the risk!\n"); ++ p_ed_kill_task_by_task(current); ++ } ++ } ++ } ++ ++ if (!(p_cr4 & X86_CR4_SMAP) && P_CTRL(p_smap_validate)) { ++ ++ switch (P_CTRL(p_smap_enforce)) { ++ ++ /* Panic */ ++ case 2: ++ // OK, we need to crash the kernel now ++ panic(P_LKRG_SIGNATURE "SMAP was disabled and 'panic()' is enforced! Killing the kernel...\n"); ++ break; ++ ++ /* Log and restore */ ++ case 1: ++ p_print_log(P_LKRG_CRIT, ++ "SMAP was disabled! Enforcing SMAP now!\n"); ++// cr4_set_bits(X86_CR4_SMAP); ++ p_cr4 |= X86_CR4_SMAP; ++ p_write_cr4(p_cr4); ++ p_bad++; ++ break; ++ ++ /* Log and accept */ ++ case 0: ++ if (!P_IS_SMAP_FLAG_ENABLED(p_pcfi_CPU_flags)) ++ return p_bad; ++ p_print_log(P_LKRG_CRIT, ++ "SMAP was disabled! Accepting new SMAP state.\n"); ++ P_DISABLE_SMAP_FLAG(p_pcfi_CPU_flags); ++ break; ++ } ++ ++ /* ++ * It is possible to kill an almost arbitrary process here. Such situation is possible ++ * if process which disables WP / SMEP / SMAP is being preempted before leverages such state. ++ * Most of the exploits which I tested didn't have such FP, but such possibility of course ++ * exists. However, it is worth the risk. Especially that such changes (WP / SMEP / SMAP violation) ++ * are not legal and are always the results of malicious activity. ++ */ ++ if (p_kill) { ++ if (P_CTRL(p_pint_enforce)) { ++ p_print_log(P_LKRG_CRIT, "We might be killing arbitrary process here. 
However, it is worth the risk!\n"); ++ p_ed_kill_task_by_task(current); ++ } ++ } ++ } ++ ++ return p_bad; ++} ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/p_exploit_detection.c b/security/lkrg/modules/exploit_detection/p_exploit_detection.c +new file mode 100644 +index 000000000000..cec81ccff7ee +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/p_exploit_detection.c +@@ -0,0 +1,2345 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Exploit detection main module ++ * ++ * Notes: ++ * - None ++ * ++ * Timeline: ++ * - Created: 06.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../p_lkrg_main.h" ++ ++struct p_ed_global_variables p_ed_guard_globals; ++unsigned long p_global_off_cookie; ++unsigned long p_global_cnt_cookie; ++struct kmem_cache *p_ed_wq_valid_cache = NULL; ++struct kmem_cache *p_ed_pcfi_cache = NULL; ++ ++unsigned long p_pcfi_CPU_flags; ++ ++#define p_ed_pcfi_alloc() kmem_cache_alloc(p_ed_pcfi_cache, GFP_ATOMIC) ++#define p_ed_pcfi_free(name) kmem_cache_free(p_ed_pcfi_cache, (void *)(name)) ++ ++static int p_cmp_tasks(struct p_ed_process *p_orig, struct task_struct *p_current, char p_kill); ++static unsigned int p_iterate_lkrg_tasks_paranoid(void); ++ ++static const struct p_functions_hooks { ++ ++ const char *name; ++ int (*install)(int p_isra); ++ void (*uninstall)(void); ++ int p_fatal; ++ const char *p_error_message; ++ int is_isra_safe; ++ ++} p_functions_hooks_array[] = { ++ { ++ "security_bprm_committing_creds", ++ p_install_security_bprm_committing_creds_hook, ++ p_uninstall_security_bprm_committing_creds_hook, ++ 1, ++ NULL, ++ 1 ++ }, ++ { ++ "security_bprm_committed_creds", ++ p_install_security_bprm_committed_creds_hook, ++ p_uninstall_security_bprm_committed_creds_hook, ++ 1, ++ NULL, ++ 1 ++ }, ++ { "call_usermodehelper", ++ p_install_call_usermodehelper_hook, ++ p_uninstall_call_usermodehelper_hook, ++ 0, ++ "LKRG 
won't be able to guard UMH interface :( Other functionalities are going to be enforced.", ++ 0 ++ }, ++ { "call_usermodehelper_exec", ++ p_install_call_usermodehelper_exec_hook, ++ p_uninstall_call_usermodehelper_exec_hook, ++ 0, ++ "LKRG won't enforce validation on 'call_usermodehelper_exec'", ++ 1 ++ }, ++ { "wake_up_new_task", ++ p_install_wake_up_new_task_hook, ++ p_uninstall_wake_up_new_task_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ { "do_exit", ++ p_install_do_exit_hook, ++ p_uninstall_do_exit_hook, ++ 1, ++ NULL, ++ 1 ++ }, ++ { "security_ptrace_access", ++ p_install_security_ptrace_access_hook, ++ p_uninstall_security_ptrace_access_hook, ++ 0, ++ "LKRG won't enforce validation on 'security_ptrace_access'", ++ 0 ++ }, ++ { "sys_setuid", ++ p_install_sys_setuid_hook, ++ p_uninstall_sys_setuid_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ { "sys_setreuid", ++ p_install_sys_setreuid_hook, ++ p_uninstall_sys_setreuid_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ { "sys_setresuid", ++ p_install_sys_setresuid_hook, ++ p_uninstall_sys_setresuid_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ { "sys_setfsuid", ++ p_install_sys_setfsuid_hook, ++ p_uninstall_sys_setfsuid_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ { "sys_setgid", ++ p_install_sys_setgid_hook, ++ p_uninstall_sys_setgid_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ { "sys_setregid", ++ p_install_sys_setregid_hook, ++ p_uninstall_sys_setregid_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ { "sys_setresgid", ++ p_install_sys_setresgid_hook, ++ p_uninstall_sys_setresgid_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ { "sys_setfsgid", ++ p_install_sys_setfsgid_hook, ++ p_uninstall_sys_setfsgid_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ { "set_current_groups", ++ p_install_set_current_groups_hook, ++ p_uninstall_set_current_groups_hook, ++ 1, ++ NULL, ++ 1 ++ }, ++ { "generic_permission", ++ p_install_generic_permission_hook, ++ p_uninstall_generic_permission_hook, ++ 0, ++ "LKRG won't enforce validation on 'generic_permission'", ++ 1 ++ }, ++#ifdef CONFIG_SECURITY_SELINUX ++ { "sel_write_enforce", 
++ p_install_sel_write_enforce_hook, ++ p_uninstall_sel_write_enforce_hook, ++ 1, ++ NULL, ++ 1 ++ }, ++#endif ++ { "seccomp", ++ p_install_seccomp_hook, ++ p_uninstall_seccomp_hook, ++ 1, ++ NULL, ++ 1 ++ }, ++ { "sys_unshare", ++ p_install_sys_unshare_hook, ++ p_uninstall_sys_unshare_hook, ++ 1, ++ NULL, ++ 1 ++ }, ++ /* Caps. */ ++ { "sys_capset", ++ p_install_sys_capset_hook, ++ p_uninstall_sys_capset_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ { "cap_task_prctl", ++ p_install_cap_task_prctl_hook, ++ p_uninstall_cap_task_prctl_hook, ++ 1, ++ NULL, ++ 1 ++ }, ++ /* Keyring */ ++ { "key_change_session_keyring", ++ p_install_key_change_session_keyring_hook, ++ p_uninstall_key_change_session_keyring_hook, ++ 1, ++ NULL, ++ 1 ++ }, ++ { "sys_add_key", ++ p_install_sys_add_key_hook, ++ p_uninstall_sys_add_key_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ { "sys_request_key", ++ p_install_sys_request_key_hook, ++ p_uninstall_sys_request_key_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ { "sys_keyctl", ++ p_install_sys_keyctl_hook, ++ p_uninstall_sys_keyctl_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++#ifdef CONFIG_COMPAT ++ { "compat_sys_keyctl", ++ p_install_compat_sys_keyctl_hook, ++ p_uninstall_compat_sys_keyctl_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ #ifdef P_SYSCALL_LAYOUT_4_17 ++ #ifdef CONFIG_X86 ++ { "compat_sys_capset", ++ p_install_compat_sys_capset_hook, ++ p_uninstall_compat_sys_capset_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ { "compat_sys_add_key", ++ p_install_compat_sys_add_key_hook, ++ p_uninstall_compat_sys_add_key_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ { "compat_sys_request_key", ++ p_install_compat_sys_request_key_hook, ++ p_uninstall_compat_sys_request_key_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ #endif /* CONFIG_X86 */ ++ #endif /* P_SYSCALL_LAYOUT_4_17 */ ++#endif /* CONFIG_COMPAT */ ++#ifdef CONFIG_X86_X32 ++ #ifdef P_SYSCALL_LAYOUT_4_17 ++ { "x32_sys_keyctl", ++ p_install_x32_sys_keyctl_hook, ++ p_uninstall_x32_sys_keyctl_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ #endif /* P_SYSCALL_LAYOUT_4_17 */ ++#endif /* 
CONFIG_X86_X32 */ ++ { "override_creds", ++ p_install_override_creds_hook, ++ p_uninstall_override_creds_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ { "revert_creds", ++ p_install_revert_creds_hook, ++ p_uninstall_revert_creds_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ /* Namespaces. */ ++ { "sys_setns", ++ p_install_sys_setns_hook, ++ p_uninstall_sys_setns_hook, ++ 1, ++ NULL, ++ 0 ++ }, ++ /* OverlayFS ++ * ++ * OverlayFS might not be installed in that system - it is not critical ++ * scenario. If OverlayFS is installed, used but not found (unlikely) ++ * in worst case, we might have FP. Continue... ++ */ ++ { "ovl_create_or_link", ++ p_install_ovl_create_or_link_hook, ++ p_uninstall_ovl_create_or_link_hook, ++ 0, ++ "Can't hook 'ovl_create_or_link' function. This is expected if you are not using OverlayFS.", ++ 1 ++ }, ++ /* pCFI */ ++ { "pcfi_mark_inode_dirty", ++ p_install_pcfi_mark_inode_dirty_hook, ++ p_uninstall_pcfi_mark_inode_dirty_hook, ++ 0, ++ "LKRG won't enforce pCFI validation on 'mark_inode_dirty'", ++ 0 ++ }, ++ { "pcfi_schedule", ++ p_install_pcfi_schedule_hook, ++ p_uninstall_pcfi_schedule_hook, ++ 0, ++ "LKRG won't enforce pCFI validation on 'schedule'", ++ 1 ++ }, ++ { "pcfi___queue_work", ++ p_install_pcfi___queue_work_hook, ++ p_uninstall_pcfi___queue_work_hook, ++ 0, ++ "LKRG won't enforce validation on '__queue_work'", ++ 1 ++ }, ++ { "pcfi_lookup_fast", ++ p_install_pcfi_lookup_fast_hook, ++ p_uninstall_pcfi_lookup_fast_hook, ++ 0, ++ "LKRG won't enforce pCFI validation on 'lookup_fast'", ++ 1 ++ }, ++ { "capable", ++ p_install_capable_hook, ++ p_uninstall_capable_hook, ++ 0, ++ "LKRG won't enforce validation on 'capable'", ++ 1 ++ }, ++ { "scm_send", ++ p_install_scm_send_hook, ++ p_uninstall_scm_send_hook, ++ 0, ++ "LKRG won't enforce validation on 'scm_send'", ++ 1 ++ }, ++ { NULL, NULL, NULL, 1, NULL, 0 } ++}; ++ ++static void p_ed_wq_valid_cache_zero(void *p_arg) { ++ ++ struct work_struct *p_struct = p_arg; ++ ++ memset(p_struct, 0, sizeof(struct 
work_struct)); ++} ++ ++int p_ed_wq_valid_cache_init(void) { ++ ++ if ( (p_ed_wq_valid_cache = kmem_cache_create("p_ed_wq_valid_cache", sizeof(struct work_struct), ++ 0, SLAB_HWCACHE_ALIGN, p_ed_wq_valid_cache_zero)) == NULL) { ++ p_print_log(P_LKRG_ERR, "kmem_cache_create() for exploit detection validation error! :(\n"); ++ return -ENOMEM; ++ } ++ ++ return P_LKRG_SUCCESS; ++} ++ ++static void p_ed_wq_valid_cache_delete(void) { ++ ++ flush_workqueue(system_unbound_wq); ++ if (p_ed_wq_valid_cache) { ++ kmem_cache_destroy(p_ed_wq_valid_cache); ++ p_ed_wq_valid_cache = NULL; ++ } ++} ++ ++notrace void p_dump_creds(struct p_cred *p_where, const struct cred *p_from) { ++ ++ /* Get reference to cred */ ++ get_cred(p_from); ++ ++ /* Track process's capabilities */ ++ memcpy(&p_where->cap_inheritable, &p_from->cap_inheritable, sizeof(kernel_cap_t)); ++ memcpy(&p_where->cap_permitted, &p_from->cap_permitted, sizeof(kernel_cap_t)); ++ memcpy(&p_where->cap_effective, &p_from->cap_effective, sizeof(kernel_cap_t)); ++ memcpy(&p_where->cap_bset, &p_from->cap_bset, sizeof(kernel_cap_t)); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0) ++ memcpy(&p_where->cap_ambient, &p_from->cap_ambient, sizeof(kernel_cap_t)); ++#endif ++ ++ /* Track process's IDs */ ++ p_set_uid(&p_where->uid, p_get_uid(&p_from->uid)); ++ p_set_gid(&p_where->gid, p_get_gid(&p_from->gid)); ++ p_set_uid(&p_where->suid, p_get_uid(&p_from->suid)); ++ p_set_gid(&p_where->sgid, p_get_gid(&p_from->sgid)); ++ p_set_uid(&p_where->euid, p_get_uid(&p_from->euid)); ++ p_set_gid(&p_where->egid, p_get_gid(&p_from->egid)); ++ p_set_uid(&p_where->fsuid, p_get_uid(&p_from->fsuid)); ++ p_set_gid(&p_where->fsgid, p_get_gid(&p_from->fsgid)); ++ ++ /* Track process's securebits - TODO: research */ ++ p_where->securebits = p_from->securebits; ++ ++ /* Track process's critical pointers */ ++ p_where->user = p_from->user; ++ p_where->user_ns = p_from->user_ns; ++ ++ /* Release reference to cred */ ++ put_cred(p_from); ++} ++ 
++notrace void p_dump_seccomp(struct p_seccomp *p_sec, struct task_struct *p_task) { ++ ++ P_SYM(p_get_seccomp_filter)(p_task); ++ p_sec->sec.mode = p_task->seccomp.mode; // Mode ++ p_sec->sec.filter = p_task->seccomp.filter; // Filter ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,11,0) ++ if (test_task_syscall_work(p_task,SECCOMP)) ++#else ++ if (test_tsk_thread_flag(p_task,TIF_SECCOMP)) ++#endif ++ p_sec->flag = 1; ++ else ++ p_sec->flag = 0; ++ p_sec->flag_sync_thread = 0; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0) ++ P_SYM(p_put_seccomp_filter)(p_task->seccomp.filter); ++#else ++ P_SYM(p_put_seccomp_filter)(p_task); ++#endif ++ ++} ++ ++#if defined(P_VERIFY_ADDR_LIMIT) ++notrace static inline unsigned long p_get_addr_limit(struct task_struct *p_task) { ++ ++/* X86(-64)*/ ++#if defined(CONFIG_X86) ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0) && LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0) ++ ++ return p_task->thread.addr_limit.seg; ++ ++#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ++ ++ struct thread_info *p_ti = task_thread_info(p_task); ++ ++ return p_ti->addr_limit.seg; ++ ++#endif ++ ++/* ARM(64) */ ++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ ++ struct thread_info *p_ti = task_thread_info(p_task); ++ ++ return p_ti->addr_limit; ++ ++#endif ++} ++#endif ++ ++notrace void p_verify_addr_limit(struct p_ed_process *p_orig, struct task_struct *p_current) { ++ ++#if defined(P_VERIFY_ADDR_LIMIT) ++ ++ unsigned long p_addr_limit = ++#if defined(CONFIG_X86) ++ p_orig->p_ed_task.p_addr_limit.seg; ++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ p_orig->p_ed_task.p_addr_limit; ++#endif ++ ++ /* Verify addr_limit */ ++ if (p_addr_limit != p_get_addr_limit(p_current)) { ++ p_print_log(P_LKRG_CRIT, ++ " Detected ADDR_LIMIT segment corruption! " ++ "process[%d | %s] has different segment address! 
[%lx vs %lx]\n", ++ task_pid_nr(p_current), p_current->comm, ++ p_addr_limit, ++ p_get_addr_limit(p_current)); ++ dump_stack(); ++ // kill this process! ++ p_ed_kill_task_by_task(p_current); ++ } ++ ++#endif ++} ++ ++notrace static inline void p_dump_addr_limit(mm_segment_t *p_addr_limit, struct task_struct *p_task) { ++ ++#if defined(P_VERIFY_ADDR_LIMIT) ++ ++#if defined(CONFIG_X86) ++ p_addr_limit->seg = ++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ *p_addr_limit = ++#endif ++ p_get_addr_limit(p_task); ++#endif ++} ++ ++notrace void p_update_ed_process(struct p_ed_process *p_source, struct task_struct *p_task, char p_stack) { ++ ++ rcu_read_lock(); ++ get_task_struct(p_task); ++ /* Track process's metadata */ ++ p_source->p_ed_task.p_pid = p_task->pid; ++ p_source->p_ed_task.p_cred_ptr = rcu_dereference(p_task->cred); ++ p_source->p_ed_task.p_real_cred_ptr = rcu_dereference(p_task->real_cred); ++ if (p_stack) ++ p_source->p_ed_task.p_stack = p_task->stack; ++ /* Namespaces */ ++ p_source->p_ed_task.p_nsproxy = p_task->nsproxy; ++ p_source->p_ed_task.p_ns.uts_ns = p_task->nsproxy->uts_ns; ++ p_source->p_ed_task.p_ns.ipc_ns = p_task->nsproxy->ipc_ns; ++ p_source->p_ed_task.p_ns.mnt_ns = p_task->nsproxy->mnt_ns; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0) ++ p_source->p_ed_task.p_ns.pid_ns_for_children = p_task->nsproxy->pid_ns_for_children; ++#else ++ p_source->p_ed_task.p_ns.pid_ns = p_task->nsproxy->pid_ns; ++#endif ++ p_source->p_ed_task.p_ns.net_ns = p_task->nsproxy->net_ns; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) ++ p_source->p_ed_task.p_ns.cgroup_ns = p_task->nsproxy->cgroup_ns; ++#endif ++ /* Creds */ ++ p_dump_creds(&p_source->p_ed_task.p_cred, p_source->p_ed_task.p_cred_ptr); ++ p_dump_creds(&p_source->p_ed_task.p_real_cred, p_source->p_ed_task.p_real_cred_ptr); ++ /* Seccomp */ ++ p_dump_seccomp(&p_source->p_ed_task.p_sec, p_task); ++ /* addr_limit */ ++#if defined(P_VERIFY_ADDR_LIMIT) ++ 
p_dump_addr_limit(&p_source->p_ed_task.p_addr_limit, p_task); ++#endif ++ /* Name */ ++ strncpy(p_source->p_ed_task.p_comm, p_task->comm, TASK_COMM_LEN); ++ p_source->p_ed_task.p_comm[TASK_COMM_LEN] = 0; ++ /* Should be last here to propagate potential glitching */ ++ wmb(); ++ p_source->p_ed_task.p_task = p_task; ++ put_task_struct(p_task); ++ rcu_read_unlock(); ++ ++} ++ ++#ifdef P_LKRG_TASK_OFF_DEBUG ++struct p_lkrg_debug_off_flag_callers { ++ ++ char p_id; ++ char *p_name; ++ ++} p_debug_off_flag_callers[] = { ++ ++ { 0, "RESERVED" }, ++ { 1, "RESERVED" }, ++ { 2, "RESERVED" }, ++ { 3, "RESERVED" }, ++ { 4, "p_x32_sys_keyctl_entry" }, ++ { 5, "p_x32_sys_keyctl_ret" }, ++ { 6, "p_cap_task_prctl_entry" }, ++ { 7, "p_cap_task_prctl_ret" }, ++ { 8, "p_sys_capset_entry" }, ++ { 9, "p_sys_capset_ret" }, ++ { 10, "p_compat_sys_add_key_entry" }, ++ { 11, "p_compat_sys_add_key_ret" }, ++ { 12, "p_compat_sys_capset_entry" }, ++ { 13, "p_compat_sys_capset_ret" }, ++ { 14, "RESERVED" }, ++ { 15, "RESERVED" }, ++ { 16, "RESERVED" }, ++ { 17, "RESERVED" }, ++ { 18, "p_compat_sys_keyctl_entry" }, ++ { 19, "p_compat_sys_keyctl_ret" }, ++ { 20, "p_compat_sys_request_key_entry" }, ++ { 21, "p_compat_sys_request_key_ret" }, ++ { 22, "p_key_change_session_keyring_entry" }, ++ { 23, "p_key_change_session_keyring_ret" }, ++ { 24, "p_sys_add_key_entry" }, ++ { 25, "p_sys_add_key_ret" }, ++ { 26, "p_sys_keyctl_entry" }, ++ { 27, "p_sys_keyctl_ret" }, ++ { 28, "p_sys_request_key_entry" }, ++ { 29, "p_sys_request_key_ret" }, ++ { 30, "p_ovl_create_or_link_ret" }, ++ { 31, "p_override_creds_entry" }, ++ { 32, "p_revert_creds_ret" }, ++ { 33, "p_seccomp_entry" }, ++ { 34, "p_seccomp_ret" }, ++ { 35, "p_set_current_groups_entry" }, ++ { 36, "p_set_current_groups_ret" }, ++ { 37, "p_security_bprm_committing_creds_entry" }, ++ { 38, "RESERVED" }, ++ { 39, "RESERVED" }, ++ { 40, "p_security_bprm_committed_creds_ret" }, ++ { 41, "p_sys_setfsgid_entry" }, ++ { 42, "p_sys_setfsgid_ret" }, ++ { 
43, "p_sys_setfsuid_entry" }, ++ { 44, "p_sys_setfsuid_ret" }, ++ { 45, "p_sys_setgid_entry" }, ++ { 46, "p_sys_setgid_ret" }, ++ { 47, "p_sys_setns_entry" }, ++ { 48, "p_sys_setns_ret" }, ++ { 49, "p_sys_setregid_entry" }, ++ { 50, "p_sys_setregid_ret" }, ++ { 51, "p_sys_setresgid_entry" }, ++ { 52, "p_sys_setresgid_ret" }, ++ { 53, "p_sys_setresuid_entry" }, ++ { 54, "p_sys_setresuid_ret" }, ++ { 55, "p_sys_setreuid_entry" }, ++ { 56, "p_sys_setreuid_ret" }, ++ { 57, "p_sys_setuid_entry" }, ++ { 58, "p_sys_setuid_ret" }, ++ { 59, "p_sys_unshare_entry" }, ++ { 60, "p_sys_unshare_ret" }, ++ { 61, "p_wake_up_new_task_entry" }, ++ { 0, NULL } ++ ++}; ++ ++struct p_lkrg_debug_off_flag_action { ++ ++ char p_id; ++ char *p_name; ++ ++} p_debug_off_flag_action[] = { ++ ++ { 0, "OFF" }, ++ { 1, "ON" }, ++ { 2, "RESET" }, ++ { 3, "OVERRIDE OFF" }, ++ { 4, "OVERRIDE ON" }, ++ ++}; ++ ++notrace void p_debug_off_flag_off(struct p_ed_process *p_source, unsigned int p_id) { ++ ++ p_source->p_ed_task.p_off_debug_cnt++; ++ ++ if (p_source->p_ed_task.p_off_counter < P_LKRG_TASK_OFF_MAXBUF-1) { ++ /* Report current event */ ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_caller = p_id; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_action = 0; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_old_off = ++ p_source->p_ed_task.p_off ^ p_global_off_cookie; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_debug_val = ++ p_source->p_ed_task.p_off_debug_cnt; ++ /* Increment ring-buffer pointer */ ++ p_source->p_ed_task.p_off_counter++; ++ } else { ++ /* Mark that we are starting overriding ring-buffer */ ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_caller = -1; ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_action = -1; ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_old_off = -1; ++ 
p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_debug_val = -1; ++ /* Reset */ ++ p_source->p_ed_task.p_off_counter = 0; ++ /* Report current event */ ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_caller = p_id; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_action = 0; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_old_off = ++ p_source->p_ed_task.p_off ^ p_global_off_cookie; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_debug_val = ++ p_source->p_ed_task.p_off_debug_cnt; ++ /* Increment ring-buffer pointer */ ++ p_source->p_ed_task.p_off_counter++; ++ } ++} ++ ++notrace void p_debug_off_flag_on(struct p_ed_process *p_source, unsigned int p_id) { ++ ++ p_source->p_ed_task.p_off_debug_cnt--; ++ ++ if (p_source->p_ed_task.p_off_counter < P_LKRG_TASK_OFF_MAXBUF-1) { ++ /* Report current event */ ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_caller = p_id; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_action = 1; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_old_off = ++ p_source->p_ed_task.p_off ^ p_global_off_cookie; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_debug_val = ++ p_source->p_ed_task.p_off_debug_cnt; ++ /* Increment ring-buffer pointer */ ++ p_source->p_ed_task.p_off_counter++; ++ } else { ++ /* Mark that we are starting overriding ring-buffer */ ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_caller = -1; ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_action = -1; ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_old_off = -1; ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_debug_val = ++ p_source->p_ed_task.p_off_debug_cnt; ++ /* Reset */ ++ p_source->p_ed_task.p_off_counter = 0; ++ /* Report current event */ ++ 
p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_caller = p_id; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_action = 1; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_old_off = ++ p_source->p_ed_task.p_off ^ p_global_off_cookie; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_debug_val = ++ p_source->p_ed_task.p_off_debug_cnt; ++ /* Increment ring-buffer pointer */ ++ p_source->p_ed_task.p_off_counter++; ++ } ++} ++ ++notrace void p_debug_off_flag_override_off(struct p_ed_process *p_source, unsigned int p_id, struct pt_regs *p_regs) { ++ ++ p_source->p_ed_task.p_off_debug_cnt++; ++ ++ if (p_source->p_ed_task.p_off_counter < P_LKRG_TASK_OFF_MAXBUF-1) { ++ /* Report current event */ ++ /* Stack trace*/ ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.entries = ++ (unsigned long *)p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_internal_buf; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.max_entries = ++ P_PCFI_STACK_BUF/sizeof(p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.entries[0]); ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.nr_entries = 0; ++#if defined(CONFIG_ARCH_STACKWALK) || LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0) ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.nr_entries = ++ stack_trace_save(p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.entries, ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.max_entries, ++ 1); ++#else ++ save_stack_trace(&p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace); ++#endif ++ /* End */ ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_caller = p_id; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_action = 3; ++ 
p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_old_off = ++ p_source->p_ed_task.p_off ^ p_global_off_cookie; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_debug_val = ++ p_source->p_ed_task.p_off_debug_cnt; ++ /* Increment ring-buffer pointer */ ++ p_source->p_ed_task.p_off_counter++; ++ } else { ++ /* Mark that we are starting overriding ring-buffer */ ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_caller = -1; ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_action = -1; ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_old_off = -1; ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_debug_val = -1; ++ /* Reset */ ++ p_source->p_ed_task.p_off_counter = 0; ++ /* Report current event */ ++ /* Stack trace*/ ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.entries = ++ (unsigned long *)p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_internal_buf; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.max_entries = ++ P_PCFI_STACK_BUF/sizeof(p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.entries[0]); ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.nr_entries = 0; ++#if defined(CONFIG_ARCH_STACKWALK) || LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0) ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.nr_entries = ++ stack_trace_save(p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.entries, ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.max_entries, ++ 1); ++#else ++ save_stack_trace(&p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace); ++#endif ++ /* End */ ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_caller = p_id; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_action = 3; ++ 
p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_old_off = ++ p_source->p_ed_task.p_off ^ p_global_off_cookie; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_debug_val = ++ p_source->p_ed_task.p_off_debug_cnt; ++ /* Increment ring-buffer pointer */ ++ p_source->p_ed_task.p_off_counter++; ++ } ++} ++ ++notrace void p_debug_off_flag_override_on(struct p_ed_process *p_source, unsigned int p_id, struct pt_regs *p_regs) { ++ ++ p_source->p_ed_task.p_off_debug_cnt--; ++ ++ if (p_source->p_ed_task.p_off_counter < P_LKRG_TASK_OFF_MAXBUF-1) { ++ /* Report current event */ ++ /* Stack trace*/ ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.entries = ++ (unsigned long *)p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_internal_buf; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.max_entries = ++ P_PCFI_STACK_BUF/sizeof(p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.entries[0]); ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.nr_entries = 0; ++#if defined(CONFIG_ARCH_STACKWALK) || LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0) ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.nr_entries = ++ stack_trace_save(p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.entries, ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.max_entries, ++ 1); ++#else ++ save_stack_trace(&p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace); ++#endif ++ /* End */ ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_caller = p_id; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_action = 4; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_old_off = ++ p_source->p_ed_task.p_off ^ p_global_off_cookie; ++ 
p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_debug_val = ++ p_source->p_ed_task.p_off_debug_cnt; ++ /* Increment ring-buffer pointer */ ++ p_source->p_ed_task.p_off_counter++; ++ } else { ++ /* Mark that we are starting overriding ring-buffer */ ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_caller = -1; ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_action = -1; ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_old_off = -1; ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_debug_val = -1; ++ /* Reset */ ++ p_source->p_ed_task.p_off_counter = 0; ++ /* Report current event */ ++ /* Stack trace*/ ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.entries = ++ (unsigned long *)p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_internal_buf; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.max_entries = ++ P_PCFI_STACK_BUF/sizeof(p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.entries[0]); ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.nr_entries = 0; ++#if defined(CONFIG_ARCH_STACKWALK) || LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0) ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.nr_entries = ++ stack_trace_save(p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.entries, ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace.max_entries, ++ 1); ++#else ++ save_stack_trace(&p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_trace); ++#endif ++ /* End */ ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_caller = p_id; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_action = 4; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_old_off = ++ p_source->p_ed_task.p_off ^ p_global_off_cookie; ++ 
p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_debug_val = ++ p_source->p_ed_task.p_off_debug_cnt; ++ /* Increment ring-buffer pointer */ ++ p_source->p_ed_task.p_off_counter++; ++ } ++} ++ ++notrace void p_debug_off_flag_reset(struct p_ed_process *p_source, unsigned int p_id) { ++ ++ p_source->p_ed_task.p_off_debug_cnt = 0; ++ ++ if (p_source->p_ed_task.p_off_counter < P_LKRG_TASK_OFF_MAXBUF-1) { ++ /* Report current event */ ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_caller = p_id; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_action = 2; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_old_off = ++ p_source->p_ed_task.p_off ^ p_global_off_cookie; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_debug_val = ++ p_source->p_ed_task.p_off_debug_cnt; ++ /* Increment ring-buffer pointer */ ++ p_source->p_ed_task.p_off_counter++; ++ } else { ++ /* Mark that we are starting overriding ring-buffer */ ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_caller = -1; ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_action = -1; ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_old_off = -1; ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_debug_val = -1; ++ /* Reset */ ++ p_source->p_ed_task.p_off_counter = 0; ++ /* Report current event */ ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_caller = p_id; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_action = 2; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_old_off = ++ p_source->p_ed_task.p_off ^ p_global_off_cookie; ++ p_source->p_ed_task.p_off_debug[p_source->p_ed_task.p_off_counter].p_debug_val = ++ p_source->p_ed_task.p_off_debug_cnt; ++ /* Increment ring-buffer pointer */ ++ p_source->p_ed_task.p_off_counter++; ++ } ++} ++ ++notrace void 
p_debug_off_flag_dump_ring_buffer(struct p_ed_process *p_source) { ++ ++ unsigned int p_tmp; ++ ++ p_print_log(P_LKRG_CRIT, "OFF debug: normalization[0x%lx] cookie[0x%lx]\n", ++ p_global_cnt_cookie, ++ p_global_off_cookie); ++ p_print_log(P_LKRG_CRIT, "Process[%d | %s] Parent[%d | %s] has [%d] entries:\n", ++ p_source->p_ed_task.p_pid, ++ p_source->p_ed_task.p_comm, ++ p_source->p_ed_task.p_task->real_parent->pid, ++ p_source->p_ed_task.p_task->real_parent->comm, ++ p_source->p_ed_task.p_off_counter); ++ ++ if (p_source->p_ed_task.p_off_counter < 3 && ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_caller == -1 && ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_action == -1 && ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_old_off == -1 && ++ p_source->p_ed_task.p_off_debug[P_LKRG_TASK_OFF_MAXBUF-1].p_debug_val == -1) { ++ /* OK, ring buffer was overwritten. Dump a few entries before overwrite: */ ++ p_print_log(P_LKRG_CRIT, "Before overwrite:\n"); ++ for (p_tmp = P_LKRG_TASK_OFF_MAXBUF-1-6; p_tmp < P_LKRG_TASK_OFF_MAXBUF-1; p_tmp++) { ++ p_print_log(P_LKRG_CRIT," => caller[%s] action[%s] old_off[0x%lx] debug_val[%d]\n", ++ p_debug_off_flag_callers[p_source->p_ed_task.p_off_debug[p_tmp].p_caller].p_name, ++ p_debug_off_flag_action[p_source->p_ed_task.p_off_debug[p_tmp].p_action].p_name, ++ p_source->p_ed_task.p_off_debug[p_tmp].p_old_off, ++ p_source->p_ed_task.p_off_debug[p_tmp].p_debug_val); ++ if (p_source->p_ed_task.p_off_debug[p_tmp].p_action == 3 || ++ p_source->p_ed_task.p_off_debug[p_tmp].p_action == 4) { ++ p_print_log(P_LKRG_CRIT,"Stack trace:\n"); ++#if defined(CONFIG_ARCH_STACKWALK) || LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0) ++ stack_trace_print(p_source->p_ed_task.p_off_debug[p_tmp].p_trace.entries, ++ p_source->p_ed_task.p_off_debug[p_tmp].p_trace.nr_entries, ++ 0); ++#else ++ print_stack_trace(&p_source->p_ed_task.p_off_debug[p_tmp].p_trace, 0); ++#endif ++ } ++ } ++ p_print_log(P_LKRG_CRIT, "=== RING 
BUFFER OVERRIDE ===\n"); ++ } ++ ++ for (p_tmp = 0; p_tmp < p_source->p_ed_task.p_off_counter; p_tmp++) { ++ p_print_log(P_LKRG_CRIT," => caller[%s] action[%s] old_off[0x%lx] debug_val[%d]\n", ++ p_debug_off_flag_callers[p_source->p_ed_task.p_off_debug[p_tmp].p_caller].p_name, ++ p_debug_off_flag_action[p_source->p_ed_task.p_off_debug[p_tmp].p_action].p_name, ++ p_source->p_ed_task.p_off_debug[p_tmp].p_old_off, ++ p_source->p_ed_task.p_off_debug[p_tmp].p_debug_val); ++ if (p_source->p_ed_task.p_off_debug[p_tmp].p_action == 3 || ++ p_source->p_ed_task.p_off_debug[p_tmp].p_action == 4) { ++ p_print_log(P_LKRG_CRIT,"Stack trace:\n"); ++#if defined(CONFIG_ARCH_STACKWALK) || LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0) ++ stack_trace_print(p_source->p_ed_task.p_off_debug[p_tmp].p_trace.entries, ++ p_source->p_ed_task.p_off_debug[p_tmp].p_trace.nr_entries, ++ 0); ++#else ++ print_stack_trace(&p_source->p_ed_task.p_off_debug[p_tmp].p_trace, 0); ++#endif ++ } ++ } ++} ++#endif ++ ++inline void p_validate_off_flag(struct p_ed_process *p_source, long p_val, int *p_ret) { ++ ++ if (likely(p_val == p_global_cnt_cookie)) ++ return; ++ ++ while (p_val > p_global_cnt_cookie) { ++ p_val -= p_global_cnt_cookie; ++ if (unlikely(p_val > (p_global_cnt_cookie << 3))) ++ break; ++ } ++ ++ if (unlikely(p_val != p_global_cnt_cookie)) { ++ p_print_log(P_LKRG_CRIT, ++ " process[%d | %s] has corrupted 'off' flag!\n", ++ p_source->p_ed_task.p_pid, p_source->p_ed_task.p_comm); ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_print_log(P_LKRG_CRIT, ++#else ++ p_print_log(P_LKRG_INFO, ++#endif ++ "'off' flag[0x%lx] (normalization via 0x%lx)\n", ++ p_val, p_global_cnt_cookie); ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_dump_ring_buffer(p_source); ++ dump_stack(); ++#endif ++ // kill this process! 
++ rcu_read_lock(); ++ if (p_ret) ++ (*p_ret)++; ++ else ++ p_ed_kill_task_by_task(p_source->p_ed_task.p_task); ++ rcu_read_unlock(); ++ } ++} ++ ++inline void p_ed_is_off_off(struct p_ed_process *p_source, long p_val) { ++ ++ if (p_val != p_global_cnt_cookie) { ++ p_print_log(P_LKRG_CRIT, ++ " ON process[%d | %s] has corrupted 'off' flag!\n", ++ p_source->p_ed_task.p_pid, p_source->p_ed_task.p_comm); ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_print_log(P_LKRG_CRIT, ++#else ++ p_print_log(P_LKRG_INFO, ++#endif ++ "'off' flag[0x%lx] (normalization via 0x%lx)\n", ++ p_val, p_global_cnt_cookie); ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_dump_ring_buffer(p_source); ++ dump_stack(); ++#endif ++ // kill this process! ++ rcu_read_lock(); ++ p_ed_kill_task_by_task(p_source->p_ed_task.p_task); ++ rcu_read_unlock(); ++ } ++} ++ ++notrace int p_verify_ovl_create_or_link(struct p_ed_process *p_source) { ++ ++ register unsigned long p_off = p_source->p_ed_task.p_off ^ p_global_off_cookie; // Decode ++ ++ p_validate_off_flag(p_source,p_off,NULL); // Validate ++ ++ return p_off == 2 * p_global_cnt_cookie; ++} ++ ++notrace void p_ed_is_off_off_wrap(struct p_ed_process *p_source) { ++ ++ register unsigned long p_off = p_source->p_ed_task.p_off ^ p_global_off_cookie; // Decode ++ p_ed_is_off_off(p_source,p_off); ++} ++ ++notrace void p_ed_validate_off_flag_wrap(struct p_ed_process *p_source) { ++ ++ register unsigned long p_off = p_source->p_ed_task.p_off ^ p_global_off_cookie; // Decode ++ p_validate_off_flag(p_source,p_off,NULL); // Validate ++} ++ ++notrace void p_set_ed_process_on(struct p_ed_process *p_source) { ++ ++ register unsigned long p_off = p_source->p_ed_task.p_off ^ p_global_off_cookie; // Decode ++ ++ p_off -= p_global_cnt_cookie; // Normalize ++ p_ed_is_off_off(p_source,p_off); // Validate ++ ++ p_source->p_ed_task.p_off = p_off ^ p_global_off_cookie; // Encode ++ p_source->p_ed_task.p_off_count = 0; ++} ++ ++notrace void p_set_ed_process_off(struct p_ed_process 
*p_source) { ++ ++ register unsigned long p_off = p_source->p_ed_task.p_off ^ p_global_off_cookie; // Decode ++ ++ p_ed_is_off_off(p_source,p_off); // Validate ++ p_off += p_global_cnt_cookie; // Normalize ++ ++ p_source->p_ed_task.p_off = p_off ^ p_global_off_cookie; ++} ++ ++notrace void p_set_ed_process_override_on(struct p_ed_process *p_source) { ++ ++ register unsigned long p_off = p_source->p_ed_task.p_off ^ p_global_off_cookie; // Decode ++ ++ p_validate_off_flag(p_source,p_off,NULL); // Validate ++ p_off -= p_global_cnt_cookie; // Normalize ++ ++ p_source->p_ed_task.p_off = p_off ^ p_global_off_cookie; // Encode ++ if (p_off == p_global_cnt_cookie) ++ p_source->p_ed_task.p_off_count = 0; ++} ++ ++notrace void p_set_ed_process_override_off(struct p_ed_process *p_source) { ++ ++ register unsigned long p_off = p_source->p_ed_task.p_off ^ p_global_off_cookie; // Decode ++ ++ p_validate_off_flag(p_source,p_off,NULL); // Validate ++ p_off += p_global_cnt_cookie; // Normalize ++ ++ p_source->p_ed_task.p_off = p_off ^ p_global_off_cookie; ++} ++ ++notrace void p_reset_ed_flags(struct p_ed_process *p_source) { ++ ++ p_source->p_ed_task.p_off = p_global_cnt_cookie ^ p_global_off_cookie; ++ p_source->p_ed_task.p_off_count = 0; ++ ++} ++ ++int p_print_task_f(void *p_arg) { ++ ++ struct task_struct *p_task = (struct task_struct *)p_arg; ++ ++ p_print_log(P_LKRG_CRIT, ++ "%s [%d]\n", p_task->comm, task_pid_nr(p_task)); ++ ++ return P_LKRG_SUCCESS; ++} ++ ++int p_dump_task_f(void *p_arg) { ++ ++ struct task_struct *p_task = (struct task_struct *)p_arg; ++ struct p_ed_process *p_tmp; ++ struct rb_root *p_root; ++ ++ if ( (p_tmp = p_alloc_ed_pids()) == NULL) { ++ p_print_log(P_LKRG_ERR, ++ "p_alloc_ed_pids() returned NULL for pid %d :(\n",p_task->pid); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ p_update_ed_process(p_tmp, p_task, 1); ++// p_set_ed_process_on(p_tmp); ++ p_tmp->p_ed_task.p_off = p_global_cnt_cookie ^ p_global_off_cookie; ++ p_tmp->p_ed_task.p_off_count = 0; 
++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_tmp->p_ed_task.p_off_debug_cnt = p_tmp->p_ed_task.p_off_counter = 0; ++#endif ++ ++ p_rb_init_ed_pid_node(&p_tmp->p_rb); ++ p_root = p_rb_hash_tree_lookup(p_tmp->p_ed_task.p_pid); ++ if (p_rb_add_ed_pid(p_root, p_tmp->p_ed_task.p_pid, p_tmp)) { ++ p_print_log(P_LKRG_INFO, ++ "pid => %d is already inserted!\n",p_tmp->p_ed_task.p_pid); ++ p_free_ed_pids(p_tmp); ++ return 1; ++ } else { ++ p_print_log(P_LKRG_INFO, ++ "Inserting pid => %d\n", p_tmp->p_ed_task.p_pid); ++ } ++ ++ return P_LKRG_SUCCESS; ++} ++ ++int p_remove_task_pid_f(pid_t p_arg) { ++ ++ struct p_ed_process *p_tmp; ++ struct rb_root *p_root; ++ ++ p_root = p_rb_hash_tree_lookup(p_arg); ++ if ( (p_tmp = p_rb_find_ed_pid(p_root, p_arg)) == NULL) { ++ // This process is not on the list! ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ p_rb_del_ed_pid(p_root, p_tmp); ++ p_print_log(P_LKRG_INFO, "Removing ED pid => %d\n", p_arg); ++ ++ return P_LKRG_SUCCESS; ++} ++ ++static unsigned int p_iterate_processes(int (*p_func)(void *), char p_ver) { ++ ++ int p_ret; ++ unsigned int p_err = 0; ++ struct task_struct *p_ptmp, *p_tmp; ++ unsigned long p_flags; ++ ++ p_tasks_read_lock(&p_flags); ++ rcu_read_lock(); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) ++ for_each_process_thread(p_ptmp, p_tmp) { ++#else ++ // tasklist_lock ++ do_each_thread(p_ptmp, p_tmp) { ++#endif ++ ++ get_task_struct(p_tmp); ++ /* do not touch kernel threads or the global init */ ++ if (!p_is_ed_task(p_tmp)) { ++ put_task_struct(p_tmp); ++ continue; ++ } ++ ++ if ( (p_ret = p_func(p_tmp)) != 0) { ++ p_err++; ++ p_print_log(P_LKRG_INFO, ++ " Error[%d] during process[%d |%s] iteration!\n", ++ p_ret, task_pid_nr(p_tmp), p_tmp->comm); ++ if (likely(p_ver)) { ++ if (spin_is_locked(&p_tmp->sighand->siglock)) { ++ p_regs_set_ip(task_pt_regs(p_tmp), -1); ++ } else { ++ p_ed_kill_task_by_task(p_tmp); ++ } ++ } ++ } ++ put_task_struct(p_tmp); ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) ++ } ++#else ++ 
// tasklist_unlock
++   } while_each_thread(p_ptmp, p_tmp);
++#endif
++   rcu_read_unlock();
++   p_tasks_read_unlock(&p_flags);
++
++   return p_err;
++}
++
++static unsigned int p_iterate_lkrg_tasks_paranoid(void) {
++
++   int p_ret = 0;
++   unsigned int i;
++   struct rb_node *p_node;
++   struct p_ed_process *p_tmp;
++   struct task_struct *p_task = NULL;
++   unsigned long p_flags;
++
++   /* NOTE(review): the following loop header was garbled in this copy of the
++    * patch (angle-bracket stripping ate "i<..." through "...p_tmp->").
++    * Reconstructed from the matching unlock calls and brace structure below;
++    * verify the 'p_tree' member name against upstream LKRG before applying. */
++   for (i = 0; i < ARRAY_SIZE(p_rb_hash); i++) {
++      p_tasks_read_lock_raw(&p_rb_hash[i].p_lock.lock);
++      rcu_read_lock();
++      for (p_node = rb_first(&p_rb_hash[i].p_tree); p_node; p_node = rb_next(p_node)) {
++         p_tmp = rb_entry(p_node, struct p_ed_process, p_rb);
++         if ( (p_task = pid_task(find_vpid(p_tmp->p_ed_task.p_pid), PIDTYPE_PID)) != NULL) {
++            get_task_struct(p_task);
++            /* do not touch kernel threads or the global init */
++            if (p_is_ed_task(p_task) && p_task->state != TASK_DEAD) {
++               if (p_cmp_tasks(p_tmp, p_task, 0x0)) {
++                  p_ret++;
++                  if (spin_is_locked(&p_task->sighand->siglock)) {
++                     p_regs_set_ip(task_pt_regs(p_task), -1);
++                  } else {
++                     p_ed_kill_task_by_task(p_task);
++                  }
++               }
++            }
++            put_task_struct(p_task);
++         }
++      }
++      rcu_read_unlock();
++      p_tasks_read_unlock_raw(&p_rb_hash[i].p_lock.lock);
++   }
++
++   /* Before leaving, verify current task */
++   p_tasks_read_lock(&p_flags);
++   if (p_is_ed_task(current)) {
++      p_validate_task_f(current);
++   }
++   p_tasks_read_unlock(&p_flags);
++
++   return p_ret;
++}
++
++int p_cmp_creds(struct p_cred *p_orig, const struct cred *p_current_cred, struct task_struct *p_current, char p_opt) {
++
++   int p_ret = 0;
++
++   /* *UID */
++   if (!uid_eq(p_orig->uid, p_current_cred->uid)) {
++      if (p_opt) {
++         p_print_log(P_LKRG_CRIT,
++                " process[%d | %s] has different UID! %d vs %d\n",
++                task_pid_nr(p_current), p_current->comm,
++                p_get_uid(&p_orig->uid), p_get_uid(&p_current_cred->uid));
++      }
++      p_ret++;
++   }
++
++   if (!uid_eq(p_orig->euid, p_current_cred->euid)) {
++      if (p_opt) {
++         p_print_log(P_LKRG_CRIT,
++                " process[%d | %s] has different EUID! %d vs %d\n",
++                task_pid_nr(p_current), p_current->comm,
++                p_get_uid(&p_orig->euid), p_get_uid(&p_current_cred->euid));
++      }
++      p_ret++;
++   }
++
++   if (!uid_eq(p_orig->suid, p_current_cred->suid)) {
++      if (p_opt) {
++         p_print_log(P_LKRG_CRIT,
++                " process[%d | %s] has different SUID! 
%d vs %d\n", ++ task_pid_nr(p_current), p_current->comm, ++ p_get_uid(&p_orig->suid), p_get_uid(&p_current_cred->suid)); ++ } ++ p_ret++; ++ } ++ ++ if (!uid_eq(p_orig->fsuid, p_current_cred->fsuid)) { ++ if (p_opt) { ++ p_print_log(P_LKRG_CRIT, ++ " process[%d | %s] has different FSUID! %d vs %d\n", ++ task_pid_nr(p_current), p_current->comm, ++ p_get_uid(&p_orig->fsuid), p_get_uid(&p_current_cred->fsuid)); ++ } ++ p_ret++; ++ } ++ ++ /* *GID */ ++ if (!gid_eq(p_orig->gid, p_current_cred->gid)) { ++ if (p_opt) { ++ p_print_log(P_LKRG_CRIT, ++ " process[%d | %s] has different GID! %d vs %d\n", ++ task_pid_nr(p_current), p_current->comm, ++ p_get_gid(&p_orig->gid), p_get_gid(&p_current_cred->gid)); ++ } ++ p_ret++; ++ } ++ ++ if (!gid_eq(p_orig->egid, p_current_cred->egid)) { ++ if (p_opt) { ++ p_print_log(P_LKRG_CRIT, ++ " process[%d | %s] has different EGID! %d vs %d\n", ++ task_pid_nr(p_current), p_current->comm, ++ p_get_gid(&p_orig->egid), p_get_gid(&p_current_cred->egid)); ++ } ++ p_ret++; ++ } ++ ++ if (!gid_eq(p_orig->sgid, p_current_cred->sgid)) { ++ if (p_opt) { ++ p_print_log(P_LKRG_CRIT, ++ " process[%d | %s] has different SGID! %d vs %d\n", ++ task_pid_nr(p_current), p_current->comm, ++ p_get_gid(&p_orig->sgid), p_get_gid(&p_current_cred->sgid)); ++ } ++ p_ret++; ++ } ++ ++ if (!gid_eq(p_orig->fsgid, p_current_cred->fsgid)) { ++ if (p_opt) { ++ p_print_log(P_LKRG_CRIT, ++ " process[%d | %s] has different FSGID! 
%d vs %d\n", ++ task_pid_nr(p_current), p_current->comm, ++ p_get_gid(&p_orig->fsgid), p_get_gid(&p_current_cred->fsgid)); ++ } ++ p_ret++; ++ } ++ ++ /* Namespaces */ ++ if (p_orig->user_ns != p_current_cred->user_ns) { ++ if (p_opt) { ++ p_print_log(P_LKRG_CRIT, ++ " process[%d | %s] has different user_namespace!\n", ++ task_pid_nr(p_current), ++ p_current->comm); ++ p_print_log(P_LKRG_INFO, "user_namespace: orig[0x%lx] vs current[0x%lx]\n", ++ (unsigned long)p_orig->user_ns, ++ (unsigned long)p_current_cred->user_ns); ++ } ++ p_ret++; ++ } ++ ++ return p_ret; ++} ++ ++static int p_cmp_tasks(struct p_ed_process *p_orig, struct task_struct *p_current, char p_kill) { ++ ++ int p_ret = 0; ++ char *p_sec_strings[3] = { "SECCOMP_MODE_DISABLED", "SECCOMP_MODE_STRICT", "SECCOMP_MODE_FILTER" }; ++ register long p_off = p_orig->p_ed_task.p_off ^ p_global_off_cookie; ++ const struct cred *p_current_cred = NULL; ++ const struct cred *p_current_real_cred = NULL; ++ ++ if (p_off - p_global_cnt_cookie) { ++ if (p_kill) ++ p_validate_off_flag(p_orig,p_off,NULL); // Validate ++ else ++ p_validate_off_flag(p_orig,p_off,&p_ret); // Validate ++ ++ p_orig->p_ed_task.p_off_count++; ++ ++ if (p_orig->p_ed_task.p_off_count > P_ED_PROCESS_OFF_MAX) { ++ p_print_log(P_LKRG_INFO, ++ "PID:%d [%s] p_off[0x%lx] / p_global_cnt[0x%lx] -> %ld | p_off_count[%u]\n", ++ p_orig->p_ed_task.p_pid, ++ p_orig->p_ed_task.p_comm, ++ p_off, p_global_cnt_cookie, ++ p_off/p_global_cnt_cookie, ++ p_orig->p_ed_task.p_off_count); ++ /* That's weird and it might be a potentially compromised process. Enforce validation now! */ ++/* ++ p_print_log(P_LKRG_CRIT, ++ " Detected data corruption attack! 
" ++ "process[%d | %s] has been disabled form checking %d times!\n", ++ task_pid_nr(p_current), p_current->comm, ++ p_orig->p_ed_task.p_off_count); ++*/ ++// p_ret++; ++ } ++ return 0; ++ } ++ ++ ++ if (p_orig->p_ed_task.p_task != p_current) { ++ /* ++ * On heavily loaded SMP machines, in a very rare corner case situation, ++ * it is possible to hit an annoying kretprobe glitch. ++ * If you are one of the "lucky" guys who hit this problem for one of the processes ++ * being monitored by the Exploit Detection (ED) feature, you are a good candidate ++ * to hit another problem. If at some point kernel decided to reuse already unused ++ * pid (from the process which was affected by the glitching scenario) you are an even ++ * better candidate to hit a potential little race condition in a function return ++ * from do_fork(). ++ * When p_do_fork_ret() is invoked it tries to get a spin_lock for internal ED database ++ * (which is red-black tree identified by p_rb_ed_pids_lock lock). If another CPU/core is ++ * executing ED validation routine p_ed_enforce_validation() it might be faster to get ++ * this lock before p_do_fork_ret() does (race condition). In that case, p_do_fork_ret() ++ * does a busy wait, and the other CPU does verification. If you are the most "lucky" guy ++ * who hit the glitching problem and pid reuse scenario for the process which supposed to ++ * do attributes update for ED database from the p_do_fork_ret() function you will generate ++ * a one-time FP. As soon as FP happens and verification routine unlocks the ED database, ++ * p_do_fork_ret() kicks-in and fixes attributes, and FP will never happen again. ++ * This is a very rare corner-case situation which can only be possible if you meet all ++ * the problems together affecting exactly the same process (kretprobe glitch + pid reuse ++ * + race condition on a heavily loaded SMP machine). It is possible to mitigate this ++ * problem and we do it here. 
++ */ ++ p_print_log(P_LKRG_WARN, ++ " Potential kretprobe glitch detected for process[%s] vs orig[%s]\n", ++ p_current->comm, ++ p_orig->p_ed_task.p_comm); ++ p_print_log(P_LKRG_INFO, ++ "process[0x%lx | %d | %s] vs orig[0x%lx | %d | %s]\n", ++ (unsigned long)p_current, ++ task_pid_nr(p_current), ++ p_current->comm, ++ (unsigned long)p_orig->p_ed_task.p_task, ++ p_orig->p_ed_task.p_pid, ++ p_orig->p_ed_task.p_comm); ++ return 0; ++ } ++ ++ /* ++ * Fetch pointers first ++ */ ++ p_current_cred = rcu_dereference(p_current->cred); ++ /* Get reference to cred */ ++ get_cred(p_current_cred); ++ p_current_real_cred = rcu_dereference(p_current->real_cred); ++ /* Get reference to real_cred */ ++ get_cred(p_current_real_cred); ++ ++ /* Validate stack first */ ++ if (p_ed_pcfi_validate_sp(p_current,p_orig,p_get_thread_sp(p_current))) { ++ p_print_log(P_LKRG_CRIT, ++ " Detected stack pointer corruption (ROP?) - pCFI violation: process[%s | %d] !!!\n", ++ p_current->comm,task_pid_nr(p_current)); ++ // kill this process! Another signal will be sent again from the caller function - it's OK. ++ if (p_kill) ++ p_pcfi_kill_task_by_task(p_current); ++ p_ret++; ++ } ++ ++ if (p_orig->p_ed_task.p_cred_ptr != p_current_cred) { ++ if (p_cmp_creds(&p_orig->p_ed_task.p_cred, p_current_cred, p_current, 0x0)) { ++ p_print_log(P_LKRG_CRIT, ++ " Detected pointer swapping attack!" ++ "process[%d | %s] has different 'cred' pointer\n", ++ task_pid_nr(p_current), ++ p_current->comm); ++ p_print_log(P_LKRG_INFO, "Original[0x%lx] vs current[0x%lx]\n", ++ (unsigned long)p_orig->p_ed_task.p_cred_ptr, ++ (unsigned long)p_current->cred); ++ p_ret++; ++ } ++ } ++ ++ if (p_orig->p_ed_task.p_real_cred_ptr != p_current_real_cred) { ++ if (p_cmp_creds(&p_orig->p_ed_task.p_real_cred, p_current_real_cred, p_current, 0x0)) { ++ p_print_log(P_LKRG_CRIT, ++ " Detected pointer swapping attack!" 
++ "process[%d | %s] has different 'real_cred' pointer\n", ++ task_pid_nr(p_current), ++ p_current->comm); ++ p_print_log(P_LKRG_INFO, "Original[0x%lx] vs current[0x%lx]\n", ++ (unsigned long)p_orig->p_ed_task.p_real_cred_ptr, ++ (unsigned long)p_current->real_cred); ++ p_ret++; ++ } ++ } ++ ++ p_ret += p_cmp_creds(&p_orig->p_ed_task.p_cred, p_current_cred, p_current, 0x1); ++ if (p_ret) ++ p_ret += p_cmp_creds(&p_orig->p_ed_task.p_real_cred, p_current_real_cred, p_current, 0x1); ++ ++ /* Namespaces */ ++ if (p_orig->p_ed_task.p_nsproxy != p_current->nsproxy) { ++ p_print_log(P_LKRG_CRIT, ++ " Detected namespace escape attack!" ++ "process[%d | %s] has different 'nsproxy' pointer\n", ++ task_pid_nr(p_current), ++ p_current->comm); ++ p_print_log(P_LKRG_INFO, "Original[0x%lx] vs current[0x%lx]\n", ++ (unsigned long)p_orig->p_ed_task.p_nsproxy, ++ (unsigned long)p_current->nsproxy); ++ p_ret++; ++ } ++ ++ if (p_orig->p_ed_task.p_ns.uts_ns != p_current->nsproxy->uts_ns) { ++ p_print_log(P_LKRG_CRIT, ++ " Detected UTS namespace escape attack!" ++ "process[%d | %s] has different 'nsproxy->uts_ns' pointer\n", ++ task_pid_nr(p_current), ++ p_current->comm); ++ p_print_log(P_LKRG_INFO, "Original[0x%lx] vs current[0x%lx]\n", ++ (unsigned long)p_orig->p_ed_task.p_ns.uts_ns, ++ (unsigned long)p_current->nsproxy->uts_ns); ++ p_ret++; ++ } ++ ++ if (p_orig->p_ed_task.p_ns.ipc_ns != p_current->nsproxy->ipc_ns) { ++ p_print_log(P_LKRG_CRIT, ++ " Detected IPC namespace escape attack!" ++ "process[%d | %s] has different 'nsproxy->ipc_ns' pointer\n", ++ task_pid_nr(p_current), ++ p_current->comm); ++ p_print_log(P_LKRG_INFO, "Original[0x%lx] vs current[0x%lx]\n", ++ (unsigned long)p_orig->p_ed_task.p_ns.ipc_ns, ++ (unsigned long)p_current->nsproxy->ipc_ns); ++ p_ret++; ++ } ++ ++ if (p_orig->p_ed_task.p_ns.mnt_ns != p_current->nsproxy->mnt_ns) { ++ p_print_log(P_LKRG_CRIT, ++ " Detected MOUNT namespace escape attack!" 
++ "process[%d | %s] has different 'nsproxy->mnt_ns' pointer\n", ++ task_pid_nr(p_current), ++ p_current->comm); ++ p_print_log(P_LKRG_INFO, "Original[0x%lx] vs current[0x%lx]\n", ++ (unsigned long)p_orig->p_ed_task.p_ns.mnt_ns, ++ (unsigned long)p_current->nsproxy->mnt_ns); ++ p_ret++; ++ } ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0) ++ if (p_orig->p_ed_task.p_ns.pid_ns_for_children != p_current->nsproxy->pid_ns_for_children) { ++#else ++ if (p_orig->p_ed_task.p_ns.pid_ns != p_current->nsproxy->pid_ns) { ++#endif ++ p_print_log(P_LKRG_CRIT, ++ " Detected PID namespace escape attack!" ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0) ++ "process[%d | %s] has different 'nsproxy->pid_ns_for_children' pointer\n", ++#else ++ "process[%d | %s] has different 'nsproxy->pid_ns' pointer\n", ++#endif ++ task_pid_nr(p_current), ++ p_current->comm); ++ p_print_log(P_LKRG_INFO, "Original[0x%lx] vs current[0x%lx]\n", ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0) ++ (unsigned long)p_orig->p_ed_task.p_ns.pid_ns_for_children, ++ (unsigned long)p_current->nsproxy->pid_ns_for_children); ++#else ++ (unsigned long)p_orig->p_ed_task.p_ns.pid_ns, ++ (unsigned long)p_current->nsproxy->pid_ns); ++#endif ++ p_ret++; ++ } ++ ++ if (p_orig->p_ed_task.p_ns.net_ns != p_current->nsproxy->net_ns) { ++ p_print_log(P_LKRG_CRIT, ++ " Detected NET namespace escape attack!" ++ "process[%d | %s] has different 'nsproxy->net_ns' pointer\n", ++ task_pid_nr(p_current), ++ p_current->comm); ++ p_print_log(P_LKRG_INFO, "Original[0x%lx] vs current[0x%lx]\n", ++ (unsigned long)p_orig->p_ed_task.p_ns.net_ns, ++ (unsigned long)p_current->nsproxy->net_ns); ++ p_ret++; ++ } ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) ++ if (p_orig->p_ed_task.p_ns.cgroup_ns != p_current->nsproxy->cgroup_ns) { ++ p_print_log(P_LKRG_CRIT, ++ " Detected CGROUP namespace escape attack!" 
++ "process[%d | %s] has different 'nsproxy->cgroup_ns' pointer\n", ++ task_pid_nr(p_current), ++ p_current->comm); ++ p_print_log(P_LKRG_INFO, "Original[0x%lx] vs current[0x%lx]\n", ++ (unsigned long)p_orig->p_ed_task.p_ns.cgroup_ns, ++ (unsigned long)p_current->nsproxy->cgroup_ns); ++ p_ret++; ++ } ++#endif ++ ++ /* Seccomp */ ++ if (p_orig->p_ed_task.p_sec.flag) { // SECCOMP was enabled so it make sense to compare... ++ P_SYM(p_get_seccomp_filter)(p_current); ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,11,0) ++ if (test_task_syscall_work(p_current,SECCOMP) != p_orig->p_ed_task.p_sec.flag) { ++#else ++ if (test_tsk_thread_flag(p_current,TIF_SECCOMP) != p_orig->p_ed_task.p_sec.flag) { ++#endif ++ p_print_log(P_LKRG_CRIT, ++ " Detected SECCOMP corruption!" ++ "process[%d | %s] has corrupted TIF_SECCOMP flag! [%d vs %d]\n", ++ task_pid_nr(p_current), ++ p_current->comm, ++ p_orig->p_ed_task.p_sec.flag, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,11,0) ++ test_task_syscall_work(p_current,SECCOMP) ++#else ++ test_tsk_thread_flag(p_current,TIF_SECCOMP) ++#endif ++ ); ++ p_ret++; ++ } ++ ++ if (p_orig->p_ed_task.p_sec.sec.mode != p_current->seccomp.mode) { ++ if (p_current->seccomp.mode < 0 || p_current->seccomp.mode > 2 ++ || p_orig->p_ed_task.p_sec.sec.mode < 0 || p_orig->p_ed_task.p_sec.sec.mode > 2) { ++ p_print_log(P_LKRG_CRIT, ++ " Detected SECCOMP corruption!" ++ "process[%d | %s] has UNKNOWN different SECCOMP mode! [%d vs %d]\n", ++ task_pid_nr(p_current), p_current->comm, ++ p_orig->p_ed_task.p_sec.sec.mode, p_current->seccomp.mode); ++ } else { ++ p_print_log(P_LKRG_CRIT, ++ " Detected SECCOMP corruption!" ++ "process[%d | %s] has different SECCOMP mode! 
[%s vs %s]\n", ++ task_pid_nr(p_current), p_current->comm, ++ p_sec_strings[p_orig->p_ed_task.p_sec.sec.mode], ++ p_sec_strings[p_current->seccomp.mode]); ++ } ++ p_ret++; ++ } ++ ++ if (p_orig->p_ed_task.p_sec.sec.filter != p_current->seccomp.filter) { ++ p_print_log(P_LKRG_CRIT, ++ " Detected SECCOMP corruption!" ++ "process[%d | %s] has different SECCOMP filter pointer!\n", ++ task_pid_nr(p_current), ++ p_current->comm); ++ p_print_log(P_LKRG_INFO, "Original[0x%lx] vs current[0x%lx]\n", ++ (unsigned long)p_orig->p_ed_task.p_sec.sec.filter, ++ (unsigned long)p_current->seccomp.filter); ++ p_ret++; ++ } ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0) ++ P_SYM(p_put_seccomp_filter)(p_current->seccomp.filter); ++#else ++ P_SYM(p_put_seccomp_filter)(p_current); ++#endif ++ } ++ ++ /* Release reference to cred */ ++ put_cred(p_current_cred); ++ /* Release reference to real_cred */ ++ put_cred(p_current_real_cred); ++ ++ return p_ret; ++} ++ ++int p_validate_task_f(void *p_arg) { ++ ++ int p_ret = P_LKRG_SUCCESS; ++ struct p_ed_process *p_tmp; ++ struct task_struct *p_task = (struct task_struct *)p_arg; ++ ++ rcu_read_lock(); ++ get_task_struct(p_task); ++ ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(p_task))) == NULL) { ++ // This process is not on the list! ++ if (p_task->state != TASK_DEAD) { ++ p_ret = P_LKRG_GENERAL_ERROR; ++ p_print_log(P_LKRG_INFO, ++ " Can't find process[%d |%s] in internal tracking list!\n", ++ task_pid_nr(p_task), p_task->comm); ++ } ++ goto p_validate_task_out; ++ } ++ ++ if (p_cmp_tasks(p_tmp, p_task, 0x1)) { ++ // kill this process! 
++ p_ed_kill_task_by_task(p_task); ++ } ++ ++p_validate_task_out: ++ ++ put_task_struct(p_task); ++ rcu_read_unlock(); ++ ++ return p_ret; ++} ++ ++#ifdef CONFIG_SECURITY_SELINUX ++static void p_validate_selinux(void) { ++ unsigned long p_flags; ++ ++ do { ++ p_lkrg_counter_lock_lock(&p_ed_guard_globals.p_selinux_lock, &p_flags); ++ if (p_lkrg_counter_lock_val_read(&p_ed_guard_globals.p_selinux_lock)) { ++ p_lkrg_counter_lock_unlock(&p_ed_guard_globals.p_selinux_lock, &p_flags); ++ schedule(); ++ continue; ++ } else { ++ break; ++ } ++ } while(1); ++ ++#if (!defined(RHEL_RELEASE_CODE) && LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) || \ ++ (defined(RHEL_RELEASE_CODE) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 3)) ++ if (p_ed_guard_globals.p_selinux.p_selinux_enabled != *P_SYM(p_selinux_enabled)) { ++ p_print_log(P_LKRG_CRIT, ++ " Detected data corruption against SELINUX! 'selinux_enabled' has " ++ "different value [%d vs %d] than expected. Original value will be restored!\n", ++ *P_SYM(p_selinux_enabled),p_ed_guard_globals.p_selinux.p_selinux_enabled); ++ *P_SYM(p_selinux_enabled) = p_ed_guard_globals.p_selinux.p_selinux_enabled; ++ } ++#endif ++ ++#ifdef P_SELINUX_VERIFY ++ if (p_selinux_state_changed()) { ++ p_print_log(P_LKRG_CRIT, ++ " Detected data corruption against SELINUX! 'selinux_state->enforcing' has " ++ "different value [%d vs %d] than expected!\n", ++ p_selinux_state_enforcing(), p_ed_guard_globals.p_selinux.p_selinux_enforcing); ++ ++ switch (P_CTRL(p_kint_enforce)) { ++ ++ /* Panic */ ++ case 2: ++ // OK, we need to crash the kernel now ++ panic(P_LKRG_SIGNATURE "SELinux Integrity verification failed! 
Killing the kernel...\n"); ++ break; ++ ++ case 1: ++ p_print_log(P_LKRG_CRIT, "Original value will be restored!\n"); ++ p_selinux_state_restore(); ++ break; ++ ++ case 0: ++ p_print_log(P_LKRG_CRIT, "Accepting new state!\n"); ++ p_selinux_state_update(); ++ break; ++ ++ } ++ ++ } ++#endif ++ p_lkrg_counter_lock_unlock(&p_ed_guard_globals.p_selinux_lock, &p_flags); ++} ++#endif ++ ++static void p_ed_wq_valid_work(struct work_struct *p_work) { ++ ++#ifdef CONFIG_SECURITY_SELINUX ++ // SELinux ++ p_validate_selinux(); ++#endif ++ ++ /* Free the worker struct */ ++ if (p_work) { ++ p_ed_free_valid(p_work); ++ } ++} ++ ++static void p_ed_validate_globals(void) { ++ ++ struct work_struct *p_worker; ++ ++ if (P_CTRL(p_kint_validate)) { ++ ++ /* Validate globals... */ ++ // ... ++ ++ /* Prepare for validation which requires 'sleeping' */ ++ while ( (p_worker = p_ed_alloc_valid()) == NULL); // Should never be NULL ++ INIT_WORK(p_worker, p_ed_wq_valid_work); ++ /* schedule for execution */ ++ queue_work(system_unbound_wq, p_worker); ++ ++ } ++} ++ ++void p_ed_validate_current(void) { ++ ++ if (!P_CTRL(p_pint_validate)) ++ return; ++ ++ if (p_is_ed_task(current)) { ++ p_validate_task_f(current); ++ } ++} ++ ++void p_ed_enforce_validation(void) { ++ ++ unsigned long p_flags; ++ ++ p_ed_pcfi_cpu(0); ++ ++ switch (P_CTRL(p_pint_validate)) { ++ ++ case 3: ++ p_ed_enforce_validation_paranoid(); ++ break; ++ ++ case 2: ++ case 1: ++ p_tasks_read_lock(&p_flags); ++ if (p_is_ed_task(current)) { ++ p_validate_task_f(current); ++ } ++ p_tasks_read_unlock(&p_flags); ++ break; ++ ++ case 0: ++ break; ++ } ++ ++ /* Validate critical globals */ ++// p_ed_validate_globals(); ++} ++ ++unsigned int p_ed_enforce_validation_paranoid(void) { ++ ++ unsigned int p_ret = 0; ++ ++ p_ed_pcfi_cpu(0); ++ ++ if (!P_CTRL(p_pint_validate)) ++ goto p_ed_enforce_validation_paranoid_globals; ++ ++ /* Validate processes and threads */ ++ p_ret = p_iterate_lkrg_tasks_paranoid(); ++ 
++p_ed_enforce_validation_paranoid_globals: ++ ++ /* Validate critical globals */ ++ p_ed_validate_globals(); ++ ++ return p_ret; ++} ++ ++static void p_ed_pcfi_cache_zero(void *p_arg) { ++ ++ unsigned long *p_page = p_arg; ++ ++ memset(p_page, 0, P_PCFI_STACK_BUF); ++} ++ ++static int p_ed_pcfi_cache_init(void) { ++ ++ int p_ret = P_LKRG_SUCCESS; ++ ++ if ( (p_ed_pcfi_cache = kmem_cache_create("p_ed_pcfi_cache", P_PCFI_STACK_BUF, ++ 0, SLAB_HWCACHE_ALIGN, p_ed_pcfi_cache_zero)) == NULL) { ++ p_print_log(P_LKRG_ERR, "kmem_cache_create() for exploit detection pCFI error! :(\n"); ++ p_ret = -ENOMEM; ++ } ++ ++ return p_ret; ++} ++ ++static void p_ed_pcfi_cache_delete(void) { ++ ++ if (p_ed_pcfi_cache) { ++ kmem_cache_destroy(p_ed_pcfi_cache); ++ p_ed_pcfi_cache = NULL; ++ } ++} ++ ++static inline int p_is_obj_on_stack(struct task_struct *p_task, const void *p_addr) { ++ ++ void *p_stack = p_task->stack; //task_stack_page(p_task); ++ ++ return (p_addr >= p_stack) && (p_addr < (p_stack + THREAD_SIZE)); ++} ++ ++int p_ed_enforce_pcfi(struct task_struct *p_task, struct p_ed_process *p_orig, struct pt_regs *p_regs) { ++ ++ unsigned int i = 0; ++// unsigned long p_flags; ++ struct stack_trace p_trace; ++ const void *p_fp = (const void *)p_regs_get_fp(p_regs); ++#ifdef CONFIG_X86 ++#if defined(CONFIG_UNWINDER_ORC) ++ struct unwind_state p_state; ++#else ++ struct stack_frame p_frame; ++#endif ++#elif defined(CONFIG_ARM64) ++ struct stackframe p_frame; ++#elif defined(CONFIG_ARM) ++ struct stackframe p_frame; ++ const void *p_sp = (const void *)thread_saved_sp(p_task); ++ unsigned long p_high, p_low; ++#endif ++ unsigned int p_offset = 1; ++ char p_sym1[KSYM_SYMBOL_LEN]; ++ char p_not_valid = 0; ++ ++ if (p_ed_pcfi_validate_sp(p_task,p_orig,p_regs_get_sp(p_regs))) { ++ p_print_log(P_LKRG_CRIT, ++ " Stack pointer corruption (ROP?) - pCFI violation: process[%s | %d] !!!\n", ++ p_task->comm,task_pid_nr(p_task)); ++ // kill this process! 
++ p_pcfi_kill_task_by_task(p_task); ++ p_not_valid = 1; ++ goto p_ed_enforce_pcfi_out; ++ } ++ ++ if (!(P_CTRL(p_pcfi_validate) & 0x2)) { ++ goto p_ed_enforce_pcfi_out; ++ } ++ ++ if (p_orig) { ++ if (p_task != p_orig->p_ed_task.p_task) { ++ p_print_log(P_LKRG_WARN, ++ " [pCFI] Potential kretprobe glitch detected for process[%s] vs orig[%s]\n", ++ p_task->comm, ++ p_orig->p_ed_task.p_comm); ++ p_print_log(P_LKRG_INFO, ++ "process[0x%lx | %d | %s] vs orig[0x%lx | %d | %s]\n", ++ (unsigned long)p_task, ++ task_pid_nr(p_task), ++ p_task->comm, ++ (unsigned long)p_orig->p_ed_task.p_task, ++ p_orig->p_ed_task.p_pid, ++ p_orig->p_ed_task.p_comm); ++ goto p_ed_enforce_pcfi_out; ++ } ++ } ++ ++ if (!p_is_obj_on_stack(p_task, p_fp)) { ++ p_debug_log(P_LKRG_WARN, ++ "Frame pointer is NOT on the stack - CFI is not enforced :(\n"); ++ goto p_ed_enforce_pcfi_out; ++ } ++ ++ while ( (p_trace.entries = p_ed_pcfi_alloc()) == NULL); // Should never be NULL ++ ++ /* Current logic always call this function with IRQ disabled */ ++// local_irq_save(p_flags); ++ ++ p_trace.max_entries = P_PCFI_STACK_BUF/sizeof(p_trace.entries[0]); ++ p_trace.nr_entries = 0; ++ ++ ++#if defined(CONFIG_X86) ++ ++#if defined(CONFIG_UNWINDER_ORC) ++ ++ if (p_trace.nr_entries < p_trace.max_entries) ++ p_trace.entries[p_trace.nr_entries++] = p_regs_get_ip(p_regs); ++ ++ do { ++ unsigned long p_addr; ++ ++ for (unwind_start(&p_state, p_task, p_regs, NULL); ++ !unwind_done(&p_state); unwind_next_frame(&p_state)) { ++ p_addr = unwind_get_return_address(&p_state); ++ if (!p_addr) ++ break; ++ p_trace.entries[p_trace.nr_entries++] = p_addr; ++ } ++ } while(0); ++ ++#else ++ ++ if (p_trace.nr_entries < p_trace.max_entries) ++ p_trace.entries[p_trace.nr_entries++] = p_regs_get_ip(p_regs); ++ ++ while (p_trace.nr_entries < p_trace.max_entries) { ++ p_frame.next_frame = NULL; ++ p_frame.return_address = 0; ++ ++ if ((unsigned long)p_fp < p_regs_get_sp(p_regs) || !p_is_obj_on_stack(p_task, p_fp)) ++ break; ++ ++ 
memcpy(&p_frame, p_fp, sizeof(struct stack_frame)); ++ if (p_frame.return_address) { ++ p_trace.entries[p_trace.nr_entries++] = p_frame.return_address; ++ } ++ ++ if (p_fp == p_frame.next_frame) ++ break; ++ ++ p_fp = p_frame.next_frame; ++ } ++ ++#endif ++ ++#elif defined(CONFIG_ARM) ++ ++ p_frame.fp = (unsigned long)p_fp; ++ p_frame.sp = (unsigned long)p_sp; ++ p_frame.pc = instruction_pointer(p_regs); ++ ++ if (current != p_task) { ++ if (p_trace.nr_entries < p_trace.max_entries) { ++ if (p_frame.pc) { ++ p_trace.entries[p_trace.nr_entries++] = p_frame.pc; ++ } ++ } ++ } ++ ++ while (p_trace.nr_entries < p_trace.max_entries) { ++ p_fp = (void *)p_frame.fp; ++ p_low = p_frame.sp; ++ p_high = ALIGN(p_low, THREAD_SIZE); ++ ++/* ++ if ((unsigned long)p_fp < p_regs_get_sp(p_regs) || ++ !p_is_obj_on_stack(current, p_fp) || ++ (unsigned long)(p_fp) & 0xf) ++ break; ++*/ ++ ++ if ((unsigned long)p_fp < p_low + 12 || (unsigned long)p_fp > p_high - 4) ++ break; ++ ++ p_frame.fp = *(unsigned long *)(p_fp - 12); ++ p_frame.sp = *(unsigned long *)(p_fp - 8); ++ p_frame.pc = *(unsigned long *)(p_fp - 4); ++ ++ if (p_frame.pc) { ++ p_trace.entries[p_trace.nr_entries++] = p_frame.pc; ++ } ++ ++ if (!p_frame.fp && !p_frame.pc) ++ break; ++ } ++ ++#elif defined(CONFIG_ARM64) ++ ++ p_frame.fp = (unsigned long)p_fp; ++ p_frame.pc = p_regs_get_ip(p_regs); ++ ++ if (p_trace.nr_entries < p_trace.max_entries) { ++ if (p_frame.pc) { ++ p_trace.entries[p_trace.nr_entries++] = p_frame.pc; ++ } ++ } ++ ++ while (p_trace.nr_entries < p_trace.max_entries) { ++ p_fp = (void *)p_frame.fp; ++ ++ if ((unsigned long)p_fp < p_regs_get_sp(p_regs) || ++ !p_is_obj_on_stack(current, p_fp) || ++ (unsigned long)(p_fp) & 0xf) ++ break; ++ ++ p_frame.fp = *(unsigned long *)(p_fp); ++ p_frame.pc = *(unsigned long *)(p_fp + 8); ++ ++ if (p_frame.pc) { ++ p_trace.entries[p_trace.nr_entries++] = p_frame.pc; ++ } ++ ++ if (!p_frame.fp && !p_frame.pc) ++ break; ++ } ++ ++#else ++ ++ goto 
p_ed_enforce_pcfi_unlock_out; ++ ++#endif ++ ++ if (p_trace.nr_entries) { ++ ++ //p_not_valid = 0; ++ for (i = 0; i < p_trace.nr_entries-p_offset; i++) { ++ if (!P_SYM(p_is_kernel_text_address)(p_trace.entries[i])) { ++ if (p_trace.nr_entries-p_offset > 4 && i > 4) { ++ memset(p_sym1,0,KSYM_SYMBOL_LEN); ++ sprint_symbol_no_offset(p_sym1,p_trace.entries[i-1]); ++ if (!strncmp(p_sym1,"ret_from_fork",0xd)) { ++ memset(p_sym1,0,KSYM_SYMBOL_LEN); ++ sprint_symbol_no_offset(p_sym1,p_trace.entries[i-2]); ++ if (!strncmp(p_sym1,"kthread",0x7)) { ++ continue; ++ } else { ++ p_not_valid = 1; ++ break; ++ } ++ } ++ p_not_valid = 1; ++ break; ++ } else { ++ p_not_valid = 1; ++ break; ++ } ++ } ++ } ++ } ++ ++ if (p_not_valid) { ++ p_print_log(P_LKRG_CRIT, ++ " Not valid call - pCFI violation: process[%s | %d] !!!\n", ++ p_task->comm,task_pid_nr(p_task)); ++ p_print_log(P_LKRG_CRIT, ++ " Frame[%d] nr_entries[%d]: [0x%lx]. Full Stack %s\n", ++ i,p_trace.nr_entries,p_trace.entries[i], ++#if !defined(CONFIG_STACKTRACE) ++ "not available (CONFIG_STACKTRACE not enabled)"); ++#else ++ "below:"); ++ printk(KERN_CRIT "--- . ---\n"); ++#if defined(CONFIG_ARCH_STACKWALK) || LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0) ++ stack_trace_print(p_trace.entries, p_trace.nr_entries, 0); ++#else ++ print_stack_trace(&p_trace, 0); ++#endif ++ printk(KERN_CRIT "--- END ---\n"); ++#endif ++ p_pcfi_kill_task_by_task(p_task); ++ } ++ ++#if !defined(CONFIG_X86) && !defined(CONFIG_ARM64) && !defined(CONFIG_ARM) ++ ++p_ed_enforce_pcfi_unlock_out: ++ ++#endif ++ ++ /* Current logic always call this function with IRQ disabled */ ++// local_irq_restore(p_flags); ++ ++ p_ed_pcfi_free(p_trace.entries); ++ ++ ++p_ed_enforce_pcfi_out: ++ ++ return p_not_valid; ++} ++ ++int p_ed_pcfi_validate_sp(struct task_struct *p_task, struct p_ed_process *p_orig, unsigned long p_sp) { ++ ++ unsigned long p_stack = (p_orig) ? 
(unsigned long) p_orig->p_ed_task.p_stack : 0x0; ++ register unsigned long p_stack_offset; ++ int p_not_valid = 0; ++ ++ if (!P_CTRL(p_pcfi_validate)) { ++ return P_LKRG_SUCCESS; ++ } ++ ++ /* ++ * Validate alignment - this test should be passed even if we have a glitching problem ++ */ ++ if (unlikely((p_sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE / 16))) { ++ p_print_log(P_LKRG_CRIT, ++ " process [%s | %d] has invalidly aligned stack pointer!\n", ++ p_task->comm, ++ task_pid_nr(p_task)); ++ p_print_log(P_LKRG_INFO, "[base:0x%lx c:0x%lx]\n", ++ p_stack, ++ p_sp); ++ p_not_valid = 1; ++ } ++ ++ if (!p_stack) { ++ return p_not_valid ? P_LKRG_GENERAL_ERROR : P_LKRG_SUCCESS; ++ } ++ ++ if (p_task != p_orig->p_ed_task.p_task) { ++ p_print_log(P_LKRG_WARN, ++ " [pCFI - SP] Potential kretprobe glitch detected for process[%s] vs orig[%s]\n", ++ p_task->comm, ++ p_orig->p_ed_task.p_comm); ++ p_print_log(P_LKRG_INFO, ++ "process[0x%lx | %d | %s] vs orig[0x%lx | %d | %s]\n", ++ (unsigned long)p_task, ++ task_pid_nr(p_task), ++ p_task->comm, ++ (unsigned long)p_orig->p_ed_task.p_task, ++ p_orig->p_ed_task.p_pid, ++ p_orig->p_ed_task.p_comm); ++ p_not_valid = 1; ++ } ++ ++ /* ++ * Validate stack base ++ */ ++ if (unlikely((p_sp & ~(THREAD_SIZE - 1)) != (p_stack & ~(THREAD_SIZE - 1)))) { ++ p_print_log(P_LKRG_CRIT, ++ " process [%s | %d] has invalid base for stack pointer!\n", ++ p_task->comm, ++ task_pid_nr(p_task)); ++ p_print_log(P_LKRG_INFO, "[base:0x%lx c:0x%lx]\n", ++ p_stack, ++ p_sp); ++ p_not_valid = 1; ++ } ++ ++ /* ++ * Validate if stack is coming from the valid range (CONFIG_VMAP_STACK) ++ */ ++ ++ // TODO ++ ++ /* ++ * Validate current size of the stack. 
++ */ ++ ++ p_stack_offset = p_sp - p_stack; ++ if (unlikely(p_stack_offset >= THREAD_SIZE)) { ++ p_print_log(P_LKRG_CRIT, ++ " process [%s | %d] has invalid stack pointer (stack size mismatch)!\n", ++ p_task->comm, ++ task_pid_nr(p_task)); ++ p_print_log(P_LKRG_INFO, "[base:0x%lx c:0x%lx diff:0x%lx]\n", ++ p_stack, ++ p_sp, ++ p_stack_offset); ++ p_not_valid = 1; ++ } ++ ++ return p_not_valid ? P_LKRG_GENERAL_ERROR : P_LKRG_SUCCESS; ++} ++ ++ ++int p_exploit_detection_init(void) { ++ ++ int p_ret; ++ const struct p_functions_hooks *p_fh_it; ++ ++ p_global_off_cookie = (unsigned long)get_random_long(); ++ p_global_cnt_cookie = (unsigned long)get_random_long(); ++ ++ p_global_off_cookie |= P_NORMALIZE_LONG; ++ p_global_cnt_cookie |= P_NORMALIZE_LONG; ++ p_global_cnt_cookie &= P_MASK_COUNTER; ++ ++ if (p_ed_pcfi_cache_init()) { ++ p_print_log(P_LKRG_CRIT, ++ "Can't initialize ED CFI cache :(\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_exploit_detection_init_out; ++ } ++ ++ if (p_ed_wq_valid_cache_init()) { ++ p_print_log(P_LKRG_CRIT, ++ "Can't initialize ED WQ cache :(\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_exploit_detection_init_out; ++ } ++ ++ P_SYM(p_is_kernel_text_address) = (int (*)(unsigned long))P_SYM(p_kallsyms_lookup_name)("__kernel_text_address"); ++ ++ if (!P_SYM(p_is_kernel_text_address)) { ++ p_print_log(P_LKRG_ERR, ++ "[ED] ERROR: Can't find '__kernel_text_address' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_exploit_detection_init_out; ++ } ++ ++ P_SYM(p_mm_find_pmd) = (pmd_t *(*)(struct mm_struct *, unsigned long))P_SYM(p_kallsyms_lookup_name)("mm_find_pmd"); ++ ++ if (!P_SYM(p_mm_find_pmd)) { ++ p_print_log(P_LKRG_ERR, ++ "[ED] ERROR: Can't find 'mm_find_pmd' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_exploit_detection_init_out; ++ } ++ ++ P_SYM(p_get_seccomp_filter) = (void (*)(struct task_struct *))P_SYM(p_kallsyms_lookup_name)("get_seccomp_filter"); ++ ++ if 
(!P_SYM(p_get_seccomp_filter)) { ++ p_print_log(P_LKRG_ERR, ++ "[ED] ERROR: Can't find 'get_seccomp_filter' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_exploit_detection_init_out; ++ } ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0) ++ P_SYM(p_put_seccomp_filter) = (void (*)(struct seccomp_filter *))P_SYM(p_kallsyms_lookup_name)("__put_seccomp_filter"); ++#else ++ P_SYM(p_put_seccomp_filter) = (void (*)(struct task_struct *))P_SYM(p_kallsyms_lookup_name)("put_seccomp_filter"); ++#endif ++ ++ if (!P_SYM(p_put_seccomp_filter)) { ++ p_print_log(P_LKRG_ERR, ++ "[ED] ERROR: Can't find 'put_seccomp_filter' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_exploit_detection_init_out; ++ } ++ ++#ifdef CONFIG_SECURITY_SELINUX ++#if (!defined(RHEL_RELEASE_CODE) && LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) || \ ++ (defined(RHEL_RELEASE_CODE) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 3)) ++ P_SYM(p_selinux_enabled) = (int *)P_SYM(p_kallsyms_lookup_name)("selinux_enabled"); ++#endif ++ ++#if (!defined(RHEL_RELEASE_CODE) && LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) || \ ++ (defined(RHEL_RELEASE_CODE) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 3)) ++ if (!P_SYM(p_selinux_enabled)) { ++ p_print_log(P_LKRG_ERR, ++ "[ED] ERROR: Can't find 'selinux_enabled' variable :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_exploit_detection_init_out; ++ } ++#endif ++ // SELinux information ++#ifdef P_SELINUX_VERIFY ++ if (p_selinux_state_init()) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't initialize selinux :(\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_exploit_detection_init_out; ++ } ++#elif defined(CONFIG_GCC_PLUGIN_RANDSTRUCT) ++ p_print_log(P_LKRG_ERR, "LKRG can't enforce SELinux validation " ++ "(CONFIG_GCC_PLUGIN_RANDSTRUCT detected)\n"); ++#endif ++#if (!defined(RHEL_RELEASE_CODE) && LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) || \ ++ (defined(RHEL_RELEASE_CODE) && RHEL_RELEASE_CODE < 
RHEL_RELEASE_VERSION(8, 3)) ++ p_ed_guard_globals.p_selinux.p_selinux_enabled = *P_SYM(p_selinux_enabled); ++#endif ++ p_lkrg_counter_lock_init(&p_ed_guard_globals.p_selinux_lock); ++#endif ++ ++ if (p_init_rb_ed_pids()) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't initialize ED pids cache and red-black tree :(\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_exploit_detection_init_out; ++ } ++ ++ // Dump processes and threads ++ p_iterate_processes(p_dump_task_f,0x0); ++ ++ p_ret = P_LKRG_SUCCESS; ++ ++ for (p_fh_it = p_functions_hooks_array; p_fh_it->name != NULL; p_fh_it++) { ++ if (p_fh_it->install(p_fh_it->is_isra_safe)) { ++ if (!p_fh_it->p_fatal) { ++ p_print_log(P_LKRG_WARN, "%s\n", p_fh_it->p_error_message); ++ continue; ++ } ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't hook %s :(\n", p_fh_it->name); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ p_exploit_detection_exit(); ++ break; ++ } ++ } ++ ++p_exploit_detection_init_out: ++ ++ return p_ret; ++} ++ ++ ++void p_exploit_detection_exit(void) { ++ ++ const struct p_functions_hooks *p_fh_it; ++ ++#if !defined(P_LKRG_DEBUG_BUILD) ++ lockdep_off(); ++#endif ++ ++ for (p_fh_it = p_functions_hooks_array; p_fh_it->name != NULL; p_fh_it++) { ++ p_fh_it->uninstall(); ++ } ++ ++ mb(); ++ ++ /* Delete cache for ED wq validation */ ++ p_ed_wq_valid_cache_delete(); ++ /* Delete cache for ED CFI validation */ ++ p_ed_pcfi_cache_delete(); ++ /* Before deleting cache i should clean each entry! 
*/ ++ p_delete_rb_ed_pids(); ++ ++#if !defined(P_LKRG_DEBUG_BUILD) ++ lockdep_on(); ++#endif ++ ++ p_print_log(P_LKRG_INFO, "kmem_cache \"p_ed_pids\" destroyed!\n"); ++} +diff --git a/security/lkrg/modules/exploit_detection/p_exploit_detection.h b/security/lkrg/modules/exploit_detection/p_exploit_detection.h +new file mode 100644 +index 000000000000..8de40484ea60 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/p_exploit_detection.h +@@ -0,0 +1,621 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Exploit detection main module ++ * ++ * Notes: ++ * - None ++ * ++ * Timeline: ++ * - Created: 06.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_EXPLOIT_DETECTION_MAIN_H ++#define P_EXPLOIT_DETECTION_MAIN_H ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ ++ /* ++ * Linux kernel 4.17.xx introduced some changes which broke LKRG (and not only LKRG). ++ * More information about it can be found here: ++ * ++ * https://lists.gt.net/linux/kernel/2952784 ++ * https://github.com/torvalds/linux/commit/d5a00528b58cdb2c71206e18bd021e34c4eab878 ++ * ++ * In short, Linux kernel changed the exported names for syscalls (critical change ++ * from the LKRG perspective). Currently, one syscall is generating up to 4 stubs ++ * (depends on compilation option), e.g. 
sys_waitid and compat_sys_waitid can be now: ++ * ++ * 810f2080 T __x64_sys_waitid # x64 64-bit-ptregs -> C stub ++ * 810f20b0 T __ia32_sys_waitid # ia32 32-bit-ptregs -> C stub[*] ++ * 810f2470 T __ia32_compat_sys_waitid # ia32 32-bit-ptregs -> compat C stub ++ * 810f2490 T __x32_compat_sys_waitid # x32 64-bit-ptregs -> compat C stub ++ * ++ * [*] This stub is often unused - depends on the syscall ++ * ++ * Example from my Ubuntu VM (kernel 4.17.8) for sys_execve(): ++ * ++ * ffffffffa1a9b9d0 T __ia32_compat_sys_execve ++ * ffffffffa1a9ba90 T __ia32_sys_execve [*] ++ * ffffffffa1a9bb30 T __x32_compat_sys_execve ++ * ffffffffa1a9bb80 T __x64_sys_execve ++ * ++ * [*] Unused ++ * ++ * But at the same time sys_setuid() can be as follow: ++ * ++ * ffffffffa44a69d0 T __sys_setuid # NOT a syscall but some of the stubs can wrap it ++ * ffffffffa44a6ad0 T __x64_sys_setuid ++ * ffffffffa44a6af0 T __ia32_sys_setuid [*] ++ * ffffffffa452b0d0 T __x64_sys_setuid16 ++ * ffffffffa452b100 T __ia32_sys_setuid16 ++ * ++ * [*] Used ++ * ++ * CONFIG_COMPAT / CONFIG_IA32_EMULATION and CONFIG_X86_X32 is covered here. 
++ * ++ */ ++ ++ #ifdef CONFIG_X86_64 ++ ++ #define P_SYSCALL_LAYOUT_4_17 ++ ++ #define P_GET_IA32_SYSCALL_NAME(x) P_IA32_SYSCALL_PREFIX(x) ++ #define P_GET_IA32_COMPAT_SYSCALL_NAME(x) P_IA32_COMPAT_SYSCALL_PREFIX(x) ++ ++ #define P_SYSCALL_PREFIX(x) P_TO_STRING(__x64_sys_ ## x) ++ #define P_IA32_SYSCALL_PREFIX(x) P_TO_STRING(__ia32_sys_ ## x) ++ #define P_IA32_COMPAT_SYSCALL_PREFIX(x) P_TO_STRING(__ia32_compat_sys_ ## x) ++ #define P_GET_SET_ID_PREFIX(x) P_TO_STRING(__sys_ ## x) ++ #define P_X32_COMPAT_SYSCALL_PREFIX(x) P_TO_STRING(__x32_compat_sys_ ## x) ++ ++ #define P_COMPAT_SYSCALL_PREFIX(x) P_IA32_COMPAT_SYSCALL_PREFIX(x) ++ #define P_NEW_COMPAT_SYSCALL_PREFIX(x) P_IA32_SYSCALL_PREFIX(x) ++ ++ #elif defined(CONFIG_X86_32) && LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0) ++ ++ #define P_SYSCALL_LAYOUT_5_7 ++ ++ /* ++ * Since Linux kernel 5.7 SYSCALL_WRAPPER's "magic" was backported to x86 (32 bits) ++ * arch as well. Let's correctly handle it here. More about this "magic" can be ++ * found here: ++ * ++ * https://lore.kernel.org/lkml/20200313195144.164260-10-brgerst@gmail.com/T/ ++ */ ++ ++ #define P_GET_IA32_SYSCALL_NAME(x) P_IA32_SYSCALL_PREFIX(x) ++ #define P_GET_IA32_COMPAT_SYSCALL_NAME(x) P_IA32_COMPAT_SYSCALL_PREFIX(x) ++ ++ #define P_SYSCALL_PREFIX(x) P_TO_STRING(__ia32_sys_ ## x) ++ #define P_IA32_SYSCALL_PREFIX(x) P_TO_STRING(__ia32_sys_ ## x) ++ #define P_IA32_COMPAT_SYSCALL_PREFIX(x) P_TO_STRING(__ia32_compat_sys_ ## x) ++ #define P_GET_SET_ID_PREFIX(x) P_TO_STRING(__sys_ ## x) ++ #define P_X32_COMPAT_SYSCALL_PREFIX(x) P_TO_STRING(__x32_compat_sys_ ## x) ++ ++ #define P_COMPAT_SYSCALL_PREFIX(x) P_IA32_COMPAT_SYSCALL_PREFIX(x) ++ #define P_NEW_COMPAT_SYSCALL_PREFIX(x) P_IA32_SYSCALL_PREFIX(x) ++ ++ #elif defined(CONFIG_ARM64) ++ ++ #define P_SYSCALL_LAYOUT_4_17 ++ ++ #define P_GET_ARM64_SYSCALL_NAME(x) P_ARM64_SYSCALL_PREFIX(x) ++ #define P_GET_ARM64_COMPAT_SYSCALL_NAME(x) P_ARM64_COMPAT_SYSCALL_PREFIX(x) ++ ++ #define P_SYSCALL_PREFIX(x) 
P_TO_STRING(__arm64_sys_ ## x) ++ #define P_ARM64_SYSCALL_PREFIX(x) P_ARM64_COMPAT_SYSCALL_PREFIX(x) ++ #define P_ARM64_COMPAT_SYSCALL_PREFIX(x) P_TO_STRING(__arm64_compat_sys_ ## x) ++ #define P_GET_SET_ID_PREFIX(x) P_TO_STRING(__sys_ ## x) ++ #define P_X32_COMPAT_SYSCALL_PREFIX(x) P_TO_STRING(__x32_compat_sys_ ## x) ++ ++ #define P_COMPAT_SYSCALL_PREFIX(x) P_ARM64_COMPAT_SYSCALL_PREFIX(x) ++ #define P_NEW_COMPAT_SYSCALL_PREFIX(x) P_ARM64_SYSCALL_PREFIX(x) ++ ++ #else ++ #define P_SYSCALL_PREFIX(x) P_TO_STRING(sys_ ## x) ++ #define P_COMPAT_SYSCALL_PREFIX(x) P_TO_STRING(compat_sys_ ## x) ++ #define P_GET_SET_ID_PREFIX(x) P_SYSCALL_PREFIX(x) ++ #define P_X32_COMPAT_SYSCALL_PREFIX ++ #define P_NEW_COMPAT_SYSCALL_PREFIX ++ #endif ++#else ++ #define P_SYSCALL_PREFIX(x) P_TO_STRING(sys_ ## x) ++ #define P_COMPAT_SYSCALL_PREFIX(x) P_TO_STRING(compat_sys_ ## x) ++ #define P_GET_SET_ID_PREFIX(x) P_SYSCALL_PREFIX(x) ++ #define P_X32_COMPAT_SYSCALL_PREFIX ++ #define P_NEW_COMPAT_SYSCALL_PREFIX ++#endif ++ ++#define P_TO_STRING(x) # x ++#define P_GET_SYSCALL_NAME(x) P_SYSCALL_PREFIX(x) ++#define P_GET_COMPAT_SYSCALL_NAME(x) P_COMPAT_SYSCALL_PREFIX(x) ++#define P_GET_NEW_COMPAT_SYSCALL_NAME(x) P_NEW_COMPAT_SYSCALL_PREFIX(x) ++#define P_GET_SET_ID_NAME(x) P_GET_SET_ID_PREFIX(x) ++#define P_GET_X32_SYSCALL_NAME(x) P_X32_COMPAT_SYSCALL_PREFIX(x) ++ ++#define P_ED_PROCESS_OFF_MAX 0x1000 ++ ++#define p_ed_alloc_valid() kmem_cache_alloc(p_ed_wq_valid_cache, GFP_ATOMIC) ++#define p_ed_free_valid(name) kmem_cache_free(p_ed_wq_valid_cache, (void *)(name)) ++ ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) ++/* ++ * It's temporary fix - redefine what can't be imported. ++ * TODO: Need to research better way of inspecting SELinux variables! 
++ */ ++/* Policy capabilities */ ++enum { ++ POLICYDB_CAPABILITY_NETPEER, ++ POLICYDB_CAPABILITY_OPENPERM, ++ POLICYDB_CAPABILITY_EXTSOCKCLASS, ++ POLICYDB_CAPABILITY_ALWAYSNETWORK, ++ POLICYDB_CAPABILITY_CGROUPSECLABEL, ++ POLICYDB_CAPABILITY_NNP_NOSUID_TRANSITION, ++ __POLICYDB_CAPABILITY_MAX ++}; ++ ++struct p_selinux_state { ++#if defined(CONFIG_SECURITY_SELINUX_DISABLE) || LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0) ++ bool disabled; ++#endif ++#ifdef P_SELINUX_VERIFY ++ bool enforcing; ++#endif ++ bool checkreqprot; ++ bool initialized; ++ bool policycap[__POLICYDB_CAPABILITY_MAX]; ++ struct selinux_avc *avc; ++ struct selinux_ss *ss; ++}; ++#endif ++ ++#if !defined(CONFIG_STACKTRACE) || \ ++ (defined(CONFIG_STACKTRACE) && defined(CONFIG_ARCH_STACKWALK) && !defined(RHEL_RELEASE)) ++struct stack_trace { ++ unsigned int nr_entries, max_entries; ++ unsigned long *entries; ++ int skip; ++}; ++#endif ++ ++struct p_cred { ++ ++ kuid_t uid; /* real UID of the task */ ++ kgid_t gid; /* real GID of the task */ ++ kuid_t suid; /* saved UID of the task */ ++ kgid_t sgid; /* saved GID of the task */ ++ kuid_t euid; /* effective UID of the task */ ++ kgid_t egid; /* effective GID of the task */ ++ kuid_t fsuid; /* UID for VFS ops */ ++ kgid_t fsgid; /* GID for VFS ops */ ++ unsigned securebits; /* SUID-less security management */ ++ kernel_cap_t cap_inheritable; /* caps our children can inherit */ ++ kernel_cap_t cap_permitted; /* caps we're permitted */ ++ kernel_cap_t cap_effective; /* caps we can actually use */ ++ kernel_cap_t cap_bset; /* capability bounding set */ ++ kernel_cap_t cap_ambient; /* Ambient capability set */ ++ struct user_struct *user; /* real user ID subscription */ ++ struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. 
*/ ++ ++}; ++ ++struct p_seccomp { ++ ++ struct seccomp sec; ++ int flag; ++ int flag_sync_thread; ++ ++}; ++ ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ #define P_PCFI_STACK_BUF (PAGE_SIZE >> 2) ++#else ++ #define P_PCFI_STACK_BUF (PAGE_SIZE >> 1) ++#endif ++ ++#ifdef CONFIG_X86_64 ++ #define P_NORMALIZE_LONG 0x0101010101010101 ++ #define P_MASK_COUNTER 0x07FFFFFFFFFFFFFF ++#else ++ #define P_NORMALIZE_LONG 0x01010101 ++ #define P_MASK_COUNTER 0x07FFFFFF ++#endif ++ ++#ifdef P_LKRG_TASK_OFF_DEBUG ++#define P_LKRG_TASK_OFF_MAXBUF 256 ++ ++struct p_task_off_debug { ++ ++ unsigned int p_caller; ++ unsigned int p_action; ++ unsigned long p_old_off; ++ unsigned int p_debug_val; ++ /* only for override_* API */ ++ struct stack_trace p_trace; ++ char p_internal_buf[P_PCFI_STACK_BUF]; ++ ++}; ++#endif ++ ++/* X86(-64)*/ ++#if defined(CONFIG_X86) && LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0) ++#ifndef CONFIG_PAX_RANDKSTACK ++ #define P_VERIFY_ADDR_LIMIT 1 ++#endif ++/* ARM(64) */ ++#elif defined(CONFIG_ARM) || (defined(CONFIG_ARM64) && LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)) ++ #define P_VERIFY_ADDR_LIMIT 2 ++#endif ++ ++struct p_ed_process_task { ++ ++ unsigned long p_off; ++ struct task_struct *p_task; ++ pid_t p_pid; ++ char p_comm[TASK_COMM_LEN+1]; ++ const struct cred *p_cred_ptr; ++ const struct cred *p_real_cred_ptr; ++ struct p_cred p_cred; ++ struct p_cred p_real_cred; ++ struct p_seccomp p_sec; ++#if defined(P_VERIFY_ADDR_LIMIT) ++ mm_segment_t p_addr_limit; ++#endif ++ struct nsproxy *p_nsproxy; ++ struct nsproxy p_ns; ++ void *p_stack; ++ unsigned int p_off_count; ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ struct p_task_off_debug p_off_debug[P_LKRG_TASK_OFF_MAXBUF]; ++ unsigned int p_off_counter; ++ int p_off_debug_cnt; ++#endif ++ ++}; ++ ++#ifdef CONFIG_SECURITY_SELINUX ++struct p_ed_guard_selinux { ++ ++#ifdef P_SELINUX_VERIFY ++ int p_selinux_enforcing; ++#endif ++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) ++ int p_selinux_enabled; ++#endif ++ ++}; ++#endif ++ 
++struct p_ed_global_variables { ++ ++#ifdef CONFIG_SECURITY_SELINUX ++ p_lkrg_counter_lock p_selinux_lock; ++ struct p_ed_guard_selinux p_selinux; ++#endif ++ ++}; ++ ++#include "p_rb_ed_trees/p_rb_ed_pids/p_rb_ed_pids_tree.h" ++#include "syscalls/p_install.h" ++#include "syscalls/exec/p_security_bprm_committing_creds/p_security_bprm_committing_creds.h" ++#include "syscalls/exec/p_security_bprm_committed_creds/p_security_bprm_committed_creds.h" ++#include "syscalls/p_call_usermodehelper/p_usermode_kernel_dep.h" ++#include "syscalls/p_call_usermodehelper/p_call_usermodehelper.h" ++#include "syscalls/p_call_usermodehelper_exec/p_call_usermodehelper_exec.h" ++#include "syscalls/p_wake_up_new_task/p_wake_up_new_task.h" ++#include "syscalls/p_do_exit/p_do_exit.h" ++#include "syscalls/p_sys_setuid/p_sys_setuid.h" ++#include "syscalls/p_sys_setreuid/p_sys_setreuid.h" ++#include "syscalls/p_sys_setresuid/p_sys_setresuid.h" ++#include "syscalls/p_sys_setfsuid/p_sys_setfsuid.h" ++#include "syscalls/p_sys_setgid/p_sys_setgid.h" ++#include "syscalls/p_sys_setregid/p_sys_setregid.h" ++#include "syscalls/p_sys_setresgid/p_sys_setresgid.h" ++#include "syscalls/p_sys_setfsgid/p_sys_setfsgid.h" ++#include "syscalls/p_set_current_groups/p_set_current_groups.h" ++#include "syscalls/p_generic_permission/p_generic_permission.h" ++#include "syscalls/p_sel_write_enforce/p_sel_write_enforce.h" ++#include "syscalls/p_seccomp/p_seccomp.h" ++#include "syscalls/p_sys_unshare/p_sys_unshare.h" ++#include "syscalls/caps/p_sys_capset/p_sys_capset.h" ++#include "syscalls/caps/p_cap_task_prctl/p_cap_task_prctl.h" ++#include "syscalls/keyring/p_key_change_session_keyring/p_key_change_session_keyring.h" ++#include "syscalls/keyring/p_sys_add_key/p_sys_add_key.h" ++#include "syscalls/keyring/p_sys_request_key/p_sys_request_key.h" ++#include "syscalls/keyring/p_sys_keyctl/p_sys_keyctl.h" ++#include "syscalls/p_security_ptrace_access/p_security_ptrace_access.h" ++#include 
"syscalls/compat/p_compat_sys_keyctl/p_compat_sys_keyctl.h" ++#include "syscalls/compat/p_compat_sys_capset/p_compat_sys_capset.h" ++#include "syscalls/compat/p_compat_sys_add_key/p_compat_sys_add_key.h" ++#include "syscalls/compat/p_compat_sys_request_key/p_compat_sys_request_key.h" ++#include "syscalls/__x32/p_x32_sys_keyctl/p_x32_sys_keyctl.h" ++/* Override creds */ ++#include "syscalls/override/p_override_creds/p_override_creds.h" ++#include "syscalls/override/p_revert_creds/p_revert_creds.h" ++/* Namespaces */ ++#include "syscalls/p_sys_setns/p_sys_setns.h" ++/* OverlayFS */ ++#include "syscalls/override/overlayfs/p_ovl_create_or_link/p_ovl_create_or_link.h" ++/* pCFI */ ++#include "syscalls/pCFI/p_mark_inode_dirty/p_mark_inode_dirty.h" ++#include "syscalls/pCFI/p_schedule/p_schedule.h" ++#include "syscalls/pCFI/p___queue_work/p___queue_work.h" ++#include "syscalls/pCFI/p_lookup_fast/p_lookup_fast.h" ++#include "syscalls/p_capable/p_capable.h" ++#include "syscalls/p_scm_send/p_scm_send.h" ++ ++extern struct p_ed_global_variables p_ed_guard_globals; ++extern unsigned long p_pcfi_CPU_flags; ++ ++//unsigned int p_iterate_processes(int (*p_func)(void *), char p_ver); ++int p_print_task_f(void *p_arg); ++int p_dump_task_f(void *p_arg); ++int p_remove_task_pid_f(pid_t p_arg); ++ ++void p_verify_addr_limit(struct p_ed_process *p_orig, struct task_struct *p_current); ++void p_update_ed_process(struct p_ed_process *p_source, struct task_struct *p_task, char p_stack); ++void p_set_ed_process_on(struct p_ed_process *p_source); ++void p_set_ed_process_off(struct p_ed_process *p_source); ++void p_ed_is_off_off_wrap(struct p_ed_process *p_source); ++void p_ed_validate_off_flag_wrap(struct p_ed_process *p_source); ++/* For override */ ++void p_set_ed_process_override_on(struct p_ed_process *p_source); ++void p_set_ed_process_override_off(struct p_ed_process *p_source); ++void p_reset_ed_flags(struct p_ed_process *p_source); ++/* For OverlayFS */ ++int 
p_verify_ovl_create_or_link(struct p_ed_process *p_source); ++ ++int p_validate_task_f(void *p_arg); ++ ++//void p_ed_pcfi_cpu(unsigned char p_kill); ++void p_ed_validate_current(void); ++void p_ed_enforce_validation(void); ++unsigned int p_ed_enforce_validation_paranoid(void); ++int p_ed_enforce_pcfi(struct task_struct *p_task, struct p_ed_process *p_orig, struct pt_regs *p_regs); ++int p_ed_pcfi_validate_sp(struct task_struct *p_task, struct p_ed_process *p_orig, unsigned long p_sp); ++ ++int p_exploit_detection_init(void); ++void p_exploit_detection_exit(void); ++ ++int p_selinux_state_init(void); ++void p_selinux_state_restore(void); ++void p_selinux_state_update(void); ++int p_selinux_state_changed(void); ++int p_selinux_state_enforcing(void); ++ ++#ifdef P_LKRG_TASK_OFF_DEBUG ++void p_debug_off_flag_off(struct p_ed_process *p_source, unsigned int p_id); ++void p_debug_off_flag_on(struct p_ed_process *p_source, unsigned int p_id); ++void p_debug_off_flag_reset(struct p_ed_process *p_source, unsigned int p_id); ++void p_debug_off_flag_override_off(struct p_ed_process *p_source, unsigned int p_id, struct pt_regs *p_regs); ++void p_debug_off_flag_override_on(struct p_ed_process *p_source, unsigned int p_id, struct pt_regs *p_regs); ++#endif ++ ++#ifdef P_LKRG_CUSTOM_GET_RANDOM_LONG ++static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], p_get_random_int_hash); ++ ++static inline unsigned long get_random_long(void) { ++ ++ __u32 *p_hash; ++ __u32 p_random_int_secret; ++ unsigned long p_ret; ++ ++ if (arch_get_random_long(&p_ret)) ++ return p_ret; ++ ++ get_random_bytes(&p_random_int_secret, sizeof(p_random_int_secret)); ++ p_hash = get_cpu_var(p_get_random_int_hash); ++ ++ p_hash[0] += current->pid + jiffies + get_cycles(); ++ md5_transform(p_hash, &p_random_int_secret); ++ p_ret = *(unsigned long *)p_hash; ++ put_cpu_var(p_get_random_int_hash); ++ ++ return p_ret; ++} ++#endif ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,0) ++#define p_force_sig(sig) 
force_sig((sig)) ++#else ++#define p_force_sig(sig) force_sig((sig), current) ++#endif ++ ++/* Task integrity lock API */ ++static inline void p_tasks_read_lock_raw(rwlock_t *p_arg) { ++ ++ read_lock(p_arg); ++} ++ ++static inline void p_tasks_read_unlock_raw(rwlock_t *p_arg) { ++ ++ read_unlock(p_arg); ++} ++ ++static inline void p_tasks_write_lock_noirq(rwlock_t *p_arg) { ++ ++ write_lock(p_arg); ++} ++ ++static inline void p_tasks_write_unlock_noirq(rwlock_t *p_arg) { ++ ++ write_unlock(p_arg); ++} ++ ++static inline void p_tasks_read_lock(unsigned long *p_flags) { ++ ++ read_lock(p_rb_hash_lock_lookup(task_pid_nr(current))); ++} ++ ++static inline int p_tasks_read_trylock(unsigned long *p_flags) { ++ ++// local_irq_save(*p_flags); ++ return read_trylock(p_rb_hash_lock_lookup(task_pid_nr(current))) ? 1 : ({ /* local_irq_restore(*p_flags); */ 0; }); ++} ++ ++static inline void p_tasks_read_unlock(unsigned long *p_flags) { ++ ++ read_unlock(p_rb_hash_lock_lookup(task_pid_nr(current))); ++} ++ ++static inline void p_tasks_write_lock(unsigned long *p_flags) { ++ ++ write_lock_irqsave(p_rb_hash_lock_lookup(task_pid_nr(current)), *p_flags); ++} ++ ++static inline int p_tasks_write_trylock(unsigned long *p_flags) { ++ ++ return write_trylock_irqsave(p_rb_hash_lock_lookup(task_pid_nr(current)), *p_flags); ++} ++ ++static inline void p_tasks_write_unlock(unsigned long *p_flags) { ++ ++ write_unlock_irqrestore(p_rb_hash_lock_lookup(task_pid_nr(current)), *p_flags); ++} ++ ++static inline void p_tasks_write_lock_by_pid(pid_t p_arg, unsigned long *p_flags) { ++ ++ write_lock_irqsave(p_rb_hash_lock_lookup(p_arg), *p_flags); ++} ++ ++static inline void p_tasks_write_unlock_by_pid(pid_t p_arg, unsigned long *p_flags) { ++ ++ write_unlock_irqrestore(p_rb_hash_lock_lookup(p_arg), *p_flags); ++} ++/* End */ ++ ++static inline unsigned int p_is_ed_task(struct task_struct *p_task) { ++ ++ return p_task->mm && !is_global_init(p_task); ++} ++ ++static inline int 
p_kill_task_by_task(struct task_struct *p_task) { ++ ++ p_print_log(P_LKRG_CRIT, ++ " Trying to kill process[%s | %d]!\n", ++ p_task->comm,task_pid_nr(p_task)); ++ ++ return send_sig_info(SIGKILL, SEND_SIG_PRIV, p_task); ++ ++// do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p_task, true); ++ ++} ++ ++static inline int p_kill_task_by_pid(pid_t p_pid) { ++ ++ return p_kill_task_by_task(pid_task(find_vpid(p_pid), PIDTYPE_PID)); ++ ++// do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p_task, true); ++ ++} ++ ++static inline int p_ed_kill_task_by_task(struct task_struct *p_task) { ++ ++ switch (P_CTRL(p_pint_enforce)) { ++ ++ /* Panic */ ++ case 2: ++ // OK, we need to crash the kernel now ++ panic(P_LKRG_SIGNATURE "Process[%s | %d] Integrity verification failed! Killing the kernel...\n", ++ p_task->comm, ++ task_pid_nr(p_task)); ++ break; ++ ++ /* Kill task */ ++ case 1: ++ return p_kill_task_by_task(p_task); ++ break; ++ ++ /* Log and accept */ ++ case 0: ++ do { ++ struct p_ed_process *p_tmp; ++ ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ p_update_ed_process(p_tmp, p_task, 1); ++ } else { ++ p_print_log(P_LKRG_ERR, "ERROR: Can't accept changes for the corrupted process[%s | %d]\n", ++ p_task->comm, ++ task_pid_nr(p_task)); ++ } ++ } while(0); ++ break; ++ ++ } ++ ++ return 0; ++} ++ ++static inline int p_pcfi_kill_task_by_task(struct task_struct *p_task) { ++ ++ switch (P_CTRL(p_pcfi_enforce)) { ++ ++ /* Panic */ ++ case 2: ++ // OK, we need to crash the kernel now ++ panic(P_LKRG_SIGNATURE "Process[%s | %d] pCFI verification failed! 
Killing the kernel...\n", ++ p_task->comm, ++ task_pid_nr(p_task)); ++ break; ++ ++ /* Kill task */ ++ case 1: ++ return p_kill_task_by_task(p_task); ++ break; ++ ++ /* Log */ ++ case 0: ++ p_print_log(P_LKRG_CRIT, "Process[%s | %d] pCFI verification failed!\n", ++ p_task->comm, ++ task_pid_nr(p_task)); ++ break; ++ ++ } ++ ++ ++ return 0; ++} ++ ++/* ++ * First CPU specific code ++ */ ++#ifdef CONFIG_X86 ++ #include "arch/x86/p_ed_x86_arch.h" ++#elif defined(CONFIG_ARM) ++ #include "arch/arm/p_ed_arm_arch.h" ++#elif defined(CONFIG_ARM64) ++ #include "arch/arm64/p_ed_arm64_arch.h" ++#else ++ #error "!!! UNSUPPORTED ARCHITECTURE !!!" ++#endif ++ ++static inline unsigned int p_ed_pcfi_cpu(unsigned char p_kill) { ++ ++#ifdef CONFIG_X86 ++ unsigned int p_ret; ++ ++ p_ret = p_ed_pcfi_x86_validate_wp(p_kill); ++ p_ret += p_ed_pcfi_x86_validate_smXp(p_kill); ++ ++ return p_ret; ++ ++#elif defined(CONFIG_ARM) ++ ++ return 0; ++ ++#elif defined(CONFIG_ARM64) ++ ++ return 0; ++ ++#endif ++ ++} ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/p_rb_ed_trees/p_rb_ed_pids/p_rb_ed_pids_tree.c b/security/lkrg/modules/exploit_detection/p_rb_ed_trees/p_rb_ed_pids/p_rb_ed_pids_tree.c +new file mode 100644 +index 000000000000..b147148d3fd3 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/p_rb_ed_trees/p_rb_ed_pids/p_rb_ed_pids_tree.c +@@ -0,0 +1,125 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Red-black tree for keeping track of usermode processes ++ * ++ * Notes: ++ * - Makes sense with own kmem_cache_* allocation ++ * ++ * Timeline: ++ * - Created: 07.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++struct kmem_cache *p_ed_pids_cache = NULL; ++struct p_tasks_root p_rb_hash[RB_HASH_SIZE] __attribute__ ((aligned(L1_CACHE_BYTES))); ++ ++struct p_ed_process *p_rb_find_ed_pid(struct rb_root *p_root, pid_t p_arg) { ++ ++ struct rb_node *p_node = 
p_root->rb_node; ++ struct p_ed_process *p_struct = NULL; ++ ++ while(p_node) { ++ p_struct = rb_entry(p_node, struct p_ed_process, p_rb); ++ ++ if (p_arg < p_struct->p_ed_task.p_pid) { ++ p_node = p_node->rb_left; ++ } else if (p_arg > p_struct->p_ed_task.p_pid) { ++ p_node = p_node->rb_right; ++ } else { ++ return p_struct; ++ } ++ } ++ ++ return NULL; ++} ++ ++ ++struct p_ed_process *p_rb_add_ed_pid(struct rb_root *p_root, pid_t p_arg, struct p_ed_process *p_source) { ++ ++ struct rb_node **p_node = &p_root->rb_node; ++ struct rb_node *p_parent = NULL; ++ struct p_ed_process *p_struct; ++ ++ while(*p_node) { ++ p_parent = *p_node; ++ p_struct = rb_entry(p_parent, struct p_ed_process, p_rb); ++ ++ if (p_arg < p_struct->p_ed_task.p_pid) { ++ p_node = &(*p_node)->rb_left; ++ } else if (p_arg > p_struct->p_ed_task.p_pid) { ++ p_node = &(*p_node)->rb_right; ++ } else { ++ return p_struct; ++ } ++ ++ } ++ rb_link_node(&p_source->p_rb, p_parent, p_node); // Insert this new node as a red leaf ++ rb_insert_color(&p_source->p_rb, p_root); // Rebalance the tree, finish inserting ++ ++ return NULL; ++} ++ ++ ++void p_rb_del_ed_pid(struct rb_root *p_root, struct p_ed_process *p_source) { ++ ++ rb_erase(&p_source->p_rb, p_root); // Erase the node ++ p_reset_ed_flags(p_source); ++ p_free_ed_pids(p_source); // Free the memory ++} ++ ++static void p_ed_pids_cache_init(void *p_arg) { ++ ++ struct p_ed_process *p_struct = p_arg; ++ ++ memset(p_struct, 0, sizeof(struct p_ed_process)); ++} ++ ++int p_init_rb_ed_pids(void) { ++ ++ unsigned int i; ++ ++ for (i=0; i < RB_HASH_SIZE; i++) { ++ p_rb_hash[i].p_tree.tree = RB_ROOT; ++ rwlock_init(&p_rb_hash[i].p_lock.lock); ++ } ++ ++ if ( (p_ed_pids_cache = kmem_cache_create("p_ed_pids", sizeof(struct p_ed_process), ++ 0, SLAB_HWCACHE_ALIGN, p_ed_pids_cache_init)) == NULL) { ++ p_print_log(P_LKRG_ERR, "kmem_cache_create() for ED PIDs error! 
:(\n"); ++ return -ENOMEM; ++ } ++ ++ return P_LKRG_SUCCESS; ++} ++ ++void p_delete_rb_ed_pids(void) { ++ ++ struct rb_node *p_node; ++ struct p_ed_process *p_tmp; ++ unsigned int i; ++ ++ if (p_ed_pids_cache) { ++ for (i=0; i %d\n",p_tmp->p_ed_task.p_pid); ++ p_rb_del_ed_pid(&p_rb_hash[i].p_tree.tree, p_tmp); ++ ++ } ++ p_tasks_write_unlock_noirq(&p_rb_hash[i].p_lock.lock); ++ } ++ mb(); ++ kmem_cache_destroy(p_ed_pids_cache); ++ p_ed_pids_cache = NULL; ++ } ++ ++} +diff --git a/security/lkrg/modules/exploit_detection/p_rb_ed_trees/p_rb_ed_pids/p_rb_ed_pids_tree.h b/security/lkrg/modules/exploit_detection/p_rb_ed_trees/p_rb_ed_pids/p_rb_ed_pids_tree.h +new file mode 100644 +index 000000000000..33e5234bf37b +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/p_rb_ed_trees/p_rb_ed_pids/p_rb_ed_pids_tree.h +@@ -0,0 +1,105 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Red-black tree for keeping track of usermode processes ++ * ++ * Notes: ++ * - Makes sense with own kmem_cache_* allocation ++ * ++ * Timeline: ++ * - Created: 07.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_RB_TREE_H ++#define P_LKRG_EXPLOIT_DETECTION_RB_TREE_H ++ ++#define RB_HASH_BITS 9 ++#define RB_HASH_SIZE (1U << RB_HASH_BITS) ++#define RB_HASH_MASK (RB_HASH_SIZE - 1) ++ ++#define RB_HASH_MULT (2 * L1_CACHE_BYTES / sizeof(p_rb_hash[0]) + 3) ++#define RB_HASH_FUNC(pid) ((uint32_t)(pid) * (uint32_t)RB_HASH_MULT) & RB_HASH_MASK ++ ++#define p_alloc_ed_pids() kmem_cache_alloc(p_ed_pids_cache, GFP_ATOMIC) ++#define p_free_ed_pids(name) kmem_cache_free(p_ed_pids_cache, (void *)(name)) ++ ++//#ifdef P_LKRG_DEBUG ++#define P_DUMP_RB_ED_PIDS_TREE \ ++do { \ ++ struct rb_node *p_node; \ ++ unsigned int i; \ ++ \ ++ for (i=0; i pid[%d] [uid[%d] gid[%d] ruid[%d] rgid[%d]\n", \ ++ rb_entry(p_node, struct p_ed_process, p_rb)->p_ed_task.p_comm, \ ++ rb_entry(p_node, struct p_ed_process, 
p_rb)->p_ed_task.p_pid, \ ++ p_get_uid(&rb_entry(p_node, struct p_ed_process, p_rb)->p_ed_task.p_cred.uid), \ ++ p_get_gid(&rb_entry(p_node, struct p_ed_process, p_rb)->p_ed_task.p_cred.gid), \ ++ p_get_uid(&rb_entry(p_node, struct p_ed_process, p_rb)->p_ed_task.p_real_cred.uid), \ ++ p_get_gid(&rb_entry(p_node, struct p_ed_process, p_rb)->p_ed_task.p_real_cred.gid) \ ++ ); \ ++ } \ ++ } \ ++} while(0); ++//#endif ++ ++struct p_tasks_root { ++ ++ union { ++ rwlock_t lock; ++ struct rb_root dummy; ++ } p_lock; ++ ++ union { ++ struct rb_root tree; ++ rwlock_t dummy; ++ } p_tree; ++ ++}; ++ ++struct p_ed_process { ++ ++ struct rb_node p_rb; ++ struct p_ed_process_task p_ed_task; ++ /* ... add other driver-specific fields */ ++ ++}; ++ ++extern struct kmem_cache *p_ed_pids_cache; ++extern struct p_tasks_root p_rb_hash[RB_HASH_SIZE]; ++ ++struct p_ed_process *p_rb_find_ed_pid(struct rb_root *p_root, pid_t p_arg); ++struct p_ed_process *p_rb_add_ed_pid(struct rb_root *p_root, pid_t p_arg, struct p_ed_process *p_source); ++void p_rb_del_ed_pid(struct rb_root *p_root, struct p_ed_process *p_source); ++int p_init_rb_ed_pids(void); ++void p_delete_rb_ed_pids(void); ++ ++ ++static inline struct rb_root *p_rb_hash_tree_lookup(pid_t p_pid) { ++ return &p_rb_hash[RB_HASH_FUNC(p_pid)].p_tree.tree; ++} ++ ++static inline rwlock_t *p_rb_hash_lock_lookup(pid_t p_pid) { ++ return &p_rb_hash[RB_HASH_FUNC(p_pid)].p_lock.lock; ++} ++ ++static inline struct p_ed_process *p_find_ed_by_pid(pid_t p_arg) { ++ return p_rb_find_ed_pid(p_rb_hash_tree_lookup(p_arg), p_arg); ++} ++ ++static inline void p_rb_init_ed_pid_node(struct rb_node *rb) { ++ ++ rb->__rb_parent_color = 0; ++ rb->rb_right = NULL; ++ rb->rb_left = NULL; ++ RB_CLEAR_NODE(rb); ++} ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/p_selinux_state.c b/security/lkrg/modules/exploit_detection/p_selinux_state.c +new file mode 100644 +index 000000000000..8d1d102f062e +--- /dev/null ++++ 
b/security/lkrg/modules/exploit_detection/p_selinux_state.c +@@ -0,0 +1,77 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - selinux function ++ * ++ * Notes: ++ * - None ++ * ++ * Timeline: ++ * - Created: 17.III.2021 ++ * ++ * Author: ++ * - Mariusz Zaborski (https://oshogbo.vexillium.org/) ++ * ++ */ ++ ++#include "../../p_lkrg_main.h" ++ ++#ifdef P_SELINUX_VERIFY ++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) ++ ++int p_selinux_state_init(void) { ++ ++ P_SYM(p_selinux_state) = (struct p_selinux_state *)P_SYM(p_kallsyms_lookup_name)("selinux_state"); ++ ++ if (P_SYM(p_selinux_state) == NULL) ++ return P_LKRG_ERR; ++ ++ return P_LKRG_SUCCESS; ++} ++ ++void p_selinux_state_restore(void) { ++ ++ P_SYM(p_selinux_state)->enforcing = p_ed_guard_globals.p_selinux.p_selinux_enforcing; ++} ++ ++ ++int p_selinux_state_enforcing(void) { ++ ++ return P_SYM(p_selinux_state)->enforcing; ++} ++ ++ #else ++int p_selinux_state_init(void) { ++ ++ P_SYM(p_selinux_enforcing) = (int *)P_SYM(p_kallsyms_lookup_name)("selinux_enforcing"); ++ ++ if (P_SYM(p_selinux_enforcing) == NULL) ++ return P_LKRG_ERR; ++ ++ p_selinux_state_update(); ++ return P_LKRG_SUCCESS; ++} ++ ++void p_selinux_state_restore(void) { ++ ++ *P_SYM(p_selinux_enforcing) = p_ed_guard_globals.p_selinux.p_selinux_enforcing; ++} ++ ++int p_selinux_state_enforcing(void) { ++ ++ return *P_SYM(p_selinux_enforcing); ++} ++ #endif ++ ++void p_selinux_state_update(void) { ++ ++ p_ed_guard_globals.p_selinux.p_selinux_enforcing = p_selinux_state_enforcing(); ++} ++ ++int p_selinux_state_changed(void) { ++ ++ return p_selinux_state_enforcing() != p_ed_guard_globals.p_selinux.p_selinux_enforcing; ++} ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/__x32/p_x32_sys_keyctl/p_x32_sys_keyctl.c b/security/lkrg/modules/exploit_detection/syscalls/__x32/p_x32_sys_keyctl/p_x32_sys_keyctl.c +new file mode 100644 +index 000000000000..b9f423f79c83 +--- /dev/null ++++ 
b/security/lkrg/modules/exploit_detection/syscalls/__x32/p_x32_sys_keyctl/p_x32_sys_keyctl.c +@@ -0,0 +1,95 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept X32 keyctl syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 13.VIII.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifdef CONFIG_X86_X32 ++ ++#include "../../../../../p_lkrg_main.h" ++ ++#ifdef P_SYSCALL_LAYOUT_4_17 ++ ++ ++char p_x32_sys_keyctl_kretprobe_state = 0; ++ ++static struct kretprobe p_x32_sys_keyctl_kretprobe = { ++ .kp.symbol_name = P_GET_X32_SYSCALL_NAME(keyctl), ++ .handler = p_x32_sys_keyctl_ret, ++ .entry_handler = p_x32_sys_keyctl_entry, ++ .data_size = sizeof(struct p_x32_sys_keyctl_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_x32_sys_keyctl_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 4); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! ++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_x32_sys_keyctl_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "capset returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!IS_ERR((void *)p_regs_get_ret(p_regs))) { ++ // This process is on the ED list - update information! 
++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 5); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(x32_sys_keyctl) ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/__x32/p_x32_sys_keyctl/p_x32_sys_keyctl.h b/security/lkrg/modules/exploit_detection/syscalls/__x32/p_x32_sys_keyctl/p_x32_sys_keyctl.h +new file mode 100644 +index 000000000000..f6d5f4b1a69e +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/__x32/p_x32_sys_keyctl/p_x32_sys_keyctl.h +@@ -0,0 +1,43 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept X32 keyctl syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 13.VIII.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifdef CONFIG_X86_X32 ++ ++#ifdef P_SYSCALL_LAYOUT_4_17 ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_X32_SYS_KEYCTL_H ++#define P_LKRG_EXPLOIT_DETECTION_X32_SYS_KEYCTL_H ++ ++/* per-instance private data */ ++struct p_x32_sys_keyctl_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_x32_sys_keyctl_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_x32_sys_keyctl_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_x32_sys_keyctl_hook(int p_isra); ++void p_uninstall_x32_sys_keyctl_hook(void); ++ ++#endif ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/caps/p_cap_task_prctl/p_cap_task_prctl.c b/security/lkrg/modules/exploit_detection/syscalls/caps/p_cap_task_prctl/p_cap_task_prctl.c +new file mode 100644 +index 000000000000..658778de9719 +--- /dev/null ++++ 
b/security/lkrg/modules/exploit_detection/syscalls/caps/p_cap_task_prctl/p_cap_task_prctl.c +@@ -0,0 +1,88 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept cap_task_prctl function ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 05.XII.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../p_lkrg_main.h" ++ ++ ++char p_cap_task_prctl_kretprobe_state = 0; ++ ++static struct kretprobe p_cap_task_prctl_kretprobe = { ++ .kp.symbol_name = "cap_task_prctl", ++ .handler = p_cap_task_prctl_ret, ++ .entry_handler = p_cap_task_prctl_entry, ++ .data_size = sizeof(struct p_cap_task_prctl_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_cap_task_prctl_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 6); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! ++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_cap_task_prctl_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ long p_ret = (long)p_regs_get_ret(p_regs); ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "capset returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (p_ret >= 0) { ++ // This process is on the ED list - update information! 
++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 7); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(cap_task_prctl) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/caps/p_cap_task_prctl/p_cap_task_prctl.h b/security/lkrg/modules/exploit_detection/syscalls/caps/p_cap_task_prctl/p_cap_task_prctl.h +new file mode 100644 +index 000000000000..c29a5e73c4a0 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/caps/p_cap_task_prctl/p_cap_task_prctl.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept cap_task_prctl function ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 05.XII.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_CAP_TASK_PRCTL_H ++#define P_LKRG_EXPLOIT_DETECTION_CAP_TASK_PRCTL_H ++ ++/* per-instance private data */ ++struct p_cap_task_prctl_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_cap_task_prctl_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_cap_task_prctl_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_cap_task_prctl_hook(int p_isra); ++void p_uninstall_cap_task_prctl_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/caps/p_sys_capset/p_sys_capset.c b/security/lkrg/modules/exploit_detection/syscalls/caps/p_sys_capset/p_sys_capset.c +new file mode 100644 +index 000000000000..dc84f047df73 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/caps/p_sys_capset/p_sys_capset.c +@@ -0,0 +1,87 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept capset syscall ++ * ++ * Notes: ++ * - 
None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 05.XII.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../p_lkrg_main.h" ++ ++ ++char p_sys_capset_kretprobe_state = 0; ++ ++static struct kretprobe p_sys_capset_kretprobe = { ++ .kp.symbol_name = P_GET_SYSCALL_NAME(capset), ++ .handler = p_sys_capset_ret, ++ .entry_handler = p_sys_capset_entry, ++ .data_size = sizeof(struct p_sys_capset_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_sys_capset_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 8); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! ++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_sys_capset_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "capset returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!p_regs_get_ret(p_regs)) { ++ // This process is on the ED list - update information! 
++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 9); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(sys_capset) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/caps/p_sys_capset/p_sys_capset.h b/security/lkrg/modules/exploit_detection/syscalls/caps/p_sys_capset/p_sys_capset.h +new file mode 100644 +index 000000000000..3c7569b9adbd +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/caps/p_sys_capset/p_sys_capset.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept capset syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 05.XII.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SYS_CAPSET_H ++#define P_LKRG_EXPLOIT_DETECTION_SYS_CAPSET_H ++ ++/* per-instance private data */ ++struct p_sys_capset_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_sys_capset_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_sys_capset_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_sys_capset_hook(int p_isra); ++void p_uninstall_sys_capset_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_add_key/p_compat_sys_add_key.c b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_add_key/p_compat_sys_add_key.c +new file mode 100644 +index 000000000000..2d2c1c198d03 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_add_key/p_compat_sys_add_key.c +@@ -0,0 +1,100 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept compat_add_key syscall ++ * ++ * Notes: ++ * - 
COMPAT version of this syscall is only available in the kernels ++ * with a new syscall implementation (4.17+) ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 13.VIII.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifdef CONFIG_X86 ++ ++#ifdef CONFIG_COMPAT ++ ++#include "../../../../../p_lkrg_main.h" ++ ++#ifdef P_SYSCALL_LAYOUT_4_17 ++ ++ ++char p_compat_sys_add_key_kretprobe_state = 0; ++ ++static struct kretprobe p_compat_sys_add_key_kretprobe = { ++ .kp.symbol_name = P_GET_NEW_COMPAT_SYSCALL_NAME(add_key), ++ .handler = p_compat_sys_add_key_ret, ++ .entry_handler = p_compat_sys_add_key_entry, ++ .data_size = sizeof(struct p_compat_sys_add_key_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_compat_sys_add_key_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 10); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! ++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_compat_sys_add_key_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "capset returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!IS_ERR((void *)p_regs_get_ret(p_regs))) { ++ // This process is on the ED list - update information! 
++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 11); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(compat_sys_add_key) ++ ++#endif ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_add_key/p_compat_sys_add_key.h b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_add_key/p_compat_sys_add_key.h +new file mode 100644 +index 000000000000..6c4dce85fdb0 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_add_key/p_compat_sys_add_key.h +@@ -0,0 +1,44 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept compat_add_key syscall ++ * ++ * Notes: ++ * - COMPAT version of this syscall is only available in the kernels ++ * with a new syscall implementation (4.17+) ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 13.VIII.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifdef CONFIG_COMPAT ++ ++#ifdef P_SYSCALL_LAYOUT_4_17 ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_COMPAT_SYS_ADD_KEY_H ++#define P_LKRG_EXPLOIT_DETECTION_COMPAT_SYS_ADD_KEY_H ++ ++/* per-instance private data */ ++struct p_compat_sys_add_key_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_compat_sys_add_key_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_compat_sys_add_key_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_compat_sys_add_key_hook(int p_isra); ++void p_uninstall_compat_sys_add_key_hook(void); ++ ++#endif ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_capset/p_compat_sys_capset.c 
b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_capset/p_compat_sys_capset.c +new file mode 100644 +index 000000000000..0be31550b36a +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_capset/p_compat_sys_capset.c +@@ -0,0 +1,100 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept compat_capset syscall ++ * ++ * Notes: ++ * - COMPAT version of this syscall is only available in the kernels ++ * with a new syscall implementation (4.17+) ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 13.VIII.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifdef CONFIG_X86 ++ ++#ifdef CONFIG_COMPAT ++ ++#include "../../../../../p_lkrg_main.h" ++ ++#ifdef P_SYSCALL_LAYOUT_4_17 ++ ++ ++char p_compat_sys_capset_kretprobe_state = 0; ++ ++static struct kretprobe p_compat_sys_capset_kretprobe = { ++ .kp.symbol_name = P_GET_NEW_COMPAT_SYSCALL_NAME(capset), ++ .handler = p_compat_sys_capset_ret, ++ .entry_handler = p_compat_sys_capset_entry, ++ .data_size = sizeof(struct p_compat_sys_capset_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_compat_sys_capset_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 12); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_compat_sys_capset_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "capset returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!p_regs_get_ret(p_regs)) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 13); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(compat_sys_capset) ++ ++#endif ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_capset/p_compat_sys_capset.h b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_capset/p_compat_sys_capset.h +new file mode 100644 +index 000000000000..9d4c0b681800 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_capset/p_compat_sys_capset.h +@@ -0,0 +1,44 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept compat_capset syscall ++ * ++ * Notes: ++ * - COMPAT version of this syscall is only available in the kernels ++ * with a new syscall implementation (4.17+) ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 13.VIII.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifdef CONFIG_COMPAT ++ ++#ifdef P_SYSCALL_LAYOUT_4_17 ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_COMPAT_SYS_CAPSET_H ++#define 
P_LKRG_EXPLOIT_DETECTION_COMPAT_SYS_CAPSET_H ++ ++/* per-instance private data */ ++struct p_compat_sys_capset_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_compat_sys_capset_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_compat_sys_capset_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_compat_sys_capset_hook(int p_isra); ++void p_uninstall_compat_sys_capset_hook(void); ++ ++#endif ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_keyctl/p_compat_sys_keyctl.c b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_keyctl/p_compat_sys_keyctl.c +new file mode 100644 +index 000000000000..b5ae73c3f8b4 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_keyctl/p_compat_sys_keyctl.c +@@ -0,0 +1,91 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept compat_keyctl syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 17.I.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifdef CONFIG_COMPAT ++ ++#include "../../../../../p_lkrg_main.h" ++ ++ ++char p_compat_sys_keyctl_kretprobe_state = 0; ++ ++static struct kretprobe p_compat_sys_keyctl_kretprobe = { ++ .kp.symbol_name = P_GET_COMPAT_SYSCALL_NAME(keyctl), ++ .handler = p_compat_sys_keyctl_ret, ++ .entry_handler = p_compat_sys_keyctl_entry, ++ .data_size = sizeof(struct p_compat_sys_keyctl_data), ++ /* Probe up to 40 instances concurrently. 
*/ ++ .maxactive = 40, ++}; ++ ++ ++int p_compat_sys_keyctl_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 18); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! ++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_compat_sys_keyctl_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "capset returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!IS_ERR((void *)p_regs_get_ret(p_regs))) { ++ // This process is on the ED list - update information! 
++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 19); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(compat_sys_keyctl) ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_keyctl/p_compat_sys_keyctl.h b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_keyctl/p_compat_sys_keyctl.h +new file mode 100644 +index 000000000000..555f81f61109 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_keyctl/p_compat_sys_keyctl.h +@@ -0,0 +1,39 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept compat_keyctl syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 17.I.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifdef CONFIG_COMPAT ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_COMPAT_SYS_KEYCTL_H ++#define P_LKRG_EXPLOIT_DETECTION_COMPAT_SYS_KEYCTL_H ++ ++/* per-instance private data */ ++struct p_compat_sys_keyctl_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_compat_sys_keyctl_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_compat_sys_keyctl_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_compat_sys_keyctl_hook(int p_isra); ++void p_uninstall_compat_sys_keyctl_hook(void); ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_request_key/p_compat_sys_request_key.c b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_request_key/p_compat_sys_request_key.c +new file mode 100644 +index 000000000000..f57aa46c6174 +--- /dev/null ++++ 
b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_request_key/p_compat_sys_request_key.c +@@ -0,0 +1,100 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept compat_request_key syscall ++ * ++ * Notes: ++ * - COMPAT version of this syscall is only available in the kernels ++ * with a new syscall implementation (4.17+) ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 13.VIII.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifdef CONFIG_X86 ++ ++#ifdef CONFIG_COMPAT ++ ++#include "../../../../../p_lkrg_main.h" ++ ++#ifdef P_SYSCALL_LAYOUT_4_17 ++ ++ ++char p_compat_sys_request_key_kretprobe_state = 0; ++ ++static struct kretprobe p_compat_sys_request_key_kretprobe = { ++ .kp.symbol_name = P_GET_NEW_COMPAT_SYSCALL_NAME(request_key), ++ .handler = p_compat_sys_request_key_ret, ++ .entry_handler = p_compat_sys_request_key_entry, ++ .data_size = sizeof(struct p_compat_sys_request_key_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_compat_sys_request_key_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 20); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_compat_sys_request_key_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "capset returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!IS_ERR((void *)p_regs_get_ret(p_regs))) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 21); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(compat_sys_request_key) ++ ++#endif ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_request_key/p_compat_sys_request_key.h b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_request_key/p_compat_sys_request_key.h +new file mode 100644 +index 000000000000..226688c06caf +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/compat/p_compat_sys_request_key/p_compat_sys_request_key.h +@@ -0,0 +1,44 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept compat_request_key syscall ++ * ++ * Notes: ++ * - COMPAT version of this syscall is only available in the kernels ++ * with a new syscall implementation (4.17+) ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 13.VIII.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifdef CONFIG_COMPAT ++ ++#ifdef P_SYSCALL_LAYOUT_4_17 ++ ++#ifndef 
P_LKRG_EXPLOIT_DETECTION_COMPAT_SYS_REQUEST_KEY_H ++#define P_LKRG_EXPLOIT_DETECTION_COMPAT_SYS_REQUEST_KEY_H ++ ++/* per-instance private data */ ++struct p_compat_sys_request_key_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_compat_sys_request_key_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_compat_sys_request_key_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_compat_sys_request_key_hook(int p_isra); ++void p_uninstall_compat_sys_request_key_hook(void); ++ ++#endif ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/exec/p_security_bprm_committed_creds/p_security_bprm_committed_creds.c b/security/lkrg/modules/exploit_detection/syscalls/exec/p_security_bprm_committed_creds/p_security_bprm_committed_creds.c +new file mode 100644 +index 000000000000..55e5039a6205 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/exec/p_security_bprm_committed_creds/p_security_bprm_committed_creds.c +@@ -0,0 +1,109 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'search_binary_handler' syscall ++ * ++ * Notes: ++ * - We are maintianing Red-Black tree of pid's for Exploit Detection feature. ++ * When process calls execve, we need to update RB tree. 
++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - 18.I.2021: Replace one 'search_binary_handler' hook with two ++ * independent one to reduce the race window while ++ * the process is not being verified ++ * - 28.XII.2020: Replace various execve syscall hooks with one hook ++ * of the function 'search_binary_handler' ++ * - Created: 18.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../p_lkrg_main.h" ++ ++ ++char p_security_bprm_committed_creds_kretprobe_state = 0; ++ ++static struct kretprobe p_security_bprm_committed_creds_kretprobe = { ++ .kp.symbol_name = "security_bprm_committed_creds", ++ .handler = p_security_bprm_committed_creds_ret, ++ .entry_handler = NULL, ++ .data_size = sizeof(struct p_security_bprm_committed_creds_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++notrace struct inode *p_get_inode_from_task(struct task_struct *p_arg) { ++ ++ struct mm_struct *p_mm; ++ struct inode *p_inode = NULL; ++ ++ if (!p_arg) { ++ return NULL; ++ } ++ ++ /* ++ * This function is called from the context of newly created ++ * Process which is intercepted by our *probes. This means ++ * Process did not take control yet. Before we finish our work ++ * Nothing bad should happen in context of parsing mm_struct. ++ * For this specific operation (reading pointer to exe_file) ++ * It is safe to not use read lock. Process can't die before it ++ * is not even taken control. ++ * Additionally, we are under IRQ disabled context and it is ++ * Not safe to take any mutex/semaphore since we can be forced ++ * to sleep. ++ * Current implementation works well! 
++ */ ++// down_read(&p_arg->mm->mmap_sem); ++ ++ p_mm = p_arg->mm; ++ if (p_mm->exe_file) { ++ p_inode = p_mm->exe_file->f_inode; ++ } ++ ++// up_read(&p_arg->mm->mmap_sem); ++ ++ return p_inode; ++} ++ ++notrace int p_security_bprm_committed_creds_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++// struct inode *p_inode; ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++/* ++ p_inode = p_get_inode_from_task(current); ++ ++ p_debug_kprobe_log( ++// p_print_log(P_LKRG_CRIT, ++ "p_search_binary_handler_ret: returned value => %ld comm[%s] Pid:%d inode[%ld]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,p_inode->i_ino); ++*/ ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",task_pid_nr(current)); ++ p_update_ed_process(p_tmp, current, 1); ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_reset(p_tmp, 40); ++#endif ++ p_reset_ed_flags(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++GENERATE_INSTALL_FUNC(security_bprm_committed_creds) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/exec/p_security_bprm_committed_creds/p_security_bprm_committed_creds.h b/security/lkrg/modules/exploit_detection/syscalls/exec/p_security_bprm_committed_creds/p_security_bprm_committed_creds.h +new file mode 100644 +index 000000000000..883f4b965591 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/exec/p_security_bprm_committed_creds/p_security_bprm_committed_creds.h +@@ -0,0 +1,45 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'security_bprm_committed_creds' syscall ++ * ++ * Notes: ++ * - We are maintianing Red-Black tree of pid's for Exploit Detection feature. ++ * When process calls execve, we need to update RB tree. 
++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - 18.I.2021: Replace one 'search_binary_handler' hook with two ++ * independent one to reduce the race window while ++ * the process is not being verified ++ * - 28.XII.2020: Replace various execve syscall hooks with one hook ++ * of the function 'search_binary_handler' ++ * - Created: 18.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SECURITY_BPRM_COMMITTED_CREDS_H ++#define P_LKRG_EXPLOIT_DETECTION_SECURITY_BPRM_COMMITTED_CREDS_H ++ ++#define P_MAX_PATH PATH_MAX + 0x20 /* For weirdos used by d_path */ ++ ++ ++/* per-instance private data */ ++struct p_security_bprm_committed_creds_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++struct inode *p_get_inode_from_task(struct task_struct *p_arg); ++ ++int p_security_bprm_committed_creds_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_security_bprm_committed_creds_hook(int p_isra); ++void p_uninstall_security_bprm_committed_creds_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/exec/p_security_bprm_committing_creds/p_security_bprm_committing_creds.c b/security/lkrg/modules/exploit_detection/syscalls/exec/p_security_bprm_committing_creds/p_security_bprm_committing_creds.c +new file mode 100644 +index 000000000000..21baf95a3593 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/exec/p_security_bprm_committing_creds/p_security_bprm_committing_creds.c +@@ -0,0 +1,63 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'search_binary_handler' syscall ++ * ++ * Notes: ++ * - We are maintianing Red-Black tree of pid's for Exploit Detection feature. ++ * When process calls execve, we need to update RB tree. 
++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - 18.I.2021: Replace one 'search_binary_handler' hook with two ++ * independent one to reduce the race window while ++ * the process is not being verified ++ * - 28.XII.2020: Replace various execve syscall hooks with one hook ++ * of the function 'search_binary_handler' ++ * - Created: 18.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../p_lkrg_main.h" ++ ++ ++char p_security_bprm_committing_creds_kretprobe_state = 0; ++ ++static struct kretprobe p_security_bprm_committing_creds_kretprobe = { ++ .kp.symbol_name = "security_bprm_committing_creds", ++ .handler = NULL, ++ .entry_handler = p_security_bprm_committing_creds_entry, ++ .data_size = sizeof(struct p_security_bprm_committing_creds_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++notrace int p_security_bprm_committing_creds_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ p_verify_addr_limit(p_tmp, current); ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 37); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++GENERATE_INSTALL_FUNC(security_bprm_committing_creds) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/exec/p_security_bprm_committing_creds/p_security_bprm_committing_creds.h b/security/lkrg/modules/exploit_detection/syscalls/exec/p_security_bprm_committing_creds/p_security_bprm_committing_creds.h +new file mode 100644 +index 000000000000..26f581c26f77 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/exec/p_security_bprm_committing_creds/p_security_bprm_committing_creds.h +@@ -0,0 +1,39 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'security_bprm_committing_creds' syscall ++ * ++ * Notes: ++ * - We are maintianing Red-Black tree of pid's for Exploit Detection feature. ++ * When process calls execve, we need to update RB tree. ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - 18.I.2021: Replace one 'search_binary_handler' hook with two ++ * independent one to reduce the race window while ++ * the process is not being verified ++ * - 28.XII.2020: Replace various execve syscall hooks with one hook ++ * of the function 'search_binary_handler' ++ * - Created: 18.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SECURITY_BPRM_COMMITTING_CREDS_H ++#define P_LKRG_EXPLOIT_DETECTION_SECURITY_BPRM_COMMITTING_CREDS_H ++ ++/* per-instance private data */ ++struct p_security_bprm_committing_creds_data { ++ ktime_t entry_stamp; ++}; ++ ++int p_security_bprm_committing_creds_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_security_bprm_committing_creds_hook(int p_isra); ++void p_uninstall_security_bprm_committing_creds_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/keyring/p_key_change_session_keyring/p_key_change_session_keyring.c 
b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_key_change_session_keyring/p_key_change_session_keyring.c +new file mode 100644 +index 000000000000..7a4e6eca92eb +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_key_change_session_keyring/p_key_change_session_keyring.c +@@ -0,0 +1,84 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept key_change_session_keyring function ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 12.XII.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../p_lkrg_main.h" ++ ++ ++char p_key_change_session_keyring_kretprobe_state = 0; ++ ++static struct kretprobe p_key_change_session_keyring_kretprobe = { ++ .kp.symbol_name = "key_change_session_keyring", ++ .handler = p_key_change_session_keyring_ret, ++ .entry_handler = p_key_change_session_keyring_entry, ++ .data_size = sizeof(struct p_key_change_session_keyring_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_key_change_session_keyring_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 22); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_key_change_session_keyring_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "capset returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ // Always update information - intercepted void function ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 23); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(key_change_session_keyring) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/keyring/p_key_change_session_keyring/p_key_change_session_keyring.h b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_key_change_session_keyring/p_key_change_session_keyring.h +new file mode 100644 +index 000000000000..97eed2c84838 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_key_change_session_keyring/p_key_change_session_keyring.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept key_change_session_keyring function ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 12.XII.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_KEY_CHANGE_SESSION_KEYRING_H ++#define P_LKRG_EXPLOIT_DETECTION_KEY_CHANGE_SESSION_KEYRING_H ++ ++/* per-instance private data */ ++struct 
p_key_change_session_keyring_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_key_change_session_keyring_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_key_change_session_keyring_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_key_change_session_keyring_hook(int p_isra); ++void p_uninstall_key_change_session_keyring_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_add_key/p_sys_add_key.c b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_add_key/p_sys_add_key.c +new file mode 100644 +index 000000000000..c1a48886a8d8 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_add_key/p_sys_add_key.c +@@ -0,0 +1,87 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept add_key syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 31.I.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../p_lkrg_main.h" ++ ++ ++char p_sys_add_key_kretprobe_state = 0; ++ ++static struct kretprobe p_sys_add_key_kretprobe = { ++ .kp.symbol_name = P_GET_SYSCALL_NAME(add_key), ++ .handler = p_sys_add_key_ret, ++ .entry_handler = p_sys_add_key_entry, ++ .data_size = sizeof(struct p_sys_add_key_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_sys_add_key_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 24); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_sys_add_key_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "capset returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!IS_ERR((void *)p_regs_get_ret(p_regs))) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 25); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(sys_add_key) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_add_key/p_sys_add_key.h b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_add_key/p_sys_add_key.h +new file mode 100644 +index 000000000000..f6dcabb67969 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_add_key/p_sys_add_key.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept add_key syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 31.I.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SYS_ADD_KEY_H ++#define P_LKRG_EXPLOIT_DETECTION_SYS_ADD_KEY_H ++ ++/* per-instance private data */ ++struct p_sys_add_key_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_sys_add_key_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_sys_add_key_entry(struct 
kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_sys_add_key_hook(int p_isra); ++void p_uninstall_sys_add_key_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_keyctl/p_sys_keyctl.c b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_keyctl/p_sys_keyctl.c +new file mode 100644 +index 000000000000..49074a8e2349 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_keyctl/p_sys_keyctl.c +@@ -0,0 +1,87 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept keyctl syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 17.I.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../p_lkrg_main.h" ++ ++ ++char p_sys_keyctl_kretprobe_state = 0; ++ ++static struct kretprobe p_sys_keyctl_kretprobe = { ++ .kp.symbol_name = P_GET_SYSCALL_NAME(keyctl), ++ .handler = p_sys_keyctl_ret, ++ .entry_handler = p_sys_keyctl_entry, ++ .data_size = sizeof(struct p_sys_keyctl_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_sys_keyctl_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 26); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_sys_keyctl_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "capset returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!IS_ERR((void *)p_regs_get_ret(p_regs))) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 27); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(sys_keyctl) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_keyctl/p_sys_keyctl.h b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_keyctl/p_sys_keyctl.h +new file mode 100644 +index 000000000000..d0990758e29b +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_keyctl/p_sys_keyctl.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept keyctl syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 17.I.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SYS_KEYCTL_H ++#define P_LKRG_EXPLOIT_DETECTION_SYS_KEYCTL_H ++ ++/* per-instance private data */ ++struct p_sys_keyctl_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_sys_keyctl_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_sys_keyctl_entry(struct kretprobe_instance *p_ri, 
struct pt_regs *p_regs); ++int p_install_sys_keyctl_hook(int p_isra); ++void p_uninstall_sys_keyctl_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_request_key/p_sys_request_key.c b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_request_key/p_sys_request_key.c +new file mode 100644 +index 000000000000..4ee3e1a806a1 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_request_key/p_sys_request_key.c +@@ -0,0 +1,87 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept request_key syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 31.I.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../p_lkrg_main.h" ++ ++ ++char p_sys_request_key_kretprobe_state = 0; ++ ++static struct kretprobe p_sys_request_key_kretprobe = { ++ .kp.symbol_name = P_GET_SYSCALL_NAME(request_key), ++ .handler = p_sys_request_key_ret, ++ .entry_handler = p_sys_request_key_entry, ++ .data_size = sizeof(struct p_sys_request_key_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_sys_request_key_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 28); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_sys_request_key_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "capset returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!IS_ERR((void *)p_regs_get_ret(p_regs))) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 29); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(sys_request_key) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_request_key/p_sys_request_key.h b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_request_key/p_sys_request_key.h +new file mode 100644 +index 000000000000..b4a5bafefae9 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/keyring/p_sys_request_key/p_sys_request_key.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept request_key syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - Keyring support ++ * ++ * Timeline: ++ * - Created: 31.I.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SYS_REQUEST_KEY_H ++#define P_LKRG_EXPLOIT_DETECTION_SYS_REQUEST_KEY_H ++ ++/* per-instance private data */ ++struct p_sys_request_key_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_sys_request_key_ret(struct kretprobe_instance *ri, struct pt_regs 
*p_regs); ++int p_sys_request_key_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_sys_request_key_hook(int p_isra); ++void p_uninstall_sys_request_key_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/override/overlayfs/p_ovl_create_or_link/p_ovl_create_or_link.c b/security/lkrg/modules/exploit_detection/syscalls/override/overlayfs/p_ovl_create_or_link/p_ovl_create_or_link.c +new file mode 100644 +index 000000000000..8220b68e2c54 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/override/overlayfs/p_ovl_create_or_link/p_ovl_create_or_link.c +@@ -0,0 +1,83 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'ovl_create_or_link' function ++ * ++ * Notes: ++ * - We are maintianing Red-Black tree of pid's for Exploit Detection feature. ++ * When process calls 'ovl_create_or_link', we need to correctly handle ++ * this situation and adjust 'off' flag ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 28.III.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../../p_lkrg_main.h" ++ ++char p_ovl_create_or_link_kretprobe_state = 0; ++ ++static struct kretprobe p_ovl_create_or_link_kretprobe = { ++ .kp.symbol_name = "ovl_create_or_link", ++ .handler = p_ovl_create_or_link_ret, ++ .entry_handler = p_ovl_create_or_link_entry, ++ .data_size = sizeof(struct p_ovl_create_or_link_data), ++ /* Probe up to 40 instances concurrently. 
*/ ++ .maxactive = 40, ++}; ++ ++ ++void p_reinit_ovl_create_or_link_kretprobe(void) { ++ ++ memset(&p_ovl_create_or_link_kretprobe,0x0,sizeof(struct kretprobe)); ++ p_ovl_create_or_link_kretprobe.kp.symbol_name = "ovl_create_or_link"; ++ p_ovl_create_or_link_kretprobe.handler = p_ovl_create_or_link_ret; ++ p_ovl_create_or_link_kretprobe.entry_handler = p_ovl_create_or_link_entry; ++ p_ovl_create_or_link_kretprobe.data_size = sizeof(struct p_ovl_create_or_link_data); ++ p_ovl_create_or_link_kretprobe.maxactive = 40; ++} ++ ++int p_ovl_create_or_link_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ return 0; ++} ++ ++ ++notrace int p_ovl_create_or_link_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ if (p_is_ed_task(current)) { ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (p_verify_ovl_create_or_link(p_tmp)) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_override_on(p_tmp, 30, p_regs); ++#endif ++ p_set_ed_process_override_on(p_tmp); ++ } ++ } ++ p_ed_validate_current(); ++ if (p_ed_enforce_pcfi(current, p_tmp, p_regs)) { ++ p_print_log(P_LKRG_CRIT, ++ " Stack pointer corruption (ROP?) 
- pCFI violation: process[%s | %d] !!!\n", ++ current->comm,task_pid_nr(current)); ++ p_pcfi_kill_task_by_task(current); ++ } ++ p_tasks_write_unlock(&p_flags); ++ } ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(ovl_create_or_link) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/override/overlayfs/p_ovl_create_or_link/p_ovl_create_or_link.h b/security/lkrg/modules/exploit_detection/syscalls/override/overlayfs/p_ovl_create_or_link/p_ovl_create_or_link.h +new file mode 100644 +index 000000000000..ba1d838a91ac +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/override/overlayfs/p_ovl_create_or_link/p_ovl_create_or_link.h +@@ -0,0 +1,39 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'ovl_create_or_link' function ++ * ++ * Notes: ++ * - We are maintianing Red-Black tree of pid's for Exploit Detection feature. ++ * When process calls 'ovl_create_or_link', we need to correctly handle ++ * this situation and adjust 'off' flag ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 28.III.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_OVL_CREATE_OR_LINK_H ++#define P_LKRG_EXPLOIT_DETECTION_OVL_CREATE_OR_LINK_H ++ ++/* per-instance private data */ ++struct p_ovl_create_or_link_data { ++ ktime_t entry_stamp; ++}; ++ ++extern char p_ovl_create_or_link_kretprobe_state; ++ ++int p_ovl_create_or_link_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_ovl_create_or_link_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_ovl_create_or_link_hook(int p_isra); ++void p_uninstall_ovl_create_or_link_hook(void); ++void p_reinit_ovl_create_or_link_kretprobe(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/override/p_override_creds/p_override_creds.c b/security/lkrg/modules/exploit_detection/syscalls/override/p_override_creds/p_override_creds.c +new file 
mode 100644 +index 000000000000..3dc9449e9f3d +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/override/p_override_creds/p_override_creds.c +@@ -0,0 +1,86 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'override_creds' function ++ * ++ * Notes: ++ * - We are maintianing Red-Black tree of pid's for Exploit Detection feature. ++ * When process calls 'override_creds', we need to correctly 'propagate' ++ * this information. ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 10.X.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../p_lkrg_main.h" ++ ++char p_override_creds_kretprobe_state = 0; ++ ++static struct kretprobe p_override_creds_kretprobe = { ++ .kp.symbol_name = "override_creds", ++ .handler = p_override_creds_ret, ++ .entry_handler = p_override_creds_entry, ++ .data_size = sizeof(struct p_override_creds_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++notrace int p_override_creds_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ if (p_is_ed_task(current)) { ++ p_tasks_write_lock(&p_flags); ++ p_ed_validate_current(); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_override_off(p_tmp, 31, p_regs); ++#endif ++ /* ++ * Set temporary 'disable' flag: ++ * - It is possible that this flag is already set. This is not a problem because in ++ * normal scenario task that ends up being here must call 'revert_creds' before ++ * switching to user-mode. This will 'normalize' this flag to the correct state. 
++ */ ++ p_set_ed_process_override_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ } ++ ++ return 0; ++} ++ ++ ++notrace int p_override_creds_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ if (p_is_ed_task(current)) { ++ p_tasks_read_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (p_ed_enforce_pcfi(current, p_tmp, p_regs)) { ++ p_print_log(P_LKRG_CRIT, ++ " Stack pointer corruption (ROP?) - pCFI violation: process[%s | %d] !!!\n", ++ current->comm,task_pid_nr(current)); ++ p_pcfi_kill_task_by_task(current); ++ } ++ } ++ p_tasks_read_unlock(&p_flags); ++ } ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(override_creds) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/override/p_override_creds/p_override_creds.h b/security/lkrg/modules/exploit_detection/syscalls/override/p_override_creds/p_override_creds.h +new file mode 100644 +index 000000000000..4d5df8ecaf56 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/override/p_override_creds/p_override_creds.h +@@ -0,0 +1,36 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'override_creds' function ++ * ++ * Notes: ++ * - We are maintianing Red-Black tree of pid's for Exploit Detection feature. ++ * When process calls 'override_creds', we need to correctly 'propagate' ++ * this information. 
++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 10.X.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_OVERRIDE_CREDS_H ++#define P_LKRG_EXPLOIT_DETECTION_OVERRIDE_CREDS_H ++ ++/* per-instance private data */ ++struct p_override_creds_data { ++ ktime_t entry_stamp; ++}; ++ ++int p_override_creds_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_override_creds_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_override_creds_hook(int p_isra); ++void p_uninstall_override_creds_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/override/p_revert_creds/p_revert_creds.c b/security/lkrg/modules/exploit_detection/syscalls/override/p_revert_creds/p_revert_creds.c +new file mode 100644 +index 000000000000..01cdcf2f1811 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/override/p_revert_creds/p_revert_creds.c +@@ -0,0 +1,80 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'revert_creds' function ++ * ++ * Notes: ++ * - We are maintianing Red-Black tree of pid's for Exploit Detection feature. ++ * When process calls 'rever_creds', we need to correctly handle this situation ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 10.X.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../p_lkrg_main.h" ++ ++char p_revert_creds_kretprobe_state = 0; ++ ++static struct kretprobe p_revert_creds_kretprobe = { ++ .kp.symbol_name = "revert_creds", ++ .handler = p_revert_creds_ret, ++ .entry_handler = p_revert_creds_entry, ++ .data_size = sizeof(struct p_revert_creds_data), ++ /* Probe up to 40 instances concurrently. 
*/ ++ .maxactive = 40, ++}; ++ ++ ++notrace int p_revert_creds_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ if (p_is_ed_task(current)) { ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ p_debug_off_flag_override_on(p_tmp, 32, p_regs); ++ } ++ p_tasks_write_unlock(&p_flags); ++ } ++#endif ++ return 0; ++} ++ ++ ++notrace int p_revert_creds_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ if (p_is_ed_task(current)) { ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ p_set_ed_process_override_on(p_tmp); ++ if (p_ed_enforce_pcfi(current, p_tmp, p_regs)) { ++ p_print_log(P_LKRG_CRIT, ++ " Stack pointer corruption (ROP?) - pCFI violation: process[%s | %d] !!!\n", ++ current->comm,task_pid_nr(current)); ++ p_pcfi_kill_task_by_task(current); ++ } ++ } ++ p_ed_validate_current(); ++ p_tasks_write_unlock(&p_flags); ++ } ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(revert_creds) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/override/p_revert_creds/p_revert_creds.h b/security/lkrg/modules/exploit_detection/syscalls/override/p_revert_creds/p_revert_creds.h +new file mode 100644 +index 000000000000..8404de9c9e4e +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/override/p_revert_creds/p_revert_creds.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'revert_creds' function ++ * ++ * Notes: ++ * - We are maintianing Red-Black tree of pid's for Exploit Detection feature. 
++ * When process calls 'rever_creds', we need to correctly handle this situation ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 10.X.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_REVERT_CREDS_H ++#define P_LKRG_EXPLOIT_DETECTION_REVERT_CREDS_H ++ ++/* per-instance private data */ ++struct p_revert_creds_data { ++ ktime_t entry_stamp; ++}; ++ ++int p_revert_creds_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_revert_creds_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_revert_creds_hook(int p_isra); ++void p_uninstall_revert_creds_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/pCFI/p___queue_work/p___queue_work.c b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p___queue_work/p___queue_work.c +new file mode 100644 +index 000000000000..7596d323509e +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p___queue_work/p___queue_work.c +@@ -0,0 +1,73 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept '____queue_work' function ++ * ++ * Notes: ++ * - Enforce Exploit Detection CFI ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 20.XI.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../p_lkrg_main.h" ++ ++ ++char p_pcfi___queue_work_kretprobe_state = 0; ++ ++static struct kretprobe p_pcfi___queue_work_kretprobe = { ++ .kp.symbol_name = "__queue_work", ++ .handler = p_pcfi___queue_work_ret, ++ .entry_handler = p_pcfi___queue_work_entry, ++ .data_size = sizeof(struct p_pcfi___queue_work_data), ++ /* Probe up to 40 instances concurrently. 
*/ ++ .maxactive = 40, ++}; ++ ++/* ++ * x86-64 syscall ABI: ++ * *rax - syscall_number ++ * rdi - 1st argument ++ * rsi - 2nd argument ++ * rdx - 3rd argument ++ * rcx - 4th argument ++ * ++ * r8 - 5th one ++ * r9 - 6th one ++ */ ++ ++int p_pcfi___queue_work_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ unsigned long p_flags; ++ ++ p_ed_pcfi_cpu(0); ++ ++ if (p_is_ed_task(current)) { ++ /* Do not take ED lock */ ++ if (p_tasks_read_trylock(&p_flags)) { ++ p_ed_validate_current(); ++ p_tasks_read_unlock(&p_flags); ++ } ++ } ++ ++ return 0; ++} ++ ++ ++int p_pcfi___queue_work_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(pcfi___queue_work) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/pCFI/p___queue_work/p___queue_work.h b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p___queue_work/p___queue_work.h +new file mode 100644 +index 000000000000..5e99281eab7b +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p___queue_work/p___queue_work.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept '__queue_work' function ++ * ++ * Notes: ++ * - Enforce Exploit Detection pCFI ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 20.II.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_PCFI___QUEUE_WORK_H ++#define P_LKRG_EXPLOIT_DETECTION_PCFI___QUEUE_WORK_H ++ ++/* per-instance private data */ ++struct p_pcfi___queue_work_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_pcfi___queue_work_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_pcfi___queue_work_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_pcfi___queue_work_hook(int p_isra); ++void p_uninstall_pcfi___queue_work_hook(void); ++ ++#endif +diff --git 
a/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_lookup_fast/p_lookup_fast.c b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_lookup_fast/p_lookup_fast.c +new file mode 100644 +index 000000000000..34a713e36f27 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_lookup_fast/p_lookup_fast.c +@@ -0,0 +1,82 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'lookup_fast' function ++ * ++ * Notes: ++ * - Enforce Exploit Detection CFI ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 20.XI.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../p_lkrg_main.h" ++ ++ ++char p_pcfi_lookup_fast_kretprobe_state = 0; ++ ++static struct kretprobe p_pcfi_lookup_fast_kretprobe = { ++ .kp.symbol_name = "lookup_fast", ++ .handler = p_pcfi_lookup_fast_ret, ++ .entry_handler = p_pcfi_lookup_fast_entry, ++ .data_size = sizeof(struct p_pcfi_lookup_fast_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++/* ++ * x86-64 syscall ABI: ++ * *rax - syscall_number ++ * rdi - 1st argument ++ * rsi - 2nd argument ++ * rdx - 3rd argument ++ * rcx - 4th argument ++ * ++ * r8 - 5th one ++ * r9 - 6th one ++ */ ++ ++int p_pcfi_lookup_fast_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_pcfi_cpu(1); ++ ++// p_ed_enforce_validation(); ++ ++ if (p_is_ed_task(current)) { ++ p_tasks_read_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (p_ed_pcfi_validate_sp(current, p_tmp, p_regs_get_sp(p_regs))) { ++ p_print_log(P_LKRG_CRIT, ++ " Stack pointer corruption (ROP?) - pCFI violation: process[%s | %d] !!!\n", ++ current->comm,task_pid_nr(current)); ++ // kill this process! 
++ p_pcfi_kill_task_by_task(current); ++ } ++ } ++ p_tasks_read_unlock(&p_flags); ++ } ++ ++ return 0; ++} ++ ++ ++int p_pcfi_lookup_fast_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(pcfi_lookup_fast) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_lookup_fast/p_lookup_fast.h b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_lookup_fast/p_lookup_fast.h +new file mode 100644 +index 000000000000..0ff90d940906 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_lookup_fast/p_lookup_fast.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'lookup_fast' function ++ * ++ * Notes: ++ * - Enforce Exploit Detection pCFI ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 20.II.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_PCFI_LOOKUP_FAST_H ++#define P_LKRG_EXPLOIT_DETECTION_PCFI_LOOKUP_FAST_H ++ ++/* per-instance private data */ ++struct p_pcfi_lookup_fast_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_pcfi_lookup_fast_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_pcfi_lookup_fast_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_pcfi_lookup_fast_hook(int p_isra); ++void p_uninstall_pcfi_lookup_fast_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_mark_inode_dirty/p_mark_inode_dirty.c b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_mark_inode_dirty/p_mark_inode_dirty.c +new file mode 100644 +index 000000000000..2458f4d06104 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_mark_inode_dirty/p_mark_inode_dirty.c +@@ -0,0 +1,110 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept '__mark_inode_dirty' function ++ * ++ 
* Notes: ++ * - Enforce Exploit Detection CFI ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 20.XI.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../p_lkrg_main.h" ++ ++ ++char p_pcfi_mark_inode_dirty_kretprobe_state = 0; ++ ++static struct kretprobe p_pcfi_mark_inode_dirty_kretprobe = { ++ .kp.symbol_name = "__mark_inode_dirty", ++ .handler = p_pcfi_mark_inode_dirty_ret, ++ .entry_handler = p_pcfi_mark_inode_dirty_entry, ++ .data_size = sizeof(struct p_pcfi_mark_inode_dirty_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++/* ++ * x86-64 syscall ABI: ++ * *rax - syscall_number ++ * rdi - 1st argument ++ * rsi - 2nd argument ++ * rdx - 3rd argument ++ * rcx - 4th argument ++ * ++ * r8 - 5th one ++ * r9 - 6th one ++ */ ++ ++int p_pcfi_mark_inode_dirty_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_pcfi_cpu(1); ++ ++// p_ed_enforce_validation(); ++ ++ if (p_is_ed_task(current)) { ++ p_tasks_read_lock(&p_flags); ++ ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (p_ed_enforce_pcfi(current, p_tmp, p_regs)) { ++ struct inode *p_inode = (struct inode *)p_regs_get_arg1(p_regs); ++#if 0 ++ struct dentry *p_dentry = NULL; ++ ++ if ( (p_dentry = d_find_alias(p_inode)) != NULL) { ++ char *p_buf; ++ if ( (p_buf = kmalloc(PAGE_SIZE << 1, GFP_ATOMIC)) != NULL) { ++ char *p_tmp = dentry_path_raw(p_dentry, p_buf, PAGE_SIZE << 1); ++ if (!IS_ERR(p_tmp)) { ++ p_print_log(P_LKRG_CRIT, " " ++ "CFI failed during access to path [%s]\n",p_tmp); ++ } else { ++ p_print_log(P_LKRG_CRIT, " " ++ "CFI failed during access to path which LKRG can't extract :(\n"); ++ } ++ kfree(p_buf); ++ } else { ++ p_print_log(P_LKRG_CRIT, " " ++ "CFI failed during access to path which LKRG can't extract :(\n"); ++ } ++ dput(p_dentry); ++ } else { ++ p_print_log(P_LKRG_CRIT, " " ++ 
"CFI failed during access to path which LKRG can't extract :(\n"); ++ } ++#endif ++ p_print_log(P_LKRG_CRIT, " Path's inode[%lu] mode[0%o] will be isolated!\n", ++ p_inode->i_ino,p_inode->i_mode); ++ ++ p_set_uid(&p_inode->i_uid, 65534); ++ p_set_gid(&p_inode->i_gid, 65534); ++// p_inode->i_mode = 0; ++ p_inode->i_mode = 0x0 | S_IFREG; ++ } ++ } ++ p_tasks_read_unlock(&p_flags); ++ } ++ ++ return 0; ++} ++ ++int p_pcfi_mark_inode_dirty_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(pcfi_mark_inode_dirty) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_mark_inode_dirty/p_mark_inode_dirty.h b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_mark_inode_dirty/p_mark_inode_dirty.h +new file mode 100644 +index 000000000000..aa8611fcd77c +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_mark_inode_dirty/p_mark_inode_dirty.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept '__mark_inode_dirty' function ++ * ++ * Notes: ++ * - Enforce Exploit Detection pCFI ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 20.XI.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_PCFI_MARK_INODE_DIRTY_H ++#define P_LKRG_EXPLOIT_DETECTION_PCFI_MARK_INODE_DIRTY_H ++ ++/* per-instance private data */ ++struct p_pcfi_mark_inode_dirty_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_pcfi_mark_inode_dirty_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_pcfi_mark_inode_dirty_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_pcfi_mark_inode_dirty_hook(int p_isra); ++void p_uninstall_pcfi_mark_inode_dirty_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_schedule/p_schedule.c 
b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_schedule/p_schedule.c +new file mode 100644 +index 000000000000..11be73f9174b +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_schedule/p_schedule.c +@@ -0,0 +1,82 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept '__schedule' function ++ * ++ * Notes: ++ * - Enforce Exploit Detection CFI ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 20.XI.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../../p_lkrg_main.h" ++ ++ ++char p_pcfi_schedule_kretprobe_state = 0; ++ ++static struct kretprobe p_pcfi_schedule_kretprobe = { ++ .kp.symbol_name = "schedule", ++ .handler = p_pcfi_schedule_ret, ++ .entry_handler = p_pcfi_schedule_entry, ++ .data_size = sizeof(struct p_pcfi_schedule_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++/* ++ * x86-64 syscall ABI: ++ * *rax - syscall_number ++ * rdi - 1st argument ++ * rsi - 2nd argument ++ * rdx - 3rd argument ++ * rcx - 4th argument ++ * ++ * r8 - 5th one ++ * r9 - 6th one ++ */ ++ ++int p_pcfi_schedule_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_pcfi_cpu(1); ++ ++// p_ed_enforce_validation(); ++ ++ if (p_is_ed_task(current)) { ++ p_tasks_read_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (p_ed_enforce_pcfi(current, p_tmp, p_regs)) { ++ p_print_log(P_LKRG_CRIT, ++ " Stack pointer corruption (ROP?) - pCFI violation: process[%s | %d] !!!\n", ++ current->comm,task_pid_nr(current)); ++ // kill this process! 
++ p_pcfi_kill_task_by_task(current); ++ } ++ } ++ p_tasks_read_unlock(&p_flags); ++ } ++ ++ return 0; ++} ++ ++ ++int p_pcfi_schedule_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(pcfi_schedule) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_schedule/p_schedule.h b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_schedule/p_schedule.h +new file mode 100644 +index 000000000000..95f928a57b6b +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/pCFI/p_schedule/p_schedule.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'schedule' function ++ * ++ * Notes: ++ * - Enforce Exploit Detection pCFI ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 19.XII.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_PCFI_SCHEDULE_H ++#define P_LKRG_EXPLOIT_DETECTION_PCFI_SCHEDULE_H ++ ++/* per-instance private data */ ++struct p_pcfi_schedule_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_pcfi_schedule_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_pcfi_schedule_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_pcfi_schedule_hook(int p_isra); ++void p_uninstall_pcfi_schedule_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_call_usermodehelper/p_call_usermodehelper.c b/security/lkrg/modules/exploit_detection/syscalls/p_call_usermodehelper/p_call_usermodehelper.c +new file mode 100644 +index 000000000000..ac2a7377f31e +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_call_usermodehelper/p_call_usermodehelper.c +@@ -0,0 +1,159 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept *call_usermodehelper* function ++ * ++ * Notes: ++ * - We are maintianing 
Red-Black tree of pid's for Exploit Detection feature. ++ * When kernel calls user-mode helper, we need to update RB tree. ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 12.II.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++char p_call_usermodehelper_kretprobe_state = 0; ++ ++static struct kretprobe p_call_usermodehelper_kretprobe = { ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0) ++ .kp.symbol_name = "____call_usermodehelper", ++#else ++ .kp.symbol_name = "call_usermodehelper_exec_async", ++#endif ++ .handler = p_call_usermodehelper_ret, ++ .entry_handler = p_call_usermodehelper_entry, ++ .data_size = sizeof(struct p_call_usermodehelper_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++static const char * const p_umh_global[] = { ++ UMH_UNIFIFW ++ UMH_LNET_UPCALL ++ UMH_OSD_LOGIN ++ "/bin/false", ++ "/bin/true", ++ "/etc/acpi/events/RadioPower.sh", ++ "/etc/acpi/wireless-rtl-ac-dc-power.sh", ++ "/lib/systemd/systemd-cgroups-agent", ++ "/sbin/bridge-stp", ++ "/sbin/critical_overtemp", ++ "/sbin/drbdadm", ++ "/sbin/hotplug", ++ "/sbin/modprobe", ++ "/sbin/nfs_cache_getent", ++ "/sbin/nfsd-recall-failed", ++ "/sbin/nfsdcltrack", ++ "/sbin/ocfs2_hb_ctl", ++ "/sbin/pnpbios", ++ "/sbin/poweroff", ++ "/sbin/request-key", ++ "/sbin/tomoyo-init", ++ "/sbin/v86d", ++ "/system/bin/start", ++ "/usr/lib/systemd/systemd-cgroups-agent", ++ "/usr/lib/systemd/systemd-coredump", ++ "/usr/sbin/eppfpga", ++ "/usr/share/apport/apport", ++}; ++ ++int p_call_usermodehelper_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ struct subprocess_info *p_subproc = (struct subprocess_info *)p_regs_get_arg1(p_regs); ++ unsigned char p_umh_allowed = 0; ++ unsigned long p_flags; ++ size_t i; ++ ++ p_ed_enforce_validation(); ++ ++ if (!P_CTRL(p_umh_validate)) { ++ goto p_call_usermodehelper_entry_out; ++ } else 
if (P_CTRL(p_umh_validate) == 2) { ++ goto p_call_usermodehelper_entry_not_allowed; ++ } ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ // This process is on the ED list - set temporary 'disable' flag! ++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ for (i = 0; i < nitems(p_umh_global); i++) { ++ if (!strcmp(p_umh_global[i], p_subproc->path)) { ++ p_umh_allowed = 1; ++ break; ++ } ++ } ++ ++#if defined(CONFIG_STATIC_USERMODEHELPER) ++ if (!strcmp(CONFIG_STATIC_USERMODEHELPER_PATH,p_subproc->path)) { ++ p_umh_allowed = 1; ++ } ++#endif ++ ++ if (!p_umh_allowed) { ++p_call_usermodehelper_entry_not_allowed: ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0) && LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0) \ ++ && !(defined(RHEL_RELEASE_CODE) && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8, 3)) ++ if (!strcmp("none",p_subproc->path) && p_subproc->file) { ++ p_print_log(P_LKRG_ERR, ++ " UMH is executing file from memory...\n"); ++ } else { ++#endif ++ switch (P_CTRL(p_umh_enforce)) { ++ ++ /* Panic */ ++ case 2: ++ panic(P_LKRG_SIGNATURE ++ "Blocked usermodehelper execution of [%s]\n", ++ p_subproc->path); ++ break; ++ ++ /* Prevent execution */ ++ case 1: ++ p_print_log(P_LKRG_CRIT, ++ "Blocked usermodehelper execution of [%s]\n", ++ p_subproc->path); ++ p_force_sig(SIGKILL); ++ break; ++ ++ /* Log only */ ++ case 0: ++ p_print_log(P_LKRG_CRIT, ++ "Detected usermodehelper execution of [%s]\n", ++ p_subproc->path); ++ break; ++ ++ } ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0) && LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0) \ ++ && !(defined(RHEL_RELEASE_CODE) && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8, 3)) ++ } ++#endif ++ } ++ ++p_call_usermodehelper_entry_out: ++ ++ return 0; ++} ++ ++ ++int p_call_usermodehelper_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(call_usermodehelper) 
+diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_call_usermodehelper/p_call_usermodehelper.h b/security/lkrg/modules/exploit_detection/syscalls/p_call_usermodehelper/p_call_usermodehelper.h +new file mode 100644 +index 000000000000..c1921c2c3e21 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_call_usermodehelper/p_call_usermodehelper.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept *call_usermodehelper* function ++ * ++ * Notes: ++ * - We are maintaining Red-Black tree of pid's for Exploit Detection feature. ++ * When kernel calls user-mode helper, we need to update RB tree. ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 12.II.2018 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_CALL_USERMODEHELPER_H ++#define P_LKRG_EXPLOIT_DETECTION_CALL_USERMODEHELPER_H ++ ++/* per-instance private data */ ++struct p_call_usermodehelper_data { ++ ktime_t entry_stamp; ++}; ++ ++int p_call_usermodehelper_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_call_usermodehelper_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_call_usermodehelper_hook(int p_isra); ++void p_uninstall_call_usermodehelper_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_call_usermodehelper/p_usermode_kernel_dep.h b/security/lkrg/modules/exploit_detection/syscalls/p_call_usermodehelper/p_usermode_kernel_dep.h +new file mode 100644 +index 000000000000..5de74779a6e4 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_call_usermodehelper/p_usermode_kernel_dep.h +@@ -0,0 +1,51 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Dependencies for kernel's UMH ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 14.V.2020 ++ * ++ * Author: ++ * - Mariusz Zaborski ++
* ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_USERMODEHELPER_KERNEL_DEP_H ++#define P_LKRG_EXPLOIT_DETECTION_USERMODEHELPER_KERNEL_DEP_H ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) ++ #ifdef ANDROID_BUILD ++ #define UMH_UNIFIFW "/system/bin/unififw", ++ #else ++ #define UMH_UNIFIFW "/usr/sbin/unififw", ++ #endif /* ANDROID_BUILD */ ++#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0) ++ #define UMH_LNET_UPCALL "/usr/lib/lustre/lnet_upcall", \ ++ "/usr/lib/lustre/lnet_debug_log_upcall", ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0) ++ #define UMH_OSD_LOGIN "/sbin/osd_login", ++#endif ++ ++#ifndef UMH_UNIFIFW ++#define UMH_UNIFIFW ++#endif ++ ++#ifndef UMH_LNET_UPCALL ++#define UMH_LNET_UPCALL ++#endif ++ ++#ifndef UMH_OSD_LOGIN ++#define UMH_OSD_LOGIN ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_call_usermodehelper_exec/p_call_usermodehelper_exec.c b/security/lkrg/modules/exploit_detection/syscalls/p_call_usermodehelper_exec/p_call_usermodehelper_exec.c +new file mode 100644 +index 000000000000..4529c9b775e2 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_call_usermodehelper_exec/p_call_usermodehelper_exec.c +@@ -0,0 +1,66 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept *call_usermodehelper_exec* function ++ * ++ * Notes: ++ * - We are maintianing Red-Black tree of pid's for Exploit Detection feature. 
++ * When kernel calls user-mode helper, we need to verify task's integrity ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 01.IV.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_call_usermodehelper_exec_kretprobe_state = 0; ++ ++static struct kretprobe p_call_usermodehelper_exec_kretprobe = { ++ .kp.symbol_name = "call_usermodehelper_exec", ++ .handler = p_call_usermodehelper_exec_ret, ++ .entry_handler = p_call_usermodehelper_exec_entry, ++ .data_size = sizeof(struct p_call_usermodehelper_exec_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++int p_call_usermodehelper_exec_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_pcfi_cpu(1); ++ ++ if (p_is_ed_task(current)) { ++ p_tasks_read_lock(&p_flags); ++ p_ed_validate_current(); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (p_ed_enforce_pcfi(current, p_tmp, p_regs)) { ++ p_print_log(P_LKRG_CRIT, ++ " Stack pointer corruption (ROP?) 
- pCFI violation: process[%s | %d] !!!\n", ++ current->comm,task_pid_nr(current)); ++ p_pcfi_kill_task_by_task(current); ++ } ++ } ++ p_tasks_read_unlock(&p_flags); ++ } ++ ++ return 0; ++} ++ ++int p_call_usermodehelper_exec_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(call_usermodehelper_exec) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_call_usermodehelper_exec/p_call_usermodehelper_exec.h b/security/lkrg/modules/exploit_detection/syscalls/p_call_usermodehelper_exec/p_call_usermodehelper_exec.h +new file mode 100644 +index 000000000000..73ecf3d32a79 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_call_usermodehelper_exec/p_call_usermodehelper_exec.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept *call_usermodehelper_exec* function ++ * ++ * Notes: ++ * - We are maintianing Red-Black tree of pid's for Exploit Detection feature. 
++ * When kernel calls user-mode helper, we need to verify task's integrity ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 01.IV.2019 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_CALL_USERMODEHELPER_EXEC_H ++#define P_LKRG_EXPLOIT_DETECTION_CALL_USERMODEHELPER_EXEC_H ++ ++/* per-instance private data */ ++struct p_call_usermodehelper_exec_data { ++ ktime_t entry_stamp; ++}; ++ ++int p_call_usermodehelper_exec_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_call_usermodehelper_exec_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_call_usermodehelper_exec_hook(int p_isra); ++void p_uninstall_call_usermodehelper_exec_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_capable/p_capable.c b/security/lkrg/modules/exploit_detection/syscalls/p_capable/p_capable.c +new file mode 100644 +index 000000000000..05ab66375058 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_capable/p_capable.c +@@ -0,0 +1,81 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'capable' function ++ * ++ * Notes: ++ * - Enforce Exploit Detection validation ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 22.III.2020 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_capable_kretprobe_state = 0; ++ ++static struct kretprobe p_capable_kretprobe = { ++ .kp.symbol_name = "security_capable", ++ .handler = p_capable_ret, ++ .entry_handler = p_capable_entry, ++ .data_size = sizeof(struct p_capable_data), ++ /* Probe up to 40 instances concurrently. 
*/ ++ .maxactive = 40, ++}; ++ ++/* ++ * x86-64 syscall ABI: ++ * *rax - syscall_number ++ * rdi - 1st argument ++ * rsi - 2nd argument ++ * rdx - 3rd argument ++ * rcx - 4th argument ++ * ++ * r8 - 5th one ++ * r9 - 6th one ++ */ ++ ++int p_capable_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp = NULL; ++ unsigned long p_flags; ++ ++ p_ed_pcfi_cpu(1); ++ ++// p_ed_enforce_validation(); ++ ++ if (p_is_ed_task(current)) { ++ p_tasks_read_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (p_ed_enforce_pcfi(current, p_tmp, p_regs)) { ++ p_print_log(P_LKRG_CRIT, ++ " Stack pointer corruption (ROP?) - pCFI violation: process[%s | %d] !!!\n", ++ current->comm,task_pid_nr(current)); ++ // kill this process! ++ p_pcfi_kill_task_by_task(current); ++ } ++ } ++ p_ed_validate_current(); ++ p_tasks_read_unlock(&p_flags); ++ } ++ ++ return 0; ++} ++ ++ ++int p_capable_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(capable) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_capable/p_capable.h b/security/lkrg/modules/exploit_detection/syscalls/p_capable/p_capable.h +new file mode 100644 +index 000000000000..db0633d1004a +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_capable/p_capable.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'capable' function ++ * ++ * Notes: ++ * - Enforce Exploit Detection validation ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 22.III.2020 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_CAPABLE_H ++#define P_LKRG_EXPLOIT_DETECTION_CAPABLE_H ++ ++/* per-instance private data */ ++struct p_capable_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_capable_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int 
p_capable_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_capable_hook(int p_isra); ++void p_uninstall_capable_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_do_exit/p_do_exit.c b/security/lkrg/modules/exploit_detection/syscalls/p_do_exit/p_do_exit.c +new file mode 100644 +index 000000000000..4d032f87b03d +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_do_exit/p_do_exit.c +@@ -0,0 +1,65 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept exit syscall ++ * ++ * Notes: ++ * - We are maintaining Red-Black tree of pid's for Exploit Detection feature. ++ * When process dies/exits we need to update RB tree. ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 18.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_do_exit_kretprobe_state = 0; ++ ++static struct kretprobe p_do_exit_kretprobe = { ++ .kp.symbol_name = "do_exit", ++ .handler = p_do_exit_ret, ++ .entry_handler = p_do_exit_entry, ++ .data_size = sizeof(struct p_do_exit_data), ++ /* Probe up to 40 instances concurrently. 
*/ ++ .maxactive = 40, ++}; ++ ++ ++int p_do_exit_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "p_do_exit_entry: comm[%s] Pid:%d\n",current->comm,current->pid); ++ ++// p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if (p_remove_task_pid_f(task_pid_nr(current))) { ++ ;// DEBUG: p_print_log(P_LKRG_CRIT, "Can't remove ED pid (is not on the list) => %d [%s]\n",task_pid_nr(current),current->comm); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ /* A dump_stack() here will give a stack backtrace */ ++ return 0; ++} ++ ++ ++int p_do_exit_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(do_exit) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_do_exit/p_do_exit.h b/security/lkrg/modules/exploit_detection/syscalls/p_do_exit/p_do_exit.h +new file mode 100644 +index 000000000000..2d72582f55a9 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_do_exit/p_do_exit.h +@@ -0,0 +1,36 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept exit syscall ++ * ++ * Notes: ++ * - We are maintaining Red-Black tree of pid's for Exploit Detection feature. ++ * When process dies/exits we need to update RB tree.
++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 18.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_DO_EXIT_H ++#define P_LKRG_EXPLOIT_DETECTION_DO_EXIT_H ++ ++/* per-instance private data */ ++struct p_do_exit_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_do_exit_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_do_exit_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_do_exit_hook(int p_isra); ++void p_uninstall_do_exit_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_generic_permission/p_generic_permission.c b/security/lkrg/modules/exploit_detection/syscalls/p_generic_permission/p_generic_permission.c +new file mode 100644 +index 000000000000..ec094ecc97fb +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_generic_permission/p_generic_permission.c +@@ -0,0 +1,98 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'generic_permission' function ++ * ++ * Notes: ++ * - Enforce Exploit Detection validation ++ * ++ * Caveats: ++ * - Originally, this file was placing hooks on 'may_open' function. ++ * Unfortunately, GCC (8+) might enable ISRA optimization when -Ox ++ * switch was used. During kernel compilation it is usually enabled, ++ * and as a side effect we have ISRA optimization as well ++ * (undesired for LKRG). ISRA performs interprocedural scalar ++ * replacement of aggregates, removal of unused parameters and ++ * replacement of parameters passed by reference by parameters passed ++ * by value. Since it's a very invasive modification ISRA changes ++ * symbol name of the functions which was modified. ++ * Alexander (Solar Designer) pointed out that in fact we could hook ++ * inode_permission() instead. 
This function is exported and ++ * additionally, we'll improve our coverage since it is called on more ++ * cases than 'may_open', including things such as permission checks ++ * on creating/removing of directories, (un)linking of files, and ++ * searching for files in directories with restricted permissions. ++ * LKRG hooks 'generic_permission' since this function is also exported ++ * and is called by inode_permission() after various checks have been ++ * made. It is also called in a few other specialized cases. ++ * ++ * Timeline: ++ * - Replace 'may_open' with 'generic_permission': 17.IX.2018 ++ * - Created: 04.X.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_generic_permission_kretprobe_state = 0; ++ ++static struct kretprobe p_generic_permission_kretprobe = { ++ .kp.symbol_name = "generic_permission", ++ .handler = p_generic_permission_ret, ++ .entry_handler = p_generic_permission_entry, ++ .data_size = sizeof(struct p_generic_permission_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++/* ++ * x86-64 syscall ABI: ++ * *rax - syscall_number ++ * rdi - 1st argument ++ * rsi - 2nd argument ++ * rdx - 3rd argument ++ * rcx - 4th argument ++ * ++ * r8 - 5th one ++ * r9 - 6th one ++ */ ++ ++int p_generic_permission_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp = NULL; ++ unsigned long p_flags; ++ ++ p_ed_pcfi_cpu(1); ++ ++ if (p_is_ed_task(current)) { ++ p_tasks_read_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (p_ed_enforce_pcfi(current, p_tmp, p_regs)) { ++ p_print_log(P_LKRG_CRIT, ++ " Stack pointer corruption (ROP?) - pCFI violation: process[%s | %d] !!!\n", ++ current->comm,task_pid_nr(current)); ++ // kill this process! 
++ p_pcfi_kill_task_by_task(current); ++ } ++ p_verify_addr_limit(p_tmp, current); ++ } ++ p_ed_validate_current(); ++ p_tasks_read_unlock(&p_flags); ++ } ++ ++ return 0; ++} ++ ++ ++int p_generic_permission_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(generic_permission) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_generic_permission/p_generic_permission.h b/security/lkrg/modules/exploit_detection/syscalls/p_generic_permission/p_generic_permission.h +new file mode 100644 +index 000000000000..58e37beab867 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_generic_permission/p_generic_permission.h +@@ -0,0 +1,53 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'generic_permission' function ++ * ++ * Notes: ++ * - Enforce Exploit Detection validation ++ * ++ * Caveats: ++ * - Originally, this file was placing hooks on 'may_open' function. ++ * Unfortunately, GCC (8+) might enable ISRA optimization when -Ox ++ * switch was used. During kernel compilation it is usually enabled, ++ * and as a side effect we have ISRA optimization as well ++ * (undesired for LKRG). ISRA performs interprocedural scalar ++ * replacement of aggregates, removal of unused parameters and ++ * replacement of parameters passed by reference by parameters passed ++ * by value. Since it's a very invasive modification ISRA changes ++ * symbol name of the functions which was modified. ++ * Alexander (Solar Designer) pointed out that in fact we could hook ++ * inode_permission() instead. This function is exported and ++ * additionally, we'll improve our coverage since it is called on more ++ * cases than 'may_open', including things such as permission checks ++ * on creating/removing of directories, (un)linking of files, and ++ * searching for files in directories with restricted permissions. 
++ * LKRG hooks 'generic_permission' since this function is also exported ++ * and is called by inode_permission() after various checks have been ++ * made. It is also called in a few other specialized cases. ++ * ++ * Timeline: ++ * - Replace 'may_open' with 'generic_permission': 17.IX.2018 ++ * - Created: 04.X.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_GENERIC_PERMISSION_H ++#define P_LKRG_EXPLOIT_DETECTION_GENERIC_PERMISSION_H ++ ++/* per-instance private data */ ++struct p_generic_permission_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_generic_permission_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_generic_permission_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_generic_permission_hook(int p_isra); ++void p_uninstall_generic_permission_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_install.c b/security/lkrg/modules/exploit_detection/syscalls/p_install.c +new file mode 100644 +index 000000000000..21f96a6ce1fe +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_install.c +@@ -0,0 +1,90 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Generic install and uninstall functions ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 4.V.2020 ++ * ++ * Author: ++ * - Mariusz Zaborski (https://oshogbo.vexillium.org/) ++ * ++ */ ++ ++#include "../../../p_lkrg_main.h" ++ ++int p_install_hook(struct kretprobe *kretprobe, char *state, int p_isra) { ++ ++ int p_ret; ++ const char *p_name = kretprobe->kp.symbol_name; ++ struct p_isra_argument p_isra_arg; ++ ++ if ( (p_ret = register_kretprobe(kretprobe)) < 0) { ++ if (p_isra && p_ret == -22) { ++ p_print_log(P_LKRG_WARN, "[kretprobe] register_kretprobe() for <%s> failed! 
[err=%d]\n", ++ p_name, p_ret); ++ p_print_log(P_LKRG_WARN, "Trying to find ISRA / CONSTPROP name for <%s>\n",p_name); ++ p_isra_arg.p_name = p_name; ++ p_isra_arg.p_isra_name = NULL; ++ if (p_try_isra_name(&p_isra_arg)) { ++ p_name = kretprobe->kp.symbol_name = p_isra_arg.p_isra_name; ++ if ( (p_ret = register_kretprobe(kretprobe)) < 0) { ++ p_print_log(P_LKRG_ERR, "[kretprobe] register_kretprobe() for <%s> failed! [err=%d]\n", ++ p_name, p_ret); ++ return p_ret; ++ } ++ p_print_log(P_LKRG_WARN, ++ "ISRA / CONSTPROP version was found and hook was planted at <%s>\n", ++ p_name); ++ (*state)++; ++ } else { ++ p_print_log(P_LKRG_ERR, ++ "[kretprobe] register_kretprobe() for %s failed and ISRA / CONSTPROP version not found!\n", ++ p_isra_arg.p_name); ++ return p_ret; ++ } ++ } else { ++ p_print_log(P_LKRG_ERR, "[kretprobe] register_kretprobe() for <%s> failed! [err=%d]\n", ++ p_name, p_ret); ++ return p_ret; ++ } ++ } ++ ++ p_print_log(P_LKRG_INFO, "Planted [kretprobe] <%s> at: 0x%lx\n", ++ p_name, ++ (unsigned long)kretprobe->kp.addr); ++ (*state)++; ++ ++ return p_ret; ++} ++ ++void p_uninstall_hook(struct kretprobe *kretprobe, char *state) { ++ ++ const char *p_name = kretprobe->kp.symbol_name; ++ ++ if (!*state) { ++ p_print_log(P_LKRG_INFO, "[kretprobe] <%s> at 0x%lx is NOT installed\n", ++ p_name, ++ (unsigned long)kretprobe->kp.addr); ++ } else { ++ unregister_kretprobe(kretprobe); ++ p_print_log(P_LKRG_INFO, "Removing [kretprobe] <%s> at 0x%lx nmissed[%d]\n", ++ p_name, ++ (unsigned long)kretprobe->kp.addr, ++ kretprobe->nmissed); ++ if (*state == 2) { ++ // Free ISRA name buffer ++ p_print_log(P_LKRG_INFO, "Freeing ISRA / CONSTPROP buffer[0x%lx]\n", ++ (unsigned long)kretprobe->kp.symbol_name); ++ kfree(kretprobe->kp.symbol_name); ++ } ++ *state = 0; ++ } ++} +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_install.h b/security/lkrg/modules/exploit_detection/syscalls/p_install.h +new file mode 100644 +index 000000000000..fc226ba0d1cd +--- 
/dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_install.h +@@ -0,0 +1,37 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Generic install and uninstall functions ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 4.V.2020 ++ * ++ * Author: ++ * - Mariusz Zaborski (https://oshogbo.vexillium.org/) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_INSTALL_H ++#define P_LKRG_EXPLOIT_DETECTION_INSTALL_H ++ ++ ++int p_install_hook(struct kretprobe *kretprobe, char *state, int p_isra); ++void p_uninstall_hook(struct kretprobe *kretprobe, char *state); ++ ++#define GENERATE_INSTALL_FUNC(name) \ ++ int p_install_##name##_hook(int p_isra) { \ ++ return p_install_hook(&p_##name##_kretprobe, &p_##name##_kretprobe_state, p_isra); \ ++ } \ ++ \ ++ void p_uninstall_##name##_hook(void) { \ ++ return p_uninstall_hook(&p_##name##_kretprobe, &p_##name##_kretprobe_state); \ ++ } ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_scm_send/p_scm_send.c b/security/lkrg/modules/exploit_detection/syscalls/p_scm_send/p_scm_send.c +new file mode 100644 +index 000000000000..1a048c052869 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_scm_send/p_scm_send.c +@@ -0,0 +1,51 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept '__scm_send' function ++ * ++ * Notes: ++ * - This hook enforces process validation ++ * before SCM_CREDENTIALS is used (for UNIX sockets). 
++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 28.IV.2020 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_scm_send_kretprobe_state = 0; ++ ++static struct kretprobe p_scm_send_kretprobe = { ++ .kp.symbol_name = "__scm_send", ++ .handler = p_scm_send_ret, ++ .entry_handler = p_scm_send_entry, ++ .data_size = sizeof(struct p_scm_send_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_scm_send_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++int p_scm_send_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(scm_send) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_scm_send/p_scm_send.h b/security/lkrg/modules/exploit_detection/syscalls/p_scm_send/p_scm_send.h +new file mode 100644 +index 000000000000..ff6f2aedaccd +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_scm_send/p_scm_send.h +@@ -0,0 +1,36 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept '__scm_send' function ++ * ++ * Notes: ++ * - This hook enforces process validation ++ * before SCM_CREDENTIALS is used (for UNIX sockets). 
++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 28.IV.2020 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SCM_SEND_H ++#define P_LKRG_EXPLOIT_DETECTION_SCM_SEND_H ++ ++/* per-instance private data */ ++struct p_scm_send_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_scm_send_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_scm_send_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_scm_send_hook(int p_isra); ++void p_uninstall_scm_send_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_seccomp/p_seccomp.c b/security/lkrg/modules/exploit_detection/syscalls/p_seccomp/p_seccomp.c +new file mode 100644 +index 000000000000..d5f0359573fb +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_seccomp/p_seccomp.c +@@ -0,0 +1,159 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept SECCOMP policy update ++ * ++ * Notes: ++ * - Process SECCOMP Exploit Detection validation ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 18.XI.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_seccomp_kretprobe_state = 0; ++ ++static struct kretprobe p_seccomp_kretprobe = { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) ++ .kp.symbol_name = "do_seccomp", ++#else ++ .kp.symbol_name = "prctl_set_seccomp", ++#endif ++ .handler = p_seccomp_ret, ++ .entry_handler = p_seccomp_entry, ++ .data_size = sizeof(struct p_seccomp_data), ++ /* Probe up to 40 instances concurrently. 
*/ ++ .maxactive = 40, ++}; ++ ++/* ++ * x86-64 syscall ABI: ++ * *rax - syscall_number ++ * rdi - 1st argument ++ * rsi - 2nd argument ++ * rdx - 3rd argument ++ * rcx - 4th argument ++ * ++ * r8 - 5th one ++ * r9 - 6th one ++ */ ++ ++int p_seccomp_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++// p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ // This process is on the ED list - set temporary 'disable' flag! ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) ++ if (p_regs_get_arg2(p_regs) & SECCOMP_FILTER_FLAG_TSYNC) { // SECCOMP_FILTER_FLAG_TSYNC ++ struct task_struct *p_father = current; ++ struct task_struct *p_threads = p_father; ++ struct p_ed_process *p_child_tmp; ++ ++ p_tmp->p_ed_task.p_sec.flag_sync_thread = 1; ++ ++ rcu_read_lock(); ++ // Available since 3.14.0 ++ for_each_thread(p_father, p_threads) { ++ get_task_struct(p_threads); ++ p_child_tmp = p_find_ed_by_pid(task_pid_nr(p_threads)); ++ if (p_child_tmp) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_child_tmp, 33); ++#endif ++ p_set_ed_process_off(p_child_tmp); ++ } ++ put_task_struct(p_threads); ++ } ++ rcu_read_unlock(); ++ } else { ++#endif ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 33); ++#endif ++ p_set_ed_process_off(p_tmp); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) ++ } ++#endif ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_seccomp_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ char p_update = ((long)p_regs_get_ret(p_regs) >= 0) ? 1 : 0; ++ unsigned long p_flags; ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ // This process is on the ED list - update information! 
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) ++ if (p_tmp->p_ed_task.p_sec.flag_sync_thread) { // SECCOMP_FILTER_FLAG_TSYNC ++ struct task_struct *p_father = current; ++ struct task_struct *p_threads = p_father; ++ struct p_ed_process *p_child_tmp; ++ ++ rcu_read_lock(); ++ // Available since 3.14.0 ++ for_each_thread(p_father, p_threads) { ++ get_task_struct(p_threads); ++ p_child_tmp = p_find_ed_by_pid(task_pid_nr(p_threads)); ++ if (p_child_tmp) { ++ if (p_update) { ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",p_threads->pid); ++ p_update_ed_process(p_child_tmp, p_threads, 0); ++ } ++ if (p_threads != p_father) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_child_tmp, 34); ++#endif ++ p_set_ed_process_on(p_child_tmp); ++ } ++ } ++ put_task_struct(p_threads); ++ } ++ rcu_read_unlock(); ++ } else { ++#endif ++ if (p_update) { ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) ++ } ++#endif ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 34); ++#endif ++ p_set_ed_process_on(p_tmp); ++ // In case of seccomp failing with SECCOMP_FILTER_FLAG_TSYNC flag ++ p_tmp->p_ed_task.p_sec.flag_sync_thread = 0; ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(seccomp) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_seccomp/p_seccomp.h b/security/lkrg/modules/exploit_detection/syscalls/p_seccomp/p_seccomp.h +new file mode 100644 +index 000000000000..032179faefff +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_seccomp/p_seccomp.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept SECCOMP policy update ++ * ++ * Notes: ++ * - Process SECCOMP Exploit Detection validation ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 18.XI.2017 ++ * ++ * Author: ++ * - 
Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SECCOMP_H ++#define P_LKRG_EXPLOIT_DETECTION_SECCOMP_H ++ ++/* per-instance private data */ ++struct p_seccomp_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_seccomp_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_seccomp_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_seccomp_hook(int p_isra); ++void p_uninstall_seccomp_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_security_ptrace_access/p_security_ptrace_access.c b/security/lkrg/modules/exploit_detection/syscalls/p_security_ptrace_access/p_security_ptrace_access.c +new file mode 100644 +index 000000000000..3cfc7432ad1d +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_security_ptrace_access/p_security_ptrace_access.c +@@ -0,0 +1,61 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept ptrace syscall ++ * ++ * Notes: ++ * - Enforce Exploit Detection validation ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 5.XI.2020 ++ * ++ * Author: ++ * - Mariusz Zaborski (https://oshogbo.vexillium.org/) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_security_ptrace_access_kretprobe_state = 0; ++ ++static struct kretprobe p_security_ptrace_access_kretprobe = { ++ .kp.symbol_name = "security_ptrace_access_check", ++ .handler = p_security_ptrace_access_ret, ++ .entry_handler = p_security_ptrace_access_entry, ++ .data_size = sizeof(struct p_security_ptrace_access_data), ++ /* Probe up to 40 instances concurrently. 
*/
++   .maxactive = 40,
++};
++
++int p_security_ptrace_access_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) {
++
++   struct p_ed_process *p_tmp;
++   unsigned long p_flags;
++
++   p_tasks_read_lock(&p_flags);
++   if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) {
++      // This process is on the ED list - validate 'off' flag
++      p_ed_validate_off_flag_wrap(p_tmp);
++   }
++   p_tasks_read_unlock(&p_flags);
++
++   p_ed_enforce_validation();
++
++   return 0;
++}
++
++
++int p_security_ptrace_access_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) {
++
++// p_ed_enforce_validation();
++
++   return 0;
++}
++
++
++GENERATE_INSTALL_FUNC(security_ptrace_access)
+diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_security_ptrace_access/p_security_ptrace_access.h b/security/lkrg/modules/exploit_detection/syscalls/p_security_ptrace_access/p_security_ptrace_access.h
+new file mode 100644
+index 000000000000..b50979467e9d
+--- /dev/null
++++ b/security/lkrg/modules/exploit_detection/syscalls/p_security_ptrace_access/p_security_ptrace_access.h
+@@ -0,0 +1,33 @@
++/*
++ * pi3's Linux kernel Runtime Guard
++ *
++ * Component:
++ *  - Intercept ptrace syscall
++ *
++ * Notes:
++ *  - Enforce Exploit Detection validation
++ *
++ * Caveats:
++ *  - None
++ *
++ * Timeline:
++ *  - Created: 5.XI.2020
++ *
++ * Author:
++ *  - Mariusz Zaborski (https://oshogbo.vexillium.org/)
++ *
++ */
++
++#ifndef P_LKRG_EXPLOIT_DETECTION_SECURITY_PTRACE_ACCESS_H
++#define P_LKRG_EXPLOIT_DETECTION_SECURITY_PTRACE_ACCESS_H
++
++struct p_security_ptrace_access_data {
++   ktime_t entry_stamp;
++};
++
++int p_security_ptrace_access_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs);
++int p_security_ptrace_access_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs);
++int p_install_security_ptrace_access_hook(int p_isra);
++void p_uninstall_security_ptrace_access_hook(void);
++
++#endif
+diff --git 
a/security/lkrg/modules/exploit_detection/syscalls/p_sel_write_enforce/p_sel_write_enforce.c b/security/lkrg/modules/exploit_detection/syscalls/p_sel_write_enforce/p_sel_write_enforce.c +new file mode 100644 +index 000000000000..d410896cdd45 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sel_write_enforce/p_sel_write_enforce.c +@@ -0,0 +1,97 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'sel_write_enforce' function ++ * ++ * Notes: ++ * - Intercept SELinux state modifications ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 13.XI.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++ ++#ifdef CONFIG_SECURITY_SELINUX ++ ++#include "../../../../p_lkrg_main.h" ++ ++char p_sel_write_enforce_kretprobe_state = 0; ++ ++static struct kretprobe p_sel_write_enforce_kretprobe = { ++ .kp.symbol_name = "sel_write_enforce", ++ .handler = p_sel_write_enforce_ret, ++ .entry_handler = p_sel_write_enforce_entry, ++ .data_size = sizeof(struct p_sel_write_enforce_data), ++ /* Probe up to 40 instances concurrently. 
*/ ++ .maxactive = 40, ++}; ++ ++/* ++ * x86-64 syscall ABI: ++ * *rax - syscall_number ++ * rdi - 1st argument ++ * rsi - 2nd argument ++ * rdx - 3rd argument ++ * rcx - 4th argument ++ * ++ * r8 - 5th one ++ * r9 - 6th one ++ */ ++ ++int p_sel_write_enforce_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_tasks_read_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ // This process is on the ED list - validate 'off' flag ++ p_ed_is_off_off_wrap(p_tmp); ++ } ++ p_tasks_read_unlock(&p_flags); ++ ++ p_ed_enforce_validation(); ++ ++ // lock shadow SELinux updates ++ p_lkrg_counter_lock_lock(&p_ed_guard_globals.p_selinux_lock, &p_flags); ++ p_lkrg_counter_lock_val_inc(&p_ed_guard_globals.p_selinux_lock); ++ p_lkrg_counter_lock_unlock(&p_ed_guard_globals.p_selinux_lock, &p_flags); ++ ++ return 0; ++} ++ ++ ++int p_sel_write_enforce_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ // Success ? 
++ if (!IS_ERR((void *)p_regs_get_ret(p_regs))) { ++ // track down new SELinux information ++#ifdef P_SELINUX_VERIFY ++ p_selinux_state_update(); ++#endif ++#if (!defined(RHEL_RELEASE_CODE) && LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) || \ ++ (defined(RHEL_RELEASE_CODE) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 3)) ++ p_ed_guard_globals.p_selinux.p_selinux_enabled = *P_SYM(p_selinux_enabled); ++#endif ++ } ++ ++ // unlock shadow SELinux updates ++ p_lkrg_counter_lock_val_dec(&p_ed_guard_globals.p_selinux_lock); ++ ++ p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(sel_write_enforce) ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sel_write_enforce/p_sel_write_enforce.h b/security/lkrg/modules/exploit_detection/syscalls/p_sel_write_enforce/p_sel_write_enforce.h +new file mode 100644 +index 000000000000..53ac41cfaf47 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sel_write_enforce/p_sel_write_enforce.h +@@ -0,0 +1,40 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'sel_write_enforce' function ++ * ++ * Notes: ++ * - Intercept SELinux state modifications ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 13.XI.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifdef CONFIG_SECURITY_SELINUX ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SEL_WRITE_ENFORCE_H ++#define P_LKRG_EXPLOIT_DETECTION_SEL_WRITE_ENFORCE_H ++ ++ ++/* per-instance private data */ ++struct p_sel_write_enforce_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_sel_write_enforce_ret(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_sel_write_enforce_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_sel_write_enforce_hook(int p_isra); ++void p_uninstall_sel_write_enforce_hook(void); ++ ++#endif ++ ++#endif +diff --git 
a/security/lkrg/modules/exploit_detection/syscalls/p_set_current_groups/p_set_current_groups.c b/security/lkrg/modules/exploit_detection/syscalls/p_set_current_groups/p_set_current_groups.c +new file mode 100644 +index 000000000000..80c86053f8e3 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_set_current_groups/p_set_current_groups.c +@@ -0,0 +1,86 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setgroups/setgroups16 syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_set_current_groups_kretprobe_state = 0; ++ ++static struct kretprobe p_set_current_groups_kretprobe = { ++ .kp.symbol_name = "set_current_groups", ++ .handler = p_set_current_groups_ret, ++ .entry_handler = p_set_current_groups_entry, ++ .data_size = sizeof(struct p_set_current_groups_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_set_current_groups_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 35); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_set_current_groups_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "setgroups returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!p_regs_get_ret(p_regs)) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 36); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++GENERATE_INSTALL_FUNC(set_current_groups) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_set_current_groups/p_set_current_groups.h b/security/lkrg/modules/exploit_detection/syscalls/p_set_current_groups/p_set_current_groups.h +new file mode 100644 +index 000000000000..e3f722f629e5 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_set_current_groups/p_set_current_groups.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setgroups/setgroups16 syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SET_CURRENT_GROUPS_H ++#define P_LKRG_EXPLOIT_DETECTION_SET_CURRENT_GROUPS_H ++ ++/* per-instance private data */ ++struct p_set_current_groups_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_set_current_groups_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); 
++int p_set_current_groups_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_set_current_groups_hook(int p_isra); ++void p_uninstall_set_current_groups_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setfsgid/p_sys_setfsgid.c b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setfsgid/p_sys_setfsgid.c +new file mode 100644 +index 000000000000..a8f6dfdfe328 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setfsgid/p_sys_setfsgid.c +@@ -0,0 +1,92 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setfsgid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_sys_setfsgid_kretprobe_state = 0; ++ ++static struct kretprobe p_sys_setfsgid_kretprobe = { ++ .kp.symbol_name = P_GET_SET_ID_NAME(setfsgid), ++ .handler = p_sys_setfsgid_ret, ++ .entry_handler = p_sys_setfsgid_entry, ++ .data_size = sizeof(struct p_sys_setfsgid_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_sys_setfsgid_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ /* ++ * This is special case, since we can NOT identify when syscall fail or succeed, ++ * we must verify process list before and after the syscall ++ */ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 41); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_sys_setfsgid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "Setfsgid returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++// if (!p_regs_get_ret(p_regs)) { ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ ++ p_update_ed_process(p_tmp, current, 0); ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 42); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++// } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(sys_setfsgid) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setfsgid/p_sys_setfsgid.h b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setfsgid/p_sys_setfsgid.h +new file mode 100644 +index 000000000000..f9b87b81a0c5 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setfsgid/p_sys_setfsgid.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setfsgid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SYS_SETFSGID_H ++#define P_LKRG_EXPLOIT_DETECTION_SYS_SETFSGID_H ++ ++/* per-instance private data */ ++struct p_sys_setfsgid_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_sys_setfsgid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_sys_setfsgid_entry(struct kretprobe_instance *p_ri, struct pt_regs 
*p_regs); ++int p_install_sys_setfsgid_hook(int p_isra); ++void p_uninstall_sys_setfsgid_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setfsuid/p_sys_setfsuid.c b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setfsuid/p_sys_setfsuid.c +new file mode 100644 +index 000000000000..deebdcb59dc1 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setfsuid/p_sys_setfsuid.c +@@ -0,0 +1,92 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setfsuid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_sys_setfsuid_kretprobe_state = 0; ++ ++static struct kretprobe p_sys_setfsuid_kretprobe = { ++ .kp.symbol_name = P_GET_SET_ID_NAME(setfsuid), ++ .handler = p_sys_setfsuid_ret, ++ .entry_handler = p_sys_setfsuid_entry, ++ .data_size = sizeof(struct p_sys_setfsuid_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_sys_setfsuid_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ /* ++ * This is special case, since we can NOT identify when syscall fail or succeed, ++ * we must verify process list before and after the syscall ++ */ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 43); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_sys_setfsuid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "Setfsuid returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++// if (!p_regs_get_ret(p_regs)) { ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ ++ p_update_ed_process(p_tmp, current, 0); ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 44); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++// } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(sys_setfsuid) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setfsuid/p_sys_setfsuid.h b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setfsuid/p_sys_setfsuid.h +new file mode 100644 +index 000000000000..96f361aedb69 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setfsuid/p_sys_setfsuid.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setfsuid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SYS_SETFSUID_H ++#define P_LKRG_EXPLOIT_DETECTION_SYS_SETFSUID_H ++ ++/* per-instance private data */ ++struct p_sys_setfsuid_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_sys_setfsuid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_sys_setfsuid_entry(struct kretprobe_instance *p_ri, struct pt_regs 
*p_regs); ++int p_install_sys_setfsuid_hook(int p_isra); ++void p_uninstall_sys_setfsuid_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setgid/p_sys_setgid.c b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setgid/p_sys_setgid.c +new file mode 100644 +index 000000000000..f146d595be9d +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setgid/p_sys_setgid.c +@@ -0,0 +1,87 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setgid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_sys_setgid_kretprobe_state = 0; ++ ++static struct kretprobe p_sys_setgid_kretprobe = { ++ .kp.symbol_name = P_GET_SET_ID_NAME(setgid), ++ .handler = p_sys_setgid_ret, ++ .entry_handler = p_sys_setgid_entry, ++ .data_size = sizeof(struct p_sys_setgid_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_sys_setgid_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 45); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_sys_setgid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "Setgid returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!p_regs_get_ret(p_regs)) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 46); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(sys_setgid) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setgid/p_sys_setgid.h b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setgid/p_sys_setgid.h +new file mode 100644 +index 000000000000..8067693b6259 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setgid/p_sys_setgid.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setgid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SYS_SETGID_H ++#define P_LKRG_EXPLOIT_DETECTION_SYS_SETGID_H ++ ++/* per-instance private data */ ++struct p_sys_setgid_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_sys_setgid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_sys_setgid_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int 
p_install_sys_setgid_hook(int p_isra); ++void p_uninstall_sys_setgid_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setns/p_sys_setns.c b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setns/p_sys_setns.c +new file mode 100644 +index 000000000000..e801a68ef998 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setns/p_sys_setns.c +@@ -0,0 +1,87 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setns syscall ++ * ++ * Notes: ++ * - Dump namespace metadata ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 29.I.2020 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_sys_setns_kretprobe_state = 0; ++ ++static struct kretprobe p_sys_setns_kretprobe = { ++ .kp.symbol_name = P_GET_SYSCALL_NAME(setns), ++ .handler = p_sys_setns_ret, ++ .entry_handler = p_sys_setns_entry, ++ .data_size = sizeof(struct p_sys_setns_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_sys_setns_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 47); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_sys_setns_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "sys_setns returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!p_regs_get_ret(p_regs)) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 48); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(sys_setns) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setns/p_sys_setns.h b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setns/p_sys_setns.h +new file mode 100644 +index 000000000000..353a6662cf9d +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setns/p_sys_setns.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setns syscall ++ * ++ * Notes: ++ * - Dump namespace metadata ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 29.I.2020 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SYS_SETNS_H ++#define P_LKRG_EXPLOIT_DETECTION_SYS_SETNS_H ++ ++/* per-instance private data */ ++struct p_sys_setns_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_sys_setns_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_sys_setns_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int 
p_install_sys_setns_hook(int p_isra); ++void p_uninstall_sys_setns_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setregid/p_sys_setregid.c b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setregid/p_sys_setregid.c +new file mode 100644 +index 000000000000..ec421194e967 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setregid/p_sys_setregid.c +@@ -0,0 +1,87 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setregid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_sys_setregid_kretprobe_state = 0; ++ ++static struct kretprobe p_sys_setregid_kretprobe = { ++ .kp.symbol_name = P_GET_SET_ID_NAME(setregid), ++ .handler = p_sys_setregid_ret, ++ .entry_handler = p_sys_setregid_entry, ++ .data_size = sizeof(struct p_sys_setregid_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_sys_setregid_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 49); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_sys_setregid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "Setregid returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!p_regs_get_ret(p_regs)) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 50); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(sys_setregid) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setregid/p_sys_setregid.h b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setregid/p_sys_setregid.h +new file mode 100644 +index 000000000000..fa20e4060d41 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setregid/p_sys_setregid.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setregid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SYS_SETREGID_H ++#define P_LKRG_EXPLOIT_DETECTION_SYS_SETREGID_H ++ ++/* per-instance private data */ ++struct p_sys_setregid_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_sys_setregid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_sys_setregid_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); 
++int p_install_sys_setregid_hook(int p_isra); ++void p_uninstall_sys_setregid_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setresgid/p_sys_setresgid.c b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setresgid/p_sys_setresgid.c +new file mode 100644 +index 000000000000..9955e886f93a +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setresgid/p_sys_setresgid.c +@@ -0,0 +1,87 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setresgid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_sys_setresgid_kretprobe_state = 0; ++ ++static struct kretprobe p_sys_setresgid_kretprobe = { ++ .kp.symbol_name = P_GET_SET_ID_NAME(setresgid), ++ .handler = p_sys_setresgid_ret, ++ .entry_handler = p_sys_setresgid_entry, ++ .data_size = sizeof(struct p_sys_setresgid_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_sys_setresgid_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 51); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_sys_setresgid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "Setresgid returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!p_regs_get_ret(p_regs)) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 52); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(sys_setresgid) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setresgid/p_sys_setresgid.h b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setresgid/p_sys_setresgid.h +new file mode 100644 +index 000000000000..1530337b3461 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setresgid/p_sys_setresgid.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setresgid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SYS_SETRESGID_H ++#define P_LKRG_EXPLOIT_DETECTION_SYS_SETRESGID_H ++ ++/* per-instance private data */ ++struct p_sys_setresgid_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_sys_setresgid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_sys_setresgid_entry(struct kretprobe_instance *p_ri, struct 
pt_regs *p_regs); ++int p_install_sys_setresgid_hook(int p_isra); ++void p_uninstall_sys_setresgid_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setresuid/p_sys_setresuid.c b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setresuid/p_sys_setresuid.c +new file mode 100644 +index 000000000000..3c7fe2413564 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setresuid/p_sys_setresuid.c +@@ -0,0 +1,87 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setresuid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_sys_setresuid_kretprobe_state = 0; ++ ++static struct kretprobe p_sys_setresuid_kretprobe = { ++ .kp.symbol_name = P_GET_SET_ID_NAME(setresuid), ++ .handler = p_sys_setresuid_ret, ++ .entry_handler = p_sys_setresuid_entry, ++ .data_size = sizeof(struct p_sys_setresuid_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_sys_setresuid_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 53); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_sys_setresuid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "Setresuid returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!p_regs_get_ret(p_regs)) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 54); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(sys_setresuid) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setresuid/p_sys_setresuid.h b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setresuid/p_sys_setresuid.h +new file mode 100644 +index 000000000000..1a485c6c76d9 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setresuid/p_sys_setresuid.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setresuid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SYS_SETRESUID_H ++#define P_LKRG_EXPLOIT_DETECTION_SYS_SETRESUID_H ++ ++/* per-instance private data */ ++struct p_sys_setresuid_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_sys_setresuid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_sys_setresuid_entry(struct kretprobe_instance *p_ri, struct 
pt_regs *p_regs); ++int p_install_sys_setresuid_hook(int p_isra); ++void p_uninstall_sys_setresuid_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setreuid/p_sys_setreuid.c b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setreuid/p_sys_setreuid.c +new file mode 100644 +index 000000000000..b7fa6c87b496 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setreuid/p_sys_setreuid.c +@@ -0,0 +1,87 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setreuid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_sys_setreuid_kretprobe_state = 0; ++ ++static struct kretprobe p_sys_setreuid_kretprobe = { ++ .kp.symbol_name = P_GET_SET_ID_NAME(setreuid), ++ .handler = p_sys_setreuid_ret, ++ .entry_handler = p_sys_setreuid_entry, ++ .data_size = sizeof(struct p_sys_setreuid_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_sys_setreuid_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 55); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_sys_setreuid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "Setreuid returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!p_regs_get_ret(p_regs)) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 56); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(sys_setreuid) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setreuid/p_sys_setreuid.h b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setreuid/p_sys_setreuid.h +new file mode 100644 +index 000000000000..439babeb5a7b +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setreuid/p_sys_setreuid.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setreuid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SYS_SETREUID_H ++#define P_LKRG_EXPLOIT_DETECTION_SYS_SETREUID_H ++ ++/* per-instance private data */ ++struct p_sys_setreuid_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_sys_setreuid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_sys_setreuid_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); 
++int p_install_sys_setreuid_hook(int p_isra); ++void p_uninstall_sys_setreuid_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setuid/p_sys_setuid.c b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setuid/p_sys_setuid.c +new file mode 100644 +index 000000000000..f0c77eaddbed +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setuid/p_sys_setuid.c +@@ -0,0 +1,87 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setuid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_sys_setuid_kretprobe_state = 0; ++ ++static struct kretprobe p_sys_setuid_kretprobe = { ++ .kp.symbol_name = P_GET_SET_ID_NAME(setuid), ++ .handler = p_sys_setuid_ret, ++ .entry_handler = p_sys_setuid_entry, ++ .data_size = sizeof(struct p_sys_setuid_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_sys_setuid_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 57); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_sys_setuid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "Setuid returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!p_regs_get_ret(p_regs)) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 58); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(sys_setuid) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_setuid/p_sys_setuid.h b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setuid/p_sys_setuid.h +new file mode 100644 +index 000000000000..5740c4f8f334 +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_setuid/p_sys_setuid.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept setuid syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 09.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SYS_SETUID_H ++#define P_LKRG_EXPLOIT_DETECTION_SYS_SETUID_H ++ ++/* per-instance private data */ ++struct p_sys_setuid_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_sys_setuid_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_sys_setuid_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int 
p_install_sys_setuid_hook(int p_isra); ++void p_uninstall_sys_setuid_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_unshare/p_sys_unshare.c b/security/lkrg/modules/exploit_detection/syscalls/p_sys_unshare/p_sys_unshare.c +new file mode 100644 +index 000000000000..68e774372e4a +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_unshare/p_sys_unshare.c +@@ -0,0 +1,91 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept unshare syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 05.XII.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_sys_unshare_kretprobe_state = 0; ++ ++static struct kretprobe p_sys_unshare_kretprobe = { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) ++ .kp.symbol_name = "ksys_unshare", ++#else ++ .kp.symbol_name = "sys_unshare", ++#endif ++ .handler = p_sys_unshare_ret, ++ .entry_handler = p_sys_unshare_entry, ++ .data_size = sizeof(struct p_sys_unshare_data), ++ /* Probe up to 40 instances concurrently. */ ++ .maxactive = 40, ++}; ++ ++ ++int p_sys_unshare_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_ed_enforce_validation(); ++ ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_off(p_tmp, 59); ++#endif ++ // This process is on the ED list - set temporary 'disable' flag! 
++ p_set_ed_process_off(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++ return 0; ++} ++ ++ ++int p_sys_unshare_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ p_debug_kprobe_log( ++ "unshare returned value => %ld comm[%s] Pid:%d parent[%d]\n", ++ p_regs_get_ret(p_regs),current->comm,current->pid,current->real_parent->pid); ++ ++ // Update process ++ p_tasks_write_lock(&p_flags); ++ if ( (p_tmp = p_find_ed_by_pid(task_pid_nr(current))) != NULL) { ++ if (!p_regs_get_ret(p_regs)) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, "Updating ED pid[%d]\n",current->pid); ++ p_update_ed_process(p_tmp, current, 0); ++ } ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_on(p_tmp, 60); ++#endif ++ p_set_ed_process_on(p_tmp); ++ } ++ p_tasks_write_unlock(&p_flags); ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(sys_unshare) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_sys_unshare/p_sys_unshare.h b/security/lkrg/modules/exploit_detection/syscalls/p_sys_unshare/p_sys_unshare.h +new file mode 100644 +index 000000000000..0e1b7e16b82b +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_sys_unshare/p_sys_unshare.h +@@ -0,0 +1,35 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept unshare syscall ++ * ++ * Notes: ++ * - None ++ * ++ * Caveats: ++ * - None ++ * ++ * Timeline: ++ * - Created: 05.XII.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_SYS_UNSHARE_H ++#define P_LKRG_EXPLOIT_DETECTION_SYS_UNSHARE_H ++ ++/* per-instance private data */ ++struct p_sys_unshare_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_sys_unshare_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_sys_unshare_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int 
p_install_sys_unshare_hook(int p_isra); ++void p_uninstall_sys_unshare_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_wake_up_new_task/p_wake_up_new_task.c b/security/lkrg/modules/exploit_detection/syscalls/p_wake_up_new_task/p_wake_up_new_task.c +new file mode 100644 +index 000000000000..0fe1a1c5882e +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_wake_up_new_task/p_wake_up_new_task.c +@@ -0,0 +1,87 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'wake_up_new_task' function ++ * ++ * Notes: ++ * - We are maintianing Red-Black tree of pid's for Exploit Detection feature. ++ * When process forks, child must be tracked as well! We need to update RB tree. ++ * ++ * Caveats: ++ * - Previous version was hooking 'do_fork' function. ++ * ++ * Timeline: ++ * - Created: 18.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../../p_lkrg_main.h" ++ ++ ++char p_wake_up_new_task_kretprobe_state = 0; ++ ++static struct kretprobe p_wake_up_new_task_kretprobe = { ++ .kp.symbol_name = "wake_up_new_task", ++ .handler = p_wake_up_new_task_ret, ++ .entry_handler = p_wake_up_new_task_entry, ++ .data_size = sizeof(struct p_wake_up_new_task_data), ++ /* Probe up to 40 instances concurrently. 
*/ ++ .maxactive = 40, ++}; ++ ++ ++int p_wake_up_new_task_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs) { ++ ++ struct task_struct *p_task = (struct task_struct *)p_regs_get_arg1(p_regs); ++ pid_t p_pid; ++ struct p_ed_process *p_tmp; ++ unsigned long p_flags; ++ ++ if (p_task) { ++ p_pid = task_pid_nr(p_task); ++ p_tasks_write_lock_by_pid(p_pid,&p_flags); ++// if (!(p_task->flags & PF_KTHREAD || is_global_init(p_task))) { ++ if (p_is_ed_task(p_task)) { ++ int p_ret; ++ ++ if ( (p_ret = p_dump_task_f(p_task)) != 0) { ++ if (p_ret == 1) { ++ // Update process ++ if ( (p_tmp = p_find_ed_by_pid(p_pid)) != NULL) { ++ // This process is on the ED list - update information! ++ p_print_log(P_LKRG_INFO, " Updating ED pid[%d]\n",p_pid); ++ p_update_ed_process(p_tmp, p_task, 1); ++#ifdef P_LKRG_TASK_OFF_DEBUG ++ p_debug_off_flag_reset(p_tmp, 61); ++#endif ++ p_reset_ed_flags(p_tmp); ++ } ++ } else { ++ p_print_log(P_LKRG_WARN, ++ " Error[%d] when trying to add process[%d |%s] for tracking!\n", ++ p_ret, p_pid, p_task->comm); ++ } ++ } ++ } ++ p_tasks_write_unlock_by_pid(p_pid,&p_flags); ++ } ++ ++ p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++int p_wake_up_new_task_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs) { ++ ++// p_ed_enforce_validation(); ++ ++ return 0; ++} ++ ++ ++GENERATE_INSTALL_FUNC(wake_up_new_task) +diff --git a/security/lkrg/modules/exploit_detection/syscalls/p_wake_up_new_task/p_wake_up_new_task.h b/security/lkrg/modules/exploit_detection/syscalls/p_wake_up_new_task/p_wake_up_new_task.h +new file mode 100644 +index 000000000000..4d10ee259c8a +--- /dev/null ++++ b/security/lkrg/modules/exploit_detection/syscalls/p_wake_up_new_task/p_wake_up_new_task.h +@@ -0,0 +1,36 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Intercept 'wake_up_new_task' function ++ * ++ * Notes: ++ * - We are maintianing Red-Black tree of pid's for Exploit Detection feature. 
++ * When process forks, child must be tracked as well! We need to update RB tree. ++ * ++ * Caveats: ++ * - Previous version was hooking 'do_fork' function. ++ * ++ * Timeline: ++ * - Created: 18.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_EXPLOIT_DETECTION_WAKE_UP_NEW_TASK_H ++#define P_LKRG_EXPLOIT_DETECTION_WAKE_UP_NEW_TASK_H ++ ++/* per-instance private data */ ++struct p_wake_up_new_task_data { ++ ktime_t entry_stamp; ++}; ++ ++ ++int p_wake_up_new_task_ret(struct kretprobe_instance *ri, struct pt_regs *p_regs); ++int p_wake_up_new_task_entry(struct kretprobe_instance *p_ri, struct pt_regs *p_regs); ++int p_install_wake_up_new_task_hook(int p_isra); ++void p_uninstall_wake_up_new_task_hook(void); ++ ++#endif +diff --git a/security/lkrg/modules/hashing/p_lkrg_fast_hash.c b/security/lkrg/modules/hashing/p_lkrg_fast_hash.c +new file mode 100644 +index 000000000000..031aff3c6afb +--- /dev/null ++++ b/security/lkrg/modules/hashing/p_lkrg_fast_hash.c +@@ -0,0 +1,106 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Hashing algorithm module - SipHash ++ * ++ * Notes: ++ * - Current Algorithm: ++ * *) https://131002.net/siphash/ ++ * - Previous Algorithm from: ++ * *) http://azillionmonkeys.com/qed/hash.html ++ * ++ * Timeline: ++ * - Change SuperFastHash to SipHash ++ * - Created: 24.XI.2015 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../p_lkrg_main.h" ++ ++uint128_t p_global_siphash_key; ++ ++inline void p_lkrg_siphash(const uint8_t *in, const size_t inlen, const uint8_t *k, ++ uint8_t *out, const size_t outlen); ++ ++notrace uint64_t p_lkrg_fast_hash(const char *p_data, unsigned int p_len) { ++ ++ uint64_t p_tmp = 0; ++ ++ p_lkrg_siphash(p_data, p_len, (uint8_t *)&p_global_siphash_key, (uint8_t *)&p_tmp, sizeof(p_tmp)); ++ return p_tmp; ++} ++ ++notrace inline void p_lkrg_siphash(const uint8_t *in, const size_t inlen, const uint8_t 
*k, ++ uint8_t *out, const size_t outlen) { ++ ++ uint64_t v0 = 0x736f6d6570736575ULL; ++ uint64_t v1 = 0x646f72616e646f6dULL; ++ uint64_t v2 = 0x6c7967656e657261ULL; ++ uint64_t v3 = 0x7465646279746573ULL; ++ uint64_t k0 = U8TO64_LE(k); ++ uint64_t k1 = U8TO64_LE(k + 8); ++ uint64_t m; ++ int i; ++ const uint8_t *end = in + inlen - (inlen % sizeof(uint64_t)); ++ const int left = inlen & 7; ++ uint64_t b = ((uint64_t)inlen) << 56; ++ v3 ^= k1; ++ v2 ^= k0; ++ v1 ^= k1; ++ v0 ^= k0; ++ ++ for (; in != end; in += 8) { ++ m = U8TO64_LE(in); ++ v3 ^= m; ++ ++ for (i = 0; i < cROUNDS; ++i) ++ SIPROUND; ++ ++ v0 ^= m; ++ } ++ ++ switch (left) { ++ case 7: ++ b |= ((uint64_t)in[6]) << 48; ++ /* FALLTHROUGH */ ++ case 6: ++ b |= ((uint64_t)in[5]) << 40; ++ /* FALLTHROUGH */ ++ case 5: ++ b |= ((uint64_t)in[4]) << 32; ++ /* FALLTHROUGH */ ++ case 4: ++ b |= ((uint64_t)in[3]) << 24; ++ /* FALLTHROUGH */ ++ case 3: ++ b |= ((uint64_t)in[2]) << 16; ++ /* FALLTHROUGH */ ++ case 2: ++ b |= ((uint64_t)in[1]) << 8; ++ /* FALLTHROUGH */ ++ case 1: ++ b |= ((uint64_t)in[0]); ++ break; ++ case 0: ++ break; ++ } ++ ++ v3 ^= b; ++ ++ for (i = 0; i < cROUNDS; ++i) ++ SIPROUND; ++ ++ v0 ^= b; ++ ++ v2 ^= 0xff; ++ ++ for (i = 0; i < dROUNDS; ++i) ++ SIPROUND; ++ ++ b = v0 ^ v1 ^ v2 ^ v3; ++ U64TO8_LE(out, b); ++} +diff --git a/security/lkrg/modules/hashing/p_lkrg_fast_hash.h b/security/lkrg/modules/hashing/p_lkrg_fast_hash.h +new file mode 100644 +index 000000000000..13179659ae19 +--- /dev/null ++++ b/security/lkrg/modules/hashing/p_lkrg_fast_hash.h +@@ -0,0 +1,76 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Hashing algorithm module - SipHash ++ * ++ * Notes: ++ * - Current Algorithm: ++ * *) https://131002.net/siphash/ ++ * - Previous Algorithm from: ++ * *) http://azillionmonkeys.com/qed/hash.html ++ * ++ * Timeline: ++ * - Change SuperFastHash to SipHash ++ * - Created: 24.XI.2015 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ 
++ ++#ifndef P_LKRG_INTERNAL_FAST_HASH_H ++#define P_LKRG_INTERNAL_FAST_HASH_H ++ ++typedef struct uint128_t { ++ ++ uint64_t p_low; ++ uint64_t p_high; ++ ++} uint128_t; ++ ++/* default: SipHash-2-4 */ ++#define cROUNDS 2 ++#define dROUNDS 4 ++ ++#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b)))) ++ ++#define U32TO8_LE(p, v) \ ++ (p)[0] = (uint8_t)((v)); \ ++ (p)[1] = (uint8_t)((v) >> 8); \ ++ (p)[2] = (uint8_t)((v) >> 16); \ ++ (p)[3] = (uint8_t)((v) >> 24); ++ ++#define U64TO8_LE(p, v) \ ++ U32TO8_LE((p), (uint32_t)((v))); \ ++ U32TO8_LE((p) + 4, (uint32_t)((v) >> 32)); ++ ++#define U8TO64_LE(p) \ ++ (((uint64_t)((p)[0])) | ((uint64_t)((p)[1]) << 8) | \ ++ ((uint64_t)((p)[2]) << 16) | ((uint64_t)((p)[3]) << 24) | \ ++ ((uint64_t)((p)[4]) << 32) | ((uint64_t)((p)[5]) << 40) | \ ++ ((uint64_t)((p)[6]) << 48) | ((uint64_t)((p)[7]) << 56)) ++ ++#define SIPROUND \ ++ do { \ ++ v0 += v1; \ ++ v1 = ROTL(v1, 13); \ ++ v1 ^= v0; \ ++ v0 = ROTL(v0, 32); \ ++ v2 += v3; \ ++ v3 = ROTL(v3, 16); \ ++ v3 ^= v2; \ ++ v0 += v3; \ ++ v3 = ROTL(v3, 21); \ ++ v3 ^= v0; \ ++ v2 += v1; \ ++ v1 = ROTL(v1, 17); \ ++ v1 ^= v2; \ ++ v2 = ROTL(v2, 32); \ ++ } while (0) ++ ++extern uint128_t p_global_siphash_key; ++ ++uint64_t p_lkrg_fast_hash(const char *data, unsigned int len); ++ ++#endif +diff --git a/security/lkrg/modules/integrity_timer/p_integrity_timer.c b/security/lkrg/modules/integrity_timer/p_integrity_timer.c +new file mode 100644 +index 000000000000..642a33e6633a +--- /dev/null ++++ b/security/lkrg/modules/integrity_timer/p_integrity_timer.c +@@ -0,0 +1,1798 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Integrity timer module ++ * ++ * Notes: ++ * - Periodically check critical system hashes using timer ++ * ++ * Timeline: ++ * - Created: 24.XI.2015 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../p_lkrg_main.h" ++ ++/* ++ * Local timer for integrity checks... 
++ */ ++struct timer_list p_timer; ++ ++unsigned int p_time_stamp = 15; /* timeout in seconds */ ++/* God mode variables ;) */ ++DEFINE_SPINLOCK(p_db_lock); ++unsigned long p_db_flags; ++unsigned int p_manual = 0; ++ ++/* kmem_cache for offloding WQ */ ++struct kmem_cache *p_offload_cache = NULL; ++ ++ ++static void p_offload_cache_zero(void *p_arg) { ++ ++ struct work_struct *p_struct = p_arg; ++ ++ memset(p_struct, 0, sizeof(struct work_struct)); ++} ++ ++int p_offload_cache_init(void) { ++ ++ if ( (p_offload_cache = kmem_cache_create("p_offload_cache", sizeof(struct work_struct), ++ 0, SLAB_HWCACHE_ALIGN, p_offload_cache_zero)) == NULL) { ++ p_print_log(P_LKRG_ERR, "kmem_cache_create() for offloading error! :(\n"); ++ return -ENOMEM; ++ } ++ ++ return P_LKRG_SUCCESS; ++} ++ ++void p_offload_cache_delete(void) { ++ ++ flush_workqueue(system_unbound_wq); ++ if (p_offload_cache) { ++ kmem_cache_destroy(p_offload_cache); ++ p_offload_cache = NULL; ++ } ++} ++ ++void p_integrity_timer(void) { ++ ++ p_timer.expires = jiffies + P_CTRL(p_interval)*HZ; ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) ++ p_timer.data = 1; ++ p_timer.function = p_offload_work; ++ init_timer(&p_timer); ++#else ++ timer_setup(&p_timer, p_offload_work, 0); ++#endif ++ add_timer(&p_timer); ++} ++ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) ++void p_offload_work(unsigned long p_timer) { ++#else ++void p_offload_work(struct timer_list *p_timer) { ++#endif ++ ++ struct work_struct *p_worker; ++ ++ p_debug_log(P_LKRG_STRONG_DBG, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) ++ "p_timer => %ld\n",p_timer); ++#else ++ "p_timer => %lx\n",(unsigned long)p_timer); ++#endif ++ ++ while ( (p_worker = p_alloc_offload()) == NULL); // Should never be NULL ++ INIT_WORK(p_worker, p_check_integrity); ++ /* schedule for execution */ ++ queue_work(system_unbound_wq, p_worker); ++ if (p_timer) ++ p_integrity_timer(); ++} ++ ++ ++void p_check_integrity(struct work_struct *p_work) { ++ ++ /* 
temporary hash variable */ ++ uint64_t p_tmp_hash; ++ /* per CPU temporary data */ ++ p_CPU_metadata_hash_mem *p_tmp_cpus = NULL; ++ p_cpu_info p_tmp_cpu_info; ++ /* Linux Kernel Modules integrity */ ++ unsigned int p_module_list_nr_tmp; // Count by walk through the list first ++ unsigned int p_module_kobj_nr_tmp; // Count by walk through the list first ++ p_module_list_mem *p_module_list_tmp = NULL; ++ p_module_kobj_mem *p_module_kobj_tmp = NULL; ++ char p_mod_bad_nr = 0; ++ /* Are we compromised ? */ ++ unsigned int p_hack_check = 0; ++ /* Module syncing temporary pointer */ ++ struct module *p_tmp_mod; ++ unsigned int p_tmp = 0; ++ int p_ret; ++ ++ if (unlikely(!P_CTRL(p_kint_validate)) || ++ unlikely(!p_manual && P_CTRL(p_kint_validate) == 1) || ++ unlikely(!(P_SYM(p_state_init) & 0x2))) ++ goto p_check_integrity_tasks; ++ ++ /* ++ * First allocate temporary buffer for per CPU data. Number of possible CPUs ++ * is per kernel compilation. Hot plug-in/off won't change that value so it is ++ * safe to preallocate buffer here - before lock and before recounting CPUs info. ++ */ ++ ++ /* ++ * __GFP_NOFAIL flag will always generate slowpath warn because developers ++ * decided to depreciate this flag ;/ ++ */ ++// while ( (p_tmp_cpus = kzalloc(sizeof(p_CPU_metadata_hash_mem)*p_db.p_cpu.p_nr_cpu_ids, ++// GFP_KERNEL | GFP_ATOMIC | GFP_NOFS | __GFP_REPEAT)) == NULL); ++ ++ /* ++ * We are in the off-loaded WQ context. We can sleep here (because we must be able to ++ * take 'mutex' lock which is 'sleeping' lock), so it is not strictly time-critical code. ++ * This allocation is made before we take 'spinlock' for internal database (and before ++ * we take 'sleeping mutext lock' but it doesn't count for now) we are allowed to ++ * make 'slowpath' memory allocation - don't need to use emergency pools. ++ * ++ * Emergency pools will be consumed in 'kmod' module (because we will be under 'spinlock' ++ * timing pressure). 
++ */ ++ while ( (p_tmp_cpus = kzalloc(sizeof(p_CPU_metadata_hash_mem)*p_db.p_cpu.p_nr_cpu_ids, ++ GFP_KERNEL | GFP_NOFS | __GFP_REPEAT)) == NULL); ++ ++ ++ ++ /* Find information about current CPUs in the system */ ++ p_get_cpus(&p_tmp_cpu_info); ++ if (p_cmp_cpus(&p_db.p_cpu,&p_tmp_cpu_info)) { ++ p_print_log(P_LKRG_WARN, ++ "!!! INTEGRITY WARNING: Using CPU number from original database !!!\n"); ++ } ++ ++ /* ++ * Check which core did we lock and do not send IPI to yourself. ++ * It will cause internal bug in smp_call_function_single() which ++ * uses get_cpu() internally. Core must be unlocked before calling ++ * this function! ++ */ ++// p_tmp_cpuid = smp_processor_id(); ++ ++ /* ++ * Checking all online CPUs critical data ++ */ ++ get_online_cpus(); ++ ++// for_each_present_cpu(p_tmp) { ++ //for_each_online_cpu(p_tmp) { ++// if (cpu_online(p_tmp)) { ++// if (p_tmp_cpuid != p_tmp) { ++//printk(KERN_INFO "smp_call_function_single() for cpu[%d]\n",p_tmp); ++ /* ++ * smp_call_function_single() internally 'locks' the execution core. ++ * This means you should not call this function with IRQ disabled. ++ * It will generate warnings/OOPS - it is not documented but this is ++ * how this function reacts. ++ */ ++ //smp_call_function_single(p_tmp,p_dump_CPU_metadata,p_tmp_cpus,true); ++//printk(KERN_INFO "smp_call_function_single() -> DONE\n"); ++// } ++// } ++ //} ++ ++ ++ /* ++ * There is an undesirable situation in SMP Linux machines when sending ++ * IPI via the smp_call_function_single() API... ++ * ++ * ... more technical details about it can be found here: ++ * *) http://blog.pi3.com.pl/?p=549 ++ * *) http://lists.openwall.net/linux-kernel/2016/09/21/68 ++ * ++ * on_each_cpu() might mitigate this problem a bit because has extra ++ * self-balancing code for performance reasons. ++ */ ++ on_each_cpu(p_dump_CPU_metadata,p_tmp_cpus,true); ++ ++ ++ /* ++ * OK, so now get the same information for currently locked core! 
++ */ ++// p_dump_CPU_metadata(p_tmp_cpus); // no return value ++ ++ /* Now we are safe to disable IRQs on current core */ ++ ++ p_tmp_hash = hash_from_CPU_data(p_tmp_cpus); ++ put_online_cpus(); ++ ++ p_text_section_lock(); ++ ++ /* ++ * Memory allocation may fail... let's loop here! ++ */ ++ while( (p_ret = p_kmod_hash(&p_module_list_nr_tmp,&p_module_list_tmp, ++ &p_module_kobj_nr_tmp,&p_module_kobj_tmp, 0x0)) != P_LKRG_SUCCESS) { ++ if (p_ret == P_LKRG_KMOD_DUMP_RACE) { ++ p_print_log(P_LKRG_ERR, ++ "Function won race with module activity thread... We need to cancel this context! :(\n"); ++ goto p_check_integrity_cancel; ++ } ++ p_print_log(P_LKRG_ERR, ++ "Function - p_kmod_hash() failed! Memory problems... :(\n"); ++ schedule(); ++ } ++/* ++ p_text_section_unlock(); ++*/ ++ ++ spin_lock_irqsave(&p_db_lock,p_db_flags); ++// spin_lock(&p_db_lock); ++ ++ if (p_db.p_CPU_metadata_hashes != p_tmp_hash) { ++ /* I'm hacked! ;( */ ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! HASHES FROM CPUs METADATA IS DIFFERENT- it is [0x%llx] and should be [0x%llx] !!!\n", ++ p_tmp_hash,p_db.p_CPU_metadata_hashes); ++ P_KINT_IF_ACCEPT(p_db.p_CPU_metadata_hashes, ++ p_tmp_hash, ++ p_hack_check); ++ } ++ ++ p_print_log(P_LKRG_INFO,"Hash from CPUs metadata => [0x%llx]\n",p_tmp_hash); ++ ++ /* ++ * Checking memory block: ++ * "___ex_table" ++ */ ++ if (p_db.kernel_ex_table.p_addr && p_db.kernel_ex_table.p_hash) { ++ p_tmp_hash = p_lkrg_fast_hash((unsigned char *)p_db.kernel_ex_table.p_addr, ++ (unsigned int)p_db.kernel_ex_table.p_size); ++ ++ if (p_db.kernel_ex_table.p_hash != p_tmp_hash) { ++ /* I'm hacked! ;( */ ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! 
EXCEPTION TABLE HASH IS DIFFERENT - it is [0x%llx] and should be [0x%llx] !!!\n", ++ p_tmp_hash,p_db.kernel_ex_table.p_hash); ++ P_KINT_IF_ACCEPT(p_db.kernel_ex_table.p_hash, ++ p_tmp_hash, ++ p_hack_check); ++ } ++ ++ p_print_log(P_LKRG_INFO,"Hash from kernel exception table => [0x%llx]\n",p_tmp_hash); ++ } ++ ++ /* ++ * Checking memory block: ++ * "_stext" ++ */ ++ p_tmp_hash = p_lkrg_fast_hash((unsigned char *)p_db.kernel_stext.p_addr, ++ (unsigned int)p_db.kernel_stext.p_size); ++ ++ if (p_db.kernel_stext.p_hash != p_tmp_hash) { ++#if defined(P_LKRG_JUMP_LABEL_STEXT_DEBUG) ++ char *p_str1 = (unsigned char *)p_db.kernel_stext.p_addr; ++ char *p_str2 = (unsigned char *)p_db.kernel_stext_copy; ++ char p_eh_buf[0x100]; ++#endif ++ /* We detected core kernel .text corruption - we are hacked and can't recover */ ++ /* I'm hacked! ;( */ ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! _STEXT MEMORY BLOCK HASH IS DIFFERENT - it is [0x%llx] and should be [0x%llx] !!!\n", ++ p_tmp_hash,p_db.kernel_stext.p_hash); ++#if defined(P_LKRG_JUMP_LABEL_STEXT_DEBUG) ++ for (p_tmp = 0; p_tmp < p_db.kernel_stext.p_size; p_tmp++) { ++ if (p_str2[p_tmp] != p_str1[p_tmp]) { ++ sprint_symbol_no_offset(p_eh_buf,(unsigned long)((unsigned long)p_db.kernel_stext.p_addr+(unsigned long)p_tmp)); ++ printk(KERN_CRIT "copy[0x%x] vs now[0x%x] offset[%d | 0x%x] symbol[%s]\n", ++ p_str2[p_tmp], ++ p_str1[p_tmp], ++ p_tmp, ++ p_tmp, ++ p_eh_buf); ++ } ++ } ++#endif ++ P_KINT_IF_ACCEPT(p_db.kernel_stext.p_hash, ++ p_tmp_hash, ++ p_hack_check); ++ } ++ ++ p_print_log(P_LKRG_INFO,"Hash from _stext memory block => [0x%llx]\n",p_tmp_hash); ++ ++ /* ++ * Checking memory block: ++ * "_rodata" ++ */ ++ if (p_db.kernel_rodata.p_addr && p_db.kernel_rodata.p_hash) { ++#if !defined(CONFIG_GRKERNSEC) ++ p_tmp_hash = p_lkrg_fast_hash((unsigned char *)p_db.kernel_rodata.p_addr, ++ (unsigned int)p_db.kernel_rodata.p_size); ++#else ++ p_tmp_hash = 0xFFFFFFFF; ++#endif ++ ++ if (p_db.kernel_rodata.p_hash != p_tmp_hash) { ++ 
/* I'm hacked! ;( */ ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! _RODATA MEMORY BLOCK HASH IS DIFFERENT - it is [0x%llx] and should be [0x%llx] !!!\n", ++ p_tmp_hash,p_db.kernel_rodata.p_hash); ++ P_KINT_IF_ACCEPT(p_db.kernel_rodata.p_hash, ++ p_tmp_hash, ++ p_hack_check); ++ } ++ ++ p_print_log(P_LKRG_INFO,"Hash from _rodata memory block => [0x%llx]\n",p_tmp_hash); ++ } ++ ++ /* ++ * Checking memory block: ++ * "__iommu_table" ++ */ ++ if (p_db.kernel_iommu_table.p_addr && p_db.kernel_iommu_table.p_hash) { ++#ifdef P_LKRG_IOMMU_HASH_ENABLED ++ p_tmp_hash = p_lkrg_fast_hash((unsigned char *)p_db.kernel_iommu_table.p_addr, ++ (unsigned int)p_db.kernel_iommu_table.p_size); ++#else ++ p_tmp_hash = 0xFFFFFFFF; ++#endif ++ ++ if (p_db.kernel_iommu_table.p_hash != p_tmp_hash) { ++ /* I'm hacked! ;( */ ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! IOMMU TABLE HASH IS DIFFERENT - it is [0x%llx] and should be [0x%llx] !!!\n", ++ p_tmp_hash,p_db.kernel_iommu_table.p_hash); ++ P_KINT_IF_ACCEPT(p_db.kernel_iommu_table.p_hash, ++ p_tmp_hash, ++ p_hack_check); ++ } ++ ++ p_print_log(P_LKRG_INFO,"Hash from IOMMU table => [0x%llx]\n",p_tmp_hash); ++ } ++ ++/* ++ unsigned int p_module_list_nr_tmp; // Count by walk through the list first ++ unsigned int p_module_kobj_nr_tmp; // Count by walk through the list first ++ p_module_list_mem *p_module_list_tmp; ++ p_module_kobj_mem *p_module_kobj_tmp; ++*/ ++ ++ /* ++ * Checking this kernel modules integrity. ++ */ ++ ++ /* ++ * Memory allocation may fail... let's loop here! ++ */ ++// while(p_kmod_hash(&p_module_list_nr_tmp,&p_module_list_tmp, ++// &p_module_kobj_nr_tmp,&p_module_kobj_tmp) != P_LKRG_SUCCESS); ++ ++/* ++ if (p_kmod_hash(&p_module_list_nr_tmp,&p_module_list_tmp, ++ &p_module_kobj_nr_tmp,&p_module_kobj_tmp) != P_LKRG_SUCCESS) { ++ printk(P_LKRG_PRINT P_LKRG_SIGNATURE ++ " p_kmod_hash() ERROR! 
- skipping integrity check of modules!\n"); ++// return P_LKRG_GENERAL_ERROR; ++ } ++*/ ++ ++ /* ++ * If we enter this block it means we've found module which is ++ * not registered in module list or sysfs. ++ * Let's find out where we miss module and print which one ++ * ++ * TODO: dump as much info about this module as possible e.g. ++ * core-dump image, ddebug_table information, symbol table, etc. ++ */ ++ if (p_module_list_nr_tmp != p_module_kobj_nr_tmp) { ++ unsigned int p_tmp_cnt,p_tmp_diff = 0; ++ char p_tmp_flag,p_tmp_flag_cnt = 0; ++ ++ p_mod_bad_nr++; ++ if (p_module_list_nr_tmp < p_module_kobj_nr_tmp) { ++ /* ++ * If we found less modules in module list than KOBJs ++ * Most likely module tries to hide, we can make preassumption ++ * system might be hacked. ++ * ++ * NOTE: We should have been able to log this module in the loading ++ * stage by notifier! ++ */ ++ P_KINT_HACK_I(p_hack_check); ++ ++ p_tmp_diff = p_module_kobj_nr_tmp - p_module_list_nr_tmp; ++ ++ for (p_tmp_flag = 0, p_tmp_hash = 0; p_tmp_hash < p_module_kobj_nr_tmp; ++ p_tmp_flag = 0, p_tmp_hash++) { ++ for (p_tmp_cnt = 0; p_tmp_cnt < p_module_list_nr_tmp; p_tmp_cnt++) { ++ /* Is module on both lists? */ ++ if (p_module_kobj_tmp[p_tmp_hash].p_mod == p_module_list_tmp[p_tmp_cnt].p_mod) { ++ p_tmp_flag = 1; ++ break; ++ } ++ } ++ /* Did we find missing module? */ ++ if (!p_tmp_flag) { ++ /* OK we found which module is in KOBJ list but not in module list... */ ++ p_tmp_flag_cnt++; ++ ++ if (!P_CTRL(p_block_modules)) { ++ /* Maybe we have sleeping module activity event ? 
*/ ++ if (mutex_is_locked(&p_module_activity)) { ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ "Hidden[0x%lx] p_module_activity_ptr[0x%lx]\n", ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_activity_ptr); ++ if (p_module_kobj_tmp[p_tmp_hash].p_mod == p_module_activity_ptr) { ++ P_KINT_HACK_D(p_hack_check); ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in module list[%d] than in KOBJ[%d]\n", ++ p_tmp_diff, ++ p_module_list_nr_tmp, ++ p_module_kobj_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_module_core, ++ p_module_kobj_tmp[p_tmp_hash].p_core_text_size, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module is the same as on-going module activity events (system is stable).\n"); ++ } else { ++ p_tmp_mod = P_SYM(p_find_module(p_module_kobj_tmp[p_tmp_hash].p_name)); ++ if (p_tmp_mod) { ++ if (p_tmp_mod->state != MODULE_STATE_LIVE) { ++ P_KINT_HACK_D(p_hack_check); ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in module list[%d] than in KOBJ[%d]\n", ++ p_tmp_diff, ++ p_module_list_nr_tmp, ++ p_module_kobj_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_module_core, ++ p_module_kobj_tmp[p_tmp_hash].p_core_text_size, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module is not in the 'LIVE' state but in [%s] state (system is stable).\n", ++ (p_tmp_mod->state == 1) ? "COMING" : ++ (p_tmp_mod->state == 2) ? "GOING AWAY" : ++ (p_tmp_mod->state == 3) ? 
"COMING" : "UNKNOWN!"); ++ } else { ++ P_KINT_HACK_D(p_hack_check); ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in module list[%d] than in KOBJ[%d]\n", ++ p_tmp_diff, ++ p_module_list_nr_tmp, ++ p_module_kobj_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_module_core, ++ p_module_kobj_tmp[p_tmp_hash].p_core_text_size, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module has 'live' state but 'block_modules' is disabled. Module was correctly " ++ "identified through the official API. Most likely race condition appeared when system " ++ "was rebuilding database (system is stable).\n"); ++ // TODO: Dump module ++ } ++ } else { ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! FOUND LESS[%d] MODULES IN MODULE LIST[%d] THAN IN KOBJ[%d]\n", ++ p_tmp_diff, ++ p_module_list_nr_tmp, ++ p_module_kobj_nr_tmp); ++ /* Let's dump information about 'hidden' module */ ++ p_print_log(P_LKRG_CRIT, ++ "HIDDEN MODULE:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_module_core, ++ p_module_kobj_tmp[p_tmp_hash].p_core_text_size, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_CRIT, ++ "!! MOST LIKELY SYSTEM IS HACKED - MODULE WILL BE DUMPED !! **\n"); ++ ++ // Did NOT find it in the system via official API... ++ // MOST LIKELY WE ARE HACKED! 
++ // TODO: Dump module ++ } ++ } ++ } else { ++ p_tmp_mod = P_SYM(p_find_module(p_module_kobj_tmp[p_tmp_hash].p_name)); ++ if (p_tmp_mod) { ++ if (p_tmp_mod->state != MODULE_STATE_LIVE) { ++ P_KINT_HACK_D(p_hack_check); ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in module list[%d] than in KOBJ[%d]\n", ++ p_tmp_diff, ++ p_module_list_nr_tmp, ++ p_module_kobj_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_module_core, ++ p_module_kobj_tmp[p_tmp_hash].p_core_text_size, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module is not in the 'LIVE' state but in [%s] state (system is stable).\n", ++ (p_tmp_mod->state == 1) ? "COMING" : ++ (p_tmp_mod->state == 2) ? "GOING AWAY" : ++ (p_tmp_mod->state == 3) ? "COMING" : "UNKNOWN!"); ++ } else { ++ P_KINT_HACK_D(p_hack_check); ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in module list[%d] than in KOBJ[%d]\n", ++ p_tmp_diff, ++ p_module_list_nr_tmp, ++ p_module_kobj_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_module_core, ++ p_module_kobj_tmp[p_tmp_hash].p_core_text_size, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module has 'live' state but 'block_modules' is disabled. Module was correctly " ++ "identified through the official API. Most likely race condition appeared when system " ++ "was rebuilding database (system is stable).\n"); ++ // TODO: Dump module ++ } ++ } else { ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! 
FOUND LESS[%d] MODULES IN MODULE LIST[%d] THAN IN KOBJ[%d]\n", ++ p_tmp_diff, ++ p_module_list_nr_tmp, ++ p_module_kobj_nr_tmp); ++ /* Let's dump information about 'hidden' module */ ++ p_print_log(P_LKRG_CRIT, ++ "HIDDEN MODULE:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_module_core, ++ p_module_kobj_tmp[p_tmp_hash].p_core_text_size, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_CRIT, ++ "!! MOST LIKELY SYSTEM IS HACKED - MODULE WILL BE DUMPED !! **\n"); ++ ++ // Did NOT find it in the system via official API... ++ // MOST LIKELY WE ARE HACKED! ++ // TODO: Dump module ++ } ++ } ++ } else { ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! FOUND LESS[%d] MODULES IN MODULE LIST[%d] THAN IN KOBJ[%d]\n", ++ p_tmp_diff, ++ p_module_list_nr_tmp, ++ p_module_kobj_nr_tmp); ++ /* Let's dump information about 'hidden' module */ ++ p_print_log(P_LKRG_CRIT, ++ "HIDDEN MODULE:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_module_core, ++ p_module_kobj_tmp[p_tmp_hash].p_core_text_size, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_CRIT, ++ "!! MOST LIKELY SYSTEM IS HACKED - MODULE WILL BE DUMPED !! **\n"); ++ ++ // Dynamic module blocking is disabled so this situation shouldn't happen ++ // MOST LIKELY WE ARE HACKED! ++ // TODO: Dump module ++ } ++ } ++ } ++ /* We should never be here... we found more mismatched modules than expected */ ++ if (p_tmp_diff != p_tmp_flag_cnt) { ++ p_print_log(P_LKRG_ERR, ++ "We found more[%d] missing modules than expected[%d]... 
something went wrong ;(\n", ++ p_tmp_flag_cnt,p_tmp_diff); ++ } ++ } else if (p_module_kobj_nr_tmp < p_module_list_nr_tmp) { ++ /* ++ * This is strange behaviour. Most of the malicious modules don't remove them from KOBJ ++ * Just from module list. If any remove themselves from the KOBJ most likely they also ++ * Removed themselves from the module list as well. I would not make assumption system is ++ * Somehow compromised but for sure something very strange happened! That's why we should ++ * Inform about that! ++ */ ++ ++ p_tmp_diff = p_module_list_nr_tmp - p_module_kobj_nr_tmp; ++ ++ for (p_tmp_flag = 0, p_tmp_hash = 0; p_tmp_hash < p_module_list_nr_tmp; ++ p_tmp_flag = 0, p_tmp_hash++) { ++ for (p_tmp_cnt = 0; p_tmp_cnt < p_module_kobj_nr_tmp; p_tmp_cnt++) { ++ /* Is module on both lists? */ ++ if (p_module_list_tmp[p_tmp_hash].p_mod == p_module_kobj_tmp[p_tmp_cnt].p_mod) { ++ p_tmp_flag = 1; ++ break; ++ } ++ } ++ /* Did we find missing module? */ ++ if (!p_tmp_flag) { ++ /* OK we found which module is in MODULE LIST list but not in KOBJ... */ ++ p_tmp_flag_cnt++; ++ ++ if (!P_CTRL(p_block_modules)) { ++ /* Maybe we have sleeping module activity event ? 
*/ ++ if (mutex_is_locked(&p_module_activity)) { ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ "Hidden[0x%lx] p_module_activity_ptr[0x%lx]\n", ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_activity_ptr); ++ if (p_module_list_tmp[p_tmp_hash].p_mod == p_module_activity_ptr) { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in KOBJ[%d] than in module list[%d]\n", ++ p_tmp_diff, ++ p_module_kobj_nr_tmp, ++ p_module_list_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module is the same as on-going module activity events (system is stable).\n"); ++ } else { ++ p_tmp_mod = P_SYM(p_find_module(p_module_list_tmp[p_tmp_hash].p_name)); ++ if (p_tmp_mod) { ++ if (p_tmp_mod->state != MODULE_STATE_LIVE) { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in KOBJ[%d] than in module list[%d]\n", ++ p_tmp_diff, ++ p_module_kobj_nr_tmp, ++ p_module_list_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module is not in the 'LIVE' state but in [%s] state (system is stable).\n", ++ (p_tmp_mod->state == 1) ? "COMING" : ++ (p_tmp_mod->state == 2) ? "GOING AWAY" : ++ (p_tmp_mod->state == 3) ? 
"COMING" : "UNKNOWN!"); ++ } else { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in KOBJ[%d] than in module list[%d]\n", ++ p_tmp_diff, ++ p_module_kobj_nr_tmp, ++ p_module_list_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module has 'live' state but 'block_modules' is disabled. Module was correctly " ++ "identified through the official API. Most likely race condition appeared when system " ++ "was rebuilding database (system is stable).\n"); ++ // TODO: Dump module ++ } ++ } else { ++ p_print_log(P_LKRG_WARN, ++ "Found less[%d] modules in KOBJ[%d] than in module list[%d]\n", ++ p_tmp_diff, ++ p_module_kobj_nr_tmp, ++ p_module_list_nr_tmp); ++ p_print_log(P_LKRG_WARN, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_WARN, ++ "Strange behaviour detected - module was found in module list but not in KOBJs (system is stable).\n"); ++ // Did NOT find it in the system via official API... 
++ // MOST LIKELY WE ARE NOT HACKED :) ++ // TODO: Dump module ++ } ++ } ++ } else { ++ p_tmp_mod = P_SYM(p_find_module(p_module_list_tmp[p_tmp_hash].p_name)); ++ if (p_tmp_mod) { ++ if (p_tmp_mod->state != MODULE_STATE_LIVE) { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in KOBJ[%d] than in module list[%d]\n", ++ p_tmp_diff, ++ p_module_kobj_nr_tmp, ++ p_module_list_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module is not in the 'LIVE' state but in [%s] state (system is stable).\n", ++ (p_tmp_mod->state == 1) ? "COMING" : ++ (p_tmp_mod->state == 2) ? "GOING AWAY" : ++ (p_tmp_mod->state == 3) ? "COMING" : "UNKNOWN!"); ++ } else { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in KOBJ[%d] than in module list[%d]\n", ++ p_tmp_diff, ++ p_module_kobj_nr_tmp, ++ p_module_list_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module has 'live' state but 'block_modules' is disabled. Module was correctly " ++ "identified through the official API. 
Most likely race condition appeared when system " ++ "was rebuilding database (system is stable).\n"); ++ ++ // TODO: Dump module ++ } ++ } else { ++ p_print_log(P_LKRG_WARN, ++ "Found less[%d] modules in KOBJ[%d] than in module list[%d]\n", ++ p_tmp_diff, ++ p_module_kobj_nr_tmp, ++ p_module_list_nr_tmp); ++ p_print_log(P_LKRG_WARN, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_WARN, ++ "Strange behaviour detected - module was found in module list but not in KOBJs (system is stable).\n"); ++ ++ // Did NOT find it in the system via official API... ++ // MOST LIKELY WE ARE NOT HACKED :) ++ // TODO: Dump module ++ } ++ } ++ } else { ++ p_print_log(P_LKRG_WARN, ++ "Found less[%d] modules in KOBJ[%d] than in module list[%d]\n", ++ p_tmp_diff, ++ p_module_kobj_nr_tmp, ++ p_module_list_nr_tmp); ++ p_print_log(P_LKRG_WARN, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_WARN, ++ "Strange behaviour detected - module was found in module list but not in KOBJs (system is stable).\n"); ++ ++ // Dynamic module blocking is disabled so this situation shouldn't happen ++ // MOST LIKELY WE ARE NOT HACKED :) ++ // TODO: Dump module ++ } ++ } ++ } ++ /* We should never be here... 
we found more mismatched modules than expected */ ++ if (p_tmp_diff != p_tmp_flag_cnt) { ++ p_print_log(P_LKRG_ERR, ++ "We found more[%d] missing modules than expected[%d]... something went wrong ;(\n", ++ p_tmp_flag_cnt,p_tmp_diff); ++ } ++ ++ } else { ++ p_print_log(P_LKRG_CRIT, ++ "I should never be here!... something went wrong ;( module list[%d] KOBJ[%d]\n", ++ p_module_list_nr_tmp,p_module_kobj_nr_tmp); ++ } ++ } ++ ++ ++ /* ++ * We found as many modules in module list as in sysfs ++ * Let's validate if our database has the same information as we gathered now ++ * ++ */ ++ ++ ++ /* ++ * If we enter this block number of modules in module list and sysfs are the same. ++ * Unfortunately we have not the same number of modules in database module list ++ * than currently in the system! ++ * Let's find out which module we missing and print some information about it. ++ * ++ * TODO: dump as much info about this module as possible e.g. ++ * core-dump image, ddebug_table information, symbol table, etc. ++ */ ++ if (p_module_list_nr_tmp != p_db.p_module_list_nr) { ++ unsigned int p_tmp_cnt,p_tmp_diff = 0; ++ char p_tmp_flag,p_tmp_flag_cnt = 0; ++ ++ p_mod_bad_nr++; ++ if (p_module_list_nr_tmp < p_db.p_module_list_nr) { ++ /* ++ * We "lost" module which we didn't register somehow. ++ * It might happen regardless of notifier informing us on any ++ * module related activities. ++ * ++ * I would not make assumption system is somehow compromised ++ * but we should inform about that. ++ * ++ * It might happen when verification routine wins ++ * the race with module notification routine of acquiring ++ * module mutexes. In that case, notification routine will ++ * wait until this verification context unlocks mutexes. 
++ */ ++ ++ p_tmp_diff = p_db.p_module_list_nr - p_module_list_nr_tmp; ++ ++ for (p_tmp_flag = 0, p_tmp_hash = 0; p_tmp_hash < p_db.p_module_list_nr; ++ p_tmp_flag = 0, p_tmp_hash++) { ++ for (p_tmp_cnt = 0; p_tmp_cnt < p_module_list_nr_tmp; p_tmp_cnt++) { ++ /* Is module on both lists? */ ++ if (p_db.p_module_list_array[p_tmp_hash].p_mod == p_module_list_tmp[p_tmp_cnt].p_mod) { ++ p_tmp_flag = 1; ++ break; ++ } ++ } ++ /* Did we find missing module? */ ++ if (!p_tmp_flag) { ++ /* OK we found which module is in DB module list but not in current module list... */ ++ p_tmp_flag_cnt++; ++ ++ // TODO: Module disappeared and we didn't notice it! We shouldn't dump it because ++ // most likely module doesn't exists anymore... ++ // But we can try to poke that page where modules used to be to find out scratches ++ // of information about it (e.g. name? symbols table?) ++ ++ if (!P_CTRL(p_block_modules)) { ++ /* Maybe we have sleeping module activity event ? */ ++ if (mutex_is_locked(&p_module_activity)) { ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ "Lost[0x%lx] p_module_activity_ptr[0x%lx]\n", ++ (unsigned long)p_db.p_module_list_array[p_tmp_hash].p_mod, ++ (unsigned long)p_module_activity_ptr); ++ if (p_db.p_module_list_array[p_tmp_hash].p_mod == p_module_activity_ptr) { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in current system in module list [%d] than in DB module list[%d]\n", ++ p_tmp_diff, ++ p_module_list_nr_tmp, ++ p_db.p_module_list_nr); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_db.p_module_list_array[p_tmp_hash].p_name, ++ (unsigned long)p_db.p_module_list_array[p_tmp_hash].p_mod, ++ (unsigned long)p_db.p_module_list_array[p_tmp_hash].p_module_core, ++ p_db.p_module_list_array[p_tmp_hash].p_core_text_size, ++ p_db.p_module_list_array[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module is the same as on-going module 
activity events (system is stable).\n"); ++ } else { ++ p_tmp_mod = P_SYM(p_find_module(p_db.p_module_list_array[p_tmp_hash].p_name)); ++ if (p_tmp_mod) { ++ if (p_tmp_mod->state != MODULE_STATE_LIVE) { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in current system in module list [%d] than in DB module list[%d]\n", ++ p_tmp_diff, ++ p_module_list_nr_tmp, ++ p_db.p_module_list_nr); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_db.p_module_list_array[p_tmp_hash].p_name, ++ (unsigned long)p_db.p_module_list_array[p_tmp_hash].p_mod, ++ (unsigned long)p_db.p_module_list_array[p_tmp_hash].p_module_core, ++ p_db.p_module_list_array[p_tmp_hash].p_core_text_size, ++ p_db.p_module_list_array[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module is not in the 'LIVE' state but in [%s] state (system is stable).\n", ++ (p_tmp_mod->state == 1) ? "COMING" : ++ (p_tmp_mod->state == 2) ? "GOING AWAY" : ++ (p_tmp_mod->state == 3) ? "COMING" : "UNKNOWN!"); ++ } else { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in current system in module list [%d] than in DB module list[%d]\n", ++ p_tmp_diff, ++ p_module_list_nr_tmp, ++ p_db.p_module_list_nr); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_db.p_module_list_array[p_tmp_hash].p_name, ++ (unsigned long)p_db.p_module_list_array[p_tmp_hash].p_mod, ++ (unsigned long)p_db.p_module_list_array[p_tmp_hash].p_module_core, ++ p_db.p_module_list_array[p_tmp_hash].p_core_text_size, ++ p_db.p_module_list_array[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module has 'live' state but 'block_modules' is disabled. Module was correctly " ++ "identified through the official API. 
Most likely race condition appeared when system " ++ "was rebuilding database (system is stable).\n"); ++ ++ // TODO: Dirty dump module - from the memory scratches if possible ++ } ++ } else { ++ p_print_log(P_LKRG_WARN, ++ "Found less[%d] modules in current system in module list [%d] than in DB module list[%d]\n", ++ p_tmp_diff, ++ p_module_list_nr_tmp, ++ p_db.p_module_list_nr); ++ p_print_log(P_LKRG_WARN, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_db.p_module_list_array[p_tmp_hash].p_name, ++ (unsigned long)p_db.p_module_list_array[p_tmp_hash].p_mod, ++ (unsigned long)p_db.p_module_list_array[p_tmp_hash].p_module_core, ++ p_db.p_module_list_array[p_tmp_hash].p_core_text_size, ++ p_db.p_module_list_array[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_WARN, ++ "Strange behaviour detected - module was found in DB but not in OS (system is stable).\n"); ++ ++ // Did NOT find it in the system via official API... ++ // TODO: Dirty dump module - from the memory scratches if possible ++ } ++ } ++ } else { ++ p_tmp_mod = P_SYM(p_find_module(p_db.p_module_list_array[p_tmp_hash].p_name)); ++ if (p_tmp_mod) { ++ if (p_tmp_mod->state != MODULE_STATE_LIVE) { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in current system in module list [%d] than in DB module list[%d]\n", ++ p_tmp_diff, ++ p_module_list_nr_tmp, ++ p_db.p_module_list_nr); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_db.p_module_list_array[p_tmp_hash].p_name, ++ (unsigned long)p_db.p_module_list_array[p_tmp_hash].p_mod, ++ (unsigned long)p_db.p_module_list_array[p_tmp_hash].p_module_core, ++ p_db.p_module_list_array[p_tmp_hash].p_core_text_size, ++ p_db.p_module_list_array[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module is not in the 'LIVE' state but in [%s] state (system is stable).\n", ++ (p_tmp_mod->state == 1) 
? "COMING" : ++ (p_tmp_mod->state == 2) ? "GOING AWAY" : ++ (p_tmp_mod->state == 3) ? "COMING" : "UNKNOWN!"); ++ } else { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in current system in module list [%d] than in DB module list[%d]\n", ++ p_tmp_diff, ++ p_module_list_nr_tmp, ++ p_db.p_module_list_nr); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_db.p_module_list_array[p_tmp_hash].p_name, ++ (unsigned long)p_db.p_module_list_array[p_tmp_hash].p_mod, ++ (unsigned long)p_db.p_module_list_array[p_tmp_hash].p_module_core, ++ p_db.p_module_list_array[p_tmp_hash].p_core_text_size, ++ p_db.p_module_list_array[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module has 'live' state but 'block_modules' is disabled. Module was correctly " ++ "identified through the official API. Most likely race condition appeared when system " ++ "was rebuilding database (system is stable).\n"); ++ // TODO: Dirty dump module - from the memory scratches if possible ++ } ++ } else { ++ p_print_log(P_LKRG_WARN, ++ "Found less[%d] modules in current system in module list [%d] than in DB module list[%d]\n", ++ p_tmp_diff, ++ p_module_list_nr_tmp, ++ p_db.p_module_list_nr); ++ p_print_log(P_LKRG_WARN, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_db.p_module_list_array[p_tmp_hash].p_name, ++ (unsigned long)p_db.p_module_list_array[p_tmp_hash].p_mod, ++ (unsigned long)p_db.p_module_list_array[p_tmp_hash].p_module_core, ++ p_db.p_module_list_array[p_tmp_hash].p_core_text_size, ++ p_db.p_module_list_array[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_WARN, ++ "Strange behaviour detected - module was found in DB but not in OS (system is stable).\n"); ++ ++ // Did NOT find it in the system via official API... 
++ // TODO: Dirty dump module - from the memory scratches if possible ++ } ++ } ++ } ++ } ++ } ++ /* We should never be here... we found more mismatched modules than expected */ ++ if (p_tmp_diff != p_tmp_flag_cnt) { ++ p_print_log(P_LKRG_ERR, ++ "We found more[%d] missing modules than expected[%d]... something went wrong ;(\n", ++ p_tmp_flag_cnt,p_tmp_diff); ++ } ++ } else if (p_db.p_module_list_nr < p_module_list_nr_tmp) { ++ /* ++ * This is weird situation as well. Notifier should inform us ++ * whenever new module arrives and we rebuild database. ++ * ++ * It might happen when verification routine wins ++ * the race with module notification routine of acquiring ++ * module mutexes. In that case, notification routine will ++ * wait until this verification context unlocks mutexes. ++ * ++ * I would not make assumption system is somehow compromised ++ * but we should inform about that! ++ */ ++ ++ p_tmp_diff = p_module_list_nr_tmp - p_db.p_module_list_nr; ++ ++ for (p_tmp_flag = 0, p_tmp_hash = 0; p_tmp_hash < p_module_list_nr_tmp; ++ p_tmp_flag = 0, p_tmp_hash++) { ++ for (p_tmp_cnt = 0; p_tmp_cnt < p_db.p_module_list_nr; p_tmp_cnt++) { ++ /* Is module on both lists? */ ++ if (p_module_list_tmp[p_tmp_hash].p_mod == p_db.p_module_list_array[p_tmp_cnt].p_mod) { ++ p_tmp_flag = 1; ++ break; ++ } ++ } ++ /* Did we find missing module? */ ++ if (!p_tmp_flag) { ++ /* OK we found which module is in current module list but not in DB module list... */ ++ p_tmp_flag_cnt++; ++ ++ // TODO: Dump module ++ ++ if (!P_CTRL(p_block_modules)) { ++ /* Maybe we have sleeping module activity event ? 
*/ ++ if (mutex_is_locked(&p_module_activity)) { ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ "Extra[0x%lx] p_module_activity_ptr[0x%lx]\n", ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_activity_ptr); ++ if (p_module_list_tmp[p_tmp_hash].p_mod == p_module_activity_ptr) { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in DB module list [%d] than in current module list[%d]\n", ++ p_tmp_diff, ++ p_db.p_module_list_nr, ++ p_module_list_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Extra module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Extra module is the same as on-going module activity events (system is stable).\n"); ++ } else { ++ p_tmp_mod = P_SYM(p_find_module(p_module_list_tmp[p_tmp_hash].p_name)); ++ if (p_tmp_mod) { ++ if (p_tmp_mod->state != MODULE_STATE_LIVE) { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in DB module list [%d] than in current module list[%d]\n", ++ p_tmp_diff, ++ p_db.p_module_list_nr, ++ p_module_list_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Extra module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Extra module is not in the 'LIVE' state but in [%s] state (system is stable).\n", ++ (p_tmp_mod->state == 1) ? "COMING" : ++ (p_tmp_mod->state == 2) ? "GOING AWAY" : ++ (p_tmp_mod->state == 3) ? 
"COMING" : "UNKNOWN!"); ++ } else { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in DB module list [%d] than in current module list[%d]\n", ++ p_tmp_diff, ++ p_db.p_module_list_nr, ++ p_module_list_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Extra module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Extra module has 'live' state but 'block_modules' is disabled. Module was correctly " ++ "identified through the official API. Most likely race condition appeared when system " ++ "was rebuilding database (system is stable).\n"); ++ ++ // TODO: Dump module ++ } ++ } else { ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! FOUND LESS[%d] MODULES IN DB IN MODULE LIST[%d] THAN IN CURRENT MODULE LIST[%d]\n", ++ p_tmp_diff, ++ p_db.p_module_list_nr, ++ p_module_list_nr_tmp); ++ /* Let's dump information about 'hidden' module */ ++ p_print_log(P_LKRG_CRIT, ++ "EXTRA MODULE:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_CRIT, ++ "!! MOST LIKELY SYSTEM IS HACKED - MODULE WILL BE DUMPED !! **\n"); ++ ++ // Did NOT find it in the system via official API... ++ // MOST LIKELY WE ARE HACKED! 
++ // TODO: Dump module ++ } ++ } ++ } else { ++ p_tmp_mod = P_SYM(p_find_module(p_module_list_tmp[p_tmp_hash].p_name)); ++ if (p_tmp_mod) { ++ if (p_tmp_mod->state != MODULE_STATE_LIVE) { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in DB module list [%d] than in current module list[%d]\n", ++ p_tmp_diff, ++ p_db.p_module_list_nr, ++ p_module_list_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Extra module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Extra module is not in the 'LIVE' state but in [%s] state (system is stable).\n", ++ (p_tmp_mod->state == 1) ? "COMING" : ++ (p_tmp_mod->state == 2) ? "GOING AWAY" : ++ (p_tmp_mod->state == 3) ? "COMING" : "UNKNOWN!"); ++ } else { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in DB module list [%d] than in current module list[%d]\n", ++ p_tmp_diff, ++ p_db.p_module_list_nr, ++ p_module_list_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Extra module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Extra module has 'live' state but 'block_modules' is disabled. Module was correctly " ++ "identified through the official API. Most likely race condition appeared when system " ++ "was rebuilding database (system is stable).\n"); ++ // TODO: Dump module ++ } ++ } else { ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! 
FOUND LESS[%d] MODULES IN DB IN MODULE LIST[%d] THAN IN CURRENT MODULE LIST[%d]\n", ++ p_tmp_diff, ++ p_db.p_module_list_nr, ++ p_module_list_nr_tmp); ++ /* Let's dump information about 'hidden' module */ ++ p_print_log(P_LKRG_CRIT, ++ "EXTRA MODULE:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ ++ // Did NOT find it in the system via official API... ++ // MOST LIKELY WE ARE HACKED! ++ // TODO: Dump module ++ } ++ } ++ } else { ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! FOUND LESS[%d] MODULES IN DB IN MODULE LIST[%d] THAN IN CURRENT MODULE LIST[%d]\n", ++ p_tmp_diff, ++ p_db.p_module_list_nr, ++ p_module_list_nr_tmp); ++ /* Let's dump information about 'hidden' module */ ++ p_print_log(P_LKRG_CRIT, ++ "EXTRA MODULE:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_list_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_list_tmp[p_tmp_hash].p_module_core, ++ p_module_list_tmp[p_tmp_hash].p_core_text_size, ++ p_module_list_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_CRIT, ++ "!! MOST LIKELY SYSTEM IS HACKED - MODULE WILL BE DUMPED !! **\n"); ++ ++ // Dynamic module blocking is disabled so this situation shouldn't happen ++ // MOST LIKELY WE ARE HACKED! ++ // TODO: Dump module ++ } ++ } ++ } ++ ++ /* We should never be here... we found more mismatched modules than expected */ ++ if (p_tmp_diff != p_tmp_flag_cnt) { ++ p_print_log(P_LKRG_ERR, ++ "We found more[%d] missing modules than expected[%d]... something went wrong ;(\n", ++ p_tmp_flag_cnt,p_tmp_diff); ++ } ++ } else { ++ p_print_log(P_LKRG_CRIT, ++ "I should never be here!... 
something went wrong ;( module list[%d] DB module list[%d]\n", ++ p_module_list_nr_tmp,p_db.p_module_list_nr); ++ } ++ } ++ ++ ++ /* ++ * If we enter this block number of modules in module list and sysfs are the same. ++ * Unfortunately we have not the same number of modules in database KOBJ ++ * than currently in the system! ++ * Let's find out which module we missing and print some information about it. ++ * ++ * TODO: dump as much info about this module as possible e.g. ++ * core-dump image, ddebug_table information, symbol table, etc. ++ */ ++ if (p_module_kobj_nr_tmp != p_db.p_module_kobj_nr) { ++ unsigned int p_tmp_cnt,p_tmp_diff = 0; ++ char p_tmp_flag,p_tmp_flag_cnt = 0; ++ ++ p_mod_bad_nr++; ++ if (p_module_kobj_nr_tmp < p_db.p_module_kobj_nr) { ++ /* ++ * This is weird situation as well. Notifier should inform us ++ * whenever new module arrives and we rebuild database. ++ * ++ * It might happen when verification routine wins ++ * the race with module notification routine of acquiring ++ * module mutexes. In that case, notification routine will ++ * wait until this verification context unlocks mutexes. ++ * ++ * I would not make assumption system is somehow compromised ++ * but we should inform about that! ++ */ ++ ++ p_tmp_diff = p_db.p_module_kobj_nr - p_module_kobj_nr_tmp; ++ ++ for (p_tmp_flag = 0, p_tmp_hash = 0; p_tmp_hash < p_db.p_module_kobj_nr; ++ p_tmp_flag = 0, p_tmp_hash++) { ++ for (p_tmp_cnt = 0; p_tmp_cnt < p_module_kobj_nr_tmp; p_tmp_cnt++) { ++ /* Is module on both lists? */ ++ if (p_db.p_module_kobj_array[p_tmp_hash].p_mod == p_module_kobj_tmp[p_tmp_cnt].p_mod) { ++ p_tmp_flag = 1; ++ break; ++ } ++ } ++ /* Did we find missing module? */ ++ if (!p_tmp_flag) { ++ /* OK we found which module is in KOBJ DB but not in the current KOBJ list... */ ++ p_tmp_flag_cnt++; ++ ++ // TODO: Module disappeared and we didn't notice it! We shouldn't dump it because ++ // most likely module doesn't exists anymore... 
++ // But we can try to poke that page where modules used to be to find out scratches ++ // of information about it (e.g. name? symbols table?) ++ ++ if (!P_CTRL(p_block_modules)) { ++ /* Maybe we have sleeping module activity event ? */ ++ if (mutex_is_locked(&p_module_activity)) { ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ "Lost[0x%lx] p_module_activity_ptr[0x%lx]\n", ++ (unsigned long)p_db.p_module_kobj_array[p_tmp_hash].p_mod, ++ (unsigned long)p_module_activity_ptr); ++ if (p_db.p_module_kobj_array[p_tmp_hash].p_mod == p_module_activity_ptr) { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in current system in KOBJ [%d] than in DB KOBJ[%d]\n", ++ p_tmp_diff, ++ p_module_kobj_nr_tmp, ++ p_db.p_module_kobj_nr); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_db.p_module_kobj_array[p_tmp_hash].p_name, ++ (unsigned long)p_db.p_module_kobj_array[p_tmp_hash].p_mod, ++ (unsigned long)p_db.p_module_kobj_array[p_tmp_hash].p_module_core, ++ p_db.p_module_kobj_array[p_tmp_hash].p_core_text_size, ++ p_db.p_module_kobj_array[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module is the same as on-going module activity events (system is stable).\n"); ++ } else { ++ p_tmp_mod = P_SYM(p_find_module(p_db.p_module_kobj_array[p_tmp_hash].p_name)); ++ if (p_tmp_mod) { ++ if (p_tmp_mod->state != MODULE_STATE_LIVE) { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in current system in KOBJ [%d] than in DB KOBJ[%d]\n", ++ p_tmp_diff, ++ p_module_kobj_nr_tmp, ++ p_db.p_module_kobj_nr); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_db.p_module_kobj_array[p_tmp_hash].p_name, ++ (unsigned long)p_db.p_module_kobj_array[p_tmp_hash].p_mod, ++ (unsigned long)p_db.p_module_kobj_array[p_tmp_hash].p_module_core, ++ p_db.p_module_kobj_array[p_tmp_hash].p_core_text_size, ++ 
p_db.p_module_kobj_array[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module is not in the 'LIVE' state but in [%s] state (system is stable).\n", ++ (p_tmp_mod->state == 1) ? "COMING" : ++ (p_tmp_mod->state == 2) ? "GOING AWAY" : ++ (p_tmp_mod->state == 3) ? "COMING" : "UNKNOWN!"); ++ } else { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in current system in KOBJ [%d] than in DB KOBJ[%d]\n", ++ p_tmp_diff, ++ p_module_kobj_nr_tmp, ++ p_db.p_module_kobj_nr); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_db.p_module_kobj_array[p_tmp_hash].p_name, ++ (unsigned long)p_db.p_module_kobj_array[p_tmp_hash].p_mod, ++ (unsigned long)p_db.p_module_kobj_array[p_tmp_hash].p_module_core, ++ p_db.p_module_kobj_array[p_tmp_hash].p_core_text_size, ++ p_db.p_module_kobj_array[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module has 'live' state but 'block_modules' is disabled. Module was correctly " ++ "identified through the official API. 
Most likely race condition appeared when system " ++ "was rebuilding database (system is stable).\n"); ++ ++ // TODO: Dirty dump module - from the memory scratches if possible ++ } ++ } else { ++ p_print_log(P_LKRG_WARN, ++ "Found less[%d] modules in current system in KOBJ [%d] than in DB KOBJ[%d]\n", ++ p_tmp_diff, ++ p_module_kobj_nr_tmp, ++ p_db.p_module_kobj_nr); ++ p_print_log(P_LKRG_WARN, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_db.p_module_kobj_array[p_tmp_hash].p_name, ++ (unsigned long)p_db.p_module_kobj_array[p_tmp_hash].p_mod, ++ (unsigned long)p_db.p_module_kobj_array[p_tmp_hash].p_module_core, ++ p_db.p_module_kobj_array[p_tmp_hash].p_core_text_size, ++ p_db.p_module_kobj_array[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_WARN, ++ "Strange behaviour detected - module was found in DB but not in OS (system is stable).\n"); ++ // Did NOT find it in the system via official API... ++ // TODO: Dirty dump module - from the memory scratches if possible ++ } ++ } ++ } else { ++ p_tmp_mod = P_SYM(p_find_module(p_db.p_module_kobj_array[p_tmp_hash].p_name)); ++ if (p_tmp_mod) { ++ if (p_tmp_mod->state != MODULE_STATE_LIVE) { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in current system in KOBJ [%d] than in DB KOBJ[%d]\n", ++ p_tmp_diff, ++ p_module_kobj_nr_tmp, ++ p_db.p_module_kobj_nr); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_db.p_module_kobj_array[p_tmp_hash].p_name, ++ (unsigned long)p_db.p_module_kobj_array[p_tmp_hash].p_mod, ++ (unsigned long)p_db.p_module_kobj_array[p_tmp_hash].p_module_core, ++ p_db.p_module_kobj_array[p_tmp_hash].p_core_text_size, ++ p_db.p_module_kobj_array[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module is not in the 'LIVE' state but in [%s] state (system is stable).\n", ++ (p_tmp_mod->state == 1) ? 
"COMING" : ++ (p_tmp_mod->state == 2) ? "GOING AWAY" : ++ (p_tmp_mod->state == 3) ? "COMING" : "UNKNOWN!"); ++ } else { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in current system in KOBJ [%d] than in DB KOBJ[%d]\n", ++ p_tmp_diff, ++ p_module_kobj_nr_tmp, ++ p_db.p_module_kobj_nr); ++ p_print_log(P_LKRG_INFO, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_db.p_module_kobj_array[p_tmp_hash].p_name, ++ (unsigned long)p_db.p_module_kobj_array[p_tmp_hash].p_mod, ++ (unsigned long)p_db.p_module_kobj_array[p_tmp_hash].p_module_core, ++ p_db.p_module_kobj_array[p_tmp_hash].p_core_text_size, ++ p_db.p_module_kobj_array[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Lost module has 'live' state but 'block_modules' is disabled. Module was correctly " ++ "identified through the official API. Most likely race condition appeared when system " ++ "was rebuilding database (system is stable).\n"); ++ // TODO: Dirty dump module - from the memory scratches if possible ++ } ++ } else { ++ p_print_log(P_LKRG_WARN, ++ "Found less[%d] modules in current system in KOBJ [%d] than in DB KOBJ[%d]\n", ++ p_tmp_diff, ++ p_module_kobj_nr_tmp, ++ p_db.p_module_kobj_nr); ++ p_print_log(P_LKRG_WARN, ++ "Lost module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_db.p_module_kobj_array[p_tmp_hash].p_name, ++ (unsigned long)p_db.p_module_kobj_array[p_tmp_hash].p_mod, ++ (unsigned long)p_db.p_module_kobj_array[p_tmp_hash].p_module_core, ++ p_db.p_module_kobj_array[p_tmp_hash].p_core_text_size, ++ p_db.p_module_kobj_array[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_WARN, ++ "Strange behaviour detected - module was found in DB but not in OS (system is stable).\n"); ++ // Did NOT find it in the system via official API... ++ // TODO: Dirty dump module - from the memory scratches if possible ++ } ++ } ++ } ++ } ++ } ++ /* We should never be here... 
we found more mismatched modules than expected */ ++ if (p_tmp_diff != p_tmp_flag_cnt) { ++ p_print_log(P_LKRG_ERR, ++ "We found more[%d] missing modules than expected[%d]... something went wrong ;(\n", ++ p_tmp_flag_cnt,p_tmp_diff); ++ } ++ } else if (p_db.p_module_kobj_nr < p_module_kobj_nr_tmp) { ++ /* ++ * This is weird situation as well. Notifier should inform us ++ * whenever new module arrives and we rebuild database. ++ * ++ * It might happen when verification routine wins ++ * the race with module notification routine of acquiring ++ * module mutexes. In that case, notification routine will ++ * wait until this verification context unlocks mutexes. ++ * ++ * I would not make assumption system is somehow compromised ++ * but we should inform about that! ++ */ ++ ++ p_tmp_diff = p_module_kobj_nr_tmp - p_db.p_module_kobj_nr; ++ ++ for (p_tmp_flag = 0, p_tmp_hash = 0; p_tmp_hash < p_module_kobj_nr_tmp; ++ p_tmp_flag = 0, p_tmp_hash++) { ++ for (p_tmp_cnt = 0; p_tmp_cnt < p_db.p_module_kobj_nr; p_tmp_cnt++) { ++ /* Is module on both lists? */ ++ if (p_module_kobj_tmp[p_tmp_hash].p_mod == p_db.p_module_kobj_array[p_tmp_cnt].p_mod) { ++ p_tmp_flag = 1; ++ break; ++ } ++ } ++ /* Did we find missing module? */ ++ if (!p_tmp_flag) { ++ /* OK we found which module is in the current KOBJ list but not in KOBJ DB... */ ++ p_tmp_flag_cnt++; ++ ++ // TODO: Dump module ++ ++ if (!P_CTRL(p_block_modules)) { ++ /* Maybe we have sleeping module activity event ? 
*/ ++ if (mutex_is_locked(&p_module_activity)) { ++ // STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ "Extra[0x%lx] p_module_activity_ptr[0x%lx]\n", ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_activity_ptr); ++ if (p_module_kobj_tmp[p_tmp_hash].p_mod == p_module_activity_ptr) { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in DB in KOBJ [%d] than in current KOBJ[%d]\n", ++ p_tmp_diff, ++ p_db.p_module_kobj_nr, ++ p_module_kobj_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Extra module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_module_core, ++ p_module_kobj_tmp[p_tmp_hash].p_core_text_size, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Extra module is the same as on-going module activity events (system is stable).\n"); ++ } else { ++ p_tmp_mod = P_SYM(p_find_module(p_module_kobj_tmp[p_tmp_hash].p_name)); ++ if (p_tmp_mod) { ++ if (p_tmp_mod->state != MODULE_STATE_LIVE) { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in DB in KOBJ [%d] than in current KOBJ[%d]\n", ++ p_tmp_diff, ++ p_db.p_module_kobj_nr, ++ p_module_kobj_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Extra module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_module_core, ++ p_module_kobj_tmp[p_tmp_hash].p_core_text_size, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Extra module is not in the 'LIVE' state but in [%s] state (system is stable).\n", ++ (p_tmp_mod->state == 1) ? "COMING" : ++ (p_tmp_mod->state == 2) ? "GOING AWAY" : ++ (p_tmp_mod->state == 3) ? 
"COMING" : "UNKNOWN!"); ++ } else { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in DB in KOBJ [%d] than in current KOBJ[%d]\n", ++ p_tmp_diff, ++ p_db.p_module_kobj_nr, ++ p_module_kobj_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Extra module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_module_core, ++ p_module_kobj_tmp[p_tmp_hash].p_core_text_size, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Extra module has 'live' state but 'block_modules' is disabled. Module was correctly " ++ "identified through the official API. Most likely race condition appeared when system " ++ "was rebuilding database (system is stable).\n"); ++ ++ // TODO: Dump module ++ } ++ } else { ++ p_print_log(P_LKRG_WARN, ++ "Found less[%d] modules in DB in KOBJ [%d] than in current KOBJ[%d]\n", ++ p_tmp_diff, ++ p_db.p_module_kobj_nr, ++ p_module_kobj_nr_tmp); ++ p_print_log(P_LKRG_WARN, ++ "Extra module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_module_core, ++ p_module_kobj_tmp[p_tmp_hash].p_core_text_size, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_WARN, ++ "Strange behaviour detected - module was found in DB KOBJs but not in OS (system is stable).\n"); ++ // Did NOT find it in the system via official API... 
++ // MOST LIKELY WE ARE NOT HACKED :) ++ // TODO: Dump module ++ } ++ } ++ } else { ++ p_tmp_mod = P_SYM(p_find_module(p_module_kobj_tmp[p_tmp_hash].p_name)); ++ if (p_tmp_mod) { ++ if (p_tmp_mod->state != MODULE_STATE_LIVE) { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in DB in KOBJ [%d] than in current KOBJ[%d]\n", ++ p_tmp_diff, ++ p_db.p_module_kobj_nr, ++ p_module_kobj_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Extra module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_module_core, ++ p_module_kobj_tmp[p_tmp_hash].p_core_text_size, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Extra module is not in the 'LIVE' state but in [%s] state (system is stable).\n", ++ (p_tmp_mod->state == 1) ? "COMING" : ++ (p_tmp_mod->state == 2) ? "GOING AWAY" : ++ (p_tmp_mod->state == 3) ? "COMING" : "UNKNOWN!"); ++ } else { ++ p_print_log(P_LKRG_INFO, ++ "Found less[%d] modules in DB in KOBJ [%d] than in current KOBJ[%d]\n", ++ p_tmp_diff, ++ p_db.p_module_kobj_nr, ++ p_module_kobj_nr_tmp); ++ p_print_log(P_LKRG_INFO, ++ "Extra module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_module_core, ++ p_module_kobj_tmp[p_tmp_hash].p_core_text_size, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_INFO, ++ "Extra module has 'live' state but 'block_modules' is disabled. Module was correctly " ++ "identified through the official API. 
Most likely race condition appeared when system " ++ "was rebuilding database (system is stable).\n"); ++ // TODO: Dump module ++ } ++ } else { ++ p_print_log(P_LKRG_WARN, ++ "Found less[%d] modules in DB in KOBJ [%d] than in current KOBJ[%d]\n", ++ p_tmp_diff, ++ p_db.p_module_kobj_nr, ++ p_module_kobj_nr_tmp); ++ p_print_log(P_LKRG_WARN, ++ "Extra module:\nname[%s] module at addr[0x%lx] module core[0x%lx] with size[0x%x] hash[0x%llx]\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_mod, ++ (unsigned long)p_module_kobj_tmp[p_tmp_hash].p_module_core, ++ p_module_kobj_tmp[p_tmp_hash].p_core_text_size, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash); ++ p_print_log(P_LKRG_WARN, ++ "Strange behaviour detected - module was found in DB KOBJs but not in OS (system is stable).\n"); ++ ++ // Did NOT find it in the system via official API... ++ // MOST LIKELY WE ARE NOT HACKED :) ++ // TODO: Dump module ++ } ++ } ++ } ++ } ++ } ++ /* We should never be here... we found more mismatched modules than expected */ ++ if (p_tmp_diff != p_tmp_flag_cnt) { ++ p_print_log(P_LKRG_ERR, ++ "We found more[%d] missing modules than expected[%d]... something went wrong ;(\n", ++ p_tmp_flag_cnt,p_tmp_diff); ++ } ++ } else { ++ p_print_log(P_LKRG_CRIT, ++ "I should never be here!... something went wrong ;( module list[%d] DB module list[%d]\n", ++ p_module_list_nr_tmp,p_db.p_module_list_nr); ++ } ++ } ++ ++ ++/* if ( (p_module_list_nr_tmp != p_module_kobj_nr_tmp) || (p_module_list_nr_tmp != p_db.p_module_list_nr) ++ || (p_module_kobj_nr_tmp != p_db.p_module_kobj_nr) ) { ++ printk(P_LKRG_PRINT P_LKRG_SIGNATURE ++ "ALERT !!! MODULE NUMBERS ARE INCONSISTENT! IN DB: LIST[%d] KOBJ[%d], found LIST[%d], KOBJ[%d]\n", ++ p_db.p_module_list_nr,p_db.p_module_kobj_nr,p_module_list_nr_tmp,p_module_kobj_nr_tmp); ++ p_hack_check++; ++ ++ // TODO: Find missing module and DUMP as much info as possible about that! 
++ ++ } ++*/ ++ ++ ++ p_tmp_hash = p_lkrg_fast_hash((unsigned char *)p_module_list_tmp, ++ (unsigned int)p_module_list_nr_tmp * sizeof(p_module_list_mem)); ++ ++ p_print_log(P_LKRG_INFO,"Hash from 'module list' => [0x%llx]\n",p_tmp_hash); ++ ++ if (p_tmp_hash != p_db.p_module_list_hash) { ++ unsigned int p_tmp_cnt,p_local_hack_check = 0; ++ ++ for (p_tmp = 0; p_tmp < p_db.p_module_list_nr; p_tmp++) { ++ for (p_tmp_cnt = 0; p_tmp_cnt < p_module_list_nr_tmp; p_tmp_cnt++) { ++ if (p_db.p_module_list_array[p_tmp].p_mod == p_module_list_tmp[p_tmp_cnt].p_mod) { ++ if (p_db.p_module_list_array[p_tmp].p_mod_core_text_hash != p_module_list_tmp[p_tmp_cnt].p_mod_core_text_hash) { ++ /* I'm hacked! ;( */ ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! MODULE'S <%s> HASH IS DIFFERENT it is [0x%llx] and should be [0x%llx] !!!\n", ++ p_module_list_tmp[p_tmp_cnt].p_name, ++ p_module_list_tmp[p_tmp_cnt].p_mod_core_text_hash, ++ p_db.p_module_list_array[p_tmp_cnt].p_mod_core_text_hash); ++ P_KINT_IF_ACCEPT(p_db.p_module_list_array[p_tmp_cnt].p_mod_core_text_hash, ++ p_module_list_tmp[p_tmp_cnt].p_mod_core_text_hash, ++ p_hack_check); ++ p_local_hack_check++; ++ } ++ } ++ } ++ } ++/* ++ if (!p_local_hack_check) { ++ p_db.p_module_list_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_list_array, ++ (unsigned int)p_db.p_module_list_nr * sizeof(p_module_list_mem)); ++ ++ if (p_tmp_hash != p_db.p_module_list_hash) { ++ p_local_hack_check = 1; ++ } ++ } ++*/ ++ /* ++ * OK, we know hash will be different if there is inconsistency in the number ++ * of tracked / discovered modules in module list and/or in sysfs (KOBJs) ++ */ ++ if (p_local_hack_check) { ++ if (!p_mod_bad_nr) { ++ ++ /* Maybe we have sleeping module activity event ? */ ++ if (mutex_is_locked(&p_module_activity)) { ++ P_KINT_HACK_D(p_hack_check); ++ p_print_log(P_LKRG_INFO, ++ "Unhandled on-going module activity events detected. 
" ++ "Activity changed module list consistency (system is stable).\n"); ++ } else { ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! MODULE LIST HASH IS DIFFERENT !!! - it is [0x%llx] and should be [0x%llx] !!!\n", ++ p_tmp_hash,p_db.p_module_list_hash); ++ P_KINT_HACK_I(p_hack_check); ++ } ++ } ++ } ++ ++ } ++ ++ p_tmp_hash = p_lkrg_fast_hash((unsigned char *)p_module_kobj_tmp, ++ (unsigned int)p_module_kobj_nr_tmp * sizeof(p_module_kobj_mem)); ++ ++ p_print_log(P_LKRG_INFO,"Hash from 'module kobj(s)' => [0x%llx]\n",p_tmp_hash); ++ ++ if (p_tmp_hash != p_db.p_module_kobj_hash) { ++ ++ /* ++ * OK, we know hash will be different if there is inconsistency in the number ++ * of tracked / discovered modules in module list and/or in sysfs (KOBJs) ++ */ ++ if (!p_mod_bad_nr) { ++ /* Maybe we have sleeping module activity event ? */ ++ if (mutex_is_locked(&p_module_activity)) { ++ p_print_log(P_LKRG_INFO, ++ "Unhandled on-going module activity events detected. " ++ "Activity changed KOBJs consistency (system is stable).\n"); ++ } else { ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! MODULE KOBJ HASH IS DIFFERENT !!! - it is [0x%llx] and should be [0x%llx] !!!\n", ++ p_tmp_hash,p_db.p_module_kobj_hash); ++ P_KINT_HACK_I(p_hack_check); ++ } ++ } ++ ++ for (p_tmp_hash = 0; p_tmp_hash < p_db.p_module_kobj_nr; p_tmp_hash++) { ++ unsigned int p_tmp_cnt; ++ for (p_tmp_cnt = 0; p_tmp_cnt < p_module_kobj_nr_tmp; p_tmp_cnt++) { ++ if (p_db.p_module_kobj_array[p_tmp_hash].p_mod == p_module_kobj_tmp[p_tmp_cnt].p_mod) ++ if (p_db.p_module_kobj_array[p_tmp_hash].p_mod_core_text_hash != p_module_kobj_tmp[p_tmp_cnt].p_mod_core_text_hash) { ++ p_print_log(P_LKRG_CRIT, ++ "[KOBJ] ALERT !!! 
MODULE'S <%s> HASH IS DIFFERENT it is [0x%llx] and should be [0x%llx] !!!\n", ++ p_module_kobj_tmp[p_tmp_hash].p_name, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash, ++ p_db.p_module_kobj_array[p_tmp_cnt].p_mod_core_text_hash); ++ P_KINT_IF_ACCEPT(p_db.p_module_kobj_array[p_tmp_cnt].p_mod_core_text_hash, ++ p_module_kobj_tmp[p_tmp_hash].p_mod_core_text_hash, ++ p_hack_check); ++ } ++ } ++ } ++ } ++ ++ if (p_hack_check) { ++ p_print_log(P_LKRG_CRIT, ++ "ALERT !!! SYSTEM HAS BEEN COMPROMISED - DETECTED DIFFERENT %u CHECKSUMS !!!\n",p_hack_check); ++ if (P_CTRL(p_kint_enforce == 2)) { ++ // OK, we need to crash the kernel now ++ panic(P_LKRG_SIGNATURE "Kernel Integrity verification failed! Killing the kernel...\n"); ++ } ++ } else if (P_CTRL(p_heartbeat)) { ++ p_print_log(P_LKRG_ALIVE,"System is clean!\n"); ++ } ++ ++ if (p_module_list_tmp) { ++ p_kzfree(p_module_list_tmp); ++ p_module_list_tmp = NULL; ++ } ++ if (p_module_kobj_tmp) { ++ p_kzfree(p_module_kobj_tmp); ++ p_module_kobj_tmp = NULL; ++ } ++ ++ /* God mode off ;) */ ++ spin_unlock_irqrestore(&p_db_lock,p_db_flags); ++// spin_unlock(&p_db_lock); ++ ++p_check_integrity_cancel: ++ ++ p_text_section_unlock(); ++ if (p_tmp_cpus) { ++ p_kzfree(p_tmp_cpus); ++ p_tmp_cpus = NULL; ++ } ++ ++p_check_integrity_tasks: ++ ++ if (!p_ed_enforce_validation_paranoid()) { ++ if (P_CTRL(p_heartbeat) && P_CTRL(p_pint_validate) && ++ (!P_CTRL(p_kint_validate) || (!p_manual && P_CTRL(p_kint_validate) == 1))) { ++ p_print_log(P_LKRG_ALIVE,"Tasks are clean!\n"); ++ } ++ } ++ ++ if (p_manual) ++ p_manual = 0; ++ ++ /* Free the worker struct */ ++ if (p_work) { ++ p_free_offload(p_work); ++ } ++} +diff --git a/security/lkrg/modules/integrity_timer/p_integrity_timer.h b/security/lkrg/modules/integrity_timer/p_integrity_timer.h +new file mode 100644 +index 000000000000..5851de77ebcc +--- /dev/null ++++ b/security/lkrg/modules/integrity_timer/p_integrity_timer.h +@@ -0,0 +1,54 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * 
++ * Component: ++ * - Integrity timer module ++ * ++ * Notes: ++ * - Periodically check critical system hashes using timer ++ * ++ * Timeline: ++ * - Created: 24.XI.2015 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_INTEGRITY_TIMER_H ++#define P_LKRG_INTEGRITY_TIMER_H ++ ++#define p_alloc_offload() kmem_cache_alloc(p_offload_cache, GFP_ATOMIC) ++#define p_free_offload(name) kmem_cache_free(p_offload_cache, (void *)(name)) ++ ++#define P_KINT_HACK_D(check) check-- ++#define P_KINT_HACK_I(check) check++ ++ ++#define P_KINT_IF_ACCEPT(old, new, check) \ ++do { \ ++ if (!P_CTRL(p_kint_enforce)) { \ ++ old = new; \ ++ } \ ++ P_KINT_HACK_I(check); \ ++} while(0) ++ ++void p_check_integrity(struct work_struct *p_work); ++void p_integrity_timer(void); ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) ++void p_offload_work(unsigned long p_timer); ++#else ++void p_offload_work(struct timer_list *p_timer); ++#endif ++ ++// int p_cmp_bytes(char *p_new, char *p_old, unsigned long p_size, p_module_list_mem *p_module); ++ ++int p_offload_cache_init(void); ++void p_offload_cache_delete(void); ++ ++extern struct timer_list p_timer; ++extern unsigned int p_manual; ++extern spinlock_t p_db_lock; ++extern unsigned long p_db_flags; ++extern struct kmem_cache *p_offload_cache; ++ ++#endif +diff --git a/security/lkrg/modules/kmod/p_kmod.c b/security/lkrg/modules/kmod/p_kmod.c +new file mode 100644 +index 000000000000..68129b406b34 +--- /dev/null ++++ b/security/lkrg/modules/kmod/p_kmod.c +@@ -0,0 +1,670 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Kernel's modules module ++ * ++ * Notes: ++ * - Gathers information about loaded kernel modules and tries ++ * to protect them via calculating hashes from their core_text ++ * section. 
++ * ++ * Timeline: ++ * - Created: 09.II.2016 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ * Notes - https://github.com/dgoulet/kjackal/blob/master/src/module.c ++ * ++ */ ++ ++#include "../../p_lkrg_main.h" ++ ++/* Submodule for 'kmod' module */ ++#include "p_kmod_notifier.c" ++ ++ ++int p_kmod_init(void) { ++ ++#if defined(CONFIG_DYNAMIC_DEBUG) ++ P_SYM(p_ddebug_tables) = (struct list_head *)P_SYM(p_kallsyms_lookup_name)("ddebug_tables"); ++ P_SYM(p_ddebug_lock) = (struct mutex *)P_SYM(p_kallsyms_lookup_name)("ddebug_lock"); ++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) ++ P_SYM(p_ddebug_remove_module_ptr) = (int(*)(const char *))P_SYM(p_kallsyms_lookup_name)("ddebug_remove_module"); ++ #endif ++#endif ++ ++ P_SYM(p_global_modules) = (struct list_head *)P_SYM(p_kallsyms_lookup_name)("modules"); ++ P_SYM(p_module_kset) = (struct kset **)P_SYM(p_kallsyms_lookup_name)("module_kset"); ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,12,0) ++ P_SYM(p_module_mutex) = (struct mutex *)P_SYM(p_kallsyms_lookup_name)("module_mutex"); ++ P_SYM(p_find_module) = (struct module* (*)(const char *))P_SYM(p_kallsyms_lookup_name)("find_module"); ++#else ++ P_SYM(p_module_mutex) = (struct mutex *)&module_mutex; ++ P_SYM(p_find_module) = (struct module* (*)(const char *))find_module; ++#endif ++ ++ // DEBUG ++ p_debug_log(P_LKRG_DBG, " " ++#if defined(CONFIG_DYNAMIC_DEBUG) ++ "p_ddebug_tables[0x%lx] p_ddebug_lock[0x%lx] " ++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) ++ "p_ddebug_remove_module_ptr[0x%lx]" ++ #endif ++#endif ++ "module_mutex[0x%lx] p_global_modules[0x%lx] " ++ "p_module_kset[0x%lx]\n", ++#if defined(CONFIG_DYNAMIC_DEBUG) ++ (unsigned long)P_SYM(p_ddebug_tables), ++ (unsigned long)P_SYM(p_ddebug_lock), ++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) ++ (unsigned long)P_SYM(p_ddebug_remove_module_ptr), ++ #endif ++#endif ++ (unsigned long)P_SYM(p_module_mutex), ++ (unsigned long)P_SYM(p_global_modules), ++ (unsigned 
long)P_SYM(p_module_kset)); ++ ++ if (!P_SYM(p_global_modules)) { ++ p_print_log(P_LKRG_ERR, ++ "KMOD error! Can't initialize global modules variable :( Exiting...\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++#if defined(CONFIG_DYNAMIC_DEBUG) ++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) ++ if (!P_SYM(p_ddebug_remove_module_ptr)) { ++ p_print_log(P_LKRG_ERR, ++ "KMOD error! Can't find 'ddebug_remove_module' function :( Exiting...\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ #endif ++#endif ++ ++ if (!P_SYM(p_module_kset)) { ++ p_print_log(P_LKRG_ERR, ++ "KMOD error! Can't find 'module_kset' variable :( Exiting...\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ if (!P_SYM(p_module_mutex)) { ++ p_print_log(P_LKRG_ERR, ++ "KMOD error! Can't find 'module_mutex' variable :( Exiting...\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ if (!P_SYM(p_find_module)) { ++ p_print_log(P_LKRG_ERR, ++ "KMOD error! Can't find 'find_module' function :( Exiting...\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ return P_LKRG_SUCCESS; ++} ++ ++/* ++ * 'module_lock' must be taken by calling function! ++ */ ++static unsigned int p_count_modules_from_module_list(void) { ++ ++ unsigned int p_cnt = 0; ++ struct module *p_mod; ++ ++ list_for_each_entry(p_mod, P_SYM(p_global_modules), list) { ++ ++/* ++ if (p_mod->state >= MODULE_STATE_UNFORMED || ++ p_mod->state < MODULE_STATE_LIVE) ++ continue; ++*/ ++ if (p_mod->state != MODULE_STATE_LIVE) ++ continue; ++ ++/* ++ if (p_mod == P_SYM(p_find_me)) ++ continue; ++*/ ++ ++ if (!p_module_core(p_mod) || !p_core_text_size(p_mod)) ++ continue; ++ ++ p_cnt++; ++ } ++ ++ return p_cnt; ++} ++ ++/* ++ * Traverse module list ++ * ++ * 'module_lock' must be taken by calling function! 
++ */ ++static int p_list_from_module_list(p_module_list_mem *p_arg, char p_flag) { ++ ++ struct module *p_mod; ++ unsigned int p_cnt = 0; ++ ++ list_for_each_entry(p_mod, P_SYM(p_global_modules), list) { ++/* ++ if (p_mod->state >= MODULE_STATE_UNFORMED || ++ p_mod->state < MODULE_STATE_LIVE) ++ continue; ++*/ ++ if (p_mod->state != MODULE_STATE_LIVE) ++ continue; ++ ++/* ++ if (p_mod == P_SYM(p_find_me)) ++ continue; ++*/ ++ ++ if (!p_module_core(p_mod) || !p_core_text_size(p_mod)) ++ continue; ++ ++ /* Pointer to THIS_MODULE per module */ ++ p_arg[p_cnt].p_mod = p_mod; ++ /* Save module name for that pointer */ ++ memcpy(p_arg[p_cnt].p_name,p_mod->name,MODULE_NAME_LEN); ++ p_arg[p_cnt].p_name[MODULE_NAME_LEN] = 0; ++ /* Pointer to the module core */ ++ p_arg[p_cnt].p_module_core = p_module_core(p_mod); ++ /* Size of the module core text section */ ++ p_arg[p_cnt].p_core_text_size = p_core_text_size(p_mod); ++ ++ /* Calculate hash from the module's core text section ;) */ ++ p_arg[p_cnt].p_mod_core_text_hash = p_lkrg_fast_hash((unsigned char *)p_arg[p_cnt].p_module_core, ++ (unsigned int)p_arg[p_cnt].p_core_text_size); ++ ++// STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ "[%s | 0x%lx] module_core[0x%lx | 0x%x] hash[0x%llx]\n", ++ p_arg[p_cnt].p_name, ++ (unsigned long)p_arg[p_cnt].p_mod, ++ (unsigned long)p_arg[p_cnt].p_module_core, ++ p_arg[p_cnt].p_core_text_size, ++ p_arg[p_cnt].p_mod_core_text_hash); ++ ++ p_cnt++; ++ } ++ ++ return P_LKRG_SUCCESS; ++} ++ ++/* ++ * 'module_lock' must be taken by calling function! 
++ */ ++unsigned int p_count_modules_from_sysfs_kobj(void) { ++ ++ struct module *p_mod = NULL; ++ struct kset *p_kset = *P_SYM(p_module_kset); ++ struct kobject *p_kobj = NULL, *p_tmp_safe = NULL; ++ struct module_kobject *p_mk = NULL; ++ unsigned int p_cnt = 0; ++ ++ kset_get(p_kset); ++ spin_lock(&p_kset->list_lock); ++ list_for_each_entry_safe(p_kobj, p_tmp_safe, &p_kset->list, entry) { ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0) ++ if (!P_SYM(p_module_address)((unsigned long)p_kobj)) ++ continue; ++#else ++ if (!__module_address((unsigned long)p_kobj)) ++ continue; ++#endif ++ ++ if (!p_kobj->state_initialized || !p_kobj->state_in_sysfs) { ++ /* Weirdo state :( */ ++ continue; ++ } ++ ++ if (!p_kobj->name) { ++ continue; ++ } ++ ++ p_mk = container_of(p_kobj, struct module_kobject, kobj); ++ if (!p_mk) { ++ continue; ++ } ++ ++ p_mod = p_mk->mod; ++ if (!p_mod) { ++ continue; ++ } ++ ++ if (p_mod->state != MODULE_STATE_LIVE) { ++ continue; ++ } ++ ++/* ++ if (p_mod == P_SYM(p_find_me)) { ++ continue; ++ } ++*/ ++ ++ if (!p_module_core(p_mod) || !p_core_text_size(p_mod)) { ++ continue; ++ } ++ ++ p_cnt++; ++ } ++ spin_unlock(&p_kset->list_lock); ++ kset_put(p_kset); ++ ++ return p_cnt; ++} ++ ++/* ++ * 'module_lock' must be taken by calling function! 
++ */ ++static int p_list_from_sysfs_kobj(p_module_kobj_mem *p_arg) { ++ ++ struct module *p_mod = NULL; ++ struct kset *p_kset = *P_SYM(p_module_kset); ++ struct kobject *p_kobj = NULL, *p_tmp_safe = NULL; ++ struct module_kobject *p_mk = NULL; ++ unsigned int p_cnt = 0; ++ ++ kset_get(p_kset); ++ spin_lock(&p_kset->list_lock); ++ list_for_each_entry_safe(p_kobj, p_tmp_safe, &p_kset->list, entry) { ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0) ++ if (!P_SYM(p_module_address)((unsigned long)p_kobj)) ++ continue; ++#else ++ if (!__module_address((unsigned long)p_kobj)) ++ continue; ++#endif ++ ++ if (!p_kobj->state_initialized || !p_kobj->state_in_sysfs) { ++ /* Weirdo state :( */ ++ continue; ++ } ++ ++ if (!p_kobj->name) { ++ continue; ++ } ++ ++ p_mk = container_of(p_kobj, struct module_kobject, kobj); ++ if (!p_mk) { ++ continue; ++ } ++ ++ p_mod = p_mk->mod; ++ if (!p_mod) { ++ continue; ++ } ++ ++ if (p_mod->state != MODULE_STATE_LIVE) { ++ continue; ++ } ++ ++/* ++ if (p_mod == P_SYM(p_find_me)) { ++ continue; ++ } ++*/ ++ ++ if (!p_module_core(p_mod) || !p_core_text_size(p_mod)) { ++ continue; ++ } ++ ++ /* Save pointer to the 'module_kobject' structure */ ++ p_arg[p_cnt].p_mk = p_mk; ++ /* Save entire 'kobject' for this module */ ++ memcpy(&p_arg[p_cnt].kobj,p_kobj,sizeof(struct kobject)); ++ /* Exception */ ++ memset(&p_arg[p_cnt].kobj.entry,0,sizeof(struct list_head)); // module GOING_AWAY trobules ;( ++ memset(&p_arg[p_cnt].kobj.kref,0,sizeof(struct kref)); // module GOING_AWAY trobules ;( ++ /* ++ * Commit 38dc717e9715 ("module: delay kobject uevent until after module init call") ++ * delayed the kobject uevent unnecessarily too far to until after sending a ++ * MODULE_STATE_LIVE notification. As the uevent modifies internal state of the KOBJ ++ * itself, this violated the assumption that the KOBJ remains consistent and can be ++ * integrity-checked as soon as the module is LIVE. 
++ * To be able to correctly handle this situation, unstable attributes are not verified. ++ */ ++ p_arg[p_cnt].kobj.state_add_uevent_sent = 0; ++ p_arg[p_cnt].kobj.state_remove_uevent_sent = 0; ++ p_arg[p_cnt].kobj.uevent_suppress = 0; ++ ++ /* Pointer to THIS_MODULE per module */ ++ p_arg[p_cnt].p_mod = p_mod; ++ /* Save module name for that pointer */ ++ memcpy(p_arg[p_cnt].p_name,p_mod->name,MODULE_NAME_LEN); ++ p_arg[p_cnt].p_name[MODULE_NAME_LEN] = 0; ++ /* Pointer to the module core */ ++ p_arg[p_cnt].p_module_core = p_module_core(p_mod); ++ /* Size of the module core text section */ ++ p_arg[p_cnt].p_core_text_size = p_core_text_size(p_mod); ++ /* Calculate hash from the module core text section ;) */ ++ p_arg[p_cnt].p_mod_core_text_hash = p_lkrg_fast_hash((unsigned char *)p_arg[p_cnt].p_module_core, ++ (unsigned int)p_arg[p_cnt].p_core_text_size); ++ ++// STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ "[%s | 0x%lx] module_core[0x%lx | 0x%x] hash[0x%llx]\n" ++ "module_kobject[0x%lx] KOBJ: name[%s] parent[0x%lx] " ++ "kset[0x%lx] ktype[0x%lx] sd[0x%lx] refcount[0x%x|%d]\n", ++ p_arg[p_cnt].p_name, ++ (unsigned long)p_arg[p_cnt].p_mod, ++ (unsigned long)p_arg[p_cnt].p_module_core, ++ p_arg[p_cnt].p_core_text_size, ++ p_arg[p_cnt].p_mod_core_text_hash, ++ (unsigned long)p_arg[p_cnt].p_mk, ++ p_arg[p_cnt].kobj.name, ++ (unsigned long)p_arg[p_cnt].kobj.parent, ++ (unsigned long)p_arg[p_cnt].kobj.kset, ++ (unsigned long)p_arg[p_cnt].kobj.ktype, ++ (unsigned long)p_arg[p_cnt].kobj.sd, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0) ++ p_arg[p_cnt].kobj.kref.refcount.counter, ++ p_arg[p_cnt].kobj.kref.refcount.counter); ++#else ++ p_arg[p_cnt].kobj.kref.refcount.refs.counter, ++ p_arg[p_cnt].kobj.kref.refcount.refs.counter); ++#endif ++ ++ p_cnt++; ++ } ++ spin_unlock(&p_kset->list_lock); ++ kset_put(p_kset); ++ ++ return P_LKRG_SUCCESS; ++} ++ ++/* ++ * 'module_lock' must be taken by calling function! 
++ */ ++int p_kmod_hash(unsigned int *p_module_list_cnt_arg, p_module_list_mem **p_mlm_tmp, ++ unsigned int *p_module_kobj_cnt_arg, p_module_kobj_mem **p_mkm_tmp, char p_flag) { ++ ++ int p_ret = P_LKRG_GENERAL_ERROR; ++ unsigned int p_module_list_cnt_arg_old = *p_module_list_cnt_arg; ++ unsigned int p_module_kobj_cnt_arg_old = *p_module_kobj_cnt_arg; ++ ++ /* ++ * Originally this mutex was taken here. Unfortunately some use cases of this function ++ * requires to work under global DB spinlock. Because of that calling function must take ++ * 'module_mutex' ++ */ ++// mutex_lock(&module_mutex); ++ ++ *p_module_list_cnt_arg = p_count_modules_from_module_list(); ++ *p_module_kobj_cnt_arg = p_count_modules_from_sysfs_kobj(); ++ ++// STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ "[p_kmod_hash] %s => Found %d modules in module list and %d modules in sysfs.\n", ++ (*p_module_list_cnt_arg != *p_module_kobj_cnt_arg) ? "DOESN'T MATCH" : "MATCH", ++ *p_module_list_cnt_arg,*p_module_kobj_cnt_arg); ++ ++ if ( (NULL == *p_mlm_tmp || NULL == *p_mkm_tmp) && p_flag == 2) { ++ /* ++ * Previous allocation failed :( ++ */ ++ ++ if (*p_mkm_tmp) { ++ p_kzfree(*p_mkm_tmp); ++ *p_mkm_tmp = NULL; ++ } ++ ++ /* First free currently used memory! */ ++ if (*p_mlm_tmp) { ++ p_kzfree(*p_mlm_tmp); ++ *p_mlm_tmp = NULL; ++ } ++ ++ if (p_db.p_jump_label.p_mod_mask) { ++ kfree(p_db.p_jump_label.p_mod_mask); ++ p_db.p_jump_label.p_mod_mask = NULL; ++ } ++ ++ p_flag = 1; ++ } ++ ++ ++ /* ++ * TODO: ++ * Allocation logic should be changed! Should preallocate memory once, and if there ++ * there is not enough space, reallocate it multiplying the size, and so on... At some ++ * point allocation won't happen at all since we will have enough room to always store ++ * all necessary information. 
++ */ ++ ++ if (!p_flag || 1 == p_flag) { ++ ++ if ( (p_db.p_jump_label.p_mod_mask = kmalloc(BITS_TO_LONGS(*p_module_list_cnt_arg)*sizeof(unsigned long), ++ GFP_ATOMIC)) == NULL) { ++ /* ++ * I should NEVER be here! ++ */ ++ p_ret = P_LKRG_GENERAL_ERROR; ++ p_print_log(P_LKRG_CRIT, ++ "KMOD HASH kmalloc() error! Can't allocate memory for module bitmask ;[\n"); ++ goto p_kmod_hash_err; ++ } ++ ++ ++ /* ++ * OK, we now know how many modules we have in the module list ++ * in this kernel, let's allocate data here... ++ * ++ * __GFP_NOFAIL flag will always generate slowpath warn because developers ++ * decided to depreciate this flag ;/ ++ * ++ * We are under time-critical pressure. We are going to use emergency pools ++ * and we can't accept memory allocation fails. Because __GFP_NOFAIL is not ++ * 'safe' flag anymore, we are spinning until allocation succeeds. ++ */ ++ if ( (*p_mlm_tmp = kzalloc(sizeof(p_module_list_mem) * (*p_module_list_cnt_arg+P_MODULE_BUFFER_RACE), ++ GFP_ATOMIC)) == NULL) { ++ /* ++ * I should NEVER be here! ++ */ ++ p_ret = P_LKRG_GENERAL_ERROR; ++ p_print_log(P_LKRG_CRIT, ++ "KMOD HASH kzalloc() error! Can't allocate memory for module list ;[\n"); ++ goto p_kmod_hash_err; ++ } ++ // STRONG_DEBUG ++ else { ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " p_mlm_tmp allocated at: 0x%lx with size: %zd[0x%zx]\n", ++ (unsigned long)*p_mlm_tmp, ++ sizeof(p_module_list_mem) * (*p_module_list_cnt_arg+P_MODULE_BUFFER_RACE), ++ sizeof(p_module_list_mem) * (*p_module_list_cnt_arg+P_MODULE_BUFFER_RACE)); ++ } ++ ++ /* ++ * OK, we now know how many modules we have in the sysfs kset/kobject list ++ * in this kernel, let's allocate data here... ++ * ++ * __GFP_NOFAIL flag will always generate slowpath warn because developers ++ * decided to depreciate this flag ;/ ++ * ++ * We are under time-critical pressure. We are going to use emergency pools ++ * and we can't accept memory allocation fails. 
Because __GFP_NOFAIL is not ++ * 'safe' flag anymore, we are spinning until allocation succeeds. ++ */ ++ if ( (*p_mkm_tmp = kzalloc(sizeof(p_module_kobj_mem) * (*p_module_kobj_cnt_arg+P_MODULE_BUFFER_RACE), ++ GFP_ATOMIC)) == NULL) { ++ /* ++ * I should NEVER be here! ++ */ ++ p_ret = P_LKRG_GENERAL_ERROR; ++ p_print_log(P_LKRG_CRIT, ++ "KMOD HASH kzalloc() error! Can't allocate memory for kobj list;[\n"); ++ goto p_kmod_hash_err; ++ } ++ // STRONG_DEBUG ++ else { ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " p_mkm_tmp allocated at: 0x%lx with size: %zd[0x%zx]\n", ++ (unsigned long)*p_mkm_tmp, ++ sizeof(p_module_kobj_mem) * (*p_module_kobj_cnt_arg+P_MODULE_BUFFER_RACE), ++ sizeof(p_module_kobj_mem) * (*p_module_kobj_cnt_arg+P_MODULE_BUFFER_RACE)); ++ } ++ ++ } else if (p_flag == 2) { ++ ++ if (p_module_list_cnt_arg_old < *p_module_list_cnt_arg) { ++ ++ /* First free currently used memory! */ ++ if (*p_mlm_tmp) { ++ p_kzfree(*p_mlm_tmp); ++ *p_mlm_tmp = NULL; ++ } ++ ++ if (p_db.p_jump_label.p_mod_mask) { ++ kfree(p_db.p_jump_label.p_mod_mask); ++ p_db.p_jump_label.p_mod_mask = NULL; ++ } ++ ++ if ( (p_db.p_jump_label.p_mod_mask = kmalloc(BITS_TO_LONGS(*p_module_list_cnt_arg)*sizeof(unsigned long), ++ GFP_ATOMIC)) == NULL) { ++ /* ++ * I should NEVER be here! ++ */ ++ p_ret = P_LKRG_GENERAL_ERROR; ++ p_print_log(P_LKRG_CRIT, ++ "KMOD HASH kmalloc() error! Can't allocate memory for module bitmask ;[\n"); ++ goto p_kmod_hash_err; ++ } ++ ++ /* ++ * OK, we now know how many modules we have in the module list ++ * in this kernel, let's allocate data here... ++ * ++ * __GFP_NOFAIL flag will always generate slowpath warn because developers ++ * decided to depreciate this flag ;/ ++ * ++ * We are under time-critical pressure. We are going to use emergency pools ++ * and we can't accept memory allocation fails. Because __GFP_NOFAIL is not ++ * 'safe' flag anymore, we are spinning until allocation succeeds. 
++ */ ++ if ( (*p_mlm_tmp = kzalloc(sizeof(p_module_list_mem) * (*p_module_list_cnt_arg+P_MODULE_BUFFER_RACE), ++ GFP_ATOMIC)) == NULL) { ++ /* ++ * I should NEVER be here! ++ */ ++ p_ret = P_LKRG_GENERAL_ERROR; ++ p_print_log(P_LKRG_CRIT, ++ "KMOD HASH kzalloc() error! Can't allocate memory for module list ;[\n"); ++ goto p_kmod_hash_err; ++ } ++ // STRONG_DEBUG ++ else { ++// p_print_log(P_LKRG_CRIT, ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " p_mlm_tmp allocated at: 0x%lx with size: %zd[0x%zx]\n", ++ (unsigned long)*p_mlm_tmp, ++ sizeof(p_module_list_mem) * (*p_module_list_cnt_arg+P_MODULE_BUFFER_RACE), ++ sizeof(p_module_list_mem) * (*p_module_list_cnt_arg+P_MODULE_BUFFER_RACE)); ++ } ++ ++ } else { ++// printk(KERN_CRIT "p_module_list_cnt_arg_old[%d] *p_module_list_cnt_arg[%d] *p_mlm_tmp[0x%lx]\n", ++// p_module_list_cnt_arg_old, *p_module_list_cnt_arg, (unsigned long)*p_mlm_tmp); ++ memset(*p_mlm_tmp,0,sizeof(p_module_list_mem) * *p_module_list_cnt_arg); ++ } ++ ++ if (p_module_kobj_cnt_arg_old < *p_module_kobj_cnt_arg) { ++ ++ if (*p_mkm_tmp) { ++ p_kzfree(*p_mkm_tmp); ++ *p_mkm_tmp = NULL; ++ } ++ ++ /* ++ * OK, we now know how many modules we have in the sysfs kset/kobject list ++ * in this kernel, let's allocate data here... ++ * ++ * __GFP_NOFAIL flag will always generate slowpath warn because developers ++ * decided to depreciate this flag ;/ ++ * ++ * We are under time-critical pressure. We are going to use emergency pools ++ * and we can't accept memory allocation fails. Because __GFP_NOFAIL is not ++ * 'safe' flag anymore, we are spinning until allocation succeeds. ++ */ ++ if ( (*p_mkm_tmp = kzalloc(sizeof(p_module_kobj_mem) * (*p_module_kobj_cnt_arg+P_MODULE_BUFFER_RACE), ++ GFP_ATOMIC)) == NULL) { ++ /* ++ * I should NEVER be here! ++ */ ++ p_ret = P_LKRG_GENERAL_ERROR; ++ p_print_log(P_LKRG_CRIT, ++ "KMOD HASH kzalloc() error! 
Can't allocate memory for kobj list;[\n"); ++ goto p_kmod_hash_err; ++ } ++ // STRONG_DEBUG ++ else { ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " p_mkm_tmp allocated at: 0x%lx with size: %zd[0x%zx]\n", ++ (unsigned long)*p_mkm_tmp, ++ sizeof(p_module_kobj_mem) * (*p_module_kobj_cnt_arg+P_MODULE_BUFFER_RACE), ++ sizeof(p_module_kobj_mem) * (*p_module_kobj_cnt_arg+P_MODULE_BUFFER_RACE)); ++ } ++ ++ } else { ++ memset(*p_mkm_tmp,0,sizeof(p_module_kobj_mem) * *p_module_kobj_cnt_arg); ++ } ++ } else { ++ ++ if (*p_mlm_tmp) { ++ p_kzfree(*p_mlm_tmp); ++ *p_mlm_tmp = NULL; ++ } ++ if (*p_mkm_tmp) { ++ p_kzfree(*p_mkm_tmp); ++ *p_mkm_tmp = NULL; ++ } ++ if (p_db.p_jump_label.p_mod_mask) { ++ kfree(p_db.p_jump_label.p_mod_mask); ++ p_db.p_jump_label.p_mod_mask = NULL; ++ } ++ goto p_kmod_hash_err; ++ } ++ ++ if ( (p_ret = p_list_from_module_list(*p_mlm_tmp, p_flag)) != P_LKRG_SUCCESS) { ++ /* ++ * I should NEVER be here! ++ */ ++ p_print_log(P_LKRG_CRIT, ++ "KMOD HASH error! Can't allocate memory during dumping modules from module list ;[\n"); ++ goto p_kmod_hash_err; ++ } ++ ++ p_list_from_sysfs_kobj(*p_mkm_tmp); ++ ++ p_ret = P_LKRG_SUCCESS; ++ ++p_kmod_hash_err: ++ ++ if (p_ret != P_LKRG_SUCCESS) { ++ if (*p_mlm_tmp) { ++ p_kzfree(*p_mlm_tmp); ++ *p_mlm_tmp = NULL; ++ } ++ if (*p_mkm_tmp) { ++ p_kzfree(*p_mkm_tmp); ++ *p_mkm_tmp = NULL; ++ } ++ if (p_db.p_jump_label.p_mod_mask) { ++ kfree(p_db.p_jump_label.p_mod_mask); ++ p_db.p_jump_label.p_mod_mask = NULL; ++ } ++ } ++ ++ /* ++ * Originally this mutex was taken here. Unfortunately some use cases of this function ++ * requires to work under global DB spinlock. 
Because of that calling function must take ++ * 'module_mutex' ++ */ ++// mutex_unlock(&module_mutex); ++ ++ return p_ret; ++} +diff --git a/security/lkrg/modules/kmod/p_kmod.h b/security/lkrg/modules/kmod/p_kmod.h +new file mode 100644 +index 000000000000..8eacc4190772 +--- /dev/null ++++ b/security/lkrg/modules/kmod/p_kmod.h +@@ -0,0 +1,71 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Kernel's modules module ++ * ++ * Notes: ++ * - Gathering information about loaded kernel modules and tries ++ * to protect them via calculating hashes from their core_text ++ * section. ++ * ++ * Timeline: ++ * - Created: 09.XI.2016 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_KERNEL_MODULES_H ++#define P_LKRG_KERNEL_MODULES_H ++ ++#define P_GLOBAL_TO_MODULE(x) \ ++({ \ ++ list_entry((void *)*(long *)(*(long*)x),struct module, list); \ ++}) ++ ++#define P_MODULE_BUFFER_RACE 5 ++#define P_NEW_KMOD_STEXT ((char*)0xdeadbabe) ++ ++typedef struct p_module_list_mem { ++ ++ struct module *p_mod; ++ char p_name[MODULE_NAME_LEN+1]; ++ void *p_module_core; ++ unsigned int p_core_text_size; ++ uint64_t p_mod_core_text_hash; ++ ++} p_module_list_mem; ++ ++ ++typedef struct p_module_kobj_mem { ++ ++ struct module_kobject *p_mk; ++ struct kobject kobj; ++ ++ struct module *p_mod; ++ char p_name[MODULE_NAME_LEN+1]; ++ void *p_module_core; ++ unsigned int p_core_text_size; ++ ++ uint64_t p_mod_core_text_hash; ++ ++} p_module_kobj_mem; ++ ++ ++/* Module activity events */ ++extern struct mutex p_module_activity; ++extern struct module *p_module_activity_ptr; ++ ++int p_block_always(void); ++void p_verify_module_live(struct module *p_mod); ++void p_verify_module_going(struct module *p_mod); ++ ++int p_kmod_init(void); ++int p_kmod_hash(unsigned int *p_module_list_cnt_arg, p_module_list_mem **p_mlm_tmp, ++ unsigned int *p_module_kobj_cnt_arg, p_module_kobj_mem **p_mkm_tmp, char p_flag); ++void 
p_deregister_module_notifier(void); ++void p_register_module_notifier(void); ++ ++#endif +diff --git a/security/lkrg/modules/kmod/p_kmod_notifier.c b/security/lkrg/modules/kmod/p_kmod_notifier.c +new file mode 100644 +index 000000000000..77e374440ee9 +--- /dev/null ++++ b/security/lkrg/modules/kmod/p_kmod_notifier.c +@@ -0,0 +1,346 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Kernel's modules module notifier ++ * ++ * Notes: ++ * - Register notifier function whenever there is any kernel module load/unload activity ++ * ++ * Timeline: ++ * - Created: 16.II.2016 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++ ++static int p_module_event_notifier(struct notifier_block *p_this, unsigned long p_event, void *p_kmod); ++static void p_module_notifier_wrapper(unsigned long p_event, struct module *p_kmod); ++ ++DEFINE_MUTEX(p_module_activity); ++struct module *p_module_activity_ptr; ++ ++static struct notifier_block p_module_block_notifier = { ++ ++ .notifier_call = p_module_event_notifier, ++ .next = NULL, ++ .priority = INT_MAX ++ ++}; ++ ++ ++static void p_module_notifier_wrapper(unsigned long p_event, struct module *p_kmod) { ++ ++ ++ if (P_CTRL(p_block_modules)) { ++ p_kmod->init = p_block_always; ++ } ++ ++ return; ++} ++ ++ ++/* ++ * This function is called when module is load/unloaded ++ * ++ * Kernel supports following states: ++ * ++ * 291 enum module_state { ++ * 292 MODULE_STATE_LIVE, // Normal state. ++ * 293 MODULE_STATE_COMING, // Full formed, running module_init. ++ * 294 MODULE_STATE_GOING, // Going away. ++ * 295 MODULE_STATE_UNFORMED, // Still setting it up. 
++ * 296 }; ++ */ ++static int p_module_event_notifier(struct notifier_block *p_this, unsigned long p_event, void *p_kmod) { ++ ++ struct module *p_tmp = p_kmod; ++ ++// STRONG_DEBUG ++#ifdef P_LKRG_DEBUG ++ char *p_mod_strings[] = { "New module is LIVE", ++ "New module is COMING", ++ "Module is GOING AWAY", ++ "New module is UNFORMED yet" }; ++#endif ++ ++// STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ "[%ld | %s | %s] Entering function m[0x%lx] hd[0x%lx] s[0x%lx] n[0x%lx]\n", ++ p_event, ++ p_mod_strings[p_event], ++ p_tmp->name, ++ (unsigned long)p_tmp, ++ (unsigned long)p_tmp->holders_dir, ++ (unsigned long)p_tmp->sect_attrs, ++ (unsigned long)p_tmp->notes_attrs); ++ ++ /* Inform validation routine about active module activities... */ ++ mutex_lock(&p_module_activity); ++ p_module_activity_ptr = p_tmp; ++ ++// DEBUG ++ p_debug_log(P_LKRG_DBG, ++ " !! Module activity detected [<%s>] %lu: 0x%lx\n", ++ p_mod_strings[p_event], ++ p_event, ++ (unsigned long)p_kmod); ++ ++ /* ++ * If module going away, we need to rebuild our database anyway ++ * It does not depends on the 'blocking' flag ++ */ ++// if (p_tmp->state == MODULE_STATE_GOING) { <- Linux kernel bug - might not update state value :( ++ if (p_event == MODULE_STATE_GOING) { ++ ++ get_online_cpus(); ++ on_each_cpu(p_dump_CPU_metadata,p_db.p_CPU_metadata_array,true); ++ p_db.p_CPU_metadata_hashes = hash_from_CPU_data(p_db.p_CPU_metadata_array); ++ put_online_cpus(); ++ ++ /* ++ * Now recalculate modules information in database! ++ * Every module must be tracked in the internal database ++ * (like hash from .text section) and recalculate global module hashes... ++ * ++ * Because some module is going to be unloaded from the kernel ++ * We must keep in track that information ;) ++ */ ++ p_verify_module_going(p_tmp); ++ ++ p_text_section_lock(); ++ /* ++ * First, synchronize possible database changes with other LKRG components... ++ * We want to be as fast as possible to get this lock! 
:) ++ * Still there is small race condition window, between taking this lock, and ++ * verification routine doing that. It might be critical from the perspective ++ * of tracking down which modules are currently active in the system and track ++ * down this information in database. ++ * Imagine situation even we have active 'blocking module' functionality and some ++ * random module is going to be unloaded. New event arrives, stack frame for this ++ * function is created and before this operation is finished and lock will be taken ++ * another CPU might already execute validation routine which will take DB lock ++ * before this function will be fast enough to do it after stack frame creation. ++ * ++ * Don't know if there is any solution for that :) ++ * ++ */ ++ ++ /* Let's play... God mode on ;) */ ++// spin_lock_irqsave(&p_db_lock,p_db_flags); ++ spin_lock(&p_db_lock); ++ ++ /* OK, now recalculate hashes again! */ ++ while(p_kmod_hash(&p_db.p_module_list_nr,&p_db.p_module_list_array, ++ &p_db.p_module_kobj_nr,&p_db.p_module_kobj_array, 0x2) != P_LKRG_SUCCESS) ++ schedule(); ++ ++ /* Update global module list/kobj hash */ ++ p_db.p_module_list_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_list_array, ++ (unsigned int)p_db.p_module_list_nr * sizeof(p_module_list_mem)); ++ ++ p_db.p_module_kobj_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_kobj_array, ++ (unsigned int)p_db.p_module_kobj_nr * sizeof(p_module_kobj_mem)); ++ /* We should be fine now! 
*/ ++ ++ p_print_log(P_LKRG_INFO,"Hash from 'module list' => [0x%llx]\n",p_db.p_module_list_hash); ++ p_print_log(P_LKRG_INFO,"Hash from 'module kobj(s)' => [0x%llx]\n",p_db.p_module_kobj_hash); ++ ++ if (hash_from_kernel_stext() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "[module_notifier:%s] Can't recalculate hash from _STEXT!\n",p_mod_strings[p_event]); ++ } ++ p_print_log(P_LKRG_INFO,"Hash from '_stext' => [0x%llx]\n",p_db.kernel_stext.p_hash); ++ ++ goto p_module_event_notifier_unlock_out; ++ } ++ ++ if (P_CTRL(p_block_modules) && p_tmp != P_SYM(p_find_me)) { ++// if (p_tmp->state == MODULE_STATE_COMING) { <- Linux kernel bug - might not update state value :( ++ if (p_event == MODULE_STATE_COMING) { ++ /* We are not going to modify DB */ ++ p_module_notifier_wrapper(p_event,p_tmp); ++ goto p_module_event_notifier_activity_out; ++ } ++ } else { ++// if (p_tmp->state == MODULE_STATE_LIVE) { <- Linux kernel bug - might not update state value :( ++ if (p_event == MODULE_STATE_LIVE) { ++ ++ get_online_cpus(); ++ on_each_cpu(p_dump_CPU_metadata,p_db.p_CPU_metadata_array,true); ++ p_db.p_CPU_metadata_hashes = hash_from_CPU_data(p_db.p_CPU_metadata_array); ++ put_online_cpus(); ++ ++ /* ++ * Now recalculate modules information in database! Since blocking module is disabled ++ * every new module must be add to the internal database, hash from .text section calculated ++ * and recalculate global module hashes... ++ */ ++ p_verify_module_live(p_tmp); ++ ++ p_text_section_lock(); ++ /* ++ * First, synchronize possible database changes with other LKRG components... ++ * We want to be as fast as possible to get this lock! :) ++ * Still there is small race condition window, between taking this lock, and ++ * verification routine doing that. It might be critical from the perspective ++ * of tracking down which modules are currently active in the system and track ++ * down this information in database. 
++ * Imagine situation even we have active 'blocking module' functionality and some ++ * random module is going to be unloaded. New event arrives, stack frame for this ++ * function is created and before this operation is finished and lock will be taken ++ * another CPU might already execute validation routine which will take DB lock ++ * before this function will be fast enough to do it after stack frame creation. ++ * ++ * Don't know if there is any solution for that :) ++ * ++ */ ++// spin_lock_irqsave(&p_db_lock,p_db_flags); ++ spin_lock(&p_db_lock); ++ ++ /* OK, now recalculate hashes again! */ ++ while(p_kmod_hash(&p_db.p_module_list_nr,&p_db.p_module_list_array, ++ &p_db.p_module_kobj_nr,&p_db.p_module_kobj_array, 0x2) != P_LKRG_SUCCESS) ++ schedule(); ++ ++ /* Update global module list/kobj hash */ ++ p_db.p_module_list_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_list_array, ++ (unsigned int)p_db.p_module_list_nr * sizeof(p_module_list_mem)); ++ ++ p_db.p_module_kobj_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_kobj_array, ++ (unsigned int)p_db.p_module_kobj_nr * sizeof(p_module_kobj_mem)); ++ /* We should be fine now! 
*/ ++ ++ p_print_log(P_LKRG_INFO,"Hash from 'module list' => [0x%llx]\n",p_db.p_module_list_hash); ++ p_print_log(P_LKRG_INFO,"Hash from 'module kobj(s)' => [0x%llx]\n",p_db.p_module_kobj_hash); ++ ++ if (hash_from_kernel_stext() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "[module_notifier:%s] Can't recalculate hash from _STEXT!\n",p_mod_strings[p_event]); ++ } ++ p_print_log(P_LKRG_INFO,"Hash from '_stext' => [0x%llx]\n",p_db.kernel_stext.p_hash); ++ ++ goto p_module_event_notifier_unlock_out; ++ } ++ } ++ ++ goto p_module_event_notifier_activity_out; ++ ++p_module_event_notifier_unlock_out: ++ ++ /* God mode off ;) */ ++// spin_unlock_irqrestore(&p_db_lock,p_db_flags); ++ spin_unlock(&p_db_lock); ++ p_text_section_unlock(); ++ ++p_module_event_notifier_activity_out: ++ ++ /* Inform validation routine about active module activities... */ ++ mutex_unlock(&p_module_activity); ++ ++ return NOTIFY_DONE; ++} ++ ++int p_block_always(void) { ++ ++ p_print_log(P_LKRG_CRIT, ++ "!! Module insertion blocked (from always!) !!\n"); ++ ++ return P_LKRG_GENERAL_ERROR; ++ ++} ++ ++void p_verify_module_live(struct module *p_mod) { ++ ++ if (p_ovl_create_or_link_kretprobe_state) { ++ /* We do not need to do anything for now */ ++ return; ++ } ++ ++ if (!strcmp(p_mod->name,"overlay") || !strcmp(p_mod->name,"overlay2")) { ++ unsigned int p_tmp_val; ++ ++ /* ++ * OK, we must try to hook 'ovl_create_or_link' function. ++ * Otherwise LKRG will be incompatible with docker. ++ * ++ * First, we would need to synchronize with LKRG integrity feature. ++ */ ++ p_tmp_val = P_CTRL(p_kint_validate); ++ p_lkrg_open_rw(); ++ P_CTRL(p_kint_validate) = 0; ++ p_lkrg_close_rw(); ++ /* Try to install the hook */ ++ if (p_install_ovl_create_or_link_hook(1)) { ++ p_print_log(P_LKRG_ERR, ++ "OverlayFS is being loaded but LKRG can't hook 'ovl_create_or_link' function. 
" ++ "It is very likely that LKRG will produce False Positives :(\n"); ++ p_print_log(P_LKRG_ERR,"It is recomended to reload LKRG module!\n"); ++ } ++ /* Done */ ++ p_lkrg_open_rw(); ++ P_CTRL(p_kint_validate) = p_tmp_val; ++ p_lkrg_close_rw(); ++ } ++} ++ ++void p_verify_module_going(struct module *p_mod) { ++ ++ if (!p_ovl_create_or_link_kretprobe_state) { ++ /* We do not need to do anything for now */ ++ return; ++ } ++ ++ if (!strcmp(p_mod->name,"overlay") || !strcmp(p_mod->name,"overlay2")) { ++ unsigned int p_tmp_val; ++ ++ /* ++ * OK, we must try to remove our hook @ 'ovl_create_or_link' function. ++ * ++ * First, we would need to synchronize with LKRG integrity feature. ++ */ ++ p_tmp_val = P_CTRL(p_kint_validate); ++ p_lkrg_open_rw(); ++ P_CTRL(p_kint_validate) = 0; ++ p_lkrg_close_rw(); ++ /* Try to uninstall the hook */ ++ p_uninstall_ovl_create_or_link_hook(); ++ p_reinit_ovl_create_or_link_kretprobe(); ++ /* Done */ ++ p_lkrg_open_rw(); ++ P_CTRL(p_kint_validate) = p_tmp_val; ++ p_lkrg_close_rw(); ++ } ++} ++ ++void p_register_module_notifier(void) { ++ ++// STRONG_DEBUG ++ p_debug_log(P_LKRG_STRONG_DBG, ++ " Registering module's noitifier routine\n"); ++ ++ register_module_notifier(&p_module_block_notifier); ++ ++} ++ ++void p_deregister_module_notifier(void) { ++ ++ unregister_module_notifier(&p_module_block_notifier); ++ ++ if (p_db.p_module_list_array) { ++ p_kzfree(p_db.p_module_list_array); ++ p_db.p_module_list_array = NULL; ++ } ++ if (p_db.p_module_kobj_array) { ++ p_kzfree(p_db.p_module_kobj_array); ++ p_db.p_module_kobj_array = NULL; ++ } ++ if (p_db.p_jump_label.p_mod_mask) { ++ kfree(p_db.p_jump_label.p_mod_mask); ++ p_db.p_jump_label.p_mod_mask = NULL; ++ } ++ ++// printk("Goodbye ;)\n"); ++} +diff --git a/security/lkrg/modules/ksyms/p_resolve_ksym.c b/security/lkrg/modules/ksyms/p_resolve_ksym.c +new file mode 100644 +index 000000000000..077bdf9f1bfe +--- /dev/null ++++ b/security/lkrg/modules/ksyms/p_resolve_ksym.c +@@ -0,0 +1,135 @@ 
++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Resolve kernel symbols ++ * ++ * Notes: ++ * - We try to 'resolve' old-school Linux kernel function for ++ * resolving symbols on run-time ++ * ++ * Timeline: ++ * - Created: 24.XI.2015 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../p_lkrg_main.h" ++ ++//unsigned long (*p_kallsyms_lookup_name)(const char *name) = 0; ++ ++ ++static int p_find_isra_name(void *p_isra_argg, const char *name, ++ struct module *mod, unsigned long addr) { ++ ++ struct p_isra_argument *p_isra_arg = (struct p_isra_argument *)p_isra_argg; ++ char p_buf[0x100]; ++ char p_buf2[0x100]; ++ ++ snprintf(p_buf, sizeof(p_buf), "%s.isra.", p_isra_arg->p_name); ++ snprintf(p_buf2, sizeof(p_buf2), "%s.constprop.", p_isra_arg->p_name); ++ if (strncmp(p_buf, name, strlen(p_buf)) == 0) { ++ p_print_log(P_LKRG_WARN, "Found ISRA version of function <%s>\n", name); ++ if ( (p_isra_arg->p_isra_name = kzalloc(strlen(name)+1, GFP_KERNEL)) == NULL) { ++ p_print_log(P_LKRG_ERR, "[p_find_isra_name] kzalloc() failed!\n"); ++ return 0; ++ } ++ memcpy(p_isra_arg->p_isra_name, name, strlen(name)); ++ return addr; ++ } else if (strncmp(p_buf2, name, strlen(p_buf2)) == 0) { ++ p_print_log(P_LKRG_WARN, "Found CONSTPROP version of function <%s>\n", name); ++ if ( (p_isra_arg->p_isra_name = kzalloc(strlen(name)+1, GFP_KERNEL)) == NULL) { ++ p_print_log(P_LKRG_ERR, "[p_find_isra_name] kzalloc() failed!\n"); ++ return 0; ++ } ++ memcpy(p_isra_arg->p_isra_name, name, strlen(name)); ++ return addr; ++ } ++ ++ return 0; ++} ++ ++int p_try_isra_name(struct p_isra_argument *p_isra_arg) { ++ ++ return P_SYM(p_kallsyms_on_each_symbol)(p_find_isra_name, p_isra_arg); ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) ++ ++static int p_tmp_kprobe_handler(struct kprobe *p_ri, struct pt_regs *p_regs) { ++ return 0; ++} ++ ++#else ++ ++static int p_lookup_syms_hack(void *unused, const char *name, ++ struct 
module *mod, unsigned long addr) { ++ ++ if (strcmp("kallsyms_lookup_name", name) == 0) { ++ P_SYM(p_kallsyms_lookup_name) = (unsigned long (*)(const char*)) (addr); ++ return addr; ++ } ++ ++ return 0; ++} ++ ++#endif ++ ++long get_kallsyms_address(void) { ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) ++ struct kprobe p_kprobe; ++#endif ++ int p_ret; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) ++ ++ /* ++ * Linux kernel 5.7+ no longer exports the kallsyms_lookup_name symbol for ++ * use from modules. We reuse the workaround originally introduced in the ++ * LTTng module to access that symbol anyway. ++ */ ++ memset(&p_kprobe, 0, sizeof(p_kprobe)); ++ p_kprobe.pre_handler = p_tmp_kprobe_handler; ++ p_kprobe.symbol_name = "kallsyms_lookup_name"; ++ if ( (p_ret = register_kprobe(&p_kprobe)) < 0) { ++ p_print_log(P_LKRG_ERR, ++ "[get_kallsyms_address] register_kprobe error [%d] :(\n", p_ret); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ P_SYM(p_kallsyms_lookup_name) = ++ (unsigned long (*)(const char*))((unsigned long)p_kprobe.addr); ++ ++#ifdef CONFIG_ARM ++#ifdef CONFIG_THUMB2_KERNEL ++ if (P_SYM(p_kallsyms_lookup_name)) ++ P_SYM(p_kallsyms_lookup_name) |= 1; /* set bit 0 in address for thumb mode */ ++#endif ++#endif ++ ++ unregister_kprobe(&p_kprobe); ++ P_SYM(p_kallsyms_on_each_symbol) = (int (*)(int (*)(void *, const char *, struct module *, ++ unsigned long), void *)) ++ P_SYM(p_kallsyms_lookup_name)("kallsyms_on_each_symbol"); ++ ++#else ++ ++ if ( (p_ret = kallsyms_on_each_symbol(p_lookup_syms_hack,NULL)) == 0) { ++ p_debug_log(P_LKRG_DBG, ++ "kallsyms_on_each_symbol error :(\n"); ++ return P_LKRG_GENERAL_ERROR; ++ } ++ ++ p_print_log(P_LKRG_INFO, ++ "kallsyms_on_each_symbol() returned => 0x%x [0x%lx]\n", ++ p_ret, ++ (unsigned long)P_SYM(p_kallsyms_lookup_name)); ++ ++ P_SYM(p_kallsyms_on_each_symbol) = kallsyms_on_each_symbol; ++ ++#endif ++ ++ return P_LKRG_SUCCESS; ++} +diff --git a/security/lkrg/modules/ksyms/p_resolve_ksym.h 
b/security/lkrg/modules/ksyms/p_resolve_ksym.h +new file mode 100644 +index 000000000000..7c0ede580231 +--- /dev/null ++++ b/security/lkrg/modules/ksyms/p_resolve_ksym.h +@@ -0,0 +1,32 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Resolve kernel symbols ++ * ++ * Notes: ++ * - We try to 'resolve' old-school Linux kernel function for ++ * resolving symbols on run-time ++ * ++ * Timeline: ++ * - Created: 24.XI.2015 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_RESOLVE_KSYM_H ++#define P_LKRG_RESOLVE_KSYM_H ++ ++struct p_isra_argument { ++ ++ const char *p_name; ++ char *p_isra_name; ++ ++}; ++ ++int p_try_isra_name(struct p_isra_argument *p_isra_arg); ++long get_kallsyms_address(void); ++ ++#endif +diff --git a/security/lkrg/modules/notifiers/p_notifiers.c b/security/lkrg/modules/notifiers/p_notifiers.c +new file mode 100644 +index 000000000000..930451e46a91 +--- /dev/null ++++ b/security/lkrg/modules/notifiers/p_notifiers.c +@@ -0,0 +1,266 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Notifiers module ++ * ++ * Notes: ++ * - Register multiple notifiers routines for integrity checking ++ * - Unfortunately, since Linux 4.10 there isn't idle notifier anymore :( ++ * Integrity check fired on idle state won't work in newer kernels. 
++ * More information can be found here: ++ * => https://patchwork.kernel.org/patch/9435797/ ++ * ++ * Timeline: ++ * - Created: 30.X.2016 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../p_lkrg_main.h" ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) && defined(_ASM_X86_IDLE_H) ++static int p_idle_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data); ++#endif ++#ifdef CONFIG_CPU_FREQ ++static int p_freq_transition_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data); ++#endif ++static int p_cpu_pm_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data); ++static int p_netdevice_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data); ++static int p_netevent_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data); ++#if IS_ENABLED(CONFIG_IPV6) ++static int p_inet6addr_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data); ++#endif ++static int p_inetaddr_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data); ++static int p_taskfree_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data); ++static int p_profile_event_exit_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data); ++static int p_profile_event_munmap_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data); ++#if defined(CONFIG_USB) ++static int p_usb_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data); ++#endif ++#if defined(CONFIG_ACPI) ++static int p_acpi_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data); ++#endif ++ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) && defined(_ASM_X86_IDLE_H) ++static struct notifier_block p_idle_notifier_nb = { ++ .notifier_call = p_idle_notifier, ++}; ++#endif ++ ++#ifdef CONFIG_CPU_FREQ ++static struct notifier_block p_freq_transition_nb = { ++ .notifier_call = p_freq_transition_notifier, 
++}; ++#endif ++ ++static struct notifier_block p_cpu_pm_notifier_nb = { ++ .notifier_call = p_cpu_pm_notifier, ++}; ++ ++static struct notifier_block p_netdevice_notifier_nb = { ++ .notifier_call = p_netdevice_notifier, ++}; ++ ++static struct notifier_block p_netevent_notifier_nb = { ++ .notifier_call = p_netevent_notifier, ++}; ++ ++#if IS_ENABLED(CONFIG_IPV6) ++static struct notifier_block p_inet6addr_notifier_nb = { ++ .notifier_call = p_inet6addr_notifier, ++}; ++#endif ++ ++static struct notifier_block p_inetaddr_notifier_nb = { ++ .notifier_call = p_inetaddr_notifier, ++}; ++ ++static struct notifier_block p_taskfree_notifier_nb = { ++ .notifier_call = p_taskfree_notifier, ++}; ++ ++static struct notifier_block p_profile_event_exit_notifier_nb = { ++ .notifier_call = p_profile_event_exit_notifier, ++}; ++ ++static struct notifier_block p_profile_event_munmap_notifier_nb = { ++ .notifier_call = p_profile_event_munmap_notifier, ++}; ++ ++#if defined(CONFIG_USB) ++static struct notifier_block p_usb_notifier_nb = { ++ .notifier_call = p_usb_notifier, ++}; ++#endif ++ ++#if defined(CONFIG_ACPI) ++static struct notifier_block p_acpi_notifier_nb = { ++ .notifier_call = p_acpi_notifier, ++}; ++#endif ++ ++ ++void p_register_notifiers(void) { ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) && defined(_ASM_X86_IDLE_H) ++ idle_notifier_register(&p_idle_notifier_nb); ++#endif ++#ifdef CONFIG_CPU_FREQ ++ cpufreq_register_notifier(&p_freq_transition_nb, CPUFREQ_TRANSITION_NOTIFIER); ++#endif ++ cpu_pm_register_notifier(&p_cpu_pm_notifier_nb); ++ register_netdevice_notifier(&p_netdevice_notifier_nb); ++ register_netevent_notifier(&p_netevent_notifier_nb); ++#if IS_ENABLED(CONFIG_IPV6) ++ register_inet6addr_notifier(&p_inet6addr_notifier_nb); ++#endif ++ register_inetaddr_notifier(&p_inetaddr_notifier_nb); ++ task_handoff_register(&p_taskfree_notifier_nb); ++ profile_event_register(PROFILE_TASK_EXIT, &p_profile_event_exit_notifier_nb); ++ 
profile_event_register(PROFILE_MUNMAP, &p_profile_event_munmap_notifier_nb); ++#if defined(CONFIG_USB) ++ usb_register_notify(&p_usb_notifier_nb); ++#endif ++#if defined(CONFIG_ACPI) ++ register_acpi_notifier(&p_acpi_notifier_nb); ++#endif ++} ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) && defined(_ASM_X86_IDLE_H) ++static int p_idle_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data) { ++ ++ /* 0.005% */ ++ P_TRY_OFFLOAD_NOTIFIER(P_M_SS_MORE_OFTEN_RATE, " Offloading integrity check\n"); ++ ++ return 0; ++} ++#endif ++ ++#ifdef CONFIG_CPU_FREQ ++static int p_freq_transition_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data) { ++ ++ /* 10% */ ++// P_TRY_OFFLOAD_NOTIFIER(P_RARE_RATE, " Offloading integrity check\n"); ++ /* 1%% */ ++ P_TRY_OFFLOAD_NOTIFIER(P_OFTEN_RATE, " Offloading integrity check\n"); ++ ++ return 0; ++} ++#endif ++ ++static int p_cpu_pm_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data) { ++ ++ /* 10% */ ++ P_TRY_OFFLOAD_NOTIFIER(P_RARE_RATE, " Offloading integrity check\n"); ++ ++ return 0; ++} ++ ++static int p_netdevice_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data) { ++ ++ /* 1% */ ++ P_TRY_OFFLOAD_NOTIFIER(P_OFTEN_RATE, " Offloading integrity check\n"); ++ ++ return 0; ++} ++ ++static int p_netevent_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data) { ++ ++ /* 5% */ ++ P_TRY_OFFLOAD_NOTIFIER(P_LESS_RARE_RATE, " Offloading integrity check\n"); ++ ++ return 0; ++} ++ ++#if IS_ENABLED(CONFIG_IPV6) ++static int p_inet6addr_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data) { ++ ++ /* 100% */ ++ P_TRY_OFFLOAD_NOTIFIER_ALWAYS(" Offloading integrity check\n"); ++ ++ return 0; ++} ++#endif ++ ++static int p_inetaddr_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data) { ++ ++ /* 100% */ ++ P_TRY_OFFLOAD_NOTIFIER_ALWAYS(" Offloading integrity check\n"); ++ ++ return 0; ++} ++ ++static int 
p_taskfree_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data) { ++ ++ /* 0.01% */ ++ P_TRY_OFFLOAD_NOTIFIER(P_SS_MORE_OFTEN_RATE, " Offloading integrity check\n"); ++ ++ return 0; ++} ++ ++static int p_profile_event_exit_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data) { ++ ++ /* 0.01% */ ++ P_TRY_OFFLOAD_NOTIFIER(P_SS_MORE_OFTEN_RATE, " Offloading integrity check\n"); ++ ++ return 0; ++} ++ ++static int p_profile_event_munmap_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data) { ++ ++ /* 0.005%*/ ++ P_TRY_OFFLOAD_NOTIFIER(P_M_SS_MORE_OFTEN_RATE, " Offloading integrity check\n"); ++ ++ return 0; ++} ++ ++#if defined(CONFIG_USB) ++static int p_usb_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data) { ++ ++ /* 100% */ ++ P_TRY_OFFLOAD_NOTIFIER_ALWAYS(" Offloading integrity check\n"); ++ ++ return 0; ++} ++#endif ++ ++#if defined(CONFIG_ACPI) ++static int p_acpi_notifier(struct notifier_block *p_nb, unsigned long p_val, void *p_data) { ++ ++ /* 100% */ ++ P_TRY_OFFLOAD_NOTIFIER_ALWAYS(" Offloading integrity check\n"); ++ ++ return 0; ++} ++#endif ++ ++ ++void p_deregister_notifiers(void) { ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) && defined(_ASM_X86_IDLE_H) ++ idle_notifier_unregister(&p_idle_notifier_nb); ++#endif ++#ifdef CONFIG_CPU_FREQ ++ cpufreq_unregister_notifier(&p_freq_transition_nb, CPUFREQ_TRANSITION_NOTIFIER); ++#endif ++ cpu_pm_unregister_notifier(&p_cpu_pm_notifier_nb); ++ unregister_netdevice_notifier(&p_netdevice_notifier_nb); ++ unregister_netevent_notifier(&p_netevent_notifier_nb); ++#if IS_ENABLED(CONFIG_IPV6) ++ unregister_inet6addr_notifier(&p_inet6addr_notifier_nb); ++#endif ++ unregister_inetaddr_notifier(&p_inetaddr_notifier_nb); ++ task_handoff_unregister(&p_taskfree_notifier_nb); ++ profile_event_unregister(PROFILE_TASK_EXIT, &p_profile_event_exit_notifier_nb); ++ profile_event_unregister(PROFILE_MUNMAP, &p_profile_event_munmap_notifier_nb); 
++#if defined(CONFIG_USB) ++ usb_unregister_notify(&p_usb_notifier_nb); ++#endif ++#if defined(CONFIG_ACPI) ++ unregister_acpi_notifier(&p_acpi_notifier_nb); ++#endif ++} +diff --git a/security/lkrg/modules/notifiers/p_notifiers.h b/security/lkrg/modules/notifiers/p_notifiers.h +new file mode 100644 +index 000000000000..623dc40e0937 +--- /dev/null ++++ b/security/lkrg/modules/notifiers/p_notifiers.h +@@ -0,0 +1,74 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Notifiers module ++ * ++ * Notes: ++ * - Register multiple notifiers routines for integrity checking ++ * ++ * Timeline: ++ * - Created: 30.X.2016 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_NOTIFIERS_MODULE_H ++#define P_LKRG_NOTIFIERS_MODULE_H ++ ++/* MAX unsigned int 4294967295 */ ++//#define P_OFTEN_RATE 5000000 ++//#define P_SUPER_OFTEN_RATE 250000 ++//#define P_RARE_RATE 80000000 ++//#define P_SUPER_RARE_RATE 3000000000 ++ ++#define P_SUPER_RARE_RATE 2147483647 /* 50% */ ++#define P_RARE_RATE 429496729 /* 10% */ ++#define P_LESS_RARE_RATE 214748364 /* 5% */ ++#define P_OFTEN_RATE 42949672 /* 1% */ ++#define P_MORE_OFTEN_RATE 21474836 /* 0.5% */ ++#define P_M_MORE_OFTEN_RATE 4294967 /* 0.1% */ ++#define P_S_MORE_OFTEN_RATE 2147483 /* 0.05% */ ++#define P_SS_MORE_OFTEN_RATE 429496 /* 0.01% */ ++#define P_M_SS_MORE_OFTEN_RATE 21474 /* 0.005% */ ++#define P_S_SS_MORE_OFTEN_RATE 42949 /* 0.001% */ ++ ++#define P_CHECK_RANDOM(x) ({ (get_random_int() < x) ? 
1 : 0; }) ++ ++#ifdef P_LKRG_DEBUG ++#define P_TRY_OFFLOAD_NOTIFIER(p_arg1, p_arg2) \ ++do { \ ++ if (P_CHECK_RANDOM(p_arg1)) { \ ++ p_print_log(P_LKRG_DBG, "%s", p_arg2); \ ++ p_offload_work(0); \ ++ } \ ++} while(0) ++#else ++#define P_TRY_OFFLOAD_NOTIFIER(p_arg1, p_arg2) \ ++do { \ ++ if (P_CHECK_RANDOM(p_arg1)) { \ ++ p_offload_work(0); \ ++ } \ ++} while(0) ++#endif ++ ++#ifdef P_LKRG_DEBUG ++#define P_TRY_OFFLOAD_NOTIFIER_ALWAYS(p_arg1) \ ++do { \ ++ p_print_log(P_LKRG_DBG, "%s", p_arg1); \ ++ p_offload_work(0); \ ++} while(0) ++#else ++#define P_TRY_OFFLOAD_NOTIFIER_ALWAYS(p_arg1) \ ++do { \ ++ p_offload_work(0); \ ++} while(0) ++#endif ++ ++void p_register_notifiers(void); ++void p_deregister_notifiers(void); ++ ++ ++#endif +diff --git a/security/lkrg/modules/print_log/p_lkrg_debug_log.c b/security/lkrg/modules/print_log/p_lkrg_debug_log.c +new file mode 100644 +index 000000000000..9df3aad6b66f +--- /dev/null ++++ b/security/lkrg/modules/print_log/p_lkrg_debug_log.c +@@ -0,0 +1,191 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Debug module ++ * ++ * Notes: ++ * - None ++ * ++ * Timeline: ++ * - Created: 14.V.2020 ++ * ++ * Author: ++ * - Mariusz Zaborski (https://oshogbo.vexillium.org/) ++ * ++ */ ++ ++#include "../../p_lkrg_main.h" ++#include "../database/p_database.h" ++ ++#define P_LKRG_DEBUG_RULE(fname) { (uintptr_t)fname, #fname } ++#define P_LKRG_DEBUG_RULE_KPROBE(fname) \ ++ P_LKRG_DEBUG_RULE(fname##_entry), \ ++ P_LKRG_DEBUG_RULE(fname##_ret) ++ ++void __cyg_profile_func_enter(void *this_fn, void *call_site) ++__attribute__((no_instrument_function)); ++void __cyg_profile_func_exit(void *this_fn, void *call_site) ++__attribute__((no_instrument_function)); ++ ++#ifdef P_LKRG_DEBUG_BUILD ++static struct p_addr_name { ++ uintptr_t addr; ++ const char *name; ++} p_addr_name_array[] = { ++ P_LKRG_DEBUG_RULE(p_rb_add_ed_pid), ++ P_LKRG_DEBUG_RULE(p_rb_del_ed_pid), ++ P_LKRG_DEBUG_RULE(p_init_rb_ed_pids), ++ 
P_LKRG_DEBUG_RULE(p_delete_rb_ed_pids), ++ P_LKRG_DEBUG_RULE(p_dump_task_f), ++ P_LKRG_DEBUG_RULE(p_remove_task_pid_f), ++ P_LKRG_DEBUG_RULE(p_ed_enforce_validation), ++ P_LKRG_DEBUG_RULE(p_ed_enforce_validation_paranoid), ++ P_LKRG_DEBUG_RULE(p_exploit_detection_init), ++ P_LKRG_DEBUG_RULE(p_exploit_detection_exit), ++ P_LKRG_DEBUG_RULE(p_install_hook), ++ P_LKRG_DEBUG_RULE(p_uninstall_hook), ++ P_LKRG_DEBUG_RULE(p_kmod_init), ++ P_LKRG_DEBUG_RULE(p_kmod_hash), ++ P_LKRG_DEBUG_RULE(p_offload_cache_init), ++ P_LKRG_DEBUG_RULE(p_offload_cache_delete), ++ P_LKRG_DEBUG_RULE(p_integrity_timer), ++ P_LKRG_DEBUG_RULE(p_offload_work), ++ P_LKRG_DEBUG_RULE(p_check_integrity), ++ P_LKRG_DEBUG_RULE(p_register_comm_channel), ++ P_LKRG_DEBUG_RULE(p_deregister_comm_channel), ++ P_LKRG_DEBUG_RULE(p_get_cpus), ++ P_LKRG_DEBUG_RULE(p_cmp_cpus), ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) ++ P_LKRG_DEBUG_RULE(p_cpu_callback), ++#endif ++ P_LKRG_DEBUG_RULE(p_cpu_online_action), ++ P_LKRG_DEBUG_RULE(p_cpu_dead_action), ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) ++ P_LKRG_DEBUG_RULE(p_install_switch_idt_hook), ++ P_LKRG_DEBUG_RULE(p_uninstall_switch_idt_hook), ++#endif ++ P_LKRG_DEBUG_RULE(p_register_arch_metadata), ++ P_LKRG_DEBUG_RULE(p_unregister_arch_metadata), ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,0) ++ P_LKRG_DEBUG_RULE(p_install_arch_jump_label_transform_hook), ++ P_LKRG_DEBUG_RULE(p_uninstall_arch_jump_label_transform_hook), ++ P_LKRG_DEBUG_RULE(p_install_arch_jump_label_transform_apply_hook), ++ P_LKRG_DEBUG_RULE(p_uninstall_arch_jump_label_transform_apply_hook), ++#endif ++ P_LKRG_DEBUG_RULE(hash_from_ex_table), ++ P_LKRG_DEBUG_RULE(hash_from_kernel_stext), ++ P_LKRG_DEBUG_RULE(hash_from_kernel_rodata), ++ P_LKRG_DEBUG_RULE(hash_from_iommu_table), ++ P_LKRG_DEBUG_RULE(hash_from_CPU_data), ++ P_LKRG_DEBUG_RULE(p_create_database), ++ P_LKRG_DEBUG_RULE(p_register_notifiers), ++ P_LKRG_DEBUG_RULE(p_deregister_notifiers), ++ P_LKRG_DEBUG_RULE(p_hide_itself), 
++ ++#ifdef P_LKRG_UNHIDE ++ P_LKRG_DEBUG_RULE(p_unhide_itself), ++#endif ++ ++ P_LKRG_DEBUG_RULE(get_kallsyms_address), ++ ++#ifdef CONFIG_X86 ++ P_LKRG_DEBUG_RULE(p_read_msr), ++ P_LKRG_DEBUG_RULE(p_dump_x86_metadata), ++#endif ++ ++#if defined(CONFIG_ARM) ++ P_LKRG_DEBUG_RULE(p_dump_arm_metadata), ++#endif ++ ++#if defined(CONFIG_ARM64) ++ P_LKRG_DEBUG_RULE(p_dump_arm64_metadata), ++#endif ++ ++#ifdef P_LKRG_STRONG_KPROBE_DEBUG ++ P_LKRG_DEBUG_RULE_KPROBE(p_cap_task_prctl), ++ P_LKRG_DEBUG_RULE_KPROBE(p_sys_capset), ++ P_LKRG_DEBUG_RULE_KPROBE(p_sys_setuid), ++ P_LKRG_DEBUG_RULE_KPROBE(p_sys_setregid), ++ P_LKRG_DEBUG_RULE_KPROBE(p_sys_setns), ++ P_LKRG_DEBUG_RULE_KPROBE(p_sys_unshare), ++ P_LKRG_DEBUG_RULE_KPROBE(p_generic_permission), ++ P_LKRG_DEBUG_RULE_KPROBE(p_scm_send), ++ P_LKRG_DEBUG_RULE_KPROBE(p_seccomp), ++ P_LKRG_DEBUG_RULE_KPROBE(p_sys_setresgid), ++ P_LKRG_DEBUG_RULE_KPROBE(p_security_ptrace_access), ++ P_LKRG_DEBUG_RULE_KPROBE(p_compat_sys_add_key), ++ P_LKRG_DEBUG_RULE_KPROBE(p_compat_sys_capset), ++ P_LKRG_DEBUG_RULE_KPROBE(p_compat_sys_keyctl), ++ P_LKRG_DEBUG_RULE_KPROBE(p_compat_sys_request_key), ++ P_LKRG_DEBUG_RULE_KPROBE(p_sys_setfsgid), ++ P_LKRG_DEBUG_RULE_KPROBE(p_call_usermodehelper_exec), ++ P_LKRG_DEBUG_RULE_KPROBE(p_set_current_groups), ++ P_LKRG_DEBUG_RULE_KPROBE(p_ovl_create_or_link), ++ P_LKRG_DEBUG_RULE_KPROBE(p_revert_creds), ++ P_LKRG_DEBUG_RULE_KPROBE(p_override_creds), ++ P_LKRG_DEBUG_RULE_KPROBE(security_bprm_committing_creds), ++ P_LKRG_DEBUG_RULE_KPROBE(security_bprm_committed_creds), ++ P_LKRG_DEBUG_RULE_KPROBE(p_sys_setresuid), ++ P_LKRG_DEBUG_RULE_KPROBE(p_sys_keyctl), ++ P_LKRG_DEBUG_RULE_KPROBE(p_key_change_session_keyring), ++ P_LKRG_DEBUG_RULE_KPROBE(p_sys_add_key), ++ P_LKRG_DEBUG_RULE_KPROBE(p_sys_request_key), ++ P_LKRG_DEBUG_RULE_KPROBE(p_capable), ++ P_LKRG_DEBUG_RULE_KPROBE(p_sel_write_enforce), ++ P_LKRG_DEBUG_RULE_KPROBE(p_pcfi___queue_work), ++ P_LKRG_DEBUG_RULE_KPROBE(p_pcfi_schedule), ++ 
P_LKRG_DEBUG_RULE_KPROBE(p_pcfi_lookup_fast), ++ P_LKRG_DEBUG_RULE_KPROBE(p_pcfi_mark_inode_dirty), ++ P_LKRG_DEBUG_RULE_KPROBE(p_sys_setreuid), ++ P_LKRG_DEBUG_RULE_KPROBE(p_sys_setgid), ++ P_LKRG_DEBUG_RULE_KPROBE(p_call_usermodehelper), ++ P_LKRG_DEBUG_RULE_KPROBE(p_x32_sys_keyctl), ++ P_LKRG_DEBUG_RULE_KPROBE(p_sys_setfsuid), ++ P_LKRG_DEBUG_RULE_KPROBE(p_do_exit), ++ P_LKRG_DEBUG_RULE_KPROBE(p_wake_up_new_task), ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) ++ P_LKRG_DEBUG_RULE_KPROBE(p_switch_idt), ++#endif ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,0) ++ P_LKRG_DEBUG_RULE_KPROBE(p_arch_jump_label_transform), ++ P_LKRG_DEBUG_RULE_KPROBE(p_arch_jump_label_transform_apply), ++#endif ++#endif ++ ++ // Disable to noisy. ++ // P_LKRG_DEBUG_RULE(p_ed_enforce_pcfi), ++ // P_LKRG_DEBUG_RULE(p_rb_find_ed_pid), ++ // P_LKRG_DEBUG_RULE(p_validate_task_f), ++ // P_LKRG_DEBUG_RULE(p_ed_wq_valid_cache_init), ++ // P_LKRG_DEBUG_RULE(p_ed_pcfi_validate_sp), ++ ++ { 0, NULL } ++}; ++ ++void __cyg_profile_func_enter(void *func, void *caller) { ++ ++ struct p_addr_name *it; ++ ++ for (it = p_addr_name_array; it->name != NULL; it++) { ++ if (it->addr == (uintptr_t)func) { ++ p_debug_log(P_LKRG_STRONG_DBG, ++ "Entering function <%s>\n", it->name); ++ break; ++ } ++ } ++} ++ ++void __cyg_profile_func_exit(void *func, void *caller) { ++ ++ struct p_addr_name *it; ++ ++ for (it = p_addr_name_array; it->name != NULL; it++) { ++ if (it->addr == (uintptr_t)func) { ++ p_debug_log(P_LKRG_STRONG_DBG, ++ "Leaving function <%s>\n", it->name); ++ break; ++ } ++ } ++} ++#endif /* P_LKRG_DEBUG_BUILD */ +diff --git a/security/lkrg/modules/print_log/p_lkrg_log_level_shared.h b/security/lkrg/modules/print_log/p_lkrg_log_level_shared.h +new file mode 100644 +index 000000000000..de5273fb34af +--- /dev/null ++++ b/security/lkrg/modules/print_log/p_lkrg_log_level_shared.h +@@ -0,0 +1,65 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Log level definitions ++ * ++ * 
Notes: ++ * - Log level definitions shared with user-mode client ++ * ++ * Timeline: ++ * - Created: 31.III.2016 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_LOG_LEVEL_SHARED_H ++#define P_LKRG_LOG_LEVEL_SHARED_H ++ ++ ++/* ++ * Debugging definitions... ++ */ ++ ++// Do we want to provide debug information? ++#define P_LKRG_DEBUG ++ ++/* Do we want to precisely track changes of 'off' flag per each process? ++ * If yes, uncomment it here */ ++//#define P_LKRG_TASK_OFF_DEBUG ++ ++// Do we want to precisely track all kernel .text section changes? ++// By default NO. If you want it (and print relevant information) ++// Uncomment it here ++//#define P_LKRG_JUMP_LABEL_STEXT_DEBUG ++ ++// Debug every time we enter/exit notifiers function? ++// not recommended - will be too noisy for some notifiers! :) ++//#define P_LKRG_NOTIFIER_DBG ++ ++// Debug every time we enter/exit *kprobed* function? ++// not recommended - will be very noisy... ++//#define P_LKRG_STRONG_KPROBE_DEBUG ++ ++enum P_LOG_LEVELS { ++ ++ P_LOG_LEVEL_NONE, ++ P_LOG_LEVEL_ALIVE, ++ P_LOG_LEVEL_ERRORS, ++ P_LOG_LEVEL_WARNS, ++ P_LOG_LEVEL_INFOS, ++ ++#ifdef P_LKRG_DEBUG ++ ++ P_LOG_LEVEL_DBG, ++ P_LOG_LEVEL_STRONG_DBG, ++ ++#endif ++ ++ P_LOG_LEVEL_MAX ++ ++}; ++ ++#endif +diff --git a/security/lkrg/modules/print_log/p_lkrg_print_log.h b/security/lkrg/modules/print_log/p_lkrg_print_log.h +new file mode 100644 +index 000000000000..1dcdbad6116b +--- /dev/null ++++ b/security/lkrg/modules/print_log/p_lkrg_print_log.h +@@ -0,0 +1,245 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Error module ++ * ++ * Notes: ++ * - Error code definitions ++ * ++ * Timeline: ++ * - Created: 24.XI.2015 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_PRINT_LOG_H ++#define P_LKRG_PRINT_LOG_H ++ ++#include "p_lkrg_log_level_shared.h" ++ ++// Everything is fine... 
++#define P_LKRG_SUCCESS 0x0 ++ ++// General error ++#define P_LKRG_GENERAL_ERROR -1 ++ ++// Can't find (resolve) "kallsyms_lookup_name" function ++#define P_LKRG_RESOLVER_ERROR -100 ++ ++// Can't initialize kmod module ++#define P_LKRG_KMOD_ERROR -101 ++ ++// Can't generate database - hashes ++#define P_LKRG_DATABASE_ERROR -102 ++ ++// Can't initialize protected features ++#define P_LKRG_PROTECTED_FEATURES_ERROR -103 ++ ++// Can't register hot CPU plug[in/out] handler ++#define P_LKRG_HPCPU_ERROR -104 ++ ++// Can't register hot CPU plug[in/out] handler ++#define P_LKRG_EXPLOIT_DETECTION_ERROR -105 ++ ++// Enable hash from IOMMU table? - not recommended! ++// By default disabled ++//#define P_LKRG_IOMMU_HASH_ENABLED ++ ++#define P_LKRG_KMOD_DUMP_RACE -200 ++ ++ ++// Signature in logs... ++#define P_LKRG_SIGNATURE "[p_lkrg] " ++ ++//#define P_LKRG_PRINT __P_LKRG_CRIT ++ ++#define P_LKRG_ALIVE 1 ++#define P_LKRG_CRIT 2 ++#define P_LKRG_ERR 3 ++#define P_LKRG_WARN 4 ++#define P_LKRG_INFO 5 ++ ++#define P_LKRG_DBG 6 ++#define P_LKRG_STRONG_DBG 7 ++ ++ ++#define __P_LKRG_CRIT KERN_CRIT ++#define __P_LKRG_ERR KERN_ERR ++#define __P_LKRG_WARN KERN_WARNING ++#define __P_LKRG_INFO KERN_INFO ++ ++#define __P_LKRG_ALIVE __P_LKRG_CRIT ++ ++#define __P_LKRG_DBG KERN_ALERT ++#define __P_LKRG_STRONG_DBG __P_LKRG_DBG ++ ++ ++/* ++#ifdef P_LKRG_DEBUG ++ ++#define p_print_log(p_level, p_fmt, p_args...) 
\ ++({ \ ++ int p_print_ret = 0; \ ++ \ ++ if (p_level == P_LKRG_CRIT) { \ ++ p_print_ret = p_print_crit(__P_LKRG_CRIT P_LKRG_SIGNATURE p_fmt, ## p_args); \ ++ } else if (p_level == P_LKRG_ALIVE) { \ ++ p_print_ret = p_print_alive(__P_LKRG_ALIVE P_LKRG_SIGNATURE p_fmt, ## p_args); \ ++ } else if (p_level == P_LKRG_ERR) { \ ++ p_print_ret = p_print_err(__P_LKRG_ERR P_LKRG_SIGNATURE p_fmt, ## p_args); \ ++ } else if (p_level == P_LKRG_WARN) { \ ++ p_print_ret = p_print_warn(__P_LKRG_WARN P_LKRG_SIGNATURE p_fmt, ## p_args); \ ++ } else if (p_level == P_LKRG_INFO) { \ ++ p_print_ret = p_print_info(__P_LKRG_INFO P_LKRG_SIGNATURE p_fmt, ## p_args); \ ++ } else if (p_level == P_LKRG_DBG) { \ ++ p_print_ret = p_print_dbg(__P_LKRG_DBG P_LKRG_SIGNATURE p_fmt, ## p_args); \ ++ } else if (p_level == P_LKRG_STRONG_DBG) { \ ++ p_print_ret = p_print_dbg2(__P_LKRG_STRONG_DBG P_LKRG_SIGNATURE p_fmt, ## p_args); \ ++ } \ ++ \ ++ p_print_ret; \ ++}) ++ ++ ++#else ++*/ ++#define p_print_log(p_level, p_fmt, p_args...) \ ++({ \ ++ int p_print_ret = 0; \ ++ \ ++ if (p_level == P_LKRG_CRIT) { \ ++ p_print_ret = p_print_crit(__P_LKRG_CRIT P_LKRG_SIGNATURE p_fmt, ## p_args); \ ++ } else if (p_level == P_LKRG_ALIVE) { \ ++ p_print_ret = p_print_alive(__P_LKRG_ALIVE P_LKRG_SIGNATURE p_fmt, ## p_args); \ ++ } else if (p_level == P_LKRG_ERR) { \ ++ p_print_ret = p_print_err(__P_LKRG_ERR P_LKRG_SIGNATURE p_fmt, ## p_args); \ ++ } else if (p_level == P_LKRG_WARN) { \ ++ p_print_ret = p_print_warn(__P_LKRG_WARN P_LKRG_SIGNATURE p_fmt, ## p_args); \ ++ } else if (p_level == P_LKRG_INFO) { \ ++ p_print_ret = p_print_info(__P_LKRG_INFO P_LKRG_SIGNATURE p_fmt, ## p_args); \ ++ } \ ++ \ ++ p_print_ret; \ ++}) ++ ++//#endif ++ ++ ++ ++#define p_print_crit(p_fmt, p_args...) \ ++({ \ ++ printk(p_fmt, ## p_args); \ ++}) ++ ++#define p_print_alive(p_fmt, p_args...) 
\ ++({ \ ++ int p_print_ret = 0; \ ++ \ ++ if (P_CTRL(p_log_level) >= P_LOG_LEVEL_ALIVE) { \ ++ p_print_ret = printk(p_fmt, ## p_args); \ ++ } \ ++ \ ++ p_print_ret; \ ++}) ++ ++#define p_print_err(p_fmt, p_args...) \ ++({ \ ++ int p_print_ret = 0; \ ++ \ ++ if (P_CTRL(p_log_level) >= P_LOG_LEVEL_ERRORS) { \ ++ p_print_ret = printk(p_fmt, ## p_args); \ ++ } \ ++ \ ++ p_print_ret; \ ++}) ++ ++#define p_print_warn(p_fmt, p_args...) \ ++({ \ ++ int p_print_ret = 0; \ ++ \ ++ if (P_CTRL(p_log_level) >= P_LOG_LEVEL_WARNS) { \ ++ p_print_ret = printk(p_fmt, ## p_args); \ ++ } \ ++ \ ++ p_print_ret; \ ++}) ++ ++#define p_print_info(p_fmt, p_args...) \ ++({ \ ++ int p_print_ret = 0; \ ++ \ ++ if (P_CTRL(p_log_level) >= P_LOG_LEVEL_INFOS) { \ ++ p_print_ret = printk(p_fmt, ## p_args); \ ++ } \ ++ \ ++ p_print_ret; \ ++}) ++ ++ ++#ifdef P_LKRG_DEBUG ++ ++#ifdef P_LKRG_NOTIFIER_DBG ++ #define p_debug_notifier_log(p_fmt, p_args...) \ ++ p_debug_log(P_LKRG_STRONG_DBG, p_fmt, ## p_args) ++#else ++ #define p_debug_notifier_log(p_fmt, p_args...) ({ 0x0; }) ++#endif ++ ++#ifdef P_LKRG_STRONG_KPROBE_DEBUG ++ #define p_debug_kprobe_log(p_fmt, p_args...) \ ++ p_debug_log(P_LKRG_STRONG_DBG, p_fmt, ## p_args) ++#else ++ #define p_debug_kprobe_log(p_fmt, p_args...) ({ 0x0; }) ++#endif ++ ++#define p_debug_log(p_level, p_fmt, p_args...) \ ++({ \ ++ int p_print_ret = 0; \ ++ \ ++ if (p_level == P_LKRG_DBG) { \ ++ p_print_ret = p_print_dbg(__P_LKRG_DBG P_LKRG_SIGNATURE p_fmt, ## p_args); \ ++ } else if (p_level == P_LKRG_STRONG_DBG) { \ ++ p_print_ret = p_print_dbg2(__P_LKRG_STRONG_DBG P_LKRG_SIGNATURE p_fmt, ## p_args); \ ++ } \ ++ \ ++ p_print_ret; \ ++}) ++ ++#define p_print_dbg(p_fmt, p_args...) \ ++({ \ ++ int p_print_ret = 0; \ ++ \ ++ if (P_CTRL(p_log_level) >= P_LOG_LEVEL_DBG) { \ ++ p_print_ret = printk(p_fmt, ## p_args); \ ++ } \ ++ \ ++ p_print_ret; \ ++}) ++ ++#define p_print_dbg2(p_fmt, p_args...) 
\ ++({ \ ++ int p_print_ret = 0; \ ++ \ ++ if (P_CTRL(p_log_level) >= P_LOG_LEVEL_STRONG_DBG) { \ ++ p_print_ret = printk(p_fmt, ## p_args); \ ++ } \ ++ \ ++ p_print_ret; \ ++}) ++ ++#else ++ ++#define p_debug_log(p_level, p_fmt, p_args...) ({ 0x0; }) ++ ++#define p_print_dbg(p_fmt, p_args...) ({ 0x0; }) ++#define p_print_dbg2(p_fmt, p_args...) ({ 0x0; }) ++ ++#define p_debug_notifier_log(p_fmt, p_args...) ({ 0x0; }) ++#define p_debug_kprobe_log(p_fmt, p_args...) ({ 0x0; }) ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/self-defense/hiding/p_hiding.c b/security/lkrg/modules/self-defense/hiding/p_hiding.c +new file mode 100644 +index 000000000000..393549df1615 +--- /dev/null ++++ b/security/lkrg/modules/self-defense/hiding/p_hiding.c +@@ -0,0 +1,114 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - (Un)Hiding module ++ * ++ * Notes: ++ * - (Un)Hide itself from the module system activity components ++ * ++ * Timeline: ++ * - Created: 10.XI.2016 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#include "../../../p_lkrg_main.h" ++ ++/* ++struct kobject *p_find_kobj_parent; ++struct module_sect_attrs *p_find_sect_attrs; ++struct module_notes_attrs *p_find_notes_attrs; ++*/ ++ ++ ++void p_hide_itself(void) { ++ ++ if (P_CTRL(p_hide_lkrg)) { ++ p_print_log(P_LKRG_WARN, ++ "Module is already hidden!\n"); ++ return; ++ } ++ ++/* ++ p_find_kobj_parent = p_find_me->mkobj.kobj.parent; ++ p_find_sect_attrs = p_find_me->sect_attrs; ++ p_find_notes_attrs = p_find_me->notes_attrs; ++*/ ++ ++ /* We are heavily consuming module list here - take 'module_mutex' */ ++ mutex_lock(P_SYM(p_module_mutex)); ++ spin_lock(&p_db_lock); ++ ++ P_HIDE_FROM_MODULE_LIST(P_SYM(p_find_me)); ++ P_HIDE_FROM_KOBJ(P_SYM(p_find_me)); ++#if defined(CONFIG_DYNAMIC_DEBUG) ++ P_HIDE_FROM_DDEBUG(P_SYM(p_find_me)); ++#endif ++ ++ /* OK, now recalculate hashes again! 
*/ ++ while(p_kmod_hash(&p_db.p_module_list_nr,&p_db.p_module_list_array, ++ &p_db.p_module_kobj_nr,&p_db.p_module_kobj_array, 0x2) != P_LKRG_SUCCESS) ++ schedule(); ++ ++ /* Update global module list/kobj hash */ ++ p_db.p_module_list_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_list_array, ++ (unsigned int)p_db.p_module_list_nr * sizeof(p_module_list_mem)); ++ ++ p_db.p_module_kobj_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_kobj_array, ++ (unsigned int)p_db.p_module_kobj_nr * sizeof(p_module_kobj_mem)); ++ /* We should be fine now! */ ++ ++ P_CTRL(p_hide_lkrg) = 1; ++ ++ spin_unlock(&p_db_lock); ++ /* Release the 'module_mutex' */ ++ mutex_unlock(P_SYM(p_module_mutex)); ++} ++ ++#ifdef P_LKRG_UNHIDE ++void p_unhide_itself(void) { ++ ++ /* Dead function - used only during development process */ ++ struct module *p_tmp_mod = P_GLOBAL_TO_MODULE(P_SYM(p_global_modules)); ++ struct kset *p_tmp_kset = p_tmp_mod->mkobj.kobj.kset; ++ struct kobj_type *p_tmp_ktype = p_tmp_mod->mkobj.kobj.ktype; ++ ++ if (!P_CTRL(p_hide_lkrg)) { ++ p_print_log(P_LKRG_WARN, ++ "Module is already unhidden (visible)!\n"); ++ return; ++ } ++ ++ /* We are heavily consuming module list here - take 'module_mutex' */ ++ mutex_lock(P_SYM(p_module_mutex)); ++ spin_lock(&p_db_lock); ++ ++ P_UNHIDE_FROM_MODULE_LIST(P_SYM(p_find_me),P_SYM(p_global_modules)); ++ P_UNHIDE_FROM_KOBJ(P_SYM(p_find_me),p_tmp_kset,p_tmp_ktype); ++ ++// P_UNHIDE_FROM_KOBJ(P_SYM(p_find_me),p_find_kobj_parent, ++// p_find_sect_attrs,p_find_notes_attrs); ++ ++ /* OK, now recalculate hashes again! 
*/ ++ while(p_kmod_hash(&p_db.p_module_list_nr,&p_db.p_module_list_array, ++ &p_db.p_module_kobj_nr,&p_db.p_module_kobj_array, 0x2) != P_LKRG_SUCCESS) ++ schedule(); ++ ++ /* Update global module list/kobj hash */ ++ p_db.p_module_list_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_list_array, ++ (unsigned int)p_db.p_module_list_nr * sizeof(p_module_list_mem)); ++ ++ p_db.p_module_kobj_hash = p_lkrg_fast_hash((unsigned char *)p_db.p_module_kobj_array, ++ (unsigned int)p_db.p_module_kobj_nr * sizeof(p_module_kobj_mem)); ++ /* We should be fine now! */ ++ ++ P_CTRL(p_hide_lkrg) = 0; ++ ++ spin_unlock(&p_db_lock); ++ /* Release the 'module_mutex' */ ++ mutex_unlock(P_SYM(p_module_mutex)); ++} ++#endif +diff --git a/security/lkrg/modules/self-defense/hiding/p_hiding.h b/security/lkrg/modules/self-defense/hiding/p_hiding.h +new file mode 100644 +index 000000000000..03a846425557 +--- /dev/null ++++ b/security/lkrg/modules/self-defense/hiding/p_hiding.h +@@ -0,0 +1,144 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - (Un)Hiding module ++ * ++ * Notes: ++ * - (Un)Hide itself from the module system activity components ++ * ++ * Timeline: ++ * - Created: 10.XI.2016 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_HIDING_MODULE_H ++#define P_LKRG_HIDING_MODULE_H ++ ++#define P_HIDE_FROM_MODULE_LIST(p_arg) \ ++do { \ ++ p_debug_log(P_LKRG_DBG, "Hiding module [%s | 0x%lx]\n", \ ++ p_arg->name,(unsigned long)p_arg); \ ++ list_del(&p_arg->list); \ ++ /* p_arg->list.next->prev = p_arg->list.prev; */ \ ++ /* p_arg->list.prev->next = p_arg->list.next; */ \ ++} while(0) ++ ++#define P_HIDE_FROM_KOBJ(p_arg) \ ++do { \ ++ if (p_arg->holders_dir && p_arg->holders_dir->parent) { \ ++ p_debug_log(P_LKRG_DBG, "Deleting KOBJ [0x%lx]\n", \ ++ (unsigned long)p_arg->holders_dir->parent); \ ++ kobject_del(p_arg->holders_dir->parent); \ ++ } \ ++} while(0) ++ ++/* ++#define P_HIDE_FROM_KOBJ(p_arg) \ ++do { \ ++ 
p_debug_log(P_LKRG_DBG, "Deleting KOBJ [0x%lx]\n", \ ++ (unsigned long)&p_arg->mkobj.kobj); \ ++ kobject_del(&p_arg->mkobj.kobj); \ ++ p_arg->sect_attrs = NULL; \ ++ p_arg->notes_attrs = NULL; \ ++} while(0) ++*/ ++ ++#if defined(CONFIG_DYNAMIC_DEBUG) ++#define P_HIDE_FROM_DDEBUG(p_arg) \ ++do { \ ++ p_debug_log(P_LKRG_DBG, \ ++ "Deleting ddebug information for module [%s]\n", \ ++ p_arg->name); \ ++ p_ddebug_remove_module(p_arg->name); \ ++} while(0) ++#endif ++ ++#ifdef P_LKRG_UNHIDE // (P_SYM(p_find_me), P_SYM(p_global_modules)) ++ ++#define P_UNHIDE_FROM_MODULE_LIST(x, y) \ ++do { \ ++ p_debug_log(P_LKRG_DBG, "Unhiding module [%s | 0x%lx]\n", \ ++ x->name,(unsigned long)x); \ ++ list_add_rcu(&x->list, y); \ ++} while(0) ++ ++ ++#define P_UNHIDE_FROM_KOBJ(p_mod,p_kset,p_ktype) \ ++do { \ ++/* struct kobject *p_kobj; */ \ ++ struct module_use *p_use; \ ++ int p_tmp; \ ++ p_debug_log(P_LKRG_DBG, "Creating KOBJ for [%s]\n", \ ++ p_mod->name); \ ++/* p_kobj = kset_find_obj(p_kset, p_mod->name); \ ++ if (p_kobj) { \ ++ p_debug_log(P_LKRG_DBG, "Module [%s] is NOT hidden!\n", \ ++ p_mod->name); \ ++ kobject_put(p_kobj); \ ++ return; \ ++ } */ \ ++ p_mod->mkobj.mod = p_mod; \ ++ memset(&p_mod->mkobj.kobj, 0, sizeof(p_mod->mkobj.kobj)); \ ++ p_mod->mkobj.kobj.kset = p_kset; \ ++ if (kobject_init_and_add(&p_mod->mkobj.kobj, p_ktype, NULL, \ ++ "%s", p_mod->name)) { \ ++ p_debug_log(P_LKRG_DBG, "FAILED :(\n"); \ ++ return; \ ++ } \ ++ p_mod->holders_dir = kobject_create_and_add("holders", \ ++ &p_mod->mkobj.kobj); \ ++ if (!p_mod->holders_dir) { \ ++ p_debug_log(P_LKRG_DBG, "FAILED :(\n"); \ ++ return; \ ++ } \ ++ if ( (p_tmp = sysfs_create_files(&p_mod->mkobj.kobj, \ ++ (const struct attribute **)&p_mod->modinfo_attrs)) != 0) { \ ++ p_debug_log(P_LKRG_DBG, "FAILED :(\n"); \ ++ return; \ ++ } \ ++ /* add_usage_links() */ \ ++ list_for_each_entry(p_use, &p_mod->target_list, target_list) { \ ++ p_tmp = sysfs_create_link(p_use->target->holders_dir, \ ++ &p_mod->mkobj.kobj, 
p_mod->name); \ ++ } \ ++ /* Created KOBJ for this module is very 'synthetic'. */ \ ++ /* During unloading module process, sysfs is heavily */ \ ++ /* Influenced. Some of the operations are dangerous if */ \ ++ /* Operated on 'syntethic' objects. To avoid crashes */ \ ++ /* And limit 'sysfs interaction' let's NULL some of */ \ ++ /* Critical 'information' pointers :) */ \ ++ p_mod->notes_attrs = NULL; \ ++ p_mod->sect_attrs = NULL; \ ++ kobject_uevent(&p_mod->mkobj.kobj, KOBJ_ADD); \ ++ p_debug_log(P_LKRG_DBG, "SUCCESS :)\n"); \ ++} while(0) ++ ++/* ++#define P_UNHIDE_FROM_KOBJ(p_mod,p_kobj_parent,p_sect,p_notes) \ ++do { \ ++ int p_ret; \ ++ \ ++ p_debug_log(P_LKRG_DBG, "Reestoring KOBJ[0x%lx] for [%s]\n", \ ++ (unsigned long)&p_mod->mkobj.kobj,p_mod->name); \ ++ if ( (p_ret = kobject_add(&p_mod->mkobj.kobj, p_kobj_parent, \ ++ "p_lkrg")) < 0) { \ ++ p_print_log(P_LKRG_INFO, "FAILED to restore KOBJ :(\n"); \ ++ return; \ ++ } \ ++ p_mod->sect_attrs = p_sect; \ ++ p_mod->notes_attrs = p_notes; \ ++} while(0) ++*/ ++#endif ++ ++ ++void p_hide_itself(void); ++#ifdef P_LKRG_UNHIDE ++void p_unhide_itself(void); ++#endif ++ ++#endif +diff --git a/security/lkrg/modules/wrap/p_struct_wrap.h b/security/lkrg/modules/wrap/p_struct_wrap.h +new file mode 100644 +index 000000000000..3edc0a115839 +--- /dev/null ++++ b/security/lkrg/modules/wrap/p_struct_wrap.h +@@ -0,0 +1,765 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Kernel's modules module wrapping access to some critical structures ++ * ++ * Notes: ++ * - Wrapping some of the critical structures in the system e.g.: ++ * -> k[g/u]id_t ++ * -> accessing 'struct module' structure - since kernel 4.5 'struct module' ++ * was changed. Accessing some critical variables must be smarter now. ++ * We are wrapping the necessary fields here. 
++ * ++ * http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/diff/include/linux/module.h?id=7523e4dc5057 ++ * ++ * ++ * Timeline: ++ * - Created: 11.IX.2017 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_WRAPPER_H ++#define P_LKRG_WRAPPER_H ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) ++ ++static inline void p_set_uid(kuid_t *p_arg, unsigned int p_val) { ++ p_arg->val = p_val; ++} ++ ++static inline unsigned int p_get_uid(const kuid_t *p_from) { ++ return p_from->val; ++} ++ ++static inline void p_set_gid(kgid_t *p_arg, unsigned int p_val) { ++ p_arg->val = p_val; ++} ++ ++static inline unsigned int p_get_gid(const kgid_t *p_from) { ++ return p_from->val; ++} ++ ++#else ++ ++#ifdef CONFIG_UIDGID_STRICT_TYPE_CHECKS ++ ++static inline void p_set_uid(kuid_t *p_arg, unsigned int p_val) { ++ p_arg->val = p_val; ++} ++ ++static inline unsigned int p_get_uid(const kuid_t *p_from) { ++ return p_from->val; ++} ++ ++static inline void p_set_gid(kgid_t *p_arg, unsigned int p_val) { ++ p_arg->val = p_val; ++} ++ ++static inline unsigned int p_get_gid(const kgid_t *p_from) { ++ return p_from->val; ++} ++ ++#else ++ ++static inline void p_set_uid(kuid_t *p_arg, unsigned int p_val) { ++ *p_arg = p_val; ++} ++ ++static inline unsigned int p_get_uid(const kuid_t *p_from) { ++ return *p_from; ++} ++ ++static inline void p_set_gid(kgid_t *p_arg, unsigned int p_val) { ++ *p_arg = p_val; ++} ++ ++static inline unsigned int p_get_gid(const kgid_t *p_from) { ++ return *p_from; ++} ++ ++#endif ++ ++#endif ++ ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 6) ++ ++#if defined(CONFIG_GRKERNSEC) ++ ++static inline void *p_module_core(struct module *p_mod) { ++ return p_mod->core_layout.base_rx; ++} ++ ++static inline unsigned int p_core_text_size(struct module *p_mod) { ++ return p_mod->core_layout.size_rx; ++} ++ ++#else ++ ++static inline void *p_module_core(struct module *p_mod) { ++ return 
p_mod->core_layout.base; ++} ++ ++static inline unsigned int p_core_size(struct module *p_mod) { ++ return p_mod->core_layout.size; ++} ++ ++static inline unsigned int p_core_text_size(struct module *p_mod) { ++ return p_mod->core_layout.text_size; ++} ++ ++static inline unsigned int p_init_text_size(struct module *p_mod) { ++ return p_mod->init_layout.text_size; ++} ++ ++#endif ++ ++#else ++ ++static inline void *p_module_core(struct module *p_mod) { ++ return p_mod->module_core; ++} ++ ++static inline unsigned int p_init_text_size(struct module *p_mod) { ++ return p_mod->init_text_size; ++} ++ ++static inline unsigned int p_core_text_size(struct module *p_mod) { ++ return p_mod->core_text_size; ++} ++ ++static inline unsigned int p_core_size(struct module *p_mod) { ++ return p_mod->core_size; ++} ++ ++#endif ++ ++// #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) ++ ++#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE ++ ++static inline unsigned long p_jump_entry_code(const struct jump_entry *entry) { ++ return (unsigned long)((unsigned long)&entry->code + entry->code); ++} ++ ++static inline unsigned long p_jump_entry_target(const struct jump_entry *entry) { ++ return (unsigned long)((unsigned long)&entry->target) + entry->target; ++} ++ ++static inline struct static_key *p_jump_entry_key(const struct jump_entry *entry) { ++ long offset = entry->key & ~3L; ++ ++ return (struct static_key *)((unsigned long)&entry->key + offset); ++} ++ ++#else ++ ++static inline unsigned long p_jump_entry_code(const struct jump_entry *entry) { ++ return (unsigned long)entry->code; ++} ++ ++static inline unsigned long p_jump_entry_target(const struct jump_entry *entry) { ++ return (unsigned long)entry->target; ++} ++ ++static inline struct static_key *p_jump_entry_key(const struct jump_entry *entry) { ++ return (struct static_key *)((unsigned long)entry->key & ~3UL); ++} ++ ++#endif ++ ++#if defined(CONFIG_DYNAMIC_DEBUG) ++static inline int p_ddebug_remove_module(const char *p_name) { 
++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) ++ ++ return ddebug_remove_module(p_name); ++ ++#else ++ ++ return P_SYM(p_ddebug_remove_module_ptr)(p_name); ++ ++#endif ++ ++} ++#endif ++ ++ ++/* ++ * Keep this section as last here. ++ * Let's define architecture dependent arguments based on the registers ++ * from the intercepted process context. ++ */ ++ ++#ifdef CONFIG_X86 ++ ++/* ++ * Get ++ */ ++ #if defined(CONFIG_X86_64) ++static inline unsigned long p_regs_get_arg1(struct pt_regs *p_regs) { ++ return p_regs->di; ++} ++ ++static inline unsigned long p_regs_get_arg2(struct pt_regs *p_regs) { ++ return p_regs->si; ++} ++ ++static inline unsigned long p_regs_get_fp(struct pt_regs *p_regs) { ++ return p_regs->bp; ++} ++ ++static inline unsigned long p_regs_get_sp(struct pt_regs *p_regs) { ++ return p_regs->sp; ++} ++ ++static inline unsigned long p_regs_get_ip(struct pt_regs *p_regs) { ++ return p_regs->ip; ++} ++ ++static inline unsigned long p_regs_get_ret(struct pt_regs *p_regs) { ++ return p_regs->ax; ++} ++ ++static inline unsigned long p_get_thread_sp(struct task_struct *p_arg) { ++ return p_arg->thread.sp; ++} ++ ++/* ++ * Syscalls ++ */ ++static inline unsigned long p_syscall_get_arg1(struct pt_regs *p_regs) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ return p_regs_get_arg1((struct pt_regs *)p_regs_get_arg1(p_regs)); ++#else ++ return p_regs_get_arg1(p_regs); ++#endif ++} ++ ++static inline unsigned long p_syscall_get_arg2(struct pt_regs *p_regs) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ return p_regs_get_arg2((struct pt_regs *)p_regs_get_arg1(p_regs)); ++#else ++ return p_regs_get_arg2(p_regs); ++#endif ++} ++ ++ #else ++ ++static inline unsigned long p_regs_get_arg1(struct pt_regs *p_regs) { ++ return p_regs->ax; ++} ++ ++static inline unsigned long p_regs_get_arg2(struct pt_regs *p_regs) { ++ return p_regs->dx; ++} ++ ++static 
inline unsigned long p_regs_get_arg3(struct pt_regs *p_regs) { ++ return p_regs->cx; ++} ++ ++static inline unsigned long p_regs_get_fp(struct pt_regs *p_regs) { ++ return p_regs->bp; ++} ++ ++static inline unsigned long p_regs_get_sp(struct pt_regs *p_regs) { ++ return p_regs->sp; ++} ++ ++static inline unsigned long p_regs_get_ip(struct pt_regs *p_regs) { ++ return p_regs->ip; ++} ++ ++static inline unsigned long p_regs_get_ret(struct pt_regs *p_regs) { ++ return p_regs->ax; ++} ++ ++static inline unsigned long p_get_thread_sp(struct task_struct *p_arg) { ++ return p_arg->thread.sp; ++} ++ ++/* ++ * Syscalls ++ */ ++static inline unsigned long p_syscall_get_arg1(struct pt_regs *p_regs) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ return p_regs_get_arg2((struct pt_regs *)p_regs_get_arg1(p_regs)); ++#else ++ return p_regs_get_arg2(p_regs); ++#endif ++} ++ ++static inline unsigned long p_syscall_get_arg2(struct pt_regs *p_regs) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ return p_regs_get_arg3((struct pt_regs *)p_regs_get_arg1(p_regs)); ++#else ++ return p_regs_get_arg3(p_regs); ++#endif ++} ++ ++ #endif ++ ++ ++/* ++ * Set ++ */ ++ #if defined(CONFIG_X86_64) ++ ++static inline void p_regs_set_arg1(struct pt_regs *p_regs, unsigned long p_val) { ++ p_regs->di = p_val; ++} ++ ++static inline void p_regs_set_arg2(struct pt_regs *p_regs, unsigned long p_val) { ++ p_regs->si = p_val; ++} ++ ++static inline void p_regs_set_ip(struct pt_regs *p_regs, unsigned long p_val) { ++ p_regs->ip = p_val; ++} ++ ++/* ++ * Syscalls ++ */ ++static inline void p_syscall_set_arg1(struct pt_regs *p_regs, unsigned long p_val) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ p_regs_set_arg1((struct pt_regs *)p_regs_get_arg1(p_regs), p_val); ++#else ++ p_regs_set_arg1(p_regs, p_val); ++#endif ++} ++ ++static inline void 
p_syscall_set_arg2(struct pt_regs *p_regs, unsigned long p_val) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ p_regs_set_arg2((struct pt_regs *)p_regs_get_arg1(p_regs), p_val); ++#else ++ p_regs_set_arg2(p_regs, p_val); ++#endif ++} ++ ++ #else ++ ++static inline void p_regs_set_arg1(struct pt_regs *p_regs, unsigned long p_val) { ++ p_regs->ax = p_val; ++} ++ ++static inline void p_regs_set_arg2(struct pt_regs *p_regs, unsigned long p_val) { ++ p_regs->dx = p_val; ++} ++ ++static inline void p_regs_set_arg3(struct pt_regs *p_regs, unsigned long p_val) { ++ p_regs->cx = p_val; ++} ++ ++static inline void p_regs_set_ip(struct pt_regs *p_regs, unsigned long p_val) { ++ p_regs->ip = p_val; ++} ++ ++/* ++ * Syscalls ++ */ ++static inline void p_syscall_set_arg1(struct pt_regs *p_regs, unsigned long p_val) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ p_regs_set_arg2((struct pt_regs *)p_regs_get_arg1(p_regs), p_val); ++#else ++ p_regs_set_arg2(p_regs, p_val); ++#endif ++} ++ ++static inline void p_syscall_set_arg2(struct pt_regs *p_regs, unsigned long p_val) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ p_regs_set_arg3((struct pt_regs *)p_regs_get_arg1(p_regs), p_val); ++#else ++ p_regs_set_arg3(p_regs, p_val); ++#endif ++} ++ ++ #endif ++ ++ ++static inline int p_set_memory_rw(unsigned long p_addr, int p_numpages) { ++ ++#if defined(P_KERNEL_AGGRESSIVE_INLINING) ++ return P_SYM(p_kernel_set_memory_rw)(p_addr, p_numpages); ++#else ++ return P_SYM(p_change_page_attr_set_clr)(&p_addr, p_numpages, ++ __pgprot(_PAGE_RW), ++ __pgprot(0), ++ 0, 0, NULL); ++#endif ++} ++ ++static inline int p_set_memory_ro(unsigned long p_addr, int p_numpages) { ++ ++#if defined(P_KERNEL_AGGRESSIVE_INLINING) ++ return P_SYM(p_kernel_set_memory_ro)(p_addr, p_numpages); ++#else ++ return P_SYM(p_change_page_attr_set_clr)(&p_addr, 
p_numpages, ++ __pgprot(0), ++ __pgprot(_PAGE_RW), ++ 0, 0, NULL); ++#endif ++} ++ ++static inline int p_set_memory_np(unsigned long p_addr, int p_numpages) { ++ ++#if defined(P_KERNEL_AGGRESSIVE_INLINING) ++ return 0x0; ++// return P_SYM(p_kernel_set_memory_np)(p_addr, p_numpages); ++#else ++ return P_SYM(p_change_page_attr_set_clr)(&p_addr, p_numpages, ++ __pgprot(0), ++ __pgprot(_PAGE_PRESENT), ++ 0, 0, NULL); ++#endif ++} ++ ++static inline int p_set_memory_p(unsigned long p_addr, int p_numpages) { ++ ++#if defined(P_KERNEL_AGGRESSIVE_INLINING) ++ return 0x0; ++#else ++ return P_SYM(p_change_page_attr_set_clr)(&p_addr, p_numpages, ++ __pgprot(_PAGE_PRESENT), ++ __pgprot(0), ++ 0, 0, NULL); ++#endif ++} ++ ++static inline void p_lkrg_open_rw_x86(void) { ++ ++ register unsigned long p_cr0; ++ ++ preempt_disable(); ++ barrier(); ++ p_cr0 = read_cr0() ^ X86_CR0_WP; ++ write_cr0(p_cr0); ++ barrier(); ++} ++ ++static inline void p_lkrg_close_rw_x86(void) { ++ ++ register unsigned long p_cr0; ++ ++ barrier(); ++ p_cr0 = read_cr0() ^ X86_CR0_WP; ++ write_cr0(p_cr0); ++ barrier(); ++ preempt_enable(); //_no_resched(); ++} ++ ++static inline void p_lkrg_open_rw(void) { ++ ++ unsigned long p_flags; ++ ++// preempt_disable(); ++ barrier(); ++ p_set_memory_rw((unsigned long)P_CTRL_ADDR,1); ++ barrier(); ++ /* It's a good time to verify if everything is fine */ ++ p_ed_pcfi_cpu(1); ++ p_tasks_read_lock(&p_flags); ++ p_ed_validate_current(); ++ p_tasks_read_unlock(&p_flags); ++} ++ ++static inline void p_lkrg_close_rw(void) { ++ ++ barrier(); ++ p_set_memory_ro((unsigned long)P_CTRL_ADDR,1); ++ barrier(); ++// preempt_enable(); //_no_resched(); ++} ++ ++/* ARM */ ++#elif defined(CONFIG_ARM) ++ ++/* ++ * Get ++ */ ++static inline unsigned long p_regs_get_arg1(struct pt_regs *p_regs) { ++ return p_regs->ARM_r0; ++} ++ ++static inline unsigned long p_regs_get_arg2(struct pt_regs *p_regs) { ++ return p_regs->ARM_r1; ++} ++ ++static inline unsigned long p_regs_get_fp(struct 
pt_regs *p_regs) { ++ return p_regs->ARM_fp; ++} ++ ++static inline unsigned long p_regs_get_sp(struct pt_regs *p_regs) { ++ return frame_pointer(p_regs); ++} ++ ++static inline unsigned long p_regs_get_ip(struct pt_regs *p_regs) { ++ return p_regs->ARM_pc; ++} ++ ++static inline unsigned long p_regs_get_ret(struct pt_regs *p_regs) { ++ return p_regs->ARM_r0; ++} ++ ++static inline unsigned long p_get_thread_sp(struct task_struct *p_arg) { ++ return thread_saved_sp(p_arg); ++} ++ ++/* ++ * Syscalls ++ */ ++static inline unsigned long p_syscall_get_arg1(struct pt_regs *p_regs) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ return p_regs_get_arg1((struct pt_regs *)p_regs_get_arg1(p_regs)); ++#else ++ return p_regs_get_arg1(p_regs); ++#endif ++} ++ ++static inline unsigned long p_syscall_get_arg2(struct pt_regs *p_regs) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ return p_regs_get_arg2((struct pt_regs *)p_regs_get_arg1(p_regs)); ++#else ++ return p_regs_get_arg2(p_regs); ++#endif ++} ++ ++/* ++ * Set ++ */ ++static inline void p_regs_set_arg1(struct pt_regs *p_regs, unsigned long p_val) { ++ p_regs->ARM_r0 = p_val; ++} ++ ++static inline void p_regs_set_arg2(struct pt_regs *p_regs, unsigned long p_val) { ++ p_regs->ARM_r1 = p_val; ++} ++ ++static inline void p_regs_set_ip(struct pt_regs *p_regs, unsigned long p_val) { ++ p_regs->ARM_pc = p_val; ++} ++ ++/* ++ * Syscalls ++ */ ++static inline void p_syscall_set_arg1(struct pt_regs *p_regs, unsigned long p_val) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ p_regs_set_arg1((struct pt_regs *)p_regs_get_arg1(p_regs), p_val); ++#else ++ p_regs_set_arg1(p_regs, p_val); ++#endif ++} ++ ++static inline void p_syscall_set_arg2(struct pt_regs *p_regs, unsigned long p_val) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) 
++ p_regs_set_arg2((struct pt_regs *)p_regs_get_arg1(p_regs), p_val); ++#else ++ p_regs_set_arg2(p_regs, p_val); ++#endif ++} ++ ++static inline int p_set_memory_rw(unsigned long p_addr, int p_numpages) { ++ ++#if defined(P_KERNEL_AGGRESSIVE_INLINING) ++ return P_SYM(p_kernel_set_memory_rw)(p_addr, p_numpages); ++#else ++ return P_SYM(p_change_memory_common)(p_addr, p_numpages, ++ __pgprot(0), ++ __pgprot(L_PTE_RDONLY)); ++#endif ++} ++ ++static inline int p_set_memory_ro(unsigned long p_addr, int p_numpages) { ++ ++#if defined(P_KERNEL_AGGRESSIVE_INLINING) ++ return P_SYM(p_kernel_set_memory_ro)(p_addr, p_numpages); ++#else ++ return P_SYM(p_change_memory_common)(p_addr, p_numpages, ++ __pgprot(L_PTE_RDONLY), ++ __pgprot(0)); ++#endif ++} ++ ++static inline void p_lkrg_open_rw(void) { ++ ++ unsigned long p_flags; ++ ++ preempt_disable(); ++ barrier(); ++ p_set_memory_rw((unsigned long)P_CTRL_ADDR,1); ++ barrier(); ++ /* It's a good time to verify if everything is fine */ ++ p_ed_pcfi_cpu(1); ++ p_tasks_read_lock(&p_flags); ++ p_ed_validate_current(); ++ p_tasks_read_unlock(&p_flags); ++} ++ ++static inline void p_lkrg_close_rw(void) { ++ ++ barrier(); ++ p_set_memory_ro((unsigned long)P_CTRL_ADDR,1); ++ barrier(); ++ preempt_enable(); //_no_resched(); ++} ++ ++/* ARM64 */ ++#elif defined(CONFIG_ARM64) ++ ++/* ++ * Get ++ */ ++static inline unsigned long p_regs_get_arg1(struct pt_regs *p_regs) { ++ return p_regs->regs[0]; ++} ++ ++static inline unsigned long p_regs_get_arg2(struct pt_regs *p_regs) { ++ return p_regs->regs[1]; ++} ++ ++static inline unsigned long p_regs_get_fp(struct pt_regs *p_regs) { ++ return p_regs->regs[29]; ++} ++ ++static inline unsigned long p_regs_get_sp(struct pt_regs *p_regs) { ++ return p_regs->sp; ++} ++ ++static inline unsigned long p_regs_get_ip(struct pt_regs *p_regs) { ++ return p_regs->pc; ++} ++ ++static inline unsigned long p_regs_get_ret(struct pt_regs *p_regs) { ++ return p_regs->regs[0]; ++} ++ ++static inline unsigned long 
p_get_thread_sp(struct task_struct *p_arg) { ++ return p_arg->thread.cpu_context.sp; ++} ++ ++/* ++ * Syscalls ++ */ ++static inline unsigned long p_syscall_get_arg1(struct pt_regs *p_regs) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ return p_regs_get_arg1((struct pt_regs *)p_regs_get_arg1(p_regs)); ++#else ++ return p_regs_get_arg1(p_regs); ++#endif ++} ++ ++static inline unsigned long p_syscall_get_arg2(struct pt_regs *p_regs) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ return p_regs_get_arg2((struct pt_regs *)p_regs_get_arg1(p_regs)); ++#else ++ return p_regs_get_arg2(p_regs); ++#endif ++} ++ ++/* ++ * Set ++ */ ++static inline void p_regs_set_arg1(struct pt_regs *p_regs, unsigned long p_val) { ++ p_regs->regs[0] = p_val; ++} ++ ++static inline void p_regs_set_arg2(struct pt_regs *p_regs, unsigned long p_val) { ++ p_regs->regs[1] = p_val; ++} ++ ++static inline void p_regs_set_ip(struct pt_regs *p_regs, unsigned long p_val) { ++ p_regs->pc = p_val; ++} ++ ++/* ++ * Syscalls ++ */ ++static inline void p_syscall_set_arg1(struct pt_regs *p_regs, unsigned long p_val) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ p_regs_set_arg1((struct pt_regs *)p_regs_get_arg1(p_regs), p_val); ++#else ++ p_regs_set_arg1(p_regs, p_val); ++#endif ++} ++ ++static inline void p_syscall_set_arg2(struct pt_regs *p_regs, unsigned long p_val) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) && defined(CONFIG_ARCH_HAS_SYSCALL_WRAPPER) ++ p_regs_set_arg2((struct pt_regs *)p_regs_get_arg1(p_regs), p_val); ++#else ++ p_regs_set_arg2(p_regs, p_val); ++#endif ++} ++ ++static inline int p_set_memory_rw(unsigned long p_addr, int p_numpages) { ++ ++#if defined(P_KERNEL_AGGRESSIVE_INLINING) ++ return P_SYM(p_kernel_set_memory_rw)(p_addr, p_numpages); ++#else ++ return P_SYM(p_change_memory_common)(p_addr, p_numpages, ++ 
__pgprot(PTE_WRITE), ++ __pgprot(PTE_RDONLY)); ++#endif ++} ++ ++static inline int p_set_memory_ro(unsigned long p_addr, int p_numpages) { ++ ++#if defined(P_KERNEL_AGGRESSIVE_INLINING) ++ return P_SYM(p_kernel_set_memory_ro)(p_addr, p_numpages); ++#else ++ return P_SYM(p_change_memory_common)(p_addr, p_numpages, ++ __pgprot(PTE_RDONLY), ++ __pgprot(PTE_WRITE)); ++#endif ++} ++ ++static inline int p_set_memory_np(unsigned long p_addr, int p_numpages) { ++ ++#if defined(P_KERNEL_AGGRESSIVE_INLINING) ++ return P_SYM(p_kernel_set_memory_valid)(p_addr, p_numpages, 0); ++#else ++ return P_SYM(p_change_memory_common)(p_addr, p_numpages, ++ __pgprot(0), ++ __pgprot(PTE_VALID)); ++#endif ++} ++ ++static inline int p_set_memory_p(unsigned long p_addr, int p_numpages) { ++ ++#if defined(P_KERNEL_AGGRESSIVE_INLINING) ++ return P_SYM(p_kernel_set_memory_valid)(p_addr, p_numpages, 1); ++#else ++ return P_SYM(p_change_memory_common)(p_addr, p_numpages, ++ __pgprot(PTE_VALID), ++ __pgprot(0)); ++#endif ++} ++ ++static inline void p_lkrg_open_rw(void) { ++ ++ unsigned long p_flags; ++ ++ preempt_disable(); ++ barrier(); ++ p_set_memory_rw((unsigned long)P_CTRL_ADDR,1); ++ barrier(); ++ /* It's a good time to verify if everything is fine */ ++ p_ed_pcfi_cpu(1); ++ p_tasks_read_lock(&p_flags); ++ p_ed_validate_current(); ++ p_tasks_read_unlock(&p_flags); ++} ++ ++static inline void p_lkrg_close_rw(void) { ++ ++ barrier(); ++ p_set_memory_ro((unsigned long)P_CTRL_ADDR,1); ++ barrier(); ++ preempt_enable(); //_no_resched(); ++} ++ ++#endif ++ ++#endif +diff --git a/security/lkrg/p_lkrg_main.c b/security/lkrg/p_lkrg_main.c +new file mode 100644 +index 000000000000..3edf596c68da +--- /dev/null ++++ b/security/lkrg/p_lkrg_main.c +@@ -0,0 +1,779 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Main module ++ * ++ * Notes: ++ * - None ++ * ++ * Timeline: ++ * - Created: 24.XI.2015 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ 
++#include "p_lkrg_main.h" ++ ++unsigned int log_level = 3; ++unsigned int heartbeat = 0; ++unsigned int block_modules = 0; ++unsigned int interval = 15; ++unsigned int kint_validate = 3; ++unsigned int kint_enforce = 2; ++unsigned int msr_validate = 0; ++unsigned int pint_validate = 1; ++unsigned int pint_enforce = 1; ++unsigned int pcfi_validate = 2; ++unsigned int pcfi_enforce = 1; ++unsigned int umh_validate = 1; ++unsigned int umh_enforce = 1; ++#if defined(CONFIG_X86) ++unsigned int smep_validate = 1; ++unsigned int smep_enforce = 2; ++unsigned int smap_validate = 1; ++unsigned int smap_enforce = 2; ++#endif ++unsigned int profile_validate = 3; ++unsigned int profile_enforce = 2; ++ ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) ++static enum cpuhp_state p_hot_cpus; ++#endif ++unsigned int p_attr_init = 0; ++ ++p_ro_page p_ro __p_lkrg_read_only = { ++ ++#if !defined(CONFIG_ARM) && (!defined(P_KERNEL_AGGRESSIVE_INLINING) && defined(CONFIG_X86)) ++ .p_marker_np1 = P_LKRG_MARKER1, ++#endif ++ ++ .p_lkrg_global_ctrl.ctrl = { ++ .p_kint_validate = 3, // kint_validate ++ .p_kint_enforce = 2, // kint_enforce ++ .p_pint_validate = 1, // pint_validate ++ .p_pint_enforce = 1, // pint_enforce ++ .p_interval = 15, // interval ++ .p_log_level = 3, // log_level ++ .p_trigger = 0, // trigger ++ .p_block_modules = 0, // block_modules ++ .p_hide_lkrg = 0, // hide_lkrg ++ .p_heartbeat = 0, // heartbeat ++#if defined(CONFIG_X86) ++ .p_smep_validate = 1, // smep_validate ++ .p_smep_enforce = 2, // smep_enforce ++ .p_smap_validate = 1, // smap_validate ++ .p_smap_enforce = 2, // smap_enforce ++#endif ++ .p_umh_validate = 1, // umh_validate ++ .p_umh_enforce = 1, // umh_enforce ++ .p_msr_validate = 0, // msr_validate ++ .p_pcfi_validate = 2, // pcfi_validate ++ .p_pcfi_enforce = 1, // pcfi_enforce ++ /* Profiles */ ++ .p_profile_validate = 3, // profile_validate ++ .p_profile_enforce = 2 // profile_enforce ++ }, ++ ++#if !defined(CONFIG_ARM) && 
(!defined(P_KERNEL_AGGRESSIVE_INLINING) && defined(CONFIG_X86)) ++ .p_marker_np2 = P_LKRG_MARKER1, ++ .p_marker_np3 = P_LKRG_MARKER2 ++#endif ++ ++}; ++ ++ ++static void p_init_page_attr(void) { ++ ++ unsigned long *p_long_tmp = 0; ++#if !defined(CONFIG_ARM) && (!defined(P_KERNEL_AGGRESSIVE_INLINING) && defined(CONFIG_X86)) ++ unsigned long p_long_offset = PAGE_SIZE/sizeof(p_long_tmp); // On purpose sizeof pointer ++#endif ++ ++ p_long_tmp = (unsigned long *)P_CTRL_ADDR; ++ ++#if !defined(CONFIG_ARM) && (!defined(P_KERNEL_AGGRESSIVE_INLINING) && defined(CONFIG_X86)) ++ if (*(p_long_tmp-p_long_offset) == P_LKRG_MARKER1) { ++ p_print_log(P_LKRG_INFO, "Found marker before configuration page.\n"); ++ if (*(p_long_tmp+p_long_offset) == P_LKRG_MARKER1) { ++ p_print_log(P_LKRG_INFO, "Found marker after configuration page.\n"); ++#endif ++ P_SYM(p_state_init) = 2; ++ p_set_memory_ro((unsigned long)p_long_tmp,1); ++ p_print_log(P_LKRG_INFO, "Configuration page marked read-only.\n"); ++ p_attr_init++; ++#if !defined(CONFIG_ARM) && (!defined(P_KERNEL_AGGRESSIVE_INLINING) && defined(CONFIG_X86)) ++ p_set_memory_np((unsigned long)(p_long_tmp-p_long_offset),1); ++ p_print_log(P_LKRG_INFO, "Setup guard page before configuration page.\n"); ++ if (*(p_long_tmp+p_long_offset*2) == P_LKRG_MARKER2) { ++ p_print_log(P_LKRG_INFO, "Found next marker after configuration page.\n"); ++ p_set_memory_np((unsigned long)(p_long_tmp+p_long_offset),1); ++ p_print_log(P_LKRG_INFO, "Setup guard page after configuration page.\n"); ++ p_attr_init++; ++ } ++#endif ++ ++#if !defined(CONFIG_ARM64) ++ P_SYM(p_flush_tlb_all)(); ++#else ++ flush_tlb_all(); ++#endif ++ ++#if !defined(CONFIG_ARM) && (!defined(P_KERNEL_AGGRESSIVE_INLINING) && defined(CONFIG_X86)) ++ } ++ } else { ++ p_print_log(P_LKRG_CRIT, ++ "ERROR: Can't find marker pages so configuration page is NOT RO :( Continue...\n"); ++ p_print_log(P_LKRG_INFO, "*(p_long_tmp[0x%lx]-PAGE_SIZE) => [0x%lx] 0x%lx\n", ++ (unsigned long)p_long_tmp, ++ 
(unsigned long)p_long_tmp-p_long_offset, ++ *(p_long_tmp-p_long_offset)); ++ p_print_log(P_LKRG_INFO, "*(p_long_tmp[0x%lx]+PAGE_SIZE) => [0x%lx] 0x%lx\n", ++ (unsigned long)p_long_tmp, ++ (unsigned long)p_long_tmp+p_long_offset, ++ *(p_long_tmp+p_long_offset)); ++ p_print_log(P_LKRG_INFO, "*(p_long_tmp[0x%lx]+2*PAGE_SIZE) => [0x%lx] 0x%lx\n", ++ (unsigned long)p_long_tmp, ++ (unsigned long)p_long_tmp+2*p_long_offset, ++ *(p_long_tmp+2*p_long_offset)); ++ p_print_log(P_LKRG_INFO, "*(p_long_tmp[0x%lx]+3*PAGE_SIZE) => [0x%lx] 0x%lx\n", ++ (unsigned long)p_long_tmp, ++ (unsigned long)p_long_tmp+3*p_long_offset, ++ *(p_long_tmp+3*p_long_offset)); ++ } ++#endif ++} ++ ++static void p_uninit_page_attr(void) { ++ ++ unsigned long *p_long_tmp = 0; ++#if !defined(CONFIG_ARM) && (!defined(P_KERNEL_AGGRESSIVE_INLINING) && defined(CONFIG_X86)) ++ unsigned long p_long_offset = PAGE_SIZE/sizeof(p_long_tmp); // On purpose sizeof pointer ++#endif ++ ++ if (p_attr_init) { ++ p_long_tmp = (unsigned long *)P_CTRL_ADDR; ++ p_set_memory_rw((unsigned long)p_long_tmp,1); ++ p_print_log(P_LKRG_INFO, "Configuration page marked read-write.\n"); ++#if !defined(CONFIG_ARM) && (!defined(P_KERNEL_AGGRESSIVE_INLINING) && defined(CONFIG_X86)) ++ p_set_memory_p((unsigned long)(p_long_tmp-p_long_offset),1); ++ p_print_log(P_LKRG_INFO, "Disabled guard page before configuration page.\n"); ++ p_set_memory_rw((unsigned long)(p_long_tmp-p_long_offset),1); ++ *(p_long_tmp-p_long_offset) = P_LKRG_MARKER1; ++ if (p_attr_init > 1) { ++ p_print_log(P_LKRG_INFO, "Disabled guard page after configuration page.\n"); ++ p_set_memory_p((unsigned long)(p_long_tmp+p_long_offset),1); ++ p_set_memory_rw((unsigned long)(p_long_tmp+p_long_offset),1); ++ *(p_long_tmp+p_long_offset) = P_LKRG_MARKER1; ++ } ++#endif ++ ++#if !defined(CONFIG_ARM64) ++ P_SYM(p_flush_tlb_all)(); ++#else ++ flush_tlb_all(); ++#endif ++ schedule(); ++ } else { ++ p_print_log(P_LKRG_INFO, "Configuration page was NOT RO.\n"); ++ } ++ ++ p_attr_init 
^= p_attr_init; ++} ++ ++void p_parse_module_params(void) { ++ ++ /* Interval */ ++ if (interval > 1800) { ++ P_CTRL(p_interval) = 1800; // Max ++ } else if (interval < 5) { ++ P_CTRL(p_interval) = 5; // Min ++ } else { ++ P_CTRL(p_interval) = interval; ++ } ++ ++ /* log_level */ ++ if (log_level >= P_LOG_LEVEL_MAX) { ++ P_CTRL(p_log_level) = P_LOG_LEVEL_MAX-1; // Max ++ } else { ++ P_CTRL(p_log_level) = log_level; ++ } ++ ++ /* heartbeat */ ++ if (heartbeat > 1) { ++ P_CTRL(p_heartbeat) = 1; ++ } else { ++ P_CTRL(p_heartbeat) = heartbeat; ++ } ++ ++ /* block_modules */ ++ if (block_modules > 1) { ++ P_CTRL(p_block_modules) = 1; ++ } else { ++ P_CTRL(p_block_modules) = block_modules; ++ } ++ ++ /* kint_validate */ ++ if (kint_validate > 3) { ++ P_CTRL(p_kint_validate) = 3; ++ P_CTRL(p_profile_validate) = 9; ++ } else if (P_CTRL(p_kint_validate) != kint_validate) { ++ P_CTRL(p_kint_validate) = kint_validate; ++ P_CTRL(p_profile_validate) = 9; ++ } ++ ++ /* kint_enforce */ ++ if (kint_enforce > 2) { ++ P_CTRL(p_kint_enforce) = 2; ++ P_CTRL(p_profile_enforce) = 9; ++ } else if (P_CTRL(p_kint_enforce) != kint_enforce) { ++ P_CTRL(p_kint_enforce) = kint_enforce; ++ P_CTRL(p_profile_enforce) = 9; ++ } ++ ++ /* msr_validate */ ++ if (msr_validate > 1) { ++ P_CTRL(p_msr_validate) = 1; ++ P_CTRL(p_profile_validate) = 9; ++ } else if (P_CTRL(p_msr_validate) != msr_validate) { ++ P_CTRL(p_msr_validate) = msr_validate; ++ P_CTRL(p_profile_validate) = 9; ++ } ++ ++ /* pint_validate */ ++ if (pint_validate > 3) { ++ P_CTRL(p_pint_validate) = 3; ++ P_CTRL(p_profile_validate) = 9; ++ } else if (P_CTRL(p_pint_validate) != pint_validate) { ++ P_CTRL(p_pint_validate) = pint_validate; ++ P_CTRL(p_profile_validate) = 9; ++ } ++ ++ /* pint_enforce */ ++ if (pint_enforce > 2) { ++ P_CTRL(p_pint_enforce) = 2; ++ P_CTRL(p_profile_enforce) = 9; ++ } else if (P_CTRL(p_pint_enforce) != pint_enforce) { ++ P_CTRL(p_pint_enforce) = pint_enforce; ++ P_CTRL(p_profile_enforce) = 9; ++ } ++ ++ /* 
umh_validate */ ++ if (umh_validate > 2) { ++ P_CTRL(p_umh_validate) = 2; ++ P_CTRL(p_profile_validate) = 9; ++ } else if (P_CTRL(p_umh_validate) != umh_validate) { ++ P_CTRL(p_umh_validate) = umh_validate; ++ P_CTRL(p_profile_validate) = 9; ++ } ++ ++ /* umh_enforce */ ++ if (umh_enforce > 2) { ++ P_CTRL(p_umh_enforce) = 2; ++ P_CTRL(p_profile_enforce) = 9; ++ } else if (P_CTRL(p_umh_enforce) != umh_enforce) { ++ P_CTRL(p_umh_enforce) = umh_enforce; ++ P_CTRL(p_profile_enforce) = 9; ++ } ++ ++ /* pcfi_validate */ ++ if (pcfi_validate > 2) { ++ P_CTRL(p_pcfi_validate) = 2; ++ P_CTRL(p_profile_validate) = 9; ++ } else if (P_CTRL(p_pcfi_validate) != pcfi_validate) { ++ P_CTRL(p_pcfi_validate) = pcfi_validate; ++ P_CTRL(p_profile_validate) = 9; ++ } ++ ++ /* pcfi_enforce */ ++ if (pcfi_enforce > 2) { ++ P_CTRL(p_pcfi_enforce) = 2; ++ P_CTRL(p_profile_enforce) = 9; ++ } else if (P_CTRL(p_pcfi_enforce) != pcfi_enforce) { ++ P_CTRL(p_pcfi_enforce) = pcfi_enforce; ++ P_CTRL(p_profile_enforce) = 9; ++ } ++ ++ p_pcfi_CPU_flags = 0; ++ ++#if defined(CONFIG_X86) ++ ++ if (boot_cpu_has(X86_FEATURE_SMEP)) { ++ P_ENABLE_SMEP_FLAG(p_pcfi_CPU_flags); ++ ++ /* smep_validate */ ++ if (smep_validate > 1) { ++ P_CTRL(p_smep_validate) = 1; ++ P_CTRL(p_profile_validate) = 9; ++ } else if (P_CTRL(p_smep_validate) != smep_validate) { ++ P_CTRL(p_smep_validate) = smep_validate; ++ P_CTRL(p_profile_validate) = 9; ++ } ++ ++ /* smep_enforce */ ++ if (smep_enforce > 2) { ++ P_CTRL(p_smep_enforce) = 2; ++ P_CTRL(p_profile_enforce) = 9; ++ } else if (P_CTRL(p_smep_enforce) != smep_enforce) { ++ P_CTRL(p_smep_enforce) = smep_enforce; ++ P_CTRL(p_profile_enforce) = 9; ++ } ++ } else { ++ P_CTRL(p_smep_validate) = 0; ++ P_CTRL(p_smep_enforce) = 0; ++ p_print_log(P_LKRG_ERR, ++ "System does NOT support SMEP. 
LKRG can't enforce SMEP validation :(\n"); ++ } ++ ++ if (boot_cpu_has(X86_FEATURE_SMAP)) { ++ P_ENABLE_SMAP_FLAG(p_pcfi_CPU_flags); ++ ++ /* smap_validate */ ++ if (smap_validate > 1) { ++ P_CTRL(p_smap_validate) = 1; ++ P_CTRL(p_profile_validate) = 9; ++ } else if (P_CTRL(p_smap_validate) != smap_validate) { ++ P_CTRL(p_smap_validate) = smap_validate; ++ P_CTRL(p_profile_validate) = 9; ++ } ++ ++ /* smap_enforce */ ++ if (smap_enforce > 2) { ++ P_CTRL(p_smap_enforce) = 2; ++ P_CTRL(p_profile_enforce) = 9; ++ } else if (P_CTRL(p_smap_enforce) != smap_enforce) { ++ P_CTRL(p_smap_enforce) = smap_enforce; ++ P_CTRL(p_profile_enforce) = 9; ++ } ++ } else { ++ P_CTRL(p_smap_validate) = 0; ++ P_CTRL(p_smap_enforce) = 0; ++ p_print_log(P_LKRG_ERR, ++ "System does NOT support SMAP. LKRG can't enforce SMAP validation :(\n"); ++ } ++ ++ P_ENABLE_WP_FLAG(p_pcfi_CPU_flags); ++ ++#endif ++ ++} ++ ++/* ++ * Main entry point for the module - initialization. ++ */ ++static int __init p_lkrg_register(void) { ++ ++ int p_ret = P_LKRG_SUCCESS; ++ char p_cpu = 0; ++ char p_freeze = 0; ++ ++ p_print_log(P_LKRG_CRIT, "Loading LKRG...\n"); ++ P_SYM(p_state_init) = 0; ++ ++ /* ++ * Generate random SipHash key ++ */ ++ p_global_siphash_key.p_low = (uint64_t)get_random_long(); ++ p_global_siphash_key.p_high = (uint64_t)get_random_long(); ++ ++ p_parse_module_params(); ++ P_SYM(p_find_me) = THIS_MODULE; ++ ++ if (get_kallsyms_address() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "Can't find kallsyms_lookup_name() function address! 
Exiting...\n"); ++ return P_LKRG_RESOLVER_ERROR; ++ } ++#ifdef P_LKRG_DEBUG ++ else { ++ p_print_log(P_LKRG_DBG, ++ "kallsyms_lookup_name() => 0x%lx\n",(unsigned long)P_SYM(p_kallsyms_lookup_name)); ++ } ++#endif ++ ++ P_SYM(p_freeze_processes) = (int (*)(void))P_SYM(p_kallsyms_lookup_name)("freeze_processes"); ++ ++ if (!P_SYM(p_freeze_processes)) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't find 'freeze_processes' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ } ++ ++ P_SYM(p_thaw_processes) = (void (*)(void))P_SYM(p_kallsyms_lookup_name)("thaw_processes"); ++ ++ if (!P_SYM(p_thaw_processes)) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't find 'thaw_processes' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ } ++ ++#if defined(CONFIG_X86) ++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0) ++ P_SYM(p_native_write_cr4) = (void (*)(unsigned long))P_SYM(p_kallsyms_lookup_name)("native_write_cr4"); ++ ++ if (!P_SYM(p_native_write_cr4)) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't find 'native_write_cr4' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ } ++ #endif ++#endif ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0) ++ P_SYM(p_module_address) = (struct module* (*)(unsigned long))P_SYM(p_kallsyms_lookup_name)("__module_address"); ++ ++ if (!P_SYM(p_module_address)) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't find '__module_address' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ } ++ ++ P_SYM(p_module_text_address) = (struct module* (*)(unsigned long))P_SYM(p_kallsyms_lookup_name)("__module_text_address"); ++ ++ if (!P_SYM(p_module_text_address)) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't find '__module_text_address' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ } ++#endif ++ ++#if defined(CONFIG_OPTPROBES) ++ P_SYM(p_wait_for_kprobe_optimizer) = (void 
(*)(void))P_SYM(p_kallsyms_lookup_name)("wait_for_kprobe_optimizer"); ++ ++ if (!P_SYM(p_wait_for_kprobe_optimizer)) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't find 'wait_for_kprobe_optimizer' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ } ++#endif ++ ++ // Freeze all non-kernel processes ++ while (P_SYM(p_freeze_processes)()) ++ schedule(); ++ ++ p_freeze = 1; ++ ++ /* ++ * First, we need to plant *kprobes... Before DB is created! ++ */ ++ if (p_exploit_detection_init()) { ++ p_print_log(P_LKRG_CRIT, ++ "Can't initialize exploit detection features! Exiting...\n"); ++ p_ret = P_LKRG_EXPLOIT_DETECTION_ERROR; ++ goto p_main_error; ++ } ++ ++ if (p_offload_cache_init()) { ++ p_print_log(P_LKRG_CRIT, ++ "Can't initialize offloading cache :(\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ } ++ ++ /* ++ * Initialize kmod module ++ */ ++ if (p_kmod_init()) { ++ p_print_log(P_LKRG_CRIT, ++ "Can't initialize kernel modules handling! Exiting...\n"); ++ p_ret = P_LKRG_KMOD_ERROR; ++ goto p_main_error; ++ } ++ ++ if (p_create_database() != P_LKRG_SUCCESS) { ++ p_print_log(P_LKRG_CRIT, ++ "Can't create database! Exiting...\n"); ++ p_ret = P_LKRG_DATABASE_ERROR; ++ goto p_main_error; ++ } ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ++ register_hotcpu_notifier(&p_cpu_notifier); ++#else ++ cpu_notifier_register_begin(); ++ __register_hotcpu_notifier(&p_cpu_notifier); ++ cpu_notifier_register_done(); ++#endif ++#else ++ if ( (p_hot_cpus = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, ++ "x86/p_lkrg:online", ++ p_cpu_online_action, ++ p_cpu_dead_action)) < 0) { ++ p_print_log(P_LKRG_CRIT, ++ "Can't register hot CPU plug[in/out] handler! 
Exiting...\n"); ++ p_ret = P_LKRG_HPCPU_ERROR; ++ goto p_main_error; ++ } ++#endif ++ p_cpu = 1; ++ ++#if !defined(CONFIG_ARM64) ++ ++ P_SYM(p_flush_tlb_all) = (void (*)(void))P_SYM(p_kallsyms_lookup_name)("flush_tlb_all"); ++ ++ if (!P_SYM(p_flush_tlb_all)) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't find 'flush_tlb_all' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ } ++ ++#endif ++ ++ ++#if defined(P_KERNEL_AGGRESSIVE_INLINING) ++ ++ P_SYM(p_kernel_set_memory_ro) = (int (*)(unsigned long, int)) ++ P_SYM(p_kallsyms_lookup_name)("set_memory_ro"); ++ if (!P_SYM(p_kernel_set_memory_ro)) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't find 'set_memory_ro' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ } ++ ++ P_SYM(p_kernel_set_memory_rw) = (int (*)(unsigned long, int)) ++ P_SYM(p_kallsyms_lookup_name)("set_memory_rw"); ++ if (!P_SYM(p_kernel_set_memory_rw)) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't find 'set_memory_rw' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ } ++ ++ #if defined(CONFIG_X86) ++ ; ++/* ++ P_SYM(p_kernel_set_memory_np) = (int (*)(unsigned long, int)) ++ P_SYM(p_kallsyms_lookup_name)("set_memory_np"); ++ if (!P_SYM(p_kernel_set_memory_np)) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't find 'set_memory_np' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ } ++*/ ++ #elif defined(CONFIG_ARM64) ++ P_SYM(p_kernel_set_memory_valid) = (int (*)(unsigned long, int, int)) ++ P_SYM(p_kallsyms_lookup_name)("set_memory_valid"); ++ if (!P_SYM(p_kernel_set_memory_valid)) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't find 'set_memory_valid' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ } ++ #endif ++ ++#else ++ #if defined(CONFIG_X86) ++ P_SYM(p_change_page_attr_set_clr) = ++ (int (*)(unsigned long *, int, pgprot_t, pgprot_t, int, int, struct page **)) ++ 
P_SYM(p_kallsyms_lookup_name)("change_page_attr_set_clr"); ++ ++ if (!P_SYM(p_change_page_attr_set_clr)) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't find 'change_page_attr_set_clr' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ } ++ #elif defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ P_SYM(p_change_memory_common) = ++ (int (*)(unsigned long, int, pgprot_t, pgprot_t)) ++ P_SYM(p_kallsyms_lookup_name)("change_memory_common"); ++ ++ if (!P_SYM(p_change_memory_common)) { ++ p_print_log(P_LKRG_ERR, ++ "ERROR: Can't find 'change_memory_common' function :( Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ } ++ #else ++ p_print_log(P_LKRG_CRIT, "UNSUPPORTED PLATFORM! Exiting...\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ #endif ++#endif ++ ++ if (p_register_comm_channel()) { ++ p_print_log(P_LKRG_CRIT, ++ "Can't initialize communication channel (sysctl) :(\n"); ++ p_ret = P_LKRG_GENERAL_ERROR; ++ goto p_main_error; ++ } ++ ++ if (P_CTRL(p_hide_lkrg)) { ++ p_hide_itself(); ++ } ++ ++ p_integrity_timer(); ++ p_register_notifiers(); ++ p_init_page_attr(); ++ ++ p_print_log(P_LKRG_CRIT, ++ "LKRG initialized successfully!\n"); ++ ++ p_ret = P_LKRG_SUCCESS; ++ ++p_main_error: ++ ++ if (p_ret != P_LKRG_SUCCESS) { ++ P_CTRL(p_kint_validate) = 0; ++ p_deregister_notifiers(); ++ if (p_timer.function) ++ del_timer_sync(&p_timer); ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ++ if (p_cpu) ++ unregister_hotcpu_notifier(&p_cpu_notifier); ++#else ++ if (p_cpu) { ++ cpu_notifier_register_begin(); ++ __unregister_hotcpu_notifier(&p_cpu_notifier); ++ cpu_notifier_register_done(); ++ } ++#endif ++#else ++ if (p_cpu) ++ cpuhp_remove_state_nocalls(p_hot_cpus); ++#endif ++ ++ p_exploit_detection_exit(); ++ p_unregister_arch_metadata(); ++ p_offload_cache_delete(); ++ p_deregister_module_notifier(); ++ if (p_db.p_CPU_metadata_array) { ++ 
p_kzfree(p_db.p_CPU_metadata_array); ++ p_db.p_CPU_metadata_array = NULL; ++ } ++ p_uninit_page_attr(); ++#if defined(P_LKRG_JUMP_LABEL_STEXT_DEBUG) ++ if (p_db.kernel_stext_copy) ++ vfree(p_db.kernel_stext_copy); ++#endif ++ } ++ ++ if (p_freeze) { ++ // Thaw all non-kernel processes ++ P_SYM(p_thaw_processes)(); ++ p_freeze = 0; ++ } ++ ++ return p_ret; ++} ++ ++/* ++ * This function normally should never be called - unloading module cleanup ++ */ ++static void __exit p_lkrg_deregister(void) { ++ ++ p_print_log(P_LKRG_CRIT, "Unloading LKRG...\n"); ++ ++#ifdef P_LKRG_DEBUG ++ p_print_log(P_LKRG_DBG, ++ "I should never be here! This operation probably is going to break your system! Goodbye ;)\n"); ++#endif ++ ++ p_uninit_page_attr(); ++ ++ P_CTRL(p_kint_validate) = 0; ++ p_deregister_notifiers(); ++ if (p_timer.function) ++ del_timer_sync(&p_timer); ++ ++ ++ // Freeze all non-kernel processes ++ while (P_SYM(p_freeze_processes)()) ++ schedule(); ++ ++ p_deregister_comm_channel(); ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ++ unregister_hotcpu_notifier(&p_cpu_notifier); ++#else ++ cpu_notifier_register_begin(); ++ __unregister_hotcpu_notifier(&p_cpu_notifier); ++ cpu_notifier_register_done(); ++#endif ++#else ++ cpuhp_remove_state_nocalls(p_hot_cpus); ++#endif ++ ++ p_exploit_detection_exit(); ++ p_unregister_arch_metadata(); ++ p_offload_cache_delete(); ++ p_deregister_module_notifier(); ++ ++ ++ if (p_db.p_CPU_metadata_array) ++ p_kzfree(p_db.p_CPU_metadata_array); ++ ++#if defined(P_LKRG_JUMP_LABEL_STEXT_DEBUG) ++ if (p_db.kernel_stext_copy) ++ vfree(p_db.kernel_stext_copy); ++#endif ++ ++ // Thaw all non-kernel processes ++ P_SYM(p_thaw_processes)(); ++ ++ p_print_log(P_LKRG_CRIT, "LKRG unloaded!\n"); ++} ++ ++ ++#ifdef MODULE ++module_init(p_lkrg_register); ++#else ++late_initcall_sync(p_lkrg_register); ++#endif ++module_exit(p_lkrg_deregister); ++ ++module_param(log_level, uint, 0000); 
++MODULE_PARM_DESC(log_level, "log_level [3 (warn) is default]"); ++module_param(heartbeat, uint, 0000); ++MODULE_PARM_DESC(heartbeat, "heartbeat [0 (don't print) is default]"); ++module_param(block_modules, uint, 0000); ++MODULE_PARM_DESC(block_modules, "block_modules [0 (don't block) is default]"); ++module_param(interval, uint, 0000); ++MODULE_PARM_DESC(interval, "interval [15 seconds is default]"); ++module_param(kint_validate, uint, 0000); ++MODULE_PARM_DESC(kint_validate, "kint_validate [3 (periodically + random events) is default]"); ++module_param(kint_enforce, uint, 0000); ++MODULE_PARM_DESC(kint_enforce, "kint_enforce [2 (panic) is default]"); ++module_param(msr_validate, uint, 0000); ++MODULE_PARM_DESC(msr_validate, "msr_validate [0 (disabled) is default]"); ++module_param(pint_validate, uint, 0000); ++MODULE_PARM_DESC(pint_validate, "pint_validate [1 (current) is default]"); ++module_param(pint_enforce, uint, 0000); ++MODULE_PARM_DESC(pint_enforce, "pint_enforce [1 (kill task) is default]"); ++module_param(umh_validate, uint, 0000); ++MODULE_PARM_DESC(umh_validate, "umh_validate [1 (allow specific paths) is default]"); ++module_param(umh_enforce, uint, 0000); ++MODULE_PARM_DESC(umh_enforce, "umh_enforce [1 (prevent execution) is default]"); ++module_param(pcfi_validate, uint, 0000); ++MODULE_PARM_DESC(pcfi_validate, "pcfi_validate [2 (fully enabled pCFI) is default]"); ++module_param(pcfi_enforce, uint, 0000); ++MODULE_PARM_DESC(pcfi_enforce, "pcfi_enforce [1 (kill task) is default]"); ++#if defined(CONFIG_X86) ++module_param(smep_validate, uint, 0000); ++MODULE_PARM_DESC(smep_validate, "smep_validate [1 (enabled) is default]"); ++module_param(smep_enforce, uint, 0000); ++MODULE_PARM_DESC(smep_enforce, "smep_enforce [2 (panic) is default]"); ++module_param(smap_validate, uint, 0000); ++MODULE_PARM_DESC(smap_validate, "smap_validate [1 (enabled) is default]"); ++module_param(smap_enforce, uint, 0000); ++MODULE_PARM_DESC(smap_enforce, "smap_enforce [2 
(panic) is default]"); ++#endif ++ ++MODULE_AUTHOR("Adam 'pi3' Zabrocki (http://pi3.com.pl)"); ++MODULE_DESCRIPTION("pi3's Linux kernel Runtime Guard"); ++MODULE_LICENSE("GPL v2"); +diff --git a/security/lkrg/p_lkrg_main.h b/security/lkrg/p_lkrg_main.h +new file mode 100644 +index 000000000000..000296647837 +--- /dev/null ++++ b/security/lkrg/p_lkrg_main.h +@@ -0,0 +1,393 @@ ++/* ++ * pi3's Linux kernel Runtime Guard ++ * ++ * Component: ++ * - Main module ++ * ++ * Notes: ++ * - None ++ * ++ * Timeline: ++ * - Created: 24.XI.2015 ++ * ++ * Author: ++ * - Adam 'pi3' Zabrocki (http://pi3.com.pl) ++ * ++ */ ++ ++#ifndef P_LKRG_MAIN_H ++#define P_LKRG_MAIN_H ++ ++#define P_LKRG_UNHIDE ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) ++#include ++#endif ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++#include ++ ++#ifndef RHEL_RELEASE_VERSION ++#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b)) ++#endif ++ ++#if ( (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,72)) && \ ++ (!(defined(RHEL_RELEASE_CODE)) || \ ++ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 4))) ++#define P_LKRG_CUSTOM_GET_RANDOM_LONG ++/* We use md5_transform() in our custom get_random_long() */ ++#include ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0) ++#define p_kzfree kzfree ++#else ++#define p_kzfree kfree_sensitive ++#endif ++ ++#include ++#include ++#include ++#if defined(CONFIG_X86) && defined(CONFIG_UNWINDER_ORC) ++#include ++#endif ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0) ++#include ++#endif ++ ++/* ++ * Some custom compilation of the kernel might aggresively inline ++ * critical functions (from LKRG perspective). 
That's problematic ++ * for the project. However, some of the problems *might* be solved ++ * by uncommenting following definition. However, not all of them ++ * so you need to experiment. ++ */ ++//#define P_KERNEL_AGGRESSIVE_INLINING 1 ++ ++//#define p_lkrg_read_only __attribute__((__section__(".data..p_lkrg_read_only"),aligned(PAGE_SIZE))) ++#define __p_lkrg_read_only __attribute__((__section__(".p_lkrg_read_only"))) ++ ++#if defined(CONFIG_X86_64) || defined(CONFIG_ARM64) ++ #define P_LKRG_MARKER1 0x3369705f6d616441 ++ #define P_LKRG_MARKER2 0xdeadbabedeadbabe ++#else ++ #define P_LKRG_MARKER1 0x3369705f ++ #define P_LKRG_MARKER2 0xdeadbabe ++#endif ++ ++#if defined(CONFIG_SECURITY_SELINUX_DEVELOP) && !defined(CONFIG_GCC_PLUGIN_RANDSTRUCT) ++#define P_SELINUX_VERIFY ++#endif ++ ++#define nitems(val) (sizeof(val) / sizeof(val[0])) ++ ++typedef struct _p_lkrg_global_conf_structure { ++ ++#if defined(CONFIG_X86) ++ unsigned int p_smep_validate; ++ unsigned int p_smap_validate; ++#endif ++ unsigned int p_pcfi_validate; ++ unsigned int p_pint_validate; ++ unsigned int p_kint_validate; ++ unsigned int p_log_level; ++ unsigned int p_block_modules; ++ unsigned int p_msr_validate; ++ unsigned int p_heartbeat; ++ unsigned int p_interval; ++ unsigned int p_umh_validate; ++#if defined(CONFIG_X86) ++ unsigned int p_smep_enforce; ++ unsigned int p_smap_enforce; ++#endif ++ unsigned int p_pcfi_enforce; ++ unsigned int p_pint_enforce; ++ unsigned int p_kint_enforce; ++ unsigned int p_trigger; ++ unsigned int p_hide_lkrg; ++ unsigned int p_umh_enforce; ++ /* Profiles */ ++ unsigned int p_profile_validate; ++ unsigned int p_profile_enforce; ++ ++} p_lkrg_global_conf_struct; ++ ++typedef struct _p_lkrg_global_symbols_structure { ++ ++ unsigned long (*p_kallsyms_lookup_name)(const char *name); ++ int (*p_freeze_processes)(void); ++ void (*p_thaw_processes)(void); ++#if !defined(CONFIG_ARM64) ++ void (*p_flush_tlb_all)(void); ++#endif ++ ++#if defined(P_KERNEL_AGGRESSIVE_INLINING) 
++ int (*p_kernel_set_memory_ro)(unsigned long addr, int numpages); ++ int (*p_kernel_set_memory_rw)(unsigned long addr, int numpages); ++ #if defined(CONFIG_X86) ++ ; ++// int (*p_kernel_set_memory_np)(unsigned long addr, int numpages); ++ #elif defined(CONFIG_ARM64) ++ int (*p_kernel_set_memory_valid)(unsigned long addr, int numpages, int enable); ++ #endif ++#else ++ #if defined(CONFIG_X86) ++ int (*p_change_page_attr_set_clr)(unsigned long *addr, int numpages, ++ pgprot_t mask_set, pgprot_t mask_clr, ++ int force_split, int in_flag, ++ struct page **pages); ++ #elif defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ int (*p_change_memory_common)(unsigned long addr, int numpages, ++ pgprot_t set_mask, pgprot_t clear_mask); ++ #endif ++#endif ++ int (*p_is_kernel_text_address)(unsigned long p_addr); ++ void (*p_get_seccomp_filter)(struct task_struct *p_task); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0) ++ void (*p_put_seccomp_filter)(struct seccomp_filter *p_filter); ++#else ++ void (*p_put_seccomp_filter)(struct task_struct *p_task); ++#endif ++#ifdef CONFIG_SECURITY_SELINUX ++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) ++ int *p_selinux_enabled; ++#endif ++#ifdef P_SELINUX_VERIFY ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0) ++ struct p_selinux_state *p_selinux_state; ++#else ++ int *p_selinux_enforcing; ++#endif ++#endif ++#endif ++ int (*p_core_kernel_text)(unsigned long p_addr); ++ pmd_t *(*p_mm_find_pmd)(struct mm_struct *mm, unsigned long address); ++ struct mutex *p_jump_label_mutex; ++ struct mutex *p_text_mutex; ++ struct text_poke_loc **p_tp_vec; ++ int *p_tp_vec_nr; ++#if defined(CONFIG_DYNAMIC_DEBUG) ++ struct list_head *p_ddebug_tables; ++ struct mutex *p_ddebug_lock; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) ++ int (*p_ddebug_remove_module_ptr)(const char *p_name); ++#endif ++#endif ++ struct list_head *p_global_modules; ++ struct kset **p_module_kset; ++#if defined(CONFIG_X86) ++ #if LINUX_VERSION_CODE >= 
KERNEL_VERSION(5,8,0) ++ void (*p_native_write_cr4)(unsigned long p_val); ++ #endif ++#endif ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0) ++ struct module* (*p_module_address)(unsigned long p_val); ++ struct module* (*p_module_text_address)(unsigned long p_val); ++#endif ++ struct module* (*p_find_module)(const char *name); ++ struct mutex *p_module_mutex; ++ int (*p_kallsyms_on_each_symbol)(int (*)(void *, const char *, struct module *, unsigned long), void *); ++#if defined(CONFIG_FUNCTION_TRACER) ++ struct ftrace_rec_iter *(*p_ftrace_rec_iter_start)(void); ++ struct ftrace_rec_iter *(*p_ftrace_rec_iter_next)(struct ftrace_rec_iter *iter); ++ struct dyn_ftrace *(*p_ftrace_rec_iter_record)(struct ftrace_rec_iter *iter); ++ struct mutex *p_ftrace_lock; ++#endif ++#if defined(CONFIG_OPTPROBES) ++ void (*p_wait_for_kprobe_optimizer)(void); ++#endif ++ struct module *p_find_me; ++ unsigned int p_state_init; ++ ++} p_lkrg_global_syms; ++ ++typedef struct _p_lkrg_critical_variables { ++ ++ unsigned long p_dummy1; ++ ++} p_lkrg_critical_var; ++ ++typedef struct _p2_lkrg_global_ctrl_structure { ++ ++ p_lkrg_global_conf_struct ctrl; ++ p_lkrg_global_syms syms; ++ p_lkrg_critical_var var; ++ ++} p_lkrg_global_ctrl_struct __attribute__((aligned(PAGE_SIZE))); ++ ++typedef struct _p_lkrg_ro_page { ++ ++#if !defined(CONFIG_ARM) && (!defined(P_KERNEL_AGGRESSIVE_INLINING) && defined(CONFIG_X86)) ++ unsigned long p_marker_np1 __attribute__((aligned(PAGE_SIZE))); ++#endif ++ ++ p_lkrg_global_ctrl_struct p_lkrg_global_ctrl; ++ ++#if !defined(CONFIG_ARM) && (!defined(P_KERNEL_AGGRESSIVE_INLINING) && defined(CONFIG_X86)) ++ unsigned long p_marker_np2 __attribute__((aligned(PAGE_SIZE))); ++ unsigned long p_marker_np3 __attribute__((aligned(PAGE_SIZE))); ++#endif ++ ++} p_ro_page; ++ ++ ++extern p_ro_page p_ro; ++ ++#define P_VAR(p_field) p_ro.p_lkrg_global_ctrl.var.p_field ++#define P_SYM(p_field) p_ro.p_lkrg_global_ctrl.syms.p_field ++#define P_CTRL(p_field) 
p_ro.p_lkrg_global_ctrl.ctrl.p_field ++#define P_CTRL_ADDR &p_ro.p_lkrg_global_ctrl ++ ++/* ++ * LKRG counter lock ++ */ ++typedef struct p_lkrg_counter_lock { ++ ++ atomic_t p_counter; ++ spinlock_t p_lock; ++ ++} p_lkrg_counter_lock; ++ ++/* Counter lock API */ ++static inline void p_lkrg_counter_lock_init(p_lkrg_counter_lock *p_arg) { ++ ++ spin_lock_init(&p_arg->p_lock); ++ smp_mb(); ++ atomic_set(&p_arg->p_counter, 0); ++ smp_mb(); ++} ++ ++static inline unsigned long p_lkrg_counter_lock_trylock(p_lkrg_counter_lock *p_arg, unsigned long *p_flags) { ++ ++ local_irq_save(*p_flags); ++ if (!spin_trylock(&p_arg->p_lock)) { ++ local_irq_restore(*p_flags); ++ return 0; ++ } ++ return 1; ++} ++ ++static inline void p_lkrg_counter_lock_lock(p_lkrg_counter_lock *p_arg, unsigned long *p_flags) { ++ ++ spin_lock_irqsave(&p_arg->p_lock, *p_flags); ++} ++ ++static inline void p_lkrg_counter_lock_unlock(p_lkrg_counter_lock *p_arg, unsigned long *p_flags) { ++ ++ spin_unlock_irqrestore(&p_arg->p_lock, *p_flags); ++} ++ ++static inline void p_lkrg_counter_lock_val_inc(p_lkrg_counter_lock *p_arg) { ++ ++ smp_mb(); ++ atomic_inc(&p_arg->p_counter); ++ smp_mb(); ++} ++ ++static inline void p_lkrg_counter_lock_val_dec(p_lkrg_counter_lock *p_arg) { ++ ++ smp_mb(); ++ atomic_dec(&p_arg->p_counter); ++ smp_mb(); ++} ++ ++static inline int p_lkrg_counter_lock_val_read(p_lkrg_counter_lock *p_arg) { ++ ++ register int p_ret; ++ ++ smp_mb(); ++ p_ret = atomic_read(&p_arg->p_counter); ++ smp_mb(); ++ ++ return p_ret; ++} ++/* End */ ++ ++/* ++ * p_lkrg modules ++ */ ++#include "modules/print_log/p_lkrg_print_log.h" // printing, error and debug module ++#include "modules/hashing/p_lkrg_fast_hash.h" // Hashing module ++#include "modules/ksyms/p_resolve_ksym.h" // Resolver module ++#include "modules/database/p_database.h" // Database module ++#include "modules/integrity_timer/p_integrity_timer.h" // Integrity timer module ++#include "modules/kmod/p_kmod.h" // Kernel's modules module 
++#include "modules/notifiers/p_notifiers.h" // Notifiers module ++#include "modules/self-defense/hiding/p_hiding.h" // Hiding module ++#include "modules/exploit_detection/p_exploit_detection.h" // Exploit Detection ++#include "modules/wrap/p_struct_wrap.h" // Wrapping module ++#include "modules/comm_channel/p_comm_channel.h" // Communication channel (sysctl) module ++ ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) ++ #define __GFP_REPEAT ((__force gfp_t)___GFP_RETRY_MAYFAIL) ++#endif ++ ++#if !defined(CONFIG_KPROBES) ++ #error "LKRG requires CONFIG_KPROBES" ++#elif !defined(CONFIG_HAVE_KRETPROBES) ++ #error "CONFIG_KPROBES is enabled, however CONFIG_HAVE_KRETPROBES is not found. LKRG requires both." ++#endif ++ ++#if !defined(CONFIG_MODULE_UNLOAD) ++ #error "LKRG requires CONFIG_MODULE_UNLOAD" ++#endif ++ ++#if !defined(CONFIG_KALLSYMS_ALL) ++ #error "LKRG requires CONFIG_KALLSYMS_ALL" ++#endif ++ ++#if !defined(CONFIG_JUMP_LABEL) ++ #error "LKRG requires CONFIG_JUMP_LABEL" ++#endif ++ ++#if !defined(CONFIG_STACKTRACE) ++/* ++ * A #warning in this header file would be printed too many times during build, ++ * so let's only do that for something truly important, which the below is not. ++ */ ++// #warning "LKRG does NOT require CONFIG_STACKTRACE. However, in case of pCFI violation, LKRG won't be able to dump full stack-trace." ++#endif ++ ++#if defined(CONFIG_PREEMPT_RT) ++ #error "LKRG does not support RT kernels (PREEMPT_RT is enabled)" ++#endif ++ ++#endif