Linux server.thearyasamaj.org 4.18.0-553.56.1.el8_10.x86_64 #1 SMP Tue Jun 10 05:00:59 EDT 2025 x86_64
Web server: Apache
Server IP: 103.90.241.146 | Your IP: 216.73.216.222
Can't read [ /etc/named.conf ]
PHP: 5.6.40
User: ftpuser@mantra.thearyasamaj.org
www.github.com/MadExploits
Current directory: /usr/src/file_protector-1.1-1578/ftrace_hooks/
Name                 Size      Permission
audit_user.h         145 B     -rw-r--r--
fsnotify_events.c    5.57 KB   -rw-r--r--
fsnotify_events.h    382 B     -rw-r--r--
fsnotify_listener.c  10.99 KB  -rw-r--r--
fsnotify_listener.h  1.45 KB   -rw-r--r--
ftrace_events.c      20.17 KB  -rw-r--r--
ftrace_events.h      732 B     -rw-r--r--
reg_tools.h          694 B     -rw-r--r--
Code Editor: fsnotify_listener.c
/**
 * @file fsnotify_listener.c
 * @brief fsnotify events listener
 * @details Copyright (c) 2024 Acronis International GmbH
 * @author Denis Kopyrin (denis.kopyrin@acronis.com)
 * @since $Id: $
 */

#include "fsnotify_listener.h"

#ifdef HAVE_FSNOTIFY_PUBLIC_API

#include "compat.h"
#include "memory.h"

#include <linux/fsnotify_backend.h>

#include "hashtable_compat.h"
#include "fsnotify_events.h"

#define FSNOTIFY_EVENTS_SMALL_TABLE_SIZE_BITS 8
#define FSNOTIFY_EVENTS_LISTENING_MASK FS_OPEN | FS_CREATE | FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | \
    FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | FS_MODIFY

// I want a special mark type so I can clean deleted sb
typedef struct fsnotify_events_mark {
    struct fsnotify_mark mark;
    // sb is not referenced here, it is just for comparison sake
    struct super_block* sb;
} fsnotify_events_mark_t;

typedef struct {
    struct hlist_node hash_node;
    // sb is not referenced here, it is just for comparison sake
    struct super_block* sb;
    // mark is referenced here and its reference must be dropped when this node is removed
    fsnotify_events_mark_t* fe_mark;
    struct rcu_head rcu;
} hashtable_sb_node_t;

typedef struct fsnotify_events_listener {
    struct mutex table_writer_lock;
    DECLARE_HASHTABLE(sb_hashtable, FSNOTIFY_EVENTS_SMALL_TABLE_SIZE_BITS);
    struct fsnotify_group *group;
} fsnotify_events_listener_t;

static fsnotify_events_listener_t global_fsnotify_events_listener;

static KMEM_STRUCT_CACHE_DECLARE(hashtable_sb_node);
static KMEM_STRUCT_CACHE_DECLARE(fsnotify_events_mark);

static void fsnotify_events_node_rcu_free(struct rcu_head *rcu)
{
    // !!! mark must be NULL !!!
    hashtable_sb_node_t *node = container_of(rcu, hashtable_sb_node_t, rcu);
    KMEM_DELETE(hashtable_sb_node, node);
}

// On freeing mark, check if corresponding sb node is in the table and remove it
// Note that it is acceptable that mark will not be in the table, it means that it was already removed.
static void fsnotify_events_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group)
{
    bool found = false;
    hashtable_sb_node_t *node;
    fsnotify_events_mark_t* fe_mark = container_of(fsn_mark, fsnotify_events_mark_t, mark);
    unsigned int hash = hash_ptr(fe_mark->sb, FSNOTIFY_EVENTS_SMALL_TABLE_SIZE_BITS);
    (void) group;

    mutex_lock(&global_fsnotify_events_listener.table_writer_lock);
    {
        hlist_for_each_entry(node, &global_fsnotify_events_listener.sb_hashtable[hash], hash_node) {
            if (node->sb == fe_mark->sb) {
                if (node->fe_mark != fe_mark) {
                    DPRINTF("Mark being freed is not the one in the table, ignoring");
                } else {
                    found = true;
                }
                break;
            }
        }
        if (found)
            hlist_del_init_rcu(&node->hash_node);
    }
    mutex_unlock(&global_fsnotify_events_listener.table_writer_lock);

    if (found) {
        DPRINTF("sb node removed from the table sb=%p mark=%p", fe_mark->sb, fe_mark);
        // as dropping a reference held by the table
        fsnotify_put_mark(fsn_mark);
        call_rcu(&node->rcu, fsnotify_events_node_rcu_free);
    }
}

static void fsnotify_events_free_mark(struct fsnotify_mark *fsn_mark)
{
    fsnotify_events_mark_t *fe_mark = container_of(fsn_mark, struct fsnotify_events_mark, mark);
    KMEM_DELETE(fsnotify_events_mark, fe_mark);
}

#ifdef FSNOTIFY_MASK_SECOND
static int fsnotify_events_handle_event(struct fsnotify_group *group, struct inode *dir, u32 mask,
                                        const void *data, int data_type, const struct qstr *file_name,
                                        u32 cookie, struct fsnotify_iter_info *iter_info)
#else
static int fsnotify_events_handle_event(struct fsnotify_group *group, u32 mask, const void *data,
                                        int data_type, struct inode *dir, const struct qstr *file_name,
                                        u32 cookie, struct fsnotify_iter_info *iter_info)
#endif
{
    (void) group;
    (void) dir;
    (void) file_name;
    (void) cookie;
    (void) iter_info;

    handle_fsnotify_event(mask, data, data_type, file_name ? file_name->name : NULL);
    return 0;
}

static const struct fsnotify_ops k_fsnotify_events_ops = {
    .handle_event = fsnotify_events_handle_event,
    .freeing_mark = fsnotify_events_freeing_mark,
    .free_mark = fsnotify_events_free_mark
};

void fsnotify_events_listener_init(void)
{
#if defined(HAVE_FSNOTIFY_GROUP_NOFS) || defined(FSNOTIFY_GROUP_USER)
    global_fsnotify_events_listener.group = fsnotify_alloc_group(&k_fsnotify_events_ops, 0 /*no flags*/);
#else
    global_fsnotify_events_listener.group = fsnotify_alloc_group(&k_fsnotify_events_ops);
#endif
    IPRINTF("group=%p", global_fsnotify_events_listener.group);
}

// This is slightly inefficient because I have to go over the whole table to find the next mark
// but I would really like to be safe and not cause potential leaks.
// TODO: Put all marks in a list and iterate over it
static struct fsnotify_mark* remove_next_mark_from_table(void)
{
    struct fsnotify_mark* fsn_mark = NULL;
    int idx;

    DPRINTF("removing next mark from the table");
    mutex_lock(&global_fsnotify_events_listener.table_writer_lock);
    for (idx = 0; !fsn_mark && idx < (int) ARRAY_SIZE(global_fsnotify_events_listener.sb_hashtable); idx++) {
        while (!fsn_mark) {
            hashtable_sb_node_t* node;
            struct hlist_node* first = global_fsnotify_events_listener.sb_hashtable[idx].first;
            if (!first)
                break;
            DPRINTF("removing next mark from the table idx=%d first=%p", idx, first);
            node = hlist_entry(first, hashtable_sb_node_t, hash_node);
            hlist_del_init_rcu(first);
            fsn_mark = &node->fe_mark->mark;
            DPRINTF("removing next mark from the table idx=%d mark=%p", idx, fsn_mark);
            call_rcu(&node->rcu, fsnotify_events_node_rcu_free);
        }
    }
    mutex_unlock(&global_fsnotify_events_listener.table_writer_lock);
    DPRINTF("removed next mark from the table mark=%p", fsn_mark);
    return fsn_mark;
}

// !!! This is not very thread safe at all, I expect that no one will add new events on top !!!
static void clear_marks(void)
{
    do {
        struct fsnotify_mark *mark = remove_next_mark_from_table();
        if (!mark)
            break;
        fsnotify_destroy_mark(mark, global_fsnotify_events_listener.group);
        fsnotify_put_mark(mark);
    } while (1);
}

void fsnotify_events_listener_deinit(void)
{
    if (!global_fsnotify_events_listener.group)
        return;

    // !!! LSM/syscall hooks must be down at this point including waits via rundown protection !!!
    clear_marks();
    fsnotify_put_group(global_fsnotify_events_listener.group);
}

int fsnotify_events_listener_global_init(void)
{
    mutex_init(&global_fsnotify_events_listener.table_writer_lock);
    hash_init(global_fsnotify_events_listener.sb_hashtable);
    global_fsnotify_events_listener.group = NULL;

    KMEM_STRUCT_CACHE_NAME(hashtable_sb_node) = NULL;
    KMEM_STRUCT_CACHE_NAME(fsnotify_events_mark) = NULL;

    if (!KMEM_STRUCT_CACHE_INIT(hashtable_sb_node, 0, NULL)) {
        EPRINTF("Failed to create hashtable_sb_node cache");
        goto fail;
    }
    if (!KMEM_STRUCT_CACHE_INIT(fsnotify_events_mark, 0, NULL)) {
        EPRINTF("Failed to create fsnotify_events_mark cache");
        goto fail;
    }
    return 0;

fail:
    fsnotify_events_listener_global_init_fail_free();
    return -ENOMEM;
}

void fsnotify_events_listener_global_init_fail_free(void)
{
    KMEM_STRUCT_CACHE_DEINIT(fsnotify_events_mark);
    KMEM_STRUCT_CACHE_DEINIT(hashtable_sb_node);
}

void fsnotify_events_listener_global_deinit(void)
{
    fsnotify_wait_marks_destroyed();
    // For hashtable clear synchronize
    synchronize_rcu();
    rcu_barrier();
    KMEM_STRUCT_CACHE_DEINIT(fsnotify_events_mark);
    KMEM_STRUCT_CACHE_DEINIT(hashtable_sb_node);
}

bool fsnotify_events_listener_registered(void)
{
    return !!global_fsnotify_events_listener.group;
}

static fsnotify_events_mark_t* mark_sb(struct super_block* sb)
{
    int err;
    fsnotify_events_mark_t* fe_mark;
#ifndef HAVE_FSNOTIFY_SB_INFO
    fsnotify_connp_t* marks_obj = &sb->s_fsnotify_marks;
    struct fsnotify_mark *fsn_mark = fsnotify_find_mark(marks_obj, global_fsnotify_events_listener.group);
#else
    void* marks_obj = sb;
    struct fsnotify_mark *fsn_mark = fsnotify_find_mark(marks_obj, FSNOTIFY_OBJ_TYPE_SB,
                                                        global_fsnotify_events_listener.group);
#endif
    if (fsn_mark) {
        DPRINTF("fsnotify mark already exists for sb %p", sb);
        fsnotify_put_mark(fsn_mark);
        return NULL;
    }

    fe_mark = KMEM_NEW(fsnotify_events_mark);
    if (!fe_mark) {
        return NULL;
    }
    DPRINTF("allocated fsnotify mark for sb %p, mark=%p", sb, fe_mark);

    fsn_mark = &fe_mark->mark;
    fe_mark->sb = sb;
    fsnotify_init_mark(fsn_mark, global_fsnotify_events_listener.group);
    fsn_mark->mask = FSNOTIFY_EVENTS_LISTENING_MASK;

    // Notice that sb reference must be held during this to ensure that sb will not be
    // deleted before it is advertised in the hashtable.
    // It is crucial that 'fsnotify_clear_marks_by_sb' will not be invoked.
#ifdef HAVE_FSNOTIFY_ADD_MARK_FSID
    err = fsnotify_add_mark(fsn_mark, marks_obj, FSNOTIFY_OBJ_TYPE_SB, 0, NULL);
#else
    err = fsnotify_add_mark(fsn_mark, marks_obj, FSNOTIFY_OBJ_TYPE_SB, 0);
#endif
    if (err) {
        WPRINTF("failed to add fsnotify mark for sb %p", sb);
        fsnotify_put_mark(fsn_mark);
        return NULL;
    } else {
        DPRINTF("added fsnotify mark for sb %p", sb);
    }
    return fe_mark;
}

void fsnotify_events_listen_sb(struct super_block* sb)
{
    hashtable_sb_node_t *node;
    hashtable_sb_node_t *search_node;
    fsnotify_events_mark_t *fe_mark;
    bool failed_to_insert = false;
    unsigned int hash;

    hash = hash_ptr(sb, FSNOTIFY_EVENTS_SMALL_TABLE_SIZE_BITS);

    if (!global_fsnotify_events_listener.group)
        return;

    // In rcu manner check if given sb is already being listened to
    hlist_for_each_entry_rcu(search_node, &global_fsnotify_events_listener.sb_hashtable[hash], hash_node) {
        if (search_node->sb == sb) {
            return;
        }
    }

    // node does not exist, create it
    node = KMEM_NEW(hashtable_sb_node);
    if (!node)
        return;

    DPRINTF("marking sb=%p with node=%p", sb, node);
    fe_mark = mark_sb(sb);
    if (!fe_mark) {
        KMEM_DELETE(hashtable_sb_node, node);
        return;
    }

    node->sb = sb;
    node->fe_mark = fe_mark;
    DPRINTF("sb node created sb=%p mark=%p", sb, fe_mark);

    mutex_lock(&global_fsnotify_events_listener.table_writer_lock);
    DPRINTF("inserting into the table sb=%p mark=%p hash=%d", sb, fe_mark, hash);
    // I do not see how this could possible happen but I will check this just in case
    hlist_for_each_entry(search_node, &global_fsnotify_events_listener.sb_hashtable[hash], hash_node) {
        if (search_node->sb == sb) {
            WPRINTF("sb node already in the table, this should not happen sb=%p new_mark=%p in_mark=%p",
                    sb, fe_mark, search_node->fe_mark);
            failed_to_insert = true;
            break;
        }
    }
    if (!failed_to_insert) {
        hlist_add_head_rcu(&node->hash_node, &global_fsnotify_events_listener.sb_hashtable[hash]);
    }
    mutex_unlock(&global_fsnotify_events_listener.table_writer_lock);

    if (failed_to_insert) {
        // Attempt to remediate this case by removing the mark we just added
        WPRINTF("Failed to insert sb node into the table, deleting a created mark");
        fsnotify_destroy_mark(&fe_mark->mark, global_fsnotify_events_listener.group);
        fsnotify_put_mark(&fe_mark->mark);
        // No one saw the node yet, so it is safe to free it
        KMEM_DELETE(hashtable_sb_node, node);
    }
}

#endif
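For context, fsnotify_listener.c exposes a small init/deinit API (fsnotify_events_listener_global_init, fsnotify_events_listener_init, fsnotify_events_listen_sb, fsnotify_events_listener_deinit, fsnotify_events_listener_global_deinit). The sketch below is a minimal, hypothetical illustration of one plausible call order for that API; only the fsnotify_events_listener_* calls come from this file, while the module wrapper, its function names, and the init/exit wiring are assumptions, not code from this package.

// Hypothetical driver of the listener lifecycle; not part of file_protector itself.
#include <linux/module.h>
#include "fsnotify_listener.h"

static int __init listener_sketch_init(void)
{
    int err = fsnotify_events_listener_global_init(); // mutex, hashtable, kmem caches
    if (err)
        return err;                                   // -ENOMEM if a cache could not be created

    fsnotify_events_listener_init();                  // allocates the fsnotify group
    if (!fsnotify_events_listener_registered())
        pr_warn("fsnotify group allocation failed\n");

    // From here on, hooks may call fsnotify_events_listen_sb(sb) per superblock.
    return 0;
}

static void __exit listener_sketch_exit(void)
{
    // All hooks that call fsnotify_events_listen_sb() must already be torn down.
    fsnotify_events_listener_deinit();        // clears marks, puts the group
    fsnotify_events_listener_global_deinit(); // waits for RCU and mark destruction, frees caches
}

module_init(listener_sketch_init);
module_exit(listener_sketch_exit);
MODULE_LICENSE("GPL");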