diff --git a/libbpf-tools/klockstat.bpf.c b/libbpf-tools/klockstat.bpf.c index b2a94354d37b..c53cdcdb96c3 100644 --- a/libbpf-tools/klockstat.bpf.c +++ b/libbpf-tools/klockstat.bpf.c @@ -832,201 +832,6 @@ int BPF_KPROBE(kprobe_up_write, struct rw_semaphore *lock) return 0; } -/* CONFIG_DEBUG_LOCK_ALLOC is enabled */ - -SEC("kprobe/mutex_lock_nested") -int BPF_KPROBE(kprobe_mutex_lock_nested, struct mutex *lock) -{ - u32 tid = (u32)bpf_get_current_pid_tgid(); - - bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY); - lock_contended(ctx, lock); - return 0; -} - -SEC("kretprobe/mutex_lock_nested") -int BPF_KRETPROBE(kprobe_mutex_lock_exit_nested, long ret) -{ - u32 tid = (u32)bpf_get_current_pid_tgid(); - void **lock; - - lock = bpf_map_lookup_elem(&locks, &tid); - if (!lock) - return 0; - - bpf_map_delete_elem(&locks, &tid); - lock_acquired(*lock); - return 0; -} - -SEC("kprobe/mutex_lock_interruptible_nested") -int BPF_KPROBE(kprobe_mutex_lock_interruptible_nested, struct mutex *lock) -{ - u32 tid = (u32)bpf_get_current_pid_tgid(); - - bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY); - lock_contended(ctx, lock); - return 0; -} - -SEC("kretprobe/mutex_lock_interruptible_nested") -int BPF_KRETPROBE(kprobe_mutex_lock_interruptible_exit_nested, long ret) -{ - u32 tid = (u32)bpf_get_current_pid_tgid(); - void **lock; - - lock = bpf_map_lookup_elem(&locks, &tid); - if (!lock) - return 0; - - bpf_map_delete_elem(&locks, &tid); - - if (ret) - lock_aborted(*lock); - else - lock_acquired(*lock); - return 0; -} - -SEC("kprobe/mutex_lock_killable_nested") -int BPF_KPROBE(kprobe_mutex_lock_killable_nested, struct mutex *lock) -{ - u32 tid = (u32)bpf_get_current_pid_tgid(); - - bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY); - lock_contended(ctx, lock); - return 0; -} - -SEC("kretprobe/mutex_lock_killable_nested") -int BPF_KRETPROBE(kprobe_mutex_lock_killable_exit_nested, long ret) -{ - u32 tid = (u32)bpf_get_current_pid_tgid(); - void **lock; - - lock = 
bpf_map_lookup_elem(&locks, &tid); - if (!lock) - return 0; - - bpf_map_delete_elem(&locks, &tid); - - if (ret) - lock_aborted(*lock); - else - lock_acquired(*lock); - return 0; -} - -SEC("kprobe/down_read_nested") -int BPF_KPROBE(kprobe_down_read_nested, struct rw_semaphore *lock) -{ - u32 tid = (u32)bpf_get_current_pid_tgid(); - - bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY); - lock_contended(ctx, lock); - return 0; -} - -SEC("kretprobe/down_read_nested") -int BPF_KRETPROBE(kprobe_down_read_exit_nested, long ret) -{ - u32 tid = (u32)bpf_get_current_pid_tgid(); - void **lock; - - lock = bpf_map_lookup_elem(&locks, &tid); - if (!lock) - return 0; - - bpf_map_delete_elem(&locks, &tid); - - lock_acquired(*lock); - return 0; -} - -SEC("kprobe/down_read_killable_nested") -int BPF_KPROBE(kprobe_down_read_killable_nested, struct rw_semaphore *lock) -{ - u32 tid = (u32)bpf_get_current_pid_tgid(); - - bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY); - lock_contended(ctx, lock); - return 0; -} - -SEC("kretprobe/down_read_killable_nested") -int BPF_KRETPROBE(kprobe_down_read_killable_exit_nested, long ret) -{ - u32 tid = (u32)bpf_get_current_pid_tgid(); - void **lock; - - lock = bpf_map_lookup_elem(&locks, &tid); - if (!lock) - return 0; - - bpf_map_delete_elem(&locks, &tid); - - if (ret) - lock_aborted(*lock); - else - lock_acquired(*lock); - return 0; -} - -SEC("kprobe/down_write_nested") -int BPF_KPROBE(kprobe_down_write_nested, struct rw_semaphore *lock) -{ - u32 tid = (u32)bpf_get_current_pid_tgid(); - - bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY); - lock_contended(ctx, lock); - return 0; -} - -SEC("kretprobe/down_write_nested") -int BPF_KRETPROBE(kprobe_down_write_exit_nested, long ret) -{ - u32 tid = (u32)bpf_get_current_pid_tgid(); - void **lock; - - lock = bpf_map_lookup_elem(&locks, &tid); - if (!lock) - return 0; - - bpf_map_delete_elem(&locks, &tid); - - lock_acquired(*lock); - return 0; -} - -SEC("kprobe/down_write_killable_nested") -int 
BPF_KPROBE(kprobe_down_write_killable_nested, struct rw_semaphore *lock) -{ - u32 tid = (u32)bpf_get_current_pid_tgid(); - - bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY); - lock_contended(ctx, lock); - return 0; -} - -SEC("kretprobe/down_write_killable_nested") -int BPF_KRETPROBE(kprobe_down_write_killable_exit_nested, long ret) -{ - u32 tid = (u32)bpf_get_current_pid_tgid(); - void **lock; - - lock = bpf_map_lookup_elem(&locks, &tid); - if (!lock) - return 0; - - bpf_map_delete_elem(&locks, &tid); - - if (ret) - lock_aborted(*lock); - else - lock_acquired(*lock); - return 0; -} - SEC("kprobe/rtnetlink_rcv_msg") int BPF_KPROBE(kprobe_rtnetlink_rcv_msg, struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *ext) diff --git a/libbpf-tools/klockstat.c b/libbpf-tools/klockstat.c index bf3b3abe993d..d3c4926e2511 100644 --- a/libbpf-tools/klockstat.c +++ b/libbpf-tools/klockstat.c @@ -103,7 +103,9 @@ static const char *lock_ksym_names[] = { "mutex_lock_interruptible_nested", "mutex_lock_killable", "mutex_lock_killable_nested", + "_mutex_lock_killable", "mutex_trylock", + "_mutex_trylock_nest_lock", "down_read", "down_read_nested", "down_read_interruptible", @@ -775,8 +777,6 @@ static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va static void enable_fentry(struct klockstat_bpf *obj) { - bool debug_lock; - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock, false); bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_exit, false); bpf_program__set_autoload(obj->progs.kprobe_mutex_trylock, false); @@ -804,22 +804,6 @@ static void enable_fentry(struct klockstat_bpf *obj) bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit, false); bpf_program__set_autoload(obj->progs.kprobe_up_write, false); - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_exit_nested, false); - 
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_exit_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_exit_nested, false); - - bpf_program__set_autoload(obj->progs.kprobe_down_read_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_down_read_exit_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_exit_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_down_write_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_down_write_exit_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_rtnetlink_rcv_msg, false); bpf_program__set_autoload(obj->progs.kprobe_rtnetlink_rcv_msg_exit, false); bpf_program__set_autoload(obj->progs.kprobe_netlink_dump, false); @@ -828,8 +812,7 @@ static void enable_fentry(struct klockstat_bpf *obj) bpf_program__set_autoload(obj->progs.kprobe_sock_do_ioctl_exit, false); /* CONFIG_DEBUG_LOCK_ALLOC is on */ - debug_lock = fentry_can_attach("mutex_lock_nested", NULL); - if (!debug_lock) + if (!fentry_can_attach("mutex_lock_nested", NULL)) return; bpf_program__set_attach_target(obj->progs.mutex_lock, 0, @@ -857,10 +840,24 @@ static void enable_fentry(struct klockstat_bpf *obj) "down_write_nested"); bpf_program__set_attach_target(obj->progs.down_write_exit, 0, "down_write_nested"); - bpf_program__set_attach_target(obj->progs.down_write_killable, 0, - "down_write_killable_nested"); - bpf_program__set_attach_target(obj->progs.down_write_killable_exit, 0, - "down_write_killable_nested"); + + /* Since v6.16 
mutex_lock_killable nested variant is implemented differently */ + if (fentry_can_attach("_mutex_lock_killable", NULL)) { + bpf_program__set_attach_target(obj->progs.mutex_lock_killable, 0, + "_mutex_lock_killable"); + bpf_program__set_attach_target(obj->progs.mutex_lock_killable_exit, 0, + "_mutex_lock_killable"); + } else { + bpf_program__set_attach_target(obj->progs.down_write_killable, 0, + "down_write_killable_nested"); + bpf_program__set_attach_target(obj->progs.down_write_killable_exit, 0, + "down_write_killable_nested"); + } + + /* Since v6.16 mutex_trylock also has a nested variant */ + if (fentry_can_attach("_mutex_trylock_nest_lock", NULL)) + bpf_program__set_attach_target(obj->progs.mutex_trylock_exit, 0, + "_mutex_trylock_nest_lock"); } static void enable_kprobes(struct klockstat_bpf *obj) { @@ -897,39 +894,52 @@ static void enable_kprobes(struct klockstat_bpf *obj) bpf_program__set_autoload(obj->progs.sock_do_ioctl_exit, false); /* CONFIG_DEBUG_LOCK_ALLOC is on */ - if (kprobe_exists("mutex_lock_nested")) { - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock, false); - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_exit, false); - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible, false); - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_exit, false); - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable, false); - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_exit, false); - - bpf_program__set_autoload(obj->progs.kprobe_down_read, false); - bpf_program__set_autoload(obj->progs.kprobe_down_read_exit, false); - bpf_program__set_autoload(obj->progs.kprobe_down_read_killable, false); - bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_exit, false); - bpf_program__set_autoload(obj->progs.kprobe_down_write, false); - bpf_program__set_autoload(obj->progs.kprobe_down_write_exit, false); - bpf_program__set_autoload(obj->progs.kprobe_down_write_killable, false); - 
bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit, false); + if (!kprobe_exists("mutex_lock_nested")) + return; + + bpf_program__set_attach_target(obj->progs.mutex_lock, 0, + "mutex_lock_nested"); + bpf_program__set_attach_target(obj->progs.kprobe_mutex_lock_exit, 0, + "mutex_lock_nested"); + bpf_program__set_attach_target(obj->progs.kprobe_mutex_lock_interruptible, 0, + "mutex_lock_interruptible_nested"); + bpf_program__set_attach_target(obj->progs.kprobe_mutex_lock_interruptible_exit, 0, + "mutex_lock_interruptible_nested"); + bpf_program__set_attach_target(obj->progs.kprobe_mutex_lock_killable, 0, + "mutex_lock_killable_nested"); + bpf_program__set_attach_target(obj->progs.kprobe_mutex_lock_killable_exit, 0, + "mutex_lock_killable_nested"); + + bpf_program__set_attach_target(obj->progs.kprobe_down_read, 0, + "down_read_nested"); + bpf_program__set_attach_target(obj->progs.kprobe_down_read_exit, 0, + "down_read_nested"); + bpf_program__set_attach_target(obj->progs.kprobe_down_read_killable, 0, + "down_read_killable_nested"); + bpf_program__set_attach_target(obj->progs.kprobe_down_read_killable_exit, 0, + "down_read_killable_nested"); + bpf_program__set_attach_target(obj->progs.kprobe_down_write, 0, + "down_write_nested"); + bpf_program__set_attach_target(obj->progs.kprobe_down_write_exit, 0, + "down_write_nested"); + + /* Since v6.16 mutex_lock_killable nested variant is implemented differently */ + if (kprobe_exists("_mutex_lock_killable")) { + bpf_program__set_attach_target(obj->progs.mutex_lock_killable, 0, + "_mutex_lock_killable"); + bpf_program__set_attach_target(obj->progs.mutex_lock_killable_exit, 0, + "_mutex_lock_killable"); } else { - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_exit_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_nested, false); - 
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_exit_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_exit_nested, false); - - bpf_program__set_autoload(obj->progs.kprobe_down_read_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_down_read_exit_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_exit_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_down_write_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_down_write_exit_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_nested, false); - bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit_nested, false); + bpf_program__set_attach_target(obj->progs.kprobe_down_write_killable, 0, + "down_write_killable_nested"); + bpf_program__set_attach_target(obj->progs.kprobe_down_write_killable_exit, 0, + "down_write_killable_nested"); } + + /* Since v6.16 mutex_trylock also has a nested variant */ + if (kprobe_exists("_mutex_trylock_nest_lock")) + bpf_program__set_attach_target(obj->progs.mutex_trylock_exit, 0, + "_mutex_trylock_nest_lock"); } static void disable_nldump_ioctl_probes(struct klockstat_bpf *obj)