Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
195 changes: 0 additions & 195 deletions libbpf-tools/klockstat.bpf.c
Original file line number Diff line number Diff line change
Expand Up @@ -832,201 +832,6 @@ int BPF_KPROBE(kprobe_up_write, struct rw_semaphore *lock)
return 0;
}

/* CONFIG_DEBUG_LOCK_ALLOC is enabled */

/*
 * Entry probe for mutex_lock_nested() (the mutex_lock variant used when
 * CONFIG_DEBUG_LOCK_ALLOC is enabled — see comment above).
 * Stash the lock pointer keyed by thread id so the matching kretprobe can
 * retrieve it, and start contention accounting.
 */
SEC("kprobe/mutex_lock_nested")
int BPF_KPROBE(kprobe_mutex_lock_nested, struct mutex *lock)
{
	/* low 32 bits of pid_tgid = thread id */
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	lock_contended(ctx, lock);	/* contention bookkeeping (helper defined earlier in this file) */
	return 0;
}

/*
 * Return probe for mutex_lock_nested(): look up the lock pointer stashed by
 * the entry probe for this thread, drop the map entry, and record the lock
 * as acquired. mutex_lock_nested() cannot fail, so @ret is not inspected.
 */
SEC("kretprobe/mutex_lock_nested")
int BPF_KRETPROBE(kprobe_mutex_lock_exit_nested, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;	/* entry probe never fired (e.g. attached mid-call) */

	bpf_map_delete_elem(&locks, &tid);
	lock_acquired(*lock);
	return 0;
}

/*
 * Entry probe for mutex_lock_interruptible_nested() (DEBUG_LOCK_ALLOC
 * variant). Stash the lock pointer per thread for the return probe and
 * start contention accounting.
 */
SEC("kprobe/mutex_lock_interruptible_nested")
int BPF_KPROBE(kprobe_mutex_lock_interruptible_nested, struct mutex *lock)
{
	/* low 32 bits of pid_tgid = thread id */
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	lock_contended(ctx, lock);
	return 0;
}

/*
 * Return probe for mutex_lock_interruptible_nested(). The interruptible
 * variant can fail (a signal interrupted the wait), so a non-zero @ret is
 * treated as an aborted acquisition rather than a successful one.
 */
SEC("kretprobe/mutex_lock_interruptible_nested")
int BPF_KRETPROBE(kprobe_mutex_lock_interruptible_exit_nested, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;	/* no matching entry event for this thread */

	bpf_map_delete_elem(&locks, &tid);

	if (ret)
		lock_aborted(*lock);	/* acquisition failed; discard the pending sample */
	else
		lock_acquired(*lock);
	return 0;
}

/*
 * Entry probe for mutex_lock_killable_nested() (DEBUG_LOCK_ALLOC variant).
 * Stash the lock pointer per thread for the return probe and start
 * contention accounting.
 */
SEC("kprobe/mutex_lock_killable_nested")
int BPF_KPROBE(kprobe_mutex_lock_killable_nested, struct mutex *lock)
{
	/* low 32 bits of pid_tgid = thread id */
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	lock_contended(ctx, lock);
	return 0;
}

/*
 * Return probe for mutex_lock_killable_nested(). The killable variant can
 * fail (a fatal signal arrived), so a non-zero @ret aborts the pending
 * sample instead of recording an acquisition.
 */
SEC("kretprobe/mutex_lock_killable_nested")
int BPF_KRETPROBE(kprobe_mutex_lock_killable_exit_nested, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;	/* no matching entry event for this thread */

	bpf_map_delete_elem(&locks, &tid);

	if (ret)
		lock_aborted(*lock);
	else
		lock_acquired(*lock);
	return 0;
}

/*
 * Entry probe for down_read_nested() — reader side of an rw_semaphore,
 * DEBUG_LOCK_ALLOC variant. Stash the semaphore pointer per thread and
 * start contention accounting.
 */
SEC("kprobe/down_read_nested")
int BPF_KPROBE(kprobe_down_read_nested, struct rw_semaphore *lock)
{
	/* low 32 bits of pid_tgid = thread id */
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	lock_contended(ctx, lock);
	return 0;
}

/*
 * Return probe for down_read_nested(): retrieve the stashed semaphore
 * pointer, drop the map entry, and record the acquisition.
 * down_read_nested() cannot fail, so @ret is not inspected.
 */
SEC("kretprobe/down_read_nested")
int BPF_KRETPROBE(kprobe_down_read_exit_nested, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;	/* no matching entry event for this thread */

	bpf_map_delete_elem(&locks, &tid);

	lock_acquired(*lock);
	return 0;
}

/*
 * Entry probe for down_read_killable_nested() (DEBUG_LOCK_ALLOC variant).
 * Stash the semaphore pointer per thread and start contention accounting.
 */
SEC("kprobe/down_read_killable_nested")
int BPF_KPROBE(kprobe_down_read_killable_nested, struct rw_semaphore *lock)
{
	/* low 32 bits of pid_tgid = thread id */
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	lock_contended(ctx, lock);
	return 0;
}

/*
 * Return probe for down_read_killable_nested(). The killable variant can
 * fail (fatal signal), so a non-zero @ret aborts the pending sample
 * instead of recording an acquisition.
 */
SEC("kretprobe/down_read_killable_nested")
int BPF_KRETPROBE(kprobe_down_read_killable_exit_nested, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;	/* no matching entry event for this thread */

	bpf_map_delete_elem(&locks, &tid);

	if (ret)
		lock_aborted(*lock);
	else
		lock_acquired(*lock);
	return 0;
}

/*
 * Entry probe for down_write_nested() — writer side of an rw_semaphore,
 * DEBUG_LOCK_ALLOC variant. Stash the semaphore pointer per thread and
 * start contention accounting.
 */
SEC("kprobe/down_write_nested")
int BPF_KPROBE(kprobe_down_write_nested, struct rw_semaphore *lock)
{
	/* low 32 bits of pid_tgid = thread id */
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	lock_contended(ctx, lock);
	return 0;
}

/*
 * Return probe for down_write_nested(): retrieve the stashed semaphore
 * pointer, drop the map entry, and record the acquisition.
 * down_write_nested() cannot fail, so @ret is not inspected.
 */
SEC("kretprobe/down_write_nested")
int BPF_KRETPROBE(kprobe_down_write_exit_nested, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;	/* no matching entry event for this thread */

	bpf_map_delete_elem(&locks, &tid);

	lock_acquired(*lock);
	return 0;
}

/*
 * Entry probe for down_write_killable_nested() (DEBUG_LOCK_ALLOC variant).
 * Stash the semaphore pointer per thread and start contention accounting.
 */
SEC("kprobe/down_write_killable_nested")
int BPF_KPROBE(kprobe_down_write_killable_nested, struct rw_semaphore *lock)
{
	/* low 32 bits of pid_tgid = thread id */
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	lock_contended(ctx, lock);
	return 0;
}

/*
 * Return probe for down_write_killable_nested(). The killable variant can
 * fail (fatal signal), so a non-zero @ret aborts the pending sample
 * instead of recording an acquisition.
 */
SEC("kretprobe/down_write_killable_nested")
int BPF_KRETPROBE(kprobe_down_write_killable_exit_nested, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;	/* no matching entry event for this thread */

	bpf_map_delete_elem(&locks, &tid);

	if (ret)
		lock_aborted(*lock);
	else
		lock_acquired(*lock);
	return 0;
}

SEC("kprobe/rtnetlink_rcv_msg")
int BPF_KPROBE(kprobe_rtnetlink_rcv_msg, struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *ext)
Expand Down
120 changes: 65 additions & 55 deletions libbpf-tools/klockstat.c
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,9 @@ static const char *lock_ksym_names[] = {
"mutex_lock_interruptible_nested",
"mutex_lock_killable",
"mutex_lock_killable_nested",
"_mutex_lock_killable",
"mutex_trylock",
"_mutex_trylock_nest_lock",
"down_read",
"down_read_nested",
"down_read_interruptible",
Expand Down Expand Up @@ -775,8 +777,6 @@ static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va

static void enable_fentry(struct klockstat_bpf *obj)
{
bool debug_lock;

bpf_program__set_autoload(obj->progs.kprobe_mutex_lock, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_exit, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_trylock, false);
Expand Down Expand Up @@ -804,22 +804,6 @@ static void enable_fentry(struct klockstat_bpf *obj)
bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit, false);
bpf_program__set_autoload(obj->progs.kprobe_up_write, false);

bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_exit_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_exit_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_exit_nested, false);

bpf_program__set_autoload(obj->progs.kprobe_down_read_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_down_read_exit_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_exit_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_down_write_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_down_write_exit_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit_nested, false);

bpf_program__set_autoload(obj->progs.kprobe_rtnetlink_rcv_msg, false);
bpf_program__set_autoload(obj->progs.kprobe_rtnetlink_rcv_msg_exit, false);
bpf_program__set_autoload(obj->progs.kprobe_netlink_dump, false);
Expand All @@ -828,8 +812,7 @@ static void enable_fentry(struct klockstat_bpf *obj)
bpf_program__set_autoload(obj->progs.kprobe_sock_do_ioctl_exit, false);

/* CONFIG_DEBUG_LOCK_ALLOC is on */
debug_lock = fentry_can_attach("mutex_lock_nested", NULL);
if (!debug_lock)
if (!fentry_can_attach("mutex_lock_nested", NULL))
return;

bpf_program__set_attach_target(obj->progs.mutex_lock, 0,
Expand Down Expand Up @@ -857,10 +840,24 @@ static void enable_fentry(struct klockstat_bpf *obj)
"down_write_nested");
bpf_program__set_attach_target(obj->progs.down_write_exit, 0,
"down_write_nested");
bpf_program__set_attach_target(obj->progs.down_write_killable, 0,
"down_write_killable_nested");
bpf_program__set_attach_target(obj->progs.down_write_killable_exit, 0,
"down_write_killable_nested");

	/* Since v6.16 the nested variant of mutex_lock_killable is implemented differently */
if (fentry_can_attach("_mutex_lock_killable", NULL)) {
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

_mutex_lock_killable handling is inside DEBUG-only section.
This function exists in all kernels >= 6.16 (not debug-specific) ?
Line 849 may also need to be adjusted depending on line 877.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't see it in /proc/kallsyms on a non-debug kernel and both definitions of _mutex_lock_killable(), in mutex.c and rtmutex_api.c are inside a #ifdef CONFIG_DEBUG_LOCK_ALLOC section.

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

  1. You are right — _mutex_lock_killable handling belongs in the debug section. Thanks for the correction.
  2. The two assignments to mutex_lock_killable / mutex_lock_killable_exit are mutually exclusive alternatives depending on the kernel version, but the code expresses this as an unconditional default followed by a conditional update. An if/else would make the intent clearer.

Thank you.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Alright. I'll update the pull request soon with your suggestion.

bpf_program__set_attach_target(obj->progs.mutex_lock_killable, 0,
"_mutex_lock_killable");
bpf_program__set_attach_target(obj->progs.mutex_lock_killable_exit, 0,
"_mutex_lock_killable");
} else {
bpf_program__set_attach_target(obj->progs.down_write_killable, 0,
"down_write_killable_nested");
bpf_program__set_attach_target(obj->progs.down_write_killable_exit, 0,
"down_write_killable_nested");
Comment on lines +850 to +854
Copy link

Copilot AI Mar 31, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In enable_fentry(), the fallback branch of the _mutex_lock_killable check is setting attach targets for down_write_killable(_exit), which is unrelated to mutex_lock_killable. This also means down_write_killable(_exit) won’t be retargeted to down_write_killable_nested when _mutex_lock_killable exists. Please retarget down_write_killable(_exit) to down_write_killable_nested unconditionally (for CONFIG_DEBUG_LOCK_ALLOC), and keep the mutex_lock_killable targets on mutex_lock_killable_nested when _mutex_lock_killable isn’t available.

Copilot uses AI. Check for mistakes.
}

	/* Since v6.16 mutex_trylock also has a nested variant */
if (fentry_can_attach("_mutex_trylock_nest_lock", NULL))
bpf_program__set_attach_target(obj->progs.mutex_trylock_exit, 0,
"_mutex_trylock_nest_lock");
}

static void enable_kprobes(struct klockstat_bpf *obj)
Expand Down Expand Up @@ -897,39 +894,52 @@ static void enable_kprobes(struct klockstat_bpf *obj)
bpf_program__set_autoload(obj->progs.sock_do_ioctl_exit, false);

/* CONFIG_DEBUG_LOCK_ALLOC is on */
if (kprobe_exists("mutex_lock_nested")) {
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_exit, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_exit, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_exit, false);

bpf_program__set_autoload(obj->progs.kprobe_down_read, false);
bpf_program__set_autoload(obj->progs.kprobe_down_read_exit, false);
bpf_program__set_autoload(obj->progs.kprobe_down_read_killable, false);
bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_exit, false);
bpf_program__set_autoload(obj->progs.kprobe_down_write, false);
bpf_program__set_autoload(obj->progs.kprobe_down_write_exit, false);
bpf_program__set_autoload(obj->progs.kprobe_down_write_killable, false);
bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit, false);
if (!kprobe_exists("mutex_lock_nested"))
return;

bpf_program__set_attach_target(obj->progs.mutex_lock, 0,
Copy link

Copilot AI Mar 31, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

enable_kprobes() is retargeting obj->progs.mutex_lock (the fentry program that was just set to autoload=false) to mutex_lock_nested, instead of retargeting the kprobe entry program. This likely leaves kprobe_mutex_lock still attached to mutex_lock (non-nested), breaking DEBUG_LOCK_ALLOC nested tracing. Please set the attach target on obj->progs.kprobe_mutex_lock (and its kretprobe) instead of the fentry program.

Suggested change
bpf_program__set_attach_target(obj->progs.mutex_lock, 0,
bpf_program__set_attach_target(obj->progs.kprobe_mutex_lock, 0,

Copilot uses AI. Check for mistakes.
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes indeed, that's a typo. Will fix.

"mutex_lock_nested");
bpf_program__set_attach_target(obj->progs.kprobe_mutex_lock_exit, 0,
"mutex_lock_nested");
bpf_program__set_attach_target(obj->progs.kprobe_mutex_lock_interruptible, 0,
"mutex_lock_interruptible_nested");
bpf_program__set_attach_target(obj->progs.kprobe_mutex_lock_interruptible_exit, 0,
"mutex_lock_interruptible_nested");
bpf_program__set_attach_target(obj->progs.kprobe_mutex_lock_killable, 0,
"mutex_lock_killable_nested");
bpf_program__set_attach_target(obj->progs.kprobe_mutex_lock_killable_exit, 0,
"mutex_lock_killable_nested");

bpf_program__set_attach_target(obj->progs.kprobe_down_read, 0,
"down_read_nested");
bpf_program__set_attach_target(obj->progs.kprobe_down_read_exit, 0,
"down_read_nested");
bpf_program__set_attach_target(obj->progs.kprobe_down_read_killable, 0,
"down_read_killable_nested");
bpf_program__set_attach_target(obj->progs.kprobe_down_read_killable_exit, 0,
"down_read_killable_nested");
bpf_program__set_attach_target(obj->progs.kprobe_down_write, 0,
"down_write_nested");
bpf_program__set_attach_target(obj->progs.kprobe_down_write_exit, 0,
"down_write_nested");

	/* Since v6.16 the nested variant of mutex_lock_killable is implemented differently */
if (kprobe_exists("_mutex_lock_killable")) {
bpf_program__set_attach_target(obj->progs.mutex_lock_killable, 0,
"_mutex_lock_killable");
bpf_program__set_attach_target(obj->progs.mutex_lock_killable_exit, 0,
Comment on lines +928 to +930
Copy link

Copilot AI Mar 31, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In enable_kprobes(), when _mutex_lock_killable exists you’re calling bpf_program__set_attach_target() on the fentry mutex_lock_killable programs, but those were set to autoload=false at the top of enable_kprobes(). This means kprobe_mutex_lock_killable(_exit) will still attach to the old symbol and miss nested killable locks on newer kernels. Please retarget the kprobe mutex_lock_killable entry/exit programs to _mutex_lock_killable instead.

Suggested change
bpf_program__set_attach_target(obj->progs.mutex_lock_killable, 0,
"_mutex_lock_killable");
bpf_program__set_attach_target(obj->progs.mutex_lock_killable_exit, 0,
bpf_program__set_attach_target(obj->progs.kprobe_mutex_lock_killable, 0,
"_mutex_lock_killable");
bpf_program__set_attach_target(obj->progs.kprobe_mutex_lock_killable_exit, 0,

Copilot uses AI. Check for mistakes.
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Obviously some sloppy copy and paste business on my part. My bad! Will fix.

"_mutex_lock_killable");
} else {
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_exit_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_exit_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_exit_nested, false);

bpf_program__set_autoload(obj->progs.kprobe_down_read_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_down_read_exit_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_exit_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_down_write_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_down_write_exit_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_nested, false);
bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit_nested, false);
bpf_program__set_attach_target(obj->progs.kprobe_down_write_killable, 0,
"down_write_killable_nested");
bpf_program__set_attach_target(obj->progs.kprobe_down_write_killable_exit, 0,
"down_write_killable_nested");
Comment on lines 932 to +936
Copy link

Copilot AI Mar 31, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In enable_kprobes(), retargeting down_write_killable(_exit) to down_write_killable_nested is currently inside the else branch of the _mutex_lock_killable check. down_write_killable_nested selection shouldn’t depend on whether _mutex_lock_killable exists; on kernels where _mutex_lock_killable exists, down_write_killable(_exit) will remain attached to non-nested symbols. Please move the down_write_killable(_exit) retargeting out of this conditional (or duplicate it in both branches).

Copilot uses AI. Check for mistakes.
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same issue as above. Same fix.

}

	/* Since v6.16 mutex_trylock also has a nested variant */
if (kprobe_exists("_mutex_trylock_nest_lock"))
bpf_program__set_attach_target(obj->progs.mutex_trylock_exit, 0,
"_mutex_trylock_nest_lock");
Comment on lines +940 to +942
Copy link

Copilot AI Mar 31, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In enable_kprobes(), the nested mutex_trylock handling is retargeting obj->progs.mutex_trylock_exit (fexit program) even though enable_kprobes() disables autoload for that program. It also retargets only the exit side; for kprobe/kretprobe pairs you need both entry and exit attached to the same target to keep the per-thread lock map consistent. Please retarget kprobe_mutex_trylock and kprobe_mutex_trylock_exit to _mutex_trylock_nest_lock when present.

Suggested change
if (kprobe_exists("_mutex_trylock_nest_lock"))
bpf_program__set_attach_target(obj->progs.mutex_trylock_exit, 0,
"_mutex_trylock_nest_lock");
if (kprobe_exists("_mutex_trylock_nest_lock")) {
bpf_program__set_attach_target(obj->progs.kprobe_mutex_trylock, 0,
"_mutex_trylock_nest_lock");
bpf_program__set_attach_target(obj->progs.kprobe_mutex_trylock_exit, 0,
"_mutex_trylock_nest_lock");
}

Copilot uses AI. Check for mistakes.
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Right. the *_trylock mutexes only have an exit version in the fentry case, not the kprobe one. Will fix.

}

static void disable_nldump_ioctl_probes(struct klockstat_bpf *obj)
Expand Down
Loading