diff --git a/FAQ.txt b/FAQ.txt index 83b66ebdd3ac..0f3993b4f7dc 100644 --- a/FAQ.txt +++ b/FAQ.txt @@ -29,7 +29,7 @@ A: The so-called Kernel lockdown might be the root cause. Try disabling it with echo 1 > /proc/sys/kernel/sysrq echo x > /proc/sysrq-trigger Also see https://github.com/iovisor/bcc/issues/2525 - + If you have Secure Boot enabled you need to press Alt-PrintScr-x on the keyboard instead: ``` This sysrq operation is disabled from userspace. diff --git a/INSTALL.md b/INSTALL.md index 775d61fcf16e..449fe05cf5e1 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -311,7 +311,7 @@ mv /lib/modules/$KERNEL_VERSION-microsoft-standard-WSL2+/ /lib/modules/$KERNEL_V Then you can install bcc tools package according your distribution. -If you met some problems, try to +If you meet some problems, try to ``` sudo mount -t debugfs debugfs /sys/kernel/debug ``` @@ -387,7 +387,7 @@ sudo apt install -y zip bison build-essential cmake flex git libedit-dev \ sudo apt install -y zip bison build-essential cmake flex git libedit-dev \ libllvm14 llvm-14-dev libclang-14-dev python3 zlib1g-dev libelf-dev libfl-dev python3-setuptools \ liblzma-dev libdebuginfod-dev arping netperf iperf - + # For Lunar Lobster (23.04) sudo apt install -y zip bison build-essential cmake flex git libedit-dev \ libllvm15 llvm-15-dev libclang-15-dev python3 zlib1g-dev libelf-dev libfl-dev python3-setuptools \ @@ -433,7 +433,7 @@ suppose you're running with root or add sudo first ### Install build dependencies ``` dnf install -y bison cmake ethtool flex git iperf3 libstdc++-devel python3-netaddr python3-pip gcc gcc-c++ make zlib-devel elfutils-libelf-devel -# dnf install -y luajit luajit-devel ## if use luajit, will report some lua function(which in lua5.3) undefined problem +# dnf install -y luajit luajit-devel ## if use luajit, will report some lua function(which in lua5.3) undefined problem dnf install -y clang clang-devel llvm llvm-devel llvm-static ncurses-devel dnf -y install netperf pip3 install pyroute2 
@@ -449,7 +449,7 @@ cd bcc-build/ ## here llvm should always link shared library cmake ../bcc -DCMAKE_INSTALL_PREFIX=/usr -DENABLE_LLVM_SHARED=1 make -j10 -make install +make install ``` after install, you may add bcc directory to your $PATH, which you can add to ~/.bashrc @@ -460,20 +460,20 @@ export PATH=$bcctools:$bccexamples:$PATH ``` ### let path take effect ``` -source ~/.bashrc +source ~/.bashrc ``` -then run +then run ``` hello_world.py ``` -Or +Or ``` cd /usr/share/bcc/examples ./hello_world.py ./tracing/bitehist.py cd /usr/share/bcc/tools -./bitesize +./bitesize ``` diff --git a/docker/build/Dockerfile.ubuntu b/docker/build/Dockerfile.ubuntu index 87b5fa6f0237..c9c9971bbb7b 100644 --- a/docker/build/Dockerfile.ubuntu +++ b/docker/build/Dockerfile.ubuntu @@ -77,7 +77,7 @@ do \ then \ apt-get install -y libpolly-${version}-dev; \ fi; \ -done \ +done \ && \ apt-get -y clean' diff --git a/docs/reference_guide.md b/docs/reference_guide.md index c12a62507663..8a55c9e0964c 100644 --- a/docs/reference_guide.md +++ b/docs/reference_guide.md @@ -2616,7 +2616,7 @@ cannot call GPL only function from proprietary program eBPF program compilation needs kernel sources or kernel headers with headers compiled. In case your kernel sources are at a non-standard location where BCC -cannot find then, its possible to provide BCC the absolute path of the location +cannot find them, it's possible to provide BCC the absolute path of the location by setting `BCC_KERNEL_SOURCE` to it. ## 2. Kernel version overriding diff --git a/docs/tutorial_bcc_python_developer.md b/docs/tutorial_bcc_python_developer.md index f4cb2c128acf..cf822a916a5c 100644 --- a/docs/tutorial_bcc_python_developer.md +++ b/docs/tutorial_bcc_python_developer.md @@ -229,10 +229,10 @@ if BPF.get_kprobe_functions(b'blk_start_request'): b.attach_kprobe(event="blk_mq_start_request", fn_name="trace_start") if BPF.get_kprobe_functions(b'__blk_account_io_done'): - # __blk_account_io_done is available before kernel v6.4. 
+ # __blk_account_io_done is available before kernel v6.4. b.attach_kprobe(event="__blk_account_io_done", fn_name="trace_completion") elif BPF.get_kprobe_functions(b'blk_account_io_done'): - # blk_account_io_done is traceable (not inline) before v5.16. + # blk_account_io_done is traceable (not inline) before v5.16. b.attach_kprobe(event="blk_account_io_done", fn_name="trace_completion") else: b.attach_kprobe(event="blk_mq_end_request", fn_name="trace_completion") @@ -531,7 +531,7 @@ print("%-14s %-12s %-6s %s" % ("TIME(s)", "COMMAND", "PID", "UID")) def print_event(cpu, data, size): event = b["events"].event(data) - printb(b"%-14.3f %-12s %-6d %d" % ((event.ts/1000000000), + printb(b"%-14.3f %-12s %-6d %d" % ((event.ts/1000000000), event.comm, event.pid, event.uid)) # loop with callback to print_event @@ -553,8 +553,8 @@ list``` for a list of tracepoints. Linux >= 4.7 is required to attach BPF programs to tracepoints. 1. ```args->uid```: ```args``` is auto-populated to be a structure of the tracepoint arguments. The comment above says where you can see that structure. -Eg: - +Eg: + ``` # sudo cat /sys/kernel/debug/tracing/events/syscalls/sys_enter_setuid/format name: sys_enter_setuid @@ -564,10 +564,10 @@ Eg: field:unsigned char common_flags; offset:2; size:1; signed:0; field:unsigned char common_preempt_count; offset:3; size:1; signed:0; field:int common_pid; offset:4; size:4; signed:1; - + field:int __syscall_nr; offset:8; size:4; signed:1; field:uid_t uid; offset:16; size:8; signed:0; - + print fmt: "uid: 0x%08lx", ((unsigned long)(REC->uid)) ``` In this case, there are only one member `uid` to be printed. 
diff --git a/examples/networking/http_filter/README.md b/examples/networking/http_filter/README.md index 8e1daf13b8bf..15b20757d4e2 100644 --- a/examples/networking/http_filter/README.md +++ b/examples/networking/http_filter/README.md @@ -7,7 +7,7 @@ eBPF application that parses HTTP packets and extracts (and prints on screen) th ## Usage Example - $ sudo python http-parse-complete.py + $ sudo python http-parse-complete.py GET /pipermail/iovisor-dev/ HTTP/1.1 HTTP/1.1 200 OK GET /favicon.ico HTTP/1.1 @@ -42,6 +42,6 @@ Two versions of this code are available in this repository: ## How to execute this sample This sample can be executed by typing either one the two commands below: - + $ sudo python http-parse-simple.py $ sudo python http-parse-complete.py diff --git a/examples/networking/net_monitor.py b/examples/networking/net_monitor.py index 1670e045a35c..5d3042852a5b 100644 --- a/examples/networking/net_monitor.py +++ b/examples/networking/net_monitor.py @@ -2,8 +2,8 @@ # # net_monitor.py Aggregates incoming network traffic # outputs source ip, destination ip, the number of their network traffic, and current time -# how to use : net_monitor.py -# +# how to use : net_monitor.py +# # Copyright (c) 2020 YoungEun Choe from bcc import BPF @@ -34,7 +34,7 @@ def help(): #define ETH_HLEN 14 BPF_PERF_OUTPUT(skb_events); -BPF_HASH(packet_cnt, u64, long, 256); +BPF_HASH(packet_cnt, u64, long, 256); int packet_monitor(struct __sk_buff *skb) { u8 *cursor = 0; @@ -42,20 +42,20 @@ def help(): long* count = 0; long one = 1; u64 pass_value = 0; - + struct ethernet_t *ethernet = cursor_advance(cursor, sizeof(*ethernet)); struct ip_t *ip = cursor_advance(cursor, sizeof(*ip)); if (ip->ver != 4) return 0; - if (ip->nextp != IP_TCP) + if (ip->nextp != IP_TCP) { - if (ip -> nextp != IP_UDP) + if (ip -> nextp != IP_UDP) { - if (ip -> nextp != IP_ICMP) - return 0; + if (ip -> nextp != IP_ICMP) + return 0; } } - + saddr = ip -> src; daddr = ip -> dst; @@ -63,7 +63,7 @@ def help(): 
pass_value = pass_value << 32; pass_value = pass_value + daddr; - count = packet_cnt.lookup(&pass_value); + count = packet_cnt.lookup(&pass_value); if (count) // check if this map exists *count += 1; else // if the map for the key doesn't exist, create one @@ -113,7 +113,7 @@ def decimal_to_human(input_value): formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S") if output_len != 0: print('\ncurrent packet nums:') - + for i in range(0,output_len): srcdst = packet_cnt_output[i][0].value src = (srcdst >> 32) & 0xFFFFFFFF @@ -124,8 +124,8 @@ def decimal_to_human(input_value): decimal_to_human(str(dst)) + ' ' + str(pkt_num) + ' ' + 'time : ' + formatted_time print(monitor_result) - packet_cnt.clear() # delete map entries after printing output. confirmed it deletes values and keys too - + packet_cnt.clear() # delete map entries after printing output. confirmed it deletes values and keys too + except KeyboardInterrupt: sys.stdout.close() pass diff --git a/examples/networking/tcp_mon_block/README.md b/examples/networking/tcp_mon_block/README.md index e7babc2d4f7d..79cac67cdea5 100644 --- a/examples/networking/tcp_mon_block/README.md +++ b/examples/networking/tcp_mon_block/README.md @@ -1,24 +1,24 @@ # eBPF tcp_mon_block -This eBPF program uses netlink TC, kernel tracepoints and kprobes to monitor outgoing connections from given PIDs (usually HTTP web servers) and block connections to all addresses initiated from them, unless they are listed in allow_list.json +This eBPF program uses netlink TC, kernel tracepoints and kprobes to monitor outgoing connections from given PIDs (usually HTTP web servers) and block connections to all addresses initiated from them, unless they are listed in allow_list.json To run the example: 1. Run python3 web_server.py . Note the server's PID (will be printed to stdout) 2. Add the server's PID to allow_list.json . You can replace the first entry on the JSON file and put your PID instead 3. 
Run tcp_mon_block.py -i network_interface_name (-v for verbose output). For example: python3 tcp_mon_block.py -i eth0 - 4. Put your web_server's listening IP in 'server_address' variable in http_client.py and run python3 http_client.py + 4. Put your web_server's listening IP in 'server_address' variable in http_client.py and run python3 http_client.py **Explanation**: web_server.py is a simple HTTP web server built with flask. It has a SSRF vulnerability in the route to /public_ip (you can read more about this vulnerability here https://portswigger.net/web-security/ssrf). -This route demonstrates a web server which connects to some remote API server (which is pretty common behavior) and receives some data. The attached POC simply connects to https://api.ipify.org and fetches the server's public IP, then sends it back to the client. -However, this specific route receives the API address to connect to from the user (http_client.py is used as the client in this POC, but in real life scenarios it will probably be a web browser). +This route demonstrates a web server which connects to some remote API server (which is pretty common behavior) and receives some data. The attached POC simply connects to https://api.ipify.org and fetches the server's public IP, then sends it back to the client. +However, this specific route receives the API address to connect to from the user (http_client.py is used as the client in this POC, but in real life scenarios it will probably be a web browser). This creates a SSRF vulnerability as an attacker can put any address he/she wishes to force the web server to connect to it instead of the intended API address (https://api.ipify.org) -**Run the POC twice:** +**Run the POC twice:** **First**, run only web_server.py and http_client.py . 
http_client.py will send 2 requests to the web server: @@ -28,7 +28,7 @@ This creates a SSRF vulnerability as an attacker can put any address he/she wish **Now run the POC again** -First run web_server.py but this time add the web server's PID to allow_list.json and then run tcp_mon_block.py as mentioned earlier. +First run web_server.py but this time add the web server's PID to allow_list.json and then run tcp_mon_block.py as mentioned earlier. This will make sure the web server will only connect to the predefined allow_list of addresses (this can be either an IPv4, URL or domain name), essentially blocking any connection to any address not listed in the allow_list. @@ -49,7 +49,7 @@ After web_server.py initiated a connection to a non-allowed address: -**Prerequisites**: +**Prerequisites**: 1. BCC and pyroute2 for tcp_mon_block 2. Python3 flask and requests in order to run the web_server.py and http_client.py POC diff --git a/examples/networking/tunnel_monitor/README.md b/examples/networking/tunnel_monitor/README.md index 92cb46770602..60e66bdf8b35 100644 --- a/examples/networking/tunnel_monitor/README.md +++ b/examples/networking/tunnel_monitor/README.md @@ -27,7 +27,7 @@ dependencies. You will need nodejs+npm installed on the system to run this, but the setup script will only install packages in the local directory. ``` -[user@localhost tunnel_monitor]$ ./setup.sh +[user@localhost tunnel_monitor]$ ./setup.sh Cloning into 'chord-transitions'... remote: Counting objects: 294, done. ... @@ -40,7 +40,7 @@ fastclick#1.0.6 bower_components/fastclick Then, start the simulation by running main.py: ``` -[root@bcc-dev tunnel_monitor]# python main.py +[root@bcc-dev tunnel_monitor]# python main.py Launching host 1 of 9 Launching host 2 of 9 ... 
diff --git a/examples/networking/vlan_filter/data-plane-tracing.c b/examples/networking/vlan_filter/data-plane-tracing.c index 59c292d0e57b..f4cd9f747d7f 100644 --- a/examples/networking/vlan_filter/data-plane-tracing.c +++ b/examples/networking/vlan_filter/data-plane-tracing.c @@ -5,9 +5,9 @@ #define IP_TCP 6 #define IP_UDP 17 #define IP_ICMP 1 -/* +/* In 802.3, both the source and destination addresses are 48 bits (4 bytes) MAC address. - 6 bytes (src) + 6 bytes (dst) + 2 bytes (type) = 14 bytes + 6 bytes (src) + 6 bytes (dst) + 2 bytes (type) = 14 bytes */ #define ETH_HLEN 14 @@ -18,18 +18,18 @@ return 0 -> DROP the packet return -1 -> KEEP the packet and return it to user space (userspace can read it from the socket_fd ) */ -int vlan_filter(struct __sk_buff *skb) { - u8 *cursor = 0; +int vlan_filter(struct __sk_buff *skb) { + u8 *cursor = 0; struct ethernet_t *ethernet = cursor_advance(cursor, sizeof(*ethernet)); - + //filter IP packets (ethernet type = 0x0800) 0x0800 is IPv4 packet switch(ethernet->type){ case 0x0800: goto IP; default: goto DROP; } - + IP: ; struct ip_t *ip = cursor_advance(cursor, sizeof(*ip)); // IP header (datagram) switch (ip->nextp){ diff --git a/examples/networking/vlan_filter/data-plane-tracing.py b/examples/networking/vlan_filter/data-plane-tracing.py index efaa7f1069f1..7113f877fa9f 100755 --- a/examples/networking/vlan_filter/data-plane-tracing.py +++ b/examples/networking/vlan_filter/data-plane-tracing.py @@ -30,7 +30,7 @@ def help(): print(" -i if_name select interface if_name. Default is eth0") print(" -k kafka_server_name select kafka server name. 
Default is save to file") print(" If -k option is not specified data will be saved to file.") - + print("") print("examples:") print(" data-plane-tracing # bind socket to eth0") @@ -40,7 +40,7 @@ def help(): #arguments interface="eth0" kafkaserver='' - + #check provided arguments if len(argv) == 2: if str(argv[1]) == '-h': @@ -52,16 +52,16 @@ def help(): if str(argv[1]) == '-i': interface = argv[2] elif str(argv[1]) == '-k': - kafkaserver = argv[2] + kafkaserver = argv[2] else: usage() - + if len(argv) == 5: if str(argv[1]) == '-i': interface = argv[2] kafkaserver = argv[4] elif str(argv[1]) == '-k': - kafkaserver = argv[2] + kafkaserver = argv[2] interface = argv[4] else: usage() @@ -69,8 +69,8 @@ def help(): if len(argv) > 5: usage() -print ("binding socket to '%s'" % interface) - +print ("binding socket to '%s'" % interface) + #initialize BPF - load source code from http-parse-simple.c bpf = BPF(src_file = "data-plane-tracing.c", debug = 0) @@ -96,71 +96,71 @@ def help(): try: ip = ni.ifaddresses(interface)[ni.AF_INET][0]['addr'] except: - ip = '127.0.0.1' + ip = '127.0.0.1' print("| Timestamp | Host Name | Host IP | IP Version | Source Host IP | Dest Host IP | Source Host Port | Dest Host Port | VNI | Source VM MAC | Dest VM MAC | VLAN ID | Source VM IP | Dest VM IP | Protocol | Source VM Port | Dest VM Port | Packet Length |") while 1: #retrieve raw packet from socket packet_str = os.read(socket_fd, 2048) - + #convert packet into bytearray packet_bytearray = bytearray(packet_str) - + #ethernet header length - ETH_HLEN = 14 - + ETH_HLEN = 14 + #VXLAN header length VXLAN_HLEN = 8 - + #VLAN header length VLAN_HLEN = 4 - + #Inner TCP/UDP header length TCP_HLEN = 20 UDP_HLEN = 8 - + #calculate packet total length total_length = packet_bytearray[ETH_HLEN + 2] #load MSB total_length = total_length << 8 #shift MSB total_length = total_length + packet_bytearray[ETH_HLEN+3] #add LSB - + #calculate ip header length ip_header_length = packet_bytearray[ETH_HLEN] #load Byte 
ip_header_length = ip_header_length & 0x0F #mask bits 0..3 ip_header_length = ip_header_length << 2 #shift to obtain length - + #calculate payload offset payload_offset = ETH_HLEN + ip_header_length + UDP_HLEN + VXLAN_HLEN - + #parsing ip version from ip packet header ipversion = str(bin(packet_bytearray[14])[2:5]) - + #parsing source ip address, destination ip address from ip packet header src_host_ip = str(packet_bytearray[26]) + "." + str(packet_bytearray[27]) + "." + str(packet_bytearray[28]) + "." + str(packet_bytearray[29]) dest_host_ip = str(packet_bytearray[30]) + "." + str(packet_bytearray[31]) + "." + str(packet_bytearray[32]) + "." + str(packet_bytearray[33]) - + #parsing source port and destination port src_host_port = packet_bytearray[34] << 8 | packet_bytearray[35] dest_host_port = packet_bytearray[36] << 8 | packet_bytearray[37] - + #parsing VNI from VXLAN header VNI = str((packet_bytearray[46])+(packet_bytearray[47])+(packet_bytearray[48])) - + #parsing source mac address and destination mac address mac_add = [packet_bytearray[50], packet_bytearray[51], packet_bytearray[52], packet_bytearray[53], packet_bytearray[54], packet_bytearray[55]] src_vm_mac = ":".join(map(lambda b: format(b, "02x"), mac_add)) mac_add = [packet_bytearray[56], packet_bytearray[57], packet_bytearray[58], packet_bytearray[59], packet_bytearray[60], packet_bytearray[61]] dest_vm_mac = ":".join(map(lambda b: format(b, "02x"), mac_add)) - + #parsing VLANID from VLAN header VLANID="" VLANID = str((packet_bytearray[64])+(packet_bytearray[65])) #parsing source vm ip address, destination vm ip address from encapsulated ip packet header src_vm_ip = str(packet_bytearray[80]) + "." + str(packet_bytearray[81]) + "." + str(packet_bytearray[82]) + "." + str(packet_bytearray[83]) - dest_vm_ip = str(packet_bytearray[84]) + "." + str(packet_bytearray[85]) + "." + str(packet_bytearray[86]) + "." + str(packet_bytearray[87]) - + dest_vm_ip = str(packet_bytearray[84]) + "." 
+ str(packet_bytearray[85]) + "." + str(packet_bytearray[86]) + "." + str(packet_bytearray[87]) + #parsing source port and destination port if (packet_bytearray[77]==6 or packet_bytearray[77]==17): src_vm_port = packet_bytearray[88] << 8 | packet_bytearray[88] @@ -171,23 +171,23 @@ def help(): type = str(packet_bytearray[88]) else: continue - + timestamp = str(datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S.%f')) - + #send data to remote server via Kafka Messaging Bus if kafkaserver: MESSAGE = (timestamp, socket.gethostname(),ip, str(int(ipversion, 2)), str(src_host_ip), str(dest_host_ip), str(src_host_port), str(dest_host_port), str(int(VNI)), str(src_vm_mac), str(dest_vm_mac), str(int(VLANID)), src_vm_ip, dest_vm_ip, str(packet_bytearray[77]), str(src_vm_port), str(dest_vm_port), str(total_length)) print (MESSAGE) MESSAGE = ','.join(MESSAGE) - MESSAGE = MESSAGE.encode() + MESSAGE = MESSAGE.encode() producer = KafkaProducer(bootstrap_servers=[kafkaserver]) producer.send('iovisor-topic', key=b'iovisor', value=MESSAGE) - + #save data to files else: MESSAGE = timestamp+","+socket.gethostname()+","+ip+","+str(int(ipversion, 2))+","+src_host_ip+","+dest_host_ip+","+str(src_host_port)+","+str(dest_host_port)+","+str(int(VNI))+","+str(src_vm_mac)+","+str(dest_vm_mac)+","+str(int(VLANID))+","+src_vm_ip+","+dest_vm_ip+","+str(packet_bytearray[77])+","+str(src_vm_port)+","+str(dest_vm_port)+","+str(total_length) print (MESSAGE) - #save data to a file on hour basis + #save data to a file on hour basis filename = "./vlan-data-"+time.strftime("%Y-%m-%d-%H")+"-00" with open(filename, "a") as f: f.write("%s\n" % MESSAGE) diff --git a/examples/networking/vlan_filter/test_setup.sh b/examples/networking/vlan_filter/test_setup.sh index 967cf21b1f04..12a897e657ac 100755 --- a/examples/networking/vlan_filter/test_setup.sh +++ b/examples/networking/vlan_filter/test_setup.sh @@ -14,7 +14,7 @@ ip netns add netns22 ip netns add netns3 ip netns add netns4 -# set up veth 
devices in netns11 to netns21 with connection to netns3 +# set up veth devices in netns11 to netns21 with connection to netns3 ip link add veth11 type veth peer name veth13 ip link add veth21 type veth peer name veth23 ip link set veth11 netns netns11 @@ -22,15 +22,15 @@ ip link set veth21 netns netns21 ip link set veth13 netns netns3 ip link set veth23 netns netns3 -# set up veth devices in netns12 and netns22 with connection to netns4 +# set up veth devices in netns12 and netns22 with connection to netns4 ip link add veth12 type veth peer name veth14 ip link add veth22 type veth peer name veth24 ip link set veth12 netns netns12 ip link set veth22 netns netns22 ip link set veth14 netns netns4 ip link set veth24 netns netns4 - -# assign IP addresses and set the devices up + +# assign IP addresses and set the devices up ip netns exec netns11 ifconfig veth11 192.168.100.11/24 up ip netns exec netns11 ip link set lo up ip netns exec netns12 ifconfig veth12 192.168.100.12/24 up @@ -40,16 +40,16 @@ ip netns exec netns21 ip link set lo up ip netns exec netns22 ifconfig veth22 192.168.200.22/24 up ip netns exec netns22 ip link set lo up -# set up bridge brx and its ports -ip netns exec netns3 brctl addbr brx +# set up bridge brx and its ports +ip netns exec netns3 brctl addbr brx ip netns exec netns3 ip link set brx up ip netns exec netns3 ip link set veth13 up ip netns exec netns3 ip link set veth23 up ip netns exec netns3 brctl addif brx veth13 ip netns exec netns3 brctl addif brx veth23 -# set up bridge bry and its ports -ip netns exec netns4 brctl addbr bry +# set up bridge bry and its ports +ip netns exec netns4 brctl addbr bry ip netns exec netns4 ip link set bry up ip netns exec netns4 ip link set veth14 up ip netns exec netns4 ip link set veth24 up @@ -95,14 +95,14 @@ ip netns exec netns3 bridge vlan del vid 1 dev veth23 ip netns exec netns4 bridge vlan del vid 1 dev veth14 ip netns exec netns4 bridge vlan del vid 1 dev veth24 -# set up bridge brvx and its ports 
-ip netns exec netns3 brctl addbr brvx +# set up bridge brvx and its ports +ip netns exec netns3 brctl addbr brvx ip netns exec netns3 ip link set brvx up ip netns exec netns3 ip link set vethx11 up ip netns exec netns3 brctl addif brvx vethx11 -# set up bridge brvy and its ports -ip netns exec netns4 brctl addbr brvy +# set up bridge brvy and its ports +ip netns exec netns4 brctl addbr brvy ip netns exec netns4 ip link set brvy up ip netns exec netns4 ip link set vethy11 up ip netns exec netns4 brctl addif brvy vethy11 @@ -132,15 +132,15 @@ ip link add veth7 type veth peer name veth8 ip link set veth7 up ip link set veth8 up -# set up bridge brjx and its ports -brctl addbr brjx +# set up bridge brjx and its ports +brctl addbr brjx ip link set brjx up ip link set veth4 up brctl addif brjx veth4 brctl addif brjx veth7 -# set up bridge brjy and its ports -brctl addbr brjy +# set up bridge brjy and its ports +brctl addbr brjy ip link set brjy up ip link set veth6 up brctl addif brjy veth6 diff --git a/examples/perf/ipc.py b/examples/perf/ipc.py index 73d02a03aa66..ee6bf8620cd8 100755 --- a/examples/perf/ipc.py +++ b/examples/perf/ipc.py @@ -51,7 +51,7 @@ u64 clk_start = clk.perf_read(cpu); u64 inst_start = inst.perf_read(cpu); u64 time_start = bpf_ktime_get_ns(); - + u64* kptr = NULL; kptr = data.lookup(&clk_k); if (kptr) { @@ -93,7 +93,7 @@ u64 clk_end = clk.perf_read(cpu); u64 inst_end = inst.perf_read(cpu); u64 time_end = bpf_ktime_get_ns(); - + struct perf_delta perf_data = {} ; u64* kptr = NULL; kptr = data.lookup(&clk_k); @@ -104,7 +104,7 @@ } else { return; } - + kptr = data.lookup(&inst_k); if (kptr) { perf_data.inst_delta = inst_end - *kptr; @@ -150,7 +150,7 @@ def print_data(cpu, data, size): e = b["output"].event(data) - print("%-8d %-12d %-8.2f %-8s %d" % (e.clk_delta, e.inst_delta, + print("%-8d %-12d %-8.2f %-8s %d" % (e.clk_delta, e.inst_delta, 1.0* e.inst_delta/e.clk_delta, str(round(e.time_delta * 1e-3, 2)) + ' us', cpu)) print("Counters Data") diff 
--git a/examples/tracing/disksnoop.py b/examples/tracing/disksnoop.py index c354f24b7712..2d0401c61b3e 100755 --- a/examples/tracing/disksnoop.py +++ b/examples/tracing/disksnoop.py @@ -48,10 +48,10 @@ b.attach_kprobe(event="blk_mq_start_request", fn_name="trace_start") if BPF.get_kprobe_functions(b'__blk_account_io_done'): - # __blk_account_io_done is available before kernel v6.4. + # __blk_account_io_done is available before kernel v6.4. b.attach_kprobe(event="__blk_account_io_done", fn_name="trace_completion") elif BPF.get_kprobe_functions(b'blk_account_io_done'): - # blk_account_io_done is traceable (not inline) before v5.16. + # blk_account_io_done is traceable (not inline) before v5.16. b.attach_kprobe(event="blk_account_io_done", fn_name="trace_completion") else: b.attach_kprobe(event="blk_mq_end_request", fn_name="trace_completion") diff --git a/examples/tracing/disksnoop_example.txt b/examples/tracing/disksnoop_example.txt index 835291223de2..94062e77d57d 100644 --- a/examples/tracing/disksnoop_example.txt +++ b/examples/tracing/disksnoop_example.txt @@ -3,7 +3,7 @@ Demonstrations of disksnoop.py, the Linux eBPF/bcc version. 
This traces block I/O, a prints a line to summarize each I/O completed: -# ./disksnoop.py +# ./disksnoop.py TIME(s) T BYTES LAT(ms) 16458043.435457 W 4096 2.73 16458043.435981 W 4096 3.24 diff --git a/examples/tracing/setuid_monitor.py b/examples/tracing/setuid_monitor.py index d91a027145c2..df0197f0ddb5 100755 --- a/examples/tracing/setuid_monitor.py +++ b/examples/tracing/setuid_monitor.py @@ -47,7 +47,7 @@ def print_event(cpu, data, size): event = b["events"].event(data) - printb(b"%-14.3f %-12s %-6d %d" % ((event.ts/1000000000), + printb(b"%-14.3f %-12s %-6d %d" % ((event.ts/1000000000), event.comm, event.pid, event.uid)) # loop with callback to print_event diff --git a/examples/tracing/stacksnoop_example.txt b/examples/tracing/stacksnoop_example.txt index 76784fde8973..d5e8063e822d 100644 --- a/examples/tracing/stacksnoop_example.txt +++ b/examples/tracing/stacksnoop_example.txt @@ -16,7 +16,7 @@ TIME(s) SYSCALL ret_from_fork This shows that submit_bio() was called by submit_bh(), which was called -by jbd2_journal_commit_transaction(), and so on. +by jbd2_journal_commit_transaction(), and so on. For high frequency functions, see stackcount, which summarizes in-kernel for efficiency. If you don't know if your function is low or high frequency, try diff --git a/examples/tracing/tcpv4connect_example.txt b/examples/tracing/tcpv4connect_example.txt index 0ff06e3687fa..6b0ca60b2728 100644 --- a/examples/tracing/tcpv4connect_example.txt +++ b/examples/tracing/tcpv4connect_example.txt @@ -7,9 +7,9 @@ output (IP addresses changed to protect the innocent): # ./tcpv4connect.py PID COMM SADDR DADDR DPORT -1479 telnet 127.0.0.1 127.0.0.1 23 -1469 curl 10.201.219.236 54.245.105.25 80 -1469 curl 10.201.219.236 54.67.101.145 80 +1479 telnet 127.0.0.1 127.0.0.1 23 +1469 curl 10.201.219.236 54.245.105.25 80 +1469 curl 10.201.219.236 54.67.101.145 80 This output shows three connections, one from a "telnet" process and two from "curl". 
The output details shows the source address, destination address, diff --git a/examples/tracing/vfsreadlat_example.txt b/examples/tracing/vfsreadlat_example.txt index 1d95f6a576e4..c74eb6282589 100644 --- a/examples/tracing/vfsreadlat_example.txt +++ b/examples/tracing/vfsreadlat_example.txt @@ -1,10 +1,10 @@ Demonstrations of vfsreadlat.py, the Linux eBPF/bcc version. -This example traces the latency of vfs_read (time from call to return), printing +This example traces the latency of vfs_read (time from call to return), printing it as a histogram distribution. By default, output is every five seconds: -# ./vfsreadlat.py +# ./vfsreadlat.py Tracing... Hit Ctrl-C to end. usecs : count distribution diff --git a/libbpf-tools/readahead.c b/libbpf-tools/readahead.c index b95f1f384bfa..f8e1c0cef77d 100644 --- a/libbpf-tools/readahead.c +++ b/libbpf-tools/readahead.c @@ -120,7 +120,7 @@ static int attach_access(struct readahead_bpf *obj) if (fentry_can_attach("mark_page_accessed", NULL)) return bpf_program__set_autoload(obj->progs.mark_page_accessed, true); - + fprintf(stderr, "failed to attach to access functions\n"); return -1; } diff --git a/man/man8/bashreadline.8 b/man/man8/bashreadline.8 index bc68a491c342..a4c37c5bb25e 100644 --- a/man/man8/bashreadline.8 +++ b/man/man8/bashreadline.8 @@ -24,7 +24,7 @@ Print usage message. .TP \-s Specify the location of libreadline.so shared library when you failed to run the -script directly with error: "Exception: could not determine address of symbol +script directly with error: "Exception: could not determine address of symbol \'readline\'". Default value is /lib/libreadline.so. .SH EXAMPLES .TP diff --git a/man/man8/biotop.8 b/man/man8/biotop.8 index 47392bc7e45e..1442a39eb77a 100644 --- a/man/man8/biotop.8 +++ b/man/man8/biotop.8 @@ -4,7 +4,7 @@ biotop \- Block device (disk) I/O by process top. .SH SYNOPSIS .B biotop [\-h] [\-C] [\-r MAXROWS] [\-p PID] [interval] [count] .SH DESCRIPTION -This is top for disks. 
+This is top for disks. This traces block device I/O (disk I/O), and prints a per-process summary every interval (by default, 1 second). The summary is sorted on the top disk diff --git a/man/man8/bitesize.8 b/man/man8/bitesize.8 index 655f69e7f5a4..6bd171600686 100644 --- a/man/man8/bitesize.8 +++ b/man/man8/bitesize.8 @@ -29,9 +29,9 @@ An ASCII bar chart to visualize the distribution (count column) .SH OVERHEAD This traces a block I/O tracepoint to update a histogram, which is -asynchronously copied to user-space. This method is very efficient, and -the overhead for most storage I/O rates (< 10k IOPS) should be negligible. -If you have a higher IOPS storage environment, test and quantify the overhead +asynchronously copied to user-space. This method is very efficient, and +the overhead for most storage I/O rates (< 10k IOPS) should be negligible. +If you have a higher IOPS storage environment, test and quantify the overhead before use. .SH SOURCE diff --git a/man/man8/cachestat.8 b/man/man8/cachestat.8 index 172194d498f0..206cc7c2713f 100644 --- a/man/man8/cachestat.8 +++ b/man/man8/cachestat.8 @@ -64,7 +64,7 @@ Cached amount of data in current page cache taken from /proc/meminfo. .SH OVERHEAD This traces various kernel page cache functions and maintains in-kernel counts, which are asynchronously copied to user-space. While the rate of operations can -be very high (>1G/sec) we can have up to 34% overhead, this is still a relatively efficient way to trace +be very high (>1G/sec) we can have up to 34% overhead, this is still a relatively efficient way to trace these events, and so the overhead is expected to be small for normal workloads. Measure in a test environment. .SH SOURCE diff --git a/man/man8/compactsnoop.8 b/man/man8/compactsnoop.8 index e9cde0ced516..a6e247db9885 100644 --- a/man/man8/compactsnoop.8 +++ b/man/man8/compactsnoop.8 @@ -12,7 +12,7 @@ caused by some critical processes or not. 
This works by tracing the compact zone events using raw_tracepoints and one kretprobe. -For the Centos 7.6 (3.10.x kernel), see the version under tools/old, which +For the Centos 7.6 (3.10.x kernel), see the version under tools/old, which uses an older memory compaction mechanism. Since this uses BPF, only the root user can use this tool. @@ -108,14 +108,14 @@ The compaction's result. For (CentOS 7.6's kernel), the status include: .PP .in +8n -"skipped" (COMPACT_SKIPPED): compaction didn't start as it was not possible or +"skipped" (COMPACT_SKIPPED): compaction didn't start as it was not possible or direct reclaim was more suitable .PP .in +8n "continue" (COMPACT_CONTINUE): compaction should continue to another pageblock .PP .in +8n -"partial" (COMPACT_PARTIAL): direct compaction partially compacted a zone and +"partial" (COMPACT_PARTIAL): direct compaction partially compacted a zone and there are suitable pages .PP .in +8n @@ -125,19 +125,19 @@ there are suitable pages For (kernel 4.7 and above): .PP .in +8n -"not_suitable_zone" (COMPACT_NOT_SUITABLE_ZONE): For more detailed tracepoint +"not_suitable_zone" (COMPACT_NOT_SUITABLE_ZONE): For more detailed tracepoint output - internal to compaction .PP .in +8n -"skipped" (COMPACT_SKIPPED): compaction didn't start as it was not possible or +"skipped" (COMPACT_SKIPPED): compaction didn't start as it was not possible or direct reclaim was more suitable .PP .in +8n -"deferred" (COMPACT_DEFERRED): compaction didn't start as it was deferred due +"deferred" (COMPACT_DEFERRED): compaction didn't start as it was deferred due to past failures .PP .in +8n -"no_suitable_page" (COMPACT_NOT_SUITABLE_PAGE): For more detailed tracepoint +"no_suitable_page" (COMPACT_NOT_SUITABLE_PAGE): For more detailed tracepoint output - internal to compaction .PP .in +8n @@ -156,7 +156,7 @@ of the zone but wasn't successful to compact suitable pages. 
contentions .PP .in +8n -"success" (COMPACT_SUCCESS): direct compaction terminated after concluding that +"success" (COMPACT_SUCCESS): direct compaction terminated after concluding that the allocation should now succeed .PP .in +8n diff --git a/man/man8/criticalstat.8 b/man/man8/criticalstat.8 index ee0220b83cdf..9d05927d779e 100644 --- a/man/man8/criticalstat.8 +++ b/man/man8/criticalstat.8 @@ -20,12 +20,12 @@ has to be built with certain CONFIG options enabled. See below. Enable following kernel configurations based on which kernel version you use. - CONFIG_DEBUG_PREEMPT - CONFIG_PREEMPT_TRACER - + For kernel 4.19 and later: - CONFIG_PREEMPTIRQ_TRACEPOINTS - CONFIG_TRACE_IRQFLAGS - CONFIG_TRACE_PREEMPT_TOGGLE - + For kernel 4.15 to 4.18: - CONFIG_PREEMPTIRQ_EVENTS - CONFIG_PROVE_LOCKING diff --git a/man/man8/drsnoop.8 b/man/man8/drsnoop.8 index 8fb3789afe1b..2ff1771bd08c 100644 --- a/man/man8/drsnoop.8 +++ b/man/man8/drsnoop.8 @@ -4,12 +4,12 @@ drsnoop \- Trace direct reclaim events. Uses Linux eBPF/bcc. .SH SYNOPSIS .B drsnoop [\-h] [\-T] [\-U] [\-p PID] [\-t TID] [\-u UID] [\-d DURATION] [-n name] [-v] .SH DESCRIPTION -drsnoop trace direct reclaim events, showing which processes are allocing pages +drsnoop traces direct reclaim events, showing which processes are allocating pages with direct reclaiming. This can be useful for discovering when allocstall (/p- roc/vmstat) continues to increase, whether it is caused by some critical proc- esses or not. -This works by tracing the direct reclaim events using kernel tracepoints. +This works by tracing the direct reclaim events using kernel tracepoints. This makes use of a Linux 4.4 feature (bpf_perf_event_output()); for kernels older than 4.4, see the version under tools/old, @@ -43,7 +43,7 @@ Total duration of trace in seconds. .TP \-n name Only print processes where its name partially matches 'name' -\-v verbose +\-v verbose Run in verbose mode.
Will output system memory state .TP \-v @@ -95,9 +95,9 @@ Thread ID COMM Process name .SH OVERHEAD -This traces the kernel direct reclaim tracepoints and prints output for each -event. As the rate of this is generally expected to be low (< 1000/s), the -overhead is also expected to be negligible. +This traces the kernel direct reclaim tracepoints and prints output for each +event. As the rate of this is generally expected to be low (< 1000/s), the +overhead is also expected to be negligible. .SH SOURCE This is from bcc. .IP diff --git a/man/man8/filegone.8 b/man/man8/filegone.8 index 9e634d917780..a9d835740c9a 100644 --- a/man/man8/filegone.8 +++ b/man/man8/filegone.8 @@ -5,7 +5,7 @@ filegone \- Trace why file gone (deleted or renamed). Uses Linux eBPF/bcc. .B filegone [\-h] [\-p PID] .SH DESCRIPTION This traces why file gone/vanished, providing information on who deleted or -renamed the file. +renamed the file. This works by tracing the kernel vfs_unlink() , vfs_rmdir() , vfs_rename functions. diff --git a/man/man8/funcslower.8 b/man/man8/funcslower.8 index 06f17934350a..f7214bcd7642 100644 --- a/man/man8/funcslower.8 +++ b/man/man8/funcslower.8 @@ -96,13 +96,13 @@ between slow and failed function calls. FUNC The function name, followed by its arguments if requested. .SH OVERHEAD -Depending on the function(s) being traced, overhead can become severe. For +Depending on the function(s) being traced, overhead can become severe. For example, tracing a common function like malloc() can slow down a C/C++ program by a factor of 2 or more. On the other hand, tracing a low-frequency event like the SyS_setreuid() function will probably not be as prohibitive, and in fact negligible for functions that are called up to 100-1000 times per second. -You should first use the funclatency and argdist tools for investigation, +You should first use the funclatency and argdist tools for investigation, because they summarize data in-kernel and have a much lower overhead than this tool. 
To get a general idea of the number of times a particular function is called (and estimate the overhead), use the funccount tool, e.g.: diff --git a/man/man8/memleak.8 b/man/man8/memleak.8 index 2fd2676435c7..129e79340b0b 100644 --- a/man/man8/memleak.8 +++ b/man/man8/memleak.8 @@ -93,7 +93,7 @@ stacks 10 times before quitting. # .B memleak -s 5 --top=5 10 .TP -Run ./allocs and print outstanding allocation stacks for that process: +Run ./allocs and print outstanding allocation stacks for that process: # .B memleak -c "./allocs" .TP diff --git a/man/man8/netqtop.8 b/man/man8/netqtop.8 index bfa34d11f3e4..06a7e508082b 100644 --- a/man/man8/netqtop.8 +++ b/man/man8/netqtop.8 @@ -1,20 +1,20 @@ .TH netqtop 8 "2020-07-30" "USER COMMANDS" .SH NAME -netqtop \- Summarize PPS, BPS, average size of packets and packet counts ordered by packet sizes +netqtop \- Summarize PPS, BPS, average size of packets and packet counts ordered by packet sizes on each queue of a network interface. .SH SYNOPSIS .B netqtop [\-n nic] [\-i interval] [\-t throughput] .SH DESCRIPTION -netqtop accounts statistics of both transmitted and received packets on each queue of -a specified network interface to help developers check if its traffic load is balanced. -The result is displayed as a table with columns of PPS, BPS, average size and -packet counts in range [0,64), [64, 5120), [512, 2048), [2048, 16K), [16K, 64K). +netqtop accounts statistics of both transmitted and received packets on each queue of +a specified network interface to help developers check if its traffic load is balanced. +The result is displayed as a table with columns of PPS, BPS, average size and +packet counts in range [0,64), [64, 512), [512, 2048), [2048, 16K), [16K, 64K). This is printed every given interval (default 1) in seconds. -The tool uses the net:net_dev_start_xmit and net:netif_receive_skb kernel tracepoints. +The tool uses the net:net_dev_start_xmit and net:netif_receive_skb kernel tracepoints.
Since it uses tracepoint, the tool only works on Linux 4.7+. -netqtop introduces significant overhead while network traffic is large. See OVERHEAD +netqtop introduces significant overhead while network traffic is large. See OVERHEAD section below. .SH REQUIREMENTS @@ -36,11 +36,11 @@ Account statistics of eth0 and output every 2 seconds: # .B netqtop -n eth0 -i 1 .SH OVERHEAD -In performance test, netqtop introduces a overhead up to 30% PPS drop -while printing interval is set to 1 second. So be mindful of potential packet drop +In performance tests, netqtop introduces an overhead of up to 30% PPS drop +while printing interval is set to 1 second. So be mindful of potential packet drop when using this tool. -It also increases ping-pong latency by about 1 usec. +It also increases ping-pong latency by about 1 usec. .SH SOURCE This is from bcc .IP @@ -48,7 +48,7 @@ https://github.com/iovisor/bcc .PP Also look in the bcc distribution for a netqtop_example.txt file containing example usage, output and commentary for this tool. -.SH OS +.SH OS Linux .SH STABILITY Unstable - in development diff --git a/man/man8/nfsslower.8 b/man/man8/nfsslower.8 index 22b36e3ec579..b3e7e440701e 100644 --- a/man/man8/nfsslower.8 +++ b/man/man8/nfsslower.8 @@ -78,7 +78,7 @@ Size of I/O, in bytes. LAT(ms) Latency (duration) of I/O, measured from when it was issued by VFS to the filesystem, to when it completed. This time is inclusive of RPC latency, -network latency, cache lookup, remote fileserver processing latency, etc. +network latency, cache lookup, remote fileserver processing latency, etc. Its a more accurate measure of the latency suffered by applications performing NFS read/write calls to a fileserver.
.TP diff --git a/man/man8/readahead.8 b/man/man8/readahead.8 index a2a109149b2c..af46427d2c95 100644 --- a/man/man8/readahead.8 +++ b/man/man8/readahead.8 @@ -4,19 +4,19 @@ readahead \- Show performance of read-ahead cache .SH SYNOPSIS .B readahead [-d DURATION] .SH DESCRIPTION -The tool shows the performance of read-ahead caching on the system under a given load to investigate any -caching issues. It shows a count of unused pages in the cache and also prints a histogram showing how +The tool shows the performance of read-ahead caching on the system under a given load to investigate any +caching issues. It shows a count of unused pages in the cache and also prints a histogram showing how long they have remained there. This tool traces the \fB__do_page_cache_readahead()\fR kernel function to track entry and exit in the -readahead mechanism in the kernel and then uses \fB__page_cache_alloc()\fR and \fBmark_page_accessed()\fR +readahead mechanism in the kernel and then uses \fB__page_cache_alloc()\fR and \fBmark_page_accessed()\fR functions to calculate the age of the page in the cache as well as see how many are left unaccessed. Since this uses BPF, only the root user can use this tool. .SS NOTE ON KPROBES USAGE -Since the tool uses Kprobes, depending on your linux kernel's compilation, these functions may be inlined -and hence not available for Kprobes. To see whether you have the functions available, check \fBvmlinux\fR -source and binary to confirm whether inlining is happening or not. You can also check \fB/proc/kallsyms\fR +Since the tool uses Kprobes, depending on your linux kernel's compilation, these functions may be inlined +and hence not available for Kprobes. To see whether you have the functions available, check \fBvmlinux\fR +source and binary to confirm whether inlining is happening or not. You can also check \fB/proc/kallsyms\fR on the host and verify if the target functions are present there before using this. 
.SH REQUIREMENTS CONFIG_BPF, bcc @@ -25,15 +25,15 @@ CONFIG_BPF, bcc Print usage message .TP \-d DURATION -Trace the read-ahead caching system for DURATION seconds +Trace the read-ahead caching system for DURATION seconds .SH EXAMPLES .TP Trace for 30 seconds and show histogram of page age (ms) in read-ahead cache along with unused page count: # .B readahead -d 30 .SH OVERHEAD -The kernel functions instrumented by this program could be high-frequency depending on the profile of the -application (for example sequential IO). We advise the users to measure and monitor the overhead before leaving +The kernel functions instrumented by this program could be high-frequency depending on the profile of the +application (for example sequential IO). We advise the users to measure and monitor the overhead before leaving this turned on in production environments. .SH SOURCE This originated as a bpftrace tool from the book "BPF Performance Tools", diff --git a/man/man8/tcpcong.8 b/man/man8/tcpcong.8 index 877ed805f4e2..8c8ec60cf5cf 100644 --- a/man/man8/tcpcong.8 +++ b/man/man8/tcpcong.8 @@ -7,12 +7,12 @@ tcpcong \- Measure tcp congestion state duration. Uses Linux eBPF/bcc. this tool measures tcp sockets congestion control status duration, and prints a summary of tcp congestion state durations along with the number of total state changes. - -It uses dynamic tracing of kernel tcp congestion control status + +It uses dynamic tracing of kernel tcp congestion control status updating functions, and will need to be updated to match kernel changes. The traced functions are only called when there is congestion state update, -and therefore have low overhead. we also use BPF map to store traced data +and therefore have low overhead. we also use BPF map to store traced data to reduce overhead. See the OVERHEAD section for more details. Since this uses BPF, only the root user can use this tool. .SH REQUIREMENTS @@ -114,10 +114,10 @@ Number of congestion status in this time range. 
distribution ASCII representation of the distribution (the count column). .SH OVERHEAD -This traces the kernel tcp congestion status change functions. -As called rate per second of these functions per socket is low(<10000), the -overhead is also expected to be negligible. If you have an application that -will create thousands of tcp connections, then test and understand overhead +This traces the kernel tcp congestion status change functions. +As called rate per second of these functions per socket is low(<10000), the +overhead is also expected to be negligible. If you have an application that +will create thousands of tcp connections, then test and understand overhead before use. .SH SOURCE This is from bcc. diff --git a/man/man8/tplist.8 b/man/man8/tplist.8 index da5edf37e5b2..413d8611bffe 100644 --- a/man/man8/tplist.8 +++ b/man/man8/tplist.8 @@ -5,7 +5,7 @@ tplist \- Display kernel tracepoints or USDT probes and their formats. .B tplist [-p PID] [-l LIB] [-v] [filter] .SH DESCRIPTION tplist lists all kernel tracepoints, and can optionally print out the tracepoint -format; namely, the variables that you can trace when the tracepoint is hit. +format; namely, the variables that you can trace when the tracepoint is hit. tplist can also list USDT probes embedded in a specific library or executable, and can list USDT probes for all the libraries loaded by a specific process. These features are usually used in conjunction with the argdist and/or trace tools. @@ -40,7 +40,7 @@ Print all net tracepoints with their format: .B tplist -v 'net:*' .TP Print all USDT probes in libpthread: -$ +$ .B tplist -l pthread .TP Print all USDT probes in process 4717 from the libc provider: diff --git a/man/man8/uflow.8 b/man/man8/uflow.8 index 1d0951c36367..0179086302f9 100644 --- a/man/man8/uflow.8 +++ b/man/man8/uflow.8 @@ -77,7 +77,7 @@ The method name. .SH OVERHEAD This tool has extremely high overhead because it prints every method call. 
For some scenarios, you might see lost samples in the output as the tool is unable -to keep up with the rate of data coming from the kernel. Filtering by class +to keep up with the rate of data coming from the kernel. Filtering by class or method prefix can help reduce the amount of data printed, but there is still a very high overhead in the collection mechanism. Do not use for performance- sensitive production scenarios, and always test first. diff --git a/man/man8/ugc.8 b/man/man8/ugc.8 index 782ae6341bb1..e0d0b87c0e69 100644 --- a/man/man8/ugc.8 +++ b/man/man8/ugc.8 @@ -78,8 +78,8 @@ DESCRIPTION The runtime-provided description of this garbage collection event. .SH OVERHEAD Garbage collection events, even if frequent, should not produce a considerable -overhead when traced because they are still not very common. Even hundreds of -GCs per second (which is a very high rate) will still produce a fairly +overhead when traced because they are still not very common. Even hundreds of +GCs per second (which is a very high rate) will still produce a fairly negligible overhead. .SH SOURCE This is from bcc. diff --git a/man/man8/uobjnew.8 b/man/man8/uobjnew.8 index f4a9c74ce504..17d2fd79bc15 100644 --- a/man/man8/uobjnew.8 +++ b/man/man8/uobjnew.8 @@ -67,9 +67,9 @@ BYTES The number of bytes allocated. .SH OVERHEAD Object allocation events are quite frequent, and therefore the overhead from -running this tool can be considerable. Use with caution and make sure to +running this tool can be considerable. Use with caution and make sure to test before using in a production environment. Nonetheless, even thousands of -allocations per second will likely produce a reasonable overhead when +allocations per second will likely produce a reasonable overhead when investigating a problem. .SH SOURCE This is from bcc. 
diff --git a/man/man8/wakeuptime.8 b/man/man8/wakeuptime.8 index 8630ae4add89..1a5a5e90d060 100644 --- a/man/man8/wakeuptime.8 +++ b/man/man8/wakeuptime.8 @@ -4,7 +4,7 @@ wakeuptime \- Summarize sleep to wakeup time by waker kernel stack. Uses Linux e .SH SYNOPSIS .B wakeuptime [\-h] [\-u] [\-p PID] [\-v] [\-f] [\-\-stack-storage-size STACK_STORAGE_SIZE] [\-m MIN_BLOCK_TIME] [\-M MAX_BLOCK_TIME] [duration] .SH DESCRIPTION -This program shows the kernel stack traces for threads that woke up other +This program shows the kernel stack traces for threads that woke up other blocked threads, along with the process names of the waker and target, along with a sum of the time that the target was blocked: the "blocked time". It works by tracing when threads block and when they were then woken up, and diff --git a/man/man8/wqlat.8 b/man/man8/wqlat.8 index 66a9a073872c..31ae5fd72cfe 100644 --- a/man/man8/wqlat.8 +++ b/man/man8/wqlat.8 @@ -5,14 +5,14 @@ wqlat \- Summarize kernel workqueue latency as a histogram. .B wqlat [\-h] [\-T] [\-N] [\-W] [\-w WQNAME] [interval [count]] .SH DESCRIPTION wqlat traces work's waiting on workqueue, and records the distribution -of work's queuing latency (time). This is printed as a histogram +of work's queuing latency (time). This is printed as a histogram either on Ctrl-C, or after a given interval in seconds. This tool uses in-kernel eBPF maps for storing timestamps and the histogram, for efficiency. This tool uses the workqueue:workqueue_queue_work and workqueue:workqueue_execute_start -kernel tracepoints, which is a stable tracing mechanism. Please note BPF programs can +kernel tracepoints, which is a stable tracing mechanism. Please note BPF programs can attach to tracepoints from Linux 4.7 only, so this tools can only support kernel 4.7 or later version. @@ -44,7 +44,7 @@ Number of outputs. 
.TP Summarize kernel workqueue latency as a histogram: # -.B wqlat +.B wqlat .TP Print 1 second summaries, 10 times: # @@ -61,7 +61,7 @@ Print 1 second summaries, 10 times per workqueue: .TP Print 1 second summaries for workqueue nvmet_tcp_wq: # -.B wqlat \-w nvmet_tcp_wq 1 +.B wqlat \-w nvmet_tcp_wq 1 .SH FIELDS .TP usecs @@ -78,8 +78,8 @@ An ASCII bar chart to visualize the distribution (count column) .SH OVERHEAD This traces kernel functions and maintains in-kernel timestamps and a histogram, which are asynchronously copied to user-space. This method is very efficient, -and the overhead for most workqueue scheduling rates (< 100k) should be -negligible.If you have a higher workqueue scheduling, please test and quantify +and the overhead for most workqueue scheduling rates (< 100k) should be +negligible.If you have a higher workqueue scheduling, please test and quantify the overhead before use. .SH SOURCE This is from bcc. diff --git a/src/cc/api/BPF.h b/src/cc/api/BPF.h index a07ca16baecb..4e4f96d4a398 100644 --- a/src/cc/api/BPF.h +++ b/src/cc/api/BPF.h @@ -152,7 +152,7 @@ class BPF { bpf_probe_attach_type attach_type = BPF_PROBE_ENTRY, pid_t pid = -1, uint64_t symbol_offset = 0); - StatusTuple detach_all_uprobes_for_binary(const std::string& binary_path); + StatusTuple detach_all_uprobes_for_binary(const std::string& binary_path); StatusTuple attach_usdt(const USDT& usdt, pid_t pid = -1); StatusTuple attach_usdt_all(); StatusTuple detach_usdt(const USDT& usdt, pid_t pid = -1); diff --git a/src/cc/bcc_proc.c b/src/cc/bcc_proc.c index 89de4fd8efab..0d4ec921aba7 100644 --- a/src/cc/bcc_proc.c +++ b/src/cc/bcc_proc.c @@ -528,7 +528,7 @@ static bool which_so_in_ldconfig_cache(const char* libname, char* libpath) { for (i = 0; i < lib_cache_count; ++i) { if (!strncmp(lib_cache[i].libname, soname, soname_len) && match_so_flags(lib_cache[i].flags)) { - + const char* path = lib_cache[i].path; const size_t pathlen = strlen(path); if (pathlen >= PATH_MAX) { diff --git 
a/tests/cc/test_perf_event.cc b/tests/cc/test_perf_event.cc index c7f418c7a79d..75d2001e51c3 100644 --- a/tests/cc/test_perf_event.cc +++ b/tests/cc/test_perf_event.cc @@ -98,7 +98,7 @@ TEST_CASE("test attach perf event", "[bpf_perf_event]") { int on_event(void *ctx) { int zero = 0; - + u64 p = bpf_get_current_pid_tgid(); pid.update(&zero, &p); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) diff --git a/tests/python/test_disassembler.py b/tests/python/test_disassembler.py index e031b5f12438..7966d89a5777 100755 --- a/tests/python/test_disassembler.py +++ b/tests/python/test_disassembler.py @@ -116,7 +116,7 @@ class TestDisassembler(TestCase): (0xd5, "if %dst s<= %imm goto pc%off <%jmp>"), (0xdc, "%dst endian %src"), (0xdd, "if %dst s<= %imm goto pc%off <%jmp>"),] - + @classmethod def build_instr(cls, op): dst = random.randint(0, 0xf) @@ -124,7 +124,7 @@ def build_instr(cls, op): offset = random.randint(0, 0xffff) imm = random.randint(0, 0xffffffff) return BPFInstr(op, dst, src, offset, imm) - + @classmethod def format_instr(cls, instr, fmt): uimm = ct.c_uint32(instr.imm).value @@ -135,7 +135,7 @@ def format_instr(cls, instr, fmt): .replace("%sim", "%+d" % (instr.imm)) .replace("%off", "%+d" % (instr.offset)) .replace("%jmp", "%d" % (instr.offset + 1))) - + def test_func(self): b = BPF(text=b""" struct key_t {int a; short b; struct {int c:4; int d:8;} e;} __attribute__((__packed__)); @@ -150,7 +150,7 @@ def test_func(self): 0: (b7) r0 = 1 1: (95) exit""", b.disassemble_func(b"test_func")) - + def _assert_equal_ignore_fd_id(s1, s2): # In first line of string like # Layout of BPF map test_map (type HASH, FD 3, ID 0): @@ -181,7 +181,7 @@ def _assert_equal_ignore_fd_id(s1, s2): } key; unsigned long long value;""", b.decode_table(b"test_map")) - + def test_bpf_isa(self): for op, instr_fmt in self.opcodes: instr_fmt @@ -192,6 +192,6 @@ def test_bpf_isa(self): target_text = self.format_instr(instr, instr_fmt) self.assertEqual(disassembler.disassemble_str(instr_str)[0], 
"%4d: (%02x) %s" % (0, op, target_text)) - + if __name__ == "__main__": main() diff --git a/tools/argdist.py b/tools/argdist.py index 3c8f6d612192..aeb998e3440a 100755 --- a/tools/argdist.py +++ b/tools/argdist.py @@ -125,7 +125,7 @@ def _generate_retprobe_prefix(self): text += "if (%s == 0) { return 0 ; }\n" % val_name self.param_val_names[pname] = val_name return text - + def _generate_comm_prefix(self): text = """ struct val_t { diff --git a/tools/argdist_example.txt b/tools/argdist_example.txt index 83468c718446..4267abc4b572 100644 --- a/tools/argdist_example.txt +++ b/tools/argdist_example.txt @@ -37,7 +37,7 @@ p:c:malloc(size_t size):size_t:size It seems that the application is allocating blocks of size 16. The COUNT column contains the number of occurrences of a particular event, and the -EVENT column describes the event. In this case, the "size" parameter was +EVENT column describes the event. In this case, the "size" parameter was probed and its value was 16, repeatedly. Now, suppose you wanted a histogram of buffer sizes passed to the write() @@ -152,7 +152,7 @@ p:c:puts(char *str):char*:str It looks like the message "Press ENTER to start." was printed twice during the 10 seconds we were tracing. -What about reads? You could trace gets() across the system and print the +What about reads? You could trace gets() across the system and print the strings input by the user (note how "r" is used instead of "p" to attach a probe to the function's return): @@ -240,7 +240,7 @@ per byte allocated. Let's go: 512 -> 1023 : 1 |**** | 1024 -> 2047 : 1 |**** | 2048 -> 4095 : 9 |****************************************| - 4096 -> 8191 : 1 |**** | + 4096 -> 8191 : 1 |**** | It looks like a tri-modal distribution. Some allocations are extremely cheap, and take 2-15 nanoseconds per byte. 
Other allocations are slower, and take @@ -434,7 +434,7 @@ argdist -C 'r::__vfs_read():u32:$PID:$latency > 100000' argdist -C 'r::__vfs_read():u32:$COMM:$latency > 100000' Print frequency of reads by process name where the latency was >0.1ms -argdist -H 'r::__vfs_read(void *file, void *buf, size_t count):size_t:$entry(count):$latency > 1000000' +argdist -H 'r::__vfs_read(void *file, void *buf, size_t count):size_t:$entry(count):$latency > 1000000' Print a histogram of read sizes that were longer than 1ms argdist -H \ @@ -444,7 +444,7 @@ argdist -H \ argdist -C 'p:c:fork()#fork calls' Count fork() calls in libc across all processes - Can also use funccount.py, which is easier and more flexible + Can also use funccount.py, which is easier and more flexible argdist -H 't:block:block_rq_complete():u32:args->nr_sector' Print histogram of number of sectors in completing block I/O requests @@ -463,7 +463,7 @@ argdist -H 'p:c:sleep(u32 seconds):u32:seconds' \ argdist -p 2780 -z 120 \ -C 'p:c:write(int fd, char* buf, size_t len):char*:buf:fd==1' Spy on writes to STDOUT performed by process 2780, up to a string size - of 120 characters + of 120 characters argdist -I 'kernel/sched/sched.h' \ -C 'p::__account_cfs_rq_runtime(struct cfs_rq *cfs_rq):s64:cfs_rq->runtime_remaining' diff --git a/tools/biopattern.py b/tools/biopattern.py index f2f79f9d87bd..de8df4752dc3 100755 --- a/tools/biopattern.py +++ b/tools/biopattern.py @@ -112,7 +112,7 @@ def mkdev(major, minor): exiting = 0 if args.interval else 1 counters = b.get_table("counters") -print("%-9s %-7s %5s %5s %8s %10s" % +print("%-9s %-7s %5s %5s %8s %10s" % ("TIME", "DISK", "%RND", "%SEQ", "COUNT", "KBYTES")) while True: @@ -120,7 +120,7 @@ def mkdev(major, minor): sleep(int(args.interval)) except KeyboardInterrupt: exiting = 1 - + for k, v in (counters.items_lookup_and_delete_batch() if htab_batch_ops else counters.items()): total = v.random + v.sequential diff --git a/tools/bpflist_example.txt b/tools/bpflist_example.txt 
index bc44d1f314e2..8a95091655c2 100644 --- a/tools/bpflist_example.txt +++ b/tools/bpflist_example.txt @@ -7,10 +7,10 @@ are currently running on the system. For example: # bpflist PID COMM TYPE COUNT -4058 fileslower prog 4 -4058 fileslower map 2 -4106 bashreadline map 1 -4106 bashreadline prog 1 +4058 fileslower prog 4 +4058 fileslower map 2 +4106 bashreadline map 1 +4106 bashreadline prog 1 From the output above, the fileslower and bashreadline tools are running. fileslower has installed 4 BPF programs (functions) and has opened 2 BPF maps @@ -22,12 +22,12 @@ include the process id in the name of the probe. For example: # bpflist -v PID COMM TYPE COUNT -4058 fileslower prog 4 -4058 fileslower kprobe 4 -4058 fileslower map 2 -4106 bashreadline uprobe 1 -4106 bashreadline prog 1 -4106 bashreadline map 1 +4058 fileslower prog 4 +4058 fileslower kprobe 4 +4058 fileslower map 2 +4106 bashreadline uprobe 1 +4106 bashreadline prog 1 +4106 bashreadline map 1 In double-verbose mode, the probe definitions are also displayed: @@ -42,12 +42,12 @@ open uprobes: r:uprobes/r__bin_bash_0xa4dd0_bcc_4106 /bin/bash:0x00000000000a4dd0 PID COMM TYPE COUNT -4058 fileslower prog 4 -4058 fileslower kprobe 4 -4058 fileslower map 2 -4106 bashreadline uprobe 1 -4106 bashreadline prog 1 -4106 bashreadline map 1 +4058 fileslower prog 4 +4058 fileslower kprobe 4 +4058 fileslower map 2 +4106 bashreadline uprobe 1 +4106 bashreadline prog 1 +4106 bashreadline map 1 USAGE: diff --git a/tools/btrfsdist_example.txt b/tools/btrfsdist_example.txt index 4cadc76a97c7..2b78d3f0bae2 100644 --- a/tools/btrfsdist_example.txt +++ b/tools/btrfsdist_example.txt @@ -4,7 +4,7 @@ Demonstrations of btrfsdist, the Linux eBPF/bcc version. btrfsdist traces btrfs reads, writes, opens, and fsyncs, and summarizes their latency as a power-of-2 histogram. For example: -# ./btrfsdist +# ./btrfsdist Tracing btrfs operation latency... Hit Ctrl-C to end. 
^C diff --git a/tools/btrfsslower.py b/tools/btrfsslower.py index 16c47733f0f7..7ac1179e32d0 100755 --- a/tools/btrfsslower.py +++ b/tools/btrfsslower.py @@ -63,7 +63,7 @@ debug = 0 if args.duration: args.duration = timedelta(seconds=int(args.duration)) - + # define BPF program bpf_text = """ #include diff --git a/tools/btrfsslower_example.txt b/tools/btrfsslower_example.txt index 21ab64c10945..cf3655fee783 100644 --- a/tools/btrfsslower_example.txt +++ b/tools/btrfsslower_example.txt @@ -86,7 +86,7 @@ TIME COMM PID T BYTES OFF_KB LAT(ms) FILENAME While tracing, the following commands were run in another window: # date > date.txt -# cksum date.txt +# cksum date.txt The output of btrfsslower now includes open operations ("O"), and writes ("W"). The first read from cksum(1) returned 29 bytes, and the second returned 0: diff --git a/tools/cachestat_example.txt b/tools/cachestat_example.txt index 32d504a927ad..1ab21094c365 100644 --- a/tools/cachestat_example.txt +++ b/tools/cachestat_example.txt @@ -21,7 +21,7 @@ the hit ration down to 55%. This shows a 1 Gbyte uncached file that is read twice: -(root) ~ # ./cachestat.py +(root) ~ # ./cachestat.py HITS MISSES DIRTIES HITRATIO BUFFERS_MB CACHED_MB 1 0 0 100.00% 5 191 198 12136 0 1.61% 5 238 @@ -65,7 +65,7 @@ and the HITRATIO was around 99%. This output shows a 1 Gbyte file being created and added to the page cache: -(root) ~ # ./cachestat.py +(root) ~ # ./cachestat.py HITS MISSES DIRTIES HITRATIO BUFFERS_MB CACHED_MB 1 0 0 100.00% 8 209 0 0 165584 0.00% 8 856 diff --git a/tools/cpudist_example.txt b/tools/cpudist_example.txt index d7ef69a2f404..1980f4e083e6 100644 --- a/tools/cpudist_example.txt +++ b/tools/cpudist_example.txt @@ -57,7 +57,7 @@ able to run for 4-16ms before being descheduled (this is likely the quantum length). Occasionally, tasks had to be descheduled a lot earlier -- possibly because they competed for a shared lock. 
-If necessary, you can restrict the output to include only threads from a +If necessary, you can restrict the output to include only threads from a particular process -- this helps reduce noise: # ./cpudist.py -p $(pidof parprimes) diff --git a/tools/cpuunclaimed_example.txt b/tools/cpuunclaimed_example.txt index 64158a9ba7b1..ee48f04b3342 100644 --- a/tools/cpuunclaimed_example.txt +++ b/tools/cpuunclaimed_example.txt @@ -40,7 +40,7 @@ to the coarseness of its 99 Hertz samples. This is an 8 CPU system, with an 8 CPU-bound threaded application running that has been bound to one CPU (via taskset): -# ./cpuunclaimed.py +# ./cpuunclaimed.py Sampling run queues... Output every 1 seconds. Hit Ctrl-C to end. %CPU 12.63%, unclaimed idle 86.36% %CPU 12.50%, unclaimed idle 87.50% diff --git a/tools/criticalstat_example.txt b/tools/criticalstat_example.txt index 705f4d76081b..02bfec49cab7 100644 --- a/tools/criticalstat_example.txt +++ b/tools/criticalstat_example.txt @@ -12,12 +12,12 @@ Since this uses BPF, only the root user can use this tool. Further, the kernel has to be built with certain CONFIG options enabled inorder for it to work: - CONFIG_DEBUG_PREEMPT - CONFIG_PREEMPT_TRACER - + For kernel 4.19 and later: - CONFIG_PREEMPTIRQ_TRACEPOINTS - CONFIG_TRACE_IRQFLAGS - CONFIG_TRACE_PREEMPT_TOGGLE - + For kernel 4.15 to 4.18: - CONFIG_PREEMPTIRQ_EVENTS - CONFIG_PROVE_LOCKING diff --git a/tools/dcsnoop_example.txt b/tools/dcsnoop_example.txt index 2184db0e1ff1..54c1b63ce0a4 100644 --- a/tools/dcsnoop_example.txt +++ b/tools/dcsnoop_example.txt @@ -6,7 +6,7 @@ further investigation beyond dcstat(8). The output is likely verbose, as dcache lookups are likely frequent. By default, only failed lookups are shown. 
For example: -# ./dcsnoop.py +# ./dcsnoop.py TIME(s) PID COMM T FILE 0.002837 1643 snmpd M net/dev 0.002852 1643 snmpd M 1643 diff --git a/tools/dcstat_example.txt b/tools/dcstat_example.txt index 574473f5a02f..7ae420c7bab6 100644 --- a/tools/dcstat_example.txt +++ b/tools/dcstat_example.txt @@ -3,7 +3,7 @@ Demonstrations of dcstat, the Linux eBPF/bcc version. dcstat shows directory entry cache (dcache) statistics. For example: -# ./dcstat +# ./dcstat TIME REFS/s SLOW/s MISS/s HIT% 08:11:47: 2059 141 97 95.29 08:11:48: 79974 151 106 99.87 @@ -30,7 +30,7 @@ ratio to 53%, and more importantly, a miss rate of over 10 thousand per second. Here's an interesting workload: -# ./dcstat +# ./dcstat TIME REFS/s SLOW/s MISS/s HIT% 08:15:53: 250683 141 97 99.96 08:15:54: 266115 145 101 99.96 @@ -44,7 +44,7 @@ does not exist. Here's the C program that generated the workload: 1 #include 2 #include 3 #include - 4 + 4 5 int 6 main(int argc, char *argv[]) 7 { @@ -67,13 +67,13 @@ each time (which is also a missing file), using the following C code: 2 #include 3 #include 4 #include - 5 + 5 6 int 7 main(int argc, char *argv[]) 8 { 9 int fd, i = 0; 10 char buf[128] = {}; - 11 + 11 12 while (1) { 13 sprintf(buf, "bad%d", i++); 14 fd = open(buf, O_RDONLY); @@ -83,7 +83,7 @@ each time (which is also a missing file), using the following C code: Here's dcstat: -# ./dcstat +# ./dcstat TIME REFS/s SLOW/s MISS/s HIT% 08:18:52: 241131 237544 237505 1.51 08:18:53: 238210 236323 236278 0.82 diff --git a/tools/drsnoop_example.txt b/tools/drsnoop_example.txt index 0c41fa531dbd..931059312102 100644 --- a/tools/drsnoop_example.txt +++ b/tools/drsnoop_example.txt @@ -103,7 +103,7 @@ This caught the 'summond' command because it partially matches 'mond' that's pas to the '-n' option. 
-The -v option can be used to show system memory state (now only free mem) at +The -v option can be used to show system memory state (now only free mem) at the beginning of direct reclaiming: # ./drsnoop.py -v diff --git a/tools/ext4dist_example.txt b/tools/ext4dist_example.txt index def8e8bacffb..b7a6c8076fdb 100644 --- a/tools/ext4dist_example.txt +++ b/tools/ext4dist_example.txt @@ -4,7 +4,7 @@ Demonstrations of ext4dist, the Linux eBPF/bcc version. ext4dist traces ext4 reads, writes, opens, and fsyncs, and summarizes their latency as a power-of-2 histogram. For example: -# ./ext4dist +# ./ext4dist Tracing ext4 operation latency... Hit Ctrl-C to end. ^C diff --git a/tools/ext4slower.py b/tools/ext4slower.py index 4faf4b5c3e18..cee5be356ba4 100755 --- a/tools/ext4slower.py +++ b/tools/ext4slower.py @@ -100,7 +100,7 @@ // The current ext4 (Linux 4.5) uses generic_file_read_iter(), instead of it's // own function, for reads. So we need to trace that and then filter on ext4, // which I do by checking file->f_op. -// The new Linux version (since form 4.10) uses ext4_file_read_iter(), And if the 'CONFIG_FS_DAX' +// The new Linux version (since form 4.10) uses ext4_file_read_iter(), And if the 'CONFIG_FS_DAX' // is not set, then ext4_file_read_iter() will call generic_file_read_iter(), else it will call // ext4_dax_read_iter(), and trace generic_file_read_iter() will fail. int trace_read_entry(struct pt_regs *ctx, struct kiocb *iocb) diff --git a/tools/filegone_example.txt b/tools/filegone_example.txt index 0234e8dc8a15..ddf8ac907e9a 100644 --- a/tools/filegone_example.txt +++ b/tools/filegone_example.txt @@ -4,7 +4,7 @@ Demonstrations of filegone, the Linux eBPF/bcc version. 
filegone traces why file gone, either been deleted or renamed For example: -# ./filegone +# ./filegone 18:30:56 22905 vim DELETE .fstab.swpx 18:30:56 22905 vim DELETE .fstab.swp 18:31:00 22905 vim DELETE .viminfo diff --git a/tools/filelife_example.txt b/tools/filelife_example.txt index c3d67953185f..846e3323027f 100644 --- a/tools/filelife_example.txt +++ b/tools/filelife_example.txt @@ -4,7 +4,7 @@ Demonstrations of filelife, the Linux eBPF/bcc version. filelife traces short-lived files: those that have been created and then deleted while tracing. For example: -# ./filelife +# ./filelife TIME PID COMM AGE(s) FILE 05:57:59 8556 gcc 0.04 ccCB5EDe.s 05:57:59 8560 rm 0.02 .entry_64.o.d diff --git a/tools/fileslower.py b/tools/fileslower.py index 0383913edbcb..8faa6d30c88a 100755 --- a/tools/fileslower.py +++ b/tools/fileslower.py @@ -225,7 +225,7 @@ "BYTES", "LAT(ms)", "FILENAME")) start_ts = time.time() -DNAME_INLINE_LEN = 32 +DNAME_INLINE_LEN = 32 def print_event(cpu, data, size): event = b["events"].event(data) diff --git a/tools/fileslower_example.txt b/tools/fileslower_example.txt index 0e0c7caf2f7e..7bbc967e9ac7 100644 --- a/tools/fileslower_example.txt +++ b/tools/fileslower_example.txt @@ -4,7 +4,7 @@ Demonstrations of fileslower, the Linux eBPF/bcc version. fileslower shows file-based synchronous reads and writes slower than a threshold. 
For example: -# ./fileslower +# ./fileslower Tracing sync read/writes slower than 10 ms TIME(s) COMM PID D BYTES LAT(ms) FILENAME 0.000 randread.pl 4762 R 8192 12.70 data1 diff --git a/tools/filetop.py b/tools/filetop.py index 568ae97a0950..1890bde78238 100755 --- a/tools/filetop.py +++ b/tools/filetop.py @@ -200,7 +200,7 @@ else: b.attach_kprobe(event="vfs_read", fn_name="trace_read_entry") b.attach_kprobe(event="vfs_write", fn_name="trace_write_entry") - + # check whether hash table batch ops is supported htab_batch_ops = True if BPF.kernel_struct_has_field(b'bpf_map_ops', diff --git a/tools/funcinterval_example.txt b/tools/funcinterval_example.txt index b3fea3e9e4b8..d22bc4376833 100644 --- a/tools/funcinterval_example.txt +++ b/tools/funcinterval_example.txt @@ -2,7 +2,7 @@ Demonstrations of funcinterval, the Linux eBPF/bcc version. eBPF/bcc is very suitable for platform performance tuning. By funclatency, we can profile specific functions to know how latency -this function costs. However, sometimes performance drop is not about the +this function costs. However, sometimes performance drop is not about the latency of function but the interval between function calls. funcinterval is born for this purpose. diff --git a/tools/funcslower_example.txt b/tools/funcslower_example.txt index 86524c2dadcc..191aa24fa43f 100644 --- a/tools/funcslower_example.txt +++ b/tools/funcslower_example.txt @@ -9,28 +9,28 @@ failed. For example, trace the open() function in libc when it is slower than # ./funcslower c:open -u 1 Tracing function calls slower than 1 us... Ctrl+C to quit. 
COMM PID LAT(us) RVAL FUNC -less 27074 33.77 3 c:open -less 27074 9.96 ffffffffffffffff c:open -less 27074 5.92 ffffffffffffffff c:open -less 27074 15.88 ffffffffffffffff c:open -less 27074 8.89 3 c:open -less 27074 15.89 3 c:open -sh 27075 20.97 4 c:open -bash 27075 20.14 4 c:open -lesspipe.sh 27075 18.77 4 c:open -lesspipe.sh 27075 11.21 4 c:open -lesspipe.sh 27075 13.68 4 c:open -file 27076 14.83 ffffffffffffffff c:open -file 27076 8.02 4 c:open -file 27076 10.26 4 c:open -file 27076 6.55 4 c:open -less 27074 11.67 4 c:open +less 27074 33.77 3 c:open +less 27074 9.96 ffffffffffffffff c:open +less 27074 5.92 ffffffffffffffff c:open +less 27074 15.88 ffffffffffffffff c:open +less 27074 8.89 3 c:open +less 27074 15.89 3 c:open +sh 27075 20.97 4 c:open +bash 27075 20.14 4 c:open +lesspipe.sh 27075 18.77 4 c:open +lesspipe.sh 27075 11.21 4 c:open +lesspipe.sh 27075 13.68 4 c:open +file 27076 14.83 ffffffffffffffff c:open +file 27076 8.02 4 c:open +file 27076 10.26 4 c:open +file 27076 6.55 4 c:open +less 27074 11.67 4 c:open ^C This shows several open operations performed by less and some helpers it invoked in the process. The latency (in microseconds) is shown, as well as the return value from the open() function, which helps indicate if there is a correlation -between failures and slow invocations. Most open() calls seemed to have +between failures and slow invocations. Most open() calls seemed to have completed successfully (returning a valid file descriptor), but some have failed and returned -1. @@ -39,14 +39,14 @@ You can also trace kernel functions: # ./funcslower -m 10 vfs_read Tracing function calls slower than 10 ms... Ctrl+C to quit. 
COMM PID LAT(ms) RVAL FUNC -bash 11527 78.97 1 vfs_read -bash 11527 101.26 1 vfs_read -bash 11527 1053.60 1 vfs_read -bash 11527 44.21 1 vfs_read -bash 11527 79.50 1 vfs_read -bash 11527 33.37 1 vfs_read -bash 11527 112.17 1 vfs_read -bash 11527 101.49 1 vfs_read +bash 11527 78.97 1 vfs_read +bash 11527 101.26 1 vfs_read +bash 11527 1053.60 1 vfs_read +bash 11527 44.21 1 vfs_read +bash 11527 79.50 1 vfs_read +bash 11527 33.37 1 vfs_read +bash 11527 112.17 1 vfs_read +bash 11527 101.49 1 vfs_read ^C Occasionally, it is also useful to see the arguments passed to the functions. diff --git a/tools/inject_example.txt b/tools/inject_example.txt index 77cef4a10863..13c166aee92f 100644 --- a/tools/inject_example.txt +++ b/tools/inject_example.txt @@ -75,7 +75,7 @@ we want to fail the dentry allocation of a file creatively named 'bananas'. We can do the following: # ./inject.py kmalloc -v 'd_alloc_parallel(struct dentry *parent, const struct -qstr *name)(STRCMP(name->name, 'bananas'))' +qstr *name)(STRCMP(name->name, 'bananas'))' While this script is executing, any operation that would cause a dentry allocation where the name is 'bananas' fails, as expected. diff --git a/tools/lib/ucalls_example.txt b/tools/lib/ucalls_example.txt index 31b3bc895b93..4516ef48bbf2 100644 --- a/tools/lib/ucalls_example.txt +++ b/tools/lib/ucalls_example.txt @@ -6,8 +6,8 @@ Perl, PHP, Python, Ruby, Tcl, and Linux system calls. It displays statistics on the most frequently called methods, as well as the latency (duration) of these methods. -Through the syscalls support, ucalls can provide basic information on a -process' interaction with the system including syscall counts and latencies. +Through the syscalls support, ucalls can provide basic information on a +process' interaction with the system including syscall counts and latencies. This can then be used for further exploration with other BCC tools like trace, argdist, biotop, fileslower, and others. 
@@ -24,7 +24,7 @@ slowy/App.isPrime 8969 4841017.64 ^C -To trace only syscalls in a particular process and print the top 10 most +To trace only syscalls in a particular process and print the top 10 most frequently-invoked ones: # ucalls -l none -ST 10 7913 diff --git a/tools/lib/uflow_example.txt b/tools/lib/uflow_example.txt index c7621f531caf..2174bd900fe9 100644 --- a/tools/lib/uflow_example.txt +++ b/tools/lib/uflow_example.txt @@ -13,35 +13,35 @@ For example, trace all Ruby method calls in a specific process: # ./uflow -l ruby 27245 Tracing method calls in ruby process 27245... Ctrl-C to quit. CPU PID TID TIME(us) METHOD -3 27245 27245 4.536 <- IO.gets -3 27245 27245 4.536 <- IRB::StdioInputMethod.gets -3 27245 27245 4.536 -> IRB::Context.verbose? -3 27245 27245 4.536 -> NilClass.nil? -3 27245 27245 4.536 <- NilClass.nil? -3 27245 27245 4.536 -> IO.tty? -3 27245 27245 4.536 <- IO.tty? -3 27245 27245 4.536 -> Kernel.kind_of? -3 27245 27245 4.536 <- Kernel.kind_of? -3 27245 27245 4.536 <- IRB::Context.verbose? -3 27245 27245 4.536 <- IRB::Irb.signal_status -3 27245 27245 4.536 -> String.chars -3 27245 27245 4.536 <- String.chars +3 27245 27245 4.536 <- IO.gets +3 27245 27245 4.536 <- IRB::StdioInputMethod.gets +3 27245 27245 4.536 -> IRB::Context.verbose? +3 27245 27245 4.536 -> NilClass.nil? +3 27245 27245 4.536 <- NilClass.nil? +3 27245 27245 4.536 -> IO.tty? +3 27245 27245 4.536 <- IO.tty? +3 27245 27245 4.536 -> Kernel.kind_of? +3 27245 27245 4.536 <- Kernel.kind_of? +3 27245 27245 4.536 <- IRB::Context.verbose? +3 27245 27245 4.536 <- IRB::Irb.signal_status +3 27245 27245 4.536 -> String.chars +3 27245 27245 4.536 <- String.chars ^C In the preceding output, indentation indicates the depth of the flow graph, and the <- and -> arrows indicate the direction of the event (exit or entry). -Often, the amount of output can be overwhelming. You can filter specific +Often, the amount of output can be overwhelming. You can filter specific classes or methods. 
For example, trace only methods from the Thread class: # ./uflow -C java/lang/Thread $(pidof java) Tracing method calls in java process 27722... Ctrl-C to quit. CPU PID TID TIME(us) METHOD -3 27722 27731 3.144 -> java/lang/Thread. -3 27722 27731 3.144 -> java/lang/Thread.init -3 27722 27731 3.144 -> java/lang/Thread.init -3 27722 27731 3.144 -> java/lang/Thread.currentThread -3 27722 27731 3.144 <- java/lang/Thread.currentThread +3 27722 27731 3.144 -> java/lang/Thread. +3 27722 27731 3.144 -> java/lang/Thread.init +3 27722 27731 3.144 -> java/lang/Thread.init +3 27722 27731 3.144 -> java/lang/Thread.currentThread +3 27722 27731 3.144 <- java/lang/Thread.currentThread 3 27722 27731 3.144 -> java/lang/Thread.getThreadGroup 3 27722 27731 3.144 <- java/lang/Thread.getThreadGroup 3 27722 27731 3.144 -> java/lang/ThreadGroup.checkAccess @@ -50,32 +50,32 @@ CPU PID TID TIME(us) METHOD 3 27722 27731 3.144 <- java/lang/ThreadGroup.addUnstarted 3 27722 27731 3.145 -> java/lang/Thread.isDaemon 3 27722 27731 3.145 <- java/lang/Thread.isDaemon -3 27722 27731 3.145 -> java/lang/Thread.getPriority -3 27722 27731 3.145 <- java/lang/Thread.getPriority +3 27722 27731 3.145 -> java/lang/Thread.getPriority +3 27722 27731 3.145 <- java/lang/Thread.getPriority 3 27722 27731 3.145 -> java/lang/Thread.getContextClassLoader 3 27722 27731 3.145 <- java/lang/Thread.getContextClassLoader -3 27722 27731 3.145 -> java/lang/Thread.setPriority -3 27722 27731 3.145 -> java/lang/Thread.checkAccess -3 27722 27731 3.145 <- java/lang/Thread.checkAccess +3 27722 27731 3.145 -> java/lang/Thread.setPriority +3 27722 27731 3.145 -> java/lang/Thread.checkAccess +3 27722 27731 3.145 <- java/lang/Thread.checkAccess 3 27722 27731 3.145 -> java/lang/Thread.getThreadGroup 3 27722 27731 3.145 <- java/lang/Thread.getThreadGroup 3 27722 27731 3.145 -> java/lang/ThreadGroup.getMaxPriority 3 27722 27731 3.145 <- java/lang/ThreadGroup.getMaxPriority 3 27722 27731 3.145 -> java/lang/Thread.setPriority0 3 27722 27731 
3.145 <- java/lang/Thread.setPriority0 -3 27722 27731 3.145 <- java/lang/Thread.setPriority -3 27722 27731 3.145 -> java/lang/Thread.nextThreadID -3 27722 27731 3.145 <- java/lang/Thread.nextThreadID -3 27722 27731 3.145 <- java/lang/Thread.init -3 27722 27731 3.145 <- java/lang/Thread.init -3 27722 27731 3.145 <- java/lang/Thread. -3 27722 27731 3.145 -> java/lang/Thread.start -3 27722 27731 3.145 -> java/lang/ThreadGroup.add -3 27722 27731 3.145 <- java/lang/ThreadGroup.add -3 27722 27731 3.145 -> java/lang/Thread.start0 -3 27722 27731 3.145 <- java/lang/Thread.start0 -3 27722 27731 3.146 <- java/lang/Thread.start -2 27722 27742 3.146 -> java/lang/Thread.run +3 27722 27731 3.145 <- java/lang/Thread.setPriority +3 27722 27731 3.145 -> java/lang/Thread.nextThreadID +3 27722 27731 3.145 <- java/lang/Thread.nextThreadID +3 27722 27731 3.145 <- java/lang/Thread.init +3 27722 27731 3.145 <- java/lang/Thread.init +3 27722 27731 3.145 <- java/lang/Thread. +3 27722 27731 3.145 -> java/lang/Thread.start +3 27722 27731 3.145 -> java/lang/ThreadGroup.add +3 27722 27731 3.145 <- java/lang/ThreadGroup.add +3 27722 27731 3.145 -> java/lang/Thread.start0 +3 27722 27731 3.145 <- java/lang/Thread.start0 +3 27722 27731 3.146 <- java/lang/Thread.start +2 27722 27742 3.146 -> java/lang/Thread.run ^C The reason that the CPU number is printed in the first column is that events diff --git a/tools/lib/ugc_example.txt b/tools/lib/ugc_example.txt index 083cdb64dd2c..0defb2caf0e9 100644 --- a/tools/lib/ugc_example.txt +++ b/tools/lib/ugc_example.txt @@ -2,7 +2,7 @@ Demonstrations of ugc. ugc traces garbage collection events in high-level languages, including Java, -Python, Ruby, and Node. Each GC event is printed with some additional +Python, Ruby, and Node. Each GC event is printed with some additional information provided by that language's runtime, if available. The duration of the GC event is also provided. 
@@ -10,7 +10,7 @@ For example, to trace all garbage collection events in a specific Node process: # ugc $(pidof node) Tracing garbage collections in node process 30012... Ctrl-C to quit. -START TIME (us) DESCRIPTION +START TIME (us) DESCRIPTION 1.500 1181.00 GC scavenge 1.505 1704.00 GC scavenge 1.509 1534.00 GC scavenge @@ -46,7 +46,7 @@ switches can be useful for this: # ugc -F Tenured $(pidof java) Tracing garbage collections in java process 29907... Ctrl-C to quit. -START TIME (us) DESCRIPTION +START TIME (us) DESCRIPTION 0.360 4309.00 MarkSweepCompact Tenured Gen used=287528->287528 max=173408256->173408256 2.459 4232.00 MarkSweepCompact Tenured Gen used=287528->287528 max=173408256->173408256 4.648 4139.00 MarkSweepCompact Tenured Gen used=287528->287528 max=173408256->173408256 @@ -54,7 +54,7 @@ START TIME (us) DESCRIPTION # ugc -M 1 $(pidof java) Tracing garbage collections in java process 29907... Ctrl-C to quit. -START TIME (us) DESCRIPTION +START TIME (us) DESCRIPTION 0.160 3715.00 MarkSweepCompact Code Cache used=287528->3209472 max=173408256->251658240 0.160 3975.00 MarkSweepCompact Metaspace used=287528->3092104 max=173408256->18446744073709551615 0.160 4058.00 MarkSweepCompact Compressed Class Space used=287528->266840 max=173408256->1073741824 diff --git a/tools/lib/ustat_example.txt b/tools/lib/ustat_example.txt index 11ee2de4aa06..27308bf56b40 100644 --- a/tools/lib/ustat_example.txt +++ b/tools/lib/ustat_example.txt @@ -1,7 +1,7 @@ Demonstrations of ustat. -ustat is a "top"-like tool for monitoring events in high-level languages. It +ustat is a "top"-like tool for monitoring events in high-level languages. It prints statistics about garbage collections, method calls, object allocations, and various other events for every process that it recognizes with a Java, Node, Perl, PHP, Python, Ruby, and Tcl runtime. @@ -12,35 +12,35 @@ For example: Tracing... Output every 10 secs. 
Hit Ctrl-C to end 12:17:17 loadavg: 0.33 0.08 0.02 5/211 26284 -PID CMDLINE METHOD/s GC/s OBJNEW/s CLOAD/s EXC/s THR/s -3018 node/node 0 3 0 0 0 0 +PID CMDLINE METHOD/s GC/s OBJNEW/s CLOAD/s EXC/s THR/s +3018 node/node 0 3 0 0 0 0 ^C Detaching... -If desired, you can instruct ustat to print a certain number of entries and -exit, which can be useful to get a quick picture on what's happening on the -system over a short time interval. Here, we ask ustat to print 5-second +If desired, you can instruct ustat to print a certain number of entries and +exit, which can be useful to get a quick picture on what's happening on the +system over a short time interval. Here, we ask ustat to print 5-second summaries 12 times (for a total time of 1 minute): # ./ustat.py -C 5 12 Tracing... Output every 5 secs. Hit Ctrl-C to end 12:18:26 loadavg: 0.27 0.11 0.04 2/336 26455 -PID CMDLINE METHOD/s GC/s OBJNEW/s CLOAD/s EXC/s THR/s -3018 node/node 0 1 0 0 0 0 +PID CMDLINE METHOD/s GC/s OBJNEW/s CLOAD/s EXC/s THR/s +3018 node/node 0 1 0 0 0 0 12:18:31 loadavg: 0.33 0.12 0.04 2/336 26456 -PID CMDLINE METHOD/s GC/s OBJNEW/s CLOAD/s EXC/s THR/s -3018 node/node 0 0 0 0 0 0 -26439 java -XX:+ExtendedDT 2776045 0 0 0 0 0 +PID CMDLINE METHOD/s GC/s OBJNEW/s CLOAD/s EXC/s THR/s +3018 node/node 0 0 0 0 0 0 +26439 java -XX:+ExtendedDT 2776045 0 0 0 0 0 12:18:37 loadavg: 0.38 0.14 0.05 2/336 26457 -PID CMDLINE METHOD/s GC/s OBJNEW/s CLOAD/s EXC/s THR/s -3018 node/node 0 0 0 0 0 0 -26439 java -XX:+ExtendedDT 2804378 0 0 0 0 0 +PID CMDLINE METHOD/s GC/s OBJNEW/s CLOAD/s EXC/s THR/s +3018 node/node 0 0 0 0 0 0 +26439 java -XX:+ExtendedDT 2804378 0 0 0 0 0 (...more output omitted for brevity) @@ -75,4 +75,4 @@ examples: ./ustat -C # don't clear the screen ./ustat -l java # Java processes only ./ustat 5 # 5 second summaries - ./ustat 5 10 # 5 second summaries, 10 times only + ./ustat 5 10 # 5 second summaries, 10 times only diff --git a/tools/lib/uthreads_example.txt b/tools/lib/uthreads_example.txt 
index 988092691227..4f30fe5b454b 100644 --- a/tools/lib/uthreads_example.txt +++ b/tools/lib/uthreads_example.txt @@ -10,13 +10,13 @@ For example, trace all Java thread creation events: # ./uthreads -l java 27420 Tracing thread events in process 27420 (language: java)... Ctrl-C to quit. -TIME ID TYPE DESCRIPTION -18.596 R=9/N=0 start SIGINT handler -18.596 R=4/N=0 stop Signal Dispatcher +TIME ID TYPE DESCRIPTION +18.596 R=9/N=0 start SIGINT handler +18.596 R=4/N=0 stop Signal Dispatcher ^C The ID column in the preceding output shows the thread's runtime ID and native -ID, when available. The accuracy of this information depends on the Java +ID, when available. The accuracy of this information depends on the Java runtime. @@ -24,11 +24,11 @@ Next, trace only pthread creation events in some native application: # ./uthreads 27450 Tracing thread events in process 27450 (language: c)... Ctrl-C to quit. -TIME ID TYPE DESCRIPTION +TIME ID TYPE DESCRIPTION 0.924 27462 pthread primes_thread [primes] -0.927 27463 pthread primes_thread [primes] -0.928 27464 pthread primes_thread [primes] -0.928 27465 pthread primes_thread [primes] +0.927 27463 pthread primes_thread [primes] +0.928 27464 pthread primes_thread [primes] +0.928 27465 pthread primes_thread [primes] ^C The thread name ("primes_thread" in this example) is resolved from debuginfo. diff --git a/tools/memleak_example.txt b/tools/memleak_example.txt index 4d4a2665dfc6..93ab18f799c0 100644 --- a/tools/memleak_example.txt +++ b/tools/memleak_example.txt @@ -29,7 +29,7 @@ inspect each allocation individually -- you get a nice summary of which stack is responsible for a large leak. Occasionally, you do want the individual allocation details. Perhaps the same -stack is allocating various sizes and you want to confirm which sizes are +stack is allocating various sizes and you want to confirm which sizes are prevalent. Use the -a switch: # ./memleak -p $(pidof allocs) -a @@ -109,18 +109,18 @@ to reduce the memory overhead. 
To avoid false positives, allocations younger than a certain age (500ms by default) are not printed. To change this threshold, use the -o switch. -By default, memleak prints its output every 5 seconds. To change this -interval, pass the interval as a positional parameter to memleak. You can +By default, memleak prints its output every 5 seconds. To change this +interval, pass the interval as a positional parameter to memleak. You can also control the number of times the output will be printed before exiting. For example: # ./memleak 1 10 ... will print the outstanding allocation statistics every second, for ten -times, and then exit. +times, and then exit. memleak may introduce considerable overhead if your application or kernel is -allocating and freeing memory at a very high rate. In that case, you can +allocating and freeing memory at a very high rate. In that case, you can control the overhead by sampling every N-th allocation. For example, to sample roughly 10% of the allocations and print the outstanding allocations every 5 seconds, 3 times before quitting: @@ -142,9 +142,9 @@ Attaching to pid 2614, Ctrl+C to quit. main+0x6d [allocs] __libc_start_main+0xf0 [libc-2.21.so] -Note that even though the application leaks 16 bytes of memory every second, +Note that even though the application leaks 16 bytes of memory every second, the report (printed every 5 seconds) doesn't "see" all the allocations because -of the sampling rate applied. +of the sampling rate applied. Profiling in memory part is hard to be accurate because of BPF infrastructure. memleak keeps misjudging memory leak on the complicated environment which has @@ -177,7 +177,7 @@ Attaching to pid 2623, Ctrl+C to quit. 
0x0000559b478700b7 main+0x4a7 [redis-server] 0x00007fdf47029d90 __libc_start_call_main+0x80 [libc.so.6] -When using the --symbols-prefix argument, memleak can trace the third-party memory +When using the --symbols-prefix argument, memleak can trace the third-party memory allocations, such as jemalloc whose symbols are usually identified by the "je_" prefix in redis project. diff --git a/tools/netqtop.c b/tools/netqtop.c index e64ed7fdbefe..e0bf122db7e0 100644 --- a/tools/netqtop.c +++ b/tools/netqtop.c @@ -1,7 +1,7 @@ #include #include -#if IFNAMSIZ != 16 +#if IFNAMSIZ != 16 #error "IFNAMSIZ != 16 is not supported" #endif #define MAX_QUEUE_NUM 1024 @@ -90,7 +90,7 @@ TRACEPOINT_PROBE(net, net_dev_start_xmit){ return 0; } updata_data(data, skb->len); - + return 0; } @@ -122,6 +122,6 @@ TRACEPOINT_PROBE(net, netif_receive_skb){ return 0; } updata_data(data, skb.len); - + return 0; } diff --git a/tools/netqtop.py b/tools/netqtop.py index 9aa314f09118..5f9167158006 100755 --- a/tools/netqtop.py +++ b/tools/netqtop.py @@ -38,11 +38,11 @@ def print_table(table, qnum): # ---- print headers ---------------- headers = [ - "QueueID", - "avg_size", - "[0, 64)", - "[64, 512)", - "[512, 2K)", + "QueueID", + "avg_size", + "[0, 64)", + "[64, 512)", + "[512, 2K)", "[2K, 16K)", "[16K, 64K)" ] @@ -94,7 +94,7 @@ def print_table(table, qnum): ] else: data = [k,0,0,0,0,0,0,0] - + # print a line per queue avg = 0 if data[2] != 0: @@ -117,7 +117,7 @@ def print_table(table, qnum): )) else: print() - + # ------- print total -------------- print(" Total %-11s%-11s%-11s%-11s%-11s%-11s" % ( to_str(tAVG), diff --git a/tools/netqtop_example.txt b/tools/netqtop_example.txt index 443cfb715f69..ea11a65d0be8 100644 --- a/tools/netqtop_example.txt +++ b/tools/netqtop_example.txt @@ -1,16 +1,16 @@ Demonstrations of netqtop. -netqtop traces the kernel functions performing packet transmit (xmit_one) -and packet receive (__netif_receive_skb_core) on data link layer. 
The tool -not only traces every packet via a specified network interface, but also accounts -the PPS, BPS and average size of packets as well as packet amounts (categorized by -size range) on sending and receiving direction respectively. Results are printed -as tables, which can be used to understand traffic load allocation on each queue -of interested network interface to see if it is balanced. And the overall performance +netqtop traces the kernel functions performing packet transmit (xmit_one) +and packet receive (__netif_receive_skb_core) on data link layer. The tool +not only traces every packet via a specified network interface, but also accounts +the PPS, BPS and average size of packets as well as packet amounts (categorized by +size range) on sending and receiving direction respectively. Results are printed +as tables, which can be used to understand traffic load allocation on each queue +of interested network interface to see if it is balanced. And the overall performance is provided in the buttom. 
-For example, suppose you want to know current traffic on lo, and print result +For example, suppose you want to know current traffic on lo, and print result every second: # ./netqtop.py -n lo -i 1 Thu Sep 10 11:28:39 2020 @@ -91,33 +91,33 @@ To see PPS and BPS of each queue, use -t: # ./netqtop.py -n lo -i 1 -t Thu Sep 10 11:37:02 2020 TX - QueueID avg_size [0, 64) [64, 512) [512, 2K) [2K, 16K) [16K, 64K) BPS PPS + QueueID avg_size [0, 64) [64, 512) [512, 2K) [2K, 16K) [16K, 64K) BPS PPS 0 114 0 10 0 0 0 1.11K 10.0 Total 114 0 10 0 0 0 1.11K 10.0 RX - QueueID avg_size [0, 64) [64, 512) [512, 2K) [2K, 16K) [16K, 64K) BPS PPS + QueueID avg_size [0, 64) [64, 512) [512, 2K) [2K, 16K) [16K, 64K) BPS PPS 0 100 4 6 0 0 0 1000.0 10.0 Total 100 4 6 0 0 0 1000.0 10.0 ----------------------------------------------------------------------------------------------- Thu Sep 10 11:37:03 2020 TX - QueueID avg_size [0, 64) [64, 512) [512, 2K) [2K, 16K) [16K, 64K) BPS PPS + QueueID avg_size [0, 64) [64, 512) [512, 2K) [2K, 16K) [16K, 64K) BPS PPS 0 271 0 3 1 0 0 1.06K 4.0 Total 271 0 3 1 0 0 1.06K 4.0 RX - QueueID avg_size [0, 64) [64, 512) [512, 2K) [2K, 16K) [16K, 64K) BPS PPS + QueueID avg_size [0, 64) [64, 512) [512, 2K) [2K, 16K) [16K, 64K) BPS PPS 0 257 2 1 1 0 0 1.0K 4.0 Total 257 2 1 1 0 0 1.0K 4.0 ----------------------------------------------------------------------------------------------- -When filtering multi-queue NICs, you do not need to specify the number of queues, +When filtering multi-queue NICs, you do not need to specify the number of queues, the tool calculates it for you: # ./netqtop.py -n eth0 -t Thu Sep 10 11:39:21 2020 TX - QueueID avg_size [0, 64) [64, 512) [512, 2K) [2K, 16K) [16K, 64K) BPS PPS + QueueID avg_size [0, 64) [64, 512) [512, 2K) [2K, 16K) [16K, 64K) BPS PPS 0 0 0 0 0 0 0 0.0 0.0 1 0 0 0 0 0 0 0.0 0.0 2 0 0 0 0 0 0 0.0 0.0 @@ -153,7 +153,7 @@ TX Total 141 2 9 0 0 0 1.52K 11.0 RX - QueueID avg_size [0, 64) [64, 512) [512, 2K) [2K, 16K) [16K, 
64K) BPS PPS + QueueID avg_size [0, 64) [64, 512) [512, 2K) [2K, 16K) [16K, 64K) BPS PPS 0 127 3 9 0 0 0 1.5K 12.0 1 0 0 0 0 0 0 0.0 0.0 2 0 0 0 0 0 0 0.0 0.0 diff --git a/tools/nfsslower_example.txt b/tools/nfsslower_example.txt index 823b64acc7ea..d6bea9c0ddac 100644 --- a/tools/nfsslower_example.txt +++ b/tools/nfsslower_example.txt @@ -57,7 +57,7 @@ TIME COMM PID T BYTES OFF_KB LAT(ms) FILENAME This shows all NFS_READS that were more than 1ms. Depending on your latency to your fileserver, you might need to tweak this value to -remove +remove A threshold of 0 will trace all operations. Warning: the output will be verbose, as it will include all file system cache hits. diff --git a/tools/old/tcptop.py b/tools/old/tcptop.py index bcdbd591346e..072d6dc72d03 100755 --- a/tools/old/tcptop.py +++ b/tools/old/tcptop.py @@ -131,7 +131,7 @@ def range_check(string): bpf_probe_read_kernel(&family, sizeof(family), &sk->__sk_common.skc_family); FILTER_FAMILY - + if (family == AF_INET) { struct ipv4_key_t ipv4_key = {.pid = pid}; bpf_get_current_comm(&ipv4_key.name, sizeof(ipv4_key.name)); @@ -211,7 +211,7 @@ def range_check(string): return 0; FILTER_FAMILY - + if (family == AF_INET) { struct ipv4_key_t ipv4_key = {.pid = pid}; bpf_get_current_comm(&ipv4_key.name, sizeof(ipv4_key.name)); diff --git a/tools/reset-trace_example.txt b/tools/reset-trace_example.txt index 37b2232ac325..d944bd0c2b03 100644 --- a/tools/reset-trace_example.txt +++ b/tools/reset-trace_example.txt @@ -185,7 +185,7 @@ And again with quiet: Here is an example of reset-trace detecting an unrelated tracing session: -# ./reset-trace.sh +# ./reset-trace.sh Noticed unrelated tracing file /sys/kernel/debug/tracing/set_ftrace_filter isn't set as expected. Not resetting (-F to force, -v for verbose). 
And verbose: diff --git a/tools/runqlat_example.txt b/tools/runqlat_example.txt index 857e5165df37..968651551159 100644 --- a/tools/runqlat_example.txt +++ b/tools/runqlat_example.txt @@ -6,7 +6,7 @@ how long tasks spent waiting their turn to run on-CPU. Here is a heavily loaded system: -# ./runqlat +# ./runqlat Tracing run queue latency... Hit Ctrl-C to end. ^C usecs : count distribution diff --git a/tools/runqlen_example.txt b/tools/runqlen_example.txt index 60c76feca81a..5f1cadd0c92b 100644 --- a/tools/runqlen_example.txt +++ b/tools/runqlen_example.txt @@ -39,7 +39,7 @@ runqlat tool. Here's an example of an issue that runqlen can identify. Starting with the system-wide summary: -# ./runqlen.py +# ./runqlen.py Sampling run queue length... Hit Ctrl-C to end. ^C runqlen : count distribution @@ -204,7 +204,7 @@ quickly. The -O option prints run queue occupancy: the percentage of time that there was work queued waiting its turn. Eg: -# ./runqlen.py -OT 1 +# ./runqlen.py -OT 1 Sampling run queue length... Hit Ctrl-C to end. 19:54:53 @@ -225,7 +225,7 @@ runqocc: 40.83% This can also be examined per-CPU: -# ./runqlen.py -COT 1 +# ./runqlen.py -COT 1 Sampling run queue length... Hit Ctrl-C to end. 19:55:03 diff --git a/tools/sslsniff_example.txt b/tools/sslsniff_example.txt index 4d7b754ab42f..9599e419af21 100644 --- a/tools/sslsniff_example.txt +++ b/tools/sslsniff_example.txt @@ -191,7 +191,7 @@ optional arguments: Size of captured buffer -l, --latency show function latency --handshake show SSL handshake latency, enabled only if latency - option is on. + option is on. --extra-lib EXTRA_LIB Intercept calls from extra library (format: lib_type:lib_path) diff --git a/tools/statsnoop_example.txt b/tools/statsnoop_example.txt index 08aa0e781995..22728f98fecd 100644 --- a/tools/statsnoop_example.txt +++ b/tools/statsnoop_example.txt @@ -4,7 +4,7 @@ Demonstrations of statsnoop, the Linux eBPF/bcc version. 
statsnoop traces the different stat() syscalls system-wide, and prints various details. Example output: -# ./statsnoop +# ./statsnoop PID COMM FD ERR PATH 31126 bash 0 0 . 31126 bash -1 2 /usr/local/sbin/iconfig diff --git a/tools/swapin_example.txt b/tools/swapin_example.txt index e958813db2e6..e31cca02f2d2 100644 --- a/tools/swapin_example.txt +++ b/tools/swapin_example.txt @@ -4,7 +4,7 @@ Demonstrations of swapin, the Linux BCC/eBPF version. This tool counts swapins by process, to show which process is affected by swapping. For example: -# swapin.py +# swapin.py Counting swap ins. Ctrl-C to end. 13:36:58 COMM PID COUNT diff --git a/tools/tcpcong_example.txt b/tools/tcpcong_example.txt index 837c3b20c899..85b9b9f27f30 100644 --- a/tools/tcpcong_example.txt +++ b/tools/tcpcong_example.txt @@ -1,16 +1,16 @@ Demonstrations of tcpcong, the Linux eBPF/bcc version. This tool traces linux kernel's tcp congestion control status change functions, -then calculate duration of every status and record it, at last prints it as -tables or histogram, which can be used for evaluating the tcp congestion +then calculate duration of every status and record it, at last prints it as +tables or histogram, which can be used for evaluating the tcp congestion algorithm's performance. For example: -./tcpcong +./tcpcong Tracing tcp congestion control status duration... Hit Ctrl-C to end. 
^C -LAddrPort RAddrPort Open_ms Dod_ms Rcov_ms Cwr_ms Los_ms Chgs +LAddrPort RAddrPort Open_ms Dod_ms Rcov_ms Cwr_ms Los_ms Chgs 192.168.219.3/34968 192.168.219.4/19230 884 12 102 507 0 2721 192.168.219.3/34976 192.168.219.4/19230 869 12 133 490 0 2737 192.168.219.3/34982 192.168.219.4/19230 807 0 0 699 0 3158 @@ -50,20 +50,20 @@ LAddrPort RAddrPort Open_ms Dod_ms Rcov_ms Cwr_ms Los_ms The example shows all tcp socket's congestion status duration for milliseconds, open_ms column is the duration of tcp connection in open status whose cwnd can -increase; dod_ms column is the duration of tcp connection in disorder status -who receives disordered packet; rcov_ms column is the duration of tcp -connection in recovery status who receives 3 duplicated acks; cwr_ms column +increase; dod_ms column is the duration of tcp connection in disorder status +who receives disordered packet; rcov_ms column is the duration of tcp +connection in recovery status who receives 3 duplicated acks; cwr_ms column is the duration of tcp connection who receives explicitly congest notifier and -two acks to reduce the cwnd. the last column chgs prints total status change +two acks to reduce the cwnd. the last column chgs prints total status change number of the socket. An interval can be provided, and also optionally a count. Eg, printing output every 1 second, and including timestamps (-T): -./tcpcong -T 1 3 +./tcpcong -T 1 3 Tracing tcp congestion control status duration... Hit Ctrl-C to end. 
07:37:55 -LAddrPort RAddrPort Open_ms Dod_ms Rcov_ms Cwr_ms Los_ms Chgs +LAddrPort RAddrPort Open_ms Dod_ms Rcov_ms Cwr_ms Los_ms Chgs 192.168.219.3/34968 192.168.219.4/19230 742 15 82 311 0 1678 192.168.219.3/34976 192.168.219.4/19230 700 12 98 340 0 1965 192.168.219.3/34982 192.168.219.4/19230 634 0 1 516 0 2471 @@ -102,7 +102,7 @@ LAddrPort RAddrPort Open_ms Dod_ms Rcov_ms Cwr_ms Los_ms 192.168.219.3/60948 192.168.219.4/19228 597 11 76 293 0 1641 07:37:57 -LAddrPort RAddrPort Open_ms Dod_ms Rcov_ms Cwr_ms Los_ms Chgs +LAddrPort RAddrPort Open_ms Dod_ms Rcov_ms Cwr_ms Los_ms Chgs 192.168.219.3/34968 192.168.219.4/19230 469 9 255 265 0 1305 192.168.219.3/34976 192.168.219.4/19230 580 11 91 316 0 1916 192.168.219.3/34982 192.168.219.4/19230 566 0 0 433 0 2092 @@ -182,52 +182,52 @@ LAddrPort RAddrPort Open_ms Dod_ms Rcov_ms Cwr_ms Los_ms An local port and remote port can be specified, and also optionally a count. Eg printing output every 1 second, and including timestamps (-T) for local ports 30000-40000 and remote ports 19225-19227: -./tcpcong -T -L 30000-40000 -R 19225-19227 1 3 +./tcpcong -T -L 30000-40000 -R 19225-19227 1 3 Tracing tcp congestion control status duration... Hit Ctrl-C to end. 
07:39:11 -LAddrPort RAddrPort Open_ms Dod_ms Rcov_ms Cwr_ms Los_ms Chgs +LAddrPort RAddrPort Open_ms Dod_ms Rcov_ms Cwr_ms Los_ms Chgs 192.168.219.3/39070 192.168.219.1/19225 668 4 32 455 0 1706 192.168.219.3/39098 192.168.219.1/19225 692 4 38 424 0 2110 192.168.219.3/39112 192.168.219.1/19225 564 0 2 593 0 2291 192.168.219.3/39120 192.168.219.1/19225 599 0 4 555 0 2387 07:39:12 -LAddrPort RAddrPort Open_ms Dod_ms Rcov_ms Cwr_ms Los_ms Chgs +LAddrPort RAddrPort Open_ms Dod_ms Rcov_ms Cwr_ms Los_ms Chgs 192.168.219.3/39070 192.168.219.1/19225 576 3 27 391 0 1525 192.168.219.3/39098 192.168.219.1/19225 580 3 36 379 0 1893 192.168.219.3/39112 192.168.219.1/19225 474 1 10 512 0 2009 192.168.219.3/39120 192.168.219.1/19225 505 1 9 483 0 2022 07:39:13 -LAddrPort RAddrPort Open_ms Dod_ms Rcov_ms Cwr_ms Los_ms Chgs +LAddrPort RAddrPort Open_ms Dod_ms Rcov_ms Cwr_ms Los_ms Chgs 192.168.219.3/39070 192.168.219.1/19225 546 6 27 418 0 1659 192.168.219.3/39098 192.168.219.1/19225 564 4 40 390 0 1937 192.168.219.3/39112 192.168.219.1/19225 479 0 3 514 0 2008 192.168.219.3/39120 192.168.219.1/19225 515 0 4 479 0 1982 The (-u) option can be specified for recording the duration as miroseconds. -Eg printing output every 1 second, and including timestamps (-T) and +Eg printing output every 1 second, and including timestamps (-T) and microseconds (-u) for local ports 30000-40000 and remote ports 19225-19227: -./tcpcong -T -u -L 30000-40000 -R 19225-19227 1 3 +./tcpcong -T -u -L 30000-40000 -R 19225-19227 1 3 Tracing tcp congestion control status duration... Hit Ctrl-C to end. 
07:39:44 -LAddrPort RAddrPort Open_us Dod_us Rcov_us Cwr_us Los_us Chgs +LAddrPort RAddrPort Open_us Dod_us Rcov_us Cwr_us Los_us Chgs 192.168.219.3/39070 192.168.219.1/19225 600971 3232 38601 509796 0 1843 192.168.219.3/39098 192.168.219.1/19225 667184 5585 26285 453575 0 1969 192.168.219.3/39112 192.168.219.1/19225 580982 22 1502 569479 0 2210 192.168.219.3/39120 192.168.219.1/19225 600280 201 955 550752 0 2327 07:39:45 -LAddrPort RAddrPort Open_us Dod_us Rcov_us Cwr_us Los_us Chgs +LAddrPort RAddrPort Open_us Dod_us Rcov_us Cwr_us Los_us Chgs 192.168.219.3/39070 192.168.219.1/19225 567189 2029 25966 404698 0 1612 192.168.219.3/39098 192.168.219.1/19225 597201 2263 24073 376454 0 1578 192.168.219.3/39112 192.168.219.1/19225 500792 846 9297 489264 0 1850 192.168.219.3/39120 192.168.219.1/19225 518700 94 749 480171 0 1967 07:39:46 -LAddrPort RAddrPort Open_us Dod_us Rcov_us Cwr_us Los_us Chgs +LAddrPort RAddrPort Open_us Dod_us Rcov_us Cwr_us Los_us Chgs 192.168.219.3/39070 192.168.219.1/19225 587340 5324 37035 370066 0 1602 192.168.219.3/39098 192.168.219.1/19225 532986 5630 31624 345336 0 1319 192.168.219.3/39112 192.168.219.1/19225 481936 1129 6244 510235 0 1909 @@ -262,8 +262,8 @@ fe80::bace:f6ff:fe14:d21c/32814 fe80::bace:f6ff:fe43:fe96/19226 841706 103 fe80::bace:f6ff:fe14:d21c/32816 fe80::bace:f6ff:fe43:fe96/19226 633320 0 0 286584 0 565 -The distribution of congestion status duration can be printed as a histogram -with the -d option and also optionally a count. Eg printing output every +The distribution of congestion status duration can be printed as a histogram +with the -d option and also optionally a count. Eg printing output every 1 second for microseconds, and including timestamps (-T): ./tcpcong.py -d -u -T 1 2 Tracing tcp congestion control status duration... Hit Ctrl-C to end. 
diff --git a/tools/tcpsubnet_example.txt b/tools/tcpsubnet_example.txt index 49576d6784d5..b41adda1867d 100644 --- a/tools/tcpsubnet_example.txt +++ b/tools/tcpsubnet_example.txt @@ -53,7 +53,7 @@ to, Eg: With this information, we can come up with a reasonable range of IPs to monitor, Eg: - + # tcpsubnet.py 192.30.253.110/27,0.0.0.0/0 Tracing... Output every 1 secs. Hit Ctrl-C to end [03/05/18 22:38:58] diff --git a/tools/tcpsynbl_example.txt b/tools/tcpsynbl_example.txt index 716b55c10cdf..c0d0b15b0ee1 100644 --- a/tools/tcpsynbl_example.txt +++ b/tools/tcpsynbl_example.txt @@ -6,7 +6,7 @@ This lets you see how close your applications are to hitting the backlog limit and dropping SYNs (causing performance issues with SYN retransmits). For example: -# ./tcpsynbl.py +# ./tcpsynbl.py Tracing SYN backlog size. Ctrl-C to end. ^C diff --git a/tools/tcptop.py b/tools/tcptop.py index 57646bf2d9be..e83bb80bd790 100755 --- a/tools/tcptop.py +++ b/tools/tcptop.py @@ -133,7 +133,7 @@ def range_check(string): bpf_probe_read_kernel(&family, sizeof(family), &sk->__sk_common.skc_family); FILTER_FAMILY - + if (family == AF_INET) { struct ipv4_key_t ipv4_key = {.pid = pid}; bpf_get_current_comm(&ipv4_key.name, sizeof(ipv4_key.name)); diff --git a/tools/tcptracer.py b/tools/tcptracer.py index 0404b5a3e2f7..8134b2e3f61e 100755 --- a/tools/tcptracer.py +++ b/tools/tcptracer.py @@ -190,7 +190,7 @@ u64 pid = bpf_get_current_pid_tgid(); ##FILTER_PID## - + u16 family = sk->__sk_common.skc_family; ##FILTER_FAMILY## @@ -296,7 +296,7 @@ u16 family = skp->__sk_common.skc_family; ##FILTER_FAMILY## - + u8 ipver = 0; if (check_family(skp, AF_INET)) { ipver = 4; @@ -385,7 +385,7 @@ u64 pid = bpf_get_current_pid_tgid(); ##FILTER_PID## - + u16 family = skp->__sk_common.skc_family; ##FILTER_FAMILY## @@ -473,7 +473,7 @@ #endif ##FILTER_NETNS## - + u16 family = newsk->__sk_common.skc_family; ##FILTER_FAMILY## diff --git a/tools/ttysnoop.py b/tools/ttysnoop.py index 55641985bcde..ed537e8d6711 100755 
--- a/tools/ttysnoop.py +++ b/tools/ttysnoop.py @@ -141,7 +141,7 @@ def usage(): if (iocb->ki_filp->f_inode->i_ino != PTS) return 0; /** - * commit 8cd54c1c8480 iov_iter: separate direction from flavour + * commit 8cd54c1c8480 iov_iter: separate direction from flavour * `type` is represented by iter_type and data_source separately */ #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) diff --git a/tools/vfscount_example.txt b/tools/vfscount_example.txt index 32a50ffd5ccd..939111452358 100644 --- a/tools/vfscount_example.txt +++ b/tools/vfscount_example.txt @@ -1,7 +1,7 @@ Demonstrations of vfscount, the Linux eBPF/bcc version. -This counts VFS calls during time, by tracing all kernel functions beginning +This counts VFS calls during time, by tracing all kernel functions beginning with "vfs_", By defaults, the time is 99999999s # ./vfscount Tracing... Ctrl-C to end. diff --git a/tools/wqlat_example.txt b/tools/wqlat_example.txt index 85d2ecdc9dbd..9d96183d8af2 100644 --- a/tools/wqlat_example.txt +++ b/tools/wqlat_example.txt @@ -1,12 +1,12 @@ Demonstrations of wqlat, the Linux eBPF/bcc version. This tool traces work's waiting on workqueue, and records the distribution -of work's queuing latency (time), printing this as a histogram when Ctrl-C +of work's queuing latency (time), printing this as a histogram when Ctrl-C is hit. For example: -./wqlat +./wqlat Tracing work queue request latency time... Hit Ctrl-C to end. ^C usecs : count distribution @@ -30,7 +30,7 @@ waiting latency is between 1us and 32us.The highest latency seen while tracing is between 4 and 8 ms:the last row printed, for which there is 1 work We can also specify the per workqueue option (-W), along with interval -and count parameters. Eg, printing out every 1 second, and including +and count parameters. Eg, printing out every 1 second, and including timestamps(-T): ./wqlat -T -W 1 2 @@ -61,7 +61,7 @@ An tracing one workqueue (-w) can be specified, along with interval and count. 
Eg, printing output every 1 second, and including Timestamps(-T) and workqueue nvmet_tcp_wq: -./wqlat -T -w nvmet_tcp_wq 1 2 +./wqlat -T -w nvmet_tcp_wq 1 2 Tracing work queue request latency time... Hit Ctrl-C to end. 06:18:03 diff --git a/tools/xfsdist_example.txt b/tools/xfsdist_example.txt index c6465016d34a..4596010af396 100644 --- a/tools/xfsdist_example.txt +++ b/tools/xfsdist_example.txt @@ -4,7 +4,7 @@ Demonstrations of xfsdist, the Linux eBPF/bcc version. xfsdist traces XFS reads, writes, opens, and fsyncs, and summarizes their latency as a power-of-2 histogram. For example: -# ./xfsdist +# ./xfsdist Tracing XFS operation latency... Hit Ctrl-C to end. ^C diff --git a/tools/xfsslower.py b/tools/xfsslower.py index e7d54479c959..bc622382e8ea 100755 --- a/tools/xfsslower.py +++ b/tools/xfsslower.py @@ -141,7 +141,7 @@ if (FILTER_PID) return 0; - + struct key_t key = {}; key.id = id; key.type = TRACE_OPEN; diff --git a/tools/xfsslower_example.txt b/tools/xfsslower_example.txt index 4c6ae3331270..44532fd47841 100644 --- a/tools/xfsslower_example.txt +++ b/tools/xfsslower_example.txt @@ -108,7 +108,7 @@ offsets: a sequential workload. A -j option will print just the fields (parsable output, csv): -# ./xfsslower -j 1 +# ./xfsslower -j 1 ENDTIME_us,TASK,PID,TYPE,BYTES,OFFSET_b,LATENCY_us,FILE 125563830632,randread.pl,12155,R,8192,27824193536,1057,data1 125565050578,randread.pl,12155,R,8192,16908525568,1969,data1 diff --git a/tools/zfsdist_example.txt b/tools/zfsdist_example.txt index a02d4dc0edd1..b3b21a38ac69 100644 --- a/tools/zfsdist_example.txt +++ b/tools/zfsdist_example.txt @@ -5,7 +5,7 @@ zfsdist traces ZFS reads, writes, opens, and fsyncs, and summarizes their latency as a power-of-2 histogram. It has been written to work on ZFS on Linux (http://zfsonlinux.org). For example: -# ./zfsdist +# ./zfsdist Tracing ZFS operation latency... Hit Ctrl-C to end. 
^C diff --git a/tools/zfsslower_example.txt b/tools/zfsslower_example.txt index fddae6e266b2..0b20febe1402 100644 --- a/tools/zfsslower_example.txt +++ b/tools/zfsslower_example.txt @@ -5,7 +5,7 @@ zfsslower shows ZFS reads, writes, opens, and fsyncs, slower than a threshold. It has been written to work on ZFS on Linux (http://zfsonlinux.org). For example: -# ./zfsslower +# ./zfsslower Tracing ZFS operations slower than 10 ms TIME COMM PID T BYTES OFF_KB LAT(ms) FILENAME 06:31:28 dd 25570 W 131072 38784 303.92 data1