[
{
"request": "Write a BPF code that increments a counter in a LRU hash map every time the sched_migrate_task tracepoint is triggered, using the cgroup ID as the key and 1 as the value.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_tracing.h>\n#include \"bits.bpf.h\"\n#include \"maps.bpf.h\"\n\nstruct {\n __uint(type, BPF_MAP_TYPE_LRU_HASH);\n __uint(max_entries, 1024);\n __type(key, u64);\n __type(value, u64);\n} cgroup_sched_migrations_total SEC(\".maps\");\n\nSEC(\"tp_btf/sched_migrate_task\")\nint BPF_PROG(sched_migrate_task)\n{\n u64 cgroup_id = bpf_get_current_cgroup_id();\n increment_map(&cgroup_sched_migrations_total, &cgroup_id, 1);\n return 0;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that traces the kill system call, capturing information about the sender and receiver of the signal, the signal number, and the return value. The captured data is stored in a hash map.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_helpers.h>\n#include <bpf/bpf_tracing.h>\n\n#define MAX_ENTRIES\t10240\n#define TASK_COMM_LEN\t16\n\nstruct event {\n\tunsigned int pid;\n\tunsigned int tpid;\n\tint sig;\n\tint ret;\n\tchar comm[TASK_COMM_LEN];\n};\n\nstruct {\n\t__uint(type, BPF_MAP_TYPE_HASH);\n\t__uint(max_entries, MAX_ENTRIES);\n\t__type(key, __u32);\n\t__type(value, struct event);\n} values SEC(\".maps\");\n\n\nstatic int probe_entry(pid_t tpid, int sig)\n{\n\tstruct event event = {};\n\t__u64 pid_tgid;\n\t__u32 tid;\n\n\tpid_tgid = bpf_get_current_pid_tgid();\n\ttid = (__u32)pid_tgid;\n\tevent.pid = pid_tgid >> 32;\n\tevent.tpid = tpid;\n\tevent.sig = sig;\n\tbpf_get_current_comm(event.comm, sizeof(event.comm));\n\tbpf_map_update_elem(&values, &tid, &event, BPF_ANY);\n\treturn 0;\n}\n\nstatic int probe_exit(void *ctx, int ret)\n{\n\t__u64 pid_tgid = bpf_get_current_pid_tgid();\n\t__u32 tid = (__u32)pid_tgid;\n\tstruct event *eventp;\n\n\teventp = bpf_map_lookup_elem(&values, &tid);\n\tif (!eventp)\n\t\treturn 0;\n\n\teventp->ret = ret;\n\tbpf_printk(\"PID %d (%s) sent signal %d \",\n\t\t eventp->pid, eventp->comm, eventp->sig);\n\tbpf_printk(\"to PID %d, ret = %d\",\n\t\t eventp->tpid, ret);\n\ncleanup:\n\tbpf_map_delete_elem(&values, &tid);\n\treturn 0;\n}\n\nSEC(\"tracepoint/syscalls/sys_enter_kill\")\nint kill_entry(struct trace_event_raw_sys_enter *ctx)\n{\n\tpid_t tpid = (pid_t)ctx->args[0];\n\tint sig = (int)ctx->args[1];\n\n\treturn probe_entry(tpid, sig);\n}\n\nSEC(\"tracepoint/syscalls/sys_exit_kill\")\nint kill_exit(struct trace_event_raw_sys_exit *ctx)\n{\n\treturn probe_exit(ctx, ctx->ret);\n}\n\nchar LICENSE[] SEC(\"license\") = \"Dual BSD/GPL\";\n"
},
{
"request": "Write a BPF code that intercepts the \"kfree_skb\" event in the kernel and counts the number of times a socket buffer (sk_buff) is freed based on various criteria such as Ethernet protocol, IP protocol, port number, and drop reason using a hash map.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_tracing.h>\n#include <bpf/bpf_endian.h>\n#include \"maps.bpf.h\"\n\n#define ETH_P_IPV6 0x86DD\n#define ETH_P_IP 0x0800\n\nstruct kfree_skb_key_t {\n u16 eth_proto;\n u16 ip_proto;\n u16 port;\n u16 reason;\n};\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, 10240);\n __type(key, struct kfree_skb_key_t);\n __type(value, u64);\n} kfree_skb_total SEC(\".maps\");\n\nSEC(\"tp_btf/kfree_skb\") int BPF_PROG(kfree_skb, struct sk_buff *skb, void *location, enum skb_drop_reason reason)\n{\n struct kfree_skb_key_t key;\n struct ethhdr eth_hdr;\n struct iphdr ip_hdr;\n struct ipv6hdr ipv6_hdr;\n struct tcphdr tcp_hdr;\n struct udphdr udp_hdr;\n u16 ip_proto = 0;\n\n // Same as skb_mac_header_was_set:\n // * https://elixir.bootlin.com/linux/v6.5-rc1/source/include/linux/skbuff.h#L2899\n if (skb->mac_header == (typeof(skb->mac_header)) ~0U) {\n return 0;\n }\n\n if (bpf_probe_read_kernel(ð_hdr, sizeof(eth_hdr), skb->head + skb->mac_header)) {\n return 0;\n }\n\n key.eth_proto = bpf_ntohs(eth_hdr.h_proto);\n\n if (!key.eth_proto && !bpf_ntohs(skb->protocol)) {\n return 0;\n }\n\n switch (key.eth_proto) {\n case ETH_P_IP:\n if (bpf_probe_read_kernel(&ip_hdr, sizeof(ip_hdr), skb->head + skb->network_header) < 0) {\n return 0;\n }\n ip_proto = ip_hdr.protocol;\n break;\n case ETH_P_IPV6:\n if (bpf_probe_read_kernel(&ipv6_hdr, sizeof(ipv6_hdr), skb->head + skb->network_header) < 0) {\n return 0;\n }\n ip_proto = ipv6_hdr.nexthdr;\n break;\n }\n\n key.ip_proto = ip_proto;\n\n // Same as skb_transport_header_was_set:\n // * https://elixir.bootlin.com/linux/v6.5-rc1/source/include/linux/skbuff.h#L2860\n if (skb->transport_header == (typeof(skb->transport_header)) ~0U) {\n return 0;\n }\n\n // Using key.ip_proto directly is not allowed for some reason:\n //\n // ; switch (key.ip_proto) {\n // 48: (54) w1 &= 65535\n // R1 32-bit pointer arithmetic prohibited\n switch (ip_proto) {\n case IPPROTO_TCP:\n if (bpf_probe_read_kernel(&tcp_hdr, sizeof(tcp_hdr), skb->head + skb->transport_header) < 0) {\n return 0;\n }\n key.port = bpf_ntohs(tcp_hdr.dest);\n break;\n case IPPROTO_UDP:\n if (bpf_probe_read_kernel(&udp_hdr, sizeof(udp_hdr), skb->head + skb->transport_header) < 0) {\n return 0;\n }\n key.port = bpf_ntohs(udp_hdr.dest);\n break;\n }\n\n key.reason = reason;\n\n increment_map(&kfree_skb_total, &key, 1);\n\n return 0;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that traces calls to the `oom_kill_process` function and records the value of the `cgroup_id` variable in the `oom_kills_total` map using perf events.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_helpers.h>\n#include <bpf/bpf_tracing.h>\n#include <bpf/bpf_core_read.h>\n\nstruct {\n __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);\n __uint(key_size, sizeof(u32));\n __uint(value_size, sizeof(u32));\n} oom_kills_total SEC(\".maps\");\n\nSEC(\"kprobe/oom_kill_process\")\nint BPF_KPROBE(kprobe__oom_kill_process, struct oom_control *oc, const char *message)\n{\n u64 cgroup_id = 0;\n\n struct mem_cgroup *mcg = BPF_CORE_READ(oc, memcg);\n if (mcg) {\n cgroup_id = BPF_CORE_READ(mcg, css.cgroup, kn, id);\n }\n\n bpf_perf_event_output(ctx, &oom_kills_total, BPF_F_CURRENT_CPU, &cgroup_id, sizeof(cgroup_id));\n\n return 0;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that uses a tracepoint to monitor the syscalls->sys_enter_execve event and extracts information such as process ID, user ID, parent process ID, and command name of the executing process. The code then outputs this information using a perf event.",
"bpf": "// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)\n#include <vmlinux.h>\n#include <bpf/bpf_helpers.h>\n#include <bpf/bpf_core_read.h>\n#include \"execsnoop.h\"\n\nstruct {\n\t__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);\n\t__uint(key_size, sizeof(u32));\n\t__uint(value_size, sizeof(u32));\n} events SEC(\".maps\");\n\nSEC(\"tracepoint/syscalls/sys_enter_execve\")\nint tracepoint__syscalls__sys_enter_execve(struct trace_event_raw_sys_enter* ctx)\n{\n\tu64 id;\n\tpid_t pid, tgid;\n\tstruct event event={0};\n\tstruct task_struct *task;\n\n\tuid_t uid = (u32)bpf_get_current_uid_gid();\n\tid = bpf_get_current_pid_tgid();\n\ttgid = id >> 32;\n\n\tevent.pid = tgid;\n\tevent.uid = uid;\n\ttask = (struct task_struct*)bpf_get_current_task();\n\tevent.ppid = BPF_CORE_READ(task, real_parent, tgid);\n\tbpf_get_current_comm(&event.comm, sizeof(event.comm));\n\tbpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event, sizeof(event));\n\treturn 0;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n\n"
},
{
"request": "Write a BPF code that attaches to the `readline` function in the `bash` binary and prints the content read along with the process ID and name.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_helpers.h>\n#include <bpf/bpf_tracing.h>\n\n#define TASK_COMM_LEN 16\n#define MAX_LINE_SIZE 80\n\n/* Format of u[ret]probe section definition supporting auto-attach:\n * u[ret]probe/binary:function[+offset]\n *\n * binary can be an absolute/relative path or a filename; the latter is resolved to a\n * full binary path via bpf_program__attach_uprobe_opts.\n *\n * Specifying uprobe+ ensures we carry out strict matching; either \"uprobe\" must be\n * specified (and auto-attach is not possible) or the above format is specified for\n * auto-attach.\n */\nSEC(\"uretprobe//bin/bash:readline\")\nint BPF_KRETPROBE(printret, const void *ret)\n{\n\tchar str[MAX_LINE_SIZE];\n\tchar comm[TASK_COMM_LEN];\n\tu32 pid;\n\n\tif (!ret)\n\t\treturn 0;\n\n\tbpf_get_current_comm(&comm, sizeof(comm));\n\n\tpid = bpf_get_current_pid_tgid() >> 32;\n\tbpf_probe_read_user_str(str, sizeof(str), ret);\n\n\tbpf_printk(\"PID %d (%s) read: %s \", pid, comm, str);\n\n\treturn 0;\n};\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";"
},
{
"request": "Write a BPF code that parses incoming packets and counts the total number of packets based on their Ethernet type, protocol, and destination port. The code skips counting packets with port numbers in the ephemeral range (32768 and above) to keep the metrics tidy.",
"bpf": "#include \"vmlinux.h\"\n#include <bpf/bpf_helpers.h>\n#include <bpf/bpf_endian.h>\n#include \"maps.bpf.h\"\n\n#define ETH_P_IPV6 0x86DD\n#define ETH_P_IP 0x0800\n\nstruct packet_key_t {\n u16 eth_type;\n u16 proto;\n u16 port;\n};\n\nstruct hdr_cursor {\n void *pos;\n};\n\nstruct {\n __uint(type, BPF_MAP_TYPE_LRU_HASH);\n __uint(max_entries, 1024);\n __type(key, struct packet_key_t);\n __type(value, u64);\n} xdp_incoming_packets_total SEC(\".maps\");\n\n// Primitive header extraction macros. See xdp-tutorial repo for more robust parsers:\n// * https://github.com/xdp-project/xdp-tutorial/blob/master/common/parsing_helpers.h\n#define parse_args struct hdr_cursor *cursor, void *data_end, struct\n#define parse_header(type) \\\n static bool parse_##type(parse_args type **hdr) \\\n { \\\n size_t offset = sizeof(**hdr); \\\n \\\n if (cursor->pos + offset > data_end) { \\\n return false; \\\n } \\\n \\\n *hdr = cursor->pos; \\\n cursor->pos += offset; \\\n \\\n return true; \\\n }\n\nparse_header(ethhdr);\nparse_header(iphdr);\nparse_header(ipv6hdr);\nparse_header(tcphdr);\nparse_header(udphdr);\n\nstatic int xdp_trace(struct xdp_md *ctx)\n{\n void *data_end = (void *) (long) ctx->data_end;\n void *data = (void *) (long) ctx->data;\n struct packet_key_t key = {};\n struct hdr_cursor cursor = { .pos = data };\n struct ethhdr *eth_hdr;\n struct iphdr *ip_hdr;\n struct ipv6hdr *ipv6_hdr;\n struct udphdr *udp_hdr;\n struct tcphdr *tcp_hdr;\n\n if (!parse_ethhdr(&cursor, data_end, ð_hdr)) {\n return XDP_PASS;\n }\n\n key.eth_type = bpf_ntohs(eth_hdr->h_proto);\n\n switch (eth_hdr->h_proto) {\n case bpf_htons(ETH_P_IP):\n if (!parse_iphdr(&cursor, data_end, &ip_hdr)) {\n return XDP_PASS;\n }\n\n key.proto = ip_hdr->protocol;\n break;\n case bpf_htons(ETH_P_IPV6):\n if (!parse_ipv6hdr(&cursor, data_end, &ipv6_hdr)) {\n return XDP_PASS;\n }\n\n key.proto = ipv6_hdr->nexthdr;\n break;\n }\n\n switch (key.proto) {\n case IPPROTO_TCP:\n if (!parse_tcphdr(&cursor, data_end, &tcp_hdr)) {\n return XDP_PASS;\n }\n\n key.port = bpf_ntohs(tcp_hdr->dest);\n break;\n case IPPROTO_UDP:\n if (!parse_udphdr(&cursor, data_end, &udp_hdr)) {\n return XDP_PASS;\n }\n\n key.port = bpf_ntohs(udp_hdr->dest);\n break;\n }\n\n // Skip ephemeral port range to keep metrics tidy\n if (key.port >= 32768) {\n return XDP_PASS;\n }\n\n increment_map(&xdp_incoming_packets_total, &key, 1);\n\n return XDP_PASS;\n}\n\nSEC(\"xdp/lo\")\nint trace_lo(struct xdp_md *ctx)\n{\n return xdp_trace(ctx);\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that increments a BPF map called 'syscalls_total' with the system call ID every time a system call is entered. The map has a maximum of 1024 entries and uses a hash table.",
"bpf": "#include \"vmlinux.h\"\n#include <bpf/bpf_tracing.h>\n#include \"maps.bpf.h\"\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, 1024);\n __type(key, u64);\n __type(value, u64);\n} syscalls_total SEC(\".maps\");\n\nSEC(\"tp_btf/sys_enter\")\nint BPF_PROG(sys_enter, struct pt_regs *regs, long id)\n{\n increment_map(&syscalls_total, &id, 1);\n return 0;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that increments a hash map called page_cache_ops_total for each specific kernel function that is probed.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_helpers.h>\n#include <bpf/bpf_tracing.h>\n#include <bpf/bpf_core_read.h>\n#include \"maps.bpf.h\"\n#include \"regs-ip.bpf.h\"\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, 4);\n __type(key, u64);\n __type(value, u64);\n} page_cache_ops_total SEC(\".maps\");\n\nSEC(\"kprobe/add_to_page_cache_lru\")\nint add_to_page_cache_lru(struct pt_regs *ctx)\n{\n u64 ip = KPROBE_REGS_IP_FIX(PT_REGS_IP_CORE(ctx));\n increment_map(&page_cache_ops_total, &ip, 1);\n return 0;\n}\n\nSEC(\"kprobe/mark_page_accessed\")\nint mark_page_accessed(struct pt_regs *ctx)\n{\n u64 ip = KPROBE_REGS_IP_FIX(PT_REGS_IP_CORE(ctx));\n increment_map(&page_cache_ops_total, &ip, 1);\n return 0;\n}\n\n// This function is usually not visible.\nSEC(\"kprobe/folio_account_dirtied\")\nint folio_account_dirtied(struct pt_regs *ctx)\n{\n u64 ip = KPROBE_REGS_IP_FIX(PT_REGS_IP_CORE(ctx));\n increment_map(&page_cache_ops_total, &ip, 1);\n return 0;\n}\n\nSEC(\"kprobe/mark_buffer_dirty\")\nint mark_buffer_dirty(struct pt_regs *ctx)\n{\n u64 ip = KPROBE_REGS_IP_FIX(PT_REGS_IP_CORE(ctx));\n increment_map(&page_cache_ops_total, &ip, 1);\n return 0;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that monitors the latency of socket accept operations and keeps track of the frequency and duration of these operations based on the listening port and latency range.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_helpers.h>\n#include <bpf/bpf_tracing.h>\n#include <bpf/bpf_core_read.h>\n#include \"bits.bpf.h\"\n#include \"maps.bpf.h\"\n\n// 27 buckets for latency, max range is 33.6s .. 67.1s\n#define MAX_LATENCY_SLOT 26\n\n// Max number of listening ports we expect to see on the host\n#define MAX_PORTS 1024\n\nstruct socket_latency_key_t {\n u16 port;\n u64 slot;\n};\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, 10240);\n __type(key, struct request_sock *);\n __type(value, u64);\n} start SEC(\".maps\");\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, (MAX_LATENCY_SLOT + 1) * MAX_PORTS);\n __type(key, struct socket_latency_key_t);\n __type(value, u64);\n} accept_latency_seconds SEC(\".maps\");\n\nSEC(\"kprobe/inet_csk_reqsk_queue_hash_add\")\nint BPF_KPROBE(kprobe__inet_csk_reqsk_queue_hash_add, struct sock *sk, struct request_sock *req)\n{\n u64 ts = bpf_ktime_get_ns();\n bpf_map_update_elem(&start, &req, &ts, BPF_ANY);\n return 0;\n}\n\nSEC(\"kprobe/inet_csk_accept\")\nint BPF_KPROBE(kprobe__inet_csk_accept, struct sock *sk)\n{\n u64 *tsp, delta_us, latency_slot;\n struct inet_connection_sock *icsk = (struct inet_connection_sock *) sk;\n struct request_sock *req = BPF_CORE_READ(icsk, icsk_accept_queue).rskq_accept_head;\n struct socket_latency_key_t latency_key = {};\n\n tsp = bpf_map_lookup_elem(&start, &req);\n if (!tsp) {\n return 0;\n }\n\n delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;\n\n // Latency histogram key\n latency_slot = log2l(delta_us);\n\n // Cap latency bucket at max value\n if (latency_slot > MAX_LATENCY_SLOT) {\n latency_slot = MAX_LATENCY_SLOT;\n }\n\n latency_key.port = BPF_CORE_READ(sk, __sk_common).skc_num;\n latency_key.slot = latency_slot;\n increment_map(&accept_latency_seconds, &latency_key, 1);\n\n latency_key.slot = MAX_LATENCY_SLOT + 1;\n increment_map(&accept_latency_seconds, &latency_key, delta_us);\n\n bpf_map_delete_elem(&start, &req);\n\n return 0;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that tracks cache misses and cache references using perf events and updates two hash maps, 'llc_misses_total' and 'llc_references_total', with the corresponding CPU and sample period data.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_helpers.h>\n#include <bpf/bpf_tracing.h>\n#include \"maps.bpf.h\"\n\n#define MAX_CPUS 512\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, MAX_CPUS);\n __type(key, u32);\n __type(value, u64);\n} llc_references_total SEC(\".maps\");\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, MAX_CPUS);\n __type(key, u32);\n __type(value, u64);\n} llc_misses_total SEC(\".maps\");\n\nstatic int trace_event(void *map, u32 cpu, u64 sample_period)\n{\n increment_map(map, &cpu, sample_period);\n\n return 0;\n}\n\nSEC(\"perf_event/type=0,config=3,frequency=1\")\nint on_cache_miss(struct bpf_perf_event_data *ctx)\n{\n return trace_event(&llc_misses_total, bpf_get_smp_processor_id(), ctx->sample_period);\n}\n\nSEC(\"perf_event/type=0,config=2,frequency=1\")\nint on_cache_reference(struct bpf_perf_event_data *ctx)\n{\n return trace_event(&llc_references_total, bpf_get_smp_processor_id(), ctx->sample_period);\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that tracks the latency of block I/O requests and categorizes them into latency buckets based on the duration of the request.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_helpers.h>\n#include <bpf/bpf_core_read.h>\n#include \"bits.bpf.h\"\n#include \"maps.bpf.h\"\n\n// Max number of disks we expect to see on the host\n#define MAX_DISKS 255\n\n// 27 buckets for latency, max range is 33.6s .. 67.1s\n#define MAX_LATENCY_SLOT 27\n\n#define MKDEV(ma, mi) ((mi & 0xff) | (ma << 8) | ((mi & ~0xff) << 12))\n\n#define REQ_OP_BITS 8\n#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1)\n\nstruct disk_latency_key_t {\n u32 dev;\n u8 op;\n u64 slot;\n};\n\nextern int LINUX_KERNEL_VERSION __kconfig;\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, 10000);\n __type(key, struct request *);\n __type(value, u64);\n} start SEC(\".maps\");\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, (MAX_LATENCY_SLOT + 1) * MAX_DISKS);\n __type(key, struct disk_latency_key_t);\n __type(value, u64);\n} bio_latency_seconds SEC(\".maps\");\n\n/**\n * commit d152c682f03c (\"block: add an explicit ->disk backpointer to the\n * request_queue\") and commit f3fa33acca9f (\"block: remove the ->rq_disk\n * field in struct request\") make some changes to `struct request` and\n * `struct request_queue`. Now, to get the `struct gendisk *` field in a CO-RE\n * way, we need both `struct request` and `struct request_queue`.\n * see:\n * https://github.com/torvalds/linux/commit/d152c682f03c\n * https://github.com/torvalds/linux/commit/f3fa33acca9f\n */\nstruct request_queue___x {\n struct gendisk *disk;\n} __attribute__((preserve_access_index));\n\nstruct request___x {\n struct request_queue___x *q;\n struct gendisk *rq_disk;\n} __attribute__((preserve_access_index));\n\nstatic __always_inline struct gendisk *get_disk(void *request)\n{\n struct request___x *r = request;\n\n if (bpf_core_field_exists(r->rq_disk))\n return BPF_CORE_READ(r, rq_disk);\n return BPF_CORE_READ(r, q, disk);\n}\n\nstatic __always_inline int trace_rq_start(struct request *rq)\n{\n u64 ts = bpf_ktime_get_ns();\n bpf_map_update_elem(&start, &rq, &ts, 0);\n return 0;\n}\n\nSEC(\"raw_tp/block_rq_insert\")\nint block_rq_insert(struct bpf_raw_tracepoint_args *ctx)\n{\n /**\n * commit a54895fa (v5.11-rc1) changed tracepoint argument list\n * from TP_PROTO(struct request_queue *q, struct request *rq)\n * to TP_PROTO(struct request *rq)\n */\n if (LINUX_KERNEL_VERSION < KERNEL_VERSION(5, 11, 0)) {\n return trace_rq_start((void *) ctx->args[1]);\n } else {\n return trace_rq_start((void *) ctx->args[0]);\n }\n}\n\nSEC(\"raw_tp/block_rq_issue\")\nint block_rq_issue(struct bpf_raw_tracepoint_args *ctx)\n{\n /**\n * commit a54895fa (v5.11-rc1) changed tracepoint argument list\n * from TP_PROTO(struct request_queue *q, struct request *rq)\n * to TP_PROTO(struct request *rq)\n */\n if (LINUX_KERNEL_VERSION < KERNEL_VERSION(5, 11, 0)) {\n return trace_rq_start((void *) ctx->args[1]);\n } else {\n return trace_rq_start((void *) ctx->args[0]);\n }\n}\n\nSEC(\"raw_tp/block_rq_complete\")\nint block_rq_complete(struct bpf_raw_tracepoint_args *ctx)\n{\n u64 *tsp, flags, delta_us, latency_slot;\n struct gendisk *disk;\n struct request *rq = (struct request *) ctx->args[0];\n struct disk_latency_key_t latency_key = {};\n\n tsp = bpf_map_lookup_elem(&start, &rq);\n if (!tsp) {\n return 0;\n }\n\n // Delta in microseconds\n delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;\n\n // Latency histogram key\n latency_slot = log2l(delta_us);\n\n // Cap latency bucket at max value\n if (latency_slot > MAX_LATENCY_SLOT) {\n latency_slot = MAX_LATENCY_SLOT;\n }\n\n disk = get_disk(rq);\n 
flags = BPF_CORE_READ(rq, cmd_flags);\n\n latency_key.slot = latency_slot;\n latency_key.dev = disk ? MKDEV(BPF_CORE_READ(disk, major), BPF_CORE_READ(disk, first_minor)) : 0;\n latency_key.op = flags & REQ_OP_MASK;\n\n increment_map(&bio_latency_seconds, &latency_key, 1);\n\n latency_key.slot = MAX_LATENCY_SLOT + 1;\n increment_map(&bio_latency_seconds, &latency_key, delta_us);\n\n bpf_map_delete_elem(&start, &rq);\n\n return 0;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that counts the number of calls to the malloc function in the libc.so.6 library and stores the count in a BPF hash map called libc_malloc_calls_total.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_helpers.h>\n#include \"bits.bpf.h\"\n#include \"maps.bpf.h\"\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, 1024);\n __type(key, u64);\n __type(value, u64);\n} libc_malloc_calls_total SEC(\".maps\");\n\nSEC(\"uprobe/libc.so.6:malloc\")\nint do_count(struct pt_regs *ctx)\n{\n u64 cgroup_id = bpf_get_current_cgroup_id();\n\n increment_map(&libc_malloc_calls_total, &cgroup_id, 1);\n\n return 0;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that calculates and records the latency of a kernel function called \"shrink_node\" into different latency buckets, represented as an array map, based on the duration of the function execution.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_helpers.h>\n#include \"bits.bpf.h\"\n#include \"maps.bpf.h\"\n\n// 27 buckets for latency, max range is 33.6s .. 67.1s\n#define MAX_LATENCY_SLOT 26\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, 10240);\n __type(key, u32);\n __type(value, u64);\n} start SEC(\".maps\");\n\nstruct {\n __uint(type, BPF_MAP_TYPE_ARRAY);\n __uint(max_entries, MAX_LATENCY_SLOT + 1);\n __type(key, u32);\n __type(value, u64);\n} shrink_node_latency_seconds SEC(\".maps\");\n\nSEC(\"kprobe/shrink_node\")\nint shrink_node_enter(struct pt_regs *ctx)\n{\n u32 pid = bpf_get_current_pid_tgid();\n u64 ts = bpf_ktime_get_ns();\n bpf_map_update_elem(&start, &pid, &ts, BPF_ANY);\n return 0;\n}\n\nSEC(\"kretprobe/shrink_node\")\nint shrink_node_exit(struct pt_regs *ctx)\n{\n u32 pid = bpf_get_current_pid_tgid();\n u64 *tsp, latency_us, latency_slot;\n\n tsp = bpf_map_lookup_elem(&start, &pid);\n if (!tsp) {\n return 0;\n }\n\n // Latency in microseconds\n latency_us = (bpf_ktime_get_ns() - *tsp) / 1000;\n\n // Latency histogram key\n latency_slot = log2l(latency_us);\n\n // Cap latency bucket at max value\n if (latency_slot > MAX_LATENCY_SLOT) {\n latency_slot = MAX_LATENCY_SLOT;\n }\n\n increment_map(&shrink_node_latency_seconds, &latency_slot, 1);\n\n latency_slot = MAX_LATENCY_SLOT + 1;\n increment_map(&shrink_node_latency_seconds, &latency_slot, latency_us);\n\n bpf_map_delete_elem(&start, &pid);\n\n return 0;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that counts the total number of times a Python function is called using USDT (User Statically Defined Tracepoints) markers and saves the count in a hash map.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_helpers.h>\n#include <bpf/usdt.bpf.h>\n#include \"bits.bpf.h\"\n#include \"maps.bpf.h\"\n\nstruct call_t {\n char module[128];\n char function[128];\n};\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, 1024);\n __type(key, struct call_t);\n __type(value, u64);\n} python_function_entries_total SEC(\".maps\");\n\nSEC(\"usdt/python3:python:function__entry\")\nint BPF_USDT(do_count, void *arg0, void *arg1, void *arg2)\n{\n struct call_t call = {};\n\n // https://docs.python.org/3/howto/instrumentation.html#available-static-markers\n bpf_probe_read_user_str(&call.module, sizeof(call.module), arg0);\n bpf_probe_read_user_str(&call.function, sizeof(call.function), arg1);\n\n increment_map(&python_function_entries_total, &call, 1);\n\n return 0;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that handles TCP socket connections and checks if the receive slow start threshold (rcv_ssthresh) is below a certain value, incrementing a map if it is. The code also includes support for both fentry/fexit and kprobe methods for tracing.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_tracing.h>\n#include <bpf/bpf_core_read.h>\n#include \"maps.bpf.h\"\n\n/* Minimum value for tp->rcv_ssthresh that is not considered a clamp */\n#define MIN_CLAMP 32 * 1024\n\nstruct {\n __uint(type, BPF_MAP_TYPE_ARRAY);\n __uint(max_entries, 1);\n __type(key, u32);\n __type(value, u64);\n} tcp_window_clamps_total SEC(\".maps\");\n\nstatic int handle_tcp_sock(struct tcp_sock *tp)\n{\n u32 zero = 0, rcv_ssthresh;\n\n if (!tp) {\n return 0;\n }\n\n rcv_ssthresh = BPF_CORE_READ(tp, rcv_ssthresh);\n\n if (rcv_ssthresh < MIN_CLAMP) {\n increment_map(&tcp_window_clamps_total, &zero, 1);\n }\n\n return 0;\n}\n\n#ifdef FENTRY_SUPPORT\n// If fentry/fexit is supported, use it for simpler and faster probe.\n// You need to pass -DFENTRY_SUPPORT in compiler flags to enable this.\n\nSEC(\"fexit/tcp_try_rmem_schedule\")\nint BPF_PROG(tcp_try_rmem_schedule_exit, struct sock *sk)\n{\n return handle_tcp_sock((struct tcp_sock *) sk);\n}\n\n#else\n// Otherwise, fall back to good old kprobe.\n\nstruct {\n __uint(type, BPF_MAP_TYPE_LRU_HASH);\n __uint(max_entries, 1024);\n __type(key, u64);\n __type(value, struct sock *);\n} tcp_rmem_schedule_enters SEC(\".maps\");\n\nstatic u64 enter_key()\n{\n u32 tgid = bpf_get_current_pid_tgid() >> 32;\n if (tgid) {\n // If tgid is present, use it as high bits in the compound key.\n return ((u64) tgid) << 32;\n }\n\n // If tgid is zero, combine it with processor id to prevent tgid / cpu collisions.\n return ((u64) tgid << 32) | (u32) bpf_get_smp_processor_id();\n}\n\nSEC(\"kprobe/tcp_try_rmem_schedule\")\nint BPF_KPROBE(tcp_try_rmem_schedule, struct sock *sk)\n{\n u64 key = enter_key();\n\n bpf_map_update_elem(&tcp_rmem_schedule_enters, &key, &sk, BPF_NOEXIST);\n\n return 0;\n}\n\nSEC(\"kretprobe/tcp_try_rmem_schedule\")\nint BPF_KRETPROBE(tcp_try_rmem_schedule_ret)\n{\n u64 key = enter_key();\n struct sock **skp = bpf_map_lookup_elem(&tcp_rmem_schedule_enters, &key);\n\n if (!skp) {\n return 0;\n }\n\n bpf_map_delete_elem(&tcp_rmem_schedule_enters, &key);\n\n return handle_tcp_sock((struct tcp_sock *) *skp);\n}\n\n#endif\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that traces the \"openat\" system calls and prints the process ID of the processes entering those system calls, with an option to filter the trace based on a specific process ID.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_helpers.h>\n\n/// @description \"Process ID to trace\"\nconst volatile int pid_target = 0;\n\nSEC(\"tracepoint/syscalls/sys_enter_openat\")\nint tracepoint__syscalls__sys_enter_openat(struct trace_event_raw_sys_enter* ctx)\n{\n\tu64 id = bpf_get_current_pid_tgid();\n\tu32 pid = id;\n\n\tif (pid_target && pid_target != pid)\n\t\treturn false;\n\t// Use bpf_printk to print the process information\n\tbpf_printk(\"Process ID: %d enter sys openat\\n\", pid);\n\treturn 0;\n}\n\n/// \"Trace open family syscalls.\"\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that captures exit events of processes or threads and sends the data to user-space for post-processing.",
"bpf": "// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause\n/* Copyright (c) 2020 Facebook */\n#include \"vmlinux.h\"\n#include <bpf/bpf_helpers.h>\n#include <bpf/bpf_tracing.h>\n#include <bpf/bpf_core_read.h>\n#include \"exitsnoop.h\"\n\nchar LICENSE[] SEC(\"license\") = \"Dual BSD/GPL\";\n\nstruct {\n\t__uint(type, BPF_MAP_TYPE_RINGBUF);\n\t__uint(max_entries, 256 * 1024);\n} rb SEC(\".maps\");\n\nSEC(\"tp/sched/sched_process_exit\")\nint handle_exit(struct trace_event_raw_sched_process_template* ctx)\n{\n\tstruct task_struct *task;\n\tstruct event *e;\n\tpid_t pid, tid;\n\tu64 id, ts, *start_ts, duration_ns = 0;\n\t\n\t/* get PID and TID of exiting thread/process */\n\tid = bpf_get_current_pid_tgid();\n\tpid = id >> 32;\n\ttid = (u32)id;\n\n\t/* ignore thread exits */\n\tif (pid != tid)\n\t\treturn 0;\n\n\t/* reserve sample from BPF ringbuf */\n\te = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);\n\tif (!e)\n\t\treturn 0;\n\n\t/* fill out the sample with data */\n\ttask = (struct task_struct *)bpf_get_current_task();\n\n\te->duration_ns = duration_ns;\n\te->pid = pid;\n\te->ppid = BPF_CORE_READ(task, real_parent, tgid);\n\te->exit_code = (BPF_CORE_READ(task, exit_code) >> 8) & 0xff;\n\tbpf_get_current_comm(&e->comm, sizeof(e->comm));\n\n\t/* send data to user-space for post-processing */\n\tbpf_ringbuf_submit(e, 0);\n\treturn 0;\n}\n"
},
{
"request": "Write a BPF code that counts the number of times a timer starts using a raw tracepoint and stores the count in a hash map called \"raw_timer_starts_total\".",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_helpers.h>\n#include <bpf/bpf_core_read.h>\n#include \"maps.bpf.h\"\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, 1024);\n __type(key, u64);\n __type(value, u64);\n} raw_timer_starts_total SEC(\".maps\");\n\nSEC(\"raw_tp/timer_start\")\nint do_count(struct bpf_raw_tracepoint_args *ctx)\n{\n struct timer_list *timer = (struct timer_list *) ctx->args[0];\n u64 function = (u64) BPF_CORE_READ(timer, function);\n\n increment_map(&raw_timer_starts_total, &function, 1);\n\n return 0;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that counts the number of TCP SYN packets in the backlog and stores the count in a hash map. The code uses two kprobes to intercept TCPv4 and TCPv6 SYN packet receptions and then calculates the count by dividing the backlog by a specified multiplier.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_helpers.h>\n#include <bpf/bpf_tracing.h>\n#include <bpf/bpf_core_read.h>\n#include \"maps.bpf.h\"\n\n#define BUCKET_MULTIPLIER 50\n#define BUCKET_COUNT 20\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, BUCKET_COUNT + 2);\n __type(key, u64);\n __type(value, u64);\n} tcp_syn_backlog SEC(\".maps\");\n\nstatic int do_count(u64 backlog)\n{\n u64 bucket = backlog / BUCKET_MULTIPLIER;\n\n increment_map(&tcp_syn_backlog, &bucket, 1);\n increment_map(&tcp_syn_backlog, &bucket, backlog);\n\n return 0;\n}\n\nSEC(\"kprobe/tcp_v4_syn_recv_sock\")\nint BPF_KPROBE(kprobe__tcp_v4_syn_recv_sock, struct sock *sk)\n{\n return do_count(BPF_CORE_READ(sk, sk_ack_backlog) / 50);\n}\n\nSEC(\"kprobe/tcp_v6_syn_recv_sock\")\nint BPF_KPROBE(kprobe__tcp_v6_syn_recv_sock, struct sock *sk)\n{\n return do_count(BPF_CORE_READ(sk, sk_ack_backlog) / 50);\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that tracks the allocation and deallocation of BPF JIT pages by tracing the 'bpf_jit_binary_alloc' kernel function and updating a BPF map with the currently allocated pages.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_helpers.h>\n\nconst volatile u64 kaddr_bpf_jit_current = 0;\n\nstruct {\n __uint(type, BPF_MAP_TYPE_ARRAY);\n __uint(max_entries, 1);\n __type(key, u32);\n __type(value, u64);\n} bpf_jit_pages_currently_allocated SEC(\".maps\");\n\nstatic int update_current()\n{\n u32 zero_key = 0;\n s64 current_value = 0;\n\n if (!kaddr_bpf_jit_current) {\n return 0;\n }\n\n bpf_probe_read_kernel(¤t_value, sizeof(current_value), (const void *) kaddr_bpf_jit_current);\n bpf_map_update_elem(&bpf_jit_pages_currently_allocated, &zero_key, ¤t_value, BPF_ANY);\n\n return 0;\n}\n\n// Sometimes bpf_jit_charge_modmem / bpf_jit_uncharge_modmem get elided,\n// so we're tracing the outer entrypoint here instead. It's common to see\n// calls to bpf_jit_binary_free not being traced too, so we skip that.\nSEC(\"kprobe/bpf_jit_binary_alloc\")\nint trace_change()\n{\n return update_current();\n}\n\n// This code runs right after program is attached, allowing initialization\n// of the metric in the absence of any updates from bpf jit.\nSEC(\"uprobe//proc/self/exe:post_attach_mark\")\nint do_init()\n{\n return update_current();\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that counts the number of received UDP packets per local port, excluding ephemeral ports above 32768, and stores the count in a hash map.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_tracing.h>\n#include \"maps.bpf.h\"\n\n#define UPPER_PORT_BOUND 32768\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, UPPER_PORT_BOUND);\n __type(key, u16);\n __type(value, u64);\n} udp_fail_queue_rcv_skbs_total SEC(\".maps\");\n\nSEC(\"tp_btf/udp_fail_queue_rcv_skb\")\nint BPF_PROG(udp_fail_queue_rcv_skb, int rc, struct sock *sk)\n{\n u16 lport = sk->__sk_common.skc_num;\n\n // We are not interested in ephemeral ports for outbound connections.\n // There's a ton of them and they don't easily correlate with services.\n // To still have some visibility, we put all of the ephemeral ports into\n // the same local_port=\"0\" label and defer to debugging with tracepoints\n // to find what port and service are having issues.\n if (lport >= UPPER_PORT_BOUND) {\n lport = 0;\n }\n\n increment_map(&udp_fail_queue_rcv_skbs_total, &lport, 1);\n\n return 0;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that traces the \"do_unlinkat\" function and prints the process ID, filename, and return value when the function is called and returned.",
"bpf": "// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause\n/* Copyright (c) 2021 Sartura */\n#include \"vmlinux.h\"\n#include <bpf/bpf_helpers.h>\n#include <bpf/bpf_tracing.h>\n#include <bpf/bpf_core_read.h>\n\nchar LICENSE[] SEC(\"license\") = \"Dual BSD/GPL\";\n\nSEC(\"kprobe/do_unlinkat\")\nint BPF_KPROBE(do_unlinkat, int dfd, struct filename *name)\n{\n\tpid_t pid;\n\tconst char *filename;\n\n\tpid = bpf_get_current_pid_tgid() >> 32;\n\tfilename = BPF_CORE_READ(name, name);\n\tbpf_printk(\"KPROBE ENTRY pid = %d, filename = %s\\n\", pid, filename);\n\treturn 0;\n}\n\nSEC(\"kretprobe/do_unlinkat\")\nint BPF_KRETPROBE(do_unlinkat_exit, long ret)\n{\n\tpid_t pid;\n\n\tpid = bpf_get_current_pid_tgid() >> 32;\n\tbpf_printk(\"KPROBE EXIT: pid = %d, ret = %ld\\n\", pid, ret);\n\treturn 0;\n}\n"
},
{
"request": "Write a BPF code that increments a counter in a hash map every time a timer is started, with the maximum number of entries in the map set to 1024.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_tracing.h>\n#include \"maps.bpf.h\"\n\nstruct {\n __uint(type, BPF_MAP_TYPE_HASH);\n __uint(max_entries, 1024);\n __type(key, u64);\n __type(value, u64);\n} timer_starts_total SEC(\".maps\");\n\nSEC(\"tp_btf/timer_start\")\nint BPF_PROG(timer_start, struct timer_list *timer)\n{\n u64 function = (u64) timer->function;\n increment_map(&timer_starts_total, &function, 1);\n return 0;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
},
{
"request": "Write a BPF code that traces the \"sys_enter_write\" system call and prints a message when it is triggered by a specified process ID.",
"bpf": "/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */\n#define BPF_NO_GLOBAL_DATA\n#include <linux/bpf.h>\n#include <bpf/bpf_helpers.h>\n#include <bpf/bpf_tracing.h>\n\ntypedef unsigned int u32;\ntypedef int pid_t;\nconst pid_t pid_filter = 0;\n\nchar LICENSE[] SEC(\"license\") = \"Dual BSD/GPL\";\n\nSEC(\"tp/syscalls/sys_enter_write\")\nint handle_tp(void *ctx)\n{\n\tpid_t pid = bpf_get_current_pid_tgid() >> 32;\n\tif (pid_filter && pid != pid_filter)\n\t\treturn 0;\n\tbpf_printk(\"BPF triggered sys_enter_write from PID %d.\\n\", pid);\n\treturn 0;\n}\n"
},
{
"request": "Write a BPF code that tracks the total number of times each softirq is invoked using a per-cpu hash map.",
"bpf": "#include <vmlinux.h>\n#include <bpf/bpf_tracing.h>\n#include \"maps.bpf.h\"\n\nstruct {\n __uint(type, BPF_MAP_TYPE_PERCPU_HASH);\n __uint(max_entries, NR_SOFTIRQS);\n __type(key, u32);\n __type(value, u64);\n} softirqs_total SEC(\".maps\");\n\nSEC(\"tp_btf/softirq_entry\")\nint BPF_PROG(softirq_entry, unsigned int vec_nr)\n{\n increment_map(&softirqs_total, &vec_nr, 1);\n return 0;\n}\n\nchar LICENSE[] SEC(\"license\") = \"GPL\";\n"
}
]