-
Notifications
You must be signed in to change notification settings - Fork 451
Expand file tree
/
Copy pathuprobe_base.bpf.c
More file actions
795 lines (692 loc) · 21.5 KB
/
uprobe_base.bpf.c
File metadata and controls
795 lines (692 loc) · 21.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
/*
* This code runs using bpf in the Linux kernel.
* Copyright 2022- The Yunshan Networks Authors.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* SPDX-License-Identifier: GPL-2.0
*/
/* Common upper bound on entries for the hash maps defined in this file. */
#define HASH_ENTRIES_MAX 40960

/*
 * Key for http2_tcp_seq_map: identifies one socket read in one process
 * by (tgid, fd) plus the TCP sequence number at the end of the read.
 */
struct http2_tcp_seq_key {
	int tgid;
	int fd;
	__u32 tcp_seq_end;
};
/* *INDENT-OFF* */
/*
 * In uprobe go_tls_read_exit()
 * Save the TCP sequence number before the syscall(read())
 *
 * In uprobe http2 read() (after syscall read()), lookup TCP sequence number recorded previously on the map.
 * e.g.: In go_http2serverConn_processHeaders(), get TCP sequence before syscall read().
 *
 * Note: Use for after uprobe read() only.
 *
 * key:   struct http2_tcp_seq_key {tgid, fd, tcp_seq_end}
 * value: TCP sequence number (__u32) before the read() syscall
 */
struct bpf_map_def SEC("maps") http2_tcp_seq_map = {
	.type = BPF_MAP_TYPE_LRU_HASH,
	.key_size = sizeof(struct http2_tcp_seq_key),
	.value_size = sizeof(__u32),
	.max_entries = HASH_ENTRIES_MAX,
	.feat_flags = FEATURE_FLAG_UPROBE_GOLANG,
};
/*
 * The binary executable file offset of the GO process
 * key: pid (tgid)
 * value: struct ebpf_proc_info (Go version, struct member offsets, itabs)
 *
 * NOTE: plain (non-LRU) hash; entries are deleted explicitly on process
 * exit in do_process_exit().
 */
struct bpf_map_def SEC("maps") proc_info_map = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(int),
	.value_size = sizeof(struct ebpf_proc_info),
	.max_entries = HASH_ENTRIES_MAX,
	.feat_flags = FEATURE_FLAG_UPROBE_GOLANG,
};
// Process ID and coroutine ID, marking the coroutine in the system.
// Packed: used directly as a BPF map key, so there must be no padding.
struct go_key {
	__u32 tgid;
	__u64 goid;
} __attribute__((packed));
// The mapping of coroutines to ancestors, the map is updated when a new
// coroutine is created
// key : current goroutine (struct go_key)
// value : ancestor goid (__u64)
// NOTE: "ancerstor" is a historical typo for "ancestor"; the name is kept
// because other files including this one refer to it.
struct bpf_map_def SEC("maps") go_ancerstor_map = {
	.type = BPF_MAP_TYPE_LRU_HASH,
	.key_size = sizeof(struct go_key),
	.value_size = sizeof(__u64),
	.max_entries = HASH_ENTRIES_MAX,
	.feat_flags = FEATURE_FLAG_UPROBE_GOLANG,
};
// Used to determine the timeout, as a termination condition for finding
// ancestors (see is_final_ancestor() / get_rw_goid()).
// key : current goroutine (struct go_key)
// value: timestamp (ns, bpf_ktime_get_ns) when the data was inserted
struct bpf_map_def SEC("maps") go_rw_ts_map = {
	.type = BPF_MAP_TYPE_LRU_HASH,
	.key_size = sizeof(struct go_key),
	.value_size = sizeof(__u64),
	.max_entries = HASH_ENTRIES_MAX,
	.feat_flags = FEATURE_FLAG_UPROBE_GOLANG,
};
// Pass data between coroutine-creation entry and exit uprobes
// (enter_runtime_newproc1 -> exit_runtime_newproc1).
struct go_newproc_caller {
	__u64 goid; // goid of the parent (calling) goroutine
	void *sp;   // stack pointer at newproc1 entry, used to locate the
		    // return value slot in the exit probe
} __attribute__((packed));

// key:   pid_tgid of the thread running newproc1
// value: struct go_newproc_caller saved at function entry
struct bpf_map_def SEC("maps") pid_tgid_callerid_map = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(__u64),
	.value_size = sizeof(struct go_newproc_caller),
	.max_entries = HASH_ENTRIES_MAX,
	.feat_flags = FEATURE_FLAG_UPROBE_GOLANG,
};
/*
 * Goroutines Map: which goroutine is currently running on each thread.
 * Updated by the runtime.execute uprobe; cleared on thread exit.
 * key: pid_tgid ({tgid, pid} as one __u64)
 * value: goroutine ID (__u64)
 */
struct bpf_map_def SEC("maps") goroutines_map = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(__u64),
	.value_size = sizeof(__u64),
	.max_entries = MAX_SYSTEM_THREADS,
	.feat_flags = FEATURE_FLAG_UPROBE_GOLANG,
};
/* *INDENT-ON* */
// The first 16 bytes are fixed headers (fd, stream_id, header_len,
// value_len), and the total reported buffer does not exceed 1k.
#define HTTP2_BUFFER_INFO_SIZE (CAP_DATA_SIZE - 16)
// Make the eBPF validator happy: extra slack appended to the payload
// arrays below (presumably so bounded copies can never be proven to
// overrun — TODO confirm against the users of these structs).
// NOTE: "UESLESS" is a historical typo for "USELESS"; kept because this
// file may be included by other BPF sources that use the macro.
#define HTTP2_BUFFER_UESLESS (CAP_DATA_SIZE)

// Reassembled HTTP/2 header (name/value) data reported to user space.
struct __http2_buffer {
	__u32 fd;
	__u32 stream_id;
	__u32 header_len; // length of the header name in info[]
	__u32 value_len;  // length of the header value in info[]
	char info[HTTP2_BUFFER_INFO_SIZE + HTTP2_BUFFER_UESLESS];
};

// The first 8 bytes are fixed headers (stream_id, data_len),
// and the total reported buffer does not exceed 1k.
#define HTTP2_DATAFRAME_DATA_SIZE (CAP_DATA_SIZE - 8)

// HTTP/2 DATA-frame payload reported to user space.
struct __http2_dataframe {
	__u32 stream_id;
	__u32 data_len;
	char data[HTTP2_DATAFRAME_DATA_SIZE + HTTP2_BUFFER_UESLESS];
};
/* Byte offset of the payload area inside struct __socket_data. */
#define SOCKET_DATA_HEADER offsetof(typeof(struct __socket_data), data)

/*
 * Per-CPU scratch area used to assemble HTTP/2 capture data in place.
 * The anonymous unions overlay the HTTP/2 views (__http2_buffer /
 * __http2_dataframe) directly on top of the payload region of the
 * generic send layout {events_num, len, __socket_data}, so data can be
 * built through one view and submitted through the other without a copy.
 * All layouts are packed; the overlay depends on exact byte offsets.
 */
struct __http2_stack {
	union {
		union {
			char __raw[sizeof(struct __socket_data) + 8];
			struct {
				__u32 __unused_events_num;
				__u32 __unused_len;
				char __unused_header[SOCKET_DATA_HEADER];
				union {
					struct __http2_buffer http2_buffer;
					struct __http2_dataframe
					 http2_dataframe;
				};
			} __attribute__ ((packed));
		};
		struct {
			__u32 events_num;
			__u32 len;
			struct __socket_data send_buffer;
		} __attribute__ ((packed));
	};
	bool tls; // true when the captured data came over a TLS connection
} __attribute__ ((packed));

/* Single-entry per-CPU array holding the scratch buffer above. */
MAP_PERARRAY(http2_stack, __u32, struct __http2_stack, 1, FEATURE_FLAG_UPROBE_GOLANG)
/* Fetch this CPU's HTTP/2 scratch buffer (NULL only if lookup fails). */
static __inline struct __http2_stack *get_http2_stack(void)
{
	int zero = 0;
	return bpf_map_lookup_elem(&NAME(http2_stack), &zero);
}
/* Record whether the current HTTP/2 capture came over TLS. */
static __inline void update_http2_tls(bool tls)
{
	struct __http2_stack *stack = get_http2_stack();
	if (stack == NULL)
		return;
	stack->tls = tls;
}
/* True when the current HTTP/2 capture was flagged as TLS traffic. */
static __inline bool is_http2_tls(void)
{
	struct __http2_stack *stack = get_http2_stack();
	return stack ? stack->tls : false;
}
// The function address is used to set the hook point. itab is used for http2
// to obtain fd. After directly parsing the Go ELF file, the address of the
// function must be obtained, but the itab may not be obtained.
// 函数地址用于设置 hook 点. itab 用于 http2 获取 fd. 在直接解析 Go ELF 文件后,
// 一定能获取到函数的地址,但是不一定能获取 itab.
static __inline bool skip_http2_kprobe(void)
{
__u64 id;
pid_t pid;
id = bpf_get_current_pid_tgid();
pid = id >> 32;
struct ebpf_proc_info *info;
info = bpf_map_lookup_elem(&proc_info_map, &pid);
if (!info) {
return false;
}
if (info->net_TCPConn_itab) {
return true;
}
// HTTP2
if (info->crypto_tls_Conn_itab) {
return true;
}
// gRPC
if (info->credentials_syscallConn_itab) {
return true;
}
return false;
}
/*
 * Return the goroutine id currently scheduled on this thread
 * (recorded by the runtime.execute uprobe), or 0 if unknown.
 */
static __inline __u64 get_current_goroutine(void)
{
	__u64 thread_key = bpf_get_current_pid_tgid();
	__u64 *goid = bpf_map_lookup_elem(&goroutines_map, &thread_key);
	return goid ? *goid : 0;
}
/*
 * A goroutine is a "final ancestor" when it performed socket I/O
 * recently: its recorded read/write timestamp is within `timeout`
 * nanoseconds of `now`. Unknown goroutines are never final.
 */
static __inline bool is_final_ancestor(__u32 tgid, __u64 goid, __u64 now,
				       __u64 timeout)
{
	struct go_key key = { .tgid = tgid, .goid = goid };

	__u64 *last_io_ts = bpf_map_lookup_elem(&go_rw_ts_map, &key);
	if (last_io_ts == NULL)
		return false;

	return now < *last_io_ts + timeout;
}
// Try to find an ancestor coroutine that can represent this request.
// The ancestor coroutine needs to meet two conditions:
// 1. There have been socket read or write operations in the recent period of time
// 2. All of its ancestor coroutines do not satisfy condition 1
// If no such coroutine exists, mark itself as a coroutine that can represent the request and return.
//
// @timeout      ns window within which past socket I/O counts as "recent"
// @is_socket_io whether the caller itself is performing socket I/O
// @return       goid representing the request, or 0 if none
static __inline __u64 get_rw_goid(__u64 timeout, bool is_socket_io)
{
	__u32 tgid = (__u32) (bpf_get_current_pid_tgid() >> 32);
	__u64 ts = bpf_ktime_get_ns();
	__u64 goid = get_current_goroutine();
	if (goid == 0) {
		return 0;
	}

	// Walk at most 6 ancestor hops; the bound is unrolled so the
	// eBPF verifier sees a finite loop.
	__u64 ancestor = goid;
	int idx = 0;
#pragma unroll
	for (idx = 0; idx < 6; ++idx) {
		if (is_final_ancestor(tgid, ancestor, ts, timeout)) {
			return ancestor;
		}
		struct go_key key = {.tgid = tgid,.goid = ancestor };
		__u64 *newancestor =
		    bpf_map_lookup_elem(&go_ancerstor_map, &key);
		if (!newancestor) {
			break;
		}
		ancestor = *newancestor;
	}

	if (!is_socket_io) {
		return 0;
	}

	// No qualifying ancestor: record this goroutine's own I/O
	// timestamp so it can represent the request from now on.
	struct go_key key = {.tgid = tgid,.goid = goid };
	bpf_map_update_elem(&go_rw_ts_map, &key, &ts, BPF_ANY);

	return goid;
}
static __inline bool is_current_go_process(void)
{
__u32 tgid = (__u32) (bpf_get_current_pid_tgid() >> 32);
struct ebpf_proc_info *info =
bpf_map_lookup_elem(&proc_info_map, &tgid);
if (info && info->version) {
return true;
} else {
return false;
}
}
static __inline bool is_tcp_conn_interface(void *conn,
struct ebpf_proc_info *info)
{
struct go_interface i;
bpf_probe_read_user(&i, sizeof(i), conn);
return info ? i.type == info->net_TCPConn_itab : false;
}
static __inline int get_fd_from_tcp_conn_interface(void *conn,
struct ebpf_proc_info *info)
{
if (!is_tcp_conn_interface(conn, info)) {
return -1;
}
int offset_fd_sysfd = info->offsets[OFFSET_IDX_SYSFD_POLL_FD];
if (offset_fd_sysfd < 0)
return -1;
struct go_interface i = {};
void *ptr;
int fd;
bpf_probe_read_user(&i, sizeof(i), conn);
bpf_probe_read_user(&ptr, sizeof(ptr), i.ptr);
bpf_probe_read_user(&fd, sizeof(fd), ptr + offset_fd_sysfd);
return fd;
}
/*
 * Given a pointer to a crypto/tls.Conn struct, locate its embedded
 * net.Conn interface (at a per-binary offset) and resolve the fd from
 * it. Returns -1 when the offset is unknown.
 */
static __inline int get_fd_from_tls_conn_struct(void *conn,
						struct ebpf_proc_info *info)
{
	int conn_field_offset = info->offsets[OFFSET_IDX_CONN_TLS_CONN];
	if (conn_field_offset < 0)
		return -1;

	return get_fd_from_tcp_conn_interface(conn + conn_field_offset, info);
}
/*
 * Resolve the fd from a github.com/armon/go-proxyproto.Conn interface
 * value: the wrapped net.Conn interface sits at a fixed 8-byte offset
 * past the bufReader pointer inside the proxyproto.Conn struct
 * (layout observed in gdb, see dump below).
 */
static __inline int
get_fd_from_go_proxyproto_interface(void *conn, struct ebpf_proc_info *info)
{
	/* conn = {tab = 0x770a10
	 * <go:itab.*github.com/armon/go-proxyproto.Conn,net.Conn>, data = 0xc0001963c0}
	 * (gdb) x/16xg 0xc0001963c0
	 * 0xc0001963c0: 0x000000c0001947e0      0x0000000000770ac0
	 * 0xc0001963d0: 0x000000c0001bc090      0x0000000000000000
	 *
	 * struct github.com/armon/go-proxyproto.Conn {
	 *     bufio.Reader * bufReader;  (0x000000c0001947e0)
	 *     net.Conn      conn;        (tab net.TCPConn,net.Conn,
	 *                                 data 0x000000c0001bc090)
	 */
	struct go_interface i = {};
	bpf_probe_read_user(&i, sizeof(i), conn);
	// Skip the 8-byte bufReader pointer to reach the embedded net.Conn.
	void *proxyproto_conn = i.ptr + 8;
	// proxyproto_conn is 'net.TCPConn,net.Conn'
	return get_fd_from_tcp_conn_interface(proxyproto_conn, info);
}
static __inline bool is_tls_conn_interface(void *conn,
struct ebpf_proc_info *info)
{
struct go_interface i;
bpf_probe_read_user(&i, sizeof(i), conn);
return info ? i.type == info->crypto_tls_Conn_itab : false;
}
static __inline int get_fd_from_tls_conn_interface(void *conn,
struct ebpf_proc_info *info)
{
if (!is_tls_conn_interface(conn, info)) {
return -1;
}
struct go_interface i = {};
bpf_probe_read_user(&i, sizeof(i), conn);
int fd = get_fd_from_tls_conn_struct(i.ptr, info);
if (fd > 0)
return fd;
fd = get_fd_from_go_proxyproto_interface(i.ptr, info);
if (fd > 0) {
return fd;
}
return -1;
}
/*
 * Resolve the fd from an h2c.rwConn interface value: its data pointer
 * leads (via one more pointer dereference inside
 * get_fd_from_tcp_conn_interface) to the wrapped *net.TCPConn.
 */
static __inline int get_fd_from_h2c_rwConn_interface(void *conn, struct ebpf_proc_info
						     *info)
{
	/*
	 * The process of inferring the file descriptor (0x0000000000000004)
	 * through the 'conn':
	 * +(gdb) p conn
	 * +$3 = {tab = 0x70e270 <rwConn,net.Conn>, data = 0xc0000abe90}
	 * +(gdb) x/16xg 0xc0000abe90
	 * +0xc0000abe90: 0x000000000070e320      0x000000c000110020
	 * +(gdb) x/16xg 0x000000c000110020
	 * +0xc000110020: 0x000000c000128280      0x0000000000000000
	 * +(gdb) x/16xg 0x000000c000128280
	 * +0xc000128280: 0x0000000000000000      0x0000000000000000
	 * +0xc000128290: 0x0000000000000004      0x00007fdaac18f6e8
	 */
	struct go_interface i = {};
	bpf_probe_read_user(&i, sizeof(i), conn);
	return get_fd_from_tcp_conn_interface(i.ptr, info);
}
/*
 * Resolve the fd from a net.Conn interface value of any supported
 * concrete type. The TLS probe runs first so that a successful TLS
 * resolution also marks the per-CPU capture buffer as TLS traffic.
 * Returns the fd (> 0) on success, -1 otherwise.
 */
static __inline int
get_fd_from_tcp_or_tls_conn_interface(void *conn, struct ebpf_proc_info *info)
{
	/*
	 * Currently supported:
	 * go.itab.*net.TCPConn,net.Conn
	 * go.itab.*crypto/tls.Conn,net.Conn
	 * go.itab.*golang.org/x/net/http2/h2c.rwConn,net.Conn
	 * go:itab.*golang.org/x/net/http2/h2c.bufConn,net.Conn
	 */
	int fd;
	fd = get_fd_from_tls_conn_interface(conn, info);
	if (fd > 0) {
		// Only the TLS path flips the flag; the others leave it as-is.
		update_http2_tls(true);
		return fd;
	}
	fd = get_fd_from_tcp_conn_interface(conn, info);
	if (fd > 0) {
		return fd;
	}
	fd = get_fd_from_go_proxyproto_interface(conn, info);
	if (fd > 0) {
		return fd;
	}
	fd = get_fd_from_h2c_rwConn_interface(conn, info);
	if (fd > 0) {
		return fd;
	}
	return -1;
}
// Go implements a new way of passing function arguments and results using
// registers instead of the stack. We need the go version and the computer
// architecture to determine the parameter locations.
//
// @info   per-process info holding the parsed Go version
// @return true when the target binary uses the register-based ABI
static __inline bool is_register_based_call(struct ebpf_proc_info *info)
{
#if defined(__x86_64__)
	// Register-based calling convention since Go 1.17 on amd64.
	// https://go.dev/doc/go1.17
	return info->version >= GO_VERSION(1, 17, 0);
#elif defined(__aarch64__)
	// Register-based calling convention since Go 1.18 on arm64.
	// https://groups.google.com/g/golang-checkins/c/SO9OmZYkOXU
	return info->version >= GO_VERSION(1, 18, 0);
#else
	// Fail the build on unsupported architectures. The previous
	// _Pragma("error ...") is not a standard diagnostic directive
	// (typically only an unknown-pragma warning) and would let the
	// function fall off the end without returning a value.
#error "Must specify a BPF target arch"
#endif
}
// uprobe on runtime.execute: records which goroutine is being scheduled
// onto the current thread, keyed by pid_tgid in goroutines_map, so later
// probes can translate "current thread" -> "current goroutine".
UPROG(runtime_execute) (struct pt_regs *ctx)
{
	struct member_fields_offset *offset = retrieve_ready_kern_offset();
	if (offset == NULL)
		return 0;

	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 tgid = pid_tgid >> 32;
	struct ebpf_proc_info *info =
	    bpf_map_lookup_elem(&proc_info_map, &tgid);
	if (!info) {
		return 0;
	}
	// Offset of the 'goid' field inside runtime.g; < 0 means unknown
	// for this binary.
	int offset_g_goid = info->offsets[OFFSET_IDX_GOID_RUNTIME_G];
	if (offset_g_goid < 0) {
		return 0;
	}

	void *g_ptr;
	// First argument is the *runtime.g being scheduled: in a register
	// for the register-based ABI, at SP+8 for the stack-based ABI.
	if (is_register_based_call(info)) {
		g_ptr = (void *)PT_GO_REGS_PARM1(ctx);
	} else {
		bpf_probe_read_user(&g_ptr, sizeof(g_ptr),
				    (void *)(PT_REGS_SP(ctx) + 8));
	}

	__s64 goid = 0;
	bpf_probe_read_user(&goid, sizeof(goid), g_ptr + offset_g_goid);
	bpf_map_update_elem(&goroutines_map, &pid_tgid, &goid, BPF_ANY);
	return 0;
}
// This function creates a new go coroutine, and the parent and child
// coroutine numbers are in the parameters and return values respectively.
// Pass the function parameters through pid_tgid_callerid_map
//
// go 1.15 ~ 1.17: func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerpc uintptr) *g
// go1.18+ :func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g
UPROG(enter_runtime_newproc1) (struct pt_regs *ctx)
{
	struct member_fields_offset *offset = retrieve_ready_kern_offset();
	if (offset == NULL)
		return 0;

	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 tgid = pid_tgid >> 32;
	struct ebpf_proc_info *info =
	    bpf_map_lookup_elem(&proc_info_map, &tgid);
	if (!info) {
		return 0;
	}

	// go less than 1.15 cannot get parent-child coroutine relationship
	// ~ go1.14: func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerpc uintptr)
	if (info->version < GO_VERSION(1, 15, 0)) {
		return 0;
	}

	int offset_g_goid = info->offsets[OFFSET_IDX_GOID_RUNTIME_G];
	if (offset_g_goid < 0) {
		return 0;
	}

	void *g_ptr;
	// Locate the 'callergp' (*g of the parent goroutine) argument; its
	// position depends on the ABI (registers vs stack) and on the
	// newproc1 signature change in go1.18 (see commit link below).
	if (is_register_based_call(info)) {
		// https://github.com/golang/go/commit/8e5304f7298a0eef48e4796017c51b4d9aeb52b5
		if (info->version >= GO_VERSION(1, 18, 0)) {
			g_ptr = (void *)PT_GO_REGS_PARM2(ctx);
		} else {
			g_ptr = (void *)PT_GO_REGS_PARM4(ctx);
		}
	} else {
		if (info->version >= GO_VERSION(1, 18, 0)) {
			bpf_probe_read_user(&g_ptr, sizeof(g_ptr),
					    (void *)(PT_REGS_SP(ctx) + 16));
		} else {
			bpf_probe_read_user(&g_ptr, sizeof(g_ptr),
					    (void *)(PT_REGS_SP(ctx) + 32));
		}
	}

	__s64 goid = 0;
	bpf_probe_read_user(&goid, sizeof(goid), g_ptr + offset_g_goid);
	if (!goid) {
		return 0;
	}

	// Save the parent goid and the entry-time SP for the exit probe,
	// which needs the SP to find the return-value slot on the stack.
	struct go_newproc_caller caller = {
		.goid = goid,
		.sp = (void *)PT_REGS_SP(ctx),
	};
	bpf_map_update_elem(&pid_tgid_callerid_map, &pid_tgid, &caller,
			    BPF_ANY);
	return 0;
}
// The mapping relationship between parent and child coroutines is stored in go_ancerstor_map
//
// go 1.15 ~ 1.17: func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerpc uintptr) *g
// go1.18+ :func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g
UPROG(exit_runtime_newproc1) (struct pt_regs *ctx)
{
	struct member_fields_offset *offset = retrieve_ready_kern_offset();
	if (offset == NULL)
		return 0;

	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 tgid = pid_tgid >> 32;
	struct ebpf_proc_info *info =
	    bpf_map_lookup_elem(&proc_info_map, &tgid);
	if (!info) {
		return 0;
	}
	// Parent/child tracking is only supported for go >= 1.15.
	if (info->version < GO_VERSION(1, 15, 0)) {
		return 0;
	}
	int offset_g_goid = info->offsets[OFFSET_IDX_GOID_RUNTIME_G];
	if (offset_g_goid < 0) {
		return 0;
	}

	// Parent goid and entry SP saved by enter_runtime_newproc1.
	struct go_newproc_caller *caller =
	    bpf_map_lookup_elem(&pid_tgid_callerid_map, &pid_tgid);
	if (!caller) {
		return 0;
	}

	void *g_ptr;
	if (is_register_based_call(info)) {
		// Return value (*g of the new goroutine) in the first
		// result register.
		g_ptr = (void *)PT_GO_REGS_PARM1(ctx);
	} else {
		// Stack-based ABI: read the *g return slot relative to the
		// SP recorded at entry; the slot offset depends on the
		// newproc1 signature (changed in go1.18).
		if (info->version >= GO_VERSION(1, 18, 0)) {
			bpf_probe_read_user(&g_ptr, sizeof(g_ptr),
					    caller->sp + 32);
		} else {
			bpf_probe_read_user(&g_ptr, sizeof(g_ptr),
					    caller->sp + 48);
		}
	}

	__s64 goid = 0;
	bpf_probe_read_user(&goid, sizeof(goid), g_ptr + offset_g_goid);
	if (!goid) {
		bpf_map_delete_elem(&pid_tgid_callerid_map, &pid_tgid);
		return 0;
	}

	// Record: child goroutine (tgid, child goid) -> parent goid.
	struct go_key key = {.tgid = tgid,.goid = goid };
	goid = caller->goid;
	bpf_map_update_elem(&go_ancerstor_map, &key, &goid, BPF_ANY);

	bpf_map_delete_elem(&pid_tgid_callerid_map, &pid_tgid);
	return 0;
}
static __inline int do_process_exit(void *ctx)
{
struct member_fields_offset *offset = retrieve_ready_kern_offset();
if (offset == NULL)
return 0;
pid_t pid, tid;
__u64 id;
id = bpf_get_current_pid_tgid();
pid = id >> 32;
tid = (__u32) id;
// If is a process, clear proc_info_map element and submit event.
if (pid == tid) {
bpf_map_delete_elem(&proc_info_map, &pid);
struct process_event_t data;
data.pid = pid;
data.meta.event_type = EVENT_TYPE_PROC_EXIT;
bpf_get_current_comm(data.name, sizeof(data.name));
bpf_perf_event_output(ctx, &NAME(socket_data),
BPF_F_CURRENT_CPU, &data, sizeof(data));
}
bpf_map_delete_elem(&goroutines_map, &id);
return 0;
}
// kprobe on do_exit: fires for every exiting task (process or thread).
KPROG(do_exit) (struct pt_regs *ctx) {
	return do_process_exit((void *)ctx);
}
// Tracepoint alternative to the do_exit kprobe.
// /sys/kernel/debug/tracing/events/sched/sched_process_exit/format
TP_SCHED_PROG(process_exit) (struct sched_comm_exit_ctx *ctx) {
	return do_process_exit((void *)ctx);
}
/*
 * Common exit handler for fork()/clone(), reporting an
 * EVENT_TYPE_PROC_EXEC event for the newly created process.
 *
 * @is_kprobe    true when called from a kretprobe (used on older kernels
 *               that lack the sys_exit_fork/sys_exit_clone tracepoints),
 *               false when called from a tracepoint.
 * @maybe_thread true for clone() exits, where the new task may be a
 *               thread rather than a full process.
 * @ret          syscall return value (child PID in the parent, 0 in the
 *               child for tracepoints, negative on error).
 * @ctx          probe context, passed through to bpf_perf_event_output.
 */
static inline int kernel_clone_exit(bool is_kprobe, bool maybe_thread,
				    long ret, void *ctx)
{
	// For tracepoint: error or parent process
	if (ret != 0 && !is_kprobe)
		return 0;

	__u64 id = bpf_get_current_pid_tgid();
	int pid = (int)id;
	int tgid = (int)(id >> 32);

	// filter threads
	if (pid != tgid)
		return 0;

	struct member_fields_offset *offset = retrieve_ready_kern_offset();
	if (offset == NULL)
		return 0;

	// Zero-initialize so struct padding and any unset fields do not
	// leak kernel stack bytes through the perf buffer.
	struct process_event_t data = { 0 };
	data.meta.event_type = EVENT_TYPE_PROC_EXEC;

	/*
	 * For kprobe type, it was found that the return value is never 0, which
	 * indicates that the current process is the parent process rather than
	 * the child process. In this case, we take the return value (since the
	 * return value is the child process ID).
	 */
	if (ret > 0)
		data.pid = ret;
	else
		data.pid = pid;

	data.maybe_thread = maybe_thread;
	bpf_get_current_comm(data.name, sizeof(data.name));
	bpf_perf_event_output(ctx, &NAME(socket_data),
			      BPF_F_CURRENT_CPU, &data, sizeof(data));
	return 0;
}
/*
 * In order to handle older kernels, such as Linux 4.14 and 3.10.0-957.el7,
 * which lack '/sys/kernel/debug/tracing/events/syscalls/sys_exit_fork' and
 * '/sys/kernel/debug/tracing/events/syscalls/sys_exit_clone', we use
 * kretprobe as a substitute for tracepoint type.
 *
 * fork() never creates a thread (maybe_thread = false); clone() may
 * (maybe_thread = true). The three variants per syscall cover the
 * kernel/arch-specific symbol names (legacy, __x64_*, __arm64_*).
 */
KRETPROG(sys_fork) (struct pt_regs* ctx) {
	return kernel_clone_exit(true, false, (long)PT_REGS_RC(ctx), ctx);
}

KRETPROG(__x64_sys_fork) (struct pt_regs* ctx) {
	return kernel_clone_exit(true, false, (long)PT_REGS_RC(ctx), ctx);
}

KRETPROG(__arm64_sys_fork) (struct pt_regs* ctx) {
	return kernel_clone_exit(true, false, (long)PT_REGS_RC(ctx), ctx);
}

KRETPROG(sys_clone) (struct pt_regs* ctx) {
	return kernel_clone_exit(true, true, (long)PT_REGS_RC(ctx), ctx);
}

KRETPROG(__x64_sys_clone) (struct pt_regs* ctx) {
	return kernel_clone_exit(true, true, (long)PT_REGS_RC(ctx), ctx);
}

KRETPROG(__arm64_sys_clone) (struct pt_regs* ctx) {
	return kernel_clone_exit(true, true, (long)PT_REGS_RC(ctx), ctx);
}
// Tracepoint variants for newer kernels. The tracepoint fires in the
// child too (ret == 0), where pid/tgid are the child's own and a thread
// can be filtered directly, so maybe_thread is false here.
// /sys/kernel/debug/tracing/events/syscalls/sys_exit_fork/format
TP_SYSCALL_PROG(exit_fork) (struct syscall_comm_exit_ctx * ctx) {
	return kernel_clone_exit(false, false, (long)ctx->ret, ctx);
}

// /sys/kernel/debug/tracing/events/syscalls/sys_exit_clone/format
TP_SYSCALL_PROG(exit_clone) (struct syscall_comm_exit_ctx * ctx) {
	return kernel_clone_exit(false, false, (long)ctx->ret, ctx);
}
static __inline int __process_exec(void *ctx)
{
struct member_fields_offset *offset = retrieve_ready_kern_offset();
if (offset == NULL)
return 0;
struct process_event_t data;
__u64 id = bpf_get_current_pid_tgid();
pid_t pid = id >> 32;
pid_t tid = (__u32) id;
if (pid == tid) {
data.meta.event_type = EVENT_TYPE_PROC_EXEC;
data.pid = pid;
data.maybe_thread = false;
bpf_get_current_comm(data.name, sizeof(data.name));
bpf_perf_event_output(ctx, &NAME(socket_data),
BPF_F_CURRENT_CPU, &data, sizeof(data));
}
return 0;
}
// Report process exec from the execve()/execveat() return path; the
// syscall symbol names are arch-specific (__x64_* vs __arm64_*).
#if defined(__x86_64__)
KRETPROG(__x64_sys_execve) (struct pt_regs *ctx) {
	return __process_exec((void *)ctx);
}

KRETPROG(__x64_sys_execveat) (struct pt_regs *ctx) {
	return __process_exec((void *)ctx);
}
#else
KRETPROG(__arm64_sys_execve) (struct pt_regs *ctx) {
	return __process_exec((void *)ctx);
}

KRETPROG(__arm64_sys_execveat) (struct pt_regs *ctx) {
	return __process_exec((void *)ctx);
}
#endif

// Tracepoint alternative to the exec kretprobes.
// /sys/kernel/debug/tracing/events/sched/sched_process_exec/format
TP_SCHED_PROG(process_exec) (struct sched_comm_exec_ctx *ctx)
{
	return __process_exec((void *)ctx);
}
// uretprobe: fires when _dl_open() returns after dynamically loading a library.
// Attached from user-space via program__attach_uprobe() to ld-linux's _dl_open.
// Emits an EVENT_TYPE_LIB_LOAD event so user space can re-scan the
// process's loaded libraries.
URETPROG(dl_open_uretprobe) (struct pt_regs *ctx)
{
	struct member_fields_offset *offset = retrieve_ready_kern_offset();
	if (offset == NULL)
		return 0;

	__u64 id = bpf_get_current_pid_tgid();
	pid_t pid = id >> 32;

	// Zero-initialized event; only type and pid are meaningful here.
	struct library_load_event_t data = {};
	data.meta.event_type = EVENT_TYPE_LIB_LOAD;
	data.pid = pid;
	bpf_perf_event_output(ctx, &NAME(socket_data),
			      BPF_F_CURRENT_CPU, &data, sizeof(data));
	return 0;
}