// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Facebook */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <bpf/btf.h>

#include "json_writer.h"
#include "main.h"

#define MAX_OBJ_NAME_LEN 64

static void sanitize_identifier(char *name)
{
	int i;

	for (i = 0; name[i]; i++)
		if (!isalnum(name[i]) && name[i] != '_')
			name[i] = '_';
}

static bool str_has_prefix(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

static bool str_has_suffix(const char *str, const char *suffix)
{
	size_t i, n1 = strlen(str), n2 = strlen(suffix);

	if (n1 < n2)
		return false;

	for (i = 0; i < n2; i++) {
		if (str[n1 - i - 1] != suffix[n2 - i - 1])
			return false;
	}

	return true;
}

static void get_obj_name(char *name, const char *file)
{
	/* Using basename() GNU version which doesn't modify arg. */
	strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
	name[MAX_OBJ_NAME_LEN - 1] = '\0';
	if (str_has_suffix(name, ".o"))
		name[strlen(name) - 2] = '\0';
	sanitize_identifier(name);
}
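/*
 * Illustrative example (not part of the original source): for an input path
 * "/path/to/my-prog.o", basename() yields "my-prog.o", the ".o" suffix is
 * dropped, and sanitize_identifier() maps '-' to '_', producing "my_prog",
 * which then names the generated skeleton type ("struct my_prog").
 */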

static void get_header_guard(char *guard, const char *obj_name)
{
	int i;

	sprintf(guard, "__%s_SKEL_H__", obj_name);
	for (i = 0; guard[i]; i++)
		guard[i] = toupper(guard[i]);
}

static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
{
	static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	const char *name = bpf_map__name(map);
	int i, n;

	if (!bpf_map__is_internal(map)) {
		snprintf(buf, buf_sz, "%s", name);
		return true;
	}

	for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
		const char *sfx = sfxs[i], *p;

		p = strstr(name, sfx);
		if (p) {
			snprintf(buf, buf_sz, "%s", p + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}
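/*
 * Illustrative example (not part of the original source): libbpf names
 * internal maps by prefixing a truncated object name, e.g. "my_prog.rodata".
 * strstr() finds the ".rodata" suffix and the identifier becomes "rodata",
 * so the skeleton exposes the map as obj->rodata and obj->maps.rodata.
 */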

static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
{
	static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	int i, n;

	for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
		const char *pfx = pfxs[i];

		if (str_has_prefix(sec_name, pfx)) {
			snprintf(buf, buf_sz, "%s", sec_name + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}

static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}

static int codegen_datasec_def(struct bpf_object *obj,
			       struct btf *btf,
			       struct btf_dump *d,
			       const struct btf_type *sec,
			       const char *obj_name)
{
	const char *sec_name = btf__name_by_offset(btf, sec->name_off);
	const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
	int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
	char var_ident[256], sec_ident[256];
	bool strip_mods = false;

	if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
		return 0;

	if (strcmp(sec_name, ".kconfig") != 0)
		strip_mods = true;

	printf("	struct %s__%s {\n", obj_name, sec_ident);
	for (i = 0; i < vlen; i++, sec_var++) {
		const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
		const char *var_name = btf__name_by_offset(btf, var->name_off);
		DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
			.field_name = var_ident,
			.indent_level = 2,
			.strip_mods = strip_mods,
		);
		int need_off = sec_var->offset, align_off, align;
		__u32 var_type_id = var->type;

		/* static variables are not exposed through BPF skeleton */
		if (btf_var(var)->linkage == BTF_VAR_STATIC)
			continue;

		if (off > need_off) {
			p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
			      sec_name, i, need_off, off);
			return -EINVAL;
		}

		align = btf__align_of(btf, var->type);
		if (align <= 0) {
			p_err("Failed to determine alignment of variable '%s': %d",
			      var_name, align);
			return -EINVAL;
		}
		/* Assume 32-bit architectures when generating data section
		 * struct memory layout. Given bpftool can't know which target
		 * host architecture it's emitting skeleton for, we need to be
		 * conservative and assume 32-bit one to ensure enough padding
		 * bytes are generated for pointer and long types. This will
		 * still work correctly for 64-bit architectures, because in
		 * the worst case we'll generate unnecessary padding field,
		 * which on 64-bit architectures is not strictly necessary and
		 * would be handled by natural 8-byte alignment. But it still
		 * will be a correct memory layout, based on recorded offsets
		 * in BTF.
		 */
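		/* Worked example (illustrative, not from the original code):
		 * if .data holds 'char c' at offset 0 followed by 'long x'
		 * at offset 8, then after emitting 'c' we have off = 1,
		 * need_off = 8, and align = min(8, 4) = 4, so align_off = 4
		 * != 8 and a "char __pad0[7];" member is emitted, keeping
		 * 'x' at offset 8 on both 32-bit and 64-bit hosts.
		 */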
		if (align > 4)
			align = 4;

		align_off = (off + align - 1) / align * align;
		if (align_off != need_off) {
			printf("\t\tchar __pad%d[%d];\n",
			       pad_cnt, need_off - off);
			pad_cnt++;
		}

		/* sanitize variable name, e.g., for static vars inside
		 * a function, its name is '<function name>.<variable name>',
		 * which we'll turn into '<function name>_<variable name>'
		 */
		var_ident[0] = '\0';
		strncat(var_ident, var_name, sizeof(var_ident) - 1);
		sanitize_identifier(var_ident);

		printf("\t\t");
		err = btf_dump__emit_type_decl(d, var_type_id, &opts);
		if (err)
			return err;
		printf(";\n");

		off = sec_var->offset + sec_var->size;
	}
	printf("	} *%s;\n", sec_ident);
	return 0;
}

static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	int n = btf__type_cnt(btf);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec;
	char sec_ident[256], map_ident[256];
	int i, err = 0;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	err = libbpf_get_error(d);
	if (err)
		return err;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!bpf_map__is_internal(map))
			continue;
		if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;

		if (!get_map_ident(map, map_ident, sizeof(map_ident)))
			continue;

		sec = NULL;
		for (i = 1; i < n; i++) {
			const struct btf_type *t = btf__type_by_id(btf, i);
			const char *name;

			if (!btf_is_datasec(t))
				continue;

			name = btf__str_by_offset(btf, t->name_off);
			if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
				continue;

			if (strcmp(sec_ident, map_ident) == 0) {
				sec = t;
				break;
			}
		}

		/* In some cases (e.g., sections like .rodata.cst16 containing
		 * compiler allocated string constants only) there will be
		 * special internal maps with no corresponding DATASEC BTF
		 * type. In such case, generate empty structs for each such
		 * map. It will still be memory-mapped and its contents
		 * accessible from user-space through BPF skeleton.
		 */
		if (!sec) {
			printf("	struct %s__%s {\n", obj_name, map_ident);
			printf("	} *%s;\n", map_ident);
		} else {
			err = codegen_datasec_def(obj, btf, d, sec, obj_name);
			if (err)
				goto out;
		}
	}

out:
	btf_dump__free(d);
	return err;
}

static void codegen(const char *template, ...)
{
	const char *src, *end;
	int skip_tabs = 0, n;
	char *s, *dst;
	va_list args;
	char c;

	n = strlen(template);
	s = malloc(n + 1);
	if (!s)
		exit(-1);
	src = template;
	dst = s;

	/* find out "baseline" indentation to skip */
	while ((c = *src++)) {
		if (c == '\t') {
			skip_tabs++;
		} else if (c == '\n') {
			break;
		} else {
			p_err("unrecognized character at pos %td in template '%s': '%c'",
			      src - template - 1, template, c);
			free(s);
			exit(-1);
		}
	}

	while (*src) {
		/* skip baseline indentation tabs */
		for (n = skip_tabs; n > 0; n--, src++) {
			if (*src != '\t') {
				p_err("not enough tabs at pos %td in template '%s'",
				      src - template - 1, template);
				free(s);
				exit(-1);
			}
		}
		/* trim trailing whitespace */
		end = strchrnul(src, '\n');
		for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
			;
		memcpy(dst, src, n);
		dst += n;
		if (*end)
			*dst++ = '\n';
		src = *end ? end + 1 : end;
	}
	*dst++ = '\0';

	/* print out using adjusted template */
	va_start(args, template);
	n = vprintf(s, args);
	va_end(args);

	free(s);
}
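/*
 * Illustrative example (not part of the original source): with a two-tab
 * baseline,
 *
 *	codegen("\
 *		\n\
 *		struct %1$s {	\n\
 *		};		\n\
 *		", "my_prog");
 *
 * strips the leading baseline tabs plus the padding in front of each "\n"
 * and prints:
 *
 *	struct my_prog {
 *	};
 */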

static void print_hex(const char *data, int data_sz)
{
	int i, len;

	for (i = 0, len = 0; i < data_sz; i++) {
		int w = data[i] ? 4 : 2;

		len += w;
		if (len > 78) {
			printf("\\\n");
			len = w;
		}
		if (!data[i])
			printf("\\0");
		else
			printf("\\x%02x", (unsigned char)data[i]);
	}
}

static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}
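/*
 * Worked example (illustrative, not from the original code): a .rodata map
 * with value_size = 100 and max_entries = 1 gives roundup(100, 8) * 1 = 104
 * bytes, rounded up to a single 4096-byte page, so 4096 bytes are mmap()'ed
 * for it in the generated skeleton.
 */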

static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		const char *tp_name;

		codegen("\
			\n\
			\n\
			static inline int \n\
			%1$s__%2$s__attach(struct %1$s *skel) \n\
			{ \n\
				int prog_fd = skel->progs.%2$s.prog_fd; \n\
			", obj_name, bpf_program__name(prog));

		switch (bpf_program__type(prog)) {
		case BPF_PROG_TYPE_RAW_TRACEPOINT:
			tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
			printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
			break;
		case BPF_PROG_TYPE_TRACING:
			if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
				printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
			else
				printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
			break;
		default:
			printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
			break;
		}
		codegen("\
			\n\
			\n\
				if (fd > 0) \n\
					skel->links.%1$s_fd = fd; \n\
				return fd; \n\
			} \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		\n\
		static inline int \n\
		%1$s__attach(struct %1$s *skel) \n\
		{ \n\
			int ret = 0; \n\
			\n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\
			", obj_name, bpf_program__name(prog));
	}

	codegen("\
		\n\
			return ret < 0 ? ret : 0; \n\
		} \n\
		\n\
		static inline void \n\
		%1$s__detach(struct %1$s *skel) \n\
		{ \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->links.%1$s_fd); \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		} \n\
		");
}
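/*
 * Illustrative output (assuming an object named "test" with a raw tracepoint
 * program "handle_exec" on "sched_process_exec"; not copied from the original
 * source), roughly:
 *
 *	static inline int
 *	test__handle_exec__attach(struct test *skel)
 *	{
 *		int prog_fd = skel->progs.handle_exec.prog_fd;
 *		int fd = skel_raw_tracepoint_open("sched_process_exec", prog_fd);
 *
 *		if (fd > 0)
 *			skel->links.handle_exec_fd = fd;
 *		return fd;
 *	}
 */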

static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	char ident[256];

	codegen("\
		\n\
		static void \n\
		%1$s__destroy(struct %1$s *skel) \n\
		{ \n\
			if (!skel) \n\
				return; \n\
			%1$s__detach(skel); \n\
		",
		obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->progs.%1$s.prog_fd); \n\
			", bpf_program__name(prog));
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		if (bpf_map__is_internal(map) &&
		    (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			printf("\tmunmap(skel->%1$s, %2$zd);\n",
			       ident, bpf_map_mmap_sz(map));
		codegen("\
			\n\
				skel_closenz(skel->maps.%1$s.map_fd); \n\
			", ident);
	}
	codegen("\
		\n\
			free(skel); \n\
		} \n\
		",
		obj_name);
}

static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
	DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
	struct bpf_map *map;
	char ident[256];
	int err = 0;

	err = bpf_object__gen_loader(obj, &opts);
	if (err)
		return err;

	err = bpf_object__load(obj);
	if (err) {
		p_err("failed to load object file");
		goto out;
	}
	/* If there was no error during load then gen_loader_opts
	 * are populated with the loader program.
	 */

	/* finish generating 'struct skel' */
	codegen("\
		\n\
		}; \n\
		", obj_name);


	codegen_attach_detach(obj, obj_name);

	codegen_destroy(obj, obj_name);

	codegen("\
		\n\
		static inline struct %1$s * \n\
		%1$s__open(void) \n\
		{ \n\
			struct %1$s *skel; \n\
			\n\
			skel = calloc(sizeof(*skel), 1); \n\
			if (!skel) \n\
				goto cleanup; \n\
			skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
		",
		obj_name, opts.data_sz);
	bpf_object__for_each_map(map, obj) {
		const void *mmap_data = NULL;
		size_t mmap_size = 0;

		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		if (!bpf_map__is_internal(map) ||
		    !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;

		codegen("\
			\n\
				skel->%1$s = \n\
					mmap(NULL, %2$zd, PROT_READ | PROT_WRITE,\n\
					     MAP_SHARED | MAP_ANONYMOUS, -1, 0); \n\
				if (skel->%1$s == (void *) -1) \n\
					goto cleanup; \n\
				memcpy(skel->%1$s, (void *)\"\\ \n\
			", ident, bpf_map_mmap_sz(map));
		mmap_data = bpf_map__initial_value(map, &mmap_size);
		print_hex(mmap_data, mmap_size);
		printf("\", %2$zd);\n"
		       "\tskel->maps.%1$s.initial_value = (__u64)(long)skel->%1$s;\n",
		       ident, mmap_size);
	}
	codegen("\
		\n\
			return skel; \n\
		cleanup: \n\
			%1$s__destroy(skel); \n\
			return NULL; \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__load(struct %1$s *skel) \n\
		{ \n\
			struct bpf_load_and_run_opts opts = {}; \n\
			int err; \n\
			\n\
			opts.ctx = (struct bpf_loader_ctx *)skel; \n\
			opts.data_sz = %2$d; \n\
			opts.data = (void *)\"\\ \n\
		",
		obj_name, opts.data_sz);
	print_hex(opts.data, opts.data_sz);
	codegen("\
		\n\
		\"; \n\
		");

	codegen("\
		\n\
			opts.insns_sz = %d; \n\
			opts.insns = (void *)\"\\ \n\
		",
		opts.insns_sz);
	print_hex(opts.insns, opts.insns_sz);
	codegen("\
		\n\
		\"; \n\
			err = bpf_load_and_run(&opts); \n\
			if (err < 0) \n\
				return err; \n\
		", obj_name);
	bpf_object__for_each_map(map, obj) {
		const char *mmap_flags;

		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		if (!bpf_map__is_internal(map) ||
		    !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;

		if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
			mmap_flags = "PROT_READ";
		else
			mmap_flags = "PROT_READ | PROT_WRITE";

		printf("\tskel->%1$s =\n"
		       "\t\tmmap(skel->%1$s, %2$zd, %3$s, MAP_SHARED | MAP_FIXED,\n"
		       "\t\t\tskel->maps.%1$s.map_fd, 0);\n",
		       ident, bpf_map_mmap_sz(map), mmap_flags);
	}
	codegen("\
		\n\
			return 0; \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open_and_load(void) \n\
		{ \n\
			struct %1$s *skel; \n\
			\n\
			skel = %1$s__open(); \n\
			if (!skel) \n\
				return NULL; \n\
			if (%1$s__load(skel)) { \n\
				%1$s__destroy(skel); \n\
				return NULL; \n\
			} \n\
			return skel; \n\
		} \n\
		", obj_name);

	codegen("\
		\n\
		\n\
		#endif /* %s */ \n\
		",
		header_guard);
	err = 0;
out:
	return err;
}
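/*
 * Illustrative use of a light skeleton produced by gen_trace() when bpftool
 * is invoked with -L (hypothetical object name "test"; not part of the
 * original source): the generated header is self-contained, embedding map
 * data and the loader program, and is consumed roughly as
 *
 *	struct test *skel = test__open_and_load();
 *
 *	if (!skel)
 *		return -1;
 *	test__attach(skel);
 *	...
 *	test__destroy(skel);
 */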

static int do_skeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
	size_t i, map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file;
	char ident[256];
	struct bpf_program *prog;
	int fd, err = -1;
	struct bpf_map *map;
	struct btf *btf;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);
	opts.object_name = obj_name;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		opts.kernel_log_level = 1 + 2 + 4;
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	err = libbpf_get_error(obj);
	if (err) {
		char err_buf[256];

		libbpf_strerror(err, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		obj = NULL;
		goto out;
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident))) {
			p_err("ignoring unrecognized internal map '%s'...",
			      bpf_map__name(map));
			continue;
		}
		map_cnt++;
	}
	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	get_header_guard(header_guard, obj_name);
	if (use_loader) {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
		/* THIS FILE IS AUTOGENERATED! */ \n\
		#ifndef %2$s \n\
		#define %2$s \n\
		\n\
		#include <stdlib.h> \n\
		#include <bpf/bpf.h> \n\
		#include <bpf/skel_internal.h> \n\
		\n\
		struct %1$s { \n\
			struct bpf_loader_ctx ctx; \n\
		",
		obj_name, header_guard
		);
	} else {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
		\n\
		/* THIS FILE IS AUTOGENERATED! */ \n\
		#ifndef %2$s \n\
		#define %2$s \n\
		\n\
		#include <errno.h> \n\
		#include <stdlib.h> \n\
		#include <bpf/libbpf.h> \n\
		\n\
		struct %1$s { \n\
			struct bpf_object_skeleton *skeleton; \n\
			struct bpf_object *obj; \n\
		",
		obj_name, header_guard
		);
	}

	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			if (use_loader)
				printf("\t\tstruct bpf_map_desc %s;\n", ident);
			else
				printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tstruct bpf_prog_desc %s;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_program *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tint %s_fd;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_link *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} links;\n");
	}

	btf = bpf_object__btf(obj);
	if (btf) {
		err = codegen_datasecs(obj, obj_name);
		if (err)
			goto out;
	}
	if (use_loader) {
		err = gen_trace(obj, obj_name, header_guard);
		goto out;
	}

	codegen("\
		\n\
		}; \n\
		\n\
		static void \n\
		%1$s__destroy(struct %1$s *obj) \n\
		{ \n\
			if (!obj) \n\
				return; \n\
			if (obj->skeleton) \n\
				bpf_object__destroy_skeleton(obj->skeleton);\n\
			free(obj); \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__create_skeleton(struct %1$s *obj); \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open_opts(const struct bpf_object_open_opts *opts) \n\
		{ \n\
			struct %1$s *obj; \n\
			int err; \n\
			\n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
			if (!obj) { \n\
				errno = ENOMEM; \n\
				return NULL; \n\
			} \n\
			\n\
			err = %1$s__create_skeleton(obj); \n\
			if (err) \n\
				goto err_out; \n\
			\n\
			err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
			if (err) \n\
				goto err_out; \n\
			\n\
			return obj; \n\
		err_out: \n\
			%1$s__destroy(obj); \n\
			errno = -err; \n\
			return NULL; \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open(void) \n\
		{ \n\
			return %1$s__open_opts(NULL); \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__load(struct %1$s *obj) \n\
		{ \n\
			return bpf_object__load_skeleton(obj->skeleton); \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open_and_load(void) \n\
		{ \n\
			struct %1$s *obj; \n\
			int err; \n\
			\n\
			obj = %1$s__open(); \n\
			if (!obj) \n\
				return NULL; \n\
			err = %1$s__load(obj); \n\
			if (err) { \n\
				%1$s__destroy(obj); \n\
				errno = -err; \n\
				return NULL; \n\
			} \n\
			return obj; \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__attach(struct %1$s *obj) \n\
		{ \n\
			return bpf_object__attach_skeleton(obj->skeleton); \n\
		} \n\
		\n\
		static inline void \n\
		%1$s__detach(struct %1$s *obj) \n\
		{ \n\
			return bpf_object__detach_skeleton(obj->skeleton); \n\
		} \n\
		",
		obj_name
		);

	codegen("\
		\n\
		\n\
		static inline const void *%1$s__elf_bytes(size_t *sz); \n\
		\n\
		static inline int \n\
		%1$s__create_skeleton(struct %1$s *obj) \n\
		{ \n\
			struct bpf_object_skeleton *s; \n\
			\n\
			s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
			if (!s) \n\
				goto err; \n\
			\n\
			s->sz = sizeof(*s); \n\
			s->name = \"%1$s\"; \n\
			s->obj = &obj->obj; \n\
		",
		obj_name
		);
	if (map_cnt) {
		codegen("\
			\n\
			\n\
				/* maps */ \n\
				s->map_cnt = %zu; \n\
				s->map_skel_sz = sizeof(*s->maps); \n\
				s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
				if (!s->maps) \n\
					goto err; \n\
			",
			map_cnt
		);
		i = 0;
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;

			codegen("\
				\n\
				\n\
					s->maps[%zu].name = \"%s\"; \n\
					s->maps[%zu].map = &obj->maps.%s; \n\
				",
				i, bpf_map__name(map), i, ident);
			/* memory-mapped internal maps */
			if (bpf_map__is_internal(map) &&
			    (bpf_map__map_flags(map) & BPF_F_MMAPABLE)) {
				printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
				       i, ident);
			}
			i++;
		}
	}
	if (prog_cnt) {
		codegen("\
			\n\
			\n\
				/* programs */ \n\
				s->prog_cnt = %zu; \n\
				s->prog_skel_sz = sizeof(*s->progs); \n\
				s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
				if (!s->progs) \n\
					goto err; \n\
			",
			prog_cnt
		);
		i = 0;
		bpf_object__for_each_program(prog, obj) {
			codegen("\
				\n\
				\n\
					s->progs[%1$zu].name = \"%2$s\"; \n\
					s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
					s->progs[%1$zu].link = &obj->links.%2$s;\n\
				",
				i, bpf_program__name(prog));
			i++;
		}
	}
	codegen("\
		\n\
		\n\
			s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\
			\n\
			obj->skeleton = s; \n\
			return 0; \n\
		err: \n\
			bpf_object__destroy_skeleton(s); \n\
			return -ENOMEM; \n\
		} \n\
		\n\
		static inline const void *%2$s__elf_bytes(size_t *sz) \n\
		{ \n\
			*sz = %1$d; \n\
			return (const void *)\"\\ \n\
		"
		, file_sz, obj_name);

	/* embed contents of BPF object file */
	print_hex(obj_data, file_sz);

	codegen("\
		\n\
		\"; \n\
		} \n\
		\n\
		#endif /* %s */ \n\
		",
		header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}
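/*
 * Illustrative workflow (hypothetical file names, not part of the original
 * source):
 *
 *	bpftool gen skeleton test.bpf.o name test > test.skel.h
 *
 * produces a header that user-space code consumes through the generated
 * API, roughly:
 *
 *	struct test *skel = test__open_and_load();
 *
 *	if (!skel)
 *		return 1;
 *	test__attach(skel);
 *	...
 *	test__destroy(skel);
 */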

static int do_object(int argc, char **argv)
{
	struct bpf_linker *linker;
	const char *output_file, *file;
	int err = 0;

	if (!REQ_ARGS(2)) {
		usage();
		return -1;
	}

	output_file = GET_ARG();

	linker = bpf_linker__new(output_file, NULL);
	if (!linker) {
		p_err("failed to create BPF linker instance");
		return -1;
	}

	while (argc) {
		file = GET_ARG();

		err = bpf_linker__add_file(linker, file, NULL);
		if (err) {
			p_err("failed to link '%s': %s (%d)", file, strerror(err), err);
			goto out;
		}
	}

	err = bpf_linker__finalize(linker);
	if (err) {
		p_err("failed to finalize ELF file: %s (%d)", strerror(err), err);
		goto out;
	}

	err = 0;
out:
	bpf_linker__free(linker);
	return err;
}
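/*
 * Illustrative invocation (hypothetical file names, not part of the original
 * source):
 *
 *	bpftool gen object combined.bpf.o foo.bpf.o bar.bpf.o
 *
 * statically links the two input ELF objects into combined.bpf.o, which can
 * then be fed to "bpftool gen skeleton".
 */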

static int do_help(int argc, char **argv)
{
	if (json_output) {
		jsonw_null(json_wtr);
		return 0;
	}

	fprintf(stderr,
		"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
		"       %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s help\n"
		"\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"                    {-L|--use-loader} }\n"
		"",
		bin_name, "gen");

	return 0;
}

static const struct cmd cmds[] = {
	{ "object",	do_object },
	{ "skeleton",	do_skeleton },
	{ "help",	do_help },
	{ 0 }
};

int do_gen(int argc, char **argv)
{
	return cmd_select(cmds, argc, argv, do_help);
}