Merge remote-tracking branch 'origin/upstream' am: 85f60adb4d
Original change: undetermined
Change-Id: I22c3f03b55542e5bc05eb503ce81596ad10efd08
Signed-off-by: Automerger Merge Worker <[email protected]>
diff --git a/.cargo/config.toml b/.cargo/config.toml
new file mode 100644
index 0000000..2967cf3
--- /dev/null
+++ b/.cargo/config.toml
@@ -0,0 +1,2 @@
+[target.x86_64-unknown-linux-gnu]
+runner = "sudo -E"
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
new file mode 100644
index 0000000..9790192
--- /dev/null
+++ b/.cargo_vcs_info.json
@@ -0,0 +1,6 @@
+{
+ "git": {
+ "sha1": "0176e5b4fbce1a33ad68652a2e21fe292c5b1ec8"
+ },
+ "path_in_vcs": "libbpf-rs"
+}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..b9bce70
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,175 @@
+0.24.0
+------
+- Split `{Open,}{Map,Program}` into `{Open,}{Map,Program}` (for shared
+ access) and `{Open,}{Map,Program}Mut` (for exclusive access)
+- Added `AsRawLibbpf` impl for `OpenObject` and `ObjectBuilder`
+- Decoupled `Map` and `MapHandle` more and introduced `MapCore` trait
+ abstracting over common functionality
+- Adjusted `SkelBuilder::open` method to require mutable reference to
+ storage space for BPF object
+- Adjusted `{Open,}Object::from_ptr` constructor to be infallible
+- Added `{Open,}Object::maps{_mut,}` and `{Open,}Object::progs{_mut,}`
+ for BPF map and program iteration
+- Adjusted various APIs to return/use `OsStr` instead of `CStr` or `str`
+- Adjusted `{Open,}Program` to lazily retrieve name and section
+ - Changed `name` and `section` methods to return `&OsStr` and made
+ constructors infallible
+- Adjusted `OpenObject::name` to return `Option<&OsStr>`
+- Removed `Result` return type from
+ `OpenProgram::{set_log_level,set_autoload,set_flags}`
+- Added `Object::name` method
+- Added `Copy` and `Clone` impls for types inside `btf::types` module
+- Adjusted `OpenMap::set_inner_map_fd` to return `Result`
+- Adjusted `ProgramInput::context_in` field to be a mutable reference
+- Made inner `query::Tag` contents publicly accessible
+- Fixed potential memory leak in `RingBufferBuilder::build`
+- Removed `Display` implementation of various `enum` types
+
+
+0.23.2
+------
+- Fixed build failure on Android platforms
+
+
+0.23.1
+------
+- Added support for user ring buffers
+- Fixed handling of bloom filter type maps
+ - Added `Map::lookup_bloom_filter` for looking up elements in a bloom filter
+
+
+0.23.0
+------
+- Overhauled crate feature set:
+ - Removed `novendor` feature
+ - Added `vendored` feature to use vendored copies of all needed libraries
+- Added `Program::attach_ksyscall` for attaching to ksyscall handlers
+- Added `Program::test_run` as a way for test-running programs
+- Added `OpenMap::initial_value{,_mut}` for retrieving a map's initial value
+- Added `replace` functionality to `Xdp` type
+- Added low-level `consume_raw` and `poll_raw` methods to `RingBuffer` type
+- Added `recursion_misses` attribute to `query::ProgramInfo` type
+- Added `AsRawLibbpf` impl for `OpenProgram`
+- Fixed incorrect inference of `btf::types::MemberAttr::BitField` variant
+- Fixed examples not building on non-x86 architectures
+- Fixed potentially missing padding byte initialization on some target
+ architectures
+- Fixed compilation issues caused by mismatching function signatures in certain
+ cross-compilation contexts
+- Updated `libbpf-sys` dependency to `1.4.0`
+- Bumped minimum Rust version to `1.71`
+
+
+0.22.1
+------
+- Introduced `Xdp` type for working with XDP programs
+- Fixed handling of autocreate maps with `Object` type
+
+
+0.22.0
+------
+- Reworked `Error` type:
+ - Replaced `enum` with data variants with `struct` hiding internal structure
+ - Added support for chaining of errors
+ - Overhauled how errors are displayed
+- Overhauled `query::ProgramInfo` and `query::ProgInfoIter` to make them more
+ readily usable
+- Added `Btf::from_vmlinux` constructor and adjusted `Btf::from_path` to work
+ with both raw and ELF files
+- Reworked `ObjectBuilder`:
+ - Made `name` method fallible
+ - Adjusted `opts` to return a reference to `libbpf_sys::bpf_object_open_opts`
+ - Removed object name argument from `open_memory` constructor
+ - Added `pin_root_path` setter
+- Added `AsRawLibbpf` trait as a unified way to retrieve `libbpf` equivalents
+ for `libbpf-rs` objects
+- Added `Map::update_batch` method
+- Implemented `Send` for `Link`
+- Bumped minimum Rust version to `1.65`
+- Updated `bitflags` dependency to `2.0`
+
+
+0.21.2
+------
+- Enabled key iteration on `MapHandle` objects (formerly possible only on `Map`
+ objects)
+- Bumped minimum Rust version to `1.64`
+
+
+0.21.1
+------
+- Fixed build failures on 32 bit x86 and aarch32
+
+
+0.21.0
+------
+- Added `TcHook::get_handle` and `TcHook::get_priority` methods for restoring a
+  `TcHook` object
+- Added `Program::get_fd_by_id` and `Program::get_id_by_fd` methods for restoring
+ bpf management data
+- Added `Map::is_pinned` and `Map::get_pin_path` methods for getting map pin status
+- Added `Program::attach_iter` for attaching programs to an iterator
+- Added `Map::delete_batch` method for bulk deletion of elements
+- Added read/update/delete support for queue and stack `Map` types
+- Added a new `MapHandle` which provides most functionality previously found in
+ `Map`
+- Removed support for creating `Map` objects standalone (i.e. maps not created
+ by libbpf)
+- Removed various `<object-type>::fd()` methods in favor of
+ `<object-type>::as_fd()`
+- Improved `btf_type_match!` macro, adding support for most of Rust's `match`
+ capabilities
+- Added `skel` module exposing skeleton related traits
+- Fixed issue where instances of `Map` created or opened without going through
+ `Object` would leak file descriptors
+- Fixed potential Uprobe attachment failures on optimized builds caused by
+ improper `libbpf_sys::bpf_object_open_opts` object initialization
+- Adjusted various methods to work with `BorrowedFd` instead of raw file
+ descriptors
+- Made `RingBufferBuilder::add` enforce that `self` cannot outlive the maps
+ passed into it
+- Adjusted `Error::System` variant textual representation to include `errno`
+ string
+
+
+0.20.1
+------
+- Added bindings for BTF via newly introduced `btf` module
+- Added `Map` constructors from pinned paths and from map id
+- Added `Map::as_libbpf_bpf_map_ptr` and `Object::as_libbpf_bpf_object_ptr`
+ accessors
+- Added `MapInfo` type as a convenience wrapper around `bpf_map_info`
+ - Added `Map::info` to `Map` to make it easier to derive `MapInfo` from a
+ `Map` instance
+- Added `set_log_level`, `log_level`, and `autoload` methods to `OpenProgram`
+- Removed deprecated `Link::get_fd` method
+- Bumped minimum Rust version to `1.63`
+
+
+0.20.0
+------
+- Added support for USDT probes
+- Added BPF linker support with new `Linker` type
+- Added `Program::attach_uprobe_with_opts` for attaching Uprobes with additional
+ options
+- Added `tproxy` example
+- Added option to `RingBuffer::poll` to block indefinitely
+- Added support for querying BPF program type using `OpenProgram::prog_type`
+- Added support for retrieving a BPF program's instructions using
+ `OpenProgram::insns` & `Program::insns`
+- Added `MapType::is_supported`, `ProgramType::is_supported`, and
+ `ProgramType::is_helper_supported` methods
+- Added `PerfBuffer::as_libbpf_perf_buffer_ptr` to access underlying
+ `libbpf-sys` object
+- Adjusted various `Map` methods to work on shared receivers
+- Fixed `Link::open` constructor to be a static method
+- Fixed unsoundness in skeleton logic caused by aliased `Box` contents
+- Implemented `Send` for `PerfBuffer` and `RingBuffer`
+- Made more types implement `Clone` and `Debug`
+- Run leak sanitizer in CI
+- Updated various dependencies
+
+
+0.19.1
+------
+- Initial documented release
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..629293d
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,123 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+rust-version = "1.71"
+name = "libbpf-rs"
+version = "0.24.2"
+authors = [
+ "Daniel Xu <[email protected]>",
+ "Daniel Müller <[email protected]>",
+]
+build = "build.rs"
+autobins = false
+autoexamples = false
+autotests = false
+autobenches = false
+description = "libbpf-rs is a safe, idiomatic, and opinionated wrapper around libbpf-sys"
+homepage = "https://github.com/libbpf/libbpf-rs"
+readme = "README.md"
+keywords = [
+ "bpf",
+ "ebpf",
+ "libbpf",
+]
+license = "LGPL-2.1-only OR BSD-2-Clause"
+repository = "https://github.com/libbpf/libbpf-rs"
+
+[lib]
+name = "libbpf_rs"
+path = "src/lib.rs"
+
+[[test]]
+name = "test"
+path = "tests/test.rs"
+
+[[test]]
+name = "test_print"
+path = "tests/test_print.rs"
+
+[[test]]
+name = "test_tc"
+path = "tests/test_tc.rs"
+
+[[test]]
+name = "test_xdp"
+path = "tests/test_xdp.rs"
+
+[dependencies.bitflags]
+version = "2.0"
+
+[dependencies.libbpf-sys]
+version = "1.4.1"
+default-features = false
+
+[dependencies.libc]
+version = "0.2"
+
+[dependencies.vsprintf]
+version = "2.0"
+
+[dev-dependencies._cc_unused]
+version = "1.0.3"
+package = "cc"
+
+[dev-dependencies._pkg-config_unused]
+version = "0.3.3"
+package = "pkg-config"
+
+[dev-dependencies.log]
+version = "0.4.4"
+
+[dev-dependencies.memmem]
+version = "0.1.1"
+
+[dev-dependencies.plain]
+version = "0.2.3"
+
+[dev-dependencies.probe]
+version = "0.3"
+
+[dev-dependencies.scopeguard]
+version = "1.1"
+
+[dev-dependencies.serial_test]
+version = "3.0"
+default-features = false
+
+[dev-dependencies.tempfile]
+version = "3.3"
+
+[dev-dependencies.test-tag]
+version = "0.1"
+
+[build-dependencies.libbpf-sys]
+version = "1.4.1"
+optional = true
+default-features = false
+
+[build-dependencies.tempfile]
+version = "3.3"
+optional = true
+
+[features]
+default = ["libbpf-sys/vendored-libbpf"]
+dont-generate-test-files = []
+generate-test-files = [
+ "libbpf-sys/vendored-libbpf",
+ "dep:tempfile",
+]
+static = ["libbpf-sys/static"]
+vendored = ["libbpf-sys/vendored"]
+
+[badges.maintenance]
+status = "actively-developed"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
new file mode 100644
index 0000000..a4cf5fb
--- /dev/null
+++ b/Cargo.toml.orig
@@ -0,0 +1,61 @@
+[package]
+name = "libbpf-rs"
+description = "libbpf-rs is a safe, idiomatic, and opinionated wrapper around libbpf-sys"
+repository = "https://github.com/libbpf/libbpf-rs"
+homepage = "https://github.com/libbpf/libbpf-rs"
+readme = "README.md"
+version = "0.24.2"
+authors = ["Daniel Xu <[email protected]>", "Daniel Müller <[email protected]>"]
+edition = "2021"
+rust-version = "1.71"
+license = "LGPL-2.1-only OR BSD-2-Clause"
+keywords = ["bpf", "ebpf", "libbpf"]
+
+[badges]
+maintenance = { status = "actively-developed" }
+
+[features]
+# By default the crate uses a vendored libbpf, but requires other necessary libs
+# to be present on the system.
+default = ["libbpf-sys/vendored-libbpf"]
+# Link all required libraries statically.
+static = ["libbpf-sys/static"]
+# Use vendored versions of all required libraries.
+vendored = ["libbpf-sys/vendored"]
+
+# Below here are dev-mostly features that should not be needed by
+# regular users.
+
+# Enable this feature to opt in to the generation of unit test files.
+# Having these test files created is necessary for running tests.
+generate-test-files = ["libbpf-sys/vendored-libbpf", "dep:tempfile"]
+# Disable generation of test files. This feature takes preference over
+# `generate-test-files`.
+dont-generate-test-files = []
+
+[dependencies]
+bitflags = "2.0"
+libbpf-sys = { version = "1.4.1", default-features = false }
+libc = "0.2"
+vsprintf = "2.0"
+
+[build-dependencies]
+libbpf-sys = { version = "1.4.1", default-features = false, optional = true }
+tempfile = { version = "3.3", optional = true }
+
+[dev-dependencies]
+libbpf-rs = {path = ".", features = ["generate-test-files"]}
+log = "0.4.4"
+memmem = "0.1.1"
+plain = "0.2.3"
+probe = "0.3"
+scopeguard = "1.1"
+serial_test = { version = "3.0", default-features = false }
+tempfile = "3.3"
+test-tag = "0.1"
+
+# A set of unused dependencies that we require to force correct minimum versions
+# of transitive dependencies, for cases where our dependencies have incorrect
+# dependency specifications themselves.
+_cc_unused = { package = "cc", version = "1.0.3" }
+_pkg-config_unused = { package = "pkg-config", version = "0.3.3" }
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..199b4df
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) <year> <owner> . All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/LICENSE.BSD-2-Clause b/LICENSE.BSD-2-Clause
new file mode 100644
index 0000000..da366e2
--- /dev/null
+++ b/LICENSE.BSD-2-Clause
@@ -0,0 +1,32 @@
+Valid-License-Identifier: BSD-2-Clause
+SPDX-URL: https://spdx.org/licenses/BSD-2-Clause.html
+Usage-Guide:
+ To use the BSD 2-clause "Simplified" License put the following SPDX
+ tag/value pair into a comment according to the placement guidelines in
+ the licensing rules documentation:
+ SPDX-License-Identifier: BSD-2-Clause
+License-Text:
+
+Copyright (c) <year> <owner> . All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..1b609cf
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,20 @@
+name: "libbpf-rs"
+description: "libbpf-rs is a safe, idiomatic, and opinionated wrapper around libbpf-sys"
+third_party {
+ identifier {
+ type: "crates.io"
+ value: "libbpf-rs"
+ }
+ identifier {
+ type: "Archive"
+ value: "https://static.crates.io/crates/libbpf-rs/libbpf-rs-0.24.2.crate"
+ primary_source: true
+ }
+ version: "0.24.2"
+ license_type: NOTICE
+ last_upgrade_date {
+ year: 2024
+ month: 8
+ day: 22
+ }
+}
diff --git a/MODULE_LICENSE_BSD_LIKE b/MODULE_LICENSE_BSD_LIKE
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_BSD_LIKE
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..1e32b04
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,3 @@
+# Bug component: 688011
+include platform/prebuilts/rust:main:/OWNERS
+include platform/system/bpf:main:/OWNERS_bpf
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..8df2f52
--- /dev/null
+++ b/README.md
@@ -0,0 +1,30 @@
+[CI](https://github.com/libbpf/libbpf-rs/actions/workflows/test.yml)
+[Rust 1.71+](https://blog.rust-lang.org/2023/07/13/Rust-1.71.0.html)
+
+# libbpf-rs
+
+[crates.io](https://crates.io/crates/libbpf-rs)
+
+Idiomatic Rust wrapper around [libbpf](https://github.com/libbpf/libbpf).
+
+- [Changelog](CHANGELOG.md)
+
+To use it in your project, add the following to your `Cargo.toml`:
+```toml
+[dependencies]
+libbpf-rs = "0.24"
+```
+
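+A minimal usage sketch (hedged: the object file path and the printed program
+names are illustrative and assume a separately compiled BPF object file):
+
+```rust
+use libbpf_rs::ObjectBuilder;
+
+fn main() -> Result<(), libbpf_rs::Error> {
+    // Open, then load, a compiled BPF object file.
+    let open_obj = ObjectBuilder::default().open_file("prog.bpf.o")?;
+    let obj = open_obj.load()?;
+    // Iterate over the programs contained in the object.
+    for prog in obj.progs() {
+        println!("found program: {:?}", prog.name());
+    }
+    Ok(())
+}
+```
+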
+See [full documentation here](https://docs.rs/libbpf-rs).
+
+This crate adheres to Cargo's [semantic versioning rules][cargo-semver]. At a
+minimum, it builds with the most recent Rust stable release minus five minor
+versions ("N - 5"). E.g., assuming the most recent Rust stable is `1.68`, the
+crate is guaranteed to build with `1.63` and higher.
+
+## Contributing
+
+We welcome all contributions! Please see the [contributor's
+guide](../CONTRIBUTING.md) for more information.
+
+[cargo-semver]: https://doc.rust-lang.org/cargo/reference/resolver.html#semver-compatibility
diff --git a/build.rs b/build.rs
new file mode 100644
index 0000000..6b38454
--- /dev/null
+++ b/build.rs
@@ -0,0 +1,225 @@
+#![allow(clippy::let_unit_value)]
+
+use std::env;
+use std::env::consts::ARCH;
+use std::ffi::OsStr;
+use std::fs::read_dir;
+use std::io::Error;
+use std::io::ErrorKind;
+use std::io::Result;
+use std::ops::Deref as _;
+use std::path::Path;
+use std::process::Command;
+use std::process::Stdio;
+
+
+/// Format a command with the given list of arguments as a string.
+fn format_command<C, A, S>(command: C, args: A) -> String
+where
+ C: AsRef<OsStr>,
+ A: IntoIterator<Item = S>,
+ S: AsRef<OsStr>,
+{
+ args.into_iter().fold(
+ command.as_ref().to_string_lossy().into_owned(),
+ |mut cmd, arg| {
+ cmd += " ";
+ cmd += arg.as_ref().to_string_lossy().deref();
+ cmd
+ },
+ )
+}
+
+/// Run a command with the provided arguments.
+fn run<C, A, S>(command: C, args: A) -> Result<()>
+where
+ C: AsRef<OsStr>,
+ A: IntoIterator<Item = S> + Clone,
+ S: AsRef<OsStr>,
+{
+ let instance = Command::new(command.as_ref())
+ .stdin(Stdio::null())
+ .stdout(Stdio::null())
+ .env_clear()
+ .envs(env::vars().filter(|(k, _)| k == "PATH"))
+ .args(args.clone())
+ .output()
+ .map_err(|err| {
+ Error::new(
+ ErrorKind::Other,
+ format!(
+ "failed to run `{}`: {err}",
+ format_command(command.as_ref(), args.clone())
+ ),
+ )
+ })?;
+
+ if !instance.status.success() {
+ let code = if let Some(code) = instance.status.code() {
+ format!(" ({code})")
+ } else {
+ " (terminated by signal)".to_string()
+ };
+
+ let stderr = String::from_utf8_lossy(&instance.stderr);
+ let stderr = stderr.trim_end();
+ let stderr = if !stderr.is_empty() {
+ format!(": {stderr}")
+ } else {
+ String::new()
+ };
+
+ Err(Error::new(
+ ErrorKind::Other,
+ format!(
+ "`{}` reported non-zero exit-status{code}{stderr}",
+ format_command(command, args)
+ ),
+ ))
+ } else {
+ Ok(())
+ }
+}
+
+fn adjust_mtime(path: &Path) -> Result<()> {
+ // Note that `OUT_DIR` is only present at runtime.
+ let out_dir = env::var("OUT_DIR").unwrap();
+ // The $OUT_DIR/output file is (in current versions of Cargo [as of
+ // 1.69]) the file containing the reference time stamp that Cargo
+ // checks to determine whether something is considered outdated and
+ // in need to be rebuild. It's an implementation detail, yes, but we
+ // don't rely on it for anything essential.
+ let output = Path::new(&out_dir)
+ .parent()
+ .ok_or_else(|| Error::new(ErrorKind::Other, "OUT_DIR has no parent"))?
+ .join("output");
+
+ if !output.exists() {
+ // The file may not exist for legitimate reasons, e.g., when we
+        // build for the very first time. If there is no reference, there
+        // is nothing for us to do, so just bail.
+ return Ok(())
+ }
+
+ let () = run(
+ "touch",
+ [
+ "-m".as_ref(),
+ "--reference".as_ref(),
+ output.as_os_str(),
+ path.as_os_str(),
+ ],
+ )?;
+ Ok(())
+}
+
+/// Compile `src` into `dst` using the provided compiler.
+fn compile(compiler: &str, src: &Path, dst: &Path, options: &[&str]) {
+ let dst = src.with_file_name(dst);
+ println!("cargo:rerun-if-changed={}", src.display());
+ println!("cargo:rerun-if-changed={}", dst.display());
+
+ let () = run(
+ compiler,
+ options
+ .iter()
+ .map(OsStr::new)
+ .chain([src.as_os_str(), "-o".as_ref(), dst.as_os_str()]),
+ )
+ .unwrap_or_else(|err| panic!("failed to run `{compiler}`: {err}"));
+
+ let () = adjust_mtime(&dst).unwrap();
+}
+
+/// Extract vendored libbpf header files into a directory.
+#[cfg(feature = "generate-test-files")]
+fn extract_libbpf_headers(target_dir: &Path) {
+ use std::fs;
+ use std::fs::OpenOptions;
+ use std::io::Write;
+
+ let dir = target_dir.join("bpf");
+ let () = fs::create_dir_all(&dir).unwrap();
+ for (filename, contents) in libbpf_sys::API_HEADERS.iter() {
+ let path = dir.as_path().join(filename);
+ let mut file = OpenOptions::new()
+ .write(true)
+ .create(true)
+ .truncate(true)
+ .open(path)
+ .unwrap();
+ file.write_all(contents.as_bytes()).unwrap();
+ }
+}
+
+#[cfg(feature = "generate-test-files")]
+fn with_bpf_headers<F>(f: F)
+where
+ F: FnOnce(&Path),
+{
+ use tempfile::tempdir;
+
+ let header_parent_dir = tempdir().unwrap();
+ let () = extract_libbpf_headers(header_parent_dir.path());
+ let () = f(header_parent_dir.path());
+}
+
+#[cfg(not(feature = "generate-test-files"))]
+fn with_bpf_headers<F>(_f: F)
+where
+ F: FnOnce(&Path),
+{
+ unimplemented!()
+}
+
+/// Prepare the various test files.
+fn prepare_test_files(crate_root: &Path) {
+ let bin_dir = crate_root.join("tests").join("bin");
+ let src_dir = bin_dir.join("src");
+ let include = crate_root.join("../vmlinux/include").join(ARCH);
+
+ with_bpf_headers(|bpf_hdr_dir| {
+ for result in read_dir(&src_dir).unwrap() {
+ let entry = result.unwrap();
+ let src = entry.file_name();
+ let obj = Path::new(&src).with_extension("o");
+ let src = src_dir.join(&src);
+ let dst = bin_dir.join(obj);
+ let arch = option_env!("CARGO_CFG_TARGET_ARCH").unwrap_or(ARCH);
+ let arch = match arch {
+ "x86_64" => "x86",
+ "aarch64" => "arm64",
+ "powerpc64" => "powerpc",
+ "s390x" => "s390",
+ x => x,
+ };
+
+ compile(
+ "clang",
+ &src,
+ &dst,
+ &[
+ "-g",
+ "-O2",
+ "-target",
+ "bpf",
+ "-c",
+ "-I",
+ include.to_str().unwrap(),
+ "-I",
+ &format!("{}", bpf_hdr_dir.display()),
+ "-D",
+ &format!("__TARGET_ARCH_{arch}"),
+ ],
+ );
+ }
+ })
+}
+
+fn main() {
+ let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
+
+ if cfg!(feature = "generate-test-files") && !cfg!(feature = "dont-generate-test-files") {
+ prepare_test_files(crate_dir.as_ref());
+ }
+}
diff --git a/src/btf/mod.rs b/src/btf/mod.rs
new file mode 100644
index 0000000..088a05e
--- /dev/null
+++ b/src/btf/mod.rs
@@ -0,0 +1,764 @@
+//! Parse and introspect btf information, from files or loaded objects.
+//!
+//! To find a specific type you can use one of 3 methods
+//!
+//! - [Btf::type_by_name]
+//! - [Btf::type_by_id]
+//! - [Btf::type_by_kind]
+//!
+//! All of these are generic over `K`, which is any type that can be created from a [`BtfType`].
+//! For all of these methods, not finding any type matching the passed parameter, or finding a
+//! type of another [`BtfKind`], will result in [`None`] being returned (or the type being
+//! filtered out in the case of [`Btf::type_by_kind`]). If you want to get a type independently
+//! of its kind, just make sure `K` binds to [`BtfType`].
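+//!
+//! As a brief, hedged sketch (assuming the running kernel exposes vmlinux BTF; the
+//! looked-up type name is illustrative), finding a struct by name could look like this:
+//!
+//! ```no_run
+//! use libbpf_rs::btf::types::Struct;
+//! use libbpf_rs::btf::Btf;
+//!
+//! let btf = Btf::from_vmlinux().unwrap();
+//! // `K` binds to `Struct`, so a type with this name but another kind would yield `None`.
+//! let task: Struct<'_> = btf.type_by_name("task_struct").unwrap();
+//! ```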
+
+pub mod types;
+
+use std::ffi::CStr;
+use std::ffi::CString;
+use std::ffi::OsStr;
+use std::fmt;
+use std::fmt::Debug;
+use std::fmt::Display;
+use std::fmt::Formatter;
+use std::fmt::Result as FmtResult;
+use std::io;
+use std::marker::PhantomData;
+use std::mem::size_of;
+use std::num::NonZeroUsize;
+use std::ops::Deref;
+use std::os::raw::c_ulong;
+use std::os::raw::c_void;
+use std::os::unix::prelude::AsRawFd;
+use std::os::unix::prelude::FromRawFd;
+use std::os::unix::prelude::OsStrExt;
+use std::os::unix::prelude::OwnedFd;
+use std::path::Path;
+use std::ptr;
+use std::ptr::NonNull;
+
+use crate::util::parse_ret_i32;
+use crate::util::validate_bpf_ret;
+use crate::AsRawLibbpf;
+use crate::Error;
+use crate::ErrorExt as _;
+use crate::Result;
+
+use self::types::Composite;
+
+/// The various btf types.
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+#[repr(u32)]
+pub enum BtfKind {
+ /// [Void](types::Void)
+ Void = 0,
+ /// [Int](types::Int)
+ Int,
+ /// [Ptr](types::Ptr)
+ Ptr,
+ /// [Array](types::Array)
+ Array,
+ /// [Struct](types::Struct)
+ Struct,
+ /// [Union](types::Union)
+ Union,
+ /// [Enum](types::Enum)
+ Enum,
+ /// [Fwd](types::Fwd)
+ Fwd,
+ /// [Typedef](types::Typedef)
+ Typedef,
+ /// [Volatile](types::Volatile)
+ Volatile,
+ /// [Const](types::Const)
+ Const,
+ /// [Restrict](types::Restrict)
+ Restrict,
+ /// [Func](types::Func)
+ Func,
+ /// [FuncProto](types::FuncProto)
+ FuncProto,
+ /// [Var](types::Var)
+ Var,
+ /// [DataSec](types::DataSec)
+ DataSec,
+ /// [Float](types::Float)
+ Float,
+ /// [DeclTag](types::DeclTag)
+ DeclTag,
+ /// [TypeTag](types::TypeTag)
+ TypeTag,
+ /// [Enum64](types::Enum64)
+ Enum64,
+}
+
+impl TryFrom<u32> for BtfKind {
+ type Error = u32;
+
+ fn try_from(value: u32) -> Result<Self, Self::Error> {
+ use BtfKind::*;
+
+ Ok(match value {
+ x if x == Void as u32 => Void,
+ x if x == Int as u32 => Int,
+ x if x == Ptr as u32 => Ptr,
+ x if x == Array as u32 => Array,
+ x if x == Struct as u32 => Struct,
+ x if x == Union as u32 => Union,
+ x if x == Enum as u32 => Enum,
+ x if x == Fwd as u32 => Fwd,
+ x if x == Typedef as u32 => Typedef,
+ x if x == Volatile as u32 => Volatile,
+ x if x == Const as u32 => Const,
+ x if x == Restrict as u32 => Restrict,
+ x if x == Func as u32 => Func,
+ x if x == FuncProto as u32 => FuncProto,
+ x if x == Var as u32 => Var,
+ x if x == DataSec as u32 => DataSec,
+ x if x == Float as u32 => Float,
+ x if x == DeclTag as u32 => DeclTag,
+ x if x == TypeTag as u32 => TypeTag,
+ x if x == Enum64 as u32 => Enum64,
+ v => return Err(v),
+ })
+ }
+}
+
+/// The id of a btf type.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct TypeId(u32);
+
+impl From<u32> for TypeId {
+ fn from(s: u32) -> Self {
+ Self(s)
+ }
+}
+
+impl From<TypeId> for u32 {
+ fn from(t: TypeId) -> Self {
+ t.0
+ }
+}
+
+impl Display for TypeId {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", self.0)
+ }
+}
+
+#[derive(Debug)]
+enum DropPolicy {
+ Nothing,
+ SelfPtrOnly,
+ ObjPtr(*mut libbpf_sys::bpf_object),
+}
+
+/// The btf information of a bpf object.
+///
+/// The lifetime bound protects against this object outliving its source. This can happen when it
+/// was derived from an [`Object`](super::Object), which owns the data this struct points to. When
+/// instead the [`Btf::from_path`] method is used, the lifetime will be `'static` since it doesn't
+/// borrow from anything.
+pub struct Btf<'source> {
+ ptr: NonNull<libbpf_sys::btf>,
+ drop_policy: DropPolicy,
+ _marker: PhantomData<&'source ()>,
+}
+
+impl Btf<'static> {
+    /// Load the btf information from the specified path.
+ pub fn from_path<P: AsRef<Path>>(path: P) -> Result<Self> {
+ fn inner(path: &Path) -> Result<Btf<'static>> {
+ let path = CString::new(path.as_os_str().as_bytes()).map_err(|_| {
+ Error::with_invalid_data(format!("invalid path {path:?}, has null bytes"))
+ })?;
+ let ptr = unsafe { libbpf_sys::btf__parse(path.as_ptr(), ptr::null_mut()) };
+ let ptr = validate_bpf_ret(ptr).context("failed to parse BTF information")?;
+ Ok(Btf {
+ ptr,
+ drop_policy: DropPolicy::SelfPtrOnly,
+ _marker: PhantomData,
+ })
+ }
+ inner(path.as_ref())
+ }
+
+    /// Load the vmlinux btf information from a few well-known locations.
+ pub fn from_vmlinux() -> Result<Self> {
+ let ptr = unsafe { libbpf_sys::btf__load_vmlinux_btf() };
+ let ptr = validate_bpf_ret(ptr).context("failed to load BTF from vmlinux")?;
+
+ Ok(Btf {
+ ptr,
+ drop_policy: DropPolicy::SelfPtrOnly,
+ _marker: PhantomData,
+ })
+ }
+
+    /// Load the btf information of a bpf object from a program id.
+ pub fn from_prog_id(id: u32) -> Result<Self> {
+ let fd = parse_ret_i32(unsafe { libbpf_sys::bpf_prog_get_fd_by_id(id) })?;
+ let fd = unsafe {
+ // SAFETY: parse_ret_i32 will check that this fd is above -1
+ OwnedFd::from_raw_fd(fd)
+ };
+ let mut info = libbpf_sys::bpf_prog_info::default();
+ parse_ret_i32(unsafe {
+ libbpf_sys::bpf_obj_get_info_by_fd(
+ fd.as_raw_fd(),
+ (&mut info as *mut libbpf_sys::bpf_prog_info).cast::<c_void>(),
+ &mut (size_of::<libbpf_sys::bpf_prog_info>() as u32),
+ )
+ })?;
+
+ let ptr = unsafe { libbpf_sys::btf__load_from_kernel_by_id(info.btf_id) };
+ let ptr = validate_bpf_ret(ptr).context("failed to load BTF from kernel")?;
+
+ Ok(Self {
+ ptr,
+ drop_policy: DropPolicy::SelfPtrOnly,
+ _marker: PhantomData,
+ })
+ }
+}
+
+impl<'btf> Btf<'btf> {
+ /// Create a new `Btf` instance from the given [`libbpf_sys::bpf_object`].
+ pub fn from_bpf_object(obj: &'btf libbpf_sys::bpf_object) -> Result<Option<Self>> {
+ Self::from_bpf_object_raw(obj)
+ }
+
+ fn from_bpf_object_raw(obj: *const libbpf_sys::bpf_object) -> Result<Option<Self>> {
+ let ptr = unsafe {
+ // SAFETY: the obj pointer is valid since it's behind a reference.
+ libbpf_sys::bpf_object__btf(obj)
+ };
+ // Contrary to general `libbpf` contract, `bpf_object__btf` may
+ // return `NULL` without setting `errno`.
+ if ptr.is_null() {
+ return Ok(None)
+ }
+ let ptr = validate_bpf_ret(ptr).context("failed to create BTF from BPF object")?;
+ let slf = Self {
+ ptr,
+ drop_policy: DropPolicy::Nothing,
+ _marker: PhantomData,
+ };
+ Ok(Some(slf))
+ }
+
+    /// Create a [`Btf`] instance from raw bytes representing an object file.
+ pub fn from_raw(name: &'btf str, object_file: &'btf [u8]) -> Result<Option<Self>> {
+ let cname = CString::new(name)
+ .map_err(|_| Error::with_invalid_data(format!("invalid path {name:?}, has null bytes")))
+ .unwrap();
+
+ let obj_opts = libbpf_sys::bpf_object_open_opts {
+ sz: size_of::<libbpf_sys::bpf_object_open_opts>() as libbpf_sys::size_t,
+ object_name: cname.as_ptr(),
+ ..Default::default()
+ };
+
+ let ptr = unsafe {
+ libbpf_sys::bpf_object__open_mem(
+ object_file.as_ptr() as *const c_void,
+ object_file.len() as c_ulong,
+ &obj_opts,
+ )
+ };
+
+ let mut bpf_obj = validate_bpf_ret(ptr).context("failed to open BPF object from memory")?;
+ // SAFETY: The pointer has been validated.
+ let bpf_obj = unsafe { bpf_obj.as_mut() };
+ match Self::from_bpf_object_raw(bpf_obj) {
+ Ok(Some(this)) => Ok(Some(Self {
+ drop_policy: DropPolicy::ObjPtr(bpf_obj),
+ ..this
+ })),
+ x => {
+ // SAFETY: The obj pointer is valid because we checked
+ // its validity.
+ unsafe {
+ // We free it here, otherwise it will be a memory
+ // leak as this codepath (Ok(None) | Err(e)) does
+ // not reference it anymore and as such it can be
+ // dropped.
+ libbpf_sys::bpf_object__close(bpf_obj)
+ };
+ x
+ }
+ }
+ }
+
+ /// Gets a string at a given offset.
+ ///
+ /// Returns [`None`] when the offset is out of bounds or if the name is empty.
+ fn name_at(&self, offset: u32) -> Option<&OsStr> {
+ let name = unsafe {
+ // SAFETY:
+ // Assuming that btf is a valid pointer, this is always okay to call.
+ libbpf_sys::btf__name_by_offset(self.ptr.as_ptr(), offset)
+ };
+ NonNull::new(name as *mut _)
+ .map(|p| unsafe {
+ // SAFETY: a non-null pointer coming from libbpf is always valid
+ OsStr::from_bytes(CStr::from_ptr(p.as_ptr()).to_bytes())
+ })
+ .filter(|s| !s.is_empty()) // treat empty strings as none
+ }
+
+ /// Whether this btf instance has no types.
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// The number of [BtfType]s in this object.
+ pub fn len(&self) -> usize {
+ unsafe {
+ // SAFETY: the btf pointer is valid.
+ libbpf_sys::btf__type_cnt(self.ptr.as_ptr()) as usize
+ }
+ }
+
+ /// The btf pointer size.
+ pub fn ptr_size(&self) -> Result<NonZeroUsize> {
+ let sz = unsafe { libbpf_sys::btf__pointer_size(self.ptr.as_ptr()) as usize };
+ NonZeroUsize::new(sz).ok_or_else(|| {
+ Error::with_io_error(io::ErrorKind::Other, "could not determine pointer size")
+ })
+ }
+
+ /// Find a btf type by name
+ ///
+ /// # Panics
+ /// If `name` has null bytes.
+ pub fn type_by_name<'s, K>(&'s self, name: &str) -> Option<K>
+ where
+ K: TryFrom<BtfType<'s>>,
+ {
+ let c_string = CString::new(name)
+ .map_err(|_| Error::with_invalid_data(format!("{name:?} contains null bytes")))
+ .unwrap();
+ let ty = unsafe {
+ // SAFETY: the btf pointer is valid and the c_string pointer was created from safe code
+ // therefore it's also valid.
+ libbpf_sys::btf__find_by_name(self.ptr.as_ptr(), c_string.as_ptr())
+ };
+ if ty < 0 {
+ None
+ } else {
+ self.type_by_id(TypeId(ty as _))
+ }
+ }
+
+    /// Find a type by its [TypeId].
+ pub fn type_by_id<'s, K>(&'s self, type_id: TypeId) -> Option<K>
+ where
+ K: TryFrom<BtfType<'s>>,
+ {
+ let btf_type = unsafe {
+ // SAFETY: the btf pointer is valid.
+ libbpf_sys::btf__type_by_id(self.ptr.as_ptr(), type_id.0)
+ };
+
+ let btf_type = NonNull::new(btf_type as *mut libbpf_sys::btf_type)?;
+
+ let ty = unsafe {
+ // SAFETY: if it is non-null then it points to a valid type.
+ btf_type.as_ref()
+ };
+
+ let name = self.name_at(ty.name_off);
+
+ BtfType {
+ type_id,
+ name,
+ source: self,
+ ty,
+ }
+ .try_into()
+ .ok()
+ }
+
+ /// Find all types of a specific type kind.
+ pub fn type_by_kind<'s, K>(&'s self) -> impl Iterator<Item = K> + 's
+ where
+ K: TryFrom<BtfType<'s>>,
+ {
+ (1..self.len() as u32)
+ .map(TypeId::from)
+ .filter_map(|id| self.type_by_id(id))
+ .filter_map(|t| K::try_from(t).ok())
+ }
+}
+
+impl AsRawLibbpf for Btf<'_> {
+ type LibbpfType = libbpf_sys::btf;
+
+ /// Retrieve the underlying [`libbpf_sys::btf`] object.
+ fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
+ self.ptr
+ }
+}
+
+impl Debug for Btf<'_> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+ struct BtfDumper<'btf>(&'btf Btf<'btf>);
+
+ impl Debug for BtfDumper<'_> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+ f.debug_list()
+ .entries(
+ (1..self.0.len())
+ .map(|i| TypeId::from(i as u32))
+ // SANITY: A type with this ID should always exist
+ // given that BTF IDs are fully populated up
+ // to `len`. Conversion to `BtfType` is
+ // always infallible.
+ .map(|id| self.0.type_by_id::<BtfType<'_>>(id).unwrap()),
+ )
+ .finish()
+ }
+ }
+
+ f.debug_tuple("Btf<'_>").field(&BtfDumper(self)).finish()
+ }
+}
+
+impl Drop for Btf<'_> {
+ fn drop(&mut self) {
+ match self.drop_policy {
+ DropPolicy::Nothing => {}
+ DropPolicy::SelfPtrOnly => {
+ unsafe {
+ // SAFETY: the btf pointer is valid.
+ libbpf_sys::btf__free(self.ptr.as_ptr())
+ }
+ }
+ DropPolicy::ObjPtr(obj) => {
+ unsafe {
+ // SAFETY: the bpf obj pointer is valid.
+ // closing the obj automatically frees the associated btf object.
+ libbpf_sys::bpf_object__close(obj)
+ }
+ }
+ }
+ }
+}
+
+/// An undiscriminated btf type
+///
+/// The [`btf_type_match`](crate::btf_type_match) macro can be used to match on the variants of
+/// this type as if it were a Rust enum.
+///
+/// You can also use the [`TryFrom`] trait to convert to any of the possible [`types`].
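+///
+/// A rough, hedged sketch of matching on a type's kind (the type id used for the
+/// lookup is arbitrary and purely illustrative):
+///
+/// ```no_run
+/// # use libbpf_rs::btf::{Btf, BtfType};
+/// # use libbpf_rs::btf_type_match;
+/// # let btf = Btf::from_vmlinux().unwrap();
+/// # let ty: BtfType<'_> = btf.type_by_id(1u32.into()).unwrap();
+/// btf_type_match!(match ty {
+///     BtfKind::Int(i) => println!("an int of {} bits", i.bits),
+///     BtfKind::Struct(s) => println!("a struct with {} members", s.len()),
+///     _ => println!("something else"),
+/// });
+/// ```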
+#[derive(Clone, Copy)]
+pub struct BtfType<'btf> {
+ type_id: TypeId,
+ name: Option<&'btf OsStr>,
+ source: &'btf Btf<'btf>,
+ /// the __bindgen_anon_1 field is a union defined as
+ /// ```no_run
+ /// union btf_type__bindgen_ty_1 {
+ /// size_: u32,
+ /// type_: u32,
+ /// }
+ /// ```
+ ty: &'btf libbpf_sys::btf_type,
+}
+
+impl Debug for BtfType<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("BtfType")
+ .field("type_id", &self.type_id)
+ .field("name", &self.name())
+ .field("source", &self.source.as_libbpf_object())
+ .field("ty", &(self.ty as *const _))
+ .finish()
+ }
+}
+
+impl<'btf> BtfType<'btf> {
+ /// This type's type id.
+ #[inline]
+ pub fn type_id(&self) -> TypeId {
+ self.type_id
+ }
+
+ /// This type's name.
+ #[inline]
+ pub fn name(&'_ self) -> Option<&'btf OsStr> {
+ self.name
+ }
+
+ /// This type's kind.
+ #[inline]
+ pub fn kind(&self) -> BtfKind {
+ ((self.ty.info >> 24) & 0x1f).try_into().unwrap()
+ }
+
+ #[inline]
+ fn vlen(&self) -> u32 {
+ self.ty.info & 0xffff
+ }
+
+ #[inline]
+ fn kind_flag(&self) -> bool {
+ (self.ty.info >> 31) == 1
+ }
+
+    /// Whether this represents a modifier.
+ #[inline]
+ pub fn is_mod(&self) -> bool {
+ matches!(
+ self.kind(),
+ BtfKind::Volatile | BtfKind::Const | BtfKind::Restrict | BtfKind::TypeTag
+ )
+ }
+
+ /// Whether this represents any kind of enum.
+ #[inline]
+ pub fn is_any_enum(&self) -> bool {
+ matches!(self.kind(), BtfKind::Enum | BtfKind::Enum64)
+ }
+
+    /// Whether this btf type is CO-RE compatible with `other`.
+ #[inline]
+ pub fn is_core_compat(&self, other: &Self) -> bool {
+ self.kind() == other.kind() || (self.is_any_enum() && other.is_any_enum())
+ }
+
+ /// Whether this type represents a composite type (struct/union).
+ #[inline]
+ pub fn is_composite(&self) -> bool {
+ matches!(self.kind(), BtfKind::Struct | BtfKind::Union)
+ }
+
+ /// The size of the described type.
+ ///
+ /// # Safety
+ ///
+ /// This function can only be called when the [`Self::kind`] returns one of:
+ /// - [`BtfKind::Int`],
+ /// - [`BtfKind::Float`],
+ /// - [`BtfKind::Enum`],
+ /// - [`BtfKind::Struct`],
+ /// - [`BtfKind::Union`],
+ /// - [`BtfKind::DataSec`],
+ /// - [`BtfKind::Enum64`],
+ #[inline]
+ unsafe fn size_unchecked(&self) -> u32 {
+ unsafe { self.ty.__bindgen_anon_1.size }
+ }
+
+ /// The [`TypeId`] of the referenced type.
+ ///
+ /// # Safety
+ /// This function can only be called when the [`Self::kind`] returns one of:
+ /// - [`BtfKind::Ptr`],
+ /// - [`BtfKind::Typedef`],
+ /// - [`BtfKind::Volatile`],
+ /// - [`BtfKind::Const`],
+ /// - [`BtfKind::Restrict`],
+ /// - [`BtfKind::Func`],
+ /// - [`BtfKind::FuncProto`],
+ /// - [`BtfKind::Var`],
+ /// - [`BtfKind::DeclTag`],
+ /// - [`BtfKind::TypeTag`],
+ #[inline]
+ unsafe fn referenced_type_id_unchecked(&self) -> TypeId {
+ unsafe { self.ty.__bindgen_anon_1.type_ }.into()
+ }
+
+ /// If this type implements [`ReferencesType`], returns the type it references.
+ pub fn next_type(&self) -> Option<Self> {
+ match self.kind() {
+ BtfKind::Ptr
+ | BtfKind::Typedef
+ | BtfKind::Volatile
+ | BtfKind::Const
+ | BtfKind::Restrict
+ | BtfKind::Func
+ | BtfKind::FuncProto
+ | BtfKind::Var
+ | BtfKind::DeclTag
+ | BtfKind::TypeTag => {
+ let tid = unsafe {
+ // SAFETY: we checked the kind
+ self.referenced_type_id_unchecked()
+ };
+ self.source.type_by_id(tid)
+ }
+
+ BtfKind::Void
+ | BtfKind::Int
+ | BtfKind::Array
+ | BtfKind::Struct
+ | BtfKind::Union
+ | BtfKind::Enum
+ | BtfKind::Fwd
+ | BtfKind::DataSec
+ | BtfKind::Float
+ | BtfKind::Enum64 => None,
+ }
+ }
+
+    /// Given a type, follows the referring type ids until it finds a type that isn't a modifier
+    /// or a [`BtfKind::Typedef`].
+ ///
+ /// See [is_mod](Self::is_mod).
+ pub fn skip_mods_and_typedefs(&self) -> Self {
+ let mut ty = *self;
+ loop {
+ if ty.is_mod() || ty.kind() == BtfKind::Typedef {
+ ty = ty.next_type().unwrap();
+ } else {
+ return ty;
+ }
+ }
+ }
+
+    /// Returns the alignment of this type. If this type points to some modifier or typedef,
+    /// those will be skipped until the underlying type (with an alignment) is found.
+ ///
+ /// See [skip_mods_and_typedefs](Self::skip_mods_and_typedefs).
+ pub fn alignment(&self) -> Result<NonZeroUsize> {
+ let skipped = self.skip_mods_and_typedefs();
+ match skipped.kind() {
+ BtfKind::Int => {
+ let ptr_size = skipped.source.ptr_size()?;
+ let int = types::Int::try_from(skipped).unwrap();
+ Ok(Ord::min(
+ ptr_size,
+ NonZeroUsize::new(((int.bits + 7) / 8).into()).unwrap(),
+ ))
+ }
+ BtfKind::Ptr => skipped.source.ptr_size(),
+ BtfKind::Array => types::Array::try_from(skipped)
+ .unwrap()
+ .contained_type()
+ .alignment(),
+ BtfKind::Struct | BtfKind::Union => {
+ let c = Composite::try_from(skipped).unwrap();
+ let mut align = NonZeroUsize::new(1usize).unwrap();
+ for m in c.iter() {
+ align = Ord::max(
+ align,
+ skipped
+ .source
+ .type_by_id::<Self>(m.ty)
+ .unwrap()
+ .alignment()?,
+ );
+ }
+
+ Ok(align)
+ }
+ BtfKind::Enum | BtfKind::Enum64 | BtfKind::Float => {
+ Ok(Ord::min(skipped.source.ptr_size()?, unsafe {
+ // SAFETY: We checked the type.
+                    // Unwrap: Enums in C always have size >= 1
+ NonZeroUsize::new_unchecked(skipped.size_unchecked() as usize)
+ }))
+ }
+ BtfKind::Var => {
+ let var = types::Var::try_from(skipped).unwrap();
+ var.source
+ .type_by_id::<Self>(var.referenced_type_id())
+ .unwrap()
+ .alignment()
+ }
+ BtfKind::DataSec => unsafe {
+ // SAFETY: We checked the type.
+ NonZeroUsize::new(skipped.size_unchecked() as usize)
+ }
+ .ok_or_else(|| Error::with_invalid_data("DataSec with size of 0")),
+ BtfKind::Void
+ | BtfKind::Volatile
+ | BtfKind::Const
+ | BtfKind::Restrict
+ | BtfKind::Typedef
+ | BtfKind::FuncProto
+ | BtfKind::Fwd
+ | BtfKind::Func
+ | BtfKind::DeclTag
+ | BtfKind::TypeTag => Err(Error::with_invalid_data(format!(
+ "Cannot get alignment of type with kind {:?}. TypeId is {}",
+ skipped.kind(),
+ skipped.type_id(),
+ ))),
+ }
+ }
+}
+
+/// Some btf types have a size field, describing their size.
+///
+/// # Safety
+///
+/// It's only safe to implement this for types where the underlying btf_type has a .size set.
+///
+/// See the [docs](https://www.kernel.org/doc/html/latest/bpf/btf.html) for a reference of which
+/// [`BtfKind`] can implement this trait.
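+///
+/// As an illustrative sketch (the struct name is arbitrary), a [`types::Struct`]
+/// implements this trait:
+///
+/// ```no_run
+/// # use libbpf_rs::btf::{Btf, HasSize};
+/// # use libbpf_rs::btf::types::Struct;
+/// # let btf = Btf::from_vmlinux().unwrap();
+/// let s: Struct<'_> = btf.type_by_name("task_struct").unwrap();
+/// println!("size in bytes: {}", s.size());
+/// ```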
+pub unsafe trait HasSize<'btf>: Deref<Target = BtfType<'btf>> + sealed::Sealed {
+ /// The size of the described type.
+ #[inline]
+ fn size(&self) -> usize {
+ unsafe { self.size_unchecked() as usize }
+ }
+}
+
+/// Some btf types refer to other types by their type id.
+///
+/// # Safety
+///
+/// It's only safe to implement this for types where the underlying btf_type has a .type set.
+///
+/// See the [docs](https://www.kernel.org/doc/html/latest/bpf/btf.html) for a reference of which
+/// [`BtfKind`] can implement this trait.
+pub unsafe trait ReferencesType<'btf>:
+ Deref<Target = BtfType<'btf>> + sealed::Sealed
+{
+ /// The referenced type's id.
+ #[inline]
+ fn referenced_type_id(&self) -> TypeId {
+ unsafe { self.referenced_type_id_unchecked() }
+ }
+
+ /// The referenced type.
+ #[inline]
+ fn referenced_type(&self) -> BtfType<'btf> {
+ self.source.type_by_id(self.referenced_type_id()).unwrap()
+ }
+}
+
+mod sealed {
+ pub trait Sealed {}
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ use std::mem::discriminant;
+
+ #[test]
+ fn from_vmlinux() {
+ assert!(Btf::from_vmlinux().is_ok());
+ }
+
+ #[test]
+ fn btf_kind() {
+ use BtfKind::*;
+
+ for t in [
+ Void, Int, Ptr, Array, Struct, Union, Enum, Fwd, Typedef, Volatile, Const, Restrict,
+ Func, FuncProto, Var, DataSec, Float, DeclTag, TypeTag, Enum64,
+ ] {
+ // check if discriminants match after a roundtrip conversion
+ assert_eq!(
+ discriminant(&t),
+ discriminant(&BtfKind::try_from(t as u32).unwrap())
+ );
+ }
+ }
+}
diff --git a/src/btf/types.rs b/src/btf/types.rs
new file mode 100644
index 0000000..f931c3e
--- /dev/null
+++ b/src/btf/types.rs
@@ -0,0 +1,1187 @@
+//! Wrappers representing concrete btf types.
+
+use std::ffi::OsStr;
+use std::fmt;
+use std::fmt::Display;
+use std::ops::Deref;
+
+use super::BtfKind;
+use super::BtfType;
+use super::HasSize;
+use super::ReferencesType;
+use super::TypeId;
+
+// Generate a btf type that doesn't have any fields, i.e. there is no data after the BtfType
+// pointer.
+macro_rules! gen_fieldless_concrete_type {
+ (
+ $(#[$docs:meta])*
+ $name:ident $(with $trait:ident)?
+ ) => {
+ $(#[$docs])*
+ #[derive(Clone, Copy, Debug)]
+ pub struct $name<'btf> {
+ source: BtfType<'btf>,
+ }
+
+ impl<'btf> TryFrom<BtfType<'btf>> for $name<'btf> {
+ type Error = BtfType<'btf>;
+
+ fn try_from(t: BtfType<'btf>) -> ::core::result::Result<Self, Self::Error> {
+ if t.kind() == BtfKind::$name {
+ Ok($name { source: t })
+ } else {
+ Err(t)
+ }
+ }
+ }
+
+ impl<'btf> ::std::ops::Deref for $name<'btf> {
+ type Target = BtfType<'btf>;
+ fn deref(&self) -> &Self::Target {
+ &self.source
+ }
+ }
+
+ $(
+ impl super::sealed::Sealed for $name<'_> {}
+ unsafe impl<'btf> $trait<'btf> for $name<'btf> {}
+ )*
+ };
+}
+
+// Generate a btf type that has at least one field, and as such, there is data following the
+// btf_type pointer.
+macro_rules! gen_concrete_type {
+ (
+ $(#[$docs:meta])*
+ $libbpf_ty:ident as $name:ident $(with $trait:ident)?
+ ) => {
+ $(#[$docs])*
+ #[derive(Clone, Copy, Debug)]
+ pub struct $name<'btf> {
+ source: BtfType<'btf>,
+ ptr: &'btf libbpf_sys::$libbpf_ty,
+ }
+
+ impl<'btf> TryFrom<BtfType<'btf>> for $name<'btf> {
+ type Error = BtfType<'btf>;
+
+ fn try_from(t: BtfType<'btf>) -> ::core::result::Result<Self, Self::Error> {
+ if t.kind() == BtfKind::$name {
+ let ptr = unsafe {
+ // SAFETY:
+ //
+ // It's in bounds to access the memory following this btf_type
+ // because we've checked the type
+ (t.ty as *const libbpf_sys::btf_type).offset(1)
+ };
+ let ptr = ptr.cast::<libbpf_sys::$libbpf_ty>();
+ Ok($name {
+ source: t,
+ // SAFETY:
+ //
+ // This pointer is aligned.
+ // all fields of all struct have size and
+ // alignment of u32, if t.ty was aligned, then this must be as well
+ //
+ // It's initialized
+ // libbpf guarantees this since we've checked the type
+ //
+ // The lifetime will match the lifetime of the original t.ty reference.
+ ptr: unsafe { &*ptr },
+ })
+ } else {
+ Err(t)
+ }
+ }
+ }
+
+ impl<'btf> ::std::ops::Deref for $name<'btf> {
+ type Target = BtfType<'btf>;
+ fn deref(&self) -> &Self::Target {
+ &self.source
+ }
+ }
+
+ $(
+ impl super::sealed::Sealed for $name<'_> {}
+ unsafe impl<'btf> $trait<'btf> for $name<'btf> {}
+ )*
+ };
+}
+
+macro_rules! gen_collection_members_concrete_type {
+ (
+ $libbpf_ty:ident as $name:ident $(with $trait:ident)?;
+
+ $(#[$docs:meta])*
+ struct $member_name:ident $(<$lt:lifetime>)? {
+ $(
+ $(#[$field_docs:meta])*
+ pub $field:ident : $type:ty
+ ),* $(,)?
+ }
+
+ |$btf:ident, $member:ident $(, $kind_flag:ident)?| $convert:expr
+ ) => {
+ impl<'btf> ::std::ops::Deref for $name<'btf> {
+ type Target = BtfType<'btf>;
+ fn deref(&self) -> &Self::Target {
+ &self.source
+ }
+ }
+
+ impl<'btf> $name<'btf> {
+ /// Whether this type has no members
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.members.is_empty()
+ }
+
+ #[doc = ::core::concat!("How many members this [`", ::core::stringify!($name), "`] has")]
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.members.len()
+ }
+
+ #[doc = ::core::concat!("Get a [`", ::core::stringify!($member_name), "`] at a given index")]
+ /// # Errors
+ ///
+ /// This function returns [`None`] when the index is out of bounds.
+ pub fn get(&self, index: usize) -> Option<$member_name$(<$lt>)*> {
+ self.members.get(index).map(|m| self.c_to_rust_member(m))
+ }
+
+ #[doc = ::core::concat!("Returns an iterator over the [`", ::core::stringify!($member_name), "`]'s of the [`", ::core::stringify!($name), "`]")]
+ pub fn iter(&'btf self) -> impl ExactSizeIterator<Item = $member_name$(<$lt>)*> + 'btf {
+ self.members.iter().map(|m| self.c_to_rust_member(m))
+ }
+
+ fn c_to_rust_member(&self, member: &libbpf_sys::$libbpf_ty) -> $member_name$(<$lt>)* {
+ let $btf = self.source.source;
+ let $member = member;
+ $(let $kind_flag = self.source.kind_flag();)*
+ $convert
+ }
+ }
+
+ $(#[$docs])*
+ #[derive(Clone, Copy, Debug)]
+ pub struct $member_name $(<$lt>)? {
+ $(
+ $(#[$field_docs])*
+ pub $field: $type
+ ),*
+ }
+
+ $(
+ impl $crate::btf::sealed::Sealed for $name<'_> {}
+ unsafe impl<'btf> $trait<'btf> for $name<'btf> {}
+ )*
+ };
+}
+
+macro_rules! gen_collection_concrete_type {
+ (
+ $(#[$docs:meta])*
+ $libbpf_ty:ident as $name:ident $(with $trait:ident)?;
+
+ $($rest:tt)+
+ ) => {
+ $(#[$docs])*
+ #[derive(Clone, Copy, Debug)]
+ pub struct $name<'btf> {
+ source: BtfType<'btf>,
+ members: &'btf [libbpf_sys::$libbpf_ty],
+ }
+
+ impl<'btf> TryFrom<BtfType<'btf>> for $name<'btf> {
+ type Error = BtfType<'btf>;
+
+ fn try_from(t: BtfType<'btf>) -> ::core::result::Result<Self, Self::Error> {
+ if t.kind() == BtfKind::$name {
+ let base_ptr = unsafe {
+ // SAFETY:
+ //
+ // It's in bounds to access the memory following this btf_type
+ // because we've checked the type
+ (t.ty as *const libbpf_sys::btf_type).offset(1)
+ };
+ let members = unsafe {
+ // SAFETY:
+ //
+ // This pointer is aligned.
+ // all fields of all struct have size and
+ // alignment of u32, if t.ty was aligned, then this must be as well
+ //
+ // It's initialized
+ // libbpf guarantees this since we've checked the type
+ //
+ // The lifetime will match the lifetime of the original t.ty reference.
+ //
+ // The docs specify the length of the array is stored in vlen.
+ std::slice::from_raw_parts(base_ptr.cast(), t.vlen() as usize)
+ };
+ Ok(Self { source: t, members })
+ } else {
+ Err(t)
+ }
+ }
+ }
+
+ gen_collection_members_concrete_type!{
+ $libbpf_ty as $name $(with $trait)?;
+ $($rest)*
+ }
+ };
+}
+
+/// The attributes of a member.
+#[derive(Clone, Copy, Debug)]
+pub enum MemberAttr {
+ /// Member is a normal field.
+ Normal {
+ /// The offset of this member in the struct/union.
+ offset: u32,
+ },
+ /// Member is a bitfield.
+ BitField {
+ /// The size of the bitfield.
+ size: u8,
+ /// The offset of the bitfield.
+ offset: u32,
+ },
+}
+
+impl MemberAttr {
+ #[inline]
+ fn new(kflag: bool, offset: u32) -> Self {
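+        // When the containing struct/union has its kind flag set, `offset`
+        // packs a bitfield descriptor: bits 31..24 hold the bitfield size in
+        // bits and bits 23..0 the bit offset (mirroring the kernel's
+        // BTF_MEMBER_BITFIELD_SIZE/BTF_MEMBER_BIT_OFFSET accessors).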
+ if kflag {
+ let size = (offset >> 24) as u8;
+ if size != 0 {
+ Self::BitField {
+ size,
+ offset: offset & 0x00_ff_ff_ff,
+ }
+ } else {
+ Self::Normal { offset }
+ }
+ } else {
+ Self::Normal { offset }
+ }
+ }
+}
+
+/// The kind of linkage a variable or function can have.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+#[repr(u32)]
+pub enum Linkage {
+ /// Static linkage
+ Static = 0,
+ /// Global linkage
+ Global,
+ /// External linkage
+ Extern,
+ /// Unknown
+ Unknown,
+}
+
+impl From<u32> for Linkage {
+ fn from(value: u32) -> Self {
+ use Linkage::*;
+
+ match value {
+ x if x == Static as u32 => Static,
+ x if x == Global as u32 => Global,
+ x if x == Extern as u32 => Extern,
+ _ => Unknown,
+ }
+ }
+}
+
+impl From<Linkage> for u32 {
+ fn from(value: Linkage) -> Self {
+ value as u32
+ }
+}
+
+impl Display for Linkage {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "{}",
+ match self {
+ Linkage::Static => "static",
+ Linkage::Global => "global",
+ Linkage::Extern => "extern",
+ Linkage::Unknown => "(unknown)",
+ }
+ )
+ }
+}
+
+// Void
+gen_fieldless_concrete_type! {
+ /// The representation of the c_void type.
+ Void
+}
+
+// Int
+
+/// An integer.
+///
+/// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int)
+#[derive(Clone, Copy, Debug)]
+pub struct Int<'btf> {
+ source: BtfType<'btf>,
+ /// The encoding of the number.
+ pub encoding: IntEncoding,
+    /// The offset in bits where the value of this integer starts. Mostly useful for bitfields in
+ /// structs.
+ pub offset: u8,
+    /// The number of bits in the int. (For example, a u8 has 8 bits.)
+ pub bits: u8,
+}
+
+/// The kinds of ways a btf [Int] can be encoded.
+#[derive(Clone, Copy, Debug)]
+pub enum IntEncoding {
+ /// No encoding.
+ None,
+ /// Signed.
+ Signed,
+ /// It's a c_char.
+ Char,
+ /// It's a bool.
+ Bool,
+}
+
+impl<'btf> TryFrom<BtfType<'btf>> for Int<'btf> {
+ type Error = BtfType<'btf>;
+
+ fn try_from(t: BtfType<'btf>) -> Result<Self, Self::Error> {
+ if t.kind() == BtfKind::Int {
+ let int = {
+ let base_ptr = t.ty as *const libbpf_sys::btf_type;
+ let u32_ptr = unsafe {
+ // SAFETY:
+ //
+ // It's in bounds to access the memory following this btf_type
+ // because we've checked the type
+ base_ptr.offset(1).cast::<u32>()
+ };
+ unsafe {
+ // SAFETY:
+ //
+ // This pointer is aligned.
+ // all fields of all struct have size and
+ // alignment of u32, if t.ty was aligned, then this must be as well
+ //
+ // It's initialized
+ // libbpf guarantees this since we've checked the type
+ //
+ // The lifetime will match the lifetime of the original t.ty reference.
+ *u32_ptr
+ }
+ };
+            // The metadata u32 follows the kernel's BTF_INT_* layout: bits
+            // 27..24 hold the encoding, bits 23..16 the bit offset, and bits
+            // 7..0 the number of bits.
+            let encoding = match (int & 0x0f_00_00_00) >> 24 {
+                0b1 => IntEncoding::Signed,
+                0b10 => IntEncoding::Char,
+                0b100 => IntEncoding::Bool,
+                _ => IntEncoding::None,
+            };
+            Ok(Self {
+                source: t,
+                encoding,
+                offset: ((int & 0x00_ff_00_00) >> 16) as u8,
+                bits: (int & 0x00_00_00_ff) as u8,
+            })
+ } else {
+ Err(t)
+ }
+ }
+}
+
+impl<'btf> Deref for Int<'btf> {
+ type Target = BtfType<'btf>;
+ fn deref(&self) -> &Self::Target {
+ &self.source
+ }
+}
+
+// SAFETY: Int has the .size field set.
+impl super::sealed::Sealed for Int<'_> {}
+unsafe impl<'btf> HasSize<'btf> for Int<'btf> {}
+
+// Ptr
+gen_fieldless_concrete_type! {
+ /// A pointer.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-ptr)
+ Ptr with ReferencesType
+}
+
+// Array
+gen_concrete_type! {
+ /// An array.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-array)
+ btf_array as Array
+}
+
+impl<'s> Array<'s> {
+ /// The type id of the stored type.
+ #[inline]
+ pub fn ty(&self) -> TypeId {
+ self.ptr.type_.into()
+ }
+
+ /// The type of index used.
+ #[inline]
+ pub fn index_ty(&self) -> TypeId {
+ self.ptr.index_type.into()
+ }
+
+ /// The capacity of the array.
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.ptr.nelems as usize
+ }
+
+ /// The type contained in this array.
+ #[inline]
+ pub fn contained_type(&self) -> BtfType<'s> {
+ self.source
+ .source
+ .type_by_id(self.ty())
+ .expect("arrays should always reference an existing type")
+ }
+}
+
+// Struct
+gen_collection_concrete_type! {
+ /// A struct.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-struct)
+ btf_member as Struct with HasSize;
+
+ /// A member of a [Struct]
+ struct StructMember<'btf> {
+ /// The member's name
+ pub name: Option<&'btf OsStr>,
+ /// The member's type
+ pub ty: TypeId,
+ /// The attributes of this member.
+ pub attr: MemberAttr,
+ }
+
+ |btf, member, kflag| StructMember {
+ name: btf.name_at(member.name_off),
+ ty: member.type_.into(),
+ attr: MemberAttr::new(kflag, member.offset),
+ }
+}
+
+// Union
+gen_collection_concrete_type! {
+ /// A Union.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-union)
+ btf_member as Union with HasSize;
+
+    /// A member of a [Union]
+ struct UnionMember<'btf> {
+ /// The member's name
+ pub name: Option<&'btf OsStr>,
+ /// The member's type
+ pub ty: TypeId,
+ /// The attributes of this member.
+ pub attr: MemberAttr,
+ }
+
+ |btf, member, kflag| UnionMember {
+ name: btf.name_at(member.name_off),
+ ty: member.type_.into(),
+ attr: MemberAttr::new(kflag, member.offset),
+ }
+}
+
+/// A Composite type, which can be either a [`Struct`] or a [`Union`].
+///
+/// Sometimes it's not useful to distinguish them; in that case, one can use this
+/// type to inspect either of them.
+#[derive(Clone, Copy, Debug)]
+pub struct Composite<'btf> {
+ source: BtfType<'btf>,
+ /// Whether this type is a struct.
+ pub is_struct: bool,
+ members: &'btf [libbpf_sys::btf_member],
+}
+
+impl<'btf> From<Struct<'btf>> for Composite<'btf> {
+ fn from(s: Struct<'btf>) -> Self {
+ Self {
+ source: s.source,
+ is_struct: true,
+ members: s.members,
+ }
+ }
+}
+
+impl<'btf> From<Union<'btf>> for Composite<'btf> {
+ fn from(s: Union<'btf>) -> Self {
+ Self {
+ source: s.source,
+ is_struct: false,
+ members: s.members,
+ }
+ }
+}
+
+impl<'btf> TryFrom<BtfType<'btf>> for Composite<'btf> {
+ type Error = BtfType<'btf>;
+
+ fn try_from(t: BtfType<'btf>) -> Result<Self, Self::Error> {
+ Struct::try_from(t)
+ .map(Self::from)
+ .or_else(|_| Union::try_from(t).map(Self::from))
+ }
+}
+
+impl<'btf> TryFrom<Composite<'btf>> for Struct<'btf> {
+ type Error = Composite<'btf>;
+
+ fn try_from(value: Composite<'btf>) -> Result<Self, Self::Error> {
+ if value.is_struct {
+ Ok(Self {
+ source: value.source,
+ members: value.members,
+ })
+ } else {
+ Err(value)
+ }
+ }
+}
+
+impl<'btf> TryFrom<Composite<'btf>> for Union<'btf> {
+ type Error = Composite<'btf>;
+
+ fn try_from(value: Composite<'btf>) -> Result<Self, Self::Error> {
+ if !value.is_struct {
+ Ok(Self {
+ source: value.source,
+ members: value.members,
+ })
+ } else {
+ Err(value)
+ }
+ }
+}
+
+// Composite
+gen_collection_members_concrete_type! {
+ btf_member as Composite with HasSize;
+
+    /// A member of a [Composite]
+ struct CompositeMember<'btf> {
+ /// The member's name
+ pub name: Option<&'btf OsStr>,
+ /// The member's type
+ pub ty: TypeId,
+        /// If this member is a bitfield, these are its attributes.
+ pub attr: MemberAttr
+ }
+
+ |btf, member, kflag| CompositeMember {
+ name: btf.name_at(member.name_off),
+ ty: member.type_.into(),
+ attr: MemberAttr::new(kflag, member.offset),
+ }
+}
+
+// Enum
+gen_collection_concrete_type! {
+ /// An Enum of at most 32 bits.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-enum)
+ btf_enum as Enum with HasSize;
+
+ /// A member of an [Enum]
+ struct EnumMember<'btf> {
+ /// The name of this enum variant.
+ pub name: Option<&'btf OsStr>,
+ /// The numeric value of this enum variant.
+ pub value: i32,
+ }
+
+ |btf, member| EnumMember {
+ name: btf.name_at(member.name_off),
+ value: member.val,
+ }
+}
+
+// Fwd
+gen_fieldless_concrete_type! {
+ /// A forward declared C type.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-fwd)
+ Fwd
+}
+
+impl Fwd<'_> {
+    /// The kind of C type that is forward declared.
+ pub fn kind(&self) -> FwdKind {
+ if self.source.kind_flag() {
+ FwdKind::Union
+ } else {
+ FwdKind::Struct
+ }
+ }
+}
+
+/// The kinds of types that can be forward declared.
+#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
+pub enum FwdKind {
+ /// A struct.
+ Struct,
+ /// A union.
+ Union,
+}
+
+// Typedef
+gen_fieldless_concrete_type! {
+ /// A C typedef.
+ ///
+ /// References the original type.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-typedef)
+ Typedef with ReferencesType
+}
+
+// Volatile
+gen_fieldless_concrete_type! {
+ /// The volatile modifier.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-volatile)
+ Volatile with ReferencesType
+}
+
+// Const
+gen_fieldless_concrete_type! {
+ /// The const modifier.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-const)
+ Const with ReferencesType
+}
+
+// Restrict
+gen_fieldless_concrete_type! {
+ /// The restrict modifier.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-restrict)
+ Restrict with ReferencesType
+}
+
+// Func
+gen_fieldless_concrete_type! {
+ /// A function.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-func)
+ Func with ReferencesType
+}
+
+impl Func<'_> {
+ /// This function's linkage.
+ #[inline]
+ pub fn linkage(&self) -> Linkage {
+ self.source.vlen().into()
+ }
+}
+
+// FuncProto
+gen_collection_concrete_type! {
+ /// A function prototype.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-func-proto)
+ btf_param as FuncProto with ReferencesType;
+
+ /// A parameter of a [FuncProto].
+ struct FuncProtoParam<'btf> {
+ /// The parameter's name
+ pub name: Option<&'btf OsStr>,
+ /// The parameter's type
+ pub ty: TypeId,
+ }
+
+ |btf, member| FuncProtoParam {
+ name: btf.name_at(member.name_off),
+ ty: member.type_.into()
+ }
+}
+
+// Var
+gen_concrete_type! {
+ /// A global variable.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-var)
+ btf_var as Var with ReferencesType
+}
+
+impl Var<'_> {
+ /// The kind of linkage this variable has.
+ #[inline]
+ pub fn linkage(&self) -> Linkage {
+ self.ptr.linkage.into()
+ }
+}
+
+// DataSec
+gen_collection_concrete_type! {
+ /// An ELF's data section, such as `.data`, `.bss` or `.rodata`.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-datasec)
+ btf_var_secinfo as DataSec with HasSize;
+
+ /// Describes the btf var in a section.
+ ///
+ /// See [`DataSec`].
+ struct VarSecInfo {
+ /// The type id of the var
+ pub ty: TypeId,
+ /// The offset in the section
+ pub offset: u32,
+ /// The size of the type.
+ pub size: usize,
+ }
+
+ |_btf, member| VarSecInfo {
+ ty: member.type_.into(),
+ offset: member.offset,
+ size: member.size as usize
+ }
+}
+
+// Float
+gen_fieldless_concrete_type! {
+ /// A floating point number.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-float)
+ Float with HasSize
+}
+
+// DeclTag
+gen_concrete_type! {
+ /// A declaration tag.
+ ///
+ /// A custom tag the programmer can attach to a symbol.
+ ///
+ /// See the [clang docs](https://clang.llvm.org/docs/AttributeReference.html#btf-decl-tag) on
+ /// it.
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-decl-tag)
+ btf_decl_tag as DeclTag with ReferencesType
+}
+
+impl DeclTag<'_> {
+    /// The component index is present only when the tag points to a struct/union
+    /// member or a function argument; it indicates which member or argument this
+    /// declaration tag refers to.
+ #[inline]
+ pub fn component_index(&self) -> Option<u32> {
+ self.ptr.component_idx.try_into().ok()
+ }
+}
+
+// TypeTag
+gen_fieldless_concrete_type! {
+ /// A type tag.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-type-tag)
+ TypeTag with ReferencesType
+}
+
+// Enum64
+gen_collection_concrete_type! {
+ /// An Enum of 64 bits.
+ ///
+ /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-enum64)
+ btf_enum64 as Enum64 with HasSize;
+
+ /// A member of an [Enum64].
+ struct Enum64Member<'btf> {
+ /// The name of this enum variant.
+ pub name: Option<&'btf OsStr>,
+ /// The numeric value of this enum variant.
+ pub value: u64,
+ }
+
+ |btf, member| Enum64Member {
+ name: btf.name_at(member.name_off),
+ value: {
+ let hi: u64 = member.val_hi32.into();
+ let lo: u64 = member.val_lo32.into();
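+            // Recombine the two 32-bit halves into the full 64-bit value,
+            // e.g. hi = 0x1, lo = 0x2 yields 0x1_0000_0002.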
+ hi << 32 | lo
+ },
+ }
+}
+
+/// A macro that allows matching on the type of a [`BtfType`] as if it was an enum.
+///
+/// Patterns can take several forms, as the following examples show.
+///
+/// ```no_run
+/// use libbpf_rs::btf::BtfType;
+/// use libbpf_rs::btf_type_match;
+///
+/// # fn do_something_with_an_int(i: libbpf_rs::btf::types::Int) -> &'static str { "" }
+/// let ty: BtfType;
+/// # ty = todo!();
+/// btf_type_match!(match ty {
+/// BtfKind::Int(i) => do_something_with_an_int(i),
+/// BtfKind::Struct => "it's a struct",
+/// BtfKind::Union => {
+/// "it's a union"
+/// },
+/// _ => "default",
+/// });
+/// ```
+///
+/// Variable Binding.
+///
+/// ```compile_fail
+/// BtfKind::Int(i) => {
+/// // we can use i here and it will be an `Int`
+/// }
+/// ```
+///
+/// Non-binding.
+///
+/// ```compile_fail
+/// BtfKind::Int => {
+/// // we don't have access to the variable, but we know the scrutinee is an Int
+/// }
+/// ```
+///
+/// Multiple variants.
+/// ```compile_fail
+/// BtfKind::Struct | BtfKind::Union => {
+/// // we don't have access to the variable,
+/// // but we know the scrutinee is either a Struct or a Union
+/// }
+/// ```
+///
+/// Special case for [`Struct`] and [`Union`]: [`Composite`]
+/// ```compile_fail
+/// BtfKind::Composite(c) => {
+/// // we can use `c` as an instance of `Composite`.
+/// // this branch will match if the type is either a Struct or a Union.
+/// }
+/// ```
+// $(BtfKind::$name:ident $(($var:ident))? => $action:expr $(,)?)+
+#[macro_export]
+macro_rules! btf_type_match {
+ // base rule
+ (
+ match $ty:ident {
+ $($pattern:tt)+
+ }
+ ) => {{
+ let ty: $crate::btf::BtfType<'_> = $ty;
+ $crate::__btf_type_match!(match ty.kind() { } $($pattern)*)
+ }};
+}
+
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __btf_type_match {
+ /*
+ * Composite special case
+ *
+ * This is similar to simple-match but it's hardcoded for composite which matches both structs
+ * and unions.
+ */
+ (
+ match $ty:ident.kind() { $($p:pat => $a:expr),* }
+ BtfKind::Composite $( ($var:ident) )? => $action:expr,
+ $($rest:tt)*
+ ) => {
+ $crate::__btf_type_match!(match $ty.kind() { $($p => $a,)* }
+ BtfKind::Composite $( ($var) )* => { $action }
+ $($rest)*
+ )
+ };
+ (
+ match $ty:ident.kind() { $($p:pat => $a:expr),* }
+ BtfKind::Composite $(($var:ident))? => $action:block
+ $($rest:tt)*
+ ) => {
+ $crate::__btf_type_match!(match $ty.kind() {
+ $($p => $a,)*
+ $crate::btf::BtfKind::Struct | $crate::btf::BtfKind::Union => {
+ $(let $var = $crate::btf::types::Composite::try_from($ty).unwrap();)*
+ $action
+ }
+ }
+ $($rest)*
+ )
+ };
+ // simple-match: match on simple patterns that use an expression followed by a comma
+ (
+ match $ty:ident.kind() { $($p:pat => $a:expr),* }
+ BtfKind::$name:ident $(($var:ident))? => $action:expr,
+ $($rest:tt)*
+ ) => {
+ $crate::__btf_type_match!(
+ match $ty.kind() { $($p => $a),* }
+ BtfKind::$name $(($var))? => { $action }
+ $($rest)*
+ )
+ };
+ // simple-match: match on simple patterns that use a block without a comma
+ (
+ match $ty:ident.kind() { $($p:pat => $a:expr),* }
+ BtfKind::$name:ident $(($var:ident))? => $action:block
+ $($rest:tt)*
+ ) => {
+ $crate::__btf_type_match!(match $ty.kind() {
+ $($p => $a,)*
+ $crate::btf::BtfKind::$name => {
+ $(let $var = $crate::btf::types::$name::try_from($ty).unwrap();)*
+ $action
+ }
+ }
+ $($rest)*
+ )
+ };
+ // or-pattern: match on one or more variants without capturing a variable and using an
+ // expression followed by a comma.
+ (
+ match $ty:ident.kind() { $($p:pat => $a:expr),* }
+ $(BtfKind::$name:ident)|+ => $action:expr,
+ $($rest:tt)*
+ ) => {
+ $crate::__btf_type_match!(
+ match $ty.kind() { $($p => $a),* }
+ $(BtfKind::$name)|* => { $action }
+ $($rest)*
+ )
+ };
+ (
+ match $ty:ident.kind() { $($p:pat => $a:expr),* }
+ $(BtfKind::$name:ident)|+ => $action:block
+ $($rest:tt)*
+ ) => {
+ $crate::__btf_type_match!(match $ty.kind() {
+ $($p => $a,)*
+ $($crate::btf::BtfKind::$name)|* => {
+ $action
+ }
+ }
+ $($rest)*
+ )
+ };
+ // default match case
+ //
+ // we only need the expression case here because this case is not followed by a $rest:tt like
+    // the others, which lets us use the $(,)? pattern.
+ (
+ match $ty:ident.kind() { $($p:pat => $a:expr),* }
+ _ => $action:expr $(,)?
+ ) => {
+ $crate::__btf_type_match!(match $ty.kind() {
+ $($p => $a,)*
+ _ => { $action }
+ }
+
+ )
+ };
+ // stop case, where the code is actually generated
+ (match $ty:ident.kind() { $($p:pat => $a:expr),* } ) => {
+ match $ty.kind() {
+ $($p => $a),*
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+    // Creates a dummy BtfType. Note it's not safe to use this type, but it is safe to match on
+    // it, which is all we need for these tests.
+ macro_rules! dummy_type {
+ ($ty:ident) => {
+ let btf = $crate::Btf {
+ ptr: std::ptr::NonNull::dangling(),
+ drop_policy: $crate::btf::DropPolicy::Nothing,
+ _marker: std::marker::PhantomData,
+ };
+ let $ty = BtfType {
+ type_id: $crate::btf::TypeId::from(1),
+ name: None,
+ source: &btf,
+ ty: &libbpf_sys::btf_type::default(),
+ };
+ };
+ }
+
+ fn foo(_: super::Int<'_>) -> &'static str {
+ "int"
+ }
+
+ #[test]
+ fn full_switch_case() {
+ dummy_type!(ty);
+ btf_type_match!(match ty {
+ BtfKind::Int(i) => foo(i),
+ BtfKind::Struct => "it's a struct",
+ BtfKind::Void => "",
+ BtfKind::Ptr => "",
+ BtfKind::Array => "",
+ BtfKind::Union => "",
+ BtfKind::Enum => "",
+ BtfKind::Fwd => "",
+ BtfKind::Typedef => "",
+ BtfKind::Volatile => "",
+ BtfKind::Const => "",
+ BtfKind::Restrict => "",
+ BtfKind::Func => "",
+ BtfKind::FuncProto => "",
+ BtfKind::Var => "",
+ BtfKind::DataSec => "",
+ BtfKind::Float => "",
+ BtfKind::DeclTag => "",
+ BtfKind::TypeTag => "",
+ BtfKind::Enum64 => "",
+ });
+ }
+
+ #[test]
+ fn partial_match() {
+ dummy_type!(ty);
+ btf_type_match!(match ty {
+ BtfKind::Int => "int",
+ _ => "default",
+ });
+ }
+
+ #[test]
+ fn or_pattern_match() {
+ dummy_type!(ty);
+ // we ask rustfmt to not format this block so that we can keep the trailing `,` in the
+ // const | restrict branch.
+ #[rustfmt::skip]
+ btf_type_match!(match ty {
+ BtfKind::Int => "int",
+ BtfKind::Struct | BtfKind::Union => "composite",
+ BtfKind::Typedef | BtfKind::Volatile => {
+ "qualifier"
+ }
+ BtfKind::Const | BtfKind::Restrict => {
+ "const or restrict"
+ },
+ _ => "default",
+ });
+ }
+
+ #[test]
+ fn match_arm_with_brackets() {
+ dummy_type!(ty);
+ // we ask rustfmt to not format this block so that we can keep the trailing `,` in the int
+ // branch.
+ #[rustfmt::skip]
+ btf_type_match!(match ty {
+ BtfKind::Void => {
+ "void"
+ }
+ BtfKind::Int => {
+ "int"
+ },
+ BtfKind::Struct => "struct",
+ _ => "default",
+ });
+ }
+
+ #[test]
+ fn match_on_composite() {
+ dummy_type!(ty);
+ btf_type_match!(match ty {
+ BtfKind::Composite(c) => c.is_struct,
+ _ => false,
+ });
+ btf_type_match!(match ty {
+ BtfKind::Composite(c) => {
+ c.is_struct
+ }
+ _ => false,
+ });
+ // we ask rustfmt to not format this block so that we can keep the trailing `,` in the
+ // composite branch.
+ #[rustfmt::skip]
+ btf_type_match!(match ty {
+ BtfKind::Composite(c) => {
+ c.is_struct
+ },
+ _ => false,
+ });
+ }
+
+ #[test]
+ fn match_arm_with_multiple_statements() {
+ dummy_type!(ty);
+
+ btf_type_match!(match ty {
+ BtfKind::Int(i) => {
+ let _ = i;
+ "int"
+ }
+ _ => {
+ let _ = 1;
+ "default"
+ }
+ });
+ }
+
+ #[test]
+ fn non_expression_guards() {
+ dummy_type!(ty);
+
+ btf_type_match!(match ty {
+ BtfKind::Int => {
+ let _ = 1;
+ "int"
+ }
+ BtfKind::Typedef | BtfKind::Const => {
+ let _ = 1;
+ "qualifier"
+ }
+ _ => {
+ let _ = 1;
+ "default"
+ }
+ });
+
+ btf_type_match!(match ty {
+ BtfKind::Int => {
+ let _ = 1;
+ }
+ BtfKind::Typedef | BtfKind::Const => {
+ let _ = 1;
+ }
+ _ => {
+ let _ = 1;
+ }
+ });
+ }
+
+ #[test]
+ fn linkage_type() {
+ use std::mem::discriminant;
+ use Linkage::*;
+
+ for t in [Static, Global, Extern, Unknown] {
+ // check if discriminants match after a roundtrip conversion
+ assert_eq!(discriminant(&t), discriminant(&Linkage::from(t as u32)));
+ }
+ }
+}
diff --git a/src/error.rs b/src/error.rs
new file mode 100644
index 0000000..d9d89d0
--- /dev/null
+++ b/src/error.rs
@@ -0,0 +1,613 @@
+use std::borrow::Borrow;
+use std::borrow::Cow;
+use std::error;
+use std::error::Error as _;
+use std::fmt::Debug;
+use std::fmt::Display;
+use std::fmt::Formatter;
+use std::fmt::Result as FmtResult;
+use std::io;
+use std::mem::transmute;
+use std::ops::Deref;
+use std::result;
+
+/// A result type using our [`Error`] by default.
+pub type Result<T, E = Error> = result::Result<T, E>;
+
+#[allow(clippy::wildcard_imports)]
+mod private {
+ use super::*;
+
+ pub trait Sealed {}
+
+ impl<T> Sealed for Option<T> {}
+ impl<T, E> Sealed for Result<T, E> {}
+ impl Sealed for &'static str {}
+ impl Sealed for String {}
+ impl Sealed for Error {}
+
+ impl Sealed for io::Error {}
+}
+
+/// A `str` replacement whose owned representation is a `Box<str>` and
+/// not a `String`.
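+///
+/// This saves a machine word in the owned representation: a `Box<str>` stores
+/// just a pointer and a length, whereas a `String` additionally tracks its
+/// capacity.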
+#[derive(Debug)]
+#[repr(transparent)]
+#[doc(hidden)]
+pub struct Str(str);
+
+impl ToOwned for Str {
+ type Owned = Box<str>;
+
+ #[inline]
+ fn to_owned(&self) -> Self::Owned {
+ self.0.to_string().into_boxed_str()
+ }
+}
+
+impl Borrow<Str> for Box<str> {
+ #[inline]
+ fn borrow(&self) -> &Str {
+ // SAFETY: `Str` is `repr(transparent)` and so `&str` and `&Str`
+ // can trivially be converted into each other.
+ unsafe { transmute::<&str, &Str>(self.deref()) }
+ }
+}
+
+impl Deref for Str {
+ type Target = str;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+// For convenient use in `format!`, for example.
+impl Display for Str {
+ #[inline]
+ fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+ Display::fmt(&self.0, f)
+ }
+}
+
+/// A helper trait for abstracting over various string types, allowing
+/// conversion into a `Cow<'static, Str>`. This is the `Cow`-enabled
+/// equivalent of `ToString`.
+pub trait IntoCowStr: private::Sealed {
+ fn into_cow_str(self) -> Cow<'static, Str>;
+}
+
+impl IntoCowStr for &'static str {
+ fn into_cow_str(self) -> Cow<'static, Str> {
+ // SAFETY: `Str` is `repr(transparent)` and so `&str` and `&Str`
+ // can trivially be converted into each other.
+ let other = unsafe { transmute::<&str, &Str>(self) };
+ Cow::Borrowed(other)
+ }
+}
+
+impl IntoCowStr for String {
+ fn into_cow_str(self) -> Cow<'static, Str> {
+ Cow::Owned(self.into_boxed_str())
+ }
+}
+
+// TODO: We may want to support optionally storing a backtrace in
+// terminal variants.
+enum ErrorImpl {
+ Io(io::Error),
+ // Unfortunately, if we just had a single `Context` variant that
+ // contains a `Cow`, this inner `Cow` would cause an overall enum
+ // size increase by a machine word, because currently `rustc`
+ // seemingly does not fold the necessary bits into the outer enum.
+ // We have two variants to work around that until `rustc` is smart
+ // enough.
+ ContextOwned {
+ context: Box<str>,
+ source: Box<ErrorImpl>,
+ },
+ ContextStatic {
+ context: &'static str,
+ source: Box<ErrorImpl>,
+ },
+}
+
+impl ErrorImpl {
+ fn kind(&self) -> ErrorKind {
+ match self {
+ Self::Io(error) => match error.kind() {
+ io::ErrorKind::NotFound => ErrorKind::NotFound,
+ io::ErrorKind::PermissionDenied => ErrorKind::PermissionDenied,
+ io::ErrorKind::AlreadyExists => ErrorKind::AlreadyExists,
+ io::ErrorKind::WouldBlock => ErrorKind::WouldBlock,
+ io::ErrorKind::InvalidInput => ErrorKind::InvalidInput,
+ io::ErrorKind::InvalidData => ErrorKind::InvalidData,
+ io::ErrorKind::TimedOut => ErrorKind::TimedOut,
+ io::ErrorKind::WriteZero => ErrorKind::WriteZero,
+ io::ErrorKind::Interrupted => ErrorKind::Interrupted,
+ io::ErrorKind::Unsupported => ErrorKind::Unsupported,
+ io::ErrorKind::UnexpectedEof => ErrorKind::UnexpectedEof,
+ io::ErrorKind::OutOfMemory => ErrorKind::OutOfMemory,
+ _ => ErrorKind::Other,
+ },
+ Self::ContextOwned { source, .. } | Self::ContextStatic { source, .. } => {
+ source.deref().kind()
+ }
+ }
+ }
+
+ #[cfg(test)]
+ fn is_owned(&self) -> Option<bool> {
+ match self {
+ Self::ContextOwned { .. } => Some(true),
+ Self::ContextStatic { .. } => Some(false),
+ _ => None,
+ }
+ }
+}
+
+impl Debug for ErrorImpl {
+ // We try to mirror roughly how anyhow's Error is behaving, because
+ // that makes the most sense.
+ fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+ if f.alternate() {
+ let mut dbg;
+
+ match self {
+ Self::Io(io) => {
+ dbg = f.debug_tuple(stringify!(Io));
+ dbg.field(io)
+ }
+ Self::ContextOwned { context, .. } => {
+ dbg = f.debug_tuple(stringify!(ContextOwned));
+ dbg.field(context)
+ }
+ Self::ContextStatic { context, .. } => {
+ dbg = f.debug_tuple(stringify!(ContextStatic));
+ dbg.field(context)
+ }
+ }
+ .finish()
+ } else {
+ let () = match self {
+ Self::Io(error) => write!(f, "Error: {error}")?,
+ Self::ContextOwned { context, .. } => write!(f, "Error: {context}")?,
+ Self::ContextStatic { context, .. } => write!(f, "Error: {context}")?,
+ };
+
+ if let Some(source) = self.source() {
+ let () = f.write_str("\n\nCaused by:")?;
+
+ let mut error = Some(source);
+ while let Some(err) = error {
+ let () = write!(f, "\n {err:}")?;
+ error = err.source();
+ }
+ }
+ Ok(())
+ }
+ }
+}
+
+impl Display for ErrorImpl {
+ fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+ let () = match self {
+ Self::Io(error) => Display::fmt(error, f)?,
+ Self::ContextOwned { context, .. } => Display::fmt(context, f)?,
+ Self::ContextStatic { context, .. } => Display::fmt(context, f)?,
+ };
+
+ if f.alternate() {
+ let mut error = self.source();
+ while let Some(err) = error {
+ let () = write!(f, ": {err}")?;
+ error = err.source();
+ }
+ }
+ Ok(())
+ }
+}
+
+impl error::Error for ErrorImpl {
+ fn source(&self) -> Option<&(dyn error::Error + 'static)> {
+ match self {
+ Self::Io(error) => error.source(),
+ Self::ContextOwned { source, .. } | Self::ContextStatic { source, .. } => Some(source),
+ }
+ }
+}
+
+/// An enum providing a rough classification of errors.
+///
+/// The variants of this type partly resemble those of
+/// [`std::io::Error`], because these are the most common sources of
+/// error that the crate concerns itself with.
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[non_exhaustive]
+pub enum ErrorKind {
+ /// An entity was not found, often a file.
+ NotFound,
+ /// The operation lacked the necessary privileges to complete.
+ PermissionDenied,
+ /// An entity already exists, often a file.
+ AlreadyExists,
+ /// The operation needs to block to complete, but the blocking
+ /// operation was requested to not occur.
+ WouldBlock,
+ /// A parameter was incorrect.
+ InvalidInput,
+ /// Data not valid for the operation were encountered.
+ InvalidData,
+ /// The I/O operation's timeout expired, causing it to be canceled.
+ TimedOut,
+ /// An error returned when an operation could not be completed
+ /// because a call to [`write`] returned [`Ok(0)`].
+ WriteZero,
+ /// This operation was interrupted.
+ ///
+ /// Interrupted operations can typically be retried.
+ Interrupted,
+ /// This operation is unsupported on this platform.
+ Unsupported,
+ /// An error returned when an operation could not be completed
+ /// because an "end of file" was reached prematurely.
+ UnexpectedEof,
+ /// An operation could not be completed, because it failed
+ /// to allocate enough memory.
+ OutOfMemory,
+ /// A custom error that does not fall under any other I/O error
+ /// kind.
+ Other,
+}
+
+/// The error type used by the library.
+///
+/// Errors generally form a chain, with higher-level errors typically
+/// providing additional context for lower level ones. E.g., an IO error
+/// such as file-not-found could be reported by a system level API (such
+/// as [`std::fs::File::open`]) and may be contextualized with the path of the
+/// file being opened.
+///
+/// ```
+/// use std::fs::File;
+/// use std::error::Error as _;
+/// # use libbpf_rs::ErrorExt as _;
+///
+/// let path = "/does-not-exist";
+/// let result = File::open(path).with_context(|| format!("failed to open {path}"));
+///
+/// let err = result.unwrap_err();
+/// assert_eq!(err.to_string(), "failed to open /does-not-exist");
+///
+/// // Retrieve the underlying error.
+/// let inner_err = err.source().unwrap();
+/// assert!(inner_err.to_string().starts_with("No such file or directory"));
+/// ```
+///
+/// For convenient reporting, the [`Display`][std::fmt::Display]
+/// representation takes care of reporting the complete error chain when
+/// the alternate flag is set:
+/// ```
+/// # use std::fs::File;
+/// # use std::error::Error as _;
+/// # use libbpf_rs::ErrorExt as _;
+/// # let path = "/does-not-exist";
+/// # let result = File::open(path).with_context(|| format!("failed to open {path}"));
+/// # let err = result.unwrap_err();
+/// // > failed to open /does-not-exist: No such file or directory (os error 2)
+/// println!("{err:#}");
+/// ```
+///
+/// The [`Debug`][std::fmt::Debug] representation similarly will print
+/// the entire error chain, but will do so in a multi-line format:
+/// ```
+/// # use std::fs::File;
+/// # use std::error::Error as _;
+/// # use libbpf_rs::ErrorExt as _;
+/// # let path = "/does-not-exist";
+/// # let result = File::open(path).with_context(|| format!("failed to open {path}"));
+/// # let err = result.unwrap_err();
+/// // > Error: failed to open /does-not-exist
+/// // >
+/// // > Caused by:
+/// // > No such file or directory (os error 2)
+/// println!("{err:?}");
+/// ```
+// Representation is optimized for fast copying (a single machine word),
+// not so much for fast creation (as it is heap allocated). We generally
+// expect errors to be exceptional, though a lot of functionality is
+// fallible (i.e., returns a `Result<T, Error>` which would be penalized
+// by a large `Err` variant).
+#[repr(transparent)]
+pub struct Error {
+ /// The top-most error of the chain.
+ error: Box<ErrorImpl>,
+}
+
+impl Error {
+ /// Create an [`Error`] from an OS error code (typically `errno`).
+ ///
+ /// # Notes
+ /// An OS error code should always be positive.
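+    ///
+    /// A minimal sketch; the code `2` (`ENOENT` on Linux) is illustrative:
+    /// ```
+    /// use libbpf_rs::Error;
+    ///
+    /// let err = Error::from_raw_os_error(2);
+    /// // Renders roughly as "No such file or directory (os error 2)".
+    /// println!("{err}");
+    /// ```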
+ #[inline]
+ pub fn from_raw_os_error(code: i32) -> Self {
+ debug_assert!(
+ code > 0,
+ "OS error code should be positive integer; got: {code}"
+ );
+ Self::from(io::Error::from_raw_os_error(code))
+ }
+
+ #[inline]
+ pub(crate) fn with_io_error<E>(kind: io::ErrorKind, error: E) -> Self
+ where
+ E: ToString,
+ {
+ Self::from(io::Error::new(kind, error.to_string()))
+ }
+
+ #[inline]
+ pub(crate) fn with_invalid_data<E>(error: E) -> Self
+ where
+ E: ToString,
+ {
+ Self::with_io_error(io::ErrorKind::InvalidData, error)
+ }
+
+ /// Retrieve a rough error classification in the form of an
+ /// [`ErrorKind`].
+ #[inline]
+ pub fn kind(&self) -> ErrorKind {
+ self.error.kind()
+ }
+
+ /// Layer the provided context on top of this `Error`, creating a
+ /// new one in the process.
+ fn layer_context(self, context: Cow<'static, Str>) -> Self {
+ match context {
+ Cow::Owned(context) => Self {
+ error: Box::new(ErrorImpl::ContextOwned {
+ context,
+ source: self.error,
+ }),
+ },
+ Cow::Borrowed(context) => Self {
+ error: Box::new(ErrorImpl::ContextStatic {
+ context,
+ source: self.error,
+ }),
+ },
+ }
+ }
+}
+
+impl Debug for Error {
+ #[inline]
+ fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+ Debug::fmt(&self.error, f)
+ }
+}
+
+impl Display for Error {
+ #[inline]
+ fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+ Display::fmt(&self.error, f)
+ }
+}
+
+impl error::Error for Error {
+ #[inline]
+ fn source(&self) -> Option<&(dyn error::Error + 'static)> {
+ self.error.source()
+ }
+}
+
+impl From<io::Error> for Error {
+ fn from(other: io::Error) -> Self {
+ Self {
+ error: Box::new(ErrorImpl::Io(other)),
+ }
+ }
+}
+
+/// A trait providing ergonomic chaining capabilities to [`Error`].
+pub trait ErrorExt: private::Sealed {
+ /// The output type produced by [`context`](Self::context) and
+ /// [`with_context`](Self::with_context).
+ type Output;
+
+ /// Add context to this error.
+ // If we had specialization of sorts we could be more lenient as to
+ // what we can accept, but for now this method always works with
+ // static strings and nothing else.
+ fn context<C>(self, context: C) -> Self::Output
+ where
+ C: IntoCowStr;
+
+ /// Add context to this error, using a closure for lazy evaluation.
+ fn with_context<C, F>(self, f: F) -> Self::Output
+ where
+ C: IntoCowStr,
+ F: FnOnce() -> C;
+}
+
+impl ErrorExt for Error {
+ type Output = Error;
+
+ fn context<C>(self, context: C) -> Self::Output
+ where
+ C: IntoCowStr,
+ {
+ self.layer_context(context.into_cow_str())
+ }
+
+ fn with_context<C, F>(self, f: F) -> Self::Output
+ where
+ C: IntoCowStr,
+ F: FnOnce() -> C,
+ {
+ self.layer_context(f().into_cow_str())
+ }
+}
+
+impl<T, E> ErrorExt for Result<T, E>
+where
+ E: ErrorExt,
+{
+ type Output = Result<T, E::Output>;
+
+ fn context<C>(self, context: C) -> Self::Output
+ where
+ C: IntoCowStr,
+ {
+ match self {
+ Ok(val) => Ok(val),
+ Err(err) => Err(err.context(context)),
+ }
+ }
+
+ fn with_context<C, F>(self, f: F) -> Self::Output
+ where
+ C: IntoCowStr,
+ F: FnOnce() -> C,
+ {
+ match self {
+ Ok(val) => Ok(val),
+ Err(err) => Err(err.with_context(f)),
+ }
+ }
+}
+
+impl ErrorExt for io::Error {
+ type Output = Error;
+
+ fn context<C>(self, context: C) -> Self::Output
+ where
+ C: IntoCowStr,
+ {
+ Error::from(self).context(context)
+ }
+
+ fn with_context<C, F>(self, f: F) -> Self::Output
+ where
+ C: IntoCowStr,
+ F: FnOnce() -> C,
+ {
+ Error::from(self).with_context(f)
+ }
+}
+
+/// A trait providing conversion shortcuts for creating `Error`
+/// instances.
+pub trait IntoError<T>: private::Sealed
+where
+ Self: Sized,
+{
+ fn ok_or_error<C, F>(self, kind: io::ErrorKind, f: F) -> Result<T, Error>
+ where
+ C: ToString,
+ F: FnOnce() -> C;
+
+ #[inline]
+ fn ok_or_invalid_data<C, F>(self, f: F) -> Result<T, Error>
+ where
+ C: ToString,
+ F: FnOnce() -> C,
+ {
+ self.ok_or_error(io::ErrorKind::InvalidData, f)
+ }
+}
+
+impl<T> IntoError<T> for Option<T> {
+ #[inline]
+ fn ok_or_error<C, F>(self, kind: io::ErrorKind, f: F) -> Result<T, Error>
+ where
+ C: ToString,
+ F: FnOnce() -> C,
+ {
+ self.ok_or_else(|| Error::with_io_error(kind, f().to_string()))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ use std::mem::size_of;
+
+ /// Check various features of our `Str` wrapper type.
+ #[test]
+ fn str_wrapper() {
+ let b = "test string".to_string().into_boxed_str();
+ let s: &Str = b.borrow();
+ let _b: Box<str> = s.to_owned();
+
+ assert_eq!(s.to_string(), b.deref());
+ assert_eq!(format!("{s:?}"), "Str(\"test string\")");
+ }
+
+ /// Check that our `Error` type's size is as expected.
+ #[test]
+ fn error_size() {
+ assert_eq!(size_of::<Error>(), size_of::<usize>());
+ assert_eq!(size_of::<ErrorImpl>(), 4 * size_of::<usize>());
+ }
+
+ /// Check that we can format errors as expected.
+ #[test]
+ fn error_formatting() {
+ let err = io::Error::new(io::ErrorKind::InvalidData, "some invalid data");
+ let err = Error::from(err);
+
+ let src = err.source();
+ assert!(src.is_none(), "{src:?}");
+ assert!(err.error.is_owned().is_none());
+ assert_eq!(err.kind(), ErrorKind::InvalidData);
+ assert_eq!(format!("{err}"), "some invalid data");
+ assert_eq!(format!("{err:#}"), "some invalid data");
+ assert_eq!(format!("{err:?}"), "Error: some invalid data");
+ // TODO: The inner format may not actually be all that stable.
+ let expected = r#"Io(
+ Custom {
+ kind: InvalidData,
+ error: "some invalid data",
+ },
+)"#;
+ assert_eq!(format!("{err:#?}"), expected);
+
+ let err = err.context("inner context");
+ let src = err.source();
+ assert!(src.is_some(), "{src:?}");
+ assert!(!err.error.is_owned().unwrap());
+ assert_eq!(err.kind(), ErrorKind::InvalidData);
+ assert_eq!(format!("{err}"), "inner context");
+ assert_eq!(format!("{err:#}"), "inner context: some invalid data");
+
+ let expected = r#"Error: inner context
+
+Caused by:
+ some invalid data"#;
+ assert_eq!(format!("{err:?}"), expected);
+ // Nope, not going to bother.
+ assert_ne!(format!("{err:#?}"), "");
+
+ let err = err.context("outer context".to_string());
+ let src = err.source();
+ assert!(src.is_some(), "{src:?}");
+ assert!(err.error.is_owned().unwrap());
+ assert_eq!(err.kind(), ErrorKind::InvalidData);
+ assert_eq!(format!("{err}"), "outer context");
+ assert_eq!(
+ format!("{err:#}"),
+ "outer context: inner context: some invalid data"
+ );
+
+ let expected = r#"Error: outer context
+
+Caused by:
+ inner context
+ some invalid data"#;
+ assert_eq!(format!("{err:?}"), expected);
+ assert_ne!(format!("{err:#?}"), "");
+ }
+}
diff --git a/src/iter.rs b/src/iter.rs
new file mode 100644
index 0000000..5e8a631
--- /dev/null
+++ b/src/iter.rs
@@ -0,0 +1,45 @@
+use std::io;
+use std::os::fd::AsFd;
+use std::os::fd::AsRawFd;
+use std::os::fd::FromRawFd;
+use std::os::fd::OwnedFd;
+
+use crate::Error;
+use crate::Link;
+use crate::Result;
+
+/// Represents a bpf iterator for reading kernel data structures. This requires
+/// Linux 5.8.
+///
+/// This implements [`std::io::Read`] for reading bytes from the iterator.
+/// Methods require working with raw bytes. You may find libraries such as
+/// [`plain`](https://crates.io/crates/plain) helpful.
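+///
+/// A rough usage sketch (assumes `link` is a [`Link`] obtained by attaching an
+/// iterator program):
+/// ```no_run
+/// use std::io::Read as _;
+///
+/// # fn read_iter(link: &libbpf_rs::Link) -> libbpf_rs::Result<()> {
+/// let mut iter = libbpf_rs::Iter::new(link)?;
+/// let mut contents = Vec::new();
+/// let _count = iter.read_to_end(&mut contents).map_err(libbpf_rs::Error::from)?;
+/// # Ok(())
+/// # }
+/// ```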
+#[derive(Debug)]
+pub struct Iter {
+ fd: OwnedFd,
+}
+
+impl Iter {
+ /// Create a new `Iter` wrapping the provided `Link`.
+ pub fn new(link: &Link) -> Result<Self> {
+ let link_fd = link.as_fd().as_raw_fd();
+ let fd = unsafe { libbpf_sys::bpf_iter_create(link_fd) };
+ if fd < 0 {
+ return Err(Error::from(io::Error::last_os_error()));
+ }
+ Ok(Self {
+ fd: unsafe { OwnedFd::from_raw_fd(fd) },
+ })
+ }
+}
+
+impl io::Read for Iter {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let bytes_read =
+ unsafe { libc::read(self.fd.as_raw_fd(), buf.as_mut_ptr() as *mut _, buf.len()) };
+ if bytes_read < 0 {
+ return Err(io::Error::last_os_error());
+ }
+ Ok(bytes_read as usize)
+ }
+}
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..29c9d13
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,181 @@
+//! # libbpf-rs
+//!
+//! `libbpf-rs` is a safe, idiomatic, and opinionated wrapper around
+//! [libbpf](https://github.com/libbpf/libbpf/).
+//!
+//! libbpf-rs, together with `libbpf-cargo` (the libbpf cargo plugin), allows
+//! you to write Compile-Once-Run-Everywhere (CO-RE) eBPF programs. Note that
+//! this document uses "eBPF" and "BPF" interchangeably.
+//!
+//! More information about CO-RE is [available
+//! here](https://facebookmicrosites.github.io/bpf/blog/2020/02/19/bpf-portability-and-co-re.html).
+//!
+//! ## High level workflow
+//!
+//! 1. Create a new Rust project (via `cargo new` or similar) at path `$PROJ_PATH`
+//! 2. Create directory `$PROJ_PATH/src/bpf`
+//! 3. Write CO-RE bpf code in `$PROJ_PATH/src/bpf/${MYFILE}.bpf.c`, where `$MYFILE` may be any
+//! valid filename. Note the `.bpf.c` extension is required.
+//! 4. Create a [build script](https://doc.rust-lang.org/cargo/reference/build-scripts.html) that
+//!    builds and generates a skeleton module using `libbpf_cargo::SkeletonBuilder` (see the
+//!    sketch after this list)
+//! 5. Write your userspace code by importing and using the generated module. Import the
+//! module by using the [path
+//! attribute](https://doc.rust-lang.org/reference/items/modules.html#the-path-attribute).
+//!    Your userspace code goes in `$PROJ_PATH/src/` as it would in a normal Rust project.
+//! 6. Continue the regular Rust workflow (i.e., `cargo build`, `cargo run`, etc.)
+//!
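+//! A minimal `build.rs` sketch for step 4, assuming `libbpf-cargo`'s
+//! `SkeletonBuilder` API (file names are illustrative):
+//! ```ignore
+//! use libbpf_cargo::SkeletonBuilder;
+//!
+//! fn main() {
+//!     SkeletonBuilder::new()
+//!         .source("src/bpf/myfile.bpf.c")
+//!         .build_and_generate("src/bpf/myfile.skel.rs")
+//!         .unwrap();
+//!     println!("cargo:rerun-if-changed=src/bpf/myfile.bpf.c");
+//! }
+//! ```
+//!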
+//! ## Alternate workflow
+//!
+//! While using the skeleton is recommended, it is also possible to directly use libbpf-rs.
+//!
+//! 1. Follow steps 1-3 of "High level workflow"
+//! 2. Generate a BPF object file. Options include manually invoking `clang`, creating a build
+//!    script to invoke `clang`, or using the `libbpf-cargo` cargo plugin.
+//! 3. Write your userspace code in `$PROJ_PATH/src/` as you would in a normal Rust project and
+//!    point libbpf-rs at your BPF object file
+//! 4. Continue the regular Rust workflow (i.e., `cargo build`, `cargo run`, etc.)
+//!
+//! ## Design
+//!
+//! libbpf-rs models various "phases":
+//! ```text
+//! from_*() load()
+//! | |
+//! v v
+//! ObjectBuilder -> OpenObject -> Object
+//! ^ ^
+//! | |
+//! <pre-load modifications> |
+//! |
+//! <post-load interactions>
+//! ```
+//!
+//! The entry point into libbpf-rs is [`ObjectBuilder`]. `ObjectBuilder` helps open the BPF object
+//! file. After the object file is opened, you are returned an [`OpenObject`] where you can
+//! perform all your pre-load operations. Pre-load means before any BPF maps are created or BPF
+//! programs are loaded and verified by the kernel. Finally, after the BPF object is loaded, you
+//! are returned an [`Object`] instance where you can read/write to BPF maps, attach BPF programs
+//! to hooks, etc.
+//!
+//! You _must_ keep the [`Object`] alive the entire duration you interact with anything inside the
+//! BPF object it represents. This is further documented in [`Object`] documentation.
+//!
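+//! A rough sketch of these phases under illustrative names (error handling
+//! elided):
+//! ```ignore
+//! use libbpf_rs::ObjectBuilder;
+//!
+//! let mut builder = ObjectBuilder::default();
+//! let open_obj = builder.open_file("myfile.bpf.o")?; // pre-load phase
+//! let obj = open_obj.load()?;                        // post-load phase
+//! ```
+//!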
+//! ## Example
+//!
+//! This is probably the best way to understand how libbpf-rs and libbpf-cargo work together.
+//!
+//! [See example here](https://github.com/libbpf/libbpf-rs/tree/master/examples/runqslower).
+
+#![allow(clippy::let_and_return, clippy::let_unit_value)]
+#![warn(
+ elided_lifetimes_in_paths,
+ missing_debug_implementations,
+ missing_docs,
+ single_use_lifetimes,
+ clippy::absolute_paths,
+ clippy::wildcard_imports,
+ rustdoc::broken_intra_doc_links
+)]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+pub mod btf;
+mod error;
+mod iter;
+mod link;
+mod linker;
+mod map;
+mod object;
+mod perf_buffer;
+mod print;
+mod program;
+pub mod query;
+mod ringbuf;
+mod skeleton;
+mod tc;
+mod user_ringbuf;
+mod util;
+mod xdp;
+
+pub use libbpf_sys;
+
+pub use crate::btf::Btf;
+pub use crate::btf::HasSize;
+pub use crate::btf::ReferencesType;
+pub use crate::error::Error;
+pub use crate::error::ErrorExt;
+pub use crate::error::ErrorKind;
+pub use crate::error::Result;
+pub use crate::iter::Iter;
+pub use crate::link::Link;
+pub use crate::linker::Linker;
+pub use crate::map::Map;
+pub use crate::map::MapCore;
+pub use crate::map::MapFlags;
+pub use crate::map::MapHandle;
+pub use crate::map::MapInfo;
+pub use crate::map::MapKeyIter;
+pub use crate::map::MapMut;
+pub use crate::map::MapType;
+pub use crate::map::OpenMap;
+pub use crate::map::OpenMapMut;
+pub use crate::object::AsRawLibbpf;
+pub use crate::object::MapIter;
+pub use crate::object::Object;
+pub use crate::object::ObjectBuilder;
+pub use crate::object::OpenObject;
+pub use crate::object::ProgIter;
+pub use crate::perf_buffer::PerfBuffer;
+pub use crate::perf_buffer::PerfBufferBuilder;
+pub use crate::print::get_print;
+pub use crate::print::set_print;
+pub use crate::print::PrintCallback;
+pub use crate::print::PrintLevel;
+pub use crate::program::Input as ProgramInput;
+pub use crate::program::OpenProgram;
+pub use crate::program::OpenProgramMut;
+pub use crate::program::Output as ProgramOutput;
+pub use crate::program::Program;
+pub use crate::program::ProgramAttachType;
+pub use crate::program::ProgramMut;
+pub use crate::program::ProgramType;
+pub use crate::program::TracepointOpts;
+pub use crate::program::UprobeOpts;
+pub use crate::program::UsdtOpts;
+pub use crate::ringbuf::RingBuffer;
+pub use crate::ringbuf::RingBufferBuilder;
+pub use crate::tc::TcAttachPoint;
+pub use crate::tc::TcHook;
+pub use crate::tc::TcHookBuilder;
+pub use crate::tc::TC_CUSTOM;
+pub use crate::tc::TC_EGRESS;
+pub use crate::tc::TC_H_CLSACT;
+pub use crate::tc::TC_H_INGRESS;
+pub use crate::tc::TC_H_MIN_EGRESS;
+pub use crate::tc::TC_H_MIN_INGRESS;
+pub use crate::tc::TC_INGRESS;
+pub use crate::user_ringbuf::UserRingBuffer;
+pub use crate::user_ringbuf::UserRingBufferSample;
+pub use crate::util::num_possible_cpus;
+pub use crate::xdp::Xdp;
+pub use crate::xdp::XdpFlags;
+
+
+/// An unconstructible dummy type used for tagging mutable type
+/// variants.
+#[doc(hidden)]
+#[derive(Copy, Clone, Debug)]
+pub enum Mut {}
+
+
+/// Used by generated skeletons -- an end user should not consider this API stable
+#[doc(hidden)]
+pub mod __internal_skel {
+ pub use super::skeleton::*;
+}
+
+/// Skeleton related definitions.
+pub mod skel {
+ pub use super::skeleton::OpenSkel;
+ pub use super::skeleton::Skel;
+ pub use super::skeleton::SkelBuilder;
+}
diff --git a/src/link.rs b/src/link.rs
new file mode 100644
index 0000000..a168380
--- /dev/null
+++ b/src/link.rs
@@ -0,0 +1,139 @@
+use std::fmt::Debug;
+use std::os::unix::io::AsFd;
+use std::os::unix::io::BorrowedFd;
+use std::path::Path;
+use std::path::PathBuf;
+use std::ptr::NonNull;
+
+use crate::util;
+use crate::util::validate_bpf_ret;
+use crate::AsRawLibbpf;
+use crate::ErrorExt as _;
+use crate::Program;
+use crate::Result;
+
+/// Represents an attached [`Program`].
+///
+/// This struct is used to model ownership. The underlying program will be detached
+/// when this object is dropped if nothing else is holding a reference count.
+#[derive(Debug)]
+pub struct Link {
+ ptr: NonNull<libbpf_sys::bpf_link>,
+}
+
+impl Link {
+ /// Create a new [`Link`] from a [`libbpf_sys::bpf_link`].
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must point to a correctly initialized [`libbpf_sys::bpf_link`].
+ pub(crate) unsafe fn new(ptr: NonNull<libbpf_sys::bpf_link>) -> Self {
+ Link { ptr }
+ }
+
+ /// Create link from BPF FS file.
+ pub fn open<P: AsRef<Path>>(path: P) -> Result<Self> {
+ let path_c = util::path_to_cstring(path)?;
+ let path_ptr = path_c.as_ptr();
+ let ptr = unsafe { libbpf_sys::bpf_link__open(path_ptr) };
+ let ptr = validate_bpf_ret(ptr).context("failed to open link")?;
+ let slf = unsafe { Self::new(ptr) };
+ Ok(slf)
+ }
+
+ /// Takes ownership from pointer.
+ ///
+ /// # Safety
+ ///
+ /// It is not safe to manipulate `ptr` after this operation.
+ pub unsafe fn from_ptr(ptr: NonNull<libbpf_sys::bpf_link>) -> Self {
+ unsafe { Self::new(ptr) }
+ }
+
+ /// Replace the underlying prog with `prog`.
+ pub fn update_prog(&mut self, prog: &Program<'_>) -> Result<()> {
+ let ret =
+ unsafe { libbpf_sys::bpf_link__update_program(self.ptr.as_ptr(), prog.ptr.as_ptr()) };
+ util::parse_ret(ret)
+ }
+
+ /// Release "ownership" of underlying BPF resource (typically, a BPF program
+ /// attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected
+ /// links, when destructed through bpf_link__destroy() call won't attempt to
+ /// detach/unregistered that BPF resource. This is useful in situations where,
+ /// say, attached BPF program has to outlive userspace program that attached it
+ /// in the system. Depending on type of BPF program, though, there might be
+ /// additional steps (like pinning BPF program in BPF FS) necessary to ensure
+ /// exit of userspace program doesn't trigger automatic detachment and clean up
+ /// inside the kernel.
+ pub fn disconnect(&mut self) {
+ unsafe { libbpf_sys::bpf_link__disconnect(self.ptr.as_ptr()) }
+ }
+
+ /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
+ /// this link to bpffs.
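+    ///
+    /// A sketch (the pin path is illustrative and assumes a mounted bpffs):
+    /// ```no_run
+    /// # fn pin_it(link: &mut libbpf_rs::Link) -> libbpf_rs::Result<()> {
+    /// link.pin("/sys/fs/bpf/my_link")?;
+    /// # Ok(())
+    /// # }
+    /// ```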
+ pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
+ let path_c = util::path_to_cstring(path)?;
+ let path_ptr = path_c.as_ptr();
+
+ let ret = unsafe { libbpf_sys::bpf_link__pin(self.ptr.as_ptr(), path_ptr) };
+ util::parse_ret(ret)
+ }
+
+ /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
+ /// from bpffs
+ pub fn unpin(&mut self) -> Result<()> {
+ let ret = unsafe { libbpf_sys::bpf_link__unpin(self.ptr.as_ptr()) };
+ util::parse_ret(ret)
+ }
+
+ /// Returns path to BPF FS file or `None` if not pinned.
+ pub fn pin_path(&self) -> Option<PathBuf> {
+ let path_ptr = unsafe { libbpf_sys::bpf_link__pin_path(self.ptr.as_ptr()) };
+ if path_ptr.is_null() {
+ return None;
+ }
+
+ let path = match util::c_ptr_to_string(path_ptr) {
+ Ok(p) => p,
+ Err(_) => return None,
+ };
+
+ Some(PathBuf::from(path.as_str()))
+ }
+
+ /// Detach the link.
+ pub fn detach(&self) -> Result<()> {
+ let ret = unsafe { libbpf_sys::bpf_link__detach(self.ptr.as_ptr()) };
+ util::parse_ret(ret)
+ }
+}
+
+impl AsRawLibbpf for Link {
+ type LibbpfType = libbpf_sys::bpf_link;
+
+ /// Retrieve the underlying [`libbpf_sys::bpf_link`].
+ fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
+ self.ptr
+ }
+}
+
+// SAFETY: `bpf_link` objects can safely be sent to a different thread.
+unsafe impl Send for Link {}
+
+impl AsFd for Link {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ let fd = unsafe { libbpf_sys::bpf_link__fd(self.ptr.as_ptr()) };
+ // SAFETY: `bpf_link__fd` always returns a valid fd and the underlying
+ // libbpf object is not destroyed until the object is dropped,
+ // which means the fd remains valid as well.
+ unsafe { BorrowedFd::borrow_raw(fd) }
+ }
+}
+
+impl Drop for Link {
+ fn drop(&mut self) {
+ let _ = unsafe { libbpf_sys::bpf_link__destroy(self.ptr.as_ptr()) };
+ }
+}
diff --git a/src/linker.rs b/src/linker.rs
new file mode 100644
index 0000000..6a592ef
--- /dev/null
+++ b/src/linker.rs
@@ -0,0 +1,101 @@
+use std::path::Path;
+use std::ptr::null_mut;
+use std::ptr::NonNull;
+
+use crate::util::path_to_cstring;
+use crate::util::validate_bpf_ret;
+use crate::AsRawLibbpf;
+use crate::Error;
+use crate::ErrorExt as _;
+use crate::Result;
+
+/// A type used for linking multiple BPF object files into a single one.
+///
+/// Please refer to
+/// <https://lwn.net/ml/bpf/[email protected]/> for
+/// additional details.
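+///
+/// A minimal sketch (object file names are illustrative):
+/// ```no_run
+/// use libbpf_rs::Linker;
+///
+/// # fn link_them() -> libbpf_rs::Result<()> {
+/// let mut linker = Linker::new("combined.bpf.o")?;
+/// linker.add_file("prog_a.bpf.o")?;
+/// linker.add_file("prog_b.bpf.o")?;
+/// // Writes the linked result to `combined.bpf.o`.
+/// linker.link()?;
+/// # Ok(())
+/// # }
+/// ```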
+#[derive(Debug)]
+pub struct Linker {
+ /// The `libbpf` linker object.
+ linker: NonNull<libbpf_sys::bpf_linker>,
+}
+
+impl Linker {
+ /// Instantiate a `Linker` object.
+ pub fn new<P>(output: P) -> Result<Self>
+ where
+ P: AsRef<Path>,
+ {
+ let output = path_to_cstring(output)?;
+ let opts = null_mut();
+ // SAFETY: `output` is a valid pointer and `opts` is accepted as NULL.
+ let ptr = unsafe { libbpf_sys::bpf_linker__new(output.as_ptr(), opts) };
+        let ptr = validate_bpf_ret(ptr).context("failed to create BPF linker")?;
+ let slf = Self { linker: ptr };
+ Ok(slf)
+ }
+
+ /// Add a file to the set of files to link.
+ pub fn add_file<P>(&mut self, file: P) -> Result<()>
+ where
+ P: AsRef<Path>,
+ {
+ let file = path_to_cstring(file)?;
+ let opts = null_mut();
+ // SAFETY: `linker` and `file` are a valid pointers.
+ let err =
+ unsafe { libbpf_sys::bpf_linker__add_file(self.linker.as_ptr(), file.as_ptr(), opts) };
+ if err != 0 {
+ Err(Error::from_raw_os_error(err)).context("bpf_linker__add_file failed")
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Link all BPF object files [added](Self::add_file) to this object into
+ /// a single one.
+ pub fn link(&self) -> Result<()> {
+ // SAFETY: `linker` is a valid pointer.
+ let err = unsafe { libbpf_sys::bpf_linker__finalize(self.linker.as_ptr()) };
+ if err != 0 {
+ return Err(Error::from_raw_os_error(err)).context("bpf_linker__finalize failed");
+ }
+ Ok(())
+ }
+}
+
+impl AsRawLibbpf for Linker {
+ type LibbpfType = libbpf_sys::bpf_linker;
+
+ /// Retrieve the underlying [`libbpf_sys::bpf_linker`].
+ fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
+ self.linker
+ }
+}
+
+// SAFETY: `bpf_linker` can be sent to a different thread.
+unsafe impl Send for Linker {}
+
+impl Drop for Linker {
+ fn drop(&mut self) {
+ // SAFETY: `linker` is a valid pointer returned by `bpf_linker__new`.
+ unsafe { libbpf_sys::bpf_linker__free(self.linker.as_ptr()) }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ /// Check that `Linker` is `Send`.
+ #[test]
+ fn linker_is_send() {
+ fn test<T>()
+ where
+ T: Send,
+ {
+ }
+
+ test::<Linker>();
+ }
+}
diff --git a/src/map.rs b/src/map.rs
new file mode 100644
index 0000000..817f583
--- /dev/null
+++ b/src/map.rs
@@ -0,0 +1,1367 @@
+use core::ffi::c_void;
+use std::ffi::CStr;
+use std::ffi::CString;
+use std::ffi::OsStr;
+use std::ffi::OsString;
+use std::fmt::Debug;
+use std::fs::remove_file;
+use std::io;
+use std::marker::PhantomData;
+use std::mem;
+use std::mem::transmute;
+use std::ops::Deref;
+use std::os::unix::ffi::OsStrExt;
+use std::os::unix::io::AsFd;
+use std::os::unix::io::AsRawFd;
+use std::os::unix::io::BorrowedFd;
+use std::os::unix::io::FromRawFd;
+use std::os::unix::io::OwnedFd;
+use std::os::unix::io::RawFd;
+use std::path::Path;
+use std::ptr;
+use std::ptr::NonNull;
+use std::slice;
+use std::slice::from_raw_parts;
+
+use bitflags::bitflags;
+use libbpf_sys::bpf_map_info;
+use libbpf_sys::bpf_obj_get_info_by_fd;
+
+use crate::util;
+use crate::util::parse_ret_i32;
+use crate::util::validate_bpf_ret;
+use crate::AsRawLibbpf;
+use crate::Error;
+use crate::ErrorExt as _;
+use crate::Link;
+use crate::Mut;
+use crate::Result;
+
+/// An immutable parsed but not yet loaded BPF map.
+pub type OpenMap<'obj> = OpenMapImpl<'obj>;
+/// A mutable parsed but not yet loaded BPF map.
+pub type OpenMapMut<'obj> = OpenMapImpl<'obj, Mut>;
+
+
+/// Represents a parsed but not yet loaded BPF map.
+///
+/// This object exposes operations that need to happen before the map is created.
+///
+/// Some methods require working with raw bytes. You may find libraries such as
+/// [`plain`](https://crates.io/crates/plain) helpful.
+#[derive(Debug)]
+#[repr(transparent)]
+pub struct OpenMapImpl<'obj, T = ()> {
+ ptr: NonNull<libbpf_sys::bpf_map>,
+ _phantom: PhantomData<&'obj T>,
+}
+
+// TODO: Document members.
+#[allow(missing_docs)]
+impl<'obj> OpenMap<'obj> {
+ /// Create a new [`OpenMap`] from a ptr to a `libbpf_sys::bpf_map`.
+ pub fn new(object: &'obj libbpf_sys::bpf_map) -> Self {
+ // SAFETY: We inferred the address from a reference, which is always
+ // valid.
+ Self {
+ ptr: unsafe { NonNull::new_unchecked(object as *const _ as *mut _) },
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Retrieve the [`OpenMap`]'s name.
+ pub fn name(&self) -> &OsStr {
+ // SAFETY: We ensured `ptr` is valid during construction.
+ let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
+ // SAFETY: `bpf_map__name` can return NULL but only if it's passed
+ // NULL. We know `ptr` is not NULL.
+ let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
+ OsStr::from_bytes(name_c_str.to_bytes())
+ }
+
+ /// Retrieve type of the map.
+ pub fn map_type(&self) -> MapType {
+ let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
+ MapType::from(ty)
+ }
+
+ fn initial_value_raw(&self) -> (*mut u8, usize) {
+ let mut size = 0u64;
+ let ptr = unsafe {
+ libbpf_sys::bpf_map__initial_value(self.ptr.as_ptr(), &mut size as *mut _ as _)
+ };
+ (ptr.cast(), size as _)
+ }
+
+ /// Retrieve the initial value of the map.
+ pub fn initial_value(&self) -> Option<&[u8]> {
+ let (ptr, size) = self.initial_value_raw();
+ if ptr.is_null() {
+ None
+ } else {
+ let data = unsafe { slice::from_raw_parts(ptr.cast::<u8>(), size) };
+ Some(data)
+ }
+ }
+}
+
+impl<'obj> OpenMapMut<'obj> {
+ /// Create a new [`OpenMapMut`] from a ptr to a `libbpf_sys::bpf_map`.
+ pub fn new_mut(object: &'obj mut libbpf_sys::bpf_map) -> Self {
+ Self {
+ ptr: unsafe { NonNull::new_unchecked(object as *mut _) },
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Retrieve the initial value of the map.
+ pub fn initial_value_mut(&mut self) -> Option<&mut [u8]> {
+ let (ptr, size) = self.initial_value_raw();
+ if ptr.is_null() {
+ None
+ } else {
+ let data = unsafe { slice::from_raw_parts_mut(ptr.cast::<u8>(), size) };
+ Some(data)
+ }
+ }
+
+ pub fn set_map_ifindex(&mut self, idx: u32) {
+ unsafe { libbpf_sys::bpf_map__set_ifindex(self.ptr.as_ptr(), idx) };
+ }
+
+ pub fn set_initial_value(&mut self, data: &[u8]) -> Result<()> {
+ let ret = unsafe {
+ libbpf_sys::bpf_map__set_initial_value(
+ self.ptr.as_ptr(),
+ data.as_ptr() as *const c_void,
+ data.len() as libbpf_sys::size_t,
+ )
+ };
+
+ util::parse_ret(ret)
+ }
+
+ pub fn set_type(&mut self, ty: MapType) -> Result<()> {
+ let ret = unsafe { libbpf_sys::bpf_map__set_type(self.ptr.as_ptr(), ty as u32) };
+ util::parse_ret(ret)
+ }
+
+ pub fn set_key_size(&mut self, size: u32) -> Result<()> {
+ let ret = unsafe { libbpf_sys::bpf_map__set_key_size(self.ptr.as_ptr(), size) };
+ util::parse_ret(ret)
+ }
+
+ pub fn set_value_size(&mut self, size: u32) -> Result<()> {
+ let ret = unsafe { libbpf_sys::bpf_map__set_value_size(self.ptr.as_ptr(), size) };
+ util::parse_ret(ret)
+ }
+
+ pub fn set_max_entries(&mut self, count: u32) -> Result<()> {
+ let ret = unsafe { libbpf_sys::bpf_map__set_max_entries(self.ptr.as_ptr(), count) };
+ util::parse_ret(ret)
+ }
+
+ pub fn set_map_flags(&mut self, flags: u32) -> Result<()> {
+ let ret = unsafe { libbpf_sys::bpf_map__set_map_flags(self.ptr.as_ptr(), flags) };
+ util::parse_ret(ret)
+ }
+
+ pub fn set_numa_node(&mut self, numa_node: u32) -> Result<()> {
+ let ret = unsafe { libbpf_sys::bpf_map__set_numa_node(self.ptr.as_ptr(), numa_node) };
+ util::parse_ret(ret)
+ }
+
+ pub fn set_inner_map_fd(&mut self, inner_map_fd: BorrowedFd<'_>) -> Result<()> {
+ let ret = unsafe {
+ libbpf_sys::bpf_map__set_inner_map_fd(self.ptr.as_ptr(), inner_map_fd.as_raw_fd())
+ };
+ util::parse_ret(ret)
+ }
+
+ pub fn set_map_extra(&mut self, map_extra: u64) -> Result<()> {
+ let ret = unsafe { libbpf_sys::bpf_map__set_map_extra(self.ptr.as_ptr(), map_extra) };
+ util::parse_ret(ret)
+ }
+
+ pub fn set_autocreate(&mut self, autocreate: bool) -> Result<()> {
+ let ret = unsafe { libbpf_sys::bpf_map__set_autocreate(self.ptr.as_ptr(), autocreate) };
+ util::parse_ret(ret)
+ }
+
+ pub fn set_pin_path<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
+ let path_c = util::path_to_cstring(path)?;
+ let path_ptr = path_c.as_ptr();
+
+ let ret = unsafe { libbpf_sys::bpf_map__set_pin_path(self.ptr.as_ptr(), path_ptr) };
+ util::parse_ret(ret)
+ }
+
+ /// Reuse an fd for a BPF map
+ pub fn reuse_fd(&mut self, fd: BorrowedFd<'_>) -> Result<()> {
+ let ret = unsafe { libbpf_sys::bpf_map__reuse_fd(self.ptr.as_ptr(), fd.as_raw_fd()) };
+ util::parse_ret(ret)
+ }
+
+ /// Reuse an already-pinned map for `self`.
+ pub fn reuse_pinned_map<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
+ let cstring = util::path_to_cstring(path)?;
+
+ let fd = unsafe { libbpf_sys::bpf_obj_get(cstring.as_ptr()) };
+ if fd < 0 {
+ return Err(Error::from(io::Error::last_os_error()));
+ }
+
+ let fd = unsafe { OwnedFd::from_raw_fd(fd) };
+
+ let reuse_result = self.reuse_fd(fd.as_fd());
+
+ reuse_result
+ }
+}
+
+impl<'obj> Deref for OpenMapMut<'obj> {
+ type Target = OpenMap<'obj>;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: `OpenMapImpl` is `repr(transparent)` and so in-memory
+ // representation of both types is the same.
+ unsafe { transmute::<&OpenMapMut<'obj>, &OpenMap<'obj>>(self) }
+ }
+}
+
+impl<T> AsRawLibbpf for OpenMapImpl<'_, T> {
+ type LibbpfType = libbpf_sys::bpf_map;
+
+ /// Retrieve the underlying [`libbpf_sys::bpf_map`].
+ fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
+ self.ptr
+ }
+}
+
+pub(crate) fn map_fd(map: NonNull<libbpf_sys::bpf_map>) -> Option<RawFd> {
+ let fd = unsafe { libbpf_sys::bpf_map__fd(map.as_ptr()) };
+ let fd = util::parse_ret_i32(fd).ok().map(|fd| fd as RawFd);
+ fd
+}
+
+/// Return the size of one value including padding for interacting with per-cpu
+/// maps. The values are aligned to 8 bytes.
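+/// For example, a 4-byte value rounds up to 8 bytes, so on a system with 4
+/// possible CPUs the corresponding per-cpu lookup buffer spans 32 bytes.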
+fn percpu_aligned_value_size<M>(map: &M) -> usize
+where
+ M: MapCore + ?Sized,
+{
+ let val_size = map.value_size() as usize;
+ util::roundup(val_size, 8)
+}
+
+/// Returns the size of the buffer needed for a lookup/update of a per-cpu map.
+fn percpu_buffer_size<M>(map: &M) -> Result<usize>
+where
+ M: MapCore + ?Sized,
+{
+ let aligned_val_size = percpu_aligned_value_size(map);
+ let ncpu = crate::num_possible_cpus()?;
+ Ok(ncpu * aligned_val_size)
+}
+
+/// Perform a key check and return a null pointer when dealing with a
+/// queue/stack/bloom-filter map, before passing the key to the bpf functions
+/// that support those map types.
+fn map_key<M>(map: &M, key: &[u8]) -> *const c_void
+where
+ M: MapCore + ?Sized,
+{
+    // For all the keyless maps we null out the key, per the documentation of libbpf.
+ if map.key_size() == 0 && map.map_type().is_keyless() {
+ return ptr::null();
+ }
+
+ key.as_ptr() as *const c_void
+}
+
+/// Internal function to return a value from a map into a buffer of the given size.
+fn lookup_raw<M>(map: &M, key: &[u8], flags: MapFlags, out_size: usize) -> Result<Option<Vec<u8>>>
+where
+ M: MapCore + ?Sized,
+{
+ if key.len() != map.key_size() as usize {
+ return Err(Error::with_invalid_data(format!(
+ "key_size {} != {}",
+ key.len(),
+ map.key_size()
+ )));
+ };
+
+ let mut out: Vec<u8> = Vec::with_capacity(out_size);
+
+ let ret = unsafe {
+ libbpf_sys::bpf_map_lookup_elem_flags(
+ map.as_fd().as_raw_fd(),
+ map_key(map, key),
+ out.as_mut_ptr() as *mut c_void,
+ flags.bits(),
+ )
+ };
+
+ if ret == 0 {
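+        // A zero return means the lookup succeeded and the kernel wrote the
+        // result into the buffer, so the first `out_size` bytes are treated
+        // as initialized.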
+ unsafe {
+ out.set_len(out_size);
+ }
+ Ok(Some(out))
+ } else {
+ let err = io::Error::last_os_error();
+ if err.kind() == io::ErrorKind::NotFound {
+ Ok(None)
+ } else {
+ Err(Error::from(err))
+ }
+ }
+}
+
+/// Internal function to update a map. This does not check the length of the
+/// supplied value.
+fn update_raw<M>(map: &M, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()>
+where
+ M: MapCore + ?Sized,
+{
+ if key.len() != map.key_size() as usize {
+ return Err(Error::with_invalid_data(format!(
+ "key_size {} != {}",
+ key.len(),
+ map.key_size()
+ )));
+ };
+
+ let ret = unsafe {
+ libbpf_sys::bpf_map_update_elem(
+ map.as_fd().as_raw_fd(),
+ map_key(map, key),
+ value.as_ptr() as *const c_void,
+ flags.bits(),
+ )
+ };
+
+ util::parse_ret(ret)
+}
+
+#[allow(clippy::wildcard_imports)]
+mod private {
+ use super::*;
+
+ pub trait Sealed {}
+
+ impl<T> Sealed for MapImpl<'_, T> {}
+ impl Sealed for MapHandle {}
+}
+
+/// A trait representing core functionality common to fully initialized maps.
+pub trait MapCore: Debug + AsFd + private::Sealed {
+ /// Retrieve the map's name.
+ fn name(&self) -> &OsStr;
+
+ /// Retrieve type of the map.
+ fn map_type(&self) -> MapType;
+
+ /// Retrieve the size of the map's keys.
+ fn key_size(&self) -> u32;
+
+ /// Retrieve the size of the map's values.
+ fn value_size(&self) -> u32;
+
+ /// Fetch extra map information
+ #[inline]
+ fn info(&self) -> Result<MapInfo> {
+ MapInfo::new(self.as_fd())
+ }
+
+ /// Returns an iterator over keys in this map
+ ///
+ /// Note that if the map is not stable (stable meaning no updates or deletes) during iteration,
+ /// iteration can skip keys, restart from the beginning, or duplicate keys. In other words,
+ /// iteration becomes unpredictable.
+ fn keys(&self) -> MapKeyIter<'_> {
+ MapKeyIter::new(self.as_fd(), self.key_size())
+ }
+
+ /// Returns map value as `Vec` of `u8`.
+ ///
+ /// `key` must have exactly [`Self::key_size()`] elements.
+ ///
+ /// If the map is one of the per-cpu data structures, the function [`Self::lookup_percpu()`]
+ /// must be used.
+    /// If the map is of type bloom_filter, the function
+    /// [`Self::lookup_bloom_filter()`] must be used.
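+    ///
+    /// A hedged sketch, assuming a map with 4-byte keys:
+    ///
+    /// ```no_run
+    /// # use libbpf_rs::{MapCore as _, MapFlags, MapHandle};
+    /// # let map = MapHandle::from_map_id(0).unwrap();
+    /// let key = 42u32.to_ne_bytes();
+    /// if let Some(value) = map.lookup(&key, MapFlags::ANY).unwrap() {
+    ///     println!("value bytes: {value:?}");
+    /// }
+    /// ```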
+ fn lookup(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<u8>>> {
+ if self.map_type().is_bloom_filter() {
+ return Err(Error::with_invalid_data(
+ "lookup_bloom_filter() must be used for bloom filter maps",
+ ));
+ }
+ if self.map_type().is_percpu() {
+ return Err(Error::with_invalid_data(format!(
+ "lookup_percpu() must be used for per-cpu maps (type of the map is {:?})",
+ self.map_type(),
+ )));
+ }
+
+ let out_size = self.value_size() as usize;
+ lookup_raw(self, key, flags, out_size)
+ }
+
+ /// Returns if the given value is likely present in bloom_filter as `bool`.
+ ///
+ /// `value` must have exactly [`Self::value_size()`] elements.
+ fn lookup_bloom_filter(&self, value: &[u8]) -> Result<bool> {
+ let ret = unsafe {
+ libbpf_sys::bpf_map_lookup_elem(
+ self.as_fd().as_raw_fd(),
+ ptr::null(),
+ value.to_vec().as_mut_ptr() as *mut c_void,
+ )
+ };
+
+ if ret == 0 {
+ Ok(true)
+ } else {
+ let err = io::Error::last_os_error();
+ if err.kind() == io::ErrorKind::NotFound {
+ Ok(false)
+ } else {
+ Err(Error::from(err))
+ }
+ }
+ }
+
+    /// Returns one value per cpu as `Vec` of `Vec` of `u8` for per-cpu maps.
+ ///
+ /// For normal maps, [`Self::lookup()`] must be used.
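+    ///
+    /// A rough sketch reading one value per possible CPU, assuming 4-byte keys:
+    ///
+    /// ```no_run
+    /// # use libbpf_rs::{MapCore as _, MapFlags, MapHandle};
+    /// # let map = MapHandle::from_map_id(0).unwrap();
+    /// let key = 42u32.to_ne_bytes();
+    /// if let Some(values) = map.lookup_percpu(&key, MapFlags::ANY).unwrap() {
+    ///     for (cpu, value) in values.iter().enumerate() {
+    ///         println!("cpu {cpu}: {value:?}");
+    ///     }
+    /// }
+    /// ```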
+ fn lookup_percpu(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<Vec<u8>>>> {
+ if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
+ return Err(Error::with_invalid_data(format!(
+ "lookup() must be used for maps that are not per-cpu (type of the map is {:?})",
+ self.map_type(),
+ )));
+ }
+
+ let val_size = self.value_size() as usize;
+ let aligned_val_size = percpu_aligned_value_size(self);
+ let out_size = percpu_buffer_size(self)?;
+
+ let raw_res = lookup_raw(self, key, flags, out_size)?;
+ if let Some(raw_vals) = raw_res {
+ let mut out = Vec::new();
+ for chunk in raw_vals.chunks_exact(aligned_val_size) {
+ out.push(chunk[..val_size].to_vec());
+ }
+ Ok(Some(out))
+ } else {
+ Ok(None)
+ }
+ }
+
+ /// Deletes an element from the map.
+ ///
+ /// `key` must have exactly [`Self::key_size()`] elements.
+ fn delete(&self, key: &[u8]) -> Result<()> {
+ if key.len() != self.key_size() as usize {
+ return Err(Error::with_invalid_data(format!(
+ "key_size {} != {}",
+ key.len(),
+ self.key_size()
+ )));
+ };
+
+ let ret = unsafe {
+ libbpf_sys::bpf_map_delete_elem(self.as_fd().as_raw_fd(), key.as_ptr() as *const c_void)
+ };
+ util::parse_ret(ret)
+ }
+
+ /// Deletes many elements in batch mode from the map.
+ ///
+ /// `keys` must have exactly [`Self::key_size()` * count] elements.
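+    ///
+    /// A hedged sketch deleting two 4-byte keys in one call:
+    ///
+    /// ```no_run
+    /// # use libbpf_rs::{MapCore as _, MapFlags, MapHandle};
+    /// # let map = MapHandle::from_map_id(0).unwrap();
+    /// let keys: Vec<u8> = [1u32, 2u32]
+    ///     .iter()
+    ///     .flat_map(|k| k.to_ne_bytes())
+    ///     .collect();
+    /// map.delete_batch(&keys, 2, MapFlags::ANY, MapFlags::ANY).unwrap();
+    /// ```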
+ fn delete_batch(
+ &self,
+ keys: &[u8],
+ count: u32,
+ elem_flags: MapFlags,
+ flags: MapFlags,
+ ) -> Result<()> {
+ if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
+ return Err(Error::with_invalid_data(format!(
+ "batch key_size {} != {} * {}",
+ keys.len(),
+ self.key_size(),
+ count
+ )));
+ };
+
+ #[allow(clippy::needless_update)]
+ let opts = libbpf_sys::bpf_map_batch_opts {
+ sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
+ elem_flags: elem_flags.bits(),
+ flags: flags.bits(),
+            // bpf_map_batch_opts might have padding fields on some platforms
+ ..Default::default()
+ };
+
+ let mut count = count;
+ let ret = unsafe {
+ libbpf_sys::bpf_map_delete_batch(
+ self.as_fd().as_raw_fd(),
+ keys.as_ptr() as *const c_void,
+ (&mut count) as *mut u32,
+ &opts as *const libbpf_sys::bpf_map_batch_opts,
+ )
+ };
+ util::parse_ret(ret)
+ }
+
+ /// Same as [`Self::lookup()`] except this also deletes the key from the map.
+ ///
+ /// Note that this operation is currently only implemented in the kernel for [`MapType::Queue`]
+ /// and [`MapType::Stack`].
+ ///
+ /// `key` must have exactly [`Self::key_size()`] elements.
+ fn lookup_and_delete(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
+ if key.len() != self.key_size() as usize {
+ return Err(Error::with_invalid_data(format!(
+ "key_size {} != {}",
+ key.len(),
+ self.key_size()
+ )));
+ };
+
+ let mut out: Vec<u8> = Vec::with_capacity(self.value_size() as usize);
+
+ let ret = unsafe {
+ libbpf_sys::bpf_map_lookup_and_delete_elem(
+ self.as_fd().as_raw_fd(),
+ map_key(self, key),
+ out.as_mut_ptr() as *mut c_void,
+ )
+ };
+
+ if ret == 0 {
+ unsafe {
+ out.set_len(self.value_size() as usize);
+ }
+ Ok(Some(out))
+ } else {
+ let err = io::Error::last_os_error();
+ if err.kind() == io::ErrorKind::NotFound {
+ Ok(None)
+ } else {
+ Err(Error::from(err))
+ }
+ }
+ }
+
+ /// Update an element.
+ ///
+ /// `key` must have exactly [`Self::key_size()`] elements. `value` must have exactly
+ /// [`Self::value_size()`] elements.
+ ///
+ /// For per-cpu maps, [`Self::update_percpu()`] must be used.
+ fn update(&self, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> {
+ if self.map_type().is_percpu() {
+ return Err(Error::with_invalid_data(format!(
+ "update_percpu() must be used for per-cpu maps (type of the map is {:?})",
+ self.map_type(),
+ )));
+ }
+
+ if value.len() != self.value_size() as usize {
+ return Err(Error::with_invalid_data(format!(
+ "value_size {} != {}",
+ value.len(),
+ self.value_size()
+ )));
+ };
+
+ update_raw(self, key, value, flags)
+ }
+
+    /// Updates many elements in batch mode in the map.
+    ///
+    /// `keys` must have exactly [`Self::key_size()` * count] elements. `values` must have
+    /// exactly [`Self::value_size()` * count] elements.
+ fn update_batch(
+ &self,
+ keys: &[u8],
+ values: &[u8],
+ count: u32,
+ elem_flags: MapFlags,
+ flags: MapFlags,
+ ) -> Result<()> {
+ if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
+ return Err(Error::with_invalid_data(format!(
+ "batch key_size {} != {} * {}",
+ keys.len(),
+ self.key_size(),
+ count
+ )));
+ };
+
+ if values.len() as u32 / count != self.value_size() || (values.len() as u32) % count != 0 {
+ return Err(Error::with_invalid_data(format!(
+ "batch value_size {} != {} * {}",
+ values.len(),
+ self.value_size(),
+ count
+ )));
+ }
+
+ #[allow(clippy::needless_update)]
+ let opts = libbpf_sys::bpf_map_batch_opts {
+ sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
+ elem_flags: elem_flags.bits(),
+ flags: flags.bits(),
+            // bpf_map_batch_opts might have padding fields on some platforms
+ ..Default::default()
+ };
+
+ let mut count = count;
+ let ret = unsafe {
+ libbpf_sys::bpf_map_update_batch(
+ self.as_fd().as_raw_fd(),
+ keys.as_ptr() as *const c_void,
+ values.as_ptr() as *const c_void,
+ (&mut count) as *mut u32,
+ &opts as *const libbpf_sys::bpf_map_batch_opts,
+ )
+ };
+
+ util::parse_ret(ret)
+ }
+
+    /// Update an element in a per-cpu map with one value per cpu.
+ ///
+ /// `key` must have exactly [`Self::key_size()`] elements. `value` must have one
+ /// element per cpu (see [`num_possible_cpus`][crate::num_possible_cpus])
+ /// with exactly [`Self::value_size()`] elements each.
+ ///
+    /// For non-per-cpu maps, [`Self::update()`] must be used.
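+    ///
+    /// A rough sketch, assuming 4-byte keys and 8-byte values:
+    ///
+    /// ```no_run
+    /// # use libbpf_rs::{MapCore as _, MapFlags, MapHandle};
+    /// # let map = MapHandle::from_map_id(0).unwrap();
+    /// let key = 42u32.to_ne_bytes();
+    /// let ncpus = libbpf_rs::num_possible_cpus().unwrap();
+    /// let values = vec![0u64.to_ne_bytes().to_vec(); ncpus];
+    /// map.update_percpu(&key, &values, MapFlags::ANY).unwrap();
+    /// ```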
+ fn update_percpu(&self, key: &[u8], values: &[Vec<u8>], flags: MapFlags) -> Result<()> {
+ if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
+ return Err(Error::with_invalid_data(format!(
+ "update() must be used for maps that are not per-cpu (type of the map is {:?})",
+ self.map_type(),
+ )));
+ }
+
+ if values.len() != crate::num_possible_cpus()? {
+ return Err(Error::with_invalid_data(format!(
+ "number of values {} != number of cpus {}",
+ values.len(),
+ crate::num_possible_cpus()?
+ )));
+ };
+
+ let val_size = self.value_size() as usize;
+ let aligned_val_size = percpu_aligned_value_size(self);
+ let buf_size = percpu_buffer_size(self)?;
+
+ let mut value_buf = vec![0; buf_size];
+
+ for (i, val) in values.iter().enumerate() {
+ if val.len() != val_size {
+ return Err(Error::with_invalid_data(format!(
+ "value size for cpu {} is {} != {}",
+ i,
+ val.len(),
+ val_size
+ )));
+ }
+
+ value_buf[(i * aligned_val_size)..(i * aligned_val_size + val_size)]
+ .copy_from_slice(val);
+ }
+
+ update_raw(self, key, &value_buf, flags)
+ }
+}
+
+/// An immutable loaded BPF map.
+pub type Map<'obj> = MapImpl<'obj>;
+/// A mutable loaded BPF map.
+pub type MapMut<'obj> = MapImpl<'obj, Mut>;
+
+/// Represents a libbpf-created map.
+///
+/// Some methods require working with raw bytes. You may find libraries such as
+/// [`plain`](https://crates.io/crates/plain) helpful.
+#[derive(Debug)]
+pub struct MapImpl<'obj, T = ()> {
+ ptr: NonNull<libbpf_sys::bpf_map>,
+ _phantom: PhantomData<&'obj T>,
+}
+
+impl<'obj> Map<'obj> {
+ /// Create a [`Map`] from a [`libbpf_sys::bpf_map`].
+ pub fn new(map: &'obj libbpf_sys::bpf_map) -> Self {
+ // SAFETY: We inferred the address from a reference, which is always
+ // valid.
+ let ptr = unsafe { NonNull::new_unchecked(map as *const _ as *mut _) };
+ assert!(
+ map_fd(ptr).is_some(),
+ "provided BPF map does not have file descriptor"
+ );
+
+ Self {
+ ptr,
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Create a [`Map`] from a [`libbpf_sys::bpf_map`] that does not contain a
+ /// file descriptor.
+ ///
+ /// The caller has to ensure that the [`AsFd`] impl is not used, or a panic
+ /// will be the result.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must point to a loaded map.
+ #[doc(hidden)]
+ pub unsafe fn from_map_without_fd(ptr: NonNull<libbpf_sys::bpf_map>) -> Self {
+ Self {
+ ptr,
+ _phantom: PhantomData,
+ }
+ }
+
+    /// Returns whether the map is pinned.
+ pub fn is_pinned(&self) -> bool {
+ unsafe { libbpf_sys::bpf_map__is_pinned(self.ptr.as_ptr()) }
+ }
+
+    /// Returns the pin path if the map is pinned, otherwise `None`.
+ pub fn get_pin_path(&self) -> Option<&OsStr> {
+ let path_ptr = unsafe { libbpf_sys::bpf_map__pin_path(self.ptr.as_ptr()) };
+ if path_ptr.is_null() {
+ // means map is not pinned
+ return None;
+ }
+ let path_c_str = unsafe { CStr::from_ptr(path_ptr) };
+ Some(OsStr::from_bytes(path_c_str.to_bytes()))
+ }
+}
+
+impl<'obj> MapMut<'obj> {
+ /// Create a [`MapMut`] from a [`libbpf_sys::bpf_map`].
+ pub fn new_mut(map: &'obj mut libbpf_sys::bpf_map) -> Self {
+ // SAFETY: We inferred the address from a reference, which is always
+ // valid.
+ let ptr = unsafe { NonNull::new_unchecked(map as *mut _) };
+ assert!(
+ map_fd(ptr).is_some(),
+ "provided BPF map does not have file descriptor"
+ );
+
+ Self {
+ ptr,
+ _phantom: PhantomData,
+ }
+ }
+
+ /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
+ /// this map to bpffs.
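+    ///
+    /// A minimal sketch; the pin path is illustrative:
+    ///
+    /// ```no_run
+    /// # use libbpf_rs::MapMut;
+    /// # let mut map: MapMut = todo!();
+    /// map.pin("/sys/fs/bpf/my_map").unwrap();
+    /// ```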
+ pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
+ let path_c = util::path_to_cstring(path)?;
+ let path_ptr = path_c.as_ptr();
+
+ let ret = unsafe { libbpf_sys::bpf_map__pin(self.ptr.as_ptr(), path_ptr) };
+ util::parse_ret(ret)
+ }
+
+ /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
+ /// this map from bpffs.
+ pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
+ let path_c = util::path_to_cstring(path)?;
+ let path_ptr = path_c.as_ptr();
+ let ret = unsafe { libbpf_sys::bpf_map__unpin(self.ptr.as_ptr(), path_ptr) };
+ util::parse_ret(ret)
+ }
+
+ /// Attach a struct ops map
+ pub fn attach_struct_ops(&mut self) -> Result<Link> {
+ if self.map_type() != MapType::StructOps {
+ return Err(Error::with_invalid_data(format!(
+ "Invalid map type ({:?}) for attach_struct_ops()",
+ self.map_type(),
+ )));
+ }
+
+ let ptr = unsafe { libbpf_sys::bpf_map__attach_struct_ops(self.ptr.as_ptr()) };
+ let ptr = validate_bpf_ret(ptr).context("failed to attach struct_ops")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+}
+
+impl<'obj> Deref for MapMut<'obj> {
+ type Target = Map<'obj>;
+
+ fn deref(&self) -> &Self::Target {
+        // SAFETY: `Map` and `MapMut` share the same in-memory representation,
+        // differing only in the `PhantomData` type parameter.
+        unsafe { transmute::<&MapMut<'obj>, &Map<'obj>>(self) }
+ }
+}
+
+impl<T> AsFd for MapImpl<'_, T> {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ // SANITY: Our map must always have a file descriptor associated with
+ // it.
+ let fd = map_fd(self.ptr).unwrap();
+ // SAFETY: `fd` is guaranteed to be valid for the lifetime of
+ // the created object.
+ let fd = unsafe { BorrowedFd::borrow_raw(fd as _) };
+ fd
+ }
+}
+
+impl<T> MapCore for MapImpl<'_, T>
+where
+ T: Debug,
+{
+ fn name(&self) -> &OsStr {
+ // SAFETY: We ensured `ptr` is valid during construction.
+ let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
+ // SAFETY: `bpf_map__name` can return NULL but only if it's passed
+ // NULL. We know `ptr` is not NULL.
+ let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
+ OsStr::from_bytes(name_c_str.to_bytes())
+ }
+
+ #[inline]
+ fn map_type(&self) -> MapType {
+ let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
+ MapType::from(ty)
+ }
+
+ #[inline]
+ fn key_size(&self) -> u32 {
+ unsafe { libbpf_sys::bpf_map__key_size(self.ptr.as_ptr()) }
+ }
+
+ #[inline]
+ fn value_size(&self) -> u32 {
+ unsafe { libbpf_sys::bpf_map__value_size(self.ptr.as_ptr()) }
+ }
+}
+
+impl AsRawLibbpf for Map<'_> {
+ type LibbpfType = libbpf_sys::bpf_map;
+
+ /// Retrieve the underlying [`libbpf_sys::bpf_map`].
+ #[inline]
+ fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
+ self.ptr
+ }
+}
+
+/// A handle to a map. Handles can be duplicated and dropped.
+///
+/// While it is possible to [create one directly][MapHandle::create], in many cases it is
+/// useful to create such a handle from an existing [`Map`]:
+/// ```no_run
+/// # use libbpf_rs::Map;
+/// # use libbpf_rs::MapHandle;
+/// # let get_map = || -> &Map { todo!() };
+/// let map: &Map = get_map();
+/// let map_handle = MapHandle::try_from(map).unwrap();
+/// ```
+///
+/// Some methods require working with raw bytes. You may find libraries such as
+/// [`plain`](https://crates.io/crates/plain) helpful.
+#[derive(Debug)]
+pub struct MapHandle {
+ fd: OwnedFd,
+ name: OsString,
+ ty: MapType,
+ key_size: u32,
+ value_size: u32,
+}
+
+impl MapHandle {
+ /// Create a bpf map whose data is not managed by libbpf.
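+    ///
+    /// A hedged sketch creating a hash map with 4-byte keys and 8-byte values;
+    /// the name and sizes are illustrative:
+    ///
+    /// ```no_run
+    /// # use libbpf_rs::{MapHandle, MapType};
+    /// let opts = libbpf_sys::bpf_map_create_opts {
+    ///     sz: std::mem::size_of::<libbpf_sys::bpf_map_create_opts>() as _,
+    ///     ..Default::default()
+    /// };
+    /// let map = MapHandle::create(
+    ///     MapType::Hash,
+    ///     Some("my_map"),
+    ///     4,    // key size in bytes
+    ///     8,    // value size in bytes
+    ///     1024, // max entries
+    ///     &opts,
+    /// )
+    /// .unwrap();
+    /// ```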
+ pub fn create<T: AsRef<OsStr>>(
+ map_type: MapType,
+ name: Option<T>,
+ key_size: u32,
+ value_size: u32,
+ max_entries: u32,
+ opts: &libbpf_sys::bpf_map_create_opts,
+ ) -> Result<Self> {
+ let name = match name {
+ Some(name) => name.as_ref().to_os_string(),
+            // Older kernels don't support specifying a map name.
+ None => OsString::new(),
+ };
+ let name_c_str = CString::new(name.as_bytes()).map_err(|_| {
+ Error::with_invalid_data(format!("invalid name `{name:?}`: has NUL bytes"))
+ })?;
+ let name_c_ptr = if name.is_empty() {
+ ptr::null()
+ } else {
+ name_c_str.as_bytes_with_nul().as_ptr()
+ };
+
+ let fd = unsafe {
+ libbpf_sys::bpf_map_create(
+ map_type.into(),
+ name_c_ptr.cast(),
+ key_size,
+ value_size,
+ max_entries,
+ opts,
+ )
+ };
+ let () = util::parse_ret(fd)?;
+
+ Ok(Self {
+ // SAFETY: A file descriptor coming from the `bpf_map_create`
+ // function is always suitable for ownership and can be
+ // cleaned up with close.
+ fd: unsafe { OwnedFd::from_raw_fd(fd) },
+ name,
+ ty: map_type,
+ key_size,
+ value_size,
+ })
+ }
+
+ /// Open a previously pinned map from its path.
+ ///
+ /// # Panics
+ /// If the path contains null bytes.
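+    ///
+    /// A minimal sketch; the path is illustrative (bpffs is conventionally
+    /// mounted at `/sys/fs/bpf`):
+    ///
+    /// ```no_run
+    /// # use libbpf_rs::MapHandle;
+    /// let map = MapHandle::from_pinned_path("/sys/fs/bpf/my_map").unwrap();
+    /// ```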
+ pub fn from_pinned_path<P: AsRef<Path>>(path: P) -> Result<Self> {
+ fn inner(path: &Path) -> Result<MapHandle> {
+ let p = CString::new(path.as_os_str().as_bytes()).expect("path contained null bytes");
+ let fd = parse_ret_i32(unsafe {
+ // SAFETY
+            // p is never null since we allocated it ourselves.
+ libbpf_sys::bpf_obj_get(p.as_ptr())
+ })?;
+ MapHandle::from_fd(unsafe {
+ // SAFETY
+ // A file descriptor coming from the bpf_obj_get function is always suitable for
+ // ownership and can be cleaned up with close.
+ OwnedFd::from_raw_fd(fd)
+ })
+ }
+
+ inner(path.as_ref())
+ }
+
+ /// Open a loaded map from its map id.
+ pub fn from_map_id(id: u32) -> Result<Self> {
+ parse_ret_i32(unsafe {
+ // SAFETY
+ // This function is always safe to call.
+ libbpf_sys::bpf_map_get_fd_by_id(id)
+ })
+ .map(|fd| unsafe {
+ // SAFETY
+ // A file descriptor coming from the bpf_map_get_fd_by_id function is always suitable
+ // for ownership and can be cleaned up with close.
+ OwnedFd::from_raw_fd(fd)
+ })
+ .and_then(Self::from_fd)
+ }
+
+ fn from_fd(fd: OwnedFd) -> Result<Self> {
+ let info = MapInfo::new(fd.as_fd())?;
+ Ok(Self {
+ fd,
+ name: info.name()?.into(),
+ ty: info.map_type(),
+ key_size: info.info.key_size,
+ value_size: info.info.value_size,
+ })
+ }
+
+ /// Freeze the map as read-only from user space.
+ ///
+ /// Entries from a frozen map can no longer be updated or deleted with the
+ /// bpf() system call. This operation is not reversible, and the map remains
+ /// immutable from user space until its destruction. However, read and write
+ /// permissions for BPF programs to the map remain unchanged.
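+    ///
+    /// A minimal sketch:
+    ///
+    /// ```no_run
+    /// # use libbpf_rs::MapHandle;
+    /// # let map = MapHandle::from_map_id(0).unwrap();
+    /// map.freeze().unwrap();
+    /// // From here on, updates or deletes via the bpf() syscall will fail.
+    /// ```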
+ pub fn freeze(&self) -> Result<()> {
+ let ret = unsafe { libbpf_sys::bpf_map_freeze(self.fd.as_raw_fd()) };
+
+ util::parse_ret(ret)
+ }
+
+ /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
+ /// this map to bpffs.
+ pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
+ let path_c = util::path_to_cstring(path)?;
+ let path_ptr = path_c.as_ptr();
+
+ let ret = unsafe { libbpf_sys::bpf_obj_pin(self.fd.as_raw_fd(), path_ptr) };
+ util::parse_ret(ret)
+ }
+
+ /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
+ /// this map from bpffs.
+ pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
+ remove_file(path).context("failed to remove pin map")
+ }
+}
+
+impl MapCore for MapHandle {
+ #[inline]
+ fn name(&self) -> &OsStr {
+ &self.name
+ }
+
+ #[inline]
+ fn map_type(&self) -> MapType {
+ self.ty
+ }
+
+ #[inline]
+ fn key_size(&self) -> u32 {
+ self.key_size
+ }
+
+ #[inline]
+ fn value_size(&self) -> u32 {
+ self.value_size
+ }
+}
+
+impl AsFd for MapHandle {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.fd.as_fd()
+ }
+}
+
+impl<T> TryFrom<&MapImpl<'_, T>> for MapHandle
+where
+ T: Debug,
+{
+ type Error = Error;
+
+ fn try_from(other: &MapImpl<'_, T>) -> Result<Self> {
+ Ok(Self {
+ fd: other
+ .as_fd()
+ .try_clone_to_owned()
+ .context("failed to duplicate map file descriptor")?,
+ name: other.name().to_os_string(),
+ ty: other.map_type(),
+ key_size: other.key_size(),
+ value_size: other.value_size(),
+ })
+ }
+}
+
+impl TryFrom<&MapHandle> for MapHandle {
+ type Error = Error;
+
+ fn try_from(other: &MapHandle) -> Result<Self> {
+ Ok(Self {
+ fd: other
+ .as_fd()
+ .try_clone_to_owned()
+ .context("failed to duplicate map file descriptor")?,
+ name: other.name().to_os_string(),
+ ty: other.map_type(),
+ key_size: other.key_size(),
+ value_size: other.value_size(),
+ })
+ }
+}
+
+bitflags! {
+ /// Flags to configure [`Map`] operations.
+ #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
+ pub struct MapFlags: u64 {
+ /// See [`libbpf_sys::BPF_ANY`].
+ const ANY = libbpf_sys::BPF_ANY as _;
+ /// See [`libbpf_sys::BPF_NOEXIST`].
+ const NO_EXIST = libbpf_sys::BPF_NOEXIST as _;
+ /// See [`libbpf_sys::BPF_EXIST`].
+ const EXIST = libbpf_sys::BPF_EXIST as _;
+ /// See [`libbpf_sys::BPF_F_LOCK`].
+ const LOCK = libbpf_sys::BPF_F_LOCK as _;
+ }
+}
+
+/// Type of a [`Map`]. Maps to `enum bpf_map_type` in kernel uapi.
+// If you add a new per-cpu map, also update `is_percpu`.
+#[non_exhaustive]
+#[repr(u32)]
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+// TODO: Document members.
+#[allow(missing_docs)]
+pub enum MapType {
+ Unspec = 0,
+ Hash,
+ Array,
+ ProgArray,
+ PerfEventArray,
+ PercpuHash,
+ PercpuArray,
+ StackTrace,
+ CgroupArray,
+ LruHash,
+ LruPercpuHash,
+ LpmTrie,
+ ArrayOfMaps,
+ HashOfMaps,
+ Devmap,
+ Sockmap,
+ Cpumap,
+ Xskmap,
+ Sockhash,
+ CgroupStorage,
+ ReuseportSockarray,
+ PercpuCgroupStorage,
+ Queue,
+ Stack,
+ SkStorage,
+ DevmapHash,
+ StructOps,
+ RingBuf,
+ InodeStorage,
+ TaskStorage,
+ BloomFilter,
+ UserRingBuf,
+    /// We choose to specify our own "unknown" type here because it's really up to the kernel
+    /// to decide whether it wants to reject the map. If it accepts it, it just means whoever
+    /// is using this library is a bit out of date.
+ Unknown = u32::MAX,
+}
+
+impl MapType {
+    /// Returns whether the map is one of the per-cpu types.
+ pub fn is_percpu(&self) -> bool {
+ matches!(
+ self,
+ MapType::PercpuArray
+ | MapType::PercpuHash
+ | MapType::LruPercpuHash
+ | MapType::PercpuCgroupStorage
+ )
+ }
+
+    /// Returns whether the map is of a keyless type, as per libbpf documentation.
+    /// Keyless map types are: queues, stacks, and bloom filters.
+ fn is_keyless(&self) -> bool {
+ matches!(self, MapType::Queue | MapType::Stack | MapType::BloomFilter)
+ }
+
+    /// Returns whether the map is of the bloom filter type.
+ pub fn is_bloom_filter(&self) -> bool {
+ MapType::BloomFilter.eq(self)
+ }
+
+ /// Detects if host kernel supports this BPF map type.
+ ///
+    /// Make sure the process has the required set of CAP_* permissions (or runs as
+    /// root) when performing feature checks.
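+    ///
+    /// A minimal probing sketch:
+    ///
+    /// ```no_run
+    /// # use libbpf_rs::MapType;
+    /// if MapType::RingBuf.is_supported().unwrap() {
+    ///     println!("BPF ring buffers are available");
+    /// }
+    /// ```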
+ pub fn is_supported(&self) -> Result<bool> {
+ let ret = unsafe { libbpf_sys::libbpf_probe_bpf_map_type(*self as u32, ptr::null()) };
+ match ret {
+ 0 => Ok(false),
+ 1 => Ok(true),
+ _ => Err(Error::from_raw_os_error(-ret)),
+ }
+ }
+}
+
+impl From<u32> for MapType {
+ fn from(value: u32) -> Self {
+ use MapType::*;
+
+ match value {
+ x if x == Unspec as u32 => Unspec,
+ x if x == Hash as u32 => Hash,
+ x if x == Array as u32 => Array,
+ x if x == ProgArray as u32 => ProgArray,
+ x if x == PerfEventArray as u32 => PerfEventArray,
+ x if x == PercpuHash as u32 => PercpuHash,
+ x if x == PercpuArray as u32 => PercpuArray,
+ x if x == StackTrace as u32 => StackTrace,
+ x if x == CgroupArray as u32 => CgroupArray,
+ x if x == LruHash as u32 => LruHash,
+ x if x == LruPercpuHash as u32 => LruPercpuHash,
+ x if x == LpmTrie as u32 => LpmTrie,
+ x if x == ArrayOfMaps as u32 => ArrayOfMaps,
+ x if x == HashOfMaps as u32 => HashOfMaps,
+ x if x == Devmap as u32 => Devmap,
+ x if x == Sockmap as u32 => Sockmap,
+ x if x == Cpumap as u32 => Cpumap,
+ x if x == Xskmap as u32 => Xskmap,
+ x if x == Sockhash as u32 => Sockhash,
+ x if x == CgroupStorage as u32 => CgroupStorage,
+ x if x == ReuseportSockarray as u32 => ReuseportSockarray,
+ x if x == PercpuCgroupStorage as u32 => PercpuCgroupStorage,
+ x if x == Queue as u32 => Queue,
+ x if x == Stack as u32 => Stack,
+ x if x == SkStorage as u32 => SkStorage,
+ x if x == DevmapHash as u32 => DevmapHash,
+ x if x == StructOps as u32 => StructOps,
+ x if x == RingBuf as u32 => RingBuf,
+ x if x == InodeStorage as u32 => InodeStorage,
+ x if x == TaskStorage as u32 => TaskStorage,
+ x if x == BloomFilter as u32 => BloomFilter,
+ x if x == UserRingBuf as u32 => UserRingBuf,
+ _ => Unknown,
+ }
+ }
+}
+
+impl From<MapType> for u32 {
+ fn from(value: MapType) -> Self {
+ value as u32
+ }
+}
+
+/// An iterator over the keys of a BPF map.
+#[derive(Debug)]
+pub struct MapKeyIter<'map> {
+ map_fd: BorrowedFd<'map>,
+ prev: Option<Vec<u8>>,
+ next: Vec<u8>,
+}
+
+impl<'map> MapKeyIter<'map> {
+ fn new(map_fd: BorrowedFd<'map>, key_size: u32) -> Self {
+ Self {
+ map_fd,
+ prev: None,
+ next: vec![0; key_size as usize],
+ }
+ }
+}
+
+impl Iterator for MapKeyIter<'_> {
+ type Item = Vec<u8>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let prev = self.prev.as_ref().map_or(ptr::null(), |p| p.as_ptr());
+
+ let ret = unsafe {
+ libbpf_sys::bpf_map_get_next_key(
+ self.map_fd.as_raw_fd(),
+ prev as _,
+ self.next.as_mut_ptr() as _,
+ )
+ };
+ if ret != 0 {
+ None
+ } else {
+ self.prev = Some(self.next.clone());
+ Some(self.next.clone())
+ }
+ }
+}
+
+/// A convenience wrapper for [`bpf_map_info`][libbpf_sys::bpf_map_info]. It
+/// provides the ability to retrieve the details of a certain map.
+#[derive(Debug)]
+pub struct MapInfo {
+ /// The inner [`bpf_map_info`][libbpf_sys::bpf_map_info] object.
+ pub info: bpf_map_info,
+}
+
+impl MapInfo {
+ /// Create a `MapInfo` object from a fd.
+ pub fn new(fd: BorrowedFd<'_>) -> Result<Self> {
+ let mut map_info = bpf_map_info::default();
+ let mut size = mem::size_of_val(&map_info) as u32;
+ // SAFETY: All pointers are derived from references and hence valid.
+ let () = util::parse_ret(unsafe {
+ bpf_obj_get_info_by_fd(
+ fd.as_raw_fd(),
+ &mut map_info as *mut bpf_map_info as *mut c_void,
+ &mut size as *mut u32,
+ )
+ })?;
+ Ok(Self { info: map_info })
+ }
+
+ /// Get the map type
+ #[inline]
+ pub fn map_type(&self) -> MapType {
+ MapType::from(self.info.type_)
+ }
+
+ /// Get the name of this map.
+ ///
+ /// Returns error if the underlying data in the structure is not a valid
+ /// utf-8 string.
+ pub fn name<'a>(&self) -> Result<&'a str> {
+        // SAFETY: convert &[i8] to &[u8], and then cast that to &str. i8 and u8 have the same size.
+ let char_slice =
+ unsafe { from_raw_parts(self.info.name[..].as_ptr().cast(), self.info.name.len()) };
+
+ util::c_char_slice_to_cstr(char_slice)
+ .ok_or_else(|| Error::with_invalid_data("no nul byte found"))?
+ .to_str()
+ .map_err(Error::with_invalid_data)
+ }
+
+ /// Get the map flags.
+ #[inline]
+ pub fn flags(&self) -> MapFlags {
+ MapFlags::from_bits_truncate(self.info.map_flags as u64)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ use std::mem::discriminant;
+
+ #[test]
+ fn map_type() {
+ use MapType::*;
+
+ for t in [
+ Unspec,
+ Hash,
+ Array,
+ ProgArray,
+ PerfEventArray,
+ PercpuHash,
+ PercpuArray,
+ StackTrace,
+ CgroupArray,
+ LruHash,
+ LruPercpuHash,
+ LpmTrie,
+ ArrayOfMaps,
+ HashOfMaps,
+ Devmap,
+ Sockmap,
+ Cpumap,
+ Xskmap,
+ Sockhash,
+ CgroupStorage,
+ ReuseportSockarray,
+ PercpuCgroupStorage,
+ Queue,
+ Stack,
+ SkStorage,
+ DevmapHash,
+ StructOps,
+ RingBuf,
+ InodeStorage,
+ TaskStorage,
+ BloomFilter,
+ UserRingBuf,
+ Unknown,
+ ] {
+ // check if discriminants match after a roundtrip conversion
+ assert_eq!(discriminant(&t), discriminant(&MapType::from(t as u32)));
+ }
+ }
+}
diff --git a/src/object.rs b/src/object.rs
new file mode 100644
index 0000000..7279128
--- /dev/null
+++ b/src/object.rs
@@ -0,0 +1,405 @@
+use core::ffi::c_void;
+use std::ffi::CStr;
+use std::ffi::CString;
+use std::ffi::OsStr;
+use std::mem;
+use std::os::unix::ffi::OsStrExt as _;
+use std::path::Path;
+use std::ptr;
+use std::ptr::addr_of;
+use std::ptr::NonNull;
+
+use crate::map::map_fd;
+use crate::set_print;
+use crate::util;
+use crate::util::validate_bpf_ret;
+use crate::Btf;
+use crate::ErrorExt as _;
+use crate::Map;
+use crate::MapMut;
+use crate::OpenMap;
+use crate::OpenMapMut;
+use crate::OpenProgram;
+use crate::OpenProgramMut;
+use crate::PrintLevel;
+use crate::Program;
+use crate::ProgramMut;
+use crate::Result;
+
+
+/// An iterator over the maps in a BPF object.
+#[derive(Debug)]
+pub struct MapIter<'obj> {
+ obj: &'obj libbpf_sys::bpf_object,
+ last: *mut libbpf_sys::bpf_map,
+}
+
+impl<'obj> MapIter<'obj> {
+ /// Create a new iterator over the maps of the given BPF object.
+ pub fn new(obj: &'obj libbpf_sys::bpf_object) -> Self {
+ Self {
+ obj,
+ last: ptr::null_mut(),
+ }
+ }
+}
+
+impl Iterator for MapIter<'_> {
+ type Item = NonNull<libbpf_sys::bpf_map>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.last = unsafe { libbpf_sys::bpf_object__next_map(self.obj, self.last) };
+ NonNull::new(self.last)
+ }
+}
+
+
+/// An iterator over the programs in a BPF object.
+#[derive(Debug)]
+pub struct ProgIter<'obj> {
+ obj: &'obj libbpf_sys::bpf_object,
+ last: *mut libbpf_sys::bpf_program,
+}
+
+impl<'obj> ProgIter<'obj> {
+ /// Create a new iterator over the programs of the given BPF object.
+ pub fn new(obj: &'obj libbpf_sys::bpf_object) -> Self {
+ Self {
+ obj,
+ last: ptr::null_mut(),
+ }
+ }
+}
+
+impl Iterator for ProgIter<'_> {
+ type Item = NonNull<libbpf_sys::bpf_program>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.last = unsafe { libbpf_sys::bpf_object__next_program(self.obj, self.last) };
+ NonNull::new(self.last)
+ }
+}
+
+
+/// A trait implemented for types that are thin wrappers around `libbpf` types.
+///
+/// The trait provides access to the underlying `libbpf` (or `libbpf-sys`)
+/// object. In many cases, this enables direct usage of `libbpf-sys`
+/// functionality when higher-level bindings are not yet provided by this crate.
+pub trait AsRawLibbpf {
+ /// The underlying `libbpf` type.
+ type LibbpfType;
+
+ /// Retrieve the underlying `libbpf` object.
+ ///
+ /// # Warning
+ /// By virtue of working with a mutable raw pointer this method effectively
+ /// circumvents mutability and liveness checks. While by-design, usage is
+ /// meant as an escape-hatch more than anything else. If you find yourself
+ /// making use of it, please consider discussing your workflow with crate
+ /// maintainers to see if it would make sense to provide safer wrappers.
+ fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType>;
+}
+
+/// Builder for creating an [`OpenObject`]. Typically the entry point into libbpf-rs.
+#[derive(Debug)]
+pub struct ObjectBuilder {
+ name: Option<CString>,
+ pin_root_path: Option<CString>,
+
+ opts: libbpf_sys::bpf_object_open_opts,
+}
+
+impl Default for ObjectBuilder {
+ fn default() -> Self {
+ let opts = libbpf_sys::bpf_object_open_opts {
+ sz: mem::size_of::<libbpf_sys::bpf_object_open_opts>() as libbpf_sys::size_t,
+ object_name: ptr::null(),
+ relaxed_maps: false,
+ pin_root_path: ptr::null(),
+ kconfig: ptr::null(),
+ btf_custom_path: ptr::null(),
+ kernel_log_buf: ptr::null_mut(),
+ kernel_log_size: 0,
+ kernel_log_level: 0,
+ ..Default::default()
+ };
+ Self {
+ name: None,
+ pin_root_path: None,
+ opts,
+ }
+ }
+}
+
+impl ObjectBuilder {
+ /// Override the generated name that would have been inferred from the constructor.
+ pub fn name<T: AsRef<str>>(&mut self, name: T) -> Result<&mut Self> {
+ self.name = Some(util::str_to_cstring(name.as_ref())?);
+ self.opts.object_name = self.name.as_ref().map_or(ptr::null(), |p| p.as_ptr());
+ Ok(self)
+ }
+
+ /// Set the pin_root_path for maps that are pinned by name.
+ ///
+ /// By default, this is NULL which bpf translates to /sys/fs/bpf
+ pub fn pin_root_path<T: AsRef<Path>>(&mut self, path: T) -> Result<&mut Self> {
+ self.pin_root_path = Some(util::path_to_cstring(path)?);
+ self.opts.pin_root_path = self
+ .pin_root_path
+ .as_ref()
+ .map_or(ptr::null(), |p| p.as_ptr());
+ Ok(self)
+ }
+
+ /// Option to parse map definitions non-strictly, allowing extra attributes/data
+ pub fn relaxed_maps(&mut self, relaxed_maps: bool) -> &mut Self {
+ self.opts.relaxed_maps = relaxed_maps;
+ self
+ }
+
+    /// Option to print debug output to stdout.
+ ///
+ /// Note: This function uses [`set_print`] internally and will overwrite any callbacks
+ /// currently in use.
+ pub fn debug(&mut self, dbg: bool) -> &mut Self {
+ if dbg {
+ set_print(Some((PrintLevel::Debug, |_, s| print!("{s}"))));
+ } else {
+ set_print(None);
+ }
+ self
+ }
+
+ /// Open an object using the provided path on the file system.
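+    ///
+    /// A minimal sketch; the object path is illustrative:
+    ///
+    /// ```no_run
+    /// # use libbpf_rs::ObjectBuilder;
+    /// let open_obj = ObjectBuilder::default().open_file("prog.bpf.o").unwrap();
+    /// ```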
+ pub fn open_file<P: AsRef<Path>>(&mut self, path: P) -> Result<OpenObject> {
+ let path = path.as_ref();
+ let path_c = util::path_to_cstring(path)?;
+ let path_ptr = path_c.as_ptr();
+ let opts_ptr = self.as_libbpf_object().as_ptr();
+
+ let ptr = unsafe { libbpf_sys::bpf_object__open_file(path_ptr, opts_ptr) };
+ let ptr = validate_bpf_ret(ptr)
+ .with_context(|| format!("failed to open object from `{}`", path.display()))?;
+
+ let obj = unsafe { OpenObject::from_ptr(ptr) };
+ Ok(obj)
+ }
+
+ /// Open an object from memory.
+ pub fn open_memory(&mut self, mem: &[u8]) -> Result<OpenObject> {
+ let opts_ptr = self.as_libbpf_object().as_ptr();
+ let ptr = unsafe {
+ libbpf_sys::bpf_object__open_mem(
+ mem.as_ptr() as *const c_void,
+ mem.len() as libbpf_sys::size_t,
+ opts_ptr,
+ )
+ };
+ let ptr = validate_bpf_ret(ptr).context("failed to open object from memory")?;
+ let obj = unsafe { OpenObject::from_ptr(ptr) };
+ Ok(obj)
+ }
+}
+
+impl AsRawLibbpf for ObjectBuilder {
+ type LibbpfType = libbpf_sys::bpf_object_open_opts;
+
+ /// Retrieve the underlying [`libbpf_sys::bpf_object_open_opts`].
+ fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
+ // SAFETY: A reference is always a valid pointer.
+ unsafe { NonNull::new_unchecked(addr_of!(self.opts).cast_mut()) }
+ }
+}
+
+
+/// Represents an opened (but not yet loaded) BPF object file.
+///
+/// Use this object to access [`OpenMap`]s and [`OpenProgram`]s.
+#[derive(Debug)]
+#[repr(transparent)]
+pub struct OpenObject {
+ ptr: NonNull<libbpf_sys::bpf_object>,
+}
+
+impl OpenObject {
+ /// Takes ownership from pointer.
+ ///
+ /// # Safety
+ ///
+ /// Operations on the returned object are undefined if `ptr` is any one of:
+ /// - null
+ /// - points to an unopened `bpf_object`
+ /// - points to a loaded `bpf_object`
+ ///
+ /// It is not safe to manipulate `ptr` after this operation.
+ pub unsafe fn from_ptr(ptr: NonNull<libbpf_sys::bpf_object>) -> Self {
+ Self { ptr }
+ }
+
+ /// Takes underlying `libbpf_sys::bpf_object` pointer.
+ pub fn take_ptr(mut self) -> NonNull<libbpf_sys::bpf_object> {
+ let ptr = {
+ let Self { ptr } = &mut self;
+ *ptr
+ };
+ // avoid double free of self.ptr
+ mem::forget(self);
+ ptr
+ }
+
+ /// Retrieve the object's name.
+ pub fn name(&self) -> Option<&OsStr> {
+ // SAFETY: We ensured `ptr` is valid during construction.
+ let name_ptr = unsafe { libbpf_sys::bpf_object__name(self.ptr.as_ptr()) };
+ // SAFETY: `libbpf_get_error` is always safe to call.
+ let err = unsafe { libbpf_sys::libbpf_get_error(name_ptr as *const _) };
+ if err != 0 {
+ return None
+ }
+ let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
+ let str = OsStr::from_bytes(name_c_str.to_bytes());
+ Some(str)
+ }
+
+ /// Retrieve an iterator over all BPF maps in the object.
+ pub fn maps(&self) -> impl Iterator<Item = OpenMap<'_>> {
+ MapIter::new(unsafe { self.ptr.as_ref() }).map(|ptr| unsafe { OpenMap::new(ptr.as_ref()) })
+ }
+
+    /// Retrieve an iterator over all BPF maps in the object, with mutable access.
+ pub fn maps_mut(&mut self) -> impl Iterator<Item = OpenMapMut<'_>> {
+ MapIter::new(unsafe { self.ptr.as_ref() })
+ .map(|mut ptr| unsafe { OpenMapMut::new_mut(ptr.as_mut()) })
+ }
+
+ /// Retrieve an iterator over all BPF programs in the object.
+ pub fn progs(&self) -> impl Iterator<Item = OpenProgram<'_>> {
+ ProgIter::new(unsafe { self.ptr.as_ref() })
+ .map(|ptr| unsafe { OpenProgram::new(ptr.as_ref()) })
+ }
+
+    /// Retrieve an iterator over all BPF programs in the object, with mutable access.
+ pub fn progs_mut(&mut self) -> impl Iterator<Item = OpenProgramMut<'_>> {
+ ProgIter::new(unsafe { self.ptr.as_ref() })
+ .map(|mut ptr| unsafe { OpenProgramMut::new_mut(ptr.as_mut()) })
+ }
+
+ /// Load the maps and programs contained in this BPF object into the system.
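+    ///
+    /// A rough sketch of the usual open-then-load flow; the object path is
+    /// illustrative:
+    ///
+    /// ```no_run
+    /// # use libbpf_rs::ObjectBuilder;
+    /// let obj = ObjectBuilder::default()
+    ///     .open_file("prog.bpf.o")
+    ///     .unwrap()
+    ///     .load()
+    ///     .unwrap();
+    /// ```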
+ pub fn load(self) -> Result<Object> {
+ let ret = unsafe { libbpf_sys::bpf_object__load(self.ptr.as_ptr()) };
+ let () = util::parse_ret(ret)?;
+
+ let obj = unsafe { Object::from_ptr(self.take_ptr()) };
+
+ Ok(obj)
+ }
+}
+
+impl AsRawLibbpf for OpenObject {
+ type LibbpfType = libbpf_sys::bpf_object;
+
+ /// Retrieve the underlying [`libbpf_sys::bpf_object`].
+ fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
+ self.ptr
+ }
+}
+
+impl Drop for OpenObject {
+ fn drop(&mut self) {
+        // `load()` consumes `self` via `take_ptr` and `mem::forget`, so this drop
+        // handler only runs for objects that were never loaded and `self.ptr` is
+        // always valid to close here.
+ unsafe {
+ libbpf_sys::bpf_object__close(self.ptr.as_ptr());
+ }
+ }
+}
+
+/// Represents a loaded BPF object file.
+///
+/// An `Object` is logically in charge of all the contained [`Program`]s and [`Map`]s as well as
+/// the associated metadata and runtime state that underpins the userspace portions of BPF program
+/// execution. As a libbpf-rs user, you must keep the `Object` alive during the entire lifetime
+/// of your interaction with anything inside the `Object`.
+///
+/// Note that this is an explanation of the motivation -- Rust's lifetime system should already be
+/// enforcing this invariant.
+#[derive(Debug)]
+#[repr(transparent)]
+pub struct Object {
+ ptr: NonNull<libbpf_sys::bpf_object>,
+}
+
+impl Object {
+ /// Takes ownership from pointer.
+ ///
+ /// # Safety
+ ///
+ /// If `ptr` is not already loaded then further operations on the returned object are
+ /// undefined.
+ ///
+ /// It is not safe to manipulate `ptr` after this operation.
+ pub unsafe fn from_ptr(ptr: NonNull<libbpf_sys::bpf_object>) -> Self {
+ Self { ptr }
+ }
+
+ /// Retrieve the object's name.
+ pub fn name(&self) -> Option<&OsStr> {
+ // SAFETY: We ensured `ptr` is valid during construction.
+ let name_ptr = unsafe { libbpf_sys::bpf_object__name(self.ptr.as_ptr()) };
+ // SAFETY: `libbpf_get_error` is always safe to call.
+ let err = unsafe { libbpf_sys::libbpf_get_error(name_ptr as *const _) };
+ if err != 0 {
+ return None
+ }
+ let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
+ let str = OsStr::from_bytes(name_c_str.to_bytes());
+ Some(str)
+ }
+
+ /// Parse the btf information associated with this bpf object.
+ pub fn btf(&self) -> Result<Option<Btf<'_>>> {
+ Btf::from_bpf_object(unsafe { &*self.ptr.as_ptr() })
+ }
+
+ /// Retrieve an iterator over all BPF maps in the object.
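+    ///
+    /// A minimal iteration sketch:
+    ///
+    /// ```no_run
+    /// # use libbpf_rs::{MapCore as _, Object};
+    /// # let obj: Object = todo!();
+    /// for map in obj.maps() {
+    ///     println!("map: {:?}", map.name());
+    /// }
+    /// ```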
+ pub fn maps(&self) -> impl Iterator<Item = Map<'_>> {
+ MapIter::new(unsafe { self.ptr.as_ref() })
+ .filter(|ptr| map_fd(*ptr).is_some())
+ .map(|ptr| unsafe { Map::new(ptr.as_ref()) })
+ }
+
+    /// Retrieve an iterator over all BPF maps in the object, with mutable access.
+ pub fn maps_mut(&mut self) -> impl Iterator<Item = MapMut<'_>> {
+ MapIter::new(unsafe { self.ptr.as_ref() })
+ .filter(|ptr| map_fd(*ptr).is_some())
+ .map(|mut ptr| unsafe { MapMut::new_mut(ptr.as_mut()) })
+ }
+
+ /// Retrieve an iterator over all BPF programs in the object.
+ pub fn progs(&self) -> impl Iterator<Item = Program<'_>> {
+ ProgIter::new(unsafe { self.ptr.as_ref() }).map(|ptr| unsafe { Program::new(ptr.as_ref()) })
+ }
+
+    /// Retrieve an iterator over all BPF programs in the object, with mutable access.
+    pub fn progs_mut(&mut self) -> impl Iterator<Item = ProgramMut<'_>> {
+ ProgIter::new(unsafe { self.ptr.as_ref() })
+ .map(|mut ptr| unsafe { ProgramMut::new_mut(ptr.as_mut()) })
+ }
+}
+
+impl AsRawLibbpf for Object {
+ type LibbpfType = libbpf_sys::bpf_object;
+
+ /// Retrieve the underlying [`libbpf_sys::bpf_object`].
+ fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
+ self.ptr
+ }
+}
+
+impl Drop for Object {
+ fn drop(&mut self) {
+ unsafe {
+ libbpf_sys::bpf_object__close(self.ptr.as_ptr());
+ }
+ }
+}
diff --git a/src/perf_buffer.rs b/src/perf_buffer.rs
new file mode 100644
index 0000000..490683a
--- /dev/null
+++ b/src/perf_buffer.rs
@@ -0,0 +1,268 @@
+use core::ffi::c_void;
+use std::fmt::Debug;
+use std::fmt::Formatter;
+use std::fmt::Result as FmtResult;
+use std::os::unix::io::AsFd;
+use std::os::unix::prelude::AsRawFd;
+use std::ptr;
+use std::ptr::NonNull;
+use std::slice;
+use std::time::Duration;
+
+use crate::util;
+use crate::util::validate_bpf_ret;
+use crate::AsRawLibbpf;
+use crate::Error;
+use crate::ErrorExt as _;
+use crate::Map;
+use crate::MapCore as _;
+use crate::MapType;
+use crate::Result;
+
+// Workaround for `trait_alias`
+// (https://doc.rust-lang.org/unstable-book/language-features/trait-alias.html)
+// not being available yet. This is just a custom trait plus a blanket implementation.
+pub trait SampleCb: FnMut(i32, &[u8]) {}
+impl<T> SampleCb for T where T: FnMut(i32, &[u8]) {}
+
+pub trait LostCb: FnMut(i32, u64) {}
+impl<T> LostCb for T where T: FnMut(i32, u64) {}
+
+struct CbStruct<'b> {
+ sample_cb: Option<Box<dyn SampleCb + 'b>>,
+ lost_cb: Option<Box<dyn LostCb + 'b>>,
+}
+
+impl Debug for CbStruct<'_> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+ let Self { sample_cb, lost_cb } = self;
+ f.debug_struct("CbStruct")
+ .field("sample_cb", &sample_cb.as_ref().map(|cb| &cb as *const _))
+ .field("lost_cb", &lost_cb.as_ref().map(|cb| &cb as *const _))
+ .finish()
+ }
+}
+
+/// Builds [`PerfBuffer`] instances.
+pub struct PerfBufferBuilder<'a, 'b> {
+ map: &'a Map<'a>,
+ pages: usize,
+ sample_cb: Option<Box<dyn SampleCb + 'b>>,
+ lost_cb: Option<Box<dyn LostCb + 'b>>,
+}
+
+impl<'a> PerfBufferBuilder<'a, '_> {
+ /// Create a new `PerfBufferBuilder` using the provided `Map`.
+ pub fn new(map: &'a Map<'a>) -> Self {
+ Self {
+ map,
+ pages: 64,
+ sample_cb: None,
+ lost_cb: None,
+ }
+ }
+}
+
+impl<'a, 'b> PerfBufferBuilder<'a, 'b> {
+ /// Callback to run when a sample is received.
+ ///
+ /// This callback provides a raw byte slice. You may find libraries such as
+ /// [`plain`](https://crates.io/crates/plain) helpful.
+ ///
+ /// Callback arguments are: `(cpu, data)`.
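+    ///
+    /// A hedged sketch; obtaining the `PerfEventArray` map is elided:
+    ///
+    /// ```no_run
+    /// # use std::time::Duration;
+    /// # use libbpf_rs::{Map, PerfBufferBuilder};
+    /// # let get_map = || -> &Map { todo!() };
+    /// let map: &Map = get_map();
+    /// let perf = PerfBufferBuilder::new(map)
+    ///     .sample_cb(|cpu: i32, data: &[u8]| {
+    ///         println!("cpu {cpu}: {} bytes", data.len());
+    ///     })
+    ///     .build()
+    ///     .unwrap();
+    /// perf.poll(Duration::from_millis(100)).unwrap();
+    /// ```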
+ pub fn sample_cb<NewCb: SampleCb + 'b>(self, cb: NewCb) -> PerfBufferBuilder<'a, 'b> {
+ PerfBufferBuilder {
+ map: self.map,
+ pages: self.pages,
+ sample_cb: Some(Box::new(cb)),
+ lost_cb: self.lost_cb,
+ }
+ }
+
+    /// Callback to run when samples are lost/dropped.
+ ///
+ /// Callback arguments are: `(cpu, lost_count)`.
+ pub fn lost_cb<NewCb: LostCb + 'b>(self, cb: NewCb) -> PerfBufferBuilder<'a, 'b> {
+ PerfBufferBuilder {
+ map: self.map,
+ pages: self.pages,
+ sample_cb: self.sample_cb,
+ lost_cb: Some(Box::new(cb)),
+ }
+ }
+
+    /// The number of pages to size the ring buffer. Must be a power of two.
+ pub fn pages(self, pages: usize) -> PerfBufferBuilder<'a, 'b> {
+ PerfBufferBuilder {
+ map: self.map,
+ pages,
+ sample_cb: self.sample_cb,
+ lost_cb: self.lost_cb,
+ }
+ }
+
+ /// Build the `PerfBuffer` object as configured.
+ pub fn build(self) -> Result<PerfBuffer<'b>> {
+ if self.map.map_type() != MapType::PerfEventArray {
+ return Err(Error::with_invalid_data("Must use a PerfEventArray map"));
+ }
+
+ if !self.pages.is_power_of_two() {
+ return Err(Error::with_invalid_data("Page count must be power of two"));
+ }
+
+ let c_sample_cb: libbpf_sys::perf_buffer_sample_fn = if self.sample_cb.is_some() {
+ Some(Self::call_sample_cb)
+ } else {
+ None
+ };
+
+ let c_lost_cb: libbpf_sys::perf_buffer_lost_fn = if self.lost_cb.is_some() {
+ Some(Self::call_lost_cb)
+ } else {
+ None
+ };
+
+ let callback_struct_ptr = Box::into_raw(Box::new(CbStruct {
+ sample_cb: self.sample_cb,
+ lost_cb: self.lost_cb,
+ }));
+
+ let ptr = unsafe {
+ libbpf_sys::perf_buffer__new(
+ self.map.as_fd().as_raw_fd(),
+ self.pages as libbpf_sys::size_t,
+ c_sample_cb,
+ c_lost_cb,
+ callback_struct_ptr as *mut _,
+ ptr::null(),
+ )
+ };
+ let ptr = validate_bpf_ret(ptr).context("failed to create perf buffer")?;
+ let pb = PerfBuffer {
+ ptr,
+ _cb_struct: unsafe { Box::from_raw(callback_struct_ptr) },
+ };
+ Ok(pb)
+ }
+
+ unsafe extern "C" fn call_sample_cb(ctx: *mut c_void, cpu: i32, data: *mut c_void, size: u32) {
+ let callback_struct = ctx as *mut CbStruct<'_>;
+
+ if let Some(cb) = unsafe { &mut (*callback_struct).sample_cb } {
+ let slice = unsafe { slice::from_raw_parts(data as *const u8, size as usize) };
+ cb(cpu, slice);
+ }
+ }
+
+ unsafe extern "C" fn call_lost_cb(ctx: *mut c_void, cpu: i32, count: u64) {
+ let callback_struct = ctx as *mut CbStruct<'_>;
+
+ if let Some(cb) = unsafe { &mut (*callback_struct).lost_cb } {
+ cb(cpu, count);
+ }
+ }
+}
+
+impl Debug for PerfBufferBuilder<'_, '_> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+ let Self {
+ map,
+ pages,
+ sample_cb,
+ lost_cb,
+ } = self;
+ f.debug_struct("PerfBufferBuilder")
+ .field("map", map)
+ .field("pages", pages)
+ .field("sample_cb", &sample_cb.as_ref().map(|cb| &cb as *const _))
+ .field("lost_cb", &lost_cb.as_ref().map(|cb| &cb as *const _))
+ .finish()
+ }
+}
+
+/// Represents a special kind of [`Map`]. Typically used to transfer data between
+/// [`Program`][crate::Program]s and userspace.
+#[derive(Debug)]
+pub struct PerfBuffer<'b> {
+ ptr: NonNull<libbpf_sys::perf_buffer>,
+ // Hold onto the box so it'll get dropped when PerfBuffer is dropped
+ _cb_struct: Box<CbStruct<'b>>,
+}
+
+// TODO: Document methods.
+#[allow(missing_docs)]
+impl PerfBuffer<'_> {
+ pub fn epoll_fd(&self) -> i32 {
+ unsafe { libbpf_sys::perf_buffer__epoll_fd(self.ptr.as_ptr()) }
+ }
+
+ pub fn poll(&self, timeout: Duration) -> Result<()> {
+ let ret =
+ unsafe { libbpf_sys::perf_buffer__poll(self.ptr.as_ptr(), timeout.as_millis() as i32) };
+ util::parse_ret(ret)
+ }
+
+ pub fn consume(&self) -> Result<()> {
+ let ret = unsafe { libbpf_sys::perf_buffer__consume(self.ptr.as_ptr()) };
+ util::parse_ret(ret)
+ }
+
+ pub fn consume_buffer(&self, buf_idx: usize) -> Result<()> {
+ let ret = unsafe {
+ libbpf_sys::perf_buffer__consume_buffer(
+ self.ptr.as_ptr(),
+ buf_idx as libbpf_sys::size_t,
+ )
+ };
+ util::parse_ret(ret)
+ }
+
+ pub fn buffer_cnt(&self) -> usize {
+ unsafe { libbpf_sys::perf_buffer__buffer_cnt(self.ptr.as_ptr()) as usize }
+ }
+
+ pub fn buffer_fd(&self, buf_idx: usize) -> Result<i32> {
+ let ret = unsafe {
+ libbpf_sys::perf_buffer__buffer_fd(self.ptr.as_ptr(), buf_idx as libbpf_sys::size_t)
+ };
+ util::parse_ret_i32(ret)
+ }
+}
+
+impl AsRawLibbpf for PerfBuffer<'_> {
+ type LibbpfType = libbpf_sys::perf_buffer;
+
+ /// Retrieve the underlying [`libbpf_sys::perf_buffer`].
+ fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
+ self.ptr
+ }
+}
+
+// SAFETY: `perf_buffer` objects can safely be polled from any thread.
+unsafe impl Send for PerfBuffer<'_> {}
+
+impl Drop for PerfBuffer<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ libbpf_sys::perf_buffer__free(self.ptr.as_ptr());
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ /// Check that `PerfBuffer` is `Send`.
+ #[test]
+ fn perfbuffer_is_send() {
+ fn test<T>()
+ where
+ T: Send,
+ {
+ }
+
+ test::<PerfBuffer<'_>>();
+ }
+}
diff --git a/src/print.rs b/src/print.rs
new file mode 100644
index 0000000..b71c81d
--- /dev/null
+++ b/src/print.rs
@@ -0,0 +1,151 @@
+use std::ffi::c_char;
+use std::ffi::c_int;
+use std::ffi::c_void;
+use std::io;
+use std::io::Write;
+use std::mem;
+use std::sync::Mutex;
+
+use crate::util::LazyLock;
+
+/// An enum representing the different supported print levels.
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
+#[repr(u32)]
+pub enum PrintLevel {
+ /// Print warnings and more severe messages.
+ Warn = libbpf_sys::LIBBPF_WARN,
+ /// Print general information and more severe messages.
+ Info = libbpf_sys::LIBBPF_INFO,
+ /// Print debug information and more severe messages.
+ Debug = libbpf_sys::LIBBPF_DEBUG,
+}
+
+impl From<libbpf_sys::libbpf_print_level> for PrintLevel {
+ fn from(level: libbpf_sys::libbpf_print_level) -> Self {
+ match level {
+ libbpf_sys::LIBBPF_WARN => Self::Warn,
+ libbpf_sys::LIBBPF_INFO => Self::Info,
+ libbpf_sys::LIBBPF_DEBUG => Self::Debug,
+ // shouldn't happen, but anything unknown becomes the highest level
+ _ => Self::Warn,
+ }
+ }
+}
+
+/// The type of callback functions suitable for being provided to [`set_print`].
+pub type PrintCallback = fn(PrintLevel, String);
+
+/// Mimic the default print functionality of libbpf. This way, if the user calls `get_print`
+/// when no previous callback has been set, intending to restore it later, everything will
+/// behave as expected.
+fn default_callback(_lvl: PrintLevel, msg: String) {
+ let _ = io::stderr().write(msg.as_bytes());
+}
+
+// While we can't say that set_print is thread-safe, because we shouldn't assume that of
+// libbpf_set_print, we should still make sure that things are sane on the rust side of things.
+// Therefore we are using a lock to keep the log level and the callback in sync.
+//
+// We don't do anything that can panic with the lock held, so we'll unconditionally unwrap() when
+// locking the mutex.
+//
+// Note that default print behavior ignores debug messages.
+static PRINT_CB: LazyLock<Mutex<Option<(PrintLevel, PrintCallback)>>> =
+ LazyLock::new(|| Mutex::new(Some((PrintLevel::Info, default_callback))));
+
+extern "C" fn outer_print_cb(
+ level: libbpf_sys::libbpf_print_level,
+ fmtstr: *const c_char,
+ // bindgen generated va_list type varies on different platforms, so just use void pointer
+ // instead. It's safe because this argument is always a pointer.
+ // The pointer of this function would be transmuted and passing to libbpf_set_print below.
+ // See <https://github.com/rust-lang/rust-bindgen/issues/2631>
+ va_list: *mut c_void,
+) -> c_int {
+ let level = level.into();
+ if let Some((min_level, func)) = { *PRINT_CB.lock().unwrap() } {
+ if level <= min_level {
+ let msg = match unsafe { vsprintf::vsprintf(fmtstr, va_list) } {
+ Ok(s) => s,
+ Err(e) => format!("Failed to parse libbpf output: {e}"),
+ };
+ func(level, msg);
+ }
+ }
+ 0 // return value is ignored by libbpf
+}
+
+/// Set a callback to receive log messages from libbpf, instead of printing them to stderr.
+///
+/// # Arguments
+///
+/// * `callback` - Either a tuple `(min_level, function)` where `min_level` is the lowest priority
+/// log message to handle, or `None` to disable all printing.
+///
+/// This overrides (and is overridden by) [`ObjectBuilder::debug`][crate::ObjectBuilder::debug]
+///
+/// # Examples
+///
+/// To pass all messages to the `log` crate:
+///
+/// ```
+/// use libbpf_rs::{PrintLevel, set_print};
+///
+/// fn print_to_log(level: PrintLevel, msg: String) {
+/// match level {
+/// PrintLevel::Debug => log::debug!("{}", msg),
+/// PrintLevel::Info => log::info!("{}", msg),
+/// PrintLevel::Warn => log::warn!("{}", msg),
+/// }
+/// }
+///
+/// set_print(Some((PrintLevel::Debug, print_to_log)));
+/// ```
+///
+/// To disable printing completely:
+///
+/// ```
+/// use libbpf_rs::set_print;
+/// set_print(None);
+/// ```
+///
+/// To temporarily suppress output:
+///
+/// ```
+/// use libbpf_rs::set_print;
+///
+/// let prev = set_print(None);
+/// // do things quietly
+/// set_print(prev);
+/// ```
+pub fn set_print(
+ mut callback: Option<(PrintLevel, PrintCallback)>,
+) -> Option<(PrintLevel, PrintCallback)> {
+ // # Safety
+ // outer_print_cb has the same function signature as libbpf_print_fn_t
+ #[allow(clippy::missing_transmute_annotations)]
+ let real_cb: libbpf_sys::libbpf_print_fn_t =
+ unsafe { Some(mem::transmute(outer_print_cb as *const ())) };
+ let real_cb: libbpf_sys::libbpf_print_fn_t = callback.as_ref().and(real_cb);
+ mem::swap(&mut callback, &mut *PRINT_CB.lock().unwrap());
+ unsafe { libbpf_sys::libbpf_set_print(real_cb) };
+ callback
+}
+
+/// Return the current print callback and level.
+///
+/// # Examples
+///
+/// To temporarily suppress output:
+///
+/// ```
+/// use libbpf_rs::{get_print, set_print};
+///
+/// let prev = get_print();
+/// set_print(None);
+/// // do things quietly
+/// set_print(prev);
+/// ```
+pub fn get_print() -> Option<(PrintLevel, PrintCallback)> {
+ *PRINT_CB.lock().unwrap()
+}
diff --git a/src/program.rs b/src/program.rs
new file mode 100644
index 0000000..f3e7726
--- /dev/null
+++ b/src/program.rs
@@ -0,0 +1,1245 @@
+// `rustdoc` is buggy, claiming that we have some links to private items
+// when they are actually public.
+#![allow(rustdoc::private_intra_doc_links)]
+
+use std::ffi::c_void;
+use std::ffi::CStr;
+use std::ffi::OsStr;
+use std::marker::PhantomData;
+use std::mem;
+use std::mem::size_of;
+use std::mem::size_of_val;
+use std::mem::transmute;
+use std::ops::Deref;
+use std::os::unix::ffi::OsStrExt as _;
+use std::os::unix::io::AsFd;
+use std::os::unix::io::AsRawFd;
+use std::os::unix::io::BorrowedFd;
+use std::os::unix::io::FromRawFd;
+use std::os::unix::io::OwnedFd;
+use std::path::Path;
+use std::ptr;
+use std::ptr::NonNull;
+use std::slice;
+
+use libbpf_sys::bpf_func_id;
+
+use crate::util;
+use crate::util::validate_bpf_ret;
+use crate::AsRawLibbpf;
+use crate::Error;
+use crate::ErrorExt as _;
+use crate::Link;
+use crate::Mut;
+use crate::Result;
+
+/// Options to optionally be provided when attaching to a uprobe.
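+///
+/// A minimal construction sketch; the function name is illustrative:
+///
+/// ```
+/// use libbpf_rs::UprobeOpts;
+///
+/// let opts = UprobeOpts {
+///     func_name: "malloc".to_string(),
+///     ..Default::default()
+/// };
+/// ```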
+#[derive(Clone, Debug, Default)]
+pub struct UprobeOpts {
+ /// Offset of kernel reference counted USDT semaphore.
+ pub ref_ctr_offset: usize,
+ /// Custom user-provided value accessible through `bpf_get_attach_cookie`.
+ pub cookie: u64,
+ /// uprobe is return probe, invoked at function return time.
+ pub retprobe: bool,
+ /// Function name to attach to.
+ ///
+    /// Can be an unqualified ("abc") or library-qualified ("abc@LIBXYZ") name.
+    /// To trace a function entry, set `func_name` and leave the `func_offset`
+    /// argument at 0. To trace an offset within a function, set `func_name`
+    /// and use the `func_offset` argument to specify the offset within the
+    /// function. Shared library functions must specify the shared library
+    /// binary_path.
+ pub func_name: String,
+ #[doc(hidden)]
+ pub _non_exhaustive: (),
+}
+
+/// Options to optionally be provided when attaching to a USDT.
+#[derive(Clone, Debug, Default)]
+pub struct UsdtOpts {
+ /// Custom user-provided value accessible through `bpf_usdt_cookie`.
+ pub cookie: u64,
+ #[doc(hidden)]
+ pub _non_exhaustive: (),
+}
+
+impl From<UsdtOpts> for libbpf_sys::bpf_usdt_opts {
+ fn from(opts: UsdtOpts) -> Self {
+ let UsdtOpts {
+ cookie,
+ _non_exhaustive,
+ } = opts;
+ #[allow(clippy::needless_update)]
+ libbpf_sys::bpf_usdt_opts {
+ sz: size_of::<Self>() as _,
+ usdt_cookie: cookie,
+ // bpf_usdt_opts might have padding fields on some platform
+ ..Default::default()
+ }
+ }
+}
+
+/// Options to optionally be provided when attaching to a tracepoint.
+#[derive(Clone, Debug, Default)]
+pub struct TracepointOpts {
+ /// Custom user-provided value accessible through `bpf_get_attach_cookie`.
+ pub cookie: u64,
+ #[doc(hidden)]
+ pub _non_exhaustive: (),
+}
+
+impl From<TracepointOpts> for libbpf_sys::bpf_tracepoint_opts {
+ fn from(opts: TracepointOpts) -> Self {
+ let TracepointOpts {
+ cookie,
+ _non_exhaustive,
+ } = opts;
+
+ #[allow(clippy::needless_update)]
+ libbpf_sys::bpf_tracepoint_opts {
+ sz: size_of::<Self>() as _,
+ bpf_cookie: cookie,
+ // bpf_tracepoint_opts might have padding fields on some platform
+ ..Default::default()
+ }
+ }
+}
+
+
+/// An immutable parsed but not yet loaded BPF program.
+pub type OpenProgram<'obj> = OpenProgramImpl<'obj>;
+/// A mutable parsed but not yet loaded BPF program.
+pub type OpenProgramMut<'obj> = OpenProgramImpl<'obj, Mut>;
+
+/// Represents a parsed but not yet loaded BPF program.
+///
+/// This object exposes operations that need to happen before the program is loaded.
+#[derive(Debug)]
+#[repr(transparent)]
+pub struct OpenProgramImpl<'obj, T = ()> {
+ ptr: NonNull<libbpf_sys::bpf_program>,
+ _phantom: PhantomData<&'obj T>,
+}
+
+// TODO: Document variants.
+#[allow(missing_docs)]
+impl<'obj> OpenProgram<'obj> {
+ /// Create a new [`OpenProgram`] from a ptr to a `libbpf_sys::bpf_program`.
+ pub fn new(prog: &'obj libbpf_sys::bpf_program) -> Self {
+ // SAFETY: We inferred the address from a reference, which is always
+ // valid.
+ Self {
+ ptr: unsafe { NonNull::new_unchecked(prog as *const _ as *mut _) },
+ _phantom: PhantomData,
+ }
+ }
+
+    /// The [`ProgramType`] of this `OpenProgram`.
+ pub fn prog_type(&self) -> ProgramType {
+ ProgramType::from(unsafe { libbpf_sys::bpf_program__type(self.ptr.as_ptr()) })
+ }
+
+ /// Retrieve the name of this `OpenProgram`.
+ pub fn name(&self) -> &OsStr {
+ let name_ptr = unsafe { libbpf_sys::bpf_program__name(self.ptr.as_ptr()) };
+ let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
+ // SAFETY: `bpf_program__name` always returns a non-NULL pointer.
+ OsStr::from_bytes(name_c_str.to_bytes())
+ }
+
+ /// Retrieve the name of the section this `OpenProgram` belongs to.
+ pub fn section(&self) -> &OsStr {
+ // SAFETY: The program is always valid.
+ let p = unsafe { libbpf_sys::bpf_program__section_name(self.ptr.as_ptr()) };
+ // SAFETY: `bpf_program__section_name` will always return a non-NULL
+ // pointer.
+ let section_c_str = unsafe { CStr::from_ptr(p) };
+ let section = OsStr::from_bytes(section_c_str.to_bytes());
+ section
+ }
+
+ /// Returns the number of instructions that form the program.
+ ///
+    /// Note that libbpf can modify the program's instructions, and consequently its
+    /// instruction count, as it processes the BPF object file. So
+    /// [`OpenProgram::insn_cnt`] and [`Program::insn_cnt`] may return different values.
+ pub fn insn_cnt(&self) -> usize {
+ unsafe { libbpf_sys::bpf_program__insn_cnt(self.ptr.as_ptr()) as usize }
+ }
+
+ /// Gives read-only access to BPF program's underlying BPF instructions.
+ ///
+    /// Keep in mind that libbpf can modify, append, and delete a BPF program's
+    /// instructions as it processes the BPF object file and prepares everything for
+    /// uploading into the kernel. So [`OpenProgram::insns`] and [`Program::insns`] may return
+    /// different sets of instructions. For example, during the BPF object load phase, program
+    /// instructions are CO-RE-relocated, BPF subprogram instructions are appended, and ldimm64
+    /// instructions have FDs embedded. The instructions returned before and after load can
+    /// therefore be quite different.
+ pub fn insns(&self) -> &[libbpf_sys::bpf_insn] {
+ let count = self.insn_cnt();
+ let ptr = unsafe { libbpf_sys::bpf_program__insns(self.ptr.as_ptr()) };
+ unsafe { slice::from_raw_parts(ptr, count) }
+ }
+}
+
+impl<'obj> OpenProgramMut<'obj> {
+ /// Create a new [`OpenProgramMut`] from a reference to a `libbpf_sys::bpf_program`.
+ pub fn new_mut(prog: &'obj mut libbpf_sys::bpf_program) -> Self {
+ Self {
+ ptr: unsafe { NonNull::new_unchecked(prog as *mut _) },
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Set the program type.
+ pub fn set_prog_type(&mut self, prog_type: ProgramType) {
+ let rc = unsafe { libbpf_sys::bpf_program__set_type(self.ptr.as_ptr(), prog_type as u32) };
+ debug_assert!(util::parse_ret(rc).is_ok(), "{rc}");
+ }
+
+ /// Set the expected attach type of the program.
+ pub fn set_attach_type(&mut self, attach_type: ProgramAttachType) {
+ let rc = unsafe {
+ libbpf_sys::bpf_program__set_expected_attach_type(self.ptr.as_ptr(), attach_type as u32)
+ };
+ debug_assert!(util::parse_ret(rc).is_ok(), "{rc}");
+ }
+
+ /// Set the network interface index associated with the program.
+ pub fn set_ifindex(&mut self, idx: u32) {
+ unsafe { libbpf_sys::bpf_program__set_ifindex(self.ptr.as_ptr(), idx) }
+ }
+
+ /// Set the log level for the bpf program.
+ ///
+ /// The log level is interpreted by bpf kernel code and interpretation may
+ /// change with newer kernel versions. Refer to the kernel source code for
+ /// details.
+ ///
+ /// In general, a value of `0` disables logging, while values `> 0` enable
+ /// it.
+ pub fn set_log_level(&mut self, log_level: u32) {
+ let rc = unsafe { libbpf_sys::bpf_program__set_log_level(self.ptr.as_ptr(), log_level) };
+ debug_assert!(util::parse_ret(rc).is_ok(), "{rc}");
+ }
+
+ /// Set whether a bpf program should be automatically loaded by default
+ /// when the bpf object is loaded.
+ pub fn set_autoload(&mut self, autoload: bool) {
+ let rc = unsafe { libbpf_sys::bpf_program__set_autoload(self.ptr.as_ptr(), autoload) };
+ debug_assert!(util::parse_ret(rc).is_ok(), "{rc}");
+ }
+
+ /// Set the attach target for this program, given a target program file
+ /// descriptor (`attach_prog_fd`) and an optional function name.
+ pub fn set_attach_target(
+ &mut self,
+ attach_prog_fd: i32,
+ attach_func_name: Option<String>,
+ ) -> Result<()> {
+ let ret = if let Some(name) = attach_func_name {
+ // NB: we must hold onto a CString otherwise our pointer dangles
+ let name_c = util::str_to_cstring(&name)?;
+ unsafe {
+ libbpf_sys::bpf_program__set_attach_target(
+ self.ptr.as_ptr(),
+ attach_prog_fd,
+ name_c.as_ptr(),
+ )
+ }
+ } else {
+ unsafe {
+ libbpf_sys::bpf_program__set_attach_target(
+ self.ptr.as_ptr(),
+ attach_prog_fd,
+ ptr::null(),
+ )
+ }
+ };
+ util::parse_ret(ret)
+ }
+
+ /// Set the flags passed to the kernel when the program is loaded.
+ pub fn set_flags(&mut self, flags: u32) {
+ let rc = unsafe { libbpf_sys::bpf_program__set_flags(self.ptr.as_ptr(), flags) };
+ debug_assert!(util::parse_ret(rc).is_ok(), "{rc}");
+ }
+}
+
+impl<'obj> Deref for OpenProgramMut<'obj> {
+ type Target = OpenProgram<'obj>;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: `OpenProgramImpl` is `repr(transparent)` and so
+ // in-memory representation of both types is the same.
+ unsafe { transmute::<&OpenProgramMut<'obj>, &OpenProgram<'obj>>(self) }
+ }
+}
+
+impl<T> AsRawLibbpf for OpenProgramImpl<'_, T> {
+ type LibbpfType = libbpf_sys::bpf_program;
+
+ /// Retrieve the underlying [`libbpf_sys::bpf_program`].
+ fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
+ self.ptr
+ }
+}
+
+/// Type of a [`Program`]. Maps to `enum bpf_prog_type` in kernel uapi.
+#[non_exhaustive]
+#[repr(u32)]
+#[derive(Copy, Clone, Debug)]
+// TODO: Document variants.
+#[allow(missing_docs)]
+pub enum ProgramType {
+ Unspec = 0,
+ SocketFilter,
+ Kprobe,
+ SchedCls,
+ SchedAct,
+ Tracepoint,
+ Xdp,
+ PerfEvent,
+ CgroupSkb,
+ CgroupSock,
+ LwtIn,
+ LwtOut,
+ LwtXmit,
+ SockOps,
+ SkSkb,
+ CgroupDevice,
+ SkMsg,
+ RawTracepoint,
+ CgroupSockAddr,
+ LwtSeg6local,
+ LircMode2,
+ SkReuseport,
+ FlowDissector,
+ CgroupSysctl,
+ RawTracepointWritable,
+ CgroupSockopt,
+ Tracing,
+ StructOps,
+ Ext,
+ Lsm,
+ SkLookup,
+ Syscall,
+ /// See [`MapType::Unknown`][crate::MapType::Unknown]
+ Unknown = u32::MAX,
+}
+
+impl ProgramType {
+ /// Detects whether the host kernel supports this BPF program type.
+ ///
+ /// Make sure the process has the required set of `CAP_*` capabilities (or
+ /// runs as root) when performing feature checking.
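+ ///
+ /// # Example
+ ///
+ /// A minimal probing sketch (requires sufficient privileges at run time):
+ ///
+ /// ```no_run
+ /// use libbpf_rs::ProgramType;
+ ///
+ /// let supported = ProgramType::Tracepoint.is_supported().unwrap_or(false);
+ /// println!("tracepoint programs supported: {supported}");
+ /// ```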
+ pub fn is_supported(&self) -> Result<bool> {
+ let ret = unsafe { libbpf_sys::libbpf_probe_bpf_prog_type(*self as u32, ptr::null()) };
+ match ret {
+ 0 => Ok(false),
+ 1 => Ok(true),
+ _ => Err(Error::from_raw_os_error(-ret)),
+ }
+ }
+
+ /// Detects whether the host kernel supports the use of a given BPF helper
+ /// from this BPF program type.
+ ///
+ /// * `helper_id` - BPF helper ID (enum `bpf_func_id`) to check support for
+ ///
+ /// Make sure the process has the required set of `CAP_*` capabilities (or
+ /// runs as root) when performing feature checking.
+ pub fn is_helper_supported(&self, helper_id: bpf_func_id) -> Result<bool> {
+ let ret =
+ unsafe { libbpf_sys::libbpf_probe_bpf_helper(*self as u32, helper_id, ptr::null()) };
+ match ret {
+ 0 => Ok(false),
+ 1 => Ok(true),
+ _ => Err(Error::from_raw_os_error(-ret)),
+ }
+ }
+}
+
+impl From<u32> for ProgramType {
+ fn from(value: u32) -> Self {
+ use ProgramType::*;
+
+ match value {
+ x if x == Unspec as u32 => Unspec,
+ x if x == SocketFilter as u32 => SocketFilter,
+ x if x == Kprobe as u32 => Kprobe,
+ x if x == SchedCls as u32 => SchedCls,
+ x if x == SchedAct as u32 => SchedAct,
+ x if x == Tracepoint as u32 => Tracepoint,
+ x if x == Xdp as u32 => Xdp,
+ x if x == PerfEvent as u32 => PerfEvent,
+ x if x == CgroupSkb as u32 => CgroupSkb,
+ x if x == CgroupSock as u32 => CgroupSock,
+ x if x == LwtIn as u32 => LwtIn,
+ x if x == LwtOut as u32 => LwtOut,
+ x if x == LwtXmit as u32 => LwtXmit,
+ x if x == SockOps as u32 => SockOps,
+ x if x == SkSkb as u32 => SkSkb,
+ x if x == CgroupDevice as u32 => CgroupDevice,
+ x if x == SkMsg as u32 => SkMsg,
+ x if x == RawTracepoint as u32 => RawTracepoint,
+ x if x == CgroupSockAddr as u32 => CgroupSockAddr,
+ x if x == LwtSeg6local as u32 => LwtSeg6local,
+ x if x == LircMode2 as u32 => LircMode2,
+ x if x == SkReuseport as u32 => SkReuseport,
+ x if x == FlowDissector as u32 => FlowDissector,
+ x if x == CgroupSysctl as u32 => CgroupSysctl,
+ x if x == RawTracepointWritable as u32 => RawTracepointWritable,
+ x if x == CgroupSockopt as u32 => CgroupSockopt,
+ x if x == Tracing as u32 => Tracing,
+ x if x == StructOps as u32 => StructOps,
+ x if x == Ext as u32 => Ext,
+ x if x == Lsm as u32 => Lsm,
+ x if x == SkLookup as u32 => SkLookup,
+ x if x == Syscall as u32 => Syscall,
+ _ => Unknown,
+ }
+ }
+}
+
+/// Attach type of a [`Program`]. Maps to `enum bpf_attach_type` in kernel uapi.
+#[non_exhaustive]
+#[repr(u32)]
+#[derive(Clone, Debug)]
+// TODO: Document variants.
+#[allow(missing_docs)]
+pub enum ProgramAttachType {
+ CgroupInetIngress,
+ CgroupInetEgress,
+ CgroupInetSockCreate,
+ CgroupSockOps,
+ SkSkbStreamParser,
+ SkSkbStreamVerdict,
+ CgroupDevice,
+ SkMsgVerdict,
+ CgroupInet4Bind,
+ CgroupInet6Bind,
+ CgroupInet4Connect,
+ CgroupInet6Connect,
+ CgroupInet4PostBind,
+ CgroupInet6PostBind,
+ CgroupUdp4Sendmsg,
+ CgroupUdp6Sendmsg,
+ LircMode2,
+ FlowDissector,
+ CgroupSysctl,
+ CgroupUdp4Recvmsg,
+ CgroupUdp6Recvmsg,
+ CgroupGetsockopt,
+ CgroupSetsockopt,
+ TraceRawTp,
+ TraceFentry,
+ TraceFexit,
+ ModifyReturn,
+ LsmMac,
+ TraceIter,
+ CgroupInet4Getpeername,
+ CgroupInet6Getpeername,
+ CgroupInet4Getsockname,
+ CgroupInet6Getsockname,
+ XdpDevmap,
+ CgroupInetSockRelease,
+ XdpCpumap,
+ SkLookup,
+ Xdp,
+ SkSkbVerdict,
+ SkReuseportSelect,
+ SkReuseportSelectOrMigrate,
+ PerfEvent,
+ /// See [`MapType::Unknown`][crate::MapType::Unknown]
+ Unknown = u32::MAX,
+}
+
+impl From<u32> for ProgramAttachType {
+ fn from(value: u32) -> Self {
+ use ProgramAttachType::*;
+
+ match value {
+ x if x == CgroupInetIngress as u32 => CgroupInetIngress,
+ x if x == CgroupInetEgress as u32 => CgroupInetEgress,
+ x if x == CgroupInetSockCreate as u32 => CgroupInetSockCreate,
+ x if x == CgroupSockOps as u32 => CgroupSockOps,
+ x if x == SkSkbStreamParser as u32 => SkSkbStreamParser,
+ x if x == SkSkbStreamVerdict as u32 => SkSkbStreamVerdict,
+ x if x == CgroupDevice as u32 => CgroupDevice,
+ x if x == SkMsgVerdict as u32 => SkMsgVerdict,
+ x if x == CgroupInet4Bind as u32 => CgroupInet4Bind,
+ x if x == CgroupInet6Bind as u32 => CgroupInet6Bind,
+ x if x == CgroupInet4Connect as u32 => CgroupInet4Connect,
+ x if x == CgroupInet6Connect as u32 => CgroupInet6Connect,
+ x if x == CgroupInet4PostBind as u32 => CgroupInet4PostBind,
+ x if x == CgroupInet6PostBind as u32 => CgroupInet6PostBind,
+ x if x == CgroupUdp4Sendmsg as u32 => CgroupUdp4Sendmsg,
+ x if x == CgroupUdp6Sendmsg as u32 => CgroupUdp6Sendmsg,
+ x if x == LircMode2 as u32 => LircMode2,
+ x if x == FlowDissector as u32 => FlowDissector,
+ x if x == CgroupSysctl as u32 => CgroupSysctl,
+ x if x == CgroupUdp4Recvmsg as u32 => CgroupUdp4Recvmsg,
+ x if x == CgroupUdp6Recvmsg as u32 => CgroupUdp6Recvmsg,
+ x if x == CgroupGetsockopt as u32 => CgroupGetsockopt,
+ x if x == CgroupSetsockopt as u32 => CgroupSetsockopt,
+ x if x == TraceRawTp as u32 => TraceRawTp,
+ x if x == TraceFentry as u32 => TraceFentry,
+ x if x == TraceFexit as u32 => TraceFexit,
+ x if x == ModifyReturn as u32 => ModifyReturn,
+ x if x == LsmMac as u32 => LsmMac,
+ x if x == TraceIter as u32 => TraceIter,
+ x if x == CgroupInet4Getpeername as u32 => CgroupInet4Getpeername,
+ x if x == CgroupInet6Getpeername as u32 => CgroupInet6Getpeername,
+ x if x == CgroupInet4Getsockname as u32 => CgroupInet4Getsockname,
+ x if x == CgroupInet6Getsockname as u32 => CgroupInet6Getsockname,
+ x if x == XdpDevmap as u32 => XdpDevmap,
+ x if x == CgroupInetSockRelease as u32 => CgroupInetSockRelease,
+ x if x == XdpCpumap as u32 => XdpCpumap,
+ x if x == SkLookup as u32 => SkLookup,
+ x if x == Xdp as u32 => Xdp,
+ x if x == SkSkbVerdict as u32 => SkSkbVerdict,
+ x if x == SkReuseportSelect as u32 => SkReuseportSelect,
+ x if x == SkReuseportSelectOrMigrate as u32 => SkReuseportSelectOrMigrate,
+ x if x == PerfEvent as u32 => PerfEvent,
+ _ => Unknown,
+ }
+ }
+}
+
+/// The input a program accepts.
+///
+/// This type is mostly used in conjunction with the [`Program::test_run`]
+/// facility.
+#[derive(Debug, Default)]
+pub struct Input<'dat> {
+ /// The input context to provide.
+ ///
+ /// The input is mutable because the kernel may modify it.
+ pub context_in: Option<&'dat mut [u8]>,
+ /// The output context buffer provided to the program.
+ pub context_out: Option<&'dat mut [u8]>,
+ /// Additional data to provide to the program.
+ pub data_in: Option<&'dat [u8]>,
+ /// The output data buffer provided to the program.
+ pub data_out: Option<&'dat mut [u8]>,
+ /// The 'cpu' value passed to the kernel.
+ pub cpu: u32,
+ /// The 'flags' value passed to the kernel.
+ pub flags: u32,
+ /// The struct is non-exhaustive and open to extension.
+ #[doc(hidden)]
+ pub _non_exhaustive: (),
+}
+
+/// The output a program produces.
+///
+/// This type is mostly used in conjunction with the [`Program::test_run`]
+/// facility.
+#[derive(Debug)]
+pub struct Output<'dat> {
+ /// The value returned by the program.
+ pub return_value: u32,
+ /// The output context filled by the program/kernel.
+ pub context: Option<&'dat mut [u8]>,
+ /// Output data filled by the program.
+ pub data: Option<&'dat mut [u8]>,
+ /// The struct is non-exhaustive and open to extension.
+ #[doc(hidden)]
+ pub _non_exhaustive: (),
+}
+
+/// An immutable loaded BPF program.
+pub type Program<'obj> = ProgramImpl<'obj>;
+/// A mutable loaded BPF program.
+pub type ProgramMut<'obj> = ProgramImpl<'obj, Mut>;
+
+
+/// Represents a loaded [`Program`].
+///
+/// This struct is not safe to clone because the underlying libbpf resource cannot currently
+/// be protected from data races.
+///
+/// If you attempt to attach a `Program` with the wrong attach method, the `attach_*`
+/// method will fail with the appropriate error.
+#[derive(Debug)]
+#[repr(transparent)]
+pub struct ProgramImpl<'obj, T = ()> {
+ pub(crate) ptr: NonNull<libbpf_sys::bpf_program>,
+ _phantom: PhantomData<&'obj T>,
+}
+
+impl<'obj> Program<'obj> {
+ /// Create a [`Program`] from a [`libbpf_sys::bpf_program`]
+ pub fn new(prog: &'obj libbpf_sys::bpf_program) -> Self {
+ // SAFETY: We inferred the address from a reference, which is always
+ // valid.
+ Self {
+ ptr: unsafe { NonNull::new_unchecked(prog as *const _ as *mut _) },
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Retrieve the name of this `Program`.
+ pub fn name(&self) -> &OsStr {
+ let name_ptr = unsafe { libbpf_sys::bpf_program__name(self.ptr.as_ptr()) };
+ // SAFETY: `bpf_program__name` always returns a non-NULL pointer.
+ let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
+ OsStr::from_bytes(name_c_str.to_bytes())
+ }
+
+ /// Retrieve the name of the section this `Program` belongs to.
+ pub fn section(&self) -> &OsStr {
+ // SAFETY: The program is always valid.
+ let p = unsafe { libbpf_sys::bpf_program__section_name(self.ptr.as_ptr()) };
+ // SAFETY: `bpf_program__section_name` will always return a non-NULL
+ // pointer.
+ let section_c_str = unsafe { CStr::from_ptr(p) };
+ OsStr::from_bytes(section_c_str.to_bytes())
+ }
+
+ /// Retrieve the type of the program.
+ pub fn prog_type(&self) -> ProgramType {
+ ProgramType::from(unsafe { libbpf_sys::bpf_program__type(self.ptr.as_ptr()) })
+ }
+
+ /// Returns a program's file descriptor given its id.
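+ ///
+ /// A sketch that retrieves a program's fd by id and maps it back to the
+ /// id (assuming `id` refers to a currently loaded program):
+ ///
+ /// ```no_run
+ /// # fn example(id: u32) -> libbpf_rs::Result<()> {
+ /// use std::os::fd::AsFd as _;
+ /// use libbpf_rs::Program;
+ ///
+ /// let fd = Program::get_fd_by_id(id)?;
+ /// let same_id = Program::get_id_by_fd(fd.as_fd())?;
+ /// assert_eq!(same_id, id);
+ /// # Ok(())
+ /// # }
+ /// ```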
+ pub fn get_fd_by_id(id: u32) -> Result<OwnedFd> {
+ let ret = unsafe { libbpf_sys::bpf_prog_get_fd_by_id(id) };
+ let fd = util::parse_ret_i32(ret)?;
+ // SAFETY: A file descriptor coming from `bpf_prog_get_fd_by_id` is always
+ // suitable for ownership and can be cleaned up with `close`.
+ Ok(unsafe { OwnedFd::from_raw_fd(fd) })
+ }
+
+ /// Returns a program's id given its file descriptor.
+ pub fn get_id_by_fd(fd: BorrowedFd<'_>) -> Result<u32> {
+ let mut prog_info = libbpf_sys::bpf_prog_info::default();
+ let prog_info_ptr: *mut libbpf_sys::bpf_prog_info = &mut prog_info;
+ let mut len = size_of::<libbpf_sys::bpf_prog_info>() as u32;
+ let ret = unsafe {
+ libbpf_sys::bpf_obj_get_info_by_fd(
+ fd.as_raw_fd(),
+ prog_info_ptr as *mut c_void,
+ &mut len,
+ )
+ };
+ util::parse_ret(ret)?;
+ Ok(prog_info.id)
+ }
+
+ /// Returns flags that have been set for the program.
+ pub fn flags(&self) -> u32 {
+ unsafe { libbpf_sys::bpf_program__flags(self.ptr.as_ptr()) }
+ }
+
+ /// Retrieve the attach type of the program.
+ pub fn attach_type(&self) -> ProgramAttachType {
+ ProgramAttachType::from(unsafe {
+ libbpf_sys::bpf_program__expected_attach_type(self.ptr.as_ptr())
+ })
+ }
+
+ /// Return `true` if the bpf program is set to autoload, `false` otherwise.
+ pub fn autoload(&self) -> bool {
+ unsafe { libbpf_sys::bpf_program__autoload(self.ptr.as_ptr()) }
+ }
+
+ /// Return the bpf program's log level.
+ pub fn log_level(&self) -> u32 {
+ unsafe { libbpf_sys::bpf_program__log_level(self.ptr.as_ptr()) }
+ }
+
+ /// Returns the number of instructions that form the program.
+ ///
+ /// Please see note in [`OpenProgram::insn_cnt`].
+ pub fn insn_cnt(&self) -> usize {
+ unsafe { libbpf_sys::bpf_program__insn_cnt(self.ptr.as_ptr()) as usize }
+ }
+
+ /// Gives read-only access to BPF program's underlying BPF instructions.
+ ///
+ /// Please see note in [`OpenProgram::insns`].
+ pub fn insns(&self) -> &[libbpf_sys::bpf_insn] {
+ let count = self.insn_cnt();
+ let ptr = unsafe { libbpf_sys::bpf_program__insns(self.ptr.as_ptr()) };
+ unsafe { slice::from_raw_parts(ptr, count) }
+ }
+}
+
+impl<'obj> ProgramMut<'obj> {
+ /// Create a [`ProgramMut`] from a reference to a [`libbpf_sys::bpf_program`].
+ pub fn new_mut(prog: &'obj mut libbpf_sys::bpf_program) -> Self {
+ Self {
+ ptr: unsafe { NonNull::new_unchecked(prog as *mut _) },
+ _phantom: PhantomData,
+ }
+ }
+
+ /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
+ /// this program to bpffs.
+ pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
+ let path_c = util::path_to_cstring(path)?;
+ let path_ptr = path_c.as_ptr();
+
+ let ret = unsafe { libbpf_sys::bpf_program__pin(self.ptr.as_ptr(), path_ptr) };
+ util::parse_ret(ret)
+ }
+
+ /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
+ /// this program from bpffs
+ pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
+ let path_c = util::path_to_cstring(path)?;
+ let path_ptr = path_c.as_ptr();
+
+ let ret = unsafe { libbpf_sys::bpf_program__unpin(self.ptr.as_ptr(), path_ptr) };
+ util::parse_ret(ret)
+ }
+
+ /// Auto-attach this program based on its section name.
+ pub fn attach(&mut self) -> Result<Link> {
+ let ptr = unsafe { libbpf_sys::bpf_program__attach(self.ptr.as_ptr()) };
+ let ptr = validate_bpf_ret(ptr).context("failed to attach BPF program")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+
+ /// Attach this program to a
+ /// [cgroup](https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html).
+ pub fn attach_cgroup(&mut self, cgroup_fd: i32) -> Result<Link> {
+ let ptr = unsafe { libbpf_sys::bpf_program__attach_cgroup(self.ptr.as_ptr(), cgroup_fd) };
+ let ptr = validate_bpf_ret(ptr).context("failed to attach cgroup")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+
+ /// Attach this program to a [perf event](https://linux.die.net/man/2/perf_event_open).
+ pub fn attach_perf_event(&mut self, pfd: i32) -> Result<Link> {
+ let ptr = unsafe { libbpf_sys::bpf_program__attach_perf_event(self.ptr.as_ptr(), pfd) };
+ let ptr = validate_bpf_ret(ptr).context("failed to attach perf event")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+
+ /// Attach this program to a [userspace
+ /// probe](https://www.kernel.org/doc/html/latest/trace/uprobetracer.html).
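+ ///
+ /// A minimal sketch, assuming `prog` is a loaded [`ProgramMut`]; the binary
+ /// path and function offset are purely illustrative:
+ ///
+ /// ```no_run
+ /// # fn example(prog: &mut libbpf_rs::ProgramMut) -> libbpf_rs::Result<()> {
+ /// // A pid of -1 attaches to all processes.
+ /// let _link = prog.attach_uprobe(false, -1, "/usr/bin/some-binary", 0x1234)?;
+ /// # Ok(())
+ /// # }
+ /// ```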
+ pub fn attach_uprobe<T: AsRef<Path>>(
+ &mut self,
+ retprobe: bool,
+ pid: i32,
+ binary_path: T,
+ func_offset: usize,
+ ) -> Result<Link> {
+ let path = util::path_to_cstring(binary_path)?;
+ let path_ptr = path.as_ptr();
+ let ptr = unsafe {
+ libbpf_sys::bpf_program__attach_uprobe(
+ self.ptr.as_ptr(),
+ retprobe,
+ pid,
+ path_ptr,
+ func_offset as libbpf_sys::size_t,
+ )
+ };
+ let ptr = validate_bpf_ret(ptr).context("failed to attach uprobe")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+
+ /// Attach this program to a [userspace
+ /// probe](https://www.kernel.org/doc/html/latest/trace/uprobetracer.html),
+ /// providing additional options.
+ pub fn attach_uprobe_with_opts(
+ &mut self,
+ pid: i32,
+ binary_path: impl AsRef<Path>,
+ func_offset: usize,
+ opts: UprobeOpts,
+ ) -> Result<Link> {
+ let path = util::path_to_cstring(binary_path)?;
+ let path_ptr = path.as_ptr();
+ let UprobeOpts {
+ ref_ctr_offset,
+ cookie,
+ retprobe,
+ func_name,
+ _non_exhaustive,
+ } = opts;
+
+ let func_name = util::str_to_cstring(&func_name)?;
+ let opts = libbpf_sys::bpf_uprobe_opts {
+ sz: size_of::<libbpf_sys::bpf_uprobe_opts>() as _,
+ ref_ctr_offset: ref_ctr_offset as libbpf_sys::size_t,
+ bpf_cookie: cookie,
+ retprobe,
+ func_name: func_name.as_ptr(),
+ ..Default::default()
+ };
+
+ let ptr = unsafe {
+ libbpf_sys::bpf_program__attach_uprobe_opts(
+ self.ptr.as_ptr(),
+ pid,
+ path_ptr,
+ func_offset as libbpf_sys::size_t,
+ &opts as *const _,
+ )
+ };
+ let ptr = validate_bpf_ret(ptr).context("failed to attach uprobe")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+
+ /// Attach this program to a [kernel
+ /// probe](https://www.kernel.org/doc/html/latest/trace/kprobetrace.html).
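+ ///
+ /// A minimal sketch, assuming `prog` is a loaded [`ProgramMut`] whose entry
+ /// point was declared with `SEC("kprobe")`:
+ ///
+ /// ```no_run
+ /// # fn example(prog: &mut libbpf_rs::ProgramMut) -> libbpf_rs::Result<()> {
+ /// let _link = prog.attach_kprobe(false /* retprobe */, "do_sys_openat2")?;
+ /// # Ok(())
+ /// # }
+ /// ```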
+ pub fn attach_kprobe<T: AsRef<str>>(&mut self, retprobe: bool, func_name: T) -> Result<Link> {
+ let func_name = util::str_to_cstring(func_name.as_ref())?;
+ let func_name_ptr = func_name.as_ptr();
+ let ptr = unsafe {
+ libbpf_sys::bpf_program__attach_kprobe(self.ptr.as_ptr(), retprobe, func_name_ptr)
+ };
+ let ptr = validate_bpf_ret(ptr).context("failed to attach kprobe")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+
+ /// Attach this program to the specified syscall
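+ ///
+ /// A minimal sketch; the syscall name is given without any
+ /// architecture-specific prefix:
+ ///
+ /// ```no_run
+ /// # fn example(prog: &mut libbpf_rs::ProgramMut) -> libbpf_rs::Result<()> {
+ /// let _link = prog.attach_ksyscall(false /* retprobe */, "kill")?;
+ /// # Ok(())
+ /// # }
+ /// ```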
+ pub fn attach_ksyscall<T: AsRef<str>>(
+ &mut self,
+ retprobe: bool,
+ syscall_name: T,
+ ) -> Result<Link> {
+ let opts = libbpf_sys::bpf_ksyscall_opts {
+ sz: size_of::<libbpf_sys::bpf_ksyscall_opts>() as _,
+ retprobe,
+ ..Default::default()
+ };
+
+ let syscall_name = util::str_to_cstring(syscall_name.as_ref())?;
+ let syscall_name_ptr = syscall_name.as_ptr();
+ let ptr = unsafe {
+ libbpf_sys::bpf_program__attach_ksyscall(self.ptr.as_ptr(), syscall_name_ptr, &opts)
+ };
+ let ptr = validate_bpf_ret(ptr).context("failed to attach ksyscall")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+
+ fn attach_tracepoint_impl(
+ &mut self,
+ tp_category: &str,
+ tp_name: &str,
+ tp_opts: Option<TracepointOpts>,
+ ) -> Result<Link> {
+ let tp_category = util::str_to_cstring(tp_category)?;
+ let tp_category_ptr = tp_category.as_ptr();
+ let tp_name = util::str_to_cstring(tp_name)?;
+ let tp_name_ptr = tp_name.as_ptr();
+
+ let ptr = if let Some(tp_opts) = tp_opts {
+ let tp_opts = libbpf_sys::bpf_tracepoint_opts::from(tp_opts);
+ unsafe {
+ libbpf_sys::bpf_program__attach_tracepoint_opts(
+ self.ptr.as_ptr(),
+ tp_category_ptr,
+ tp_name_ptr,
+ &tp_opts as *const _,
+ )
+ }
+ } else {
+ unsafe {
+ libbpf_sys::bpf_program__attach_tracepoint(
+ self.ptr.as_ptr(),
+ tp_category_ptr,
+ tp_name_ptr,
+ )
+ }
+ };
+
+ let ptr = validate_bpf_ret(ptr).context("failed to attach tracepoint")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+
+ /// Attach this program to a [kernel
+ /// tracepoint](https://www.kernel.org/doc/html/latest/trace/tracepoints.html).
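+ ///
+ /// A minimal sketch attaching to the `sched:sched_switch` tracepoint,
+ /// assuming `prog` is a loaded [`ProgramMut`]:
+ ///
+ /// ```no_run
+ /// # fn example(prog: &mut libbpf_rs::ProgramMut) -> libbpf_rs::Result<()> {
+ /// let _link = prog.attach_tracepoint("sched", "sched_switch")?;
+ /// # Ok(())
+ /// # }
+ /// ```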
+ pub fn attach_tracepoint(
+ &mut self,
+ tp_category: impl AsRef<str>,
+ tp_name: impl AsRef<str>,
+ ) -> Result<Link> {
+ self.attach_tracepoint_impl(tp_category.as_ref(), tp_name.as_ref(), None)
+ }
+
+ /// Attach this program to a [kernel
+ /// tracepoint](https://www.kernel.org/doc/html/latest/trace/tracepoints.html),
+ /// providing additional options.
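+ ///
+ /// A sketch passing a custom cookie that the program can later read back
+ /// via `bpf_get_attach_cookie`:
+ ///
+ /// ```no_run
+ /// # fn example(prog: &mut libbpf_rs::ProgramMut) -> libbpf_rs::Result<()> {
+ /// use libbpf_rs::TracepointOpts;
+ ///
+ /// let opts = TracepointOpts {
+ ///     cookie: 42,
+ ///     ..Default::default()
+ /// };
+ /// let _link = prog.attach_tracepoint_with_opts("sched", "sched_switch", opts)?;
+ /// # Ok(())
+ /// # }
+ /// ```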
+ pub fn attach_tracepoint_with_opts(
+ &mut self,
+ tp_category: impl AsRef<str>,
+ tp_name: impl AsRef<str>,
+ tp_opts: TracepointOpts,
+ ) -> Result<Link> {
+ self.attach_tracepoint_impl(tp_category.as_ref(), tp_name.as_ref(), Some(tp_opts))
+ }
+
+ /// Attach this program to a [raw kernel
+ /// tracepoint](https://lwn.net/Articles/748352/).
+ pub fn attach_raw_tracepoint<T: AsRef<str>>(&mut self, tp_name: T) -> Result<Link> {
+ let tp_name = util::str_to_cstring(tp_name.as_ref())?;
+ let tp_name_ptr = tp_name.as_ptr();
+ let ptr = unsafe {
+ libbpf_sys::bpf_program__attach_raw_tracepoint(self.ptr.as_ptr(), tp_name_ptr)
+ };
+ let ptr = validate_bpf_ret(ptr).context("failed to attach raw tracepoint")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+
+ /// Attach to an [LSM](https://en.wikipedia.org/wiki/Linux_Security_Modules) hook
+ pub fn attach_lsm(&mut self) -> Result<Link> {
+ let ptr = unsafe { libbpf_sys::bpf_program__attach_lsm(self.ptr.as_ptr()) };
+ let ptr = validate_bpf_ret(ptr).context("failed to attach LSM")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+
+ /// Attach to a [fentry/fexit kernel probe](https://lwn.net/Articles/801479/)
+ pub fn attach_trace(&mut self) -> Result<Link> {
+ let ptr = unsafe { libbpf_sys::bpf_program__attach_trace(self.ptr.as_ptr()) };
+ let ptr = validate_bpf_ret(ptr).context("failed to attach fentry/fexit kernel probe")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+
+ /// Attach a verdict/parser to a [sockmap/sockhash](https://lwn.net/Articles/731133/)
+ pub fn attach_sockmap(&self, map_fd: i32) -> Result<()> {
+ let err = unsafe {
+ libbpf_sys::bpf_prog_attach(
+ self.as_fd().as_raw_fd(),
+ map_fd,
+ self.attach_type() as u32,
+ 0,
+ )
+ };
+ util::parse_ret(err)
+ }
+
+ /// Attach this program to [XDP](https://lwn.net/Articles/825998/)
+ pub fn attach_xdp(&mut self, ifindex: i32) -> Result<Link> {
+ let ptr = unsafe { libbpf_sys::bpf_program__attach_xdp(self.ptr.as_ptr(), ifindex) };
+ let ptr = validate_bpf_ret(ptr).context("failed to attach XDP program")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+
+ /// Attach this program to [netns-based programs](https://lwn.net/Articles/819618/)
+ pub fn attach_netns(&mut self, netns_fd: i32) -> Result<Link> {
+ let ptr = unsafe { libbpf_sys::bpf_program__attach_netns(self.ptr.as_ptr(), netns_fd) };
+ let ptr = validate_bpf_ret(ptr).context("failed to attach network namespace program")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+
+ fn attach_usdt_impl(
+ &mut self,
+ pid: i32,
+ binary_path: &Path,
+ usdt_provider: &str,
+ usdt_name: &str,
+ usdt_opts: Option<UsdtOpts>,
+ ) -> Result<Link> {
+ let path = util::path_to_cstring(binary_path)?;
+ let path_ptr = path.as_ptr();
+ let usdt_provider = util::str_to_cstring(usdt_provider)?;
+ let usdt_provider_ptr = usdt_provider.as_ptr();
+ let usdt_name = util::str_to_cstring(usdt_name)?;
+ let usdt_name_ptr = usdt_name.as_ptr();
+ let usdt_opts = usdt_opts.map(libbpf_sys::bpf_usdt_opts::from);
+ let usdt_opts_ptr = usdt_opts
+ .as_ref()
+ .map(|opts| opts as *const _)
+ .unwrap_or_else(ptr::null);
+
+ let ptr = unsafe {
+ libbpf_sys::bpf_program__attach_usdt(
+ self.ptr.as_ptr(),
+ pid,
+ path_ptr,
+ usdt_provider_ptr,
+ usdt_name_ptr,
+ usdt_opts_ptr,
+ )
+ };
+ let ptr = validate_bpf_ret(ptr).context("failed to attach USDT")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+
+ /// Attach this program to a [USDT](https://lwn.net/Articles/753601/) probe
+ /// point. The entry point of the program must be defined with
+ /// `SEC("usdt")`.
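+ ///
+ /// A minimal sketch, assuming `prog` is a loaded [`ProgramMut`]; the binary
+ /// path, provider, and probe names are illustrative:
+ ///
+ /// ```no_run
+ /// # fn example(prog: &mut libbpf_rs::ProgramMut) -> libbpf_rs::Result<()> {
+ /// let _link = prog.attach_usdt(-1, "/usr/bin/some-binary", "myprovider", "myprobe")?;
+ /// # Ok(())
+ /// # }
+ /// ```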
+ pub fn attach_usdt(
+ &mut self,
+ pid: i32,
+ binary_path: impl AsRef<Path>,
+ usdt_provider: impl AsRef<str>,
+ usdt_name: impl AsRef<str>,
+ ) -> Result<Link> {
+ self.attach_usdt_impl(
+ pid,
+ binary_path.as_ref(),
+ usdt_provider.as_ref(),
+ usdt_name.as_ref(),
+ None,
+ )
+ }
+
+ /// Attach this program to a [USDT](https://lwn.net/Articles/753601/) probe
+ /// point, providing additional options. The entry point of the program must
+ /// be defined with `SEC("usdt")`.
+ pub fn attach_usdt_with_opts(
+ &mut self,
+ pid: i32,
+ binary_path: impl AsRef<Path>,
+ usdt_provider: impl AsRef<str>,
+ usdt_name: impl AsRef<str>,
+ usdt_opts: UsdtOpts,
+ ) -> Result<Link> {
+ self.attach_usdt_impl(
+ pid,
+ binary_path.as_ref(),
+ usdt_provider.as_ref(),
+ usdt_name.as_ref(),
+ Some(usdt_opts),
+ )
+ }
+
+ /// Attach this program to a
+ /// [BPF Iterator](https://www.kernel.org/doc/html/latest/bpf/bpf_iterators.html).
+ /// The entry point of the program must be defined with `SEC("iter")` or `SEC("iter.s")`.
+ pub fn attach_iter(&mut self, map_fd: BorrowedFd<'_>) -> Result<Link> {
+ let mut linkinfo = libbpf_sys::bpf_iter_link_info::default();
+ linkinfo.map.map_fd = map_fd.as_raw_fd() as _;
+ let attach_opt = libbpf_sys::bpf_iter_attach_opts {
+ link_info: &mut linkinfo as *mut libbpf_sys::bpf_iter_link_info,
+ link_info_len: size_of::<libbpf_sys::bpf_iter_link_info>() as _,
+ sz: size_of::<libbpf_sys::bpf_iter_attach_opts>() as _,
+ ..Default::default()
+ };
+ let ptr = unsafe {
+ libbpf_sys::bpf_program__attach_iter(
+ self.ptr.as_ptr(),
+ &attach_opt as *const libbpf_sys::bpf_iter_attach_opts,
+ )
+ };
+
+ let ptr = validate_bpf_ret(ptr).context("failed to attach iterator")?;
+ // SAFETY: the pointer came from libbpf and has been checked for errors.
+ let link = unsafe { Link::new(ptr) };
+ Ok(link)
+ }
+
+ /// Test run the program with the given input data.
+ ///
+ /// This function uses the
+ /// [BPF_PROG_RUN](https://www.kernel.org/doc/html/latest/bpf/bpf_prog_run.html)
+ /// facility.
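+ ///
+ /// A minimal sketch, assuming `prog` is a loaded [`ProgramMut`] that
+ /// supports test runs (e.g., declared with `SEC("syscall")`); the input
+ /// data is illustrative:
+ ///
+ /// ```no_run
+ /// # fn example(prog: &mut libbpf_rs::ProgramMut) -> libbpf_rs::Result<()> {
+ /// use libbpf_rs::ProgramInput;
+ ///
+ /// let data = [0u8; 16];
+ /// let input = ProgramInput {
+ ///     data_in: Some(&data),
+ ///     ..Default::default()
+ /// };
+ /// let output = prog.test_run(input)?;
+ /// println!("return value: {}", output.return_value);
+ /// # Ok(())
+ /// # }
+ /// ```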
+ pub fn test_run<'dat>(&mut self, input: Input<'dat>) -> Result<Output<'dat>> {
+ unsafe fn slice_from_array<'t, T>(items: *mut T, num_items: usize) -> Option<&'t mut [T]> {
+ if items.is_null() {
+ None
+ } else {
+ Some(unsafe { slice::from_raw_parts_mut(items, num_items) })
+ }
+ }
+
+ let Input {
+ context_in,
+ mut context_out,
+ data_in,
+ mut data_out,
+ cpu,
+ flags,
+ _non_exhaustive: (),
+ } = input;
+
+ let mut opts = unsafe { mem::zeroed::<libbpf_sys::bpf_test_run_opts>() };
+ opts.sz = size_of_val(&opts) as _;
+ opts.ctx_in = context_in
+ .as_ref()
+ .map(|data| data.as_ptr().cast())
+ .unwrap_or_else(ptr::null);
+ opts.ctx_size_in = context_in.map(|data| data.len() as _).unwrap_or(0);
+ opts.ctx_out = context_out
+ .as_mut()
+ .map(|data| data.as_mut_ptr().cast())
+ .unwrap_or_else(ptr::null_mut);
+ opts.ctx_size_out = context_out.map(|data| data.len() as _).unwrap_or(0);
+ opts.data_in = data_in
+ .map(|data| data.as_ptr().cast())
+ .unwrap_or_else(ptr::null);
+ opts.data_size_in = data_in.map(|data| data.len() as _).unwrap_or(0);
+ opts.data_out = data_out
+ .as_mut()
+ .map(|data| data.as_mut_ptr().cast())
+ .unwrap_or_else(ptr::null_mut);
+ opts.data_size_out = data_out.map(|data| data.len() as _).unwrap_or(0);
+ opts.cpu = cpu;
+ opts.flags = flags;
+
+ let rc = unsafe { libbpf_sys::bpf_prog_test_run_opts(self.as_fd().as_raw_fd(), &mut opts) };
+ let () = util::parse_ret(rc)?;
+ let output = Output {
+ return_value: opts.retval,
+ context: unsafe { slice_from_array(opts.ctx_out.cast(), opts.ctx_size_out as _) },
+ data: unsafe { slice_from_array(opts.data_out.cast(), opts.data_size_out as _) },
+ _non_exhaustive: (),
+ };
+ Ok(output)
+ }
+}
+
+impl<'obj> Deref for ProgramMut<'obj> {
+ type Target = Program<'obj>;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: `ProgramImpl` is `repr(transparent)` and so in-memory
+ // representation of both types is the same.
+ unsafe { transmute::<&ProgramMut<'obj>, &Program<'obj>>(self) }
+ }
+}
+
+impl<T> AsFd for ProgramImpl<'_, T> {
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ let fd = unsafe { libbpf_sys::bpf_program__fd(self.ptr.as_ptr()) };
+ unsafe { BorrowedFd::borrow_raw(fd) }
+ }
+}
+
+impl<T> AsRawLibbpf for ProgramImpl<'_, T> {
+ type LibbpfType = libbpf_sys::bpf_program;
+
+ /// Retrieve the underlying [`libbpf_sys::bpf_program`].
+ fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
+ self.ptr
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ use std::mem::discriminant;
+
+ #[test]
+ fn program_type() {
+ use ProgramType::*;
+
+ for t in [
+ Unspec,
+ SocketFilter,
+ Kprobe,
+ SchedCls,
+ SchedAct,
+ Tracepoint,
+ Xdp,
+ PerfEvent,
+ CgroupSkb,
+ CgroupSock,
+ LwtIn,
+ LwtOut,
+ LwtXmit,
+ SockOps,
+ SkSkb,
+ CgroupDevice,
+ SkMsg,
+ RawTracepoint,
+ CgroupSockAddr,
+ LwtSeg6local,
+ LircMode2,
+ SkReuseport,
+ FlowDissector,
+ CgroupSysctl,
+ RawTracepointWritable,
+ CgroupSockopt,
+ Tracing,
+ StructOps,
+ Ext,
+ Lsm,
+ SkLookup,
+ Syscall,
+ Unknown,
+ ] {
+ // check if discriminants match after a roundtrip conversion
+ assert_eq!(discriminant(&t), discriminant(&ProgramType::from(t as u32)));
+ }
+ }
+
+ #[test]
+ fn program_attach_type() {
+ use ProgramAttachType::*;
+
+ for t in [
+ CgroupInetIngress,
+ CgroupInetEgress,
+ CgroupInetSockCreate,
+ CgroupSockOps,
+ SkSkbStreamParser,
+ SkSkbStreamVerdict,
+ CgroupDevice,
+ SkMsgVerdict,
+ CgroupInet4Bind,
+ CgroupInet6Bind,
+ CgroupInet4Connect,
+ CgroupInet6Connect,
+ CgroupInet4PostBind,
+ CgroupInet6PostBind,
+ CgroupUdp4Sendmsg,
+ CgroupUdp6Sendmsg,
+ LircMode2,
+ FlowDissector,
+ CgroupSysctl,
+ CgroupUdp4Recvmsg,
+ CgroupUdp6Recvmsg,
+ CgroupGetsockopt,
+ CgroupSetsockopt,
+ TraceRawTp,
+ TraceFentry,
+ TraceFexit,
+ ModifyReturn,
+ LsmMac,
+ TraceIter,
+ CgroupInet4Getpeername,
+ CgroupInet6Getpeername,
+ CgroupInet4Getsockname,
+ CgroupInet6Getsockname,
+ XdpDevmap,
+ CgroupInetSockRelease,
+ XdpCpumap,
+ SkLookup,
+ Xdp,
+ SkSkbVerdict,
+ SkReuseportSelect,
+ SkReuseportSelectOrMigrate,
+ PerfEvent,
+ Unknown,
+ ] {
+ // check if discriminants match after a roundtrip conversion
+ assert_eq!(
+ discriminant(&t),
+ discriminant(&ProgramAttachType::from(t as u32))
+ );
+ }
+ }
+}
diff --git a/src/query.rs b/src/query.rs
new file mode 100644
index 0000000..a44984f
--- /dev/null
+++ b/src/query.rs
@@ -0,0 +1,705 @@
+//! Query the host about BPF
+//!
+//! For example, to list the name of every bpf program running on the system:
+//! ```
+//! use libbpf_rs::query::ProgInfoIter;
+//!
+//! let iter = ProgInfoIter::default();
+//! for prog in iter {
+//! println!("{}", prog.name.to_string_lossy());
+//! }
+//! ```
+
+use std::ffi::c_void;
+use std::ffi::CString;
+use std::io;
+use std::mem::size_of_val;
+use std::os::fd::AsFd;
+use std::os::fd::AsRawFd;
+use std::os::fd::BorrowedFd;
+use std::os::fd::FromRawFd;
+use std::os::fd::OwnedFd;
+use std::os::raw::c_char;
+use std::ptr;
+use std::time::Duration;
+
+use crate::util;
+use crate::MapType;
+use crate::ProgramAttachType;
+use crate::ProgramType;
+use crate::Result;
+
+macro_rules! gen_info_impl {
+ // This magic here allows us to embed doc comments into macro expansions
+ ($(#[$attr:meta])*
+ $name:ident, $info_ty:ty, $uapi_info_ty:ty, $next_id:expr, $fd_by_id:expr) => {
+ $(#[$attr])*
+ #[derive(Default, Debug)]
+ pub struct $name {
+ cur_id: u32,
+ }
+
+ impl $name {
+ // Returns Some(next_valid_fd); None when no ids are left
+ fn next_valid_fd(&mut self) -> Option<OwnedFd> {
+ loop {
+ if unsafe { $next_id(self.cur_id, &mut self.cur_id) } != 0 {
+ return None;
+ }
+
+ let fd = unsafe { $fd_by_id(self.cur_id) };
+ if fd < 0 {
+ let err = io::Error::last_os_error();
+ if err.kind() == io::ErrorKind::NotFound {
+ continue;
+ }
+
+ return None;
+ }
+
+ return Some(unsafe { OwnedFd::from_raw_fd(fd)});
+ }
+ }
+ }
+
+ impl Iterator for $name {
+ type Item = $info_ty;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let fd = self.next_valid_fd()?;
+
+ // We need to use std::mem::zeroed() instead of just using
+ // ::default() because padding bytes need to be zero as well.
+ // Old kernels which know about fewer fields than we do will
+ // check to make sure every byte past what they know is zero
+ // and will return E2BIG otherwise.
+ let mut item: $uapi_info_ty = unsafe { std::mem::zeroed() };
+ let item_ptr: *mut $uapi_info_ty = &mut item;
+ let mut len = size_of_val(&item) as u32;
+
+ let ret = unsafe { libbpf_sys::bpf_obj_get_info_by_fd(fd.as_raw_fd(), item_ptr as *mut c_void, &mut len) };
+ let parsed_uapi = if ret != 0 {
+ None
+ } else {
+ <$info_ty>::from_uapi(fd.as_fd(), item)
+ };
+
+ parsed_uapi
+ }
+ }
+ };
+}
+
+/// BTF Line information
+#[derive(Clone, Debug)]
+pub struct LineInfo {
+ /// Offset of instruction in vector
+ pub insn_off: u32,
+ /// File name offset
+ pub file_name_off: u32,
+ /// Line offset in debug info
+ pub line_off: u32,
+ /// Line number
+ pub line_num: u32,
+ /// Line column number
+ pub line_col: u32,
+}
+
+impl From<&libbpf_sys::bpf_line_info> for LineInfo {
+ fn from(item: &libbpf_sys::bpf_line_info) -> Self {
+ LineInfo {
+ insn_off: item.insn_off,
+ file_name_off: item.file_name_off,
+ line_off: item.line_off,
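+ // The kernel packs line and column into one u32; the upper 22 bits
+ // hold the line number and the lower 10 bits the column.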
+ line_num: item.line_col >> 10,
+ line_col: item.line_col & 0x3ff,
+ }
+ }
+}
+
+/// BPF identifier tag
+#[derive(Debug, Clone, Default)]
+#[repr(C)]
+pub struct Tag(pub [u8; 8]);
+
+/// Information about a BPF program
+#[derive(Debug, Clone)]
+// TODO: Document members.
+#[allow(missing_docs)]
+pub struct ProgramInfo {
+ pub name: CString,
+ pub ty: ProgramType,
+ pub tag: Tag,
+ pub id: u32,
+ pub jited_prog_insns: Vec<u8>,
+ pub xlated_prog_insns: Vec<u8>,
+ /// Duration since system boot
+ pub load_time: Duration,
+ pub created_by_uid: u32,
+ pub map_ids: Vec<u32>,
+ pub ifindex: u32,
+ pub gpl_compatible: bool,
+ pub netns_dev: u64,
+ pub netns_ino: u64,
+ pub jited_ksyms: Vec<*const c_void>,
+ pub jited_func_lens: Vec<u32>,
+ pub btf_id: u32,
+ pub func_info_rec_size: u32,
+ pub func_info: Vec<libbpf_sys::bpf_func_info>,
+ pub line_info: Vec<LineInfo>,
+ pub jited_line_info: Vec<*const c_void>,
+ pub line_info_rec_size: u32,
+ pub jited_line_info_rec_size: u32,
+ pub prog_tags: Vec<Tag>,
+ pub run_time_ns: u64,
+ pub run_cnt: u64,
+ /// Skipped BPF executions due to recursion or concurrent execution prevention.
+ pub recursion_misses: u64,
+}
+
+/// An iterator for the information of loaded bpf programs
+#[derive(Default, Debug)]
+pub struct ProgInfoIter {
+ cur_id: u32,
+ opts: ProgInfoQueryOptions,
+}
+
+/// Options to query the program info currently loaded
+#[derive(Clone, Default, Debug)]
+pub struct ProgInfoQueryOptions {
+ /// Include the vector of bpf instructions in the result
+ include_xlated_prog_insns: bool,
+ /// Include the vector of jited instructions in the result
+ include_jited_prog_insns: bool,
+ /// Include the ids of maps associated with the program
+ include_map_ids: bool,
+ /// Include source line information corresponding to xlated code
+ include_line_info: bool,
+ /// Include function type information corresponding to xlated code
+ include_func_info: bool,
+ /// Include source line information corresponding to jited code
+ include_jited_line_info: bool,
+ /// Include function type information corresponding to jited code
+ include_jited_func_lens: bool,
+ /// Include program tags
+ include_prog_tags: bool,
+ /// Include the jited kernel symbols
+ include_jited_ksyms: bool,
+}
+
+impl ProgInfoIter {
+ /// Generate an iter from more specific query options
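+ ///
+ /// For example, to also fetch translated instructions and map ids for
+ /// every loaded program:
+ ///
+ /// ```no_run
+ /// use libbpf_rs::query::{ProgInfoIter, ProgInfoQueryOptions};
+ ///
+ /// let opts = ProgInfoQueryOptions::default()
+ ///     .include_xlated_prog_insns(true)
+ ///     .include_map_ids(true);
+ /// for prog in ProgInfoIter::with_query_opts(opts) {
+ ///     println!("{}: {} map(s)", prog.name.to_string_lossy(), prog.map_ids.len());
+ /// }
+ /// ```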
+ pub fn with_query_opts(opts: ProgInfoQueryOptions) -> Self {
+ Self {
+ opts,
+ ..Self::default()
+ }
+ }
+}
+
+impl ProgInfoQueryOptions {
+ /// Include the vector of translated (xlated) bpf instructions in the result
+ pub fn include_xlated_prog_insns(mut self, v: bool) -> Self {
+ self.include_xlated_prog_insns = v;
+ self
+ }
+
+ /// Include the vector of jited instructions in the result
+ pub fn include_jited_prog_insns(mut self, v: bool) -> Self {
+ self.include_jited_prog_insns = v;
+ self
+ }
+
+ /// Include the ids of maps associated with the program
+ pub fn include_map_ids(mut self, v: bool) -> Self {
+ self.include_map_ids = v;
+ self
+ }
+
+ /// Include source line information corresponding to xlated code
+ pub fn include_line_info(mut self, v: bool) -> Self {
+ self.include_line_info = v;
+ self
+ }
+
+ /// Include function type information corresponding to xlated code
+ pub fn include_func_info(mut self, v: bool) -> Self {
+ self.include_func_info = v;
+ self
+ }
+
+ /// Include source line information corresponding to jited code
+ pub fn include_jited_line_info(mut self, v: bool) -> Self {
+ self.include_jited_line_info = v;
+ self
+ }
+
+ /// Include function type information corresponding to jited code
+ pub fn include_jited_func_lens(mut self, v: bool) -> Self {
+ self.include_jited_func_lens = v;
+ self
+ }
+
+ /// Include program tags
+ pub fn include_prog_tags(mut self, v: bool) -> Self {
+ self.include_prog_tags = v;
+ self
+ }
+
+ /// Include the jited kernel symbols
+ pub fn include_jited_ksyms(mut self, v: bool) -> Self {
+ self.include_jited_ksyms = v;
+ self
+ }
+
+ /// Include everything there is in the query results
+ pub fn include_all(self) -> Self {
+ Self {
+ include_xlated_prog_insns: true,
+ include_jited_prog_insns: true,
+ include_map_ids: true,
+ include_line_info: true,
+ include_func_info: true,
+ include_jited_line_info: true,
+ include_jited_func_lens: true,
+ include_prog_tags: true,
+ include_jited_ksyms: true,
+ }
+ }
+}
+
+impl ProgramInfo {
+ fn load_from_fd(fd: BorrowedFd<'_>, opts: &ProgInfoQueryOptions) -> Result<Self> {
+ let mut item = libbpf_sys::bpf_prog_info::default();
+
+ let mut xlated_prog_insns: Vec<u8> = Vec::new();
+ let mut jited_prog_insns: Vec<u8> = Vec::new();
+ let mut map_ids: Vec<u32> = Vec::new();
+ let mut jited_line_info: Vec<*const c_void> = Vec::new();
+ let mut line_info: Vec<libbpf_sys::bpf_line_info> = Vec::new();
+ let mut func_info: Vec<libbpf_sys::bpf_func_info> = Vec::new();
+ let mut jited_func_lens: Vec<u32> = Vec::new();
+ let mut prog_tags: Vec<Tag> = Vec::new();
+ let mut jited_ksyms: Vec<*const c_void> = Vec::new();
+
+ let item_ptr: *mut libbpf_sys::bpf_prog_info = &mut item;
+ let mut len = size_of_val(&item) as u32;
+
+ let ret = unsafe {
+ libbpf_sys::bpf_obj_get_info_by_fd(fd.as_raw_fd(), item_ptr as *mut c_void, &mut len)
+ };
+ util::parse_ret(ret)?;
+
+ // SANITY: `libbpf` should guarantee NUL termination.
+ let name = util::c_char_slice_to_cstr(&item.name).unwrap();
+ let ty = ProgramType::from(item.type_);
+
+ if opts.include_xlated_prog_insns {
+ xlated_prog_insns.resize(item.xlated_prog_len as usize, 0u8);
+ item.xlated_prog_insns = xlated_prog_insns.as_mut_ptr() as *mut c_void as u64;
+ } else {
+ item.xlated_prog_len = 0;
+ }
+
+ if opts.include_jited_prog_insns {
+ jited_prog_insns.resize(item.jited_prog_len as usize, 0u8);
+ item.jited_prog_insns = jited_prog_insns.as_mut_ptr() as *mut c_void as u64;
+ } else {
+ item.jited_prog_len = 0;
+ }
+
+ if opts.include_map_ids {
+ map_ids.resize(item.nr_map_ids as usize, 0u32);
+ item.map_ids = map_ids.as_mut_ptr() as *mut c_void as u64;
+ } else {
+ item.nr_map_ids = 0;
+ }
+
+ if opts.include_line_info {
+ line_info.resize(
+ item.nr_line_info as usize,
+ libbpf_sys::bpf_line_info::default(),
+ );
+ item.line_info = line_info.as_mut_ptr() as *mut c_void as u64;
+ } else {
+ item.nr_line_info = 0;
+ }
+
+ if opts.include_func_info {
+ func_info.resize(
+ item.nr_func_info as usize,
+ libbpf_sys::bpf_func_info::default(),
+ );
+ item.func_info = func_info.as_mut_ptr() as *mut c_void as u64;
+ } else {
+ item.nr_func_info = 0;
+ }
+
+ if opts.include_jited_line_info {
+ jited_line_info.resize(item.nr_jited_line_info as usize, ptr::null());
+ item.jited_line_info = jited_line_info.as_mut_ptr() as *mut c_void as u64;
+ } else {
+ item.nr_jited_line_info = 0;
+ }
+
+ if opts.include_jited_func_lens {
+ jited_func_lens.resize(item.nr_jited_func_lens as usize, 0);
+ item.jited_func_lens = jited_func_lens.as_mut_ptr() as *mut c_void as u64;
+ } else {
+ item.nr_jited_func_lens = 0;
+ }
+
+ if opts.include_prog_tags {
+ prog_tags.resize(item.nr_prog_tags as usize, Tag::default());
+ item.prog_tags = prog_tags.as_mut_ptr() as *mut c_void as u64;
+ } else {
+ item.nr_prog_tags = 0;
+ }
+
+ if opts.include_jited_ksyms {
+ jited_ksyms.resize(item.nr_jited_ksyms as usize, ptr::null());
+ item.jited_ksyms = jited_ksyms.as_mut_ptr() as *mut c_void as u64;
+ } else {
+ item.nr_jited_ksyms = 0;
+ }
+
+ let ret = unsafe {
+ libbpf_sys::bpf_obj_get_info_by_fd(fd.as_raw_fd(), item_ptr as *mut c_void, &mut len)
+ };
+ util::parse_ret(ret)?;
+
+ Ok(ProgramInfo {
+ name: name.to_owned(),
+ ty,
+ tag: Tag(item.tag),
+ id: item.id,
+ jited_prog_insns,
+ xlated_prog_insns,
+ load_time: Duration::from_nanos(item.load_time),
+ created_by_uid: item.created_by_uid,
+ map_ids,
+ ifindex: item.ifindex,
+ gpl_compatible: item._bitfield_1.get_bit(0),
+ netns_dev: item.netns_dev,
+ netns_ino: item.netns_ino,
+ jited_ksyms,
+ jited_func_lens,
+ btf_id: item.btf_id,
+ func_info_rec_size: item.func_info_rec_size,
+ func_info,
+ line_info: line_info.iter().map(|li| li.into()).collect(),
+ jited_line_info,
+ line_info_rec_size: item.line_info_rec_size,
+ jited_line_info_rec_size: item.jited_line_info_rec_size,
+ prog_tags,
+ run_time_ns: item.run_time_ns,
+ run_cnt: item.run_cnt,
+ recursion_misses: item.recursion_misses,
+ })
+ }
+}
+
+impl ProgInfoIter {
+ fn next_valid_fd(&mut self) -> Option<OwnedFd> {
+ loop {
+ if unsafe { libbpf_sys::bpf_prog_get_next_id(self.cur_id, &mut self.cur_id) } != 0 {
+ return None;
+ }
+
+ let fd = unsafe { libbpf_sys::bpf_prog_get_fd_by_id(self.cur_id) };
+ if fd < 0 {
+ let err = io::Error::last_os_error();
+ if err.kind() == io::ErrorKind::NotFound {
+ continue;
+ }
+ return None;
+ }
+
+ return Some(unsafe { OwnedFd::from_raw_fd(fd) });
+ }
+ }
+}
+
+impl Iterator for ProgInfoIter {
+ type Item = ProgramInfo;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let fd = self.next_valid_fd()?;
+
+ let prog = ProgramInfo::load_from_fd(fd.as_fd(), &self.opts);
+
+ match prog {
+ Ok(p) => Some(p),
+ // TODO: We should consider bubbling up errors properly.
+ Err(_err) => None,
+ }
+ }
+}
+
+/// Information about a BPF map
+#[derive(Debug, Clone)]
+// TODO: Document members.
+#[allow(missing_docs)]
+pub struct MapInfo {
+ pub name: CString,
+ pub ty: MapType,
+ pub id: u32,
+ pub key_size: u32,
+ pub value_size: u32,
+ pub max_entries: u32,
+ pub map_flags: u32,
+ pub ifindex: u32,
+ pub btf_vmlinux_value_type_id: u32,
+ pub netns_dev: u64,
+ pub netns_ino: u64,
+ pub btf_id: u32,
+ pub btf_key_type_id: u32,
+ pub btf_value_type_id: u32,
+}
+
+impl MapInfo {
+ fn from_uapi(_fd: BorrowedFd<'_>, s: libbpf_sys::bpf_map_info) -> Option<Self> {
+ // SANITY: `libbpf` should guarantee NUL termination.
+ let name = util::c_char_slice_to_cstr(&s.name).unwrap();
+ let ty = MapType::from(s.type_);
+
+ Some(Self {
+ name: name.to_owned(),
+ ty,
+ id: s.id,
+ key_size: s.key_size,
+ value_size: s.value_size,
+ max_entries: s.max_entries,
+ map_flags: s.map_flags,
+ ifindex: s.ifindex,
+ btf_vmlinux_value_type_id: s.btf_vmlinux_value_type_id,
+ netns_dev: s.netns_dev,
+ netns_ino: s.netns_ino,
+ btf_id: s.btf_id,
+ btf_key_type_id: s.btf_key_type_id,
+ btf_value_type_id: s.btf_value_type_id,
+ })
+ }
+}
+
+gen_info_impl!(
+ /// Iterator that returns [`MapInfo`]s.
+ MapInfoIter,
+ MapInfo,
+ libbpf_sys::bpf_map_info,
+ libbpf_sys::bpf_map_get_next_id,
+ libbpf_sys::bpf_map_get_fd_by_id
+);
+
+/// Information about BPF type format
+#[derive(Debug, Clone)]
+pub struct BtfInfo {
+ /// The name associated with this btf information in the kernel
+ pub name: CString,
+ /// The raw btf bytes from the kernel
+ pub btf: Vec<u8>,
+ /// The btf id associated with this btf information in the kernel
+ pub id: u32,
+}
+
+impl BtfInfo {
+ fn load_from_fd(fd: BorrowedFd<'_>) -> Result<Self> {
+ let mut item = libbpf_sys::bpf_btf_info::default();
+ let mut btf: Vec<u8> = Vec::new();
+ let mut name: Vec<u8> = Vec::new();
+
+ let item_ptr: *mut libbpf_sys::bpf_btf_info = &mut item;
+ let mut len = size_of_val(&item) as u32;
+
+ let ret = unsafe {
+ libbpf_sys::bpf_obj_get_info_by_fd(fd.as_raw_fd(), item_ptr as *mut c_void, &mut len)
+ };
+ util::parse_ret(ret)?;
+
+ // The API reports the ASCII string length, but expects the caller to
+ // also provide room for a terminating NUL byte
+ item.name_len += 1;
+ name.resize(item.name_len as usize, 0u8);
+ item.name = name.as_mut_ptr() as *mut c_void as u64;
+
+ btf.resize(item.btf_size as usize, 0u8);
+ item.btf = btf.as_mut_ptr() as *mut c_void as u64;
+
+ let ret = unsafe {
+ libbpf_sys::bpf_obj_get_info_by_fd(fd.as_raw_fd(), item_ptr as *mut c_void, &mut len)
+ };
+ util::parse_ret(ret)?;
+
+ Ok(BtfInfo {
+ // SANITY: Our buffer contained space for a NUL byte and we set its
+ // contents to 0. Barring a `libbpf` bug a NUL byte will be
+ // present.
+ name: CString::from_vec_with_nul(name).unwrap(),
+ btf,
+ id: item.id,
+ })
+ }
+}
+
+#[derive(Debug, Default)]
+/// An iterator for the btf type information of modules and programs
+/// in the kernel
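+///
+/// For example, to list the id and raw size of every BTF object the
+/// kernel knows about:
+///
+/// ```no_run
+/// use libbpf_rs::query::BtfInfoIter;
+///
+/// for info in BtfInfoIter::default() {
+///     println!("btf id {}: {} byte(s)", info.id, info.btf.len());
+/// }
+/// ```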
+pub struct BtfInfoIter {
+ cur_id: u32,
+}
+
+impl BtfInfoIter {
+ // Returns Some(next_valid_fd); None when no ids are left
+ fn next_valid_fd(&mut self) -> Option<OwnedFd> {
+ loop {
+ if unsafe { libbpf_sys::bpf_btf_get_next_id(self.cur_id, &mut self.cur_id) } != 0 {
+ return None;
+ }
+
+ let fd = unsafe { libbpf_sys::bpf_btf_get_fd_by_id(self.cur_id) };
+ if fd < 0 {
+ let err = io::Error::last_os_error();
+ if err.kind() == io::ErrorKind::NotFound {
+ continue;
+ }
+ return None;
+ }
+
+ return Some(unsafe { OwnedFd::from_raw_fd(fd) });
+ }
+ }
+}
+
+impl Iterator for BtfInfoIter {
+ type Item = BtfInfo;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let fd = self.next_valid_fd()?;
+
+ let info = BtfInfo::load_from_fd(fd.as_fd());
+
+ match info {
+ Ok(i) => Some(i),
+ // TODO: We should consider bubbling up errors properly.
+ Err(_err) => None,
+ }
+ }
+}
+
+#[derive(Debug, Clone)]
+// TODO: Document members.
+#[allow(missing_docs)]
+pub struct RawTracepointLinkInfo {
+ pub name: String,
+}
+
+#[derive(Debug, Clone)]
+// TODO: Document members.
+#[allow(missing_docs)]
+pub struct TracingLinkInfo {
+ pub attach_type: ProgramAttachType,
+}
+
+#[derive(Debug, Clone)]
+// TODO: Document members.
+#[allow(missing_docs)]
+pub struct CgroupLinkInfo {
+ pub cgroup_id: u64,
+ pub attach_type: ProgramAttachType,
+}
+
+#[derive(Debug, Clone)]
+// TODO: Document members.
+#[allow(missing_docs)]
+pub struct NetNsLinkInfo {
+ pub ino: u32,
+ pub attach_type: ProgramAttachType,
+}
+
+#[derive(Debug, Clone)]
+// TODO: Document variants.
+#[allow(missing_docs)]
+pub enum LinkTypeInfo {
+ RawTracepoint(RawTracepointLinkInfo),
+ Tracing(TracingLinkInfo),
+ Cgroup(CgroupLinkInfo),
+ Iter,
+ NetNs(NetNsLinkInfo),
+ Unknown,
+}
+
+/// Information about a BPF link
+#[derive(Debug, Clone)]
+// TODO: Document members.
+#[allow(missing_docs)]
+pub struct LinkInfo {
+ pub info: LinkTypeInfo,
+ pub id: u32,
+ pub prog_id: u32,
+}
+
+impl LinkInfo {
+ fn from_uapi(fd: BorrowedFd<'_>, mut s: libbpf_sys::bpf_link_info) -> Option<Self> {
+ let type_info = match s.type_ {
+ libbpf_sys::BPF_LINK_TYPE_RAW_TRACEPOINT => {
+ let mut buf = [0u8; 256];
+ s.__bindgen_anon_1.raw_tracepoint.tp_name = buf.as_mut_ptr() as u64;
+ s.__bindgen_anon_1.raw_tracepoint.tp_name_len = buf.len() as u32;
+ let item_ptr: *mut libbpf_sys::bpf_link_info = &mut s;
+ let mut len = size_of_val(&s) as u32;
+
+ let ret = unsafe {
+ libbpf_sys::bpf_obj_get_info_by_fd(
+ fd.as_raw_fd(),
+ item_ptr as *mut c_void,
+ &mut len,
+ )
+ };
+ if ret != 0 {
+ return None;
+ }
+
+ LinkTypeInfo::RawTracepoint(RawTracepointLinkInfo {
+ name: util::c_ptr_to_string(
+ unsafe { s.__bindgen_anon_1.raw_tracepoint.tp_name } as *const c_char,
+ )
+ .unwrap_or_else(|_| "?".to_string()),
+ })
+ }
+ libbpf_sys::BPF_LINK_TYPE_TRACING => LinkTypeInfo::Tracing(TracingLinkInfo {
+ attach_type: ProgramAttachType::from(unsafe {
+ s.__bindgen_anon_1.tracing.attach_type
+ }),
+ }),
+ libbpf_sys::BPF_LINK_TYPE_CGROUP => LinkTypeInfo::Cgroup(CgroupLinkInfo {
+ cgroup_id: unsafe { s.__bindgen_anon_1.cgroup.cgroup_id },
+ attach_type: ProgramAttachType::from(unsafe {
+ s.__bindgen_anon_1.cgroup.attach_type
+ }),
+ }),
+ libbpf_sys::BPF_LINK_TYPE_ITER => LinkTypeInfo::Iter,
+ libbpf_sys::BPF_LINK_TYPE_NETNS => LinkTypeInfo::NetNs(NetNsLinkInfo {
+ ino: unsafe { s.__bindgen_anon_1.netns.netns_ino },
+ attach_type: ProgramAttachType::from(unsafe {
+ s.__bindgen_anon_1.netns.attach_type
+ }),
+ }),
+ _ => LinkTypeInfo::Unknown,
+ };
+
+ Some(Self {
+ info: type_info,
+ id: s.id,
+ prog_id: s.prog_id,
+ })
+ }
+}
+
+gen_info_impl!(
+ /// Iterator that returns [`LinkInfo`]s.
+ LinkInfoIter,
+ LinkInfo,
+ libbpf_sys::bpf_link_info,
+ libbpf_sys::bpf_link_get_next_id,
+ libbpf_sys::bpf_link_get_fd_by_id
+);
diff --git a/src/ringbuf.rs b/src/ringbuf.rs
new file mode 100644
index 0000000..6aacdbe
--- /dev/null
+++ b/src/ringbuf.rs
@@ -0,0 +1,251 @@
+use core::ffi::c_void;
+use std::fmt::Debug;
+use std::fmt::Formatter;
+use std::fmt::Result as FmtResult;
+use std::ops::Deref as _;
+use std::ops::DerefMut as _;
+use std::os::raw::c_ulong;
+use std::os::unix::prelude::AsRawFd;
+use std::os::unix::prelude::BorrowedFd;
+use std::ptr::null_mut;
+use std::ptr::NonNull;
+use std::slice;
+use std::time::Duration;
+
+use crate::util;
+use crate::util::validate_bpf_ret;
+use crate::AsRawLibbpf;
+use crate::Error;
+use crate::ErrorExt as _;
+use crate::MapCore;
+use crate::MapType;
+use crate::Result;
+
+type Cb<'a> = Box<dyn FnMut(&[u8]) -> i32 + 'a>;
+
+struct RingBufferCallback<'a> {
+ cb: Cb<'a>,
+}
+
+impl<'a> RingBufferCallback<'a> {
+ fn new<F>(cb: F) -> Self
+ where
+ F: FnMut(&[u8]) -> i32 + 'a,
+ {
+ RingBufferCallback { cb: Box::new(cb) }
+ }
+}
+
+impl Debug for RingBufferCallback<'_> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+ let Self { cb } = self;
+ f.debug_struct("RingBufferCallback")
+ .field("cb", &(cb.deref() as *const _))
+ .finish()
+ }
+}
+
+/// Builds [`RingBuffer`] instances.
+///
+/// `ringbuf`s are a special kind of [`Map`][crate::Map], used to transfer data
+/// between [`Program`][crate::Program]s and userspace. Since Linux 5.8, the
+/// `ringbuf` map is preferred over the `perf buffer`.
+#[derive(Debug, Default)]
+pub struct RingBufferBuilder<'slf, 'cb> {
+ fd_callbacks: Vec<(BorrowedFd<'slf>, RingBufferCallback<'cb>)>,
+}
+
+impl<'slf, 'cb: 'slf> RingBufferBuilder<'slf, 'cb> {
+ /// Create a new `RingBufferBuilder` object.
+ pub fn new() -> Self {
+ RingBufferBuilder {
+ fd_callbacks: vec![],
+ }
+ }
+
+ /// Add a new ringbuf `map` and associated `callback` to this ring buffer
+ /// manager. The callback should take one argument, a slice of raw bytes,
+ /// and return an i32.
+ ///
+ /// Non-zero return values in the callback will stop ring buffer consumption early.
+ ///
+ /// The callback provides a raw byte slice. You may find libraries such as
+ /// [`plain`](https://crates.io/crates/plain) helpful.
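+ ///
+ /// A minimal sketch, assuming `map` is a `RingBuf` map obtained from a
+ /// loaded object:
+ ///
+ /// ```no_run
+ /// # fn example(map: &libbpf_rs::Map) -> libbpf_rs::Result<()> {
+ /// use std::time::Duration;
+ /// use libbpf_rs::RingBufferBuilder;
+ ///
+ /// let mut builder = RingBufferBuilder::new();
+ /// builder.add(map, |data: &[u8]| {
+ ///     println!("received {} byte(s)", data.len());
+ ///     0 // a non-zero return value stops consumption early
+ /// })?;
+ /// let ringbuf = builder.build()?;
+ /// ringbuf.poll(Duration::from_millis(100))?;
+ /// # Ok(())
+ /// # }
+ /// ```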
+ pub fn add<NewF>(&mut self, map: &'slf dyn MapCore, callback: NewF) -> Result<&mut Self>
+ where
+ NewF: FnMut(&[u8]) -> i32 + 'cb,
+ {
+ if map.map_type() != MapType::RingBuf {
+ return Err(Error::with_invalid_data("Must use a RingBuf map"));
+ }
+ self.fd_callbacks
+ .push((map.as_fd(), RingBufferCallback::new(callback)));
+ Ok(self)
+ }
+
+ /// Build a new [`RingBuffer`]. Must have added at least one ringbuf.
+ pub fn build(self) -> Result<RingBuffer<'cb>> {
+ let mut cbs = vec![];
+ let mut rb_ptr: Option<NonNull<libbpf_sys::ring_buffer>> = None;
+ let c_sample_cb: libbpf_sys::ring_buffer_sample_fn = Some(Self::call_sample_cb);
+
+ for (fd, callback) in self.fd_callbacks {
+ let mut sample_cb = Box::new(callback);
+ match rb_ptr {
+ None => {
+ // Allocate a new ringbuf manager and add a ringbuf to it
+ // SAFETY: All pointers are valid or rightly NULL.
+ // The object referenced by `sample_cb` is
+ // not modified by `libbpf`
+ let ptr = unsafe {
+ libbpf_sys::ring_buffer__new(
+ fd.as_raw_fd(),
+ c_sample_cb,
+ sample_cb.deref_mut() as *mut _ as *mut _,
+ null_mut(),
+ )
+ };
+ let ptr = validate_bpf_ret(ptr).context("failed to create new ring buffer")?;
+ rb_ptr = Some(ptr)
+ }
+ Some(mut ptr) => {
+ // Add a ringbuf to the existing ringbuf manager
+ // SAFETY: All pointers are valid or rightly NULL.
+ // The object referenced by `sample_cb` is
+ // not modified by `libbpf`
+ let err = unsafe {
+ libbpf_sys::ring_buffer__add(
+ ptr.as_ptr(),
+ fd.as_raw_fd(),
+ c_sample_cb,
+ sample_cb.deref_mut() as *mut _ as *mut _,
+ )
+ };
+
+ // Handle errors
+ if err != 0 {
+ // SAFETY: The pointer is valid.
+ let () = unsafe { libbpf_sys::ring_buffer__free(ptr.as_mut()) };
+ return Err(Error::from_raw_os_error(err));
+ }
+ }
+ }
+
+ let () = cbs.push(sample_cb);
+ }
+
+ match rb_ptr {
+ Some(ptr) => Ok(RingBuffer { ptr, _cbs: cbs }),
+ None => Err(Error::with_invalid_data(
+ "You must add at least one ring buffer map and callback before building",
+ )),
+ }
+ }
+
+ unsafe extern "C" fn call_sample_cb(ctx: *mut c_void, data: *mut c_void, size: c_ulong) -> i32 {
+ let callback_struct = ctx as *mut RingBufferCallback<'_>;
+ let callback = unsafe { (*callback_struct).cb.as_mut() };
+ let slice = unsafe { slice::from_raw_parts(data as *const u8, size as usize) };
+
+ callback(slice)
+ }
+}
+
+/// The canonical interface for managing a collection of `ringbuf` maps.
+///
+/// `ringbuf`s are a special kind of [`Map`][crate::Map], used to transfer data
+/// between [`Program`][crate::Program]s and userspace. Since Linux 5.8, the
+/// `ringbuf` map is preferred over the `perf buffer`.
+#[derive(Debug)]
+pub struct RingBuffer<'cb> {
+ ptr: NonNull<libbpf_sys::ring_buffer>,
+ #[allow(clippy::vec_box)]
+ _cbs: Vec<Box<RingBufferCallback<'cb>>>,
+}
+
+impl RingBuffer<'_> {
+ /// Poll from all open ring buffers, calling the registered callback for
+ /// each one. Polls continually until we either run out of events to consume
+ /// or `timeout` is reached. If `timeout` is `Duration::MAX`, this will block
+ /// indefinitely until an event occurs.
+ ///
+ /// Returns the number of events consumed, or a negative value in case of error.
+ pub fn poll_raw(&self, timeout: Duration) -> i32 {
+ let mut timeout_ms = -1;
+ if timeout != Duration::MAX {
+ timeout_ms = timeout.as_millis() as i32;
+ }
+
+ unsafe { libbpf_sys::ring_buffer__poll(self.ptr.as_ptr(), timeout_ms) }
+ }
+
+ /// Poll from all open ring buffers, calling the registered callback for
+ /// each one. Polls continually until we either run out of events to consume
+ /// or `timeout` is reached. If `timeout` is `Duration::MAX`, this will block
+ /// indefinitely until an event occurs.
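+ ///
+ /// # Example
+ ///
+ /// A minimal sketch (assuming `ringbuf` is a built [`RingBuffer`]):
+ ///
+ /// ```no_run
+ /// # fn example(ringbuf: &libbpf_rs::RingBuffer<'_>) -> libbpf_rs::Result<()> {
+ /// use std::time::Duration;
+ ///
+ /// // Process pending events, waiting up to 100 ms for new ones.
+ /// ringbuf.poll(Duration::from_millis(100))?;
+ /// # Ok(())
+ /// # }
+ /// ```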
+ pub fn poll(&self, timeout: Duration) -> Result<()> {
+ let ret = self.poll_raw(timeout);
+
+ util::parse_ret(ret)
+ }
+
+ /// Greedily consume from all open ring buffers, calling the registered
+ /// callback for each one. Consumes continually until we run out of events
+ /// to consume or one of the callbacks returns a non-zero integer.
+ ///
+ /// Returns the number of events consumed, or a negative value in case of error.
+ pub fn consume_raw(&self) -> i32 {
+ unsafe { libbpf_sys::ring_buffer__consume(self.ptr.as_ptr()) }
+ }
+
+ /// Greedily consume from all open ring buffers, calling the registered
+ /// callback for each one. Consumes continually until we run out of events
+ /// to consume or one of the callbacks returns a non-zero integer.
+ pub fn consume(&self) -> Result<()> {
+ let ret = self.consume_raw();
+
+ util::parse_ret(ret)
+ }
+
+ /// Get an fd that can be used to sleep until data is available.
+ pub fn epoll_fd(&self) -> i32 {
+ unsafe { libbpf_sys::ring_buffer__epoll_fd(self.ptr.as_ptr()) }
+ }
+}
+
+impl AsRawLibbpf for RingBuffer<'_> {
+ type LibbpfType = libbpf_sys::ring_buffer;
+
+ /// Retrieve the underlying [`libbpf_sys::ring_buffer`].
+ fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
+ self.ptr
+ }
+}
+
+// SAFETY: `ring_buffer` objects can safely be polled from any thread.
+unsafe impl Send for RingBuffer<'_> {}
+
+impl Drop for RingBuffer<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ libbpf_sys::ring_buffer__free(self.ptr.as_ptr());
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ /// Check that `RingBuffer` is `Send`.
+ #[test]
+ fn ringbuffer_is_send() {
+ fn test<T>()
+ where
+ T: Send,
+ {
+ }
+
+ test::<RingBuffer<'_>>();
+ }
+}
diff --git a/src/skeleton.rs b/src/skeleton.rs
new file mode 100644
index 0000000..dc38edc
--- /dev/null
+++ b/src/skeleton.rs
@@ -0,0 +1,399 @@
+use core::ffi::c_void;
+use std::alloc::alloc_zeroed;
+use std::alloc::dealloc;
+use std::alloc::Layout;
+use std::ffi::CString;
+use std::mem::size_of;
+use std::mem::MaybeUninit;
+use std::os::raw::c_char;
+use std::os::raw::c_ulong;
+use std::ptr;
+use std::ptr::addr_of;
+use std::ptr::NonNull;
+
+use libbpf_sys::bpf_link;
+use libbpf_sys::bpf_map;
+use libbpf_sys::bpf_map_skeleton;
+use libbpf_sys::bpf_object;
+use libbpf_sys::bpf_object_skeleton;
+use libbpf_sys::bpf_prog_skeleton;
+use libbpf_sys::bpf_program;
+
+use crate::error::IntoError as _;
+use crate::util;
+use crate::AsRawLibbpf;
+use crate::Error;
+use crate::Object;
+use crate::ObjectBuilder;
+use crate::OpenObject;
+use crate::Result;
+
+#[derive(Debug)]
+struct MapSkelConfig {
+ name: String,
+ p: Box<*mut bpf_map>,
+ mmaped: Option<Box<*mut c_void>>,
+}
+
+#[derive(Debug)]
+struct ProgSkelConfig {
+ name: String,
+ p: Box<*mut bpf_program>,
+ link: Box<*mut bpf_link>,
+}
+
+#[allow(missing_docs)]
+#[derive(Debug)]
+pub struct ObjectSkeletonConfigBuilder<'dat> {
+ data: &'dat [u8],
+ p: Box<*mut bpf_object>,
+ name: Option<String>,
+ maps: Vec<MapSkelConfig>,
+ progs: Vec<ProgSkelConfig>,
+}
+
+fn str_to_cstring_and_pool(s: &str, pool: &mut Vec<CString>) -> Result<*const c_char> {
+ let cname = util::str_to_cstring(s)?;
+ let p = cname.as_ptr();
+ pool.push(cname);
+
+ Ok(p)
+}
+
+impl<'dat> ObjectSkeletonConfigBuilder<'dat> {
+ /// Construct a new instance
+ ///
+ /// `object_data` is the contents of the `.o` from clang
+ ///
+ /// The builder internally allocates the slot in which the pointer to the
+ /// resulting `libbpf_sys::bpf_object` is stored and later retrieved.
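+ ///
+ /// # Example
+ ///
+ /// A hedged sketch of how generated skeleton code might drive this builder;
+ /// `OBJECT_DATA` and the map/prog names are placeholders:
+ ///
+ /// ```ignore
+ /// let mut builder = ObjectSkeletonConfigBuilder::new(OBJECT_DATA);
+ /// builder
+ ///     .name("example")
+ ///     .map("events", true /* mmaped */)
+ ///     .prog("handle_event");
+ /// let config = builder.build()?;
+ /// ```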
+ pub fn new(object_data: &'dat [u8]) -> Self {
+ Self {
+ data: object_data,
+ p: Box::new(ptr::null_mut()),
+ name: None,
+ maps: Vec::new(),
+ progs: Vec::new(),
+ }
+ }
+
+ #[allow(missing_docs)]
+ pub fn name<T: AsRef<str>>(&mut self, name: T) -> &mut Self {
+ self.name = Some(name.as_ref().to_string());
+ self
+ }
+
+ /// Adds a map to the config
+ ///
+ /// Set `mmaped` to `true` if the map is mmap'able to userspace
+ pub fn map<T: AsRef<str>>(&mut self, name: T, mmaped: bool) -> &mut Self {
+ let m = if mmaped {
+ Some(Box::new(ptr::null_mut()))
+ } else {
+ None
+ };
+
+ self.maps.push(MapSkelConfig {
+ name: name.as_ref().to_string(),
+ p: Box::new(ptr::null_mut()),
+ mmaped: m,
+ });
+
+ self
+ }
+
+ /// Adds a prog to the config
+ pub fn prog<T: AsRef<str>>(&mut self, name: T) -> &mut Self {
+ self.progs.push(ProgSkelConfig {
+ name: name.as_ref().to_string(),
+ p: Box::new(ptr::null_mut()),
+ link: Box::new(ptr::null_mut()),
+ });
+
+ self
+ }
+
+ fn build_maps(
+ maps: &mut [MapSkelConfig],
+ s: &mut bpf_object_skeleton,
+ string_pool: &mut Vec<CString>,
+ ) -> Option<Layout> {
+ if maps.is_empty() {
+ return None;
+ }
+
+ s.map_cnt = maps.len() as i32;
+ s.map_skel_sz = size_of::<bpf_map_skeleton>() as i32;
+
+ let layout = Layout::array::<bpf_map_skeleton>(maps.len())
+ .expect("Failed to allocate memory for maps skeleton");
+
+ unsafe {
+ s.maps = alloc_zeroed(layout) as *mut bpf_map_skeleton;
+ for (i, map) in maps.iter_mut().enumerate() {
+ let current_map = s.maps.add(i);
+
+ // Opt to panic on error here. We've already allocated memory and we'd rather not
+ // leak. Extremely unlikely to have invalid unicode anyway.
+ (*current_map).name = str_to_cstring_and_pool(&map.name, string_pool)
+ .expect("Invalid unicode in map name");
+ (*current_map).map = &mut *map.p;
+ (*current_map).mmaped = if let Some(ref mut mmaped) = map.mmaped {
+ &mut **mmaped
+ } else {
+ ptr::null_mut()
+ };
+ }
+ }
+
+ Some(layout)
+ }
+
+ fn build_progs(
+ progs: &mut [ProgSkelConfig],
+ s: &mut bpf_object_skeleton,
+ string_pool: &mut Vec<CString>,
+ ) -> Option<Layout> {
+ if progs.is_empty() {
+ return None;
+ }
+
+ s.prog_cnt = progs.len() as i32;
+ s.prog_skel_sz = size_of::<bpf_prog_skeleton>() as i32;
+
+ let layout = Layout::array::<bpf_prog_skeleton>(progs.len())
+ .expect("Failed to allocate memory for progs skeleton");
+
+ unsafe {
+ s.progs = alloc_zeroed(layout) as *mut bpf_prog_skeleton;
+ for (i, prog) in progs.iter_mut().enumerate() {
+ let current_prog = s.progs.add(i);
+
+ // See above for `expect()` rationale
+ (*current_prog).name = str_to_cstring_and_pool(&prog.name, string_pool)
+ .expect("Invalid unicode in prog name");
+ (*current_prog).prog = &mut *prog.p;
+ (*current_prog).link = &mut *prog.link;
+ }
+ }
+
+ Some(layout)
+ }
+
+ #[allow(missing_docs)]
+ pub fn build(mut self) -> Result<ObjectSkeletonConfig<'dat>> {
+ // Holds `CString`s alive so pointers to them stay valid
+ let mut string_pool = Vec::new();
+
+ let mut s = libbpf_sys::bpf_object_skeleton {
+ sz: size_of::<bpf_object_skeleton>() as c_ulong,
+ ..Default::default()
+ };
+
+ if let Some(ref n) = self.name {
+ s.name = str_to_cstring_and_pool(n, &mut string_pool)?;
+ }
+
+ // libbpf_sys will use it as const despite the signature
+ s.data = self.data.as_ptr() as *mut c_void;
+ s.data_sz = self.data.len() as c_ulong;
+
+ // Give s ownership over the box
+ s.obj = Box::into_raw(self.p);
+
+ let maps_layout = Self::build_maps(&mut self.maps, &mut s, &mut string_pool);
+ let progs_layout = Self::build_progs(&mut self.progs, &mut s, &mut string_pool);
+
+ Ok(ObjectSkeletonConfig {
+ inner: s,
+ maps: self.maps,
+ progs: self.progs,
+ maps_layout,
+ progs_layout,
+ _data: self.data,
+ _string_pool: string_pool,
+ })
+ }
+}
+
+/// Helper struct that wraps a `libbpf_sys::bpf_object_skeleton`.
+///
+/// This struct will:
+/// * ensure lifetimes are valid for dependencies (pointers, data buffer)
+/// * free any allocated memory on drop
+///
+/// This struct can be moved around at will. Upon drop, all allocated resources will be freed
+#[derive(Debug)]
+pub struct ObjectSkeletonConfig<'dat> {
+ inner: bpf_object_skeleton,
+ maps: Vec<MapSkelConfig>,
+ progs: Vec<ProgSkelConfig>,
+ /// Layout necessary to `dealloc` memory
+ maps_layout: Option<Layout>,
+ /// Same as above
+ progs_layout: Option<Layout>,
+ /// Hold this reference so that compiler guarantees buffer lives as long as us
+ _data: &'dat [u8],
+ /// Hold strings alive so pointers to them stay valid
+ _string_pool: Vec<CString>,
+}
+
+impl ObjectSkeletonConfig<'_> {
+ /// Returns the `mmaped` pointer for a map at the specified `index`.
+ ///
+ /// The index is determined by the order in which the map was passed to
+ /// `ObjectSkeletonConfigBuilder::map`. Index starts at 0.
+ ///
+ /// Warning: the returned pointer is only valid while the `ObjectSkeletonConfig` is alive.
+ pub fn map_mmap_ptr(&self, index: usize) -> Result<*mut c_void> {
+ if index >= self.maps.len() {
+ return Err(Error::with_invalid_data(format!(
+ "Invalid map index: {index}"
+ )));
+ }
+
+ let p = self.maps[index]
+ .mmaped
+ .as_ref()
+ .ok_or_invalid_data(|| "Map does not have mmaped ptr")?;
+ Ok(**p)
+ }
+
+ /// Returns the link pointer for a prog at the specified `index`.
+ ///
+ /// The index is determined by the order in which the prog was passed to
+ /// `ObjectSkeletonConfigBuilder::prog`. Index starts at 0.
+ ///
+ /// Warning: the returned pointer is only valid while the `ObjectSkeletonConfig` is alive.
+ pub fn prog_link_ptr(&self, index: usize) -> Result<*mut bpf_link> {
+ if index >= self.progs.len() {
+ return Err(Error::with_invalid_data(format!(
+ "Invalid prog index: {index}"
+ )));
+ }
+
+ Ok(*self.progs[index].link)
+ }
+}
+
+impl AsRawLibbpf for ObjectSkeletonConfig<'_> {
+ type LibbpfType = libbpf_sys::bpf_object_skeleton;
+
+ /// Retrieve the underlying [`libbpf_sys::bpf_object_skeleton`].
+ fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
+ // SAFETY: A reference is always a valid pointer.
+ unsafe { NonNull::new_unchecked(addr_of!(self.inner).cast_mut()) }
+ }
+}
+
+impl Drop for ObjectSkeletonConfig<'_> {
+ // Note we do *not* run `libbpf_sys::bpf_object__destroy_skeleton` here.
+ //
+ // Couple reasons:
+ //
+ // 1) We did not allocate `libbpf_sys::bpf_object_skeleton` on the heap and
+ // `libbpf_sys::bpf_object__destroy_skeleton` will try to free from heap
+ //
+ // 2) `bpf_object_skeleton` assumes it "owns" the object and everything inside it.
+ // libbpf-cargo's generated skeleton instead gives ownership of the object to
+ // libbpf-rs::*Object. The destructors in libbpf-rs::*Object will know when and how to do
+ // cleanup.
+ fn drop(&mut self) {
+ assert_eq!(self.maps_layout.is_none(), self.inner.maps.is_null());
+ assert_eq!(self.progs_layout.is_none(), self.inner.progs.is_null());
+
+ if let Some(layout) = self.maps_layout {
+ unsafe {
+ dealloc(self.inner.maps as _, layout);
+ }
+ }
+
+ if let Some(layout) = self.progs_layout {
+ unsafe {
+ dealloc(self.inner.progs as _, layout);
+ }
+ }
+
+ let _ = unsafe { Box::from_raw(self.inner.obj) };
+ }
+}
+
+/// A trait for skeleton builders.
+pub trait SkelBuilder<'obj> {
+ /// The type returned when the BPF object is opened; it must implement the
+ /// [`OpenSkel`] trait
+ type Output: OpenSkel<'obj>;
+
+ /// Open eBPF object and return [`OpenSkel`]
+ fn open(self, object: &'obj mut MaybeUninit<OpenObject>) -> Result<Self::Output>;
+
+ /// Open eBPF object with [`libbpf_sys::bpf_object_open_opts`] and return [`OpenSkel`]
+ fn open_opts(
+ self,
+ open_opts: libbpf_sys::bpf_object_open_opts,
+ object: &'obj mut MaybeUninit<OpenObject>,
+ ) -> Result<Self::Output>;
+
+ /// Get a reference to [`ObjectBuilder`]
+ fn object_builder(&self) -> &ObjectBuilder;
+
+ /// Get a mutable reference to [`ObjectBuilder`]
+ fn object_builder_mut(&mut self) -> &mut ObjectBuilder;
+}
+
+/// A trait for opened skeleton.
+///
+/// In addition to the methods defined in this trait, skeletons that implement this trait will also
+/// have bespoke implementations of a few additional methods to facilitate access to global
+/// variables of the BPF program. These methods will be named `bss()`, `data()`, and `rodata()`.
+/// Each corresponds to the variables stored in the BPF ELF program section of the same name.
+/// However, if your BPF program lacks one of these sections, the corresponding Rust method will
+/// not be generated.
+///
+/// The type of the value returned by each of these methods will be specific to your BPF program.
+/// A common convention is to define a single global variable in the BPF program with a struct type
+/// containing a field for each configuration parameter <sup>\[[source]\]</sup>. libbpf-rs
+/// auto-generates this pattern for you without you having to define such a struct type in your BPF
+/// program. It does this by examining each of the global variables in your BPF program's `.bss`,
+/// `.data`, and `.rodata` sections and then creating Rust struct types. Since these struct types
+/// are specific to the layout of your BPF program, they are not documented in this crate. However
+/// you can see documentation for them by running `cargo doc` in your own project and looking at
+/// the `imp` module. You can also view their implementation by looking at the generated skeleton
+/// rust source file. The use of these methods can also be seen in the examples 'capable',
+/// 'runqslower', and 'tproxy'.
+///
+/// If you ever doubt whether libbpf-rs has placed a particular variable in the correct struct
+/// type, you can see which section each global variable is stored in by examining the output of
+/// the following command (after a successful build):
+///
+/// ```sh
+/// bpf-objdump --syms ./target/bpf/*.bpf.o
+/// ```
+///
+/// [source]: https://nakryiko.com/posts/bcc-to-libbpf-howto-guide/#application-configuration
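+///
+/// # Example
+///
+/// A hedged sketch of driving a generated skeleton; `ExampleSkelBuilder` is a
+/// placeholder for whatever type libbpf-cargo generates for your program:
+///
+/// ```ignore
+/// use std::mem::MaybeUninit;
+///
+/// let mut storage = MaybeUninit::uninit();
+/// let open_skel = ExampleSkelBuilder::default().open(&mut storage)?;
+/// // Global variables from the `.rodata` section are reachable through the
+/// // generated accessor described above.
+/// let _config = open_skel.rodata();
+/// let mut skel = open_skel.load()?;
+/// skel.attach()?;
+/// ```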
+pub trait OpenSkel<'obj> {
+ /// The type returned when the BPF object is loaded; it must implement the [`Skel`] trait
+ type Output: Skel<'obj>;
+
+ /// Load BPF object and return [`Skel`].
+ fn load(self) -> Result<Self::Output>;
+
+ /// Get a reference to [`OpenObject`].
+ fn open_object(&self) -> &OpenObject;
+
+ /// Get a mutable reference to [`OpenObject`].
+ fn open_object_mut(&mut self) -> &mut OpenObject;
+}
+
+/// A trait for loaded skeletons.
+pub trait Skel<'obj> {
+ /// Attach BPF object.
+ fn attach(&mut self) -> Result<()> {
+ unimplemented!()
+ }
+ /// Get a reference to [`Object`].
+ fn object(&self) -> &Object;
+
+ /// Get a mutable reference to [`Object`].
+ fn object_mut(&mut self) -> &mut Object;
+}
diff --git a/src/tc.rs b/src/tc.rs
new file mode 100644
index 0000000..fc09bd3
--- /dev/null
+++ b/src/tc.rs
@@ -0,0 +1,323 @@
+use std::io;
+use std::mem::size_of;
+use std::os::unix::io::AsRawFd;
+use std::os::unix::io::BorrowedFd;
+
+use crate::Error;
+use crate::Result;
+
+/// See [`libbpf_sys::bpf_tc_attach_point`].
+pub type TcAttachPoint = libbpf_sys::bpf_tc_attach_point;
+/// See [`libbpf_sys::BPF_TC_INGRESS`].
+pub const TC_INGRESS: TcAttachPoint = libbpf_sys::BPF_TC_INGRESS;
+/// See [`libbpf_sys::BPF_TC_EGRESS`].
+pub const TC_EGRESS: TcAttachPoint = libbpf_sys::BPF_TC_EGRESS;
+/// See [`libbpf_sys::BPF_TC_CUSTOM`].
+pub const TC_CUSTOM: TcAttachPoint = libbpf_sys::BPF_TC_CUSTOM;
+
+pub type TcFlags = libbpf_sys::bpf_tc_flags;
+/// See [`libbpf_sys::BPF_TC_F_REPLACE`].
+pub const BPF_TC_F_REPLACE: TcFlags = libbpf_sys::BPF_TC_F_REPLACE;
+
+// from kernel @ include/uapi/linux/pkt_sched.h
+#[allow(missing_docs)]
+pub const TC_H_INGRESS: u32 = 0xFFFFFFF1;
+#[allow(missing_docs)]
+pub const TC_H_CLSACT: u32 = TC_H_INGRESS;
+#[allow(missing_docs)]
+pub const TC_H_MIN_INGRESS: u32 = 0xFFF2;
+#[allow(missing_docs)]
+pub const TC_H_MIN_EGRESS: u32 = 0xFFF3;
+#[allow(missing_docs)]
+pub const TC_H_MAJ_MASK: u32 = 0xFFFF0000;
+#[allow(missing_docs)]
+pub const TC_H_MIN_MASK: u32 = 0x0000FFFF;
+
+/// Represents a location where a TC-BPF filter can be attached.
+///
+/// The BPF TC subsystem has different control paths from other BPF programs.
+/// As such, a BPF program using a TC hook (`SEC("classifier")` or `SEC("tc")`) must be
+/// operated more independently of other [`Program`][crate::Program]s.
+///
+/// This struct exposes operations to create, attach, query, and destroy
+/// a `bpf_tc_hook` using the TC subsystem.
+///
+/// Documentation about the libbpf TC interface can be found
+/// [here](https://lwn.net/ml/bpf/[email protected]/).
+///
+/// An example of using a BPF TC program can be found
+/// [here](https://github.com/libbpf/libbpf-rs/tree/master/examples/tc_port_whitelist).
+#[derive(Clone, Copy, Debug)]
+pub struct TcHook {
+ hook: libbpf_sys::bpf_tc_hook,
+ opts: libbpf_sys::bpf_tc_opts,
+}
+
+impl TcHook {
+ /// Create a new [`TcHook`] given the file descriptor of the loaded
+ /// `SEC("tc")` [`Program`][crate::Program].
+ pub fn new(fd: BorrowedFd<'_>) -> Self {
+ let mut tc_hook = TcHook {
+ hook: libbpf_sys::bpf_tc_hook::default(),
+ opts: libbpf_sys::bpf_tc_opts::default(),
+ };
+
+ tc_hook.hook.sz = size_of::<libbpf_sys::bpf_tc_hook>() as libbpf_sys::size_t;
+ tc_hook.opts.sz = size_of::<libbpf_sys::bpf_tc_opts>() as libbpf_sys::size_t;
+ tc_hook.opts.prog_fd = fd.as_raw_fd();
+
+ tc_hook
+ }
+
+ /// Create a new [`TcHook`] as well as the underlying qdiscs
+ ///
+ /// If a [`TcHook`] already exists with the same parameters as the hook calling
+ /// [`Self::create()`], this function will still succeed.
+ ///
+ /// Will always fail on a `TC_CUSTOM` hook
+ pub fn create(&mut self) -> Result<Self> {
+ let err = unsafe { libbpf_sys::bpf_tc_hook_create(&mut self.hook as *mut _) };
+ if err != 0 {
+ let err = io::Error::from_raw_os_error(-err);
+ // the hook may already exist, this is not an error
+ if err.kind() == io::ErrorKind::AlreadyExists {
+ Ok(*self)
+ } else {
+ Err(Error::from(err))
+ }
+ } else {
+ Ok(*self)
+ }
+ }
+
+ /// Set the interface to attach to
+ ///
+ /// Interfaces can be listed by using the `ip link` command from the iproute2 software package
+ pub fn ifindex(&mut self, idx: i32) -> &mut Self {
+ self.hook.ifindex = idx;
+ self
+ }
+
+ /// Set what type of TC point to attach onto
+ ///
+ /// `TC_EGRESS`, `TC_INGRESS`, or `TC_CUSTOM`
+ ///
+ /// A `TC_EGRESS|TC_INGRESS` hook can be used as an attach point for calling
+ /// [`Self::destroy()`] to remove the clsact bpf tc qdisc, but cannot be used for an
+ /// [`Self::attach()`] operation
+ pub fn attach_point(&mut self, ap: TcAttachPoint) -> &mut Self {
+ self.hook.attach_point = ap;
+ self
+ }
+
+ /// Set the parent of a hook
+ ///
+ /// Will cause an `EINVAL` upon [`Self::attach()`] if set on a
+ /// `TC_EGRESS/TC_INGRESS/(TC_EGRESS|TC_INGRESS)` hook
+ ///
+ /// Must be set on a `TC_CUSTOM` hook
+ ///
+ /// Current acceptable values are `TC_H_CLSACT` for `maj`, and `TC_H_MIN_EGRESS` or
+ /// `TC_H_MIN_INGRESS` for `min`
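+ ///
+ /// For example, a `TC_CUSTOM` ingress hook would typically use
+ /// `parent(TC_H_CLSACT, TC_H_MIN_INGRESS)`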
+ pub fn parent(&mut self, maj: u32, min: u32) -> &mut Self {
+ /* values from libbpf.h BPF_TC_PARENT() */
+ let parent = (maj & TC_H_MAJ_MASK) | (min & TC_H_MIN_MASK);
+ self.hook.parent = parent;
+ self
+ }
+
+ /// Set whether this hook should replace an existing hook
+ ///
+ /// If `replace` is not `true` upon attach and a hook already exists,
+ /// an `EEXIST` error will be returned from [`Self::attach()`]
+ pub fn replace(&mut self, replace: bool) -> &mut Self {
+ if replace {
+ self.opts.flags = BPF_TC_F_REPLACE;
+ } else {
+ self.opts.flags = 0;
+ }
+ self
+ }
+
+ /// Set the handle of a hook.
+ /// If unset upon attach, the kernel will assign a handle for the hook
+ pub fn handle(&mut self, handle: u32) -> &mut Self {
+ self.opts.handle = handle;
+ self
+ }
+
+ /// Get the handle of a hook.
+ /// Only has meaning after the hook is attached
+ pub fn get_handle(&self) -> u32 {
+ self.opts.handle
+ }
+
+ /// Set the priority of a hook
+ /// If unset upon attach, the kernel will assign a priority for the hook
+ pub fn priority(&mut self, priority: u32) -> &mut Self {
+ self.opts.priority = priority;
+ self
+ }
+
+ /// Get the priority of a hook
+ /// Only has meaning after the hook is attached
+ pub fn get_priority(&self) -> u32 {
+ self.opts.priority
+ }
+
+ /// Query a hook to inspect the program identifier (prog_id)
+ pub fn query(&mut self) -> Result<u32> {
+ let mut opts = self.opts;
+ opts.prog_id = 0;
+ opts.prog_fd = 0;
+ opts.flags = 0;
+
+ let err = unsafe { libbpf_sys::bpf_tc_query(&self.hook as *const _, &mut opts as *mut _) };
+ if err != 0 {
+ Err(Error::from(io::Error::last_os_error()))
+ } else {
+ Ok(opts.prog_id)
+ }
+ }
+
+ /// Attach a filter to the [`TcHook`] so that the program starts processing packets
+ ///
+ /// Once the hook is processing, changing the values will have no effect unless the hook is
+ /// [`Self::attach()`]'d again (`replace=true` being required)
+ ///
+ /// Users can create a second hook by changing the handle, the priority, or the attach_point
+ /// and calling [`Self::attach()`] again. Beware doing this; it might be better to
+ /// copy the [`TcHook`] and change the values on the copied hook for easier [`Self::detach()`]
+ ///
+ /// NOTE: Once a [`TcHook`] is attached, it, and the maps it uses, will outlive the userspace
+ /// application that spawned them. Make sure to detach if this is not desired
+ pub fn attach(&mut self) -> Result<Self> {
+ self.opts.prog_id = 0;
+ let err =
+ unsafe { libbpf_sys::bpf_tc_attach(&self.hook as *const _, &mut self.opts as *mut _) };
+ if err != 0 {
+ Err(Error::from(io::Error::last_os_error()))
+ } else {
+ Ok(*self)
+ }
+ }
+
+ /// Detach a filter from a [`TcHook`]
+ pub fn detach(&mut self) -> Result<()> {
+ let mut opts = self.opts;
+ opts.prog_id = 0;
+ opts.prog_fd = 0;
+ opts.flags = 0;
+
+ let err = unsafe { libbpf_sys::bpf_tc_detach(&self.hook as *const _, &opts as *const _) };
+ if err != 0 {
+ Err(Error::from_raw_os_error(-err))
+ } else {
+ self.opts.prog_id = 0;
+ Ok(())
+ }
+ }
+
+ /// Destroy attached filters
+ ///
+ /// If called on a hook with an attach_point of `TC_EGRESS`, will detach all egress hooks
+ ///
+ /// If called on a hook with an attach_point of `TC_INGRESS`, will detach all ingress hooks
+ ///
+ /// If called on a hook with an attach_point of `TC_EGRESS|TC_INGRESS`, will destroy the clsact
+ /// tc qdisc and detach all hooks
+ ///
+ /// Will error with EOPNOTSUPP if attach_point is `TC_CUSTOM`
+ ///
+ /// It is good practice to query before destroying as the tc qdisc may be used by multiple
+ /// programs
+ pub fn destroy(&mut self) -> Result<()> {
+ let err = unsafe { libbpf_sys::bpf_tc_hook_destroy(&mut self.hook as *mut _) };
+ if err != 0 {
+ Err(Error::from_raw_os_error(-err))
+ } else {
+ Ok(())
+ }
+ }
+}
+
+/// Builds [`TcHook`] instances.
+///
+/// [`TcHookBuilder`] is a way to ergonomically create multiple `TcHook`s,
+/// all with similar initial values.
+///
+/// Once a `TcHook` is created via the [`Self::hook()`] method, the `TcHook`'s values can still
+/// be adjusted before [`TcHook::attach()`] is called.
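+///
+/// # Example
+///
+/// A minimal sketch (assuming `fd` borrows a loaded `SEC("tc")` program and
+/// interface index 1 exists):
+///
+/// ```no_run
+/// # use std::os::unix::io::BorrowedFd;
+/// # fn example(fd: BorrowedFd<'_>) -> libbpf_rs::Result<()> {
+/// use libbpf_rs::TcHookBuilder;
+/// use libbpf_rs::TC_INGRESS;
+///
+/// let mut builder = TcHookBuilder::new(fd);
+/// builder.ifindex(1).replace(true).handle(1).priority(1);
+/// let mut hook = builder.hook(TC_INGRESS);
+/// let _ = hook.create()?;
+/// let _ = hook.attach()?;
+/// # Ok(())
+/// # }
+/// ```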
+#[derive(Debug)]
+pub struct TcHookBuilder<'fd> {
+ fd: BorrowedFd<'fd>,
+ ifindex: i32,
+ parent_maj: u32,
+ parent_min: u32,
+ replace: bool,
+ handle: u32,
+ priority: u32,
+}
+
+impl<'fd> TcHookBuilder<'fd> {
+ /// Create a new `TcHookBuilder` with the given `fd`;
+ /// this fd should come from a loaded [`Program`][crate::Program]
+ pub fn new(fd: BorrowedFd<'fd>) -> Self {
+ TcHookBuilder {
+ fd,
+ ifindex: 0,
+ parent_maj: 0,
+ parent_min: 0,
+ replace: false,
+ handle: 0,
+ priority: 0,
+ }
+ }
+
+ /// Set the initial interface index to attach the hook on
+ pub fn ifindex(&mut self, ifindex: i32) -> &mut Self {
+ self.ifindex = ifindex;
+ self
+ }
+
+ /// Set the initial parent of a hook
+ pub fn parent(&mut self, maj: u32, min: u32) -> &mut Self {
+ self.parent_maj = maj;
+ self.parent_min = min;
+ self
+ }
+
+ /// Set whether created hooks should replace existing hooks
+ pub fn replace(&mut self, replace: bool) -> &mut Self {
+ self.replace = replace;
+ self
+ }
+
+ /// Set the initial handle for a hook
+ pub fn handle(&mut self, handle: u32) -> &mut Self {
+ self.handle = handle;
+ self
+ }
+
+ /// Set the initial priority for a hook
+ pub fn priority(&mut self, priority: u32) -> &mut Self {
+ self.priority = priority;
+ self
+ }
+
+ /// Create a [`TcHook`] given the values previously set
+ ///
+ /// Once a hook is created, the values can still be changed on the `TcHook`
+ /// by calling the `TcHook`'s setter methods
+ pub fn hook(&self, attach_point: TcAttachPoint) -> TcHook {
+ let mut hook = TcHook::new(self.fd);
+ hook.ifindex(self.ifindex)
+ .handle(self.handle)
+ .priority(self.priority)
+ .parent(self.parent_maj, self.parent_min)
+ .replace(self.replace)
+ .attach_point(attach_point);
+
+ hook
+ }
+}
diff --git a/src/user_ringbuf.rs b/src/user_ringbuf.rs
new file mode 100644
index 0000000..094d2e6
--- /dev/null
+++ b/src/user_ringbuf.rs
@@ -0,0 +1,169 @@
+use libc::E2BIG;
+use libc::ENOSPC;
+use std::io;
+use std::ops::Deref;
+use std::ops::DerefMut;
+use std::os::fd::AsRawFd;
+use std::os::raw::c_uint;
+use std::os::raw::c_void;
+use std::ptr::null_mut;
+use std::ptr::NonNull;
+use std::slice::from_raw_parts;
+use std::slice::from_raw_parts_mut;
+
+use crate::AsRawLibbpf;
+use crate::Error;
+use crate::MapCore;
+use crate::MapType;
+use crate::Result;
+
+/// A mutable reference to a sample from a [`UserRingBuffer`].
+///
+/// To write to the sample, dereference with `as_mut()` to get a mutable
+/// reference to the raw byte slice. You may find libraries such as
+/// [`plain`](https://crates.io/crates/plain) helpful to convert between raw
+/// bytes and structs.
+#[derive(Debug)]
+pub struct UserRingBufferSample<'slf> {
+ // A pointer to an 8-byte aligned reserved region of the user ring buffer
+ ptr: NonNull<c_void>,
+
+ // The size of the sample in bytes.
+ size: usize,
+
+ // Reference to the owning ring buffer. This is used to discard the sample
+ // if it is not submitted before being dropped.
+ rb: &'slf UserRingBuffer,
+
+ // Track whether the sample has been submitted.
+ submitted: bool,
+}
+
+impl Deref for UserRingBufferSample<'_> {
+ type Target = [u8];
+
+ fn deref(&self) -> &Self::Target {
+ unsafe { from_raw_parts(self.ptr.as_ptr() as *const u8, self.size) }
+ }
+}
+
+impl DerefMut for UserRingBufferSample<'_> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe { from_raw_parts_mut(self.ptr.as_ptr() as *mut u8, self.size) }
+ }
+}
+
+impl Drop for UserRingBufferSample<'_> {
+ fn drop(&mut self) {
+ // If the sample has not been submitted, explicitly discard it.
+ // This is necessary to avoid leaking ring buffer memory.
+ if !self.submitted {
+ unsafe {
+ libbpf_sys::user_ring_buffer__discard(self.rb.ptr.as_ptr(), self.ptr.as_ptr());
+ }
+ }
+ }
+}
+
+/// Represents a user ring buffer. This is a special kind of map that is used to
+/// transfer data between user space and kernel space.
+#[derive(Debug)]
+pub struct UserRingBuffer {
+ // A non-null pointer to the underlying user ring buffer.
+ ptr: NonNull<libbpf_sys::user_ring_buffer>,
+}
+
+impl UserRingBuffer {
+ /// Create a new user ring buffer from a map.
+ ///
+ /// # Errors
+ /// * If the map is not a user ring buffer.
+ /// * If the underlying libbpf function fails.
+ pub fn new(map: &dyn MapCore) -> Result<Self> {
+ if map.map_type() != MapType::UserRingBuf {
+ return Err(Error::with_invalid_data("must use a UserRingBuf map"));
+ }
+
+ let fd = map.as_fd();
+ let raw_ptr = unsafe { libbpf_sys::user_ring_buffer__new(fd.as_raw_fd(), null_mut()) };
+
+ let ptr = NonNull::new(raw_ptr).ok_or_else(|| {
+ // Safely get the last OS error after a failed call to user_ring_buffer__new
+ io::Error::last_os_error()
+ })?;
+
+ Ok(UserRingBuffer { ptr })
+ }
+
+ /// Reserve a sample in the user ring buffer.
+ ///
+ /// Returns a [`UserRingBufferSample`](UserRingBufferSample<'slf>)
+ /// that contains a mutable reference to a sample that can be written to.
+ /// The sample must be submitted via [`UserRingBuffer::submit`] before it is
+ /// dropped.
+ ///
+ /// # Parameters
+ /// * `size` - The size of the sample in bytes.
+ ///
+ /// This function is *not* thread-safe. It is necessary to synchronize
+ /// amongst multiple producers when invoking this function.
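+ ///
+ /// # Example
+ ///
+ /// A minimal sketch of the reserve/write/submit flow (assuming `urb` is a
+ /// [`UserRingBuffer`] created for a `UserRingBuf` map):
+ ///
+ /// ```no_run
+ /// # fn example(urb: &libbpf_rs::UserRingBuffer) -> libbpf_rs::Result<()> {
+ /// let mut sample = urb.reserve(4)?;
+ /// // `sample` dereferences to a mutable byte slice.
+ /// sample.copy_from_slice(&42u32.to_ne_bytes());
+ /// urb.submit(sample)?;
+ /// # Ok(())
+ /// # }
+ /// ```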
+ pub fn reserve(&self, size: usize) -> Result<UserRingBufferSample<'_>> {
+ let sample_ptr =
+ unsafe { libbpf_sys::user_ring_buffer__reserve(self.ptr.as_ptr(), size as c_uint) };
+
+ let ptr = NonNull::new(sample_ptr).ok_or_else(|| {
+ // Fetch the current value of errno to determine the type of error.
+ let errno = io::Error::last_os_error();
+ match errno.raw_os_error() {
+ Some(E2BIG) => Error::with_invalid_data("requested size is too large"),
+ Some(ENOSPC) => Error::with_invalid_data("not enough space in the ring buffer"),
+ _ => Error::from(errno),
+ }
+ })?;
+
+ Ok(UserRingBufferSample {
+ ptr,
+ size,
+ submitted: false,
+ rb: self,
+ })
+ }
+
+ /// Submit a sample to the user ring buffer.
+ ///
+ /// This function takes ownership of the sample and submits it to the ring
+ /// buffer. After submission, the consumer will be able to read the sample
+ /// from the ring buffer.
+ ///
+ /// This function is thread-safe. It is *not* necessary to synchronize
+ /// amongst multiple producers when invoking this function.
+ pub fn submit(&self, mut sample: UserRingBufferSample<'_>) -> Result<()> {
+ unsafe {
+ libbpf_sys::user_ring_buffer__submit(self.ptr.as_ptr(), sample.ptr.as_ptr());
+ }
+
+ sample.submitted = true;
+
+ // The libbpf API does not return an error code, so we cannot determine
+ // if the submission was successful. Return a `Result` to enable future
+ // validation while maintaining backwards compatibility.
+ Ok(())
+ }
+}
+
+impl AsRawLibbpf for UserRingBuffer {
+ type LibbpfType = libbpf_sys::user_ring_buffer;
+
+ /// Retrieve the underlying [`libbpf_sys::user_ring_buffer`].
+ fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
+ self.ptr
+ }
+}
+
+impl Drop for UserRingBuffer {
+ fn drop(&mut self) {
+ unsafe {
+ libbpf_sys::user_ring_buffer__free(self.ptr.as_ptr());
+ }
+ }
+}
diff --git a/src/util.rs b/src/util.rs
new file mode 100644
index 0000000..c304db5
--- /dev/null
+++ b/src/util.rs
@@ -0,0 +1,168 @@
+use std::ffi::CStr;
+use std::ffi::CString;
+use std::mem::transmute;
+use std::ops::Deref;
+use std::os::raw::c_char;
+use std::path::Path;
+use std::ptr::NonNull;
+use std::sync::OnceLock;
+
+use crate::Error;
+use crate::Result;
+
+pub fn str_to_cstring(s: &str) -> Result<CString> {
+ CString::new(s).map_err(|e| Error::with_invalid_data(e.to_string()))
+}
+
+pub fn path_to_cstring<P: AsRef<Path>>(path: P) -> Result<CString> {
+ let path_str = path.as_ref().to_str().ok_or_else(|| {
+ Error::with_invalid_data(format!("{} is not valid unicode", path.as_ref().display()))
+ })?;
+
+ str_to_cstring(path_str)
+}
+
+pub fn c_ptr_to_string(p: *const c_char) -> Result<String> {
+ if p.is_null() {
+ return Err(Error::with_invalid_data("Null string"));
+ }
+
+ let c_str = unsafe { CStr::from_ptr(p) };
+ Ok(c_str
+ .to_str()
+ .map_err(|e| Error::with_invalid_data(e.to_string()))?
+ .to_owned())
+}
+
+/// Convert a `[c_char]` into a `CStr`.
+pub fn c_char_slice_to_cstr(s: &[c_char]) -> Option<&CStr> {
+ // TODO: Switch to using `CStr::from_bytes_until_nul` once we require
+ // Rust 1.69.0.
+ let nul_idx = s
+ .iter()
+ .enumerate()
+ .find_map(|(idx, b)| (*b == 0).then_some(idx))?;
+ let cstr =
+ // SAFETY: `c_char` and `u8` are both just one byte plain old data
+ // types.
+ CStr::from_bytes_with_nul(unsafe { transmute::<&[c_char], &[u8]>(&s[0..=nul_idx]) })
+ .unwrap();
+ Some(cstr)
+}
+
+/// Round up a number to the next multiple of `r`
+pub fn roundup(num: usize, r: usize) -> usize {
+ ((num + (r - 1)) / r) * r
+}
+
+/// Get the number of CPUs in the system, e.g., to interact with per-cpu maps.
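+///
+/// # Example
+///
+/// A minimal sketch:
+///
+/// ```no_run
+/// let num_cpus = libbpf_rs::num_possible_cpus().unwrap();
+/// assert!(num_cpus > 0);
+/// ```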
+pub fn num_possible_cpus() -> Result<usize> {
+ let ret = unsafe { libbpf_sys::libbpf_num_possible_cpus() };
+ parse_ret(ret).map(|()| ret as usize)
+}
+
+pub fn parse_ret(ret: i32) -> Result<()> {
+ if ret < 0 {
+ // Error code is returned negative, flip to positive to match errno
+ Err(Error::from_raw_os_error(-ret))
+ } else {
+ Ok(())
+ }
+}
+
+pub fn parse_ret_i32(ret: i32) -> Result<i32> {
+ parse_ret(ret).map(|()| ret)
+}
+
+
+/// Check the returned pointer of a `libbpf` call, extracting any
+/// reported errors and converting them.
+pub fn validate_bpf_ret<T>(ptr: *mut T) -> Result<NonNull<T>> {
+ // SAFETY: `libbpf_get_error` is always safe to call.
+ match unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) } {
+ 0 => {
+ debug_assert!(!ptr.is_null());
+ // SAFETY: libbpf guarantees that if NULL is returned an
+ // error is set, so we will always end up with a
+ // valid pointer when `libbpf_get_error` returned 0.
+ let ptr = unsafe { NonNull::new_unchecked(ptr) };
+ Ok(ptr)
+ }
+ err => Err(Error::from_raw_os_error(-err as i32)),
+ }
+}
+
+
+// FIXME: Use `std::sync::LazyLock` once it is stable (https://github.com/rust-lang/rust/issues/109736).
+pub(crate) struct LazyLock<T> {
+ cell: OnceLock<T>,
+ init: fn() -> T,
+}
+
+impl<T> LazyLock<T> {
+ pub const fn new(f: fn() -> T) -> Self {
+ Self {
+ cell: OnceLock::new(),
+ init: f,
+ }
+ }
+}
+
+impl<T> Deref for LazyLock<T> {
+ type Target = T;
+ #[inline]
+ fn deref(&self) -> &T {
+ self.cell.get_or_init(self.init)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_roundup() {
+ for i in 1..=256 {
+ let up = roundup(i, 8);
+ assert!(up % 8 == 0);
+ assert!(i <= up);
+ assert!(up - i < 8);
+ }
+ }
+
+ #[test]
+ fn test_roundup_multiples() {
+ for i in (8..=256).step_by(8) {
+ assert_eq!(roundup(i, 8), i);
+ }
+ }
+
+ #[test]
+ fn test_num_possible_cpus() {
+ let num = num_possible_cpus().unwrap();
+ assert!(num > 0);
+ }
+
+ /// Check that we can convert a `[c_char]` into a `CStr`.
+ #[test]
+ fn c_char_slice_conversion() {
+ let slice = [];
+ assert_eq!(c_char_slice_to_cstr(&slice), None);
+
+ let slice = [0];
+ assert_eq!(
+ c_char_slice_to_cstr(&slice).unwrap(),
+ CStr::from_bytes_with_nul(b"\0").unwrap()
+ );
+
+ let slice = ['a' as _, 'b' as _, 'c' as _, 0 as _];
+ assert_eq!(
+ c_char_slice_to_cstr(&slice).unwrap(),
+ CStr::from_bytes_with_nul(b"abc\0").unwrap()
+ );
+
+ // Missing terminating NUL byte.
+ let slice = ['a' as _, 'b' as _, 'c' as _];
+ assert_eq!(c_char_slice_to_cstr(&slice), None);
+ }
+}
diff --git a/src/xdp.rs b/src/xdp.rs
new file mode 100644
index 0000000..17d8274
--- /dev/null
+++ b/src/xdp.rs
@@ -0,0 +1,110 @@
+use std::mem::size_of;
+use std::os::unix::io::AsRawFd;
+use std::os::unix::io::BorrowedFd;
+
+use bitflags::bitflags;
+
+use crate::util;
+use crate::Result;
+
+bitflags! {
+ /// Flags to configure the `XDP` operations
+ pub struct XdpFlags: u32 {
+ /// No flags.
+ const NONE = 0;
+ /// See [`libbpf_sys::XDP_FLAGS_UPDATE_IF_NOEXIST`].
+ const UPDATE_IF_NOEXIST = libbpf_sys::XDP_FLAGS_UPDATE_IF_NOEXIST as _;
+ /// See [`libbpf_sys::XDP_FLAGS_SKB_MODE`].
+ const SKB_MODE = libbpf_sys::XDP_FLAGS_SKB_MODE as _;
+ /// See [`libbpf_sys::XDP_FLAGS_DRV_MODE`].
+ const DRV_MODE = libbpf_sys::XDP_FLAGS_DRV_MODE as _;
+ /// See [`libbpf_sys::XDP_FLAGS_HW_MODE`].
+ const HW_MODE = libbpf_sys::XDP_FLAGS_HW_MODE as _;
+ /// See [`libbpf_sys::XDP_FLAGS_REPLACE`].
+ const REPLACE = libbpf_sys::XDP_FLAGS_REPLACE as _;
+ /// See [`libbpf_sys::XDP_FLAGS_MODES`].
+ const MODES = libbpf_sys::XDP_FLAGS_MODES as _;
+ /// See [`libbpf_sys::XDP_FLAGS_MASK`].
+ const MASK = libbpf_sys::XDP_FLAGS_MASK as _;
+ }
+
+}
+
+/// Represents an XDP program.
+///
+/// This struct exposes operations to attach, detach, and query an XDP program
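+///
+/// # Example
+///
+/// A minimal sketch (assuming `fd` borrows a loaded `SEC("xdp")` program and
+/// interface index 1 exists):
+///
+/// ```no_run
+/// # use std::os::unix::io::BorrowedFd;
+/// # fn example(fd: BorrowedFd<'_>) -> libbpf_rs::Result<()> {
+/// use libbpf_rs::Xdp;
+/// use libbpf_rs::XdpFlags;
+///
+/// let xdp = Xdp::new(fd);
+/// xdp.attach(1, XdpFlags::UPDATE_IF_NOEXIST)?;
+/// xdp.detach(1, XdpFlags::UPDATE_IF_NOEXIST)?;
+/// # Ok(())
+/// # }
+/// ```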
+#[derive(Debug)]
+pub struct Xdp<'fd> {
+ fd: BorrowedFd<'fd>,
+ attach_opts: libbpf_sys::bpf_xdp_attach_opts,
+ query_opts: libbpf_sys::bpf_xdp_query_opts,
+}
+
+impl<'fd> Xdp<'fd> {
+ /// Create a new XDP instance with the given file descriptor of the
+ /// `SEC("xdp")` [`Program`][crate::Program].
+ pub fn new(fd: BorrowedFd<'fd>) -> Self {
+ let mut xdp = Xdp {
+ fd,
+ attach_opts: libbpf_sys::bpf_xdp_attach_opts::default(),
+ query_opts: libbpf_sys::bpf_xdp_query_opts::default(),
+ };
+ xdp.attach_opts.sz = size_of::<libbpf_sys::bpf_xdp_attach_opts>() as libbpf_sys::size_t;
+ xdp.query_opts.sz = size_of::<libbpf_sys::bpf_xdp_query_opts>() as libbpf_sys::size_t;
+ xdp
+ }
+
+ /// Attach the XDP program to the given interface to start processing the
+ /// packets
+ ///
+ /// # Notes
+ /// Once a program is attached, it will outlive the userspace program. Make
+ /// sure to detach the program if it's not desired.
+ pub fn attach(&self, ifindex: i32, flags: XdpFlags) -> Result<()> {
+ let ret = unsafe {
+ libbpf_sys::bpf_xdp_attach(
+ ifindex,
+ self.fd.as_raw_fd(),
+ flags.bits(),
+ &self.attach_opts,
+ )
+ };
+ util::parse_ret(ret)
+ }
+
+ /// Detach the XDP program from the interface
+ pub fn detach(&self, ifindex: i32, flags: XdpFlags) -> Result<()> {
+ let ret = unsafe { libbpf_sys::bpf_xdp_detach(ifindex, flags.bits(), &self.attach_opts) };
+ util::parse_ret(ret)
+ }
+
+ /// Query to inspect the program
+ pub fn query(&self, ifindex: i32, flags: XdpFlags) -> Result<libbpf_sys::bpf_xdp_query_opts> {
+ let mut opts = self.query_opts;
+ let err = unsafe { libbpf_sys::bpf_xdp_query(ifindex, flags.bits() as i32, &mut opts) };
+ util::parse_ret(err).map(|()| opts)
+ }
+
+ /// Query to inspect the program identifier (prog_id)
+ pub fn query_id(&self, ifindex: i32, flags: XdpFlags) -> Result<u32> {
+ let mut prog_id = 0;
+ let err =
+ unsafe { libbpf_sys::bpf_xdp_query_id(ifindex, flags.bits() as i32, &mut prog_id) };
+ util::parse_ret(err).map(|()| prog_id)
+ }
+
+ /// Replace an existing XDP program (identified by `old_prog_fd`) with this XDP program
+ pub fn replace(&self, ifindex: i32, old_prog_fd: BorrowedFd<'_>) -> Result<()> {
+ let mut opts = self.attach_opts;
+ opts.old_prog_fd = old_prog_fd.as_raw_fd();
+ let ret = unsafe {
+ libbpf_sys::bpf_xdp_attach(
+ ifindex,
+ self.fd.as_raw_fd(),
+ XdpFlags::REPLACE.bits(),
+ &opts,
+ )
+ };
+ util::parse_ret(ret)
+ }
+}
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 0000000..3fc0b7b
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,56 @@
+# libbpf-rs tests
+
+libbpf-rs tests are designed to be independent of libbpf-cargo and underlying
+compiler versions. To that end, we check in pre-compiled bpf object files in
+`libbpf-rs/tests/bin`. To help with writing new tests, the original source
+code for the pre-compiled objects are placed in `libbpf-rs/tests/bin/src`.
+
+To regenerate the test bpf object files,
+run the bpf_object_regen.sh script via the command:
+$ ./bpf_object_regen.sh
+
+The bpf_object_regen.sh script depends on the following packages being installed:
+
+bash
+bpftool (optional)
+clang
+libbpf
+
+Installation Instructions for common distributions
+
+Ubuntu 21.10+: (should work with 20.10+ (untested), 20.04 will not work!!)
+required:
+$ apt install bash clang libbpf-dev
+optional:
+$ apt install linux-tools-generic
+Note: bin/src/runqslower.bpf.c requires a vmlinux.h generated from kernel 5.14+
+
+Debian 11+:
+required:
+$ apt install bash clang libbpf-dev
+optional:
+$ apt install bpftool
+Note: bin/src/runqslower.bpf.c requires a vmlinux.h generated from kernel 5.14+
+Note: requires running with
+$ PATH=$PATH:/usr/sbin/ ./bpf_object_regen.sh -b ...
+
+Arch Linux: (tested as of 2021/12/16)
+required:
+$ pacman -S bash clang libbpf
+optional:
+$ pacman -S bpf
+
+Fedora 35+, Centos Stream 9: (should work with Fedora 34 (untested), RHEL 9 (untested))
+required:
+$ dnf install bash clang libbpf-devel
+optional:
+$ dnf install bpftool
+
+Alma Linux 8.5+: (should work with Centos-Stream-8 (untested) and derivatives, e.g. RHEL 8.5 (untested))
+required:
+$ dnf install epel-release
+$ dnf --enablerepo=powertools install bash clang libbpf-devel
+optional:
+$ dnf install bpftool
+Note: bin/src/runqslower.bpf.c requires a vmlinux.h generated from kernel 5.14+
+
diff --git a/tests/bin/src/ksyscall.bpf.c b/tests/bin/src/ksyscall.bpf.c
new file mode 100644
index 0000000..de80a2d
--- /dev/null
+++ b/tests/bin/src/ksyscall.bpf.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096 /* one page */);
+} ringbuf SEC(".maps");
+
+SEC("ksyscall/kill")
+int handle__ksyscall(pid_t pid, int sig) {
+ int *value;
+
+ value = bpf_ringbuf_reserve(&ringbuf, sizeof(int), 0);
+ if (!value) {
+ bpf_printk("handle__ksyscall: failed to reserve ring buffer space");
+ return 1;
+ }
+
+ *value = 1;
+ bpf_ringbuf_submit(value, 0);
+ bpf_printk("handle__ksyscall: submitted ringbuf value");
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tests/bin/src/map_auto_pin.bpf.c b/tests/bin/src/map_auto_pin.bpf.c
new file mode 100644
index 0000000..c0edf92
--- /dev/null
+++ b/tests/bin/src/map_auto_pin.bpf.c
@@ -0,0 +1,16 @@
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u32));
+ __uint(max_entries, 1);
+ __uint(pinning, LIBBPF_PIN_BY_NAME);
+} auto_pin_map SEC(".maps");
+
+u64 resizable_data[1] SEC(".data.resizable_data");
+
+char _license[] SEC("license") = "GPL";
diff --git a/tests/bin/src/mapiter.bpf.c b/tests/bin/src/mapiter.bpf.c
new file mode 100644
index 0000000..605d62b
--- /dev/null
+++ b/tests/bin/src/mapiter.bpf.c
@@ -0,0 +1,30 @@
+#include "vmlinux.h"
+
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("iter/bpf_map_elem")
+int map_iter(struct bpf_iter__bpf_map_elem *ctx) {
+ struct seq_file *seq = ctx->meta->seq;
+ __u32 seq_num = ctx->meta->seq_num;
+ struct bpf_map *map = ctx->map;
+ __u32 *key = ctx->key;
+ __u64 *value = ctx->value;
+ __u32 tmp_key = 0;
+ __u64 tmp_val = 0;
+
+ if (seq_num == 0) {
+ bpf_printk("map dump starts");
+ }
+
+ if (key == (void *)0 || value == (void *)0) {
+ bpf_printk("map dump end");
+ return 0;
+ }
+
+ bpf_printk("test map iter, target map: %s, key: %d", map->name, (*key));
+ bpf_seq_write(seq, key, sizeof(__u32));
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tests/bin/src/percpu_map.bpf.c b/tests/bin/src/percpu_map.bpf.c
new file mode 100644
index 0000000..14c49eb
--- /dev/null
+++ b/tests/bin/src/percpu_map.bpf.c
@@ -0,0 +1,13 @@
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u32));
+ __uint(max_entries, 1);
+} percpu_map SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
diff --git a/tests/bin/src/ringbuf.bpf.c b/tests/bin/src/ringbuf.bpf.c
new file mode 100644
index 0000000..dcbbec3
--- /dev/null
+++ b/tests/bin/src/ringbuf.bpf.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2021 William Findlay
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096 /* one page */);
+} ringbuf1 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096 /* one page */);
+} ringbuf2 SEC(".maps");
+
+SEC("tp/syscalls/sys_enter_getpid")
+int handle__sys_enter_getpid(void *ctx)
+{
+ int *value;
+
+ value = bpf_ringbuf_reserve(&ringbuf1, sizeof(int), 0);
+ if (value) {
+ *value = 1;
+ bpf_ringbuf_submit(value, 0);
+ }
+
+ value = bpf_ringbuf_reserve(&ringbuf2, sizeof(int), 0);
+ if (value) {
+ *value = 2;
+ bpf_ringbuf_submit(value, 0);
+ }
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tests/bin/src/run_prog.bpf.c b/tests/bin/src/run_prog.bpf.c
new file mode 100644
index 0000000..4fe224f
--- /dev/null
+++ b/tests/bin/src/run_prog.bpf.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1, struct bpf_dummy_ops_state *state)
+{
+ int ret;
+
+ if (!state)
+ return 0xf2f3f4f5;
+
+ ret = state->val;
+ state->val = 0x5a;
+ return ret;
+}
+
+__u64 test_2_args[5];
+
+SEC("struct_ops/test_2")
+int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
+ char a3, unsigned long a4)
+{
+ test_2_args[0] = (unsigned long)state;
+ test_2_args[1] = a1;
+ test_2_args[2] = a2;
+ test_2_args[3] = a3;
+ test_2_args[4] = a4;
+ return 0;
+}
+
+SEC(".struct_ops")
+struct bpf_dummy_ops dummy_1 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2,
+};
diff --git a/tests/bin/src/runqslower.bpf.c b/tests/bin/src/runqslower.bpf.c
new file mode 100644
index 0000000..cf821da
--- /dev/null
+++ b/tests/bin/src/runqslower.bpf.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include "vmlinux.h"
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+#include "runqslower.h"
+
+#define TASK_RUNNING 0
+
+const volatile __u64 min_us = 0;
+const volatile pid_t targ_pid = 0;
+const volatile pid_t targ_tgid = 0;
+
+// Dummy instance to get skeleton to generate definition for `struct event`
+struct event _event = {0};
+
+// Kernel 5.14 changed the state field to __state
+struct task_struct___pre_5_14 {
+ long int state;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 10240);
+ __type(key, u32);
+ __type(value, u64);
+} start SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u32));
+} events SEC(".maps");
+
+/* record enqueue timestamp */
+static __always_inline
+int trace_enqueue(u32 tgid, u32 pid)
+{
+ u64 ts;
+
+ if (!pid)
+ return 0;
+ if (targ_tgid && targ_tgid != tgid)
+ return 0;
+ if (targ_pid && targ_pid != pid)
+ return 0;
+
+ ts = bpf_ktime_get_ns();
+ bpf_map_update_elem(&start, &pid, &ts, 0);
+ return 0;
+}
+
+SEC("tp_btf/sched_wakeup")
+int handle__sched_wakeup(u64 *ctx)
+{
+ /* TP_PROTO(struct task_struct *p) */
+ struct task_struct *p = (void *)ctx[0];
+
+ return trace_enqueue(p->tgid, p->pid);
+}
+
+SEC("tp_btf/sched_wakeup_new")
+int handle__sched_wakeup_new(u64 *ctx)
+{
+ /* TP_PROTO(struct task_struct *p) */
+ struct task_struct *p = (void *)ctx[0];
+
+ return trace_enqueue(p->tgid, p->pid);
+}
+
+static inline long get_task_state(struct task_struct *t)
+{
+ if (bpf_core_field_exists(t->__state))
+ return t->__state;
+
+ return ((struct task_struct___pre_5_14*)t)->state;
+}
+
+SEC("tp_btf/sched_switch")
+int handle__sched_switch(u64 *ctx)
+{
+ /* TP_PROTO(bool preempt, struct task_struct *prev,
+ * struct task_struct *next)
+ */
+ struct task_struct *prev = (struct task_struct *)ctx[1];
+ struct task_struct *next = (struct task_struct *)ctx[2];
+ struct event event = {};
+ u64 *tsp, delta_us;
+ long state = get_task_state(prev);
+ u32 pid;
+
+ /* ivcsw: treat like an enqueue event and store timestamp */
+ if (state == TASK_RUNNING)
+ trace_enqueue(prev->tgid, prev->pid);
+
+ pid = next->pid;
+
+ /* fetch timestamp and calculate delta */
+ tsp = bpf_map_lookup_elem(&start, &pid);
+ if (!tsp)
+ return 0; /* missed enqueue */
+
+ delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
+ if (min_us && delta_us <= min_us)
+ return 0;
+
+ event.pid = pid;
+ event.delta_us = delta_us;
+ bpf_probe_read_kernel_str(&event.task, sizeof(event.task), next->comm);
+
+ /* output */
+ bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
+ &event, sizeof(event));
+
+ bpf_map_delete_elem(&start, &pid);
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tests/bin/src/runqslower.h b/tests/bin/src/runqslower.h
new file mode 100644
index 0000000..dee1a97
--- /dev/null
+++ b/tests/bin/src/runqslower.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __RUNQSLOWER_H
+#define __RUNQSLOWER_H
+
+#include "vmlinux.h"
+
+#define TASK_COMM_LEN 16
+
+struct event {
+ u8 task[TASK_COMM_LEN];
+ __u64 delta_us;
+ pid_t pid;
+};
+
+#endif /* __RUNQSLOWER_H */
diff --git a/tests/bin/src/taskiter.bpf.c b/tests/bin/src/taskiter.bpf.c
new file mode 100644
index 0000000..0deac31
--- /dev/null
+++ b/tests/bin/src/taskiter.bpf.c
@@ -0,0 +1,31 @@
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+struct index_pid_pair {
+ uint32_t i;
+ pid_t pid;
+};
+
+static uint32_t i = 0;
+
+SEC("iter/task")
+int dump_pid(struct bpf_iter__task *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct task_struct *task = ctx->task;
+ struct index_pid_pair p;
+
+ if (!task)
+ return 0;
+
+ p.i = i++;
+ p.pid = task->tgid;
+
+ bpf_seq_write(seq, &p, sizeof(p));
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+
diff --git a/tests/bin/src/tc-unit.bpf.c b/tests/bin/src/tc-unit.bpf.c
new file mode 100644
index 0000000..0756c9c
--- /dev/null
+++ b/tests/bin/src/tc-unit.bpf.c
@@ -0,0 +1,12 @@
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+//#include <linux/pkt_cls.h>
+#define TC_ACT_UNSPEC (-1)
+
+SEC("tc")
+int handle_tc(struct __sk_buff *skb)
+{
+ return TC_ACT_UNSPEC;
+}
+
diff --git a/tests/bin/src/tracepoint.bpf.c b/tests/bin/src/tracepoint.bpf.c
new file mode 100644
index 0000000..0991b9b
--- /dev/null
+++ b/tests/bin/src/tracepoint.bpf.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096 /* one page */);
+} ringbuf SEC(".maps");
+
+SEC("tracepoint/syscalls/sys_enter_getpid")
+int handle__tracepoint(void *ctx)
+{
+ int *value;
+
+ value = bpf_ringbuf_reserve(&ringbuf, sizeof(int), 0);
+ if (!value) {
+ bpf_printk("handle__tracepoint: failed to reserve ring buffer space");
+ return 1;
+ }
+
+ *value = 1;
+ bpf_ringbuf_submit(value, 0);
+ bpf_printk("handle__tracepoint: submitted ringbuf value");
+ return 0;
+}
+
+SEC("tracepoint/syscalls/sys_enter_getpid")
+int handle__tracepoint_with_cookie(void *ctx)
+{
+ int *value;
+
+ value = bpf_ringbuf_reserve(&ringbuf, sizeof(int), 0);
+ if (!value) {
+ bpf_printk("handle__tracepoint_with_cookie: failed to reserve ring buffer space");
+ return 1;
+ }
+
+ *value = bpf_get_attach_cookie(ctx);
+ bpf_printk("handle__tracepoint_with_cookie: cookie=%d", *value);
+ bpf_ringbuf_submit(value, 0);
+ return 0;
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+} pb SEC(".maps");
+
+SEC("tracepoint/syscalls/sys_enter_getpid")
+int handle__tracepoint_with_cookie_pb(void *ctx)
+{
+ int value = bpf_get_attach_cookie(ctx);
+ bpf_perf_event_output(ctx, &pb, BPF_F_CURRENT_CPU, &value, sizeof(value));
+
+ return 0;
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_QUEUE);
+ __uint(max_entries, 10);
+ __uint(key, 0);
+ __type(value, __u32);
+} queue SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK);
+ __uint(max_entries, 10);
+ __uint(key, 0);
+ __type(value, __u32);
+} stack SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
+ __uint(max_entries, 5);
+ __type(value, __u32);
+} bloom_filter SEC(".maps");
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tests/bin/src/uprobe.bpf.c b/tests/bin/src/uprobe.bpf.c
new file mode 100644
index 0000000..0e21141
--- /dev/null
+++ b/tests/bin/src/uprobe.bpf.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/usdt.bpf.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096 /* one page */);
+} ringbuf SEC(".maps");
+
+SEC("uprobe")
+int handle__uprobe(void *ctx)
+{
+ int *value;
+
+ value = bpf_ringbuf_reserve(&ringbuf, sizeof(int), 0);
+ if (!value) {
+ bpf_printk("handle__uprobe: failed to reserve ring buffer space");
+ return 1;
+ }
+
+ *value = 1;
+ bpf_ringbuf_submit(value, 0);
+ bpf_printk("handle__uprobe: submitted ringbuf value");
+ return 0;
+}
+
+SEC("uprobe")
+int handle__uprobe_with_cookie(void *ctx)
+{
+ int *value;
+
+ value = bpf_ringbuf_reserve(&ringbuf, sizeof(int), 0);
+ if (!value) {
+ bpf_printk("handle__uprobe_with_cookie: failed to reserve ring buffer space");
+ return 1;
+ }
+
+ *value = bpf_get_attach_cookie(ctx);
+ bpf_printk("handle__uprobe_with_cookie: cookie=%d", *value);
+ bpf_ringbuf_submit(value, 0);
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tests/bin/src/usdt.bpf.c b/tests/bin/src/usdt.bpf.c
new file mode 100644
index 0000000..e2c12b6
--- /dev/null
+++ b/tests/bin/src/usdt.bpf.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2021 William Findlay
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/usdt.bpf.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096 /* one page */);
+} ringbuf SEC(".maps");
+
+SEC("usdt")
+int handle__usdt(void *ctx)
+{
+ int *value;
+
+ value = bpf_ringbuf_reserve(&ringbuf, sizeof(int), 0);
+ if (!value) {
+ bpf_printk("handle__usdt: failed to reserve ring buffer space");
+ return 1;
+ }
+
+ *value = 1;
+ bpf_ringbuf_submit(value, 0);
+ bpf_printk("handle__usdt: submitted ringbuf value");
+ return 0;
+}
+
+SEC("usdt")
+int handle__usdt_with_cookie(void *ctx)
+{
+ int *value;
+
+ value = bpf_ringbuf_reserve(&ringbuf, sizeof(int), 0);
+ if (!value) {
+ bpf_printk("handle__usdt_with_cookie: failed to reserve ring buffer space");
+ return 1;
+ }
+
+ *value = bpf_usdt_cookie(ctx);
+ bpf_printk("handle__usdt_with_cookie: cookie=%d", *value);
+ bpf_ringbuf_submit(value, 0);
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tests/bin/src/user_ringbuf.bpf.c b/tests/bin/src/user_ringbuf.bpf.c
new file mode 100644
index 0000000..30d8fde
--- /dev/null
+++ b/tests/bin/src/user_ringbuf.bpf.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Jose Fernandez
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
+ __uint(max_entries, 4096 /* one page */);
+} user_ringbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, u32);
+ __uint(max_entries, 100);
+} samples SEC(".maps");
+
+struct my_struct_t {
+ u32 key;
+ u32 value;
+};
+
+static long user_ringbuf_callback(struct bpf_dynptr *dynptr, void *context)
+{
+ const struct my_struct_t *data;
+
+ data = bpf_dynptr_data(dynptr, 0, sizeof(*data));
+ if (!data)
+ return 0;
+
+ bpf_map_update_elem(&samples, &data->key, &data->value, BPF_ANY);
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getpid")
+int handle__sys_enter_getpid(void *ctx)
+{
+ bpf_user_ringbuf_drain(&user_ringbuf, user_ringbuf_callback, NULL, 0);
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tests/bin/src/xdp.bpf.c b/tests/bin/src/xdp.bpf.c
new file mode 100644
index 0000000..3bc7308
--- /dev/null
+++ b/tests/bin/src/xdp.bpf.c
@@ -0,0 +1,7 @@
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+SEC("xdp")
+int xdp_filter(struct xdp_md *ctx) {
+ return XDP_PASS;
+}
\ No newline at end of file
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
new file mode 100644
index 0000000..fe3eca2
--- /dev/null
+++ b/tests/common/mod.rs
@@ -0,0 +1,77 @@
+use std::io;
+use std::path::PathBuf;
+
+use libbpf_rs::Map;
+use libbpf_rs::MapCore;
+use libbpf_rs::MapMut;
+use libbpf_rs::Object;
+use libbpf_rs::ObjectBuilder;
+use libbpf_rs::OpenObject;
+use libbpf_rs::ProgramMut;
+
+
+pub fn get_test_object_path(filename: &str) -> PathBuf {
+ let mut path = PathBuf::new();
+ // env!() macro fails at compile time if var not found
+ path.push(env!("CARGO_MANIFEST_DIR"));
+ path.push("tests/bin");
+ path.push(filename);
+ path
+}
+
+pub fn open_test_object(filename: &str) -> OpenObject {
+ let obj_path = get_test_object_path(filename);
+ let obj = ObjectBuilder::default()
+ .debug(true)
+ .open_file(obj_path)
+ .expect("failed to open object");
+ obj
+}
+
+pub fn bump_rlimit_mlock() {
+ let rlimit = libc::rlimit {
+ rlim_cur: 128 << 20,
+ rlim_max: 128 << 20,
+ };
+
+ let ret = unsafe { libc::setrlimit(libc::RLIMIT_MEMLOCK, &rlimit) };
+ assert_eq!(
+ ret,
+ 0,
+ "Setting RLIMIT_MEMLOCK failed with errno: {}",
+ io::Error::last_os_error()
+ );
+}
+
+pub fn get_test_object(filename: &str) -> Object {
+ open_test_object(filename)
+ .load()
+ .expect("failed to load object")
+}
+
+/// Find the BPF map with the given name, panic if it does not exist.
+#[track_caller]
+pub fn get_map<'obj>(object: &'obj Object, name: &str) -> Map<'obj> {
+ object
+ .maps()
+ .find(|map| map.name() == name)
+ .unwrap_or_else(|| panic!("failed to find map `{name}`"))
+}
+
+/// Find the BPF map with the given name, panic if it does not exist.
+#[track_caller]
+pub fn get_map_mut<'obj>(object: &'obj mut Object, name: &str) -> MapMut<'obj> {
+ object
+ .maps_mut()
+ .find(|map| map.name() == name)
+ .unwrap_or_else(|| panic!("failed to find map `{name}`"))
+}
+
+/// Find the BPF program with the given name, panicking if it does not exist.
+#[track_caller]
+pub fn get_prog_mut<'obj>(object: &'obj mut Object, name: &str) -> ProgramMut<'obj> {
+ object
+ .progs_mut()
+ .find(|prog| prog.name() == name)
+ .unwrap_or_else(|| panic!("failed to find program `{name}`"))
+}
diff --git a/tests/test.rs b/tests/test.rs
new file mode 100644
index 0000000..6b6d2a8
--- /dev/null
+++ b/tests/test.rs
@@ -0,0 +1,2020 @@
+#![allow(clippy::let_unit_value)]
+#![warn(clippy::absolute_paths)]
+
+mod common;
+
+use std::collections::HashSet;
+use std::env::current_exe;
+use std::ffi::c_int;
+use std::ffi::c_void;
+use std::ffi::OsStr;
+use std::fs;
+use std::hint;
+use std::io;
+use std::io::Read;
+use std::mem::size_of;
+use std::mem::size_of_val;
+use std::os::unix::io::AsFd;
+use std::path::Path;
+use std::path::PathBuf;
+use std::ptr;
+use std::ptr::addr_of;
+use std::slice;
+use std::sync::mpsc::channel;
+use std::time::Duration;
+
+use libbpf_rs::num_possible_cpus;
+use libbpf_rs::AsRawLibbpf;
+use libbpf_rs::Iter;
+use libbpf_rs::Linker;
+use libbpf_rs::Map;
+use libbpf_rs::MapCore;
+use libbpf_rs::MapFlags;
+use libbpf_rs::MapHandle;
+use libbpf_rs::MapInfo;
+use libbpf_rs::MapType;
+use libbpf_rs::Object;
+use libbpf_rs::ObjectBuilder;
+use libbpf_rs::Program;
+use libbpf_rs::ProgramInput;
+use libbpf_rs::ProgramType;
+use libbpf_rs::TracepointOpts;
+use libbpf_rs::UprobeOpts;
+use libbpf_rs::UsdtOpts;
+use libbpf_rs::UserRingBuffer;
+use plain::Plain;
+use probe::probe;
+use scopeguard::defer;
+use tempfile::NamedTempFile;
+use test_tag::tag;
+
+use crate::common::bump_rlimit_mlock;
+use crate::common::get_map;
+use crate::common::get_map_mut;
+use crate::common::get_prog_mut;
+use crate::common::get_test_object;
+use crate::common::get_test_object_path;
+use crate::common::open_test_object;
+
+
+/// A helper function for instantiating a `RingBuffer` with a callback meant to
+/// be invoked when `action` is executed and that is intended to trigger a write
+/// to said `RingBuffer` from kernel space, which then reads a single `i32` from
+/// this buffer from user space and returns it.
+fn with_ringbuffer<F>(map: &Map, action: F) -> i32
+where
+ F: FnOnce(),
+{
+ let mut value = 0i32;
+ {
+ let callback = |data: &[u8]| {
+ plain::copy_from_bytes(&mut value, data).expect("Wrong size");
+ 0
+ };
+
+ let mut builder = libbpf_rs::RingBufferBuilder::new();
+ builder.add(map, callback).expect("failed to add ringbuf");
+ let mgr = builder.build().expect("failed to build");
+
+ action();
+ mgr.consume().expect("failed to consume ringbuf");
+ }
+
+ value
+}
+
+#[tag(root)]
+#[test]
+fn test_object_build_and_load() {
+ bump_rlimit_mlock();
+
+ get_test_object("runqslower.bpf.o");
+}
+
+#[test]
+fn test_object_build_from_memory() {
+ let obj_path = get_test_object_path("runqslower.bpf.o");
+ let contents = fs::read(obj_path).expect("failed to read object file");
+ let mut builder = ObjectBuilder::default();
+ let obj = builder
+ .name("memory name")
+ .unwrap()
+ .open_memory(&contents)
+ .expect("failed to build object");
+ let name = obj.name().expect("failed to get object name");
+ assert!(name == "memory name");
+
+ let obj = unsafe { Object::from_ptr(obj.take_ptr()) };
+ let name = obj.name().expect("failed to get object name");
+ assert!(name == "memory name");
+}
+
+#[test]
+fn test_object_build_from_memory_empty_name() {
+ let obj_path = get_test_object_path("runqslower.bpf.o");
+ let contents = fs::read(obj_path).expect("failed to read object file");
+ let mut builder = ObjectBuilder::default();
+ let obj = builder
+ .name("")
+ .unwrap()
+ .open_memory(&contents)
+ .expect("failed to build object");
+ let name = obj.name().expect("failed to get object name");
+ assert!(name.is_empty());
+
+ let obj = unsafe { Object::from_ptr(obj.take_ptr()) };
+ let name = obj.name().expect("failed to get object name");
+ assert!(name.is_empty());
+}
+
+/// Check that loading an object from an empty file fails as expected.
+#[tag(root)]
+#[test]
+fn test_object_load_invalid() {
+ let empty_file = NamedTempFile::new().unwrap();
+ let _err = ObjectBuilder::default()
+ .debug(true)
+ .open_file(empty_file.path())
+ .unwrap_err();
+}
+
+#[test]
+fn test_object_name() {
+ let obj_path = get_test_object_path("runqslower.bpf.o");
+ let mut builder = ObjectBuilder::default();
+ builder.name("test name").unwrap();
+ let obj = builder.open_file(obj_path).expect("failed to build object");
+ let obj_name = obj.name().expect("failed to get object name");
+ assert!(obj_name == "test name");
+}
+
+#[tag(root)]
+#[test]
+fn test_object_maps() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let _map = get_map_mut(&mut obj, "start");
+ let _map = get_map_mut(&mut obj, "events");
+ assert!(!obj.maps().any(|map| map.name() == OsStr::new("asdf")));
+}
+
+#[tag(root)]
+#[test]
+fn test_object_maps_iter() {
+ bump_rlimit_mlock();
+
+ let obj = get_test_object("runqslower.bpf.o");
+ for map in obj.maps() {
+ eprintln!("{:?}", map.name());
+ }
+ // This will include .rodata and .bss, so our expected count is 4, not 2
+ assert!(obj.maps().count() == 4);
+}
+
+#[tag(root)]
+#[test]
+fn test_object_map_key_value_size() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let start = get_map_mut(&mut obj, "start");
+
+ assert!(start.lookup(&[1, 2, 3, 4, 5], MapFlags::empty()).is_err());
+ assert!(start.delete(&[1]).is_err());
+ assert!(start.lookup_and_delete(&[1, 2, 3, 4, 5]).is_err());
+ assert!(start
+ .update(&[1, 2, 3, 4, 5], &[1], MapFlags::empty())
+ .is_err());
+}
+
+#[tag(root)]
+#[test]
+fn test_object_map_update_batch() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let start = get_map_mut(&mut obj, "start");
+
+ let key1 = 1u32.to_ne_bytes();
+ let key2 = 2u32.to_ne_bytes();
+ let key3 = 3u32.to_ne_bytes();
+ let key4 = 4u32.to_ne_bytes();
+
+ let value1 = 369u64.to_ne_bytes();
+ let value2 = 258u64.to_ne_bytes();
+ let value3 = 147u64.to_ne_bytes();
+ let value4 = 159u64.to_ne_bytes();
+
+ let batch_key1 = key1.into_iter().chain(key2).collect::<Vec<_>>();
+ let batch_value1 = value1.into_iter().chain(value2).collect::<Vec<_>>();
+
+ let batch_key2 = key2.into_iter().chain(key3).chain(key4).collect::<Vec<_>>();
+ let batch_value2 = value2
+ .into_iter()
+ .chain(value3)
+ .chain(value4)
+ .collect::<Vec<_>>();
+
+ // Update batch with wrong key size.
+ assert!(start
+ .update_batch(
+ &[1, 2, 3],
+ &batch_value1,
+ 2,
+ MapFlags::ANY,
+ MapFlags::NO_EXIST
+ )
+ .is_err());
+
+ // Update batch with wrong value size.
+ assert!(start
+ .update_batch(
+ &batch_key1,
+ &[1, 2, 3],
+ 2,
+ MapFlags::ANY,
+ MapFlags::NO_EXIST
+ )
+ .is_err());
+
+ // Update batch with wrong count.
+ assert!(start
+ .update_batch(
+ &batch_key1,
+ &batch_value1,
+ 1,
+ MapFlags::ANY,
+ MapFlags::NO_EXIST
+ )
+ .is_err());
+
+ // Update batch with 1 key.
+ assert!(start
+ .update_batch(&key1, &value1, 1, MapFlags::ANY, MapFlags::NO_EXIST)
+ .is_ok());
+
+ // Update batch with multiple keys.
+ assert!(start
+ .update_batch(
+ &batch_key2,
+ &batch_value2,
+ 3,
+ MapFlags::ANY,
+ MapFlags::NO_EXIST
+ )
+ .is_ok());
+
+ // Update batch with existing keys.
+ assert!(start
+ .update_batch(
+ &batch_key2,
+ &batch_value2,
+ 3,
+ MapFlags::NO_EXIST,
+ MapFlags::NO_EXIST
+ )
+ .is_err());
+}
+
+#[tag(root)]
+#[test]
+fn test_object_map_delete_batch() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let start = get_map_mut(&mut obj, "start");
+
+ let key1 = 1u32.to_ne_bytes();
+ assert!(start
+ .update(&key1, &9999u64.to_ne_bytes(), MapFlags::ANY)
+ .is_ok());
+ let key2 = 2u32.to_ne_bytes();
+ assert!(start
+ .update(&key2, &42u64.to_ne_bytes(), MapFlags::ANY)
+ .is_ok());
+ let key3 = 3u32.to_ne_bytes();
+ assert!(start
+ .update(&key3, &18u64.to_ne_bytes(), MapFlags::ANY)
+ .is_ok());
+ let key4 = 4u32.to_ne_bytes();
+ assert!(start
+ .update(&key4, &1337u64.to_ne_bytes(), MapFlags::ANY)
+ .is_ok());
+
+ // Delete 1 incomplete key.
+ assert!(start
+ .delete_batch(&[0, 0, 1], 1, MapFlags::empty(), MapFlags::empty())
+ .is_err());
+ // Delete keys with wrong count.
+ assert!(start
+ .delete_batch(&key4, 2, MapFlags::empty(), MapFlags::empty())
+ .is_err());
+ // Delete 1 key successfully.
+ assert!(start
+ .delete_batch(&key4, 1, MapFlags::empty(), MapFlags::empty())
+ .is_ok());
+ // Delete remaining 3 keys.
+ let keys = key1.into_iter().chain(key2).chain(key3).collect::<Vec<_>>();
+ assert!(start
+ .delete_batch(&keys, 3, MapFlags::empty(), MapFlags::empty())
+ .is_ok());
+ // Map should be empty now.
+ assert!(start.keys().collect::<Vec<_>>().is_empty())
+}
+
+/// Test whether `MapInfo` works properly
+#[tag(root)]
+#[test]
+pub fn test_map_info() {
+ #[allow(clippy::needless_update)]
+ let opts = libbpf_sys::bpf_map_create_opts {
+ sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t,
+ map_flags: libbpf_sys::BPF_ANY,
+ btf_fd: 0,
+ btf_key_type_id: 0,
+ btf_value_type_id: 0,
+ btf_vmlinux_value_type_id: 0,
+ inner_map_fd: 0,
+ map_extra: 0,
+ numa_node: 0,
+ map_ifindex: 0,
+ // bpf_map_create_opts might have padding fields on some platforms
+ ..Default::default()
+ };
+
+ let map = MapHandle::create(MapType::Hash, Some("simple_map"), 8, 64, 1024, &opts).unwrap();
+ let map_info = MapInfo::new(map.as_fd()).unwrap();
+ let name_received = map_info.name().unwrap();
+ assert_eq!(name_received, "simple_map");
+ assert_eq!(map_info.map_type(), MapType::Hash);
+ assert_eq!(map_info.flags() & MapFlags::ANY, MapFlags::ANY);
+
+ let map_info = &map_info.info;
+ assert_eq!(map_info.key_size, 8);
+ assert_eq!(map_info.value_size, 64);
+ assert_eq!(map_info.max_entries, 1024);
+ assert_eq!(map_info.btf_id, 0);
+ assert_eq!(map_info.btf_key_type_id, 0);
+ assert_eq!(map_info.btf_value_type_id, 0);
+ assert_eq!(map_info.btf_vmlinux_value_type_id, 0);
+ assert_eq!(map_info.map_extra, 0);
+ assert_eq!(map_info.ifindex, 0);
+}
+
+#[tag(root)]
+#[test]
+fn test_object_percpu_lookup() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("percpu_map.bpf.o");
+ let map = get_map_mut(&mut obj, "percpu_map");
+ let res = map
+ .lookup_percpu(&(0_u32).to_ne_bytes(), MapFlags::ANY)
+ .expect("failed to lookup")
+ .expect("failed to find value for key");
+
+ assert_eq!(
+ res.len(),
+ num_possible_cpus().expect("must be one value per cpu")
+ );
+ assert_eq!(res[0].len(), size_of::<u32>());
+}
+
+#[tag(root)]
+#[test]
+fn test_object_percpu_invalid_lookup_fn() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("percpu_map.bpf.o");
+ let map = get_map_mut(&mut obj, "percpu_map");
+
+ assert!(map.lookup(&(0_u32).to_ne_bytes(), MapFlags::ANY).is_err());
+}
+
+#[tag(root)]
+#[test]
+fn test_object_percpu_update() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("percpu_map.bpf.o");
+ let map = get_map_mut(&mut obj, "percpu_map");
+ let key = (0_u32).to_ne_bytes();
+
+ let mut vals: Vec<Vec<u8>> = Vec::new();
+ for i in 0..num_possible_cpus().unwrap() {
+ vals.push((i as u32).to_ne_bytes().to_vec());
+ }
+
+ map.update_percpu(&key, &vals, MapFlags::ANY)
+ .expect("failed to update map");
+
+ let res = map
+ .lookup_percpu(&key, MapFlags::ANY)
+ .expect("failed to lookup")
+ .expect("failed to find value for key");
+
+ assert_eq!(vals, res);
+}
+
+#[tag(root)]
+#[test]
+fn test_object_percpu_invalid_update_fn() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("percpu_map.bpf.o");
+ let map = get_map_mut(&mut obj, "percpu_map");
+ let key = (0_u32).to_ne_bytes();
+
+ let val = (1_u32).to_ne_bytes().to_vec();
+
+ assert!(map.update(&key, &val, MapFlags::ANY).is_err());
+}
+
+#[tag(root)]
+#[test]
+fn test_object_percpu_lookup_update() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("percpu_map.bpf.o");
+ let map = get_map_mut(&mut obj, "percpu_map");
+ let key = (0_u32).to_ne_bytes();
+
+ let mut res = map
+ .lookup_percpu(&key, MapFlags::ANY)
+ .expect("failed to lookup")
+ .expect("failed to find value for key");
+
+ for e in res.iter_mut() {
+ e[0] &= 0xf0;
+ }
+
+ map.update_percpu(&key, &res, MapFlags::ANY)
+ .expect("failed to update after first lookup");
+
+ let res2 = map
+ .lookup_percpu(&key, MapFlags::ANY)
+ .expect("failed to lookup")
+ .expect("failed to find value for key");
+
+ assert_eq!(res, res2);
+}
+
+#[tag(root)]
+#[test]
+fn test_object_map_empty_lookup() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let start = get_map_mut(&mut obj, "start");
+
+ assert!(start
+ .lookup(&[1, 2, 3, 4], MapFlags::empty())
+ .expect("err in map lookup")
+ .is_none());
+}
+
+/// Test CRUD operations on map of type queue.
+#[tag(root)]
+#[test]
+fn test_object_map_queue_crud() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("tracepoint.bpf.o");
+ let queue = get_map_mut(&mut obj, "queue");
+
+ let key: [u8; 0] = [];
+ let value1 = 42u32.to_ne_bytes();
+ let value2 = 43u32.to_ne_bytes();
+
+ // Test queue, FIFO expected
+ queue
+ .update(&key, &value1, MapFlags::ANY)
+ .expect("failed to update in queue");
+ queue
+ .update(&key, &value2, MapFlags::ANY)
+ .expect("failed to update in queue");
+
+ let mut val = queue
+ .lookup(&key, MapFlags::ANY)
+ .expect("failed to peek the queue")
+ .expect("failed to retrieve value");
+ assert_eq!(val.len(), 4);
+ assert_eq!(&val, &value1);
+
+ val = queue
+ .lookup_and_delete(&key)
+ .expect("failed to pop from queue")
+ .expect("failed to retrieve value");
+ assert_eq!(val.len(), 4);
+ assert_eq!(&val, &value1);
+
+ val = queue
+ .lookup_and_delete(&key)
+ .expect("failed to pop from queue")
+ .expect("failed to retrieve value");
+ assert_eq!(val.len(), 4);
+ assert_eq!(&val, &value2);
+
+ assert!(queue
+ .lookup_and_delete(&key)
+ .expect("failed to pop from queue")
+ .is_none());
+}
+
+/// Test CRUD operations on map of type bloomfilter.
+#[tag(root)]
+#[test]
+fn test_object_map_bloom_filter_crud() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("tracepoint.bpf.o");
+ let bloom_filter = get_map_mut(&mut obj, "bloom_filter");
+
+ let key: [u8; 0] = [];
+ let value1 = 1337u32.to_ne_bytes();
+ let value2 = 2674u32.to_ne_bytes();
+
+ bloom_filter
+ .update(&key, &value1, MapFlags::ANY)
+ .expect("failed to add entry value1 to bloom filter");
+
+ bloom_filter
+ .update(&key, &value2, MapFlags::ANY)
+ .expect("failed to add entry value2 in bloom filter");
+
+ // Non-empty keys should result in an error.
+ bloom_filter
+ .update(&value1, &value1, MapFlags::ANY)
+ .expect_err("non-empty key should return an error");
+
+ for inserted_value in [value1, value2] {
+ let val = bloom_filter
+ .lookup_bloom_filter(&inserted_value)
+ .expect("failed retrieve item from bloom filter");
+
+ assert!(val);
+ }
+ // Test a non-existent element.
+ let enoent_found = bloom_filter
+ .lookup_bloom_filter(&[1, 2, 3, 4])
+ .expect("failed to retrieve item from bloom filter");
+
+ assert!(!enoent_found);
+
+ // Calling lookup should result in an error
+ bloom_filter
+ .lookup(&[1, 2, 3, 4], MapFlags::ANY)
+ .expect_err("lookup should fail since we should use lookup_bloom_filter");
+
+ // Deleting should not be possible
+ bloom_filter
+ .lookup_and_delete(&key)
+ .expect_err("Expect delete to fail");
+}
+
+/// Test CRUD operations on map of type stack.
+#[tag(root)]
+#[test]
+fn test_object_map_stack_crud() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("tracepoint.bpf.o");
+ let stack = get_map_mut(&mut obj, "stack");
+
+ let key: [u8; 0] = [];
+ let value1 = 1337u32.to_ne_bytes();
+ let value2 = 2674u32.to_ne_bytes();
+
+ stack
+ .update(&key, &value1, MapFlags::ANY)
+ .expect("failed to update in stack");
+ stack
+ .update(&key, &value2, MapFlags::ANY)
+ .expect("failed to update in stack");
+
+ let mut val = stack
+ .lookup(&key, MapFlags::ANY)
+ .expect("failed to pop from stack")
+ .expect("failed to retrieve value");
+
+ assert_eq!(val.len(), 4);
+ assert_eq!(&val, &value2);
+
+ val = stack
+ .lookup_and_delete(&key)
+ .expect("failed to pop from stack")
+ .expect("failed to retrieve value");
+ assert_eq!(val.len(), 4);
+ assert_eq!(&val, &value2);
+
+ val = stack
+ .lookup_and_delete(&key)
+ .expect("failed to pop from stack")
+ .expect("failed to retrieve value");
+ assert_eq!(val.len(), 4);
+ assert_eq!(&val, &value1);
+
+ assert!(stack
+ .lookup_and_delete(&key)
+ .expect("failed to pop from stack")
+ .is_none());
+}
+
+#[tag(root)]
+#[test]
+fn test_object_map_mutation() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let start = get_map_mut(&mut obj, "start");
+ start
+ .update(&[1, 2, 3, 4], &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty())
+ .expect("failed to write");
+ let val = start
+ .lookup(&[1, 2, 3, 4], MapFlags::empty())
+ .expect("failed to read map")
+ .expect("failed to find key");
+ assert_eq!(val.len(), 8);
+ assert_eq!(val, &[1, 2, 3, 4, 5, 6, 7, 8]);
+
+ start.delete(&[1, 2, 3, 4]).expect("failed to delete key");
+
+ assert!(start
+ .lookup(&[1, 2, 3, 4], MapFlags::empty())
+ .expect("failed to read map")
+ .is_none());
+}
+
+#[tag(root)]
+#[test]
+fn test_object_map_lookup_flags() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let start = get_map_mut(&mut obj, "start");
+ start
+ .update(&[1, 2, 3, 4], &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::NO_EXIST)
+ .expect("failed to write");
+ assert!(start
+ .update(&[1, 2, 3, 4], &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::NO_EXIST)
+ .is_err());
+}
+
+#[tag(root)]
+#[test]
+fn test_object_map_key_iter() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let start = get_map_mut(&mut obj, "start");
+
+ let key1 = vec![1, 2, 3, 4];
+ let key2 = vec![1, 2, 3, 5];
+ let key3 = vec![1, 2, 3, 6];
+
+ start
+ .update(&key1, &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty())
+ .expect("failed to write");
+ start
+ .update(&key2, &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty())
+ .expect("failed to write");
+ start
+ .update(&key3, &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty())
+ .expect("failed to write");
+
+ let mut keys = HashSet::new();
+ for key in start.keys() {
+ keys.insert(key);
+ }
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&key1));
+ assert!(keys.contains(&key2));
+ assert!(keys.contains(&key3));
+}
+
+#[tag(root)]
+#[test]
+fn test_object_map_key_iter_empty() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let start = get_map_mut(&mut obj, "start");
+ let mut count = 0;
+ for _ in start.keys() {
+ count += 1;
+ }
+ assert_eq!(count, 0);
+}
+
+#[tag(root)]
+#[test]
+fn test_object_map_pin() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let mut map = get_map_mut(&mut obj, "start");
+ let path = "/sys/fs/bpf/mymap_test_object_map_pin";
+
+ // Unpinning an unpinned map should be an error
+ assert!(map.unpin(path).is_err());
+ assert!(!Path::new(path).exists());
+
+ // Pin and unpin should be successful
+ map.pin(path).expect("failed to pin map");
+ assert!(Path::new(path).exists());
+ map.unpin(path).expect("failed to unpin map");
+ assert!(!Path::new(path).exists());
+}
+
+#[tag(root)]
+#[test]
+fn test_object_loading_pinned_map_from_path() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let mut map = get_map_mut(&mut obj, "start");
+ let path = "/sys/fs/bpf/mymap_test_pin_to_load_from_path";
+
+ map.pin(path).expect("pinning map failed");
+
+ let pinned_map = MapHandle::from_pinned_path(path).expect("loading a map from a path failed");
+ map.unpin(path).expect("unpinning map failed");
+
+ assert_eq!(map.name(), pinned_map.name());
+ assert_eq!(
+ map.info().unwrap().info.id,
+ pinned_map.info().unwrap().info.id
+ );
+}
+
+#[tag(root)]
+#[test]
+fn test_object_loading_loaded_map_from_id() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let map = get_map_mut(&mut obj, "start");
+ let id = map.info().expect("to get info from map 'start'").info.id;
+
+ let map_by_id = MapHandle::from_map_id(id).expect("map to load from id");
+
+ assert_eq!(map.name(), map_by_id.name());
+ assert_eq!(
+ map.info().unwrap().info.id,
+ map_by_id.info().unwrap().info.id
+ );
+}
+
+#[tag(root)]
+#[test]
+fn test_object_programs() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let _prog = get_prog_mut(&mut obj, "handle__sched_wakeup");
+ let _prog = get_prog_mut(&mut obj, "handle__sched_wakeup_new");
+ let _prog = get_prog_mut(&mut obj, "handle__sched_switch");
+ assert!(!obj.progs().any(|prog| prog.name() == OsStr::new("asdf")));
+}
+
+#[tag(root)]
+#[test]
+fn test_object_programs_iter_mut() {
+ bump_rlimit_mlock();
+
+ let obj = get_test_object("runqslower.bpf.o");
+ assert!(obj.progs().count() == 3);
+}
+
+#[tag(root)]
+#[test]
+fn test_object_program_pin() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__sched_wakeup");
+ let path = "/sys/fs/bpf/myprog";
+
+ // Unpinning an unpinned prog should be an error
+ assert!(prog.unpin(path).is_err());
+ assert!(!Path::new(path).exists());
+
+ // Pin should be successful
+ prog.pin(path).expect("failed to pin prog");
+ assert!(Path::new(path).exists());
+
+ // Backup cleanup in case the test errors out early
+ defer! {
+ let _ = fs::remove_file(path);
+ }
+
+ // Unpin should be successful
+ prog.unpin(path).expect("failed to unpin prog");
+ assert!(!Path::new(path).exists());
+}
+
+#[tag(root)]
+#[test]
+fn test_object_link_pin() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__sched_wakeup");
+ let mut link = prog.attach().expect("failed to attach prog");
+
+ let path = "/sys/fs/bpf/mylink";
+
+ // Unpinning an unpinned link should be an error
+ assert!(link.unpin().is_err());
+ assert!(!Path::new(path).exists());
+
+ // Pin should be successful
+ link.pin(path).expect("failed to pin link");
+ assert!(Path::new(path).exists());
+
+ // Backup cleanup in case the test errors out early
+ defer! {
+ let _ = fs::remove_file(path);
+ }
+
+ // Unpin should be successful
+ link.unpin().expect("failed to unpin link");
+ assert!(!Path::new(path).exists());
+}
+
+#[tag(root)]
+#[test]
+fn test_object_reuse_pinned_map() {
+ bump_rlimit_mlock();
+
+ let path = "/sys/fs/bpf/mymap_test_object_reuse_pined_map";
+ let key = vec![1, 2, 3, 4];
+ let val = vec![1, 2, 3, 4, 5, 6, 7, 8];
+
+ // Pin a map
+ {
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let mut map = get_map_mut(&mut obj, "start");
+ map.update(&key, &val, MapFlags::empty())
+ .expect("failed to write");
+
+ // Pin map
+ map.pin(path).expect("failed to pin map");
+ assert!(Path::new(path).exists());
+ }
+
+ // Backup cleanup in case the test errors out early
+ defer! {
+ let _ = fs::remove_file(path);
+ }
+
+ // Reuse the pinned map
+ let obj_path = get_test_object_path("runqslower.bpf.o");
+ let mut builder = ObjectBuilder::default();
+ builder.debug(true);
+ let mut open_obj = builder.open_file(obj_path).expect("failed to open object");
+ let mut start = open_obj
+ .maps_mut()
+ .find(|map| map.name() == OsStr::new("start"))
+ .expect("failed to find `start` map");
+ assert!(start.reuse_pinned_map("/asdf").is_err());
+ start.reuse_pinned_map(path).expect("failed to reuse map");
+
+ let mut obj = open_obj.load().expect("failed to load object");
+ let mut reused_map = get_map_mut(&mut obj, "start");
+ let found_val = reused_map
+ .lookup(&key, MapFlags::empty())
+ .expect("failed to read map")
+ .expect("failed to find key");
+ assert_eq!(&found_val, &val);
+
+ // Cleanup
+ reused_map.unpin(path).expect("failed to unpin map");
+ assert!(!Path::new(path).exists());
+}
+
+#[tag(root)]
+#[test]
+fn test_object_ringbuf_raw() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("ringbuf.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
+ let _link = prog.attach().expect("failed to attach prog");
+
+ static mut V1: i32 = 0;
+ static mut V2: i32 = 0;
+
+ fn callback1(data: &[u8]) -> i32 {
+ let mut value: i32 = 0;
+ plain::copy_from_bytes(&mut value, data).expect("Wrong size");
+
+ unsafe {
+ V1 = value;
+ }
+
+ 0
+ }
+
+ fn callback2(data: &[u8]) -> i32 {
+ let mut value: i32 = 0;
+ plain::copy_from_bytes(&mut value, data).expect("Wrong size");
+
+ unsafe {
+ V2 = value;
+ }
+
+ 0
+ }
+
+ // Test trying to build without adding any ringbufs
+ // Can't use expect_err here since RingBuffer does not implement Debug
+ let builder = libbpf_rs::RingBufferBuilder::new();
+ assert!(
+ builder.build().is_err(),
+ "Should not be able to build without adding at least one ringbuf"
+ );
+
+ // Test building with multiple map objects
+ let mut builder = libbpf_rs::RingBufferBuilder::new();
+
+ // Add a first map and callback
+ let map1 = get_map(&obj, "ringbuf1");
+ builder
+ .add(&map1, callback1)
+ .expect("failed to add ringbuf");
+
+ // Add a second map and callback
+ let map2 = get_map(&obj, "ringbuf2");
+ builder
+ .add(&map2, callback2)
+ .expect("failed to add ringbuf");
+
+ let mgr = builder.build().expect("failed to build");
+
+ // Call getpid to ensure the BPF program runs
+ unsafe { libc::getpid() };
+
+ // Test raw primitives
+ let ret = mgr.consume_raw();
+
+ // We can't check for exact return values, since other tasks in the system may call getpid(),
+ // triggering the BPF program
+ assert!(ret >= 2);
+
+ unsafe { assert_eq!(V1, 1) };
+ unsafe { assert_eq!(V2, 2) };
+
+ // Consume from a (potentially) empty ring buffer
+ let ret = mgr.consume_raw();
+ assert!(ret >= 0);
+
+ // Consume from a (potentially) empty ring buffer using poll()
+ let ret = mgr.poll_raw(Duration::from_millis(100));
+ assert!(ret >= 0);
+}
+
+#[tag(root)]
+#[test]
+fn test_object_ringbuf_err_callback() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("ringbuf.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
+ let _link = prog.attach().expect("failed to attach prog");
+
+ // Immediately trigger an error that should be reported back to the consume_raw() or poll_raw()
+ fn callback1(_data: &[u8]) -> i32 {
+ -libc::ENOENT
+ }
+
+ // Immediately trigger an error that should be reported back to the consume_raw() or poll_raw()
+ fn callback2(_data: &[u8]) -> i32 {
+ -libc::EPERM
+ }
+
+ // Test trying to build without adding any ringbufs
+ // Can't use expect_err here since RingBuffer does not implement Debug
+ let builder = libbpf_rs::RingBufferBuilder::new();
+ assert!(
+ builder.build().is_err(),
+ "Should not be able to build without adding at least one ringbuf"
+ );
+
+ // Test building with multiple map objects
+ let mut builder = libbpf_rs::RingBufferBuilder::new();
+
+ // Add a first map and callback
+ let map1 = get_map(&obj, "ringbuf1");
+ builder
+ .add(&map1, callback1)
+ .expect("failed to add ringbuf");
+
+ // Add a second map and callback
+ let map2 = get_map(&obj, "ringbuf2");
+ builder
+ .add(&map2, callback2)
+ .expect("failed to add ringbuf");
+
+ let mgr = builder.build().expect("failed to build");
+
+ // Call getpid to ensure the BPF program runs
+ unsafe { libc::getpid() };
+
+ // Test raw primitives
+ let ret = mgr.consume_raw();
+
+ // The error originating from the first callback executed should be reported
+ // here, either from callback1() or callback2()
+ assert!(ret == -libc::ENOENT || ret == -libc::EPERM);
+
+ unsafe { libc::getpid() };
+
+ // The same behavior should happen with poll_raw()
+ let ret = mgr.poll_raw(Duration::from_millis(100));
+
+ assert!(ret == -libc::ENOENT || ret == -libc::EPERM);
+}
+
+#[tag(root)]
+#[test]
+fn test_object_ringbuf() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("ringbuf.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
+ let _link = prog.attach().expect("failed to attach prog");
+
+ static mut V1: i32 = 0;
+ static mut V2: i32 = 0;
+
+ fn callback1(data: &[u8]) -> i32 {
+ let mut value: i32 = 0;
+ plain::copy_from_bytes(&mut value, data).expect("Wrong size");
+
+ unsafe {
+ V1 = value;
+ }
+
+ 0
+ }
+
+ fn callback2(data: &[u8]) -> i32 {
+ let mut value: i32 = 0;
+ plain::copy_from_bytes(&mut value, data).expect("Wrong size");
+
+ unsafe {
+ V2 = value;
+ }
+
+ 0
+ }
+
+ // Test trying to build without adding any ringbufs
+ // Can't use expect_err here since RingBuffer does not implement Debug
+ let builder = libbpf_rs::RingBufferBuilder::new();
+ assert!(
+ builder.build().is_err(),
+ "Should not be able to build without adding at least one ringbuf"
+ );
+
+ // Test building with multiple map objects
+ let mut builder = libbpf_rs::RingBufferBuilder::new();
+
+ // Add a first map and callback
+ let map1 = get_map(&obj, "ringbuf1");
+ builder
+ .add(&map1, callback1)
+ .expect("failed to add ringbuf");
+
+ // Add a second map and callback
+ let map2 = get_map(&obj, "ringbuf2");
+ builder
+ .add(&map2, callback2)
+ .expect("failed to add ringbuf");
+
+ let mgr = builder.build().expect("failed to build");
+
+ // Call getpid to ensure the BPF program runs
+ unsafe { libc::getpid() };
+
+ // This should result in both callbacks being called
+ mgr.consume().expect("failed to consume ringbuf");
+
+ // Our values should both reflect that the callbacks have been called
+ unsafe { assert_eq!(V1, 1) };
+ unsafe { assert_eq!(V2, 2) };
+
+ // Reset both values
+ unsafe { V1 = 0 };
+ unsafe { V2 = 0 };
+
+ // Call getpid to ensure the BPF program runs
+ unsafe { libc::getpid() };
+
+ // This should result in both callbacks being called
+ mgr.poll(Duration::from_millis(100))
+ .expect("failed to poll ringbuf");
+
+ // Our values should both reflect that the callbacks have been called
+ unsafe { assert_eq!(V1, 1) };
+ unsafe { assert_eq!(V2, 2) };
+}
+
+#[tag(root)]
+#[test]
+fn test_object_ringbuf_closure() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("ringbuf.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
+ let _link = prog.attach().expect("failed to attach prog");
+
+ let (sender1, receiver1) = channel();
+ let callback1 = move |data: &[u8]| -> i32 {
+ let mut value: i32 = 0;
+ plain::copy_from_bytes(&mut value, data).expect("Wrong size");
+
+ sender1.send(value).expect("failed to send value");
+
+ 0
+ };
+
+ let (sender2, receiver2) = channel();
+ let callback2 = move |data: &[u8]| -> i32 {
+ let mut value: i32 = 0;
+ plain::copy_from_bytes(&mut value, data).expect("Wrong size");
+
+ sender2.send(value).expect("failed to send value");
+
+ 0
+ };
+
+ // Test trying to build without adding any ringbufs
+ // Can't use expect_err here since RingBuffer does not implement Debug
+ let builder = libbpf_rs::RingBufferBuilder::new();
+ assert!(
+ builder.build().is_err(),
+ "Should not be able to build without adding at least one ringbuf"
+ );
+
+ // Test building with multiple map objects
+ let mut builder = libbpf_rs::RingBufferBuilder::new();
+
+ // Add a first map and callback
+ let map1 = get_map(&obj, "ringbuf1");
+ builder
+ .add(&map1, callback1)
+ .expect("failed to add ringbuf");
+
+ // Add a second map and callback
+ let map2 = get_map(&obj, "ringbuf2");
+ builder
+ .add(&map2, callback2)
+ .expect("failed to add ringbuf");
+
+ let mgr = builder.build().expect("failed to build");
+
+ // Call getpid to ensure the BPF program runs
+ unsafe { libc::getpid() };
+
+ // This should result in both callbacks being called
+ mgr.consume().expect("failed to consume ringbuf");
+
+ let v1 = receiver1.recv().expect("failed to receive value");
+ let v2 = receiver2.recv().expect("failed to receive value");
+
+ assert_eq!(v1, 1);
+ assert_eq!(v2, 2);
+}
+
+/// Check that `RingBuffer` works correctly even if the map file descriptors
+/// provided during construction are closed. This test validates that `libbpf`'s
+/// refcount behavior is correctly reflected in our `RingBuffer` lifetimes.
+#[tag(root)]
+#[test]
+fn test_object_ringbuf_with_closed_map() {
+ bump_rlimit_mlock();
+
+ fn test(poll_fn: impl FnOnce(&libbpf_rs::RingBuffer)) {
+ let mut value = 0i32;
+
+ {
+ let mut obj = get_test_object("tracepoint.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__tracepoint");
+ let _link = prog
+ .attach_tracepoint("syscalls", "sys_enter_getpid")
+ .expect("failed to attach prog");
+
+ let map = get_map_mut(&mut obj, "ringbuf");
+
+ let callback = |data: &[u8]| {
+ plain::copy_from_bytes(&mut value, data).expect("Wrong size");
+ 0
+ };
+
+ let mut builder = libbpf_rs::RingBufferBuilder::new();
+ builder.add(&map, callback).expect("failed to add ringbuf");
+ let ringbuf = builder.build().expect("failed to build");
+
+ drop(obj);
+
+ // Trigger the tracepoint. At this point `map` along with the containing
+ // `obj` have been destroyed.
+ let _pid = unsafe { libc::getpid() };
+ let () = poll_fn(&ringbuf);
+ }
+
+ // If we see a 1 here the ring buffer was still working as expected.
+ assert_eq!(value, 1);
+ }
+
+ test(|ringbuf| ringbuf.consume().expect("failed to consume ringbuf"));
+ test(|ringbuf| {
+ ringbuf
+ .poll(Duration::from_secs(5))
+ .expect("failed to poll ringbuf")
+ });
+}
+
+#[tag(root)]
+#[test]
+fn test_object_user_ringbuf() {
+ #[repr(C)]
+ struct MyStruct {
+ key: u32,
+ value: u32,
+ }
+
+ unsafe impl Plain for MyStruct {}
+
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("user_ringbuf.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
+ let _link = prog.attach().expect("failed to attach prog");
+ let urb_map = get_map_mut(&mut obj, "user_ringbuf");
+ let user_ringbuf = UserRingBuffer::new(&urb_map).expect("failed to create user ringbuf");
+ let mut urb_sample = user_ringbuf
+ .reserve(size_of::<MyStruct>())
+ .expect("failed to reserve space");
+ let bytes = urb_sample.as_mut();
+ let my_struct = plain::from_mut_bytes::<MyStruct>(bytes).expect("failed to convert bytes");
+ my_struct.key = 42;
+ my_struct.value = 1337;
+ user_ringbuf
+ .submit(urb_sample)
+ .expect("failed to submit sample");
+
+ // Trigger BPF program.
+ let _pid = unsafe { libc::getpid() };
+
+ // At this point, the BPF program should have run and consumed the sample in
+ // the user ring buffer, and stored the key/value in the samples map.
+ let samples_map = get_map_mut(&mut obj, "samples");
+ let key: u32 = 42;
+ let value: u32 = 1337;
+ let res = samples_map
+ .lookup(&key.to_ne_bytes(), MapFlags::ANY)
+ .expect("failed to lookup")
+ .expect("failed to find value for key");
+
+ // The value in the samples map should be the same as the value we submitted
+ assert_eq!(res.len(), size_of::<u32>());
+ let mut array = [0; size_of::<u32>()];
+ array.copy_from_slice(&res[..]);
+ assert_eq!(u32::from_ne_bytes(array), value);
+}
+
+#[tag(root)]
+#[test]
+fn test_object_user_ringbuf_reservation_too_big() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("user_ringbuf.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
+ let _link = prog.attach().expect("failed to attach prog");
+ let urb_map = get_map_mut(&mut obj, "user_ringbuf");
+ let user_ringbuf = UserRingBuffer::new(&urb_map).expect("failed to create user ringbuf");
+ let err = user_ringbuf.reserve(1024 * 1024).unwrap_err();
+ assert!(
+ err.to_string().contains("requested size is too large"),
+ "{err:#}"
+ );
+}
+
+#[tag(root)]
+#[test]
+fn test_object_user_ringbuf_not_enough_space() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("user_ringbuf.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
+ let _link = prog.attach().expect("failed to attach prog");
+ let urb_map = get_map_mut(&mut obj, "user_ringbuf");
+ let user_ringbuf = UserRingBuffer::new(&urb_map).expect("failed to create user ringbuf");
+ let _ = user_ringbuf
+ .reserve(1024 * 3)
+ .expect("failed to reserve space");
+ let err = user_ringbuf.reserve(1024 * 3).unwrap_err();
+ assert!(
+ err.to_string()
+ .contains("not enough space in the ring buffer"),
+ "{err:#}"
+ );
+}
+
+#[tag(root)]
+#[test]
+fn test_object_task_iter() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("taskiter.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "dump_pid");
+ let link = prog.attach().expect("failed to attach prog");
+ let mut iter = Iter::new(&link).expect("failed to create iterator");
+
+ #[repr(C)]
+ #[derive(Clone, Copy)]
+ struct IndexPidPair {
+ i: u32,
+ pid: i32,
+ }
+
+ unsafe impl Plain for IndexPidPair {}
+
+ let mut buf = Vec::new();
+ let bytes_read = iter
+ .read_to_end(&mut buf)
+ .expect("failed to read from iterator");
+
+ assert!(bytes_read > 0);
+ assert_eq!(bytes_read % size_of::<IndexPidPair>(), 0);
+ let items: &[IndexPidPair] =
+ plain::slice_from_bytes(buf.as_slice()).expect("Input slice cannot satisfy length");
+
+ assert!(!items.is_empty());
+ assert_eq!(items[0].i, 0);
+ assert!(items.windows(2).all(|w| w[0].i + 1 == w[1].i));
+ // Check for the init process (PID 1)
+ assert!(items.iter().any(|&item| item.pid == 1));
+}
+
+#[tag(root)]
+#[test]
+fn test_object_map_iter() {
+ bump_rlimit_mlock();
+
+ // Create a map for iteration test.
+ let opts = libbpf_sys::bpf_map_create_opts {
+ sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t,
+ map_flags: libbpf_sys::BPF_F_NO_PREALLOC,
+ ..Default::default()
+ };
+ let map = MapHandle::create(
+ MapType::Hash,
+ Some("mymap_test_object_map_iter"),
+ 4,
+ 8,
+ 8,
+ &opts,
+ )
+ .expect("failed to create map");
+
+ // Insert 3 elements.
+ for i in 0..3 {
+ let key = i32::to_ne_bytes(i);
+ // The value is the 4-byte key padded with four zero bytes to match the map's 8-byte value size.
+ let val = [&key[..], &[0_u8; 4]].concat();
+ map.update(&key, val.as_slice(), MapFlags::empty())
+ .expect("failed to write");
+ }
+
+ let mut obj = get_test_object("mapiter.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "map_iter");
+ let link = prog
+ .attach_iter(map.as_fd())
+ .expect("failed to attach map iter prog");
+ let mut iter = Iter::new(&link).expect("failed to create map iterator");
+
+ let mut buf = Vec::new();
+ let bytes_read = iter
+ .read_to_end(&mut buf)
+ .expect("failed to read from iterator");
+
+ assert!(bytes_read > 0);
+ assert_eq!(bytes_read % size_of::<u32>(), 0);
+ // Convert buf to &[u32]
+ let buf =
+ plain::slice_from_bytes::<u32>(buf.as_slice()).expect("Input slice cannot satisfy length");
+ assert!(buf.contains(&0));
+ assert!(buf.contains(&1));
+ assert!(buf.contains(&2));
+}
+
+#[tag(root)]
+#[test]
+fn test_object_map_create_and_pin() {
+ bump_rlimit_mlock();
+
+ let opts = libbpf_sys::bpf_map_create_opts {
+ sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t,
+ map_flags: libbpf_sys::BPF_F_NO_PREALLOC,
+ ..Default::default()
+ };
+
+ let mut map = MapHandle::create(
+ MapType::Hash,
+ Some("mymap_test_object_map_create_and_pin"),
+ 4,
+ 8,
+ 8,
+ &opts,
+ )
+ .expect("failed to create map");
+
+ assert_eq!(map.name(), "mymap_test_object_map_create_and_pin");
+
+ let key = vec![1, 2, 3, 4];
+ let val = vec![1, 2, 3, 4, 5, 6, 7, 8];
+ map.update(&key, &val, MapFlags::empty())
+ .expect("failed to write");
+ let res = map
+ .lookup(&key, MapFlags::ANY)
+ .expect("failed to lookup")
+ .expect("failed to find value for key");
+ assert_eq!(val, res);
+
+ let path = "/sys/fs/bpf/mymap_test_object_map_create_and_pin";
+
+ // Unpinning a unpinned map should be an error
+ assert!(map.unpin(path).is_err());
+ assert!(!Path::new(path).exists());
+
+ // Pin and unpin should be successful
+ map.pin(path).expect("failed to pin map");
+ assert!(Path::new(path).exists());
+ map.unpin(path).expect("failed to unpin map");
+ assert!(!Path::new(path).exists());
+}
+
+#[tag(root)]
+#[test]
+fn test_object_map_create_without_name() {
+ bump_rlimit_mlock();
+
+ #[allow(clippy::needless_update)]
+ let opts = libbpf_sys::bpf_map_create_opts {
+ sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t,
+ map_flags: libbpf_sys::BPF_F_NO_PREALLOC,
+ btf_fd: 0,
+ btf_key_type_id: 0,
+ btf_value_type_id: 0,
+ btf_vmlinux_value_type_id: 0,
+ inner_map_fd: 0,
+ map_extra: 0,
+ numa_node: 0,
+ map_ifindex: 0,
+ // bpf_map_create_opts might have padding fields on some platforms
+ ..Default::default()
+ };
+
+ let map = MapHandle::create(MapType::Hash, Option::<&str>::None, 4, 8, 8, &opts)
+ .expect("failed to create map");
+
+ assert!(map.name().is_empty());
+
+ let key = vec![1, 2, 3, 4];
+ let val = vec![1, 2, 3, 4, 5, 6, 7, 8];
+ map.update(&key, &val, MapFlags::empty())
+ .expect("failed to write");
+ let res = map
+ .lookup(&key, MapFlags::ANY)
+ .expect("failed to lookup")
+ .expect("failed to find value for key");
+ assert_eq!(val, res);
+}
+
+/// Test whether we can obtain multiple `MapHandle`s from a `Map`.
+#[tag(root)]
+#[test]
+fn test_object_map_handle_clone() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let map = get_map_mut(&mut obj, "events");
+ let handle1 = MapHandle::try_from(&map).expect("failed to create handle from Map");
+ assert_eq!(map.name(), handle1.name());
+ assert_eq!(map.map_type(), handle1.map_type());
+ assert_eq!(map.key_size(), handle1.key_size());
+ assert_eq!(map.value_size(), handle1.value_size());
+
+ let handle2 = MapHandle::try_from(&handle1).expect("failed to duplicate existing handle");
+ assert_eq!(handle1.name(), handle2.name());
+ assert_eq!(handle1.map_type(), handle2.map_type());
+ assert_eq!(handle1.key_size(), handle2.key_size());
+ assert_eq!(handle1.value_size(), handle2.value_size());
+
+ let info1 = map.info().expect("failed to get map info from map");
+ let info2 = handle2.info().expect("failed to get map info from handle");
+ assert_eq!(
+ info1.info.id, info2.info.id,
+ "Map and MapHandle have different IDs"
+ );
+}
+
+#[tag(root)]
+#[test]
+fn test_object_usdt() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("usdt.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__usdt");
+
+ let path = current_exe().expect("failed to find executable name");
+ let _link = prog
+ .attach_usdt(
+ unsafe { libc::getpid() },
+ &path,
+ "test_provider",
+ "test_function",
+ )
+ .expect("failed to attach prog");
+
+ let map = get_map_mut(&mut obj, "ringbuf");
+ let action = || {
+ // Define a USDT probe point and exercise it, since we attached to our own process.
+ probe!(test_provider, test_function, 1);
+ };
+ let result = with_ringbuffer(&map, action);
+
+ assert_eq!(result, 1);
+}
+
+#[tag(root)]
+#[test]
+fn test_object_usdt_cookie() {
+ bump_rlimit_mlock();
+
+ let cookie_val = 1337u16;
+ let mut obj = get_test_object("usdt.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__usdt_with_cookie");
+
+ let path = current_exe().expect("failed to find executable name");
+ let _link = prog
+ .attach_usdt_with_opts(
+ unsafe { libc::getpid() },
+ &path,
+ "test_provider",
+ "test_function2",
+ UsdtOpts {
+ cookie: cookie_val.into(),
+ ..UsdtOpts::default()
+ },
+ )
+ .expect("failed to attach prog");
+
+ let map = get_map_mut(&mut obj, "ringbuf");
+ let action = || {
+ // Define a USDT probe point and exercise it, since we attached to our own process.
+ probe!(test_provider, test_function2, 1);
+ };
+ let result = with_ringbuffer(&map, action);
+
+ assert_eq!(result, cookie_val.into());
+}
+
+#[tag(root)]
+#[test]
+fn test_map_probes() {
+ bump_rlimit_mlock();
+
+ let supported = MapType::Array
+ .is_supported()
+ .expect("failed to query if Array map is supported");
+ assert!(supported);
+ let supported_res = MapType::Unknown.is_supported();
+ assert!(supported_res.is_err());
+}
+
+#[tag(root)]
+#[test]
+fn test_program_probes() {
+ bump_rlimit_mlock();
+
+ let supported = ProgramType::SocketFilter
+ .is_supported()
+ .expect("failed to query if SocketFilter program is supported");
+ assert!(supported);
+ let supported_res = ProgramType::Unknown.is_supported();
+ assert!(supported_res.is_err());
+}
+
+#[tag(root)]
+#[test]
+fn test_program_helper_probes() {
+ bump_rlimit_mlock();
+
+ let supported = ProgramType::SocketFilter
+ .is_helper_supported(libbpf_sys::BPF_FUNC_map_lookup_elem)
+ .expect("failed to query if helper supported");
+ assert!(supported);
+ // redirect should not be supported from socket filter, as it is only used in TC/XDP.
+ let supported = ProgramType::SocketFilter
+ .is_helper_supported(libbpf_sys::BPF_FUNC_redirect)
+ .expect("failed to query if helper supported");
+ assert!(!supported);
+ let supported_res = MapType::Unknown.is_supported();
+ assert!(supported_res.is_err());
+}
+
+#[tag(root)]
+#[test]
+fn test_object_open_program_insns() {
+ bump_rlimit_mlock();
+
+ let open_obj = open_test_object("usdt.bpf.o");
+ let prog = open_obj
+ .progs()
+ .find(|prog| prog.name() == OsStr::new("handle__usdt"))
+ .expect("failed to find program");
+
+ let insns = prog.insns();
+ assert!(!insns.is_empty());
+}
+
+#[tag(root)]
+#[test]
+fn test_object_program_insns() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("usdt.bpf.o");
+ let prog = get_prog_mut(&mut obj, "handle__usdt");
+ let insns = prog.insns();
+ assert!(!insns.is_empty());
+}
+
+/// Check that we can attach a BPF program to a kernel tracepoint.
+#[tag(root)]
+#[test]
+fn test_object_tracepoint() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("tracepoint.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__tracepoint");
+ let _link = prog
+ .attach_tracepoint("syscalls", "sys_enter_getpid")
+ .expect("failed to attach prog");
+
+ let map = get_map_mut(&mut obj, "ringbuf");
+ let action = || {
+ let _pid = unsafe { libc::getpid() };
+ };
+ let result = with_ringbuffer(&map, action);
+
+ assert_eq!(result, 1);
+}
+
+/// Check that we can attach a BPF program to a kernel tracepoint, providing
+/// additional options.
+#[tag(root)]
+#[test]
+fn test_object_tracepoint_with_opts() {
+ bump_rlimit_mlock();
+
+ let cookie_val = 42u16;
+ let mut obj = get_test_object("tracepoint.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__tracepoint_with_cookie");
+
+ let opts = TracepointOpts {
+ cookie: cookie_val.into(),
+ ..TracepointOpts::default()
+ };
+ let _link = prog
+ .attach_tracepoint_with_opts("syscalls", "sys_enter_getpid", opts)
+ .expect("failed to attach prog");
+
+ let map = get_map_mut(&mut obj, "ringbuf");
+ let action = || {
+ let _pid = unsafe { libc::getpid() };
+ };
+ let result = with_ringbuffer(&map, action);
+
+ assert_eq!(result, cookie_val.into());
+}
+
+#[inline(never)]
+#[no_mangle]
+extern "C" fn uprobe_target() -> usize {
+ // Use `black_box` here as an additional barrier to inlining.
+ hint::black_box(42)
+}
+
+/// Check that we can attach a BPF program to a uprobe.
+#[tag(root)]
+#[test]
+fn test_object_uprobe_with_opts() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("uprobe.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__uprobe");
+
+ let pid = unsafe { libc::getpid() };
+ let path = current_exe().expect("failed to find executable name");
+ let func_offset = 0;
+ let opts = UprobeOpts {
+ func_name: "uprobe_target".to_string(),
+ ..Default::default()
+ };
+ let _link = prog
+ .attach_uprobe_with_opts(pid, path, func_offset, opts)
+ .expect("failed to attach prog");
+
+ let map = get_map_mut(&mut obj, "ringbuf");
+ let action = || {
+ let _ = uprobe_target();
+ };
+ let result = with_ringbuffer(&map, action);
+
+ assert_eq!(result, 1);
+}
+
+/// Check that we can attach a BPF program to a uprobe and access the cookie
+/// provided during attach.
+#[tag(root)]
+#[test]
+fn test_object_uprobe_with_cookie() {
+ bump_rlimit_mlock();
+
+ let cookie_val = 5u16;
+ let mut obj = get_test_object("uprobe.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__uprobe_with_cookie");
+
+ let pid = unsafe { libc::getpid() };
+ let path = current_exe().expect("failed to find executable name");
+ let func_offset = 0;
+ let opts = UprobeOpts {
+ func_name: "uprobe_target".to_string(),
+ cookie: cookie_val.into(),
+ ..Default::default()
+ };
+ let _link = prog
+ .attach_uprobe_with_opts(pid, path, func_offset, opts)
+ .expect("failed to attach prog");
+
+ let map = get_map_mut(&mut obj, "ringbuf");
+ let action = || {
+ let _ = uprobe_target();
+ };
+ let result = with_ringbuffer(&map, action);
+
+ assert_eq!(result, cookie_val.into());
+}
+
+/// Check that we can link multiple object files.
+#[test]
+fn test_object_link_files() {
+ fn test(files: Vec<PathBuf>) {
+ let output_file = NamedTempFile::new().unwrap();
+
+ let mut linker = Linker::new(output_file.path()).unwrap();
+ let () = files
+ .into_iter()
+ .try_for_each(|file| linker.add_file(file))
+ .unwrap();
+ let () = linker.link().unwrap();
+
+ // Check that we can load the resulting object file.
+ let _object = ObjectBuilder::default()
+ .debug(true)
+ .open_file(output_file.path())
+ .unwrap();
+ }
+
+ let obj_path1 = get_test_object_path("usdt.bpf.o");
+ let obj_path2 = get_test_object_path("ringbuf.bpf.o");
+
+ test(vec![obj_path1.clone()]);
+ test(vec![obj_path1, obj_path2]);
+}
+
+/// Get access to the underlying per-cpu ring buffer data.
+fn buffer<'a>(perf: &'a libbpf_rs::PerfBuffer, buf_idx: usize) -> &'a [u8] {
+ let perf_buff_ptr = perf.as_libbpf_object();
+ let mut buffer_data_ptr: *mut c_void = ptr::null_mut();
+ let mut buffer_size: usize = 0;
+ let ret = unsafe {
+ libbpf_sys::perf_buffer__buffer(
+ perf_buff_ptr.as_ptr(),
+ buf_idx as i32,
+ ptr::addr_of_mut!(buffer_data_ptr),
+ ptr::addr_of_mut!(buffer_size) as *mut libbpf_sys::size_t,
+ )
+ };
+ assert!(ret >= 0);
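+ // SAFETY: on success libbpf reported back a valid pointer and size for the
+ // per-CPU buffer, which stay valid for the lifetime of `perf`.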
+ unsafe { slice::from_raw_parts(buffer_data_ptr as *const u8, buffer_size) }
+}
+
+/// Check that we can see the raw ring buffer of the perf buffer and find a
+/// value we have sent.
+#[tag(root)]
+#[test]
+fn test_object_perf_buffer_raw() {
+ use memmem::Searcher;
+ use memmem::TwoWaySearcher;
+
+ bump_rlimit_mlock();
+
+ let cookie_val = 42u16;
+ let mut obj = get_test_object("tracepoint.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__tracepoint_with_cookie_pb");
+
+ let opts = TracepointOpts {
+ cookie: cookie_val.into(),
+ ..TracepointOpts::default()
+ };
+ let _link = prog
+ .attach_tracepoint_with_opts("syscalls", "sys_enter_getpid", opts)
+ .expect("failed to attach prog");
+
+ let map = get_map_mut(&mut obj, "pb");
+ let cookie_bytes = cookie_val.to_ne_bytes();
+ let searcher = TwoWaySearcher::new(&cookie_bytes[..]);
+
+ let perf = libbpf_rs::PerfBufferBuilder::new(&map)
+ .build()
+ .expect("failed to build");
+
+ // Trigger an action that the tracepoint will observe
+ let _pid = unsafe { libc::getpid() };
+
+ let found_cookie = (0..perf.buffer_cnt()).any(|buf_idx| {
+ let buf = buffer(&perf, buf_idx);
+ searcher.search_in(buf).is_some()
+ });
+
+ assert!(found_cookie);
+}
+
+/// Check that we can get map pin status and map pin path
+#[tag(root)]
+#[test]
+fn test_map_pinned_status() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("map_auto_pin.bpf.o");
+ let map = get_map_mut(&mut obj, "auto_pin_map");
+ let is_pinned = map.is_pinned();
+ assert!(is_pinned);
+ let expected_path = "/sys/fs/bpf/auto_pin_map";
+ let get_path = map.get_pin_path().expect("get map pin path failed");
+ assert_eq!(expected_path, get_path.to_str().unwrap());
+ // cleanup
+ let _ = fs::remove_file(expected_path);
+}
+
+/// Change the root_pin_path and see if it works.
+#[tag(root)]
+#[test]
+fn test_map_pinned_status_with_pin_root_path() {
+ bump_rlimit_mlock();
+
+ let obj_path = get_test_object_path("map_auto_pin.bpf.o");
+ let mut obj = ObjectBuilder::default()
+ .debug(true)
+ .pin_root_path("/sys/fs/bpf/test_namespace")
+ .expect("root_pin_path failed")
+ .open_file(obj_path)
+ .expect("failed to open object")
+ .load()
+ .expect("failed to load object");
+
+ let map = get_map_mut(&mut obj, "auto_pin_map");
+ let is_pinned = map.is_pinned();
+ assert!(is_pinned);
+ let expected_path = "/sys/fs/bpf/test_namespace/auto_pin_map";
+ let get_path = map.get_pin_path().expect("get map pin path failed");
+ assert_eq!(expected_path, get_path.to_str().unwrap());
+ // cleanup
+ let _ = fs::remove_file(expected_path);
+ let _ = fs::remove_dir("/sys/fs/bpf/test_namespace");
+}
+
+/// Check that we can get program fd by id and vice versa.
+#[tag(root)]
+#[test]
+fn test_program_get_fd_and_id() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("runqslower.bpf.o");
+ let prog = get_prog_mut(&mut obj, "handle__sched_wakeup");
+ let prog_fd = prog.as_fd();
+ let prog_id = Program::get_id_by_fd(prog_fd).expect("failed to get program id by fd");
+ let _owned_prog_fd = Program::get_fd_by_id(prog_id).expect("failed to get program fd by id");
+}
+
+/// Check that autocreate disabled maps don't prevent object loading
+#[tag(root)]
+#[test]
+fn test_map_autocreate_disable() {
+ bump_rlimit_mlock();
+
+ let mut open_obj = open_test_object("map_auto_pin.bpf.o");
+ let mut auto_pin_map = open_obj
+ .maps_mut()
+ .find(|map| map.name() == OsStr::new("auto_pin_map"))
+ .expect("failed to find `auto_pin_map` map");
+ auto_pin_map
+ .set_autocreate(false)
+ .expect("set_autocreate() failed");
+
+ open_obj.load().expect("failed to load object");
+}
+
+/// Check that we can resize a map.
+#[tag(root)]
+#[test]
+fn test_map_resize() {
+ bump_rlimit_mlock();
+
+ let mut open_obj = open_test_object("map_auto_pin.bpf.o");
+ let mut resizable = open_obj
+ .maps_mut()
+ .find(|map| map.name() == OsStr::new(".data.resizable_data"))
+ .expect("failed to find `.data.resizable_data` map");
+
+ let len = resizable.initial_value().unwrap().len();
+ assert_eq!(len, size_of::<u64>());
+
+ let () = resizable
+ .set_value_size(len as u32 * 2)
+ .expect("failed to set value size");
+ let new_len = resizable.initial_value().unwrap().len();
+ assert_eq!(new_len, len * 2);
+}
+
+/// Check that we are able to attach using ksyscall
+#[tag(root)]
+#[test]
+fn test_attach_ksyscall() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("ksyscall.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "handle__ksyscall");
+ let _link = prog
+ .attach_ksyscall(false, "kill")
+ .expect("failed to attach prog");
+
+ let map = get_map_mut(&mut obj, "ringbuf");
+ let action = || {
+ // Send `SIGCHLD`, which is ignored by default, to our process.
+ let ret = unsafe { libc::kill(libc::getpid(), libc::SIGCHLD) };
+ if ret < 0 {
+ panic!("kill failed: {}", io::Error::last_os_error());
+ }
+ };
+ let result = with_ringbuffer(&map, action);
+
+ assert_eq!(result, 1);
+}
+
+/// Check that we can invoke a program directly.
+#[tag(root)]
+#[test]
+fn test_run_prog_success() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("run_prog.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "test_1");
+
+ #[repr(C)]
+ struct bpf_dummy_ops_state {
+ val: c_int,
+ }
+
+ let value = 42;
+ let state = bpf_dummy_ops_state { val: value };
+ let mut args = [addr_of!(state) as u64];
+ let input = ProgramInput {
+ context_in: Some(unsafe {
+ slice::from_raw_parts_mut(&mut args as *mut _ as *mut u8, size_of_val(&args))
+ }),
+ ..Default::default()
+ };
+ let output = prog.test_run(input).unwrap();
+ assert_eq!(output.return_value, value as _);
+}
+
+/// Check that we fail program invocation when providing insufficient arguments.
+#[tag(root)]
+#[test]
+fn test_run_prog_fail() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("run_prog.bpf.o");
+ let mut prog = get_prog_mut(&mut obj, "test_2");
+
+ let input = ProgramInput::default();
+ let _err = prog.test_run(input).unwrap_err();
+}
diff --git a/tests/test_print.rs b/tests/test_print.rs
new file mode 100644
index 0000000..2c4f3c3
--- /dev/null
+++ b/tests/test_print.rs
@@ -0,0 +1,80 @@
+//! This test is in its own file because the underlying libbpf_set_print function used by
+//! set_print() and ObjectBuilder::debug() sets global state. The default is to run multiple tests
+//! in different threads, so this test will always race with the others unless it's isolated in a
+//! different process.
+//!
+//! For the same reason, all tests here must run serially.
+
+use std::sync::atomic::AtomicBool;
+use std::sync::atomic::Ordering;
+
+use libbpf_rs::get_print;
+use libbpf_rs::set_print;
+use libbpf_rs::ObjectBuilder;
+use libbpf_rs::PrintCallback;
+use libbpf_rs::PrintLevel;
+use serial_test::serial;
+
+#[test]
+#[serial]
+fn test_set_print() {
+ static CORRECT_LEVEL: AtomicBool = AtomicBool::new(false);
+ static CORRECT_MESSAGE: AtomicBool = AtomicBool::new(false);
+
+ fn callback(level: PrintLevel, msg: String) {
+ if level == PrintLevel::Warn {
+ CORRECT_LEVEL.store(true, Ordering::Relaxed);
+ }
+
+ if msg.starts_with("libbpf: ") {
+ CORRECT_MESSAGE.store(true, Ordering::Relaxed);
+ }
+ }
+
+ set_print(Some((PrintLevel::Debug, callback)));
+ // expect_err requires that OpenObject implement Debug, which it does not.
+ let obj = ObjectBuilder::default().open_file("/dev/null");
+ assert!(obj.is_err(), "Successfully loaded /dev/null?");
+
+ let correct_level = CORRECT_LEVEL.load(Ordering::Relaxed);
+ let correct_message = CORRECT_MESSAGE.load(Ordering::Relaxed);
+ assert!(correct_level, "Did not capture a warning");
+ assert!(correct_message, "Did not capture the correct message");
+}
+
+#[test]
+#[serial]
+fn test_set_restore_print() {
+ fn callback1(_: PrintLevel, _: String) {
+ println!("one");
+ }
+ fn callback2(_: PrintLevel, _: String) {
+ println!("two");
+ }
+
+ set_print(Some((PrintLevel::Warn, callback1)));
+ let prev = get_print();
+ assert_eq!(prev, Some((PrintLevel::Warn, callback1 as PrintCallback)));
+
+ set_print(Some((PrintLevel::Debug, callback2)));
+ let prev = get_print();
+ assert_eq!(prev, Some((PrintLevel::Debug, callback2 as PrintCallback)));
+}
+
+#[test]
+#[serial]
+fn test_set_and_save_print() {
+ fn callback1(_: PrintLevel, _: String) {
+ println!("one");
+ }
+ fn callback2(_: PrintLevel, _: String) {
+ println!("two");
+ }
+
+ set_print(Some((PrintLevel::Warn, callback1)));
+ let prev = set_print(Some((PrintLevel::Debug, callback2)));
+ assert_eq!(prev, Some((PrintLevel::Warn, callback1 as PrintCallback)));
+
+ let prev = set_print(None);
+ assert_eq!(prev, Some((PrintLevel::Debug, callback2 as PrintCallback)));
+}
diff --git a/tests/test_tc.rs b/tests/test_tc.rs
new file mode 100644
index 0000000..085ada9
--- /dev/null
+++ b/tests/test_tc.rs
@@ -0,0 +1,392 @@
+#[allow(dead_code)]
+mod common;
+
+use std::os::unix::io::AsFd as _;
+use std::os::unix::io::BorrowedFd;
+
+use serial_test::serial;
+use test_tag::tag;
+
+use libbpf_rs::ErrorKind;
+use libbpf_rs::Result;
+use libbpf_rs::TcHook;
+use libbpf_rs::TcHookBuilder;
+use libbpf_rs::TC_CUSTOM;
+use libbpf_rs::TC_EGRESS;
+use libbpf_rs::TC_H_CLSACT;
+use libbpf_rs::TC_H_MIN_EGRESS;
+use libbpf_rs::TC_H_MIN_INGRESS;
+use libbpf_rs::TC_INGRESS;
+
+use crate::common::bump_rlimit_mlock;
+use crate::common::get_prog_mut;
+use crate::common::get_test_object;
+
+// do all TC tests on the lo network interface
+const LO_IFINDEX: i32 = 1;
+
+
+fn clear_clsact(fd: BorrowedFd) -> Result<()> {
+ // Ensure clean clsact tc qdisc
+ let mut destroyer = TcHook::new(fd);
+ destroyer
+ .ifindex(LO_IFINDEX)
+ .attach_point(TC_EGRESS | TC_INGRESS);
+
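+    // Destroying a qdisc that does not exist yields NotFound or
+    // InvalidInput; both mean the hook is already gone, so only
+    // propagate other errors.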
+ let res = destroyer.destroy();
+ if let Err(err) = &res {
+ if !matches!(err.kind(), ErrorKind::NotFound | ErrorKind::InvalidInput) {
+ return res;
+ }
+ }
+
+ Ok(())
+}
+
+#[tag(root)]
+#[test]
+#[serial]
+fn test_tc_basic_cycle() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("tc-unit.bpf.o");
+ let prog = get_prog_mut(&mut obj, "handle_tc");
+ let fd = prog.as_fd();
+
+ let mut tc_builder = TcHookBuilder::new(fd);
+ tc_builder
+ .ifindex(LO_IFINDEX)
+ .replace(true)
+ .handle(1)
+ .priority(1);
+ assert!(clear_clsact(fd).is_ok());
+
+ let mut egress = tc_builder.hook(TC_EGRESS);
+ assert!(egress.create().is_ok());
+ assert!(egress.attach().is_ok());
+ assert!(egress.query().is_ok());
+ assert!(egress.detach().is_ok());
+ assert!(egress.destroy().is_ok());
+ assert!(clear_clsact(fd).is_ok());
+
+    let mut ingress = tc_builder.hook(TC_INGRESS);
+ assert!(ingress.create().is_ok());
+ assert!(ingress.attach().is_ok());
+ assert!(ingress.query().is_ok());
+ assert!(ingress.detach().is_ok());
+ assert!(ingress.destroy().is_ok());
+ assert!(clear_clsact(fd).is_ok());
+
+ let mut custom = tc_builder.hook(TC_CUSTOM);
+ custom.parent(TC_H_CLSACT, TC_H_MIN_INGRESS);
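+    // TC_CUSTOM hooks cannot create() the clsact qdisc themselves; reuse
+    // the ingress hook to create it before attaching.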
+ assert!(ingress.create().is_ok());
+ assert!(custom.attach().is_ok());
+ assert!(custom.query().is_ok());
+ assert!(custom.detach().is_ok());
+ assert!(clear_clsact(fd).is_ok());
+}
+
+#[tag(root)]
+#[test]
+#[serial]
+fn test_tc_attach_no_qdisc() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("tc-unit.bpf.o");
+ let prog = get_prog_mut(&mut obj, "handle_tc");
+ let fd = prog.as_fd();
+
+ let mut tc_builder = TcHookBuilder::new(fd);
+ tc_builder
+ .ifindex(LO_IFINDEX)
+ .replace(true)
+ .handle(1)
+ .priority(1);
+ assert!(clear_clsact(fd).is_ok());
+
+ let mut egress = tc_builder.hook(TC_EGRESS);
+ let mut ingress = tc_builder.hook(TC_INGRESS);
+ let mut custom = tc_builder.hook(TC_CUSTOM);
+
+ assert!(egress.attach().is_err());
+ assert!(ingress.attach().is_err());
+ assert!(custom.attach().is_err());
+}
+
+#[tag(root)]
+#[test]
+#[serial]
+fn test_tc_attach_basic() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("tc-unit.bpf.o");
+ let prog = get_prog_mut(&mut obj, "handle_tc");
+ let fd = prog.as_fd();
+
+ let mut tc_builder = TcHookBuilder::new(fd);
+ tc_builder
+ .ifindex(LO_IFINDEX)
+ .replace(true)
+ .handle(1)
+ .priority(1);
+ assert!(clear_clsact(fd).is_ok());
+
+ let mut egress = tc_builder.hook(TC_EGRESS);
+ assert!(egress.attach().is_err());
+ assert!(egress.create().is_ok());
+ assert!(egress.attach().is_ok());
+ assert!(clear_clsact(fd).is_ok());
+
+ let mut ingress = tc_builder.hook(TC_INGRESS);
+ assert!(ingress.attach().is_err());
+ assert!(ingress.create().is_ok());
+ assert!(ingress.attach().is_ok());
+ assert!(clear_clsact(fd).is_ok());
+}
+
+#[tag(root)]
+#[test]
+#[serial]
+fn test_tc_attach_repeat() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("tc-unit.bpf.o");
+ let prog = get_prog_mut(&mut obj, "handle_tc");
+ let fd = prog.as_fd();
+
+ let mut tc_builder = TcHookBuilder::new(fd);
+ tc_builder
+ .ifindex(LO_IFINDEX)
+ .replace(true)
+ .handle(1)
+ .priority(1);
+ assert!(clear_clsact(fd).is_ok());
+
+ let mut egress = tc_builder.hook(TC_EGRESS);
+ assert!(egress.create().is_ok());
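+    // With replace(true) set on the builder, repeated attaches update the
+    // existing filter instead of failing.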
+ for _ in 0..10 {
+ assert!(egress.attach().is_ok());
+ }
+
+ let mut ingress = tc_builder.hook(TC_INGRESS);
+ for _ in 0..10 {
+ assert!(ingress.attach().is_ok());
+ }
+
+ let mut custom = tc_builder.hook(TC_CUSTOM);
+ custom.parent(TC_H_CLSACT, TC_H_MIN_EGRESS);
+ for _ in 0..10 {
+ assert!(custom.attach().is_ok());
+ }
+ custom.parent(TC_H_CLSACT, TC_H_MIN_INGRESS);
+ for _ in 0..10 {
+ assert!(custom.attach().is_ok());
+ }
+
+ assert!(clear_clsact(fd).is_ok());
+}
+
+#[tag(root)]
+#[test]
+#[serial]
+fn test_tc_attach_custom() {
+ bump_rlimit_mlock();
+ let mut obj = get_test_object("tc-unit.bpf.o");
+ let prog = get_prog_mut(&mut obj, "handle_tc");
+ let fd = prog.as_fd();
+
+ let mut tc_builder = TcHookBuilder::new(fd);
+ tc_builder
+ .ifindex(LO_IFINDEX)
+ .replace(true)
+ .handle(1)
+ .priority(1);
+ assert!(clear_clsact(fd).is_ok());
+
+    // destroy() ensures that the clsact tc qdisc does not exist, but BPF
+    // hooks need this qdisc in order to attach. For ingress and egress
+    // hooks, the create() method ensures that the clsact tc qdisc is
+    // available. Custom hooks cannot call create(), so we need an ingress,
+    // egress, or egress|ingress hook to create() the clsact tc qdisc for
+    // them.
+
+ let mut custom = tc_builder.hook(TC_CUSTOM);
+ custom.parent(TC_H_CLSACT, TC_H_MIN_INGRESS);
+ assert!(custom.attach().is_err());
+ assert!(custom.create().is_err());
+
+ let mut ingress_for_parent = tc_builder.hook(TC_INGRESS);
+ assert!(ingress_for_parent.create().is_ok());
+ assert!(custom.attach().is_ok());
+ assert!(clear_clsact(fd).is_ok());
+ assert!(custom.attach().is_err());
+
+ custom.parent(TC_H_CLSACT, TC_H_MIN_EGRESS);
+ assert!(ingress_for_parent.create().is_ok());
+ assert!(custom.attach().is_ok());
+ assert!(clear_clsact(fd).is_ok());
+ assert!(custom.attach().is_err());
+
+ let mut egress_for_parent = tc_builder.hook(TC_EGRESS);
+ assert!(egress_for_parent.create().is_ok());
+ assert!(custom.attach().is_ok());
+ assert!(clear_clsact(fd).is_ok());
+ assert!(custom.attach().is_err());
+
+ custom.parent(TC_H_CLSACT, TC_H_MIN_INGRESS);
+ assert!(egress_for_parent.create().is_ok());
+ assert!(custom.attach().is_ok());
+ assert!(clear_clsact(fd).is_ok());
+ assert!(custom.attach().is_err());
+}
+
+#[tag(root)]
+#[test]
+#[serial]
+fn test_tc_detach_basic() {
+ bump_rlimit_mlock();
+ let mut obj = get_test_object("tc-unit.bpf.o");
+ let prog = get_prog_mut(&mut obj, "handle_tc");
+ let fd = prog.as_fd();
+
+ let mut tc_builder = TcHookBuilder::new(fd);
+ tc_builder
+ .ifindex(LO_IFINDEX)
+ .replace(true)
+ .handle(1)
+ .priority(1);
+ assert!(clear_clsact(fd).is_ok());
+
+ let mut egress = tc_builder.hook(TC_EGRESS);
+ let mut ingress = tc_builder.hook(TC_INGRESS);
+ let mut custom = tc_builder.hook(TC_CUSTOM);
+ custom.parent(TC_H_CLSACT, TC_H_MIN_INGRESS);
+ custom.handle(2);
+
+ assert!(egress.create().is_ok());
+ assert!(egress.attach().is_ok());
+ assert!(ingress.attach().is_ok());
+ assert!(custom.attach().is_ok());
+
+ assert!(egress.detach().is_ok());
+ assert!(ingress.detach().is_ok());
+ assert!(custom.detach().is_ok());
+
+ // test for double detach, error is ENOENT
+ let is_enoent = |hook: &mut TcHook| {
+ if let Err(err) = hook.detach() {
+ err.kind() == ErrorKind::NotFound
+ } else {
+ false
+ }
+ };
+
+ assert!(is_enoent(&mut egress));
+ assert!(is_enoent(&mut ingress));
+ assert!(is_enoent(&mut custom));
+
+ assert!(clear_clsact(fd).is_ok());
+}
+
+#[tag(root)]
+#[test]
+#[serial]
+fn test_tc_query() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("tc-unit.bpf.o");
+ let prog = get_prog_mut(&mut obj, "handle_tc");
+ let fd = prog.as_fd();
+
+ let mut tc_builder = TcHookBuilder::new(fd);
+ tc_builder
+ .ifindex(LO_IFINDEX)
+ .replace(true)
+ .handle(1)
+ .priority(1);
+ assert!(clear_clsact(fd).is_ok());
+
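+    // query() succeeds only while a filter is attached; any detach,
+    // destroy, or qdisc teardown must make it fail again.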
+ let mut egress = tc_builder.hook(TC_EGRESS);
+ assert!(egress.create().is_ok());
+ assert!(egress.attach().is_ok());
+ assert!(egress.query().is_ok());
+
+ assert!(egress.detach().is_ok());
+ assert!(egress.query().is_err());
+
+ assert!(egress.attach().is_ok());
+ assert!(egress.query().is_ok());
+
+ assert!(egress.destroy().is_ok());
+ assert!(egress.query().is_err());
+
+ assert!(egress.attach().is_ok());
+ assert!(egress.query().is_ok());
+
+ assert!(clear_clsact(fd).is_ok());
+ assert!(egress.query().is_err());
+
+ let mut ingress = tc_builder.hook(TC_INGRESS);
+ assert!(ingress.create().is_ok());
+ assert!(ingress.attach().is_ok());
+ assert!(ingress.query().is_ok());
+
+ assert!(ingress.detach().is_ok());
+ assert!(ingress.query().is_err());
+
+ assert!(ingress.attach().is_ok());
+ assert!(ingress.query().is_ok());
+
+ assert!(ingress.destroy().is_ok());
+ assert!(ingress.query().is_err());
+
+ assert!(ingress.attach().is_ok());
+ assert!(ingress.query().is_ok());
+
+ assert!(clear_clsact(fd).is_ok());
+ assert!(ingress.query().is_err());
+
+ let mut custom = tc_builder.hook(TC_CUSTOM);
+ custom.parent(TC_H_CLSACT, TC_H_MIN_INGRESS);
+ assert!(ingress.create().is_ok());
+ assert!(custom.attach().is_ok());
+ assert!(custom.query().is_ok());
+
+ assert!(custom.detach().is_ok());
+ assert!(custom.query().is_err());
+
+ assert!(custom.attach().is_ok());
+ assert!(custom.query().is_ok());
+
+ assert!(clear_clsact(fd).is_ok());
+ assert!(custom.query().is_err());
+}
+
+#[tag(root)]
+#[test]
+#[serial]
+fn test_tc_double_create() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("tc-unit.bpf.o");
+ let prog = get_prog_mut(&mut obj, "handle_tc");
+ let fd = prog.as_fd();
+
+ let mut tc_builder = TcHookBuilder::new(fd);
+ tc_builder
+ .ifindex(LO_IFINDEX)
+ .replace(true)
+ .handle(1)
+ .priority(1);
+ assert!(clear_clsact(fd).is_ok());
+
+ let mut ingress = tc_builder.hook(TC_INGRESS);
+ let mut egress = tc_builder.hook(TC_EGRESS);
+
+ assert!(ingress.create().is_ok());
+ assert!(egress.create().is_ok());
+
+ assert!(clear_clsact(fd).is_ok());
+}
diff --git a/tests/test_xdp.rs b/tests/test_xdp.rs
new file mode 100644
index 0000000..02c38fb
--- /dev/null
+++ b/tests/test_xdp.rs
@@ -0,0 +1,71 @@
+#[allow(dead_code)]
+mod common;
+
+use std::os::fd::AsFd;
+
+use scopeguard::defer;
+
+use test_tag::tag;
+
+use libbpf_rs::Xdp;
+use libbpf_rs::XdpFlags;
+
+use crate::common::bump_rlimit_mlock;
+use crate::common::get_prog_mut;
+use crate::common::get_test_object;
+
+
+const LO_IFINDEX: i32 = 1;
+
+
+#[tag(root)]
+#[test]
+fn test_xdp() {
+ bump_rlimit_mlock();
+
+ let mut obj = get_test_object("xdp.bpf.o");
+ let prog = get_prog_mut(&mut obj, "xdp_filter");
+ let fd = prog.as_fd();
+
+ let mut obj1 = get_test_object("xdp.bpf.o");
+ let prog1 = get_prog_mut(&mut obj1, "xdp_filter");
+ let fd1 = prog1.as_fd();
+
+ let xdp_prog = Xdp::new(fd);
+ let xdp_prog1 = Xdp::new(fd1);
+
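+    // Ensure the program is detached from lo even if an assertion below
+    // panics.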
+ defer! {
+ xdp_prog.detach(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST).unwrap();
+ }
+
+ assert!(xdp_prog
+ .attach(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST)
+ .is_ok());
+
+ // Second attach should fail as a prog is already loaded
+ assert!(xdp_prog
+ .attach(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST)
+ .is_err());
+
+ assert!(xdp_prog
+ .query_id(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST)
+ .is_ok());
+
+ assert!(xdp_prog
+ .query(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST)
+ .is_ok());
+
+ let old_prog_id = xdp_prog
+ .query_id(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST)
+ .unwrap();
+ assert!(xdp_prog1.replace(LO_IFINDEX, fd).is_ok());
+ let new_prog_id = xdp_prog1
+ .query_id(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST)
+ .unwrap();
+ // If xdp prog is replaced, prog id should change.
+    assert_ne!(old_prog_id, new_prog_id);
+
+ assert!(xdp_prog
+ .detach(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST)
+ .is_ok());
+}