Merge "Fix transport override" into main
diff --git a/bpf/headers/include/bpf_helpers.h b/bpf/headers/include/bpf_helpers.h
index 199661d..e0c524f 100644
--- a/bpf/headers/include/bpf_helpers.h
+++ b/bpf/headers/include/bpf_helpers.h
@@ -236,6 +236,14 @@
         BPF_FUNC_ringbuf_reserve;
 static void (*bpf_ringbuf_submit_unsafe)(const void* data, __u64 flags) = (void*)
         BPF_FUNC_ringbuf_submit;
+// Raw kernel helpers backing BPF_MAP_TYPE_SK_STORAGE maps. These are "unsafe"
+// because they take untyped map/value pointers - prefer the type-safe accessors
+// generated by DEFINE_BPF_SK_STORAGE_EXT instead.
+static void* (*bpf_sk_storage_get_unsafe) (const struct bpf_map_def* sk_storage, const void* sk,
+                                           const void* value, unsigned long long flags) = (void*)
+        BPF_FUNC_sk_storage_get;
+static int (*bpf_sk_storage_delete_unsafe) (const struct bpf_map_def* sk_storage,
+                                            const void* sk) = (void*) BPF_FUNC_sk_storage_delete;
 
 #define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val)  \
         struct ____btf_map_##name {                     \
@@ -330,6 +335,35 @@
                            PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER,               \
                            LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
 
+// Type-safe macro to declare an sk_storage map and its typed accessor functions.
+// BPF_MAP_TYPE_SK_STORAGE was introduced in kernel 5.2, but this map type requires BTF,
+// and BTF is only enabled on kernels 5.10 or higher - hence the KVER_5_10 floor below.
+#define DEFINE_BPF_SK_STORAGE_EXT(the_map, ValueType, usr, grp, md, selinux, pindir,    \
+                                  share, min_loader, max_loader, ignore_eng,            \
+                                  ignore_user, ignore_userdebug, mapFlags)              \
+    DEFINE_BPF_MAP_BASE(the_map, SK_STORAGE, sizeof(uint32_t), sizeof(ValueType),       \
+                        0, usr, grp, md, selinux, pindir, share,                        \
+                        KVER_5_10, KVER_INF, min_loader, max_loader,                    \
+                        ignore_eng, ignore_user, ignore_userdebug, mapFlags);           \
+    BPF_ANNOTATE_KV_PAIR(the_map, uint32_t, ValueType);                                 \
+                                                                                        \
+    static inline __always_inline __unused ValueType* bpf_##the_map##_get(              \
+            const struct bpf_sock* sk, const ValueType* v, unsigned long long flags) {  \
+        return bpf_sk_storage_get_unsafe(&the_map, sk, v, flags);                       \
+    };                                                                                  \
+                                                                                        \
+    static inline __always_inline __unused int bpf_##the_map##_delete(                  \
+            const struct bpf_sock* sk) {                                                \
+        return bpf_sk_storage_delete_unsafe(&the_map, sk);                              \
+    };
+
+// Convenience wrapper using the standard net-shared owner/group/mode/pin settings.
+#define DEFINE_BPF_SK_STORAGE(the_map, TypeOfValue)                                      \
+    DEFINE_BPF_SK_STORAGE_EXT(the_map, TypeOfValue,                                      \
+                              AID_ROOT, AID_NET_BW_ACCT, 0060, "fs_bpf_net_shared", "",  \
+                              PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER,             \
+                              LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG, 0)
+
 /* There exist buggy kernels with pre-T OS, that due to
  * kernel patch "[ALPS05162612] bpf: fix ubsan error"
  * do not support userspace writes into non-zero index of bpf map arrays.
diff --git a/bpf/progs/netd.c b/bpf/progs/netd.c
index b146e45..60c5a1c 100644
--- a/bpf/progs/netd.c
+++ b/bpf/progs/netd.c
@@ -81,6 +81,9 @@
 DEFINE_BPF_MAP_RW_NETD(lock_array_test_map, ARRAY, uint32_t, bool, 1)
 DEFINE_BPF_MAP_RW_NETD(lock_hash_test_map, HASH, uint32_t, bool, 1)
 
+// Per-socket storage (keyed by the socket itself) holding an SkStorageValue.
+DEFINE_BPF_SK_STORAGE(sk_storage, SkStorageValue)
+
 /* never actually used from ebpf */
 DEFINE_BPF_MAP_NO_NETD(iface_index_name_map, HASH, uint32_t, IfaceValue, IFACE_INDEX_NAME_MAP_SIZE)
 
@@ -770,11 +772,30 @@
     return permissions ? *permissions : BPF_PERMISSION_INTERNET;
 }
 
+// Common socket-creation hook body, shared by the per-kernel-version programs below.
+// On 5.10+ kernels (where sk_storage exists) it caches the socket cookie in the
+// socket's sk_storage entry, then allows/denies creation based on INTERNET permission.
+static __always_inline inline int inet_socket_create(struct bpf_sock* sk,
+                                                     const struct kver_uint kver) {
+    if (KVER_IS_AT_LEAST(kver, 5, 10, 0)) {
+        // F_CREATE with a null value pointer: create the entry (zero-initialized) if absent.
+        SkStorageValue *v = bpf_sk_storage_get(sk, 0, BPF_SK_STORAGE_GET_F_CREATE);
+        if (v) v->cookie = bpf_get_sk_cookie(sk);
+    }
     return (get_app_permissions() & BPF_PERMISSION_INTERNET) ? BPF_ALLOW : BPF_DISALLOW;
 }
 
+// 5.10+ variant: may touch the sk_storage map (which is only defined for KVER_5_10+).
+DEFINE_NETD_BPF_PROG_KVER("cgroupsock/inet_create$5_10", inet_socket_create_5_10, KVER_5_10)
+(struct bpf_sock* sk) {
+    return inet_socket_create(sk, KVER_5_10);
+}
+
+// [4.14, 5.10) variant: the constant kver check lets the compiler elide the sk_storage access.
+DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupsock/inet_create$4_14",
+                                inet_socket_create_4_14, KVER_4_14, KVER_5_10)
+(struct bpf_sock* sk) {
+    return inet_socket_create(sk, KVER_4_14);
+}
+
 DEFINE_NETD_BPF_PROG_KVER("cgroupsockrelease/inet_release", inet_socket_release, KVER_5_10)
 (struct bpf_sock* sk) {
     uint64_t cookie = bpf_get_sk_cookie(sk);
diff --git a/bpf/progs/netd.h b/bpf/progs/netd.h
index 8400679..aa5f9cc 100644
--- a/bpf/progs/netd.h
+++ b/bpf/progs/netd.h
@@ -90,6 +90,13 @@
 } PacketTrace;
 STRUCT_SIZE(PacketTrace, 8+4+4 + 4+4 + 2+2 + 1+1+1+1);
 
+// Per-socket data kept in the sk_storage map: currently just the socket cookie,
+// recorded at socket creation time.
+typedef struct {
+    uint64_t cookie;
+} SkStorageValue;
+STRUCT_SIZE(SkStorageValue, 8);
+
 // Since we cannot garbage collect the stats map since device boot, we need to make these maps as
 // large as possible. The maximum size of number of map entries we can have is depend on the rlimit
 // of MEM_LOCK granted to netd. The memory space needed by each map can be calculated by the