[automerger skipped] net-test: fix nduseropt parsing when multiple are present am: 67260f7cd2 am: 21ed6691ad am: bab2222f61 -s ours am: 699fffea8c -s ours
am skip reason: Merged-In Iafc98babfc451125e8de7700a773d36b15a67b29 with SHA-1 7f4de3c4c9 is already in history
Original change: https://android-review.googlesource.com/c/kernel/tests/+/3326575
Change-Id: I06a32b15f8c1672d19375ee94ac03d4539a45a40
Signed-off-by: Automerger Merge Worker <[email protected]>
diff --git a/net/test/Android.bp b/net/test/Android.bp
index b16b81f..fe063ee 100644
--- a/net/test/Android.bp
+++ b/net/test/Android.bp
@@ -1,4 +1,5 @@
package {
+ default_team: "trendy_team_fwk_core_networking",
// See: http://go/android-license-faq
default_applicable_licenses: ["Android-Apache-2.0"],
}
@@ -20,5 +21,28 @@
},
},
test_config: "vts_kernel_net_tests.xml",
- test_suites: ["vts", "general-tests"],
+ test_suites: [
+ "vts",
+ "general-tests",
+ ],
+}
+
+// Main target used for GKI tests.
+// equivalent to above, but has 'gki.IS_GKI == True'
+python_test {
+ name: "gki_kernel_net_tests",
+ stem: "kernel_net_tests_bin",
+ srcs: [
+ "*.py",
+ ],
+ libs: [
+ "scapy",
+ ],
+ main: "all_tests_gki.py",
+ version: {
+ py3: {
+ embedded_launcher: true,
+ },
+ },
+ test_config: "vts_kernel_net_tests.xml",
}
diff --git a/net/test/OWNERS b/net/test/OWNERS
index 2108f19..76fba4e 100644
--- a/net/test/OWNERS
+++ b/net/test/OWNERS
@@ -1,5 +1,6 @@
# Bug component: 31808
[email protected]
+set noparent
[email protected]
+file:platform/packages/modules/Connectivity:main:/OWNERS_core_networking_xts
per-file build_rootfs.sh = [email protected], [email protected], [email protected]
diff --git a/net/test/all_tests.py b/net/test/all_tests.py
index 4fd20dd..422005f 100755
--- a/net/test/all_tests.py
+++ b/net/test/all_tests.py
@@ -19,23 +19,24 @@
import sys
import unittest
+import gki
import namespace
+import net_test
-test_modules = [
+all_test_modules = [
'anycast_test',
'bpf_test',
'csocket_test',
'cstruct_test',
+ 'kernel_feature_test',
'leak_test',
'multinetwork_test',
'neighbour_test',
'netlink_test',
'nf_test',
'parameterization_test',
- 'pf_key_test',
'ping6_test',
'policy_crash_test',
- 'removed_feature_test',
'resilient_rs_test',
'sock_diag_test',
'srcaddr_selection_test',
@@ -48,21 +49,23 @@
'xfrm_tunnel_test',
]
-if __name__ == '__main__':
- namespace.EnterNewNetworkNamespace()
- # If one or more tests were passed in on the command line, only run those.
- if len(sys.argv) > 1:
- test_modules = sys.argv[1:]
+def RunTests(modules_to_test):
+ print('Running on %s %s %s %s-%sbit%s%s'
+ % (os.uname()[0], os.uname()[2], net_test.LINUX_VERSION, os.uname()[4],
+ '64' if sys.maxsize > 0x7FFFFFFF else '32',
+ ' GKI' if gki.IS_GKI else '', ' GSI' if net_test.IS_GSI else ''),
+ file=sys.stderr)
+ namespace.EnterNewNetworkNamespace()
# First, run InjectTests on all modules, to ensure that any parameterized
# tests in those modules are injected.
- for name in test_modules:
+ for name in modules_to_test:
importlib.import_module(name)
if hasattr(sys.modules[name], 'InjectTests'):
sys.modules[name].InjectTests()
- test_suite = unittest.defaultTestLoader.loadTestsFromNames(test_modules)
+ test_suite = unittest.defaultTestLoader.loadTestsFromNames(modules_to_test)
assert test_suite.countTestCases() > 0, (
'Inconceivable: no tests found! Command line: %s' % ' '.join(sys.argv))
@@ -70,3 +73,11 @@
runner = unittest.TextTestRunner(verbosity=2)
result = runner.run(test_suite)
sys.exit(not result.wasSuccessful())
+
+
+if __name__ == '__main__':
+ # If one or more tests were passed in on the command line, only run those.
+ if len(sys.argv) > 1:
+ RunTests(sys.argv[1:])
+ else:
+ RunTests(all_test_modules)
diff --git a/net/test/all_tests.sh b/net/test/all_tests.sh
index aa63cdd..63576b0 100755
--- a/net/test/all_tests.sh
+++ b/net/test/all_tests.sh
@@ -18,10 +18,6 @@
readonly RETRIES=2
test_prefix=
-# The tests currently have hundreds of ResourceWarnings that make it hard
-# to see errors/failures. Disable this warning for now.
-export PYTHONWARNINGS="ignore::ResourceWarning"
-
function checkArgOrExit() {
if [[ $# -lt 2 ]]; then
echo "Missing argument for option $1" >&2
diff --git a/net/test/all_tests_gki.py b/net/test/all_tests_gki.py
new file mode 100755
index 0000000..cd87195
--- /dev/null
+++ b/net/test/all_tests_gki.py
@@ -0,0 +1,27 @@
+#!/usr/bin/python3
+#
+# Copyright 2024 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+import all_tests
+import gki
+
+if __name__ == '__main__':
+ gki.IS_GKI = True
+ if len(sys.argv) > 1:
+ all_tests.RunTests(sys.argv[1:])
+ else:
+ all_tests.RunTests(all_tests.all_test_modules)
diff --git a/net/test/bpf.py b/net/test/bpf.py
index b96c82a..bc46e95 100755
--- a/net/test/bpf.py
+++ b/net/test/bpf.py
@@ -17,6 +17,7 @@
"""kernel net test library for bpf testing."""
import ctypes
+import errno
import os
import resource
import socket
@@ -50,7 +51,8 @@
# ACK android12-5.10 was >= 5.10.168 without this support only for ~4.5 hours
# ACK android13-4.10 was >= 5.10.168 without this support only for ~25 hours
# as such we can >= 5.10.168 instead of > 5.10.168
-HAVE_SO_NETNS_COOKIE = net_test.LINUX_VERSION >= (5, 10, 168)
+# Additionally require support to be backported to any 5.10+ non-GKI/GSI kernel.
+HAVE_SO_NETNS_COOKIE = net_test.LINUX_VERSION >= (5, 10, 168) or net_test.NonGXI(5, 10)
# Note: This is *not* correct for parisc & sparc architectures
SO_NETNS_COOKIE = 71
@@ -69,6 +71,15 @@
BPF_OBJ_GET = 7
BPF_PROG_ATTACH = 8
BPF_PROG_DETACH = 9
+BPF_PROG_TEST_RUN = 10
+BPF_PROG_GET_NEXT_ID = 11
+BPF_MAP_GET_NEXT_ID = 12
+BPF_PROG_GET_FD_BY_ID = 13
+BPF_MAP_GET_FD_BY_ID = 14
+BPF_OBJ_GET_INFO_BY_FD = 15
+BPF_PROG_QUERY = 16
+
+# setsockopt SOL_SOCKET constants
SO_ATTACH_BPF = 50
# BPF map type constant.
@@ -194,11 +205,14 @@
" license log_level log_size log_buf kern_version")
BpfAttrProgAttach = cstruct.Struct(
"bpf_attr_prog_attach", "=III", "target_fd attach_bpf_fd attach_type")
+BpfAttrGetFdById = cstruct.Struct(
+ "bpf_attr_get_fd_by_id", "=III", "id next_id open_flags")
+BpfAttrProgQuery = cstruct.Struct(
+ "bpf_attr_prog_query", "=IIIIQIQ", "target_fd attach_type query_flags attach_flags prog_ids_ptr prog_cnt prog_attach_flags")
BpfInsn = cstruct.Struct("bpf_insn", "=BBhi", "code dst_src_reg off imm")
# pylint: enable=invalid-name
libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
-HAVE_EBPF_4_19 = net_test.LINUX_VERSION >= (4, 19, 0)
HAVE_EBPF_5_4 = net_test.LINUX_VERSION >= (5, 4, 0)
# set memlock resource 1 GiB
@@ -289,7 +303,48 @@
# Detach a eBPF filter from a cgroup
def BpfProgDetach(target_fd, prog_type):
attr = BpfAttrProgAttach((target_fd, 0, prog_type))
- return BpfSyscall(BPF_PROG_DETACH, attr)
+ try:
+ return BpfSyscall(BPF_PROG_DETACH, attr)
+ except socket.error as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+
+# Convert a BPF program ID into an open file descriptor
+def BpfProgGetFdById(prog_id):
+ if prog_id is None:
+ return None
+ attr = BpfAttrGetFdById((prog_id, 0, 0))
+ return BpfSyscall(BPF_PROG_GET_FD_BY_ID, attr)
+
+
+# Convert a BPF map ID into an open file descriptor
+def BpfMapGetFdById(map_id):
+ if map_id is None:
+ return None
+ attr = BpfAttrGetFdById((map_id, 0, 0))
+ return BpfSyscall(BPF_MAP_GET_FD_BY_ID, attr)
+
+
+# Return BPF program id attached to a given cgroup & attach point
+# Note: as written this only supports a *single* program per attach point
+def BpfProgQuery(target_fd, attach_type, query_flags, attach_flags):
+ prog_id = ctypes.c_uint32(-1)
+ minus_one = prog_id.value # but unsigned, so really 4294967295
+ attr = BpfAttrProgQuery((target_fd, attach_type, query_flags, attach_flags, ctypes.addressof(prog_id), 1, 0))
+ if BpfSyscall(BPF_PROG_QUERY, attr) == 0:
+ # to see kernel updates we have to convert back from the buffer that actually went to the kernel...
+ attr._Parse(attr._buffer)
+ assert attr.prog_cnt >= 0, "prog_cnt is %s" % attr.prog_cnt
+ assert attr.prog_cnt <= 1, "prog_cnt is %s" % attr.prog_cnt # we don't support more atm
+ if attr.prog_cnt == 0:
+ assert prog_id.value == minus_one, "prog_id is %s" % prog_id
+ return None
+ else:
+ assert prog_id.value != minus_one, "prog_id is %s" % prog_id
+ return prog_id.value
+ else:
+ return None
# BPF program command constructors
diff --git a/net/test/bpf_test.py b/net/test/bpf_test.py
index 343ca97..2826b01 100755
--- a/net/test/bpf_test.py
+++ b/net/test/bpf_test.py
@@ -18,7 +18,6 @@
import errno
import os
import socket
-import tempfile
import unittest
import bpf
@@ -67,7 +66,9 @@
from bpf import BpfProgAttach
from bpf import BpfProgAttachSocket
from bpf import BpfProgDetach
+from bpf import BpfProgGetFdById
from bpf import BpfProgLoad
+from bpf import BpfProgQuery
from bpf import BpfRawInsn
from bpf import BpfStMem
from bpf import BpfStxMem
@@ -86,7 +87,7 @@
KEY_SIZE = 4
VALUE_SIZE = 4
TOTAL_ENTRIES = 20
-TEST_UID = 54321
+TEST_UID = 5432
TEST_GID = 12345
# Offset to store the map key in stack register REG10
key_offset = -8
@@ -209,17 +210,17 @@
def setUp(self):
super(BpfTest, self).setUp()
- self.map_fd = -1
- self.prog_fd = -1
+ self.map_fd = None
+ self.prog_fd = None
self.sock = None
def tearDown(self):
- if self.prog_fd >= 0:
+ if self.prog_fd is not None:
os.close(self.prog_fd)
- self.prog_fd = -1
- if self.map_fd >= 0:
+ self.prog_fd = None
+ if self.map_fd is not None:
os.close(self.map_fd)
- self.map_fd = -1
+ self.map_fd = None
if self.sock:
self.sock.close()
self.sock = None
@@ -437,7 +438,8 @@
@unittest.skipUnless(bpf.HAVE_SO_NETNS_COOKIE, "no SO_NETNS_COOKIE support")
def testGetNetNsCookie(self):
sk = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, 0)
- cookie = sk.getsockopt(socket.SOL_SOCKET, bpf.SO_NETNS_COOKIE, 8) # sizeof(u64) == 8
+ sizeof_u64 = 8
+ cookie = sk.getsockopt(socket.SOL_SOCKET, bpf.SO_NETNS_COOKIE, sizeof_u64)
sk.close()
self.assertEqual(len(cookie), 8)
cookie = int.from_bytes(cookie, "little")
@@ -494,35 +496,59 @@
@classmethod
def setUpClass(cls):
super(BpfCgroupTest, cls).setUpClass()
+ # os.open() throws exception on failure
cls._cg_fd = os.open("/sys/fs/cgroup", os.O_DIRECTORY | os.O_RDONLY)
@classmethod
def tearDownClass(cls):
- os.close(cls._cg_fd)
+ if cls._cg_fd is not None:
+ os.close(cls._cg_fd)
+ cls._cg_fd = None
super(BpfCgroupTest, cls).tearDownClass()
def setUp(self):
super(BpfCgroupTest, self).setUp()
- self.prog_fd = -1
- self.map_fd = -1
+ self.prog_fd = None
+ self.map_fd = None
+ self.cg_inet_ingress = BpfProgGetFdById(
+ BpfProgQuery(self._cg_fd, BPF_CGROUP_INET_INGRESS, 0, 0))
+ self.cg_inet_egress = BpfProgGetFdById(
+ BpfProgQuery(self._cg_fd, BPF_CGROUP_INET_EGRESS, 0, 0))
+ self.cg_inet_sock_create = BpfProgGetFdById(
+ BpfProgQuery(self._cg_fd, BPF_CGROUP_INET_SOCK_CREATE, 0, 0))
+ if self.cg_inet_ingress:
+ BpfProgDetach(self._cg_fd, BPF_CGROUP_INET_INGRESS)
+ if self.cg_inet_egress:
+ BpfProgDetach(self._cg_fd, BPF_CGROUP_INET_EGRESS)
+ if self.cg_inet_sock_create:
+ BpfProgDetach(self._cg_fd, BPF_CGROUP_INET_SOCK_CREATE)
def tearDown(self):
- if self.prog_fd >= 0:
+ if self.prog_fd is not None:
os.close(self.prog_fd)
- if self.map_fd >= 0:
+ self.prog_fd = None
+ if self.map_fd is not None:
os.close(self.map_fd)
- try:
- BpfProgDetach(self._cg_fd, BPF_CGROUP_INET_EGRESS)
- except socket.error:
- pass
- try:
+ self.map_fd = None
+ if self.cg_inet_ingress is None:
BpfProgDetach(self._cg_fd, BPF_CGROUP_INET_INGRESS)
- except socket.error:
- pass
- try:
+ else:
+ BpfProgAttach(self.cg_inet_ingress, self._cg_fd, BPF_CGROUP_INET_INGRESS)
+ os.close(self.cg_inet_ingress)
+ self.cg_inet_ingress = None
+ if self.cg_inet_egress is None:
+ BpfProgDetach(self._cg_fd, BPF_CGROUP_INET_EGRESS)
+ else:
+ BpfProgAttach(self.cg_inet_egress, self._cg_fd, BPF_CGROUP_INET_EGRESS)
+ os.close(self.cg_inet_egress)
+ self.cg_inet_egress = None
+ if self.cg_inet_sock_create is None:
BpfProgDetach(self._cg_fd, BPF_CGROUP_INET_SOCK_CREATE)
- except socket.error:
- pass
+ else:
+ BpfProgAttach(self.cg_inet_sock_create, self._cg_fd,
+ BPF_CGROUP_INET_SOCK_CREATE)
+ os.close(self.cg_inet_sock_create)
+ self.cg_inet_sock_create = None
super(BpfCgroupTest, self).tearDown()
def testCgroupBpfAttach(self):
@@ -572,23 +598,32 @@
self.assertEqual(packet_count, LookupMap(self.map_fd, uid).value)
BpfProgDetach(self._cg_fd, BPF_CGROUP_INET_INGRESS)
- def checkSocketCreate(self, family, socktype, success):
+ def checkSocketCreate(self, family, socktype, sockproto, success):
try:
- sock = socket.socket(family, socktype, 0)
+ sock = socket.socket(family, socktype, sockproto)
sock.close()
except socket.error as e:
if success:
- self.fail("Failed to create socket family=%d type=%d err=%s" %
- (family, socktype, os.strerror(e.errno)))
+ self.fail("Failed to create socket family=%d type=%d proto=%d err=%s" %
+ (family, socktype, sockproto, os.strerror(e.errno)))
return
if not success:
- self.fail("unexpected socket family=%d type=%d created, should be blocked"
- % (family, socktype))
+ self.fail("unexpected socket family=%d type=%d proto=%d created, "
+ "should be blocked" % (family, socktype, sockproto))
+
+ def testPfKeySocketCreate(self):
+ # AF_KEY socket type. See include/linux/socket.h.
+ AF_KEY = 15 # pylint: disable=invalid-name
+
+ # PFKEYv2 constants. See include/uapi/linux/pfkeyv2.h.
+ PF_KEY_V2 = 2 # pylint: disable=invalid-name
+
+ self.checkSocketCreate(AF_KEY, socket.SOCK_RAW, PF_KEY_V2, True)
def trySocketCreate(self, success):
for family in [socket.AF_INET, socket.AF_INET6]:
for socktype in [socket.SOCK_DGRAM, socket.SOCK_STREAM]:
- self.checkSocketCreate(family, socktype, success)
+ self.checkSocketCreate(family, socktype, 0, success)
def testCgroupSocketCreateBlock(self):
instructions = [
@@ -597,8 +632,22 @@
BpfJumpImm(BPF_JNE, BPF_REG_0, TEST_UID, 2),
]
instructions += INS_BPF_EXIT_BLOCK + INS_CGROUP_ACCEPT
+
+ fd = BpfProgGetFdById(
+ BpfProgQuery(self._cg_fd, BPF_CGROUP_INET_SOCK_CREATE, 0, 0))
+ assert fd is None
+
self.prog_fd = BpfProgLoad(BPF_PROG_TYPE_CGROUP_SOCK, instructions)
BpfProgAttach(self.prog_fd, self._cg_fd, BPF_CGROUP_INET_SOCK_CREATE)
+
+ fd = BpfProgGetFdById(
+ BpfProgQuery(self._cg_fd, BPF_CGROUP_INET_SOCK_CREATE, 0, 0))
+ assert fd is not None
+ # equality while almost certain is not actually 100% guaranteed:
+ assert fd >= self.prog_fd + 1
+ os.close(fd)
+ fd = None
+
with net_test.RunAsUid(TEST_UID):
# Socket creation with target uid should fail
self.trySocketCreate(False)
diff --git a/net/test/build_rootfs.sh b/net/test/build_rootfs.sh
index ee79c86..5382a68 100755
--- a/net/test/build_rootfs.sh
+++ b/net/test/build_rootfs.sh
@@ -41,7 +41,7 @@
disk=
dtb=
-while getopts ":hs:a:m:n:r:k:i:d:eg" opt; do
+while getopts ":hs:a:m:n:r:k:O:i:d:eg" opt; do
case "${opt}" in
h)
usage
@@ -68,6 +68,9 @@
k)
kernel="${OPTARG}"
;;
+ O)
+ extradeb="${OPTARG}"
+ ;;
i)
initramfs="${OPTARG}"
;;
@@ -379,10 +382,10 @@
rootfs_partition_offset=$((${rootfs_partition_start} * 512))
rootfs_partition_tempfile2=$(mktemp)
dd if="${disk}" of="${rootfs_partition_tempfile2}" bs=512 skip=${rootfs_partition_start} count=${rootfs_partition_num_sectors}
- e2fsck -p -f "${rootfs_partition_tempfile2}" || true
+ /sbin/e2fsck -p -f "${rootfs_partition_tempfile2}" || true
dd if="${rootfs_partition_tempfile2}" of="${disk}" bs=512 seek=${rootfs_partition_start} count=${rootfs_partition_num_sectors} conv=fsync,notrunc
rm -f "${rootfs_partition_tempfile2}"
- e2fsck -fy "${disk}"?offset=${rootfs_partition_offset} || true
+ /sbin/e2fsck -fy "${disk}"?offset=${rootfs_partition_offset} || true
fi
if [[ -n "${system_partition}" ]]; then
system_partition_start=$(partx -g -o START -s -n "${system_partition}" "${disk}" | xargs)
@@ -458,6 +461,11 @@
sudo cp -a "${kernel}" "${mount}/boot/vmlinuz-${kernel_version}"
sudo chown root:root "${mount}/boot/vmlinuz-${kernel_version}"
fi
+ sudo cp -a "${SCRIPT_DIR}"/rootfs/cron-run-installer-script "${mount}/etc/cron.d/cron-run-installer-script"
+ if [ -e "${extradeb}" ]; then
+ sudo cp -a "${extradeb}" "${mount}/root/extradeb.tar.gz"
+ sudo chown root:root "${mount}/root/extradeb.tar.gz"
+ fi
else
if [[ "${embed_kernel_initrd_dtb}" = "1" ]]; then
if [ -n "${dtb}" ]; then
@@ -466,6 +474,10 @@
fi
e2cp -G 0 -O 0 "${kernel}" "${rootfs_partition_tempfile}":"/boot/vmlinuz-${kernel_version}"
fi
+ e2cp -G 0 -O 0 "${SCRIPT_DIR}"/rootfs/cron-run-installer-script "${rootfs_partition_tempfile}":"/etc/cron.d/cron-run-installer-script"
+ if [ -e "${extradeb}" ]; then
+ e2cp -G 0 -O 0 "${extradeb}" "${rootfs_partition_tempfile}":"/root/extradeb.tar.gz"
+ fi
fi
# Unmount the initial ramdisk
@@ -493,7 +505,7 @@
-device pci-serial,chardev=exitcode \
-netdev user,id=usernet0,ipv6=off \
-device virtio-net-pci-non-transitional,netdev=usernet0,id=net0 \
- -append "root=LABEL=ROOT init=/root/${suite}.sh ${cmdline}"
+ -append "root=LABEL=ROOT installer_script=/root/${suite}.sh ${cmdline}"
[[ -s exitcode ]] && exitcode=$(cat exitcode | tr -d '\r') || exitcode=2
rm -f exitcode
if [ "${exitcode}" != "0" ]; then
@@ -511,10 +523,10 @@
rootfs_partition_offset=$((${rootfs_partition_start} * 512))
rootfs_partition_tempfile2=$(mktemp)
dd if="${disk}" of="${rootfs_partition_tempfile2}" bs=512 skip=${rootfs_partition_start} count=${rootfs_partition_num_sectors}
- e2fsck -p -f "${rootfs_partition_tempfile2}" || true
+ /sbin/e2fsck -p -f "${rootfs_partition_tempfile2}" || true
dd if="${rootfs_partition_tempfile2}" of="${disk}" bs=512 seek=${rootfs_partition_start} count=${rootfs_partition_num_sectors} conv=fsync,notrunc
rm -f "${rootfs_partition_tempfile2}"
- e2fsck -fy "${disk}"?offset=${rootfs_partition_offset} || true
+ /sbin/e2fsck -fy "${disk}"?offset=${rootfs_partition_offset} || true
fi
if [[ -n "${system_partition}" ]]; then
system_partition_start=$(partx -g -o START -s -n "${system_partition}" "${disk}" | xargs)
diff --git a/net/test/gki.py b/net/test/gki.py
new file mode 100755
index 0000000..77f1ea9
--- /dev/null
+++ b/net/test/gki.py
@@ -0,0 +1,18 @@
+#!/usr/bin/python3
+#
+# Copyright 2024 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# all_tests_gki.py sets this to True
+IS_GKI = False
diff --git a/net/test/iproute.py b/net/test/iproute.py
index d61698c..307f836 100644
--- a/net/test/iproute.py
+++ b/net/test/iproute.py
@@ -221,6 +221,18 @@
IFLA_INFO_DATA = 2
IFLA_INFO_XSTATS = 3
+IFLA_INET_CONF = 1
+
+IFLA_INET6_FLAGS = 1
+IFLA_INET6_CONF = 2
+IFLA_INET6_STATS = 3
+IFLA_INET6_MCAST = 4
+IFLA_INET6_CACHEINFO = 5
+IFLA_INET6_ICMP6STATS = 6
+IFLA_INET6_TOKEN = 7
+IFLA_INET6_ADDR_GEN_MODE = 8
+IFLA_INET6_RA_MTU = 9
+
IFLA_XFRM_UNSPEC = 0
IFLA_XFRM_LINK = 1
IFLA_XFRM_IF_ID = 2
@@ -233,11 +245,24 @@
IFLA_VTI_LOCAL = 4
IFLA_VTI_REMOTE = 5
+# include/net/if_inet6.h
+IF_RA_OTHERCONF = 0x80
+IF_RA_MANAGED = 0x40
+IF_RA_RCVD = 0x20
+IF_RS_SENT = 0x10
+IF_READY = 0x80000000
+
+# Hack to use _ParseAttributes to parse family-specific interface attributes.
+# These are not actual kernel constants.
+IFLA_AF_SPEC_AF_INET = AF_INET
+IFLA_AF_SPEC_AF_INET6 = AF_INET6
+
CONSTANT_PREFIXES = netlink.MakeConstantPrefixes(
["RTM_", "RTN_", "RTPROT_", "RT_SCOPE_", "RT_TABLE_", "RTA_", "RTMGRP_",
"RTNLGRP_", "RTAX_", "IFA_", "IFA_F_", "NDA_", "FRA_", "IFLA_",
- "IFLA_INFO_", "IFLA_XFRM_", "IFLA_VTI_"])
+ "IFLA_INFO_", "IFLA_XFRM_", "IFLA_VTI_", "IFLA_AF_SPEC_", "IFLA_INET_",
+ "IFLA_INET6_"])
def CommandVerb(command):
@@ -300,6 +325,12 @@
name = self._GetConstantName(nla_type, "IFLA_INFO_")
elif lastnested == "IFLA_INFO_DATA":
name = self._GetConstantName(nla_type, "IFLA_VTI_")
+ elif lastnested == "IFLA_AF_SPEC":
+ name = self._GetConstantName(nla_type, "IFLA_AF_SPEC_")
+ elif lastnested == "IFLA_AF_SPEC_AF_INET":
+ name = self._GetConstantName(nla_type, "IFLA_INET_")
+ elif lastnested == "IFLA_AF_SPEC_AF_INET6":
+ name = self._GetConstantName(nla_type, "IFLA_INET6_")
elif CommandSubject(command) == "ADDR":
name = self._GetConstantName(nla_type, "IFA_")
elif CommandSubject(command) == "LINK":
@@ -320,21 +351,37 @@
"IFLA_PROMISCUITY", "IFLA_NUM_RX_QUEUES",
"IFLA_NUM_TX_QUEUES", "NDA_PROBES", "RTAX_MTU",
"RTAX_HOPLIMIT", "IFLA_CARRIER_CHANGES", "IFLA_GSO_MAX_SEGS",
- "IFLA_GSO_MAX_SIZE", "RTA_UID"]:
+ "IFLA_GSO_MAX_SIZE", "RTA_UID", "IFLA_INET6_FLAGS"]:
data = struct.unpack("=I", nla_data)[0]
- elif name in ["IFLA_VTI_OKEY", "IFLA_VTI_IKEY"]:
+ # HACK: the code cannot distinguish between IFLA_VTI_OKEY and
+ # IFLA_INET6_STATS, because they have the same values and similar context:
+ # they're both in an IFLA_INFO_DATA attribute, and knowing which one is
+ # being used requires remembering the IFLA_INFO_KIND attribute which is a
+ # peer of the IFLA_INFO_DATA).
+ # TODO: support parsing attributes whose meaning depends on the value of
+ # attributes that don't directly contain them.
+ # For now, disambiguate by checking the length.
+ elif name in ["IFLA_VTI_OKEY", "IFLA_VTI_IKEY"] and len(nla_data) == 4:
data = struct.unpack("!I", nla_data)[0]
elif name == "FRA_SUPPRESS_PREFIXLEN":
data = struct.unpack("=i", nla_data)[0]
- elif name in ["IFLA_LINKMODE", "IFLA_OPERSTATE", "IFLA_CARRIER"]:
+ elif name in ["IFLA_LINKMODE", "IFLA_OPERSTATE", "IFLA_CARRIER",
+ "IFLA_INET6_ADDR_GEN_MODE"]:
data = ord(nla_data)
elif name in ["IFA_ADDRESS", "IFA_LOCAL", "RTA_DST", "RTA_SRC",
"RTA_GATEWAY", "RTA_PREFSRC", "NDA_DST"]:
data = socket.inet_ntop(msg.family, nla_data)
+ elif name in ["IFLA_INET_CONF", "IFLA_INET6_CONF"]:
+ data = [struct.unpack("=I", nla_data[i:i+4])[0]
+ for i in range(0, len(nla_data), 4)]
+ elif name == "IFLA_INET6_TOKEN":
+ data = socket.inet_ntop(AF_INET6, nla_data)
elif name in ["FRA_IIFNAME", "FRA_OIFNAME", "IFLA_IFNAME", "IFLA_QDISC",
"IFA_LABEL", "IFLA_INFO_KIND"]:
data = nla_data.strip(b"\x00")
- elif name in ["RTA_METRICS", "IFLA_LINKINFO", "IFLA_INFO_DATA"]:
+ elif name in ["RTA_METRICS", "IFLA_LINKINFO", "IFLA_INFO_DATA",
+ "IFLA_AF_SPEC", "IFLA_AF_SPEC_AF_INET",
+ "IFLA_AF_SPEC_AF_INET6"]:
data = self._ParseAttributes(command, None, nla_data, nested + [name])
elif name == "RTA_CACHEINFO":
data = RTACacheinfo(nla_data)
@@ -549,6 +596,7 @@
if version == 6:
self._WaitForAddress(sock, address, ifindex)
+ sock.close()
def DelAddress(self, address, prefixlen, ifindex):
self._Address(csocket.AddressVersion(address),
@@ -702,6 +750,17 @@
stats = self.GetIfaceStats(dev_name)
return stats.rx_packets, stats.tx_packets
+ def GetIflaAfSpecificData(self, dev_name, family):
+ _, attrs = self.GetIfinfo(dev_name)
+ attrs = self._ParseAttributes(RTM_NEWLINK, IfinfoMsg, attrs, [])
+ if family == AF_INET:
+ attrname = "IFLA_AF_SPEC_AF_INET"
+ elif family == AF_INET6:
+ attrname = "IFLA_AF_SPEC_AF_INET6"
+ else:
+ raise ValueError("Unsupported address family %d" % family)
+ return attrs["IFLA_AF_SPEC"][attrname]
+
def CreateVirtualTunnelInterface(self, dev_name, local_addr, remote_addr,
i_key=None, o_key=None, is_update=False):
"""
diff --git a/net/test/kernel_feature_test.py b/net/test/kernel_feature_test.py
new file mode 100755
index 0000000..2594a82
--- /dev/null
+++ b/net/test/kernel_feature_test.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python3
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import errno
+import gzip
+import os
+from socket import * # pylint: disable=wildcard-import,g-importing-member
+import unittest
+
+import gki
+import net_test
+
+
+class KernelFeatureTest(net_test.NetworkTest):
+ KCONFIG = None
+ AID_NET_RAW = 3004
+
+ @classmethod
+ def getKernelConfigFile(cls):
+ try:
+ return gzip.open("/proc/config.gz", mode="rt")
+ except FileNotFoundError:
+ return open("/boot/config-" + os.uname()[2], mode="rt")
+
+ @classmethod
+ def loadKernelConfig(cls):
+ cls.KCONFIG = {}
+ with cls.getKernelConfigFile() as f:
+ for line in f:
+ line = line.strip()
+ parts = line.split("=")
+ if (len(parts) == 2):
+ # Lines of the form:
+ # CONFIG_FOO=y
+ cls.KCONFIG[parts[0]] = parts[1]
+
+ @classmethod
+ def setUpClass(cls):
+ super(net_test.NetworkTest, cls).setUpClass()
+ cls.loadKernelConfig()
+
+ def assertFeatureAbsent(self, feature_name):
+ return self.assertNotIn(feature_name, self.KCONFIG)
+
+ def assertFeatureBuiltIn(self, feature_name):
+ return self.assertEqual("y", self.KCONFIG[feature_name])
+
+ def assertFeatureModular(self, feature_name):
+ return self.assertEqual("m", self.KCONFIG[feature_name])
+
+ def assertFeatureEnabled(self, feature_name):
+ return self.assertIn(self.KCONFIG[feature_name], ["m", "y"])
+
+ def testNetfilterRejectEnabled(self):
+ """Verify that CONFIG_IP{,6}_NF_{FILTER,TARGET_REJECT} is enabled."""
+ self.assertFeatureBuiltIn("CONFIG_IP_NF_FILTER")
+ self.assertFeatureBuiltIn("CONFIG_IP_NF_TARGET_REJECT")
+
+ self.assertFeatureBuiltIn("CONFIG_IP6_NF_FILTER")
+ self.assertFeatureBuiltIn("CONFIG_IP6_NF_TARGET_REJECT")
+
+ def testRemovedAndroidParanoidNetwork(self):
+ """Verify that ANDROID_PARANOID_NETWORK is gone.
+
+ On a 4.14-q kernel you can achieve this by simply
+ changing the ANDROID_PARANOID_NETWORK default y to n
+ in your kernel source code in net/Kconfig:
+
+ @@ -94,3 +94,3 @@ endif # if INET
+ config ANDROID_PARANOID_NETWORK
+ bool "Only allow certain groups to create sockets"
+ - default y
+ + default n
+ """
+ with net_test.RunAsUidGid(12345, self.AID_NET_RAW):
+ self.assertRaisesErrno(errno.EPERM, socket, AF_PACKET, SOCK_RAW, 0)
+
+ @unittest.skipUnless(net_test.IS_GSI, "not GSI")
+ def testIsGSI(self):
+ pass
+
+ @unittest.skipUnless(gki.IS_GKI, "not GKI")
+ def testIsGKI(self):
+ pass
+
+ @unittest.skipUnless(not net_test.IS_GSI and not gki.IS_GKI, "GSI or GKI")
+ def testMinRequiredKernelVersion(self):
+ self.assertTrue(net_test.KernelAtLeast([(4, 19, 236),
+ (5, 4, 186),
+ (5, 10, 199),
+ (5, 15, 136),
+ (6, 1, 57)]),
+ "%s [%s] is too old." % (os.uname()[2], os.uname()[4]))
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/net/test/leak_test.py b/net/test/leak_test.py
index 54bbe73..9b7d2c6 100755
--- a/net/test/leak_test.py
+++ b/net/test/leak_test.py
@@ -45,6 +45,7 @@
data, addr = csocket.Recvfrom(s, 4096)
self.assertEqual(b"", data)
self.assertEqual(None, addr)
+ s.close()
class ForceSocketBufferOptionTest(net_test.NetworkTest):
diff --git a/net/test/multinetwork_base.py b/net/test/multinetwork_base.py
index 940be49..8c5fc26 100644
--- a/net/test/multinetwork_base.py
+++ b/net/test/multinetwork_base.py
@@ -52,11 +52,19 @@
IPV6_HOPLIMIT = 52 # Different from IPV6_UNICAST_HOPS, this is cmsg only.
+ACCEPT_RA_MIN_LFT_SYSCTL = "/proc/sys/net/ipv6/conf/default/accept_ra_min_lft"
AUTOCONF_TABLE_SYSCTL = "/proc/sys/net/ipv6/conf/default/accept_ra_rt_table"
IPV4_MARK_REFLECT_SYSCTL = "/proc/sys/net/ipv4/fwmark_reflect"
IPV6_MARK_REFLECT_SYSCTL = "/proc/sys/net/ipv6/fwmark_reflect"
+RA_HONOR_PIO_LIFE_SYSCTL = "/proc/sys/net/ipv6/conf/default/ra_honor_pio_life"
+HAVE_ACCEPT_RA_MIN_LFT = (os.path.isfile(ACCEPT_RA_MIN_LFT_SYSCTL) or
+ net_test.NonGXI(5, 10) or
+ net_test.KernelAtLeast([(5, 10, 199), (5, 15, 136),
+ (6, 1, 57), (6, 6, 0)]))
HAVE_AUTOCONF_TABLE = os.path.isfile(AUTOCONF_TABLE_SYSCTL)
+HAVE_RA_HONOR_PIO_LIFE = (os.path.isfile(RA_HONOR_PIO_LIFE_SYSCTL) or
+ net_test.KernelAtLeast([(6, 7, 0)]))
class ConfigurationError(AssertionError):
@@ -101,8 +109,9 @@
# Wether to output setup commands.
DEBUG = False
- # The size of our UID ranges.
- UID_RANGE_SIZE = 1000
+ UID_RANGE_START = 2000
+ UID_RANGE_END = 9999
+ UID_RANGE_SIZE = UID_RANGE_END - UID_RANGE_START + 1
# Rule priorities.
PRIORITY_UID = 100
@@ -125,13 +134,15 @@
IPV4_PING = net_test.IPV4_PING
IPV6_PING = net_test.IPV6_PING
- RA_VALIDITY = 300 # seconds
+ RA_VALIDITY = 600 # seconds
@classmethod
def UidRangeForNetid(cls, netid):
+ per_netid_range = int(cls.UID_RANGE_SIZE / len(cls.NETIDS))
+ idx = cls.NETIDS.index(netid)
return (
- cls.UID_RANGE_SIZE * netid,
- cls.UID_RANGE_SIZE * (netid + 1) - 1
+ cls.UID_RANGE_START + per_netid_range * idx,
+ cls.UID_RANGE_START + per_netid_range * (idx + 1) - 1
)
@classmethod
@@ -232,8 +243,8 @@
return f
@classmethod
- def SendRA(cls, netid, retranstimer=None, reachabletime=0, options=()):
- validity = cls.RA_VALIDITY # seconds
+ def SendRA(cls, netid, retranstimer=None, reachabletime=0, routerlft=RA_VALIDITY,
+ piolft=RA_VALIDITY, m=0, o=0, options=()):
macaddr = cls.RouterMacAddress(netid)
lladdr = cls._RouterAddress(netid, 6)
@@ -241,23 +252,25 @@
# If no retrans timer was specified, pick one that's as long as the
# router lifetime. This ensures that no spurious ND retransmits
# will interfere with test expectations.
- retranstimer = validity * 1000 # Lifetime is in s, retrans timer in ms.
+ retranstimer = routerlft * 1000 # Lifetime is in s, retrans timer in ms.
# We don't want any routes in the main table. If the kernel doesn't support
# putting RA routes into per-interface tables, configure routing manually.
- routerlifetime = validity if HAVE_AUTOCONF_TABLE else 0
+ if not HAVE_AUTOCONF_TABLE:
+ routerlft = 0
ra = (scapy.Ether(src=macaddr, dst="33:33:00:00:00:01") /
scapy.IPv6(src=lladdr, hlim=255) /
scapy.ICMPv6ND_RA(reachabletime=reachabletime,
retranstimer=retranstimer,
- routerlifetime=routerlifetime) /
+ routerlifetime=routerlft,
+ M=m, O=o) /
scapy.ICMPv6NDOptSrcLLAddr(lladdr=macaddr) /
scapy.ICMPv6NDOptPrefixInfo(prefix=cls.OnlinkPrefix(6, netid),
prefixlen=cls.OnlinkPrefixLen(6),
L=1, A=1,
- validlifetime=validity,
- preferredlifetime=validity))
+ validlifetime=piolft,
+ preferredlifetime=piolft))
for option in options:
ra /= option
posix.write(cls.tuns[netid].fileno(), bytes(ra))
@@ -645,8 +658,9 @@
if expectedudp.sport is None:
actualudp.sport = None
actualudp.chksum = None
- elif actualudp.chksum == 0xffff:
+ elif actualudp.chksum == 0xffff and expectedudp.chksum == 0:
# Scapy does not appear to change 0 to 0xffff as required by RFC 768.
+ # It is possible that scapy has been upgraded and this no longer triggers.
actualudp.chksum = 0
# Since the TCP code below messes with options, recalculate the length.
diff --git a/net/test/multinetwork_test.py b/net/test/multinetwork_test.py
index 9a0659a..2bae074 100755
--- a/net/test/multinetwork_test.py
+++ b/net/test/multinetwork_test.py
@@ -563,8 +563,6 @@
def testIPv6ExplicitMark(self):
self.CheckTCP(6, [self.MODE_EXPLICIT_MARK])
[email protected](multinetwork_base.HAVE_AUTOCONF_TABLE,
- "need support for per-table autoconf")
class RIOTest(multinetwork_base.MultiNetworkBaseTest):
"""Test for IPv6 RFC 4191 route information option
@@ -594,12 +592,20 @@
super(RIOTest, self).setUp()
self.NETID = random.choice(self.NETIDS)
self.IFACE = self.GetInterfaceName(self.NETID)
- # return min/max plen to default values before each test case
+ # return sysctls to default values before each test case
self.SetAcceptRaRtInfoMinPlen(0)
self.SetAcceptRaRtInfoMaxPlen(0)
+ if multinetwork_base.HAVE_ACCEPT_RA_MIN_LFT:
+ self.SetAcceptRaMinLft(0)
+ if multinetwork_base.HAVE_RA_HONOR_PIO_LIFE:
+ self.SetRaHonorPioLife(0)
def GetRoutingTable(self):
- return self._TableForNetid(self.NETID)
+ if multinetwork_base.HAVE_AUTOCONF_TABLE:
+ return self._TableForNetid(self.NETID)
+ else:
+ # main table
+ return 254
def SetAcceptRaRtInfoMinPlen(self, plen):
self.SetSysctl(
@@ -619,6 +625,22 @@
return int(self.GetSysctl(
"/proc/sys/net/ipv6/conf/%s/accept_ra_rt_info_max_plen" % self.IFACE))
+ def SetAcceptRaMinLft(self, min_lft):
+ self.SetSysctl(
+ "/proc/sys/net/ipv6/conf/%s/accept_ra_min_lft" % self.IFACE, min_lft)
+
+ def GetAcceptRaMinLft(self):
+ return int(self.GetSysctl(
+ "/proc/sys/net/ipv6/conf/%s/accept_ra_min_lft" % self.IFACE))
+
+ def SetRaHonorPioLife(self, enabled):
+ self.SetSysctl(
+ "/proc/sys/net/ipv6/conf/%s/ra_honor_pio_life" % self.IFACE, enabled)
+
+ def GetRaHonorPioLife(self):
+ return int(self.GetSysctl(
+ "/proc/sys/net/ipv6/conf/%s/ra_honor_pio_life" % self.IFACE))
+
def SendRIO(self, rtlifetime, plen, prefix, prf):
options = scapy.ICMPv6NDOptRouteInfo(rtlifetime=rtlifetime, plen=plen,
prefix=prefix, prf=prf)
@@ -671,6 +693,8 @@
self.SetAcceptRaRtInfoMaxPlen(plen)
self.assertEqual(plen, self.GetAcceptRaRtInfoMaxPlen())
+ @unittest.skipUnless(multinetwork_base.HAVE_AUTOCONF_TABLE,
+ "need support for per-table autoconf")
def testZeroRtLifetime(self):
PREFIX = "2001:db8:8901:2300::"
RTLIFETIME = 73500
@@ -717,6 +741,8 @@
routes = self.FindRoutesWithDestination(PREFIX)
self.assertFalse(routes)
+ @unittest.skipUnless(multinetwork_base.HAVE_AUTOCONF_TABLE,
+ "need support for per-table autoconf")
def testSimpleAccept(self):
PREFIX = "2001:db8:8904:2345::"
RTLIFETIME = 9993
@@ -731,6 +757,8 @@
self.AssertExpirationInRange(routes, RTLIFETIME, 1)
self.DelRA6(PREFIX, PLEN)
+ @unittest.skipUnless(multinetwork_base.HAVE_AUTOCONF_TABLE,
+ "need support for per-table autoconf")
def testEqualMinMaxAccept(self):
PREFIX = "2001:db8:8905:2345::"
RTLIFETIME = 6326
@@ -745,6 +773,8 @@
self.AssertExpirationInRange(routes, RTLIFETIME, 1)
self.DelRA6(PREFIX, PLEN)
+ @unittest.skipUnless(multinetwork_base.HAVE_AUTOCONF_TABLE,
+ "need support for per-table autoconf")
def testZeroLengthPrefix(self):
PREFIX = "2001:db8:8906:2345::"
RTLIFETIME = self.RA_VALIDITY * 2
@@ -766,6 +796,8 @@
self.AssertExpirationInRange(default, RTLIFETIME, 1)
self.DelRA6(PREFIX, PLEN)
+ @unittest.skipUnless(multinetwork_base.HAVE_AUTOCONF_TABLE,
+ "need support for per-table autoconf")
def testManyRIOs(self):
RTLIFETIME = 68012
PLEN = 56
@@ -785,6 +817,111 @@
# Expect that we can return to baseline config without lingering routes.
self.assertEqual(baseline, self.CountRoutes())
+ # Contextually, testAcceptRa tests do not belong in RIOTest, but as it
+ # turns out, RIOTest has all the useful helpers defined for these tests.
+ # TODO: Rename test class or merge RIOTest with RATest.
+ @unittest.skipUnless(multinetwork_base.HAVE_ACCEPT_RA_MIN_LFT,
+ "need support for accept_ra_min_lft")
+ def testAcceptRaMinLftReadWrite(self):
+ self.SetAcceptRaMinLft(500)
+ self.assertEqual(500, self.GetAcceptRaMinLft())
+
+ @unittest.skipUnless(multinetwork_base.HAVE_RA_HONOR_PIO_LIFE,
+ "need support for ra_honor_pio_life")
+ def testRaHonorPioLifeReadWrite(self):
+ self.assertEqual(0, self.GetRaHonorPioLife())
+ self.SetRaHonorPioLife(1)
+ self.assertEqual(1, self.GetRaHonorPioLife())
+
+ @unittest.skipUnless(multinetwork_base.HAVE_RA_HONOR_PIO_LIFE,
+ "need support for ra_honor_pio_life")
+ def testRaHonorPioLife(self):
+ self.SetRaHonorPioLife(1)
+
+ # Test setup has sent an initial RA -- expire it.
+ self.SendRA(self.NETID, routerlft=0, piolft=0)
+ time.sleep(0.1) # Give the kernel time to notice our RA
+
+ # Assert that the address was deleted.
+ self.assertIsNone(self.MyAddress(6, self.NETID))
+
+ @unittest.skipUnless(multinetwork_base.HAVE_ACCEPT_RA_MIN_LFT,
+ "need support for accept_ra_min_lft")
+ def testAcceptRaMinLftRouterLifetime(self):
+ self.SetAcceptRaMinLft(500)
+
+ # Test setup has sent an initial RA. Expire it and test that the RA with
+ # lifetime 0 deletes the default route.
+ self.SendRA(self.NETID, routerlft=0, piolft=0)
+ time.sleep(0.1) # Give the kernel time to notice our RA
+ self.assertEqual([], self.FindRoutesWithGateway())
+
+ # RA with lifetime 400 is ignored
+ self.SendRA(self.NETID, routerlft=400)
+ time.sleep(0.1) # Give the kernel time to notice our RA
+ self.assertEqual([], self.FindRoutesWithGateway())
+
+ # RA with lifetime 600 is processed
+ self.SendRA(self.NETID, routerlft=600)
+ time.sleep(0.1) # Give the kernel time to notice our RA
+ # SendRA sets routerlft to 0 if HAVE_AUTOCONF_TABLE is false...
+ # TODO: Fix this correctly.
+ if multinetwork_base.HAVE_AUTOCONF_TABLE:
+ self.assertEqual(1, len(self.FindRoutesWithGateway()))
+
+ @unittest.skipUnless(multinetwork_base.HAVE_ACCEPT_RA_MIN_LFT,
+ "need support for accept_ra_min_lft")
+ def testAcceptRaMinLftPIOLifetime(self):
+ self.SetAcceptRaMinLft(500)
+
+ # Test setup has sent an initial RA -- expire it.
+ self.SendRA(self.NETID, routerlft=0, piolft=0)
+ time.sleep(0.1) # Give the kernel time to notice our RA
+ # Check that the prefix route was deleted.
+ prefixroutes = self.FindRoutesWithDestination(self.OnlinkPrefix(6, self.NETID))
+ self.assertEqual([], prefixroutes)
+
+ # Sending a 0-lifetime PIO does not cause the address to be deleted, see
+ # rfc2462#section-5.5.3.
+ address = self.MyAddress(6, self.NETID)
+ self.iproute.DelAddress(address, 64, self.ifindices[self.NETID])
+
+ # PIO with lifetime 400 is ignored
+ self.SendRA(self.NETID, piolft=400)
+ time.sleep(0.1) # Give the kernel time to notice our RA
+ self.assertIsNone(self.MyAddress(6, self.NETID))
+
+ # PIO with lifetime 600 is processed
+ self.SendRA(self.NETID, piolft=600)
+ time.sleep(0.1) # Give the kernel time to notice our RA
+ self.assertIsNotNone(self.MyAddress(6, self.NETID))
+
+ @unittest.skipUnless(multinetwork_base.HAVE_ACCEPT_RA_MIN_LFT,
+ "need support for accept_ra_min_lft")
+ def testAcceptRaMinLftRIOLifetime(self):
+ PREFIX = "2001:db8:8901:2300::"
+ PLEN = 64
+ PRF = 0
+
+ self.SetAcceptRaRtInfoMaxPlen(PLEN)
+ self.SetAcceptRaMinLft(500)
+
+ # RIO with lifetime 400 is ignored
+ self.SendRIO(400, PLEN, PREFIX, PRF)
+ time.sleep(0.1) # Give the kernel time to notice our RA
+ self.assertFalse(self.FindRoutesWithDestination(PREFIX))
+
+ # RIO with lifetime 600 is processed
+ self.SendRIO(600, PLEN, PREFIX, PRF)
+ time.sleep(0.1) # Give the kernel time to notice our RA
+ self.assertTrue(self.FindRoutesWithDestination(PREFIX))
+
+ # RIO with lifetime 0 deletes the route
+ self.SendRIO(0, PLEN, PREFIX, PRF)
+ time.sleep(0.1) # Give the kernel time to notice our RA
+ self.assertFalse(self.FindRoutesWithDestination(PREFIX))
+
+
class RATest(multinetwork_base.MultiNetworkBaseTest):
ND_ROUTER_ADVERT = 134
@@ -793,6 +930,14 @@
Pref64Option = cstruct.Struct("pref64_option", "!BBH12s",
"type length lft_plc prefix")
+ # Android Common Kernels are always based off of an LTS release,
+ # skipping this (always failing due to lack of an ACK specific patch) test
+ # on Linus's kernels (and various other upstream dev branches) allows
+ # for easier testing of Linux rc's and various developer trees.
+ @unittest.skipUnless(net_test.IS_STABLE, "not STABLE/LTS kernel")
+ def testHasAutoconfTable(self):
+ self.assertTrue(multinetwork_base.HAVE_AUTOCONF_TABLE)
+
def testDoesNotHaveObsoleteSysctl(self):
self.assertFalse(os.path.isfile(
"/proc/sys/net/ipv6/route/autoconf_table_offset"))
@@ -954,6 +1099,27 @@
self.assertEqual(foundPref64, True)
+ def testRaFlags(self):
+ def GetInterfaceIpv6Flags(iface):
+ attrs = self.iproute.GetIflaAfSpecificData(iface, AF_INET6)
+ return int(attrs["IFLA_INET6_FLAGS"])
+
+ netid = random.choice(self.NETIDS)
+ iface = self.GetInterfaceName(netid)
+ expected = iproute.IF_RS_SENT | iproute.IF_RA_RCVD | iproute.IF_READY
+ self.assertEqual(expected, GetInterfaceIpv6Flags(iface))
+
+ self.SendRA(netid, m=1, o=0)
+ expected |= iproute.IF_RA_MANAGED
+ self.assertEqual(expected, GetInterfaceIpv6Flags(iface))
+
+ self.SendRA(netid, m=1, o=1)
+ expected |= iproute.IF_RA_OTHERCONF
+ self.assertEqual(expected, GetInterfaceIpv6Flags(iface))
+
+ self.SendRA(netid, m=0, o=1)
+ expected &= ~iproute.IF_RA_MANAGED
+ self.assertEqual(expected, GetInterfaceIpv6Flags(iface))
class PMTUTest(multinetwork_base.InboundMarkingTest):
@@ -1150,6 +1316,10 @@
def _Random():
return random.randint(1000000, 2000000)
+ @staticmethod
+ def _RandomUid(cls):
+ return random.randint(cls.UID_RANGE_START, cls.UID_RANGE_END)
+
def CheckGetAndSetRules(self, version):
start, end = tuple(sorted([self._Random(), self._Random()]))
table = self._Random()
@@ -1275,7 +1445,7 @@
def testChangeFdAttributes(self):
netid = random.choice(self.NETIDS)
- uid = self._Random()
+ uid = self._RandomUid(self)
table = self._TableForNetid(netid)
remoteaddr = self.GetRemoteAddress(6)
s = socket(AF_INET6, SOCK_DGRAM, 0)
diff --git a/net/test/namespace.py b/net/test/namespace.py
index fdea1e6..7ebcbde 100644
--- a/net/test/namespace.py
+++ b/net/test/namespace.py
@@ -18,7 +18,6 @@
import ctypes
import ctypes.util
-import errno
import os
import socket
import sys
@@ -27,6 +26,8 @@
import sock_diag
import tcp_test
+# pylint: disable=bad-whitespace
+
# //include/linux/fs.h
MNT_FORCE = 1 # Attempt to forcibily umount
MNT_DETACH = 2 # Just detach from the tree
@@ -66,6 +67,8 @@
CLONE_NEWPID = 0x20000000 # New pid namespace
CLONE_NEWNET = 0x40000000 # New network namespace
+# pylint: enable=bad-whitespace
+
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
# See the relevant system call's man pages and:
@@ -81,9 +84,9 @@
ret = libc.mount(src.encode(), tgt.encode(), fs.encode() if fs else None,
flags, None)
if ret < 0:
- errno = ctypes.get_errno()
- raise OSError(errno, '%s mounting %s on %s (fs=%s flags=0x%x)'
- % (os.strerror(errno), src, tgt, fs, flags))
+ err = ctypes.get_errno()
+ raise OSError(err, '%s mounting %s on %s (fs=%s flags=0x%x)'
+ % (os.strerror(err), src, tgt, fs, flags))
def ReMountProc():
@@ -92,9 +95,9 @@
def ReMountSys():
- libc.umount2(b'/sys/fs/cgroup', MNT_DETACH) # Ignore failure: might not be mounted
- libc.umount2(b'/sys/fs/bpf', MNT_DETACH) # Ignore failure: might not be mounted
- libc.umount2(b'/sys', MNT_DETACH) # Ignore failure: might not be mounted
+ libc.umount2(b'/sys/fs/cgroup', MNT_DETACH) # Ign. fail: might not be mounted
+ libc.umount2(b'/sys/fs/bpf', MNT_DETACH) # Ignore fail: might not be mounted
+ libc.umount2(b'/sys', MNT_DETACH) # Ignore fail: might not be mounted
Mount('sysfs', '/sys', 'sysfs')
Mount('bpf', '/sys/fs/bpf', 'bpf')
Mount('cgroup2', '/sys/fs/cgroup', 'cgroup2')
@@ -109,15 +112,15 @@
hostname = s.encode()
ret = libc.sethostname(hostname, len(hostname))
if ret < 0:
- errno = ctypes.get_errno()
- raise OSError(errno, '%s while sethostname(%s)' % (os.strerror(errno), s))
+ err = ctypes.get_errno()
+ raise OSError(err, '%s while sethostname(%s)' % (os.strerror(err), s))
def UnShare(flags):
ret = libc.unshare(flags)
if ret < 0:
- errno = ctypes.get_errno()
- raise OSError(errno, '%s while unshare(0x%x)' % (os.strerror(errno), flags))
+ err = ctypes.get_errno()
+ raise OSError(err, '%s while unshare(0x%x)' % (os.strerror(err), flags))
def DumpMounts(hdr):
@@ -135,14 +138,6 @@
def EnterNewNetworkNamespace():
"""Instantiate and transition into a fresh new network namespace."""
- sys.stdout.write('Creating clean namespace... ')
-
- # sysctl only present on 4.14 and earlier Android kernels
- if net_test.LINUX_VERSION < (4, 15, 0):
- TCP_DEFAULT_INIT_RWND = "/proc/sys/net/ipv4/tcp_default_init_rwnd"
- # In root netns this will succeed
- init_rwnd_sysctl = open(TCP_DEFAULT_INIT_RWND, "w")
-
try:
UnShare(CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWNET)
except OSError as err:
@@ -163,38 +158,18 @@
# We've already transitioned into the new netns -- it's too late to recover.
raise
- if net_test.LINUX_VERSION < (4, 15, 0):
- # In non-root netns this open might fail due to non-namespace-ified sysctl
- # ie. lack of kernel commit:
- # https://android-review.googlesource.com/c/kernel/common/+/1312623
- # ANDROID: namespace'ify tcp_default_init_rwnd implementation
- try:
- init_rwnd_sysctl = open(TCP_DEFAULT_INIT_RWND, "w")
- except IOError as e:
- if e.errno != errno.ENOENT:
- raise
- # Note! if the netns open above succeeded (and thus we don't reach here)
- # then we don't need to actually update the sysctl, since we'll be able to do
- # that in the sock_diag_test.py TcpRcvWindowTest test case setUp() call instead.
- #
- # As such this write here is *still* to the root netns sysctl
- # (because we obtained a file descriptor *prior* to unshare/etc...)
- # and handles the case where the sysctl is not namespace aware and thus
- # affects the entire system.
- init_rwnd_sysctl.write("60");
-
print('succeeded.')
def HasEstablishedTcpSessionOnPort(port):
sd = sock_diag.SockDiag()
- sock_id = sd._EmptyInetDiagSockId()
+ sock_id = sd._EmptyInetDiagSockId() # pylint: disable=protected-access
sock_id.sport = port
states = 1 << tcp_test.TCP_ESTABLISHED
- matches = sd.DumpAllInetSockets(socket.IPPROTO_TCP, b"",
+ matches = sd.DumpAllInetSockets(socket.IPPROTO_TCP, b'',
sock_id=sock_id, states=states)
- return len(matches) > 0
+ return True if matches else False
diff --git a/net/test/net_test.py b/net/test/net_test.py
index bbff4e7..cdfdc0f 100644
--- a/net/test/net_test.py
+++ b/net/test/net_test.py
@@ -28,6 +28,7 @@
import binascii
import csocket
+import gki
# TODO: Move these to csocket.py.
SOL_IPV6 = 41
@@ -93,8 +94,36 @@
# Kernel log verbosity levels.
KERN_INFO = 6
+# The following ends up being (VERSION, PATCHLEVEL, SUBLEVEL) from the top of the kernel's Makefile
LINUX_VERSION = csocket.LinuxVersion()
-LINUX_ANY_VERSION = (0, 0)
+
+LINUX_ANY_VERSION = (0, 0, 0)
+
+# Linus always releases x.y.0-rcZ or x.y.0, any stable (incl. LTS) release will be x.y.1+
+IS_STABLE = (LINUX_VERSION[2] > 0)
+
+# From //system/gsid/libgsi.cpp IsGsiRunning()
+IS_GSI = os.access("/metadata/gsi/dsu/booted", os.F_OK)
+
+# NonGXI() is useful to run tests starting from a specific kernel version,
+# thus allowing one to test for correctly backported fixes,
+# without running the tests on non-updatable kernels (as part of GSI tests).
+#
+# Running vts_net_test on GSI image basically doesn't make sense, since
+# it's not like the unmodified vendor image - including the kernel - can be
+# realistically fixed in such a setup. Particularly problematic is GSI
+# on *older* pixel vendor: newer pixel images will have the fixed kernel,
+# but running newer GSI against ancient vendor will not see those fixes.
+#
+# Normally you'd also want to run on GKI kernels, but older release branches
+# are no longer maintained, so they also need to be excluded.
+# Proper GKI testing will happen at the tip of the appropriate ACK/GKI branch.
+def NonGXI(major, minor):
+ """Checks the kernel version is >= major.minor, and not GKI or GSI."""
+
+ if IS_GSI or gki.IS_GKI:
+ return False
+ return LINUX_VERSION >= (major, minor, 0)
def KernelAtLeast(versions):
"""Checks the kernel version matches the specified versions.
@@ -402,7 +431,9 @@
def RunIptablesCommand(version, args):
iptables_path = GetIptablesBinaryPath(version)
- return os.spawnvp(os.P_WAIT, iptables_path, [iptables_path] + args.split(" "))
+ return os.spawnvp(
+ os.P_WAIT, iptables_path,
+ [iptables_path, "-w"] + args.split(" "))
# Determine network configuration.
try:
diff --git a/net/test/net_test.sh b/net/test/net_test.sh
index 7185fd5..6893e09 100755
--- a/net/test/net_test.sh
+++ b/net/test/net_test.sh
@@ -165,6 +165,15 @@
# Allow people to run ping.
echo '0 2147483647' > /proc/sys/net/ipv4/ping_group_range
+# Adjust tcp_rmem_default on UML as needed by Linux 6.6
+if [[ -e /proc/exitcode ]]; then
+ # UML with mem=512M defaults to '4096 131072 ~4021664'
+ read tcp_rmem_min tcp_rmem_default tcp_rmem_max < /proc/sys/net/ipv4/tcp_rmem
+ if [[ tcp_rmem_default -lt 262144 ]]; then
+ echo "${tcp_rmem_min} 262144 ${tcp_rmem_max}" > /proc/sys/net/ipv4/tcp_rmem
+ fi
+fi
+
# Allow unprivileged use of eBPF (matches Android OS)
if [[ "$(< /proc/sys/kernel/unprivileged_bpf_disabled)" != '0' ]]; then
echo 0 > /proc/sys/kernel/unprivileged_bpf_disabled
diff --git a/net/test/netlink.py b/net/test/netlink.py
index b5efe11..1a5aefd 100644
--- a/net/test/netlink.py
+++ b/net/test/netlink.py
@@ -94,7 +94,6 @@
@staticmethod
def _GetConstantName(module, value, prefix):
-
def FirstMatching(name, prefixlist):
for prefix in prefixlist:
if name.startswith(prefix):
diff --git a/net/test/pf_key.py b/net/test/pf_key.py
deleted file mode 100755
index ca6689e..0000000
--- a/net/test/pf_key.py
+++ /dev/null
@@ -1,337 +0,0 @@
-#!/usr/bin/python3
-#
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Partial implementation of the PFKEYv2 interface."""
-
-# pylint: disable=g-bad-todo,bad-whitespace
-
-import os
-from socket import * # pylint: disable=wildcard-import
-import sys
-
-import cstruct
-import net_test
-
-
-# AF_KEY socket type. See include/linux/socket.h.
-AF_KEY = 15
-
-# PFKEYv2 constants. See include/uapi/linux/pfkeyv2.h.
-PF_KEY_V2 = 2
-
-# IPsec constants. See include/uapi/linux/ipsec.h.
-IPSEC_MODE_ANY = 0
-IPSEC_MODE_TRANSPORT = 1
-IPSEC_MODE_TUNNEL = 2
-IPSEC_MODE_BEET = 3
-
-# Operation types.
-SADB_ADD = 3
-SADB_DELETE = 4
-SADB_DUMP = 10
-
-# SA types.
-SADB_TYPE_UNSPEC = 0
-SADB_TYPE_AH = 2
-SADB_TYPE_ESP = 3
-
-# SA states.
-SADB_SASTATE_LARVAL = 0
-SADB_SASTATE_MATURE = 1
-SADB_SASTATE_DYING = 2
-SADB_SASTATE_DEAD = 3
-
-# Authentication algorithms.
-SADB_AALG_NONE = 0
-SADB_AALG_MD5HMAC = 2
-SADB_AALG_SHA1HMAC = 3
-SADB_X_AALG_SHA2_256HMAC = 5
-SADB_X_AALG_SHA2_384HMAC = 6
-SADB_X_AALG_SHA2_512HMAC = 7
-SADB_X_AALG_RIPEMD160HMAC = 8
-SADB_X_AALG_AES_XCBC_MAC = 9
-SADB_X_AALG_NULL = 251
-
-# Encryption algorithms.
-SADB_EALG_NONE = 0
-SADB_EALG_DESCBC = 2
-SADB_EALG_3DESCBC = 3
-SADB_X_EALG_CASTCBC = 6
-SADB_X_EALG_BLOWFISHCBC = 7
-SADB_EALG_NULL = 11
-SADB_X_EALG_AESCBC = 12
-SADB_X_EALG_AESCTR = 13
-SADB_X_EALG_AES_CCM_ICV8 = 14
-SADB_X_EALG_AES_CCM_ICV12 = 15
-SADB_X_EALG_AES_CCM_ICV16 = 16
-SADB_X_EALG_AES_GCM_ICV8 = 18
-SADB_X_EALG_AES_GCM_ICV12 = 19
-SADB_X_EALG_AES_GCM_ICV16 = 20
-SADB_X_EALG_CAMELLIACBC = 22
-SADB_X_EALG_NULL_AES_GMAC = 23
-SADB_X_EALG_SERPENTCBC = 252
-SADB_X_EALG_TWOFISHCBC = 253
-
-# Extension Header values.
-SADB_EXT_RESERVED = 0
-SADB_EXT_SA = 1
-SADB_EXT_LIFETIME_CURRENT = 2
-SADB_EXT_LIFETIME_HARD = 3
-SADB_EXT_LIFETIME_SOFT = 4
-SADB_EXT_ADDRESS_SRC = 5
-SADB_EXT_ADDRESS_DST = 6
-SADB_EXT_ADDRESS_PROXY = 7
-SADB_EXT_KEY_AUTH = 8
-SADB_EXT_KEY_ENCRYPT = 9
-SADB_EXT_IDENTITY_SRC = 10
-SADB_EXT_IDENTITY_DST = 11
-SADB_EXT_SENSITIVITY = 12
-SADB_EXT_PROPOSAL = 13
-SADB_EXT_SUPPORTED_AUTH = 14
-SADB_EXT_SUPPORTED_ENCRYPT = 15
-SADB_EXT_SPIRANGE = 16
-SADB_X_EXT_KMPRIVATE = 17
-SADB_X_EXT_POLICY = 18
-SADB_X_EXT_SA2 = 19
-SADB_X_EXT_NAT_T_TYPE = 20
-SADB_X_EXT_NAT_T_SPORT = 21
-SADB_X_EXT_NAT_T_DPORT = 22
-SADB_X_EXT_NAT_T_OA = 23
-SADB_X_EXT_SEC_CTX = 24
-SADB_X_EXT_KMADDRESS = 25
-SADB_X_EXT_FILTER = 26
-
-# Data structure formats.
-# These aren't constants, they're classes. So, pylint: disable=invalid-name
-SadbMsg = cstruct.Struct(
- "SadbMsg", "=BBBBHHII", "version type errno satype len reserved seq pid")
-
-# Fake struct containing the common beginning of all extension structs.
-SadbExt = cstruct.Struct("SadbExt", "=HH", "len exttype")
-
-SadbSa = cstruct.Struct(
- "SadbSa", "=IBBBBI", "spi replay state auth encrypt flags")
-
-SadbLifetime = cstruct.Struct(
- "SadbLifetime", "=IQQQ", "allocations bytes addtime usetime")
-
-SadbAddress = cstruct.Struct("SadbAddress", "=BB2x", "proto prefixlen")
-
-SadbKey = cstruct.Struct("SadbKey", "=H2x", "bits")
-
-SadbXSa2 = cstruct.Struct("SadbXSa2", "=B3xII", "mode sequence reqid")
-
-SadbXNatTType = cstruct.Struct("SadbXNatTType", "=B3x", "type")
-
-SadbXNatTPort = cstruct.Struct("SadbXNatTPort", "!H2x", "port")
-
-
-def _GetConstantName(value, prefix):
- """Translates a number to a constant of the same value in this file."""
- thismodule = sys.modules[__name__]
- # Match shorter constant names first. This allows us to match SADB_DUMP and
- # instead of, say, SADB_EXT_LIFETIME_HARD if we pass in a prefix of "SADB_"
- # and a value of 3, and match SADB_EXT_LIFETIME_HARD just by specifying
- # a longer prefix.
- for name in sorted(dir(thismodule), key=len):
- if (name.startswith(prefix) and
- name.isupper() and getattr(thismodule, name) == value):
- return name
- return value
-
-
-def _GetMultiConstantName(value, prefixes):
- for prefix in prefixes:
- name = _GetConstantName(value, prefix)
- try:
- int(name)
- continue
- except ValueError:
- return name
-
-
-# Converts extension blobs to a (name, struct, attrs) tuple.
-def ParseExtension(exttype, data):
- struct_type = None
- if exttype == SADB_EXT_SA:
- struct_type = SadbSa
- elif exttype in [SADB_EXT_LIFETIME_CURRENT, SADB_EXT_LIFETIME_HARD,
- SADB_EXT_LIFETIME_SOFT]:
- struct_type = SadbLifetime
- elif exttype in [SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST,
- SADB_EXT_ADDRESS_PROXY]:
- struct_type = SadbAddress
- elif exttype in [SADB_EXT_KEY_AUTH, SADB_EXT_KEY_ENCRYPT]:
- struct_type = SadbKey
- elif exttype == SADB_X_EXT_SA2:
- struct_type = SadbXSa2
- elif exttype == SADB_X_EXT_NAT_T_TYPE:
- struct_type = SadbXNatTType
- elif exttype in [SADB_X_EXT_NAT_T_SPORT, SADB_X_EXT_NAT_T_DPORT]:
- struct_type = SadbXNatTPort
-
- if struct_type:
- ext, attrs = cstruct.Read(data, struct_type)
- else:
- ext, attrs = data, b""
-
- return exttype, ext, attrs
-
-
-class PfKey(object):
-
- """PF_KEY interface to kernel IPsec implementation."""
-
- def __init__(self):
- self.sock = socket(AF_KEY, SOCK_RAW, PF_KEY_V2)
- net_test.SetNonBlocking(self.sock)
- self.seq = 0
-
- def close(self):
- self.sock.close()
- self.sock = None
-
- def __del__(self):
- if self.sock: self.close()
-
- def Recv(self):
- reply = self.sock.recv(4096)
- msg = SadbMsg(reply)
- # print("RECV: " + self.DecodeSadbMsg(msg))
- if msg.errno != 0:
- raise OSError(msg.errno, os.strerror(msg.errno))
- return reply
-
- def SendAndRecv(self, msg, extensions):
- self.seq += 1
- msg.seq = self.seq
- msg.pid = os.getpid()
- msg.len = (len(SadbMsg) + len(extensions)) // 8
- self.sock.send(msg.Pack() + extensions)
- # print("SEND: " + self.DecodeSadbMsg(msg))
- return self.Recv()
-
- def PackPfKeyExtensions(self, extlist):
- extensions = b""
- for exttype, extstruct, attrs in extlist:
- extdata = extstruct.Pack()
- ext = SadbExt(((len(extdata) + len(SadbExt) + len(attrs)) // 8, exttype))
- extensions += ext.Pack() + extdata + attrs
- return extensions
-
- def MakeSadbMsg(self, msgtype, satype):
- # errno is 0. seq, pid and len are filled in by SendAndRecv().
- return SadbMsg((PF_KEY_V2, msgtype, 0, satype, 0, 0, 0, 0))
-
- def MakeSadbExtAddr(self, exttype, addr):
- prefixlen = {AF_INET: 32, AF_INET6: 128}[addr.family]
- packed = addr.Pack()
- padbytes = (len(SadbExt) + len(SadbAddress) + len(packed)) % 8
- packed += b"\x00" * padbytes
- return (exttype, SadbAddress((0, prefixlen)), packed)
-
- def AddSa(self, src, dst, spi, satype, mode, reqid, encryption,
- encryption_key, auth, auth_key):
- """Adds a security association."""
- msg = self.MakeSadbMsg(SADB_ADD, satype)
- replay = 4
- extlist = [
- (SADB_EXT_SA, SadbSa((htonl(spi), replay, SADB_SASTATE_MATURE,
- auth, encryption, 0)), b""),
- self.MakeSadbExtAddr(SADB_EXT_ADDRESS_SRC, src),
- self.MakeSadbExtAddr(SADB_EXT_ADDRESS_DST, dst),
- (SADB_X_EXT_SA2, SadbXSa2((mode, 0, reqid)), b""),
- (SADB_EXT_KEY_AUTH, SadbKey((len(auth_key) * 8,)), auth_key),
- (SADB_EXT_KEY_ENCRYPT, SadbKey((len(encryption_key) * 8,)),
- encryption_key)
- ]
- self.SendAndRecv(msg, self.PackPfKeyExtensions(extlist))
-
- def DelSa(self, src, dst, spi, satype):
- """Deletes a security association."""
- msg = self.MakeSadbMsg(SADB_DELETE, satype)
- extlist = [
- (SADB_EXT_SA, SadbSa((htonl(spi), 4, SADB_SASTATE_MATURE,
- 0, 0, 0)), b""),
- self.MakeSadbExtAddr(SADB_EXT_ADDRESS_SRC, src),
- self.MakeSadbExtAddr(SADB_EXT_ADDRESS_DST, dst),
- ]
- self.SendAndRecv(msg, self.PackPfKeyExtensions(extlist))
-
- @staticmethod
- def DecodeSadbMsg(msg):
- msgtype = _GetConstantName(msg.type, "SADB_")
- satype = _GetConstantName(msg.satype, "SADB_TYPE_")
- return ("SadbMsg(version=%d, type=%s, errno=%d, satype=%s, "
- "len=%d, reserved=%d, seq=%d, pid=%d)" % (
- msg.version, msgtype, msg.errno, satype, msg.len,
- msg.reserved, msg.seq, msg.pid))
-
- @staticmethod
- def DecodeSadbSa(sa):
- state = _GetConstantName(sa.state, "SADB_SASTATE_")
- auth = _GetMultiConstantName(sa.auth, ["SADB_AALG_", "SADB_X_AALG"])
- encrypt = _GetMultiConstantName(sa.encrypt, ["SADB_EALG_",
- "SADB_X_EALG_"])
- return ("SadbSa(spi=%x, replay=%d, state=%s, "
- "auth=%s, encrypt=%s, flags=%x)" % (
- sa.spi, sa.replay, state, auth, encrypt, sa.flags))
-
- @staticmethod
- def ExtensionsLength(msg, struct_type):
- return (msg.len * 8) - len(struct_type)
-
- @staticmethod
- def ParseExtensions(data):
- """Parses the extensions in a SADB message."""
- extensions = []
- while data:
- ext, data = cstruct.Read(data, SadbExt)
- datalen = PfKey.ExtensionsLength(ext, SadbExt)
- extdata, data = data[:datalen], data[datalen:]
- extensions.append(ParseExtension(ext.exttype, extdata))
- return extensions
-
- def DumpSaInfo(self):
- """Returns a list of (SadbMsg, [(extension, attr), ...], ...) tuples."""
- dump = []
- msg = self.MakeSadbMsg(SADB_DUMP, SADB_TYPE_UNSPEC)
- received = self.SendAndRecv(msg, b"")
- while received:
- msg, data = cstruct.Read(received, SadbMsg)
- extlen = self.ExtensionsLength(msg, SadbMsg)
- extensions, data = data[:extlen], data[extlen:]
- dump.append((msg, self.ParseExtensions(extensions)))
- if msg.seq == 0: # End of dump.
- break
- received = self.Recv()
- return dump
-
- def PrintSaInfos(self, dump):
- for msg, extensions in dump:
- print(self.DecodeSadbMsg(msg))
- for exttype, ext, attrs in extensions:
- exttype = _GetMultiConstantName(exttype, ["SADB_EXT", "SADB_X_EXT"])
- if exttype == SADB_EXT_SA:
- print(" %s %s %s" %
- (exttype, self.DecodeSadbSa(ext), attrs.encode("hex")))
- print(" %s %s %s" % (exttype, ext, attrs.encode("hex")))
- print("")
-
-
-if __name__ == "__main__":
- p = PfKey()
- p.DumpSaInfo()
diff --git a/net/test/pf_key_test.py b/net/test/pf_key_test.py
deleted file mode 100755
index 7791bd1..0000000
--- a/net/test/pf_key_test.py
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/usr/bin/python3
-#
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# pylint: disable=g-bad-todo,g-bad-file-header,wildcard-import
-from socket import *
-import unittest
-
-import binascii
-import csocket
-import pf_key
-import xfrm
-
-ENCRYPTION_KEY = binascii.unhexlify("308146eb3bd84b044573d60f5a5fd159"
- "57c7d4fe567a2120f35bae0f9869ec22")
-AUTH_KEY = binascii.unhexlify("af442892cdcd0ef650e9c299f9a8436a")
-
-
-class PfKeyTest(unittest.TestCase):
-
- def setUp(self):
- self.pf_key = pf_key.PfKey()
- self.xfrm = xfrm.Xfrm()
-
- def tearDown(self):
- self.pf_key.close()
- self.pf_key = None
-
- def testAddDelSa(self):
- src4 = csocket.Sockaddr(("192.0.2.1", 0))
- dst4 = csocket.Sockaddr(("192.0.2.2", 1))
- self.pf_key.AddSa(src4, dst4, 0xdeadbeef, pf_key.SADB_TYPE_ESP,
- pf_key.IPSEC_MODE_TRANSPORT, 54321,
- pf_key.SADB_X_EALG_AESCBC, ENCRYPTION_KEY,
- pf_key.SADB_X_AALG_SHA2_256HMAC, ENCRYPTION_KEY)
-
- src6 = csocket.Sockaddr(("2001:db8::1", 0))
- dst6 = csocket.Sockaddr(("2001:db8::2", 0))
- self.pf_key.AddSa(src6, dst6, 0xbeefdead, pf_key.SADB_TYPE_ESP,
- pf_key.IPSEC_MODE_TRANSPORT, 12345,
- pf_key.SADB_X_EALG_AESCBC, ENCRYPTION_KEY,
- pf_key.SADB_X_AALG_SHA2_256HMAC, ENCRYPTION_KEY)
-
- sainfos = self.xfrm.DumpSaInfo()
- self.assertEqual(2, len(sainfos))
- state4, attrs4 = [(s, a) for s, a in sainfos if s.family == AF_INET][0]
- state6, attrs6 = [(s, a) for s, a in sainfos if s.family == AF_INET6][0]
-
- pfkey_sainfos = self.pf_key.DumpSaInfo()
- self.assertEqual(2, len(pfkey_sainfos))
- self.assertTrue(all(msg.satype == pf_key.SDB_TYPE_ESP)
- for msg, _ in pfkey_sainfos)
-
- self.assertEqual(xfrm.IPPROTO_ESP, state4.id.proto)
- self.assertEqual(xfrm.IPPROTO_ESP, state6.id.proto)
- self.assertEqual(54321, state4.reqid)
- self.assertEqual(12345, state6.reqid)
- self.assertEqual(0xdeadbeef, state4.id.spi)
- self.assertEqual(0xbeefdead, state6.id.spi)
-
- self.assertEqual(xfrm.PaddedAddress("192.0.2.1"), state4.saddr)
- self.assertEqual(xfrm.PaddedAddress("192.0.2.2"), state4.id.daddr)
- self.assertEqual(xfrm.PaddedAddress("2001:db8::1"), state6.saddr)
- self.assertEqual(xfrm.PaddedAddress("2001:db8::2"), state6.id.daddr)
-
- # The algorithm names are null-terminated, but after that contain garbage.
- # Kernel bug?
- aes_name = b"cbc(aes)\x00"
- sha256_name = b"hmac(sha256)\x00"
- self.assertTrue(attrs4["XFRMA_ALG_CRYPT"].name.startswith(aes_name))
- self.assertTrue(attrs6["XFRMA_ALG_CRYPT"].name.startswith(aes_name))
- self.assertTrue(attrs4["XFRMA_ALG_AUTH"].name.startswith(sha256_name))
- self.assertTrue(attrs6["XFRMA_ALG_AUTH"].name.startswith(sha256_name))
-
- self.assertEqual(256, attrs4["XFRMA_ALG_CRYPT"].key_len)
- self.assertEqual(256, attrs6["XFRMA_ALG_CRYPT"].key_len)
- self.assertEqual(256, attrs4["XFRMA_ALG_AUTH"].key_len)
- self.assertEqual(256, attrs6["XFRMA_ALG_AUTH"].key_len)
- self.assertEqual(256, attrs4["XFRMA_ALG_AUTH_TRUNC"].key_len)
- self.assertEqual(256, attrs6["XFRMA_ALG_AUTH_TRUNC"].key_len)
-
- if attrs4["XFRMA_ALG_AUTH_TRUNC"].trunc_len == 96:
- missing4 = True
- else:
- self.assertEqual(128, attrs4["XFRMA_ALG_AUTH_TRUNC"].trunc_len)
- missing4 = False
-
- if attrs6["XFRMA_ALG_AUTH_TRUNC"].trunc_len == 96:
- missing6 = True
- else:
- self.assertEqual(128, attrs6["XFRMA_ALG_AUTH_TRUNC"].trunc_len)
- missing6 = False
-
- self.pf_key.DelSa(src4, dst4, 0xdeadbeef, pf_key.SADB_TYPE_ESP)
- self.assertEqual(1, len(self.xfrm.DumpSaInfo()))
- self.pf_key.DelSa(src6, dst6, 0xbeefdead, pf_key.SADB_TYPE_ESP)
- self.assertEqual(0, len(self.xfrm.DumpSaInfo()))
-
- if missing4 or missing6:
- self.assertFalse("missing b8a72fd7c4e9 ANDROID: net: xfrm: make PF_KEY SHA256 use RFC-compliant truncation.")
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/net/test/ping6_test.py b/net/test/ping6_test.py
index af2e4c5..59021e2 100755
--- a/net/test/ping6_test.py
+++ b/net/test/ping6_test.py
@@ -21,18 +21,17 @@
import os
import posix
import random
-from socket import * # pylint: disable=wildcard-import
+from socket import * # pylint: disable=g-importing-member,wildcard-import
import struct
import sys
import threading
import time
import unittest
-from scapy import all as scapy
-
import csocket
import multinetwork_base
import net_test
+from scapy import all as scapy
ICMP_ECHO = 8
@@ -116,12 +115,12 @@
packet)
def SendPacketTooBig(self, packet):
- src = packet.getlayer(scapy.IPv6).src
- datalen = IPV6_MIN_MTU - ICMPV6_HEADER_LEN
- self.SendPacket(
- scapy.IPv6(src=self.INTERMEDIATE_IPV6, dst=src) /
- scapy.ICMPv6PacketTooBig(mtu=self.LINK_MTU) /
- bytes(packet)[:datalen])
+ src = packet.getlayer(scapy.IPv6).src
+ datalen = IPV6_MIN_MTU - ICMPV6_HEADER_LEN
+ self.SendPacket(
+ scapy.IPv6(src=self.INTERMEDIATE_IPV6, dst=src) /
+ scapy.ICMPv6PacketTooBig(mtu=self.LINK_MTU) /
+ bytes(packet)[:datalen])
def IPv4Packet(self, ip):
icmp = ip.getlayer(scapy.ICMP)
@@ -184,7 +183,7 @@
packet = scapy.Ether(src=self._routermac, dst=self._mymac) / packet
try:
posix.write(self._tun.fileno(), bytes(packet))
- except Exception as e:
+ except Exception as e: # pylint: disable=broad-exception-caught
if not self._stopped_flag:
raise e
@@ -217,15 +216,15 @@
# that would cause tearDownClass not to be called and thus not clean up
# routing configuration, breaking subsequent tests. Instead, just let these
# tests fail.
- _INTERVAL = 0.1
- _ATTEMPTS = 20
- for i in range(0, _ATTEMPTS):
- for netid in cls.NETIDS:
- if all(thread.IsStarted() for thread in list(cls.reply_threads.values())):
+ interval = 0.1
+ attempts = 20
+ for _ in range(attempts):
+ for _ in cls.NETIDS:
+ if all(thrd.IsStarted() for thrd in list(cls.reply_threads.values())):
return
- time.sleep(_INTERVAL)
+ time.sleep(interval)
msg = "WARNING: reply threads not all started after %.1f seconds\n" % (
- _ATTEMPTS * _INTERVAL)
+ attempts * interval)
sys.stderr.write(msg)
@classmethod
@@ -239,10 +238,10 @@
cls.reply_threads = {}
for netid in cls.NETIDS:
cls.reply_threads[netid] = PingReplyThread(
- cls.tuns[netid],
- cls.MyMacAddress(netid),
- cls.RouterMacAddress(netid),
- cls._RouterAddress(netid, 6))
+ cls.tuns[netid],
+ cls.MyMacAddress(netid),
+ cls.RouterMacAddress(netid),
+ cls._RouterAddress(netid, 6))
cls.reply_threads[netid].start()
cls.WaitForReplyThreads()
cls.netid = random.choice(cls.NETIDS)
@@ -255,6 +254,7 @@
super(Ping6Test, cls).tearDownClass()
def setUp(self):
+ super(Ping6Test, self).setUp()
self.ifname = self.GetInterfaceName(self.netid)
self.ifindex = self.ifindices[self.netid]
self.lladdr = net_test.GetLinkAddress(self.ifname, True)
@@ -294,7 +294,7 @@
# Check that the flow label is zero and that the scope ID is sane.
self.assertEqual(flowlabel, 0)
if addr.startswith("fe80::"):
- self.assertTrue(scope_id in list(self.ifindices.values()))
+ self.assertIn(scope_id, list(self.ifindices.values()))
else:
self.assertEqual(0, scope_id)
@@ -326,7 +326,7 @@
# Check all the parameters except rxmem and txmem.
expected[3] = actual[3]
- # also do not check ref, it's always 2 on older kernels, but 1 for 'raw6' on 6.0+
+ # Don't check ref, it's always 2 on old kernels, but 1 for 'raw6' on 6.0+
expected[5] = actual[5]
if expected == actual:
return
@@ -735,7 +735,7 @@
# that is not registered with the flow manager should return EINVAL...
s.setsockopt(net_test.SOL_IPV6, net_test.IPV6_FLOWINFO_SEND, 1)
# ... but this doesn't work yet.
- if False:
+ if False: # pylint: disable=using-constant-test
self.assertRaisesErrno(errno.EINVAL, s.sendto, net_test.IPV6_PING,
(net_test.IPV6_ADDR, 93, 0xdead, 0))
diff --git a/net/test/removed_feature_test.py b/net/test/removed_feature_test.py
deleted file mode 100755
index d47824b..0000000
--- a/net/test/removed_feature_test.py
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/usr/bin/python3
-#
-# Copyright 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import errno
-from socket import * # pylint: disable=wildcard-import
-import unittest
-
-import gzip
-import net_test
-
-
-class RemovedFeatureTest(net_test.NetworkTest):
- KCONFIG = None
-
- @classmethod
- def loadKernelConfig(cls):
- cls.KCONFIG = {}
- with gzip.open("/proc/config.gz", mode="rt") as f:
- for line in f:
- line = line.strip()
- parts = line.split("=")
- if (len(parts) == 2):
- # Lines of the form:
- # CONFIG_FOO=y
- cls.KCONFIG[parts[0]] = parts[1]
-
- @classmethod
- def setUpClass(cls):
- cls.loadKernelConfig()
-
- def assertFeatureEnabled(self, featureName):
- return self.assertEqual("y", self.KCONFIG[featureName])
-
- def assertFeatureAbsent(self, featureName):
- return self.assertTrue(featureName not in self.KCONFIG)
-
- def testNetfilterRejectWithSocketError(self):
- """Verify that the CONFIG_IP{,6}_NF_TARGET_REJECT_SKERR option is gone.
-
- The commits to be reverted include:
-
- android-3.10: 6f489c42
- angler: 6f489c42
- bullhead: 6f489c42
- shamu: 6f489c42
- flounder: 6f489c42
-
- See b/28424847 and b/28719525 for more context.
- """
- self.assertFeatureEnabled("CONFIG_IP_NF_FILTER")
- self.assertFeatureEnabled("CONFIG_IP_NF_TARGET_REJECT")
- self.assertFeatureAbsent("CONFIG_IP_NF_TARGET_REJECT_SKERR")
-
- self.assertFeatureEnabled("CONFIG_IP6_NF_FILTER")
- self.assertFeatureEnabled("CONFIG_IP6_NF_TARGET_REJECT")
- self.assertFeatureAbsent("CONFIG_IP6_NF_TARGET_REJECT_SKERR")
-
- def testRemovedAndroidParanoidNetwork(self):
- """Verify that ANDROID_PARANOID_NETWORK is gone.
-
- On a 4.14-q kernel you can achieve this by simply
- changing the ANDROID_PARANOID_NETWORK default y to n
- in your kernel source code in net/Kconfig:
-
- @@ -94,3 +94,3 @@ endif # if INET
- config ANDROID_PARANOID_NETWORK
- bool "Only allow certain groups to create sockets"
- - default y
- + default n
- """
- AID_NET_RAW = 3004
- with net_test.RunAsUidGid(12345, AID_NET_RAW):
- self.assertRaisesErrno(errno.EPERM, socket, AF_PACKET, SOCK_RAW, 0)
-
- def testRemovedQtaguid(self):
- self.assertRaisesErrno(errno.ENOENT, open, "/proc/net/xt_qtaguid")
-
- def testRemovedTcpMemSysctls(self):
- self.assertRaisesErrno(errno.ENOENT, open, "/sys/kernel/ipv4/tcp_rmem_def")
- self.assertRaisesErrno(errno.ENOENT, open, "/sys/kernel/ipv4/tcp_rmem_max")
- self.assertRaisesErrno(errno.ENOENT, open, "/sys/kernel/ipv4/tcp_rmem_min")
- self.assertRaisesErrno(errno.ENOENT, open, "/sys/kernel/ipv4/tcp_wmem_def")
- self.assertRaisesErrno(errno.ENOENT, open, "/sys/kernel/ipv4/tcp_wmem_max")
- self.assertRaisesErrno(errno.ENOENT, open, "/sys/kernel/ipv4/tcp_wmem_min")
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/net/test/rootfs/bullseye-server.sh b/net/test/rootfs/bullseye-server.sh
index c5343de..61c046d 100755
--- a/net/test/rootfs/bullseye-server.sh
+++ b/net/test/rootfs/bullseye-server.sh
@@ -55,6 +55,14 @@
apt-get install -y -t bullseye-backports ${package}
done
+# Install AOSP customized kernel package and extra Debian packages
+if [ -e /root/extradeb.tar.gz ]; then
+ tar --one-top-level=/root/extra_deb -zxvf /root/extradeb.tar.gz
+ apt -o APT::Color=0 -o DPkgPM::Progress-Fancy=0 install /root/extra_deb/*.deb
+ rm -rf /root/extra_deb
+ rm -f /root/extradeb.tar.gz
+fi
+
# Install firmware package for AMD graphics
apt-get install -y firmware-amd-graphics
diff --git a/net/test/rootfs/bullseye.list b/net/test/rootfs/bullseye.list
index 7ef07b3..266f214 100644
--- a/net/test/rootfs/bullseye.list
+++ b/net/test/rootfs/bullseye.list
@@ -1,8 +1,10 @@
+anacron
apt
apt-utils
bash-completion
bsdmainutils
ca-certificates
+cron
e2fsprogs
file
gpgv
diff --git a/net/test/rootfs/common.sh b/net/test/rootfs/common.sh
index 211c6f8..6bb71b1 100644
--- a/net/test/rootfs/common.sh
+++ b/net/test/rootfs/common.sh
@@ -125,7 +125,10 @@
if [[ "${install_grub}" = "1" ]]; then
# Mount fstab entry added by stage2
- mount /boot/efi
+ findmnt /boot/efi > /dev/null 2>&1
+ if [ $? != 0 ]; then
+ mount /boot/efi
+ fi
# Install GRUB EFI (removable, for Cloud)
apt-get install -y grub-efi
@@ -171,6 +174,7 @@
# Miscellaneous cleanup
rm -rf /var/lib/apt/lists/* || true
rm -f /root/* || true
+ rm -f /etc/cron.d/cron-run-installer-script || true
apt-get clean
echo 0 >"${exitcode}"
diff --git a/net/test/rootfs/cron-run-installer-script b/net/test/rootfs/cron-run-installer-script
new file mode 100644
index 0000000..243223d
--- /dev/null
+++ b/net/test/rootfs/cron-run-installer-script
@@ -0,0 +1,3 @@
+SHELL=/bin/bash
+PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
+@reboot root /root/cron-run-installer-script.sh
diff --git a/net/test/rootfs/cron-run-installer-script.sh b/net/test/rootfs/cron-run-installer-script.sh
new file mode 100755
index 0000000..48ca166
--- /dev/null
+++ b/net/test/rootfs/cron-run-installer-script.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+# Wait for Login Prompt fully active.
+# Otherwise the output are mixed together.
+while true; do
+ systemctl is-active --quiet multi-user.target
+ if [ $? -eq 0 ]; then
+ break
+ fi
+ sleep 2
+done
+sleep 10
+
+# Parsing /proc/cmdline and export all the variables
+PARAMS=""
+if [ -e /proc/cmdline ]; then
+ PARAMS=$(cat /proc/cmdline)
+fi
+
+for i in ${PARAMS}
+do
+ export ${i}
+done
+
+# Log output for qemu serial.
+LOG_FILE=/dev/null
+if [ x"${console}" != x"" ]; then
+ if [ -e /dev/${console} ]; then
+ LOG_FILE=/dev/${console}
+ fi
+fi
+
+# Run the script
+cd /
+if [ x"${installer_script}" = x"" ]; then
+ exit
+fi
+if [ ! -x "${installer_script}" ]; then
+ exit
+fi
+
+${installer_script} > "${LOG_FILE}" 2>&1
+
+# shutdown the machine.
+shutdown -h 1
diff --git a/net/test/run_net_test.sh b/net/test/run_net_test.sh
index 8d44cf3..8635eab 100755
--- a/net/test/run_net_test.sh
+++ b/net/test/run_net_test.sh
@@ -88,9 +88,6 @@
# These two break the flo kernel due to differences in -Werror on recent GCC.
DISABLE_OPTIONS=" REISERFS_FS ANDROID_PMEM"
-# Disable frame size warning on arm64. GCC 10 generates >1k stack frames.
-DISABLE_OPTIONS="$DISABLE_OPTIONS FRAME_WARN"
-
# How many TAP interfaces to create to provide the VM with real network access
# via the host. This requires privileges (e.g., root access) on the host.
#
@@ -128,6 +125,11 @@
nobuild=0
norun=0
+if [[ ! -f "${KERNEL_DIR}/Makefile" ]]; then
+ echo "No kernel Makefile found. Are you running this from a kernel directory?"
+ exit 1
+fi
+
KVER_MAJOR="$(sed -rn 's@^ *VERSION *= *([0-9]+)$@\1@p' < "${KERNEL_DIR}/Makefile")"
KVER_MINOR="$(sed -rn 's@^ *PATCHLEVEL *= *([0-9]+)$@\1@p' < "${KERNEL_DIR}/Makefile")"
KVER_LEVEL="$(sed -rn 's@^ *SUBLEVEL *= *([0-9]+)$@\1@p' < "${KERNEL_DIR}/Makefile")"
@@ -303,6 +305,9 @@
# Enable the kernel config options listed in $OPTIONS.
$CONFIG_SCRIPT --file $CONFIG_FILE ${OPTIONS// / -e }
+ # Increase acceptable frame size.
+ $CONFIG_SCRIPT --file $CONFIG_FILE --set-val FRAME_WARN 3172
+
# Disable the kernel config options listed in $DISABLE_OPTIONS.
$CONFIG_SCRIPT --file $CONFIG_FILE ${DISABLE_OPTIONS// / -d }
diff --git a/net/test/sock_diag_test.py b/net/test/sock_diag_test.py
index aa14343..58e8f01 100755
--- a/net/test/sock_diag_test.py
+++ b/net/test/sock_diag_test.py
@@ -38,7 +38,6 @@
NUM_SOCKETS = 30
NO_BYTECODE = b""
-LINUX_4_19_OR_ABOVE = net_test.LINUX_VERSION >= (4, 19, 0)
IPPROTO_SCTP = 132
@@ -180,6 +179,9 @@
self.sock_diag.GetSockInfo(diag_req)
# No errors? Good.
+ for sock in socketpair:
+ sock.close()
+
def CheckFindsAllMySockets(self, socktype, proto):
"""Tests that basic socket dumping works."""
self.socketpairs = self._CreateLotsOfSockets(socktype)
@@ -221,6 +223,10 @@
info = self.sock_diag.GetSockInfo(req)
self.assertSockInfoMatchesSocket(sock, info)
+ for socketpair in socketpairs:
+ for sock in socketpair:
+ sock.close()
+
def assertItemsEqual(self, expected, actual):
try:
super(SockDiagTest, self).assertItemsEqual(expected, actual)
@@ -333,6 +339,15 @@
self.assertTrue(all(d in v4socks for d in diag_msgs))
self.assertTrue(all(d in v6socks for d in diag_msgs))
+ for sock in unused_pair4:
+ sock.close()
+
+ for sock in unused_pair6:
+ sock.close()
+
+ for sock in pair5:
+ sock.close()
+
def testPortComparisonValidation(self):
"""Checks for a bug in validating port comparison bytecode.
@@ -366,6 +381,9 @@
cookie = sock.getsockopt(net_test.SOL_SOCKET, net_test.SO_COOKIE, 8)
self.assertEqual(diag_msg.id.cookie, cookie)
+ for sock in socketpair:
+ sock.close()
+
def testGetsockoptcookie(self):
self.CheckSocketCookie(AF_INET, "127.0.0.1")
self.CheckSocketCookie(AF_INET6, "::1")
@@ -400,6 +418,8 @@
self.assertSockInfoMatchesSocket(s, self.sock_diag.GetSockInfo(req))
+ s.close()
+
class SockDestroyTest(SockDiagBaseTest):
"""Tests that SOCK_DESTROY works correctly.
@@ -524,34 +544,12 @@
class TcpRcvWindowTest(tcp_test.TcpBaseTest, SockDiagBaseTest):
- RWND_SIZE = 64000 if LINUX_4_19_OR_ABOVE else 42000
+ RWND_SIZE = 64000
TCP_DEFAULT_INIT_RWND = "/proc/sys/net/ipv4/tcp_default_init_rwnd"
def setUp(self):
super(TcpRcvWindowTest, self).setUp()
- if LINUX_4_19_OR_ABOVE:
- self.assertRaisesErrno(ENOENT, open, self.TCP_DEFAULT_INIT_RWND, "w")
- return
-
- try:
- f = open(self.TCP_DEFAULT_INIT_RWND, "w")
- except IOError as e:
- # sysctl was namespace-ified on May 25, 2020 in android-4.14-stable [R]
- # just after 4.14.181 by:
- # https://android-review.googlesource.com/c/kernel/common/+/1312623
- # ANDROID: namespace'ify tcp_default_init_rwnd implementation
- # But that commit might be missing in Q era kernels even when > 4.14.181
- # when running T vts.
- if net_test.LINUX_VERSION >= (4, 15, 0):
- raise
- if e.errno != ENOENT:
- raise
- # we rely on the network namespace creation code
- # modifying the root netns sysctl before the namespace is even created
- return
-
- f.write("60")
- f.close()
+ self.assertRaisesErrno(ENOENT, open, self.TCP_DEFAULT_INIT_RWND, "w")
def checkInitRwndSize(self, version, netid):
self.IncomingConnection(version, tcp_test.TCP_ESTABLISHED, netid)
@@ -561,6 +559,7 @@
"Tcp rwnd of netid=%d, version=%d is not enough. "
"Expect: %d, actual: %d" % (netid, version, self.RWND_SIZE,
tcpInfo.tcpi_rcv_ssthresh))
+ self.CloseSockets()
def checkSynPacketWindowSize(self, version, netid):
s = self.BuildSocket(version, net_test.TCPSocket, netid, "mark")
@@ -618,6 +617,7 @@
if state != tcp_test.TCP_LISTEN:
msg = "Closing accepted IPv%d %s socket" % (version, statename)
self.CheckRstOnClose(self.accepted, None, True, msg)
+ self.CloseSockets()
def testTcpResets(self):
"""Checks that closing sockets in appropriate states sends a RST."""
@@ -636,6 +636,7 @@
# Close the socket and check that it goes into FIN_WAIT1 and sends a FIN.
net_test.EnableFinWait(self.accepted)
self.accepted.close()
+ del self.accepted
diag_req.states = 1 << tcp_test.TCP_FIN_WAIT1
diag_msg, attrs = self.sock_diag.GetSockInfo(diag_req)
self.assertEqual(tcp_test.TCP_FIN_WAIT1, diag_msg.state)
@@ -663,6 +664,8 @@
for diag_msg, attrs in infos),
"Expected to find FIN_WAIT2 socket in %s" % infos)
+ self.CloseSockets()
+
def FindChildSockets(self, s):
"""Finds the SYN_RECV child sockets of a given listening socket."""
d = self.sock_diag.FindSockDiagFromFd(self.s)
@@ -726,11 +729,11 @@
else:
CloseChildren()
CheckChildrenClosed()
- self.s.close()
else:
CloseChildren()
CloseParent(False)
- self.s.close()
+
+ self.CloseSockets()
def testChildSockets(self):
for version in [4, 5, 6]:
@@ -749,6 +752,7 @@
self.assertRaisesErrno(EINVAL, self.s.accept)
# TODO: this should really return an error such as ENOTCONN...
self.assertEqual(b"", self.s.recv(4096))
+ self.CloseSockets()
def testReadInterrupted(self):
"""Tests that read() is interrupted by SOCK_DESTROY."""
@@ -760,6 +764,7 @@
self.assertRaisesErrno(EPIPE, self.accepted.send, b"foo")
self.assertEqual(b"", self.accepted.recv(4096))
self.assertEqual(b"", self.accepted.recv(4096))
+ self.CloseSockets()
def testConnectInterrupted(self):
"""Tests that connect() is interrupted by SOCK_DESTROY."""
@@ -779,6 +784,7 @@
self.ExpectPacketOn(self.netid, desc, syn)
msg = "SOCK_DESTROY of socket in connect, expected no RST"
self.ExpectNoPacketsOn(self.netid, msg)
+ s.close()
class PollOnCloseTest(tcp_test.TcpBaseTest, SockDiagBaseTest):
@@ -840,6 +846,7 @@
lambda sock: self.BlockingPoll(sock, mask, expected, ignoremask),
None)
self.assertSocketErrors(ECONNABORTED)
+ self.CloseSockets()
def CheckPollRst(self, mask, expected, ignoremask):
"""Interrupts a poll() by receiving a TCP RST."""
@@ -850,6 +857,7 @@
lambda sock: self.BlockingPoll(sock, mask, expected, ignoremask),
None)
self.assertSocketErrors(ECONNRESET)
+ self.CloseSockets()
def testReadPollRst(self):
self.CheckPollRst(select.POLLIN, self.POLLIN_ERR_HUP, 0)
@@ -922,6 +930,7 @@
s.connect((dst, 53))
self.sock_diag.CloseSocketFromFd(s)
self.assertEqual((unspec, 0), s.getsockname()[:2])
+ s.close()
# Closing a socket bound to an IP address leaves the address as is.
s = self.BuildSocket(version, net_test.UDPSocket, netid, "mark")
@@ -931,6 +940,7 @@
port = s.getsockname()[1]
self.sock_diag.CloseSocketFromFd(s)
self.assertEqual((src, 0), s.getsockname()[:2])
+ s.close()
# Closing a socket bound to a port leaves the port as is.
s = self.BuildSocket(version, net_test.UDPSocket, netid, "mark")
@@ -938,6 +948,7 @@
s.connect((dst, 53))
self.sock_diag.CloseSocketFromFd(s)
self.assertEqual((unspec, port), s.getsockname()[:2])
+ s.close()
# Closing a socket bound to IP address and port leaves both as is.
s = self.BuildSocket(version, net_test.UDPSocket, netid, "mark")
@@ -945,6 +956,7 @@
port = self.BindToRandomPort(s, src)
self.sock_diag.CloseSocketFromFd(s)
self.assertEqual((src, port), s.getsockname()[:2])
+ s.close()
def testReadInterrupted(self):
"""Tests that read() is interrupted by SOCK_DESTROY."""
@@ -968,6 +980,8 @@
self.CloseDuringBlockingCall(s, lambda sock: sock.recv(4096),
ECONNABORTED)
+ s.close()
+
class SockDestroyPermissionTest(SockDiagBaseTest):
def CheckPermissions(self, socktype):
@@ -987,6 +1001,8 @@
self.sock_diag.CloseSocketFromFd(s)
self.assertRaises(ValueError, self.sock_diag.CloseSocketFromFd, s)
+ s.close()
+
def testUdp(self):
self.CheckPermissions(SOCK_DGRAM)
@@ -1073,6 +1089,9 @@
self.assertRaisesErrno(EPERM, self.FilterEstablishedSockets,
0xfff0000, 0xf0fed00)
+ s1.close()
+ s2.close()
+
@staticmethod
def SetRandomMark(s):
# Python doesn't like marks that don't fit into a signed int.
@@ -1114,6 +1133,7 @@
self.assertSocketMarkIs(accepted, accepted_mark)
self.assertSocketMarkIs(server, server_mark)
+ accepted.close()
server.close()
client.close()
diff --git a/net/test/srcaddr_selection_test.py b/net/test/srcaddr_selection_test.py
index f515c47..8c327c3 100755
--- a/net/test/srcaddr_selection_test.py
+++ b/net/test/srcaddr_selection_test.py
@@ -83,6 +83,7 @@
s.connect((net_test.IPV6_ADDR, 123))
src_addr = s.getsockname()[0]
self.assertTrue(src_addr)
+ s.close()
return src_addr
def assertAddressNotPresent(self, address):
@@ -103,13 +104,19 @@
def BindToAddress(self, address):
s = net_test.UDPSocket(AF_INET6)
- s.bind((address, 0, 0, 0))
+ try:
+ s.bind((address, 0, 0, 0))
+ finally:
+ s.close()
def SendWithSourceAddress(self, address, netid, dest=net_test.IPV6_ADDR):
pktinfo = multinetwork_base.MakePktInfo(6, address, 0)
cmsgs = [(net_test.SOL_IPV6, IPV6_PKTINFO, pktinfo)]
s = self.BuildSocket(6, net_test.UDPSocket, netid, "mark")
- return csocket.Sendmsg(s, (dest, 53), b"Hello", cmsgs, 0)
+ try:
+ return csocket.Sendmsg(s, (dest, 53), b"Hello", cmsgs, 0)
+ finally:
+ s.close()
def assertAddressUsable(self, address, netid):
self.BindToAddress(address)
@@ -132,7 +139,19 @@
if not self.AddressIsTentative(address):
return
time.sleep(0.1)
- raise AssertionError("%s did not complete DAD after 2 seconds")
+ raise AssertionError(f"{address} did not complete DAD after 2 seconds")
+
+ def WaitForDadFailure(self, address):
+ # Address should be either deleted or set IFA_F_DADFAILED flag after DAD failure
+ for _ in range(20):
+ try:
+ ifa_msg = self.iproute.GetAddress(address)[0]
+ except OSError:
+ return
+ if ifa_msg.flags & iproute.IFA_F_DADFAILED:
+ return
+ time.sleep(0.1)
+ raise AssertionError(f"{address} did not complete DAD failure after 2 seconds")
class MultiInterfaceSourceAddressSelectionTest(IPv6SourceAddressSelectionTest):
@@ -279,6 +298,7 @@
self.SetUseOptimistic(self.test_ifname, 1)
# Send a RA to start SLAAC and subsequent DAD.
self.SendRA(self.test_netid, retranstimer=RETRANS_TIMER)
+ time.sleep(0.1) # Give the kernel time to notice our RA
# Prove optimism and usability.
self.assertAddressHasExpectedAttributes(
self.test_ip, self.test_ifindex, iproute.IFA_F_OPTIMISTIC)
@@ -293,7 +313,7 @@
scapy.ICMPv6ND_NA(tgt=self.test_ip, R=0, S=0, O=1) /
scapy.ICMPv6NDOptDstLLAddr(lladdr=conflict_macaddr))
self.ReceiveEtherPacketOn(self.test_netid, dad_defense)
- self.WaitForDad(self.test_lladdr)
+ self.WaitForDadFailure(self.test_ip)
# The address should have failed DAD, and therefore no longer be usable.
self.assertAddressNotUsable(self.test_ip, self.test_netid)
diff --git a/net/test/sysctls_test.py b/net/test/sysctls_test.py
index a4d8d66..4bf29bc 100755
--- a/net/test/sysctls_test.py
+++ b/net/test/sysctls_test.py
@@ -24,7 +24,7 @@
def check(self, f):
with open(f) as algs_file:
algs = algs_file.readline().strip().split(' ')
- bad_algs = [a for a in algs if a not in ['cubic', 'reno']]
+ bad_algs = [a for a in algs if a not in ['bbr', 'cubic', 'reno']]
msg = ("Obsolete TCP congestion control algorithm found. These "
"algorithms will decrease real-world networking performance for "
"users and must be disabled. Found: %s" % bad_algs)
@@ -38,7 +38,6 @@
def testAvailableCongestionControl(self):
self.check('/proc/sys/net/ipv4/tcp_available_congestion_control')
- @unittest.skipUnless(net_test.LINUX_VERSION >= (4, 15, 0), "not yet namespaced")
def testCongestionControl(self):
self.check('/proc/sys/net/ipv4/tcp_congestion_control')
diff --git a/net/test/tcp_fastopen_test.py b/net/test/tcp_fastopen_test.py
index f5fc00f..95596c5 100755
--- a/net/test/tcp_fastopen_test.py
+++ b/net/test/tcp_fastopen_test.py
@@ -66,9 +66,6 @@
self.tcp_metrics.GetMetrics(saddr, daddr)
def clearBlackhole(self):
- # Prior to 4.15 this sysctl is not namespace aware.
- if net_test.LINUX_VERSION < (4, 15, 0) and not os.path.exists(BH_TIMEOUT_SYSCTL):
- return
timeout = self.GetSysctl(BH_TIMEOUT_SYSCTL)
# Write to timeout to clear any pre-existing blackhole condition
@@ -130,6 +127,7 @@
t.payload = scapy.Raw(net_test.UDP_PAYLOAD)
msg = "TFO write, expected %s" % desc
self.ExpectPacketOn(netid, msg, syn)
+ s.close()
def testConnectOptionIPv4(self):
self.CheckConnectOption(4)
diff --git a/net/test/tcp_test.py b/net/test/tcp_test.py
index 5a073e6..f3ee291 100644
--- a/net/test/tcp_test.py
+++ b/net/test/tcp_test.py
@@ -40,9 +40,16 @@
class TcpBaseTest(multinetwork_base.MultiNetworkBaseTest):
- def tearDown(self):
+ def CloseSockets(self):
+ if hasattr(self, "accepted"):
+ self.accepted.close()
+ del self.accepted
if hasattr(self, "s"):
self.s.close()
+ del self.s
+
+ def tearDown(self):
+ self.CloseSockets()
super(TcpBaseTest, self).tearDown()
def OpenListenSocket(self, version, netid):
diff --git a/net/test/xfrm.py b/net/test/xfrm.py
index 3d003b6..eae3d4f 100755
--- a/net/test/xfrm.py
+++ b/net/test/xfrm.py
@@ -224,7 +224,7 @@
EspHdr = cstruct.Struct("EspHdr", "!II", "spi seqnum")
# Local constants.
-_DEFAULT_REPLAY_WINDOW = 4
+_DEFAULT_REPLAY_WINDOW = 32
ALL_ALGORITHMS = 0xffffffff
# Policy-SA match method (for VTI/XFRM-I).
diff --git a/net/test/xfrm_algorithm_test.py b/net/test/xfrm_algorithm_test.py
index 8466953..5fa5352 100755
--- a/net/test/xfrm_algorithm_test.py
+++ b/net/test/xfrm_algorithm_test.py
@@ -14,17 +14,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# pylint: disable=g-bad-todo,g-bad-file-header,wildcard-import
-from errno import * # pylint: disable=wildcard-import
-import os
+from errno import * # pylint: disable=wildcard-import,g-importing-member
import itertools
-from scapy import all as scapy
-from socket import * # pylint: disable=wildcard-import
+import os
+from socket import * # pylint: disable=wildcard-import,g-importing-member
import threading
+import time
import unittest
-import multinetwork_base
import net_test
+from scapy import all as scapy
from tun_twister import TapTwister
import util
import xfrm
@@ -87,14 +86,17 @@
# 4 bytes (32 bits) of nonce. A fresh nonce value MUST be assigned for
# each SA. RFC 7634 also specifies that ICV length must be 16 bytes.
# ChaCha20-Poly1305 is enforced since kernel version 5.8
- (xfrm.XfrmAlgoAead((xfrm.XFRM_AEAD_CHACHA20_POLY1305, 256+32, 16*8)), (5, 8)),
+ (xfrm.XfrmAlgoAead((xfrm.XFRM_AEAD_CHACHA20_POLY1305, 256+32, 16*8)),
+ (5, 8)),
]
+
def GenerateKey(key_len):
if key_len % 8 != 0:
raise ValueError("Invalid key length in bits: " + str(key_len))
return os.urandom(key_len // 8)
+
# Does the kernel support this algorithm?
def HaveAlgo(crypt_algo, auth_algo, aead_algo):
try:
@@ -133,28 +135,33 @@
# False.
algoState = {}
+
def AlgoEnforcedOrEnabled(crypt, auth, aead, target_algo, target_kernel):
if algoState.get(target_algo) is None:
- algoState[target_algo] = net_test.LINUX_VERSION >= target_kernel or HaveAlgo(
- crypt, auth, aead)
+ algoState[target_algo] = (net_test.LINUX_VERSION >= target_kernel
+ or HaveAlgo(crypt, auth, aead))
return algoState.get(target_algo)
+
# Return true if this algorithm should be enforced or is enabled on this kernel
-def AuthEnforcedOrEnabled(authCase):
- auth = authCase[0]
+def AuthEnforcedOrEnabled(auth_case):
+ auth = auth_case[0]
crypt = xfrm.XfrmAlgo((b"ecb(cipher_null)", 0))
- return AlgoEnforcedOrEnabled(crypt, auth, None, auth.name, authCase[1])
+ return AlgoEnforcedOrEnabled(crypt, auth, None, auth.name, auth_case[1])
+
# Return true if this algorithm should be enforced or is enabled on this kernel
-def CryptEnforcedOrEnabled(cryptCase):
- crypt = cryptCase[0]
+def CryptEnforcedOrEnabled(crypt_case):
+ crypt = crypt_case[0]
auth = xfrm.XfrmAlgoAuth((b"digest_null", 0, 0))
- return AlgoEnforcedOrEnabled(crypt, auth, None, crypt.name, cryptCase[1])
+ return AlgoEnforcedOrEnabled(crypt, auth, None, crypt.name, crypt_case[1])
+
# Return true if this algorithm should be enforced or is enabled on this kernel
-def AeadEnforcedOrEnabled(aeadCase):
- aead = aeadCase[0]
- return AlgoEnforcedOrEnabled(None, None, aead, aead.name, aeadCase[1])
+def AeadEnforcedOrEnabled(aead_case):
+ aead = aead_case[0]
+ return AlgoEnforcedOrEnabled(None, None, aead, aead.name, aead_case[1])
+
def InjectTests():
XfrmAlgorithmTest.InjectTests()
@@ -163,66 +170,67 @@
class XfrmAlgorithmTest(xfrm_base.XfrmLazyTest):
@classmethod
def InjectTests(cls):
- VERSIONS = (4, 6)
- TYPES = (SOCK_DGRAM, SOCK_STREAM)
+ versions = (4, 6)
+ types = (SOCK_DGRAM, SOCK_STREAM)
# Tests all combinations of auth & crypt. Mutually exclusive with aead.
- param_list = itertools.product(VERSIONS, TYPES, AUTH_ALGOS, CRYPT_ALGOS,
+ param_list = itertools.product(versions, types, AUTH_ALGOS, CRYPT_ALGOS,
[None])
util.InjectParameterizedTest(cls, param_list, cls.TestNameGenerator)
# Tests all combinations of aead. Mutually exclusive with auth/crypt.
- param_list = itertools.product(VERSIONS, TYPES, [None], [None], AEAD_ALGOS)
+ param_list = itertools.product(versions, types, [None], [None], AEAD_ALGOS)
util.InjectParameterizedTest(cls, param_list, cls.TestNameGenerator)
@staticmethod
- def TestNameGenerator(version, proto, authCase, cryptCase, aeadCase):
+ def TestNameGenerator(version, proto, auth_case, crypt_case, aead_case):
# Produce a unique and readable name for each test. e.g.
# testSocketPolicySimple_cbc-aes_256_hmac-sha512_512_256_IPv6_UDP
param_string = ""
- if cryptCase is not None:
- crypt = cryptCase[0]
+ if crypt_case is not None:
+ crypt = crypt_case[0]
param_string += "%s_%d_" % (crypt.name.decode(), crypt.key_len)
- if authCase is not None:
- auth = authCase[0]
+ if auth_case is not None:
+ auth = auth_case[0]
param_string += "%s_%d_%d_" % (auth.name.decode(), auth.key_len,
- auth.trunc_len)
+ auth.trunc_len)
- if aeadCase is not None:
- aead = aeadCase[0]
+ if aead_case is not None:
+ aead = aead_case[0]
param_string += "%s_%d_%d_" % (aead.name.decode(), aead.key_len,
- aead.icv_len)
+ aead.icv_len)
param_string += "%s_%s" % ("IPv4" if version == 4 else "IPv6",
- "UDP" if proto == SOCK_DGRAM else "TCP")
+ "UDP" if proto == SOCK_DGRAM else "TCP")
return param_string
- def ParamTestSocketPolicySimple(self, version, proto, authCase, cryptCase, aeadCase):
+ def ParamTestSocketPolicySimple(self, version, proto, auth_case, crypt_case,
+ aead_case):
"""Test two-way traffic using transport mode and socket policies."""
# Bypass the test if any algorithm going to be tested is not enforced
# or enabled on this kernel
- if authCase is not None and not AuthEnforcedOrEnabled(authCase):
+ if auth_case is not None and not AuthEnforcedOrEnabled(auth_case):
return
- if cryptCase is not None and not CryptEnforcedOrEnabled(cryptCase):
+ if crypt_case is not None and not CryptEnforcedOrEnabled(crypt_case):
return
- if aeadCase is not None and not AeadEnforcedOrEnabled(aeadCase):
+ if aead_case is not None and not AeadEnforcedOrEnabled(aead_case):
return
- auth = authCase[0] if authCase else None
- crypt = cryptCase[0] if cryptCase else None
- aead = aeadCase[0] if aeadCase else None
+ auth = auth_case[0] if auth_case else None
+ crypt = crypt_case[0] if crypt_case else None
+ aead = aead_case[0] if aead_case else None
def AssertEncrypted(packet):
# This gives a free pass to ICMP and ICMPv6 packets, which show up
# nondeterministically in tests.
self.assertEqual(None,
- packet.getlayer(scapy.UDP),
- "UDP packet sent in the clear")
+ packet.getlayer(scapy.UDP),
+ "UDP packet sent in the clear")
self.assertEqual(None,
- packet.getlayer(scapy.TCP),
- "TCP packet sent in the clear")
+ packet.getlayer(scapy.TCP),
+ "TCP packet sent in the clear")
# We create a pair of sockets, "left" and "right", that will talk to each
# other using transport mode ESP. Because of TapTwister, both sockets
@@ -342,7 +350,10 @@
data = accepted.recv(2048)
self.assertEqual(b"hello request", data)
accepted.send(b"hello response")
- except Exception as e:
+ time.sleep(0.1)
+ accepted.close()
+ except Exception as e: # pylint: disable=broad-exception-caught
+ nonlocal server_error
server_error = e
finally:
sock.close()
@@ -355,7 +366,8 @@
self.assertEqual(client_port, peer[1])
self.assertEqual(b"hello request", data)
sock.sendto(b"hello response", peer)
- except Exception as e:
+ except Exception as e: # pylint: disable=broad-exception-caught
+ nonlocal server_error
server_error = e
finally:
sock.close()
@@ -377,7 +389,8 @@
# Wait for server to be ready before attempting to connect. TCP retries
# hide this problem, but UDP will fail outright if the server socket has
# not bound when we send.
- self.assertTrue(server_ready.wait(2.0), "Timed out waiting for server thread")
+ self.assertTrue(server_ready.wait(3.0),
+ "Timed out waiting for server thread")
with TapTwister(fd=self.tuns[netid].fileno(), validator=AssertEncrypted):
sock_left.connect((remote_addr, right_port))
@@ -385,7 +398,7 @@
data = sock_left.recv(2048)
self.assertEqual(b"hello response", data)
sock_left.close()
- server.join(timeout=2.0)
+ server.join(timeout=3.0)
self.assertFalse(server.is_alive(), "Timed out waiting for server exit")
if server_error:
raise server_error
diff --git a/net/test/xfrm_test.py b/net/test/xfrm_test.py
index 4c5bff5..3aa3dc6 100755
--- a/net/test/xfrm_test.py
+++ b/net/test/xfrm_test.py
@@ -77,7 +77,7 @@
expected = (
"src :: dst 2001:4860:4860::8888\n"
"\tproto esp spi 0x00001234 reqid 3320 mode transport\n"
- "\treplay-window 4 \n"
+ "\treplay-window 32 \n"
"\tauth-trunc hmac(sha1) 0x%s 96\n"
"\tenc cbc(aes) 0x%s\n"
"\tsel src ::/0 dst ::/0 \n" % (
@@ -442,23 +442,22 @@
# because IP headers are always at least 20 bytes long.
data = 19 * b"a"
datalen = len(data)
- data += xfrm_base.GetEspTrailer(len(data), IPPROTO_UDP)
- self.assertEqual(32, len(data) + 8)
# TODO: update scapy and use scapy.ESP instead of manually generating ESP header.
inner_pkt = xfrm.EspHdr(spi=TEST_SPI, seqnum=1).Pack() + bytes(
- scapy.UDP(sport=443, dport=32123) / data)
+ scapy.UDP(sport=443, dport=32123) / data) + bytes(
+ xfrm_base.GetEspTrailer(len(data), IPPROTO_UDP))
input_pkt = (IpType(src=remoteaddr, dst=myaddr) /
scapy.UDP(sport=4500, dport=encap_port) /
inner_pkt)
else:
# TODO: test IPv4 in IPv6 encap and vice versa.
data = b"" # Empty UDP payload
- datalen = len(data) + {4: 20, 6: 40}[version]
- data += xfrm_base.GetEspTrailer(len(data), IPPROTO_UDP)
+ datalen = {4: 20, 6: 40}[version] + len(data)
# TODO: update scapy and use scapy.ESP instead of manually generating ESP header.
inner_pkt = xfrm.EspHdr(spi=TEST_SPI, seqnum=1).Pack() + bytes(
IpType(src=remoteaddr, dst=myaddr) /
- scapy.UDP(sport=443, dport=32123) / data)
+ scapy.UDP(sport=443, dport=32123) / data) + bytes(
+ xfrm_base.GetEspTrailer(len(data), {4: IPPROTO_IPIP, 6: IPPROTO_IPV6}[version]))
input_pkt = (IpType(src=remoteaddr, dst=myaddr) /
scapy.UDP(sport=4500, dport=encap_port) /
inner_pkt)
@@ -486,12 +485,14 @@
# IPv6 UDP encap is broken between:
# 4db4075f92af ("esp6: fix check on ipv6_skip_exthdr's return value") and
# 5f9c55c8066b ("ipv6: check return value of ipv6_skip_exthdr")
- @unittest.skipUnless(net_test.KernelAtLeast([(5, 10, 108), (5, 15, 31)]),
+ @unittest.skipUnless(net_test.KernelAtLeast([(5, 10, 108), (5, 15, 31)]) or
+ net_test.NonGXI(5, 10),
reason="Unsupported or broken on current kernel")
def testIPv6UDPEncapRecvTransport(self):
self._CheckUDPEncapRecv(6, xfrm.XFRM_MODE_TRANSPORT)
- @unittest.skipUnless(net_test.KernelAtLeast([(5, 10, 108), (5, 15, 31)]),
+ @unittest.skipUnless(net_test.KernelAtLeast([(5, 10, 108), (5, 15, 31)]) or
+ net_test.NonGXI(5, 10),
reason="Unsupported or broken on current kernel")
def testIPv6UDPEncapRecvTunnel(self):
self._CheckUDPEncapRecv(6, xfrm.XFRM_MODE_TUNNEL)
diff --git a/net/test/xfrm_tunnel_test.py b/net/test/xfrm_tunnel_test.py
index 4efb46a..d6c1f79 100755
--- a/net/test/xfrm_tunnel_test.py
+++ b/net/test/xfrm_tunnel_test.py
@@ -40,43 +40,21 @@
_TEST_XFRM_IF_ID = 42
_TEST_SPI = 0x1234
-# Does the kernel support CONFIG_XFRM_INTERFACE?
-def HaveXfrmInterfaces():
- # 4.19+ must have CONFIG_XFRM_INTERFACE enabled
- if LINUX_VERSION >= (4, 19, 0):
- return True
-
- try:
- i = iproute.IPRoute()
- i.CreateXfrmInterface(_TEST_XFRM_IFNAME, _TEST_XFRM_IF_ID,
- _LOOPBACK_IFINDEX)
- i.DeleteLink(_TEST_XFRM_IFNAME)
- try:
- i.GetIfIndex(_TEST_XFRM_IFNAME)
- assert "Deleted interface %s still exists!" % _TEST_XFRM_IFNAME
- except IOError:
- pass
- return True
- except IOError:
- return False
-
-HAVE_XFRM_INTERFACES = HaveXfrmInterfaces()
-
# Two kernel fixes have been added in 5.17 to allow XFRM_MIGRATE to work correctly
# when (1) there are multiple tunnels with the same selectors; and (2) addresses
# are updated to a different IP family. These two fixes were pulled into upstream
# LTS releases 4.14.273, 4.19.236, 5.4.186, 5.10.107 and 5.15.30, from whence they
# flowed into the Android Common Kernel (via standard LTS merges).
-# As such we require 4.14.273+, 4.19.236+, 5.4.186+, 5.10.107+, 5.15.30+ or 5.17+
+#
+# Note 'xfrm: Check if_id in xfrm_migrate' did not end up in 4.14 LTS,
+# and is only present in ACK android-4.14-stable after 4.14.320 LTS merge.
+# See https://android-review.googlesource.com/c/kernel/common/+/2640243
+#
+# As such we require 4.14.321+, 4.19.236+, 5.4.186+, 5.10.107+, 5.15.30+ or 5.17+
# to have these fixes.
def HasXfrmMigrateFixes():
- return (
- ((LINUX_VERSION >= (4, 14, 273)) and (LINUX_VERSION < (4, 19, 0))) or
- ((LINUX_VERSION >= (4, 19, 236)) and (LINUX_VERSION < (5, 4, 0))) or
- ((LINUX_VERSION >= (5, 4, 186)) and (LINUX_VERSION < (5, 10, 0))) or
- ((LINUX_VERSION >= (5, 10, 107)) and (LINUX_VERSION < (5, 15, 0))) or
- (LINUX_VERSION >= (5, 15, 30))
- )
+ return net_test.KernelAtLeast([(4, 19, 236), (5, 4, 186),
+ (5, 10, 107), (5, 15, 30)]) or net_test.NonGXI(4, 14)
# Does the kernel support CONFIG_XFRM_MIGRATE and include the kernel fixes?
@@ -88,10 +66,6 @@
if LINUX_VERSION >= (5, 10, 0):
return True
- # XFRM_MIGRATE depends on xfrmi interfaces
- if not HAVE_XFRM_INTERFACES:
- return False
-
try:
x = xfrm.Xfrm()
wildcard_addr = net_test.GetWildcardAddress(6)
@@ -183,6 +157,7 @@
testInstance.SelectInterface(write_sock, netid, "mark")
write_sock.sendto(net_test.UDP_PAYLOAD, (remote, remote_port))
local_port = write_sock.getsockname()[1]
+ write_sock.close()
return local_port
@@ -281,6 +256,9 @@
sock = write_sock if direction == xfrm.XFRM_POLICY_OUT else read_sock
func(inner_version, outer_version, u_netid, netid, local_inner,
remote_inner, local_outer, remote_outer, sock)
+
+ write_sock.close()
+ read_sock.close()
finally:
if test_output_mark_unset:
self.ClearDefaultNetwork()
@@ -499,7 +477,6 @@
xfrm.ExactMatchMark(self.okey))
[email protected](HAVE_XFRM_INTERFACES, "XFRM interfaces unsupported")
class XfrmAddDeleteXfrmInterfaceTest(xfrm_base.XfrmBaseTest):
"""Test the creation of an XFRM Interface."""
@@ -674,6 +651,13 @@
return cls.OnlinkPrefix(6, netid - _TUNNEL_NETID_OFFSET) + "1"
@classmethod
+ def UidRangeForTunnelNetId(cls, netid):
+ if netid < _TUNNEL_NETID_OFFSET:
+ raise ValueError("Tunnel netid outside tunnel range")
+ netid -= _TUNNEL_NETID_OFFSET
+ return (500 + 50 * netid, 500 + 50 * (netid + 1) - 1)
+
+ @classmethod
def _SetupTunnelNetwork(cls, tunnel, is_add):
"""Setup rules and routes for a tunnel Network.
@@ -704,7 +688,7 @@
table = tunnel.netid
# Set up routing rules.
- start, end = cls.UidRangeForNetid(tunnel.netid)
+ start, end = cls.UidRangeForTunnelNetId(tunnel.netid)
cls.iproute.UidRangeRule(version, is_add, start, end, table,
cls.PRIORITY_UID)
cls.iproute.OifRule(version, is_add, tunnel.iface, table, cls.PRIORITY_OIF)
@@ -746,14 +730,17 @@
local_inner, tunnel.local, local_port, sa_info.spi, sa_info.seq_num)
self.ReceivePacketOn(tunnel.underlying_netid, input_pkt)
- if expect_fail:
- self.assertRaisesErrno(EAGAIN, read_sock.recv, 4096)
- else:
- # Verify that the packet data and src are correct
- data, src = read_sock.recvfrom(4096)
- self.assertReceivedPacket(tunnel, sa_info)
- self.assertEqual(net_test.UDP_PAYLOAD, data)
- self.assertEqual((remote_inner, _TEST_REMOTE_PORT), src[:2])
+ try:
+ if expect_fail:
+ self.assertRaisesErrno(EAGAIN, read_sock.recv, 4096)
+ else:
+ # Verify that the packet data and src are correct
+ data, src = read_sock.recvfrom(4096)
+ self.assertReceivedPacket(tunnel, sa_info)
+ self.assertEqual(net_test.UDP_PAYLOAD, data)
+ self.assertEqual((remote_inner, _TEST_REMOTE_PORT), src[:2])
+ finally:
+ read_sock.close()
def _CheckTunnelOutput(self, tunnel, inner_version, local_inner,
remote_inner, sa_info=None):
@@ -841,6 +828,8 @@
# Check that the interface statistics recorded the inbound packet
self.assertReceivedPacket(tunnel, tunnel.in_sa)
+
+ read_sock.close()
finally:
# Swap the interface addresses to pretend we are the remote
self._SwapInterfaceAddress(
@@ -1011,7 +1000,6 @@
self._TestTunnelRekey(inner_version, outer_version)
[email protected](HAVE_XFRM_INTERFACES, "XFRM interfaces unsupported")
class XfrmInterfaceTest(XfrmTunnelBase):
INTERFACE_CLASS = XfrmInterface
@@ -1050,6 +1038,9 @@
# Those two upstream 5.17 fixes above were pulled in to LTS in kernel versions
# 4.14.273, 4.19.236, 5.4.186, 5.10.107, 5.15.30.
#
+# Note: the 'Check if_id in xfrm_migrate' fix did not land in 4.14 LTS,
+# and instead landed in android-4.14-stable after 4.14.320 LTS merge.
+#
@unittest.skipUnless(SUPPORTS_XFRM_MIGRATE,
"XFRM migration unsupported or fixes not included")
class XfrmInterfaceMigrateTest(XfrmTunnelBase):