Apply data offset for list-of-pointers at access time rather than ListReader creation time.
Baking this offset into `ptr` reduced the number of operations needed at access time, but it made the interpretation of `ptr` inconsistent depending on what type of list was expected.
diff --git a/c++/src/capnp/layout.c++ b/c++/src/capnp/layout.c++
index 7fa5b4e..fef6f48 100644
--- a/c++/src/capnp/layout.c++
+++ b/c++/src/capnp/layout.c++
@@ -2333,10 +2333,6 @@
break;
case ElementSize::POINTER:
- // We expected a list of pointers but got a list of structs. Assuming the first field
- // in the struct is the pointer we were looking for, we want to munge the pointer to
- // point at the first element's pointer section.
- ptr += tag->structRef.dataSize.get();
KJ_REQUIRE(tag->structRef.ptrCount.get() > ZERO * POINTERS,
"Expected a pointer list, but got a list of data-only structs.") {
goto useDefault;
diff --git a/c++/src/capnp/layout.h b/c++/src/capnp/layout.h
index c8d533c..7a27f68 100644
--- a/c++/src/capnp/layout.h
+++ b/c++/src/capnp/layout.h
@@ -1227,8 +1227,12 @@
}
inline PointerReader ListReader::getPointerElement(ElementCount index) const {
+  // If the list elements have data sections, we need to skip over them. Note that if pointers
+  // are present at all (which must already be true if we got here), then `structDataSize` is a
+  // whole number of words, so we don't have to worry about unaligned reads here.
+ auto offset = structDataSize / BITS_PER_BYTE;
return PointerReader(segment, capTable, reinterpret_cast<const WirePointer*>(
- ptr + upgradeBound<uint64_t>(index) * step / BITS_PER_BYTE), nestingLimit);
+ ptr + offset + upgradeBound<uint64_t>(index) * step / BITS_PER_BYTE), nestingLimit);
}
// -------------------------------------------------------------------