// Copyright (C) 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "RingStream.h"

#include "aemu/base/system/System.h"

#define EMUGL_DEBUG_LEVEL 0

#include "host-common/crash_reporter.h"
#include "host-common/debug.h"
#include "host-common/dma_device.h"
#include "host-common/GfxstreamFatalError.h"

#include <assert.h>
#include <memory.h>

using emugl::ABORT_REASON_OTHER;
using emugl::FatalError;

namespace gfxstream {

RingStream::RingStream(
    struct asg_context context,
    android::emulation::asg::ConsumerCallbacks callbacks,
    size_t bufsize) :
    IOStream(bufsize),
    mContext(context),
    mCallbacks(callbacks) { }
RingStream::~RingStream() = default;

int RingStream::getNeededFreeTailSize() const {
    return mContext.ring_config->flush_interval;
}

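// allocBuffer() supplies the host-side scratch buffer behind the stream's
// alloc() path. It is grown on demand and its contents are pushed to the
// guest later by commitBuffer().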
void* RingStream::allocBuffer(size_t minSize) {
    if (mWriteBuffer.size() < minSize) {
        mWriteBuffer.resize_noinit(minSize);
    }
    return mWriteBuffer.data();
}

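// commitBuffer() copies |size| bytes of the scratch buffer into the
// host-to-guest large-transfer ring. While the ring is full it yields, and
// after kBackoffIters fruitless iterations it sleeps 10 us per retry to avoid
// overloading the guest while it drains the ring. If the guest signals
// ASG_HOST_STATE_EXIT (e.g. the guest process died), the partial byte count
// written so far is returned.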
int RingStream::commitBuffer(size_t size) {
    size_t sent = 0;
    auto data = mWriteBuffer.data();

    size_t iters = 0;
    size_t backedOffIters = 0;
    const size_t kBackoffIters = 10000000ULL;
    while (sent < size) {
        ++iters;
        auto avail = ring_buffer_available_write(
            mContext.from_host_large_xfer.ring,
            &mContext.from_host_large_xfer.view);

        // Check if the guest process crashed.
        if (!avail) {
            if (*(mContext.host_state) == ASG_HOST_STATE_EXIT) {
                return sent;
            } else {
                ring_buffer_yield();
                if (iters > kBackoffIters) {
                    android::base::sleepUs(10);
                    ++backedOffIters;
                }
            }
            continue;
        }

        auto remaining = size - sent;
        auto todo = remaining < avail ? remaining : avail;

        ring_buffer_view_write(
            mContext.from_host_large_xfer.ring,
            &mContext.from_host_large_xfer.view,
            data + sent, todo, 1);

        sent += todo;
    }

    if (backedOffIters > 0) {
        WARN("Backed off %zu times to avoid overloading the guest system. This "
             "may indicate resource constraints or performance issues.",
             backedOffIters);
    }
    return sent;
}

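// readRaw() is the main consumer loop. It first drains any bytes left over in
// mReadBuffer from a previous oversized transfer, then polls the to_host
// rings: command descriptors arrive via transfer modes 1 and 2 on the to_host
// ring, while bulk data arrives via mode 3 on the to_host_large_xfer ring.
// When nothing is available it spins up to maxSpins times, then defers to
// mCallbacks.onUnavailableRead(), whose return value doubles as a control
// channel (-1: exit, -2: pause for snapshot, -3: resume after snapshot).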
const unsigned char* RingStream::readRaw(void* buf, size_t* inout_len) {
    size_t wanted = *inout_len;
    size_t count = 0U;
    auto dst = static_cast<char*>(buf);

    uint32_t ringAvailable = 0;
    uint32_t ringLargeXferAvailable = 0;

    const uint32_t maxSpins = 30;
    uint32_t spins = 0;
    bool inLargeXfer = true;

    *(mContext.host_state) = ASG_HOST_STATE_CAN_CONSUME;

    while (count < wanted) {

        if (mReadBufferLeft) {
            size_t avail = std::min<size_t>(wanted - count, mReadBufferLeft);
            memcpy(dst + count,
                   mReadBuffer.data() + (mReadBuffer.size() - mReadBufferLeft),
                   avail);
            count += avail;
            mReadBufferLeft -= avail;
            continue;
        }

        mReadBuffer.clear();

        // no read buffer left...
        if (count > 0) {  // There is some data to return.
            break;
        }

        *(mContext.host_state) = ASG_HOST_STATE_CAN_CONSUME;

        // if (mInSnapshotOperation) {
        //     fprintf(stderr, "%s: %p in snapshot operation, exit\n", __func__, mRenderThreadPtr);
        //     // In a snapshot operation, exit
        //     return nullptr;
        // }

        if (mShouldExit) {
            return nullptr;
        }

        ringAvailable =
            ring_buffer_available_read(mContext.to_host, 0);
        ringLargeXferAvailable =
            ring_buffer_available_read(
                mContext.to_host_large_xfer.ring,
                &mContext.to_host_large_xfer.view);

        auto current = dst + count;
        auto ptrEnd = dst + wanted;

        if (ringAvailable) {
            inLargeXfer = false;
            uint32_t transferMode =
                mContext.ring_config->transfer_mode;
            switch (transferMode) {
                case 1:
                    type1Read(ringAvailable, dst, &count, &current, ptrEnd);
                    break;
                case 2:
                    type2Read(ringAvailable, &count, &current, ptrEnd);
                    break;
                case 3:
                    // emugl::emugl_crash_reporter(
                    //     "Guest should never set to "
                    //     "transfer mode 3 with ringAvailable != 0\n");
                default:
                    // emugl::emugl_crash_reporter(
                    //     "Unknown transfer mode %u\n",
                    //     transferMode);
                    break;
            }
        } else if (ringLargeXferAvailable) {
            type3Read(ringLargeXferAvailable,
                      &count, &current, ptrEnd);
            inLargeXfer = true;
            if (0 == __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE)) {
                inLargeXfer = false;
            }
        } else {
            if (inLargeXfer && 0 != __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE)) {
                continue;
            }

            if (inLargeXfer && 0 == __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE)) {
                inLargeXfer = false;
            }

            if (++spins < maxSpins) {
                ring_buffer_yield();
                continue;
            } else {
                spins = 0;
            }

            if (mShouldExit) {
                return nullptr;
            }

            if (mShouldExitForSnapshot && mInSnapshotOperation) {
                return nullptr;
            }

            int unavailReadResult = mCallbacks.onUnavailableRead();

            if (-1 == unavailReadResult) {
                mShouldExit = true;
            }

            // pause pre snapshot
            if (-2 == unavailReadResult) {
                mShouldExitForSnapshot = true;
            }

            // resume post snapshot
            if (-3 == unavailReadResult) {
                mShouldExitForSnapshot = false;
            }

            continue;
        }
    }

    *inout_len = count;
    ++mXmits;
    mTotalRecv += count;
    D("read %d bytes", (int)count);

    *(mContext.host_state) = ASG_HOST_STATE_RENDERING;
    return (const unsigned char*)buf;
}

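// Transfer mode 1: the to_host ring carries fixed-size asg_type1_xfer
// descriptors (an offset and size into the shared guest buffer). Each
// descriptor's payload is copied into the caller's buffer, the descriptor is
// consumed from the ring, and host_consumed_pos is advanced so the guest can
// reuse that region. If the next payload would overflow the caller's buffer,
// it is staged in mReadBuffer and handed out on the next readRaw() call.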
void RingStream::type1Read(
    uint32_t available,
    char* begin,
    size_t* count, char** current, const char* ptrEnd) {

    uint32_t xferTotal = available / sizeof(struct asg_type1_xfer);

    if (mType1Xfers.size() < xferTotal) {
        mType1Xfers.resize(xferTotal * 2);
    }

    auto xfersPtr = mType1Xfers.data();

    ring_buffer_copy_contents(
        mContext.to_host, 0, xferTotal * sizeof(struct asg_type1_xfer), (uint8_t*)xfersPtr);

    for (uint32_t i = 0; i < xferTotal; ++i) {
        if (*current + xfersPtr[i].size > ptrEnd) {
            // Save in a temp buffer or we'll get stuck
            if (begin == *current && i == 0) {
                const char* src = mContext.buffer + xfersPtr[i].offset;
                mReadBuffer.resize_noinit(xfersPtr[i].size);
                memcpy(mReadBuffer.data(), src, xfersPtr[i].size);
                mReadBufferLeft = xfersPtr[i].size;
                ring_buffer_advance_read(
                    mContext.to_host, sizeof(struct asg_type1_xfer), 1);
                __atomic_fetch_add(&mContext.ring_config->host_consumed_pos, xfersPtr[i].size, __ATOMIC_RELEASE);
            }
            return;
        }
        const char* src = mContext.buffer + xfersPtr[i].offset;
        memcpy(*current, src, xfersPtr[i].size);
        ring_buffer_advance_read(
            mContext.to_host, sizeof(struct asg_type1_xfer), 1);
        __atomic_fetch_add(&mContext.ring_config->host_consumed_pos, xfersPtr[i].size, __ATOMIC_RELEASE);
        *current += xfersPtr[i].size;
        *count += xfersPtr[i].size;

        // TODO: Figure out why running multiple xfers here can result in data
        // corruption.
        return;
    }
}

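// Transfer mode 2: descriptors carry a guest physical address instead of an
// offset into the shared buffer, resolved through mCallbacks.getPtr(). This
// path is not implemented yet: the function aborts immediately, so the loop
// below the abort is currently unreachable.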
void RingStream::type2Read(
    uint32_t available,
    size_t* count, char** current, const char* ptrEnd) {

    GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << "nyi. abort";

    uint32_t xferTotal = available / sizeof(struct asg_type2_xfer);

    if (mType2Xfers.size() < xferTotal) {
        mType2Xfers.resize(xferTotal * 2);
    }

    auto xfersPtr = mType2Xfers.data();

    ring_buffer_copy_contents(
        mContext.to_host, 0, available, (uint8_t*)xfersPtr);

    for (uint32_t i = 0; i < xferTotal; ++i) {

        if (*current + xfersPtr[i].size > ptrEnd) return;

        const char* src =
            mCallbacks.getPtr(xfersPtr[i].physAddr);

        memcpy(*current, src, xfersPtr[i].size);

        ring_buffer_advance_read(
            mContext.to_host, sizeof(struct asg_type1_xfer), 1);

        *current += xfersPtr[i].size;
        *count += xfersPtr[i].size;
    }
}

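// Transfer mode 3 (large transfers): the guest publishes the remaining byte
// count in ring_config->transfer_size and streams the payload through the
// to_host_large_xfer ring. We read as much as the ring, the caller's buffer,
// and transfer_size allow, decrementing transfer_size before touching the
// ring so the guest cannot start its next transfer and race this update.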
void RingStream::type3Read(
    uint32_t available,
    size_t* count, char** current, const char* ptrEnd) {

    uint32_t xferTotal = __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE);
    uint32_t maxCanRead = ptrEnd - *current;
    uint32_t ringAvail = available;
    uint32_t actuallyRead = std::min(ringAvail, std::min(xferTotal, maxCanRead));

    // Decrement transfer_size before letting the guest proceed in ring_buffer funcs or we will race
    // to the next time the guest sets transfer_size
    __atomic_fetch_sub(&mContext.ring_config->transfer_size, actuallyRead, __ATOMIC_RELEASE);

    ring_buffer_read_fully_with_abort(
        mContext.to_host_large_xfer.ring,
        &mContext.to_host_large_xfer.view,
        *current, actuallyRead,
        1, &mContext.ring_config->in_error);

    *current += actuallyRead;
    *count += actuallyRead;
}

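// Thin wrappers over the emugl DMA device: translate a guest physical address
// to a host pointer for reading, and release the mapping when done.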
void* RingStream::getDmaForReading(uint64_t guest_paddr) {
    return emugl::g_emugl_dma_get_host_addr(guest_paddr);
}

void RingStream::unlockDma(uint64_t guest_paddr) { emugl::g_emugl_dma_unlock(guest_paddr); }

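// writeFully() is built on alloc()/flush(), so it shares commitBuffer()'s
// back-pressure behavior. readFully() is not supported on RingStream and
// aborts if called; callers are expected to use readRaw() instead.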
int RingStream::writeFully(const void* buf, size_t len) {
    void* dstBuf = alloc(len);
    memcpy(dstBuf, buf, len);
    flush();
    return 0;
}

const unsigned char* RingStream::readFully(void* buf, size_t len) {
    GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << "not intended for use with RingStream";
}

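// Snapshot support: onSave() persists the unread tail of mReadBuffer plus the
// pending write buffer; onLoad() restores both and returns a pointer to the
// restored write buffer.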
void RingStream::onSave(android::base::Stream* stream) {
    stream->putBe32(mReadBufferLeft);
    stream->write(mReadBuffer.data() + mReadBuffer.size() - mReadBufferLeft,
                  mReadBufferLeft);
    android::base::saveBuffer(stream, mWriteBuffer);
}

unsigned char* RingStream::onLoad(android::base::Stream* stream) {
    android::base::loadBuffer(stream, &mReadBuffer);
    mReadBufferLeft = mReadBuffer.size();
    android::base::loadBuffer(stream, &mWriteBuffer);
    return reinterpret_cast<unsigned char*>(mWriteBuffer.data());
}

}  // namespace gfxstream