blob: 13dfba64054bba83cb351e9b246e84e6131a5382 [file] [log] [blame]
Lingfeng Yangee4aea32020-10-29 08:52:13 -07001// Copyright (C) 2019 The Android Open Source Project
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14#include "RingStream.h"
15
Joshua Duongef2bbc22022-10-05 11:59:15 -070016#include "aemu/base/system/System.h"
Lingfeng Yangee4aea32020-10-29 08:52:13 -070017
18#define EMUGL_DEBUG_LEVEL 0
19
Lingfeng Yangbfe3c722020-10-29 10:33:18 -070020#include "host-common/crash_reporter.h"
21#include "host-common/debug.h"
22#include "host-common/dma_device.h"
Doug Horn0c2ea5a2021-10-29 17:30:16 -070023#include "host-common/GfxstreamFatalError.h"
Lingfeng Yangee4aea32020-10-29 08:52:13 -070024
25#include <assert.h>
26#include <memory.h>
27
Kaiyi Li4935d312022-01-12 16:57:24 -080028using emugl::ABORT_REASON_OTHER;
29using emugl::FatalError;
30
Jason Macnaked0c9e62023-03-30 15:58:24 -070031namespace gfxstream {
Lingfeng Yangee4aea32020-10-29 08:52:13 -070032
Lingfeng Yangee4aea32020-10-29 08:52:13 -070033RingStream::RingStream(
34 struct asg_context context,
35 android::emulation::asg::ConsumerCallbacks callbacks,
36 size_t bufsize) :
37 IOStream(bufsize),
38 mContext(context),
39 mCallbacks(callbacks) { }
40RingStream::~RingStream() = default;
41
42int RingStream::getNeededFreeTailSize() const {
43 return mContext.ring_config->flush_interval;
44}
45
46void* RingStream::allocBuffer(size_t minSize) {
47 if (mWriteBuffer.size() < minSize) {
48 mWriteBuffer.resize_noinit(minSize);
49 }
50 return mWriteBuffer.data();
51}
52
53int RingStream::commitBuffer(size_t size) {
54 size_t sent = 0;
55 auto data = mWriteBuffer.data();
56
57 size_t iters = 0;
58 size_t backedOffIters = 0;
59 const size_t kBackoffIters = 10000000ULL;
60 while (sent < size) {
61 ++iters;
62 auto avail = ring_buffer_available_write(
63 mContext.from_host_large_xfer.ring,
64 &mContext.from_host_large_xfer.view);
65
66 // Check if the guest process crashed.
67 if (!avail) {
68 if (*(mContext.host_state) == ASG_HOST_STATE_EXIT) {
69 return sent;
70 } else {
71 ring_buffer_yield();
72 if (iters > kBackoffIters) {
Lingfeng Yangbfe3c722020-10-29 10:33:18 -070073 android::base::sleepUs(10);
Lingfeng Yangee4aea32020-10-29 08:52:13 -070074 ++backedOffIters;
75 }
76 }
77 continue;
78 }
79
80 auto remaining = size - sent;
81 auto todo = remaining < avail ? remaining : avail;
82
83 ring_buffer_view_write(
84 mContext.from_host_large_xfer.ring,
85 &mContext.from_host_large_xfer.view,
86 data + sent, todo, 1);
87
88 sent += todo;
89 }
90
91 if (backedOffIters > 0) {
Serdar Kocdemir3d6cf832024-10-24 11:12:58 +000092 WARN("Backed off %zu times to avoid overloading the guest system. This "
93 "may indicate resource constraints or performance issues.",
94 backedOffIters);
Lingfeng Yangee4aea32020-10-29 08:52:13 -070095 }
96 return sent;
97}
98
// Reads up to |*inout_len| bytes from the guest into |buf|, blocking (with
// spin/yield and callback-mediated waits) until at least some data arrives
// or an exit/snapshot condition is hit.
//
// On return, |*inout_len| holds the byte count actually read and |buf| is
// returned; returns nullptr if the stream should exit (guest teardown or
// snapshot pause). Data sources, in priority order:
//   1. leftover bytes in mReadBuffer from a previous oversized type-1 xfer,
//   2. the small to_host ring (dispatched by transfer_mode to type1/2Read),
//   3. the to_host_large_xfer ring (type3Read).
const unsigned char* RingStream::readRaw(void* buf, size_t* inout_len) {
    size_t wanted = *inout_len;
    size_t count = 0U;
    auto dst = static_cast<char*>(buf);

    uint32_t ringAvailable = 0;
    uint32_t ringLargeXferAvailable = 0;

    // Spin this many times on an empty ring before invoking the
    // onUnavailableRead callback.
    const uint32_t maxSpins = 30;
    uint32_t spins = 0;
    bool inLargeXfer = true;

    // Advertise to the guest that the host is ready to consume.
    *(mContext.host_state) = ASG_HOST_STATE_CAN_CONSUME;

    while (count < wanted) {

        // Drain any bytes buffered from a previous partial read first.
        if (mReadBufferLeft) {
            size_t avail = std::min<size_t>(wanted - count, mReadBufferLeft);
            memcpy(dst + count,
                   mReadBuffer.data() + (mReadBuffer.size() - mReadBufferLeft),
                   avail);
            count += avail;
            mReadBufferLeft -= avail;
            continue;
        }

        mReadBuffer.clear();

        // no read buffer left...
        if (count > 0) { // There is some data to return.
            break;
        }

        // Nothing returned yet: re-advertise readiness before waiting.
        *(mContext.host_state) = ASG_HOST_STATE_CAN_CONSUME;

        // if (mInSnapshotOperation) {
        //     fprintf(stderr, "%s: %p in snapshot operation, exit\n", __func__, mRenderThreadPtr);
        //     // In a snapshot operation, exit
        //     return nullptr;
        // }

        if (mShouldExit) {
            return nullptr;
        }

        ringAvailable =
            ring_buffer_available_read(mContext.to_host, 0);
        ringLargeXferAvailable =
            ring_buffer_available_read(
                mContext.to_host_large_xfer.ring,
                &mContext.to_host_large_xfer.view);

        auto current = dst + count;
        auto ptrEnd = dst + wanted;

        if (ringAvailable) {
            inLargeXfer = false;
            // Guest chooses the transfer encoding via ring_config.
            uint32_t transferMode =
                mContext.ring_config->transfer_mode;
            switch (transferMode) {
                case 1:
                    type1Read(ringAvailable, dst, &count, &current, ptrEnd);
                    break;
                case 2:
                    type2Read(ringAvailable, &count, &current, ptrEnd);
                    break;
                case 3:
                    // emugl::emugl_crash_reporter(
                    //     "Guest should never set to "
                    //     "transfer mode 3 with ringAvailable != 0\n");
                default:
                    // emugl::emugl_crash_reporter(
                    //     "Unknown transfer mode %u\n",
                    //     transferMode);
                    break;
            }
        } else if (ringLargeXferAvailable) {
            type3Read(ringLargeXferAvailable,
                      &count, &current, ptrEnd);
            inLargeXfer = true;
            // transfer_size hitting zero marks the end of the large xfer.
            if (0 == __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE)) {
                inLargeXfer = false;
            }
        } else {
            // Mid-large-xfer with bytes still pending: keep polling without
            // spinning down or notifying the callbacks.
            if (inLargeXfer && 0 != __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE)) {
                continue;
            }

            if (inLargeXfer && 0 == __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE)) {
                inLargeXfer = false;
            }

            // Busy-wait a bounded number of times before falling through to
            // the (potentially blocking) onUnavailableRead callback.
            if (++spins < maxSpins) {
                ring_buffer_yield();
                continue;
            } else {
                spins = 0;
            }

            if (mShouldExit) {
                return nullptr;
            }

            if (mShouldExitForSnapshot && mInSnapshotOperation) {
                return nullptr;
            }

            // Callback return codes: -1 = guest gone, -2 = snapshot pause
            // requested, -3 = snapshot resume. (Other values: keep waiting.)
            int unavailReadResult = mCallbacks.onUnavailableRead();

            if (-1 == unavailReadResult) {
                mShouldExit = true;
            }

            // pause pre snapshot
            if (-2 == unavailReadResult) {
                mShouldExitForSnapshot = true;
            }

            // resume post snapshot
            if (-3 == unavailReadResult) {
                mShouldExitForSnapshot = false;
            }

            continue;
        }
    }

    *inout_len = count;
    ++mXmits;
    mTotalRecv += count;
    D("read %d bytes", (int)count);

    // Tell the guest we are now processing what we consumed.
    *(mContext.host_state) = ASG_HOST_STATE_RENDERING;
    return (const unsigned char*)buf;
}
234
// Consumes type-1 transfer descriptors from the to_host ring.
//
// Each asg_type1_xfer names an (offset, size) span inside the shared
// mContext.buffer; the payload is copied from there into |*current|.
// |available| is the byte count readable from the ring (descriptor bytes,
// not payload bytes); |begin| is the start of the caller's destination
// buffer; |count|/|current| are advanced by bytes copied; |ptrEnd| bounds
// the destination.
//
// NOTE(review): only ONE descriptor is consumed per call despite the loop —
// see the in-code TODO about data corruption with multiple xfers.
void RingStream::type1Read(
    uint32_t available,
    char* begin,
    size_t* count, char** current, const char* ptrEnd) {

    uint32_t xferTotal = available / sizeof(struct asg_type1_xfer);

    if (mType1Xfers.size() < xferTotal) {
        mType1Xfers.resize(xferTotal * 2);
    }

    auto xfersPtr = mType1Xfers.data();

    // Peek descriptors without consuming them; each is only "advanced"
    // below once its payload has actually been copied out.
    ring_buffer_copy_contents(
        mContext.to_host, 0, xferTotal * sizeof(struct asg_type1_xfer), (uint8_t*)xfersPtr);

    for (uint32_t i = 0; i < xferTotal; ++i) {
        if (*current + xfersPtr[i].size > ptrEnd) {
            // Save in a temp buffer or we'll get stuck
            // (payload is larger than the remaining destination space; stash
            // it in mReadBuffer so readRaw can drain it across calls — but
            // only when nothing has been copied yet this call).
            if (begin == *current && i == 0) {
                const char* src = mContext.buffer + xfersPtr[i].offset;
                mReadBuffer.resize_noinit(xfersPtr[i].size);
                memcpy(mReadBuffer.data(), src, xfersPtr[i].size);
                mReadBufferLeft = xfersPtr[i].size;
                ring_buffer_advance_read(
                    mContext.to_host, sizeof(struct asg_type1_xfer), 1);
                // Publish consumption to the guest only after the copy.
                __atomic_fetch_add(&mContext.ring_config->host_consumed_pos, xfersPtr[i].size, __ATOMIC_RELEASE);
            }
            return;
        }
        const char* src = mContext.buffer + xfersPtr[i].offset;
        memcpy(*current, src, xfersPtr[i].size);
        ring_buffer_advance_read(
            mContext.to_host, sizeof(struct asg_type1_xfer), 1);
        __atomic_fetch_add(&mContext.ring_config->host_consumed_pos, xfersPtr[i].size, __ATOMIC_RELEASE);
        *current += xfersPtr[i].size;
        *count += xfersPtr[i].size;

        // TODO: Figure out why running multiple xfers here can result in data
        // corruption.
        return;
    }
}
278
279void RingStream::type2Read(
280 uint32_t available,
281 size_t* count, char** current,const char* ptrEnd) {
282
Doug Horn0c2ea5a2021-10-29 17:30:16 -0700283 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << "nyi. abort";
Lingfeng Yangee4aea32020-10-29 08:52:13 -0700284
285 uint32_t xferTotal = available / sizeof(struct asg_type2_xfer);
286
287 if (mType2Xfers.size() < xferTotal) {
288 mType2Xfers.resize(xferTotal * 2);
289 }
290
291 auto xfersPtr = mType2Xfers.data();
292
293 ring_buffer_copy_contents(
294 mContext.to_host, 0, available, (uint8_t*)xfersPtr);
295
296 for (uint32_t i = 0; i < xferTotal; ++i) {
297
298 if (*current + xfersPtr[i].size > ptrEnd) return;
299
300 const char* src =
301 mCallbacks.getPtr(xfersPtr[i].physAddr);
302
303 memcpy(*current, src, xfersPtr[i].size);
304
305 ring_buffer_advance_read(
306 mContext.to_host, sizeof(struct asg_type1_xfer), 1);
307
308 *current += xfersPtr[i].size;
309 *count += xfersPtr[i].size;
310 }
311}
312
// Consumes payload bytes directly from the large-transfer ring (type 3).
//
// |available| is the byte count currently readable from the ring;
// transfer_size (shared with the guest) holds how many bytes remain in the
// overall large transfer. Reads the minimum of {ring bytes, remaining
// transfer, destination space} and advances |count|/|current| accordingly.
void RingStream::type3Read(
    uint32_t available,
    size_t* count, char** current, const char* ptrEnd) {

    uint32_t xferTotal = __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE);
    uint32_t maxCanRead = ptrEnd - *current;
    uint32_t ringAvail = available;
    uint32_t actuallyRead = std::min(ringAvail, std::min(xferTotal, maxCanRead));

    // Decrement transfer_size before letting the guest proceed in ring_buffer funcs or we will race
    // to the next time the guest sets transfer_size
    __atomic_fetch_sub(&mContext.ring_config->transfer_size, actuallyRead, __ATOMIC_RELEASE);

    // Blocking read; aborts early if the guest flags in_error.
    ring_buffer_read_fully_with_abort(
        mContext.to_host_large_xfer.ring,
        &mContext.to_host_large_xfer.view,
        *current, actuallyRead,
        1, &mContext.ring_config->in_error);

    *current += actuallyRead;
    *count += actuallyRead;
}
335
336void* RingStream::getDmaForReading(uint64_t guest_paddr) {
Jason Macnaked0c9e62023-03-30 15:58:24 -0700337 return emugl::g_emugl_dma_get_host_addr(guest_paddr);
Lingfeng Yangee4aea32020-10-29 08:52:13 -0700338}
339
Jason Macnaked0c9e62023-03-30 15:58:24 -0700340void RingStream::unlockDma(uint64_t guest_paddr) { emugl::g_emugl_dma_unlock(guest_paddr); }
Lingfeng Yangee4aea32020-10-29 08:52:13 -0700341
342int RingStream::writeFully(const void* buf, size_t len) {
343 void* dstBuf = alloc(len);
344 memcpy(dstBuf, buf, len);
345 flush();
346 return 0;
347}
348
// Unsupported for RingStream: callers must use readRaw() instead.
// Unconditionally aborts; control never reaches the end of the function.
const unsigned char *RingStream::readFully( void *buf, size_t len) {
    GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << "not intended for use with RingStream";
}
352
// Snapshot save: serializes the unconsumed tail of mReadBuffer and the whole
// mWriteBuffer.
// NOTE(review): the manual putBe32 + raw write of the read-buffer tail must
// stay wire-compatible with loadBuffer() used in onLoad — presumably
// loadBuffer reads a Be32 length followed by that many bytes; verify against
// android::base::loadBuffer before changing either side.
void RingStream::onSave(android::base::Stream* stream) {
    stream->putBe32(mReadBufferLeft);
    stream->write(mReadBuffer.data() + mReadBuffer.size() - mReadBufferLeft,
                  mReadBufferLeft);
    android::base::saveBuffer(stream, mWriteBuffer);
}
359
// Snapshot load: restores the read-buffer tail saved by onSave (the whole
// loaded buffer is unconsumed, so mReadBufferLeft spans all of it) and the
// write buffer. Returns the write buffer's storage for the caller to reuse.
unsigned char* RingStream::onLoad(android::base::Stream* stream) {
    android::base::loadBuffer(stream, &mReadBuffer);
    mReadBufferLeft = mReadBuffer.size();
    android::base::loadBuffer(stream, &mWriteBuffer);
    return reinterpret_cast<unsigned char*>(mWriteBuffer.data());
}
366
Jason Macnaked0c9e62023-03-30 15:58:24 -0700367} // namespace gfxstream