Move GrOpsTask to gpu/ops and the skgpu::v1 namespace
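No behavior change is intended; the class is relocated to src/gpu/ops/ and
wrapped in the skgpu::v1 namespace. For anyone updating downstream code, a
minimal sketch of how call sites adapt (illustrative only; the variable names
below are made up, and whether a file needs the header or just a forward
declaration varies by file):

  // Before:
  #include "src/gpu/GrOpsTask.h"
  class GrOpsTask;                                      // forward declaration
  GrOpsTask* last = drawingMgr->getLastOpsTask(proxy);

  // After:
  #include "src/gpu/ops/OpsTask.h"
  namespace skgpu { namespace v1 { class OpsTask; } }   // forward declaration
  skgpu::v1::OpsTask* last = drawingMgr->getLastOpsTask(proxy);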
Bug: skia:11837
Change-Id: I9ee6343b5144b02e8f455df0ea01b8199a8f14e1
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/440837
Commit-Queue: Robert Phillips <[email protected]>
Reviewed-by: Greg Daniel <[email protected]>
diff --git a/gn/gpu.gni b/gn/gpu.gni
index 727df61..7360e8a 100644
--- a/gn/gpu.gni
+++ b/gn/gpu.gni
@@ -493,8 +493,6 @@
"$_src/gpu/GrBlurUtils.h",
"$_src/gpu/GrDrawOpTest.cpp",
"$_src/gpu/GrDrawOpTest.h",
- "$_src/gpu/GrOpsTask.cpp",
- "$_src/gpu/GrOpsTask.h",
# Ops
"$_src/gpu/ops/AAConvexPathRenderer.cpp",
@@ -556,6 +554,8 @@
"$_src/gpu/ops/GrStrokeRectOp.h",
"$_src/gpu/ops/GrTextureOp.cpp",
"$_src/gpu/ops/GrTextureOp.h",
+ "$_src/gpu/ops/OpsTask.cpp",
+ "$_src/gpu/ops/OpsTask.h",
"$_src/gpu/ops/PathInnerTriangulateOp.cpp",
"$_src/gpu/ops/PathInnerTriangulateOp.h",
"$_src/gpu/ops/PathStencilCoverOp.cpp",
diff --git a/include/private/GrTypesPriv.h b/include/private/GrTypesPriv.h
index 5a8a0fa..6fc3136 100644
--- a/include/private/GrTypesPriv.h
+++ b/include/private/GrTypesPriv.h
@@ -165,7 +165,7 @@
};
/**
- * This enum is used to specify the load operation to be used when an GrOpsTask/GrOpsRenderPass
+ * This enum is used to specify the load operation to be used when an OpsTask/GrOpsRenderPass
* begins execution.
*/
enum class GrLoadOp {
@@ -175,7 +175,7 @@
};
/**
- * This enum is used to specify the store operation to be used when an GrOpsTask/GrOpsRenderPass
+ * This enum is used to specify the store operation to be used when an OpsTask/GrOpsRenderPass
* ends execution.
*/
enum class GrStoreOp {
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index f536b86..57777f1 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -44,7 +44,7 @@
#include "src/image/SkSurface_Gpu.h"
#if SK_GPU_V1
-#include "src/gpu/GrOpsTask.h"
+#include "src/gpu/ops/OpsTask.h"
#include "src/gpu/ops/SoftwarePathRenderer.h"
#endif
@@ -374,9 +374,9 @@
// This block checks for any unnecessary splits in the opsTasks. If two sequential opsTasks
// could have merged it means the opsTask was artificially split.
if (!fDAG.empty()) {
- GrOpsTask* prevOpsTask = fDAG[0]->asOpsTask();
+ auto prevOpsTask = fDAG[0]->asOpsTask();
for (int i = 1; i < fDAG.count(); ++i) {
- GrOpsTask* curOpsTask = fDAG[i]->asOpsTask();
+ auto curOpsTask = fDAG[i]->asOpsTask();
if (prevOpsTask && curOpsTask) {
SkASSERT(!prevOpsTask->canMerge(curOpsTask));
@@ -575,7 +575,7 @@
return entry ? *entry : nullptr;
}
-GrOpsTask* GrDrawingManager::getLastOpsTask(const GrSurfaceProxy* proxy) const {
+skgpu::v1::OpsTask* GrDrawingManager::getLastOpsTask(const GrSurfaceProxy* proxy) const {
GrRenderTask* task = this->getLastRenderTask(proxy);
return task ? task->asOpsTask() : nullptr;
}
@@ -699,18 +699,18 @@
}
#if SK_GPU_V1
-sk_sp<GrOpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
- sk_sp<GrArenas> arenas,
- bool flushTimeOpsTask) {
+sk_sp<skgpu::v1::OpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
+ sk_sp<GrArenas> arenas,
+ bool flushTimeOpsTask) {
SkDEBUGCODE(this->validate());
SkASSERT(fContext);
this->closeActiveOpsTask();
- sk_sp<GrOpsTask> opsTask(new GrOpsTask(this,
- std::move(surfaceView),
- fContext->priv().auditTrail(),
- std::move(arenas)));
+ sk_sp<skgpu::v1::OpsTask> opsTask(new skgpu::v1::OpsTask(this,
+ std::move(surfaceView),
+ fContext->priv().auditTrail(),
+ std::move(arenas)));
SkASSERT(this->getLastRenderTask(opsTask->target(0)) == opsTask.get());
diff --git a/src/gpu/GrDrawingManager.h b/src/gpu/GrDrawingManager.h
index 30148a6..65b84fc 100644
--- a/src/gpu/GrDrawingManager.h
+++ b/src/gpu/GrDrawingManager.h
@@ -30,7 +30,6 @@
class GrGpuBuffer;
class GrOnFlushCallbackObject;
class GrOpFlushState;
-class GrOpsTask;
class GrRecordingContext;
class GrRenderTargetProxy;
class GrRenderTask;
@@ -39,7 +38,10 @@
class GrSurfaceProxyView;
class GrTextureResolveRenderTask;
class SkDeferredDisplayList;
-namespace skgpu { namespace v1 { class SoftwarePathRenderer; }}
+namespace skgpu { namespace v1 {
+ class OpsTask;
+ class SoftwarePathRenderer;
+}}
class GrDrawingManager {
public:
@@ -49,9 +51,9 @@
#if SK_GPU_V1
// OpsTasks created at flush time are stored and handled different from the others.
- sk_sp<GrOpsTask> newOpsTask(GrSurfaceProxyView,
- sk_sp<GrArenas> arenas,
- bool flushTimeOpsTask);
+ sk_sp<skgpu::v1::OpsTask> newOpsTask(GrSurfaceProxyView,
+ sk_sp<GrArenas> arenas,
+ bool flushTimeOpsTask);
// Adds 'atlasTask' to the DAG and leaves it open.
//
@@ -151,7 +153,7 @@
#endif
GrRenderTask* getLastRenderTask(const GrSurfaceProxy*) const;
- GrOpsTask* getLastOpsTask(const GrSurfaceProxy*) const;
+ skgpu::v1::OpsTask* getLastOpsTask(const GrSurfaceProxy*) const;
void setLastRenderTask(const GrSurfaceProxy*, GrRenderTask*);
void moveRenderTasksToDDL(SkDeferredDisplayList* ddl);
@@ -214,7 +216,7 @@
sk_sp<GrBufferAllocPool::CpuBufferCache> fCpuBufferCache;
SkTArray<sk_sp<GrRenderTask>> fDAG;
- GrOpsTask* fActiveOpsTask = nullptr;
+ skgpu::v1::OpsTask* fActiveOpsTask = nullptr;
// These are the IDs of the opsTask currently being flushed (in internalFlush). They are
// only stored here to prevent memory thrashing.
SkSTArray<8, uint32_t, true> fFlushingRenderTaskIDs;
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index a6b330e..673b359 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -257,7 +257,7 @@
* sampling. This is currently only used by Vulkan for inline uploads
* to set that layout back to sampled after doing the upload. Inline
* uploads currently can happen between draws in a single op so it is
- * not trivial to break up the GrOpsTask into two tasks when we see
+ * not trivial to break up the OpsTask into two tasks when we see
* an inline upload. However, once we are able to support doing that
* we can remove this parameter.
*
@@ -350,7 +350,7 @@
bool copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
const SkIPoint& dstPoint);
- // Returns a GrOpsRenderPass which GrOpsTasks send draw commands to instead of directly
+ // Returns a GrOpsRenderPass which OpsTasks send draw commands to instead of directly
// to the Gpu object. The 'bounds' rect is the content rect of the renderTarget.
// If a 'stencil' is provided it will be the one bound to 'renderTarget'. If one is not
// provided but 'renderTarget' has a stencil buffer then that is a signal that the
diff --git a/src/gpu/GrOpFlushState.h b/src/gpu/GrOpFlushState.h
index f605ee2..eb5d81b 100644
--- a/src/gpu/GrOpFlushState.h
+++ b/src/gpu/GrOpFlushState.h
@@ -23,7 +23,7 @@
class GrOpsRenderPass;
class GrResourceProvider;
-/** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpsTask flush. */
+/** Tracks the state across all the GrOps (really just the GrDrawOps) in an OpsTask flush. */
class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawTarget {
public:
// vertexSpace and indexSpace may either be null or an alloation of size
@@ -307,7 +307,7 @@
// an op is not currently preparing of executing.
OpArgs* fOpArgs = nullptr;
- // This field is only transiently set during flush. Each GrOpsTask will set it to point to an
+ // This field is only transiently set during flush. Each OpsTask will set it to point to an
// array of proxies it uses before call onPrepare and onExecute.
SkTArray<GrSurfaceProxy*, true>* fSampledProxies;
diff --git a/src/gpu/GrRenderTargetProxy.h b/src/gpu/GrRenderTargetProxy.h
index c351981..54540e4 100644
--- a/src/gpu/GrRenderTargetProxy.h
+++ b/src/gpu/GrRenderTargetProxy.h
@@ -19,10 +19,10 @@
class GrResourceProvider;
// GrArenas matches the lifetime of a single frame. It is created and held on the
-// SurfaceFillContext's RenderTargetProxy with the first call to get an arena. Each GrOpsTask
-// takes a ref on it to keep the arenas alive. When the first GrOpsTask's onExecute() is
+// SurfaceFillContext's RenderTargetProxy with the first call to get an arena. Each OpsTask
+// takes a ref on it to keep the arenas alive. When the first OpsTask's onExecute() is
// completed, the arena ref on the SurfaceFillContext's RenderTargetProxy is nulled out so that
-// any new GrOpsTasks will create and ref a new set of arenas.
+// any new OpsTasks will create and ref a new set of arenas.
class GrArenas : public SkNVRefCnt<GrArenas> {
public:
SkArenaAlloc* arenaAlloc() {
diff --git a/src/gpu/GrRenderTask.h b/src/gpu/GrRenderTask.h
index e39245a..f9b3825 100644
--- a/src/gpu/GrRenderTask.h
+++ b/src/gpu/GrRenderTask.h
@@ -17,9 +17,9 @@
class GrMockRenderTask;
class GrOpFlushState;
-class GrOpsTask;
class GrResourceAllocator;
class GrTextureResolveRenderTask;
+namespace skgpu { namespace v1 { class OpsTask; }}
// This class abstracts a task that targets a single GrSurfaceProxy, participates in the
// GrDrawingManager's DAG, and implements the onExecute method to modify its target proxy's
@@ -97,9 +97,9 @@
GrSurfaceProxy* target(int i) const { return fTargets[i].get(); }
/*
- * Safely cast this GrRenderTask to a GrOpsTask (if possible).
+ * Safely cast this GrRenderTask to an OpsTask (if possible).
*/
- virtual GrOpsTask* asOpsTask() { return nullptr; }
+ virtual skgpu::v1::OpsTask* asOpsTask() { return nullptr; }
#if GR_TEST_UTILS
/*
@@ -254,8 +254,8 @@
};
virtual void onMakeSkippable() {}
- virtual void onPrePrepare(GrRecordingContext*) {} // Only GrOpsTask currently overrides this
- virtual void onPrepare(GrOpFlushState*) {} // GrOpsTask and GrDDLTask override this
+ virtual void onPrePrepare(GrRecordingContext*) {} // Only OpsTask currently overrides this
+ virtual void onPrepare(GrOpFlushState*) {} // OpsTask and GrDDLTask override this
virtual bool onExecute(GrOpFlushState* flushState) = 0;
const uint32_t fUniqueID;
diff --git a/src/gpu/GrResourceAllocator.h b/src/gpu/GrResourceAllocator.h
index 6d7c968..9047cbb 100644
--- a/src/gpu/GrResourceAllocator.h
+++ b/src/gpu/GrResourceAllocator.h
@@ -62,7 +62,7 @@
* How does instantiation failure handling work when explicitly allocating?
*
* In the gather usage intervals pass all the GrSurfaceProxies used in the flush should be
- * gathered (i.e., in GrOpsTask::gatherProxyIntervals).
+ * gathered (i.e., in OpsTask::gatherProxyIntervals).
*
* During addInterval, read-only lazy proxies are instantiated. If that fails, the resource
* allocator will note the failure and ignore pretty much anything else until `reset`.
diff --git a/src/gpu/GrSurfaceProxy.h b/src/gpu/GrSurfaceProxy.h
index 8217383..874262a 100644
--- a/src/gpu/GrSurfaceProxy.h
+++ b/src/gpu/GrSurfaceProxy.h
@@ -17,7 +17,6 @@
class GrCaps;
class GrContext_Base;
-class GrOpsTask;
class GrRecordingContext;
class GrRenderTargetProxy;
class GrRenderTask;
diff --git a/src/gpu/GrUserStencilSettings.h b/src/gpu/GrUserStencilSettings.h
index a6dc6b2..5639f61 100644
--- a/src/gpu/GrUserStencilSettings.h
+++ b/src/gpu/GrUserStencilSettings.h
@@ -13,12 +13,12 @@
/**
* Gr uses the stencil buffer to implement complex clipping inside the
- * GrOpsTask class. The GrOpsTask makes a subset of the stencil buffer
+ * OpsTask class. The OpsTask makes a subset of the stencil buffer
* bits available for other uses by external code (user bits). Client code can
- * modify these bits. GrOpsTask will ignore ref, mask, and writemask bits
+ * modify these bits. OpsTask will ignore ref, mask, and writemask bits
* provided by clients that fall outside the user range.
*
- * When code outside the GrOpsTask class uses the stencil buffer the contract
+ * When code outside the OpsTask class uses the stencil buffer the contract
* is as follows:
*
* > Normal stencil funcs allow the client to pass / fail regardless of the
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index 18f63e1..2e0d686 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -2052,7 +2052,7 @@
#else
// we could just clear the clip bit but when we go through
// ANGLE a partial stencil mask will cause clears to be
- // turned into draws. Our contract on GrOpsTask says that
+ // turned into draws. Our contract on OpsTask says that
// changing the clip between stencil passes may or may not
// zero the client's clip bits. So we just clear the whole thing.
static const GrGLint clipStencilMask = ~0;
diff --git a/src/gpu/ops/GrAtlasTextOp.cpp b/src/gpu/ops/GrAtlasTextOp.cpp
index b7a96f2..4e09c70 100644
--- a/src/gpu/ops/GrAtlasTextOp.cpp
+++ b/src/gpu/ops/GrAtlasTextOp.cpp
@@ -224,7 +224,7 @@
auto primProcProxies = target->allocPrimProcProxyPtrs(kMaxTextures);
for (unsigned i = 0; i < numActiveViews; ++i) {
primProcProxies[i] = views[i].proxy();
- // This op does not know its atlas proxies when it is added to a GrOpsTasks, so the proxies
+ // This op does not know its atlas proxies when it is added to an OpsTask, so the proxies
// don't get added during the visitProxies call. Thus we add them here.
target->sampledProxyArray()->push_back(views[i].proxy());
}
@@ -354,7 +354,7 @@
// Update the proxies used in the GP to match.
for (unsigned i = gp->numTextureSamplers(); i < numActiveViews; ++i) {
flushInfo->fPrimProcProxies[i] = views[i].proxy();
- // This op does not know its atlas proxies when it is added to a GrOpsTasks, so the
+ // This op does not know its atlas proxies when it is added to an OpsTask, so the
// proxies don't get added during the visitProxies call. Thus we add them here.
target->sampledProxyArray()->push_back(views[i].proxy());
// These will get unreffed when the previously recorded draws destruct.
diff --git a/src/gpu/GrOpsTask.cpp b/src/gpu/ops/OpsTask.cpp
similarity index 88%
rename from src/gpu/GrOpsTask.cpp
rename to src/gpu/ops/OpsTask.cpp
index 8185628..ab5c7cb 100644
--- a/src/gpu/GrOpsTask.cpp
+++ b/src/gpu/ops/OpsTask.cpp
@@ -5,7 +5,7 @@
* found in the LICENSE file.
*/
-#include "src/gpu/GrOpsTask.h"
+#include "src/gpu/ops/OpsTask.h"
#include "include/gpu/GrRecordingContext.h"
#include "src/core/SkRectPriv.h"
@@ -28,24 +28,62 @@
////////////////////////////////////////////////////////////////////////////////
+namespace {
+
// Experimentally we have found that most combining occurs within the first 10 comparisons.
static const int kMaxOpMergeDistance = 10;
static const int kMaxOpChainDistance = 10;
////////////////////////////////////////////////////////////////////////////////
-static inline bool can_reorder(const SkRect& a, const SkRect& b) { return !GrRectsOverlap(a, b); }
+inline bool can_reorder(const SkRect& a, const SkRect& b) { return !GrRectsOverlap(a, b); }
+
+GrOpsRenderPass* create_render_pass(GrGpu* gpu,
+ GrRenderTarget* rt,
+ bool useMSAASurface,
+ GrAttachment* stencil,
+ GrSurfaceOrigin origin,
+ const SkIRect& bounds,
+ GrLoadOp colorLoadOp,
+ const std::array<float, 4>& loadClearColor,
+ GrLoadOp stencilLoadOp,
+ GrStoreOp stencilStoreOp,
+ const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
+ GrXferBarrierFlags renderPassXferBarriers) {
+ const GrOpsRenderPass::LoadAndStoreInfo kColorLoadStoreInfo {
+ colorLoadOp,
+ GrStoreOp::kStore,
+ loadClearColor
+ };
+
+ // TODO:
+ // We would like to (at this level) only ever clear & discard. We would need
+ // to stop splitting up higher level OpsTasks for copyOps to achieve that.
+ // Note: we would still need SB loads and stores but they would happen at a
+ // lower level (inside the VK command buffer).
+ const GrOpsRenderPass::StencilLoadAndStoreInfo stencilLoadAndStoreInfo {
+ stencilLoadOp,
+ stencilStoreOp,
+ };
+
+ return gpu->getOpsRenderPass(rt, useMSAASurface, stencil, origin, bounds, kColorLoadStoreInfo,
+ stencilLoadAndStoreInfo, sampledProxies, renderPassXferBarriers);
+}
+
+} // anonymous namespace
////////////////////////////////////////////////////////////////////////////////
-inline GrOpsTask::OpChain::List::List(GrOp::Owner op)
+namespace skgpu::v1 {
+
+inline OpsTask::OpChain::List::List(GrOp::Owner op)
: fHead(std::move(op)), fTail(fHead.get()) {
this->validate();
}
-inline GrOpsTask::OpChain::List::List(List&& that) { *this = std::move(that); }
+inline OpsTask::OpChain::List::List(List&& that) { *this = std::move(that); }
-inline GrOpsTask::OpChain::List& GrOpsTask::OpChain::List::operator=(List&& that) {
+inline OpsTask::OpChain::List& OpsTask::OpChain::List::operator=(List&& that) {
fHead = std::move(that.fHead);
fTail = that.fTail;
that.fTail = nullptr;
@@ -53,7 +91,7 @@
return *this;
}
-inline GrOp::Owner GrOpsTask::OpChain::List::popHead() {
+inline GrOp::Owner OpsTask::OpChain::List::popHead() {
SkASSERT(fHead);
auto temp = fHead->cutChain();
std::swap(temp, fHead);
@@ -64,7 +102,7 @@
return temp;
}
-inline GrOp::Owner GrOpsTask::OpChain::List::removeOp(GrOp* op) {
+inline GrOp::Owner OpsTask::OpChain::List::removeOp(GrOp* op) {
#ifdef SK_DEBUG
auto head = op;
while (head->prevInChain()) { head = head->prevInChain(); }
@@ -86,7 +124,7 @@
return temp;
}
-inline void GrOpsTask::OpChain::List::pushHead(GrOp::Owner op) {
+inline void OpsTask::OpChain::List::pushHead(GrOp::Owner op) {
SkASSERT(op);
SkASSERT(op->isChainHead());
SkASSERT(op->isChainTail());
@@ -99,13 +137,13 @@
}
}
-inline void GrOpsTask::OpChain::List::pushTail(GrOp::Owner op) {
+inline void OpsTask::OpChain::List::pushTail(GrOp::Owner op) {
SkASSERT(op->isChainTail());
fTail->chainConcat(std::move(op));
fTail = fTail->nextInChain();
}
-inline void GrOpsTask::OpChain::List::validate() const {
+inline void OpsTask::OpChain::List::validate() const {
#ifdef SK_DEBUG
if (fHead) {
SkASSERT(fTail);
@@ -116,8 +154,8 @@
////////////////////////////////////////////////////////////////////////////////
-GrOpsTask::OpChain::OpChain(GrOp::Owner op, GrProcessorSet::Analysis processorAnalysis,
- GrAppliedClip* appliedClip, const GrDstProxyView* dstProxyView)
+OpsTask::OpChain::OpChain(GrOp::Owner op, GrProcessorSet::Analysis processorAnalysis,
+ GrAppliedClip* appliedClip, const GrDstProxyView* dstProxyView)
: fList{std::move(op)}
, fProcessorAnalysis(processorAnalysis)
, fAppliedClip(appliedClip) {
@@ -128,7 +166,7 @@
fBounds = fList.head()->bounds();
}
-void GrOpsTask::OpChain::visitProxies(const GrVisitProxyFunc& func) const {
+void OpsTask::OpChain::visitProxies(const GrVisitProxyFunc& func) const {
if (fList.empty()) {
return;
}
@@ -143,7 +181,7 @@
}
}
-void GrOpsTask::OpChain::deleteOps() {
+void OpsTask::OpChain::deleteOps() {
while (!fList.empty()) {
// Since the value goes out of scope immediately, the GrOp::Owner deletes the op.
fList.popHead();
@@ -152,9 +190,9 @@
// Concatenates two op chains and attempts to merge ops across the chains. Assumes that we know that
// the two chains are chainable. Returns the new chain.
-GrOpsTask::OpChain::List GrOpsTask::OpChain::DoConcat(List chainA, List chainB, const GrCaps& caps,
- SkArenaAlloc* opsTaskArena,
- GrAuditTrail* auditTrail) {
+OpsTask::OpChain::List OpsTask::OpChain::DoConcat(List chainA, List chainB, const GrCaps& caps,
+ SkArenaAlloc* opsTaskArena,
+ GrAuditTrail* auditTrail) {
// We process ops in chain b from head to tail. We attempt to merge with nodes in a, starting
// at chain a's tail and working toward the head. We produce one of the following outcomes:
// 1) b's head is merged into an op in a.
@@ -230,7 +268,7 @@
// Attempts to concatenate the given chain onto our own and merge ops across the chains. Returns
// whether the operation succeeded. On success, the provided list will be returned empty.
-bool GrOpsTask::OpChain::tryConcat(
+bool OpsTask::OpChain::tryConcat(
List* list, GrProcessorSet::Analysis processorAnalysis, const GrDstProxyView& dstProxyView,
const GrAppliedClip* appliedClip, const SkRect& bounds, const GrCaps& caps,
SkArenaAlloc* opsTaskArena, GrAuditTrail* auditTrail) {
@@ -290,8 +328,8 @@
return true;
}
-bool GrOpsTask::OpChain::prependChain(OpChain* that, const GrCaps& caps, SkArenaAlloc* opsTaskArena,
- GrAuditTrail* auditTrail) {
+bool OpsTask::OpChain::prependChain(OpChain* that, const GrCaps& caps, SkArenaAlloc* opsTaskArena,
+ GrAuditTrail* auditTrail) {
if (!that->tryConcat(&fList, fProcessorAnalysis, fDstProxyView, fAppliedClip, fBounds, caps,
opsTaskArena, auditTrail)) {
this->validate();
@@ -313,7 +351,7 @@
return true;
}
-GrOp::Owner GrOpsTask::OpChain::appendOp(
+GrOp::Owner OpsTask::OpChain::appendOp(
GrOp::Owner op, GrProcessorSet::Analysis processorAnalysis,
const GrDstProxyView* dstProxyView, const GrAppliedClip* appliedClip, const GrCaps& caps,
SkArenaAlloc* opsTaskArena, GrAuditTrail* auditTrail) {
@@ -336,7 +374,7 @@
return nullptr;
}
-inline void GrOpsTask::OpChain::validate() const {
+inline void OpsTask::OpChain::validate() const {
#ifdef SK_DEBUG
fList.validate();
for (const auto& op : GrOp::ChainRange<>(fList.head())) {
@@ -349,10 +387,10 @@
////////////////////////////////////////////////////////////////////////////////
-GrOpsTask::GrOpsTask(GrDrawingManager* drawingMgr,
- GrSurfaceProxyView view,
- GrAuditTrail* auditTrail,
- sk_sp<GrArenas> arenas)
+OpsTask::OpsTask(GrDrawingManager* drawingMgr,
+ GrSurfaceProxyView view,
+ GrAuditTrail* auditTrail,
+ sk_sp<GrArenas> arenas)
: GrRenderTask()
, fAuditTrail(auditTrail)
, fUsesMSAASurface(view.asRenderTargetProxy()->numSamples() > 1)
@@ -363,19 +401,19 @@
this->addTarget(drawingMgr, view.detachProxy());
}
-void GrOpsTask::deleteOps() {
+void OpsTask::deleteOps() {
for (auto& chain : fOpChains) {
chain.deleteOps();
}
fOpChains.reset();
}
-GrOpsTask::~GrOpsTask() {
+OpsTask::~OpsTask() {
this->deleteOps();
}
-void GrOpsTask::addOp(GrDrawingManager* drawingMgr, GrOp::Owner op,
- GrTextureResolveManager textureResolveManager, const GrCaps& caps) {
+void OpsTask::addOp(GrDrawingManager* drawingMgr, GrOp::Owner op,
+ GrTextureResolveManager textureResolveManager, const GrCaps& caps) {
auto addDependency = [&](GrSurfaceProxy* p, GrMipmapped mipmapped) {
this->addDependency(drawingMgr, p, mipmapped, textureResolveManager, caps);
};
@@ -386,10 +424,10 @@
nullptr, caps);
}
-void GrOpsTask::addDrawOp(GrDrawingManager* drawingMgr, GrOp::Owner op, bool usesMSAA,
- const GrProcessorSet::Analysis& processorAnalysis, GrAppliedClip&& clip,
- const GrDstProxyView& dstProxyView,
- GrTextureResolveManager textureResolveManager, const GrCaps& caps) {
+void OpsTask::addDrawOp(GrDrawingManager* drawingMgr, GrOp::Owner op, bool usesMSAA,
+ const GrProcessorSet::Analysis& processorAnalysis, GrAppliedClip&& clip,
+ const GrDstProxyView& dstProxyView,
+ GrTextureResolveManager textureResolveManager, const GrCaps& caps) {
auto addDependency = [&](GrSurfaceProxy* p, GrMipmapped mipmapped) {
this->addSampledTexture(p);
this->addDependency(drawingMgr, p, mipmapped, textureResolveManager, caps);
@@ -417,7 +455,7 @@
&dstProxyView, caps);
}
-void GrOpsTask::endFlush(GrDrawingManager* drawingMgr) {
+void OpsTask::endFlush(GrDrawingManager* drawingMgr) {
fLastClipStackGenID = SK_InvalidUniqueID;
this->deleteOps();
@@ -428,12 +466,12 @@
GrRenderTask::endFlush(drawingMgr);
}
-void GrOpsTask::onPrePrepare(GrRecordingContext* context) {
+void OpsTask::onPrePrepare(GrRecordingContext* context) {
SkASSERT(this->isClosed());
// TODO: remove the check for discard here once reduced op splitting is turned on. Currently we
- // can end up with GrOpsTasks that only have a discard load op and no ops. For vulkan validation
+ // can end up with OpsTasks that only have a discard load op and no ops. For vulkan validation
// we need to keep that discard and not drop it. Once we have reduce op list splitting enabled
- // we shouldn't end up with GrOpsTasks with only discard.
+ // we shouldn't end up with OpsTasks with only discard.
if (this->isColorNoOp() ||
(fClippedContentBounds.isEmpty() && fColorLoadOp != GrLoadOp::kDiscard)) {
return;
@@ -453,13 +491,13 @@
}
}
-void GrOpsTask::onPrepare(GrOpFlushState* flushState) {
+void OpsTask::onPrepare(GrOpFlushState* flushState) {
SkASSERT(this->target(0)->peekRenderTarget());
SkASSERT(this->isClosed());
// TODO: remove the check for discard here once reduced op splitting is turned on. Currently we
- // can end up with GrOpsTasks that only have a discard load op and no ops. For vulkan validation
+ // can end up with OpsTasks that only have a discard load op and no ops. For vulkan validation
// we need to keep that discard and not drop it. Once we have reduce op list splitting enabled
- // we shouldn't end up with GrOpsTasks with only discard.
+ // we shouldn't end up with OpsTasks with only discard.
if (this->isColorNoOp() ||
(fClippedContentBounds.isEmpty() && fColorLoadOp != GrLoadOp::kDiscard)) {
return;
@@ -497,51 +535,19 @@
flushState->setSampledProxyArray(nullptr);
}
-static GrOpsRenderPass* create_render_pass(GrGpu* gpu,
- GrRenderTarget* rt,
- bool useMSAASurface,
- GrAttachment* stencil,
- GrSurfaceOrigin origin,
- const SkIRect& bounds,
- GrLoadOp colorLoadOp,
- const std::array<float, 4>& loadClearColor,
- GrLoadOp stencilLoadOp,
- GrStoreOp stencilStoreOp,
- const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
- GrXferBarrierFlags renderPassXferBarriers) {
- const GrOpsRenderPass::LoadAndStoreInfo kColorLoadStoreInfo {
- colorLoadOp,
- GrStoreOp::kStore,
- loadClearColor
- };
-
- // TODO:
- // We would like to (at this level) only ever clear & discard. We would need
- // to stop splitting up higher level OpsTasks for copyOps to achieve that.
- // Note: we would still need SB loads and stores but they would happen at a
- // lower level (inside the VK command buffer).
- const GrOpsRenderPass::StencilLoadAndStoreInfo stencilLoadAndStoreInfo {
- stencilLoadOp,
- stencilStoreOp,
- };
-
- return gpu->getOpsRenderPass(rt, useMSAASurface, stencil, origin, bounds, kColorLoadStoreInfo,
- stencilLoadAndStoreInfo, sampledProxies, renderPassXferBarriers);
-}
-
// TODO: this is where GrOp::renderTarget is used (which is fine since it
// is at flush time). However, we need to store the RenderTargetProxy in the
// Ops and instantiate them here.
-bool GrOpsTask::onExecute(GrOpFlushState* flushState) {
+bool OpsTask::onExecute(GrOpFlushState* flushState) {
SkASSERT(this->numTargets() == 1);
GrRenderTargetProxy* proxy = this->target(0)->asRenderTargetProxy();
SkASSERT(proxy);
SK_AT_SCOPE_EXIT(proxy->clearArenas());
// TODO: remove the check for discard here once reduced op splitting is turned on. Currently we
- // can end up with GrOpsTasks that only have a discard load op and no ops. For vulkan validation
+ // can end up with OpsTasks that only have a discard load op and no ops. For vulkan validation
// we need to keep that discard and not drop it. Once we have reduce op list splitting enabled
- // we shouldn't end up with GrOpsTasks with only discard.
+ // we shouldn't end up with OpsTasks with only discard.
if (this->isColorNoOp() ||
(fClippedContentBounds.isEmpty() && fColorLoadOp != GrLoadOp::kDiscard)) {
return false;
@@ -660,7 +666,7 @@
return true;
}
-void GrOpsTask::setColorLoadOp(GrLoadOp op, std::array<float, 4> color) {
+void OpsTask::setColorLoadOp(GrLoadOp op, std::array<float, 4> color) {
fColorLoadOp = op;
fLoadClearColor = color;
if (GrLoadOp::kClear == fColorLoadOp) {
@@ -670,7 +676,7 @@
}
}
-void GrOpsTask::reset() {
+void OpsTask::reset() {
fDeferredProxies.reset();
fSampledProxies.reset();
fClippedContentBounds = SkIRect::MakeEmpty();
@@ -679,13 +685,13 @@
fRenderPassXferBarriers = GrXferBarrierFlags::kNone;
}
-bool GrOpsTask::canMerge(const GrOpsTask* opsTask) const {
+bool OpsTask::canMerge(const OpsTask* opsTask) const {
return this->target(0) == opsTask->target(0) &&
fArenas == opsTask->fArenas &&
!opsTask->fCannotMergeBackward;
}
-int GrOpsTask::mergeFrom(SkSpan<const sk_sp<GrRenderTask>> tasks) {
+int OpsTask::mergeFrom(SkSpan<const sk_sp<GrRenderTask>> tasks) {
int mergedCount = 0;
for (const sk_sp<GrRenderTask>& task : tasks) {
auto opsTask = task->asOpsTask();
@@ -705,8 +711,8 @@
return 0;
}
- SkSpan<const sk_sp<GrOpsTask>> mergingNodes(
- reinterpret_cast<const sk_sp<GrOpsTask>*>(tasks.data()), SkToSizeT(mergedCount));
+ SkSpan<const sk_sp<OpsTask>> mergingNodes(
+ reinterpret_cast<const sk_sp<OpsTask>*>(tasks.data()), SkToSizeT(mergedCount));
int addlDeferredProxyCount = 0;
int addlProxyCount = 0;
int addlOpChainCount = 0;
@@ -758,7 +764,7 @@
return mergedCount;
}
-bool GrOpsTask::resetForFullscreenClear(CanDiscardPreviousOps canDiscardPreviousOps) {
+bool OpsTask::resetForFullscreenClear(CanDiscardPreviousOps canDiscardPreviousOps) {
if (CanDiscardPreviousOps::kYes == canDiscardPreviousOps || this->isEmpty()) {
this->deleteOps();
fDeferredProxies.reset();
@@ -774,7 +780,7 @@
return false;
}
-void GrOpsTask::discard() {
+void OpsTask::discard() {
// Discard calls to in-progress opsTasks are ignored. Calls at the start update the
// opsTasks' color & stencil load ops.
if (this->isEmpty()) {
@@ -787,10 +793,10 @@
////////////////////////////////////////////////////////////////////////////////
#if GR_TEST_UTILS
-void GrOpsTask::dump(const SkString& label,
- SkString indent,
- bool printDependencies,
- bool close) const {
+void OpsTask::dump(const SkString& label,
+ SkString indent,
+ bool printDependencies,
+ bool close) const {
GrRenderTask::dump(label, indent, printDependencies, false);
SkDebugf("%sfColorLoadOp: ", indent.c_str());
@@ -853,7 +859,7 @@
#endif
#ifdef SK_DEBUG
-void GrOpsTask::visitProxies_debugOnly(const GrVisitProxyFunc& func) const {
+void OpsTask::visitProxies_debugOnly(const GrVisitProxyFunc& func) const {
auto textureFunc = [ func ] (GrSurfaceProxy* tex, GrMipmapped mipmapped) {
func(tex, mipmapped);
};
@@ -867,14 +873,14 @@
////////////////////////////////////////////////////////////////////////////////
-void GrOpsTask::onMakeSkippable() {
+void OpsTask::onMakeSkippable() {
this->deleteOps();
fDeferredProxies.reset();
fColorLoadOp = GrLoadOp::kLoad;
SkASSERT(this->isColorNoOp());
}
-bool GrOpsTask::onIsUsed(GrSurfaceProxy* proxyToCheck) const {
+bool OpsTask::onIsUsed(GrSurfaceProxy* proxyToCheck) const {
bool used = false;
for (GrSurfaceProxy* proxy : fSampledProxies) {
if (proxy == proxyToCheck) {
@@ -896,7 +902,7 @@
return used;
}
-void GrOpsTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
+void OpsTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
SkASSERT(this->isClosed());
if (this->isColorNoOp()) {
return;
@@ -914,7 +920,7 @@
GrSurfaceProxy* targetProxy = this->target(0);
- // Add the interval for all the writes to this GrOpsTasks's target
+ // Add the interval for all the writes to this OpsTask's target
if (fOpChains.count()) {
unsigned int cur = alloc->curOp();
@@ -946,7 +952,7 @@
}
}
-void GrOpsTask::recordOp(
+void OpsTask::recordOp(
GrOp::Owner op, bool usesMSAA, GrProcessorSet::Analysis processorAnalysis,
GrAppliedClip* clip, const GrDstProxyView* dstProxyView, const GrCaps& caps) {
GrSurfaceProxy* proxy = this->target(0);
@@ -954,7 +960,7 @@
op->validate();
SkASSERT(processorAnalysis.requiresDstTexture() == (dstProxyView && dstProxyView->proxy()));
SkASSERT(proxy);
- // A closed GrOpsTask should never receive new/more ops
+ // A closed OpsTask should never receive new/more ops
SkASSERT(!this->isClosed());
// Ensure we can support dynamic msaa if the caller is trying to trigger it.
if (proxy->asRenderTargetProxy()->numSamples() == 1 && usesMSAA) {
@@ -1017,7 +1023,7 @@
fOpChains.emplace_back(std::move(op), processorAnalysis, clip, dstProxyView);
}
-void GrOpsTask::forwardCombine(const GrCaps& caps) {
+void OpsTask::forwardCombine(const GrCaps& caps) {
SkASSERT(!this->isClosed());
GrOP_INFO("opsTask: %d ForwardCombine %d ops:\n", this->uniqueID(), fOpChains.count());
@@ -1048,8 +1054,8 @@
}
}
-GrRenderTask::ExpectedOutcome GrOpsTask::onMakeClosed(GrRecordingContext* rContext,
- SkIRect* targetUpdateBounds) {
+GrRenderTask::ExpectedOutcome OpsTask::onMakeClosed(GrRecordingContext* rContext,
+ SkIRect* targetUpdateBounds) {
this->forwardCombine(*rContext->priv().caps());
if (!this->isColorNoOp()) {
GrSurfaceProxy* proxy = this->target(0);
@@ -1069,3 +1075,5 @@
}
return ExpectedOutcome::kTargetUnchanged;
}
+
+} // namespace skgpu::v1
diff --git a/src/gpu/GrOpsTask.h b/src/gpu/ops/OpsTask.h
similarity index 95%
rename from src/gpu/GrOpsTask.h
rename to src/gpu/ops/OpsTask.h
index 16a9dd1..4f5cc08 100644
--- a/src/gpu/GrOpsTask.h
+++ b/src/gpu/ops/OpsTask.h
@@ -5,8 +5,8 @@
* found in the LICENSE file.
*/
-#ifndef GrOpsTask_DEFINED
-#define GrOpsTask_DEFINED
+#ifndef OpsTask_DEFINED
+#define OpsTask_DEFINED
#include "include/core/SkMatrix.h"
#include "include/core/SkRefCnt.h"
@@ -32,15 +32,19 @@
class GrClearOp;
class GrGpuBuffer;
class GrRenderTargetProxy;
-namespace skgpu { namespace v1 { class SurfaceDrawContext; }}
+class OpsTaskTestingAccess;
-class GrOpsTask : public GrRenderTask {
+namespace skgpu::v1 {
+
+class SurfaceDrawContext;
+
+class OpsTask : public GrRenderTask {
public:
// Manage the arenas life time by maintaining are reference to it.
- GrOpsTask(GrDrawingManager*, GrSurfaceProxyView, GrAuditTrail*, sk_sp<GrArenas>);
- ~GrOpsTask() override;
+ OpsTask(GrDrawingManager*, GrSurfaceProxyView, GrAuditTrail*, sk_sp<GrArenas>);
+ ~OpsTask() override;
- GrOpsTask* asOpsTask() override { return this; }
+ OpsTask* asOpsTask() override { return this; }
bool isEmpty() const { return fOpChains.empty(); }
bool usesMSAASurface() const { return fUsesMSAASurface; }
@@ -90,7 +94,7 @@
void setColorLoadOp(GrLoadOp op, std::array<float, 4> color = {0, 0, 0, 0});
// Returns whether the given opsTask can be appended at the end of this one.
- bool canMerge(const GrOpsTask*) const;
+ bool canMerge(const OpsTask*) const;
// Merge as many opsTasks as possible from the head of 'tasks'. They should all be
// renderPass compatible. Return the number of tasks merged into 'this'.
@@ -244,7 +248,7 @@
// Remove all ops, proxies, etc. Used in the merging algorithm when tasks can be skipped.
void reset();
- friend class OpsTaskTestingAccess;
+ friend class ::OpsTaskTestingAccess;
// The SDC and OpsTask have to work together to handle buffer clears. In most cases, buffer
// clearing can be done natively, in which case the op list's load ops are sufficient. In other
@@ -284,4 +288,6 @@
SkIRect fClippedContentBounds = SkIRect::MakeEmpty();
};
-#endif
+} // namespace skgpu::v1
+
+#endif // OpsTask_DEFINED
diff --git a/src/gpu/ops/SmallPathRenderer.cpp b/src/gpu/ops/SmallPathRenderer.cpp
index bc3c8c7..b6663a0 100644
--- a/src/gpu/ops/SmallPathRenderer.cpp
+++ b/src/gpu/ops/SmallPathRenderer.cpp
@@ -156,7 +156,7 @@
int numActiveProxies;
const GrSurfaceProxyView* views = atlasMgr->getViews(&numActiveProxies);
for (int i = 0; i < numActiveProxies; ++i) {
- // This op does not know its atlas proxies when it is added to a GrOpsTasks, so the
+ // This op does not know its atlas proxies when it is added to an OpsTask, so the
// proxies don't get added during the visitProxies call. Thus we add them here.
flushInfo.fPrimProcProxies[i] = views[i].proxy();
target->sampledProxyArray()->push_back(views[i].proxy());
@@ -540,7 +540,7 @@
if (gp->numTextureSamplers() != numActiveProxies) {
for (int i = gp->numTextureSamplers(); i < numActiveProxies; ++i) {
flushInfo->fPrimProcProxies[i] = views[i].proxy();
- // This op does not know its atlas proxies when it is added to a GrOpsTasks, so the
+ // This op does not know its atlas proxies when it is added to an OpsTask, so the
// proxies don't get added during the visitProxies call. Thus we add them here.
target->sampledProxyArray()->push_back(views[i].proxy());
}
diff --git a/src/gpu/tessellate/GrAtlasRenderTask.cpp b/src/gpu/tessellate/GrAtlasRenderTask.cpp
index 0031915..569e665 100644
--- a/src/gpu/tessellate/GrAtlasRenderTask.cpp
+++ b/src/gpu/tessellate/GrAtlasRenderTask.cpp
@@ -18,10 +18,10 @@
GrAtlasRenderTask::GrAtlasRenderTask(GrRecordingContext* rContext,
sk_sp<GrArenas> arenas,
std::unique_ptr<GrDynamicAtlas> dynamicAtlas)
- : GrOpsTask(rContext->priv().drawingManager(),
- dynamicAtlas->writeView(*rContext->priv().caps()),
- rContext->priv().auditTrail(),
- std::move(arenas))
+ : OpsTask(rContext->priv().drawingManager(),
+ dynamicAtlas->writeView(*rContext->priv().caps()),
+ rContext->priv().auditTrail(),
+ std::move(arenas))
, fDynamicAtlas(std::move(dynamicAtlas)) {
}
@@ -64,7 +64,7 @@
const GrCaps& caps = *rContext->priv().caps();
- // Set our dimensions now. GrOpsTask will need them when we add our ops.
+ // Set our dimensions now. OpsTask will need them when we add our ops.
this->target(0)->priv().setLazyDimensions(fDynamicAtlas->drawBounds());
this->target(0)->asRenderTargetProxy()->setNeedsStencil();
SkRect drawRect = target(0)->getBoundsRect();
@@ -72,7 +72,7 @@
// Clear the atlas.
if (caps.performColorClearsAsDraws() || caps.performStencilClearsAsDraws()) {
this->setColorLoadOp(GrLoadOp::kDiscard);
- this->setInitialStencilContent(GrOpsTask::StencilContent::kDontCare);
+ this->setInitialStencilContent(StencilContent::kDontCare);
constexpr static GrUserStencilSettings kClearStencil(
GrUserStencilSettings::StaticInit<
@@ -86,7 +86,7 @@
this->stencilAtlasRect(rContext, drawRect, SK_PMColor4fTRANSPARENT, &kClearStencil);
} else {
this->setColorLoadOp(GrLoadOp::kClear);
- this->setInitialStencilContent(GrOpsTask::StencilContent::kUserBitsCleared);
+ this->setInitialStencilContent(StencilContent::kUserBitsCleared);
}
// Add ops to stencil the atlas paths.
@@ -137,7 +137,7 @@
}
this->stencilAtlasRect(rContext, drawRect, SK_PMColor4fWHITE, stencil);
- this->GrOpsTask::onMakeClosed(rContext, targetUpdateBounds);
+ this->OpsTask::onMakeClosed(rContext, targetUpdateBounds);
// Don't mark msaa dirty. Since this op defers being closed, the drawing manager's dirty
// tracking doesn't work anyway. We will just resolve msaa manually during onExecute.
@@ -172,7 +172,7 @@
}
bool GrAtlasRenderTask::onExecute(GrOpFlushState* flushState) {
- if (!this->GrOpsTask::onExecute(flushState)) {
+ if (!this->OpsTask::onExecute(flushState)) {
return false;
}
if (this->target(0)->requiresManualMSAAResolve()) {
diff --git a/src/gpu/tessellate/GrAtlasRenderTask.h b/src/gpu/tessellate/GrAtlasRenderTask.h
index cfd79ea..57fb26c 100644
--- a/src/gpu/tessellate/GrAtlasRenderTask.h
+++ b/src/gpu/tessellate/GrAtlasRenderTask.h
@@ -11,7 +11,7 @@
#include "include/core/SkPath.h"
#include "src/core/SkTBlockList.h"
#include "src/gpu/GrDynamicAtlas.h"
-#include "src/gpu/GrOpsTask.h"
+#include "src/gpu/ops/OpsTask.h"
#include "src/gpu/tessellate/GrPathTessellator.h"
struct SkIPoint16;
@@ -22,7 +22,7 @@
//
// The atlas texture does not get instantiated automatically. It is the creator's responsibility to
// call instantiate() at flush time.
-class GrAtlasRenderTask : public GrOpsTask {
+class GrAtlasRenderTask : public skgpu::v1::OpsTask {
public:
GrAtlasRenderTask(GrRecordingContext*,
sk_sp<GrArenas>,
@@ -46,14 +46,14 @@
}
private:
- // Adds internal ops to render the atlas before deferring to GrOpsTask::onMakeClosed.
+ // Adds internal ops to render the atlas before deferring to OpsTask::onMakeClosed.
ExpectedOutcome onMakeClosed(GrRecordingContext*, SkIRect* targetUpdateBounds) override;
void stencilAtlasRect(GrRecordingContext*, const SkRect&, const SkPMColor4f&,
const GrUserStencilSettings*);
void addAtlasDrawOp(GrOp::Owner, const GrCaps&);
- // Executes the GrOpsTask and resolves msaa if needed.
+ // Executes the OpsTask and resolves msaa if needed.
bool onExecute(GrOpFlushState* flushState) override;
const std::unique_ptr<GrDynamicAtlas> fDynamicAtlas;
diff --git a/src/gpu/v1/PathRenderer.h b/src/gpu/v1/PathRenderer.h
index c0608c5..4deb6f9 100644
--- a/src/gpu/v1/PathRenderer.h
+++ b/src/gpu/v1/PathRenderer.h
@@ -31,7 +31,7 @@
class SurfaceDrawContext;
/**
- * Base class for drawing paths into a GrOpsTask.
+ * Base class for drawing paths into an OpsTask.
*/
class PathRenderer : public SkRefCnt {
public:
diff --git a/src/gpu/v1/SurfaceDrawContext.cpp b/src/gpu/v1/SurfaceDrawContext.cpp
index b756b2b..5a7428e 100644
--- a/src/gpu/v1/SurfaceDrawContext.cpp
+++ b/src/gpu/v1/SurfaceDrawContext.cpp
@@ -279,8 +279,8 @@
}
// In MDB mode the reffing of the 'getLastOpsTask' call's result allows in-progress
-// GrOpsTask to be picked up and added to by SurfaceDrawContexts lower in the call
-// stack. When this occurs with a closed GrOpsTask, a new one will be allocated
+// OpsTask to be picked up and added to by SurfaceDrawContexts lower in the call
+// stack. When this occurs with a closed OpsTask, a new one will be allocated
// when the surfaceDrawContext attempts to use it (via getOpsTask).
SurfaceDrawContext::SurfaceDrawContext(GrRecordingContext* rContext,
GrSurfaceProxyView readView,
@@ -306,14 +306,14 @@
ASSERT_SINGLE_OWNER
}
-void SurfaceDrawContext::willReplaceOpsTask(GrOpsTask* prevTask, GrOpsTask* nextTask) {
+void SurfaceDrawContext::willReplaceOpsTask(OpsTask* prevTask, OpsTask* nextTask) {
if (prevTask && fNeedsStencil) {
// Store the stencil values in memory upon completion of fOpsTask.
prevTask->setMustPreserveStencil();
// Reload the stencil buffer content at the beginning of newOpsTask.
// FIXME: Could the topo sort insert a task between these two that modifies the stencil
// values?
- nextTask->setInitialStencilContent(GrOpsTask::StencilContent::kPreserved);
+ nextTask->setInitialStencilContent(OpsTask::StencilContent::kPreserved);
}
#if GR_GPU_STATS && GR_TEST_UTILS
if (fCanUseDynamicMSAA) {
@@ -859,10 +859,10 @@
return this->asRenderTargetProxy()->maxWindowRectangles(*this->caps());
}
-GrOpsTask::CanDiscardPreviousOps SurfaceDrawContext::canDiscardPreviousOpsOnFullClear() const {
+OpsTask::CanDiscardPreviousOps SurfaceDrawContext::canDiscardPreviousOpsOnFullClear() const {
#if GR_TEST_UTILS
if (fPreserveOpsOnFullClear_TestingOnly) {
- return GrOpsTask::CanDiscardPreviousOps::kNo;
+ return OpsTask::CanDiscardPreviousOps::kNo;
}
#endif
// Regardless of how the clear is implemented (native clear or a fullscreen quad), all prior ops
@@ -871,7 +871,7 @@
// Although the clear will ignore the stencil buffer, following draw ops may not so we can't get
// rid of all the preceding ops. Beware! If we ever add any ops that have a side effect beyond
// modifying the stencil buffer we will need a more elaborate tracking system (skbug.com/7002).
- return GrOpsTask::CanDiscardPreviousOps(!fNeedsStencil);
+ return OpsTask::CanDiscardPreviousOps(!fNeedsStencil);
}
void SurfaceDrawContext::setNeedsStencil() {
@@ -887,7 +887,7 @@
this->internalStencilClear(nullptr, /* inside mask */ false);
} else {
this->getOpsTask()->setInitialStencilContent(
- GrOpsTask::StencilContent::kUserBitsCleared);
+ OpsTask::StencilContent::kUserBitsCleared);
}
}
}
@@ -1501,7 +1501,7 @@
void SurfaceDrawContext::setLastClip(uint32_t clipStackGenID,
const SkIRect& devClipBounds,
int numClipAnalyticElements) {
- GrOpsTask* opsTask = this->getOpsTask();
+ auto opsTask = this->getOpsTask();
opsTask->fLastClipStackGenID = clipStackGenID;
opsTask->fLastDevClipBounds = devClipBounds;
opsTask->fLastClipNumAnalyticElements = numClipAnalyticElements;
@@ -1510,7 +1510,7 @@
bool SurfaceDrawContext::mustRenderClip(uint32_t clipStackGenID,
const SkIRect& devClipBounds,
int numClipAnalyticElements) {
- GrOpsTask* opsTask = this->getOpsTask();
+ auto opsTask = this->getOpsTask();
return opsTask->fLastClipStackGenID != clipStackGenID ||
!opsTask->fLastDevClipBounds.contains(devClipBounds) ||
opsTask->fLastClipNumAnalyticElements != numClipAnalyticElements;
@@ -2032,12 +2032,12 @@
return false;
}
- // First get the dstSampleFlags as if we will put the draw into the current GrOpsTask
+ // First get the dstSampleFlags as if we will put the draw into the current OpsTask
auto dstSampleFlags = this->caps()->getDstSampleFlagsForProxy(
this->asRenderTargetProxy(), this->getOpsTask()->usesMSAASurface() || opRequiresMSAA);
- // If we don't have barriers for this draw then we will definitely be breaking up the GrOpsTask.
- // However, if using dynamic MSAA, the new GrOpsTask will not have MSAA already enabled on it
+ // If we don't have barriers for this draw then we will definitely be breaking up the OpsTask.
+ // However, if using dynamic MSAA, the new OpsTask will not have MSAA already enabled on it
// and that may allow us to use texture barriers. So we check if we can use barriers on the new
// ops task and then break it up if so.
if (!(dstSampleFlags & GrDstSampleFlags::kRequiresTextureBarrier) &&
@@ -2057,7 +2057,7 @@
if (dstSampleFlags & GrDstSampleFlags::kRequiresTextureBarrier) {
// If we require a barrier to sample the dst it means we are sampling the RT itself
// either as a texture or input attachment. In this case we don't need to break up the
- // GrOpsTask.
+ // OpsTask.
dstProxyView->setProxyView(this->readSurfaceView());
dstProxyView->setOffset(0, 0);
dstProxyView->setDstSampleFlags(dstSampleFlags);
@@ -2124,7 +2124,7 @@
return true;
}
-GrOpsTask* SurfaceDrawContext::replaceOpsTaskIfModifiesColor() {
+OpsTask* SurfaceDrawContext::replaceOpsTaskIfModifiesColor() {
if (!this->getOpsTask()->isColorNoOp()) {
this->replaceOpsTask();
}
diff --git a/src/gpu/v1/SurfaceDrawContext_v1.h b/src/gpu/v1/SurfaceDrawContext_v1.h
index f5a235a..c11e294 100644
--- a/src/gpu/v1/SurfaceDrawContext_v1.h
+++ b/src/gpu/v1/SurfaceDrawContext_v1.h
@@ -15,12 +15,12 @@
#include "include/core/SkSurfaceProps.h"
#include "include/private/GrTypesPriv.h"
#include "src/core/SkGlyphRunPainter.h"
-#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrPaint.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrSurfaceProxyView.h"
#include "src/gpu/GrXferProcessor.h"
#include "src/gpu/geometry/GrQuad.h"
+#include "src/gpu/ops/OpsTask.h"
#include "src/gpu/v1/SurfaceFillContext_v1.h"
class GrBackendSemaphore;
@@ -626,11 +626,11 @@
private:
enum class QuadOptimization;
- void willReplaceOpsTask(GrOpsTask* prevTask, GrOpsTask* nextTask) override;
+ void willReplaceOpsTask(OpsTask* prevTask, OpsTask* nextTask) override;
GrAAType chooseAAType(GrAA);
- GrOpsTask::CanDiscardPreviousOps canDiscardPreviousOpsOnFullClear() const override;
+ OpsTask::CanDiscardPreviousOps canDiscardPreviousOpsOnFullClear() const override;
void setNeedsStencil();
void internalStencilClear(const SkIRect* scissor, bool insideStencilMask);
@@ -694,7 +694,7 @@
bool opRequiresMSAA,
GrDstProxyView* result);
- GrOpsTask* replaceOpsTaskIfModifiesColor();
+ OpsTask* replaceOpsTaskIfModifiesColor();
SkGlyphRunListPainter* glyphPainter() { return &fGlyphPainter; }
diff --git a/src/gpu/v1/SurfaceFillContext_v1.cpp b/src/gpu/v1/SurfaceFillContext_v1.cpp
index 839feaa..8d11991 100644
--- a/src/gpu/v1/SurfaceFillContext_v1.cpp
+++ b/src/gpu/v1/SurfaceFillContext_v1.cpp
@@ -34,8 +34,8 @@
namespace skgpu::v1 {
// In MDB mode the reffing of the 'getLastOpsTask' call's result allows in-progress
-// GrOpsTask to be picked up and added to by SurfaceFillContext lower in the call
-// stack. When this occurs with a closed GrOpsTask, a new one will be allocated
+// OpsTask to be picked up and added to by SurfaceFillContext lower in the call
+// stack. When this occurs with a closed OpsTask, a new one will be allocated
// when the SurfaceFillContext attempts to use it (via getOpsTask).
SurfaceFillContext::SurfaceFillContext(GrRecordingContext* rContext,
GrSurfaceProxyView readView,
@@ -118,7 +118,7 @@
*this->caps());
}
-GrOpsTask* SurfaceFillContext::getOpsTask() {
+OpsTask* SurfaceFillContext::getOpsTask() {
ASSERT_SINGLE_OWNER
SkDEBUGCODE(this->validate();)
@@ -133,8 +133,8 @@
return sk_ref_sp(this->getOpsTask());
}
-GrOpsTask* SurfaceFillContext::replaceOpsTask() {
- sk_sp<GrOpsTask> newOpsTask = this->drawingManager()->newOpsTask(
+OpsTask* SurfaceFillContext::replaceOpsTask() {
+ sk_sp<OpsTask> newOpsTask = this->drawingManager()->newOpsTask(
this->writeSurfaceView(), this->arenas(), fFlushTimeOpsTask);
this->willReplaceOpsTask(fOpsTask.get(), newOpsTask.get());
fOpsTask = std::move(newOpsTask);
@@ -197,7 +197,7 @@
if (!scissorState.enabled()) {
// This is a fullscreen clear, so could be handled as a load op. Regardless, we can also
// discard all prior ops in the current task since the color buffer will be overwritten.
- GrOpsTask* opsTask = this->getOpsTask();
+ auto opsTask = this->getOpsTask();
if (opsTask->resetForFullscreenClear(this->canDiscardPreviousOpsOnFullClear()) &&
!this->caps()->performColorClearsAsDraws()) {
color = this->writeSurfaceView().swizzle().applyTo(color);
diff --git a/src/gpu/v1/SurfaceFillContext_v1.h b/src/gpu/v1/SurfaceFillContext_v1.h
index eb7a1b3..ce04851 100644
--- a/src/gpu/v1/SurfaceFillContext_v1.h
+++ b/src/gpu/v1/SurfaceFillContext_v1.h
@@ -11,9 +11,9 @@
#include "include/core/SkSize.h"
#include "include/private/GrTypesPriv.h"
#include "src/gpu/GrImageInfo.h"
-#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrSwizzle.h"
#include "src/gpu/SurfaceFillContext.h"
+#include "src/gpu/ops/OpsTask.h"
#include <array>
#include <tuple>
@@ -44,7 +44,7 @@
const SkIRect& srcRect,
const SkIPoint& dstPoint) override;
- GrOpsTask* getOpsTask();
+ OpsTask* getOpsTask();
sk_sp<GrRenderTask> refRenderTask() override;
int numSamples() const { return this->asRenderTargetProxy()->numSamples(); }
@@ -54,7 +54,7 @@
GrSubRunAllocator* subRunAlloc() { return this->arenas()->subRunAlloc(); }
#if GR_TEST_UTILS
- GrOpsTask* testingOnly_PeekLastOpsTask() { return fOpsTask.get(); }
+ OpsTask* testingOnly_PeekLastOpsTask() { return fOpsTask.get(); }
#endif
const GrSurfaceProxyView& writeSurfaceView() const { return fWriteView; }
@@ -67,20 +67,20 @@
void addOp(GrOp::Owner);
- GrOpsTask* replaceOpsTask();
+ OpsTask* replaceOpsTask();
private:
sk_sp<GrArenas> arenas() { return fWriteView.proxy()->asRenderTargetProxy()->arenas(); }
/** Override to be notified in subclass before the current ops task is replaced. */
- virtual void willReplaceOpsTask(GrOpsTask* prevTask, GrOpsTask* nextTask) {}
+ virtual void willReplaceOpsTask(OpsTask* prevTask, OpsTask* nextTask) {}
/**
* Override to be called to participate in the decision to discard all previous ops if a
* fullscreen clear occurs.
*/
- virtual GrOpsTask::CanDiscardPreviousOps canDiscardPreviousOpsOnFullClear() const {
- return GrOpsTask::CanDiscardPreviousOps::kYes;
+ virtual OpsTask::CanDiscardPreviousOps canDiscardPreviousOpsOnFullClear() const {
+ return OpsTask::CanDiscardPreviousOps::kYes;
}
void internalClear(const SkIRect* scissor,
@@ -91,9 +91,9 @@
SkDEBUGCODE(void onValidate() const override;)
- // The GrOpsTask can be closed by some other surface context that has picked it up. For this
- // reason, the GrOpsTask should only ever be accessed via 'getOpsTask'.
- sk_sp<GrOpsTask> fOpsTask;
+ // The OpsTask can be closed by some other surface context that has picked it up. For this
+ // reason, the OpsTask should only ever be accessed via 'getOpsTask'.
+ sk_sp<OpsTask> fOpsTask;
bool fFlushTimeOpsTask;
diff --git a/src/gpu/vk/GrVkOpsRenderPass.cpp b/src/gpu/vk/GrVkOpsRenderPass.cpp
index 8c82415..b609d06 100644
--- a/src/gpu/vk/GrVkOpsRenderPass.cpp
+++ b/src/gpu/vk/GrVkOpsRenderPass.cpp
@@ -486,7 +486,7 @@
// means we missed an opportunity higher up the stack to set the load op to be a clear. However,
// there are situations where higher up we couldn't discard the previous ops and set a clear
// load op (e.g. if we needed to execute a wait op). Thus we also have the empty check here.
- // TODO: Make the waitOp a RenderTask instead so we can clear out the GrOpsTask for a clear. We
+ // TODO: Make the waitOp a RenderTask instead so we can clear out the OpsTask for a clear. We
// can then reenable this assert assuming we can't get messed up by a waitOp.
//SkASSERT(!fCurrentCBIsEmpty || scissor);
diff --git a/tests/BulkRectTest.cpp b/tests/BulkRectTest.cpp
index 01fae79..7a8c035 100644
--- a/tests/BulkRectTest.cpp
+++ b/tests/BulkRectTest.cpp
@@ -73,7 +73,7 @@
GrFillRectOp::AddFillRectOps(sdc.get(), nullptr, dContext, std::move(paint), overallAA,
SkMatrix::I(), quads, requestedTotNumQuads);
- GrOpsTask* opsTask = sdc->testingOnly_PeekLastOpsTask();
+ auto opsTask = sdc->testingOnly_PeekLastOpsTask();
int actualNumOps = opsTask->numOpChains();
int actualTotNumQuads = 0;
@@ -178,7 +178,7 @@
nullptr);
}
- GrOpsTask* opsTask = sdc->testingOnly_PeekLastOpsTask();
+ auto opsTask = sdc->testingOnly_PeekLastOpsTask();
int actualNumOps = opsTask->numOpChains();
int actualTotNumQuads = 0;
diff --git a/tests/ClearTest.cpp b/tests/ClearTest.cpp
index cc90443..e6b04c2 100644
--- a/tests/ClearTest.cpp
+++ b/tests/ClearTest.cpp
@@ -250,10 +250,10 @@
// This should combine w/ the prior combined clear and overwrite the color
sdc->clear(kScissorRect, SK_PMColor4fBLACK);
- GrOpsTask* ops = sdc->getOpsTask();
- REPORTER_ASSERT(reporter, ops->numOpChains() == 1);
+ auto opsTask = sdc->getOpsTask();
+ REPORTER_ASSERT(reporter, opsTask->numOpChains() == 1);
- const GrClearOp& clearOp = ops->getChain(0)->cast<GrClearOp>();
+ const GrClearOp& clearOp = opsTask->getChain(0)->cast<GrClearOp>();
constexpr std::array<float, 4> kExpected { 0, 0, 0, 1 };
REPORTER_ASSERT(reporter, clearOp.color() == kExpected);
@@ -276,10 +276,10 @@
// field
sdc->clearStencilClip(kScissorRect, false);
- GrOpsTask* ops = sdc->getOpsTask();
- REPORTER_ASSERT(reporter, ops->numOpChains() == 1);
+ auto opsTask = sdc->getOpsTask();
+ REPORTER_ASSERT(reporter, opsTask->numOpChains() == 1);
- const GrClearOp& clearOp = ops->getChain(0)->cast<GrClearOp>();
+ const GrClearOp& clearOp = opsTask->getChain(0)->cast<GrClearOp>();
constexpr std::array<float, 4> kExpected { 1, 1, 1, 1 };
REPORTER_ASSERT(reporter, clearOp.color() == kExpected);
diff --git a/tests/GrOpListFlushTest.cpp b/tests/GrOpListFlushTest.cpp
index 370a2bd..36228d4 100644
--- a/tests/GrOpListFlushTest.cpp
+++ b/tests/GrOpListFlushTest.cpp
@@ -26,7 +26,7 @@
return result;
}
-DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrOpsTaskFlushCount, reporter, ctxInfo) {
+DEF_GPUTEST_FOR_RENDERING_CONTEXTS(OpsTaskFlushCount, reporter, ctxInfo) {
auto context = ctxInfo.directContext();
GrGpu* gpu = context->priv().getGpu();
diff --git a/tests/OpChainTest.cpp b/tests/OpChainTest.cpp
index 58aa3e2..a7b0db7 100644
--- a/tests/OpChainTest.cpp
+++ b/tests/OpChainTest.cpp
@@ -9,10 +9,10 @@
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOpFlushState.h"
-#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/ops/GrOp.h"
+#include "src/gpu/ops/OpsTask.h"
#include "tests/Test.h"
#include <iterator>
@@ -140,7 +140,7 @@
}
CombineResult onCombineIfPossible(GrOp* t, SkArenaAlloc* arenas, const GrCaps&) override {
- // This op doesn't use the arenas, but make sure the GrOpsTask is sending it
+ // This op doesn't use the arenas, but make sure the OpsTask is sending it
SkASSERT(arenas);
(void) arenas;
auto that = t->cast<TestOp>();
@@ -220,10 +220,10 @@
GrOpFlushState flushState(dContext->priv().getGpu(),
dContext->priv().resourceProvider(),
&tracker);
- GrOpsTask opsTask(drawingMgr,
- GrSurfaceProxyView(proxy, kOrigin, writeSwizzle),
- dContext->priv().auditTrail(),
- arenas);
+ skgpu::v1::OpsTask opsTask(drawingMgr,
+ GrSurfaceProxyView(proxy, kOrigin, writeSwizzle),
+ dContext->priv().auditTrail(),
+ arenas);
// This assumes the particular values of kRanges.
std::fill_n(result, result_width(), -1);
std::fill_n(validResult, result_width(), -1);
diff --git a/tests/TextureOpTest.cpp b/tests/TextureOpTest.cpp
index 40c967c..4bc9253 100644
--- a/tests/TextureOpTest.cpp
+++ b/tests/TextureOpTest.cpp
@@ -9,16 +9,16 @@
#include "include/gpu/GrRecordingContext.h"
#include "src/gpu/GrColorSpaceXform.h"
#include "src/gpu/GrDirectContextPriv.h"
-#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/geometry/GrQuad.h"
#include "src/gpu/ops/GrTextureOp.h"
+#include "src/gpu/ops/OpsTask.h"
#include "tests/Test.h"
class OpsTaskTestingAccess {
public:
- typedef GrOpsTask::OpChain OpChain;
+ typedef skgpu::v1::OpsTask::OpChain OpChain;
};
static void check_chain(OpsTaskTestingAccess::OpChain* chain, SkRect firstRect, SkRect lastRect,
diff --git a/tests/WrappedSurfaceCopyOnWriteTest.cpp b/tests/WrappedSurfaceCopyOnWriteTest.cpp
index 1e773f3..9db0e93 100644
--- a/tests/WrappedSurfaceCopyOnWriteTest.cpp
+++ b/tests/WrappedSurfaceCopyOnWriteTest.cpp
@@ -129,7 +129,7 @@
#if SK_GPU_V1
-// Make sure GrOpsTask are skippable
+// Make sure OpsTasks are skippable
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(SkipOpsTaskTest, reporter, ctxInfo) {
GrDirectContext* dContext = ctxInfo.directContext();