layers: Only support shared_ptr Get<>() methods
In order to make it feasible to use reduced locking in the validation
layers, it will be necessary for the validation code to hold a
reference count on any state objects that are being used.
Copying shared_ptr's involves atomic operations and is more expensive
than copying raw pointers, so it should be avoided if possible.
Some guidelines:
-Avoid repeated Get<> calls whenever possible. These are a hash lookup,
shared_ptr<> copy, and (eventually) map locking operations.
-Functions that use a state object but don't change ownership should
pass by reference (preferred) or raw pointer.
'void func(Foo &);' or 'void func(Foo *);'
-Functions that share ownership (caller keeps reference and callee takes
an additional reference) should pass a shared_ptr lvalue reference
`void func(shared_ptr<Foo> &);` This avoids a useless shared pointer
copy for the function argument.
-Functions that transfer ownership (caller gives up reference and callee
takes it) should pass a shared_ptr rvalue reference
`void func2(shared_ptr<Foo> &&);` This allows std::move() to be used
to avoid shared_ptr copies.
diff --git a/layers/synchronization_validation.cpp b/layers/synchronization_validation.cpp
index 4b0d552..61bfb10 100644
--- a/layers/synchronization_validation.cpp
+++ b/layers/synchronization_validation.cpp
@@ -2192,9 +2192,9 @@
void CommandBufferAccessContext::RecordDestroyEvent(VkEvent event) {
// Erase is okay with the key not being
- const auto *event_state = sync_state_->Get<EVENT_STATE>(event);
+ const auto event_state = sync_state_->Get<EVENT_STATE>(event);
if (event_state) {
- GetCurrentEventsContext()->Destroy(event_state);
+ GetCurrentEventsContext()->Destroy(event_state.get());
}
}
@@ -3353,8 +3353,8 @@
const auto *context = cb_context->GetCurrentAccessContext();
// If we have no previous accesses, we have no hazards
- const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
- const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
+ const auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
+ const auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
for (uint32_t region = 0; region < regionCount; region++) {
const auto &copy_region = pRegions[region];
@@ -3390,8 +3390,8 @@
const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER);
auto *context = cb_context->GetCurrentAccessContext();
- const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
- const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
+ const auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
+ const auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
for (uint32_t region = 0; region < regionCount; region++) {
const auto &copy_region = pRegions[region];
@@ -3422,8 +3422,8 @@
const auto *context = cb_context->GetCurrentAccessContext();
// If we have no previous accesses, we have no hazards
- const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
- const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
+ const auto src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
+ const auto dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
const auto &copy_region = pCopyBufferInfos->pRegions[region];
@@ -3459,8 +3459,8 @@
const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER2KHR);
auto *context = cb_context->GetCurrentAccessContext();
- const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
- const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
+ const auto src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
+ const auto dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
const auto &copy_region = pCopyBufferInfos->pRegions[region];
@@ -3487,8 +3487,8 @@
assert(context);
if (!context) return skip;
- const auto *src_image = Get<IMAGE_STATE>(srcImage);
- const auto *dst_image = Get<IMAGE_STATE>(dstImage);
+ const auto src_image = Get<IMAGE_STATE>(srcImage);
+ const auto dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto &copy_region = pRegions[region];
if (src_image) {
@@ -3529,8 +3529,8 @@
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
- auto *src_image = Get<IMAGE_STATE>(srcImage);
- auto *dst_image = Get<IMAGE_STATE>(dstImage);
+ auto src_image = Get<IMAGE_STATE>(srcImage);
+ auto dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto &copy_region = pRegions[region];
@@ -3558,8 +3558,8 @@
assert(context);
if (!context) return skip;
- const auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
- const auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
+ const auto src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
+ const auto dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
const auto &copy_region = pCopyImageInfo->pRegions[region];
if (src_image) {
@@ -3598,8 +3598,8 @@
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
- auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
- auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
+ auto src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
+ auto dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
const auto &copy_region = pCopyImageInfo->pRegions[region];
@@ -3916,8 +3916,8 @@
assert(context);
if (!context) return skip;
- const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
- const auto *dst_image = Get<IMAGE_STATE>(dstImage);
+ const auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
+ const auto dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto &copy_region = pRegions[region];
@@ -3979,8 +3979,8 @@
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
- const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
- const auto *dst_image = Get<IMAGE_STATE>(dstImage);
+ const auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
+ const auto dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto &copy_region = pRegions[region];
@@ -4027,8 +4027,8 @@
assert(context);
if (!context) return skip;
- const auto *src_image = Get<IMAGE_STATE>(srcImage);
- const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
+ const auto src_image = Get<IMAGE_STATE>(srcImage);
+ const auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->MemState()->mem() : VK_NULL_HANDLE;
for (uint32_t region = 0; region < regionCount; region++) {
const auto &copy_region = pRegions[region];
@@ -4086,8 +4086,8 @@
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
- const auto *src_image = Get<IMAGE_STATE>(srcImage);
- auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
+ const auto src_image = Get<IMAGE_STATE>(srcImage);
+ auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->MemState()->mem() : VK_NULL_HANDLE;
const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);
@@ -4132,8 +4132,8 @@
assert(context);
if (!context) return skip;
- const auto *src_image = Get<IMAGE_STATE>(srcImage);
- const auto *dst_image = Get<IMAGE_STATE>(dstImage);
+ const auto src_image = Get<IMAGE_STATE>(srcImage);
+ const auto dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto &blit_region = pRegions[region];
@@ -4197,8 +4197,8 @@
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
- auto *src_image = Get<IMAGE_STATE>(srcImage);
- auto *dst_image = Get<IMAGE_STATE>(dstImage);
+ auto src_image = Get<IMAGE_STATE>(srcImage);
+ auto dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto &blit_region = pRegions[region];
@@ -4253,7 +4253,7 @@
bool skip = false;
if (drawCount == 0) return skip;
- const auto *buf_state = Get<BUFFER_STATE>(buffer);
+ const auto buf_state = Get<BUFFER_STATE>(buffer);
VkDeviceSize size = struct_size;
if (drawCount == 1 || stride == size) {
if (drawCount > 1) size *= drawCount;
@@ -4284,7 +4284,7 @@
void SyncValidator::RecordIndirectBuffer(AccessContext &context, const ResourceUsageTag tag, const VkDeviceSize struct_size,
const VkBuffer buffer, const VkDeviceSize offset, const uint32_t drawCount,
uint32_t stride) {
- const auto *buf_state = Get<BUFFER_STATE>(buffer);
+ const auto buf_state = Get<BUFFER_STATE>(buffer);
VkDeviceSize size = struct_size;
if (drawCount == 1 || stride == size) {
if (drawCount > 1) size *= drawCount;
@@ -4304,7 +4304,7 @@
const char *function) const {
bool skip = false;
- const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
+ const auto count_buf_state = Get<BUFFER_STATE>(buffer);
const ResourceAccessRange range = MakeRange(offset, 4);
auto hazard = context.DetectHazard(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
if (hazard.hazard) {
@@ -4317,7 +4317,7 @@
}
void SyncValidator::RecordCountBuffer(AccessContext &context, const ResourceUsageTag tag, VkBuffer buffer, VkDeviceSize offset) {
- const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
+ const auto count_buf_state = Get<BUFFER_STATE>(buffer);
const ResourceAccessRange range = MakeRange(offset, 4);
context.UpdateAccessState(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
}
@@ -4706,7 +4706,7 @@
assert(context);
if (!context) return skip;
- const auto *image_state = Get<IMAGE_STATE>(image);
+ const auto image_state = Get<IMAGE_STATE>(image);
for (uint32_t index = 0; index < rangeCount; index++) {
const auto &range = pRanges[index];
@@ -4733,7 +4733,7 @@
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
- const auto *image_state = Get<IMAGE_STATE>(image);
+ const auto image_state = Get<IMAGE_STATE>(image);
for (uint32_t index = 0; index < rangeCount; index++) {
const auto &range = pRanges[index];
@@ -4756,7 +4756,7 @@
assert(context);
if (!context) return skip;
- const auto *image_state = Get<IMAGE_STATE>(image);
+ const auto image_state = Get<IMAGE_STATE>(image);
for (uint32_t index = 0; index < rangeCount; index++) {
const auto &range = pRanges[index];
@@ -4783,7 +4783,7 @@
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
- const auto *image_state = Get<IMAGE_STATE>(image);
+ const auto image_state = Get<IMAGE_STATE>(image);
for (uint32_t index = 0; index < rangeCount; index++) {
const auto &range = pRanges[index];
@@ -4806,7 +4806,7 @@
assert(context);
if (!context) return skip;
- const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
+ const auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
@@ -4834,7 +4834,7 @@
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
- const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
+ const auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
@@ -4855,7 +4855,7 @@
assert(context);
if (!context) return skip;
- const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
+ const auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
@@ -4878,7 +4878,7 @@
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
- const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
+ const auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
@@ -4898,8 +4898,8 @@
assert(context);
if (!context) return skip;
- const auto *src_image = Get<IMAGE_STATE>(srcImage);
- const auto *dst_image = Get<IMAGE_STATE>(dstImage);
+ const auto src_image = Get<IMAGE_STATE>(srcImage);
+ const auto dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto &resolve_region = pRegions[region];
@@ -4941,8 +4941,8 @@
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
- auto *src_image = Get<IMAGE_STATE>(srcImage);
- auto *dst_image = Get<IMAGE_STATE>(dstImage);
+ auto src_image = Get<IMAGE_STATE>(srcImage);
+ auto dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto &resolve_region = pRegions[region];
@@ -4968,8 +4968,8 @@
assert(context);
if (!context) return skip;
- const auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
- const auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
+ const auto src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
+ const auto dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
const auto &resolve_region = pResolveImageInfo->pRegions[region];
@@ -5009,8 +5009,8 @@
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
- auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
- auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
+ auto src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
+ auto dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
const auto &resolve_region = pResolveImageInfo->pRegions[region];
@@ -5036,7 +5036,7 @@
assert(context);
if (!context) return skip;
- const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
+ const auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
// VK_WHOLE_SIZE not allowed
@@ -5060,7 +5060,7 @@
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
- const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
+ const auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
// VK_WHOLE_SIZE not allowed
@@ -5080,7 +5080,7 @@
assert(context);
if (!context) return skip;
- const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
+ const auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
const ResourceAccessRange range = MakeRange(dstOffset, 4);
@@ -5104,7 +5104,7 @@
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
- const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
+ const auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
const ResourceAccessRange range = MakeRange(dstOffset, 4);
@@ -5484,7 +5484,7 @@
buffer_memory_barriers.reserve(barrier_count);
for (uint32_t index = 0; index < barrier_count; index++) {
const auto &barrier = barriers[index];
- auto buffer = sync_state.GetShared<BUFFER_STATE>(barrier.buffer);
+ auto buffer = sync_state.Get<BUFFER_STATE>(barrier.buffer);
if (buffer) {
const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
const auto range = MakeRange(barrier.offset, barrier_size);
@@ -5517,7 +5517,7 @@
const auto &barrier = barriers[index];
auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
- auto buffer = sync_state.GetShared<BUFFER_STATE>(barrier.buffer);
+ auto buffer = sync_state.Get<BUFFER_STATE>(barrier.buffer);
if (buffer) {
const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
const auto range = MakeRange(barrier.offset, barrier_size);
@@ -5535,7 +5535,7 @@
image_memory_barriers.reserve(barrier_count);
for (uint32_t index = 0; index < barrier_count; index++) {
const auto &barrier = barriers[index];
- const auto image = sync_state.GetShared<IMAGE_STATE>(barrier.image);
+ const auto image = sync_state.Get<IMAGE_STATE>(barrier.image);
if (image) {
auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
const SyncBarrier sync_barrier(barrier, src, dst);
@@ -5555,7 +5555,7 @@
const auto &barrier = barriers[index];
auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
- const auto image = sync_state.GetShared<IMAGE_STATE>(barrier.image);
+ const auto image = sync_state.Get<IMAGE_STATE>(barrier.image);
if (image) {
auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
const SyncBarrier sync_barrier(barrier, src, dst);
@@ -5887,7 +5887,7 @@
assert(context);
if (!context) return skip;
- const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
+ const auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
const ResourceAccessRange range = MakeRange(dstOffset, 4);
@@ -5905,15 +5905,13 @@
void SyncOpWaitEvents::MakeEventsList(const SyncValidator &sync_state, uint32_t event_count, const VkEvent *events) {
events_.reserve(event_count);
for (uint32_t event_index = 0; event_index < event_count; event_index++) {
- events_.emplace_back(sync_state.GetShared<EVENT_STATE>(events[event_index]));
+ events_.emplace_back(sync_state.Get<EVENT_STATE>(events[event_index]));
}
}
SyncOpResetEvent::SyncOpResetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
VkPipelineStageFlags2KHR stageMask)
- : SyncOpBase(cmd),
- event_(sync_state.GetShared<EVENT_STATE>(event)),
- exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)) {}
+ : SyncOpBase(cmd), event_(sync_state.Get<EVENT_STATE>(event)), exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)) {}
bool SyncOpResetEvent::Validate(const CommandBufferAccessContext& cb_context) const {
return DoValidate(cb_context, ResourceUsageRecord::kMaxIndex);
@@ -5993,14 +5991,14 @@
SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
VkPipelineStageFlags2KHR stageMask)
: SyncOpBase(cmd),
- event_(sync_state.GetShared<EVENT_STATE>(event)),
+ event_(sync_state.Get<EVENT_STATE>(event)),
src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)),
dep_info_() {}
SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
const VkDependencyInfoKHR &dep_info)
: SyncOpBase(cmd),
- event_(sync_state.GetShared<EVENT_STATE>(event)),
+ event_(sync_state.Get<EVENT_STATE>(event)),
src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, sync_utils::GetGlobalStageMasks(dep_info).src)),
dep_info_(new safe_VkDependencyInfoKHR(&dep_info)) {}
@@ -6125,11 +6123,11 @@
const VkSubpassBeginInfo *pSubpassBeginInfo)
: SyncOpBase(cmd) {
if (pRenderPassBegin) {
- rp_state_ = sync_state.GetShared<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
+ rp_state_ = sync_state.Get<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
renderpass_begin_info_ = safe_VkRenderPassBeginInfo(pRenderPassBegin);
- const auto *fb_state = sync_state.Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer);
+ const auto fb_state = sync_state.Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer);
if (fb_state) {
- shared_attachments_ = sync_state.GetSharedAttachmentViews(*renderpass_begin_info_.ptr(), *fb_state);
+ shared_attachments_ = sync_state.GetAttachmentViews(*renderpass_begin_info_.ptr(), *fb_state);
// TODO: Revisit this when all attachment validation is through SyncOps to see if we can discard the plain pointer copy
// Note that this a safe to presist as long as shared_attachments is not cleared
attachments_.reserve(shared_attachments_.size());
@@ -6274,7 +6272,7 @@
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
- const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
+ const auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
const ResourceAccessRange range = MakeRange(dstOffset, 4);