Flutter Impeller
blit_pass_vk.cc
Go to the documentation of this file.
1 // Copyright 2013 The Flutter Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
6 
10 #include "vulkan/vulkan_core.h"
11 #include "vulkan/vulkan_enums.hpp"
12 #include "vulkan/vulkan_structs.hpp"
13 
14 namespace impeller {
15 
16 static void InsertImageMemoryBarrier(const vk::CommandBuffer& cmd,
17  const vk::Image& image,
18  vk::AccessFlags src_access_mask,
19  vk::AccessFlags dst_access_mask,
20  vk::ImageLayout old_layout,
21  vk::ImageLayout new_layout,
22  vk::PipelineStageFlags src_stage,
23  vk::PipelineStageFlags dst_stage,
24  uint32_t base_mip_level,
25  uint32_t mip_level_count = 1u) {
26  if (old_layout == new_layout) {
27  return;
28  }
29 
30  vk::ImageMemoryBarrier barrier;
31  barrier.srcAccessMask = src_access_mask;
32  barrier.dstAccessMask = dst_access_mask;
33  barrier.oldLayout = old_layout;
34  barrier.newLayout = new_layout;
35  barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
36  barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
37  barrier.image = image;
38  barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
39  barrier.subresourceRange.baseMipLevel = base_mip_level;
40  barrier.subresourceRange.levelCount = mip_level_count;
41  barrier.subresourceRange.baseArrayLayer = 0u;
42  barrier.subresourceRange.layerCount = 1u;
43 
44  cmd.pipelineBarrier(src_stage, dst_stage, {}, nullptr, nullptr, barrier);
45 }
46 
// Creates a blit pass that records directly into the given command buffer.
// `workarounds` captures driver-specific quirk flags; presumably consulted by
// the blit paths — not referenced in this file's visible code, verify usage.
BlitPassVK::BlitPassVK(std::shared_ptr<CommandBufferVK> command_buffer,
                       const WorkaroundsVK& workarounds)
    : command_buffer_(std::move(command_buffer)), workarounds_(workarounds) {}
50 
// Default destructor; members release their own resources (RAII).
BlitPassVK::~BlitPassVK() = default;
52 
// |BlitPass|
// Intentionally a no-op: this backend does not attach debug labels here.
void BlitPassVK::OnSetLabel(std::string_view label) {}
54 
// |BlitPass|
bool BlitPassVK::IsValid() const {
  // Construction cannot fail, so the pass is always valid.
  return true;
}
59 
// |BlitPass|
bool BlitPassVK::EncodeCommands() const {
  // Each OnXxx command below records into the Vulkan command buffer as it is
  // added, so there is no deferred encoding step to perform here.
  return true;
}
64 
// |BlitPass|
//
// Copies `source_region` of `source` into `destination` at
// `destination_origin` via vkCmdCopyImage. Both images are transitioned to
// transfer-optimal layouts first; the destination is returned to
// shader-read afterwards unless it is a swapchain image.
bool BlitPassVK::OnCopyTextureToTextureCommand(
    std::shared_ptr<Texture> source,
    std::shared_ptr<Texture> destination,
    IRect source_region,
    IPoint destination_origin,
    std::string_view label) {
  const auto& cmd_buffer = command_buffer_->GetCommandBuffer();

  const auto& src = TextureVK::Cast(*source);
  const auto& dst = TextureVK::Cast(*destination);

  // Keep both textures alive until the command buffer finishes executing.
  if (!command_buffer_->Track(source) || !command_buffer_->Track(destination)) {
    return false;
  }

  // Make prior transfer/shader/attachment writes to the source visible to
  // the upcoming transfer read, and move it to transfer-src layout.
  BarrierVK src_barrier;
  src_barrier.cmd_buffer = cmd_buffer;
  src_barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal;
  src_barrier.src_access = vk::AccessFlagBits::eTransferWrite |
                           vk::AccessFlagBits::eShaderWrite |
                           vk::AccessFlagBits::eColorAttachmentWrite;
  src_barrier.src_stage = vk::PipelineStageFlagBits::eTransfer |
                          vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eColorAttachmentOutput;
  src_barrier.dst_access = vk::AccessFlagBits::eTransferRead;
  src_barrier.dst_stage = vk::PipelineStageFlagBits::eTransfer;

  // Move the destination to transfer-dst. No prior writes are waited on
  // (empty src access, top-of-pipe src stage).
  BarrierVK dst_barrier;
  dst_barrier.cmd_buffer = cmd_buffer;
  dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal;
  dst_barrier.src_access = {};
  dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  dst_barrier.dst_access =
      vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
  dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eTransfer;

  if (!src.SetLayout(src_barrier) || !dst.SetLayout(dst_barrier)) {
    VALIDATION_LOG << "Could not complete layout transitions.";
    return false;
  }

  vk::ImageCopy image_copy;

  // Copy a single array layer of the base mip level on both sides.
  image_copy.setSrcSubresource(
      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
  image_copy.setDstSubresource(
      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));

  image_copy.srcOffset =
      vk::Offset3D(source_region.GetX(), source_region.GetY(), 0);
  image_copy.dstOffset =
      vk::Offset3D(destination_origin.x, destination_origin.y, 0);
  image_copy.extent =
      vk::Extent3D(source_region.GetWidth(), source_region.GetHeight(), 1);

  // Issue the copy command now that the images are already in the right
  // layouts.
  cmd_buffer.copyImage(src.GetImage(),          //
                       src_barrier.new_layout,  //
                       dst.GetImage(),          //
                       dst_barrier.new_layout,  //
                       image_copy               //
  );

  // If this is an onscreen texture, do not transition the layout
  // back to shader read.
  if (dst.IsSwapchainImage()) {
    return true;
  }

  // Make the copied contents sampleable by fragment shaders.
  BarrierVK barrier;
  barrier.cmd_buffer = cmd_buffer;
  barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
  barrier.src_access = {};
  barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
  barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;

  return dst.SetLayout(barrier);
}
147 
148 // |BlitPass|
149 bool BlitPassVK::OnCopyTextureToBufferCommand(
150  std::shared_ptr<Texture> source,
151  std::shared_ptr<DeviceBuffer> destination,
152  IRect source_region,
153  size_t destination_offset,
154  std::string_view label) {
155  const auto& cmd_buffer = command_buffer_->GetCommandBuffer();
156 
157  // cast source and destination to TextureVK
158  const auto& src = TextureVK::Cast(*source);
159 
160  if (!command_buffer_->Track(source) || !command_buffer_->Track(destination)) {
161  return false;
162  }
163 
164  BarrierVK barrier;
165  barrier.cmd_buffer = cmd_buffer;
166  barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal;
167  barrier.src_access = vk::AccessFlagBits::eShaderWrite |
168  vk::AccessFlagBits::eTransferWrite |
169  vk::AccessFlagBits::eColorAttachmentWrite;
170  barrier.src_stage = vk::PipelineStageFlagBits::eFragmentShader |
171  vk::PipelineStageFlagBits::eTransfer |
172  vk::PipelineStageFlagBits::eColorAttachmentOutput;
173  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
174  barrier.dst_stage = vk::PipelineStageFlagBits::eVertexShader |
175  vk::PipelineStageFlagBits::eFragmentShader;
176 
177  const auto& dst = DeviceBufferVK::Cast(*destination);
178 
179  vk::BufferImageCopy image_copy;
180  image_copy.setBufferOffset(destination_offset);
181  image_copy.setBufferRowLength(0);
182  image_copy.setBufferImageHeight(0);
183  image_copy.setImageSubresource(
184  vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
185  image_copy.setImageOffset(
186  vk::Offset3D(source_region.GetX(), source_region.GetY(), 0));
187  image_copy.setImageExtent(
188  vk::Extent3D(source_region.GetWidth(), source_region.GetHeight(), 1));
189 
190  if (!src.SetLayout(barrier)) {
191  VALIDATION_LOG << "Could not encode layout transition.";
192  return false;
193  }
194 
195  cmd_buffer.copyImageToBuffer(src.GetImage(), //
196  barrier.new_layout, //
197  dst.GetBuffer(), //
198  image_copy //
199  );
200 
201  // If the buffer is used for readback, then apply a transfer -> host memory
202  // barrier.
203  if (destination->GetDeviceBufferDescriptor().readback) {
204  vk::MemoryBarrier barrier;
205  barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
206  barrier.dstAccessMask = vk::AccessFlagBits::eHostRead;
207 
208  cmd_buffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
209  vk::PipelineStageFlagBits::eHost, {}, 1,
210  &barrier, 0, {}, 0, {});
211  }
212 
213  return true;
214 }
215 
216 bool BlitPassVK::ConvertTextureToShaderRead(
217  const std::shared_ptr<Texture>& texture) {
218  const auto& cmd_buffer = command_buffer_->GetCommandBuffer();
219 
220  BarrierVK barrier;
221  barrier.cmd_buffer = cmd_buffer;
222  barrier.src_access = vk::AccessFlagBits::eTransferWrite;
223  barrier.src_stage = vk::PipelineStageFlagBits::eTransfer;
224  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
225  barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;
226 
227  barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
228 
229  const auto& texture_vk = TextureVK::Cast(*texture);
230 
231  if (!command_buffer_->Track(texture)) {
232  return false;
233  }
234 
235  return texture_vk.SetLayout(barrier);
236 }
237 
// |BlitPass|
//
// Uploads the bytes referenced by `source` into `destination_region` of mip
// `mip_level` / array `slice` of `destination` via vkCmdCopyBufferToImage.
// When `convert_to_read` is set, the texture is transitioned to
// ShaderReadOnlyOptimal afterwards so it can be sampled.
bool BlitPassVK::OnCopyBufferToTextureCommand(
    BufferView source,
    std::shared_ptr<Texture> destination,
    IRect destination_region,
    std::string_view label,
    uint32_t mip_level,
    uint32_t slice,
    bool convert_to_read) {
  const auto& cmd_buffer = command_buffer_->GetCommandBuffer();

  // cast destination to TextureVK
  const auto& dst = TextureVK::Cast(*destination);
  const auto& src = DeviceBufferVK::Cast(*source.GetBuffer());

  // Take shared ownership of the source buffer (if any) and keep both
  // resources alive until the command buffer finishes executing.
  std::shared_ptr<const DeviceBuffer> source_buffer = source.TakeBuffer();
  if ((source_buffer && !command_buffer_->Track(source_buffer)) ||
      !command_buffer_->Track(destination)) {
    return false;
  }

  // Move the destination to transfer-dst; no prior writes are waited on
  // (empty src access, top-of-pipe src stage).
  BarrierVK dst_barrier;
  dst_barrier.cmd_buffer = cmd_buffer;
  dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal;
  dst_barrier.src_access = {};
  dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  dst_barrier.dst_access =
      vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
  dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eTransfer;

  vk::BufferImageCopy image_copy;
  image_copy.setBufferOffset(source.GetRange().offset);
  // Zero row length / image height means the buffer is tightly packed.
  image_copy.setBufferRowLength(0);
  image_copy.setBufferImageHeight(0);
  image_copy.setImageSubresource(vk::ImageSubresourceLayers(
      vk::ImageAspectFlagBits::eColor, mip_level, slice, 1));
  image_copy.imageOffset.x = destination_region.GetX();
  image_copy.imageOffset.y = destination_region.GetY();
  image_copy.imageOffset.z = 0u;
  image_copy.imageExtent.width = destination_region.GetWidth();
  image_copy.imageExtent.height = destination_region.GetHeight();
  image_copy.imageExtent.depth = 1u;

  // Note: this barrier should do nothing if we're already in the transfer dst
  // optimal state. This is important for performance of repeated blit pass
  // encoding.
  if (!dst.SetLayout(dst_barrier)) {
    VALIDATION_LOG << "Could not encode layout transition.";
    return false;
  }

  cmd_buffer.copyBufferToImage(src.GetBuffer(),         //
                               dst.GetImage(),          //
                               dst_barrier.new_layout,  //
                               image_copy               //
  );

  // Transition to shader-read.
  if (convert_to_read) {
    BarrierVK barrier;
    barrier.cmd_buffer = cmd_buffer;
    barrier.src_access = vk::AccessFlagBits::eTransferWrite;
    barrier.src_stage = vk::PipelineStageFlagBits::eTransfer;
    barrier.dst_access = vk::AccessFlagBits::eShaderRead;
    barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;

    barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;

    if (!dst.SetLayout(barrier)) {
      return false;
    }
  }

  return true;
}
314 
// |BlitPass|
//
// Scales the full base mip of `source` into the full base mip of
// `destination` with a linearly-filtered vkCmdBlitImage, then transitions
// the destination to ShaderReadOnlyOptimal for sampling.
bool BlitPassVK::ResizeTexture(const std::shared_ptr<Texture>& source,
                               const std::shared_ptr<Texture>& destination) {
  const auto& cmd_buffer = command_buffer_->GetCommandBuffer();

  const auto& src = TextureVK::Cast(*source);
  const auto& dst = TextureVK::Cast(*destination);

  // Keep both textures alive until the command buffer finishes executing.
  if (!command_buffer_->Track(source) || !command_buffer_->Track(destination)) {
    return false;
  }

  // Make prior transfer/shader/attachment writes to the source visible to
  // the upcoming transfer read, and move it to transfer-src layout.
  BarrierVK src_barrier;
  src_barrier.cmd_buffer = cmd_buffer;
  src_barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal;
  src_barrier.src_access = vk::AccessFlagBits::eTransferWrite |
                           vk::AccessFlagBits::eShaderWrite |
                           vk::AccessFlagBits::eColorAttachmentWrite;
  src_barrier.src_stage = vk::PipelineStageFlagBits::eTransfer |
                          vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eColorAttachmentOutput;
  src_barrier.dst_access = vk::AccessFlagBits::eTransferRead;
  src_barrier.dst_stage = vk::PipelineStageFlagBits::eTransfer;

  // Move the destination to transfer-dst; no prior writes are waited on
  // (empty src access, top-of-pipe src stage).
  BarrierVK dst_barrier;
  dst_barrier.cmd_buffer = cmd_buffer;
  dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal;
  dst_barrier.src_access = {};
  dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  dst_barrier.dst_access =
      vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
  dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eTransfer;

  if (!src.SetLayout(src_barrier) || !dst.SetLayout(dst_barrier)) {
    VALIDATION_LOG << "Could not complete layout transitions.";
    return false;
  }

  // Blit the whole base mip (layer 0) of src onto the whole base mip of dst.
  vk::ImageBlit blit;
  blit.srcSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
  blit.srcSubresource.baseArrayLayer = 0u;
  blit.srcSubresource.layerCount = 1u;
  blit.srcSubresource.mipLevel = 0;

  blit.dstSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
  blit.dstSubresource.baseArrayLayer = 0u;
  blit.dstSubresource.layerCount = 1u;
  blit.dstSubresource.mipLevel = 0;

  // offsets[0] is origin.
  blit.srcOffsets[1].x = std::max<int32_t>(source->GetSize().width, 1u);
  blit.srcOffsets[1].y = std::max<int32_t>(source->GetSize().height, 1u);
  blit.srcOffsets[1].z = 1u;

  // offsets[0] is origin.
  blit.dstOffsets[1].x = std::max<int32_t>(destination->GetSize().width, 1u);
  blit.dstOffsets[1].y = std::max<int32_t>(destination->GetSize().height, 1u);
  blit.dstOffsets[1].z = 1u;

  cmd_buffer.blitImage(src.GetImage(),          //
                       src_barrier.new_layout,  //
                       dst.GetImage(),          //
                       dst_barrier.new_layout,  //
                       1,                       //
                       &blit,                   //
                       vk::Filter::eLinear

  );

  // Convert back to shader read

  BarrierVK barrier;
  barrier.cmd_buffer = cmd_buffer;
  barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
  barrier.src_access = {};
  barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
  barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;

  return dst.SetLayout(barrier);
}
397 
398 // |BlitPass|
399 bool BlitPassVK::OnGenerateMipmapCommand(std::shared_ptr<Texture> texture,
400  std::string_view label) {
401  auto& src = TextureVK::Cast(*texture);
402 
403  const auto size = src.GetTextureDescriptor().size;
404  uint32_t mip_count = src.GetTextureDescriptor().mip_count;
405 
406  if (mip_count < 2u) {
407  return true;
408  }
409 
410  const auto& image = src.GetImage();
411  const auto& cmd = command_buffer_->GetCommandBuffer();
412 
413  if (!command_buffer_->Track(texture)) {
414  return false;
415  }
416 
417  // Initialize all mip levels to be in TransferDst mode. Later, in a loop,
418  // after writing to that mip level, we'll first switch its layout to
419  // TransferSrc to prepare the mip level after it, use the image as the source
420  // of the blit, before finally switching it to ShaderReadOnly so its available
421  // for sampling in a shader.
423  /*cmd=*/cmd,
424  /*image=*/image,
425  /*src_access_mask=*/vk::AccessFlagBits::eTransferWrite |
426  vk::AccessFlagBits::eColorAttachmentWrite,
427  /*dst_access_mask=*/vk::AccessFlagBits::eTransferRead,
428  /*old_layout=*/src.GetLayout(),
429  /*new_layout=*/vk::ImageLayout::eTransferDstOptimal,
430  /*src_stage=*/vk::PipelineStageFlagBits::eTransfer |
431  vk::PipelineStageFlagBits::eColorAttachmentOutput,
432  /*dst_stage=*/vk::PipelineStageFlagBits::eTransfer,
433  /*base_mip_level=*/0u,
434  /*mip_level_count=*/mip_count);
435 
436  vk::ImageMemoryBarrier barrier;
437  barrier.image = image;
438  barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
439  barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
440  barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
441  barrier.subresourceRange.baseArrayLayer = 0;
442  barrier.subresourceRange.layerCount = 1;
443  barrier.subresourceRange.levelCount = 1;
444 
445  // Blit from the mip level N - 1 to mip level N.
446  size_t width = size.width;
447  size_t height = size.height;
448  for (size_t mip_level = 1u; mip_level < mip_count; mip_level++) {
449  barrier.subresourceRange.baseMipLevel = mip_level - 1;
450  barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal;
451  barrier.newLayout = vk::ImageLayout::eTransferSrcOptimal;
452  barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
453  barrier.dstAccessMask = vk::AccessFlagBits::eTransferRead;
454 
455  // We just finished writing to the previous (N-1) mip level or it was the
456  // base mip level. These were initialized to TransferDst earler. We are now
457  // going to read from it to write to the current level (N) . So it must be
458  // converted to TransferSrc.
459  cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
460  vk::PipelineStageFlagBits::eTransfer, {}, {}, {},
461  {barrier});
462 
463  vk::ImageBlit blit;
464  blit.srcSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
465  blit.srcSubresource.baseArrayLayer = 0u;
466  blit.srcSubresource.layerCount = 1u;
467  blit.srcSubresource.mipLevel = mip_level - 1;
468 
469  blit.dstSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
470  blit.dstSubresource.baseArrayLayer = 0u;
471  blit.dstSubresource.layerCount = 1u;
472  blit.dstSubresource.mipLevel = mip_level;
473 
474  // offsets[0] is origin.
475  blit.srcOffsets[1].x = std::max<int32_t>(width, 1u);
476  blit.srcOffsets[1].y = std::max<int32_t>(height, 1u);
477  blit.srcOffsets[1].z = 1u;
478 
479  width = width / 2;
480  height = height / 2;
481  if (width <= 1 || height <= 1) {
482  break;
483  }
484 
485  // offsets[0] is origin.
486  blit.dstOffsets[1].x = std::max<int32_t>(width, 1u);
487  blit.dstOffsets[1].y = std::max<int32_t>(height, 1u);
488  blit.dstOffsets[1].z = 1u;
489 
490  cmd.blitImage(image, // src image
491  vk::ImageLayout::eTransferSrcOptimal, // src layout
492  image, // dst image
493  vk::ImageLayout::eTransferDstOptimal, // dst layout
494  1u, // region count
495  &blit, // regions
496  vk::Filter::eLinear // filter
497  );
498 
499  barrier.oldLayout = vk::ImageLayout::eTransferSrcOptimal;
500  barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
501  barrier.srcAccessMask = vk::AccessFlagBits::eTransferRead;
502  barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;
503 
504  // Now that the blit is done, the image at the previous level (N-1)
505  // is done reading from (TransferSrc)/ Now we must prepare it to be read
506  // from a shader (ShaderReadOnly).
507  cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
508  vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {},
509  {barrier});
510  }
511 
512  barrier.subresourceRange.baseMipLevel = mip_count - 1;
513  barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal;
514  barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
515  barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
516  barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;
517 
518  cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
519  vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {},
520  {barrier});
521 
522  // We modified the layouts of this image from underneath it. Tell it its new
523  // state so it doesn't try to perform redundant transitions under the hood.
524  src.SetLayoutWithoutEncoding(vk::ImageLayout::eShaderReadOnlyOptimal);
525  src.SetMipMapGenerated();
526 
527  return true;
528 }
529 
530 } // namespace impeller
IRect64 IRect
Definition: rect.h:795
TPoint< int64_t > IPoint
Definition: point.h:328
static void InsertImageMemoryBarrier(const vk::CommandBuffer &cmd, const vk::Image &image, vk::AccessFlags src_access_mask, vk::AccessFlags dst_access_mask, vk::ImageLayout old_layout, vk::ImageLayout new_layout, vk::PipelineStageFlags src_stage, vk::PipelineStageFlags dst_stage, uint32_t base_mip_level, uint32_t mip_level_count=1u)
Definition: blit_pass_vk.cc:16
Definition: comparable.h:95
#define VALIDATION_LOG
Definition: validation.h:91