Skip to content
Permalink
Branch: master
Find file Copy path
Find file Copy path
24 contributors

Users who have contributed to this file

@kvark @grovesNL @bors @msiglreith @ZeGentzy @omni-viral @jackmott @termhn @Michael-Lfx @arashikou @Thomspoon @derekdreery @cormac-obrien @aleksijuvani @karroffel @robert-wallis @rukai @keringar @Veykril @BourgondAries @hgallagher1993 @Ralith @madadam @Abendstolz
957 lines (864 sloc) 34.3 KB
#![cfg_attr(
not(any(
feature = "vulkan",
feature = "dx11",
feature = "dx12",
feature = "metal",
feature = "gl",
feature = "wgl"
)),
allow(dead_code, unused_extern_crates, unused_imports)
)]
#[cfg(feature = "dx11")]
extern crate gfx_backend_dx11 as back;
#[cfg(feature = "dx12")]
extern crate gfx_backend_dx12 as back;
#[cfg(any(feature = "gl", feature = "wgl"))]
extern crate gfx_backend_gl as back;
#[cfg(feature = "metal")]
extern crate gfx_backend_metal as back;
#[cfg(feature = "vulkan")]
extern crate gfx_backend_vulkan as back;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen(start)]
pub fn wasm_main() {
    // WebAssembly entry point: forward Rust panics to the browser console
    // (via console_error_panic_hook), then run the regular `main`.
    std::panic::set_hook(Box::new(console_error_panic_hook::hook));
    main();
}
use hal::{
buffer,
command,
format as f,
format::{AsFormat, ChannelType, Rgba8Srgb as ColorFormat, Swizzle},
image as i,
memory as m,
pass,
pass::Subpass,
pool,
prelude::*,
pso,
pso::{PipelineStage, ShaderStageFlags, VertexInputRate},
queue::{QueueGroup, Submission},
window,
};
use std::{
borrow::Borrow,
io::Cursor,
iter,
mem::{self, ManuallyDrop},
ptr,
};
// Initial window / swapchain dimensions (logical size; scaled by DPI below).
#[cfg_attr(rustfmt, rustfmt_skip)]
const DIMS: window::Extent2D = window::Extent2D { width: 1024, height: 768 };
// Shader entry-point name used for both the vertex and fragment modules.
const ENTRY_NAME: &str = "main";
/// One vertex of the textured quad.
///
/// Field names use the shader-attribute naming convention (hence the
/// `non_snake_case` allow) — presumably they mirror the names in
/// `quad.vert` (TODO: confirm against the shader source).
#[derive(Debug, Clone, Copy)]
#[allow(non_snake_case)]
struct Vertex {
    // 2D position, consumed as `Rg32Sfloat` at attribute location 0.
    a_Pos: [f32; 2],
    // Texture coordinate, consumed as `Rg32Sfloat` at attribute location 1.
    a_Uv: [f32; 2],
}
// The quad as two CCW triangles (6 vertices, no index buffer).
#[cfg_attr(rustfmt, rustfmt_skip)]
const QUAD: [Vertex; 6] = [
    Vertex { a_Pos: [ -0.5, 0.33 ], a_Uv: [0.0, 1.0] },
    Vertex { a_Pos: [  0.5, 0.33 ], a_Uv: [1.0, 1.0] },
    Vertex { a_Pos: [  0.5,-0.33 ], a_Uv: [1.0, 0.0] },
    Vertex { a_Pos: [ -0.5, 0.33 ], a_Uv: [0.0, 1.0] },
    Vertex { a_Pos: [  0.5,-0.33 ], a_Uv: [1.0, 0.0] },
    Vertex { a_Pos: [ -0.5,-0.33 ], a_Uv: [0.0, 0.0] },
];
// Subresource range covering mip level 0 / array layer 0 of a color image;
// reused for the image view and the layout-transition barriers.
const COLOR_RANGE: i::SubresourceRange = i::SubresourceRange {
    aspects: f::Aspects::COLOR,
    levels: 0 .. 1,
    layers: 0 .. 1,
};
#[cfg(any(
    feature = "vulkan",
    feature = "dx11",
    feature = "dx12",
    feature = "metal",
    feature = "gl",
    feature = "wgl"
))]
fn main() {
    // Entry point (native backends): initialise logging, create a window and
    // a backend surface, pick the first adapter, then hand everything to
    // `Renderer` and drive the winit event loop.
    #[cfg(target_arch = "wasm32")]
    console_log::init_with_level(log::Level::Debug).unwrap();
    #[cfg(all(debug_assertions, not(target_arch = "wasm32")))]
    env_logger::init();
    #[cfg(not(target_arch = "wasm32"))]
    let event_loop = winit::event_loop::EventLoop::new();
    // DPI scale factor of the primary monitor; used to convert logical
    // window sizes to physical pixels for the swapchain.
    #[cfg(not(target_arch = "wasm32"))]
    let dpi = event_loop.primary_monitor().hidpi_factor();
    #[cfg(not(target_arch = "wasm32"))]
    let wb = winit::window::WindowBuilder::new()
        .with_min_inner_size(winit::dpi::LogicalSize::new(1.0, 1.0))
        .with_inner_size(winit::dpi::LogicalSize::from_physical(
            winit::dpi::PhysicalSize::new(DIMS.width as _, DIMS.height as _),
            dpi,
        ))
        .with_title("quad".to_string());
    // instantiate backend
    #[cfg(not(feature = "gl"))]
    let (_window, _instance, mut adapters, surface) = {
        let window = wb.build(&event_loop).unwrap();
        let instance = back::Instance::create("gfx-rs quad", 1)
            .expect("Failed to create an instance!");
        let surface = instance.create_surface(&window);
        let adapters = instance.enumerate_adapters();
        (window, instance, adapters, surface)
    };
    // The GL backend has no separate Instance: the surface is built from the
    // GL context (glutin on native, the canvas-backed window on wasm).
    #[cfg(feature = "gl")]
    let (window, mut adapters, surface) = {
        #[cfg(not(target_arch = "wasm32"))]
        let (window, surface) = {
            let builder =
                back::config_context(back::glutin::ContextBuilder::new(), ColorFormat::SELF, None)
                    .with_vsync(true);
            let windowed_context = builder.build_windowed(wb, &event_loop).unwrap();
            let (context, window) = unsafe {
                windowed_context
                    .make_current()
                    .expect("Unable to make context current")
                    .split()
            };
            let surface = back::Surface::from_context(context);
            (window, surface)
        };
        #[cfg(target_arch = "wasm32")]
        let (window, surface) = {
            let window = back::Window;
            let surface = back::Surface::from_window(&window);
            (window, surface)
        };
        let adapters = surface.enumerate_adapters();
        (window, adapters, surface)
    };
    for adapter in &adapters {
        println!("{:?}", adapter.info);
    }
    // No adapter ranking here — simply take the first one enumerated.
    let adapter = adapters.remove(0);
    let mut renderer = Renderer::new(surface, adapter);
    // On wasm there is no blocking event loop; render a single frame.
    #[cfg(target_arch = "wasm32")]
    renderer.render();
    #[cfg(not(target_arch = "wasm32"))]
    // It is important that the closure move captures the Renderer,
    // otherwise it will not be dropped when the event loop exits.
    event_loop.run(move |event, _, control_flow| {
        *control_flow = winit::event_loop::ControlFlow::Wait;
        match event {
            winit::event::Event::WindowEvent { event, .. } => match event {
                winit::event::WindowEvent::CloseRequested => {
                    *control_flow = winit::event_loop::ControlFlow::Exit
                }
                // Escape also exits.
                winit::event::WindowEvent::KeyboardInput {
                    input:
                        winit::event::KeyboardInput {
                            virtual_keycode: Some(winit::event::VirtualKeyCode::Escape),
                            ..
                        },
                    ..
                } => *control_flow = winit::event_loop::ControlFlow::Exit,
                winit::event::WindowEvent::Resized(dims) => {
                    println!("resized to {:?}", dims);
                    #[cfg(feature = "gl")]
                    {
                        let context = renderer.surface.context();
                        context.resize(dims.to_physical(window.hidpi_factor()));
                    }
                    // Convert the logical resize to physical pixels, then
                    // rebuild the swapchain at the new size.
                    renderer.dimensions = window::Extent2D {
                        width: (dims.width * dpi) as u32,
                        height: (dims.height * dpi) as u32,
                    };
                    renderer.recreate_swapchain();
                }
                _ => {}
            },
            winit::event::Event::EventsCleared => {
                renderer.render();
            }
            _ => {}
        }
    });
}
/// Owns every GPU object the example creates.
///
/// Destruction-ordered resources are wrapped in `ManuallyDrop` so that
/// `Drop for Renderer` can take them out with `ptr::read` and destroy them
/// explicitly through the device.
struct Renderer<B: hal::Backend> {
    device: B::Device,
    queue_group: QueueGroup<B>,
    desc_pool: ManuallyDrop<B::DescriptorPool>,
    surface: B::Surface,
    adapter: hal::adapter::Adapter<B>,
    // Swapchain surface format chosen in `new`; reused on recreation.
    format: hal::format::Format,
    // Current physical surface size; updated by the resize handler in `main`.
    dimensions: window::Extent2D,
    viewport: hal::pso::Viewport,
    render_pass: ManuallyDrop<B::RenderPass>,
    pipeline: ManuallyDrop<B::GraphicsPipeline>,
    pipeline_layout: ManuallyDrop<B::PipelineLayout>,
    desc_set: B::DescriptorSet,
    set_layout: ManuallyDrop<B::DescriptorSetLayout>,
    // Per-frame synchronisation / command objects, one slot per frame in
    // flight (indexed by `frame % frames_in_flight`).
    submission_complete_semaphores: Vec<B::Semaphore>,
    submission_complete_fences: Vec<B::Fence>,
    cmd_pools: Vec<B::CommandPool>,
    cmd_buffers: Vec<B::CommandBuffer>,
    vertex_buffer: ManuallyDrop<B::Buffer>,
    // Staging buffer (and its memory) for the logo upload; kept alive for
    // the renderer's lifetime and freed in Drop.
    image_upload_buffer: ManuallyDrop<B::Buffer>,
    image_logo: ManuallyDrop<B::Image>,
    image_srv: ManuallyDrop<B::ImageView>,
    buffer_memory: ManuallyDrop<B::Memory>,
    image_memory: ManuallyDrop<B::Memory>,
    image_upload_memory: ManuallyDrop<B::Memory>,
    sampler: ManuallyDrop<B::Sampler>,
    frames_in_flight: usize,
    // Monotonic frame counter.
    frame: u64,
}
impl<B> Renderer<B>
where
B: hal::Backend,
{
/// Builds the whole renderer: device/queue, descriptor objects, vertex
/// buffer, logo texture (uploaded via a staging buffer), swapchain,
/// render pass, per-frame sync/command objects, and graphics pipeline.
///
/// Fixes relative to the original: a stray double semicolon after the
/// sampler creation, and the fence-creation `expect` message which
/// wrongly said "semaphore".
fn new(mut surface: B::Surface, mut adapter: hal::adapter::Adapter<B>) -> Renderer<B> {
    let memory_types = adapter.physical_device.memory_properties().memory_types;
    let limits = adapter.physical_device.limits();
    // Build a new device and associated command queues
    let family = adapter
        .queue_families
        .iter()
        .find(|family| {
            surface.supports_queue_family(family) && family.queue_type().supports_graphics()
        })
        .unwrap();
    let mut gpu = unsafe {
        adapter
            .physical_device
            .open(&[(family, &[1.0])], hal::Features::empty())
            .unwrap()
    };
    let mut queue_group = gpu.queue_groups.pop().unwrap();
    let device = gpu.device;
    let mut command_pool = unsafe {
        device.create_command_pool(queue_group.family, pool::CommandPoolCreateFlags::empty())
    }
    .expect("Can't create command pool");
    // Setup renderpass and pipeline
    // Descriptor set layout: binding 0 = sampled image, binding 1 = sampler,
    // both visible to the fragment shader only.
    let set_layout = ManuallyDrop::new(
        unsafe {
            device.create_descriptor_set_layout(
                &[
                    pso::DescriptorSetLayoutBinding {
                        binding: 0,
                        ty: pso::DescriptorType::SampledImage,
                        count: 1,
                        stage_flags: ShaderStageFlags::FRAGMENT,
                        immutable_samplers: false,
                    },
                    pso::DescriptorSetLayoutBinding {
                        binding: 1,
                        ty: pso::DescriptorType::Sampler,
                        count: 1,
                        stage_flags: ShaderStageFlags::FRAGMENT,
                        immutable_samplers: false,
                    },
                ],
                &[],
            )
        }
        .expect("Can't create descriptor set layout"),
    );
    // Descriptors
    let mut desc_pool = ManuallyDrop::new(
        unsafe {
            device.create_descriptor_pool(
                1, // sets
                &[
                    pso::DescriptorRangeDesc {
                        ty: pso::DescriptorType::SampledImage,
                        count: 1,
                    },
                    pso::DescriptorRangeDesc {
                        ty: pso::DescriptorType::Sampler,
                        count: 1,
                    },
                ],
                pso::DescriptorPoolCreateFlags::empty(),
            )
        }
        .expect("Can't create descriptor pool"),
    );
    let desc_set = unsafe { desc_pool.allocate_set(&set_layout) }.unwrap();
    // Buffer allocations
    println!("Memory types: {:?}", memory_types);
    let buffer_stride = mem::size_of::<Vertex>() as u64;
    let buffer_len = QUAD.len() as u64 * buffer_stride;
    assert_ne!(buffer_len, 0);
    let mut vertex_buffer = ManuallyDrop::new(
        unsafe { device.create_buffer(buffer_len, buffer::Usage::VERTEX) }.unwrap(),
    );
    let buffer_req = unsafe { device.get_buffer_requirements(&vertex_buffer) };
    let upload_type = memory_types
        .iter()
        .enumerate()
        .position(|(id, mem_type)| {
            // type_mask is a bit field where each bit represents a memory type. If the bit is set
            // to 1 it means we can use that type for our buffer. So this code finds the first
            // memory type that has a `1` (or, is allowed), and is visible to the CPU.
            buffer_req.type_mask & (1 << id) != 0
                && mem_type.properties.contains(m::Properties::CPU_VISIBLE | m::Properties::COHERENT)
        })
        .unwrap()
        .into();
    // TODO: check transitions: read/write mapping and vertex buffer read
    // Upload the quad vertices through a persistent CPU-visible, coherent
    // mapping (no explicit flush needed because of COHERENT).
    let buffer_memory = unsafe {
        let memory = device.allocate_memory(upload_type, buffer_req.size).unwrap();
        device.bind_buffer_memory(&memory, 0, &mut vertex_buffer).unwrap();
        let mapping = device.map_memory(&memory, 0 .. buffer_len).unwrap();
        ptr::copy_nonoverlapping(QUAD.as_ptr() as *const u8, mapping, buffer_len as usize);
        device.unmap_memory(&memory);
        ManuallyDrop::new(memory)
    };
    // Image
    let img_data = include_bytes!("data/logo.png");
    let img = image::load(Cursor::new(&img_data[..]), image::PNG)
        .unwrap()
        .to_rgba();
    let (width, height) = img.dimensions();
    let kind = i::Kind::D2(width as i::Size, height as i::Size, 1, 1);
    // Rows in the staging buffer must be padded to the device's optimal
    // copy-pitch alignment (assumed to be a power of two here).
    let row_alignment_mask = limits.optimal_buffer_copy_pitch_alignment as u32 - 1;
    let image_stride = 4usize; // RGBA8: 4 bytes per texel
    let row_pitch = (width * image_stride as u32 + row_alignment_mask) & !row_alignment_mask;
    let upload_size = (height * row_pitch) as u64;
    let mut image_upload_buffer = ManuallyDrop::new(
        unsafe { device.create_buffer(upload_size, buffer::Usage::TRANSFER_SRC) }.unwrap(),
    );
    let image_mem_reqs = unsafe { device.get_buffer_requirements(&image_upload_buffer) };
    // copy image data into staging buffer
    let image_upload_memory = unsafe {
        let memory = device.allocate_memory(upload_type, image_mem_reqs.size).unwrap();
        device.bind_buffer_memory(&memory, 0, &mut image_upload_buffer).unwrap();
        let mapping = device.map_memory(&memory, 0 .. upload_size).unwrap();
        // Copy row by row because the destination rows are padded to
        // `row_pitch` while the source image is tightly packed.
        for y in 0 .. height as usize {
            let row = &(*img)[y * (width as usize) * image_stride
                .. (y + 1) * (width as usize) * image_stride];
            ptr::copy_nonoverlapping(
                row.as_ptr(),
                mapping.offset(y as isize * row_pitch as isize),
                width as usize * image_stride,
            );
        }
        device.unmap_memory(&memory);
        ManuallyDrop::new(memory)
    };
    let mut image_logo = ManuallyDrop::new(
        unsafe {
            device.create_image(
                kind,
                1,
                ColorFormat::SELF,
                i::Tiling::Optimal,
                i::Usage::TRANSFER_DST | i::Usage::SAMPLED,
                i::ViewCapabilities::empty(),
            )
        }
        .unwrap(),
    );
    let image_req = unsafe { device.get_image_requirements(&image_logo) };
    // The texture itself lives in device-local memory.
    let device_type = memory_types
        .iter()
        .enumerate()
        .position(|(id, memory_type)| {
            image_req.type_mask & (1 << id) != 0
                && memory_type.properties.contains(m::Properties::DEVICE_LOCAL)
        })
        .unwrap()
        .into();
    let image_memory = ManuallyDrop::new(
        unsafe { device.allocate_memory(device_type, image_req.size) }.unwrap(),
    );
    unsafe { device.bind_image_memory(&image_memory, 0, &mut image_logo) }.unwrap();
    let image_srv = ManuallyDrop::new(
        unsafe {
            device.create_image_view(
                &image_logo,
                i::ViewKind::D2,
                ColorFormat::SELF,
                Swizzle::NO,
                COLOR_RANGE.clone(),
            )
        }
        .unwrap(),
    );
    let sampler = ManuallyDrop::new(
        unsafe {
            device.create_sampler(i::SamplerInfo::new(i::Filter::Linear, i::WrapMode::Clamp))
        }
        .expect("Can't create sampler"),
    );
    // Point the descriptor set at the image view and sampler created above.
    unsafe {
        device.write_descriptor_sets(vec![
            pso::DescriptorSetWrite {
                set: &desc_set,
                binding: 0,
                array_offset: 0,
                descriptors: Some(pso::Descriptor::Image(
                    &*image_srv,
                    i::Layout::ShaderReadOnlyOptimal,
                )),
            },
            pso::DescriptorSetWrite {
                set: &desc_set,
                binding: 1,
                array_offset: 0,
                descriptors: Some(pso::Descriptor::Sampler(&*sampler)),
            },
        ]);
    }
    // copy buffer to texture
    // One-shot command buffer: transition the image to TransferDstOptimal,
    // copy the staging buffer into it, then transition to
    // ShaderReadOnlyOptimal. A fence makes the upload synchronous.
    let mut copy_fence = device.create_fence(false).expect("Could not create fence");
    unsafe {
        let mut cmd_buffer = command_pool.allocate_one(command::Level::Primary);
        cmd_buffer.begin_primary(command::CommandBufferFlags::ONE_TIME_SUBMIT);
        let image_barrier = m::Barrier::Image {
            states: (i::Access::empty(), i::Layout::Undefined)
                .. (i::Access::TRANSFER_WRITE, i::Layout::TransferDstOptimal),
            target: &*image_logo,
            families: None,
            range: COLOR_RANGE.clone(),
        };
        cmd_buffer.pipeline_barrier(
            PipelineStage::TOP_OF_PIPE .. PipelineStage::TRANSFER,
            m::Dependencies::empty(),
            &[image_barrier],
        );
        cmd_buffer.copy_buffer_to_image(
            &image_upload_buffer,
            &image_logo,
            i::Layout::TransferDstOptimal,
            &[command::BufferImageCopy {
                buffer_offset: 0,
                // buffer_width is measured in texels, hence pitch / stride.
                buffer_width: row_pitch / (image_stride as u32),
                buffer_height: height as u32,
                image_layers: i::SubresourceLayers {
                    aspects: f::Aspects::COLOR,
                    level: 0,
                    layers: 0 .. 1,
                },
                image_offset: i::Offset { x: 0, y: 0, z: 0 },
                image_extent: i::Extent {
                    width,
                    height,
                    depth: 1,
                },
            }],
        );
        let image_barrier = m::Barrier::Image {
            states: (i::Access::TRANSFER_WRITE, i::Layout::TransferDstOptimal)
                .. (i::Access::SHADER_READ, i::Layout::ShaderReadOnlyOptimal),
            target: &*image_logo,
            families: None,
            range: COLOR_RANGE.clone(),
        };
        cmd_buffer.pipeline_barrier(
            PipelineStage::TRANSFER .. PipelineStage::FRAGMENT_SHADER,
            m::Dependencies::empty(),
            &[image_barrier],
        );
        cmd_buffer.finish();
        queue_group.queues[0]
            .submit_without_semaphores(Some(&cmd_buffer), Some(&mut copy_fence));
        device
            .wait_for_fence(&copy_fence, !0)
            .expect("Can't wait for fence");
    }
    unsafe {
        device.destroy_fence(copy_fence);
    }
    // Pick a surface format: prefer any sRGB format, fall back to the first
    // reported one; `None` means the surface accepts any format.
    let (caps, formats, _present_modes) = surface.compatibility(&mut adapter.physical_device);
    println!("formats: {:?}", formats);
    let format = formats.map_or(f::Format::Rgba8Srgb, |formats| {
        formats
            .iter()
            .find(|format| format.base_format().1 == ChannelType::Srgb)
            .map(|format| *format)
            .unwrap_or(formats[0])
    });
    let swap_config = window::SwapchainConfig::from_caps(&caps, format, DIMS);
    println!("{:?}", swap_config);
    let extent = swap_config.extent;
    unsafe {
        surface
            .configure_swapchain(&device, swap_config)
            .expect("Can't configure swapchain");
    };
    let render_pass = {
        let attachment = pass::Attachment {
            format: Some(format),
            samples: 1,
            ops: pass::AttachmentOps::new(
                pass::AttachmentLoadOp::Clear,
                pass::AttachmentStoreOp::Store,
            ),
            stencil_ops: pass::AttachmentOps::DONT_CARE,
            layouts: i::Layout::Undefined .. i::Layout::Present,
        };
        let subpass = pass::SubpassDesc {
            colors: &[(0, i::Layout::ColorAttachmentOptimal)],
            depth_stencil: None,
            inputs: &[],
            resolves: &[],
            preserves: &[],
        };
        let dependency = pass::SubpassDependency {
            passes: pass::SubpassRef::External .. pass::SubpassRef::Pass(0),
            stages: PipelineStage::COLOR_ATTACHMENT_OUTPUT
                .. PipelineStage::COLOR_ATTACHMENT_OUTPUT,
            accesses: i::Access::empty()
                .. (i::Access::COLOR_ATTACHMENT_READ | i::Access::COLOR_ATTACHMENT_WRITE),
        };
        ManuallyDrop::new(
            unsafe { device.create_render_pass(&[attachment], &[subpass], &[dependency]) }
                .expect("Can't create render pass"),
        )
    };
    // Define maximum number of frames we want to be able to be "in flight" (being computed
    // simultaneously) at once
    let frames_in_flight = 3;
    // The number of the rest of the resources is based on the frames in flight.
    let mut submission_complete_semaphores = Vec::with_capacity(frames_in_flight);
    let mut submission_complete_fences = Vec::with_capacity(frames_in_flight);
    // Note: We don't really need a different command pool per frame in such a simple demo like this,
    // but in a more 'real' application, it's generally seen as optimal to have one command pool per
    // thread per frame. There is a flag that lets a command pool reset individual command buffers
    // which are created from it, but by default the whole pool (and therefore all buffers in it)
    // must be reset at once. Furthermore, it is often the case that resetting a whole pool is actually
    // faster and more efficient for the hardware than resetting individual command buffers, so it's
    // usually best to just make a command pool for each set of buffers which need to be reset at the
    // same time (each frame). In our case, each pool will only have one command buffer created from it,
    // though.
    let mut cmd_pools = Vec::with_capacity(frames_in_flight);
    let mut cmd_buffers = Vec::with_capacity(frames_in_flight);
    // Reuse the upload pool as frame 0's pool; create the rest fresh.
    cmd_pools.push(command_pool);
    for _ in 1 .. frames_in_flight {
        unsafe {
            cmd_pools.push(
                device
                    .create_command_pool(
                        queue_group.family,
                        pool::CommandPoolCreateFlags::empty(),
                    )
                    .expect("Can't create command pool"),
            );
        }
    }
    // Fences start signaled so the first wait in `render` returns
    // immediately for each frame slot.
    for i in 0 .. frames_in_flight {
        submission_complete_semaphores.push(
            device
                .create_semaphore()
                .expect("Could not create semaphore"),
        );
        submission_complete_fences.push(
            device
                .create_fence(true)
                .expect("Could not create fence"),
        );
        cmd_buffers.push(cmd_pools[i].allocate_one(command::Level::Primary));
    }
    // 8 bytes of vertex-stage push constants (range 0..8).
    let pipeline_layout = ManuallyDrop::new(
        unsafe {
            device.create_pipeline_layout(
                iter::once(&*set_layout),
                &[(pso::ShaderStageFlags::VERTEX, 0 .. 8)],
            )
        }
        .expect("Can't create pipeline layout"),
    );
    let pipeline = {
        let vs_module = {
            let spirv = pso::read_spirv(Cursor::new(&include_bytes!("data/quad.vert.spv")[..]))
                .unwrap();
            unsafe { device.create_shader_module(&spirv) }.unwrap()
        };
        let fs_module = {
            let spirv =
                pso::read_spirv(Cursor::new(&include_bytes!("./data/quad.frag.spv")[..]))
                    .unwrap();
            unsafe { device.create_shader_module(&spirv) }.unwrap()
        };
        let pipeline = {
            let (vs_entry, fs_entry) = (
                pso::EntryPoint {
                    entry: ENTRY_NAME,
                    module: &vs_module,
                    // Specialization constant 0 is set to 0.8 for the vertex
                    // stage — presumably a scale factor; see quad.vert.
                    specialization: hal::spec_const_list![0.8f32],
                },
                pso::EntryPoint {
                    entry: ENTRY_NAME,
                    module: &fs_module,
                    specialization: pso::Specialization::default(),
                },
            );
            let shader_entries = pso::GraphicsShaderSet {
                vertex: vs_entry,
                hull: None,
                domain: None,
                geometry: None,
                fragment: Some(fs_entry),
            };
            let subpass = Subpass {
                index: 0,
                main_pass: &*render_pass,
            };
            let mut pipeline_desc = pso::GraphicsPipelineDesc::new(
                shader_entries,
                hal::Primitive::TriangleList,
                pso::Rasterizer::FILL,
                &*pipeline_layout,
                subpass,
            );
            pipeline_desc.blender.targets.push(pso::ColorBlendDesc {
                mask: pso::ColorMask::ALL,
                blend: Some(pso::BlendState::ALPHA),
            });
            // One vertex buffer binding, two attributes matching `Vertex`:
            // a_Pos at offset 0, a_Uv at offset 8.
            pipeline_desc.vertex_buffers.push(pso::VertexBufferDesc {
                binding: 0,
                stride: mem::size_of::<Vertex>() as u32,
                rate: VertexInputRate::Vertex,
            });
            pipeline_desc.attributes.push(pso::AttributeDesc {
                location: 0,
                binding: 0,
                element: pso::Element {
                    format: f::Format::Rg32Sfloat,
                    offset: 0,
                },
            });
            pipeline_desc.attributes.push(pso::AttributeDesc {
                location: 1,
                binding: 0,
                element: pso::Element {
                    format: f::Format::Rg32Sfloat,
                    offset: 8,
                },
            });
            unsafe { device.create_graphics_pipeline(&pipeline_desc, None) }
        };
        // Shader modules can be destroyed once the pipeline is built.
        unsafe {
            device.destroy_shader_module(vs_module);
        }
        unsafe {
            device.destroy_shader_module(fs_module);
        }
        ManuallyDrop::new(pipeline.unwrap())
    };
    // Rendering setup
    let viewport = pso::Viewport {
        rect: pso::Rect {
            x: 0,
            y: 0,
            w: extent.width as _,
            h: extent.height as _,
        },
        depth: 0.0 .. 1.0,
    };
    Renderer {
        device,
        queue_group,
        desc_pool,
        surface,
        adapter,
        format,
        dimensions: DIMS,
        viewport,
        render_pass,
        pipeline,
        pipeline_layout,
        desc_set,
        set_layout,
        submission_complete_semaphores,
        submission_complete_fences,
        cmd_pools,
        cmd_buffers,
        vertex_buffer,
        image_upload_buffer,
        image_logo,
        image_srv,
        buffer_memory,
        image_memory,
        image_upload_memory,
        sampler,
        frames_in_flight,
        frame: 0,
    }
}
/// Rebuilds the swapchain at `self.dimensions` (called after a resize or a
/// failed acquire/present) and updates the viewport to the new extent.
fn recreate_swapchain(&mut self) {
    let (caps, formats, _present_modes) = self
        .surface
        .compatibility(&mut self.adapter.physical_device);
    // Verify that previous format still exists so we may reuse it.
    // `formats == None` means the surface accepts any format (the same
    // convention the initial format selection in `new` relies on), so the
    // old format is trivially still usable; the original asserted
    // unconditionally and would panic in that case.
    assert!(formats
        .as_ref()
        .map_or(true, |fs| fs.contains(&self.format)));
    let swap_config = window::SwapchainConfig::from_caps(&caps, self.format, self.dimensions);
    println!("{:?}", swap_config);
    let extent = swap_config.extent.to_extent();
    unsafe {
        self.surface
            .configure_swapchain(&self.device, swap_config)
            .expect("Can't create swapchain");
    }
    self.viewport.rect.w = extent.width as _;
    self.viewport.rect.h = extent.height as _;
}
/// Records and submits one frame, then presents it.
///
/// Uses a ring of fences/semaphores/command pools sized `frames_in_flight`
/// so the CPU never gets more than that many frames ahead of the GPU.
fn render(&mut self) {
    // Acquire the next swapchain image; if that fails (e.g. the surface is
    // out of date after a resize) rebuild the swapchain and skip this frame.
    let surface_image = unsafe {
        match self.surface.acquire_image(!0) {
            Ok((image, _)) => image,
            Err(_) => {
                self.recreate_swapchain();
                return;
            }
        }
    };
    // A framebuffer is created fresh around the acquired image each frame
    // and destroyed again after present (see below).
    let framebuffer = unsafe {
        self.device
            .create_framebuffer(
                &self.render_pass,
                iter::once(surface_image.borrow()),
                i::Extent {
                    width: self.dimensions.width,
                    height: self.dimensions.height,
                    depth: 1,
                },
            )
            .unwrap()
    };
    // Compute index into our resource ring buffers based on the frame number
    // and number of frames in flight. Pay close attention to where this index is needed
    // versus when the swapchain image index we got from acquire_image is needed.
    let frame_idx = self.frame as usize % self.frames_in_flight;
    // Wait for the fence of the previous submission of this frame and reset it; ensures we are
    // submitting only up to maximum number of frames_in_flight if we are submitting faster than
    // the gpu can keep up with. This would also guarantee that any resources which need to be
    // updated with a CPU->GPU data copy are not in use by the GPU, so we can perform those updates.
    // In this case there are none to be done, however.
    unsafe {
        let fence = &self.submission_complete_fences[frame_idx];
        self.device
            .wait_for_fence(fence, !0)
            .expect("Failed to wait for fence");
        self.device
            .reset_fence(fence)
            .expect("Failed to reset fence");
        // Reset the whole pool (one command buffer per pool in this demo).
        self.cmd_pools[frame_idx].reset(false);
    }
    // Rendering
    let cmd_buffer = &mut self.cmd_buffers[frame_idx];
    unsafe {
        cmd_buffer.begin_primary(command::CommandBufferFlags::ONE_TIME_SUBMIT);
        // Dynamic state: viewport/scissor follow the current surface size.
        cmd_buffer.set_viewports(0, &[self.viewport.clone()]);
        cmd_buffer.set_scissors(0, &[self.viewport.rect]);
        cmd_buffer.bind_graphics_pipeline(&self.pipeline);
        cmd_buffer.bind_vertex_buffers(0, iter::once((&*self.vertex_buffer, 0)));
        cmd_buffer.bind_graphics_descriptor_sets(
            &self.pipeline_layout,
            0,
            iter::once(&self.desc_set),
            &[],
        );
        cmd_buffer.begin_render_pass(
            &self.render_pass,
            &framebuffer,
            self.viewport.rect,
            // Clear to light gray.
            &[command::ClearValue {
                color: command::ClearColor {
                    float32: [0.8, 0.8, 0.8, 1.0],
                },
            }],
            command::SubpassContents::Inline,
        );
        // Draw the 6 quad vertices, single instance.
        cmd_buffer.draw(0 .. 6, 0 .. 1);
        cmd_buffer.end_render_pass();
        cmd_buffer.finish();
        // Submit; signal this frame's semaphore (for present) and fence
        // (for the CPU-side wait at the top of the next use of this slot).
        let submission = Submission {
            command_buffers: iter::once(&*cmd_buffer),
            wait_semaphores: None,
            signal_semaphores: iter::once(&self.submission_complete_semaphores[frame_idx]),
        };
        self.queue_group.queues[0].submit(
            submission,
            Some(&self.submission_complete_fences[frame_idx]),
        );
        // present frame
        let result = self.queue_group.queues[0].present_surface(
            &mut self.surface,
            surface_image,
            Some(&self.submission_complete_semaphores[frame_idx]),
        );
        self.device.destroy_framebuffer(framebuffer);
        // A failed present means the swapchain needs rebuilding.
        if result.is_err() {
            self.recreate_swapchain();
        }
    }
    // Increment our frame
    self.frame += 1;
}
}
impl<B> Drop for Renderer<B>
where
    B: hal::Backend,
{
    /// Explicitly destroys every GPU resource through the device.
    ///
    /// Resources are stored in `ManuallyDrop`; each is moved out with an
    /// unsafe `ptr::read` and handed to the matching `destroy_*`/`free_memory`
    /// call. `wait_idle` first ensures the GPU is no longer using any of them.
    fn drop(&mut self) {
        self.device.wait_idle().unwrap();
        unsafe {
            // TODO: When ManuallyDrop::take (soon to be renamed to ManuallyDrop::read) is stabilized we should use that instead.
            self.device
                .destroy_descriptor_pool(ManuallyDrop::into_inner(ptr::read(&self.desc_pool)));
            self.device
                .destroy_descriptor_set_layout(ManuallyDrop::into_inner(ptr::read(
                    &self.set_layout,
                )));
            self.device
                .destroy_buffer(ManuallyDrop::into_inner(ptr::read(&self.vertex_buffer)));
            self.device
                .destroy_buffer(ManuallyDrop::into_inner(ptr::read(
                    &self.image_upload_buffer,
                )));
            self.device
                .destroy_image(ManuallyDrop::into_inner(ptr::read(&self.image_logo)));
            self.device
                .destroy_image_view(ManuallyDrop::into_inner(ptr::read(&self.image_srv)));
            self.device
                .destroy_sampler(ManuallyDrop::into_inner(ptr::read(&self.sampler)));
            for p in self.cmd_pools.drain(..) {
                self.device.destroy_command_pool(p);
            }
            for s in self.submission_complete_semaphores.drain(..) {
                self.device.destroy_semaphore(s);
            }
            for f in self.submission_complete_fences.drain(..) {
                self.device.destroy_fence(f);
            }
            self.device
                .destroy_render_pass(ManuallyDrop::into_inner(ptr::read(&self.render_pass)));
            // Memory is freed after the buffers/images bound to it are gone.
            self.device
                .free_memory(ManuallyDrop::into_inner(ptr::read(&self.buffer_memory)));
            self.device
                .free_memory(ManuallyDrop::into_inner(ptr::read(&self.image_memory)));
            self.device.free_memory(ManuallyDrop::into_inner(ptr::read(
                &self.image_upload_memory,
            )));
            self.device
                .destroy_graphics_pipeline(ManuallyDrop::into_inner(ptr::read(&self.pipeline)));
            self.device
                .destroy_pipeline_layout(ManuallyDrop::into_inner(ptr::read(
                    &self.pipeline_layout,
                )));
        }
        println!("DROPPED!");
    }
}
// Fallback entry point compiled when no backend feature is enabled, so the
// example still builds and prints a helpful message instead of failing.
#[cfg(not(any(
    feature = "vulkan",
    feature = "dx11",
    feature = "dx12",
    feature = "metal",
    feature = "gl",
    feature = "wgl"
)))]
fn main() {
    println!("You need to enable the native API feature (vulkan/metal/dx11/dx12/gl/wgl) in order to run the example");
}
You can’t perform that action at this time.