In wgpu, how do I reset a buffer after creating it with device.create_buffer_init?

and how do I send multiple objects to render to the gpu at once? Beginner at wgpu.

I generally just:

  1. create a new buffer (gpu side)
  2. overwrite the handle (cpu side)

Not that simple in opengl, odd. Do you have an answer to my second?

Buffers and Indices | Learn Wgpu <-- repeat the renderpass.draw(...) call (and setting the buffer before the call)

You sure? That sounds like "going to the factory to get a box stamped" every frame, multiple times

Yeah. If you want to draw N sets of objects:

  • create N sets of buffers
  • for each set of buffers: set the buffers, make a render_pass.draw()

If you want to update one of the N sets of objects:

  • create new buffers on the GPU
  • overwrite the local cpu handle

What about renderpass.draw(vertices,instances)?

also, how do I render using tokio::spawn?

I am having difficulty trying to build a mental model of your level of wgpu knowledge / what code you have already written. Sorry I can't be more helpful.

A better question: how do I spawn a tokio thread that just sets the renderpass stuff and draws the object? Because synchronous rendering is a no-no

Here's my code:

use anyhow::Result;
use bytemuck::{cast_slice, Pod, Zeroable};
use std::borrow::Cow;
use tokio::spawn;
use wgpu::{
    include_wgsl,
    util::{BufferInitDescriptor, DeviceExt},
    vertex_attr_array, Buffer, BufferDescriptor, BufferUsages, Device, PipelineLayout, Queue,
    RenderPass, RenderPipeline, ShaderModule, SurfaceCapabilities, SurfaceConfiguration,
    TextureFormat, VertexBufferLayout, VertexStepMode,
};
use winit::{
    dpi::PhysicalSize,
    event::{Event, WindowEvent},
    event_loop::EventLoop,
    window::{Window, WindowBuilder},
};
/// Program entry point.
///
/// Builds a winit window, the wgpu instance/adapter/device, and a render
/// pipeline that takes no vertex buffers, then runs the event loop and draws a
/// hard-coded 3-vertex primitive on every redraw.
#[tokio::main]
async fn main() -> Result<()> {
    let event_loop = EventLoop::new().unwrap();
    let window = WindowBuilder::new()
        .with_title("Vagous")
        .build(&event_loop)
        .unwrap();
    // Initial framebuffer size, used once for the initial surface config.
    // Resize events below update `config` directly instead of this variable.
    let mut size = window.inner_size();

    let instance = wgpu::Instance::default();

    let surface = instance.create_surface(&window).unwrap();
    let adapter = instance
        .request_adapter(&wgpu::RequestAdapterOptions {
            power_preference: wgpu::PowerPreference::default(),
            force_fallback_adapter: false,
            // Request an adapter which can render to our surface
            compatible_surface: Some(&surface),
        })
        .await
        .expect("Failed to find an appropriate adapter");

    // Create the logical device and command queue
    let (device, queue) = adapter
        .request_device(
            &wgpu::DeviceDescriptor {
                label: None,
                required_features: wgpu::Features::empty(),
                // Make sure we use the texture resolution limits from the adapter, so we can support images the size of the swapchain.
                required_limits: wgpu::Limits::default(),
            },
            None,
        )
        .await
        .expect("Failed to create device");

    // Load the shaders from disk
    // NOTE(review): the backslash in the raw string makes this path
    // Windows-only; a forward slash works on all platforms.
    let shader = device.create_shader_module(include_wgsl!(r"shaders\lighting.wgsl"));

    // No bind groups or push constants yet: the shader takes no resources.
    let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
        label: None,
        bind_group_layouts: &[],
        push_constant_ranges: &[],
    });

    let swapchain_capabilities = surface.get_capabilities(&adapter);
    // The first supported format is the surface's preferred one.
    let swapchain_format = swapchain_capabilities.formats[0];

    // `buffers: &[]` means the vertex shader must synthesize positions itself
    // (e.g. from @builtin(vertex_index)); no vertex buffers are ever bound.
    let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: None,
        layout: Some(&pipeline_layout),
        vertex: wgpu::VertexState {
            module: &shader,
            entry_point: "vs_main",
            buffers: &[],
        },
        fragment: Some(wgpu::FragmentState {
            module: &shader,
            entry_point: "fs_main",
            targets: &[Some(swapchain_format.into())],
        }),
        primitive: wgpu::PrimitiveState::default(),
        depth_stencil: None,
        multisample: wgpu::MultisampleState::default(),
        multiview: None,
    });

    let mut config = surface
        .get_default_config(&adapter, size.width, size.height)
        .unwrap();
    surface.configure(&device, &config);

    let window = &window;
    event_loop
        .run(move |event, target| {
            // Have the closure take ownership of the resources.
            // `event_loop.run` never returns, therefore we must do this to ensure
            // the resources are properly cleaned up.
            let _ = (&instance, &adapter, &shader, &pipeline_layout);

            if let Event::WindowEvent {
                window_id: _,
                event,
            } = event
            {
                match event {
                    WindowEvent::Resized(new_size) => {
                        // Reconfigure the surface with the new size
                        config.width = new_size.width;
                        config.height = new_size.height;
                        surface.configure(&device, &config);
                        // On macos the window needs to be redrawn manually after resizing
                        window.request_redraw();
                    }
                    WindowEvent::RedrawRequested => {
                        let frame = surface
                            .get_current_texture()
                            .expect("Failed to acquire next swap chain texture");
                        let view = frame
                            .texture
                            .create_view(&wgpu::TextureViewDescriptor::default());
                        let mut encoder =
                            device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
                                label: None,
                            });
                        // Scope the render pass so `rpass` (which borrows
                        // `encoder`) is dropped before `encoder.finish()`.
                        {
                            let mut rpass =
                                encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                    label: None,
                                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                        view: &view,
                                        resolve_target: None,
                                        ops: wgpu::Operations {
                                            load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                                            store: wgpu::StoreOp::Store,
                                        },
                                    })],
                                    depth_stencil_attachment: None,
                                    timestamp_writes: None,
                                    occlusion_query_set: None,
                                });
                            rpass.set_pipeline(&render_pipeline);
                            // Three vertices, one instance: a single triangle.
                            rpass.draw(0..3, 0..1);
                        }

                        queue.submit(Some(encoder.finish()));
                        frame.present();
                    }
                    WindowEvent::CloseRequested => target.exit(),
                    _ => {}
                };
            }
        })
        .unwrap();
    Ok(())
}
/// Top-level renderer state: everything needed to configure the surface and
/// submit GPU work, plus the list of objects to draw.
///
/// NOTE(review): the `'a` lifetime exists only because `Surface` and `Object`
/// borrow; structs holding long-lived state are usually easier to work with
/// when they own their data (e.g. `Surface<'static>` + `Arc`).
struct Vagous<'a> {
    instance: wgpu::Instance,
    adapter: wgpu::Adapter,
    surface: wgpu::Surface<'a>,
    device: wgpu::Device,
    queue: Queue,
    // Window inner size captured at construction time; not refreshed on resize.
    size: PhysicalSize<u32>,
    config: SurfaceConfiguration,
    pipeline_layout: PipelineLayout,
    swapchain_format: TextureFormat,
    // Objects to draw each frame; starts empty.
    objects: Vec<Object<'a>>,
}
impl<'a> Vagous<'a> {
    /// Initializes the wgpu instance, surface, adapter, device and surface
    /// configuration for `window`. The surface borrows `window`, which is
    /// where the struct's `'a` lifetime comes from.
    async fn new(window: &'a Window) -> Self {
        let mut size = window.inner_size();

        let instance = wgpu::Instance::default();

        let surface = instance.create_surface(window).unwrap();
        let adapter = instance
            .request_adapter(&wgpu::RequestAdapterOptions {
                power_preference: wgpu::PowerPreference::default(),
                force_fallback_adapter: false,
                compatible_surface: Some(&surface),
            })
            .await
            .expect("Failed to find an appropriate adapter");
        let (device, queue) = adapter
            .request_device(
                &wgpu::DeviceDescriptor {
                    label: None,
                    required_features: wgpu::Features::empty(),
                    required_limits: wgpu::Limits::default(),
                },
                None,
            )
            .await
            .expect("Failed to create device");
        // Empty layout: no bind groups or push constants are used yet.
        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: None,
            bind_group_layouts: &[],
            push_constant_ranges: &[],
        });
        // First supported format is the surface's preferred one.
        let swapchain_format = surface.get_capabilities(&adapter).formats[0];
        let mut config = surface
            .get_default_config(&adapter, size.width, size.height)
            .unwrap();
        surface.configure(&device, &config);
        Self {
            instance,
            adapter,
            surface,
            device,
            queue,
            size,
            config,
            pipeline_layout,
            swapchain_format,
            objects: Vec::new(),
        }
    }
    /// Renders a single frame.
    ///
    /// NOTE(review): this appears to be work in progress — the `window` and
    /// `eloop` parameters are unused (no event loop is run, so only one frame
    /// is ever produced), and the object loop body is empty, so nothing is
    /// drawn besides the clear color.
    async fn run(&self, window: &Window, eloop: EventLoop<()>) {
        let frame = self
            .surface
            .get_current_texture()
            .expect("Failed to acquire next swap chain texture");
        let view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder = self
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        // Scope the pass so `rpass` (borrowing `encoder`) drops before finish().
        {
            let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: None,
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: &view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                        store: wgpu::StoreOp::Store,
                    },
                })],
                depth_stencil_attachment: None,
                timestamp_writes: None,
                occlusion_query_set: None,
            });
            // TODO: record each object's draw commands here; body is empty.
            for object in &self.objects {}
        }

        self.queue.submit(Some(encoder.finish()));
        frame.present();
    }
}
/// A single 2D vertex as uploaded to the GPU.
///
/// `#[repr(C)]` plus the bytemuck derives guarantee a stable, pod byte layout
/// so a `&[Vertex]` can be reinterpreted as raw bytes with `cast_slice`.
#[repr(C)]
#[derive(Clone, Copy, Debug, Pod, Zeroable)]
struct Vertex {
    position: [f32; 2],
}

impl Vertex {
    /// Builds a vertex at clip-space coordinates (`x`, `y`).
    fn new(x: f32, y: f32) -> Self {
        let position = [x, y];
        Vertex { position }
    }
}
/// An axis-aligned quad with its own GPU buffers.
///
/// NOTE(review): storing `&'a RenderPipeline` and `&'a Device` ties every
/// object's lifetime to borrows from the main thread; shared ownership
/// (`Arc`) avoids that, as discussed later in this thread.
struct Object<'a> {
    render_pipeline: &'a RenderPipeline,
    device: &'a Device,
    pub width: f32,
    pub height: f32,
    // Centre position of the quad.
    pub x: f32,
    pub y: f32,
    // Stored but not yet used by any shader or draw call in this listing.
    pub depth: f32,
    // Cached width/2 and height/2, kept in sync by resize().
    halfwidth: f32,
    halfheight: f32,
    indexbuf: Buffer,
    vertbuf: Buffer,
}
impl<'a> Object<'a> {
    async fn new(
        render_pipeline: &'a RenderPipeline,
        device: &'a Device,
        x: f32,
        y: f32,
        depth: f32,
        width: f32,
        height: f32,
    ) -> Self {
        let (w, h) = (width * 0.5, height * 0.5);
        Self {
            render_pipeline,
            device,
            width,
            height,
            x,
            y,
            halfwidth: w,
            halfheight: h,
            depth,
            vertbuf: device.create_buffer_init(&BufferInitDescriptor {
                label: None,
                contents: cast_slice(&[
                    Vertex::new(x - w, y + h),
                    Vertex::new(x + w, y + h),
                    Vertex::new(x + w, y - h),
                    Vertex::new(x - w, y - h),
                ]),
                usage: BufferUsages::VERTEX,
            }),
            indexbuf: device.create_buffer_init(&BufferInitDescriptor {
                label: None,
                contents: cast_slice(&[0, 1, 2, 2, 1, 3]),
                usage: BufferUsages::INDEX,
            }),
        }
    }
    async fn render(&self, rpass: &mut RenderPass<'a>) {
        rpass.set_pipeline(self.render_pipeline);
        rpass.set_vertex_buffer(0, self.vertbuf.slice(..));
    }
    async fn resize(&mut self, sizex: f32, sizey: f32) {
        self.width = sizex;
        self.height = sizey;
        (self.halfwidth, self.halfheight) = (sizex * 0.5, sizey * 0.5);
        self.vertbuf = self.device.create_buffer_init(&BufferInitDescriptor {
            label: None,
            contents: cast_slice(&[
                Vertex::new(self.x - self.halfwidth, self.y + self.halfheight),
                Vertex::new(self.x + self.halfwidth, self.y + self.halfheight),
                Vertex::new(self.x + self.halfwidth, self.y - self.halfheight),
                Vertex::new(self.x - self.halfwidth, self.y - self.halfheight),
            ]),
            usage: BufferUsages::VERTEX,
        });
    }
    async fn reposition(&mut self, device: &'a Device, posx: f32, posy: f32) {
        self.x = posx;
        self.y = posy;
        self.vertbuf = device.create_buffer_init(&BufferInitDescriptor {
            label: None,
            contents: cast_slice(&[
                Vertex::new(self.x - self.halfwidth, self.y + self.halfheight),
                Vertex::new(self.x + self.halfwidth, self.y + self.halfheight),
                Vertex::new(self.x + self.halfwidth, self.y - self.halfheight),
                Vertex::new(self.x - self.halfwidth, self.y - self.halfheight),
            ]),
            usage: BufferUsages::VERTEX,
        })
    }
    async fn default(render_pipeline: &'a RenderPipeline, device: &'a Device) -> Self {
        Self {
            render_pipeline,
            device,
            width: 1.0,
            height: 1.0,
            x: 0.0,
            y: 0.0,
            depth: 1.0,
            vertbuf: device.create_buffer_init(&BufferInitDescriptor {
                label: None,
                contents: cast_slice(&[
                    Vertex::new(-0.5, 0.5),
                    Vertex::new(0.5, 0.5),
                    Vertex::new(0.5, -0.5),
                    Vertex::new(-0.5, -0.5),
                ]),
                usage: BufferUsages::VERTEX,
            }),
            indexbuf: device.create_buffer_init(&BufferInitDescriptor {
                label: None,
                contents: &[0, 1, 2, 3],
                usage: BufferUsages::INDEX,
            }),
            halfwidth: 0.5,
            halfheight: 0.5,
        }
    }
}
/// Builds a render pipeline for `shader` that consumes one vertex buffer of
/// `Vertex` (a single `vec2<f32>` at shader location 0) and writes to a color
/// target of `swapchain_format`.
///
/// NOTE(review): nothing here awaits, so `async` is unnecessary.
async fn get_render_pipeline(
    device: &Device,
    shader: &ShaderModule,
    swapchain_format: TextureFormat,
    pipeline_layout: &PipelineLayout,
) -> RenderPipeline {
    device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: None,
        layout: Some(pipeline_layout),
        vertex: wgpu::VertexState {
            module: shader,
            entry_point: "vs_main",
            buffers: &[VertexBufferLayout {
                // 8 bytes = size_of::<[f32; 2]>() — matches `Vertex`.
                array_stride: 8,
                step_mode: VertexStepMode::Vertex,
                attributes: &vertex_attr_array![0 => Float32x2],
            }],
        },
        fragment: Some(wgpu::FragmentState {
            module: shader,
            entry_point: "fs_main",
            targets: &[Some(swapchain_format.into())],
        }),
        primitive: wgpu::PrimitiveState::default(),
        depth_stencil: None,
        multisample: wgpu::MultisampleState::default(),
        multiview: None,
    })
}

That, or renderpass is meant to be in a render for loop inside the object iteration

If you want to render from another thread, using tokio, you're going to need to structure your code so it hasn't got lifetimes on your structs.

First, you want to replace wgpu::Surface<'a> with wgpu::Surface<'static>. The way you do this is to put your winit::window::Window inside a std::sync::Arc, which makes it cloneable.

  1. Change the initialization like this:

    let window = Arc::new(WindowBuilder::new()
        .with_title("Vagous")
        .build(&event_loop)
        .unwrap());
    
  2. When you create the surface, clone the Arc:

    let surface = instance.create_surface(window.clone()).unwrap();
    
  3. Get rid of the line let window = &window; as it is not going to help anything.

  4. Change your struct declaration so the surface's lifetime is 'static:

    struct Vagous<'a> {
        surface: wgpu::Surface<'static>,
        ...
    }
    

Now the remaining issue is the lifetime on Object. I would recommend that your Objects should not store the pipeline or device inside them — rather, the rendering code should provide those from its own storage when needed. But if necessary, you can again use Arc. Once you've done either of those, you can remove the <'a> parameter from your Vagous and Object structs. That way it will be possible to choose to move the objects to another thread, or share them using more Arcs, instead of them being borrowed from the main thread.

The Object struct uses a reference to it, are you sure I should remove them?

Pass the reference when you call your rendering functions. Don't store it in your struct.

As a general rule, most references are temporary, and structs should not contain references unless they are also temporary.

This rule has exceptions, but it will keep you out of more trouble than it creates.

Gotcha. That's what I was thinking.

Another issue, when trying to set the config size to the new size when the window is resized, says that it's under a & reference so it can't be set.

And something I forgot, each object may use different shaders, so how would I give each object their own shader, better if I don't have to set the same render pipeline twice, without having to use a vec of render pipelines?

The code you previously posted doesn't contain that problem, so you must have changed something else. Post the full text of the error (as produced by cargo check, not an IDE) and the code that it refers to.

The usual problem is: when you want to mutate something, you need to make sure that you're not using any & along the path to getting to that thing, only &mut. But sometimes the &s can be sneaky, so post the full error.

each object may use different shaders, so how would I give each object their own shader, better if I don't have to set the same render pipeline twice, without having to use a vec of render pipelines?

In that case you might decide to put your pipelines in Arcs so that each object can own a cloned Arc<wgpu::RenderPipeline>.

But you'll also probably want to store more information than just a pipeline — you might also want to select a bind group containing textures, for example. The concept of all the data needed to render an object except for its mesh and position is often known as a “material”. So you might put the RenderPipeline along with other things in a struct Material you define, and store an Arc<Material> in each Object.

Note that “serious” rendering — that is, lots of objects on screen — generally ends up wanting to group objects at least by pipeline, because drawing lots of objects with the same pipeline (even better, as instances in the same draw call) can be more efficient than switching for each object. But don't worry about that for now while you're just getting started and still learning how to structure your Rust as well as your GPU operations.

It's a 2d engine, so I don't know how to add a texture to an object currently.

use anyhow::Result;
use bytemuck::{cast_slice, Pod, Zeroable};
use std::{borrow::Cow, sync::Arc};
use tokio::spawn;
use wgpu::{
    include_wgsl,
    util::{BufferInitDescriptor, DeviceExt},
    vertex_attr_array, Buffer, BufferDescriptor, BufferUsages, Device, PipelineLayout, Queue,
    RenderPass, RenderPipeline, ShaderModule, SurfaceCapabilities, SurfaceConfiguration,
    TextureFormat, VertexBufferLayout, VertexStepMode,
};
use winit::{
    dpi::PhysicalSize,
    event::{Event, WindowEvent},
    event_loop::EventLoop,
    window::{Window, WindowBuilder},
};
/// Program entry point for the refactored (lifetime-free) engine.
///
/// Creates the window inside an `Arc` so the wgpu surface can be `'static`,
/// builds one pipeline, spawns one object, then runs the event loop.
#[tokio::main]
async fn main() -> Result<()> {
    let event_loop = EventLoop::new().unwrap();
    let window = Arc::new(
        WindowBuilder::new()
            .with_title("Vagous")
            .build(&event_loop)
            .unwrap(),
    );
    // Clone the Arc into Vagous::new; the original `window` was previously
    // moved here and then used again below, which did not compile.
    let mut vagous = Vagous::new(window.clone()).await;
    let lighting = get_render_pipeline(
        &vagous.device,
        &vagous
            .device
            .create_shader_module(include_wgsl!(r"shaders\lighting.wgsl")),
        vagous.swapchain_format,
        &vagous.pipeline_layout,
    )
    .await;
    // Wrap in an Arc so multiple objects can share the same pipeline;
    // create_object takes Arc<RenderPipeline>, not RenderPipeline.
    let lighting = Arc::new(lighting);
    // Example placement: a 1x1 quad centred at the origin.
    // (The original line referenced undefined variables and lacked a ';'.)
    vagous.create_object(lighting, 0.0, 0.0, 0.0, 1.0, 1.0).await;
    // `run` is async and never returns until the window closes.
    vagous.run(&window, event_loop).await;
    Ok(())
}
/// Top-level renderer state, now free of borrowed lifetimes: the surface is
/// `'static` because the window lives in an `Arc` held by the surface.
struct Vagous {
    surface: wgpu::Surface<'static>,
    device: wgpu::Device,
    queue: Queue,
    // Window inner size captured at construction time; not refreshed later.
    size: PhysicalSize<u32>,
    config: SurfaceConfiguration,
    pipeline_layout: PipelineLayout,
    swapchain_format: TextureFormat,
    // Objects drawn each frame, in insertion order.
    objects: Vec<Object>,
}
impl Vagous {
    /// Initializes the wgpu instance, surface, adapter, device and surface
    /// configuration for `window`.
    ///
    /// Taking `Arc<Window>` (and cloning it into `create_surface`) is what
    /// lets the surface be `Surface<'static>`.
    async fn new(window: Arc<Window>) -> Self {
        // `mut` removed from the locals below: neither is reassigned here.
        let size = window.inner_size();

        let instance = wgpu::Instance::default();

        let surface = instance.create_surface(window.clone()).unwrap();
        let adapter = instance
            .request_adapter(&wgpu::RequestAdapterOptions {
                power_preference: wgpu::PowerPreference::default(),
                force_fallback_adapter: false,
                compatible_surface: Some(&surface),
            })
            .await
            .expect("Failed to find an appropriate adapter");
        let (device, queue) = adapter
            .request_device(
                &wgpu::DeviceDescriptor {
                    label: None,
                    required_features: wgpu::Features::empty(),
                    required_limits: wgpu::Limits::default(),
                },
                None,
            )
            .await
            .expect("Failed to create device");
        // Empty layout: no bind groups or push constants are used yet.
        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: None,
            bind_group_layouts: &[],
            push_constant_ranges: &[],
        });
        let swapchain_format = surface.get_capabilities(&adapter).formats[0];
        let config = surface
            .get_default_config(&adapter, size.width, size.height)
            .unwrap();
        surface.configure(&device, &config);
        Self {
            surface,
            device,
            queue,
            size,
            config,
            pipeline_layout,
            swapchain_format,
            objects: Vec::new(),
        }
    }
    /// Adds a default unit quad using `shader`.
    async fn create_object_default(&mut self, shader: Arc<RenderPipeline>) {
        self.objects
            .push(Object::default(&self.device, shader).await);
    }
    /// Adds a `width` x `height` quad centred on (`x`, `y`) using `shader`.
    async fn create_object(
        &mut self,
        shader: Arc<RenderPipeline>,
        x: f32,
        y: f32,
        depth: f32,
        width: f32,
        height: f32,
    ) {
        self.objects
            .push(Object::new(&self.device, shader, x, y, depth, width, height).await);
    }
    /// Runs the event loop until the window is closed, redrawing all objects
    /// on demand.
    ///
    /// Takes `&mut self` (was `&self`): resize events assign to
    /// `self.config.width`/`height`, which cannot be done through a shared
    /// reference — that was the "cannot assign ... behind a `&` reference"
    /// error.
    async fn run(&mut self, window: &Window, eloop: EventLoop<()>) {
        eloop
            .run(move |event, target| {
                if let Event::WindowEvent {
                    window_id: _,
                    event,
                } = event
                {
                    match event {
                        WindowEvent::Resized(new_size) => {
                            self.config.width = new_size.width;
                            self.config.height = new_size.height;
                            self.surface.configure(&self.device, &self.config);
                            // On macOS the window must be redrawn manually
                            // after a resize.
                            window.request_redraw();
                        }
                        WindowEvent::RedrawRequested => {
                            let frame = self
                                .surface
                                .get_current_texture()
                                .expect("Failed to acquire next swap chain texture");
                            let view = frame
                                .texture
                                .create_view(&wgpu::TextureViewDescriptor::default());
                            let mut encoder = self.device.create_command_encoder(
                                &wgpu::CommandEncoderDescriptor { label: None },
                            );
                            // One render pass for the whole frame. The old
                            // code began a new pass per object with
                            // LoadOp::Clear, so each object's pass erased
                            // every previously drawn object and only the last
                            // one was visible.
                            {
                                let mut rpass =
                                    encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                        label: None,
                                        color_attachments: &[Some(
                                            wgpu::RenderPassColorAttachment {
                                                view: &view,
                                                resolve_target: None,
                                                ops: wgpu::Operations {
                                                    load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                                                    store: wgpu::StoreOp::Store,
                                                },
                                            },
                                        )],
                                        depth_stencil_attachment: None,
                                        timestamp_writes: None,
                                        occlusion_query_set: None,
                                    });
                                for object in &self.objects {
                                    // Redundant pipeline switches for objects
                                    // sharing a pipeline are acceptable for
                                    // now; group by pipeline later if needed.
                                    rpass.set_pipeline(&object.shader);
                                    rpass.set_vertex_buffer(0, object.vertbuf.slice(..));
                                    rpass.set_index_buffer(
                                        object.indexbuf.slice(..),
                                        wgpu::IndexFormat::Uint16,
                                    );
                                    rpass.draw_indexed(0..6, 0, 0..1);
                                }
                            }
                            self.queue.submit(Some(encoder.finish()));
                            frame.present();
                        }
                        WindowEvent::CloseRequested => target.exit(),
                        _ => {}
                    };
                }
            })
            .unwrap()
    }
}
/// A single 2D vertex as uploaded to the GPU.
///
/// `#[repr(C)]` plus the bytemuck derives guarantee a stable, pod byte layout
/// so a `&[Vertex]` can be reinterpreted as raw bytes with `cast_slice`.
#[repr(C)]
#[derive(Clone, Copy, Debug, Pod, Zeroable)]
struct Vertex {
    position: [f32; 2],
}

impl Vertex {
    /// Builds a vertex at clip-space coordinates (`x`, `y`).
    fn new(x: f32, y: f32) -> Self {
        let position = [x, y];
        Vertex { position }
    }
}
/// An axis-aligned quad with its own GPU buffers and a shared pipeline.
///
/// The pipeline is shared via `Arc`, so objects are `'static` and can be
/// moved between threads or stored without borrowing from the main thread.
struct Object {
    // Named `shader` but actually a full render pipeline, shared across objects.
    pub shader: Arc<RenderPipeline>,
    pub width: f32,
    pub height: f32,
    // Centre position of the quad.
    pub x: f32,
    pub y: f32,
    // Stored but not yet used by any shader or draw call in this listing.
    pub depth: f32,
    // Cached width/2 and height/2, kept in sync by resize().
    halfwidth: f32,
    halfheight: f32,
    indexbuf: Buffer,
    vertbuf: Buffer,
}
impl Object {
    async fn new(
        device: &Device,
        shader: Arc<RenderPipeline>,
        x: f32,
        y: f32,
        depth: f32,
        width: f32,
        height: f32,
    ) -> Self {
        let (w, h) = (width * 0.5, height * 0.5);
        Self {
            shader,
            width,
            height,
            x,
            y,
            halfwidth: w,
            halfheight: h,
            depth,
            vertbuf: device.create_buffer_init(&BufferInitDescriptor {
                label: None,
                contents: cast_slice(&[
                    Vertex::new(x - w, y + h),
                    Vertex::new(x + w, y + h),
                    Vertex::new(x + w, y - h),
                    Vertex::new(x - w, y - h),
                ]),
                usage: BufferUsages::VERTEX,
            }),
            indexbuf: device.create_buffer_init(&BufferInitDescriptor {
                label: None,
                contents: cast_slice(&[0, 1, 2, 2, 1, 3]),
                usage: BufferUsages::INDEX,
            }),
        }
    }
    async fn resize(&mut self, sizex: f32, sizey: f32, device: &Device) {
        self.width = sizex;
        self.height = sizey;
        (self.halfwidth, self.halfheight) = (sizex * 0.5, sizey * 0.5);
        self.vertbuf = device.create_buffer_init(&BufferInitDescriptor {
            label: None,
            contents: cast_slice(&[
                Vertex::new(self.x - self.halfwidth, self.y + self.halfheight),
                Vertex::new(self.x + self.halfwidth, self.y + self.halfheight),
                Vertex::new(self.x + self.halfwidth, self.y - self.halfheight),
                Vertex::new(self.x - self.halfwidth, self.y - self.halfheight),
            ]),
            usage: BufferUsages::VERTEX,
        });
    }
    async fn reposition(&mut self, device: &Device, posx: f32, posy: f32) {
        self.x = posx;
        self.y = posy;
        self.vertbuf = device.create_buffer_init(&BufferInitDescriptor {
            label: None,
            contents: cast_slice(&[
                Vertex::new(self.x - self.halfwidth, self.y + self.halfheight),
                Vertex::new(self.x + self.halfwidth, self.y + self.halfheight),
                Vertex::new(self.x + self.halfwidth, self.y - self.halfheight),
                Vertex::new(self.x - self.halfwidth, self.y - self.halfheight),
            ]),
            usage: BufferUsages::VERTEX,
        })
    }
    async fn default(device: &Device,shader: Arc<RenderPipeline>) -> Self {
        Self {
            shader,
            width: 1.0,
            height: 1.0,
            x: 0.0,
            y: 0.0,
            depth: 1.0,
            vertbuf: device.create_buffer_init(&BufferInitDescriptor {
                label: None,
                contents: cast_slice(&[
                    Vertex::new(-0.5, 0.5),
                    Vertex::new(0.5, 0.5),
                    Vertex::new(0.5, -0.5),
                    Vertex::new(-0.5, -0.5),
                ]),
                usage: BufferUsages::VERTEX,
            }),
            indexbuf: device.create_buffer_init(&BufferInitDescriptor {
                label: None,
                contents: &[0, 1, 2, 3],
                usage: BufferUsages::INDEX,
            }),
            halfwidth: 0.5,
            halfheight: 0.5,
        }
    }
}
/// Builds a render pipeline for `shader` that consumes one vertex buffer of
/// `Vertex` (a single `vec2<f32>` at shader location 0) and writes to a color
/// target of `swapchain_format`.
async fn get_render_pipeline(
    device: &Device,
    shader: &ShaderModule,
    swapchain_format: TextureFormat,
    pipeline_layout: &PipelineLayout,
) -> RenderPipeline {
    // Attribute layout matching `Vertex`: one vec2<f32> at location 0.
    let attributes = vertex_attr_array![0 => Float32x2];
    let vertex_layout = VertexBufferLayout {
        // 8 bytes = two f32 components per vertex.
        array_stride: 8,
        step_mode: VertexStepMode::Vertex,
        attributes: &attributes,
    };
    let vertex = wgpu::VertexState {
        module: shader,
        entry_point: "vs_main",
        buffers: &[vertex_layout],
    };
    let fragment = Some(wgpu::FragmentState {
        module: shader,
        entry_point: "fs_main",
        targets: &[Some(swapchain_format.into())],
    });
    let descriptor = wgpu::RenderPipelineDescriptor {
        label: None,
        layout: Some(pipeline_layout),
        vertex,
        fragment,
        primitive: wgpu::PrimitiveState::default(),
        depth_stencil: None,
        multisample: wgpu::MultisampleState::default(),
        multiview: None,
    };
    device.create_render_pipeline(&descriptor)
}