@@ -1,5 +1,8 @@
+use std::mem::size_of;
use std::num::NonZeroU64;

+use wgpu::util::RenderEncoder;
+use wgpu::*;
use wgpu_test::{gpu_test, GpuTestConfiguration, TestParameters, TestingContext};

/// We want to test that partial updates to push constants work as expected.
@@ -153,3 +156,219 @@ async fn partial_update_test(ctx: TestingContext) {
    // second 4 floats the first update
    assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 1.0, 5.0, 3.0, 4.0]);
}
+#[gpu_test]
+static RENDER_PASS_TEST: GpuTestConfiguration = GpuTestConfiguration::new()
+    .parameters(
+        TestParameters::default()
+            .features(Features::PUSH_CONSTANTS | Features::VERTEX_WRITABLE_STORAGE)
+            .limits(wgpu::Limits {
+                max_push_constant_size: 64,
+                ..Default::default()
+            }),
+    )
+    .run_async(move |ctx| async move {
+        for use_render_bundle in [false, true] {
+            render_pass_test(&ctx, use_render_bundle).await;
+        }
+    });
+
+// This shader simply moves the values from push_constants (vertex_constants and
+// fragment_constants) into the result buffer. It expects to be invoked 4 times (with
+// vertex_index in 0..4) with a PointList topology, so that each vertex shader
+// invocation leads to exactly one fragment invocation.
+const SHADER2: &str = "
+    const POSITION: vec4f = vec4f(0, 0, 0, 1);
+
+    struct PushConstants {
+        vertex_constants: vec4i,
+        fragment_constants: vec4i,
+    }
+
+    var<push_constant> push_constants: PushConstants;
+
+    @group(0) @binding(0) var<storage, read_write> result: array<i32>;
+
+    struct VertexOutput {
+        @builtin(position) position: vec4f,
+        @location(0) @interpolate(flat) index: u32,
+    }
+
+    @vertex fn vertex(
+        @builtin(vertex_index) ix: u32,
+    ) -> VertexOutput {
+        result[ix] = push_constants.vertex_constants[ix];
+        return VertexOutput(POSITION, ix);
+    }
+
+    @fragment fn fragment(
+        @location(0) @interpolate(flat) ix: u32,
+    ) -> @location(0) vec4f {
+        result[ix + 4u] = push_constants.fragment_constants[ix];
+        return vec4f();
+    }
+";
+
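+// Draws 4 points with SHADER2 and checks that result[0..4] receives
+// push_constants.vertex_constants and result[4..8] receives push_constants.fragment_constants,
+// both when recording into the render pass directly and via a render bundle.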
+async fn render_pass_test(ctx: &TestingContext, use_render_bundle: bool) {
+    let output_buffer = ctx.device.create_buffer(&BufferDescriptor {
+        label: Some("output buffer"),
+        size: 8 * size_of::<u32>() as BufferAddress,
+        usage: BufferUsages::STORAGE | BufferUsages::COPY_SRC,
+        mapped_at_creation: false,
+    });
+
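+    // The shader writes into `output_buffer`; its contents are copied into `cpu_buffer` for
+    // readback, since a MAP_READ buffer cannot also be used as STORAGE without extra features.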
+    let cpu_buffer = ctx.device.create_buffer(&BufferDescriptor {
+        label: Some("cpu buffer"),
+        size: output_buffer.size(),
+        usage: BufferUsages::COPY_DST | BufferUsages::MAP_READ,
+        mapped_at_creation: false,
+    });
+
+    // We need an output texture, even though we're not ever going to look at it.
+    let output_texture = ctx.device.create_texture(&TextureDescriptor {
+        size: Extent3d {
+            width: 2,
+            height: 2,
+            depth_or_array_layers: 1,
+        },
+        mip_level_count: 1,
+        sample_count: 1,
+        dimension: TextureDimension::D2,
+        format: TextureFormat::Rgba8UnormSrgb,
+        usage: TextureUsages::RENDER_ATTACHMENT,
+        label: Some("Output Texture"),
+        view_formats: &[],
+    });
+    let output_texture_view = output_texture.create_view(&Default::default());
+
+    let shader = ctx.device.create_shader_module(ShaderModuleDescriptor {
+        label: Some("Shader"),
+        source: ShaderSource::Wgsl(SHADER2.into()),
+    });
+
+    let bind_group_layout = ctx
+        .device
+        .create_bind_group_layout(&BindGroupLayoutDescriptor {
+            label: None,
+            entries: &[BindGroupLayoutEntry {
+                binding: 0,
+                visibility: ShaderStages::VERTEX_FRAGMENT,
+                ty: BindingType::Buffer {
+                    ty: BufferBindingType::Storage { read_only: false },
+                    has_dynamic_offset: false,
+                    min_binding_size: None,
+                },
+                count: None,
+            }],
+        });
+
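+    // A single push constant range covers the whole 32-byte PushConstants struct
+    // (two vec4i) and is visible to both the vertex and fragment stages.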
+    let render_pipeline_layout = ctx
+        .device
+        .create_pipeline_layout(&PipelineLayoutDescriptor {
+            bind_group_layouts: &[&bind_group_layout],
+            push_constant_ranges: &[PushConstantRange {
+                stages: ShaderStages::VERTEX_FRAGMENT,
+                range: 0..8 * size_of::<u32>() as u32,
+            }],
+            ..Default::default()
+        });
+
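+    // `entry_point: None` lets wgpu pick the single `@vertex`/`@fragment` entry point
+    // defined for each stage in SHADER2.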
+    let pipeline = ctx
+        .device
+        .create_render_pipeline(&RenderPipelineDescriptor {
+            label: Some("Render Pipeline"),
+            layout: Some(&render_pipeline_layout),
+            vertex: VertexState {
+                module: &shader,
+                entry_point: None,
+                buffers: &[],
+                compilation_options: Default::default(),
+            },
+            fragment: Some(FragmentState {
+                module: &shader,
+                entry_point: None,
+                targets: &[Some(output_texture.format().into())],
+                compilation_options: Default::default(),
+            }),
+            primitive: PrimitiveState {
+                topology: PrimitiveTopology::PointList,
+                ..Default::default()
+            },
+            depth_stencil: None,
+            multisample: MultisampleState::default(),
+            multiview: None,
+            cache: None,
+        });
+
+    let render_pass_desc = RenderPassDescriptor {
+        label: Some("Render Pass"),
+        color_attachments: &[Some(RenderPassColorAttachment {
+            view: &output_texture_view,
+            resolve_target: None,
+            ops: Operations {
+                load: LoadOp::Clear(Color::default()),
+                store: StoreOp::Store,
+            },
+        })],
+        ..Default::default()
+    };
+
+    let bind_group = ctx.device.create_bind_group(&BindGroupDescriptor {
+        label: Some("bind group"),
+        layout: &pipeline.get_bind_group_layout(0),
+        entries: &[BindGroupEntry {
+            binding: 0,
+            resource: output_buffer.as_entire_binding(),
+        }],
+    });
+
+    let data: Vec<i32> = (0..8).map(|i| (i * i) - 1).collect();
+
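+    // `RenderEncoder` is implemented by both `RenderPass` and `RenderBundleEncoder`, so the
+    // same recording code can drive the direct path and the render-bundle path below.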
+    fn do_encoding<'a>(
+        encoder: &mut dyn RenderEncoder<'a>,
+        pipeline: &'a RenderPipeline,
+        bind_group: &'a BindGroup,
+        data: &'a [i32],
+    ) {
+        let data_as_u8: &[u8] = bytemuck::cast_slice(data);
+        encoder.set_pipeline(pipeline);
+        encoder.set_push_constants(ShaderStages::VERTEX_FRAGMENT, 0, data_as_u8);
+        encoder.set_bind_group(0, Some(bind_group), &[]);
+        encoder.draw(0..4, 0..1);
+    }
+
+    let mut command_encoder = ctx
+        .device
+        .create_command_encoder(&CommandEncoderDescriptor::default());
+    {
+        let mut render_pass = command_encoder.begin_render_pass(&render_pass_desc);
+        if use_render_bundle {
+            // Execute the commands in a render_bundle_encoder.
+            let mut render_bundle_encoder =
+                ctx.device
+                    .create_render_bundle_encoder(&RenderBundleEncoderDescriptor {
+                        color_formats: &[Some(output_texture.format())],
+                        sample_count: 1,
+                        ..RenderBundleEncoderDescriptor::default()
+                    });
+            do_encoding(&mut render_bundle_encoder, &pipeline, &bind_group, &data);
+            let render_bundle = render_bundle_encoder.finish(&RenderBundleDescriptor::default());
+            render_pass.execute_bundles([&render_bundle]);
+        } else {
+            // Execute the commands directly.
+            do_encoding(&mut render_pass, &pipeline, &bind_group, &data);
+        }
+    }
+    // Move the results to the cpu buffer, so that we can read them.
+    command_encoder.copy_buffer_to_buffer(&output_buffer, 0, &cpu_buffer, 0, output_buffer.size());
+    let command_buffer = command_encoder.finish();
+    ctx.queue.submit([command_buffer]);
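+    // Read the results back: map cpu_buffer, wait for the map to complete, and compare
+    // against the data that was pushed.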
+    cpu_buffer.slice(..).map_async(MapMode::Read, |_| ());
+    ctx.async_poll(wgpu::Maintain::wait())
+        .await
+        .panic_on_timeout();
+    let mapped_data = cpu_buffer.slice(..).get_mapped_range();
+    let result = bytemuck::cast_slice::<u8, i32>(&mapped_data).to_vec();
+    drop(mapped_data);
+    cpu_buffer.unmap();
+    assert_eq!(&result, &data);
+}