mtl_gpu/mtl4/compute_command_encoder.rs

//! MTL4 ComputeCommandEncoder implementation.
//!
//! Corresponds to `Metal/MTL4ComputeCommandEncoder.hpp`.

use std::ffi::c_void;
use std::ptr::NonNull;

use mtl_foundation::{Referencing, UInteger};
use mtl_sys::{msg_send_0, msg_send_1, msg_send_2, msg_send_3, msg_send_4, msg_send_5, sel};

use super::enums::VisibilityOptions;
use crate::{ComputePipelineState, Device, Size};

// ============================================================
// ComputeCommandEncoder
// ============================================================

/// MTL4 compute command encoder.
///
/// C++ equivalent: `MTL4::ComputeCommandEncoder`
///
/// ComputeCommandEncoder encodes compute dispatch commands and resource bindings.
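///
/// # Example
///
/// A minimal encoding sketch. How the encoder and pipeline are obtained, and
/// whether `Size` has a `new` constructor, depend on the surrounding crate and
/// are assumed here.
///
/// ```ignore
/// encoder.set_compute_pipeline_state(&pipeline);
/// encoder.dispatch_threadgroups(
///     Size::new(64, 1, 1),  // threadgroups per grid
///     Size::new(256, 1, 1), // threads per threadgroup
/// );
/// encoder.end_encoding();
/// ```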
#[repr(transparent)]
pub struct ComputeCommandEncoder(NonNull<c_void>);

impl ComputeCommandEncoder {
    /// Create a ComputeCommandEncoder from a raw pointer.
    #[inline]
    pub unsafe fn from_raw(ptr: *mut c_void) -> Option<Self> {
        NonNull::new(ptr).map(Self)
    }

    /// Get the raw pointer.
    #[inline]
    pub fn as_raw(&self) -> *mut c_void {
        self.0.as_ptr()
    }

    /// Get the device.
    ///
    /// C++ equivalent: `MTL::Device* device() const`
    pub fn device(&self) -> Option<Device> {
        unsafe {
            let ptr: *mut c_void = msg_send_0(self.as_ptr(), sel!(device));
            Device::from_raw(ptr)
        }
    }

    /// Get the label.
    ///
    /// C++ equivalent: `NS::String* label() const`
    pub fn label(&self) -> Option<String> {
        unsafe {
            let ns_string: *mut c_void = msg_send_0(self.as_ptr(), sel!(label));
            if ns_string.is_null() {
                return None;
            }
            let c_str: *const i8 = msg_send_0(ns_string, sel!(UTF8String));
            if c_str.is_null() {
                return None;
            }
            Some(
                std::ffi::CStr::from_ptr(c_str)
                    .to_string_lossy()
                    .into_owned(),
            )
        }
    }

    /// Set the label.
    ///
    /// C++ equivalent: `void setLabel(const NS::String*)`
    pub fn set_label(&self, label: &str) {
        if let Some(ns_label) = mtl_foundation::String::from_str(label) {
            unsafe {
                let _: () = msg_send_1(self.as_ptr(), sel!(setLabel:), ns_label.as_ptr());
            }
        }
    }

    // ========== Pipeline State ==========

    /// Set the compute pipeline state.
    ///
    /// C++ equivalent: `void setComputePipelineState(const MTL::ComputePipelineState*)`
    pub fn set_compute_pipeline_state(&self, pipeline: &ComputePipelineState) {
        unsafe {
            let _: () = msg_send_1(
                self.as_ptr(),
                sel!(setComputePipelineState:),
                pipeline.as_ptr(),
            );
        }
    }

    // ========== Argument Table ==========

    /// Set the argument table at index.
    ///
    /// C++ equivalent: `void setArgumentTable(const MTL4::ArgumentTable*, NS::UInteger)`
    pub fn set_argument_table(&self, table: *const c_void, index: UInteger) {
        unsafe {
            let _: () = msg_send_2(self.as_ptr(), sel!(setArgumentTable:atIndex:), table, index);
        }
    }

    // ========== Buffer Binding ==========

    /// Set a buffer at index with offset.
    ///
    /// C++ equivalent: `void setBuffer(const MTL::Buffer*, NS::UInteger, NS::UInteger)`
    pub fn set_buffer(&self, buffer: *const c_void, offset: UInteger, index: UInteger) {
        unsafe {
            let _: () = msg_send_3(
                self.as_ptr(),
                sel!(setBuffer:offset:atIndex:),
                buffer,
                offset,
                index,
            );
        }
    }

    /// Set multiple buffers.
    ///
    /// C++ equivalent: `void setBuffers(const MTL::Buffer* const*, const NS::UInteger*, NS::Range)`
    pub fn set_buffers(
        &self,
        buffers: *const *const c_void,
        offsets: *const UInteger,
        range_location: UInteger,
        range_length: UInteger,
    ) {
        unsafe {
            // Build an NS::Range with a C-compatible layout; a Rust tuple has no
            // guaranteed layout and must not be passed as NSRange over FFI.
            let range = mtl_foundation::Range::new(range_location, range_length);
            let _: () = msg_send_3(
                self.as_ptr(),
                sel!(setBuffers:offsets:withRange:),
                buffers,
                offsets,
                range,
            );
        }
    }

    /// Set bytes at index.
    ///
    /// C++ equivalent: `void setBytes(const void*, NS::UInteger, NS::UInteger)`
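    ///
    /// # Example
    ///
    /// `setBytes` is meant for small, transient constant data. A sketch with a
    /// hypothetical `#[repr(C)]` parameter struct:
    ///
    /// ```ignore
    /// #[repr(C)]
    /// struct Params {
    ///     count: u32,
    ///     scale: f32,
    /// }
    ///
    /// let params = Params { count: 1024, scale: 0.5 };
    /// encoder.set_bytes(
    ///     &params as *const Params as *const c_void,
    ///     std::mem::size_of::<Params>() as UInteger,
    ///     0, // binding index
    /// );
    /// ```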
    pub fn set_bytes(&self, bytes: *const c_void, length: UInteger, index: UInteger) {
        unsafe {
            let _: () = msg_send_3(
                self.as_ptr(),
                sel!(setBytes:length:atIndex:),
                bytes,
                length,
                index,
            );
        }
    }

    // ========== Texture Binding ==========

    /// Set a texture at index.
    ///
    /// C++ equivalent: `void setTexture(const MTL::Texture*, NS::UInteger)`
    pub fn set_texture(&self, texture: *const c_void, index: UInteger) {
        unsafe {
            let _: () = msg_send_2(self.as_ptr(), sel!(setTexture:atIndex:), texture, index);
        }
    }

    /// Set multiple textures.
    ///
    /// C++ equivalent: `void setTextures(const MTL::Texture* const*, NS::Range)`
    pub fn set_textures(
        &self,
        textures: *const *const c_void,
        range_location: UInteger,
        range_length: UInteger,
    ) {
        unsafe {
            // Use the C-layout NS::Range wrapper rather than a Rust tuple.
            let range = mtl_foundation::Range::new(range_location, range_length);
            let _: () = msg_send_2(self.as_ptr(), sel!(setTextures:withRange:), textures, range);
        }
    }

    // ========== Sampler Binding ==========

    /// Set a sampler state at index.
    ///
    /// C++ equivalent: `void setSamplerState(const MTL::SamplerState*, NS::UInteger)`
    pub fn set_sampler_state(&self, sampler: *const c_void, index: UInteger) {
        unsafe {
            let _: () = msg_send_2(
                self.as_ptr(),
                sel!(setSamplerState:atIndex:),
                sampler,
                index,
            );
        }
    }

    /// Set a sampler state with LOD clamp.
    ///
    /// C++ equivalent: `void setSamplerState(const MTL::SamplerState*, float, float, NS::UInteger)`
    pub fn set_sampler_state_with_lod(
        &self,
        sampler: *const c_void,
        lod_min_clamp: f32,
        lod_max_clamp: f32,
        index: UInteger,
    ) {
        unsafe {
            let _: () = msg_send_4(
                self.as_ptr(),
                sel!(setSamplerState:lodMinClamp:lodMaxClamp:atIndex:),
                sampler,
                lod_min_clamp,
                lod_max_clamp,
                index,
            );
        }
    }

    // ========== Threadgroup Memory ==========

    /// Set threadgroup memory length at index.
    ///
    /// C++ equivalent: `void setThreadgroupMemoryLength(NS::UInteger, NS::UInteger)`
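    ///
    /// # Example
    ///
    /// A sketch sizing a threadgroup-scope scratch array with one `f32` per
    /// thread; the threads-per-threadgroup count of 256 is assumed.
    ///
    /// ```ignore
    /// let threads_per_threadgroup: usize = 256;
    /// encoder.set_threadgroup_memory_length(
    ///     (threads_per_threadgroup * std::mem::size_of::<f32>()) as UInteger,
    ///     0, // [[threadgroup(0)]] argument index in the kernel
    /// );
    /// ```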
    pub fn set_threadgroup_memory_length(&self, length: UInteger, index: UInteger) {
        unsafe {
            let _: () = msg_send_2(
                self.as_ptr(),
                sel!(setThreadgroupMemoryLength:atIndex:),
                length,
                index,
            );
        }
    }

    // ========== Dispatch Methods ==========

    /// Dispatch threadgroups.
    ///
    /// C++ equivalent: `void dispatchThreadgroups(MTL::Size, MTL::Size)`
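    ///
    /// # Example
    ///
    /// The grid is covered by whole threadgroups, so the group count is the
    /// element count divided by the threadgroup width, rounded up (the kernel
    /// should guard against the overrun). A sketch, assuming a `Size::new`
    /// constructor:
    ///
    /// ```ignore
    /// let element_count: u64 = 1_000_000;
    /// let width: u64 = 256; // threads per threadgroup
    /// let groups = (element_count + width - 1) / width; // ceiling division
    ///
    /// encoder.dispatch_threadgroups(Size::new(groups, 1, 1), Size::new(width, 1, 1));
    /// ```
    ///
    /// With `dispatch_threads` the exact thread count can be passed instead, and
    /// partial threadgroups are handled on hardware that supports non-uniform
    /// threadgroup sizes.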
    pub fn dispatch_threadgroups(
        &self,
        threadgroups_per_grid: Size,
        threads_per_threadgroup: Size,
    ) {
        unsafe {
            let _: () = msg_send_2(
                self.as_ptr(),
                sel!(dispatchThreadgroups:threadsPerThreadgroup:),
                threadgroups_per_grid,
                threads_per_threadgroup,
            );
        }
    }

    /// Dispatch threads.
    ///
    /// C++ equivalent: `void dispatchThreads(MTL::Size, MTL::Size)`
    pub fn dispatch_threads(&self, threads_per_grid: Size, threads_per_threadgroup: Size) {
        unsafe {
            let _: () = msg_send_2(
                self.as_ptr(),
                sel!(dispatchThreads:threadsPerThreadgroup:),
                threads_per_grid,
                threads_per_threadgroup,
            );
        }
    }

    /// Dispatch threadgroups with indirect buffer.
    ///
    /// C++ equivalent: `void dispatchThreadgroups(const MTL::Buffer*, NS::UInteger, MTL::Size)`
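    ///
    /// The indirect buffer holds the threadgroup counts in the layout of
    /// Metal's `MTLDispatchThreadgroupsIndirectArguments`: three `u32` values.
    /// A Rust mirror of that layout, for illustration only:
    ///
    /// ```ignore
    /// #[repr(C)]
    /// struct DispatchThreadgroupsIndirectArguments {
    ///     threadgroups_per_grid: [u32; 3],
    /// }
    /// ```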
    pub fn dispatch_threadgroups_indirect(
        &self,
        indirect_buffer: *const c_void,
        indirect_buffer_offset: UInteger,
        threads_per_threadgroup: Size,
    ) {
        unsafe {
            let _: () = msg_send_3(
                self.as_ptr(),
                sel!(dispatchThreadgroupsWithIndirectBuffer:indirectBufferOffset:threadsPerThreadgroup:),
                indirect_buffer,
                indirect_buffer_offset,
                threads_per_threadgroup,
            );
        }
    }

    /// Dispatch threads with indirect buffer.
    ///
    /// C++ equivalent: `void dispatchThreads(const MTL::Buffer*, NS::UInteger, MTL::Size)`
    pub fn dispatch_threads_indirect(
        &self,
        indirect_buffer: *const c_void,
        indirect_buffer_offset: UInteger,
        threads_per_threadgroup: Size,
    ) {
        unsafe {
            let _: () = msg_send_3(
                self.as_ptr(),
                sel!(dispatchThreadsWithIndirectBuffer:indirectBufferOffset:threadsPerThreadgroup:),
                indirect_buffer,
                indirect_buffer_offset,
                threads_per_threadgroup,
            );
        }
    }

    // ========== Memory Barrier ==========

    /// Insert a barrier.
    ///
    /// C++ equivalent: `void barrier()`
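    ///
    /// # Example
    ///
    /// A sketch of the common pattern: a barrier between two dispatches so the
    /// second dispatch sees the first one's writes (`groups` and `threads` are
    /// assumed `Size` values).
    ///
    /// ```ignore
    /// encoder.dispatch_threadgroups(groups, threads); // pass 1 writes
    /// encoder.barrier();                              // make writes visible
    /// encoder.dispatch_threadgroups(groups, threads); // pass 2 reads
    /// ```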
    pub fn barrier(&self) {
        unsafe {
            let _: () = msg_send_0(self.as_ptr(), sel!(barrier));
        }
    }

    /// Insert a barrier for a buffer.
    ///
    /// C++ equivalent: `void barrier(const MTL::Buffer*, MTL4::VisibilityOptions)`
    pub fn barrier_buffer(&self, buffer: *const c_void, visibility: VisibilityOptions) {
        unsafe {
            let _: () = msg_send_2(
                self.as_ptr(),
                sel!(barrierWithBuffer:visibilityOptions:),
                buffer,
                visibility.0,
            );
        }
    }

    /// Insert a barrier for a texture.
    ///
    /// C++ equivalent: `void barrier(const MTL::Texture*, MTL4::VisibilityOptions)`
    pub fn barrier_texture(&self, texture: *const c_void, visibility: VisibilityOptions) {
        unsafe {
            let _: () = msg_send_2(
                self.as_ptr(),
                sel!(barrierWithTexture:visibilityOptions:),
                texture,
                visibility.0,
            );
        }
    }

    // ========== Fence Methods ==========

    /// Update a fence.
    ///
    /// C++ equivalent: `void updateFence(const MTL::Fence*)`
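    ///
    /// # Example
    ///
    /// Fences order work across encoders: the producer updates the fence after
    /// its dispatch, and a later consumer waits on it before using the results.
    /// A sketch with a raw fence pointer, matching the signatures below:
    ///
    /// ```ignore
    /// producer.dispatch_threadgroups(groups, threads);
    /// producer.update_fence(fence_ptr);
    /// producer.end_encoding();
    ///
    /// consumer.wait_for_fence(fence_ptr);
    /// consumer.dispatch_threadgroups(groups, threads);
    /// ```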
    pub fn update_fence(&self, fence: *const c_void) {
        unsafe {
            let _: () = msg_send_1(self.as_ptr(), sel!(updateFence:), fence);
        }
    }

    /// Wait for a fence.
    ///
    /// C++ equivalent: `void waitForFence(const MTL::Fence*)`
    pub fn wait_for_fence(&self, fence: *const c_void) {
        unsafe {
            let _: () = msg_send_1(self.as_ptr(), sel!(waitForFence:), fence);
        }
    }

    // ========== Resource Usage ==========

    /// Use resource with usage.
    ///
    /// C++ equivalent: `void useResource(const MTL::Resource*, MTL::ResourceUsage)`
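    ///
    /// # Example
    ///
    /// `usage` is an `MTL::ResourceUsage` bitmask; in Metal, read access is
    /// bit 0 and write access is bit 1. A sketch with the raw flag values
    /// spelled out (a typed wrapper, if the crate provides one, is preferable):
    ///
    /// ```ignore
    /// const RESOURCE_USAGE_READ: UInteger = 1 << 0;
    /// const RESOURCE_USAGE_WRITE: UInteger = 1 << 1;
    ///
    /// encoder.use_resource(buffer_ptr, RESOURCE_USAGE_READ | RESOURCE_USAGE_WRITE);
    /// ```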
    pub fn use_resource(&self, resource: *const c_void, usage: UInteger) {
        unsafe {
            let _: () = msg_send_2(self.as_ptr(), sel!(useResource:usage:), resource, usage);
        }
    }

    /// Use multiple resources.
    ///
    /// C++ equivalent: `void useResources(const MTL::Resource* const*, NS::UInteger, MTL::ResourceUsage)`
    pub fn use_resources(&self, resources: *const *const c_void, count: UInteger, usage: UInteger) {
        unsafe {
            let _: () = msg_send_3(
                self.as_ptr(),
                sel!(useResources:count:usage:),
                resources,
                count,
                usage,
            );
        }
    }

    /// Use heap with usage.
    ///
    /// C++ equivalent: `void useHeap(const MTL::Heap*, MTL::ResourceUsage)`
    pub fn use_heap(&self, heap: *const c_void, usage: UInteger) {
        unsafe {
            let _: () = msg_send_2(self.as_ptr(), sel!(useHeap:usage:), heap, usage);
        }
    }

    /// Use multiple heaps.
    ///
    /// C++ equivalent: `void useHeaps(const MTL::Heap* const*, NS::UInteger, MTL::ResourceUsage)`
    pub fn use_heaps(&self, heaps: *const *const c_void, count: UInteger, usage: UInteger) {
        unsafe {
            let _: () = msg_send_3(
                self.as_ptr(),
                sel!(useHeaps:count:usage:),
                heaps,
                count,
                usage,
            );
        }
    }

    // ========== Debug Methods ==========

    /// Push a debug group.
    ///
    /// C++ equivalent: `void pushDebugGroup(const NS::String*)`
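    ///
    /// # Example
    ///
    /// Debug groups nest; each push should be balanced by a pop before
    /// `end_encoding`.
    ///
    /// ```ignore
    /// encoder.push_debug_group("particle update");
    /// encoder.dispatch_threadgroups(groups, threads);
    /// encoder.pop_debug_group();
    /// ```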
    pub fn push_debug_group(&self, name: &str) {
        if let Some(ns_name) = mtl_foundation::String::from_str(name) {
            unsafe {
                let _: () = msg_send_1(self.as_ptr(), sel!(pushDebugGroup:), ns_name.as_ptr());
            }
        }
    }

    /// Pop a debug group.
    ///
    /// C++ equivalent: `void popDebugGroup()`
    pub fn pop_debug_group(&self) {
        unsafe {
            let _: () = msg_send_0(self.as_ptr(), sel!(popDebugGroup));
        }
    }

    /// Insert a debug signpost.
    ///
    /// C++ equivalent: `void insertDebugSignpost(const NS::String*)`
    pub fn insert_debug_signpost(&self, name: &str) {
        if let Some(ns_name) = mtl_foundation::String::from_str(name) {
            unsafe {
                let _: () = msg_send_1(self.as_ptr(), sel!(insertDebugSignpost:), ns_name.as_ptr());
            }
        }
    }

    // ========== Encoding ==========

    /// End encoding.
    ///
    /// C++ equivalent: `void endEncoding()`
    pub fn end_encoding(&self) {
        unsafe {
            let _: () = msg_send_0(self.as_ptr(), sel!(endEncoding));
        }
    }

    // ========== Copy Operations ==========

    /// Copy from buffer to buffer.
    ///
    /// C++ equivalent: `void copyFromBuffer(...)`
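    ///
    /// # Example
    ///
    /// A sketch copying 4096 bytes from the start of one buffer to the start of
    /// another (raw `MTL::Buffer*` pointers, matching the signature below):
    ///
    /// ```ignore
    /// encoder.copy_from_buffer_to_buffer(
    ///     src_buffer_ptr, // source buffer
    ///     0,              // source offset in bytes
    ///     dst_buffer_ptr, // destination buffer
    ///     0,              // destination offset in bytes
    ///     4096,           // size in bytes
    /// );
    /// ```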
    pub fn copy_from_buffer_to_buffer(
        &self,
        source_buffer: *const c_void,
        source_offset: UInteger,
        destination_buffer: *const c_void,
        destination_offset: UInteger,
        size: UInteger,
    ) {
        unsafe {
            let _: () = msg_send_5(
                self.as_ptr(),
                sel!(copyFromBuffer:sourceOffset:toBuffer:destinationOffset:size:),
                source_buffer,
                source_offset,
                destination_buffer,
                destination_offset,
                size,
            );
        }
    }

    /// Fill buffer with value.
    ///
    /// C++ equivalent: `void fillBuffer(const MTL::Buffer*, NS::Range, uint8_t)`
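    ///
    /// # Example
    ///
    /// A sketch zeroing the first 1024 bytes of a buffer:
    ///
    /// ```ignore
    /// encoder.fill_buffer(buffer_ptr, 0, 1024, 0u8);
    /// ```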
    pub fn fill_buffer(
        &self,
        buffer: *const c_void,
        range_location: UInteger,
        range_length: UInteger,
        value: u8,
    ) {
        unsafe {
            // Use the C-layout NS::Range wrapper rather than a Rust tuple.
            let range = mtl_foundation::Range::new(range_location, range_length);
            let _: () = msg_send_3(
                self.as_ptr(),
                sel!(fillBuffer:range:value:),
                buffer,
                range,
                value,
            );
        }
    }

    // ========== Texture Operations ==========

    /// Generate mipmaps for a texture.
    ///
    /// C++ equivalent: `void generateMipmaps(const MTL::Texture*)`
    pub fn generate_mipmaps(&self, texture: *const c_void) {
        unsafe {
            let _: () = msg_send_1(self.as_ptr(), sel!(generateMipmapsForTexture:), texture);
        }
    }

    /// Optimize texture contents for CPU access.
    ///
    /// C++ equivalent: `void optimizeContentsForCPUAccess(const MTL::Texture*)`
    pub fn optimize_contents_for_cpu_access(&self, texture: *const c_void) {
        unsafe {
            let _: () = msg_send_1(self.as_ptr(), sel!(optimizeContentsForCPUAccess:), texture);
        }
    }

    /// Optimize texture contents for CPU access with slice and level.
    ///
    /// C++ equivalent: `void optimizeContentsForCPUAccess(const MTL::Texture*, NS::UInteger, NS::UInteger)`
    pub fn optimize_contents_for_cpu_access_slice_level(
        &self,
        texture: *const c_void,
        slice: UInteger,
        level: UInteger,
    ) {
        unsafe {
            let _: () = msg_send_3(
                self.as_ptr(),
                sel!(optimizeContentsForCPUAccess:slice:level:),
                texture,
                slice,
                level,
            );
        }
    }

    /// Optimize texture contents for GPU access.
    ///
    /// C++ equivalent: `void optimizeContentsForGPUAccess(const MTL::Texture*)`
    pub fn optimize_contents_for_gpu_access(&self, texture: *const c_void) {
        unsafe {
            let _: () = msg_send_1(self.as_ptr(), sel!(optimizeContentsForGPUAccess:), texture);
        }
    }

    /// Optimize texture contents for GPU access with slice and level.
    ///
    /// C++ equivalent: `void optimizeContentsForGPUAccess(const MTL::Texture*, NS::UInteger, NS::UInteger)`
    pub fn optimize_contents_for_gpu_access_slice_level(
        &self,
        texture: *const c_void,
        slice: UInteger,
        level: UInteger,
    ) {
        unsafe {
            let _: () = msg_send_3(
                self.as_ptr(),
                sel!(optimizeContentsForGPUAccess:slice:level:),
                texture,
                slice,
                level,
            );
        }
    }

    // ========== Counter/Timestamp Methods ==========

    /// Write a timestamp to a counter heap.
    ///
    /// C++ equivalent: `void writeTimestamp(MTL4::TimestampGranularity, const MTL4::CounterHeap*, NS::UInteger)`
    pub fn write_timestamp(
        &self,
        granularity: super::TimestampGranularity,
        counter_heap: *const c_void,
        index: UInteger,
    ) {
        unsafe {
            let _: () = msg_send_3(
                self.as_ptr(),
                sel!(writeTimestampWithGranularity:intoHeap:atIndex:),
                granularity.0,
                counter_heap,
                index,
            );
        }
    }

    // ========== Acceleration Structure Methods ==========

    /// Build an acceleration structure.
    ///
    /// C++ equivalent: `void buildAccelerationStructure(const MTL::AccelerationStructure*, const MTL4::AccelerationStructureDescriptor*, const MTL4::BufferRange)`
    pub fn build_acceleration_structure(
        &self,
        acceleration_structure: &crate::AccelerationStructure,
        descriptor: &super::AccelerationStructureDescriptor,
        scratch_buffer: super::BufferRange,
    ) {
        unsafe {
            let _: () = msg_send_3(
                self.as_ptr(),
                sel!(buildAccelerationStructure:descriptor:scratchBuffer:),
                acceleration_structure.as_ptr(),
                descriptor.as_ptr(),
                scratch_buffer,
            );
        }
    }

    /// Copy an acceleration structure.
    ///
    /// C++ equivalent: `void copyAccelerationStructure(const MTL::AccelerationStructure*, const MTL::AccelerationStructure*)`
    pub fn copy_acceleration_structure(
        &self,
        source: &crate::AccelerationStructure,
        destination: &crate::AccelerationStructure,
    ) {
        unsafe {
            let _: () = msg_send_2(
                self.as_ptr(),
                sel!(copyAccelerationStructure:toAccelerationStructure:),
                source.as_ptr(),
                destination.as_ptr(),
            );
        }
    }

    /// Copy and compact an acceleration structure.
    ///
    /// C++ equivalent: `void copyAndCompactAccelerationStructure(const MTL::AccelerationStructure*, const MTL::AccelerationStructure*)`
    pub fn copy_and_compact_acceleration_structure(
        &self,
        source: &crate::AccelerationStructure,
        destination: &crate::AccelerationStructure,
    ) {
        unsafe {
            let _: () = msg_send_2(
                self.as_ptr(),
                sel!(copyAndCompactAccelerationStructure:toAccelerationStructure:),
                source.as_ptr(),
                destination.as_ptr(),
            );
        }
    }

    /// Refit an acceleration structure.
    ///
    /// C++ equivalent: `void refitAccelerationStructure(const MTL::AccelerationStructure*, const MTL4::AccelerationStructureDescriptor*, const MTL::AccelerationStructure*, const MTL4::BufferRange)`
    pub fn refit_acceleration_structure(
        &self,
        source: &crate::AccelerationStructure,
        descriptor: &super::AccelerationStructureDescriptor,
        destination: &crate::AccelerationStructure,
        scratch_buffer: super::BufferRange,
    ) {
        unsafe {
            let _: () = msg_send_4(
                self.as_ptr(),
                sel!(refitAccelerationStructure:descriptor:destination:scratchBuffer:),
                source.as_ptr(),
                descriptor.as_ptr(),
                destination.as_ptr(),
                scratch_buffer,
            );
        }
    }

    /// Refit an acceleration structure with options.
    ///
    /// C++ equivalent: `void refitAccelerationStructure(const MTL::AccelerationStructure*, const MTL4::AccelerationStructureDescriptor*, const MTL::AccelerationStructure*, const MTL4::BufferRange, MTL::AccelerationStructureRefitOptions)`
    pub fn refit_acceleration_structure_with_options(
        &self,
        source: &crate::AccelerationStructure,
        descriptor: &super::AccelerationStructureDescriptor,
        destination: &crate::AccelerationStructure,
        scratch_buffer: super::BufferRange,
        options: crate::AccelerationStructureRefitOptions,
    ) {
        unsafe {
            let _: () = msg_send_5(
                self.as_ptr(),
                sel!(refitAccelerationStructure:descriptor:destination:scratchBuffer:options:),
                source.as_ptr(),
                descriptor.as_ptr(),
                destination.as_ptr(),
                scratch_buffer,
                options,
            );
        }
    }

    /// Write the compacted acceleration structure size to a buffer.
    ///
    /// C++ equivalent: `void writeCompactedAccelerationStructureSize(const MTL::AccelerationStructure*, const MTL4::BufferRange)`
    pub fn write_compacted_acceleration_structure_size(
        &self,
        acceleration_structure: &crate::AccelerationStructure,
        buffer: super::BufferRange,
    ) {
        unsafe {
            let _: () = msg_send_2(
                self.as_ptr(),
                sel!(writeCompactedAccelerationStructureSize:toBuffer:),
                acceleration_structure.as_ptr(),
                buffer,
            );
        }
    }

    // ========== Tensor Copy Methods ==========

    /// Copy from tensor to tensor.
    ///
    /// C++ equivalent: `void copyFromTensor(const MTL::Tensor*, const MTL::TensorExtents*, const MTL::TensorExtents*, const MTL::Tensor*, const MTL::TensorExtents*, const MTL::TensorExtents*)`
    pub fn copy_from_tensor(
        &self,
        source_tensor: &crate::Tensor,
        source_origin: &crate::TensorExtents,
        source_dimensions: &crate::TensorExtents,
        destination_tensor: &crate::Tensor,
        destination_origin: &crate::TensorExtents,
        destination_dimensions: &crate::TensorExtents,
    ) {
        unsafe {
            mtl_sys::msg_send_6::<
                (),
                *const c_void,
                *const c_void,
                *const c_void,
                *const c_void,
                *const c_void,
                *const c_void,
            >(
                self.as_ptr(),
                sel!(copyFromTensor:sourceOrigin:sourceDimensions:toTensor:destinationOrigin:destinationDimensions:),
                source_tensor.as_ptr(),
                source_origin.as_ptr(),
                source_dimensions.as_ptr(),
                destination_tensor.as_ptr(),
                destination_origin.as_ptr(),
                destination_dimensions.as_ptr(),
            );
        }
    }

    // ========== Texture Copy Methods ==========

    /// Copy from texture to texture (simple version).
    ///
    /// C++ equivalent: `void copyFromTexture(const MTL::Texture*, const MTL::Texture*)`
    pub fn copy_from_texture_to_texture(
        &self,
        source_texture: &crate::Texture,
        destination_texture: &crate::Texture,
    ) {
        unsafe {
            let _: () = msg_send_2(
                self.as_ptr(),
                sel!(copyFromTexture:toTexture:),
                source_texture.as_ptr(),
                destination_texture.as_ptr(),
            );
        }
    }

    /// Copy from texture to texture with slice and level ranges.
    ///
    /// C++ equivalent: `void copyFromTexture(const MTL::Texture*, NS::UInteger, NS::UInteger, const MTL::Texture*, NS::UInteger, NS::UInteger, NS::UInteger, NS::UInteger)`
    pub fn copy_from_texture_with_slices(
        &self,
        source_texture: &crate::Texture,
        source_slice: UInteger,
        source_level: UInteger,
        destination_texture: &crate::Texture,
        destination_slice: UInteger,
        destination_level: UInteger,
        slice_count: UInteger,
        level_count: UInteger,
    ) {
        unsafe {
            mtl_sys::msg_send_8::<
                (),
                *const c_void,
                UInteger,
                UInteger,
                *const c_void,
                UInteger,
                UInteger,
                UInteger,
                UInteger,
            >(
                self.as_ptr(),
                sel!(copyFromTexture:sourceSlice:sourceLevel:toTexture:destinationSlice:destinationLevel:sliceCount:levelCount:),
                source_texture.as_ptr(),
                source_slice,
                source_level,
                destination_texture.as_ptr(),
                destination_slice,
                destination_level,
                slice_count,
                level_count,
            );
        }
    }

    /// Copy from texture to texture with origin and size.
    ///
    /// C++ equivalent: `void copyFromTexture(const MTL::Texture*, NS::UInteger, NS::UInteger, MTL::Origin, MTL::Size, const MTL::Texture*, NS::UInteger, NS::UInteger, MTL::Origin)`
    pub fn copy_from_texture_with_origin(
        &self,
        source_texture: &crate::Texture,
        source_slice: UInteger,
        source_level: UInteger,
        source_origin: crate::Origin,
        source_size: Size,
        destination_texture: &crate::Texture,
        destination_slice: UInteger,
        destination_level: UInteger,
        destination_origin: crate::Origin,
    ) {
        unsafe {
            mtl_sys::msg_send_9::<
                (),
                *const c_void,
                UInteger,
                UInteger,
                crate::Origin,
                Size,
                *const c_void,
                UInteger,
                UInteger,
                crate::Origin,
            >(
                self.as_ptr(),
                sel!(copyFromTexture:sourceSlice:sourceLevel:sourceOrigin:sourceSize:toTexture:destinationSlice:destinationLevel:destinationOrigin:),
                source_texture.as_ptr(),
                source_slice,
                source_level,
                source_origin,
                source_size,
                destination_texture.as_ptr(),
                destination_slice,
                destination_level,
                destination_origin,
            );
        }
    }

    /// Copy from texture to buffer.
    ///
    /// C++ equivalent: `void copyFromTexture(const MTL::Texture*, NS::UInteger, NS::UInteger, MTL::Origin, MTL::Size, const MTL::Buffer*, NS::UInteger, NS::UInteger, NS::UInteger)`
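    ///
    /// # Example
    ///
    /// `destination_bytes_per_row` is the stride between rows in the buffer and
    /// `destination_bytes_per_image` the stride between array slices or depth
    /// planes. For a tightly packed RGBA8 region they follow from the copy size
    /// (assuming `Size` exposes `width`/`height` fields):
    ///
    /// ```ignore
    /// let bytes_per_pixel: UInteger = 4; // RGBA8
    /// let bytes_per_row = source_size.width * bytes_per_pixel;
    /// let bytes_per_image = bytes_per_row * source_size.height;
    /// ```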
    pub fn copy_from_texture_to_buffer(
        &self,
        source_texture: &crate::Texture,
        source_slice: UInteger,
        source_level: UInteger,
        source_origin: crate::Origin,
        source_size: Size,
        destination_buffer: &crate::Buffer,
        destination_offset: UInteger,
        destination_bytes_per_row: UInteger,
        destination_bytes_per_image: UInteger,
    ) {
        unsafe {
            mtl_sys::msg_send_9::<
                (),
                *const c_void,
                UInteger,
                UInteger,
                crate::Origin,
                Size,
                *const c_void,
                UInteger,
                UInteger,
                UInteger,
            >(
                self.as_ptr(),
                sel!(copyFromTexture:sourceSlice:sourceLevel:sourceOrigin:sourceSize:toBuffer:destinationOffset:destinationBytesPerRow:destinationBytesPerImage:),
                source_texture.as_ptr(),
                source_slice,
                source_level,
                source_origin,
                source_size,
                destination_buffer.as_ptr(),
                destination_offset,
                destination_bytes_per_row,
                destination_bytes_per_image,
            );
        }
    }

    /// Copy from texture to buffer with blit options.
    ///
    /// C++ equivalent: `void copyFromTexture(const MTL::Texture*, NS::UInteger, NS::UInteger, MTL::Origin, MTL::Size, const MTL::Buffer*, NS::UInteger, NS::UInteger, NS::UInteger, MTL::BlitOption)`
    pub fn copy_from_texture_to_buffer_with_options(
        &self,
        source_texture: &crate::Texture,
        source_slice: UInteger,
        source_level: UInteger,
        source_origin: crate::Origin,
        source_size: Size,
        destination_buffer: &crate::Buffer,
        destination_offset: UInteger,
        destination_bytes_per_row: UInteger,
        destination_bytes_per_image: UInteger,
        options: crate::BlitOption,
    ) {
        unsafe {
            mtl_sys::msg_send_10::<
                (),
                *const c_void,
                UInteger,
                UInteger,
                crate::Origin,
                Size,
                *const c_void,
                UInteger,
                UInteger,
                UInteger,
                crate::BlitOption,
            >(
                self.as_ptr(),
                sel!(copyFromTexture:sourceSlice:sourceLevel:sourceOrigin:sourceSize:toBuffer:destinationOffset:destinationBytesPerRow:destinationBytesPerImage:options:),
                source_texture.as_ptr(),
                source_slice,
                source_level,
                source_origin,
                source_size,
                destination_buffer.as_ptr(),
                destination_offset,
                destination_bytes_per_row,
                destination_bytes_per_image,
                options,
            );
        }
    }

    // ========== Indirect Command Buffer Methods ==========

    /// Copy indirect command buffer.
    ///
    /// C++ equivalent: `void copyIndirectCommandBuffer(const MTL::IndirectCommandBuffer*, NS::Range, const MTL::IndirectCommandBuffer*, NS::UInteger)`
    pub fn copy_indirect_command_buffer(
        &self,
        source: &crate::IndirectCommandBuffer,
        source_range_location: UInteger,
        source_range_length: UInteger,
        destination: &crate::IndirectCommandBuffer,
        destination_index: UInteger,
    ) {
        unsafe {
            let range = mtl_foundation::Range::new(source_range_location, source_range_length);
            let _: () = msg_send_4(
                self.as_ptr(),
                sel!(copyIndirectCommandBuffer:sourceRange:destination:destinationIndex:),
                source.as_ptr(),
                range,
                destination.as_ptr(),
                destination_index,
            );
        }
    }

    /// Optimize indirect command buffer.
    ///
    /// C++ equivalent: `void optimizeIndirectCommandBuffer(const MTL::IndirectCommandBuffer*, NS::Range)`
    pub fn optimize_indirect_command_buffer(
        &self,
        indirect_command_buffer: &crate::IndirectCommandBuffer,
        range_location: UInteger,
        range_length: UInteger,
    ) {
        unsafe {
            let range = mtl_foundation::Range::new(range_location, range_length);
            let _: () = msg_send_2(
                self.as_ptr(),
                sel!(optimizeIndirectCommandBuffer:withRange:),
                indirect_command_buffer.as_ptr(),
                range,
            );
        }
    }

    /// Reset commands in an indirect command buffer.
    ///
    /// C++ equivalent: `void resetCommandsInBuffer(const MTL::IndirectCommandBuffer*, NS::Range)`
    pub fn reset_commands_in_buffer(
        &self,
        buffer: &crate::IndirectCommandBuffer,
        range_location: UInteger,
        range_length: UInteger,
    ) {
        unsafe {
            let range = mtl_foundation::Range::new(range_location, range_length);
            let _: () = msg_send_2(
                self.as_ptr(),
                sel!(resetCommandsInBuffer:withRange:),
                buffer.as_ptr(),
                range,
            );
        }
    }

    /// Execute commands in an indirect command buffer.
    ///
    /// C++ equivalent: `void executeCommandsInBuffer(const MTL::IndirectCommandBuffer*, NS::Range)`
    pub fn execute_commands_in_buffer(
        &self,
        indirect_command_buffer: &crate::IndirectCommandBuffer,
        range_location: UInteger,
        range_length: UInteger,
    ) {
        unsafe {
            let range = mtl_foundation::Range::new(range_location, range_length);
            let _: () = msg_send_2(
                self.as_ptr(),
                sel!(executeCommandsInBuffer:withRange:),
                indirect_command_buffer.as_ptr(),
                range,
            );
        }
    }

    /// Execute commands in an indirect command buffer with indirect range.
    ///
    /// C++ equivalent: `void executeCommandsInBuffer(const MTL::IndirectCommandBuffer*, MTL::GPUAddress)`
    pub fn execute_commands_in_buffer_indirect(
        &self,
        indirect_command_buffer: &crate::IndirectCommandBuffer,
        indirect_range_buffer: u64,
    ) {
        unsafe {
            let _: () = msg_send_2(
                self.as_ptr(),
                sel!(executeCommandsInBuffer:indirectBuffer:),
                indirect_command_buffer.as_ptr(),
                indirect_range_buffer,
            );
        }
    }

    // ========== Stage Information ==========

    /// Get the stages supported by this encoder.
    ///
    /// C++ equivalent: `MTL::Stages stages()`
    pub fn stages(&self) -> crate::Stages {
        unsafe { msg_send_0(self.as_ptr(), sel!(stages)) }
    }
}

impl Clone for ComputeCommandEncoder {
    fn clone(&self) -> Self {
        unsafe {
            // Retain the Objective-C object so the new handle owns a reference;
            // it is balanced by the `release` in `Drop`.
            let _: *mut c_void = mtl_sys::msg_send_0(self.as_ptr(), mtl_sys::sel!(retain));
        }
        Self(self.0)
    }
}

impl Drop for ComputeCommandEncoder {
    fn drop(&mut self) {
        unsafe {
            // Release the reference taken when this handle was created or cloned.
            mtl_sys::msg_send_0::<()>(self.as_ptr(), mtl_sys::sel!(release));
        }
    }
}

impl Referencing for ComputeCommandEncoder {
    #[inline]
    fn as_ptr(&self) -> *const c_void {
        self.0.as_ptr()
    }
}

unsafe impl Send for ComputeCommandEncoder {}
unsafe impl Sync for ComputeCommandEncoder {}

impl std::fmt::Debug for ComputeCommandEncoder {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ComputeCommandEncoder")
            .field("label", &self.label())
            .finish()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_compute_command_encoder_size() {
        assert_eq!(
            std::mem::size_of::<ComputeCommandEncoder>(),
            std::mem::size_of::<*mut c_void>()
        );
    }
}