
mtl_gpu/io/mod.rs

//! Metal IO command types.
//!
//! Corresponds to `Metal/MTLIOCommandQueue.hpp`, `Metal/MTLIOCommandBuffer.hpp`,
//! and `Metal/MTLIOCompressor.hpp`.
//!
//! IO commands provide asynchronous file loading directly to GPU buffers and textures.
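//!
//! # Example
//!
//! A sketch of the compression helpers defined in this module. The
//! `IOCompressionMethod::ZLIB` constant is an assumption (substitute whichever
//! variant this crate's `enums` module defines), and the paths assume this
//! module is exported as `mtl_gpu::io`.
//!
//! ```no_run
//! use mtl_gpu::enums::device::IOCompressionMethod;
//! use mtl_gpu::io::{
//!     io_compression_context_append_data, io_compression_context_default_chunk_size,
//!     io_create_compression_context, io_flush_and_destroy_compression_context,
//! };
//!
//! // Create a context that writes a compressed archive to disk.
//! let chunk_size = io_compression_context_default_chunk_size();
//! let ctx =
//!     io_create_compression_context("/tmp/asset.bin.lz", IOCompressionMethod::ZLIB, chunk_size)
//!         .expect("failed to create compression context");
//!
//! // Append raw bytes; the context buffers and compresses them in `chunk_size` chunks.
//! let payload = vec![0u8; 4096];
//! // Safety: `ctx` is valid and `payload` spans `payload.len()` readable bytes.
//! unsafe { io_compression_context_append_data(ctx, payload.as_ptr().cast(), payload.len()) };
//!
//! // Flush remaining data and release the context; `ctx` must not be reused afterwards.
//! let _status = io_flush_and_destroy_compression_context(ctx);
//! ```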

use std::ffi::c_void;

use crate::enums::IOCompressionStatus;
use crate::enums::device::IOCompressionMethod;

mod command_buffer;
mod command_queue;
mod command_queue_descriptor;
mod file_handle;
mod scratch_buffer;
mod scratch_buffer_allocator;

pub use command_buffer::IOCommandBuffer;
pub use command_queue::IOCommandQueue;
pub use command_queue_descriptor::IOCommandQueueDescriptor;
pub use file_handle::IOFileHandle;
pub use scratch_buffer::IOScratchBuffer;
pub use scratch_buffer_allocator::IOScratchBufferAllocator;

// ============================================================================
// IO Compression Functions
// ============================================================================

/// Opaque compression context handle.
pub type IOCompressionContext = *mut c_void;

#[link(name = "Metal", kind = "framework")]
unsafe extern "C" {
    fn MTLIOCompressionContextDefaultChunkSize() -> usize;
    fn MTLIOCreateCompressionContext(
        path: *const std::ffi::c_char,
        compression_type: IOCompressionMethod,
        chunk_size: usize,
    ) -> IOCompressionContext;
    fn MTLIOCompressionContextAppendData(
        context: IOCompressionContext,
        data: *const c_void,
        size: usize,
    );
    fn MTLIOFlushAndDestroyCompressionContext(context: IOCompressionContext)
    -> IOCompressionStatus;
}

/// Get the default chunk size for IO compression.
///
/// C++ equivalent: `size_t IOCompressionContextDefaultChunkSize()`
#[inline]
pub fn io_compression_context_default_chunk_size() -> usize {
    unsafe { MTLIOCompressionContextDefaultChunkSize() }
}

/// Create a compression context for writing compressed data to a file.
///
/// C++ equivalent: `IOCompressionContext IOCreateCompressionContext(const char*, IOCompressionMethod, size_t)`
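///
/// # Example
///
/// A minimal sketch, assuming a `ZLIB` variant on `IOCompressionMethod`
/// (substitute whichever variant this crate defines):
///
/// ```no_run
/// use mtl_gpu::enums::device::IOCompressionMethod;
/// use mtl_gpu::io::{io_compression_context_default_chunk_size, io_create_compression_context};
///
/// let ctx = io_create_compression_context(
///     "/tmp/textures.bin.lz",
///     IOCompressionMethod::ZLIB,
///     io_compression_context_default_chunk_size(),
/// )
/// .expect("failed to create compression context");
/// ```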
pub fn io_create_compression_context(
    path: &str,
    compression_type: IOCompressionMethod,
    chunk_size: usize,
) -> Option<IOCompressionContext> {
    let c_path = std::ffi::CString::new(path).ok()?;
    let ctx =
        unsafe { MTLIOCreateCompressionContext(c_path.as_ptr(), compression_type, chunk_size) };
    if ctx.is_null() { None } else { Some(ctx) }
}

/// Append data to a compression context.
///
/// # Safety
///
/// The context must be valid, and data must point to at least `size` bytes.
///
/// C++ equivalent: `void IOCompressionContextAppendData(IOCompressionContext, const void*, size_t)`
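///
/// # Example
///
/// A minimal sketch of upholding the safety contract with a byte slice. The
/// hidden setup assumes a `ZLIB` variant on `IOCompressionMethod`:
///
/// ```no_run
/// # use mtl_gpu::enums::device::IOCompressionMethod;
/// # use mtl_gpu::io::{io_compression_context_default_chunk_size, io_create_compression_context};
/// use mtl_gpu::io::io_compression_context_append_data;
/// # let ctx = io_create_compression_context(
/// #     "/tmp/out.lz",
/// #     IOCompressionMethod::ZLIB,
/// #     io_compression_context_default_chunk_size(),
/// # )
/// # .expect("failed to create compression context");
/// let bytes: &[u8] = &[1, 2, 3, 4];
/// // Safety: `ctx` is a live context and `bytes` covers `bytes.len()` readable bytes.
/// unsafe { io_compression_context_append_data(ctx, bytes.as_ptr().cast(), bytes.len()) };
/// ```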
pub unsafe fn io_compression_context_append_data(
    context: IOCompressionContext,
    data: *const c_void,
    size: usize,
) {
    unsafe {
        MTLIOCompressionContextAppendData(context, data, size);
    }
}

/// Flush and destroy a compression context.
///
/// C++ equivalent: `IOCompressionStatus IOFlushAndDestroyCompressionContext(IOCompressionContext)`
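///
/// # Example
///
/// A minimal sketch of tearing down a context once all data has been appended.
/// The hidden setup assumes a `ZLIB` variant on `IOCompressionMethod`:
///
/// ```no_run
/// # use mtl_gpu::enums::device::IOCompressionMethod;
/// # use mtl_gpu::io::{io_compression_context_default_chunk_size, io_create_compression_context};
/// use mtl_gpu::io::io_flush_and_destroy_compression_context;
/// # let ctx = io_create_compression_context(
/// #     "/tmp/out.lz",
/// #     IOCompressionMethod::ZLIB,
/// #     io_compression_context_default_chunk_size(),
/// # )
/// # .expect("failed to create compression context");
/// // Writes any buffered data, closes the file, and frees the context.
/// let _status = io_flush_and_destroy_compression_context(ctx);
/// // `ctx` must not be used after this call; inspect the returned status to confirm success.
/// ```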
pub fn io_flush_and_destroy_compression_context(
    context: IOCompressionContext,
) -> IOCompressionStatus {
    unsafe { MTLIOFlushAndDestroyCompressionContext(context) }
}

// ============================================================================
// Tests
// ============================================================================

#[cfg(test)]
mod tests {
    use super::*;
    use crate::enums::{IOCommandQueueType, IOPriority};

    #[test]
    fn test_io_command_queue_descriptor_creation() {
        let descriptor = IOCommandQueueDescriptor::new();
        assert!(descriptor.is_some());
    }

    #[test]
    fn test_io_command_queue_descriptor_properties() {
        let descriptor = IOCommandQueueDescriptor::new().unwrap();

        descriptor.set_priority(IOPriority::HIGH);
        assert_eq!(descriptor.priority(), IOPriority::HIGH);

        descriptor.set_queue_type(IOCommandQueueType::SERIAL);
        assert_eq!(descriptor.queue_type(), IOCommandQueueType::SERIAL);

        descriptor.set_max_command_buffer_count(4);
        assert_eq!(descriptor.max_command_buffer_count(), 4);

        descriptor.set_max_commands_in_flight(8);
        assert_eq!(descriptor.max_commands_in_flight(), 8);
    }

    #[test]
    fn test_io_file_handle_size() {
        assert_eq!(
            std::mem::size_of::<IOFileHandle>(),
            std::mem::size_of::<*mut c_void>()
        );
    }

    #[test]
    fn test_io_command_queue_size() {
        assert_eq!(
            std::mem::size_of::<IOCommandQueue>(),
            std::mem::size_of::<*mut c_void>()
        );
    }

    #[test]
    fn test_io_command_buffer_size() {
        assert_eq!(
            std::mem::size_of::<IOCommandBuffer>(),
            std::mem::size_of::<*mut c_void>()
        );
    }

    #[test]
    fn test_io_compression_default_chunk_size() {
        let size = io_compression_context_default_chunk_size();
        // Should be a reasonable value (typically 64KB or similar)
        assert!(size > 0);
    }
}