Skip to content

Commit 0f4fa00

Browse files
hoshinolinamarcan
authored and committed
rust: drm: mm: Add DRM MM Range Allocator abstraction
drm_mm provides a simple range allocator, useful for managing virtual address ranges. Add a Rust abstraction to expose this module to Rust drivers. Signed-off-by: Asahi Lina <lina@asahilina.net>
1 parent a2faec5 commit 0f4fa00

2 files changed

Lines changed: 300 additions & 0 deletions

File tree

rust/kernel/drm/mm.rs

Lines changed: 299 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,299 @@
1+
// SPDX-License-Identifier: GPL-2.0 OR MIT
2+
3+
//! DRM MM range allocator
4+
//!
5+
//! C header: [`include/drm/drm_mm.h`](../../../../include/drm/drm_mm.h)
6+
7+
use crate::{
8+
bindings,
9+
error::{to_result, Result},
10+
sync::{Arc, Mutex, UniqueArc},
11+
types::Opaque,
12+
};
13+
14+
use crate::init::InPlaceInit;
15+
use alloc::boxed::Box;
16+
17+
use core::{
18+
marker::{PhantomData, PhantomPinned},
19+
ops::Deref,
20+
pin::Pin,
21+
};
22+
23+
/// Type alias representing a DRM MM node.
24+
pub type Node<A, T> = Pin<Box<NodeData<A, T>>>;
25+
26+
/// Implemented by the user-supplied inner allocator state type.
///
/// Gives the user a hook into node lifecycle events while the allocator lock
/// is held.
pub trait AllocInner<T> {
    /// Notification that a node was dropped from the allocator.
    fn drop_object(&mut self, _start: u64, _size: u64, _color: usize, _object: &mut T) {}
}

// The unit type serves as a no-op inner state when none is needed.
impl<T> AllocInner<T> for () {}
33+
34+
/// Wrapper type for a `struct drm_mm` plus user AllocInner object.
35+
///
36+
/// # Invariants
37+
/// The `drm_mm` struct is valid and initialized.
38+
struct MmInner<A: AllocInner<T>, T>(Opaque<bindings::drm_mm>, A, PhantomData<T>);
39+
40+
/// Represents a single allocated node in the MM allocator
41+
pub struct NodeData<A: AllocInner<T>, T> {
42+
node: bindings::drm_mm_node,
43+
mm: Arc<Mutex<MmInner<A, T>>>,
44+
valid: bool,
45+
/// A drm_mm_node needs to be pinned because nodes reference each other in a linked list.
46+
_pin: PhantomPinned,
47+
inner: T,
48+
}
49+
50+
// SAFETY: Allocator ops take the mutex, and there are no mutable actions on the node.
51+
unsafe impl<A: Send + AllocInner<T>, T: Send> Send for NodeData<A, T> {}
52+
unsafe impl<A: Send + AllocInner<T>, T: Sync> Sync for NodeData<A, T> {}
53+
54+
/// Available MM node insertion modes
55+
#[repr(u32)]
56+
pub enum InsertMode {
57+
/// Search for the smallest hole (within the search range) that fits the desired node.
58+
///
59+
/// Allocates the node from the bottom of the found hole.
60+
Best = bindings::drm_mm_insert_mode_DRM_MM_INSERT_BEST,
61+
62+
/// Search for the lowest hole (address closest to 0, within the search range) that fits the
63+
/// desired node.
64+
///
65+
/// Allocates the node from the bottom of the found hole.
66+
Low = bindings::drm_mm_insert_mode_DRM_MM_INSERT_LOW,
67+
68+
/// Search for the highest hole (address closest to U64_MAX, within the search range) that fits
69+
/// the desired node.
70+
///
71+
/// Allocates the node from the top of the found hole. The specified alignment for the node is
72+
/// applied to the base of the node (`Node.start()`).
73+
High = bindings::drm_mm_insert_mode_DRM_MM_INSERT_HIGH,
74+
75+
/// Search for the most recently evicted hole (within the search range) that fits the desired
76+
/// node. This is appropriate for use immediately after performing an eviction scan and removing
77+
/// the selected nodes to form a hole.
78+
///
79+
/// Allocates the node from the bottom of the found hole.
80+
Evict = bindings::drm_mm_insert_mode_DRM_MM_INSERT_EVICT,
81+
}
82+
83+
/// A clonable, interlocked reference to the allocator state.
84+
///
85+
/// This is useful to perform actions on the user-supplied `AllocInner<T>` type given just a Node,
86+
/// without immediately taking the lock.
87+
#[derive(Clone)]
88+
pub struct InnerRef<A: AllocInner<T>, T>(Arc<Mutex<MmInner<A, T>>>);
89+
90+
impl<A: AllocInner<T>, T> InnerRef<A, T> {
91+
/// Operate on the user `AllocInner<T>` implementation, taking the lock.
92+
pub fn with<RetVal>(&self, cb: impl FnOnce(&mut A) -> RetVal) -> RetVal {
93+
let mut l = self.0.lock();
94+
cb(&mut l.1)
95+
}
96+
}
97+
98+
impl<A: AllocInner<T>, T> NodeData<A, T> {
99+
/// Returns the color of the node (an opaque value)
100+
pub fn color(&self) -> usize {
101+
self.node.color as usize
102+
}
103+
104+
/// Returns the start address of the node
105+
pub fn start(&self) -> u64 {
106+
self.node.start
107+
}
108+
109+
/// Returns the size of the node in bytes
110+
pub fn size(&self) -> u64 {
111+
self.node.size
112+
}
113+
114+
/// Operate on the user `AllocInner<T>` implementation associated with this node's allocator.
115+
pub fn with_inner<RetVal>(&self, cb: impl FnOnce(&mut A) -> RetVal) -> RetVal {
116+
let mut l = self.mm.lock();
117+
cb(&mut l.1)
118+
}
119+
120+
/// Return a clonable, detached reference to the allocator inner data.
121+
pub fn alloc_ref(&self) -> InnerRef<A, T> {
122+
InnerRef(self.mm.clone())
123+
}
124+
125+
/// Return a mutable reference to the inner data.
126+
pub fn inner_mut(self: Pin<&mut Self>) -> &mut T {
127+
// SAFETY: This is okay because inner is not structural
128+
unsafe { &mut self.get_unchecked_mut().inner }
129+
}
130+
}
131+
132+
// Nodes dereference transparently to the user's inner payload.
impl<A: AllocInner<T>, T> Deref for NodeData<A, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
139+
140+
impl<A: AllocInner<T>, T> Drop for NodeData<A, T> {
141+
fn drop(&mut self) {
142+
if self.valid {
143+
let mut guard = self.mm.lock();
144+
145+
// Inform the user allocator that a node is being dropped.
146+
guard
147+
.1
148+
.drop_object(self.start(), self.size(), self.color(), &mut self.inner);
149+
// SAFETY: The MM lock is still taken, so we can safely remove the node.
150+
unsafe { bindings::drm_mm_remove_node(&mut self.node) };
151+
}
152+
}
153+
}
154+
155+
/// An instance of a DRM MM range allocator.
156+
pub struct Allocator<A: AllocInner<T>, T> {
157+
mm: Arc<Mutex<MmInner<A, T>>>,
158+
_p: PhantomData<T>,
159+
}
160+
161+
impl<A: AllocInner<T>, T> Allocator<A, T> {
162+
/// Create a new range allocator for the given start and size range of addresses.
163+
///
164+
/// The user may optionally provide an inner object representing allocator state, which will
165+
/// be protected by the same lock. If not required, `()` can be used.
166+
#[track_caller]
167+
pub fn new(start: u64, size: u64, inner: A) -> Result<Allocator<A, T>> {
168+
// SAFETY: We call `Mutex::init_lock` below.
169+
let mm = UniqueArc::pin_init(Mutex::new(MmInner(Opaque::uninit(), inner, PhantomData)))?;
170+
171+
unsafe {
172+
// SAFETY: The Opaque instance provides a valid pointer, and it is initialized after
173+
// this call.
174+
bindings::drm_mm_init(mm.lock().0.get(), start, size);
175+
}
176+
177+
Ok(Allocator {
178+
mm: mm.into(),
179+
_p: PhantomData,
180+
})
181+
}
182+
183+
/// Insert a new node into the allocator of a given size.
184+
///
185+
/// `node` is the user `T` type data to store into the node.
186+
pub fn insert_node(&mut self, node: T, size: u64) -> Result<Node<A, T>> {
187+
self.insert_node_generic(node, size, 0, 0, InsertMode::Best)
188+
}
189+
190+
/// Insert a new node into the allocator of a given size, with configurable alignment,
191+
/// color, and insertion mode.
192+
///
193+
/// `node` is the user `T` type data to store into the node.
194+
pub fn insert_node_generic(
195+
&mut self,
196+
node: T,
197+
size: u64,
198+
alignment: u64,
199+
color: usize,
200+
mode: InsertMode,
201+
) -> Result<Node<A, T>> {
202+
self.insert_node_in_range(node, size, alignment, color, 0, u64::MAX, mode)
203+
}
204+
205+
/// Insert a new node into the allocator of a given size, with configurable alignment,
206+
/// color, insertion mode, and sub-range to allocate from.
207+
///
208+
/// `node` is the user `T` type data to store into the node.
209+
#[allow(clippy::too_many_arguments)]
210+
pub fn insert_node_in_range(
211+
&mut self,
212+
node: T,
213+
size: u64,
214+
alignment: u64,
215+
color: usize,
216+
start: u64,
217+
end: u64,
218+
mode: InsertMode,
219+
) -> Result<Node<A, T>> {
220+
let mut mm_node = Box::try_new(NodeData {
221+
// SAFETY: This C struct should be zero-initialized.
222+
node: unsafe { core::mem::zeroed() },
223+
valid: false,
224+
inner: node,
225+
mm: self.mm.clone(),
226+
_pin: PhantomPinned,
227+
})?;
228+
229+
let guard = self.mm.lock();
230+
// SAFETY: We hold the lock and all pointers are valid.
231+
to_result(unsafe {
232+
bindings::drm_mm_insert_node_in_range(
233+
guard.0.get(),
234+
&mut mm_node.node,
235+
size,
236+
alignment,
237+
color as core::ffi::c_ulong,
238+
start,
239+
end,
240+
mode as u32,
241+
)
242+
})?;
243+
244+
mm_node.valid = true;
245+
246+
Ok(Pin::from(mm_node))
247+
}
248+
249+
/// Insert a node into the allocator at a fixed start address.
250+
///
251+
/// `node` is the user `T` type data to store into the node.
252+
pub fn reserve_node(
253+
&mut self,
254+
node: T,
255+
start: u64,
256+
size: u64,
257+
color: usize,
258+
) -> Result<Node<A, T>> {
259+
let mut mm_node = Box::try_new(NodeData {
260+
// SAFETY: This C struct should be zero-initialized.
261+
node: unsafe { core::mem::zeroed() },
262+
valid: false,
263+
inner: node,
264+
mm: self.mm.clone(),
265+
_pin: PhantomPinned,
266+
})?;
267+
268+
mm_node.node.start = start;
269+
mm_node.node.size = size;
270+
mm_node.node.color = color as core::ffi::c_ulong;
271+
272+
let guard = self.mm.lock();
273+
// SAFETY: We hold the lock and all pointers are valid.
274+
to_result(unsafe { bindings::drm_mm_reserve_node(guard.0.get(), &mut mm_node.node) })?;
275+
276+
mm_node.valid = true;
277+
278+
Ok(Pin::from(mm_node))
279+
}
280+
281+
/// Operate on the inner user type `A`, taking the allocator lock
282+
pub fn with_inner<RetVal>(&self, cb: impl FnOnce(&mut A) -> RetVal) -> RetVal {
283+
let mut guard = self.mm.lock();
284+
cb(&mut guard.1)
285+
}
286+
}
287+
288+
impl<A: AllocInner<T>, T> Drop for MmInner<A, T> {
289+
fn drop(&mut self) {
290+
// SAFETY: If the MmInner is dropped then all nodes are gone (since they hold references),
291+
// so it is safe to tear down the allocator.
292+
unsafe {
293+
bindings::drm_mm_takedown(self.0.get());
294+
}
295+
}
296+
}
297+
298+
// MmInner is safely Send if the AllocInner user type is Send.
299+
unsafe impl<A: Send + AllocInner<T>, T> Send for MmInner<A, T> {}

rust/kernel/drm/mod.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,3 +7,4 @@ pub mod drv;
77
pub mod file;
88
pub mod gem;
99
pub mod ioctl;
10+
pub mod mm;

0 commit comments

Comments
 (0)