diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2026-04-24 13:23:50 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2026-04-24 13:23:50 -0700 |
| commit | cb4eb6771c0f8fd1c52a8f6fdec7762fb087380a (patch) | |
| tree | c50311c87c0d464bfbba34e5c3b8699897fb1b65 /rust/kernel | |
| parent | b2680ba4a2ad259c7bbd856ed830b459e11d88ba (diff) | |
| parent | 1c0220a61508d67a09a6e71eb09593a8aea61822 (diff) | |
Merge tag 'char-misc-7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char / misc / IIO / and others driver updates from Greg KH:
"Here is the char/misc/iio and other smaller driver subsystem updates
for 7.1-rc1. Lots of stuff in here, all tiny, but relevant for the
different drivers they touch. Major points in here are:
- the usual large set of new IIO drivers and updates for that
subsystem (the large majority of this diffstat)
- lots of comedi driver updates and bugfixes
- coresight driver updates
- interconnect driver updates and additions
- mei driver updates
- binder (both rust and C versions) updates and fixes
- lots of other smaller driver subsystem updates and additions
All of these have been in linux-next for a while with no reported
issues"
* tag 'char-misc-7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (405 commits)
coresight: tpdm: fix invalid MMIO access issue
mei: me: add nova lake point H DID
mei: lb: add late binding version 2
mei: bus: add mei_cldev_uuid
w1: ds2490: drop redundant device reference
bus: mhi: host: pci_generic: Add Telit FE912C04 modem support
mei: csc: wake device while reading firmware status
mei: csc: support controller with separate PCI device
mei: convert PCI error to common errno
mei: trace: print return value of pci_cfg_read
mei: me: move trace into firmware status read
mei: fix idle print specifiers
mei: me: use PCI_DEVICE_DATA macro
sonypi: Convert ACPI driver to a platform one
misc: apds990x: fix all kernel-doc warnings
most: usb: Use kzalloc_objs for endpoint address array
hpet: Convert ACPI driver to a platform one
misc: vmw_vmci: Fix spelling mistakes in comments
parport: Remove completed item from to-do list
char: remove unnecessary module_init/exit functions
...
Diffstat (limited to 'rust/kernel')
| -rw-r--r-- | rust/kernel/alloc/kvec.rs | 216 | ||||
| -rw-r--r-- | rust/kernel/sync/aref.rs | 22 | ||||
| -rw-r--r-- | rust/kernel/task.rs | 9 | ||||
| -rw-r--r-- | rust/kernel/uaccess.rs | 2 |
4 files changed, 247 insertions, 2 deletions
diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs index ac8d6f763ae8..6438385e4322 100644 --- a/rust/kernel/alloc/kvec.rs +++ b/rust/kernel/alloc/kvec.rs @@ -9,7 +9,10 @@ use super::{ }; use crate::{ fmt, - page::AsPageIter, // + page::{ + AsPageIter, + PAGE_SIZE, // + }, }; use core::{ borrow::{Borrow, BorrowMut}, @@ -734,6 +737,115 @@ where self.truncate(num_kept); } } +// TODO: This is a temporary KVVec-specific implementation. It should be replaced with a generic +// `shrink_to()` for `impl<T, A: Allocator> Vec<T, A>` that uses `A::realloc()` once the +// underlying allocators properly support shrinking via realloc. +impl<T> Vec<T, KVmalloc> { + /// Shrinks the capacity of the vector with a lower bound. + /// + /// The capacity will remain at least as large as both the length and the supplied value. + /// If the current capacity is less than the lower limit, this is a no-op. + /// + /// For `kmalloc` allocations, this delegates to `realloc()`, which decides whether + /// shrinking is worthwhile. For `vmalloc` allocations, shrinking only occurs if the + /// operation would free at least one page of memory, and performs a deep copy since + /// `vrealloc` does not yet support in-place shrinking. + /// + /// # Examples + /// + /// ``` + /// // Allocate enough capacity to span multiple pages. 
+ /// let elements_per_page = kernel::page::PAGE_SIZE / core::mem::size_of::<u32>(); + /// let mut v = KVVec::with_capacity(elements_per_page * 4, GFP_KERNEL)?; + /// v.push(1, GFP_KERNEL)?; + /// v.push(2, GFP_KERNEL)?; + /// + /// v.shrink_to(0, GFP_KERNEL)?; + /// # Ok::<(), Error>(()) + /// ``` + pub fn shrink_to(&mut self, min_capacity: usize, flags: Flags) -> Result<(), AllocError> { + let target_cap = core::cmp::max(self.len(), min_capacity); + + if self.capacity() <= target_cap { + return Ok(()); + } + + if Self::is_zst() { + return Ok(()); + } + + // For kmalloc allocations, delegate to realloc() and let the allocator decide + // whether shrinking is worthwhile. + // + // SAFETY: `self.ptr` points to a valid `KVmalloc` allocation. + if !unsafe { bindings::is_vmalloc_addr(self.ptr.as_ptr().cast()) } { + let new_layout = ArrayLayout::<T>::new(target_cap).map_err(|_| AllocError)?; + + // SAFETY: + // - `self.ptr` is valid and was previously allocated with `KVmalloc`. + // - `self.layout` matches the `ArrayLayout` of the preceding allocation. + let ptr = unsafe { + KVmalloc::realloc( + Some(self.ptr.cast()), + new_layout.into(), + self.layout.into(), + flags, + NumaNode::NO_NODE, + )? + }; + + self.ptr = ptr.cast(); + self.layout = new_layout; + return Ok(()); + } + + // Only shrink if we would free at least one page. + let current_size = self.capacity() * core::mem::size_of::<T>(); + let target_size = target_cap * core::mem::size_of::<T>(); + let current_pages = current_size.div_ceil(PAGE_SIZE); + let target_pages = target_size.div_ceil(PAGE_SIZE); + + if current_pages <= target_pages { + return Ok(()); + } + + if target_cap == 0 { + if !self.layout.is_empty() { + // SAFETY: + // - `self.ptr` was previously allocated with `KVmalloc`. + // - `self.layout` matches the `ArrayLayout` of the preceding allocation. 
+ unsafe { KVmalloc::free(self.ptr.cast(), self.layout.into()) }; + } + self.ptr = NonNull::dangling(); + self.layout = ArrayLayout::empty(); + return Ok(()); + } + + // SAFETY: `target_cap <= self.capacity()` and original capacity was valid. + let new_layout = unsafe { ArrayLayout::<T>::new_unchecked(target_cap) }; + + let new_ptr = KVmalloc::alloc(new_layout.into(), flags, NumaNode::NO_NODE)?; + + // SAFETY: + // - `self.as_ptr()` is valid for reads of `self.len()` elements of `T`. + // - `new_ptr` is valid for writes of at least `target_cap >= self.len()` elements. + // - The two allocations do not overlap since `new_ptr` is freshly allocated. + // - Both pointers are properly aligned for `T`. + unsafe { + ptr::copy_nonoverlapping(self.as_ptr(), new_ptr.as_ptr().cast::<T>(), self.len()) + }; + + // SAFETY: + // - `self.ptr` was previously allocated with `KVmalloc`. + // - `self.layout` matches the `ArrayLayout` of the preceding allocation. + unsafe { KVmalloc::free(self.ptr.cast(), self.layout.into()) }; + + self.ptr = new_ptr.cast::<T>(); + self.layout = new_layout; + + Ok(()) + } +} impl<T: Clone, A: Allocator> Vec<T, A> { /// Extend the vector by `n` clones of `value`. @@ -1398,4 +1510,106 @@ mod tests { func.push_within_capacity(false).unwrap(); } } + + #[test] + fn test_kvvec_shrink_to() { + use crate::page::PAGE_SIZE; + + // Create a vector with capacity spanning multiple pages. + let mut v = KVVec::<u8>::with_capacity(PAGE_SIZE * 4, GFP_KERNEL).unwrap(); + + // Add a few elements. + v.push(1, GFP_KERNEL).unwrap(); + v.push(2, GFP_KERNEL).unwrap(); + v.push(3, GFP_KERNEL).unwrap(); + + let initial_capacity = v.capacity(); + assert!(initial_capacity >= PAGE_SIZE * 4); + + // Shrink to a capacity that would free at least one page. + v.shrink_to(PAGE_SIZE, GFP_KERNEL).unwrap(); + + // Capacity should have been reduced. + assert!(v.capacity() < initial_capacity); + assert!(v.capacity() >= PAGE_SIZE); + + // Elements should be preserved. 
+ assert_eq!(v.len(), 3); + assert_eq!(v[0], 1); + assert_eq!(v[1], 2); + assert_eq!(v[2], 3); + + // Shrink to zero (should shrink to len). + v.shrink_to(0, GFP_KERNEL).unwrap(); + + // Capacity should be at least the length. + assert!(v.capacity() >= v.len()); + + // Elements should still be preserved. + assert_eq!(v.len(), 3); + assert_eq!(v[0], 1); + assert_eq!(v[1], 2); + assert_eq!(v[2], 3); + } + + #[test] + fn test_kvvec_shrink_to_empty() { + use crate::page::PAGE_SIZE; + + // Create a vector with large capacity but no elements. + let mut v = KVVec::<u8>::with_capacity(PAGE_SIZE * 4, GFP_KERNEL).unwrap(); + + assert!(v.is_empty()); + + // Shrink empty vector to zero. + v.shrink_to(0, GFP_KERNEL).unwrap(); + + // Should have freed the allocation. + assert_eq!(v.capacity(), 0); + assert!(v.is_empty()); + } + + #[test] + fn test_kvvec_shrink_to_no_op() { + use crate::page::PAGE_SIZE; + + // Create a small vector. + let mut v = KVVec::<u8>::with_capacity(PAGE_SIZE, GFP_KERNEL).unwrap(); + v.push(1, GFP_KERNEL).unwrap(); + + let capacity_before = v.capacity(); + + // Try to shrink to a capacity larger than current - should be no-op. + v.shrink_to(capacity_before + 100, GFP_KERNEL).unwrap(); + + assert_eq!(v.capacity(), capacity_before); + assert_eq!(v.len(), 1); + assert_eq!(v[0], 1); + } + + #[test] + fn test_kvvec_shrink_to_respects_min_capacity() { + use crate::page::PAGE_SIZE; + + // Create a vector with large capacity. + let mut v = KVVec::<u8>::with_capacity(PAGE_SIZE * 4, GFP_KERNEL).unwrap(); + + // Add some elements. + for i in 0..10u8 { + v.push(i, GFP_KERNEL).unwrap(); + } + + // Shrink to a min_capacity larger than length. + let min_cap = PAGE_SIZE * 2; + v.shrink_to(min_cap, GFP_KERNEL).unwrap(); + + // Capacity should be at least min_capacity. + assert!(v.capacity() >= min_cap); + + // All elements preserved. 
+ assert_eq!(v.len(), 10); + for i in 0..10u8 { + assert_eq!(v[i as usize], i); + } + } } diff --git a/rust/kernel/sync/aref.rs b/rust/kernel/sync/aref.rs index 0616c0353c2b..9989f56d0605 100644 --- a/rust/kernel/sync/aref.rs +++ b/rust/kernel/sync/aref.rs @@ -170,3 +170,25 @@ impl<T: AlwaysRefCounted> Drop for ARef<T> { unsafe { T::dec_ref(self.ptr) }; } } + +impl<T, U> PartialEq<ARef<U>> for ARef<T> +where + T: AlwaysRefCounted + PartialEq<U>, + U: AlwaysRefCounted, +{ + #[inline] + fn eq(&self, other: &ARef<U>) -> bool { + T::eq(&**self, &**other) + } +} +impl<T: AlwaysRefCounted + Eq> Eq for ARef<T> {} + +impl<T, U> PartialEq<&'_ U> for ARef<T> +where + T: AlwaysRefCounted + PartialEq<U>, +{ + #[inline] + fn eq(&self, other: &&U) -> bool { + T::eq(&**self, other) + } +} diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs index 049c8a4d45d8..38273f4eedb5 100644 --- a/rust/kernel/task.rs +++ b/rust/kernel/task.rs @@ -361,6 +361,15 @@ unsafe impl crate::sync::aref::AlwaysRefCounted for Task { } } +impl PartialEq for Task { + #[inline] + fn eq(&self, other: &Self) -> bool { + ptr::eq(self.as_ptr(), other.as_ptr()) + } +} + +impl Eq for Task {} + impl Kuid { /// Get the current euid. #[inline] diff --git a/rust/kernel/uaccess.rs b/rust/kernel/uaccess.rs index 6c9c1cce3c63..5f6c4d7a1a51 100644 --- a/rust/kernel/uaccess.rs +++ b/rust/kernel/uaccess.rs @@ -21,7 +21,7 @@ use core::mem::{size_of, MaybeUninit}; /// /// This is the Rust equivalent to C pointers tagged with `__user`. #[repr(transparent)] -#[derive(Copy, Clone)] +#[derive(Copy, Clone, Zeroable)] pub struct UserPtr(*mut c_void); impl UserPtr { |
