diff --git a/examples/functions_and_traits.rs b/examples/functions_and_traits.rs
index dc8f73da4..56ec1f2e2 100644
--- a/examples/functions_and_traits.rs
+++ b/examples/functions_and_traits.rs
@@ -139,7 +139,7 @@ fn takes_rawref_mut<A, D>(arr: &mut RawRef<A, D>)
 /// Immutable, take a generic that implements `AsRef` to `RawRef`
 #[allow(dead_code)]
 fn takes_rawref_asref<A, D, T>(_arr: &T)
-where T: AsRef<RawRef<A, D>>
+where T: AsRef<RawRef<A, D>> + ?Sized
 {
     takes_layout(_arr.as_ref());
     takes_layout_asref(_arr.as_ref());
@@ -148,7 +148,7 @@ where T: AsRef<RawRef<A, D>>
 /// Mutable, take a generic that implements `AsMut` to `RawRef`
 #[allow(dead_code)]
 fn takes_rawref_asmut<A, D, T>(_arr: &mut T)
-where T: AsMut<RawRef<A, D>>
+where T: AsMut<RawRef<A, D>> + ?Sized
 {
     takes_layout_mut(_arr.as_mut());
     takes_layout_asmut(_arr.as_mut());
@@ -169,10 +169,16 @@ fn takes_layout_mut<A, D>(_arr: &mut LayoutRef<A, D>) {}
 
 /// Immutable, take a generic that implements `AsRef` to `LayoutRef`
 #[allow(dead_code)]
-fn takes_layout_asref<T: AsRef<LayoutRef<A, D>>, A, D>(_arr: &T) {}
+fn takes_layout_asref<A, D, T>(_arr: &T)
+where T: AsRef<LayoutRef<A, D>> + ?Sized
+{
+}
 
 /// Mutable, take a generic that implements `AsMut` to `LayoutRef`
 #[allow(dead_code)]
-fn takes_layout_asmut<T: AsMut<LayoutRef<A, D>>, A, D>(_arr: &mut T) {}
+fn takes_layout_asmut<A, D, T>(_arr: &mut T)
+where T: AsMut<LayoutRef<A, D>> + ?Sized
+{
+}
 
 fn main() {}
diff --git a/scripts/all-tests.sh b/scripts/all-tests.sh
index 4135ebeb8..f6c9b27a8 100755
--- a/scripts/all-tests.sh
+++ b/scripts/all-tests.sh
@@ -30,5 +30,8 @@ fi
 # Examples
 cargo nextest run --examples
 
+# Doc tests
+cargo test --doc
+
 # Benchmarks
 ([ "$CHANNEL" != "nightly" ] || cargo bench --no-run --verbose --features "$FEATURES")
diff --git a/src/arraytraits.rs b/src/arraytraits.rs
index a34b1985e..da87e3a58 100644
--- a/src/arraytraits.rs
+++ b/src/arraytraits.rs
@@ -19,7 +19,6 @@ use std::{iter::FromIterator, slice};
 use crate::imp_prelude::*;
 use crate::Arc;
 
-use crate::LayoutRef;
 use crate::{
     dimension,
     iter::{Iter, IterMut},
@@ -38,12 +37,14 @@ pub(crate) fn array_out_of_bounds() -> !
 }
 
 #[inline(always)]
-pub fn debug_bounds_check<A, D, I>(_a: &LayoutRef<A, D>, _index: &I)
+pub fn debug_bounds_check<A, D, T, I>(_a: &T, _index: &I)
 where
     D: Dimension,
     I: NdIndex<D>,
+    T: AsRef<LayoutRef<A, D>> + ?Sized,
 {
-    debug_bounds_check!(_a, *_index);
+    let _layout_ref = _a.as_ref();
+    debug_bounds_check_ref!(_layout_ref, *_index);
 }
 
 /// Access the element at **index**.
@@ -59,11 +60,11 @@ where
     #[inline]
     fn index(&self, index: I) -> &Self::Output
     {
-        debug_bounds_check!(self, index);
+        debug_bounds_check_ref!(self, index);
         unsafe {
-            &*self.ptr.as_ptr().offset(
+            &*self._ptr().as_ptr().offset(
                 index
-                    .index_checked(&self.dim, &self.strides)
+                    .index_checked(self._dim(), self._strides())
                     .unwrap_or_else(|| array_out_of_bounds()),
             )
         }
@@ -81,11 +82,11 @@ where
     #[inline]
     fn index_mut(&mut self, index: I) -> &mut A
     {
-        debug_bounds_check!(self, index);
+        debug_bounds_check_ref!(self, index);
         unsafe {
             &mut *self.as_mut_ptr().offset(
                 index
-                    .index_checked(&self.dim, &self.strides)
+                    .index_checked(self._dim(), self._strides())
                     .unwrap_or_else(|| array_out_of_bounds()),
             )
         }
@@ -581,7 +582,7 @@ where D: Dimension
     {
         let data = OwnedArcRepr(Arc::new(arr.data));
         // safe because: equivalent unmoved data, ptr and dims remain valid
-        unsafe { ArrayBase::from_data_ptr(data, arr.layout.ptr).with_strides_dim(arr.layout.strides, arr.layout.dim) }
+        unsafe { ArrayBase::from_data_ptr(data, arr.parts.ptr).with_strides_dim(arr.parts.strides, arr.parts.dim) }
     }
 }
 
diff --git a/src/data_traits.rs b/src/data_traits.rs
index 4266e4017..a0b33ea12 100644
--- a/src/data_traits.rs
+++ b/src/data_traits.rs
@@ -251,7 +251,7 @@ where A: Clone
         if Arc::get_mut(&mut self_.data.0).is_some() {
             return;
         }
-        if self_.layout.dim.size() <= self_.data.0.len() / 2 {
+        if self_.parts.dim.size() <= self_.data.0.len() / 2 {
             // Clone only the visible elements if the current view is less than
             // half of backing data.
             *self_ = self_.to_owned().into_shared();
@@ -260,13 +260,13 @@ where A: Clone
         let rcvec = &mut self_.data.0;
         let a_size = mem::size_of::<A>() as isize;
         let our_off = if a_size != 0 {
-            (self_.layout.ptr.as_ptr() as isize - rcvec.as_ptr() as isize) / a_size
+            (self_.parts.ptr.as_ptr() as isize - rcvec.as_ptr() as isize) / a_size
         } else {
             0
         };
         let rvec = Arc::make_mut(rcvec);
         unsafe {
-            self_.layout.ptr = rvec.as_nonnull_mut().offset(our_off);
+            self_.parts.ptr = rvec.as_nonnull_mut().offset(our_off);
         }
     }
 
@@ -287,7 +287,7 @@ unsafe impl<A> Data for OwnedArcRepr<A>
         let data = Arc::try_unwrap(self_.data.0).ok().unwrap();
         // safe because data is equivalent
         unsafe {
-            ArrayBase::from_data_ptr(data, self_.layout.ptr).with_strides_dim(self_.layout.strides, self_.layout.dim)
+            ArrayBase::from_data_ptr(data, self_.parts.ptr).with_strides_dim(self_.parts.strides, self_.parts.dim)
         }
     }
 
@@ -297,14 +297,14 @@ unsafe impl<A> Data for OwnedArcRepr<A>
         match Arc::try_unwrap(self_.data.0) {
             Ok(owned_data) => unsafe {
                 // Safe because the data is equivalent.
-                Ok(ArrayBase::from_data_ptr(owned_data, self_.layout.ptr)
-                    .with_strides_dim(self_.layout.strides, self_.layout.dim))
+                Ok(ArrayBase::from_data_ptr(owned_data, self_.parts.ptr)
+                    .with_strides_dim(self_.parts.strides, self_.parts.dim))
             },
             Err(arc_data) => unsafe {
                 // Safe because the data is equivalent; we're just
                 // reconstructing `self_`.
-                Err(ArrayBase::from_data_ptr(OwnedArcRepr(arc_data), self_.layout.ptr)
-                    .with_strides_dim(self_.layout.strides, self_.layout.dim))
+                Err(ArrayBase::from_data_ptr(OwnedArcRepr(arc_data), self_.parts.ptr)
+                    .with_strides_dim(self_.parts.strides, self_.parts.dim))
             },
         }
     }
@@ -603,9 +603,9 @@ where A: Clone
             CowRepr::View(_) => {
                 let owned = ArrayRef::to_owned(array);
                 array.data = CowRepr::Owned(owned.data);
-                array.layout.ptr = owned.layout.ptr;
-                array.layout.dim = owned.layout.dim;
-                array.layout.strides = owned.layout.strides;
+                array.parts.ptr = owned.parts.ptr;
+                array.parts.dim = owned.parts.dim;
+                array.parts.strides = owned.parts.strides;
             }
             CowRepr::Owned(_) => {}
         }
@@ -666,8 +666,7 @@ unsafe impl<'a, A> Data for CowRepr<'a, A>
             CowRepr::View(_) => self_.to_owned(),
             CowRepr::Owned(data) => unsafe {
                 // safe because the data is equivalent so ptr, dims remain valid
-                ArrayBase::from_data_ptr(data, self_.layout.ptr)
-                    .with_strides_dim(self_.layout.strides, self_.layout.dim)
+                ArrayBase::from_data_ptr(data, self_.parts.ptr).with_strides_dim(self_.parts.strides, self_.parts.dim)
             },
         }
     }
@@ -679,8 +678,8 @@ unsafe impl<'a, A> Data for CowRepr<'a, A>
             CowRepr::View(_) => Err(self_),
             CowRepr::Owned(data) => unsafe {
                 // safe because the data is equivalent so ptr, dims remain valid
-                Ok(ArrayBase::from_data_ptr(data, self_.layout.ptr)
-                    .with_strides_dim(self_.layout.strides, self_.layout.dim))
+                Ok(ArrayBase::from_data_ptr(data, self_.parts.ptr)
+                    .with_strides_dim(self_.parts.strides, self_.parts.dim))
             },
         }
     }
diff --git a/src/free_functions.rs b/src/free_functions.rs
index a2ad6137c..4ad69f2c3 100644
--- a/src/free_functions.rs
+++ b/src/free_functions.rs
@@ -16,7 +16,7 @@ use std::mem::{forget, size_of};
 use std::ptr::NonNull;
 
 use crate::{dimension, ArcArray1, ArcArray2};
-use crate::{imp_prelude::*, LayoutRef};
+use crate::{imp_prelude::*, ArrayPartsSized};
 
 /// Create an **[`Array`]** with one, two, three, four, five, or six dimensions.
 ///
@@ -109,12 +109,12 @@ pub const fn aview0<A>(x: &A) -> ArrayView0<'_, A>
 {
     ArrayBase {
         data: ViewRepr::new(),
-        layout: LayoutRef {
+        parts: ArrayPartsSized::new(
             // Safe because references are always non-null.
-            ptr: unsafe { NonNull::new_unchecked(x as *const A as *mut A) },
-            dim: Ix0(),
-            strides: Ix0(),
-        },
+            unsafe { NonNull::new_unchecked(x as *const A as *mut A) },
+            Ix0(),
+            Ix0(),
+        ),
     }
 }
 
@@ -149,12 +149,12 @@ pub const fn aview1<A>(xs: &[A]) -> ArrayView1<'_, A>
     }
     ArrayBase {
         data: ViewRepr::new(),
-        layout: LayoutRef {
+        parts: ArrayPartsSized::new(
             // Safe because references are always non-null.
-            ptr: unsafe { NonNull::new_unchecked(xs.as_ptr() as *mut A) },
-            dim: Ix1(xs.len()),
-            strides: Ix1(1),
-        },
+            unsafe { NonNull::new_unchecked(xs.as_ptr() as *mut A) },
+            Ix1(xs.len()),
+            Ix1(1),
+        ),
     }
 }
 
@@ -207,7 +207,7 @@ pub const fn aview2<A, const N: usize>(xs: &[[A; N]]) -> ArrayView2<'_, A>
     };
     ArrayBase {
         data: ViewRepr::new(),
-        layout: LayoutRef { ptr, dim, strides },
+        parts: ArrayPartsSized::new(ptr, dim, strides),
     }
 }
 
diff --git a/src/impl_clone.rs b/src/impl_clone.rs
index 402437941..bef783bd8 100644
--- a/src/impl_clone.rs
+++ b/src/impl_clone.rs
@@ -7,7 +7,7 @@
 // except according to those terms.
 
 use crate::imp_prelude::*;
-use crate::LayoutRef;
+use crate::ArrayPartsSized;
 use crate::RawDataClone;
 
 impl<S: RawDataClone, D: Clone> Clone for ArrayBase<S, D>
@@ -16,14 +16,10 @@ impl<S: RawDataClone, D: Clone> Clone for ArrayBase<S, D>
     {
         // safe because `clone_with_ptr` promises to provide equivalent data and ptr
         unsafe {
-            let (data, ptr) = self.data.clone_with_ptr(self.layout.ptr);
+            let (data, ptr) = self.data.clone_with_ptr(self.parts.ptr);
             ArrayBase {
                 data,
-                layout: LayoutRef {
-                    ptr,
-                    dim: self.layout.dim.clone(),
-                    strides: self.layout.strides.clone(),
-                },
+                parts: ArrayPartsSized::new(ptr, self.parts.dim.clone(), self.parts.strides.clone()),
             }
         }
     }
@@ -34,9 +30,9 @@ impl<S: RawDataClone, D: Clone> Clone for ArrayBase<S, D>
     fn clone_from(&mut self, other: &Self)
     {
         unsafe {
-            self.layout.ptr = self.data.clone_from_with_ptr(&other.data, other.layout.ptr);
-            self.layout.dim.clone_from(&other.layout.dim);
-            self.layout.strides.clone_from(&other.layout.strides);
+            self.parts.ptr = self.data.clone_from_with_ptr(&other.data, other.parts.ptr);
+            self.parts.dim.clone_from(&other.parts.dim);
+            self.parts.strides.clone_from(&other.parts.strides);
         }
     }
 }
diff --git a/src/impl_cow.rs b/src/impl_cow.rs
index 0ecc3c44b..1a28996d6 100644
--- a/src/impl_cow.rs
+++ b/src/impl_cow.rs
@@ -34,8 +34,8 @@ where D: Dimension
     {
         // safe because equivalent data
         unsafe {
-            ArrayBase::from_data_ptr(CowRepr::View(view.data), view.ptr)
-                .with_strides_dim(view.layout.strides, view.layout.dim)
+            ArrayBase::from_data_ptr(CowRepr::View(view.data), view.parts.ptr)
+                .with_strides_dim(view.parts.strides, view.parts.dim)
         }
     }
 }
@@ -47,8 +47,8 @@ where D: Dimension
     {
         // safe because equivalent data
         unsafe {
-            ArrayBase::from_data_ptr(CowRepr::Owned(array.data), array.layout.ptr)
-                .with_strides_dim(array.layout.strides, array.layout.dim)
+            ArrayBase::from_data_ptr(CowRepr::Owned(array.data), array.parts.ptr)
+                .with_strides_dim(array.parts.strides, array.parts.dim)
         }
     }
 }
diff --git a/src/impl_dyn.rs b/src/impl_dyn.rs
index 409fe991a..404f3d4b6 100644
--- a/src/impl_dyn.rs
+++ b/src/impl_dyn.rs
@@ -31,8 +31,8 @@ impl<A> LayoutRef<A, IxDyn>
     pub fn insert_axis_inplace(&mut self, axis: Axis)
     {
         assert!(axis.index() <= self.ndim());
-        self.dim = self.dim.insert_axis(axis);
-        self.strides = self.strides.insert_axis(axis);
+        self.0.dim = self._dim().insert_axis(axis);
+        self.0.strides = self._strides().insert_axis(axis);
     }
 
     /// Collapses the array to `index` along the axis and removes the axis,
@@ -54,8 +54,8 @@ impl<A> LayoutRef<A, IxDyn>
     pub fn index_axis_inplace(&mut self, axis: Axis, index: usize)
     {
         self.collapse_axis(axis, index);
-        self.dim = self.dim.remove_axis(axis);
-        self.strides = self.strides.remove_axis(axis);
+        self.0.dim = self._dim().remove_axis(axis);
+        self.0.strides = self._strides().remove_axis(axis);
     }
 }
 
diff --git a/src/impl_internal_constructors.rs b/src/impl_internal_constructors.rs
index 7f95339d5..ef2964fff 100644
--- a/src/impl_internal_constructors.rs
+++ b/src/impl_internal_constructors.rs
@@ -8,7 +8,7 @@
 
 use std::ptr::NonNull;
 
-use crate::{imp_prelude::*, LayoutRef};
+use crate::{imp_prelude::*, ArrayPartsSized};
 
 // internal "builder-like" methods
 impl<A, S> ArrayBase<S, Ix1>
@@ -27,11 +27,7 @@ where S: RawData<Elem = A>
     {
         let array = ArrayBase {
             data,
-            layout: LayoutRef {
-                ptr,
-                dim: Ix1(0),
-                strides: Ix1(1),
-            },
+            parts: ArrayPartsSized::new(ptr, Ix1(0), Ix1(1)),
         };
         debug_assert!(array.pointer_is_inbounds());
         array
@@ -60,11 +56,7 @@ where
         debug_assert_eq!(strides.ndim(), dim.ndim());
         ArrayBase {
             data: self.data,
-            layout: LayoutRef {
-                ptr: self.layout.ptr,
-                dim,
-                strides,
-            },
+            parts: ArrayPartsSized::new(self.parts.ptr, dim, strides),
         }
     }
 }
diff --git a/src/impl_methods.rs b/src/impl_methods.rs
index 453cc05b3..2170a8d93 100644
--- a/src/impl_methods.rs
+++ b/src/impl_methods.rs
@@ -70,7 +70,7 @@ impl<A, D> LayoutRef<A, D>
     /// Return the total number of elements in the array.
     pub fn len(&self) -> usize
     {
-        self.dim.size()
+        self._dim().size()
     }
 
     /// Return the length of `axis`.
@@ -82,7 +82,7 @@ impl<A, D> LayoutRef<A, D>
     #[track_caller]
     pub fn len_of(&self, axis: Axis) -> usize
     {
-        self.dim[axis.index()]
+        self._dim()[axis.index()]
     }
 
     /// Return whether the array has any elements
@@ -94,7 +94,7 @@ impl<A, D> LayoutRef<A, D>
     /// Return the number of dimensions (axes) in the array
     pub fn ndim(&self) -> usize
     {
-        self.dim.ndim()
+        self._dim().ndim()
     }
 
     /// Return the shape of the array in its “pattern” form,
@@ -102,7 +102,7 @@ impl<A, D> LayoutRef<A, D>
     /// and so on.
     pub fn dim(&self) -> D::Pattern
     {
-        self.dim.clone().into_pattern()
+        self._dim().clone().into_pattern()
     }
 
     /// Return the shape of the array as it's stored in the array.
@@ -121,7 +121,7 @@ impl<A, D> LayoutRef<A, D>
     /// ```
     pub fn raw_dim(&self) -> D
     {
-        self.dim.clone()
+        self._dim().clone()
     }
 
     /// Return the shape of the array as a slice.
@@ -150,13 +150,13 @@ impl<A, D> LayoutRef<A, D>
     /// ```
     pub fn shape(&self) -> &[usize]
     {
-        self.dim.slice()
+        self._dim().slice()
     }
 
     /// Return the strides of the array as a slice.
     pub fn strides(&self) -> &[isize]
     {
-        let s = self.strides.slice();
+        let s = self._strides().slice();
         // reinterpret unsigned integer as signed
         unsafe { slice::from_raw_parts(s.as_ptr() as *const _, s.len()) }
     }
@@ -171,7 +171,7 @@ impl<A, D> LayoutRef<A, D>
     pub fn stride_of(&self, axis: Axis) -> isize
     {
         // strides are reinterpreted as isize
-        self.strides[axis.index()] as isize
+        self._strides()[axis.index()] as isize
     }
 }
 
@@ -181,13 +181,13 @@ impl<A, D> ArrayRef<A, D>
     pub fn view(&self) -> ArrayView<'_, A, D>
     {
         // debug_assert!(self.pointer_is_inbounds());
-        unsafe { ArrayView::new(self.ptr, self.dim.clone(), self.strides.clone()) }
+        unsafe { ArrayView::new(*self._ptr(), self._dim().clone(), self._strides().clone()) }
     }
 
     /// Return a read-write view of the array
     pub fn view_mut(&mut self) -> ArrayViewMut<'_, A, D>
     {
-        unsafe { ArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) }
+        unsafe { ArrayViewMut::new(*self._ptr(), self._dim().clone(), self._strides().clone()) }
     }
 
     /// Return a shared view of the array with elements as if they were embedded in cells.
@@ -236,7 +236,9 @@ impl<A, D> ArrayRef<A, D>
     where A: Clone
     {
         if let Some(slc) = self.as_slice_memory_order() {
-            unsafe { Array::from_shape_vec_unchecked(self.dim.clone().strides(self.strides.clone()), slc.to_vec()) }
+            unsafe {
+                Array::from_shape_vec_unchecked(self._dim().clone().strides(self._strides().clone()), slc.to_vec())
+            }
         } else {
             self.map(A::clone)
         }
@@ -588,8 +590,8 @@ where
                 // Slice the axis in-place to update the `dim`, `strides`, and `ptr`.
                 self.slice_axis_inplace(Axis(old_axis), Slice { start, end, step });
                 // Copy the sliced dim and stride to corresponding axis.
-                new_dim[new_axis] = self.layout.dim[old_axis];
-                new_strides[new_axis] = self.layout.strides[old_axis];
+                new_dim[new_axis] = self.parts.dim[old_axis];
+                new_strides[new_axis] = self.parts.strides[old_axis];
                 old_axis += 1;
                 new_axis += 1;
             }
@@ -700,10 +702,11 @@ impl<A, D> LayoutRef<A, D>
     #[track_caller]
     pub fn slice_axis_inplace(&mut self, axis: Axis, indices: Slice)
     {
+        let parts = &mut self.0;
         let offset =
-            do_slice(&mut self.dim.slice_mut()[axis.index()], &mut self.strides.slice_mut()[axis.index()], indices);
+            do_slice(&mut parts.dim.slice_mut()[axis.index()], &mut parts.strides.slice_mut()[axis.index()], indices);
         unsafe {
-            self.ptr = self.ptr.offset(offset);
+            self.0.ptr = self._ptr().offset(offset);
         }
         // debug_assert!(self.pointer_is_inbounds());
     }
@@ -779,8 +782,8 @@ impl<A, D> LayoutRef<A, D>
                 Axis(ax),
                 f(AxisDescription {
                     axis: Axis(ax),
-                    len: self.dim[ax],
-                    stride: self.strides[ax] as isize,
+                    len: self._dim()[ax],
+                    stride: self._strides()[ax] as isize,
                 }),
             )
         }
@@ -832,9 +835,9 @@ impl<A, D> RawRef<A, D>
     pub fn get_ptr<I>(&self, index: I) -> Option<*const A>
     where I: NdIndex<D>
     {
-        let ptr = self.ptr;
+        let ptr = self._ptr();
         index
-            .index_checked(&self.dim, &self.strides)
+            .index_checked(self._dim(), self._strides())
             .map(move |offset| unsafe { ptr.as_ptr().offset(offset) as *const _ })
     }
 }
@@ -876,7 +879,7 @@ impl<A, D> RawRef<A, D>
         // extra code in as_mut_ptr
         let ptr = self.as_mut_ptr();
         index
-            .index_checked(&self.dim, &self.strides)
+            .index_checked(self._dim(), self._strides())
             .map(move |offset| unsafe { ptr.offset(offset) })
     }
 }
@@ -897,8 +900,8 @@ impl<A, D> ArrayRef<A, D>
     where I: NdIndex<D>
     {
         arraytraits::debug_bounds_check(self, &index);
-        let off = index.index_unchecked(&self.strides);
-        &*self.ptr.as_ptr().offset(off)
+        let off = index.index_unchecked(self._strides());
+        &*self._ptr().as_ptr().offset(off)
     }
 
     /// Perform *unchecked* array indexing.
@@ -921,8 +924,8 @@ impl<A, D> ArrayRef<A, D>
     {
         // debug_assert!(self.data.is_unique());
         arraytraits::debug_bounds_check(self, &index);
-        let off = index.index_unchecked(&self.strides);
-        &mut *self.ptr.as_ptr().offset(off)
+        let off = index.index_unchecked(self._strides());
+        &mut *self._ptr().as_ptr().offset(off)
     }
 
     /// Swap elements at indices `index1` and `index2`.
@@ -935,8 +938,8 @@ impl<A, D> ArrayRef<A, D>
     where I: NdIndex<D>
     {
         let ptr = self.as_mut_ptr();
-        let offset1 = index1.index_checked(&self.dim, &self.strides);
-        let offset2 = index2.index_checked(&self.dim, &self.strides);
+        let offset1 = index1.index_checked(self._dim(), self._strides());
+        let offset2 = index2.index_checked(self._dim(), self._strides());
         if let Some(offset1) = offset1 {
             if let Some(offset2) = offset2 {
                 unsafe {
@@ -968,9 +971,9 @@ impl<A, D> ArrayRef<A, D>
         // debug_assert!(self.data.is_unique());
         arraytraits::debug_bounds_check(self, &index1);
         arraytraits::debug_bounds_check(self, &index2);
-        let off1 = index1.index_unchecked(&self.strides);
-        let off2 = index2.index_unchecked(&self.strides);
-        std::ptr::swap(self.ptr.as_ptr().offset(off1), self.ptr.as_ptr().offset(off2));
+        let off1 = index1.index_unchecked(self._strides());
+        let off2 = index2.index_unchecked(self._strides());
+        std::ptr::swap(self._ptr().as_ptr().offset(off1), self._ptr().as_ptr().offset(off2));
     }
 
     // `get` for zero-dimensional arrays
@@ -1056,8 +1059,8 @@ where
     where D: RemoveAxis
     {
         self.collapse_axis(axis, index);
-        let dim = self.layout.dim.remove_axis(axis);
-        let strides = self.layout.strides.remove_axis(axis);
+        let dim = self.parts.dim.remove_axis(axis);
+        let strides = self.parts.strides.remove_axis(axis);
         // safe because new dimension, strides allow access to a subset of old data
         unsafe { self.with_strides_dim(strides, dim) }
     }
@@ -1071,8 +1074,8 @@ impl<A, D> LayoutRef<A, D>
     #[track_caller]
     pub fn collapse_axis(&mut self, axis: Axis, index: usize)
     {
-        let offset = dimension::do_collapse_axis(&mut self.dim, &self.strides, axis.index(), index);
-        self.ptr = unsafe { self.ptr.offset(offset) };
+        let parts = &mut self.0;
+        let offset = dimension::do_collapse_axis(&mut parts.dim, &parts.strides, axis.index(), index);
+        self.0.ptr = unsafe { self._ptr().offset(offset) };
         // debug_assert!(self.pointer_is_inbounds());
     }
 }
@@ -1571,7 +1575,7 @@ where
     fn diag_params(&self) -> (Ix, Ixs)
     {
         /* empty shape has len 1 */
-        let len = self.layout.dim.slice().iter().cloned().min().unwrap_or(1);
+        let len = self.parts.dim.slice().iter().cloned().min().unwrap_or(1);
         let stride = self.strides().iter().sum();
         (len, stride)
     }
@@ -1618,13 +1622,13 @@ impl<A, D> LayoutRef<A, D>
     /// contiguous in memory, it has custom strides, etc.
     pub fn is_standard_layout(&self) -> bool
     {
-        dimension::is_layout_c(&self.dim, &self.strides)
+        dimension::is_layout_c(self._dim(), self._strides())
     }
 
     /// Return true if the array is known to be contiguous.
     pub(crate) fn is_contiguous(&self) -> bool
     {
-        D::is_contiguous(&self.dim, &self.strides)
+        D::is_contiguous(self._dim(), self._strides())
     }
 }
 
@@ -1659,7 +1663,7 @@ impl<A, D> ArrayRef<A, D>
             CowArray::from(self.view())
         } else {
             let v = crate::iterators::to_vec_mapped(self.iter(), A::clone);
-            let dim = self.dim.clone();
+            let dim = self._dim().clone();
             debug_assert_eq!(v.len(), dim.size());
 
             unsafe {
@@ -1685,14 +1689,14 @@ impl<A, D> RawRef<A, D>
     #[inline(always)]
     pub fn as_ptr(&self) -> *const A
     {
-        self.ptr.as_ptr() as *const A
+        self._ptr().as_ptr() as *const A
     }
 
     /// Return a mutable pointer to the first element in the array reference.
     #[inline(always)]
     pub fn as_mut_ptr(&mut self) -> *mut A
     {
-        self.ptr.as_ptr()
+        self._ptr().as_ptr()
     }
 }
 
@@ -1716,7 +1720,7 @@ where
     where S: RawDataMut
     {
         self.try_ensure_unique(); // for ArcArray
-        self.layout.ptr.as_ptr()
+        self.parts.ptr.as_ptr()
     }
 }
 
@@ -1726,14 +1730,14 @@ impl<A, D> RawRef<A, D>
     #[inline]
     pub fn raw_view(&self) -> RawArrayView<A, D>
     {
-        unsafe { RawArrayView::new(self.ptr, self.dim.clone(), self.strides.clone()) }
+        unsafe { RawArrayView::new(*self._ptr(), self._dim().clone(), self._strides().clone()) }
     }
 
     /// Return a raw mutable view of the array.
     #[inline]
     pub fn raw_view_mut(&mut self) -> RawArrayViewMut<A, D>
     {
-        unsafe { RawArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) }
+        unsafe { RawArrayViewMut::new(*self._ptr(), self._dim().clone(), self._strides().clone()) }
     }
 }
 
@@ -1751,7 +1755,7 @@ where
     where S: RawDataMut
     {
         self.try_ensure_unique(); // for ArcArray
-        unsafe { RawArrayViewMut::new(self.layout.ptr, self.layout.dim.clone(), self.layout.strides.clone()) }
+        unsafe { RawArrayViewMut::new(self.parts.ptr, self.parts.dim.clone(), self.parts.strides.clone()) }
     }
 
     /// Return a raw mutable view of the array.
@@ -1761,7 +1765,7 @@ where
     pub(crate) unsafe fn raw_view_mut_unchecked(&mut self) -> RawArrayViewMut<A, D>
     where S: DataOwned
     {
-        RawArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone())
+        RawArrayViewMut::new(*self._ptr(), self._dim().clone(), self._strides().clone())
     }
 
     /// Return the array’s data as a slice, if it is contiguous and in standard order.
@@ -1771,7 +1775,7 @@ where
     {
         if self.is_standard_layout() {
             self.ensure_unique();
-            unsafe { Some(slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len())) }
+            unsafe { Some(slice::from_raw_parts_mut(self._ptr().as_ptr(), self.len())) }
         } else {
             None
         }
@@ -1796,8 +1800,8 @@ where
     {
         if self.is_contiguous() {
             self.ensure_unique();
-            let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides);
-            unsafe { Ok(slice::from_raw_parts_mut(self.ptr.sub(offset).as_ptr(), self.len())) }
+            let offset = offset_from_low_addr_ptr_to_logical_ptr(self._dim(), self._strides());
+            unsafe { Ok(slice::from_raw_parts_mut(self._ptr().sub(offset).as_ptr(), self.len())) }
         } else {
             Err(self)
         }
@@ -1814,7 +1818,7 @@ impl<A, D> ArrayRef<A, D>
     pub fn as_slice(&self) -> Option<&[A]>
     {
         if self.is_standard_layout() {
-            unsafe { Some(slice::from_raw_parts(self.ptr.as_ptr(), self.len())) }
+            unsafe { Some(slice::from_raw_parts(self._ptr().as_ptr(), self.len())) }
         } else {
             None
         }
@@ -1825,7 +1829,7 @@ impl<A, D> ArrayRef<A, D>
     pub fn as_slice_mut(&mut self) -> Option<&mut [A]>
     {
         if self.is_standard_layout() {
-            unsafe { Some(slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len())) }
+            unsafe { Some(slice::from_raw_parts_mut(self._ptr().as_ptr(), self.len())) }
         } else {
             None
         }
@@ -1839,8 +1843,8 @@ impl<A, D> ArrayRef<A, D>
     pub fn as_slice_memory_order(&self) -> Option<&[A]>
     {
         if self.is_contiguous() {
-            let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides);
-            unsafe { Some(slice::from_raw_parts(self.ptr.sub(offset).as_ptr(), self.len())) }
+            let offset = offset_from_low_addr_ptr_to_logical_ptr(self._dim(), self._strides());
+            unsafe { Some(slice::from_raw_parts(self._ptr().sub(offset).as_ptr(), self.len())) }
         } else {
             None
         }
@@ -1862,8 +1866,8 @@ impl<A, D> ArrayRef<A, D>
     pub(crate) fn try_as_slice_memory_order_mut(&mut self) -> Result<&mut [A], &mut Self>
     {
         if self.is_contiguous() {
-            let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides);
-            unsafe { Ok(slice::from_raw_parts_mut(self.ptr.sub(offset).as_ptr(), self.len())) }
+            let offset = offset_from_low_addr_ptr_to_logical_ptr(self._dim(), self._strides());
+            unsafe { Ok(slice::from_raw_parts_mut(self._ptr().sub(offset).as_ptr(), self.len())) }
         } else {
             Err(self)
         }
@@ -1938,9 +1942,9 @@ impl<A, D> ArrayRef<A, D>
         E: Dimension,
         A: Clone,
     {
-        let len = self.dim.size();
+        let len = self._dim().size();
         if size_of_shape_checked(&shape) != Ok(len) {
-            return Err(error::incompatible_shapes(&self.dim, &shape));
+            return Err(error::incompatible_shapes(self._dim(), &shape));
         }
 
         // Create a view if the length is 0, safe because the array and new shape is empty.
@@ -1951,12 +1955,12 @@ impl ArrayRef
         }
 
         // Try to reshape the array as a view into the existing data
-        match reshape_dim(&self.dim, &self.strides, &shape, order) {
+        match reshape_dim(self._dim(), self._strides(), &shape, order) {
             Ok(to_strides) => unsafe {
-                return Ok(CowArray::from(ArrayView::new(self.ptr, shape, to_strides)));
+                return Ok(CowArray::from(ArrayView::new(*self._ptr(), shape, to_strides)));
             },
             Err(err) if err.kind() == ErrorKind::IncompatibleShape => {
-                return Err(error::incompatible_shapes(&self.dim, &shape));
+                return Err(error::incompatible_shapes(self._dim(), &shape));
             }
             _otherwise => {}
         }
@@ -2033,8 +2037,8 @@ where
     where E: Dimension
     {
         let shape = shape.into_dimension();
-        if size_of_shape_checked(&shape) != Ok(self.layout.dim.size()) {
-            return Err(error::incompatible_shapes(&self.layout.dim, &shape));
+        if size_of_shape_checked(&shape) != Ok(self.parts.dim.size()) {
+            return Err(error::incompatible_shapes(&self.parts.dim, &shape));
         }
 
         // Check if contiguous, then we can change shape
@@ -2078,8 +2082,8 @@ where
     where E: IntoDimension
     {
         let shape = shape.into_dimension();
-        if size_of_shape_checked(&shape) != Ok(self.layout.dim.size()) {
-            return Err(error::incompatible_shapes(&self.layout.dim, &shape));
+        if size_of_shape_checked(&shape) != Ok(self.parts.dim.size()) {
+            return Err(error::incompatible_shapes(&self.parts.dim, &shape));
         }
         // Check if contiguous, if not => copy all, else just adapt strides
         unsafe {
@@ -2124,9 +2128,9 @@ where
         A: Clone,
         E: Dimension,
     {
-        let len = self.dim.size();
+        let len = self._dim().size();
         if size_of_shape_checked(&shape) != Ok(len) {
-            return Err(error::incompatible_shapes(&self.dim, &shape));
+            return Err(error::incompatible_shapes(self._dim(), &shape));
         }
 
         // Safe because the array and new shape is empty.
@@ -2137,12 +2141,12 @@ where
         }
 
         // Try to reshape the array's current data
-        match reshape_dim(&self.dim, &self.strides, &shape, order) {
+        match reshape_dim(self._dim(), self._strides(), &shape, order) {
             Ok(to_strides) => unsafe {
                 return Ok(self.with_strides_dim(to_strides, shape));
             },
             Err(err) if err.kind() == ErrorKind::IncompatibleShape => {
-                return Err(error::incompatible_shapes(&self.dim, &shape));
+                return Err(error::incompatible_shapes(self._dim(), &shape));
             }
             _otherwise => {}
         }
@@ -2190,10 +2194,10 @@ where
         E: IntoDimension,
     {
         let shape = shape.into_dimension();
-        if size_of_shape_checked(&shape) != Ok(self.dim.size()) {
+        if size_of_shape_checked(&shape) != Ok(self._dim().size()) {
             panic!(
                 "ndarray: incompatible shapes in reshape, attempted from: {:?}, to: {:?}",
-                self.dim.slice(),
+                self._dim().slice(),
                 shape.slice()
             )
         }
@@ -2289,8 +2293,8 @@ where
     {
         // safe because new dims equivalent
         unsafe {
-            ArrayBase::from_data_ptr(self.data, self.layout.ptr)
-                .with_strides_dim(self.layout.strides.into_dyn(), self.layout.dim.into_dyn())
+            ArrayBase::from_data_ptr(self.data, self.parts.ptr)
+                .with_strides_dim(self.parts.strides.into_dyn(), self.parts.dim.into_dyn())
         }
     }
 
@@ -2316,14 +2320,14 @@ where
         unsafe {
             if D::NDIM == D2::NDIM {
                 // safe because D == D2
-                let dim = unlimited_transmute::<D, D2>(self.layout.dim);
-                let strides = unlimited_transmute::<D, D2>(self.layout.strides);
-                return Ok(ArrayBase::from_data_ptr(self.data, self.layout.ptr).with_strides_dim(strides, dim));
+                let dim = unlimited_transmute::<D, D2>(self.parts.dim);
+                let strides = unlimited_transmute::<D, D2>(self.parts.strides);
+                return Ok(ArrayBase::from_data_ptr(self.data, self.parts.ptr).with_strides_dim(strides, dim));
             } else if D::NDIM.is_none() || D2::NDIM.is_none() {
                 // one is dynamic dim
                 // safe because dim, strides are equivalent under a different type
-                if let Some(dim) = D2::from_dimension(&self.layout.dim) {
-                    if let Some(strides) = D2::from_dimension(&self.layout.strides) {
+                if let Some(dim) = D2::from_dimension(&self.parts.dim) {
+                    if let Some(strides) = D2::from_dimension(&self.parts.strides) {
                         return Ok(self.with_strides_dim(strides, dim));
                     }
                 }
@@ -2421,8 +2425,8 @@ impl<A, D> ArrayRef<A, D>
         let dim = dim.into_dimension();
 
         // Note: zero strides are safe precisely because we return an read-only view
-        let broadcast_strides = upcast(&dim, &self.dim, &self.strides)?;
-        unsafe { Some(ArrayView::new(self.ptr, dim, broadcast_strides)) }
+        let broadcast_strides = upcast(&dim, self._dim(), self._strides())?;
+        unsafe { Some(ArrayView::new(*self._ptr(), dim, broadcast_strides)) }
     }
 
     /// For two arrays or views, find their common shape if possible and
@@ -2437,8 +2441,8 @@ impl<A, D> ArrayRef<A, D>
         D: Dimension + DimMax<E>,
         E: Dimension,
     {
-        let shape = co_broadcast::<D, E, <D as DimMax<E>>::Output>(&self.dim, &other.dim)?;
-        let view1 = if shape.slice() == self.dim.slice() {
+        let shape = co_broadcast::<D, E, <D as DimMax<E>>::Output>(self._dim(), other._dim())?;
+        let view1 = if shape.slice() == self._dim().slice() {
             self.view()
                 .into_dimensionality::<<D as DimMax<E>>::Output>()
                 .unwrap()
@@ -2447,7 +2451,7 @@ impl ArrayRef
         } else {
             return Err(from_kind(ErrorKind::IncompatibleShape));
         };
-        let view2 = if shape.slice() == other.dim.slice() {
+        let view2 = if shape.slice() == other._dim().slice() {
             other
                 .view()
                 .into_dimensionality::<<D as DimMax<E>>::Output>()
@@ -2482,8 +2486,8 @@ impl<A, D> LayoutRef<A, D>
     #[track_caller]
     pub fn swap_axes(&mut self, ax: usize, bx: usize)
     {
-        self.dim.slice_mut().swap(ax, bx);
-        self.strides.slice_mut().swap(ax, bx);
+        self.0.dim.slice_mut().swap(ax, bx);
+        self.0.strides.slice_mut().swap(ax, bx);
     }
 }
 
@@ -2531,8 +2535,8 @@ where
         let mut new_dim = usage_counts; // reuse to avoid an allocation
         let mut new_strides = D::zeros(self.ndim());
         {
-            let dim = self.layout.dim.slice();
-            let strides = self.layout.strides.slice();
+            let dim = self.parts.dim.slice();
+            let strides = self.parts.strides.slice();
             for (new_axis, &axis) in axes.slice().iter().enumerate() {
                 new_dim[new_axis] = dim[axis];
                 new_strides[new_axis] = strides[axis];
@@ -2579,8 +2583,8 @@ where
             assert_eq!(*count, 1, "each axis must be listed exactly once");
         }
 
-        let dim = self.layout.dim.slice_mut();
-        let strides = self.layout.strides.slice_mut();
+        let dim = self.parts.dim.slice_mut();
+        let strides = self.parts.strides.slice_mut();
         let axes = axes.slice();
 
         // The cycle detection is done using a bitmask to track visited positions.
@@ -2614,8 +2618,8 @@ where
     /// while retaining the same data.
     pub fn reversed_axes(mut self) -> ArrayBase
     {
-        self.layout.dim.slice_mut().reverse();
-        self.layout.strides.slice_mut().reverse();
+        self.parts.dim.slice_mut().reverse();
+        self.parts.strides.slice_mut().reverse();
         self
     }
 
@@ -2625,8 +2629,8 @@ where
     /// and strides.
     pub fn reverse_axes(&mut self)
     {
-        self.layout.dim.slice_mut().reverse();
-        self.layout.strides.slice_mut().reverse();
+        self.parts.dim.slice_mut().reverse();
+        self.parts.strides.slice_mut().reverse();
     }
 }
 
@@ -2648,13 +2652,13 @@ impl<A, D> LayoutRef<A, D>
     /// Return an iterator over the length and stride of each axis.
     pub fn axes(&self) -> Axes<'_, D>
     {
-        axes_of(&self.dim, &self.strides)
+        axes_of(self._dim(), self._strides())
     }
 
     /*
     /// Return the axis with the least stride (by absolute value)
     pub fn min_stride_axis(&self) -> Axis {
-        self.dim.min_stride_axis(&self.strides)
+        self._dim().min_stride_axis(self._strides())
     }
     */
 
@@ -2662,7 +2666,7 @@ impl<A, D> LayoutRef<A, D>
     /// preferring axes with len > 1.
     pub fn max_stride_axis(&self) -> Axis
     {
-        self.dim.max_stride_axis(&self.strides)
+        self._dim().max_stride_axis(self._strides())
     }
 
     /// Reverse the stride of `axis`.
@@ -2672,12 +2676,12 @@ impl<A, D> LayoutRef<A, D>
     pub fn invert_axis(&mut self, axis: Axis)
     {
         unsafe {
-            let s = self.strides.axis(axis) as Ixs;
-            let m = self.dim.axis(axis);
+            let s = self._strides().axis(axis) as Ixs;
+            let m = self._dim().axis(axis);
             if m != 0 {
-                self.ptr = self.ptr.offset(stride_offset(m - 1, s as Ix));
+                self.0.ptr = self._ptr().offset(stride_offset(m - 1, s as Ix));
             }
-            self.strides.set_axis(axis, (-s) as Ix);
+            self.0.strides.set_axis(axis, (-s) as Ix);
         }
     }
 
@@ -2719,7 +2723,8 @@ impl<A, D> LayoutRef<A, D>
     #[track_caller]
     pub fn merge_axes(&mut self, take: Axis, into: Axis) -> bool
     {
-        merge_axes(&mut self.dim, &mut self.strides, take, into)
+        let parts = &mut self.0;
+        merge_axes(&mut parts.dim, &mut parts.strides, take, into)
     }
 }
 
@@ -2755,8 +2760,8 @@ where
         assert!(axis.index() <= self.ndim());
         // safe because a new axis of length one does not affect memory layout
         unsafe {
-            let strides = self.layout.strides.insert_axis(axis);
-            let dim = self.layout.dim.insert_axis(axis);
+            let strides = self.parts.strides.insert_axis(axis);
+            let dim = self.parts.dim.insert_axis(axis);
             self.with_strides_dim(strides, dim)
         }
     }
@@ -2824,7 +2829,10 @@ impl<A, D> ArrayRef<A, D>
     {
         debug_assert_eq!(self.shape(), rhs.shape());
 
-        if self.dim.strides_equivalent(&self.strides, &rhs.strides) {
+        if self
+            ._dim()
+            .strides_equivalent(self._strides(), rhs._strides())
+        {
             if let Some(self_s) = self.as_slice_memory_order_mut() {
                 if let Some(rhs_s) = rhs.as_slice_memory_order() {
                     for (s, r) in self_s.iter_mut().zip(rhs_s) {
@@ -2876,10 +2884,10 @@ impl<A, D> ArrayRef<A, D>
         E: Dimension,
         F: FnMut(&mut A, &B),
     {
-        if rhs.dim.ndim() == 0 {
+        if rhs._dim().ndim() == 0 {
             // Skip broadcast from 0-dim array
             self.zip_mut_with_elem(rhs.get_0d(), f);
-        } else if self.dim.ndim() == rhs.dim.ndim() && self.shape() == rhs.shape() {
+        } else if self._dim().ndim() == rhs._dim().ndim() && self.shape() == rhs.shape() {
             self.zip_mut_with_same_shape(rhs, f);
         } else {
             let rhs_broadcast = rhs.broadcast_unwrap(self.raw_dim());
@@ -2900,7 +2908,7 @@ impl<A, D> ArrayRef<A, D>
             slc.iter().fold(init, f)
         } else {
             let mut v = self.view();
-            move_min_stride_axis_to_last(&mut v.layout.dim, &mut v.layout.strides);
+            move_min_stride_axis_to_last(&mut v.parts.dim, &mut v.parts.strides);
             v.into_elements_base().fold(init, f)
         }
     }
@@ -2931,12 +2939,12 @@ impl<A, D> ArrayRef<A, D>
         unsafe {
             if let Some(slc) = self.as_slice_memory_order() {
                 ArrayBase::from_shape_trusted_iter_unchecked(
-                    self.dim.clone().strides(self.strides.clone()),
+                    self._dim().clone().strides(self._strides().clone()),
                     slc.iter(),
                     f,
                 )
             } else {
-                ArrayBase::from_shape_trusted_iter_unchecked(self.dim.clone(), self.iter(), f)
+                ArrayBase::from_shape_trusted_iter_unchecked(self._dim().clone(), self.iter(), f)
             }
         }
     }
@@ -2952,9 +2960,9 @@ impl<A, D> ArrayRef<A, D>
         F: FnMut(&'a mut A) -> B,
         A: 'a,
     {
-        let dim = self.dim.clone();
+        let dim = self._dim().clone();
         if self.is_contiguous() {
-            let strides = self.strides.clone();
+            let strides = self._strides().clone();
             let slc = self.as_slice_memory_order_mut().unwrap();
             unsafe { ArrayBase::from_shape_trusted_iter_unchecked(dim.strides(strides), slc.iter_mut(), f) }
         } else {
@@ -3064,7 +3072,7 @@ impl<A, D> ArrayRef<A, D>
             Ok(slc) => slc.iter_mut().for_each(f),
             Err(arr) => {
                 let mut v = arr.view_mut();
-                move_min_stride_axis_to_last(&mut v.layout.dim, &mut v.layout.strides);
+                move_min_stride_axis_to_last(&mut v.parts.dim, &mut v.parts.strides);
                 v.into_elements_base().for_each(f);
             }
         }
@@ -3148,7 +3156,7 @@ impl<A, D> ArrayRef<A, D>
         A: 'a,
     {
         if self.len_of(axis) == 0 {
-            let new_dim = self.dim.remove_axis(axis);
+            let new_dim = self._dim().remove_axis(axis);
             Array::from_shape_simple_fn(new_dim, move || mapping(ArrayView::from(&[])))
         } else {
             Zip::from(self.lanes(axis)).map_collect(mapping)
@@ -3173,7 +3181,7 @@ impl<A, D> ArrayRef<A, D>
         A: 'a,
     {
         if self.len_of(axis) == 0 {
-            let new_dim = self.dim.remove_axis(axis);
+            let new_dim = self._dim().remove_axis(axis);
             Array::from_shape_simple_fn(new_dim, move || mapping(ArrayViewMut::from(&mut [])))
         } else {
             Zip::from(self.lanes_mut(axis)).map_collect(mapping)
diff --git a/src/impl_owned_array.rs b/src/impl_owned_array.rs
index 023e9ebb4..277b156b8 100644
--- a/src/impl_owned_array.rs
+++ b/src/impl_owned_array.rs
@@ -45,7 +45,7 @@ impl<A> Array<A, Ix0>
             // (This is necessary because the element in the array might not be
             // the first element in the `Vec`, such as if the array was created
             // by `array![1, 2, 3, 4].slice_move(s![2])`.)
-            let first = self.ptr.as_ptr() as usize;
+            let first = self.parts.ptr.as_ptr() as usize;
             let base = self.data.as_ptr() as usize;
             let index = (first - base) / size;
             debug_assert_eq!((first - base) % size, 0);
@@ -69,7 +69,7 @@ where D: Dimension
             return None;
         }
         if std::mem::size_of::<A>() == 0 {
-            Some(dimension::offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides))
+            Some(dimension::offset_from_low_addr_ptr_to_logical_ptr(&self.parts.dim, &self.parts.strides))
         } else {
             let offset = unsafe { self.as_ptr().offset_from(self.data.as_ptr()) };
             debug_assert!(offset >= 0);
@@ -476,8 +476,8 @@ where D: Dimension
         } else {
             dim.slice_mut()[..=growing_axis.index()].rotate_right(1);
             new_array = Self::uninit(dim);
-            new_array.dim.slice_mut()[..=growing_axis.index()].rotate_left(1);
-            new_array.strides.slice_mut()[..=growing_axis.index()].rotate_left(1);
+            new_array.parts.dim.slice_mut()[..=growing_axis.index()].rotate_left(1);
+            new_array.parts.strides.slice_mut()[..=growing_axis.index()].rotate_left(1);
         }
 
         // self -> old_self.
@@ -631,7 +631,7 @@ where D: Dimension
             // either the dimension increment is zero, or there is an existing
             // zero in another axis in self.
             debug_assert_eq!(self.len(), new_len);
-            self.dim = res_dim;
+            self.parts.dim = res_dim;
             return Ok(());
         }
 
@@ -701,11 +701,11 @@ where D: Dimension
                     }
                 }
             });
-            let mut strides = self.strides.clone();
+            let mut strides = self.parts.strides.clone();
             strides[axis.index()] = new_stride as usize;
             strides
         } else {
-            self.strides.clone()
+            self.parts.strides.clone()
         };
 
         // grow backing storage and update head ptr
@@ -746,7 +746,7 @@ where D: Dimension
                 sort_axes_in_default_order_tandem(&mut tail_view, &mut array);
                 debug_assert!(tail_view.is_standard_layout(),
                               "not std layout dim: {:?}, strides: {:?}",
-                              tail_view.shape(), tail_view.strides());
+                              tail_view.shape(), RawArrayViewMut::strides(&tail_view));
             }
 
             // Keep track of currently filled length of `self.data` and update it
@@ -785,8 +785,8 @@ where D: Dimension
             drop(data_length_guard);
 
             // update array dimension
-            self.strides = strides;
-            self.dim = res_dim;
+            self.parts.strides = strides;
+            self.parts.dim = res_dim;
         }
         // multiple assertions after pointer & dimension update
         debug_assert_eq!(self.data.len(), self.len());
@@ -849,7 +849,7 @@ where D: Dimension
                 0
             };
             debug_assert!(data_to_array_offset >= 0);
-            self.layout.ptr = self
+            self.parts.ptr = self
                 .data
                 .reserve(len_to_append)
                 .offset(data_to_array_offset);
@@ -880,7 +880,7 @@ pub(crate) unsafe fn drop_unreachable_raw<A, D>(
     }
     sort_axes_in_default_order(&mut self_);
     // with uninverted axes this is now the element with lowest address
-    let array_memory_head_ptr = self_.layout.ptr;
+    let array_memory_head_ptr = self_.parts.ptr;
     let data_end_ptr = data_ptr.add(data_len);
     debug_assert!(data_ptr <= array_memory_head_ptr);
     debug_assert!(array_memory_head_ptr <= data_end_ptr);
@@ -897,19 +897,19 @@ pub(crate) unsafe fn drop_unreachable_raw<A, D>(
     // As an optimization, the innermost axis is removed if it has stride 1, because
     // we then have a long stretch of contiguous elements we can skip as one.
     let inner_lane_len;
-    if self_.ndim() > 1 && self_.layout.strides.last_elem() == 1 {
-        self_.layout.dim.slice_mut().rotate_right(1);
-        self_.layout.strides.slice_mut().rotate_right(1);
-        inner_lane_len = self_.layout.dim[0];
-        self_.layout.dim[0] = 1;
-        self_.layout.strides[0] = 1;
+    if self_.ndim() > 1 && self_.parts.strides.last_elem() == 1 {
+        self_.parts.dim.slice_mut().rotate_right(1);
+        self_.parts.strides.slice_mut().rotate_right(1);
+        inner_lane_len = self_.parts.dim[0];
+        self_.parts.dim[0] = 1;
+        self_.parts.strides[0] = 1;
     } else {
         inner_lane_len = 1;
     }
 
     // iter is a raw pointer iterator traversing the array in memory order now with the
     // sorted axes.
-    let mut iter = Baseiter::new(self_.layout.ptr, self_.layout.dim, self_.layout.strides);
+    let mut iter = Baseiter::new(self_.parts.ptr, self_.parts.dim, self_.parts.strides);
     let mut dropped_elements = 0;
 
     let mut last_ptr = data_ptr;
@@ -948,7 +948,7 @@ where
     if a.ndim() <= 1 {
         return;
     }
-    sort_axes1_impl(&mut a.layout.dim, &mut a.layout.strides);
+    sort_axes1_impl(&mut a.parts.dim, &mut a.parts.strides);
 }
 
 fn sort_axes1_impl<D>(adim: &mut D, astrides: &mut D)
@@ -988,7 +988,7 @@ where
     if a.ndim() <= 1 {
         return;
     }
-    sort_axes2_impl(&mut a.layout.dim, &mut a.layout.strides, &mut b.layout.dim, &mut b.layout.strides);
+    sort_axes2_impl(&mut a.parts.dim, &mut a.parts.strides, &mut b.parts.dim, &mut b.parts.strides);
 }
 
 fn sort_axes2_impl<D>(adim: &mut D, astrides: &mut D, bdim: &mut D, bstrides: &mut D)
diff --git a/src/impl_raw_views.rs b/src/impl_raw_views.rs
index 5bb2a0e42..2423b9343 100644
--- a/src/impl_raw_views.rs
+++ b/src/impl_raw_views.rs
@@ -98,10 +98,10 @@ where D: Dimension
     pub unsafe fn deref_into_view<'a>(self) -> ArrayView<'a, A, D>
     {
         debug_assert!(
-            is_aligned(self.layout.ptr.as_ptr()),
+            is_aligned(self.parts.ptr.as_ptr()),
             "The pointer must be aligned."
         );
-        ArrayView::new(self.layout.ptr, self.layout.dim, self.layout.strides)
+        ArrayView::new(self.parts.ptr, self.parts.dim, self.parts.strides)
     }
 
     /// Split the array view along `axis` and return one array pointer strictly
@@ -113,23 +113,23 @@ where D: Dimension
     pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self)
     {
         assert!(index <= self.len_of(axis));
-        let left_ptr = self.layout.ptr.as_ptr();
+        let left_ptr = self.parts.ptr.as_ptr();
         let right_ptr = if index == self.len_of(axis) {
-            self.layout.ptr.as_ptr()
+            self.parts.ptr.as_ptr()
         } else {
-            let offset = stride_offset(index, self.layout.strides.axis(axis));
+            let offset = stride_offset(index, self.parts.strides.axis(axis));
             // The `.offset()` is safe due to the guarantees of `RawData`.
-            unsafe { self.layout.ptr.as_ptr().offset(offset) }
+            unsafe { self.parts.ptr.as_ptr().offset(offset) }
         };
 
-        let mut dim_left = self.layout.dim.clone();
+        let mut dim_left = self.parts.dim.clone();
         dim_left.set_axis(axis, index);
-        let left = unsafe { Self::new_(left_ptr, dim_left, self.layout.strides.clone()) };
+        let left = unsafe { Self::new_(left_ptr, dim_left, self.parts.strides.clone()) };
 
-        let mut dim_right = self.layout.dim;
+        let mut dim_right = self.parts.dim;
         let right_len = dim_right.axis(axis) - index;
         dim_right.set_axis(axis, right_len);
-        let right = unsafe { Self::new_(right_ptr, dim_right, self.layout.strides) };
+        let right = unsafe { Self::new_(right_ptr, dim_right, self.parts.strides) };
 
         (left, right)
     }
@@ -152,8 +152,8 @@ where D: Dimension
             mem::size_of::<B>(),
             "size mismatch in raw view cast"
         );
-        let ptr = self.layout.ptr.cast::<B>();
-        unsafe { RawArrayView::new(ptr, self.layout.dim, self.layout.strides) }
+        let ptr = self.parts.ptr.cast::<B>();
+        unsafe { RawArrayView::new(ptr, self.parts.dim, self.parts.strides) }
     }
 }
 
@@ -172,11 +172,11 @@ where D: Dimension
         );
         assert_eq!(mem::align_of::<Complex<T>>(), mem::align_of::<T>());
 
-        let dim = self.layout.dim.clone();
+        let dim = self.parts.dim.clone();
 
         // Double the strides. In the zero-sized element case and for axes of
         // length <= 1, we leave the strides as-is to avoid possible overflow.
-        let mut strides = self.layout.strides.clone();
+        let mut strides = self.parts.strides.clone();
         if mem::size_of::() != 0 {
             for ax in 0..strides.ndim() {
                 if dim[ax] > 1 {
@@ -185,7 +185,7 @@ where D: Dimension
             }
         }
 
-        let ptr_re: *mut T = self.layout.ptr.as_ptr().cast();
+        let ptr_re: *mut T = self.parts.ptr.as_ptr().cast();
         let ptr_im: *mut T = if self.is_empty() {
             // In the empty case, we can just reuse the existing pointer since
             // it won't be dereferenced anyway. It is not safe to offset by
@@ -308,7 +308,7 @@ where D: Dimension
     #[inline]
     pub(crate) fn into_raw_view(self) -> RawArrayView<A, D>
     {
-        unsafe { RawArrayView::new(self.layout.ptr, self.layout.dim, self.layout.strides) }
+        unsafe { RawArrayView::new(self.parts.ptr, self.parts.dim, self.parts.strides) }
     }
 
     /// Converts to a read-only view of the array.
@@ -323,10 +323,10 @@ where D: Dimension
     pub unsafe fn deref_into_view<'a>(self) -> ArrayView<'a, A, D>
     {
         debug_assert!(
-            is_aligned(self.layout.ptr.as_ptr()),
+            is_aligned(self.parts.ptr.as_ptr()),
             "The pointer must be aligned."
         );
-        ArrayView::new(self.layout.ptr, self.layout.dim, self.layout.strides)
+        ArrayView::new(self.parts.ptr, self.parts.dim, self.parts.strides)
     }
 
     /// Converts to a mutable view of the array.
@@ -341,10 +341,10 @@ where D: Dimension
     pub unsafe fn deref_into_view_mut<'a>(self) -> ArrayViewMut<'a, A, D>
     {
         debug_assert!(
-            is_aligned(self.layout.ptr.as_ptr()),
+            is_aligned(self.parts.ptr.as_ptr()),
             "The pointer must be aligned."
         );
-        ArrayViewMut::new(self.layout.ptr, self.layout.dim, self.layout.strides)
+        ArrayViewMut::new(self.parts.ptr, self.parts.dim, self.parts.strides)
     }
 
     /// Split the array view along `axis` and return one array pointer strictly
@@ -358,8 +358,8 @@ where D: Dimension
         let (left, right) = self.into_raw_view().split_at(axis, index);
         unsafe {
             (
-                Self::new(left.layout.ptr, left.layout.dim, left.layout.strides),
-                Self::new(right.layout.ptr, right.layout.dim, right.layout.strides),
+                Self::new(left.parts.ptr, left.parts.dim, left.parts.strides),
+                Self::new(right.parts.ptr, right.parts.dim, right.parts.strides),
             )
         }
     }
@@ -382,8 +382,8 @@ where D: Dimension
             mem::size_of::<B>(),
             "size mismatch in raw view cast"
         );
-        let ptr = self.layout.ptr.cast::<B>();
-        unsafe { RawArrayViewMut::new(ptr, self.layout.dim, self.layout.strides) }
+        let ptr = self.parts.ptr.cast::<B>();
+        unsafe { RawArrayViewMut::new(ptr, self.parts.dim, self.parts.strides) }
     }
 }
 
@@ -397,8 +397,8 @@ where D: Dimension
         let Complex { re, im } = self.into_raw_view().split_complex();
         unsafe {
             Complex {
-                re: RawArrayViewMut::new(re.layout.ptr, re.layout.dim, re.layout.strides),
-                im: RawArrayViewMut::new(im.layout.ptr, im.layout.dim, im.layout.strides),
+                re: RawArrayViewMut::new(re.parts.ptr, re.parts.dim, re.parts.strides),
+                im: RawArrayViewMut::new(im.parts.ptr, im.parts.dim, im.parts.strides),
             }
         }
     }
diff --git a/src/impl_ref_types.rs b/src/impl_ref_types.rs
index d93a996bf..bfdfa27f9 100644
--- a/src/impl_ref_types.rs
+++ b/src/impl_ref_types.rs
@@ -35,7 +35,20 @@ use core::{
     ops::{Deref, DerefMut},
 };
 
-use crate::{Array, ArrayBase, ArrayRef, Data, DataMut, Dimension, LayoutRef, RawData, RawDataMut, RawRef};
+use crate::{
+    Array,
+    ArrayBase,
+    ArrayPartsSized,
+    ArrayPartsUnsized,
+    ArrayRef,
+    Data,
+    DataMut,
+    Dimension,
+    LayoutRef,
+    RawData,
+    RawDataMut,
+    RawRef,
+};
 
 // D1: &ArrayBase -> &ArrayRef when data is safe to read
 impl<S, D> Deref for ArrayBase<S, D>
@@ -50,7 +63,9 @@ where S: Data
         // - It is "dereferencable" because it comes from a reference
         // - For the same reason, it is initialized
         // - The cast is valid because ArrayRef uses #[repr(transparent)]
-        unsafe { &*(&self.layout as *const LayoutRef<S::Elem, D>).cast::<ArrayRef<S::Elem, D>>() }
+        let parts: &ArrayPartsUnsized<S::Elem, D> = &self.parts;
+        let ptr = (parts as *const ArrayPartsUnsized<S::Elem, D>) as *const ArrayRef<S::Elem, D>;
+        unsafe { &*ptr }
     }
 }
 
@@ -68,7 +83,9 @@ where
         // - It is "dereferencable" because it comes from a reference
         // - For the same reason, it is initialized
         // - The cast is valid because ArrayRef uses #[repr(transparent)]
-        unsafe { &mut *(&mut self.layout as *mut LayoutRef<S::Elem, D>).cast::<ArrayRef<S::Elem, D>>() }
+        let parts: &mut ArrayPartsUnsized<S::Elem, D> = &mut self.parts;
+        let ptr = (parts as *mut ArrayPartsUnsized<S::Elem, D>) as *mut ArrayRef<S::Elem, D>;
+        unsafe { &mut *ptr }
     }
 }
 
@@ -84,7 +101,7 @@ impl<A, D> Deref for ArrayRef<A, D>
         // - It is "dereferencable" because it comes from a reference
         // - For the same reason, it is initialized
         // - The cast is valid because ArrayRef uses #[repr(transparent)]
-        unsafe { &*(self as *const ArrayRef<A, D>).cast::<RawRef<A, D>>() }
+        unsafe { &*((self as *const ArrayRef<A, D>) as *const RawRef<A, D>) }
     }
 }
 
@@ -98,7 +115,7 @@ impl<A, D> DerefMut for ArrayRef<A, D>
         // - It is "dereferencable" because it comes from a reference
         // - For the same reason, it is initialized
         // - The cast is valid because ArrayRef uses #[repr(transparent)]
-        unsafe { &mut *(self as *mut ArrayRef<A, D>).cast::<RawRef<A, D>>() }
+        unsafe { &mut *((self as *mut ArrayRef<A, D>) as *mut RawRef<A, D>) }
     }
 }
 
@@ -133,7 +150,9 @@ where S: RawData
         // - It is "dereferencable" because it comes from a reference
         // - For the same reason, it is initialized
         // - The cast is valid because ArrayRef uses #[repr(transparent)]
-        unsafe { &*(&self.layout as *const LayoutRef<S::Elem, D>).cast::<RawRef<S::Elem, D>>() }
+        let parts: &ArrayPartsUnsized<S::Elem, D> = &self.parts;
+        let ptr = (parts as *const ArrayPartsUnsized<S::Elem, D>) as *const RawRef<S::Elem, D>;
+        unsafe { &*ptr }
     }
 }
 
@@ -148,7 +167,9 @@ where S: RawDataMut
         // - It is "dereferencable" because it comes from a reference
         // - For the same reason, it is initialized
         // - The cast is valid because ArrayRef uses #[repr(transparent)]
-        unsafe { &mut *(&mut self.layout as *mut LayoutRef<S::Elem, D>).cast::<RawRef<S::Elem, D>>() }
+        let parts: &mut ArrayPartsUnsized<S::Elem, D> = &mut self.parts;
+        let ptr = (parts as *mut ArrayPartsUnsized<S::Elem, D>) as *mut RawRef<S::Elem, D>;
+        unsafe { &mut *ptr }
     }
 }
 
@@ -158,7 +179,9 @@ where S: RawData
 {
     fn as_ref(&self) -> &LayoutRef<S::Elem, D>
     {
-        &self.layout
+        let parts: &ArrayPartsUnsized<S::Elem, D> = &self.parts;
+        let ptr = (parts as *const ArrayPartsUnsized<S::Elem, D>) as *const LayoutRef<S::Elem, D>;
+        unsafe { &*ptr }
     }
 }
 
@@ -168,7 +191,9 @@ where S: RawData
 {
     fn as_mut(&mut self) -> &mut LayoutRef<S::Elem, D>
     {
-        &mut self.layout
+        let parts: &mut ArrayPartsUnsized<S::Elem, D> = &mut self.parts;
+        let ptr = (parts as *mut ArrayPartsUnsized<S::Elem, D>) as *mut LayoutRef<S::Elem, D>;
+        unsafe { &mut *ptr }
     }
 }
 
@@ -269,7 +294,7 @@ impl AsMut> for LayoutRef
 /// impossible to read the data behind the pointer from a LayoutRef (this
 /// is a safety invariant that *must* be maintained), and therefore we can
 /// Clone and Copy as desired.
-impl<A, D: Clone> Clone for LayoutRef<A, D>
+impl<A, D: Clone> Clone for ArrayPartsSized<A, D>
 {
     fn clone(&self) -> Self
     {
@@ -277,11 +302,12 @@ impl Clone for LayoutRef
             dim: self.dim.clone(),
             strides: self.strides.clone(),
             ptr: self.ptr,
+            _dst_control: [0; 0],
         }
     }
 }
 
-impl<A, D: Copy> Copy for LayoutRef<A, D> {}
+impl<A, D: Copy> Copy for ArrayPartsSized<A, D> {}
 
 impl Borrow> for ArrayBase
 where S: RawData
@@ -368,3 +394,17 @@ where S: RawData
         self.as_mut()
     }
 }
+
+/// Tests that a `mem::swap` of layout references fails to compile, via a `compile_fail` doctest
+///
+/// ```compile_fail
+/// let mut x = ndarray::Array1::from_vec(vec![0, 1, 2]);
+/// {
+///     let mut y = ndarray::Array1::from_vec(vec![4, 5, 6]);
+///     let x_ref = x.as_layout_ref_mut();
+///     let y_ref = y.as_layout_ref_mut();
+///     core::mem::swap(x_ref, y_ref);
+/// }
+/// ```
+#[allow(dead_code)]
+fn test_no_swap_via_doctests() {}
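// A standalone sketch, not part of this patch, of the #[repr(transparent)]
// pointer-cast pattern the `Deref`/`AsRef` impls above rely on. `Inner`,
// `InnerRef`, and `Outer` are illustrative names, not ndarray types.
use core::ops::Deref;

struct Inner
{
    len: usize,
}

#[repr(transparent)]
struct InnerRef(Inner);

impl InnerRef
{
    fn len(&self) -> usize
    {
        self.0.len
    }
}

struct Outer
{
    inner: Inner,
}

impl Deref for Outer
{
    type Target = InnerRef;

    fn deref(&self) -> &InnerRef
    {
        // Sound only because InnerRef is #[repr(transparent)] over Inner,
        // so the two types have identical layout.
        unsafe { &*((&self.inner as *const Inner) as *const InnerRef) }
    }
}

fn main()
{
    let outer = Outer { inner: Inner { len: 3 } };
    assert_eq!(outer.len(), 3);
}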
diff --git a/src/impl_special_element_types.rs b/src/impl_special_element_types.rs
index 42b524bc2..8b525e314 100644
--- a/src/impl_special_element_types.rs
+++ b/src/impl_special_element_types.rs
@@ -9,7 +9,7 @@
 use std::mem::MaybeUninit;
 
 use crate::imp_prelude::*;
-use crate::LayoutRef;
+use crate::ArrayParts;
 use crate::RawDataSubst;
 
 /// Methods specific to arrays with `MaybeUninit` elements.
@@ -36,7 +36,13 @@ where
     {
         let ArrayBase {
             data,
-            layout: LayoutRef { ptr, dim, strides },
+            parts:
+                ArrayParts {
+                    ptr,
+                    dim,
+                    strides,
+                    _dst_control: [],
+                },
         } = self;
 
         // "transmute" from storage of MaybeUninit to storage of A
diff --git a/src/impl_views/constructors.rs b/src/impl_views/constructors.rs
index 29b7c13d7..dcf6527ec 100644
--- a/src/impl_views/constructors.rs
+++ b/src/impl_views/constructors.rs
@@ -225,7 +225,7 @@ where D: Dimension
     pub fn reborrow<'b>(self) -> ArrayViewMut<'b, A, D>
     where 'a: 'b
     {
-        unsafe { ArrayViewMut::new(self.layout.ptr, self.layout.dim, self.layout.strides) }
+        unsafe { ArrayViewMut::new(self.parts.ptr, self.parts.dim, self.parts.strides) }
     }
 }
 
diff --git a/src/impl_views/conversions.rs b/src/impl_views/conversions.rs
index 5bc5f9ad6..54d7ed207 100644
--- a/src/impl_views/conversions.rs
+++ b/src/impl_views/conversions.rs
@@ -29,7 +29,7 @@ where D: Dimension
     pub fn reborrow<'b>(self) -> ArrayView<'b, A, D>
     where 'a: 'b
     {
-        unsafe { ArrayView::new(self.layout.ptr, self.layout.dim, self.layout.strides) }
+        unsafe { ArrayView::new(self.parts.ptr, self.parts.dim, self.parts.strides) }
     }
 
     /// Return the array’s data as a slice, if it is contiguous and in standard order.
@@ -40,7 +40,7 @@ where D: Dimension
     pub fn to_slice(&self) -> Option<&'a [A]>
     {
         if self.is_standard_layout() {
-            unsafe { Some(slice::from_raw_parts(self.ptr.as_ptr(), self.len())) }
+            unsafe { Some(slice::from_raw_parts(self.parts.ptr.as_ptr(), self.len())) }
         } else {
             None
         }
@@ -55,8 +55,8 @@ where D: Dimension
     pub fn to_slice_memory_order(&self) -> Option<&'a [A]>
     {
         if self.is_contiguous() {
-            let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides);
-            unsafe { Some(slice::from_raw_parts(self.ptr.sub(offset).as_ptr(), self.len())) }
+            let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.parts.dim, &self.parts.strides);
+            unsafe { Some(slice::from_raw_parts(self.parts.ptr.sub(offset).as_ptr(), self.len())) }
         } else {
             None
         }
@@ -66,7 +66,7 @@ where D: Dimension
     #[inline]
     pub(crate) fn into_raw_view(self) -> RawArrayView<A, D>
     {
-        unsafe { RawArrayView::new(self.layout.ptr, self.layout.dim, self.layout.strides) }
+        unsafe { RawArrayView::new(self.parts.ptr, self.parts.dim, self.parts.strides) }
     }
 }
 
@@ -199,7 +199,7 @@ where D: Dimension
     #[inline]
     pub(crate) fn into_base_iter(self) -> Baseiter<A, D>
     {
-        unsafe { Baseiter::new(self.layout.ptr, self.layout.dim, self.layout.strides) }
+        unsafe { Baseiter::new(self.parts.ptr, self.parts.dim, self.parts.strides) }
     }
 }
 
@@ -209,7 +209,7 @@ where D: Dimension
     #[inline]
     pub(crate) fn into_base_iter(self) -> Baseiter<A, D>
     {
-        unsafe { Baseiter::new(self.layout.ptr, self.layout.dim, self.layout.strides) }
+        unsafe { Baseiter::new(self.parts.ptr, self.parts.dim, self.parts.strides) }
     }
 }
 
@@ -220,7 +220,7 @@ where D: Dimension
     #[inline]
     pub(crate) fn into_base_iter(self) -> Baseiter<A, D>
     {
-        unsafe { Baseiter::new(self.layout.ptr, self.layout.dim, self.layout.strides) }
+        unsafe { Baseiter::new(self.parts.ptr, self.parts.dim, self.parts.strides) }
     }
 
     #[inline]
@@ -276,19 +276,19 @@ where D: Dimension
     // Convert into a read-only view
     pub(crate) fn into_view(self) -> ArrayView<'a, A, D>
     {
-        unsafe { ArrayView::new(self.layout.ptr, self.layout.dim, self.layout.strides) }
+        unsafe { ArrayView::new(self.parts.ptr, self.parts.dim, self.parts.strides) }
     }
 
     /// Converts to a mutable raw array view.
     pub(crate) fn into_raw_view_mut(self) -> RawArrayViewMut<A, D>
     {
-        unsafe { RawArrayViewMut::new(self.layout.ptr, self.layout.dim, self.layout.strides) }
+        unsafe { RawArrayViewMut::new(self.parts.ptr, self.parts.dim, self.parts.strides) }
     }
 
     #[inline]
     pub(crate) fn into_base_iter(self) -> Baseiter<A, D>
     {
-        unsafe { Baseiter::new(self.layout.ptr, self.layout.dim, self.layout.strides) }
+        unsafe { Baseiter::new(self.parts.ptr, self.parts.dim, self.parts.strides) }
     }
 
     #[inline]
@@ -302,7 +302,7 @@ where D: Dimension
     pub(crate) fn try_into_slice(self) -> Result<&'a mut [A], Self>
     {
         if self.is_standard_layout() {
-            unsafe { Ok(slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len())) }
+            unsafe { Ok(slice::from_raw_parts_mut(self.parts.ptr.as_ptr(), self.len())) }
         } else {
             Err(self)
         }
@@ -313,8 +313,8 @@ where D: Dimension
     fn try_into_slice_memory_order(self) -> Result<&'a mut [A], Self>
     {
         if self.is_contiguous() {
-            let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides);
-            unsafe { Ok(slice::from_raw_parts_mut(self.ptr.sub(offset).as_ptr(), self.len())) }
+            let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.parts.dim, &self.parts.strides);
+            unsafe { Ok(slice::from_raw_parts_mut(self.parts.ptr.sub(offset).as_ptr(), self.len())) }
         } else {
             Err(self)
         }
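// A usage sketch, not part of this patch, of the slice conversions above:
// `to_slice` succeeds only for views in standard (row-major, contiguous) order.
use ndarray::array;

fn main()
{
    let a = array![[1, 2], [3, 4]];
    assert_eq!(a.view().to_slice(), Some(&[1, 2, 3, 4][..]));
    // A transposed view is contiguous in memory but not in standard order.
    assert_eq!(a.t().to_slice(), None);
}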
diff --git a/src/impl_views/indexing.rs b/src/impl_views/indexing.rs
index 2879e7416..feadbd296 100644
--- a/src/impl_views/indexing.rs
+++ b/src/impl_views/indexing.rs
@@ -145,7 +145,9 @@ where
     unsafe fn uget(self, index: I) -> &'a A
     {
         debug_bounds_check!(self, index);
-        &*self.as_ptr().offset(index.index_unchecked(&self.strides))
+        &*self
+            .as_ptr()
+            .offset(index.index_unchecked(&self.parts.strides))
     }
 }
 
@@ -213,6 +215,6 @@ where
         debug_bounds_check!(self, index);
         &mut *self
             .as_mut_ptr()
-            .offset(index.index_unchecked(&self.strides))
+            .offset(index.index_unchecked(&self.parts.strides))
     }
 }
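// A usage sketch, not part of this patch, of unchecked indexing, which the
// `uget` implementations above back: the caller guarantees the index is in bounds.
use ndarray::array;

fn main()
{
    let a = array![[1, 2], [3, 4]];
    let x = unsafe { *a.uget((1, 0)) };
    assert_eq!(x, 3);
}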
diff --git a/src/iterators/chunks.rs b/src/iterators/chunks.rs
index 4dd99f002..178ead7e0 100644
--- a/src/iterators/chunks.rs
+++ b/src/iterators/chunks.rs
@@ -59,10 +59,10 @@ impl<'a, A, D: Dimension> ExactChunks<'a, A, D>
             a.shape()
         );
         for i in 0..a.ndim() {
-            a.layout.dim[i] /= chunk[i];
+            a.parts.dim[i] /= chunk[i];
         }
-        let inner_strides = a.layout.strides.clone();
-        a.layout.strides *= &chunk;
+        let inner_strides = a.parts.strides.clone();
+        a.parts.strides *= &chunk;
 
         ExactChunks {
             base: a,
@@ -158,10 +158,10 @@ impl<'a, A, D: Dimension> ExactChunksMut<'a, A, D>
             a.shape()
         );
         for i in 0..a.ndim() {
-            a.layout.dim[i] /= chunk[i];
+            a.parts.dim[i] /= chunk[i];
         }
-        let inner_strides = a.layout.strides.clone();
-        a.layout.strides *= &chunk;
+        let inner_strides = a.parts.strides.clone();
+        a.parts.strides *= &chunk;
 
         ExactChunksMut {
             base: a,
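// A usage sketch, not part of this patch, of `exact_chunks`, whose dimension
// and stride bookkeeping is adjusted above; each chunk is a non-overlapping view.
use ndarray::Array2;

fn main()
{
    let a = Array2::from_shape_fn((4, 4), |(i, j)| i * 4 + j);
    let sums: Vec<usize> = a.exact_chunks((2, 2)).into_iter().map(|c| c.sum()).collect();
    assert_eq!(sums, vec![10, 18, 42, 50]);
}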
diff --git a/src/iterators/into_iter.rs b/src/iterators/into_iter.rs
index b51315a0f..cacafd2f2 100644
--- a/src/iterators/into_iter.rs
+++ b/src/iterators/into_iter.rs
@@ -36,12 +36,12 @@ where D: Dimension
     pub(crate) fn new(array: Array<A, D>) -> Self
     {
         unsafe {
-            let array_head_ptr = array.ptr;
+            let array_head_ptr = array.parts.ptr;
             let mut array_data = array.data;
             let data_len = array_data.release_all_elements();
-            debug_assert!(data_len >= array.layout.dim.size());
-            let has_unreachable_elements = array.layout.dim.size() != data_len;
-            let inner = Baseiter::new(array_head_ptr, array.layout.dim, array.layout.strides);
+            debug_assert!(data_len >= array.parts.dim.size());
+            let has_unreachable_elements = array.parts.dim.size() != data_len;
+            let inner = Baseiter::new(array_head_ptr, array.parts.dim, array.parts.strides);
 
             IntoIter {
                 array_data,
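// A usage sketch, not part of this patch, of by-value iteration, which the
// `IntoIter` constructor above implements for owned arrays.
use ndarray::array;

fn main()
{
    let a = array![1, 2, 3];
    let v: Vec<i32> = a.into_iter().collect();
    assert_eq!(v, vec![1, 2, 3]);
}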
diff --git a/src/iterators/lanes.rs b/src/iterators/lanes.rs
index 0f9678872..9fd39607b 100644
--- a/src/iterators/lanes.rs
+++ b/src/iterators/lanes.rs
@@ -46,8 +46,8 @@ impl<'a, A, D: Dimension> Lanes<'a, A, D>
             v.try_remove_axis(Axis(0))
         } else {
             let i = axis.index();
-            len = v.dim[i];
-            stride = v.strides[i] as isize;
+            len = v.parts.dim[i];
+            stride = v.parts.strides[i] as isize;
             v.try_remove_axis(axis)
         };
         Lanes {
@@ -115,8 +115,8 @@ impl<'a, A, D: Dimension> LanesMut<'a, A, D>
             v.try_remove_axis(Axis(0))
         } else {
             let i = axis.index();
-            len = v.dim[i];
-            stride = v.strides[i] as isize;
+            len = v.parts.dim[i];
+            stride = v.parts.strides[i] as isize;
             v.try_remove_axis(axis)
         };
         LanesMut {
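// A usage sketch, not part of this patch, of `Lanes`, whose axis-removal logic
// is updated above: lanes along axis 1 of a 2-D array are its rows.
use ndarray::{array, Axis};

fn main()
{
    let a = array![[1, 2, 3], [4, 5, 6]];
    let sums: Vec<i32> = a.lanes(Axis(1)).into_iter().map(|lane| lane.sum()).collect();
    assert_eq!(sums, vec![6, 15]);
}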
diff --git a/src/iterators/mod.rs b/src/iterators/mod.rs
index 55a9920a8..f7892a8c9 100644
--- a/src/iterators/mod.rs
+++ b/src/iterators/mod.rs
@@ -880,9 +880,9 @@ impl AxisIterCore
             index: 0,
             end: v.len_of(axis),
             stride: v.stride_of(axis),
-            inner_dim: v.dim.remove_axis(axis),
-            inner_strides: v.strides.remove_axis(axis),
-            ptr: v.ptr.as_ptr(),
+            inner_dim: v.parts.dim.remove_axis(axis),
+            inner_strides: v.parts.strides.remove_axis(axis),
+            ptr: v.parts.ptr.as_ptr(),
         }
     }
 
@@ -1366,10 +1366,10 @@ fn chunk_iter_parts(v: ArrayView<'_, A, D>, axis: Axis, size: u
     };
 
     let axis = axis.index();
-    let mut inner_dim = v.dim.clone();
+    let mut inner_dim = v.parts.dim.clone();
     inner_dim[axis] = size;
 
-    let mut partial_chunk_dim = v.layout.dim;
+    let mut partial_chunk_dim = v.parts.dim;
     partial_chunk_dim[axis] = chunk_remainder;
     let partial_chunk_index = n_whole_chunks;
 
@@ -1378,8 +1378,8 @@ fn chunk_iter_parts(v: ArrayView<'_, A, D>, axis: Axis, size: u
         end: iter_len,
         stride,
         inner_dim,
-        inner_strides: v.layout.strides,
-        ptr: v.layout.ptr.as_ptr(),
+        inner_strides: v.parts.strides,
+        ptr: v.parts.ptr.as_ptr(),
     };
 
     (iter, partial_chunk_index, partial_chunk_dim)
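// A usage sketch, not part of this patch, of `axis_chunks_iter`, which the
// `chunk_iter_parts` helper above sets up, including the partial final chunk.
use ndarray::{Array1, Axis};

fn main()
{
    let a = Array1::from(vec![1, 2, 3, 4, 5]);
    let lens: Vec<usize> = a.axis_chunks_iter(Axis(0), 2).map(|chunk| chunk.len()).collect();
    assert_eq!(lens, vec![2, 2, 1]);
}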
diff --git a/src/iterators/windows.rs b/src/iterators/windows.rs
index f3442c0af..e6fccce46 100644
--- a/src/iterators/windows.rs
+++ b/src/iterators/windows.rs
@@ -39,7 +39,7 @@ impl<'a, A, D: Dimension> Windows<'a, A, D>
         let window = window_size.into_dimension();
 
         let strides = axis_strides.into_dimension();
-        let window_strides = a.strides.clone();
+        let window_strides = a.parts.strides.clone();
 
         let base = build_base(a, window.clone(), strides);
         Windows {
@@ -143,7 +143,7 @@ impl<'a, A, D: Dimension> AxisWindows<'a, A, D>
 {
     pub(crate) fn new_with_stride(a: ArrayView<'a, A, D>, axis: Axis, window_size: usize, stride_size: usize) -> Self
     {
-        let window_strides = a.strides.clone();
+        let window_strides = a.parts.strides.clone();
         let axis_idx = axis.index();
 
         let mut window = a.raw_dim();
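// A usage sketch, not part of this patch, of `Windows`, whose stride setup is
// touched above: overlapping windows of length 2 over a 1-D array.
use ndarray::Array1;

fn main()
{
    let a = Array1::from(vec![1, 2, 3, 4]);
    let sums: Vec<i32> = a.windows(2).into_iter().map(|w| w.sum()).collect();
    assert_eq!(sums, vec![3, 5, 7]);
}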
diff --git a/src/lib.rs b/src/lib.rs
index e11a8a5ff..9f2c53d79 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -131,12 +131,14 @@ extern crate cblas_sys;
 #[cfg(docsrs)]
 pub mod doc;
 
+use alloc::fmt::Debug;
 #[cfg(target_has_atomic = "ptr")]
 use alloc::sync::Arc;
 
 #[cfg(not(target_has_atomic = "ptr"))]
 use portable_atomic_util::Arc;
 
+use core::ptr::NonNull;
 use std::marker::PhantomData;
 
 pub use crate::dimension::dim::*;
@@ -1299,7 +1301,41 @@ where S: RawData
     /// buffer; if borrowed, contains the lifetime and mutability.)
     data: S,
     /// The dimension, strides, and pointer to inside of `data`
-    layout: LayoutRef<S::Elem, D>,
+    parts: ArrayPartsSized<S::Elem, D>,
+}
+
+/// A possibly-unsized container for array parts.
+///
+/// This type exists only so that the array parts can be held in a single
+/// container type, one that is sized inside `ArrayBase` and unsized inside
+/// the reference types.
+#[derive(Debug)]
+struct ArrayParts
+{
+    /// A non-null pointer into the buffer held by `data`; may point anywhere
+    /// in its range. If `S: Data`, this pointer must be aligned.
+    ptr: NonNull<A>,
+    /// The lengths of the axes.
+    dim: D,
+    /// The element count stride per axis. To be parsed as `isize`.
+    strides: D,
+    _dst_control: T,
+}
+
+type ArrayPartsSized = ArrayParts;
+type ArrayPartsUnsized = ArrayParts;
+
+impl ArrayPartsSized
+{
+    const fn new(ptr: NonNull, dim: D, strides: D) -> ArrayPartsSized
+    {
+        Self {
+            ptr,
+            dim,
+            strides,
+            _dst_control: [],
+        }
+    }
 }
 
 /// A reference to the layout of an *n*-dimensional array.
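// A standalone sketch, not part of this patch, of the trailing "DST control"
// field pattern that `ArrayParts` above relies on: one struct definition is
// `Sized` when its last field is a zero-length array and unsized when that
// field is a slice. The names and the `u8` control element are illustrative
// assumptions, not the exact types used by the patch.
struct Parts<D, T: ?Sized>
{
    dim: D,
    strides: D,
    _dst_control: T,
}

type PartsSized<D> = Parts<D, [u8; 0]>;
type PartsUnsized<D> = Parts<D, [u8]>;

fn main()
{
    let sized: PartsSized<usize> = Parts { dim: 3, strides: 1, _dst_control: [] };
    // A reference to the sized variant unsize-coerces to the unsized variant,
    // which can only exist behind a pointer or reference.
    let unsized_ref: &PartsUnsized<usize> = &sized;
    assert_eq!(unsized_ref.dim, 3);
    assert_eq!(unsized_ref.strides, 1);
}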
@@ -1401,16 +1437,28 @@ where S: RawData
 // which alter the layout / shape / strides of an array must also
 // alter the offset of the pointer. This is allowed, as it does not
 // cause a pointer deref.
-#[derive(Debug)]
-pub struct LayoutRef<A, D>
+#[repr(transparent)]
+pub struct LayoutRef<A, D>(ArrayPartsUnsized<A, D>);
+
+impl<A, D> LayoutRef<A, D>
 {
-    /// A non-null pointer into the buffer held by `data`; may point anywhere
-    /// in its range. If `S: Data`, this pointer must be aligned.
-    ptr: std::ptr::NonNull<A>,
-    /// The lengths of the axes.
-    dim: D,
-    /// The element count stride per axis. To be parsed as `isize`.
-    strides: D,
+    /// Get a reference to the data pointer.
+    fn _ptr(&self) -> &NonNull<A>
+    {
+        &self.0.ptr
+    }
+
+    /// Get a reference to the array's dimension.
+    fn _dim(&self) -> &D
+    {
+        &self.0.dim
+    }
+
+    /// Get a reference to the array's strides.
+    fn _strides(&self) -> &D
+    {
+        &self.0.strides
+    }
 }
 
 /// A reference to an *n*-dimensional array whose data is safe to read and write.
@@ -1757,7 +1805,7 @@ impl ArrayRef
 
         match self.broadcast(dim.clone()) {
             Some(it) => it,
-            None => broadcast_panic(&self.dim, &dim),
+            None => broadcast_panic(self._dim(), &dim),
         }
     }
 
@@ -1769,10 +1817,10 @@ impl ArrayRef
     {
         let dim = dim.into_dimension();
         debug_assert_eq!(self.shape(), dim.slice());
-        let ptr = self.ptr;
+        let ptr = self._ptr();
         let mut strides = dim.clone();
-        strides.slice_mut().copy_from_slice(self.strides.slice());
-        unsafe { ArrayView::new(ptr, dim, strides) }
+        strides.slice_mut().copy_from_slice(self._strides().slice());
+        unsafe { ArrayView::new(*ptr, dim, strides) }
     }
 }
 
@@ -1784,8 +1832,8 @@ where
     /// Remove array axis `axis` and return the result.
     fn try_remove_axis(self, axis: Axis) -> ArrayBase<S, D::Smaller>
     {
-        let d = self.layout.dim.try_remove_axis(axis);
-        let s = self.layout.strides.try_remove_axis(axis);
+        let d = self.parts.dim.try_remove_axis(axis);
+        let s = self.parts.strides.try_remove_axis(axis);
         // safe because new dimension, strides allow access to a subset of old data
         unsafe { self.with_strides_dim(s, d) }
     }
diff --git a/src/linalg/impl_linalg.rs b/src/linalg/impl_linalg.rs
index 1e4eb80ee..14c82ff4d 100644
--- a/src/linalg/impl_linalg.rs
+++ b/src/linalg/impl_linalg.rs
@@ -65,7 +65,7 @@ impl ArrayRef
     /// *Note:* If enabled, uses blas `dot` for elements of `f32, f64` when memory
     /// layout allows.
     #[track_caller]
-    pub fn dot<Rhs>(&self, rhs: &Rhs) -> <Self as Dot<Rhs>>::Output
+    pub fn dot<Rhs: ?Sized>(&self, rhs: &Rhs) -> <Self as Dot<Rhs>>::Output
     where Self: Dot<Rhs>
     {
         Dot::dot(self, rhs)
@@ -110,9 +110,9 @@ impl ArrayRef
                     if blas_compat_1d::<$ty, _>(self) && blas_compat_1d::<$ty, _>(rhs) {
                         unsafe {
                             let (lhs_ptr, n, incx) =
-                                blas_1d_params(self.ptr.as_ptr(), self.len(), self.strides()[0]);
+                                blas_1d_params(self._ptr().as_ptr(), self.len(), self.strides()[0]);
                             let (rhs_ptr, _, incy) =
-                                blas_1d_params(rhs.ptr.as_ptr(), rhs.len(), rhs.strides()[0]);
+                                blas_1d_params(rhs._ptr().as_ptr(), rhs.len(), rhs.strides()[0]);
                             let ret = blas_sys::$func(
                                 n,
                                 lhs_ptr as *const $ty,
@@ -157,7 +157,7 @@ unsafe fn blas_1d_params(ptr: *const A, len: usize, stride: isize) -> (*const
 ///
 /// For two-dimensional arrays, the dot method computes the matrix
 /// multiplication.
-pub trait Dot<Rhs>
+pub trait Dot<Rhs: ?Sized>
 {
     /// The result of the operation.
     ///
@@ -295,7 +295,7 @@ impl ArrayRef
     /// );
     /// ```
     #[track_caller]
-    pub fn dot<Rhs>(&self, rhs: &Rhs) -> <Self as Dot<Rhs>>::Output
+    pub fn dot<Rhs: ?Sized>(&self, rhs: &Rhs) -> <Self as Dot<Rhs>>::Output
     where Self: Dot<Rhs>
     {
         Dot::dot(self, rhs)
@@ -471,12 +471,12 @@ where A: LinalgScalar
                                 n as blas_index,                 // n, cols of Op(b)
                                 k as blas_index,                 // k, cols of Op(a)
                                 gemm_scalar_cast!($ty, alpha),   // alpha
-                                a.ptr.as_ptr() as *const _,      // a
+                                a._ptr().as_ptr() as *const _,      // a
                                 lda,                             // lda
-                                b.ptr.as_ptr() as *const _,      // b
+                                b._ptr().as_ptr() as *const _,      // b
                                 ldb,                             // ldb
                                 gemm_scalar_cast!($ty, beta),    // beta
-                                c.ptr.as_ptr() as *mut _,        // c
+                                c._ptr().as_ptr() as *mut _,        // c
                                 ldc,                             // ldc
                             );
                         }
@@ -694,10 +694,10 @@ unsafe fn general_mat_vec_mul_impl(
                             let cblas_layout = layout.to_cblas_layout();
 
                             // Low addr in memory pointers required for x, y
-                            let x_offset = offset_from_low_addr_ptr_to_logical_ptr(&x.dim, &x.strides);
-                            let x_ptr = x.ptr.as_ptr().sub(x_offset);
-                            let y_offset = offset_from_low_addr_ptr_to_logical_ptr(&y.layout.dim, &y.layout.strides);
-                            let y_ptr = y.layout.ptr.as_ptr().sub(y_offset);
+                            let x_offset = offset_from_low_addr_ptr_to_logical_ptr(x._dim(), x._strides());
+                            let x_ptr = x._ptr().as_ptr().sub(x_offset);
+                            let y_offset = offset_from_low_addr_ptr_to_logical_ptr(&y.parts.dim, &y.parts.strides);
+                            let y_ptr = y.parts.ptr.as_ptr().sub(y_offset);
 
                             let x_stride = x.strides()[0] as blas_index;
                             let y_stride = y.strides()[0] as blas_index;
@@ -708,7 +708,7 @@ unsafe fn general_mat_vec_mul_impl(
                                 m as blas_index,            // m, rows of Op(a)
                                 k as blas_index,            // n, cols of Op(a)
                                 cast_as(&alpha),            // alpha
-                                a.ptr.as_ptr() as *const _, // a
+                                a._ptr().as_ptr() as *const _, // a
                                 a_stride,                   // lda
                                 x_ptr as *const _,          // x
                                 x_stride,
@@ -909,9 +909,9 @@ fn is_blas_2d(dim: &Ix2, stride: &Ix2, order: BlasOrder) -> bool
 #[cfg(feature = "blas")]
 fn get_blas_compatible_layout<A>(a: &ArrayRef<A, Ix2>) -> Option<BlasOrder>
 {
-    if is_blas_2d(&a.dim, &a.strides, BlasOrder::C) {
+    if is_blas_2d(a._dim(), a._strides(), BlasOrder::C) {
         Some(BlasOrder::C)
-    } else if is_blas_2d(&a.dim, &a.strides, BlasOrder::F) {
+    } else if is_blas_2d(a._dim(), a._strides(), BlasOrder::F) {
         Some(BlasOrder::F)
     } else {
         None
@@ -952,7 +952,7 @@ where
     if !same_type::() {
         return false;
     }
-    is_blas_2d(&a.dim, &a.strides, BlasOrder::C)
+    is_blas_2d(a._dim(), a._strides(), BlasOrder::C)
 }
 
 #[cfg(test)]
@@ -965,7 +965,7 @@ where
     if !same_type::() {
         return false;
     }
-    is_blas_2d(&a.dim, &a.strides, BlasOrder::F)
+    is_blas_2d(a._dim(), a._strides(), BlasOrder::F)
 }
 
 #[cfg(test)]
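// A usage sketch, not part of this patch, of `Dot` for 2-D arrays: `dot`
// performs matrix multiplication, using the BLAS path above when the feature
// and memory layout allow it.
use ndarray::array;

fn main()
{
    let a = array![[1.0, 2.0], [3.0, 4.0]];
    let b = array![[5.0, 6.0], [7.0, 8.0]];
    assert_eq!(a.dot(&b), array![[19.0, 22.0], [43.0, 50.0]]);
}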
diff --git a/src/macro_utils.rs b/src/macro_utils.rs
index 75360de37..34c700e65 100644
--- a/src/macro_utils.rs
+++ b/src/macro_utils.rs
@@ -61,7 +61,7 @@ macro_rules! expand_if {
 #[cfg(debug_assertions)]
 macro_rules! debug_bounds_check {
     ($self_:ident, $index:expr) => {
-        if $index.index_checked(&$self_.dim, &$self_.strides).is_none() {
+        if $index.index_checked(&$self_._dim(), &$self_._strides()).is_none() {
             panic!(
                 "ndarray: index {:?} is out of bounds for array of shape {:?}",
                 $index,
@@ -75,3 +75,21 @@ macro_rules! debug_bounds_check {
 macro_rules! debug_bounds_check {
     ($self_:ident, $index:expr) => {};
 }
+
+#[cfg(debug_assertions)]
+macro_rules! debug_bounds_check_ref {
+    ($self_:ident, $index:expr) => {
+        if $index.index_checked(&$self_._dim(), &$self_._strides()).is_none() {
+            panic!(
+                "ndarray: index {:?} is out of bounds for array of shape {:?}",
+                $index,
+                $self_.shape()
+            );
+        }
+    };
+}
+
+#[cfg(not(debug_assertions))]
+macro_rules! debug_bounds_check_ref {
+    ($self_:ident, $index:expr) => {};
+}
diff --git a/src/numeric/impl_numeric.rs b/src/numeric/impl_numeric.rs
index ae82a482a..9709a5254 100644
--- a/src/numeric/impl_numeric.rs
+++ b/src/numeric/impl_numeric.rs
@@ -272,7 +272,7 @@ where D: Dimension
         A: Clone + Zero + Add