@@ -39,14 +39,13 @@ pub(crate) struct TaskVTable {
     /// The memory layout of the task. This information enables
     /// debuggers to decode raw task memory blobs. Do not remove
     /// the field, even if it appears to be unused.
-    #[allow(unused)]
     pub(crate) layout_info: &'static TaskLayout,
 }
 
 impl TaskVTable {
     /// Returns a pointer to the output inside a task.
     pub(crate) unsafe fn get_output(&self, ptr: *const ()) -> *const () {
-        ptr.byte_add(self.layout_info.offset_r)
+        ptr.add_byte(self.layout_info.offset_r)
     }
 }
 
@@ -209,9 +208,9 @@ where
         unsafe {
             Self {
                 header: ptr as *const HeaderWithMetadata<M>,
-                schedule: ptr.byte_add(Self::TASK_LAYOUT.offset_s) as *const S,
-                future: ptr.byte_add(Self::TASK_LAYOUT.offset_f) as *mut F,
-                output: ptr.byte_add(Self::TASK_LAYOUT.offset_r) as *mut Result<T, Panic>,
+                schedule: ptr.add_byte(Self::TASK_LAYOUT.offset_s) as *const S,
+                future: ptr.add_byte(Self::TASK_LAYOUT.offset_f) as *mut F,
+                output: ptr.add_byte(Self::TASK_LAYOUT.offset_r) as *mut Result<T, Panic>,
             }
         }
     }
@@ -502,7 +501,7 @@ where
 unsafe fn schedule<S: Schedule<M>, M>(ptr: *const (), info: ScheduleInfo) {
     let header = ptr as *const Header;
     let task_layout = (*header).vtable.layout_info;
-    let schedule = ptr.byte_add(task_layout.offset_s) as *mut S;
+    let schedule = ptr.add_byte(task_layout.offset_s) as *mut S;
 
     // If the schedule function has captured variables, create a temporary waker that prevents
     // the task from getting deallocated while the function is being invoked.
@@ -533,7 +532,7 @@ unsafe fn drop_waker(ptr: *const ()) {
 /// Drops the future inside a task.
 #[inline]
 unsafe fn drop_future<F>(ptr: *const (), task_layout: &TaskLayout) {
-    let future_ptr = ptr.byte_add(task_layout.offset_f) as *mut F;
+    let future_ptr = ptr.add_byte(task_layout.offset_f) as *mut F;
 
     // We need a safeguard against panics because the destructor can panic.
     abort_on_panic(|| {
@@ -657,7 +656,7 @@ unsafe fn wake_by_ref<S: Schedule<M>, M>(ptr: *const ()) {
         abort();
     }
 
-    let schedule = ptr.byte_add(task_layout.offset_s) as *mut S;
+    let schedule = ptr.add_byte(task_layout.offset_s) as *mut S;
 
     // Schedule the task. There is no need to call `Self::schedule(ptr)`
     // because the schedule function cannot be destroyed while the waker is
@@ -682,7 +681,7 @@ unsafe fn wake_by_ref<S: Schedule<M>, M>(ptr: *const ()) {
 unsafe fn destroy<S, M>(ptr: *const ()) {
     let header = ptr as *const Header;
     let task_layout = (*header).vtable.layout_info;
-    let schedule = ptr.byte_add(task_layout.offset_s);
+    let schedule = ptr.add_byte(task_layout.offset_s);
 
     // We need a safeguard against panics because destructors can panic.
     abort_on_panic(|| {
@@ -715,3 +714,33 @@ pub(crate) unsafe fn drop_ref(ptr: *const ()) {
         (header.vtable.destroy)(ptr);
     }
 }
+
+trait PointerPolyfill {
+    // Polyfill for `byte_add`.
+    // TODO: Replace this with `byte_add` once the MSRV is bumped to 1.75 or later.
+    /// Adds an unsigned offset in bytes to a pointer.
+    ///
+    /// `count` is in units of bytes.
+    ///
+    /// This is purely a convenience for casting to a `u8` pointer and
+    /// using [add][pointer::add] on it. See that method for documentation
+    /// and safety requirements.
+    ///
+    /// # Safety
+    /// If any of the following conditions are violated, the result is Undefined Behavior:
+    ///
+    /// - The offset in bytes, `count`, computed on mathematical integers
+    ///   (without “wrapping around”), must fit in an `isize`.
+    /// - If the computed offset is non-zero, then `self` must be derived from a pointer to
+    ///   some allocation, and the entire memory range between `self` and the result must be
+    ///   in bounds of that allocation. In particular, this range must not “wrap around”
+    ///   the edge of the address space.
+    unsafe fn add_byte(self, count: usize) -> Self;
+}
+
+impl<T> PointerPolyfill for *const T {
+    #[inline]
+    unsafe fn add_byte(self, count: usize) -> Self {
+        (self.cast::<u8>().add(count)).cast::<T>()
+    }
+}
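
For context, here is a minimal standalone sketch (not part of the commit; the buffer and the 8-byte offset are hypothetical test values) showing that the polyfill computes the same address that the inherent `byte_add`, stabilized in Rust 1.75, would produce. The trait method is presumably named `add_byte` rather than `byte_add` so that calls never collide with the inherent method on newer toolchains, where inherent methods take precedence over trait methods.

trait PointerPolyfill {
    /// Adds `count` bytes to the pointer; same safety rules as `pointer::add`.
    unsafe fn add_byte(self, count: usize) -> Self;
}

impl<T> PointerPolyfill for *const T {
    #[inline]
    unsafe fn add_byte(self, count: usize) -> Self {
        // Step through memory in one-byte units, then restore the pointee type.
        (self.cast::<u8>().add(count)).cast::<T>()
    }
}

fn main() {
    let buf = [0u64; 4];
    let base = buf.as_ptr() as *const ();
    // SAFETY: the 8-byte offset stays inside the 32-byte allocation backing `buf`.
    let shifted = unsafe { base.add_byte(8) };
    assert_eq!(shifted as usize, base as usize + 8);
}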