+#![allow(dead_code)]
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};

+use async_trait::async_trait;
use parking_lot::Mutex;
use vortex_buffer::{Alignment, ByteBuffer};
use vortex_error::{VortexResult, vortex_bail, vortex_err};
@@ -12,73 +14,16 @@ use super::ordered::{OrderedBuffers, Region};
use crate::footer::SegmentSpec;

/// A segment writer that holds buffers in memory until they are flushed by a writer.
-#[derive(Default)]
-pub(crate) struct BufferedSegmentWriter {
-    /// A Vec byte buffers for segments
-    segments_buffers: Vec<Vec<ByteBuffer>>,
-    next_id: SegmentId,
-}
-
-impl SegmentWriter for BufferedSegmentWriter {
-    fn put(&mut self, data: &[ByteBuffer]) -> SegmentId {
-        self.segments_buffers.push(data.to_vec());
-        let id = self.next_id;
-        self.next_id = SegmentId::from(*self.next_id + 1);
-        id
-    }
-}
-
-impl BufferedSegmentWriter {
-    /// Flush the segments to the provided async writer.
-    pub async fn flush_async<W: VortexWrite>(
-        &mut self,
-        writer: &mut futures::io::Cursor<W>,
-        segment_specs: &mut Vec<SegmentSpec>,
-    ) -> VortexResult<()> {
-        for buffers in self.segments_buffers.drain(..) {
-            // The API requires us to write these buffers contiguously. Therefore, we can only
-            // respect the alignment of the first one.
-            // Don't worry, in most cases the caller knows what they're doing and will align the
-            // buffers themselves, inserting padding buffers where necessary.
-            let alignment = buffers
-                .first()
-                .map(|buffer| buffer.alignment())
-                .unwrap_or_else(Alignment::none);
-
-            // Add any padding required to align the segment.
-            let offset = writer.position();
-            let padding = offset.next_multiple_of(*alignment as u64) - offset;
-            if padding > 0 {
-                writer
-                    .write_all(ByteBuffer::zeroed(padding as usize))
-                    .await?;
-            }
-            let offset = writer.position();
-
-            for buffer in buffers {
-                writer.write_all(buffer).await?;
-            }
-
-            segment_specs.push(SegmentSpec {
-                offset,
-                length: u32::try_from(writer.position() - offset)
-                    .map_err(|_| vortex_err!("segment length exceeds maximum u32"))?,
-                alignment,
-            });
-        }
-        Ok(())
-    }
-}
-
#[derive(Default)]
pub struct InOrderSegmentWriter {
    buffers: Arc<Mutex<OrderedBuffers>>,
    region: Region,
    region_offset: usize,
}

-impl InOrderSegmentWriter {
-    pub async fn put(&mut self, data: Vec<ByteBuffer>) -> VortexResult<SegmentId> {
+#[async_trait]
+impl SegmentWriter for InOrderSegmentWriter {
+    async fn put(&mut self, data: Vec<ByteBuffer>) -> VortexResult<SegmentId> {
        let buffer_idx = self.region.start + self.region_offset;
        if buffer_idx >= self.region.end {
            vortex_bail!("region space exhausted!");
@@ -87,7 +32,9 @@ impl InOrderSegmentWriter {
        self.region_offset += 1;
        self.next_segment_id_once_active().await
    }
+}

+impl InOrderSegmentWriter {
    pub fn split(self, splits: usize) -> VortexResult<Vec<Self>> {
        Ok(self
            .buffers
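For reference, the new impl above targets a `SegmentWriter` trait whose definition is not part of this hunk. The following is only a minimal sketch reconstructed from the visible impl: the `#[async_trait]` attribute and the `put` signature are taken from the diff, while the `SegmentId` stand-in and everything else are assumptions.

```rust
use async_trait::async_trait;
use vortex_buffer::ByteBuffer;
use vortex_error::VortexResult;

/// Stand-in for the crate's real SegmentId (its definition is not shown
/// in this diff).
#[derive(Clone, Copy, Debug)]
pub struct SegmentId(u32);

/// Sketch of the trait the new impl appears to target: `put` is now
/// async, fallible, and takes ownership of the buffers it writes.
#[async_trait]
pub trait SegmentWriter {
    async fn put(&mut self, data: Vec<ByteBuffer>) -> VortexResult<SegmentId>;
}
```

Taking `Vec<ByteBuffer>` by value lets an implementation hand the buffers off without copying, replacing the removed synchronous `put(&mut self, data: &[ByteBuffer])`, which had to clone the slice into its internal queue.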
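The deleted `flush_async` also documents the alignment strategy: pad the writer position forward to the next multiple of the first buffer's alignment before recording the segment offset. A self-contained sketch of that arithmetic, using plain integers instead of the Vortex types:

```rust
/// Bytes of zero-padding needed to advance `offset` to the next multiple
/// of `alignment`; this mirrors the computation in the removed flush_async.
fn padding_for(offset: u64, alignment: u64) -> u64 {
    offset.next_multiple_of(alignment) - offset
}

fn main() {
    // A segment starting at byte 13 with 8-byte alignment needs 3 bytes
    // of padding so its data begins at offset 16.
    assert_eq!(padding_for(13, 8), 3);
    // An already-aligned offset needs no padding.
    assert_eq!(padding_for(16, 8), 0);
}
```

As the removed comments note, only the first buffer's alignment can be honored because the buffers are written contiguously; callers are expected to insert padding buffers of their own where finer-grained alignment matters.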