@@ -272,12 +272,99 @@ pub struct MemoryRange {
     pub length: u64,
 }
 
-#[derive(Clone, Default, Serialize, Deserialize)]
+/// A set of guest-memory ranges to transfer as one migration payload.
+#[derive(Clone, Default, Debug, Serialize, Deserialize)]
 pub struct MemoryRangeTable {
     data: Vec<MemoryRange>,
 }
 
+/// Iterator returned by [`MemoryRangeTable::partition`].
+///
+/// Each item contains at most `chunk_size` bytes. A range may be split across
+/// multiple items.
+///
+/// The iterator may reorder ranges for efficiency, so callers must not rely on
+/// the order in which chunks or ranges are yielded.
+#[derive(Clone, Default, Debug)]
+struct MemoryRangeTableIterator {
+    chunk_size: u64,
+    data: Vec<MemoryRange>,
+}
+
+impl MemoryRangeTableIterator {
+    /// Create an iterator that partitions `table` into chunks of at most
+    /// `chunk_size` bytes.
+    pub fn new(table: MemoryRangeTable, chunk_size: u64) -> Self {
+        MemoryRangeTableIterator {
+            chunk_size,
+            data: table.data,
+        }
+    }
+}
+
+impl Iterator for MemoryRangeTableIterator {
+    type Item = MemoryRangeTable;
+
+    /// Return the next chunk of memory ranges from the table, making sure
+    /// that the returned chunk is not larger than `chunk_size` bytes.
+    ///
+    /// **Note**: Do not rely on the order of the ranges returned by this
+    /// iterator. This allows for a more efficient implementation.
+    fn next(&mut self) -> Option<Self::Item> {
+        let mut ranges: Vec<MemoryRange> = vec![];
+        let mut ranges_size: u64 = 0;
+
+        loop {
+            assert!(ranges_size <= self.chunk_size);
+
+            if ranges_size == self.chunk_size || self.data.is_empty() {
+                break;
+            }
+
+            if let Some(range) = self.data.pop() {
+                let next_range: MemoryRange = if ranges_size + range.length > self.chunk_size {
+                    // How many bytes we need to put back into the table.
+                    let leftover_bytes = ranges_size + range.length - self.chunk_size;
+                    assert!(leftover_bytes <= range.length);
+                    let returned_bytes = range.length - leftover_bytes;
+                    assert!(returned_bytes <= range.length);
+                    assert_eq!(leftover_bytes + returned_bytes, range.length);
+
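+                    // Keep the front of the range (the leftover bytes) in the
+                    // table and yield only its tail, so the rest is picked up
+                    // by a later chunk.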
+                    self.data.push(MemoryRange {
+                        gpa: range.gpa,
+                        length: leftover_bytes,
+                    });
+                    MemoryRange {
+                        gpa: range.gpa + leftover_bytes,
+                        length: returned_bytes,
+                    }
+                } else {
+                    range
+                };
+
+                ranges_size += next_range.length;
+                ranges.push(next_range);
+            }
+        }
+
+        if ranges.is_empty() {
+            None
+        } else {
+            Some(MemoryRangeTable { data: ranges })
+        }
+    }
+}
+
 impl MemoryRangeTable {
+    /// Returns the memory ranges contained in this table.
+    pub fn ranges(&self) -> &[MemoryRange] {
+        &self.data
+    }
+
+    /// Partitions the table into chunks of at most `chunk_size` bytes.
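+    ///
+    /// A minimal sketch of the intended use (page size and chunk size here
+    /// are illustrative, mirroring the unit tests below):
+    ///
+    /// ```ignore
+    /// // Three dirty pages starting at GPA 0x1000, with 4 KiB pages.
+    /// let table = MemoryRangeTable::from_dirty_bitmap([0b111], 0x1000, 0x1000);
+    /// // The single 3-page range is split into a 2-page and a 1-page chunk.
+    /// let chunks: Vec<_> = table.partition(2 * 0x1000).collect();
+    /// assert_eq!(chunks.len(), 2);
+    /// ```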
+    pub fn partition(self, chunk_size: u64) -> impl Iterator<Item = MemoryRangeTable> {
+        MemoryRangeTableIterator::new(self, chunk_size)
+    }
+
     /// Converts an iterator over a dirty bitmap into an iterator of dirty
     /// [`MemoryRange`]s, merging consecutive dirty pages into contiguous ranges.
     ///
@@ -413,4 +500,144 @@ mod unit_tests {
             ]
         );
     }
+
+    #[test]
+    fn test_memory_range_table_partition() {
+        // This test starts like the one above, but with an input that is
+        // simpler for developers to parse.
+        let input = [0b11_0011_0011_0011];
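+        // Bit `i` marks page `i` as dirty: here pages 0-1, 4-5, 8-9 and 12-13
+        // are dirty, which merge into four contiguous 2-page ranges.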
+
+        let start_gpa = 0x1000;
+        let page_size = 0x1000;
+
+        let table = MemoryRangeTable::from_dirty_bitmap(input, start_gpa, page_size);
+        let expected_regions = [
+            MemoryRange {
+                gpa: start_gpa,
+                length: page_size * 2,
+            },
+            MemoryRange {
+                gpa: start_gpa + 4 * page_size,
+                length: page_size * 2,
+            },
+            MemoryRange {
+                gpa: start_gpa + 8 * page_size,
+                length: page_size * 2,
+            },
+            MemoryRange {
+                gpa: start_gpa + 12 * page_size,
+                length: page_size * 2,
+            },
+        ];
+        assert_eq!(table.ranges(), &expected_regions);
+
+        // In the first test, we expect exactly the same chunks as the regions
+        // above, because the chunk size equals the (fixed) length of every region.
+        {
+            let chunks = table
+                .clone()
+                .partition(page_size * 2)
+                .map(|table| table.data)
+                .collect::<Vec<_>>();
+
+            // The implementation currently returns the ranges in reverse order.
+            // For better testability, we reverse it.
+            let chunks = chunks
+                .into_iter()
+                .map(|vec| vec.into_iter().rev().collect::<Vec<_>>())
+                .rev()
+                .collect::<Vec<_>>();
+
+            assert_eq!(
+                chunks,
+                &[
+                    [expected_regions[0].clone()].to_vec(),
+                    [expected_regions[1].clone()].to_vec(),
+                    [expected_regions[2].clone()].to_vec(),
+                    [expected_regions[3].clone()].to_vec(),
+                ]
+            );
+        }
+
+        // Next, we have a more sophisticated test with a chunk size of 5 pages.
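+        // With 5-page chunks, iteration first fills a 5-page chunk (splitting
+        // one 2-page range), leaving 3 pages for the second chunk; after the
+        // reversal below, the 3-page chunk appears first in the assertion.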
+        {
+            let chunks = table
+                .clone()
+                .partition(page_size * 5)
+                .map(|table| table.data)
+                .collect::<Vec<_>>();
+
+            // The implementation currently returns the ranges in reverse order.
+            // For better testability, we reverse it.
+            let chunks = chunks
+                .into_iter()
+                .map(|vec| vec.into_iter().rev().collect::<Vec<_>>())
+                .rev()
+                .collect::<Vec<_>>();
+
+            assert_eq!(
+                chunks,
+                &[
+                    vec![
+                        MemoryRange {
+                            gpa: start_gpa,
+                            length: 2 * page_size
+                        },
+                        MemoryRange {
+                            gpa: start_gpa + 4 * page_size,
+                            length: page_size
+                        }
+                    ],
+                    vec![
+                        MemoryRange {
+                            gpa: start_gpa + 5 * page_size,
+                            length: page_size
+                        },
+                        MemoryRange {
+                            gpa: start_gpa + 8 * page_size,
+                            length: 2 * page_size
+                        },
+                        MemoryRange {
+                            gpa: start_gpa + 12 * page_size,
+                            length: 2 * page_size
+                        }
+                    ]
+                ]
+            );
+        }
+    }
+
+    #[test]
+    fn test_memory_range_table_partition_uneven_split() {
+        // Three consecutive dirty pages produce one 3-page range, which lets
+        // us test an uneven 1+2 page split while using the same helper as the
+        // other partition tests above.
+        let input = [0b111];
+        let start_gpa = 0x1000;
+        let page_size = 0x1000;
+
+        let table = MemoryRangeTable::from_dirty_bitmap(input, start_gpa, page_size);
+
+        let chunks = table
+            .partition(page_size * 2)
+            .map(|table| table.data)
+            .collect::<Vec<_>>();
+
+        // The implementation currently returns ranges in reverse order.
+        let chunks = chunks.into_iter().rev().collect::<Vec<_>>();
+
+        assert_eq!(
+            chunks,
+            &[
+                vec![MemoryRange {
+                    gpa: start_gpa,
+                    length: page_size,
+                }],
+                vec![MemoryRange {
+                    gpa: start_gpa + page_size,
+                    length: page_size * 2,
+                }],
+            ]
+        );
+    }
 }