Commit aa0a728

add dirty tracking unit tests
Signed-off-by: Alexandru Agache <[email protected]>
1 parent 7186d16

6 files changed: +524 additions, -2 deletions

src/bitmap/backend/atomic_bitmap.rs
Lines changed: 10 additions & 2 deletions

@@ -144,9 +144,12 @@ impl NewBitmap for AtomicBitmap {

 #[cfg(test)]
 mod tests {
+    use super::*;
+
+    use crate::bitmap::tests::test_bitmap;
+
     #[test]
     fn bitmap_basic() {
-        use super::AtomicBitmap;
         let b = AtomicBitmap::new(1024, 128);
         assert_eq!(b.is_empty(), false);
         assert_eq!(b.len(), 8);

@@ -169,12 +172,17 @@ mod tests {

     #[test]
     fn bitmap_out_of_range() {
-        use super::AtomicBitmap;
         let b = AtomicBitmap::new(1024, 128);
         // Set a partial range that goes beyond the end of the bitmap
         b.set_addr_range(768, 512);
         assert!(b.is_addr_set(768));
         // The bitmap is never set beyond its end
         assert!(!b.is_addr_set(1152));
     }
+
+    #[test]
+    fn test_bitmap_impl() {
+        let b = AtomicBitmap::new(0x2000, 128);
+        test_bitmap(&b);
+    }
 }
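
Note: the new `test_bitmap_impl` test delegates to the shared `test_bitmap` helper added to `src/bitmap/mod.rs` below. In terms of the `Bitmap` trait surface, what it verifies boils down to the following sketch (the `0x2000`/`128` arguments mirror the test; `mark_dirty`, `dirty_at` and `slice_at` are the trait methods the helper relies on):

    // Sketch of the invariants test_bitmap(&b) checks for this backend.
    let b = AtomicBitmap::new(0x2000, 128);
    assert!(!b.dirty_at(0x1000));             // everything starts clean
    b.mark_dirty(0x1000, 0x100);              // dirty a 0x100-byte range
    assert!(b.dirty_at(0x1000));              // now reported as dirty ...
    assert!(b.slice_at(0x1000).dirty_at(0));  // ... also through a slice view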

src/bitmap/backend/ref_slice.rs
Lines changed: 35 additions & 0 deletions

@@ -72,3 +72,38 @@ impl<'a, B> Debug for RefSlice<'a, B> {
         write!(f, "(bitmap slice)")
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use crate::bitmap::tests::{range_is_clean, range_is_dirty, test_bitmap};
+    use crate::bitmap::AtomicBitmap;
+
+    #[test]
+    fn test_ref_slice() {
+        let bitmap_size = 0x1_0000;
+        let dirty_offset = 0x1000;
+        let dirty_len = 0x100;
+
+        {
+            let bitmap = AtomicBitmap::new(bitmap_size, 1);
+            let slice1 = bitmap.slice_at(0);
+            let slice2 = bitmap.slice_at(dirty_offset);
+
+            assert!(range_is_clean(&slice1, 0, bitmap_size));
+            assert!(range_is_clean(&slice2, 0, dirty_len));
+
+            bitmap.mark_dirty(dirty_offset, dirty_len);
+
+            assert!(range_is_dirty(&slice1, dirty_offset, dirty_len));
+            assert!(range_is_dirty(&slice2, 0, dirty_len));
+        }
+
+        {
+            let bitmap = AtomicBitmap::new(bitmap_size, 1);
+            let slice = bitmap.slice_at(0);
+            test_bitmap(&slice);
+        }
+    }
+}
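
The property being pinned down here is offset translation: a `RefSlice` is a borrowed view that rebases the parent bitmap at a fixed offset, so a range dirtied through the parent at `dirty_offset` is observed by `slice2` (taken at that same offset) starting at position 0. A minimal sketch of that coordinate shift, using the same API as the test:

    let bitmap = AtomicBitmap::new(0x1_0000, 1);
    let slice = bitmap.slice_at(0x1000);  // view rebased at offset 0x1000
    bitmap.mark_dirty(0x1000, 0x100);     // dirty through the parent ...
    assert!(slice.dirty_at(0));           // ... seen at slice offset 0
    assert!(bitmap.dirty_at(0x1000));     // parent coordinates unchanged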

src/bitmap/mod.rs
Lines changed: 318 additions & 0 deletions

@@ -108,3 +108,321 @@ pub type BS<'a, B> = <B as WithBitmapSlice<'a>>::S;
 /// Helper type alias for referring to the `BitmapSlice` concrete type associated with
 /// the memory regions of an object `M: GuestMemory`.
 pub type MS<'a, M> = BS<'a, <<M as GuestMemory>::R as GuestMemoryRegion>::B>;
+
+#[cfg(test)]
+pub(crate) mod tests {
+    use super::*;
+
+    use std::io::Cursor;
+    use std::marker::PhantomData;
+    use std::mem::size_of_val;
+    use std::result::Result;
+    use std::sync::atomic::{AtomicU64, Ordering};
+
+    use crate::{Bytes, VolatileMemory};
+    #[cfg(feature = "backend-mmap")]
+    use crate::{GuestAddress, MemoryRegionAddress};
+
+    // Helper method to check whether a specified range is clean.
+    pub fn range_is_clean<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
+        (start..start + len).all(|offset| !b.dirty_at(offset))
+    }
+
+    // Helper method to check whether a specified range is dirty.
+    pub fn range_is_dirty<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
+        (start..start + len).all(|offset| b.dirty_at(offset))
+    }
+
+    pub fn check_range<B: Bitmap>(b: &B, start: usize, len: usize, clean: bool) -> bool {
+        if clean {
+            range_is_clean(b, start, len)
+        } else {
+            range_is_dirty(b, start, len)
+        }
+    }
+
+    // Helper method that tests a generic `B: Bitmap` implementation. It assumes `b` covers
+    // an area of length at least 0x2000.
+    pub fn test_bitmap<B: Bitmap>(b: &B) {
+        let len = 0x2000;
+        let dirty_offset = 0x1000;
+        let dirty_len = 0x100;
+
+        // Some basic checks.
+        let s = b.slice_at(dirty_offset);
+
+        assert!(range_is_clean(b, 0, len));
+        assert!(range_is_clean(&s, 0, dirty_len));
+
+        b.mark_dirty(dirty_offset, dirty_len);
+        assert!(range_is_dirty(b, dirty_offset, dirty_len));
+        assert!(range_is_dirty(&s, 0, dirty_len));
+    }
+
+    #[derive(Debug)]
+    pub enum TestAccessError {
+        RangeCleanCheck,
+        RangeDirtyCheck,
+    }
+
+    // A helper object that implements auxiliary operations for testing `Bytes` implementations
+    // in the context of dirty bitmap tracking.
+    struct BytesHelper<F, G, M> {
+        check_range_fn: F,
+        address_fn: G,
+        phantom: PhantomData<*const M>,
+    }
+
+    // `F` represents a closure that checks whether a specified range associated with the `Bytes`
+    // object that's being tested is marked as dirty or not (depending on the value of the last
+    // parameter). It has the following parameters:
+    // - A reference to a `Bytes` implementation that's subject to testing.
+    // - The offset of the range.
+    // - The length of the range.
+    // - Whether we are checking if the range is clean (when `true`) or marked as dirty.
+    //
+    // `G` represents a closure that translates an offset into an address value that's
+    // relevant for the `Bytes` implementation being tested.
+    impl<F, G, M, A> BytesHelper<F, G, M>
+    where
+        F: Fn(&M, usize, usize, bool) -> bool,
+        G: Fn(usize) -> A,
+        M: Bytes<A>,
+    {
+        fn check_range(&self, m: &M, start: usize, len: usize, clean: bool) -> bool {
+            (self.check_range_fn)(m, start, len, clean)
+        }
+
+        fn address(&self, offset: usize) -> A {
+            (self.address_fn)(offset)
+        }
+
+        fn test_access<Op>(
+            &self,
+            bytes: &M,
+            dirty_offset: usize,
+            dirty_len: usize,
+            op: Op,
+        ) -> Result<(), TestAccessError>
+        where
+            Op: Fn(&M, A),
+        {
+            if !self.check_range(bytes, dirty_offset, dirty_len, true) {
+                return Err(TestAccessError::RangeCleanCheck);
+            }
+
+            op(bytes, self.address(dirty_offset));
+
+            if !self.check_range(bytes, dirty_offset, dirty_len, false) {
+                return Err(TestAccessError::RangeDirtyCheck);
+            }
+
+            Ok(())
+        }
+    }
+
+    // `F` and `G` stand for the same closure types as described in the `BytesHelper` comment.
+    // The `step` parameter represents the offset that's added to the current address after
+    // performing each access. It provides finer-grained control when testing tracking
+    // implementations that aggregate entire ranges for accounting purposes (for example, doing
+    // tracking at the page level).
+    pub fn test_bytes<F, G, M, A>(bytes: &M, check_range_fn: F, address_fn: G, step: usize)
+    where
+        F: Fn(&M, usize, usize, bool) -> bool,
+        G: Fn(usize) -> A,
+        A: Copy,
+        M: Bytes<A>,
+        <M as Bytes<A>>::E: Debug,
+    {
+        const BUF_SIZE: usize = 1024;
+        let buf = vec![1u8; 1024];
+
+        let val = 1u64;
+
+        let h = BytesHelper {
+            check_range_fn,
+            address_fn,
+            phantom: PhantomData,
+        };
+
+        let mut dirty_offset = 0x1000;
+
+        // Test `write`.
+        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
+            assert_eq!(m.write(buf.as_slice(), addr).unwrap(), BUF_SIZE)
+        })
+        .unwrap();
+        dirty_offset += step;
+
+        // Test `write_slice`.
+        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
+            m.write_slice(buf.as_slice(), addr).unwrap()
+        })
+        .unwrap();
+        dirty_offset += step;
+
+        // Test `write_obj`.
+        h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| {
+            m.write_obj(val, addr).unwrap()
+        })
+        .unwrap();
+        dirty_offset += step;
+
+        // Test `read_from`.
+        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
+            assert_eq!(
+                m.read_from(addr, &mut Cursor::new(&buf), BUF_SIZE).unwrap(),
+                BUF_SIZE
+            )
+        })
+        .unwrap();
+        dirty_offset += step;
+
+        // Test `read_exact_from`.
+        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
+            m.read_exact_from(addr, &mut Cursor::new(&buf), BUF_SIZE)
+                .unwrap()
+        })
+        .unwrap();
+        dirty_offset += step;
+
+        // Test `store`.
+        h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| {
+            m.store(val, addr, Ordering::Relaxed).unwrap()
+        })
+        .unwrap();
+    }
+
+    // This function and the next are currently conditionally compiled because we only use
+    // them to test the mmap-based backend implementations for now. Going forward, the generic
+    // test functions defined here can be placed in a separate module (e.g. `test_utilities`)
+    // which is gated by a feature and can be used for testing purposes by other crates as well.
+    #[cfg(feature = "backend-mmap")]
+    fn test_guest_memory_region<R: GuestMemoryRegion>(region: &R) {
+        let dirty_addr = MemoryRegionAddress(0x0);
+        let val = 123u64;
+        let dirty_len = size_of_val(&val);
+
+        let slice = region.get_slice(dirty_addr, dirty_len).unwrap();
+
+        assert!(range_is_clean(region.bitmap(), 0, region.len() as usize));
+        assert!(range_is_clean(slice.bitmap(), 0, dirty_len));
+
+        region.write_obj(val, dirty_addr).unwrap();
+
+        assert!(range_is_dirty(
+            region.bitmap(),
+            dirty_addr.0 as usize,
+            dirty_len
+        ));
+
+        assert!(range_is_dirty(slice.bitmap(), 0, dirty_len));
+
+        // Finally, let's invoke the generic tests for `R: Bytes`. It's ok to pass the same
+        // `region` handle because `test_bytes` starts performing writes after the range
+        // that's already been dirtied in the first part of this test.
+        test_bytes(
+            region,
+            |r: &R, start: usize, len: usize, clean: bool| {
+                check_range(r.bitmap(), start, len, clean)
+            },
+            |offset| MemoryRegionAddress(offset as u64),
+            0x1000,
+        );
+    }
+
+    #[cfg(feature = "backend-mmap")]
+    // Assumptions about M generated by f ...
+    pub fn test_guest_memory_and_region<M, F>(f: F)
+    where
+        M: GuestMemory,
+        F: Fn() -> M,
+    {
+        let m = f();
+        let dirty_addr = GuestAddress(0x1000);
+        let val = 123u64;
+        let dirty_len = size_of_val(&val);
+
+        let (region, region_addr) = m.to_region_addr(dirty_addr).unwrap();
+        let slice = m.get_slice(dirty_addr, dirty_len).unwrap();
+
+        assert!(range_is_clean(region.bitmap(), 0, region.len() as usize));
+        assert!(range_is_clean(slice.bitmap(), 0, dirty_len));
+
+        m.write_obj(val, dirty_addr).unwrap();
+
+        assert!(range_is_dirty(
+            region.bitmap(),
+            region_addr.0 as usize,
+            dirty_len
+        ));
+
+        assert!(range_is_dirty(slice.bitmap(), 0, dirty_len));
+
+        // Now let's invoke the tests for the inner `GuestMemoryRegion` type.
+        test_guest_memory_region(f().find_region(GuestAddress(0)).unwrap());
+
+        // Finally, let's invoke the generic tests for `Bytes`.
+        let check_range_closure = |m: &M, start: usize, len: usize, clean: bool| -> bool {
+            let mut check_result = true;
+            m.try_access(len, GuestAddress(start as u64), |_, size, reg_addr, reg| {
+                if !check_range(reg.bitmap(), reg_addr.0 as usize, size, clean) {
+                    check_result = false;
+                }
+                Ok(size)
+            })
+            .unwrap();
+
+            check_result
+        };
+
+        test_bytes(
+            &f(),
+            check_range_closure,
+            |offset| GuestAddress(offset as u64),
+            0x1000,
+        );
+    }
+
+    pub fn test_volatile_memory<M: VolatileMemory>(m: &M) {
+        assert!(m.len() >= 0x8000);
+
+        let dirty_offset = 0x1000;
+        let val = 123u64;
+        let dirty_len = size_of_val(&val);
+
+        let get_ref_offset = 0x2000;
+        let array_ref_offset = 0x3000;
+        let align_as_mut_offset = 0x4000;
+        let atomic_ref_offset = 0x5000;
+
+        let s1 = m.as_volatile_slice();
+        let s2 = m.get_slice(dirty_offset, dirty_len).unwrap();
+
+        assert!(range_is_clean(s1.bitmap(), 0, s1.len()));
+        assert!(range_is_clean(s2.bitmap(), 0, s2.len()));
+
+        s1.write_obj(val, dirty_offset).unwrap();
+
+        assert!(range_is_dirty(s1.bitmap(), dirty_offset, dirty_len));
+        assert!(range_is_dirty(s2.bitmap(), 0, dirty_len));
+
+        let v_ref = m.get_ref::<u64>(get_ref_offset).unwrap();
+        assert!(range_is_clean(s1.bitmap(), get_ref_offset, dirty_len));
+        v_ref.store(val);
+        assert!(range_is_dirty(s1.bitmap(), get_ref_offset, dirty_len));
+
+        let arr_ref = m.get_array_ref::<u64>(array_ref_offset, 1).unwrap();
+        assert!(range_is_clean(s1.bitmap(), array_ref_offset, dirty_len));
+        arr_ref.store(0, val);
+        assert!(range_is_dirty(s1.bitmap(), array_ref_offset, dirty_len));
+
+        assert!(range_is_clean(s1.bitmap(), align_as_mut_offset, dirty_len));
+        unsafe { m.aligned_as_mut::<u64>(align_as_mut_offset) }.unwrap();
+        assert!(range_is_dirty(s1.bitmap(), align_as_mut_offset, dirty_len));
+
+        assert!(range_is_clean(s1.bitmap(), atomic_ref_offset, dirty_len));
+        m.get_atomic_ref::<AtomicU64>(atomic_ref_offset).unwrap();
+        assert!(range_is_dirty(s1.bitmap(), atomic_ref_offset, dirty_len));
+    }
+}
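
A note on the `step = 0x1000` argument the mmap-gated tests pass to `test_bytes`: with page-granularity tracking, dirtying any byte marks the whole containing page, so two probes landing in the same page would make the second one fail the clean precondition in `test_access`. Advancing a full page between accesses keeps each probe independent of the previous one. The resulting layout (with `BUF_SIZE = 0x400` and an 8-byte `u64`):

    // Offsets touched by test_bytes when step = 0x1000 (one page per probe):
    // write           -> 0x1000..0x1400
    // write_slice     -> 0x2000..0x2400
    // write_obj       -> 0x3000..0x3008
    // read_from       -> 0x4000..0x4400
    // read_exact_from -> 0x5000..0x5400
    // store           -> 0x6000..0x6008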

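For context, a backend test would drive `test_guest_memory_and_region` with a closure that builds a fresh memory object per invocation (the helper calls `f()` more than once). The sketch below is hypothetical: the `GuestMemoryMmap::from_ranges` constructor shape and the way an `AtomicBitmap` gets attached to the regions are assumptions for illustration, not part of this commit.

    // Hypothetical caller in the mmap backend's tests; assumes the regions
    // of the constructed GuestMemoryMmap carry an AtomicBitmap.
    #[cfg(feature = "backend-mmap")]
    #[test]
    fn test_dirty_tracking_mmap() {
        use crate::bitmap::tests::test_guest_memory_and_region;
        use crate::{GuestAddress, GuestMemoryMmap};

        // Build a fresh 0x1_0000-byte guest memory each time; large enough
        // for the probes test_bytes performs up to offset 0x6008.
        test_guest_memory_and_region(|| {
            GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x1_0000)]).unwrap()
        });
    }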