Skip to content

Commit

Permalink
move to a builder pattern
Browse files Browse the repository at this point in the history
  • Loading branch information
Freax13 committed Feb 14, 2023
1 parent 0d98de9 commit 8f3a412
Showing 1 changed file with 253 additions and 20 deletions.
273 changes: 253 additions & 20 deletions src/instructions/tlb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,14 @@
use bit_field::BitField;

use crate::{
structures::paging::{page::NotGiantPageSize, Page, PageSize, Size2MiB},
VirtAddr,
instructions::segmentation::{Segment, CS},
structures::paging::{
page::{NotGiantPageSize, PageRange},
Page, PageSize, Size2MiB, Size4KiB,
},
PrivilegeLevel, VirtAddr,
};
use core::arch::asm;
use core::{arch::asm, cmp, convert::TryFrom};

/// Invalidate the given address in the TLB using the `invlpg` instruction.
#[inline]
Expand Down Expand Up @@ -103,14 +107,255 @@ pub unsafe fn flush_pcid(command: InvPicdCommand) {
}
}

/// Invalidates TLB entries with broadcast to all logical processors.
///
/// This type wraps the `invlpgb` and `tlbsync` instructions. Obtaining a
/// value via [`Invlpgb::new`] verifies hardware support and queries the
/// processor's limits, so holding an instance proves the instructions are
/// available.
///
/// ```no_run
/// use x86_64::VirtAddr;
/// use x86_64::structures::paging::Page;
/// use x86_64::instructions::tlb::Invlpgb;
///
/// // Check that `invlpgb` and `tlbsync` are supported.
/// let invlpgb = Invlpgb::new().unwrap();
///
/// // Broadcast flushing some pages to all logical processors.
/// let start: Page = Page::from_start_address(VirtAddr::new(0xf000_0000)).unwrap();
/// let pages = Page::range(start, start + 3);
/// invlpgb.build().pages(pages).include_global().flush();
///
/// // Wait for all logical processors to respond.
/// invlpgb.tlbsync();
/// ```
#[derive(Debug, Clone, Copy)]
pub struct Invlpgb {
    // Maximum number of pages a single `invlpgb` may flush
    // (CPUID.(EAX=8000_0008H):EDX[0..=15]).
    invlpgb_count_max: u16,
    // Whether flushing nested (guest) translations is supported
    // (CPUID.(EAX=8000_0008H):EBX bit 21).
    tlb_flush_nested: bool,
    // Number of available address space identifiers
    // (CPUID.(EAX=8000_000AH):EBX).
    nasid: u32,
}

impl Invlpgb {
    /// Check that `invlpgb` and `tlbsync` are supported and query limits.
    ///
    /// # Panics
    ///
    /// Panics if the CPL is not 0.
    pub fn new() -> Option<Self> {
        // These instructions are only usable in ring 0, so fail loudly
        // if we are running at any other privilege level.
        let code_segment = CS::get_reg();
        assert_eq!(code_segment.rpl(), PrivilegeLevel::Ring0);

        // CPUID leaf 8000_0008h: EBX bit 3 advertises the `INVLPGB` and
        // `TLBSYNC` instructions.
        let extended_features = unsafe { core::arch::x86_64::__cpuid(0x8000_0008) };
        if !extended_features.ebx.get_bit(3) {
            return None;
        }

        // CPUID leaf 8000_000Ah: EBX reports the number of supported
        // address space identifiers.
        let svm_features = unsafe { core::arch::x86_64::__cpuid(0x8000_000a) };

        Some(Self {
            invlpgb_count_max: extended_features.edx.get_bits(0..=15) as u16,
            tlb_flush_nested: extended_features.ebx.get_bit(21),
            nasid: svm_features.ebx,
        })
    }

    /// Returns the maximum count of pages to be flushed supported by the processor.
    #[inline]
    pub fn invlpgb_count_max(&self) -> u16 {
        self.invlpgb_count_max
    }

    /// Returns whether the processor supports flushing translations used for guest translation.
    #[inline]
    pub fn tlb_flush_nested(&self) -> bool {
        self.tlb_flush_nested
    }

    /// Returns the number of available address space identifiers.
    #[inline]
    pub fn nasid(&self) -> u32 {
        self.nasid
    }

    /// Create an `InvlpgbFlushBuilder` with all filters and flags cleared.
    pub fn build(&self) -> InvlpgbFlushBuilder<'_> {
        InvlpgbFlushBuilder {
            invlpgb: self,
            page_range: None,
            pcid: None,
            asid: None,
            include_global: false,
            final_translation_only: false,
            include_nested_translations: false,
        }
    }

    /// Wait for all previous `invlpgb` instruction executed on the current
    /// logical processor to be acknowledged by all other logical processors.
    #[inline]
    pub fn tlbsync(&self) {
        // Holding `self` proves `tlbsync` is supported (checked in `new`).
        unsafe {
            asm!("tlbsync", options(nomem, preserves_flags));
        }
    }
}

/// A builder struct to construct the parameters for the `invlpgb` instruction.
#[derive(Debug, Clone)]
#[must_use]
pub struct InvlpgbFlushBuilder<'a, S = Size4KiB>
where
    S: NotGiantPageSize,
{
    // Capability and limit information queried from the processor.
    invlpgb: &'a Invlpgb,
    // Pages to flush; `None` requests a flush without a linear-address filter.
    page_range: Option<PageRange<S>>,
    // If set, only flush entries tagged with this PCID.
    pcid: Option<Pcid>,
    // If set, only flush entries tagged with this ASID.
    asid: Option<u16>,
    // Also flush global pages.
    include_global: bool,
    // Only flush the final translation, not cached upper-level entries.
    final_translation_only: bool,
    // Also flush nested translations used for guest translation.
    include_nested_translations: bool,
}

impl<'a, S> InvlpgbFlushBuilder<'a, S>
where
    S: NotGiantPageSize,
{
    /// Flush a range of pages.
    ///
    /// If the range doesn't fit within `invlpgb_count_max`, `invlpgb` is
    /// executed multiple times.
    pub fn pages<T>(self, page_range: PageRange<T>) -> InvlpgbFlushBuilder<'a, T>
    where
        T: NotGiantPageSize,
    {
        // Rebuild the builder rather than mutating `self` because the
        // page-size type parameter changes from `S` to `T`; every other
        // setting carries over unchanged.
        InvlpgbFlushBuilder {
            invlpgb: self.invlpgb,
            page_range: Some(page_range),
            pcid: self.pcid,
            asid: self.asid,
            include_global: self.include_global,
            final_translation_only: self.final_translation_only,
            include_nested_translations: self.include_nested_translations,
        }
    }

    /// Only flush TLB entries with the given PCID.
    ///
    /// # Safety
    ///
    /// The caller has to ensure that PCID is enabled in CR4 when the flush is executed.
    pub unsafe fn pcid(mut self, pcid: Pcid) -> Self {
        self.pcid = Some(pcid);
        self
    }

    /// Only flush TLB entries with the given ASID.
    ///
    /// # Safety
    ///
    /// The caller has to ensure that SVM is enabled in EFER when the flush is executed.
    // FIXME: Make ASID a type and remove error type.
    pub unsafe fn asid(mut self, asid: u16) -> Result<Self, AsidOutOfRangeError> {
        // NOTE(review): this accepts `asid == nasid`; confirm against the
        // AMD APM whether valid ASIDs are `0..nasid` (which would need `>=`)
        // or `1..=nasid`.
        if u32::from(asid) > self.invlpgb.nasid {
            return Err(AsidOutOfRangeError {
                asid,
                nasid: self.invlpgb.nasid,
            });
        }

        self.asid = Some(asid);
        Ok(self)
    }

    /// Also flush global pages.
    pub fn include_global(mut self) -> Self {
        self.include_global = true;
        self
    }

    /// Only flush the final translation and not the cached upper level TLB entries.
    pub fn final_translation_only(mut self) -> Self {
        self.final_translation_only = true;
        self
    }

    /// Also flush nested translations that could be used for guest translation.
    ///
    /// # Panics
    ///
    /// Panics if the processor doesn't support flushing nested translations
    /// (queried in `Invlpgb::new`).
    pub fn include_nested_translations(mut self) -> Self {
        assert!(
            self.invlpgb.tlb_flush_nested,
            "flushing all nested translations is not supported"
        );

        self.include_nested_translations = true;
        self
    }

    /// Execute the flush.
    ///
    /// With a page range set, the range is flushed in chunks sized to the
    /// processor's `invlpgb_count_max` limit; without one, a single
    /// unfiltered broadcast flush is issued.
    pub fn flush(self) {
        if let Some(mut pages) = self.page_range {
            while !pages.is_empty() {
                // Calculate how many pages we still need to flush.
                let count = Page::<S>::steps_between(&pages.start, &pages.end).unwrap();

                // Make sure that we never jump the gap in the address space when flushing.
                // 0xffff_8000_0000_0000 is where the address-space upper half
                // begins; a single invocation must not span the non-canonical
                // hole below it, so cap the chunk at that boundary.
                let second_half_start =
                    Page::<S>::containing_address(VirtAddr::new(0xffff_8000_0000_0000));
                let count = if pages.start < second_half_start {
                    let count_to_second_half =
                        Page::steps_between(&pages.start, &second_half_start).unwrap();
                    cmp::min(count, count_to_second_half)
                } else {
                    count
                };

                // We can flush at most u16::MAX pages at once.
                let count = u16::try_from(count).unwrap_or(u16::MAX);

                // Cap the count by the maximum supported count of the processor.
                let count = cmp::min(count, self.invlpgb.invlpgb_count_max);

                unsafe {
                    flush_broadcast(
                        Some((pages.start, count)),
                        self.pcid,
                        self.asid,
                        self.include_global,
                        self.final_translation_only,
                        self.include_nested_translations,
                    );
                }

                // Even if the count is zero, one page is still flushed and so
                // we need to advance by at least one.
                let inc_count = cmp::max(count, 1);
                pages.start = Page::forward_checked(pages.start, usize::from(inc_count)).unwrap();
            }
        } else {
            // No page range: issue one broadcast flush without a
            // linear-address filter.
            unsafe {
                flush_broadcast::<S>(
                    None,
                    self.pcid,
                    self.asid,
                    self.include_global,
                    self.final_translation_only,
                    self.include_nested_translations,
                );
            }
        }
    }
}

/// An error returned when a given ASID exceeds the range supported by the
/// processor (see `InvlpgbFlushBuilder::asid`).
#[derive(Debug)]
pub struct AsidOutOfRangeError {
    // The rejected ASID.
    asid: u16,
    // The number of address space identifiers supported by the processor.
    nasid: u32,
}

#[inline]
pub unsafe fn flush_broadcast<S>(
unsafe fn flush_broadcast<S>(
va_and_count: Option<(Page<S>, u16)>,
pcid: Option<Pcid>,
asid: Option<u16>,
Expand Down Expand Up @@ -156,15 +401,3 @@ pub unsafe fn flush_broadcast<S>(
);
}
}

/// Synchronize broadcasted TLB Invalidations.
///
/// Waits until all `invlpgb` instructions previously executed on this
/// logical processor have been acknowledged by all other logical processors.
///
/// # Safety
///
/// This function is unsafe as it requires CPUID.(EAX=8000_0008H, ECX=0H):EBX.INVLPGB to be 1.
#[inline]
pub unsafe fn tlbsync() {
    // SAFETY: the caller guarantees the instruction is supported (see above).
    unsafe {
        asm!("tlbsync", options(nomem, preserves_flags));
    }
}

0 comments on commit 8f3a412

Please sign in to comment.