diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 3f9791c84..487716510 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -61,11 +61,9 @@ body: description: | examples: - **OS**: Ubuntu 20.04 or Windows 11... - - **Tool Chain**: GCC5 or VS2022 or CLANGPDB... - **Targets Impacted**: RELEASE, DEBUG, NO-TARGET, NOOPT... value: | - OS(s): - - Tool Chain(s): - Targets Impacted: render: markdown validations: diff --git a/.github/workflows/patina-qemu-pr-validation-pending.yml b/.github/workflows/patina-qemu-pr-validation-pending.yml index db121dcf9..a3b5433ed 100644 --- a/.github/workflows/patina-qemu-pr-validation-pending.yml +++ b/.github/workflows/patina-qemu-pr-validation-pending.yml @@ -34,5 +34,6 @@ jobs: name: Run uses: OpenDevicePartnership/patina-devops/.github/workflows/PatinaQemuPrValidationPending.yml@patina_e2e_plat_validation with: + head-sha: ${{ github.event.pull_request.head.sha }} pr-number: ${{ github.event.pull_request.number }} secrets: inherit diff --git a/.github/workflows/patina-qemu-pr-validation-post.yml b/.github/workflows/patina-qemu-pr-validation-post.yml index da5ad0417..10426c9fe 100644 --- a/.github/workflows/patina-qemu-pr-validation-post.yml +++ b/.github/workflows/patina-qemu-pr-validation-post.yml @@ -20,8 +20,9 @@ permissions: jobs: post-process: - if: github.event.workflow_run.conclusion != 'cancelled' uses: OpenDevicePartnership/patina-devops/.github/workflows/PatinaQemuPrValidationPost.yml@patina_e2e_plat_validation with: + conclusion: ${{ github.event.workflow_run.conclusion }} + head-sha: ${{ github.event.workflow_run.head_sha }} triggering-run-id: ${{ fromJSON(github.event.workflow_run.id) }} secrets: inherit diff --git a/.github/workflows/patina-qemu-pr-validation.yml b/.github/workflows/patina-qemu-pr-validation.yml index a6d48669f..85e1cb395 100644 --- a/.github/workflows/patina-qemu-pr-validation.yml +++ 
b/.github/workflows/patina-qemu-pr-validation.yml @@ -121,6 +121,7 @@ jobs: if: needs.prepare.result == 'success' && needs.prepare.outputs.skip != 'true' uses: OpenDevicePartnership/patina-devops/.github/workflows/PatinaQemuPrValidation.yml@patina_e2e_plat_validation with: - pr-number: ${{ fromJSON(needs.prepare.outputs.pr_number) }} + head-sha: ${{ github.event.workflow_run.head_sha }} patina-ref: ${{ github.event.workflow_run.head_sha }} + pr-number: ${{ fromJSON(needs.prepare.outputs.pr_number) }} secrets: inherit diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e76fc622d..540609ce1 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -20,6 +20,30 @@ indicate that clearly in your pull request so that the project team can discuss * Format the code with `cargo make all`. * Use meaningful commit messages. See [this blogpost](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) +## AI Policy + +Patina does not accept contributions directly from AI tools (e.g. GitHub Copilot). If you use an AI tool to assist in +the development of a Patina contribution, ensure that: + +1. You have the legal right to submit the code generated by an AI tool under Patina's contribution and licensing + guidelines. +2. You fully understand the changes being made to the codebase and can explain the changes to other contributors. +3. You have thoroughly reviewed the code to ensure it meets Patina contribution guidelines. +4. You have thoroughly tested the code. Firmware changes must be tested on QEMU and a physical platform. + +If you do not fully understand the changes being made to the codebase, please ask for help from the Patina +community before submitting a pull request. You can start a GitHub discussion to get more background on a topic or +submit a GitHub issue to report a bug or request. + +At a minimum, contributors submitting firmware changes should have a working understanding of the Rust programming +language and UEFI firmware development. 
If you are new to either topic, the Patina community is happy to help you +and there are many existing resources available for both subjects on the Internet. + +Patina maintainers reserve the right to reject or close a pull request at any time if it is determined to violate this +policy, including cases where AI‑assisted contributions lack sufficient human understanding, review, testing, or legal +clarity. If a user repeatedly violates this policy, they may be temporarily or permanently banned from contributing to +the Patina project. + ## PR Etiquette * Make sure that GitHub status checks ("PR gates") pass in your PR. diff --git a/Cargo.toml b/Cargo.toml index 5b64aa3d6..9bdb347c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,9 +1,9 @@ [workspace] resolver = "3" -members = ["components/*", "core/*", "sdk/*", "patina_dxe_core"] +members = ["components/*", "core/*", "sdk/*", "patina_dxe_core", "patina_mm_supervisor_core", "patina_mm_user_core"] [workspace.package] -version = "20.1.1" +version = "21.0.0" license = "Apache-2.0" edition = "2024" rust-version = "1.89" @@ -31,21 +31,26 @@ log = { version = "0.4", default-features = false } memoffset = {version = "0.9.1" } mu_rust_helpers = { version = "3.0.2" } num-traits = { version = "0.2", default-features = false } -patina = { version = "20.1.1", path = "sdk/patina" } -patina_debugger = { version = "20.1.1", path = "core/patina_debugger" } -patina_ffs = { version = "20.1.1", path = "sdk/patina_ffs" } -patina_ffs_extractors = { version = "20.1.1", path = "sdk/patina_ffs_extractors" } -patina_internal_collections = { version = "20.1.1", path = "core/patina_internal_collections", default-features = false } -patina_internal_cpu = { version = "20.1.1", path = "core/patina_internal_cpu" } -patina_internal_depex = { version = "20.1.1", path = "core/patina_internal_depex" } +patina = { version = "21.0.0", path = "sdk/patina" } +patina_debugger = { version = "21.0.0", path = "core/patina_debugger" } +patina_ffs = { version = 
"21.0.0", path = "sdk/patina_ffs" } +patina_ffs_extractors = { version = "21.0.0", path = "sdk/patina_ffs_extractors" } +patina_internal_collections = { version = "21.0.0", path = "core/patina_internal_collections", default-features = false } +patina_internal_cpu = { version = "21.0.0", path = "core/patina_internal_cpu" } +patina_internal_depex = { version = "21.0.0", path = "core/patina_internal_depex" } +patina_internal_mm_common = { version = "21.0.0", path = "core/patina_internal_mm_common" } patina_lzma_rs = { version = "0.3.1", default-features = false } -patina_macro = { version = "20.1.1", path = "sdk/patina_macro" } -patina_mm = { version = "20.1.1", path = "components/patina_mm" } +patina_macro = { version = "21.0.0", path = "sdk/patina_macro" } +patina_mm = { version = "21.0.0", path = "components/patina_mm" } +patina_mm_policy = { version = "21.0.0", path = "components/patina_mm_policy" } +patina_mm_supervisor_core = { version = "21.0.0", path = "patina_mm_supervisor_core" } patina_mtrr = { version = "^1.1.4" } -patina_paging = { version = "11" } -patina_performance = { version = "20.1.1", path = "components/patina_performance" } -patina_smbios = { version = "20.1.1", path = "components/patina_smbios" } -patina_stacktrace = { version = "20.1.1", path = "core/patina_stacktrace" } +patina_paging = { version = "11.0.2" } +patina_performance = { version = "21.0.0", path = "components/patina_performance" } +patina_smbios = { version = "21.0.0", path = "components/patina_smbios" } +patina_stacktrace = { version = "21.0.0", path = "core/patina_stacktrace" } +patina_test = { version = "21.0.0", path = "components/patina_test" } +patina_adv_logger = { version = "21.0.0", path = "components/patina_adv_logger" } proc-macro2 = { version = "1" } quote = { version = "1" } r-efi = { version = "5.0.0", default-features = false } @@ -75,3 +80,6 @@ lzma-rs = { version = "0.3" } [workspace.lints.clippy] undocumented_unsafe_blocks = "warn" + +[patch.crates-io] 
+patina_paging = { path = 'D:\Repos\patina-paging' } diff --git a/Makefile.toml b/Makefile.toml index 10095ff83..d7b7d3cf0 100644 --- a/Makefile.toml +++ b/Makefile.toml @@ -125,6 +125,23 @@ private = true command = "cargo" args = ["test", "--no-run", "--all-targets", "--all-features", "@@split(CARGO_MAKE_TASK_ARGS, )"] +[tasks.check-no-default-features-code] +description = "Checks rust code compiles without default features." +private = true +command = "cargo" +args = ["check", "--no-default-features", "@@split(CARGO_MAKE_TASK_ARGS, )"] + +[tasks.check-no-default-features-tests] +description = "Checks rust test code compiles without default features." +private = true +command = "cargo" +args = ["test", "--no-run", "--no-default-features", "@@split(CARGO_MAKE_TASK_ARGS, )"] + +[tasks.check-no-default-features] +description = "Checks rust code and tests compile without default features to catch feature-gate regressions." +clear = true +run_task = [{ name = ["check-no-default-features-code", "check-no-default-features-tests"], parallel = true }] + [tasks.check] description = "Checks rust code for errors. Example `cargo make check`" clear = true @@ -134,7 +151,7 @@ run_task = [{ name = ["check_code", "check_tests"], parallel = true }] description = "Builds crates with Patina tests enabled. Example `cargo make patina-test`" clear = true command = "cargo" -args = ["build", "@@split(INDIVIDUAL_PACKAGE_TARGETS, )", "@@split(STD_FLAGS, )", "--features", "enable_patina_tests"] +args = ["build", "@@split(INDIVIDUAL_PACKAGE_TARGETS, )", "@@split(STD_FLAGS, )", "--features", "test-runner"] dependencies = ["individual-package-targets"] [tasks.generate-lockfile] @@ -151,6 +168,12 @@ command = "cargo" args = ["llvm-cov", "clean", "--workspace"] [tasks.test] +description = "Runs tests with native cargo test behavior. 
Example `cargo make test` or `cargo make test -p package_name -- --nocapture`" +clear = true +command = "cargo" +args = ["test", "@@split(CARGO_MAKE_TASK_ARGS,;)"] + +[tasks.test-cov] description = "Run tests and collect coverage data without generating reports." install_crate = false clear = true @@ -226,7 +249,7 @@ args = ["llvm-cov", "report", "--html", "--output-dir", "${CARGO_MAKE_WORKSPACE_ [tasks.coverage] description = "Build and run all tests and calculate coverage (runs test once and generates LCOV and HTML reports)." -dependencies = ["test", "coverage-lcov", "coverage-html"] +dependencies = ["test-cov", "coverage-lcov", "coverage-html"] clear = true [tasks.build-aarch64] @@ -311,6 +334,7 @@ dependencies = [ "deny", "cspell", "clippy", + "check-no-default-features", "build", "build-x64", "build-aarch64", diff --git a/README.md b/README.md index 31b749a6f..59e584ea0 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,21 @@ incremental migration of today's firmware components largely written in C to Rus objective for this effort is to improve the security and stability of system firmware by leveraging the memory safety offered by Rust while retaining similar boot performance. +**Patina is not a simple port of C UEFI code to Rust**. + +Patina is a pure‑Rust UEFI firmware implementation that removes legacy complexity and introduces a modern architecture, +while preserving compatibility with current PI Specifications and enabling a clear path toward writing more firmware +components in pure Rust over time. + +**Simply writing individual C UEFI drivers in Rust is not equivalent to Patina**. + +To better understand the types of memory safety problems that +Patina helps mitigate, see [Memory Safety Strategy](https://opendevicepartnership.github.io/patina/background/memory_safety_strategy.html). 
+ +Otherwise, read the docs to learn about concepts like [Patina DXE Core Requirements](https://opendevicepartnership.github.io/patina/integrate/patina_dxe_core_requirements.html) +and the [Patina Component Model](https://opendevicepartnership.github.io/patina/component/getting_started.html) to +better understand how Patina is structured and how to integrate it into a platform. + ## Docs * **[Getting Started](https://opendevicepartnership.github.io/patina/):** Patina's official getting started guide, @@ -52,14 +67,23 @@ write a Patina component. ## Important Notes -This repository is still considered to be in a "beta" stage at this time. Platform testing and integration feedback -is very welcome. +Content in the main branch of the patina repository is expected to be functionally stable with the following exception: -Before making pull requests at a minimum, run: +* Patina has optional pieces of functionality called "Patina components". While components are expected to adhere to +the same standards of readiness as the rest of the repository, when evaluating a new component, consumers should +verify that the component does not have special disclaimers or limitations noted in its documentation. -```shell -cargo make all -``` +Also, be aware that Patina has other branches that may host work that is not yet ready for the main branch. To learn +more about these branches and the overall Patina release process, read the +[Patina Release Process](https://github.com/OpenDevicePartnership/patina/blob/main/docs/src/rfc/text/0015-patina-release-process.md) +RFC. + +Platform testing and integration feedback is very welcome. + +### AI Policy + +Patina does not accept contributions directly from AI tools (e.g. GitHub Copilot) and has an AI Policy defined in +[CONTRIBUTING.md](CONTRIBUTING.md#ai-policy) that must be followed for any contributions that are AI-assisted. 
## Performing a Release diff --git a/components/patina_acpi/Cargo.toml b/components/patina_acpi/Cargo.toml index 1e970e717..897a14b14 100644 --- a/components/patina_acpi/Cargo.toml +++ b/components/patina_acpi/Cargo.toml @@ -20,6 +20,7 @@ zerocopy = { workspace = true } zerocopy-derive = { workspace = true } patina = { workspace = true, features = ["alloc"] } +patina_test = { workspace = true } [dev-dependencies] mockall = { workspace = true } diff --git a/components/patina_acpi/src/acpi_protocol.rs b/components/patina_acpi/src/acpi_protocol.rs index 5b0a22fdb..8aaed1174 100644 --- a/components/patina_acpi/src/acpi_protocol.rs +++ b/components/patina_acpi/src/acpi_protocol.rs @@ -31,8 +31,7 @@ pub struct AcpiTableProtocol { // SAFETY: `AcpiTableProtocol` matches the C layout and behavior of the EFI_ACPI_TABLE_PROTOCOL. unsafe impl ProtocolInterface for AcpiTableProtocol { - const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0xffe06bdd, 0x6107, 0x46a6, 0x7b, 0xb2, &[0x5a, 0x9c, 0x7e, 0xc5, 0x27, 0x5c]); + const PROTOCOL_GUID: patina::BinaryGuid = patina::BinaryGuid::from_string("FFE06BDD-6107-46A6-7BB2-5A9C7EC5275C"); } // C function interfaces for ACPI Table Protocol and ACPI Get Protocol. @@ -166,8 +165,7 @@ pub struct AcpiGetProtocol { // SAFETY: `AcpiGetProtocol` matches the C layout and behavior of the custom-defined EFI_ACPI_GET_PROTOCOL. (Not a UEFI spec protocol.) 
unsafe impl ProtocolInterface for AcpiGetProtocol { - const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0x7f3c1a92, 0x8b4e, 0x4d2f, 0xa6, 0xc9, &[0x3e, 0x12, 0xf4, 0xb8, 0xd7, 0xc1]); + const PROTOCOL_GUID: patina::BinaryGuid = patina::BinaryGuid::from_string("7F3C1A92-8B4E-4D2F-A6C9-3E12F4B8D7C1"); } impl AcpiGetProtocol { diff --git a/components/patina_acpi/src/integration_test.rs b/components/patina_acpi/src/integration_test.rs index 6a866dacf..b2cb02116 100644 --- a/components/patina_acpi/src/integration_test.rs +++ b/components/patina_acpi/src/integration_test.rs @@ -13,9 +13,8 @@ use core::{ffi::c_void, mem}; use patina::{ boot_services::{BootServices, StandardBootServices}, component::service::Service, - test::patina_test, - u_assert, u_assert_eq, }; +use patina_test::{patina_test, u_assert, u_assert_eq}; use r_efi::efi; use crate::{ @@ -40,7 +39,7 @@ struct MockLargeTable { #[coverage(off)] #[patina_test] -fn acpi_test(table_manager: Service) -> patina::test::Result { +fn acpi_test(table_manager: Service) -> patina_test::error::Result { let original_length = table_manager.iter_tables().len(); // Install a dummy ACPI table. @@ -98,7 +97,7 @@ fn acpi_test(table_manager: Service) -> patina::test::Result { #[coverage(off)] #[patina_test] -fn acpi_protocol_test(bs: StandardBootServices) -> patina::test::Result { +fn acpi_protocol_test(bs: StandardBootServices) -> patina_test::error::Result { // SAFETY: there is only one reference to the `AcpiTableProtocol` during this test. 
let table_protocol = unsafe { bs.locate_protocol::(None) }.expect("Locate protocol should succeed."); diff --git a/components/patina_acpi/src/signature.rs b/components/patina_acpi/src/signature.rs index 29e199682..3a6fc0432 100644 --- a/components/patina_acpi/src/signature.rs +++ b/components/patina_acpi/src/signature.rs @@ -12,7 +12,6 @@ use core::mem; use patina::signature; -use r_efi::efi; use crate::acpi_table::{AcpiDsdt, AcpiFacs, AcpiFadt, AcpiTableHeader}; @@ -48,8 +47,7 @@ pub const CCEL: u32 = signature!('C', 'C', 'E', 'L'); pub const SKVL: u32 = signature!('S', 'K', 'V', 'L'); pub const RHCT: u32 = signature!('R', 'H', 'C', 'T'); -pub const ACPI_TABLE_GUID: efi::Guid = - efi::Guid::from_fields(0x8868E871, 0xE4F1, 0x11D3, 0xBC, 0x22, &[0x00, 0x80, 0xC7, 0x3C, 0x88, 0x81]); +pub const ACPI_TABLE_GUID: patina::BinaryGuid = patina::BinaryGuid::from_string("8868E871-E4F1-11D3-BC22-0080C73C8881"); pub(crate) const ACPI_HEADER_LEN: usize = mem::size_of::(); pub(crate) const MAX_INITIAL_ENTRIES: usize = 32; diff --git a/components/patina_adv_logger/Cargo.toml b/components/patina_adv_logger/Cargo.toml index b8be6a7ca..ead957e39 100644 --- a/components/patina_adv_logger/Cargo.toml +++ b/components/patina_adv_logger/Cargo.toml @@ -17,7 +17,8 @@ required-features = ['std'] [dependencies] log = { workspace = true } -patina = { workspace = true} +patina = { workspace = true } +patina_test = { workspace = true } r-efi = { workspace = true } spin = { workspace = true } mu_rust_helpers = { workspace = true } diff --git a/components/patina_adv_logger/README.md b/components/patina_adv_logger/README.md index 745d2dd4d..897b26fa8 100644 --- a/components/patina_adv_logger/README.md +++ b/components/patina_adv_logger/README.md @@ -43,14 +43,14 @@ below. 
```rust use patina_dxe_core::*; use patina::{log::Format, serial::uart::UartNull}; -use patina_adv_logger::{component::AdvancedLoggerComponent, logger::AdvancedLogger}; +use patina_adv_logger::{component::AdvancedLoggerComponent, logger::{AdvancedLogger, TargetFilter}}; use log::LevelFilter; use core::ffi::c_void; static LOGGER: AdvancedLogger = AdvancedLogger::new( Format::Standard, // How logs are formatted - &[("allocations", LevelFilter::Off)], // set custom log levels per module + &[TargetFilter { target: "allocations", log_level: LevelFilter::Off, hw_filter_override: None }], // set custom log levels per module log::LevelFilter::Info, // Default log level UartNull { }, // Serial writer instance ); diff --git a/components/patina_adv_logger/src/component.rs b/components/patina_adv_logger/src/component.rs index 4420565ae..832653d5f 100644 --- a/components/patina_adv_logger/src/component.rs +++ b/components/patina_adv_logger/src/component.rs @@ -14,10 +14,11 @@ use patina::{ boot_services::{BootServices, StandardBootServices}, component::{ component, - service::{Service, perf_timer::ArchTimerFunctionality}, + service::Service, }, error::{EfiError, Result}, serial::SerialIO, + timer::ArchTimerFunctionality, }; use r_efi::efi; @@ -69,7 +70,7 @@ where // SAFETY: We must trust the C code was a responsible steward of this buffer. let internal = unsafe { &*(this as *const AdvancedLoggerProtocolInternal) }; - internal.adv_logger.log_write(error_level, data); + internal.adv_logger.log_write(error_level, None, data); efi::Status::SUCCESS } diff --git a/components/patina_adv_logger/src/integration_test.rs b/components/patina_adv_logger/src/integration_test.rs index 2e3f3a477..9ad16ba41 100644 --- a/components/patina_adv_logger/src/integration_test.rs +++ b/components/patina_adv_logger/src/integration_test.rs @@ -10,18 +10,16 @@ //! //! SPDX-License-Identifier: Apache-2.0 //! 
-use patina::{ - boot_services::{BootServices, StandardBootServices}, - test::patina_test, - u_assert, u_assert_eq, -}; +use patina::boot_services::{BootServices, StandardBootServices}; +use patina_test::{patina_test, u_assert, u_assert_eq}; + use r_efi::efi; use crate::{memory_log, protocol::AdvancedLoggerProtocol, reader::AdvancedLogReader}; #[coverage(off)] #[patina_test] -fn adv_logger_test(bs: StandardBootServices) -> patina::test::Result { +fn adv_logger_test(bs: StandardBootServices) -> patina_test::error::Result { const DIRECT_STR: &str = "adv_logger_test: Direct log message!!!"; const PROTOCOL_STR: &str = "adv_logger_test: Logged through the protocol!!!\n"; diff --git a/components/patina_adv_logger/src/logger.rs b/components/patina_adv_logger/src/logger.rs index 0e758346e..bf7371691 100644 --- a/components/patina_adv_logger/src/logger.rs +++ b/components/patina_adv_logger/src/logger.rs @@ -16,11 +16,12 @@ use crate::{ use core::{ffi::c_void, marker::Send, ptr}; use log::Level; use patina::{ - component::service::{Service, perf_timer::ArchTimerFunctionality}, + component::service::Service, error::EfiError, log::Format, pi::hob::{Hob, PhaseHandoffInformationTable}, serial::SerialIO, + timer::ArchTimerFunctionality, }; use r_efi::efi; use spin::RwLock; @@ -29,13 +30,27 @@ use spin::RwLock; #[used] static mut DBG_ADV_LOG_BUFFER: u64 = 0; +/// Per-target filter that binds a target name prefix with its log level and optional hardware print level override. +pub struct TargetFilter<'a> { + /// Target name prefix to match. + pub target: &'a str, + /// Maximum log level for this target. Messages above this are dropped entirely. + pub log_level: log::LevelFilter, + /// Optional override for the hardware print level for this target. Messages above this level will not be printed + /// to the hardware port, but may still be logged to the memory log based on log_level and the overall max_level. + /// - `None` = use global `hw_print_level` from memory log header. 
+ /// - `Some(level_filter)` Use the provided level filter to control hardware printing for this target, instead + /// of the global `hw_print_level`. + pub hw_filter_override: Option, +} + /// The logger for memory/hardware port logging. pub struct AdvancedLogger<'a, S> where S: SerialIO + Send, { hardware_port: S, - target_filters: &'a [(&'a str, log::LevelFilter)], + target_filters: &'a [TargetFilter<'a>], max_level: log::LevelFilter, format: Format, memory_log: RwLock>, @@ -51,13 +66,13 @@ where /// ## Arguments /// /// * `format` - The format to use for logging. - /// * `target_filters` - A list of target filters to apply to the logger. + /// * `target_filters` - Per-target filters that control log level and optionally the hardware print filter. /// * `max_level` - The maximum log level to log. /// * `hardware_port` - The hardware port to write logs to. /// pub const fn new( format: Format, - target_filters: &'a [(&'a str, log::LevelFilter)], + target_filters: &'a [TargetFilter<'a>], max_level: log::LevelFilter, hardware_port: S, ) -> Self { @@ -116,12 +131,18 @@ where } /// Writes a log entry to the hardware port and memory log if available. - pub(crate) fn log_write(&self, error_level: u32, data: &[u8]) { + /// + /// `hw_print_mask_override` optionally overrides the global hw_print_level + /// from the memory log header, enabling per-target hardware print filtering. 
+ pub(crate) fn log_write(&self, error_level: u32, hw_print_mask_override: Option, data: &[u8]) { self.refresh_log_info_address(); let mut hw_write = true; let log_guard = self.memory_log.read(); if let Some(memory_log) = log_guard.as_ref() { - hw_write = memory_log.hardware_write_enabled(error_level); + hw_write = match hw_print_mask_override { + Some(mask) => memory_log.hardware_write_enabled_with_mask(error_level, mask), + None => memory_log.hardware_write_enabled(error_level), + }; let timestamp = self.timer.map_or(0, |timer| timer.cpu_count()); let _ = memory_log.add_log_entry(LogEntry { phase: memory_log::ADVANCED_LOGGER_PHASE_DXE, @@ -198,6 +219,11 @@ where self.set_log_info_address(new_address); } } + + /// Returns the matching target filter for the given target name, if any. + fn target_filter(&self, target: &str) -> Option<&TargetFilter<'a>> { + self.target_filters.iter().find(|f| target.starts_with(f.target)) + } } impl log::Log for AdvancedLogger<'_, S> @@ -205,19 +231,18 @@ where S: SerialIO + Send, { fn enabled(&self, metadata: &log::Metadata) -> bool { - metadata.level().to_level_filter() - <= *self - .target_filters - .iter() - .find(|(name, _)| metadata.target().starts_with(name)) - .map(|(_, level)| level) - .unwrap_or(&self.max_level) + let max_level = self.target_filter(metadata.target()).map(|f| f.log_level).unwrap_or(self.max_level); + metadata.level().to_level_filter() <= max_level } fn log(&self, record: &log::Record) { - if self.enabled(record.metadata()) { + let filter = self.target_filter(record.target()); + let max_level = filter.map(|f| f.log_level).unwrap_or(self.max_level); + + if record.metadata().level().to_level_filter() <= max_level { let level = log_level_to_debug_level(record.metadata().level()); - let mut writer = BufferedWriter::new(level, self); + let hw_print_mask_override = filter.and_then(|f| f.hw_filter_override).map(log_level_filter_to_debug_mask); + let mut writer = BufferedWriter::new(level, hw_print_mask_override, 
self); self.format.write(&mut writer, record); writer.flush(); } @@ -239,6 +264,24 @@ const fn log_level_to_debug_level(level: Level) -> u32 { } } +/// Converts a `log::LevelFilter` to a hardware print mask. +const fn log_level_filter_to_debug_mask(level_filter: log::LevelFilter) -> u32 { + match level_filter { + log::LevelFilter::Error => memory_log::DEBUG_LEVEL_ERROR, + log::LevelFilter::Warn => memory_log::DEBUG_LEVEL_ERROR | memory_log::DEBUG_LEVEL_WARNING, + log::LevelFilter::Info => { + memory_log::DEBUG_LEVEL_ERROR | memory_log::DEBUG_LEVEL_WARNING | memory_log::DEBUG_LEVEL_INFO + } + log::LevelFilter::Debug | log::LevelFilter::Trace => { + memory_log::DEBUG_LEVEL_ERROR + | memory_log::DEBUG_LEVEL_WARNING + | memory_log::DEBUG_LEVEL_INFO + | memory_log::DEBUG_LEVEL_VERBOSE + } + log::LevelFilter::Off => 0, + } +} + /// Size of the buffer for the buffered writer. const WRITER_BUFFER_SIZE: usize = 128; @@ -248,6 +291,7 @@ where S: SerialIO + Send, { level: u32, + hw_print_mask_override: Option, writer: &'a AdvancedLogger<'a, S>, buffer: [u8; WRITER_BUFFER_SIZE], buffer_size: usize, @@ -257,9 +301,9 @@ impl<'a, S> BufferedWriter<'a, S> where S: SerialIO + Send, { - /// Creates a new BufferedWriter with the specified log level and writer. - const fn new(level: u32, writer: &'a AdvancedLogger<'a, S>) -> Self { - Self { level, writer, buffer: [0; WRITER_BUFFER_SIZE], buffer_size: 0 } + /// Creates a new BufferedWriter with the specified log level, optional hardware print mask override, and writer. + const fn new(level: u32, hw_print_mask_override: Option, writer: &'a AdvancedLogger<'a, S>) -> Self { + Self { level, hw_print_mask_override, writer, buffer: [0; WRITER_BUFFER_SIZE], buffer_size: 0 } } /// Flushes the current buffer to the underlying writer. 
@@ -269,7 +313,7 @@ where } let data = &self.buffer[0..self.buffer_size]; - self.writer.log_write(self.level, data); + self.writer.log_write(self.level, self.hw_print_mask_override, data); self.buffer_size = 0; } } @@ -293,7 +337,7 @@ where } else { // this message is too big to buffer, flush then write the message. self.flush(); - self.writer.log_write(self.level, data); + self.writer.log_write(self.level, self.hw_print_mask_override, data); } Ok(()) @@ -306,15 +350,20 @@ mod tests { use core::{ffi::c_void, ptr}; use alloc::boxed::Box; + use log::Log; use patina::{ - component::service::{IntoService, perf_timer::ArchTimerFunctionality}, + component::service::{IntoService, timer::ArchTimerFunctionality}, log::Format, pi::hob::{GUID_EXTENSION, GuidHob, header}, serial::uart::UartNull, }; use r_efi::efi; - use crate::{logger::AdvancedLogger, memory_log, writer::AdvancedLogWriter}; + use crate::{ + logger::{AdvancedLogger, TargetFilter}, + memory_log, + writer::AdvancedLogWriter, + }; #[derive(IntoService)] #[service(dyn ArchTimerFunctionality)] @@ -334,7 +383,7 @@ mod tests { let serial = UartNull {}; let logger_uninit = AdvancedLogger::::new( Format::Standard, - &[("test_target", log::LevelFilter::Info)], + &[TargetFilter { target: "test_target", log_level: log::LevelFilter::Info, hw_filter_override: None }], log::LevelFilter::Debug, serial, ); @@ -346,7 +395,7 @@ mod tests { let serial = UartNull {}; let logger_uninit = AdvancedLogger::::new( Format::Standard, - &[("test_target", log::LevelFilter::Info)], + &[TargetFilter { target: "test_target", log_level: log::LevelFilter::Info, hw_filter_override: None }], log::LevelFilter::Debug, serial, ); @@ -401,4 +450,87 @@ mod tests { // TODO: Need to mock the protocol interface but requires final component interface. } + + // Helper to build Metadata for a given target and level. 
+ fn metadata(target: &str, level: log::Level) -> log::Metadata<'_> { + log::Metadata::builder().target(target).level(level).build() + } + + // === Global level filtering (no target filters) === + + #[test] + fn enabled_respects_global_max_level() { + let logger = AdvancedLogger::new(Format::Standard, &[], log::LevelFilter::Info, UartNull {}); + + assert!(logger.enabled(&metadata("any", log::Level::Error))); + assert!(logger.enabled(&metadata("any", log::Level::Warn))); + assert!(logger.enabled(&metadata("any", log::Level::Info))); + assert!(!logger.enabled(&metadata("any", log::Level::Debug))); + assert!(!logger.enabled(&metadata("any", log::Level::Trace))); + } + + #[test] + fn enabled_at_trace_allows_everything() { + let logger = AdvancedLogger::new(Format::Standard, &[], log::LevelFilter::Trace, UartNull {}); + + assert!(logger.enabled(&metadata("x", log::Level::Error))); + assert!(logger.enabled(&metadata("x", log::Level::Warn))); + assert!(logger.enabled(&metadata("x", log::Level::Info))); + assert!(logger.enabled(&metadata("x", log::Level::Debug))); + assert!(logger.enabled(&metadata("x", log::Level::Trace))); + } + + #[test] + fn enabled_at_off_blocks_everything() { + let logger = AdvancedLogger::new(Format::Standard, &[], log::LevelFilter::Off, UartNull {}); + + assert!(!logger.enabled(&metadata("x", log::Level::Error))); + assert!(!logger.enabled(&metadata("x", log::Level::Trace))); + } + + // === Target filter level overrides === + + #[test] + fn target_filter_overrides_global_to_be_more_permissive() { + let filters = [TargetFilter { target: "my_mod", log_level: log::LevelFilter::Info, hw_filter_override: None }]; + let logger = AdvancedLogger::new(Format::Standard, &filters, log::LevelFilter::Error, UartNull {}); + + // Matching target uses the filter's level (Info) + assert!(logger.enabled(&metadata("my_mod", log::Level::Info))); + assert!(logger.enabled(&metadata("my_mod", log::Level::Error))); + assert!(!logger.enabled(&metadata("my_mod", 
log::Level::Debug))); + + // Non-matching target falls back to global (Error) + assert!(!logger.enabled(&metadata("other", log::Level::Info))); + assert!(logger.enabled(&metadata("other", log::Level::Error))); + } + + #[test] + fn target_filter_restricts_below_global() { + let filters = [TargetFilter { target: "noisy", log_level: log::LevelFilter::Error, hw_filter_override: None }]; + let logger = AdvancedLogger::new(Format::Standard, &filters, log::LevelFilter::Trace, UartNull {}); + + // "noisy" target is restricted to Error only + assert!(!logger.enabled(&metadata("noisy", log::Level::Info))); + assert!(!logger.enabled(&metadata("noisy", log::Level::Warn))); + assert!(logger.enabled(&metadata("noisy", log::Level::Error))); + + // Other targets use global Trace (everything passes) + assert!(logger.enabled(&metadata("other", log::Level::Trace))); + } + + #[test] + fn target_filter_matches_by_prefix() { + let filters = + [TargetFilter { target: "my_crate", log_level: log::LevelFilter::Info, hw_filter_override: None }]; + // Global is Off so anything not matching the filter is blocked. 
+ let logger = AdvancedLogger::new(Format::Standard, &filters, log::LevelFilter::Off, UartNull {}); + + // Prefix match + assert!(logger.enabled(&metadata("my_crate::submod", log::Level::Info))); + // Exact match (also a valid prefix) + assert!(logger.enabled(&metadata("my_crate", log::Level::Info))); + // No match → falls to global Off + assert!(!logger.enabled(&metadata("other_crate", log::Level::Error))); + } } diff --git a/components/patina_adv_logger/src/memory_log.rs b/components/patina_adv_logger/src/memory_log.rs index d3ffa2274..13e71813f 100644 --- a/components/patina_adv_logger/src/memory_log.rs +++ b/components/patina_adv_logger/src/memory_log.rs @@ -25,16 +25,20 @@ use r_efi::efi; use zerocopy_derive::*; // { 0x4d60cfb5, 0xf481, 0x4a98, {0x9c, 0x81, 0xbf, 0xf8, 0x64, 0x60, 0xc4, 0x3e }} -pub const ADV_LOGGER_HOB_GUID: efi::Guid = - efi::Guid::from_fields(0x4d60cfb5, 0xf481, 0x4a98, 0x9c, 0x81, &[0xbf, 0xf8, 0x64, 0x60, 0xc4, 0x3e]); +pub const ADV_LOGGER_HOB_GUID: patina::BinaryGuid = + patina::BinaryGuid::from_string("4D60CFB5-F481-4A98-9C81-BFF86460C43E"); pub const ADV_LOGGER_INFO_VERSION_V5: u16 = 5; pub const ADV_LOGGER_INFO_VERSION_V6: u16 = 6; // UEFI Debug Levels +/// Error pub const DEBUG_LEVEL_ERROR: u32 = 0x80000000; +/// Warnings pub const DEBUG_LEVEL_WARNING: u32 = 0x00000002; +/// Informational debug messages pub const DEBUG_LEVEL_INFO: u32 = 0x00000040; +/// Detailed debug messages that may significantly impact boot performance pub const DEBUG_LEVEL_VERBOSE: u32 = 0x00400000; // Phase definitions. diff --git a/components/patina_adv_logger/src/protocol.rs b/components/patina_adv_logger/src/protocol.rs index fc2517ec2..342881675 100644 --- a/components/patina_adv_logger/src/protocol.rs +++ b/components/patina_adv_logger/src/protocol.rs @@ -30,13 +30,12 @@ type AdvancedLoggerWrite = extern "efiapi" fn(*const AdvancedLoggerProtocol, usi // SAFETY: The AdvancedLoggerProtocol struct layout matches the protocol definition. 
unsafe impl ProtocolInterface for AdvancedLoggerProtocol { - const PROTOCOL_GUID: efi::Guid = AdvancedLoggerProtocol::GUID; + const PROTOCOL_GUID: patina::BinaryGuid = AdvancedLoggerProtocol::GUID; } impl AdvancedLoggerProtocol { /// Protocol GUID for the Advanced Logger protocol. - pub const GUID: efi::Guid = - efi::Guid::from_fields(0x434f695c, 0xef26, 0x4a12, 0x9e, 0xba, &[0xdd, 0xef, 0x00, 0x97, 0x49, 0x7c]); + pub const GUID: patina::BinaryGuid = patina::BinaryGuid::from_string("434F695C-EF26-4A12-9EBA-DDEF0097497C"); /// Signature used for the Advanced Logger protocol. pub const SIGNATURE: u32 = 0x50474F4C; // "LOGP" diff --git a/components/patina_adv_logger/src/writer.rs b/components/patina_adv_logger/src/writer.rs index f4bcc623b..53d245b37 100644 --- a/components/patina_adv_logger/src/writer.rs +++ b/components/patina_adv_logger/src/writer.rs @@ -153,6 +153,12 @@ impl AdvancedLogWriter { !self.header.hw_port_disabled() && (level & self.header.hw_print_level() != 0) } + /// Returns whether hardware port writing is enabled for the given level, + /// using an overridden hw_print_level bitmask. + pub fn hardware_write_enabled_with_mask(&self, level: u32, mask_override: u32) -> bool { + !self.header.hw_port_disabled() && (level & mask_override != 0) + } + /// Returns the timer frequency. 
pub fn get_frequency(&self) -> u64 { self.header.timer_frequency().load(Ordering::Relaxed) @@ -183,7 +189,7 @@ impl AdvancedLogWriter { } } -#[cfg(test)] +#[cfg(all(test, feature = "reader"))] #[coverage(off)] mod tests { extern crate std; diff --git a/components/patina_mm/src/component/communicator.rs b/components/patina_mm/src/component/communicator.rs index c4a28a1ce..fba635c51 100644 --- a/components/patina_mm/src/component/communicator.rs +++ b/components/patina_mm/src/component/communicator.rs @@ -16,7 +16,7 @@ mod comm_buffer_update; use crate::{ - config::{CommunicateBuffer, EfiMmCommunicateHeader, MmCommunicationConfiguration}, + config::{CommunicateBuffer, MmCommunicationConfiguration}, service::SwMmiTrigger, }; use patina::{ @@ -26,9 +26,10 @@ use patina::{ Storage, component, service::{IntoService, Service}, }, + pi::protocols::communication::EfiMmCommunicateHeader, }; -extern crate alloc; -use alloc::{boxed::Box, vec::Vec}; + +use alloc::vec::Vec; use core::{ cell::RefCell, @@ -127,14 +128,13 @@ pub trait MmCommunication { /// # Example /// /// ```rust - /// use r_efi::efi; /// use patina_mm::component::communicator::MmCommunication; /// use patina::component::service::Service; /// use patina::Guid; /// /// fn component(comm_service: Service) { /// let data = [0x01, 0x02, 0x03]; - /// let recipient = efi::Guid::from_fields(0x12345678, 0x1234, 0x5678, 0x12, 0x34, &[0x56, 0x78, 0x90, 0xab, 0xcd, 0xef]); + /// let recipient = patina::BinaryGuid::from_string("12345678-1234-5678-1234-567890ABCDEF"); /// let result = comm_service.communicate(0, &data, Guid::from_ref(&recipient)); /// /// match result { @@ -152,26 +152,23 @@ pub trait MmCommunication { /// /// Allows sending messages via a communication ("comm") buffer and receiving responses from the MM handler where /// the response is stored in the same buffer. +/// +/// The default executor ([`RealMmExecutor`]) triggers MM via the SW MMI trigger service. 
+/// Tests can substitute alternative executor implementations. #[derive(IntoService)] #[service(dyn MmCommunication)] -pub struct MmCommunicator { +pub struct MmCommunicator { /// Configured communication buffers comm_buffers: RefCell>, /// The MM Executor actively handling MM execution - mm_executor: Option>, + mm_executor: Option, /// Context shared with protocol callback for pending buffer updates notify_context: Option<&'static comm_buffer_update::ProtocolNotifyContext>, } -#[component] -impl MmCommunicator { - /// Create a new `MmCommunicator` instance for testing. - pub fn new() -> Self { - Self { comm_buffers: RefCell::new(Vec::new()), mm_executor: None, notify_context: None } - } - - /// Create a new `MmCommunicator` instance with a custom MM executor (for testing). - pub fn with_executor(executor: Box) -> Self { +impl MmCommunicator { + /// Create a new `MmCommunicator` instance with a custom MM executor. + pub fn with_executor(executor: E) -> Self { Self { comm_buffers: RefCell::new(Vec::new()), mm_executor: Some(executor), notify_context: None } } @@ -180,6 +177,14 @@ impl MmCommunicator { pub fn set_test_comm_buffers(&self, buffers: Vec) { *self.comm_buffers.borrow_mut() = buffers; } +} + +#[component] +impl MmCommunicator { + /// Create a new `MmCommunicator` instance. 
+ pub fn new() -> Self { + Self { comm_buffers: RefCell::new(Vec::new()), mm_executor: None, notify_context: None } + } /// Component entry point /// @@ -197,7 +202,7 @@ impl MmCommunicator { log::info!(target: "mm_comm", "MM Communicator entry..."); // Create the real MM executor - self.mm_executor = Some(Box::new(RealMmExecutor::new(sw_mmi_trigger))); + self.mm_executor = Some(RealMmExecutor::new(sw_mmi_trigger)); let (comm_buffers, enable_buffer_updates, updatable_buffer_id) = { let config = storage @@ -228,9 +233,7 @@ impl MmCommunicator { buffer_id ); - // SAFETY: The communicator reference remains valid as a stored service - let self_ptr = &self as *const MmCommunicator; - let context = comm_buffer_update::register_buffer_update_notify(boot_services, buffer_id, self_ptr)?; + let context = comm_buffer_update::register_buffer_update_notify(boot_services, buffer_id)?; // Store context reference for checking pending updates in communicate() self.notify_context = Some(context); @@ -250,7 +253,7 @@ impl MmCommunicator { } } -impl Debug for MmCommunicator { +impl Debug for MmCommunicator { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "MM Communicator:")?; for buffer in self.comm_buffers.borrow().iter() { @@ -261,7 +264,7 @@ impl Debug for MmCommunicator { } } -impl MmCommunication for MmCommunicator { +impl MmCommunication for MmCommunicator { fn communicate<'a>(&self, id: u8, data_buffer: &[u8], recipient: Guid<'a>) -> Result, Status> { log::debug!(target: "mm_comm", "Starting MM communication: buffer_id={}, data_size={}, recipient={:?}", id, data_buffer.len(), recipient); @@ -374,15 +377,16 @@ mod tests { communicator::{MmCommunicator, MockMmExecutor}, sw_mmi_manager::SwMmiManager, }, - config::{CommunicateBuffer, MmCommBufferStatus, MmCommunicationConfiguration}, + config::{CommunicateBuffer, MmCommunicationConfiguration}, + }; + use patina::{ + component::{IntoComponent, Storage}, + management_mode::MmCommBufferStatus, }; - use 
patina::component::{IntoComponent, Storage}; use core::{cell::RefCell, pin::Pin}; - use r_efi::efi; - extern crate alloc; - use alloc::vec::Vec; + use std::vec::Vec; /// Simple MM Executor for unit tests that simulates MM handlers echoing request data back as the response struct EchoMmExecutor; @@ -442,8 +446,7 @@ mod tests { } static TEST_DATA: [u8; 3] = [0x01, 0x02, 0x03]; - static TEST_RECIPIENT: efi::Guid = - efi::Guid::from_fields(0x12345678, 0x1234, 0x5678, 0x12, 0x34, &[0x56, 0x78, 0x90, 0xab, 0xcd, 0xef]); + static TEST_RECIPIENT: patina::BinaryGuid = patina::BinaryGuid::from_string("12345678-1234-5678-1234-567890ABCDEF"); fn test_recipient() -> Guid<'static> { Guid::from_ref(&TEST_RECIPIENT) @@ -454,16 +457,16 @@ mod tests { let buffer: &'static mut [u8; $size] = Box::leak(Box::new([0u8; $size])); MmCommunicator { comm_buffers: RefCell::new(vec![CommunicateBuffer::new(Pin::new(buffer), 0)]), - mm_executor: Some(Box::new($mock_executor)), + mm_executor: Some($mock_executor), notify_context: None, } }}; } - fn create_communicator_with_buffers( + fn create_communicator_with_buffers( buffers: Vec, - executor: Box, - ) -> MmCommunicator { + executor: E, + ) -> MmCommunicator { MmCommunicator { comm_buffers: RefCell::new(buffers), mm_executor: Some(executor), notify_context: None } } @@ -488,7 +491,7 @@ mod tests { let communicator = MmCommunicator { comm_buffers: RefCell::new(vec![]), - mm_executor: Some(Box::new(mock_executor)), + mm_executor: Some(mock_executor), notify_context: None, }; let result = communicator.communicate(0, &TEST_DATA, test_recipient()); @@ -507,7 +510,7 @@ mod tests { #[test] fn test_communicate_no_mm_executor() { - let communicator = MmCommunicator { + let communicator: MmCommunicator = MmCommunicator { comm_buffers: RefCell::new(vec![CommunicateBuffer::new(Pin::new(Box::leak(Box::new([0u8; 1024]))), 0)]), mm_executor: None, notify_context: None, @@ -585,7 +588,7 @@ mod tests { CommunicateBuffer::new(Pin::new(Box::leak(Box::new([0u8; 
256]))), 10), ]; - let communicator = create_communicator_with_buffers(buffers, Box::new(EchoMmExecutor)); + let communicator = create_communicator_with_buffers(buffers, EchoMmExecutor); // Test communication with each buffer let test_data1 = b"Buffer 1 test"; @@ -663,7 +666,7 @@ mod tests { let buffer2 = CommunicateBuffer::new(Pin::new(Box::leak(Box::new([0u8; 1024]))), 2); let buffers = vec![buffer1, buffer2]; - let communicator = create_communicator_with_buffers(buffers, Box::new(EchoMmExecutor)); + let communicator = create_communicator_with_buffers(buffers, EchoMmExecutor); let debug_output = format!("{:?}", communicator); assert!(debug_output.contains("MM Communicator:")); @@ -674,7 +677,7 @@ mod tests { #[test] fn test_mm_communicator_debug_no_executor() { let buffer = CommunicateBuffer::new(Pin::new(Box::leak(Box::new([0u8; 512]))), 0); - let communicator = + let communicator: MmCommunicator = MmCommunicator { comm_buffers: RefCell::new(vec![buffer]), mm_executor: None, notify_context: None }; let debug_output = format!("{:?}", communicator); @@ -692,7 +695,7 @@ mod tests { #[test] fn test_mm_communicator_with_executor() { - let executor = Box::new(EchoMmExecutor); + let executor = EchoMmExecutor; let communicator = MmCommunicator::with_executor(executor); assert_eq!(communicator.comm_buffers.borrow().len(), 0); @@ -740,7 +743,7 @@ mod tests { let mut buffer = CommunicateBuffer::new(Pin::new(Box::leak(Box::new([0u8; 1024]))), 0); buffer.disable(); - let communicator = create_communicator_with_buffers(vec![buffer], Box::new(EchoMmExecutor)); + let communicator = create_communicator_with_buffers(vec![buffer], EchoMmExecutor); // Should fail to find the buffer since it's disabled let result = communicator.communicate(0, &TEST_DATA, test_recipient()); @@ -759,7 +762,7 @@ mod tests { buffer3.disable(); // Disabled let buffers = vec![buffer1, buffer2, buffer3]; - let communicator = create_communicator_with_buffers(buffers, Box::new(EchoMmExecutor)); + let 
communicator = create_communicator_with_buffers(buffers, EchoMmExecutor); // Buffer 1 is disabled - should fail let result1 = communicator.communicate(1, &TEST_DATA, test_recipient()); @@ -857,7 +860,7 @@ mod tests { } let communicator = - create_communicator_with_buffers(vec![buffer_with_mailbox], Box::new(NonZeroReturnExecutor { status_ptr })); + create_communicator_with_buffers(vec![buffer_with_mailbox], NonZeroReturnExecutor { status_ptr }); let result = communicator.communicate(0, &TEST_DATA, test_recipient()); assert!(result.is_ok(), "Communication should succeed even with a non-zero MM return status"); @@ -926,7 +929,7 @@ mod tests { } } - let communicator = create_communicator_with_buffers(vec![buffer_with_mailbox], Box::new(CorruptBufferExecutor)); + let communicator = create_communicator_with_buffers(vec![buffer_with_mailbox], CorruptBufferExecutor); let result = communicator.communicate(0, &TEST_DATA, test_recipient()); assert_eq!(result, Err(Status::InvalidResponse)); diff --git a/components/patina_mm/src/component/communicator/comm_buffer_update.rs b/components/patina_mm/src/component/communicator/comm_buffer_update.rs index 1289e23ed..dc0d5fbe7 100644 --- a/components/patina_mm/src/component/communicator/comm_buffer_update.rs +++ b/components/patina_mm/src/component/communicator/comm_buffer_update.rs @@ -10,20 +10,16 @@ //! //! 
SPDX-License-Identifier: Apache-2.0 -use crate::{ - component::communicator::MmCommunicator, - config::CommunicateBuffer, - protocol::mm_comm_buffer_update::{self, MmCommBufferUpdateProtocol}, -}; +use crate::config::CommunicateBuffer; use patina::{ base::UEFI_PAGE_SIZE, boot_services::{BootServices, StandardBootServices, event::EventType, tpl::Tpl}, + management_mode::protocol::mm_comm_buffer_update::{self, MmCommBufferUpdateProtocol}, }; use zerocopy::FromBytes; use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; -extern crate alloc; use alloc::boxed::Box; /// Context for the MM Comm Buffer Update Protocol notify callback @@ -35,7 +31,6 @@ use alloc::boxed::Box; pub(super) struct ProtocolNotifyContext { pub(super) boot_services: StandardBootServices, pub(super) updatable_buffer_id: u8, - pub(super) communicator: *const MmCommunicator, /// Pending buffer update - set by protocol callback, consumed by communicate() pub(super) pending_buffer: AtomicPtr, /// Flag indicating if a buffer update is pending @@ -50,26 +45,22 @@ pub(super) struct ProtocolNotifyContext { /// # Parameters /// - `boot_services`: Boot services for creating events and registering protocol notify /// - `updatable_buffer_id`: The buffer ID that should be updated when protocol is installed -/// - `communicator`: Pointer to the MmCommunicator instance /// /// # Returns /// - `Ok(&'static ProtocolNotifyContext)`: Context that should be stored for later use /// - `Err(patina::error::Error)`: If event creation or protocol notify registration fails /// /// # Safety -/// - The communicator pointer must remain valid for the lifetime of the context /// - The returned context is leaked and will live for a static lifetime pub(super) fn register_buffer_update_notify( boot_services: StandardBootServices, updatable_buffer_id: u8, - communicator: *const MmCommunicator, ) -> patina::error::Result<&'static ProtocolNotifyContext> { log::trace!(target: "mm_comm", "Setting up protocol notify callback for 
buffer ID {}", updatable_buffer_id); let context = Box::leak(Box::new(ProtocolNotifyContext { boot_services: boot_services.clone(), updatable_buffer_id, - communicator, pending_buffer: AtomicPtr::new(core::ptr::null_mut()), has_pending_update: AtomicBool::new(false), })); @@ -174,7 +165,7 @@ pub(super) fn apply_pending_buffer_update( /// ELements of the protocol update process are unit tested but the notification function as a whole is not. #[coverage(off)] extern "efiapi" fn protocol_notify_callback(_event: r_efi::efi::Event, context: &'static ProtocolNotifyContext) { - log::trace!(target: "mm_comm", "=== Protocol callback ENTRY === communicator ptr: {:p}", context.communicator); + log::trace!(target: "mm_comm", "=== Protocol callback ENTRY ==="); log::info!(target: "mm_comm", "Protocol notify callback triggered for {}", mm_comm_buffer_update::GUID); let updatable_buffer_id = context.updatable_buffer_id; @@ -281,7 +272,7 @@ extern "efiapi" fn protocol_notify_callback(_event: r_efi::efi::Event, context: #[cfg(test)] mod tests { use super::*; - use crate::{component::communicator::MmCommunicator, config::CommunicateBuffer}; + use crate::config::CommunicateBuffer; use core::{ pin::Pin, @@ -289,14 +280,10 @@ mod tests { }; use patina::boot_services::StandardBootServices; - extern crate alloc; - use alloc::{boxed::Box, vec}; + use alloc::boxed::Box; /// Helper to create a test protocol notify context without boot services - fn create_test_context( - updatable_buffer_id: u8, - communicator_ptr: *const MmCommunicator, - ) -> Box { + fn create_test_context(updatable_buffer_id: u8) -> Box { let mock_bs = Box::leak(Box::new([0u8; core::mem::size_of::()])); let bs_ptr = mock_bs.as_mut_ptr() as *mut r_efi::system::BootServices; let bs = StandardBootServices::new(bs_ptr); @@ -304,7 +291,6 @@ mod tests { Box::new(ProtocolNotifyContext { boot_services: bs, updatable_buffer_id, - communicator: communicator_ptr, pending_buffer: AtomicPtr::new(core::ptr::null_mut()), 
has_pending_update: AtomicBool::new(false), }) @@ -312,7 +298,7 @@ mod tests { #[test] fn test_apply_pending_buffer_update_no_pending_update() { - let context = create_test_context(0, core::ptr::null()); + let context = create_test_context(0); let mut comm_buffers = vec![]; // No pending update should return false @@ -322,7 +308,7 @@ mod tests { #[test] fn test_apply_pending_buffer_update_with_pending_buffer() { - let context = create_test_context(5, core::ptr::null()); + let context = create_test_context(5); // Create a new buffer to be the pending update let new_buffer = CommunicateBuffer::new(Pin::new(Box::leak(Box::new([0u8; 4096]))), 5); @@ -347,7 +333,7 @@ mod tests { #[test] fn test_apply_pending_buffer_update_replaces_existing_buffer() { - let context = create_test_context(3, core::ptr::null()); + let context = create_test_context(3); // Create existing buffer with ID 3 let old_buffer = CommunicateBuffer::new(Pin::new(Box::leak(Box::new([0xAA; 1024]))), 3); @@ -382,7 +368,7 @@ mod tests { #[test] fn test_apply_pending_buffer_update_flag_set_but_no_buffer() { - let context = create_test_context(0, core::ptr::null()); + let context = create_test_context(0); // Set the flag but don't store a buffer context.has_pending_update.store(true, Ordering::Release); @@ -397,19 +383,16 @@ mod tests { #[test] fn test_protocol_notify_context_creation() { - let communicator_ptr: *const MmCommunicator = 0x1000 as *const MmCommunicator; - - let context = create_test_context(7, communicator_ptr); + let context = create_test_context(7); assert_eq!(context.updatable_buffer_id, 7); - assert_eq!(context.communicator, communicator_ptr); assert!(!context.has_pending_update.load(Ordering::Acquire)); assert!(context.pending_buffer.load(Ordering::Acquire).is_null()); } #[test] fn test_multiple_pending_buffer_updates() { - let context = create_test_context(1, core::ptr::null()); + let context = create_test_context(1); // Set the first pending buffer let buffer1 = 
CommunicateBuffer::new(Pin::new(Box::leak(Box::new([0xAA; 1024]))), 1); @@ -446,7 +429,7 @@ mod tests { #[test] fn test_pending_buffer_atomic_operations() { - let context = create_test_context(10, core::ptr::null()); + let context = create_test_context(10); // Verify the initial state assert!(!context.has_pending_update.load(Ordering::Acquire)); diff --git a/components/patina_mm/src/config.rs b/components/patina_mm/src/config.rs index c1bbcabe4..d63ac9a7e 100644 --- a/components/patina_mm/src/config.rs +++ b/components/patina_mm/src/config.rs @@ -18,52 +18,13 @@ //! //! SPDX-License-Identifier: Apache-2.0 //! -extern crate alloc; use alloc::vec::Vec; use core::{fmt, pin::Pin, ptr::NonNull}; -use patina::{Guid, base::UEFI_PAGE_MASK}; -use r_efi::efi; -use zerocopy_derive::*; - -/// MM Communication Buffer Status -/// -/// Shared structure between DXE and MM environments to communicate the status -/// of MM communication operations. This structure is written by DXE before -/// triggering an MMI and read/written by MM during MMI processing. -/// -/// This is a structure currently used in some MM Supervisor MM implementations. -#[derive(Debug, Clone, Copy, FromBytes, IntoBytes, Immutable, KnownLayout)] -#[repr(C, packed)] -pub struct MmCommBufferStatus { - /// Whether the data in the fixed MM communication buffer is valid when entering from non-MM to MM. - /// Must be set to TRUE before triggering MMI, will be set to FALSE by MM after processing. - pub is_comm_buffer_valid: u8, - - /// The channel used to communicate with MM. - /// FALSE = user buffer, TRUE = supervisor buffer - pub talk_to_supervisor: u8, - - /// The return status when returning from MM to non-MM. - pub return_status: u64, - - /// The size in bytes of the output buffer when returning from MM to non-MM. 
- pub return_buffer_size: u64, -} - -impl Default for MmCommBufferStatus { - #[coverage(off)] - fn default() -> Self { - Self::new() - } -} - -impl MmCommBufferStatus { - /// Create a new mailbox status with all fields zeroed - pub const fn new() -> Self { - Self { is_comm_buffer_valid: 0, talk_to_supervisor: 0, return_status: 0, return_buffer_size: 0 } - } -} +use patina::{ + BinaryGuid, Guid, base::UEFI_PAGE_MASK, management_mode::MmCommBufferStatus, + pi::protocols::communication::EfiMmCommunicateHeader, +}; /// Management Mode (MM) Configuration /// @@ -120,70 +81,6 @@ impl fmt::Display for MmCommunicationConfiguration { } } -/// UEFI MM Communicate Header -/// -/// A standard header that must be present at the beginning of any MM communication buffer. -/// -/// ## Notes -/// -/// - This only supports V1 and V2 of the MM Communicate header format. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct EfiMmCommunicateHeader { - /// Allows for disambiguation of the message format. - /// Used to identify the registered MM handlers that should be given the message. - header_guid: efi::Guid, - /// The size of Data (in bytes) and does not include the size of the header. - message_length: usize, -} - -impl EfiMmCommunicateHeader { - /// Create a new communicate header with the specified GUID and message length. - pub fn new(header_guid: Guid, message_length: usize) -> Self { - Self { header_guid: header_guid.to_efi_guid(), message_length } - } - - /// Returns the communicate header as a slice of bytes using safe conversion. - /// - /// Useful if byte-level access to the header structure is needed. - pub fn as_bytes(&self) -> &[u8] { - // SAFETY: EfiMmCommunicateHeader is repr(C) with well-defined layout and size - unsafe { core::slice::from_raw_parts(self as *const _ as *const u8, Self::size()) } - } - - /// Returns the size of the header in bytes. - pub const fn size() -> usize { - core::mem::size_of::() - } - - /// Get the header GUID from the communication buffer. 
- /// - /// Returns `Some(guid)` if the buffer has been properly initialized with a GUID, - /// or `None` if the buffer is not initialized. - /// - /// # Returns - /// - /// The GUID from the communication header if available. - /// - /// # Errors - /// - /// Returns an error if the communication buffer header cannot be read. - pub fn header_guid(&self) -> Guid<'_> { - Guid::from_ref(&self.header_guid) - } - - /// Returns the message length from this communicate header. - /// - /// The length represents the size of the message data that follows the header. - /// - /// # Returns - /// - /// The length in bytes of the message data (excluding the header size). - pub const fn message_length(&self) -> usize { - self.message_length - } -} - /// MM Communicator Service Status Codes #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum CommunicateBufferStatus { @@ -213,7 +110,7 @@ pub struct CommunicateBuffer { /// Length of the total buffer in bytes. length: usize, /// Handler GUID tracked independently to check against comm buffer contents - private_recipient: Option, + private_recipient: Option, /// Message length tracked independently to check against comm buffer contents private_message_length: usize, /// Whether this buffer is enabled and should be used for communication. 
@@ -509,8 +406,8 @@ impl CommunicateBuffer { let header_slice = &self.as_slice()[..Self::MESSAGE_START_OFFSET]; - // SAFETY: Buffer size validated, efi::Guid is repr(C) at offset 0 - let memory_guid = unsafe { core::ptr::read(header_slice.as_ptr() as *const efi::Guid) }; + // SAFETY: Buffer size validated, BinaryGuid is repr(transparent) over repr(C) efi::Guid at offset 0 + let memory_guid = unsafe { core::ptr::read(header_slice.as_ptr() as *const patina::BinaryGuid) }; // SAFETY: Buffer size validated, usize at offset 16 after Guid let memory_message_length = unsafe { core::ptr::read(header_slice.as_ptr().add(16) as *const usize) }; @@ -526,8 +423,7 @@ impl CommunicateBuffer { } None => { // If no recipient is set privately, the memory should contain all zeros for the GUID - let zero_guid = efi::Guid::from_fields(0, 0, 0, 0, 0, &[0; 6]); - if memory_guid != zero_guid { + if memory_guid != patina::guids::ZERO { log::error!(target: "mm_comm", "Buffer {} unexpected GUID in memory when none set privately", self.id); return Err(CommunicateBufferStatus::InvalidRecipient); } @@ -589,7 +485,7 @@ impl CommunicateBuffer { self.validate_capacity(0)?; // Update private state - let recipient_efi = recipient.to_efi_guid(); + let recipient_efi: BinaryGuid = recipient.to_efi_guid().into(); self.private_recipient = Some(recipient_efi); // Update memory buffer using safe byte operations @@ -680,7 +576,7 @@ impl CommunicateBuffer { self.verify_state_consistency()?; log::trace!(target: "mm_comm", "Buffer {} header GUID retrieved from private state", self.id); - Ok(self.private_recipient.as_ref().map(Guid::from_ref)) + Ok(self.private_recipient.as_ref().map(Guid::from)) } /// Returns the message length from the current communicate buffer. 
@@ -971,8 +867,7 @@ mod tests { assert_eq!(comm_buffer.get_message_length().unwrap(), message.len()); // Update with new recipient - let recipient_guid2 = - Guid::from_fields(0x3210FEDC, 0xABCD, 0xABCD, 0x12, 0x23, [0x12, 0x34, 0x56, 0x78, 0x90, 0xAB]); + let recipient_guid2 = Guid::try_from_string("3210FEDC-ABCD-ABCD-1223-1234567890AB").unwrap(); assert!(comm_buffer.set_message_info(recipient_guid2.clone()).is_ok()); assert_eq!( comm_buffer.get_header_guid().unwrap().as_ref().map(|g| g.as_bytes()), @@ -1197,8 +1092,7 @@ mod tests { let returned_guid = header.header_guid(); assert_eq!(returned_guid.as_bytes(), test_guid.as_bytes()); - let test_guid2 = - Guid::from_fields(0xDEADBEEF, 0xCAFE, 0xDCBA, 0x12, 0x34, [0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0]); + let test_guid2 = Guid::try_from_string("DEADBEEF-CAFE-DCBA-1234-56789ABCDEF0").unwrap(); let header2 = EfiMmCommunicateHeader::new(test_guid2.clone(), message_length); let returned_guid2 = header2.header_guid(); @@ -1221,7 +1115,7 @@ mod tests { #[test] fn test_efi_mm_communicate_header_size() { - let expected_size = core::mem::size_of::() + core::mem::size_of::(); + let expected_size = core::mem::size_of::() + core::mem::size_of::(); assert_eq!(EfiMmCommunicateHeader::size(), expected_size); let test_guid = Guid::try_from_string("12345678-1234-5678-90AB-CDEF01234567").unwrap(); diff --git a/components/patina_mm/src/lib.rs b/components/patina_mm/src/lib.rs index 7ab1d99e1..e90a80f09 100644 --- a/components/patina_mm/src/lib.rs +++ b/components/patina_mm/src/lib.rs @@ -6,7 +6,8 @@ #![cfg_attr(all(not(feature = "std"), not(test), not(feature = "mockall")), no_std)] #![feature(coverage_attribute)] +extern crate alloc; + pub mod component; pub mod config; -pub mod protocol; pub mod service; diff --git a/components/patina_mm/tests/README.md b/components/patina_mm/tests/README.md index 9fbfa8de1..d0ae1b74f 100644 --- a/components/patina_mm/tests/README.md +++ b/components/patina_mm/tests/README.md @@ -605,20 +605,19 @@ For 
example: ```rust pub mod test_guids { + use patina::BinaryGuid; + // Echo handler for basic testing - pub const ECHO_HANDLER: efi::Guid = - efi::Guid::from_fields(0x12345678, 0x1234, 0x5678, 0x12, 0x34, - &[0x56, 0x78, 0x90, 0xab, 0xcd, 0xef]); + pub const ECHO_HANDLER: BinaryGuid = + BinaryGuid::from_string("12345678-1234-5678-1234-567890ABCDEF"); // Version handler for version testing - pub const VERSION_HANDLER: efi::Guid = - efi::Guid::from_fields(0x87654321, 0x4321, 0x8765, 0x43, 0x21, - &[0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54]); + pub const VERSION_HANDLER: BinaryGuid = + BinaryGuid::from_string("87654321-4321-8765-4321-FEDCBA987654"); // MM Supervisor for protocol testing - pub const MM_SUPERVISOR: efi::Guid = - efi::Guid::from_fields(0x8c633b23, 0x1260, 0x4ea6, 0x83, 0x0F, - &[0x7d, 0xdc, 0x97, 0x38, 0x21, 0x11]); + pub const MM_SUPERVISOR: BinaryGuid = + BinaryGuid::from_string("8C633B23-1260-4EA6-830F-7DDC97382111"); } ``` diff --git a/components/patina_mm/tests/patina_mm_integration/common/constants.rs b/components/patina_mm/tests/patina_mm_integration/common/constants.rs index 5d495dbb1..d9011a868 100644 --- a/components/patina_mm/tests/patina_mm_integration/common/constants.rs +++ b/components/patina_mm/tests/patina_mm_integration/common/constants.rs @@ -11,52 +11,16 @@ use patina::base::SIZE_4KB; /// Standard test buffer size pub const TEST_BUFFER_SIZE: usize = SIZE_4KB; -/// MM Supervisor constants and definitions +/// MM Supervisor constants and definitions for testing /// /// Note: These values are only used for testing. They're not meant to be /// accurate or used in production code. 
pub mod mm_supv { - /// Supervisor signature bytes - pub const SIGNATURE: [u8; 4] = [b'M', b'S', b'U', b'P']; - - /// Communication protocol revision - pub const REVISION: u32 = 1; - - /// Request signature as a DWORD - pub const REQUEST_SIGNATURE: u32 = 0x5055534D; // 'MSUP' - - /// Supervisor version + /// Mock supervisor version for testing pub const VERSION: u32 = 0x00130008; - /// Supervisor patch level + /// Mock supervisor patch level for testing pub const PATCH_LEVEL: u32 = 0x00010001; - - /// Maximum request level supported - pub const MAX_REQUEST_LEVEL: u64 = 0x0000000000000004; // COMM_UPDATE - - /// Request type constants - pub mod requests { - /// Request for unblocking memory regions - pub const UNBLOCK_MEM: u32 = 0x0001; - - /// Request to fetch security policy - pub const FETCH_POLICY: u32 = 0x0002; - - /// Request version information - pub const VERSION_INFO: u32 = 0x0003; - - /// Request to update the communication buffer address - pub const COMM_UPDATE: u32 = 0x0004; - } - - /// Response code constants - pub mod responses { - /// Operation completed successfully - pub const SUCCESS: u64 = 0; - - /// Operation failed with error - pub const ERROR: u64 = 0xFFFFFFFFFFFFFFFF; - } } /// Test GUIDs for different handlers @@ -64,21 +28,18 @@ pub mod mm_supv { /// Provides predefined GUIDs used throughout the patina_mm test framework for registering /// and identifying different types of test handlers. 
pub mod test_guids { - use r_efi::efi; + use patina::BinaryGuid; /// Echo handler GUID for testing - pub const ECHO_HANDLER: efi::Guid = - efi::Guid::from_fields(0x12345678, 0x1234, 0x5678, 0x12, 0x34, &[0x56, 0x78, 0x90, 0xab, 0xcd, 0xef]); + pub const ECHO_HANDLER: BinaryGuid = BinaryGuid::from_string("12345678-1234-5678-1234-567890ABCDEF"); /// Version handler GUID for testing /// Note: Not used now but the GUID is reserved for future usage #[allow(dead_code)] - pub const VERSION_HANDLER: efi::Guid = - efi::Guid::from_fields(0x87654321, 0x4321, 0x8765, 0x43, 0x21, &[0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54]); + pub const VERSION_HANDLER: BinaryGuid = BinaryGuid::from_string("87654321-4321-8765-4321-FEDCBA987654"); /// MM Supervisor GUID for supervisor protocol testing - pub const MM_SUPERVISOR: efi::Guid = - efi::Guid::from_fields(0x8c633b23, 0x1260, 0x4ea6, 0x83, 0x0F, &[0x7d, 0xdc, 0x97, 0x38, 0x21, 0x11]); + pub const MM_SUPERVISOR: BinaryGuid = BinaryGuid::from_string("8C633B23-1260-4EA6-830F-7DDC97382111"); } // Convenience re-exports for common usage diff --git a/components/patina_mm/tests/patina_mm_integration/common/framework.rs b/components/patina_mm/tests/patina_mm_integration/common/framework.rs index 7f2f41c86..11dd14e9b 100644 --- a/components/patina_mm/tests/patina_mm_integration/common/framework.rs +++ b/components/patina_mm/tests/patina_mm_integration/common/framework.rs @@ -14,7 +14,7 @@ use crate::patina_mm_integration::common::{constants::*, handlers::*, message_pa extern crate alloc; use alloc::{boxed::Box, string::String, vec::Vec}; -use r_efi::efi; +use patina::BinaryGuid; use std::{ collections::HashMap, sync::{ @@ -83,7 +83,7 @@ pub struct MmTestFramework { /// Handlers can be added during framework construction, and then transferred to the final /// framework instance without cloning. It also lets the same handler registry be shared /// between different testing contexts, without duplicating data. 
- handlers: Arc>>>, + handlers: Arc>>>, /// Atomic counter tracking the number of MM communication triggers /// @@ -126,7 +126,7 @@ impl MmTestFramework { /// allowing concurrent handler processing for different handler GUIDs. pub fn communicate( &self, - guid: &efi::Guid, + guid: &BinaryGuid, data: &[u8], ) -> Result, patina_mm::component::communicator::Status> { // Increment trigger count at the start of every communication attempt @@ -157,7 +157,7 @@ impl MmTestFramework { #[allow(dead_code)] // Usage in integration code is not recognized pub fn communicate_with_buffer( &self, - _guid: &efi::Guid, + _guid: &BinaryGuid, buffer: &mut [u8], ) -> Result, patina_mm::component::communicator::Status> { // Increment trigger count at the start of every communication attempt @@ -223,7 +223,7 @@ pub struct MmTestFrameworkBuilder { /// the single-threaded configuration phase. Handlers are moved in via `Box` /// and then the entire collection is transferred to the framework's `Arc>`` /// wrapper during the `.build()` call. 
- handlers: HashMap>, + handlers: HashMap>, } impl MmTestFrameworkBuilder { @@ -232,7 +232,7 @@ impl MmTestFrameworkBuilder { } /// Add a custom handler for the specified GUID - pub fn with_handler(mut self, guid: efi::Guid, handler: Box) -> Self { + pub fn with_handler(mut self, guid: BinaryGuid, handler: Box) -> Self { self.handlers.insert(guid, handler); self } @@ -249,25 +249,25 @@ impl MmTestFrameworkBuilder { /// Add a version info handler #[allow(dead_code)] // Reserved for future version handler tests - pub fn with_version_handler(self, guid: efi::Guid, version: &str) -> Self { + pub fn with_version_handler(self, guid: BinaryGuid, version: &str) -> Self { self.with_handler(guid, Box::new(VersionInfoHandler::new(version))) } /// Add an error injection handler for testing error conditions #[allow(dead_code)] // Used in stress testing scenarios - pub fn with_error_injection_handler(self, guid: efi::Guid) -> Self { + pub fn with_error_injection_handler(self, guid: BinaryGuid) -> Self { self.with_handler(guid, Box::new(ErrorInjectionHandler::new())) } /// Add a buffer size handler for testing various buffer scenarios #[allow(dead_code)] // Used in stress testing scenarios - pub fn with_buffer_size_handler(self, guid: efi::Guid) -> Self { + pub fn with_buffer_size_handler(self, guid: BinaryGuid) -> Self { self.with_handler(guid, Box::new(BufferSizeHandler::new())) } /// Add a computation handler for stress testing #[allow(dead_code)] // Used in stress testing scenarios - pub fn with_computation_handler(self, guid: efi::Guid) -> Self { + pub fn with_computation_handler(self, guid: BinaryGuid) -> Self { self.with_handler(guid, Box::new(ComputationHandler::new())) } diff --git a/components/patina_mm/tests/patina_mm_integration/common/handlers.rs b/components/patina_mm/tests/patina_mm_integration/common/handlers.rs index d0def8752..ec77c40a3 100644 --- a/components/patina_mm/tests/patina_mm_integration/common/handlers.rs +++ 
b/components/patina_mm/tests/patina_mm_integration/common/handlers.rs @@ -17,11 +17,16 @@ //! SPDX-License-Identifier: Apache-2.0 use crate::patina_mm_integration::common::constants::*; +use r_efi::efi; extern crate alloc; use alloc::{string::String, vec::Vec}; -use zerocopy::{FromBytes, IntoBytes}; -use zerocopy_derive::*; +pub use zerocopy::IntoBytes; + +pub use patina::management_mode::protocol::{ + mm_supervisor_request, + mm_supervisor_request::{MmSupervisorRequestHeader, MmSupervisorVersionInfo, RequestType, ResponseType}, +}; /// Standardized error type for MM handlers #[derive(Debug, Clone, PartialEq, Eq)] @@ -109,64 +114,6 @@ impl MmHandler for VersionInfoHandler { } } -/// MM Supervisor request header -#[derive(Debug, Clone, Copy, IntoBytes, FromBytes, Immutable)] -#[repr(C)] -pub struct MmSupervisorRequestHeader { - pub signature: u32, - pub revision: u32, - pub request: u32, - pub reserved: u32, - pub result: u64, -} - -impl MmSupervisorRequestHeader { - const SIZE: usize = core::mem::size_of::(); - - /// Converts a byte slice to a MmSupervisorRequestHeader - #[allow(dead_code)] // // Usage not recognized - pub fn from_bytes(bytes: &[u8]) -> Result { - if bytes.len() < Self::SIZE { - return Err(MmHandlerError::InvalidInput("Buffer too small for header".to_string())); - } - - Self::read_from_bytes(&bytes[..Self::SIZE]) - .map_err(|_| MmHandlerError::InvalidInput("Failed to parse header from bytes".to_string())) - } - - /// Converts a MmSupervisorRequestHeader instance to a byte vector - pub fn to_bytes(self) -> Vec { - self.as_bytes().to_vec() - } -} - -/// MM Supervisor version information -#[derive(Debug, Clone, Copy, IntoBytes, FromBytes, Immutable)] -#[repr(C)] -pub struct MmSupervisorVersionInfo { - pub version: u32, - pub patch_level: u32, - pub max_supervisor_request_level: u64, -} - -impl MmSupervisorVersionInfo { - const SIZE: usize = core::mem::size_of::(); - - #[allow(dead_code)] // Usage not recognized - pub fn from_bytes(bytes: &[u8]) -> 
Result { - if bytes.len() < Self::SIZE { - return Err(MmHandlerError::InvalidInput("Buffer too small for version info".to_string())); - } - - Self::read_from_bytes(&bytes[..Self::SIZE]) - .map_err(|_| MmHandlerError::InvalidInput("Failed to parse version info from bytes".to_string())) - } - - fn to_bytes(self) -> Vec { - self.as_bytes().to_vec() - } -} - /// MM Supervisor handler for testing supervisor communication patterns pub struct MmSupervisorHandler { #[allow(dead_code)] // Usage not recognized @@ -180,9 +127,9 @@ impl MmSupervisorHandler { fn handle_get_info_request(&self) -> MmHandlerResult> { let response_header = MmSupervisorRequestHeader { - signature: mm_supv::REQUEST_SIGNATURE, - revision: mm_supv::REVISION, - request: mm_supv::requests::VERSION_INFO, + signature: mm_supervisor_request::SIGNATURE, + revision: mm_supervisor_request::REVISION, + request: RequestType::VersionInfo as u32, reserved: 0, result: 0, // Success }; @@ -190,12 +137,12 @@ impl MmSupervisorHandler { let version_info = MmSupervisorVersionInfo { version: mm_supv::VERSION, patch_level: mm_supv::PATCH_LEVEL, - max_supervisor_request_level: mm_supv::MAX_REQUEST_LEVEL, + max_supervisor_request_level: RequestType::MAX_REQUEST_TYPE, }; let mut response = Vec::new(); - response.extend_from_slice(&response_header.to_bytes()); - response.extend_from_slice(&version_info.to_bytes()); + response.extend_from_slice(response_header.as_bytes()); + response.extend_from_slice(version_info.as_bytes()); log::debug!(target: "supervisor_handler", "Generated get info response: {} bytes", response.len()); Ok(response) @@ -203,9 +150,9 @@ impl MmSupervisorHandler { fn handle_get_capabilities_request(&self) -> MmHandlerResult> { let response_header = MmSupervisorRequestHeader { - signature: mm_supv::REQUEST_SIGNATURE, - revision: mm_supv::REVISION, - request: mm_supv::requests::FETCH_POLICY, + signature: mm_supervisor_request::SIGNATURE, + revision: mm_supervisor_request::REVISION, + request: 
RequestType::FetchPolicy as u32, reserved: 0, result: 0, // Success }; @@ -213,7 +160,7 @@ impl MmSupervisorHandler { let capabilities: u64 = 0x00000007; // Mock capabilities value let mut response = Vec::new(); - response.extend_from_slice(&response_header.to_bytes()); + response.extend_from_slice(response_header.as_bytes()); response.extend_from_slice(&capabilities.to_le_bytes()); log::debug!(target: "supervisor_handler", "Generated get capabilities response: {} bytes", response.len()); @@ -222,9 +169,9 @@ impl MmSupervisorHandler { fn handle_comm_update_request(&self) -> MmHandlerResult> { let response_header = MmSupervisorRequestHeader { - signature: mm_supv::REQUEST_SIGNATURE, - revision: mm_supv::REVISION, - request: mm_supv::requests::COMM_UPDATE, + signature: mm_supervisor_request::SIGNATURE, + revision: mm_supervisor_request::REVISION, + request: RequestType::CommUpdate as u32, reserved: 0, result: 0, // Success }; @@ -233,7 +180,7 @@ impl MmSupervisorHandler { let update_result: u32 = 0x00000001; // Success status let mut response = Vec::new(); - response.extend_from_slice(&response_header.to_bytes()); + response.extend_from_slice(response_header.as_bytes()); response.extend_from_slice(&update_result.to_le_bytes()); log::debug!(target: "supervisor_handler", "Generated comm update response: {} bytes", response.len()); @@ -242,9 +189,9 @@ impl MmSupervisorHandler { fn handle_unblock_mem_request(&self) -> MmHandlerResult> { let response_header = MmSupervisorRequestHeader { - signature: mm_supv::REQUEST_SIGNATURE, - revision: mm_supv::REVISION, - request: mm_supv::requests::UNBLOCK_MEM, + signature: mm_supervisor_request::SIGNATURE, + revision: mm_supervisor_request::REVISION, + request: RequestType::UnblockMem as u32, reserved: 0, result: 0, // Success }; @@ -253,7 +200,7 @@ impl MmSupervisorHandler { let unblock_status: u64 = 0x0000000000000001; // Success - memory regions unblocked let mut response = Vec::new(); - 
response.extend_from_slice(&response_header.to_bytes()); + response.extend_from_slice(response_header.as_bytes()); response.extend_from_slice(&unblock_status.to_le_bytes()); log::debug!(target: "supervisor_handler", "Generated unblock mem response: {} bytes", response.len()); @@ -273,58 +220,59 @@ impl MmHandler for MmSupervisorHandler { ))); } - let request_header = MmSupervisorRequestHeader::from_bytes(data)?; + let request_header = MmSupervisorRequestHeader::from_bytes(data) + .ok_or_else(|| MmHandlerError::InvalidInput("Failed to parse header from bytes".to_string()))?; // Validate signature - if request_header.signature != mm_supv::REQUEST_SIGNATURE { + if request_header.signature != mm_supervisor_request::SIGNATURE { return Err(MmHandlerError::InvalidInput(format!( "Invalid signature: 0x{:08X}, expected 0x{:08X}", request_header.signature, - mm_supv::REQUEST_SIGNATURE + mm_supervisor_request::SIGNATURE ))); } // Validate revision - if request_header.revision != mm_supv::REVISION { + if request_header.revision != mm_supervisor_request::REVISION { return Err(MmHandlerError::InvalidInput(format!( "Invalid revision: 0x{:08X}, expected 0x{:08X}", request_header.revision, - mm_supv::REVISION + mm_supervisor_request::REVISION ))); } // Process based on request type - match request_header.request { - mm_supv::requests::VERSION_INFO => { + match RequestType::try_from(request_header.request) { + Ok(RequestType::VersionInfo) => { log::debug!(target: "supervisor_handler", "Processing get info request"); self.handle_get_info_request() } - mm_supv::requests::FETCH_POLICY => { + Ok(RequestType::FetchPolicy) => { log::debug!(target: "supervisor_handler", "Processing fetch policy request"); self.handle_get_capabilities_request() } - mm_supv::requests::COMM_UPDATE => { + Ok(RequestType::CommUpdate) => { log::debug!(target: "supervisor_handler", "Processing comm update request"); self.handle_comm_update_request() } - mm_supv::requests::UNBLOCK_MEM => { + 
Ok(RequestType::UnblockMem) => { log::debug!(target: "supervisor_handler", "Processing unblock mem request"); self.handle_unblock_mem_request() } - _ => { + Err(_) => { log::warn!(target: "supervisor_handler", "Unsupported request type: 0x{:08X}", request_header.request); // Return error response let error_header = MmSupervisorRequestHeader { - signature: mm_supv::REQUEST_SIGNATURE, - revision: mm_supv::REVISION, + signature: mm_supervisor_request::SIGNATURE, + revision: mm_supervisor_request::REVISION, request: request_header.request, reserved: 0, - result: 0xFFFFFFFFFFFFFFFF, // Error + result: efi::Status::from(ResponseType::InvalidRequest).as_usize() as u64, // Error }; let mut response = Vec::new(); - response.extend_from_slice(&error_header.to_bytes()); + response.extend_from_slice(error_header.as_bytes()); Ok(response) } } @@ -482,11 +430,11 @@ mod tests { result: 0x123456789ABCDEF0, }; - let bytes = original.to_bytes(); + let bytes = original.as_bytes(); assert_eq!(bytes.len(), MmSupervisorRequestHeader::SIZE); - let recovered = MmSupervisorRequestHeader::from_bytes(&bytes); - assert!(recovered.is_ok(), "Should successfully parse the header"); + let recovered = MmSupervisorRequestHeader::from_bytes(bytes); + assert!(recovered.is_some(), "Should successfully parse the header"); let recovered = recovered.unwrap(); assert_eq!(recovered.signature, original.signature); diff --git a/components/patina_mm/tests/patina_mm_integration/common/message_parser.rs b/components/patina_mm/tests/patina_mm_integration/common/message_parser.rs index abf2b5d41..3c77aea99 100644 --- a/components/patina_mm/tests/patina_mm_integration/common/message_parser.rs +++ b/components/patina_mm/tests/patina_mm_integration/common/message_parser.rs @@ -13,7 +13,7 @@ extern crate alloc; #[allow(unused_imports)] // Used in test module within this file use alloc::vec::Vec; -use r_efi::efi; +use patina::BinaryGuid; /// Error types for MM message parsing operations #[derive(Debug, PartialEq)] @@ 
-46,7 +46,7 @@ impl core::fmt::Display for MmMessageParseError { #[repr(C)] struct MmCommunicateHeader { /// Recipient handler GUID - header_guid: efi::Guid, + header_guid: BinaryGuid, /// Length of the message data (excluding header) message_length: u64, } @@ -55,7 +55,7 @@ impl MmCommunicateHeader { const SIZE: usize = core::mem::size_of::(); /// Create a new header with the specified GUID and message length - fn new(guid: &efi::Guid, message_length: u64) -> Self { + fn new(guid: &BinaryGuid, message_length: u64) -> Self { Self { header_guid: *guid, message_length } } @@ -78,8 +78,7 @@ impl MmCommunicateHeader { } // Byte-by-byte copy to avoid alignment issues - let mut header = - MmCommunicateHeader { header_guid: efi::Guid::from_fields(0, 0, 0, 0, 0, &[0; 6]), message_length: 0 }; + let mut header = MmCommunicateHeader { header_guid: patina::guids::ZERO, message_length: 0 }; // SAFETY: MmCommunicateHeader is repr(C) with well-defined size and layout let header_bytes = unsafe { core::slice::from_raw_parts_mut(&mut header as *mut Self as *mut u8, Self::SIZE) }; @@ -111,7 +110,7 @@ impl<'a> MmMessageParser<'a> { } /// Parse an MM message from the buffer, returning the GUID and message data - pub fn parse_message(&self) -> Result<(efi::Guid, &[u8]), MmMessageParseError> { + pub fn parse_message(&self) -> Result<(BinaryGuid, &[u8]), MmMessageParseError> { if self.buffer.len() < MmCommunicateHeader::SIZE { return Err(MmMessageParseError::BufferTooSmall); } @@ -130,7 +129,7 @@ impl<'a> MmMessageParser<'a> { } /// Write an MM message to the buffer with the specified GUID and data - pub fn write_message(&mut self, guid: &efi::Guid, data: &[u8]) -> Result<(), MmMessageParseError> { + pub fn write_message(&mut self, guid: &BinaryGuid, data: &[u8]) -> Result<(), MmMessageParseError> { let total_size = MmCommunicateHeader::SIZE + data.len(); if total_size > self.buffer.len() { return Err(MmMessageParseError::BufferTooSmall); @@ -175,7 +174,7 @@ impl<'a> MmMessageParser<'a> 
{ /// Get the GUID from the header #[allow(dead_code)] // Part of complete message manipulation API - pub fn get_header_guid(&self) -> Result { + pub fn get_header_guid(&self) -> Result { if self.buffer.len() < MmCommunicateHeader::SIZE { return Err(MmMessageParseError::BufferTooSmall); } @@ -197,8 +196,7 @@ mod tests { #[test] fn test_message_round_trip() { let mut buffer = vec![0u8; 128]; - let test_guid = - efi::Guid::from_fields(0x12345678, 0x1234, 0x5678, 0x12, 0x34, &[0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0]); + let test_guid = BinaryGuid::from_string("12345678-1234-5678-1234-56789ABCDEF0"); let test_data = b"Hello, MM World!"; let mut parser = MmMessageParser::new(&mut buffer); @@ -219,8 +217,7 @@ mod tests { #[test] fn test_buffer_too_small() { let mut small_buffer = vec![0u8; 4]; // Much smaller than header size - let test_guid = - efi::Guid::from_fields(0x12345678, 0x1234, 0x5678, 0x12, 0x34, &[0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0]); + let test_guid = BinaryGuid::from_string("12345678-1234-5678-1234-56789ABCDEF0"); let test_data = b"Data"; let mut parser = MmMessageParser::new(&mut small_buffer); diff --git a/components/patina_mm/tests/patina_mm_integration/common/real_component_framework.rs b/components/patina_mm/tests/patina_mm_integration/common/real_component_framework.rs index e89dc84d1..142d5aeb1 100644 --- a/components/patina_mm/tests/patina_mm_integration/common/real_component_framework.rs +++ b/components/patina_mm/tests/patina_mm_integration/common/real_component_framework.rs @@ -108,7 +108,7 @@ impl MmExecutor for TestMmExecutor { /// the complete patina_mm component stack, including real `CommunicateBuffer` operations. 
pub struct RealComponentMmTestFramework { /// Real MM Communicator service using actual communication logic - mm_communicator: MmCommunicator, + mm_communicator: MmCommunicator, } impl RealComponentMmTestFramework { @@ -174,7 +174,7 @@ impl RealComponentMmTestFrameworkBuilder { let test_executor = TestMmExecutor::new(handlers); // Create real MM communicator with test executor and test communication buffer - let mm_communicator = MmCommunicator::with_executor(Box::new(test_executor)); + let mm_communicator = MmCommunicator::with_executor(test_executor); // Set up the communication buffer in the communicator mm_communicator.set_test_comm_buffers(vec![comm_buffer]); diff --git a/components/patina_mm/tests/patina_mm_integration/framework/core_functionality_tests.rs b/components/patina_mm/tests/patina_mm_integration/framework/core_functionality_tests.rs index 1eb99c590..f80ac028b 100644 --- a/components/patina_mm/tests/patina_mm_integration/framework/core_functionality_tests.rs +++ b/components/patina_mm/tests/patina_mm_integration/framework/core_functionality_tests.rs @@ -9,33 +9,32 @@ //! //! 
SPDX-License-Identifier: Apache-2.0 -use patina::Guid; +use patina::{BinaryGuid, Guid, pi::protocols::communication::EfiMmCommunicateHeader}; use patina_mm::{ component::communicator::{MmCommunication, MmCommunicator, MmExecutor, Status}, - config::{CommunicateBuffer, EfiMmCommunicateHeader}, + config::CommunicateBuffer, }; use core::pin::Pin; use std::collections::HashMap; -extern crate alloc; -use alloc::{boxed::Box, vec::Vec}; +use std::vec::Vec; /// Lightweight MM handler used for testing struct TestHandler { - guid: r_efi::efi::Guid, //The r_efi GUID is kept for the internal GUID here and concerted as needed + guid: BinaryGuid, //The BinaryGuid is kept for the internal GUID here and converted as needed response_data: Vec, } impl TestHandler { - fn new(guid: r_efi::efi::Guid, response_data: Vec) -> Self { + fn new(guid: BinaryGuid, response_data: Vec) -> Self { Self { guid, response_data } } } /// Simple MM executor used for testing struct CoreTestExecutor { - handlers: HashMap, + handlers: HashMap, } impl CoreTestExecutor { @@ -50,17 +49,17 @@ impl CoreTestExecutor { impl MmExecutor for CoreTestExecutor { fn execute_mm(&self, comm_buffer: &mut CommunicateBuffer) -> Result<(), Status> { - let recipient_guid = comm_buffer + let recipient: BinaryGuid = comm_buffer .get_header_guid() .map_err(|_| Status::CommBufferInitError)? - .ok_or(Status::CommBufferInitError)?; + .ok_or(Status::CommBufferInitError)? 
+ .to_efi_guid() + .into(); - let handler = self.handlers.get(&recipient_guid.to_efi_guid()).ok_or(Status::CommBufferNotFound)?; + let handler = self.handlers.get(&recipient).ok_or(Status::CommBufferNotFound)?; - // Set response (need to clone the recipient_guid to avoid borrow conflicts) - let recipient_copy = Guid::from_bytes(&recipient_guid.as_bytes()); comm_buffer.reset(); - comm_buffer.set_message_info(recipient_copy).map_err(|_| Status::CommBufferInitError)?; + comm_buffer.set_message_info(recipient.as_guid()).map_err(|_| Status::CommBufferInitError)?; comm_buffer.set_message(&handler.response_data).map_err(|_| Status::CommBufferInitError)?; Ok(()) @@ -71,16 +70,16 @@ impl MmExecutor for CoreTestExecutor { mod tests { use super::*; - const TEST_GUID: r_efi::efi::Guid = - r_efi::efi::Guid::from_fields(0x12345678, 0x1234, 0x5678, 0x12, 0x34, &[0x56, 0x78, 0x90, 0xab, 0xcd, 0xef]); + const TEST_GUID: BinaryGuid = + BinaryGuid::from_fields(0x12345678, 0x1234, 0x5678, 0x12, 0x34, &[0x56, 0x78, 0x90, 0xab, 0xcd, 0xef]); - fn create_test_communicator() -> MmCommunicator { + fn create_test_communicator() -> MmCommunicator { let mut executor = CoreTestExecutor::new(); executor.add_handler(TestHandler::new(TEST_GUID, b"test response".to_vec())); let buffers = vec![CommunicateBuffer::new(Pin::new(Box::leak(Box::new([0u8; 1024]))), 0)]; - let communicator = MmCommunicator::with_executor(Box::new(executor)); + let communicator = MmCommunicator::with_executor(executor); communicator.set_test_comm_buffers(buffers); communicator } @@ -134,14 +133,8 @@ mod tests { #[test] fn test_communication_unknown_handler() { let communicator = create_test_communicator(); - let unknown_guid = r_efi::efi::Guid::from_fields( - 0xDEADBEEF, - 0xCAFE, - 0xABCD, - 0xAA, - 0xBB, - &[0xCC, 0xDD, 0xEE, 0xFF, 0x00, 0x11], - ); + let unknown_guid = + BinaryGuid::from_fields(0xDEADBEEF, 0xCAFE, 0xABCD, 0xAA, 0xBB, &[0xCC, 0xDD, 0xEE, 0xFF, 0x00, 0x11]); let result = communicator.communicate(0, 
b"test", Guid::from_ref(&unknown_guid)); @@ -194,14 +187,8 @@ mod tests { #[test] fn test_safe_message_parsing() { // Basic test to verify the framework works with message parsing - let test_guid = r_efi::efi::Guid::from_fields( - 0x12345678, - 0x1234, - 0x5678, - 0x12, - 0x34, - &[0x56, 0x78, 0x90, 0xab, 0xcd, 0xef], - ); + let test_guid = + BinaryGuid::from_fields(0x12345678, 0x1234, 0x5678, 0x12, 0x34, &[0x56, 0x78, 0x90, 0xab, 0xcd, 0xef]); let test_data = b"Integration test"; // This test validates that GUIDs and data can be safely handled diff --git a/components/patina_mm/tests/patina_mm_integration/mm_communicator/component_integration_tests.rs b/components/patina_mm/tests/patina_mm_integration/mm_communicator/component_integration_tests.rs index daa3a65c6..d10871edf 100644 --- a/components/patina_mm/tests/patina_mm_integration/mm_communicator/component_integration_tests.rs +++ b/components/patina_mm/tests/patina_mm_integration/mm_communicator/component_integration_tests.rs @@ -16,6 +16,7 @@ use patina::{ Guid, component::{IntoComponent, Storage}, + management_mode::protocol::{mm_supervisor_request, mm_supervisor_request::RequestType}, }; use patina_mm::{ component::{communicator::MmCommunicator, sw_mmi_manager::SwMmiManager}, @@ -24,6 +25,8 @@ use patina_mm::{ use core::pin::Pin; +use r_efi::efi; + use crate::patina_mm_integration::common::*; #[test] @@ -156,17 +159,17 @@ fn test_real_component_mm_supervisor_version_request() { // Create MM Supervisor version request using the actual structures let version_request = MmSupervisorRequestHeader { - signature: u32::from_le_bytes(mm_supv::SIGNATURE), - revision: mm_supv::REVISION, - request: mm_supv::requests::VERSION_INFO, + signature: mm_supervisor_request::SIGNATURE, + revision: mm_supervisor_request::REVISION, + request: RequestType::VersionInfo.into(), reserved: 0, result: 0, }; - let request_bytes = version_request.to_bytes(); + let request_bytes = version_request.as_bytes(); // Send the request using the 
real component framework - let result = framework.communicate(&Guid::from_ref(&test_guids::MM_SUPERVISOR), &request_bytes); + let result = framework.communicate(&Guid::from_ref(&test_guids::MM_SUPERVISOR), request_bytes); assert!(result.is_ok(), "Real component MM Supervisor communication should succeed: {:?}", result.err()); let response = result.unwrap(); @@ -181,10 +184,10 @@ fn test_real_component_mm_supervisor_version_request() { MmSupervisorRequestHeader::from_bytes(&response).expect("Should parse response header from real component"); // Verify header fields - assert_eq!(response_header.signature, mm_supv::REQUEST_SIGNATURE, "Response signature should match"); - assert_eq!(response_header.revision, mm_supv::REVISION, "Response revision should match"); - assert_eq!(response_header.request, mm_supv::requests::VERSION_INFO, "Response request type should match"); - assert_eq!(response_header.result, 0, "Response should indicate success"); + assert_eq!(response_header.signature, mm_supervisor_request::SIGNATURE, "Response signature should match"); + assert_eq!(response_header.revision, mm_supervisor_request::REVISION, "Response revision should match"); + assert_eq!(response_header.request, RequestType::VersionInfo.into(), "Response request type should match"); + assert_eq!(response_header.result, efi::Status::SUCCESS.as_usize() as u64, "Response should indicate success"); // Parse version info from response let version_info_offset = core::mem::size_of::(); @@ -201,7 +204,7 @@ fn test_real_component_mm_supervisor_version_request() { assert_eq!( version_info.max_supervisor_request_level, - mm_supv::MAX_REQUEST_LEVEL, + RequestType::MAX_REQUEST_TYPE, "Max request level should match expected value" ); } @@ -214,10 +217,10 @@ fn test_real_component_invalid_guid_communication() { .expect("Real component framework should initialize successfully"); // Use an unknown GUID that has no registered handler - let unknown_guid = r_efi::efi::Guid::from_fields(0xFFFFFFFF, 0xFFFF, 
0xFFFF, 0xFF, 0xFF, &[0xFF; 6]); + let unknown_guid = Guid::try_from_string("FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF").unwrap(); let test_data = b"This should fail"; - let result = framework.communicate(&Guid::from_ref(&unknown_guid), test_data); + let result = framework.communicate(&unknown_guid, test_data); // The real components should properly handle this error case assert!(result.is_err(), "Communication with unknown GUID should fail"); @@ -275,15 +278,15 @@ fn test_real_component_multiple_handlers() { // Test MM supervisor handler let supervisor_request = MmSupervisorRequestHeader { - signature: u32::from_le_bytes(mm_supv::SIGNATURE), - revision: mm_supv::REVISION, - request: mm_supv::requests::FETCH_POLICY, + signature: mm_supervisor_request::SIGNATURE, + revision: mm_supervisor_request::REVISION, + request: RequestType::FetchPolicy.into(), reserved: 0, result: 0, }; let supervisor_result = - framework.communicate(&Guid::from_ref(&test_guids::MM_SUPERVISOR), &supervisor_request.to_bytes()); + framework.communicate(&Guid::from_ref(&test_guids::MM_SUPERVISOR), supervisor_request.as_bytes()); assert!(supervisor_result.is_ok(), "Supervisor communication should succeed"); // Both handlers should work independently through the real component infrastructure diff --git a/components/patina_mm/tests/patina_mm_integration/mm_communicator/stress_tests.rs b/components/patina_mm/tests/patina_mm_integration/mm_communicator/stress_tests.rs index 8ca1b4b2e..4873361b3 100644 --- a/components/patina_mm/tests/patina_mm_integration/mm_communicator/stress_tests.rs +++ b/components/patina_mm/tests/patina_mm_integration/mm_communicator/stress_tests.rs @@ -10,8 +10,6 @@ use crate::patina_mm_integration::common::{constants::*, framework::*}; -extern crate alloc; -use alloc::{boxed::Box, format, vec, vec::Vec}; use core::pin::Pin; use patina::{ Guid, @@ -25,16 +23,13 @@ use patina_mm::{ /// Additional test GUIDs for stress testing mod stress_guids { - use r_efi::efi; + use 
patina::BinaryGuid; - pub const ERROR_INJECTION: efi::Guid = - efi::Guid::from_fields(0xdeadbeef, 0x1111, 0x2222, 0x33, 0x44, &[0x55, 0x66, 0x77, 0x88, 0x99, 0xaa]); + pub const ERROR_INJECTION: BinaryGuid = BinaryGuid::from_string("BBCCDDEE-1111-2222-3344-5566778899AA"); - pub const BUFFER_SIZE_TEST: efi::Guid = - efi::Guid::from_fields(0xcafebabe, 0x3333, 0x4444, 0x55, 0x66, &[0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc]); + pub const BUFFER_SIZE_TEST: BinaryGuid = BinaryGuid::from_string("DDEEFF00-3333-4444-5566-778899AABBCC"); - pub const COMPUTATION_TEST: efi::Guid = - efi::Guid::from_fields(0xfeedf00d, 0x5555, 0x6666, 0x77, 0x88, &[0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee]); + pub const COMPUTATION_TEST: BinaryGuid = BinaryGuid::from_string("FF001122-5555-6666-7788-99AABBCCDDEE"); } /// Mock MM Executor for stress testing that uses the test framework @@ -57,20 +52,20 @@ impl MmExecutor for StressTestMmExecutor { let data = comm_buffer.get_message().map_err(|_| patina_mm::component::communicator::Status::InvalidDataBuffer)?; - // Extract the GUID from the header and convert to an owned efi::Guid + // Extract the GUID from the header and convert to BinaryGuid let guid = comm_buffer .get_header_guid() .map_err(|_| patina_mm::component::communicator::Status::InvalidDataBuffer)? - .ok_or(patina_mm::component::communicator::Status::InvalidDataBuffer)? 
- .to_efi_guid(); // Convert to owned efi::Guid to avoid borrowing issues + .ok_or(patina_mm::component::communicator::Status::InvalidDataBuffer)?; + let binary_guid: patina::BinaryGuid = guid.to_efi_guid().into(); // Use the test framework to process the message - match self.framework.communicate(&guid, &data) { + match self.framework.communicate(&binary_guid, &data) { Ok(response) => { // Set the response back in the buffer comm_buffer.reset(); comm_buffer - .set_message_info(Guid::from_ref(&guid)) + .set_message_info(Guid::from_ref(&binary_guid)) .map_err(|_| patina_mm::component::communicator::Status::CommBufferInitError)?; comm_buffer .set_message(&response) @@ -114,7 +109,7 @@ impl TestDataGenerator { } /// Create a configured MM Communicator for stress testing -fn create_stress_test_communicator() -> (MmCommunicator, MmTestFramework) { +fn create_stress_test_communicator() -> (MmCommunicator, MmTestFramework) { // Create a comprehensive test framework with all handler types let framework = MmTestFramework::builder() .with_echo_handler() @@ -126,7 +121,7 @@ fn create_stress_test_communicator() -> (MmCommunicator, MmTestFramework) { .expect("Framework creation should succeed"); // Create MM Communicator with our test executor - let executor = Box::new(StressTestMmExecutor::new(framework.clone())); + let executor = StressTestMmExecutor::new(framework.clone()); let communicator = MmCommunicator::with_executor(executor); // Create communication buffers for testing diff --git a/components/patina_mm/tests/patina_mm_integration/mm_supervisor/communication_tests.rs b/components/patina_mm/tests/patina_mm_integration/mm_supervisor/communication_tests.rs index c4cb5bd3c..9cdec6711 100644 --- a/components/patina_mm/tests/patina_mm_integration/mm_supervisor/communication_tests.rs +++ b/components/patina_mm/tests/patina_mm_integration/mm_supervisor/communication_tests.rs @@ -15,6 +15,8 @@ //! 
SPDX-License-Identifier: Apache-2.0 use crate::patina_mm_integration::common::*; +use patina::management_mode::protocol::{mm_supervisor_request, mm_supervisor_request::RequestType}; +use r_efi::efi; #[test] fn test_mm_supervisor_version_request_integration() { @@ -25,17 +27,17 @@ fn test_mm_supervisor_version_request_integration() { // Create MM Supervisor version request using safe operations let version_request = MmSupervisorRequestHeader { - signature: u32::from_le_bytes(mm_supv::SIGNATURE), - revision: mm_supv::REVISION, - request: mm_supv::requests::VERSION_INFO, + signature: mm_supervisor_request::SIGNATURE, + revision: mm_supervisor_request::REVISION, + request: RequestType::VersionInfo.into(), reserved: 0, result: 0, }; - let request_bytes = version_request.to_bytes(); + let request_bytes = version_request.as_bytes(); // Send the request using framework - let result = framework.communicate(&test_guids::MM_SUPERVISOR, &request_bytes); + let result = framework.communicate(&test_guids::MM_SUPERVISOR, request_bytes); assert!(result.is_ok(), "MM Supervisor communication should succeed: {:?}", result.err()); let response = result.unwrap(); @@ -51,7 +53,7 @@ fn test_mm_supervisor_version_request_integration() { assert_eq!(response_header.signature, version_request.signature, "Signature should match"); assert_eq!(response_header.revision, version_request.revision, "Revision should match"); assert_eq!(response_header.request, version_request.request, "Request type should match"); - assert_eq!(response_header.result, mm_supv::responses::SUCCESS, "Result should be success"); + assert_eq!(response_header.result, efi::Status::SUCCESS.as_usize() as u64, "Result should be success"); // Parse version info safely let version_info_offset = core::mem::size_of::(); @@ -75,17 +77,17 @@ fn test_mm_supervisor_capabilities_request() { // Create capabilities request using safe operations let capabilities_request = MmSupervisorRequestHeader { - signature: 
u32::from_le_bytes(mm_supv::SIGNATURE), - revision: mm_supv::REVISION, - request: mm_supv::requests::FETCH_POLICY, + signature: mm_supervisor_request::SIGNATURE, + revision: mm_supervisor_request::REVISION, + request: RequestType::FetchPolicy.into(), reserved: 0, result: 0, }; - let request_bytes = capabilities_request.to_bytes(); + let request_bytes = capabilities_request.as_bytes(); // Send the request using framework - let result = framework.communicate(&test_guids::MM_SUPERVISOR, &request_bytes); + let result = framework.communicate(&test_guids::MM_SUPERVISOR, request_bytes); assert!(result.is_ok(), "MM Supervisor capabilities communication should succeed"); let response = result.unwrap(); @@ -97,7 +99,7 @@ fn test_mm_supervisor_capabilities_request() { // Parse response header safely let response_header = MmSupervisorRequestHeader::from_bytes(&response).expect("Should parse response header"); - assert_eq!(response_header.result, mm_supv::responses::SUCCESS, "Capabilities request should succeed"); + assert_eq!(response_header.result, efi::Status::SUCCESS.as_usize() as u64, "Capabilities request should succeed"); // Parse capabilities safely let capabilities_offset = core::mem::size_of::(); @@ -124,17 +126,17 @@ fn test_mm_supervisor_invalid_request() { // Create invalid request using safe operations let invalid_request = MmSupervisorRequestHeader { - signature: u32::from_le_bytes(mm_supv::SIGNATURE), - revision: mm_supv::REVISION, + signature: mm_supervisor_request::SIGNATURE, + revision: mm_supervisor_request::REVISION, request: 0xFFFF, // Invalid request type reserved: 0, result: 0, }; - let request_bytes = invalid_request.to_bytes(); + let request_bytes = invalid_request.as_bytes(); // Send the request using framework - let result = framework.communicate(&test_guids::MM_SUPERVISOR, &request_bytes); + let result = framework.communicate(&test_guids::MM_SUPERVISOR, request_bytes); assert!(result.is_ok(), "Communication should succeed even with invalid 
request"); let response = result.unwrap(); @@ -142,7 +144,11 @@ fn test_mm_supervisor_invalid_request() { // Parse response header safely let response_header = MmSupervisorRequestHeader::from_bytes(&response).expect("Should parse response header"); - assert_eq!(response_header.result, mm_supv::responses::ERROR, "Invalid request should return error"); + assert_eq!( + response_header.result, + efi::Status::INVALID_PARAMETER.as_usize() as u64, + "Invalid request should return error" + ); } #[test] @@ -153,16 +159,16 @@ fn test_mm_supervisor_invalid_signature() { // Create request with invalid signature using safe operations let invalid_request = MmSupervisorRequestHeader { signature: u32::from_le_bytes([b'I', b'N', b'V', b'D']), // Invalid signature - revision: mm_supv::REVISION, - request: mm_supv::requests::VERSION_INFO, + revision: mm_supervisor_request::REVISION, + request: RequestType::VersionInfo.into(), reserved: 0, result: 0, }; - let request_bytes = invalid_request.to_bytes(); + let request_bytes = invalid_request.as_bytes(); // Test handler directly - let result = mm_supervisor.handle_request(&request_bytes); + let result = mm_supervisor.handle_request(request_bytes); assert!(result.is_err(), "Invalid signature should cause handler to fail"); let error_msg = format!("{}", result.unwrap_err()); @@ -207,16 +213,16 @@ fn test_mm_supervisor_builder_integration() { // Test MM Supervisor handler as well let version_request = MmSupervisorRequestHeader { - signature: u32::from_le_bytes(mm_supv::SIGNATURE), - revision: mm_supv::REVISION, - request: mm_supv::requests::VERSION_INFO, + signature: mm_supervisor_request::SIGNATURE, + revision: mm_supervisor_request::REVISION, + request: RequestType::VersionInfo.into(), reserved: 0, result: 0, }; // Use framework directly instead of mm_comm_service - let request_data = version_request.to_bytes(); // Convert to bytes - let supervisor_result = framework.communicate(&test_guids::MM_SUPERVISOR, &request_data); + let 
request_data = version_request.as_bytes(); // Convert to bytes + let supervisor_result = framework.communicate(&test_guids::MM_SUPERVISOR, request_data); assert!(supervisor_result.is_ok(), "MM Supervisor should work"); // Verify both triggers worked (we made 2 communication calls: echo + supervisor) @@ -229,26 +235,26 @@ fn test_safe_message_parsing_with_mm_supervisor() { let mut buffer = vec![0u8; TEST_BUFFER_SIZE]; let version_request = MmSupervisorRequestHeader { - signature: u32::from_le_bytes(mm_supv::SIGNATURE), - revision: mm_supv::REVISION, - request: mm_supv::requests::VERSION_INFO, + signature: mm_supervisor_request::SIGNATURE, + revision: mm_supervisor_request::REVISION, + request: RequestType::VersionInfo.into(), reserved: 0, result: 0, }; - let request_data = version_request.to_bytes(); + let request_data = version_request.as_bytes(); // Test writing MM Supervisor message safely let mut parser = MmMessageParser::new(&mut buffer); parser - .write_message(&test_guids::MM_SUPERVISOR, &request_data) + .write_message(&test_guids::MM_SUPERVISOR, request_data) .expect("Should write MM Supervisor message successfully"); // Test parsing the message back safely let (parsed_guid, parsed_data) = parser.parse_message().expect("Should parse MM Supervisor message successfully"); assert_eq!(parsed_guid, test_guids::MM_SUPERVISOR); - assert_eq!(parsed_data, &request_data); + assert_eq!(parsed_data, request_data); // Verify we can parse the MM Supervisor request from the parsed data let parsed_request = @@ -267,17 +273,17 @@ fn test_mm_supervisor_comm_update_request() { // Create communication buffer update request using COMM_UPDATE constant let comm_update_request = MmSupervisorRequestHeader { - signature: u32::from_le_bytes(mm_supv::SIGNATURE), - revision: mm_supv::REVISION, - request: mm_supv::requests::COMM_UPDATE, + signature: mm_supervisor_request::SIGNATURE, + revision: mm_supervisor_request::REVISION, + request: RequestType::CommUpdate.into(), reserved: 0, 
result: 0, }; - let request_bytes = comm_update_request.to_bytes(); + let request_bytes = comm_update_request.as_bytes(); // Send the request using framework - let result = framework.communicate(&test_guids::MM_SUPERVISOR, &request_bytes); + let result = framework.communicate(&test_guids::MM_SUPERVISOR, request_bytes); assert!(result.is_ok(), "MM Supervisor comm update communication should succeed"); let response = result.unwrap(); @@ -289,8 +295,8 @@ fn test_mm_supervisor_comm_update_request() { // Parse response header safely let response_header = MmSupervisorRequestHeader::from_bytes(&response).expect("Should parse response header"); - assert_eq!(response_header.request, mm_supv::requests::COMM_UPDATE, "Response should be for COMM_UPDATE request"); - assert_eq!(response_header.result, mm_supv::responses::SUCCESS, "Comm update request should succeed"); + assert_eq!(response_header.request, RequestType::CommUpdate.into(), "Response should be for COMM_UPDATE request"); + assert_eq!(response_header.result, efi::Status::SUCCESS.as_usize() as u64, "Comm update request should succeed"); // Parse update result safely let update_result_offset = core::mem::size_of::(); @@ -315,17 +321,17 @@ fn test_mm_supervisor_unblock_mem_request() { // Create memory unblock request using UNBLOCK_MEM constant let unblock_mem_request = MmSupervisorRequestHeader { - signature: u32::from_le_bytes(mm_supv::SIGNATURE), - revision: mm_supv::REVISION, - request: mm_supv::requests::UNBLOCK_MEM, // This uses the constant! 
+ signature: mm_supervisor_request::SIGNATURE, + revision: mm_supervisor_request::REVISION, + request: RequestType::UnblockMem.into(), reserved: 0, result: 0, }; - let request_bytes = unblock_mem_request.to_bytes(); + let request_bytes = unblock_mem_request.as_bytes(); // Send the request using framework - let result = framework.communicate(&test_guids::MM_SUPERVISOR, &request_bytes); + let result = framework.communicate(&test_guids::MM_SUPERVISOR, request_bytes); assert!(result.is_ok(), "MM Supervisor unblock mem communication should succeed"); let response = result.unwrap(); @@ -337,8 +343,8 @@ fn test_mm_supervisor_unblock_mem_request() { // Parse response header safely let response_header = MmSupervisorRequestHeader::from_bytes(&response).expect("Should parse response header"); - assert_eq!(response_header.request, mm_supv::requests::UNBLOCK_MEM, "Response should be for UNBLOCK_MEM request"); - assert_eq!(response_header.result, mm_supv::responses::SUCCESS, "Unblock mem request should succeed"); + assert_eq!(response_header.request, RequestType::UnblockMem.into(), "Response should be for UNBLOCK_MEM request"); + assert_eq!(response_header.result, efi::Status::SUCCESS.as_usize() as u64, "Unblock mem request should succeed"); // Parse unblock status safely let unblock_status_offset = core::mem::size_of::(); diff --git a/components/patina_mm/tests/patina_mm_integration/mod.rs b/components/patina_mm/tests/patina_mm_integration/mod.rs index 768468cc3..db638cf76 100644 --- a/components/patina_mm/tests/patina_mm_integration/mod.rs +++ b/components/patina_mm/tests/patina_mm_integration/mod.rs @@ -31,6 +31,8 @@ //! //! 
SPDX-License-Identifier: Apache-2.0 +extern crate alloc; + // Common utilities available to all test modules mod common; diff --git a/components/patina_mm_policy/Cargo.toml b/components/patina_mm_policy/Cargo.toml new file mode 100644 index 000000000..5ac9481da --- /dev/null +++ b/components/patina_mm_policy/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "patina_mm_policy" +resolver = "2" +version.workspace = true +repository.workspace = true +license.workspace = true +edition.workspace = true +readme = "README.md" +description = "MM Supervisor secure policy library for Rust UEFI environments." + +# Metadata to tell docs.rs how to build the documentation when uploading +[package.metadata.docs.rs] +features = ["doc"] + +[dependencies] +log = { workspace = true } +r-efi = { workspace = true } +spin = { workspace = true } + +[dev-dependencies] + +[features] +default = [] +std = [] +doc = [] diff --git a/components/patina_mm_policy/README.md b/components/patina_mm_policy/README.md new file mode 100644 index 000000000..efad5522d --- /dev/null +++ b/components/patina_mm_policy/README.md @@ -0,0 +1,44 @@ +# patina_mm_policy + +MM Supervisor secure policy library for Rust UEFI environments. + +This crate provides a comprehensive policy management library for the MM Supervisor, +including policy data structures, access validation (policy gate), and helper utilities. 
+ +## Features + +### Policy Gate +Initialize with a policy buffer pointer, then query whether operations are allowed: +- `is_io_allowed()` - Check I/O port access +- `is_msr_allowed()` - Check MSR access +- `is_instruction_allowed()` - Check privileged instruction execution +- `is_save_state_read_allowed()` - Check save state read access + +### Helper Functions +- `dump_policy()` - Print policy contents for debugging +- `compare_policies()` - Compare two policies (order-independent) +- `populate_memory_policy_from_page_table()` - Walk page tables to generate memory policy + +## Usage + +```rust,ignore +use patina_mm_policy::{PolicyGate, AccessType, IoWidth}; + +// Initialize the policy gate with a policy buffer +let gate = unsafe { PolicyGate::new(policy_ptr) }?; + +// Check if I/O access is allowed +let result = gate.is_io_allowed(0x3F8, IoWidth::Byte, AccessType::Read); +if result.is_ok() { + // Access allowed +} + +// Dump policy for debugging +gate.dump_policy(); +``` + +## License + +Copyright (c) Microsoft Corporation. + +SPDX-License-Identifier: Apache-2.0 diff --git a/components/patina_mm_policy/src/gate.rs b/components/patina_mm_policy/src/gate.rs new file mode 100644 index 000000000..2291c70ef --- /dev/null +++ b/components/patina_mm_policy/src/gate.rs @@ -0,0 +1,731 @@ +//! Policy Gate - Runtime access validation +//! +//! This module provides the `PolicyGate` struct that wraps a policy buffer +//! and provides methods to check if various operations are allowed. + +use crate::types::*; +use crate::helpers::{walk_page_table, IsInsideMmramFn}; +use spin::Once; + +// ============================================================================ +// Error Types +// ============================================================================ + +/// Errors that can occur during policy gate operations. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PolicyError { + /// The policy pointer is null. + NullPointer, + /// Invalid policy version. 
+ InvalidVersion, + /// Invalid access mask specified. + InvalidAccessMask, + /// Invalid I/O width specified. + InvalidIoWidth, + /// Invalid I/O address (out of 16-bit range). + InvalidIoAddress, + /// Invalid I/O address range (overflow). + InvalidIoRange, + /// Invalid instruction index. + InvalidInstructionIndex, + /// Invalid save state map field. + InvalidSaveStateField, + /// Policy root not found for the requested type. + PolicyRootNotFound, + /// Access denied by policy. + AccessDenied, + /// Internal error during policy evaluation. + InternalError, +} + +// ============================================================================ +// Policy Gate +// ============================================================================ + +/// Policy gate for runtime access validation. +/// +/// This struct wraps a policy buffer and provides methods to check if +/// various operations (I/O, MSR, instruction, save state) are allowed. +/// +/// ## Example +/// +/// ```rust,ignore +/// use patina_mm_policy::{PolicyGate, AccessType, IoWidth}; +/// +/// let gate = unsafe { PolicyGate::new(policy_ptr) }?; +/// +/// // Check I/O access +/// if gate.is_io_allowed(0x3F8, IoWidth::Byte, AccessType::Read).is_ok() { +/// // Access allowed +/// } +/// ``` +pub struct PolicyGate { + /// Pointer to the firmware policy data (static, read-only). + policy_ptr: *const u8, + /// Memory policy buffer (written by `walk_page_table` during snapshot). + memory_policy_buffer: *mut MemDescriptorV1_0, + /// Maximum number of `MemDescriptorV1_0` entries the memory policy buffer can hold. + memory_policy_max_count: usize, + /// Number of descriptors stored in the snapshot buffer. + /// + /// `None` means the ready-to-lock transition has **not** occurred. + /// `Some(count)` means a snapshot was taken with `count` entries. + snapshot_count: Once, +} + +// SAFETY: PolicyGate only holds a pointer to read-only policy data. 
+unsafe impl Send for PolicyGate {} +unsafe impl Sync for PolicyGate {} + +impl PolicyGate { + /// Creates a new policy gate from a policy buffer pointer. + /// + /// # Safety + /// + /// The caller must ensure that `policy_ptr` points to a valid policy buffer + /// that remains valid for the lifetime of this PolicyGate. + /// + /// # Returns + /// + /// Returns `Ok(PolicyGate)` if the policy is valid, or an error otherwise. + pub unsafe fn new(policy_ptr: *const u8) -> Result { + if policy_ptr.is_null() { + return Err(PolicyError::NullPointer); + } + + let policy = unsafe { &*(policy_ptr as *const SecurePolicyDataV1_0) }; + if !policy.is_valid_version() { + return Err(PolicyError::InvalidVersion); + } + + Ok(Self { + policy_ptr, + memory_policy_buffer: core::ptr::null_mut(), + memory_policy_max_count: 0, + snapshot_count: Once::new(), + }) + } + + /// Sets the memory policy buffer for page-table-derived snapshots. + /// + /// Must be called before [`take_snapshot`](Self::take_snapshot). Typically + /// the buffer address and size come from the PassDown HOB. + /// + /// # Safety + /// + /// # Safety Contract (deferred) + /// + /// The caller must ensure that `buffer` points to a valid memory region + /// that can hold at least `max_count` `MemDescriptorV1_0` entries and that + /// this memory remains valid for the lifetime of the `PolicyGate`. + /// + /// Storing the pointer is safe; the contract is enforced when the buffer + /// is later dereferenced by [`take_snapshot`], [`verify_snapshot`], or + /// [`fetch_n_update_policy`]. + pub fn set_memory_policy_buffer( + &mut self, + buffer: *mut MemDescriptorV1_0, + max_count: usize, + ) { + self.memory_policy_buffer = buffer; + self.memory_policy_max_count = max_count; + } + + /// Gets a reference to the policy header. + fn policy(&self) -> &SecurePolicyDataV1_0 { + // SAFETY: Constructor validated the pointer + unsafe { &*(self.policy_ptr as *const SecurePolicyDataV1_0) } + } + + /// Finds a policy root by type. 
+ fn find_policy_root(&self, policy_type: u32) -> Option<&PolicyRootV1> { + let policy = self.policy(); + // SAFETY: Constructor validated the policy + let roots = unsafe { policy.get_policy_roots() }; + roots.iter().find(|r| r.policy_type == policy_type) + } + + /// Checks if I/O access is allowed. + /// + /// # Arguments + /// + /// * `io_address` - The I/O port address (must be <= 0xFFFF) + /// * `width` - The access width + /// * `access_type` - Read or Write access + /// + /// # Returns + /// + /// Returns `Ok(())` if access is allowed, or `Err(PolicyError)` otherwise. + pub fn is_io_allowed( + &self, + io_address: u32, + width: IoWidth, + access_type: AccessType, + ) -> Result<(), PolicyError> { + // Validate access type (must be read or write, not execute) + if access_type == AccessType::Execute { + return Err(PolicyError::InvalidAccessMask); + } + + let io_size = width.size(); + + // Validate I/O address range (16-bit port space) + if io_address > u16::MAX as u32 { + return Err(PolicyError::InvalidIoAddress); + } + + // Check for overflow (MAX_UINT16 + 1 is valid for end address) + if io_address.saturating_add(io_size) > (u16::MAX as u32) + 1 { + return Err(PolicyError::InvalidIoRange); + } + + let policy_root = match self.find_policy_root(TYPE_IO) { + Some(root) => root, + None => { + log::warn!("Could not find IO policy root, denying access to be safe."); + return Err(PolicyError::PolicyRootNotFound); + } + }; + + // SAFETY: We validated the policy in the constructor + let descriptors = unsafe { policy_root.get_io_descriptors(self.policy_ptr) }; + let access_mask = access_type.as_attr_mask(); + + let mut found_match = false; + + for desc in descriptors { + let desc_start = desc.io_address as u32; + let desc_size = desc.length_or_width as u32; + let is_strict_width = (desc.attributes as u32 & RESOURCE_ATTR_STRICT_WIDTH) != 0; + + if is_strict_width { + // Strict width: address and size must match exactly + if io_address == desc_start && io_size == 
desc_size { + // Check if the access type matches + if (desc.attributes as u32 & access_mask) != 0 { + found_match = true; + break; + } + } + } else { + // Non-strict: check if our range is covered by this descriptor + let desc_end = desc_start.saturating_add(desc_size); + let our_end = io_address.saturating_add(io_size); + + if io_address >= desc_start && our_end <= desc_end { + // Check if the access type matches + if (desc.attributes as u32 & access_mask) != 0 { + found_match = true; + break; + } + } + } + } + + // Evaluate based on allow/deny list semantics + if (found_match && policy_root.access_attr == ACCESS_ATTR_DENY) + || (!found_match && policy_root.access_attr == ACCESS_ATTR_ALLOW) + { + log::debug!( + "Rejecting IO access: port=0x{:x}, width={}, type={:?}", + io_address, + io_size, + access_type + ); + return Err(PolicyError::AccessDenied); + } + + Ok(()) + } + + /// Checks if MSR access is allowed. + /// + /// # Arguments + /// + /// * `msr_address` - The MSR address + /// * `access_type` - Read or Write access + /// + /// # Returns + /// + /// Returns `Ok(())` if access is allowed, or `Err(PolicyError)` otherwise. 
+ pub fn is_msr_allowed(&self, msr_address: u32, access_type: AccessType) -> Result<(), PolicyError> { + // Validate access type + if access_type == AccessType::Execute { + return Err(PolicyError::InvalidAccessMask); + } + + let policy_root = match self.find_policy_root(TYPE_MSR) { + Some(root) => root, + None => { + log::warn!("Could not find MSR policy root, denying access to be safe."); + return Err(PolicyError::PolicyRootNotFound); + } + }; + + // SAFETY: We validated the policy in the constructor + let descriptors = unsafe { policy_root.get_msr_descriptors(self.policy_ptr) }; + let access_mask = access_type.as_attr_mask(); + + let mut found_match = false; + + for desc in descriptors { + let desc_start = desc.msr_address; + let desc_end = desc_start.saturating_add(desc.length as u32); + + if msr_address >= desc_start && msr_address < desc_end { + if (desc.attributes as u32 & access_mask) != 0 { + found_match = true; + break; + } + } + } + + // Evaluate based on allow/deny list semantics + if (found_match && policy_root.access_attr == ACCESS_ATTR_DENY) + || (!found_match && policy_root.access_attr == ACCESS_ATTR_ALLOW) + { + log::debug!( + "Rejecting MSR access: address=0x{:x}, type={:?}", + msr_address, + access_type + ); + return Err(PolicyError::AccessDenied); + } + + Ok(()) + } + + /// Checks if instruction execution is allowed. + /// + /// # Arguments + /// + /// * `instruction` - The instruction to check + /// + /// # Returns + /// + /// Returns `Ok(())` if execution is allowed, or `Err(PolicyError)` otherwise. 
+ pub fn is_instruction_allowed(&self, instruction: Instruction) -> Result<(), PolicyError> { + let instruction_index = instruction.as_index(); + + if instruction_index >= INSTRUCTION_COUNT { + return Err(PolicyError::InvalidInstructionIndex); + } + + let policy_root = match self.find_policy_root(TYPE_INSTRUCTION) { + Some(root) => root, + None => { + log::warn!("Could not find Instruction policy root, denying access to be safe."); + return Err(PolicyError::PolicyRootNotFound); + } + }; + + // SAFETY: We validated the policy in the constructor + let descriptors = unsafe { policy_root.get_instruction_descriptors(self.policy_ptr) }; + + let mut found_match = false; + + for desc in descriptors { + if instruction_index == desc.instruction_index { + if (desc.attributes as u32 & RESOURCE_ATTR_EXECUTE) != 0 { + found_match = true; + break; + } + } + } + + // Evaluate based on allow/deny list semantics + if (found_match && policy_root.access_attr == ACCESS_ATTR_DENY) + || (!found_match && policy_root.access_attr == ACCESS_ATTR_ALLOW) + { + log::debug!("Rejecting instruction execution: {:?}", instruction); + return Err(PolicyError::AccessDenied); + } + + Ok(()) + } + + /// Checks if save state read access is allowed. + /// + /// # Arguments + /// + /// * `field` - The save state field to read + /// * `width` - The access width in bytes + /// * `current_condition` - The current I/O trap condition (if applicable) + /// + /// # Returns + /// + /// Returns `Ok(())` if access is allowed, or `Err(PolicyError)` otherwise. 
+ pub fn is_save_state_read_allowed( + &self, + field: SaveStateField, + width: usize, + current_condition: Option, + ) -> Result<(), PolicyError> { + let policy_root = match self.find_policy_root(TYPE_SAVE_STATE) { + Some(root) => root, + None => { + // No save state policy = level 20, allow all reads + log::debug!("No save state policy root found, allowing read (level 20 policy)."); + return Ok(()); + } + }; + + // SAFETY: We validated the policy in the constructor + let descriptors = unsafe { policy_root.get_save_state_descriptors(self.policy_ptr) }; + + let mut found_match = false; + + for desc in descriptors { + if desc.map_field == field.as_index() { + // Check if this is a read-allowed policy + let is_read = (desc.attributes & RESOURCE_ATTR_READ) != 0; + let is_cond_read = (desc.attributes & RESOURCE_ATTR_COND_READ) != 0; + + if is_read || is_cond_read { + // Check condition if this is conditional read + if is_cond_read { + if let Some(current) = current_condition { + if desc.access_condition == current as u32 { + found_match = true; + break; + } + } + // Condition doesn't match, continue looking + } else { + // Unconditional read + if desc.access_condition == SVST_UNCONDITIONAL { + found_match = true; + break; + } + } + } + } + } + + // Evaluate based on allow/deny list semantics + if (found_match && policy_root.access_attr == ACCESS_ATTR_DENY) + || (!found_match && policy_root.access_attr == ACCESS_ATTR_ALLOW) + { + log::debug!( + "Rejecting save state read: field={:?}, width={}", + field, + width + ); + return Err(PolicyError::AccessDenied); + } + + Ok(()) + } + + /// Gets the raw policy pointer. + pub fn as_ptr(&self) -> *const u8 { + self.policy_ptr + } + + // ==================================================================== + // Memory Policy Snapshot + // ==================================================================== + + /// Returns `true` if the ready-to-lock snapshot has been taken. 
+ pub fn is_locked(&self) -> bool { + self.snapshot_count.get().is_some() + } + + /// Returns the snapshot descriptor count, or `None` if not yet locked. + pub fn snapshot_count(&self) -> Option { + self.snapshot_count.get().copied() + } + + /// Returns the firmware policy blob size (from `SecurePolicyDataV1_0::size`). + /// + /// Returns `0` if the policy pointer is null (should not happen after construction). + pub fn firmware_policy_size(&self) -> usize { + self.policy().size as usize + } + + /// Returns the raw memory policy buffer pointer. + /// + /// After [`take_snapshot`](Self::take_snapshot) this buffer contains the + /// snapshot descriptors. + pub fn memory_policy_buffer(&self) -> *const MemDescriptorV1_0 { + self.memory_policy_buffer as *const MemDescriptorV1_0 + } + + /// Returns the memory policy buffer capacity in descriptor count. + pub fn memory_policy_max_count(&self) -> usize { + self.memory_policy_max_count + } + + /// Takes a page-table memory policy snapshot and transitions to the locked + /// state. + /// + /// Walks the active page table, writes the resulting descriptors into the + /// memory policy buffer, and atomically saves the descriptor count. After + /// this call, [`is_locked`](Self::is_locked) returns `true`. + /// + /// If the gate is already locked, the snapshot is **not** re-taken and the + /// existing descriptor count is returned. + /// + /// # Safety + /// + /// * `cr3` must point to a valid, stable PML4 table. + /// * The memory policy buffer (set via [`set_memory_policy_buffer`]) + /// must still be valid and large enough. + pub unsafe fn take_snapshot( + &self, + cr3: u64, + is_inside_mmram: IsInsideMmramFn, + ) -> Result { + // Idempotent: if already locked, return the saved count. 
+ if let Some(&count) = self.snapshot_count.get() { + return Ok(count); + } + + if self.memory_policy_buffer.is_null() || self.memory_policy_max_count == 0 { + log::error!("take_snapshot: memory policy buffer not configured"); + return Err(PolicyError::InternalError); + } + + // SAFETY: The caller guarantees that `cr3` points to a valid PML4 and + // that the memory policy buffer (set via `set_memory_policy_buffer`) is + // valid and can hold `memory_policy_max_count` descriptors. + let count = unsafe { + walk_page_table(cr3, self.memory_policy_buffer, self.memory_policy_max_count, is_inside_mmram) + }.map_err(|e| { + log::error!("take_snapshot: walk_page_table failed: {:?}", e); + PolicyError::InternalError + })?; + + self.snapshot_count.call_once(|| count); + log::info!("Policy snapshot taken: {} descriptors, ready-to-lock is now TRUE", count); + Ok(count) + } + + /// Verifies that the current page table still matches the saved snapshot. + /// + /// The caller must provide a scratch buffer (typically allocated from the + /// page allocator) large enough to hold the walk results. This avoids + /// overwriting the saved snapshot during comparison. + /// + /// Returns `Ok(())` if the tables match, or `Err(PolicyError::AccessDenied)` + /// if they differ ("security violation"). + /// + /// # Safety + /// + /// * `cr3` must point to a valid, stable PML4 table. + /// * `scratch` must point to a buffer of at least `scratch_max_count` + /// `MemDescriptorV1_0` entries. + pub unsafe fn verify_snapshot( + &self, + cr3: u64, + is_inside_mmram: IsInsideMmramFn, + scratch: *mut MemDescriptorV1_0, + scratch_max_count: usize, + ) -> Result<(), PolicyError> { + let saved_count = match self.snapshot_count.get() { + Some(&c) => c, + None => { + log::warn!("verify_snapshot: no snapshot available, skipping verification"); + return Ok(()); + } + }; + + // SAFETY: The caller guarantees that `cr3` points to a valid PML4 and + // that `scratch` can hold `scratch_max_count` descriptors. 
The saved + // snapshot buffer (`self.memory_policy_buffer`) was populated by a + // prior `take_snapshot` call with `saved_count` entries. + unsafe { + let fresh_count = walk_page_table(cr3, scratch, scratch_max_count, is_inside_mmram) + .map_err(|e| { + log::error!("verify_snapshot: walk_page_table failed: {:?}", e); + PolicyError::InternalError + })?; + + if fresh_count != saved_count { + log::error!( + "verify_snapshot: descriptor count mismatch (saved={}, fresh={})", + saved_count, fresh_count, + ); + return Err(PolicyError::AccessDenied); + } + + let snapshot_ptr = self.memory_policy_buffer as *const MemDescriptorV1_0; + for i in 0..saved_count { + let saved = core::ptr::read(snapshot_ptr.add(i)); + let fresh = core::ptr::read(scratch.add(i)); + if saved != fresh { + log::error!( + "verify_snapshot: descriptor {} mismatch - \ + saved=(base=0x{:x}, size=0x{:x}, attrs=0x{:x}) vs \ + fresh=(base=0x{:x}, size=0x{:x}, attrs=0x{:x})", + i, + saved.base_address, saved.size, saved.mem_attributes, + fresh.base_address, fresh.size, fresh.mem_attributes, + ); + return Err(PolicyError::AccessDenied); + } + } + } + + log::info!( + "verify_snapshot: page table matches saved snapshot ({} descriptors)", + saved_count, + ); + Ok(()) + } + + /// Writes the merged firmware + memory policy into `dest`. + /// + /// Mirrors the C `FetchNUpdateSecurityPolicy` function. The caller is + /// responsible for ensuring the snapshot has been taken first (via + /// [`take_snapshot`](Self::take_snapshot)). 
+ /// + /// ## Layout written to `dest` + /// + /// ```text + /// |--------------------------------------| + /// | SecurePolicyDataV1_0 + payload | <- firmware policy blob (copied first) + /// |--------------------------------------| + /// | MemDescriptorV1_0[0..N] | <- memory policy snapshot (appended) + /// |--------------------------------------| + /// ``` + /// + /// After the copy the function patches the header in-place: + /// + /// * The `TYPE_MEM` policy root's `offset` → `fw_size` and `count` → snapshot count + /// * The header's `size` → `fw_size + mem_policy_bytes` + /// * The legacy `memory_policy_count` field is zeroed (unused with root-based layout) + /// + /// Note: the caller is responsible for writing/reserving any request header + /// *before* the region pointed to by `dest`. + /// + /// # Arguments + /// + /// * `dest` - Destination buffer for the merged policy. + /// * `dest_size` - Available bytes at `dest`. + /// + /// # Returns + /// + /// The total number of bytes written to `dest`. + /// + /// # Safety + /// + /// * `dest` must point to a writable buffer of at least `dest_size` bytes. 
+ pub unsafe fn fetch_n_update_policy( + &self, + dest: *mut u8, + dest_size: usize, + ) -> Result { + let count = self.snapshot_count.get().copied().ok_or_else(|| { + log::error!("fetch_n_update_policy: no snapshot taken"); + PolicyError::InternalError + })?; + + let desc_size = core::mem::size_of::(); + let mem_policy_bytes = count.checked_mul(desc_size).ok_or_else(|| { + log::error!("fetch_n_update_policy: descriptor count overflow"); + PolicyError::InternalError + })?; + + let fw_size = self.firmware_policy_size(); + if fw_size == 0 { + log::error!("fetch_n_update_policy: firmware policy size is 0"); + return Err(PolicyError::InternalError); + } + + let total_bytes = fw_size.checked_add(mem_policy_bytes).ok_or_else(|| { + log::error!("fetch_n_update_policy: total size overflow"); + PolicyError::InternalError + })?; + + if dest_size < total_bytes { + log::error!( + "fetch_n_update_policy: buffer too small ({} bytes, need {})", + dest_size, + total_bytes, + ); + return Err(PolicyError::InternalError); + } + + // SAFETY: The caller guarantees that `dest` is writable for at least + // `dest_size` bytes (verified >= `total_bytes` above). + // `self.policy_ptr` points to a valid firmware policy blob of `fw_size` + // bytes (validated at construction). The memory policy buffer holds + // `count` valid descriptors from a prior `take_snapshot` call. + unsafe { + // 1. Copy the firmware policy blob first (header + payload). + core::ptr::copy_nonoverlapping(self.policy_ptr, dest, fw_size); + + // 2. Append memory policy descriptors after the firmware blob. + if mem_policy_bytes > 0 { + let mem_dest = dest.add(fw_size); + let src = self.memory_policy_buffer as *const u8; + core::ptr::copy_nonoverlapping(src, mem_dest, mem_policy_bytes); + } + + // 3. Fix up the copied header to reflect the appended memory policy. + let header = &mut *(dest as *mut SecurePolicyDataV1_0); + + // Find the TYPE_MEM policy root and patch its offset/count. 
+ let roots_ptr = (dest as *mut u8).add(header.policy_root_offset as usize) + as *mut PolicyRootV1; + let mut found_mem_root = false; + for i in 0..header.policy_root_count as usize { + let root = &mut *roots_ptr.add(i); + if root.policy_type == TYPE_MEM { + root.access_attr = ACCESS_ATTR_ALLOW; + root.offset = fw_size as u32; + root.count = count as u32; + found_mem_root = true; + break; + } + } + + if !found_mem_root { + log::error!( + "fetch_n_update_policy: firmware policy has no TYPE_MEM policy root" + ); + return Err(PolicyError::PolicyRootNotFound); + } + + // Update the total size and clear the legacy memory_policy_count. + header.size = total_bytes as u32; + header.memory_policy_count = 0; + } + + log::info!( + "fetch_n_update_policy: wrote {} bytes (fw_policy={}, mem_policy={} ({} descs))", + total_bytes, + fw_size, + mem_policy_bytes, + count, + ); + Ok(total_bytes) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_io_width() { + assert_eq!(IoWidth::Byte.size(), 1); + assert_eq!(IoWidth::Word.size(), 2); + assert_eq!(IoWidth::Dword.size(), 4); + } + + #[test] + fn test_access_type_mask() { + assert_eq!(AccessType::Read.as_attr_mask(), RESOURCE_ATTR_READ); + assert_eq!(AccessType::Write.as_attr_mask(), RESOURCE_ATTR_WRITE); + assert_eq!(AccessType::Execute.as_attr_mask(), RESOURCE_ATTR_EXECUTE); + } + + #[test] + fn test_instruction_conversion() { + assert_eq!(Instruction::Cli.as_index(), 0); + assert_eq!(Instruction::from_index(0), Some(Instruction::Cli)); + assert_eq!(Instruction::from_index(99), None); + } +} diff --git a/components/patina_mm_policy/src/helpers.rs b/components/patina_mm_policy/src/helpers.rs new file mode 100644 index 000000000..b164970e2 --- /dev/null +++ b/components/patina_mm_policy/src/helpers.rs @@ -0,0 +1,1107 @@ +//! Policy Helper Functions +//! +//! This module provides utility functions for policy manipulation: +//! - Dump/print policy for debugging +//! - Compare two policies (order-independent) +//! 
- Page table walking to generate memory policy + +use crate::types::*; +use core::mem::size_of; + +// ============================================================================ +// Policy Dumping +// ============================================================================ + +/// Dumps a single memory policy entry for debugging. +pub fn dump_mem_policy_entry(desc: &MemDescriptorV1_0) { + let r = if (desc.mem_attributes & RESOURCE_ATTR_READ) != 0 { "R" } else { "." }; + let w = if (desc.mem_attributes & RESOURCE_ATTR_WRITE) != 0 { "W" } else { "." }; + let x = if (desc.mem_attributes & RESOURCE_ATTR_EXECUTE) != 0 { "X" } else { "." }; + + log::info!( + " MEM: [0x{:016x}-0x{:016x}] {}{}{}", + desc.base_address, + desc.base_address.saturating_add(desc.size).saturating_sub(1), + r, w, x + ); +} + +/// Dumps policy data for debugging (like `DumpSmmPolicyData`). +/// +/// # Safety +/// +/// The caller must ensure that `policy_ptr` points to a valid policy buffer. +pub unsafe fn dump_policy(policy_ptr: *const u8) { + if policy_ptr.is_null() { + log::error!("dump_policy: null pointer"); + return; + } + + let policy = unsafe { &*(policy_ptr as *const SecurePolicyDataV1_0) }; + + log::info!("SMM_SUPV_SECURE_POLICY_DATA_V1_0:"); + log::info!(" Version: {}.{}", policy.version_major, policy.version_minor); + log::info!(" Size: 0x{:x}", policy.size); + log::info!(" MemoryPolicyOffset: 0x{:x}", policy.memory_policy_offset); + log::info!(" MemoryPolicyCount: 0x{:x}", policy.memory_policy_count); + log::info!(" Flags: 0x{:x}", policy.flags); + log::info!(" Capabilities: 0x{:x}", policy.capabilities); + log::info!(" PolicyRootOffset: 0x{:x}", policy.policy_root_offset); + log::info!(" PolicyRootCount: 0x{:x}", policy.policy_root_count); + + let policy_roots = unsafe { policy.get_policy_roots() }; + + for (i, root) in policy_roots.iter().enumerate() { + log::info!("Policy Root {}:", i); + log::info!(" Version: {}", root.version); + log::info!(" PolicyRootSize: {}", 
root.policy_root_size); + log::info!(" Type: {}", root.policy_type); + log::info!(" Offset: 0x{:x}", root.offset); + log::info!(" Count: {}", root.count); + log::info!( + " AccessAttr: {}", + if root.access_attr == ACCESS_ATTR_ALLOW { "ALLOW" } else { "DENY" } + ); + + match root.policy_type { + TYPE_MEM => { + let descriptors = unsafe { root.get_mem_descriptors(policy_ptr) }; + for desc in descriptors { + dump_mem_policy_entry(desc); + } + } + TYPE_IO => { + let descriptors = unsafe { root.get_io_descriptors(policy_ptr) }; + for desc in descriptors { + let r = if (desc.attributes as u32 & RESOURCE_ATTR_READ) != 0 { "R" } else { "." }; + let w = if (desc.attributes as u32 & RESOURCE_ATTR_WRITE) != 0 { "W" } else { "." }; + log::info!( + " IO: [0x{:04x}-0x{:04x}] {}{}", + desc.io_address, + (desc.io_address as u32) + .saturating_add(desc.length_or_width as u32) + .saturating_sub(1), + r, w + ); + } + } + TYPE_MSR => { + let descriptors = unsafe { root.get_msr_descriptors(policy_ptr) }; + for desc in descriptors { + let r = if (desc.attributes as u32 & RESOURCE_ATTR_READ) != 0 { "R" } else { "." }; + let w = if (desc.attributes as u32 & RESOURCE_ATTR_WRITE) != 0 { "W" } else { "." }; + log::info!( + " MSR: [0x{:08x}-0x{:08x}] {}{}", + desc.msr_address, + desc.msr_address + .saturating_add(desc.length as u32) + .saturating_sub(1), + r, w + ); + } + } + TYPE_INSTRUCTION => { + let descriptors = unsafe { root.get_instruction_descriptors(policy_ptr) }; + for desc in descriptors { + let name = match desc.instruction_index { + 0 => "CLI", + 1 => "WBINVD", + 2 => "HLT", + _ => "UNKNOWN", + }; + let x = if (desc.attributes as u32 & RESOURCE_ATTR_EXECUTE) != 0 { "X" } else { "." 
}; + log::info!(" INSTRUCTION: {} {}", name, x); + } + } + TYPE_SAVE_STATE => { + let descriptors = unsafe { root.get_save_state_descriptors(policy_ptr) }; + for desc in descriptors { + let field = match desc.map_field { + 0 => "RAX", + 1 => "IO_TRAP", + _ => "UNKNOWN", + }; + let condition = match desc.access_condition { + 0 => "Unconditional", + 1 => "IoRead", + 2 => "IoWrite", + _ => "Unknown", + }; + log::info!( + " SAVESTATE: {} attr=0x{:x} cond={}", + field, + desc.attributes, + condition + ); + } + } + _ => { + log::error!(" Unknown policy type: {}", root.policy_type); + } + } + } +} + +// ============================================================================ +// Policy Comparison +// ============================================================================ + +/// Compares two policies of a given type (order-independent). +/// +/// # Safety +/// +/// The caller must ensure that both policy pointers are valid. +pub unsafe fn compare_policy_with_type( + policy1_ptr: *const u8, + policy2_ptr: *const u8, + policy_type: u32, +) -> bool { + if policy1_ptr.is_null() || policy2_ptr.is_null() { + return false; + } + + let policy1 = unsafe { &*(policy1_ptr as *const SecurePolicyDataV1_0) }; + let policy2 = unsafe { &*(policy2_ptr as *const SecurePolicyDataV1_0) }; + + // Find policy roots for the given type + let roots1 = unsafe { policy1.get_policy_roots() }; + let roots2 = unsafe { policy2.get_policy_roots() }; + + let root1 = roots1.iter().find(|r| r.policy_type == policy_type); + let root2 = roots2.iter().find(|r| r.policy_type == policy_type); + + match (root1, root2) { + (None, None) => true, // Neither has this type + (Some(_), None) | (None, Some(_)) => false, // Only one has this type + (Some(r1), Some(r2)) => { + // Both have this type, compare + if r1.count != r2.count || r1.access_attr != r2.access_attr { + return false; + } + + // Compare descriptors (order-independent) + match policy_type { + TYPE_MEM => { + let descs1 = unsafe { 
r1.get_mem_descriptors(policy1_ptr) }; + let descs2 = unsafe { r2.get_mem_descriptors(policy2_ptr) }; + compare_mem_descriptors(descs1, descs2) + } + TYPE_IO => { + let descs1 = unsafe { r1.get_io_descriptors(policy1_ptr) }; + let descs2 = unsafe { r2.get_io_descriptors(policy2_ptr) }; + compare_io_descriptors(descs1, descs2) + } + TYPE_MSR => { + let descs1 = unsafe { r1.get_msr_descriptors(policy1_ptr) }; + let descs2 = unsafe { r2.get_msr_descriptors(policy2_ptr) }; + compare_msr_descriptors(descs1, descs2) + } + TYPE_INSTRUCTION => { + let descs1 = unsafe { r1.get_instruction_descriptors(policy1_ptr) }; + let descs2 = unsafe { r2.get_instruction_descriptors(policy2_ptr) }; + compare_instruction_descriptors(descs1, descs2) + } + TYPE_SAVE_STATE => { + let descs1 = unsafe { r1.get_save_state_descriptors(policy1_ptr) }; + let descs2 = unsafe { r2.get_save_state_descriptors(policy2_ptr) }; + compare_save_state_descriptors(descs1, descs2) + } + _ => false, + } + } + } +} + +/// Compares memory policies (convenience wrapper). +/// +/// # Safety +/// +/// The caller must ensure that both policy pointers are valid. 
+pub unsafe fn compare_memory_policy(policy1_ptr: *const u8, policy2_ptr: *const u8) -> bool { + unsafe { compare_policy_with_type(policy1_ptr, policy2_ptr, TYPE_MEM) } +} + +// Helper functions for order-independent comparison + +fn compare_mem_descriptors(descs1: &[MemDescriptorV1_0], descs2: &[MemDescriptorV1_0]) -> bool { + if descs1.len() != descs2.len() { + return false; + } + + // For each descriptor in descs1, check if it exists in descs2 + for d1 in descs1 { + let found = descs2.iter().any(|d2| { + d1.base_address == d2.base_address + && d1.size == d2.size + && d1.mem_attributes == d2.mem_attributes + }); + if !found { + return false; + } + } + true +} + +fn compare_io_descriptors(descs1: &[IoDescriptorV1_0], descs2: &[IoDescriptorV1_0]) -> bool { + if descs1.len() != descs2.len() { + return false; + } + + for d1 in descs1 { + let found = descs2.iter().any(|d2| { + d1.io_address == d2.io_address + && d1.length_or_width == d2.length_or_width + && d1.attributes == d2.attributes + }); + if !found { + return false; + } + } + true +} + +fn compare_msr_descriptors(descs1: &[MsrDescriptorV1_0], descs2: &[MsrDescriptorV1_0]) -> bool { + if descs1.len() != descs2.len() { + return false; + } + + for d1 in descs1 { + let found = descs2.iter().any(|d2| { + d1.msr_address == d2.msr_address + && d1.length == d2.length + && d1.attributes == d2.attributes + }); + if !found { + return false; + } + } + true +} + +fn compare_instruction_descriptors( + descs1: &[InstructionDescriptorV1_0], + descs2: &[InstructionDescriptorV1_0], +) -> bool { + if descs1.len() != descs2.len() { + return false; + } + + for d1 in descs1 { + let found = descs2.iter().any(|d2| { + d1.instruction_index == d2.instruction_index && d1.attributes == d2.attributes + }); + if !found { + return false; + } + } + true +} + +fn compare_save_state_descriptors( + descs1: &[SaveStateDescriptorV1_0], + descs2: &[SaveStateDescriptorV1_0], +) -> bool { + if descs1.len() != descs2.len() { + return false; + } + + 
for d1 in descs1 { + let found = descs2.iter().any(|d2| { + d1.map_field == d2.map_field + && d1.attributes == d2.attributes + && d1.access_condition == d2.access_condition + }); + if !found { + return false; + } + } + true +} + +// ============================================================================ +// Policy Validation +// ============================================================================ + +/// Errors that can occur during policy validation. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PolicyCheckError { + /// The policy pointer is null. + NullPointer, + /// Invalid policy version. + InvalidVersion { major: u16, minor: u16 }, + /// A reserved field contains non-zero data. + InvalidReservedField { policy_type: u32, entry_index: usize }, + /// The same policy type appears multiple times. + DuplicatePolicyType { policy_type: u32 }, + /// Overlapping entries detected. + OverlappingEntries { policy_type: u32, entry1: usize, entry2: usize }, + /// Duplicate entries detected. + DuplicateEntries { policy_type: u32, entry1: usize, entry2: usize }, + /// Size mismatch. + SizeMismatch { expected: usize, declared: usize }, + /// Unrecognized policy type. + UnrecognizedPolicyType { policy_type: u32 }, + /// Unrecognized header bits. + UnrecognizedHeaderBits, + /// Unsupported attribute. + UnsupportedAttribute { policy_type: u32, entry_index: usize, attributes: u32 }, + /// Conflicting condition. + ConflictingCondition { entry_index: usize }, + /// Legacy memory policy detected. + LegacyMemoryPolicyDetected, + /// Range overflow. + RangeOverflow, +} + +/// Performs comprehensive security policy validation. +/// +/// # Safety +/// +/// The caller must ensure that `policy_ptr` points to a valid policy buffer. 
+pub unsafe fn security_policy_check(policy_ptr: *const u8) -> Result<(), PolicyCheckError> { + if policy_ptr.is_null() { + return Err(PolicyCheckError::NullPointer); + } + + let policy = unsafe { &*(policy_ptr as *const SecurePolicyDataV1_0) }; + + log::info!("Security policy check entry..."); + + // Version check + if !policy.is_valid_version() { + return Err(PolicyCheckError::InvalidVersion { + major: policy.version_major, + minor: policy.version_minor, + }); + } + + // Check for unrecognized header bits + if policy.reserved != 0 || policy.flags != 0 || policy.capabilities != 0 { + return Err(PolicyCheckError::UnrecognizedHeaderBits); + } + + let mut total_scanned_size = size_of::(); + let mut type_flags: u64 = 0; + + let policy_roots = unsafe { policy.get_policy_roots() }; + + for root in policy_roots.iter() { + let type_bit = 1u64 << root.policy_type; + + if (type_flags & type_bit) != 0 { + return Err(PolicyCheckError::DuplicatePolicyType { + policy_type: root.policy_type, + }); + } + type_flags |= type_bit; + + if !root.has_valid_reserved() { + return Err(PolicyCheckError::InvalidReservedField { + policy_type: root.policy_type, + entry_index: 0, + }); + } + + match root.policy_type { + TYPE_IO => { + unsafe { validate_io_policy(policy_ptr, root)? }; + total_scanned_size += (root.count as usize) * size_of::(); + } + TYPE_MEM => { + unsafe { validate_mem_policy(policy_ptr, root)? }; + total_scanned_size += (root.count as usize) * size_of::(); + } + TYPE_MSR => { + unsafe { validate_msr_policy(policy_ptr, root)? }; + total_scanned_size += (root.count as usize) * size_of::(); + } + TYPE_INSTRUCTION => { + unsafe { validate_instruction_policy(policy_ptr, root)? }; + total_scanned_size += (root.count as usize) * size_of::(); + } + TYPE_SAVE_STATE => { + unsafe { validate_save_state_policy(policy_ptr, root)? 
}; + total_scanned_size += (root.count as usize) * size_of::(); + } + _ => { + return Err(PolicyCheckError::UnrecognizedPolicyType { + policy_type: root.policy_type, + }); + } + } + + total_scanned_size += size_of::(); + } + + if policy.memory_policy_count != 0 { + return Err(PolicyCheckError::LegacyMemoryPolicyDetected); + } + + if total_scanned_size != policy.size as usize { + return Err(PolicyCheckError::SizeMismatch { + expected: total_scanned_size, + declared: policy.size as usize, + }); + } + + log::info!("Security policy check passed."); + Ok(()) +} + +// Validation helper functions + +unsafe fn validate_io_policy(policy_base: *const u8, root: &PolicyRootV1) -> Result<(), PolicyCheckError> { + let descriptors = unsafe { root.get_io_descriptors(policy_base) }; + + for (i, desc) in descriptors.iter().enumerate() { + if desc.reserved != 0 { + return Err(PolicyCheckError::InvalidReservedField { + policy_type: TYPE_IO, + entry_index: i, + }); + } + } + Ok(()) +} + +unsafe fn validate_mem_policy(policy_base: *const u8, root: &PolicyRootV1) -> Result<(), PolicyCheckError> { + let descriptors = unsafe { root.get_mem_descriptors(policy_base) }; + + for (i, desc) in descriptors.iter().enumerate() { + if desc.reserved != 0 { + return Err(PolicyCheckError::InvalidReservedField { + policy_type: TYPE_MEM, + entry_index: i, + }); + } + } + Ok(()) +} + +unsafe fn validate_msr_policy(_policy_base: *const u8, _root: &PolicyRootV1) -> Result<(), PolicyCheckError> { + // MSR descriptors don't have reserved fields + Ok(()) +} + +unsafe fn validate_instruction_policy(policy_base: *const u8, root: &PolicyRootV1) -> Result<(), PolicyCheckError> { + let descriptors = unsafe { root.get_instruction_descriptors(policy_base) }; + + for (i, desc) in descriptors.iter().enumerate() { + if desc.reserved != 0 { + return Err(PolicyCheckError::InvalidReservedField { + policy_type: TYPE_INSTRUCTION, + entry_index: i, + }); + } + } + Ok(()) +} + +unsafe fn validate_save_state_policy(policy_base: 
*const u8, root: &PolicyRootV1) -> Result<(), PolicyCheckError> { + let descriptors = unsafe { root.get_save_state_descriptors(policy_base) }; + + for (i, desc) in descriptors.iter().enumerate() { + // Check for unsupported write attributes + if (desc.attributes & (RESOURCE_ATTR_WRITE | RESOURCE_ATTR_COND_WRITE)) != 0 { + return Err(PolicyCheckError::UnsupportedAttribute { + policy_type: TYPE_SAVE_STATE, + entry_index: i, + attributes: desc.attributes, + }); + } + + // Check for conflicting conditions + if (desc.attributes & RESOURCE_ATTR_COND_READ) == 0 + && desc.access_condition != SVST_UNCONDITIONAL + { + return Err(PolicyCheckError::ConflictingCondition { entry_index: i }); + } + + if desc.reserved != 0 { + return Err(PolicyCheckError::InvalidReservedField { + policy_type: TYPE_SAVE_STATE, + entry_index: i, + }); + } + } + Ok(()) +} + +// ============================================================================ +// Page Table Walking (Memory Policy Generation) +// ============================================================================ + +/// Memory policy builder for collecting memory descriptors from page table walking. +/// +/// This is used to generate memory policy from page table entries. +pub struct MemoryPolicyBuilder { + /// Current descriptor being built + current: Option, + /// Maximum number of descriptors we can store + max_count: usize, + /// Buffer for descriptors + buffer_ptr: *mut MemDescriptorV1_0, + /// Current count of descriptors + count: usize, +} + +impl MemoryPolicyBuilder { + /// Creates a new memory policy builder. + /// + /// # Safety + /// + /// The caller must ensure that `buffer_ptr` points to a valid buffer + /// with space for at least `max_count` descriptors. + pub unsafe fn new(buffer_ptr: *mut MemDescriptorV1_0, max_count: usize) -> Self { + Self { + current: None, + max_count, + buffer_ptr, + count: 0, + } + } + + /// Adds a memory region to the policy. 
+ /// + /// Adjacent regions with the same attributes will be coalesced. + /// + /// # Returns + /// + /// Returns `Ok(())` if successful, or `Err(())` if the buffer is full. + pub fn add_region(&mut self, base: u64, size: u64, attributes: u32) -> Result<(), ()> { + let new_desc = MemDescriptorV1_0 { + base_address: base, + size, + mem_attributes: attributes, + reserved: 0, + }; + + if let Some(ref mut current) = self.current { + // Check if we can coalesce with current + let current_end = current.base_address.saturating_add(current.size); + if base == current_end && attributes == current.mem_attributes { + // Coalesce + current.size = current.size.saturating_add(size); + return Ok(()); + } else { + // Flush current and start new + self.flush_current()?; + } + } + + self.current = Some(new_desc); + Ok(()) + } + + /// Flushes the current descriptor to the buffer. + fn flush_current(&mut self) -> Result<(), ()> { + if let Some(desc) = self.current.take() { + if self.count >= self.max_count { + return Err(()); + } + + // SAFETY: We checked bounds + unsafe { + *self.buffer_ptr.add(self.count) = desc; + } + self.count += 1; + } + Ok(()) + } + + /// Finishes building and returns the count of descriptors. + pub fn finish(mut self) -> Result { + self.flush_current()?; + Ok(self.count) + } + + /// Gets the current count of descriptors. + pub fn count(&self) -> usize { + self.count + if self.current.is_some() { 1 } else { 0 } + } +} + +// ============================================================================ +// Page Table Walking (x86_64 4-Level Paging) +// ============================================================================ + +/// Page table entry for x86_64 4-level paging. +/// +/// This structure represents entries in PML4, PDPE, PDE, and PTE tables. +#[repr(C)] +#[derive(Clone, Copy, Debug)] +pub struct PageTableEntry { + /// Raw 64-bit value of the page table entry. 
+ pub value: u64, +} + +impl PageTableEntry { + /// Present bit (bit 0): Entry is valid if set. + pub const PRESENT: u64 = 1 << 0; + /// Read/Write bit (bit 1): Writable if set. + pub const READ_WRITE: u64 = 1 << 1; + /// User/Supervisor bit (bit 2): User-mode accessible if set. + pub const USER_SUPERVISOR: u64 = 1 << 2; + /// Page Size bit (bit 7): Large page (2MB or 1GB) if set. + pub const PAGE_SIZE: u64 = 1 << 7; + /// No Execute bit (bit 63): Not executable if set. + pub const NO_EXECUTE: u64 = 1 << 63; + + /// Address mask for 4KB page table base addresses. + pub const ADDR_MASK_4K: u64 = 0x000F_FFFF_FFFF_F000; + /// Address mask for 2MB large page addresses. + pub const ADDR_MASK_2M: u64 = 0x000F_FFFF_FFE0_0000; + /// Address mask for 1GB huge page addresses. + pub const ADDR_MASK_1G: u64 = 0x000F_FFFF_C000_0000; + + /// Size of a 4KB page. + pub const SIZE_4K: u64 = 0x1000; + /// Size of a 2MB large page. + pub const SIZE_2M: u64 = 0x20_0000; + /// Size of a 1GB huge page. + pub const SIZE_1G: u64 = 0x4000_0000; + + /// Creates a new page table entry from a raw value. + #[inline] + pub const fn new(value: u64) -> Self { + Self { value } + } + + /// Returns true if the entry is present. + #[inline] + pub const fn is_present(&self) -> bool { + (self.value & Self::PRESENT) != 0 + } + + /// Returns true if the entry is writable. + #[inline] + pub const fn is_writable(&self) -> bool { + (self.value & Self::READ_WRITE) != 0 + } + + /// Returns true if the entry is user-mode accessible. + #[inline] + pub const fn is_user(&self) -> bool { + (self.value & Self::USER_SUPERVISOR) != 0 + } + + /// Returns true if this is a large/huge page (PS bit set). + #[inline] + pub const fn is_large_page(&self) -> bool { + (self.value & Self::PAGE_SIZE) != 0 + } + + /// Returns true if the page is executable (NX bit NOT set). 
+ #[inline] + pub const fn is_executable(&self) -> bool { + (self.value & Self::NO_EXECUTE) == 0 + } + + /// Gets the physical address of the next-level page table (4KB aligned). + #[inline] + pub const fn next_table_addr(&self) -> u64 { + self.value & Self::ADDR_MASK_4K + } + + /// Gets the physical address of a 2MB large page. + #[inline] + pub const fn large_page_addr(&self) -> u64 { + self.value & Self::ADDR_MASK_2M + } + + /// Gets the physical address of a 1GB huge page. + #[inline] + pub const fn huge_page_addr(&self) -> u64 { + self.value & Self::ADDR_MASK_1G + } + + /// Gets the physical address of a 4KB page. + #[inline] + pub const fn page_addr(&self) -> u64 { + self.value & Self::ADDR_MASK_4K + } + + /// Converts page table entry attributes to policy memory attributes. + /// + /// Inherits R/W/X permissions from upper-level entries. + #[inline] + pub fn to_policy_attrs(&self, inherited_attrs: u32) -> u32 { + if !self.is_present() { + return 0; + } + + let mut attrs = RESOURCE_ATTR_READ; + + if self.is_writable() { + attrs |= RESOURCE_ATTR_WRITE; + } + + if self.is_executable() { + attrs |= RESOURCE_ATTR_EXECUTE; + } + + // Inherit restrictions from upper-level tables + attrs & inherited_attrs + } +} + +/// Errors that can occur during page table walking. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PageTableWalkError { + /// Buffer is full, cannot add more descriptors. + BufferFull, + /// The CR3 value is invalid (null). + InvalidCr3, + /// Paging is not in the expected mode. + UnsupportedPagingMode, +} + +/// Callback type for checking if a buffer is inside MMRAM. +/// +/// Returns `true` if the buffer `[base, base + size)` is fully inside MMRAM. +pub type IsInsideMmramFn = fn(base: u64, size: u64) -> bool; + +/// Walks x86_64 4-level page tables and generates memory policy descriptors. 
+/// +/// This function traverses the page table hierarchy starting from the PML4 +/// table (pointed to by CR3), and for each mapped page, generates a memory +/// policy descriptor with the effective R/W/X attributes. +/// +/// Adjacent pages with the same attributes are coalesced into single descriptors. +/// +/// # Arguments +/// +/// * `cr3` - The CR3 register value (physical address of PML4 table) +/// * `buffer` - Buffer to store the generated memory descriptors +/// * `max_count` - Maximum number of descriptors the buffer can hold +/// * `is_inside_mmram` - Callback to check if a region is inside MMRAM +/// (regions fully inside MMRAM are skipped) +/// +/// # Returns +/// +/// The number of memory descriptors generated, or an error. +/// +/// # Safety +/// +/// The caller must ensure that: +/// - `cr3` points to a valid PML4 table +/// - `buffer` has space for at least `max_count` descriptors +/// - The page table memory is accessible and won't change during the walk +/// +/// # Example +/// +/// ```rust,ignore +/// use patina_mm_policy::{walk_page_table, MemDescriptorV1_0, default_mmram_check}; +/// +/// let cr3 = read_cr3(); // Read from hardware +/// let mut buffer = [MemDescriptorV1_0::default(); 1024]; +/// +/// let count = unsafe { +/// walk_page_table(cr3, buffer.as_mut_ptr(), buffer.len(), default_mmram_check)? 
+/// }; +/// +/// println!("Generated {} memory policy descriptors", count); +/// ``` +pub unsafe fn walk_page_table( + cr3: u64, + buffer: *mut MemDescriptorV1_0, + max_count: usize, + is_inside_mmram: IsInsideMmramFn, +) -> Result { + if cr3 == 0 || buffer.is_null() { + return Err(PageTableWalkError::InvalidCr3); + } + + let pml4_base = cr3 & PageTableEntry::ADDR_MASK_4K; + let mut builder = unsafe { MemoryPolicyBuilder::new(buffer, max_count) }; + + // Walk PML4 (512 entries) + let pml4_table = pml4_base as *const PageTableEntry; + + for i in 0..512u64 { + let pml4e = unsafe { *pml4_table.add(i as usize) }; + if !pml4e.is_present() { + continue; + } + + // Calculate inherited attributes from PML4 entry + let pml4_attrs = pml4e.to_policy_attrs( + RESOURCE_ATTR_READ | RESOURCE_ATTR_WRITE | RESOURCE_ATTR_EXECUTE, + ); + + // Walk PDPE (512 entries) + let pdpe_table = pml4e.next_table_addr() as *const PageTableEntry; + + for j in 0..512u64 { + let pdpe = unsafe { *pdpe_table.add(j as usize) }; + if !pdpe.is_present() { + continue; + } + + let pdpe_attrs = pdpe.to_policy_attrs(pml4_attrs); + + // Check for 1GB huge page + if pdpe.is_large_page() { + let page_addr = pdpe.huge_page_addr(); + let page_size = PageTableEntry::SIZE_1G; + + // Skip if fully inside MMRAM + if is_inside_mmram(page_addr, page_size) { + continue; + } + + if builder.add_region(page_addr, page_size, pdpe_attrs).is_err() { + return Err(PageTableWalkError::BufferFull); + } + continue; + } + + // Walk PDE (512 entries) + let pde_table = pdpe.next_table_addr() as *const PageTableEntry; + + for k in 0..512u64 { + let pde = unsafe { *pde_table.add(k as usize) }; + if !pde.is_present() { + continue; + } + + let pde_attrs = pde.to_policy_attrs(pdpe_attrs); + + // Check for 2MB large page + if pde.is_large_page() { + let page_addr = pde.large_page_addr(); + let page_size = PageTableEntry::SIZE_2M; + + // Skip if fully inside MMRAM + if is_inside_mmram(page_addr, page_size) { + continue; + } + + if 
builder.add_region(page_addr, page_size, pde_attrs).is_err() { + return Err(PageTableWalkError::BufferFull); + } + continue; + } + + // Walk PTE (512 entries) + let pte_table = pde.next_table_addr() as *const PageTableEntry; + + for l in 0..512u64 { + let pte = unsafe { *pte_table.add(l as usize) }; + if !pte.is_present() { + continue; + } + + let page_addr = pte.page_addr(); + let page_size = PageTableEntry::SIZE_4K; + + // Skip if fully inside MMRAM + if is_inside_mmram(page_addr, page_size) { + continue; + } + + let pte_attrs = pte.to_policy_attrs(pde_attrs); + + if builder.add_region(page_addr, page_size, pte_attrs).is_err() { + return Err(PageTableWalkError::BufferFull); + } + } + } + } + } + + builder.finish().map_err(|()| PageTableWalkError::BufferFull) +} + +/// Statistics from page table walking. +#[derive(Debug, Clone, Copy, Default)] +pub struct PageTableWalkStats { + /// Number of PML4 entries traversed. + pub pml4_entries: usize, + /// Number of PDPE entries traversed. + pub pdpe_entries: usize, + /// Number of PDE entries traversed. + pub pde_entries: usize, + /// Number of PTE entries traversed. + pub pte_entries: usize, + /// Number of 1GB huge pages found. + pub huge_pages_1g: usize, + /// Number of 2MB large pages found. + pub large_pages_2m: usize, + /// Number of 4KB pages found. + pub pages_4k: usize, + /// Number of pages skipped (inside MMRAM). + pub skipped_mmram: usize, +} + +/// Walks x86_64 4-level page tables with statistics. +/// +/// This is the same as [`walk_page_table`] but also returns statistics +/// about the page table structure. +/// +/// # Safety +/// +/// Same requirements as [`walk_page_table`]. 
+pub unsafe fn walk_page_table_with_stats( + cr3: u64, + buffer: *mut MemDescriptorV1_0, + max_count: usize, + is_inside_mmram: IsInsideMmramFn, +) -> Result<(usize, PageTableWalkStats), PageTableWalkError> { + if cr3 == 0 || buffer.is_null() { + return Err(PageTableWalkError::InvalidCr3); + } + + let pml4_base = cr3 & PageTableEntry::ADDR_MASK_4K; + let mut builder = unsafe { MemoryPolicyBuilder::new(buffer, max_count) }; + let mut stats = PageTableWalkStats::default(); + + // Walk PML4 (512 entries) + let pml4_table = pml4_base as *const PageTableEntry; + + for i in 0..512u64 { + let pml4e = unsafe { *pml4_table.add(i as usize) }; + if !pml4e.is_present() { + continue; + } + + stats.pml4_entries += 1; + + let pml4_attrs = pml4e.to_policy_attrs( + RESOURCE_ATTR_READ | RESOURCE_ATTR_WRITE | RESOURCE_ATTR_EXECUTE, + ); + + // Walk PDPE + let pdpe_table = pml4e.next_table_addr() as *const PageTableEntry; + + for j in 0..512u64 { + let pdpe = unsafe { *pdpe_table.add(j as usize) }; + if !pdpe.is_present() { + continue; + } + + stats.pdpe_entries += 1; + let pdpe_attrs = pdpe.to_policy_attrs(pml4_attrs); + + // 1GB huge page + if pdpe.is_large_page() { + let page_addr = pdpe.huge_page_addr(); + let page_size = PageTableEntry::SIZE_1G; + + if is_inside_mmram(page_addr, page_size) { + stats.skipped_mmram += 1; + continue; + } + + stats.huge_pages_1g += 1; + if builder.add_region(page_addr, page_size, pdpe_attrs).is_err() { + return Err(PageTableWalkError::BufferFull); + } + continue; + } + + // Walk PDE + let pde_table = pdpe.next_table_addr() as *const PageTableEntry; + + for k in 0..512u64 { + let pde = unsafe { *pde_table.add(k as usize) }; + if !pde.is_present() { + continue; + } + + stats.pde_entries += 1; + let pde_attrs = pde.to_policy_attrs(pdpe_attrs); + + // 2MB large page + if pde.is_large_page() { + let page_addr = pde.large_page_addr(); + let page_size = PageTableEntry::SIZE_2M; + + if is_inside_mmram(page_addr, page_size) { + stats.skipped_mmram += 1; + 
continue; + } + + stats.large_pages_2m += 1; + if builder.add_region(page_addr, page_size, pde_attrs).is_err() { + return Err(PageTableWalkError::BufferFull); + } + continue; + } + + // Walk PTE + let pte_table = pde.next_table_addr() as *const PageTableEntry; + + for l in 0..512u64 { + let pte = unsafe { *pte_table.add(l as usize) }; + if !pte.is_present() { + continue; + } + + stats.pte_entries += 1; + let page_addr = pte.page_addr(); + let page_size = PageTableEntry::SIZE_4K; + + if is_inside_mmram(page_addr, page_size) { + stats.skipped_mmram += 1; + continue; + } + + stats.pages_4k += 1; + let pte_attrs = pte.to_policy_attrs(pde_attrs); + + if builder.add_region(page_addr, page_size, pte_attrs).is_err() { + return Err(PageTableWalkError::BufferFull); + } + } + } + } + } + + let count = builder.finish().map_err(|()| PageTableWalkError::BufferFull)?; + Ok((count, stats)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_dump_mem_policy_entry() { + // Just verify it doesn't panic + let desc = MemDescriptorV1_0 { + base_address: 0x1000, + size: 0x1000, + mem_attributes: RESOURCE_ATTR_READ | RESOURCE_ATTR_WRITE, + reserved: 0, + }; + dump_mem_policy_entry(&desc); + } + + #[test] + fn test_compare_mem_descriptors() { + let descs1 = [ + MemDescriptorV1_0 { base_address: 0x1000, size: 0x1000, mem_attributes: 1, reserved: 0 }, + MemDescriptorV1_0 { base_address: 0x2000, size: 0x1000, mem_attributes: 2, reserved: 0 }, + ]; + let descs2 = [ + MemDescriptorV1_0 { base_address: 0x2000, size: 0x1000, mem_attributes: 2, reserved: 0 }, + MemDescriptorV1_0 { base_address: 0x1000, size: 0x1000, mem_attributes: 1, reserved: 0 }, + ]; + + // Order-independent comparison should succeed + assert!(compare_mem_descriptors(&descs1, &descs2)); + } + + #[test] + fn test_compare_mem_descriptors_mismatch() { + let descs1 = [ + MemDescriptorV1_0 { base_address: 0x1000, size: 0x1000, mem_attributes: 1, reserved: 0 }, + ]; + let descs2 = [ + MemDescriptorV1_0 { 
base_address: 0x1000, size: 0x2000, mem_attributes: 1, reserved: 0 }, + ]; + + assert!(!compare_mem_descriptors(&descs1, &descs2)); + } +} diff --git a/components/patina_mm_policy/src/lib.rs b/components/patina_mm_policy/src/lib.rs new file mode 100644 index 000000000..8d341bae0 --- /dev/null +++ b/components/patina_mm_policy/src/lib.rs @@ -0,0 +1,45 @@ +//! MM Supervisor Secure Policy Library +//! +//! This crate provides a comprehensive policy management library for the MM Supervisor, +//! including policy data structures, access validation (policy gate), and helper utilities. +//! +//! ## Overview +//! +//! The MM Supervisor uses security policies to control what resources user-mode MM drivers +//! can access. This crate provides: +//! +//! - **Data Structures**: Rust definitions matching the C structures in `SmmSecurePolicy.h` +//! - **Policy Gate**: Runtime access validation for I/O, MSR, instruction, and save state +//! - **Helpers**: Dump, compare, and page table walking utilities +//! +//! ## Example +//! +//! ```rust,ignore +//! use patina_mm_policy::{PolicyGate, AccessType, IoWidth}; +//! +//! // Initialize the policy gate with a policy buffer +//! let gate = unsafe { PolicyGate::new(policy_ptr) }?; +//! +//! // Check if I/O access is allowed +//! if gate.is_io_allowed(0x3F8, IoWidth::Byte, AccessType::Read).is_ok() { +//! // Perform the I/O operation +//! } +//! ``` +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +#![cfg_attr(all(not(feature = "std"), not(test)), no_std)] +#![allow(dead_code)] + +mod types; +mod gate; +mod helpers; + +pub use types::*; +pub use gate::*; +pub use helpers::*; diff --git a/components/patina_mm_policy/src/types.rs b/components/patina_mm_policy/src/types.rs new file mode 100644 index 000000000..65c4f9fa6 --- /dev/null +++ b/components/patina_mm_policy/src/types.rs @@ -0,0 +1,451 @@ +//! MM Supervisor Secure Policy Data Structures +//! +//! 
Rust definitions matching the C structures in `SmmSecurePolicy.h`. + +use core::slice; + +// ============================================================================ +// Policy Descriptor Types +// ============================================================================ + +/// Memory policy descriptor type. +pub const TYPE_MEM: u32 = 1; +/// I/O policy descriptor type. +pub const TYPE_IO: u32 = 2; +/// MSR policy descriptor type. +pub const TYPE_MSR: u32 = 3; +/// Instruction policy descriptor type. +pub const TYPE_INSTRUCTION: u32 = 4; +/// Save state policy descriptor type. +pub const TYPE_SAVE_STATE: u32 = 5; + +// ============================================================================ +// Access Attributes +// ============================================================================ + +/// Access attribute: Allow access to resources described by this policy root. +pub const ACCESS_ATTR_ALLOW: u8 = 0; +/// Access attribute: Deny access to resources described by this policy root. +pub const ACCESS_ATTR_DENY: u8 = 1; + +// ============================================================================ +// Resource Attributes +// ============================================================================ + +/// Resource attribute: Read access. +pub const RESOURCE_ATTR_READ: u32 = 0x01; +/// Resource attribute: Write access. +pub const RESOURCE_ATTR_WRITE: u32 = 0x02; +/// Resource attribute: Execute access. +pub const RESOURCE_ATTR_EXECUTE: u32 = 0x04; +/// Resource attribute: Strict width (for I/O - must match exact width). +pub const RESOURCE_ATTR_STRICT_WIDTH: u32 = 0x08; +/// Resource attribute: Conditional read access. +pub const RESOURCE_ATTR_COND_READ: u32 = 0x10; +/// Resource attribute: Conditional write access. 
+pub const RESOURCE_ATTR_COND_WRITE: u32 = 0x20; + +// ============================================================================ +// Instruction Indices +// ============================================================================ + +/// Instruction index for CLI. +pub const INSTRUCTION_CLI: u16 = 0; +/// Instruction index for WBINVD. +pub const INSTRUCTION_WBINVD: u16 = 1; +/// Instruction index for HLT. +pub const INSTRUCTION_HLT: u16 = 2; +/// Total count of privileged instructions tracked. +pub const INSTRUCTION_COUNT: u16 = 3; + +/// Privileged instruction types. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(u16)] +pub enum Instruction { + /// CLI - Clear Interrupt Flag + Cli = 0, + /// WBINVD - Write Back and Invalidate Cache + Wbinvd = 1, + /// HLT - Halt + Hlt = 2, +} + +impl Instruction { + /// Convert to instruction index. + pub fn as_index(self) -> u16 { + self as u16 + } + + /// Create from instruction index. + pub fn from_index(index: u16) -> Option { + match index { + 0 => Some(Self::Cli), + 1 => Some(Self::Wbinvd), + 2 => Some(Self::Hlt), + _ => None, + } + } +} + +// ============================================================================ +// Save State Map Fields +// ============================================================================ + +/// Save state field: RAX register. +pub const SVST_RAX: u32 = 0; +/// Save state field: I/O trap information. +pub const SVST_IO_TRAP: u32 = 1; +/// Total count of save state fields tracked. +pub const SVST_COUNT: u32 = 2; + +/// Save state map fields. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(u32)] +pub enum SaveStateField { + /// RAX register + Rax = 0, + /// I/O trap information + IoTrap = 1, +} + +impl SaveStateField { + /// Convert to field index. + pub fn as_index(self) -> u32 { + self as u32 + } + + /// Create from field index. 
+ pub fn from_index(index: u32) -> Option { + match index { + 0 => Some(Self::Rax), + 1 => Some(Self::IoTrap), + _ => None, + } + } +} + +// ============================================================================ +// Save State Access Conditions +// ============================================================================ + +/// Save state access is unconditional. +pub const SVST_UNCONDITIONAL: u32 = 0; +/// Save state access is conditional on I/O read trap. +pub const SVST_CONDITION_IO_RD: u32 = 1; +/// Save state access is conditional on I/O write trap. +pub const SVST_CONDITION_IO_WR: u32 = 2; +/// Total count of save state conditions. +pub const SVST_CONDITION_COUNT: u32 = 3; + +/// Save state access conditions. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(u32)] +pub enum SaveStateCondition { + /// Unconditional access + Unconditional = 0, + /// Conditional on I/O read trap + IoRead = 1, + /// Conditional on I/O write trap + IoWrite = 2, +} + +// ============================================================================ +// Access Types +// ============================================================================ + +/// Type of access being requested. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum AccessType { + /// Read access + Read, + /// Write access + Write, + /// Execute access (for instructions) + Execute, +} + +impl AccessType { + /// Convert to resource attribute mask. + pub fn as_attr_mask(self) -> u32 { + match self { + AccessType::Read => RESOURCE_ATTR_READ, + AccessType::Write => RESOURCE_ATTR_WRITE, + AccessType::Execute => RESOURCE_ATTR_EXECUTE, + } + } +} + +// ============================================================================ +// I/O Width +// ============================================================================ + +/// I/O access width. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(u32)] +pub enum IoWidth { + /// 8-bit (1 byte) access + Byte = 1, + /// 16-bit (2 byte) access + Word = 2, + /// 32-bit (4 byte) access + Dword = 4, +} + +impl IoWidth { + /// Get the size in bytes. + pub fn size(self) -> u32 { + self as u32 + } + + /// Create from size in bytes. + pub fn from_size(size: u32) -> Option { + match size { + 1 => Some(Self::Byte), + 2 => Some(Self::Word), + 4 => Some(Self::Dword), + _ => None, + } + } +} + +// ============================================================================ +// Policy Data Structures (V1.0) +// ============================================================================ + +/// Memory policy descriptor (V1.0). +#[repr(C)] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub struct MemDescriptorV1_0 { + /// Base address of memory region. + pub base_address: u64, + /// Size of memory region in bytes. + pub size: u64, + /// Memory attributes (combination of `RESOURCE_ATTR_*`). + pub mem_attributes: u32, + /// Reserved, must be 0. + pub reserved: u32, +} + +/// I/O policy descriptor (V1.0). +#[repr(C)] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub struct IoDescriptorV1_0 { + /// Base I/O port address. + pub io_address: u16, + /// Length or width of the I/O range. + pub length_or_width: u16, + /// I/O attributes (combination of `RESOURCE_ATTR_*`). + pub attributes: u16, + /// Reserved, must be 0. + pub reserved: u16, +} + +/// MSR policy descriptor (V1.0). +#[repr(C)] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub struct MsrDescriptorV1_0 { + /// Base MSR address. + pub msr_address: u32, + /// Length of MSR range. + pub length: u16, + /// MSR attributes (combination of `RESOURCE_ATTR_*`). + pub attributes: u16, +} + +/// Instruction policy descriptor (V1.0). +#[repr(C)] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub struct InstructionDescriptorV1_0 { + /// Instruction index (one of `INSTRUCTION_*` constants). 
+ pub instruction_index: u16, + /// Instruction attributes (combination of `RESOURCE_ATTR_*`). + pub attributes: u16, + /// Reserved, must be 0. + pub reserved: u32, +} + +/// Save state policy descriptor (V1.0). +#[repr(C)] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub struct SaveStateDescriptorV1_0 { + /// Save state map field (one of `SVST_*` constants). + pub map_field: u32, + /// Save state attributes (combination of `RESOURCE_ATTR_*`). + pub attributes: u32, + /// Access condition (one of `SVST_CONDITION_*` constants). + pub access_condition: u32, + /// Reserved, must be 0. + pub reserved: u32, +} + +/// Policy root structure (V1). +#[repr(C)] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub struct PolicyRootV1 { + /// Version of this policy root structure. + pub version: u32, + /// Size of this policy root structure in bytes. + pub policy_root_size: u32, + /// Type of descriptors (one of `TYPE_*` constants). + pub policy_type: u32, + /// Offset in bytes from policy data start to the descriptors. + pub offset: u32, + /// Number of descriptor entries. + pub count: u32, + /// Access attribute (one of `ACCESS_ATTR_*` constants). + pub access_attr: u8, + /// Reserved, must be all zeros. + pub reserved: [u8; 3], +} + +/// Secure policy data header (V1.0). +#[repr(C)] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub struct SecurePolicyDataV1_0 { + /// Minor version (should be 0x0000). + pub version_minor: u16, + /// Major version (should be 0x0001). + pub version_major: u16, + /// Total size in bytes of the entire policy block. + pub size: u32, + /// Offset to legacy memory policy (0 if not supported). + pub memory_policy_offset: u32, + /// Count of legacy memory policy entries (0 if not supported). + pub memory_policy_count: u32, + /// Flag field indicating supervisor status. + pub flags: u32, + /// Capability field indicating features supported by supervisor. + pub capabilities: u32, + /// Reserved, must be 0. 
+ pub reserved: u64, + /// Offset from this structure to the policy root array. + pub policy_root_offset: u32, + /// Number of policy roots. + pub policy_root_count: u32, +} + +impl SecurePolicyDataV1_0 { + /// Returns true if this is a valid V1.0 policy header. + pub fn is_valid_version(&self) -> bool { + self.version_major == 1 && self.version_minor == 0 + } + + /// Gets a pointer to the policy root array. + /// + /// # Safety + /// + /// The caller must ensure that this structure is part of a valid policy buffer. + pub unsafe fn get_policy_roots_ptr(&self) -> *const PolicyRootV1 { + let base = self as *const Self as *const u8; + unsafe { base.add(self.policy_root_offset as usize) as *const PolicyRootV1 } + } + + /// Gets a slice of policy roots. + /// + /// # Safety + /// + /// The caller must ensure that this structure is part of a valid policy buffer. + pub unsafe fn get_policy_roots(&self) -> &[PolicyRootV1] { + unsafe { slice::from_raw_parts(self.get_policy_roots_ptr(), self.policy_root_count as usize) } + } +} + +impl PolicyRootV1 { + /// Returns true if the reserved fields are all zeros. + pub fn has_valid_reserved(&self) -> bool { + self.reserved == [0, 0, 0] + } + + /// Gets a pointer to the descriptors for this policy root. + /// + /// # Safety + /// + /// The caller must ensure that `policy_base` points to a valid policy buffer. + pub unsafe fn get_descriptors_ptr(&self, policy_base: *const u8) -> *const T { + unsafe { policy_base.add(self.offset as usize) as *const T } + } + + /// Gets memory descriptors from this policy root. + /// + /// # Safety + /// + /// Caller must ensure this policy root has `policy_type == TYPE_MEM`. + pub unsafe fn get_mem_descriptors(&self, policy_base: *const u8) -> &[MemDescriptorV1_0] { + unsafe { + slice::from_raw_parts( + self.get_descriptors_ptr::(policy_base), + self.count as usize, + ) + } + } + + /// Gets I/O descriptors from this policy root. 
+ /// + /// # Safety + /// + /// Caller must ensure this policy root has `policy_type == TYPE_IO`. + pub unsafe fn get_io_descriptors(&self, policy_base: *const u8) -> &[IoDescriptorV1_0] { + unsafe { + slice::from_raw_parts( + self.get_descriptors_ptr::(policy_base), + self.count as usize, + ) + } + } + + /// Gets MSR descriptors from this policy root. + /// + /// # Safety + /// + /// Caller must ensure this policy root has `policy_type == TYPE_MSR`. + pub unsafe fn get_msr_descriptors(&self, policy_base: *const u8) -> &[MsrDescriptorV1_0] { + unsafe { + slice::from_raw_parts( + self.get_descriptors_ptr::(policy_base), + self.count as usize, + ) + } + } + + /// Gets instruction descriptors from this policy root. + /// + /// # Safety + /// + /// Caller must ensure this policy root has `policy_type == TYPE_INSTRUCTION`. + pub unsafe fn get_instruction_descriptors(&self, policy_base: *const u8) -> &[InstructionDescriptorV1_0] { + unsafe { + slice::from_raw_parts( + self.get_descriptors_ptr::(policy_base), + self.count as usize, + ) + } + } + + /// Gets save state descriptors from this policy root. + /// + /// # Safety + /// + /// Caller must ensure this policy root has `policy_type == TYPE_SAVE_STATE`. 
+ pub unsafe fn get_save_state_descriptors(&self, policy_base: *const u8) -> &[SaveStateDescriptorV1_0] { + unsafe { + slice::from_raw_parts( + self.get_descriptors_ptr::(policy_base), + self.count as usize, + ) + } + } +} + +// ============================================================================ +// Size assertions +// ============================================================================ + +const _: () = { + assert!(core::mem::size_of::() == 24); + assert!(core::mem::size_of::() == 8); + assert!(core::mem::size_of::() == 8); + assert!(core::mem::size_of::() == 8); + assert!(core::mem::size_of::() == 16); + assert!(core::mem::size_of::() == 24); + assert!(core::mem::size_of::() == 40); +}; diff --git a/components/patina_performance/src/component/performance.rs b/components/patina_performance/src/component/performance.rs index ef3c24bfe..216222c9a 100644 --- a/components/patina_performance/src/component/performance.rs +++ b/components/patina_performance/src/component/performance.rs @@ -11,17 +11,16 @@ use crate::{component::protocol::create_performance_measurement_efiapi, config, mm}; use alloc::{boxed::Box, string::String, vec::Vec}; -use core::{clone::Clone, convert::AsRef}; use patina::{ boot_services::{BootServices, StandardBootServices, event::EventType, tpl::Tpl}, component::{ component, hob::Hob, params::Config, - service::{Service, perf_timer::ArchTimerFunctionality}, + service::{Service, timer::ArchTimerFunctionality}, }, error::EfiError, - guids::{EVENT_GROUP_END_OF_DXE, PERFORMANCE_PROTOCOL}, + guids::PERFORMANCE_PROTOCOL, performance::{ globals::{get_static_state, set_load_image_count, set_perf_measurement_mask, set_static_state}, measurement::{PerformanceProperty, event_callback}, @@ -41,8 +40,10 @@ use r_efi::system::EVENT_GROUP_READY_TO_BOOT; pub use mu_rust_helpers::function; +use patina::guids::EVENT_GROUP_END_OF_DXE; + /// Context parameter for the Ready-to-Boot event callback that fetches MM performance records. 
-type MmPerformanceEventContext = Box<(BB, &'static TplMutex, Service)>; +type MmPerformanceEventContext = Box<(B, &'static TplMutex, Service)>; /// Performance Component. pub struct Performance; @@ -83,29 +84,27 @@ impl Performance { } /// Entry point that have generic parameter. - fn _entry_point( + fn _entry_point( self, - boot_services: BB, - runtime_services: RR, + boot_services: B, + runtime_services: R, records_buffers_hobs: Option

, mm_comm_service: Option>, fbpt: &'static TplMutex, timer: Service, ) -> Result<(), EfiError> where - BB: AsRef + Clone + 'static, - B: BootServices + 'static, - RR: AsRef + Clone + 'static, - R: RuntimeServices + 'static, + B: BootServices + Clone + 'static, + R: RuntimeServices + Clone + 'static, P: HobPerformanceDataExtractor, F: FirmwareBasicBootPerfTable, { // Register EndOfDxe event to allocate the boot performance table and report the table address through status code. - boot_services.as_ref().create_event_ex( + boot_services.create_event_ex( EventType::NOTIFY_SIGNAL, Tpl::CALLBACK, Some(event_callback::report_fbpt_record_buffer), - Box::new((BB::clone(&boot_services), RR::clone(&runtime_services), fbpt)), + Box::new((boot_services.clone(), runtime_services.clone(), fbpt)), &EVENT_GROUP_END_OF_DXE, )?; @@ -132,7 +131,7 @@ impl Performance { } // Install the protocol interfaces for DXE performance. - boot_services.as_ref().install_protocol_interface( + boot_services.install_protocol_interface( None, Box::new(EdkiiPerformanceMeasurement { create_performance_measurement: create_performance_measurement_efiapi, @@ -144,11 +143,11 @@ impl Performance { if let Some(mm_comm_service) = mm_comm_service { // TODO: Replace direct usage of the boot services event services with a Patina service // when available. - boot_services.as_ref().create_event_ex( + boot_services.create_event_ex( EventType::NOTIFY_SIGNAL, Tpl::CALLBACK, - Some(fetch_and_add_mm_performance_records::), - Box::new((BB::clone(&boot_services), fbpt, mm_comm_service)), + Some(fetch_and_add_mm_performance_records::), + Box::new((boot_services.clone(), fbpt, mm_comm_service)), &EVENT_GROUP_READY_TO_BOOT, )?; } else { @@ -162,7 +161,7 @@ impl Performance { // Install configuration table for performance property. // SAFETY: `install_configuration_table` requires that the data match the GUID; PERFORMANCE_PROTOCOL matches `PerformanceProperty`. 
unsafe { - boot_services.as_ref().install_configuration_table( + boot_services.install_configuration_table( &PERFORMANCE_PROTOCOL, Box::new(PerformanceProperty::new( timer.perf_frequency(), @@ -210,7 +209,7 @@ fn fetch_mm_record_size(comm_service: &Service) -> Result( +pub extern "efiapi" fn fetch_and_add_mm_performance_records( event: r_efi::efi::Event, - ctx: MmPerformanceEventContext, + ctx: MmPerformanceEventContext, ) where - BB: AsRef + Clone, - B: BootServices + 'static, + B: BootServices + Clone + 'static, F: FirmwareBasicBootPerfTable, { let (boot_services, fbpt, comm_service) = *ctx; - let _ = boot_services.as_ref().close_event(event); + let _ = boot_services.close_event(event); if let Err(e) = process_mm_performance_records(&comm_service, fbpt) { log::error!("Performance: {}", e); @@ -427,7 +425,6 @@ pub extern "efiapi" fn fetch_and_add_mm_performance_records( #[cfg(test)] mod tests { use super::*; - use alloc::rc::Rc; use core::assert_eq; use r_efi::efi; @@ -524,7 +521,10 @@ mod tests { .once() .withf_st(|handle, _protocol_interface| { assert_eq!(&None, handle); - assert_eq!(EDKII_PERFORMANCE_MEASUREMENT_PROTOCOL_GUID, EdkiiPerformanceMeasurement::PROTOCOL_GUID); + assert_eq!( + EDKII_PERFORMANCE_MEASUREMENT_PROTOCOL_GUID.into_inner(), + EdkiiPerformanceMeasurement::PROTOCOL_GUID + ); true }) .returning(|_, protocol_interface| Ok((TEST_EFI_HANDLE, protocol_interface.metadata()))); @@ -532,8 +532,8 @@ mod tests { // Test that an event to report the fbpt at the end of dxe is created. 
boot_services .expect_create_event_ex::, - Rc, + MockBootServices, + MockRuntimeServices, &TplMutex, )>>() .once() @@ -542,9 +542,7 @@ mod tests { assert_eq!(&Tpl::CALLBACK, notify_tpl); assert_eq!( event_callback::report_fbpt_record_buffer::< - Rc<_>, MockBootServices, - Rc<_>, MockRuntimeServices, MockFirmwareBasicBootPerfTable, > as *const () as usize, @@ -566,17 +564,15 @@ mod tests { let mut fbpt = MockFirmwareBasicBootPerfTable::new(); fbpt.expect_set_perf_records().once().return_const(()); - let boot_services_rc = Rc::new(boot_services); - - // TplMutex owns its BootServices instance - let fbpt = TplMutex::new((*boot_services_rc).clone(), Tpl::NOTIFY, fbpt); + // TplMutex owns its own BootServices instance (clone creates a new mock with default TPL expectations) + let fbpt = TplMutex::new(boot_services.clone(), Tpl::NOTIFY, fbpt); // Leak the fbpt to create a 'static reference for testing. let fbpt = Box::leak(Box::new(fbpt)); let _ = Performance._entry_point( - boot_services_rc, - Rc::new(runtime_services), + boot_services, + runtime_services, Some(hob_perf_data_extractor), None, fbpt, @@ -605,23 +601,19 @@ mod tests { let mut entry_point_mock = MockBootServices::new(); entry_point_mock .expect_create_event_ex::, - Rc, + MockBootServices, + MockRuntimeServices, &TplMutex, )>>() .once() .return_const_st(Ok(TEST_EVENT_HANDLE)); entry_point_mock - .expect_create_event_ex::, - MockBootServices, - MockFirmwareBasicBootPerfTable, - >>() + .expect_create_event_ex::>() .once() .withf_st(|_, _, f, _, group| { (f.unwrap() as usize) - == fetch_and_add_mm_performance_records::, MockBootServices, MockFirmwareBasicBootPerfTable> - as * const () as usize + == fetch_and_add_mm_performance_records:: + as *const () as usize && group == &EVENT_GROUP_READY_TO_BOOT }) .return_const_st(Ok(TEST_EVENT_HANDLE_2)); @@ -643,8 +635,8 @@ mod tests { let mm_service: Service = Service::mock(Box::new(FakeComm)); let timer: Service = Service::mock(Box::new(MockTimer {})); let _ = 
Performance._entry_point( - Rc::new(entry_point_mock), - Rc::new(runtime_services), + entry_point_mock, + runtime_services, Option::::None, Some(mm_service), fbpt_ref, @@ -690,9 +682,9 @@ mod tests { let fbpt_ref: &'static TplMutex<_, _> = Box::leak(Box::new(fbpt_mutex)); let mm_service: Service = Service::mock(Box::new(ZeroSizeComm)); - fetch_and_add_mm_performance_records::, MockBootServices, MockFirmwareBasicBootPerfTable>( + fetch_and_add_mm_performance_records::( TEST_EVENT_HANDLE, - Box::new((Rc::new(callback_mock), fbpt_ref, mm_service)), + Box::new((callback_mock, fbpt_ref, mm_service)), ); } @@ -754,9 +746,9 @@ mod tests { let fbpt_ref: &'static TplMutex<_, _> = Box::leak(Box::new(fbpt_mutex)); let mm_service: Service = Service::mock(Box::new(OneRecordComm::new())); - fetch_and_add_mm_performance_records::, MockBootServices, MockFirmwareBasicBootPerfTable>( + fetch_and_add_mm_performance_records::( TEST_EVENT_HANDLE, - Box::new((Rc::new(callback_mock), fbpt_ref, mm_service)), + Box::new((callback_mock, fbpt_ref, mm_service)), ); } @@ -837,9 +829,9 @@ mod tests { let mm_service: Service = Service::mock(Box::new(MultiChunks { buf: all_records, fetches: Cell::new(0) })); - fetch_and_add_mm_performance_records::, MockBootServices, MockFirmwareBasicBootPerfTable>( + fetch_and_add_mm_performance_records::( TEST_EVENT_HANDLE, - Box::new((Rc::new(callback_mock), fbpt_ref, mm_service)), + Box::new((callback_mock, fbpt_ref, mm_service)), ); } diff --git a/components/patina_performance/src/mm.rs b/components/patina_performance/src/mm.rs index 0932307fe..05fc58fac 100644 --- a/components/patina_performance/src/mm.rs +++ b/components/patina_performance/src/mm.rs @@ -101,8 +101,8 @@ pub const MAX_SMM_BOOT_RECORD_BYTES: usize = 2 * 1024 * 1024; // 2 MiB /// Default chunk size for fetching SMM boot performance records. 
pub const SMM_FETCH_CHUNK_BYTES: usize = 1024; -pub const EFI_FIRMWARE_PERFORMANCE_GUID: efi::Guid = - efi::Guid::from_fields(0xc095791a, 0x3001, 0x47b2, 0x80, 0xc9, &[0xea, 0xc7, 0x31, 0x9f, 0x2f, 0xa4]); +pub const EFI_FIRMWARE_PERFORMANCE_GUID: patina::BinaryGuid = + patina::BinaryGuid::from_string("C095791A-3001-47B2-80C9-EAC7319F2FA4"); /// MM communicate function to return performance record size info. #[derive(Debug, Default, Copy, Clone)] diff --git a/components/patina_smbios/Cargo.toml b/components/patina_smbios/Cargo.toml index ec1575039..7159bb334 100644 --- a/components/patina_smbios/Cargo.toml +++ b/components/patina_smbios/Cargo.toml @@ -22,7 +22,6 @@ mockall = { workspace = true } patina = { workspace = true, features = ["mockall"] } [features] -enable_patina_tests = [] mockall = ["dep:mockall", "std"] std = [] diff --git a/components/patina_smbios/src/manager/core.rs b/components/patina_smbios/src/manager/core.rs index 435948acb..d97dca724 100644 --- a/components/patina_smbios/src/manager/core.rs +++ b/components/patina_smbios/src/manager/core.rs @@ -15,10 +15,7 @@ extern crate alloc; use alloc::{boxed::Box, collections::BTreeSet, string::String, vec::Vec}; use core::cell::RefCell; use patina::{base::SIZE_64KB, uefi_size_to_pages}; -use r_efi::{ - efi, - efi::{Handle, PhysicalAddress}, -}; +use r_efi::efi::{Handle, PhysicalAddress}; use zerocopy::{IntoBytes, Ref}; use zerocopy_derive::*; @@ -38,8 +35,8 @@ use super::record::SmbiosRecord; /// This GUID identifies the SMBIOS 3.0+ entry point structure in the UEFI Configuration Table. /// Used for SMBIOS 3.0 and later versions which support 64-bit table addresses and remove /// the 4GB table size limitation of SMBIOS 2.x. 
-pub const SMBIOS_3_X_TABLE_GUID: efi::Guid = - efi::Guid::from_fields(0xF2FD1544, 0x9794, 0x4A2C, 0x99, 0x2E, &[0xE5, 0xBB, 0xCF, 0x20, 0xE3, 0x94]); +pub const SMBIOS_3_X_TABLE_GUID: patina::BinaryGuid = + patina::BinaryGuid::from_string("F2FD1544-9794-4A2C-992E-E5BBCF20E394"); /// SMBIOS 3.0 entry point structure (64-bit) /// Per SMBIOS 3.0+ specification section 5.2.2 diff --git a/components/patina_smbios/src/manager/protocol.rs b/components/patina_smbios/src/manager/protocol.rs index 3bb33913c..28079e680 100644 --- a/components/patina_smbios/src/manager/protocol.rs +++ b/components/patina_smbios/src/manager/protocol.rs @@ -48,8 +48,7 @@ pub(super) struct SmbiosProtocolInternal { // SAFETY: SmbiosProtocol implements the SMBIOS protocol interface. The struct layout // must match the SMBIOS protocol interface with function pointers in the correct order. unsafe impl ProtocolInterface for SmbiosProtocol { - const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0x03583ff6, 0xcb36, 0x4940, 0x94, 0x7e, &[0xb9, 0xb3, 0x9f, 0x4a, 0xfa, 0xf7]); + const PROTOCOL_GUID: patina::BinaryGuid = patina::BinaryGuid::from_string("03583FF6-CB36-4940-947E-B9B39F4AFAF7"); } type SmbiosAdd = @@ -474,8 +473,7 @@ mod tests { use patina::uefi_protocol::ProtocolInterface; // Verify the GUID matches the EDK2 SMBIOS protocol GUID - let expected_guid = - efi::Guid::from_fields(0x03583ff6, 0xcb36, 0x4940, 0x94, 0x7e, &[0xb9, 0xb3, 0x9f, 0x4a, 0xfa, 0xf7]); + let expected_guid = patina::BinaryGuid::from_string("03583FF6-CB36-4940-947E-B9B39F4AFAF7"); assert_eq!(SmbiosProtocol::PROTOCOL_GUID, expected_guid); } diff --git a/components/patina_smbios/src/service.rs b/components/patina_smbios/src/service.rs index 0ad925019..0f9696a69 100644 --- a/components/patina_smbios/src/service.rs +++ b/components/patina_smbios/src/service.rs @@ -244,7 +244,10 @@ impl Smbios for SmbiosImpl { // SAFETY: We pass a valid GUID and a pointer to ACPI_RECLAIM_MEMORY that remains valid unsafe { self.boot_services 
- .install_configuration_table(&crate::manager::SMBIOS_3_X_TABLE_GUID, ep_addr as *mut core::ffi::c_void) + .install_configuration_table( + &crate::manager::SMBIOS_3_X_TABLE_GUID.into_inner(), + ep_addr as *mut core::ffi::c_void, + ) .map_err(|_| crate::error::SmbiosError::AllocationFailed)?; } diff --git a/components/patina_test/Cargo.toml b/components/patina_test/Cargo.toml new file mode 100644 index 000000000..41f597e77 --- /dev/null +++ b/components/patina_test/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "patina_test" +version.workspace = true +repository.workspace = true +license.workspace = true +edition.workspace = true +readme = "README.md" +description = "Component and framework for on-system unit testing." + +[lints] +workspace = true + +[dependencies] +linkme = { workspace = true } +log = { workspace = true } +patina = { workspace = true } +patina_macro = { workspace = true } +r-efi = { workspace = true } +spin = { workspace = true } + +[features] +test-runner = ["patina_macro/enable_patina_tests"] diff --git a/components/patina_test/README.md b/components/patina_test/README.md new file mode 100644 index 000000000..52bf3c9f8 --- /dev/null +++ b/components/patina_test/README.md @@ -0,0 +1,95 @@ +# A Patina testing framework for on-platform unit testing + +This crate provides a macro (`patina_test`) to register dependency injectable functions as on-platform unit tests +that can be discovered and executed by the `TestRunner` component. + +## Writing Tests + +The patina test framework emulates the Rust provided testing framework as much as possible, so writing tests +should feel very similar to writing normal Rust unit tests with some additional configuration attributes available. + +1. A developer should use `#[patina_test]` to mark a function as a test case, rather than `#[test]`. The function + must return a [Result](crate::error::Result) type, rather than panicking on failure, which differs from the standard + Rust testing framework. +2. 
To assist with (1), this crate provides `assert` equivalent macros that return an error on failure rather than + panicking (See [crate::u_assert], [crate::u_assert_eq], [crate::u_assert_ne]). +3. Tests can be configured with the same attributes as the standard Rust provided testing framework, such as + `#[should_fail]`, `#[should_fail = ""]`, and `#[skip]`. +4. By default, tests are configured to run once during the boot process, but a macro attribute is provided to + change when/how often a test is triggered. See the [patina_test] macro documentation for more details. +5. Test dependencies can be injected as function parameters, and the test framework will resolve them from the + component storage system. The test will not run if the dependency cannot be resolved. + +## Running Tests + +Tests marked with `#[patina_test]` are not automatically executed by a platform. Instead, the platform must opt-in +to running tests by registering one or more `TestRunner` components with the Core. This is done by enabling the +`test-runner` feature of this crate, which does two things: (1) provides access to the component module, which contains +the component and (2) Globally registers any function marked with `#[patina_test]`. Once registered, it will +discover all test cases that match its configuration and schedule them according to the component's configurations +and the test case's triggers. An overlap in test cases discovered by multiple `TestRunner` components is allowed, +but the test case will only be scheduled to run once based on its triggers. The test failure callbacks will be +called for each `TestRunner` that discovers the test case. `debug_mode=true` takes priority, so if any `TestRunner` +that discovers a test case has `debug_mode=true`, then debug messages will be enabled for that test case regardless +of the other `TestRunner`'s debug_mode configuration for that test case. 
+ +## Feature Flags + +- `test-runner`: Will make the `component` module public, providing access to the `TestRunner` component and actually + register patina tests globally. + +## Example + +```rust +use patina_test::{ + patina_test, u_assert, u_assert_eq, + error::Result, +}; + +use patina::boot_services::StandardBootServices; +use patina::guids::CACHE_ATTRIBUTE_CHANGE_EVENT_GROUP; + +#[cfg_attr(target_arch = "aarch64", patina_test)] +fn test_case() -> Result { + u_assert_eq!(1, 1); + Ok(()) +} + +#[patina_test] +fn test_case2() -> Result { + u_assert_eq!(1, 1); + Ok(()) +} + +#[patina_test] +#[should_fail] +fn failing_test_case() -> Result { + u_assert_eq!(1, 2); + Ok(()) +} + +#[patina_test] +#[should_fail = "This test failed"] +fn failing_test_case_with_msg() -> Result { + u_assert_eq!(1, 2, "This test failed"); + Ok(()) +} + +#[patina_test] +#[skip] +fn skipped_test_case() -> Result { + todo!() +} + +#[patina_test] +#[cfg_attr(not(target_arch = "x86_64"), skip)] +fn x86_64_only_test_case(bs: StandardBootServices) -> Result { + todo!() +} + +#[patina_test] +#[on(event = CACHE_ATTRIBUTE_CHANGE_EVENT_GROUP)] +fn on_event_test_case() -> Result { + Ok(()) +} +``` diff --git a/sdk/patina/src/test/__private_api.rs b/components/patina_test/src/__private_api.rs similarity index 73% rename from sdk/patina/src/test/__private_api.rs rename to components/patina_test/src/__private_api.rs index ca093b761..ba9ece978 100644 --- a/sdk/patina/src/test/__private_api.rs +++ b/components/patina_test/src/__private_api.rs @@ -12,32 +12,37 @@ use core::marker::PhantomData; -use r_efi::efi::Guid; - -use crate::component::{ - MetaData, Storage, UnsafeStorageCell, - params::{Param, ParamFunction}, +use patina::{ + BinaryGuid, + component::{ + MetaData, Storage, UnsafeStorageCell, + params::{Param, ParamFunction}, + }, }; +use crate::component::Filter; + /// Where all the test cases marked with `#[patina_test]` are collated to. 
-#[cfg(feature = "enable_patina_tests")] +#[cfg(feature = "test-runner")] #[linkme::distributed_slice] pub static TEST_CASES: [TestCase]; -/// returns the test cases to run. +/// Returns the test cases to run. /// -/// TEST_CASES exists only when the `enable_patina_tests` feature is +/// Tests are only collected when the `test-runner` feature is /// explicitly enabled. This feature is opt-in and explicit because external /// consumers of `patina` who do not register at least one test case with /// the `#[patina_test]` attribute may encounter a surprising linker crash (not /// just a linker failure), due to the testing infrastructure relying on the /// `linkme` crate. +/// +/// If the `test-runner` feature is not enabled, this function will return an empty slice. pub fn test_cases() -> &'static [TestCase] { - #[cfg(feature = "enable_patina_tests")] + #[cfg(feature = "test-runner")] { &TEST_CASES } - #[cfg(not(feature = "enable_patina_tests"))] + #[cfg(not(feature = "test-runner"))] { &[] } @@ -49,7 +54,7 @@ pub enum TestTrigger { /// The test case should be executed manually. Manual, /// The test case should be executed when the specified event triggers. - Event(&'static Guid), + Event(BinaryGuid), /// The test case should be executed after the specified units of 100ns have elapsed. 
Timer(u64), } @@ -66,14 +71,29 @@ pub struct TestCase { } impl TestCase { - pub fn should_run(&self, filters: &[&str]) -> bool { - if filters.is_empty() { - return !self.skip; + pub fn should_run(&self, filters: &[Filter]) -> bool { + if self.skip { + return false; } - filters.iter().any(|pattern| self.name.contains(pattern)) && !self.skip + + let mut has_includes = false; + let mut included = false; + + for filter in filters { + match filter { + Filter::Exclude(p) if self.name.contains(p) => return false, + Filter::Exclude(_) => {} + Filter::Include(p) => { + has_includes = true; + included |= self.name.contains(p); + } + } + } + + included || !has_includes } - pub fn run(&self, storage: &mut Storage, debug_mode: bool) -> super::Result { + pub fn run(&self, storage: &mut Storage, debug_mode: bool) -> crate::error::Result { let ret = if debug_mode { log::debug!("#### {} Test Output Start ####", self.name); let ret = (self.func)(storage); @@ -149,7 +169,8 @@ where #[coverage(off)] mod tests { use super::*; - use crate::component::Storage; + + extern crate std; #[test] fn test_should_run() { @@ -162,10 +183,45 @@ mod tests { func: |_| Ok(true), }; - std::assert!(test_case.should_run(&["test"])); - std::assert!(test_case.should_run(&["t"])); + std::assert!(test_case.should_run(&[Filter::include("test")])); + std::assert!(test_case.should_run(&[Filter::include("t")])); std::assert!(test_case.should_run(&[])); - std::assert!(!test_case.should_run(&["not"])); + std::assert!(!test_case.should_run(&[Filter::include("not")])); + } + + #[test] + fn test_should_run_with_no_filters() { + let test_case = TestCase { + name: "test", + triggers: &[TestTrigger::Manual], + skip: false, + should_fail: false, + fail_msg: None, + func: |_| Ok(true), + }; + + std::assert!(test_case.should_run(&[])); + } + + #[test] + fn test_should_run_with_exclude_filters() { + let test_case = TestCase { + name: "my_crate::tests::test_case", + triggers: &[TestTrigger::Manual], + skip: false, + 
should_fail: false, + fail_msg: None, + func: |_| Ok(true), + }; + + // Exclude filter matches - should not run + std::assert!(!test_case.should_run(&[Filter::exclude("test_case")])); + // Exclude filter does not match - should run + std::assert!(test_case.should_run(&[Filter::exclude("other")])); + // Include filter matches but exclude filter also matches - should not run + std::assert!(!test_case.should_run(&[Filter::include("my_crate"), Filter::exclude("test_case")])); + // Include filter matches and exclude filter does not match - should run + std::assert!(test_case.should_run(&[Filter::include("my_crate"), Filter::exclude("other")])); } #[test] @@ -259,4 +315,12 @@ mod tests { let result = test_case.run(&mut storage, false); std::assert_eq!(result, Err("Failed to install protocol interface")); } + + #[test] + fn test_test_with_invalid_param_combination_is_caught() { + assert_eq!( + crate::component::tests::TEST_CASE_INVALID.run(&mut Storage::new(), false), + Err("Test failed to run due to un-retrievable parameters.") + ); + } } diff --git a/components/patina_test/src/component.rs b/components/patina_test/src/component.rs new file mode 100644 index 000000000..e3bef7d76 --- /dev/null +++ b/components/patina_test/src/component.rs @@ -0,0 +1,401 @@ +//! Patina Test Test Runner Component +//! +//! This module provides the [TestRunner] component, which is responsible for discovering and registering tests marked +//! with the `#[patina_test]` attribute for execution. See the [TestRunner] documentation for details on how to +//! configure and use the test runner. Multiple test runners can be registered to run different sets of tests with +//! different configurations. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! 
+use crate::{ + __private_api, + alloc::vec::Vec, + service::{Recorder, TestRecord}, +}; + +use patina::component::{Storage, component}; + +/// A filter to include or exclude test cases whose name contains the pattern. +/// +/// # Example +/// +/// ```rust +/// use patina_test::component::{TestRunner, Filter}; +/// +/// let runner = TestRunner::default() +/// .with_filter(Filter::include("x64")) +/// .with_filter(Filter::exclude("aarch64")); +/// ``` +#[derive(Debug, Clone)] +pub enum Filter { + /// Only run tests whose name contains the pattern. + Include(&'static str), + /// Do not run tests whose name contains the pattern. + Exclude(&'static str), +} + +impl Filter { + /// Creates an include filter. Tests whose name contains `pattern` will be included. + pub fn include(pattern: &'static str) -> Self { + Self::Include(pattern) + } + + /// Creates an exclude filter. Tests whose name contains `pattern` will be excluded. + pub fn exclude(pattern: &'static str) -> Self { + Self::Exclude(pattern) + } +} + +/// A component that runs all test cases marked with the `#[patina_test]` attribute when loaded by the DXE core. +#[derive(Default, Clone)] +pub struct TestRunner { + filters: Vec, + debug_mode: bool, + fail_callback: Option, +} + +#[component] +impl TestRunner { + /// Adds a filter to control which tests are executed. + /// + /// The filter pattern is matched against the full test name, which includes the module path. + /// For example, if a test is defined in `my_crate::tests`, the name would be + /// `my_crate::tests::test_case`. + /// + /// - [`Filter::Include`]: Only run tests whose name contains the pattern. When multiple include + /// filters are specified, a test runs if it matches **any** of them. + /// - [`Filter::Exclude`]: Prevents tests whose name contains the pattern from running. Exclude + /// filters take priority over include filters. + /// + /// This filter is case-sensitive. It can be called multiple times to add multiple filters. 
+ pub fn with_filter(mut self, filter: Filter) -> Self { + self.filters.push(filter); + self + } + + /// Any log messages generated by the test case will be logged if this is set to true. + /// + /// Defaults to false. + pub fn debug_mode(mut self, debug_mode: bool) -> Self { + self.debug_mode = debug_mode; + self + } + + /// Attach a callback function that will be called on test failure. + /// + /// fn(test_name: &'static str, fail_msg: &'static str) + pub fn with_callback(mut self, callback: fn(&'static str, &'static str)) -> Self { + self.fail_callback = Some(callback); + self + } + + /// The entry point for the test runner component. + #[coverage(off)] + fn entry_point(self, storage: &mut Storage) -> patina::error::Result<()> { + let test_list: &'static [__private_api::TestCase] = __private_api::test_cases(); + self.register_tests(test_list, storage) + } + + /// Registers the tests to be executed by the test runner. + fn register_tests( + &self, + test_list: &'static [__private_api::TestCase], + storage: &mut Storage, + ) -> patina::error::Result<()> { + let recorder = match storage.get_service::() { + Some(recorder) => recorder, + None => { + let recorder = Recorder::default(); + recorder.initialize(storage)?; + storage.add_service(recorder); + storage.get_service::().expect("Recorder service should be registered.") + } + }; + + let records = test_list + .iter() + .filter(|&test_case| test_case.should_run(self.filters.as_slice())) + .map(|test_case| TestRecord::new(self.debug_mode, test_case, self.fail_callback)); + + for record in records { + // Only schedule a run if we have not already scheduled for this test. 
+ if !recorder.test_registered(record.name()) { + record.schedule_run(storage)?; + } + + recorder.update_record(record); + } + + Ok(()) + } +} + +#[cfg(test)] +#[coverage(off)] +pub(crate) mod tests { + extern crate std; + + use super::*; + use crate::{ + __private_api::TestCase, + alloc::{boxed::Box, format}, + }; + use core::mem::MaybeUninit; + use patina::{ + BinaryGuid, + boot_services::StandardBootServices, + component::{IntoComponent, Storage, params::Config}, + }; + + // A test function where we mock DxeComponentInterface to return what we want for the test. + fn test_function(config: Config) -> crate::error::Result { + assert!(*config == 1); + Ok(()) + } + + fn test_function_fail() -> crate::error::Result { + Err("Intentional Failure") + } + + fn test_function_invalid(_: &mut Storage, _: &mut Storage) -> crate::error::Result { + Ok(()) + } + + // This is mirroring the logic in __private_api.rs to ensure we do properly register test cases. + #[linkme::distributed_slice] + pub static TEST_TESTS: [super::__private_api::TestCase]; + + #[linkme::distributed_slice(TEST_TESTS)] + pub static TEST_CASE1: super::__private_api::TestCase = super::__private_api::TestCase { + name: "test", + triggers: &[super::__private_api::TestTrigger::Manual], + skip: false, + should_fail: false, + fail_msg: None, + func: |storage| crate::__private_api::FunctionTest::new(test_function).run(storage.into()), + }; + + #[linkme::distributed_slice(TEST_TESTS)] + pub static TEST_CASE2: super::__private_api::TestCase = super::__private_api::TestCase { + name: "test", + triggers: &[super::__private_api::TestTrigger::Manual], + skip: true, + should_fail: false, + fail_msg: None, + func: |storage| crate::__private_api::FunctionTest::new(test_function).run(storage.into()), + }; + + pub static TEST_CASE3: super::__private_api::TestCase = super::__private_api::TestCase { + name: "test_that_fails", + triggers: &[super::__private_api::TestTrigger::Manual], + skip: false, + should_fail: false, + 
fail_msg: None, + func: |storage| crate::__private_api::FunctionTest::new(test_function_fail).run(storage.into()), + }; + + pub static TEST_CASE4: super::__private_api::TestCase = super::__private_api::TestCase { + name: "event_triggered_test", + triggers: &[super::__private_api::TestTrigger::Event(BinaryGuid::from_bytes(&[0; 16]))], + skip: false, + should_fail: false, + fail_msg: None, + func: |storage| crate::__private_api::FunctionTest::new(test_function_fail).run(storage.into()), + }; + + pub static TEST_CASE5: super::__private_api::TestCase = super::__private_api::TestCase { + name: "timer_triggered_test", + triggers: &[super::__private_api::TestTrigger::Timer(1_000_000)], + skip: false, + should_fail: false, + fail_msg: None, + func: |storage| crate::__private_api::FunctionTest::new(test_function_fail).run(storage.into()), + }; + + pub static TEST_CASE_INVALID: super::__private_api::TestCase = super::__private_api::TestCase { + name: "invalid_test", + triggers: &[super::__private_api::TestTrigger::Event(BinaryGuid::from_bytes(&[0; 16]))], + skip: false, + should_fail: false, + fail_msg: None, + func: |storage| crate::__private_api::FunctionTest::new(test_function_invalid).run(storage.into()), + }; + + #[test] + #[ignore = "Skipping test until the service for UEFI services is out, so we can mock it."] + fn test_we_can_initialize_the_component() { + let mut storage = Storage::new(); + + let mut component = super::TestRunner::default().into_component(); + component.initialize(&mut storage); + } + + #[test] + #[ignore = "Skipping test until the service for UEFI services is out, so we can mock it."] + fn test_we_can_collect_and_execute_tests() { + assert_eq!(TEST_TESTS.len(), 2); + let mut storage = Storage::new(); + storage.add_config(1_i32); + + let component = super::TestRunner::default(); + let result = component.register_tests(&TEST_TESTS, &mut storage); + assert!(result.is_ok()); + } + + #[test] + #[ignore = "Skipping test until the service for UEFI 
services is out, so we can mock it."] + fn test_handle_different_test_counts() { + let mut storage = Storage::new(); + storage.add_config(1_i32); + + let test_cases: &'static [TestCase] = Box::leak(Box::new([])); + let component = super::TestRunner::default(); + let result = component.register_tests(test_cases, &mut storage); + assert!(result.is_ok()); + + let test_cases: &'static [TestCase] = Box::leak(Box::new([TEST_CASE1])); + let result = component.register_tests(test_cases, &mut storage); + assert!(result.is_ok()); + + let test_cases: &'static [TestCase] = Box::leak(Box::new([TEST_CASE1, TEST_CASE2])); + let result = component.register_tests(test_cases, &mut storage); + assert!(result.is_ok()); + + let test_cases: &'static [TestCase] = Box::leak(Box::new([TEST_CASE1, TEST_CASE2, TEST_CASE3])); + let result = component.register_tests(test_cases, &mut storage); + assert!(result.is_ok()); + } + + #[test] + fn test_func_implements_into_component() { + let _ = TestRunner::default().into_component(); + } + + #[test] + fn verify_default_values() { + let config = TestRunner::default(); + assert_eq!(config.filters.len(), 0); + assert!(!config.debug_mode); + } + + #[test] + fn verify_config_sets_properly() { + let config = TestRunner::default() + .with_filter(Filter::include("aarch64")) + .with_filter(Filter::include("test")) + .with_filter(Filter::exclude("skip_me")) + .debug_mode(true); + assert_eq!(config.filters.len(), 3); + assert!(config.debug_mode); + } + + #[test] + #[should_panic(expected = "Callback called")] + fn test_test_failure_callback_handler() { + let test_runner = crate::component::TestRunner::default().with_callback(|_, _| { + panic!("Callback called"); + }); + + let mut storage = Storage::new(); + storage.add_service(Recorder::default()); + let bs: MaybeUninit = MaybeUninit::uninit(); + + // SAFETY: This is very unsafe, because it is not initialized, however this code path only calls create_event + // and create_event_ex, which we will fill in with 
no-op functions. + let mut bs = unsafe { bs.assume_init() }; + extern "efiapi" fn noop_create_event( + _type: u32, + _tpl: r_efi::efi::Tpl, + _notify_function: Option, + _notify_context: *mut core::ffi::c_void, + _event: *mut r_efi::efi::Event, + ) -> r_efi::efi::Status { + r_efi::efi::Status::SUCCESS + } + + extern "efiapi" fn noop_create_event_ex( + _type: u32, + _tpl: r_efi::efi::Tpl, + _notify_function: Option, + _notify_context: *const core::ffi::c_void, + _guid: *const r_efi::efi::Guid, + _event: *mut r_efi::efi::Event, + ) -> r_efi::efi::Status { + r_efi::efi::Status::SUCCESS + } + + bs.create_event = noop_create_event; + bs.create_event_ex = noop_create_event_ex; + + storage.set_boot_services(StandardBootServices::new(Box::leak(Box::new(bs)))); + + // TEST_CASE3 is designed to fail. + let _ = test_runner.register_tests(Box::leak(Box::new([TEST_CASE3])), &mut storage); + storage.get_service::().unwrap().run_manual_tests(&mut storage); + } + + #[test] + fn test_filter_should_work() { + let test_runner = TestRunner::default().with_filter(Filter::include("triggered_test")); + + let mut storage = Storage::new(); + let bs: MaybeUninit = MaybeUninit::uninit(); + + // SAFETY: This is very unsafe, because it is not initialized, however this code path only calls create_event + // create_event_ex, and set_timer which we will fill in with no-op functions. 
+ let mut bs = unsafe { bs.assume_init() }; + extern "efiapi" fn noop_create_event( + _type: u32, + _tpl: r_efi::efi::Tpl, + _notify_function: Option, + _notify_context: *mut core::ffi::c_void, + _event: *mut r_efi::efi::Event, + ) -> r_efi::efi::Status { + r_efi::efi::Status::SUCCESS + } + + extern "efiapi" fn noop_create_event_ex( + _type: u32, + _tpl: r_efi::efi::Tpl, + _notify_function: Option, + _notify_context: *const core::ffi::c_void, + _guid: *const r_efi::efi::Guid, + _event: *mut r_efi::efi::Event, + ) -> r_efi::efi::Status { + r_efi::efi::Status::SUCCESS + } + + extern "efiapi" fn noop_set_timer( + _event: r_efi::efi::Event, + _type: r_efi::efi::TimerDelay, + _trigger_time: u64, + ) -> r_efi::efi::Status { + r_efi::efi::Status::SUCCESS + } + + bs.create_event = noop_create_event; + bs.create_event_ex = noop_create_event_ex; + bs.set_timer = noop_set_timer; + + storage.set_boot_services(StandardBootServices::new(Box::leak(Box::new(bs)))); + + // Failure tests + assert!( + test_runner.register_tests(Box::leak(Box::new([TEST_CASE3, TEST_CASE4, TEST_CASE5])), &mut storage).is_ok() + ); + let recorder = storage.get_service::().expect("Recorder service should be registered."); + recorder.run_manual_tests(&mut storage); + + let output = format!("{}", *recorder); + + // This test is filtered out, so it should not even show up in the results. + assert!(!output.contains("test_that_fails")); + // This test is not filtered out, but never run, so should log as such. + std::println!("{}", output); + assert!(output.contains("event_triggered_test ... 
not triggered")); + } +} diff --git a/components/patina_test/src/lib.rs b/components/patina_test/src/lib.rs new file mode 100644 index 000000000..214c6ab1f --- /dev/null +++ b/components/patina_test/src/lib.rs @@ -0,0 +1,71 @@ +#![doc = include_str!("../README.md")] +#![doc = concat!( + "## License\n\n", + " Copyright (c) Microsoft Corporation.\n\n", +)] +#![no_std] +#![feature(coverage_attribute)] +extern crate alloc; + +#[doc(hidden)] +pub mod __private_api; +#[doc(hidden)] +pub use linkme; + +#[cfg(any(feature = "test-runner", doc))] +pub mod component; + +#[cfg(all(not(feature = "test-runner"), not(doc)))] +#[allow(unused)] +mod component; + +/// Patina Test Error Definitions. +/// +/// Defines Result type that must be returned by all patina test functions. +pub mod error { + /// The result type for patina tests. All patina test functions must return this type. + pub type Result = core::result::Result<(), &'static str>; +} + +mod service; + +pub use patina_macro::patina_test; + +/// A macro similar to [`core::assert!`] that returns an error message instead of panicking. +#[macro_export] +macro_rules! u_assert { + ($cond:expr, $msg:expr) => { + if !$cond { + return Err($msg); + } + }; + ($cond:expr) => { + u_assert!($cond, "Assertion failed"); + }; +} + +/// A macro similar to [`core::assert_eq!`] that returns an error message instead of panicking. +#[macro_export] +macro_rules! u_assert_eq { + ($left:expr, $right:expr, $msg:expr) => { + if $left != $right { + return Err($msg); + } + }; + ($left:expr, $right:expr) => { + u_assert_eq!($left, $right, concat!("assertion failed: `", stringify!($left), " == ", stringify!($right), "`")); + }; +} + +/// A macro similar to [`core::assert_ne!`] that returns an error message instead of panicking. +#[macro_export] +macro_rules! 
u_assert_ne { + ($left:expr, $right:expr, $msg:expr) => { + if $left == $right { + return Err($msg); + } + }; + ($left:expr, $right:expr) => { + u_assert_ne!($left, $right, concat!("assertion failed: `", stringify!($left), " != ", stringify!($right), "`")); + }; +} diff --git a/components/patina_test/src/service.rs b/components/patina_test/src/service.rs new file mode 100644 index 000000000..873296765 --- /dev/null +++ b/components/patina_test/src/service.rs @@ -0,0 +1,467 @@ +//! Patina Testing Service +//! +//! This module defines the internal service used by the crate to register and execute tests marked with the +//! `#[patina_test]` attribute. The [TestRunner](crate::component::TestRunner) component checks for the presence of +//! the [Recorder] service, registering a new one if it does not. It then uses the Recorder service to register all +//! discovered tests based on the filtered list each individual TestRunner is configured to run. The Recorder service +//! is then responsible for executing the tests, recording their results, and logging the results at the appropriate +//! time during the boot process. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! +use crate::{ + __private_api::{TestCase, TestTrigger}, + alloc::{boxed::Box, collections::BTreeMap, fmt::Display, string::String, vec::Vec}, +}; + +use core::{ops::DerefMut, ptr::NonNull}; + +use patina::{ + boot_services::{ + BootServices, StandardBootServices, + event::{EventTimerType, EventType}, + tpl::Tpl, + }, + component::{Storage, service::IntoService}, +}; + +use r_efi::efi::EVENT_GROUP_READY_TO_BOOT; + +/// A structure containing all necessary data to execute a test at any time. +#[derive(Clone)] +pub(crate) struct TestRecord { + /// Whether or not to log debug messages in the test or not + debug_mode: bool, + /// The test case to execute. + test_case: &'static TestCase, + /// Callback functions to be called on test failure. 
+ callback: Vec, + /// The number of times this test has executed and passed. + pass: u32, + /// The number of times this test has executed and failed. + fail: u32, + /// The error message from the most recent failure, if any. + err_msg: Option<&'static str>, +} + +#[allow(unused)] +impl TestRecord { + /// Creates a new instance of TestRecord. + pub fn new( + debug_mode: bool, + test_case: &'static TestCase, + callback: Option, + ) -> Self { + let callback = callback.into_iter().collect(); + Self { debug_mode, test_case, callback, pass: 0, fail: 0, err_msg: None } + } + + pub fn name(&self) -> &'static str { + self.test_case.name + } + + /// Merges another test record into this one, combining their results and callbacks. + fn merge(&mut self, other: &Self) { + assert_eq!(self.test_case.name, other.test_case.name, "Can only merge records for the same test case."); + self.debug_mode |= other.debug_mode; + self.pass += other.pass; + self.fail += other.fail; + self.callback.extend(other.callback.clone()); + if self.err_msg.is_none() && other.err_msg.is_some() { + self.err_msg = other.err_msg; + } + } + + /// Runs the test case case. + /// + /// Calls the test failure callbacks if the test fails. + fn run(&mut self, storage: &mut Storage) { + let result = self.test_case.run(storage, self.debug_mode); + + match result { + Ok(()) => self.pass += 1, + Err(msg) => { + self.fail += 1; + self.err_msg = Some(msg); + self.callback.iter().for_each(|cb| cb(self.test_case.name, msg)); + } + } + } + + /// Schedules the test to be run according to its triggers. + pub fn schedule_run(&self, storage: &mut Storage) -> patina::error::Result<()> { + let name = self.test_case.name; + + for trigger in self.test_case.triggers { + match trigger { + TestTrigger::Manual => { + // Do nothing. Test must be manually triggered. 
+ } + TestTrigger::Event(guid) => { + storage.boot_services().create_event_ex( + EventType::NOTIFY_SIGNAL, + Tpl::CALLBACK, + Some(Self::run_test), + Box::leak(Box::new((name, NonNull::from_ref(storage)))), + guid, + )?; + } + TestTrigger::Timer(interval) => { + let event = storage.boot_services().create_event( + EventType::NOTIFY_SIGNAL | EventType::TIMER, + Tpl::CALLBACK, + Some(Self::run_test), + // We are setting up this timer to be periodic, so we need to leak it so it is available for + // multiple test runs + Box::leak(Box::new((name, NonNull::from_ref(storage)))), + )?; + + // We need to disable the timer at ReadyToBoot so it does not continue firing while a + // bootloader is running. + let _ = storage.boot_services().create_event_ex( + EventType::NOTIFY_SIGNAL, + Tpl::CALLBACK, + Some(Self::disable_timer), + NonNull::from_ref(Box::leak(Box::new((event, storage.boot_services().clone())))).as_ptr() + as *mut core::ffi::c_void, + &EVENT_GROUP_READY_TO_BOOT, + )?; + + storage.boot_services().set_timer(event, EventTimerType::Periodic, *interval)?; + } + } + } + + Ok(()) + } + + /// Serializes the test record to a JSON string for logging or reporting purposes. + fn json(&self) -> String { + alloc::format!( + r#"{{"name":"{}","pass":{},"fail":{},"err_msg":{}}}"#, + self.test_case.name, + self.pass, + self.fail, + self.err_msg.map_or(String::from("null"), |msg| alloc::format!(r#""{}""#, msg)) + ) + } + + /// EFIAPI event callback to locate a specific test and run it. + extern "efiapi" fn run_test(_: r_efi::efi::Event, &(test, mut storage): &'static (&'static str, NonNull)) { + // SAFETY: Storage is a valid pointer as the pointer is generated from a static reference. 
+ let storage = unsafe { storage.as_mut() }; + + if let Some(recorder) = storage.get_service::() { + let _ = recorder.with_mut(|records| records.get_mut(test).map(|record| record.run(storage))); + } + } + + #[coverage(off)] + /// An EFIAPI compatible event callback to disable a timer event at ReadyToBoot + extern "efiapi" fn disable_timer(rtb_event: r_efi::efi::Event, context: *mut core::ffi::c_void) { + // SAFETY: We set up the context pointer in `run_tests` to point to a valid tuple of (Event, StandardBootServices). + let (timer_event, boot_services) = unsafe { &mut *(context as *mut (r_efi::efi::Event, StandardBootServices)) }; + let _ = boot_services.set_timer(*timer_event, EventTimerType::Cancel, 0); + let _ = boot_services.close_event(rtb_event); + } +} + +/// A private service to record test results. +#[derive(IntoService, Default)] +#[service(Recorder)] +pub(crate) struct Recorder { + records: spin::Mutex>, +} + +#[allow(unused)] +impl Recorder { + /// Allows updates to the test records via a closure to ensure interior mutability safety. + fn with_mut(&self, f: F) -> R + where + F: FnOnce(&mut BTreeMap<&'static str, TestRecord>) -> R, + { + let mut records = self.records.lock(); + f(records.deref_mut()) + } + + /// Registers UEFI event callbacks to log the test results at specific points in the boot process. + pub fn initialize(&self, storage: &mut Storage) -> patina::error::Result<()> { + // Log results at ready to boot + storage.boot_services().create_event_ex( + EventType::NOTIFY_SIGNAL, + Tpl::CALLBACK, + Some(Self::run_tests_and_report), + NonNull::from_ref(storage), + &EVENT_GROUP_READY_TO_BOOT, + )?; + + // log results at exit boot services + storage.boot_services().create_event( + EventType::SIGNAL_EXIT_BOOT_SERVICES, + Tpl::CALLBACK, + Some(Self::run_tests_and_report), + NonNull::from_ref(storage), + )?; + + Ok(()) + } + + /// Returns true if a test with the given name is already registered, false otherwise. 
+ pub fn test_registered(&self, test_name: &str) -> bool { + self.with_mut(|data| data.contains_key(test_name)) + } + + // Updates an existing record or inserts a new record if it does not exist. + pub fn update_record(&self, record: TestRecord) { + let name = record.test_case.name; + + self.with_mut(|data| { + if let Some(existing_record) = data.get_mut(name) { + existing_record.merge(&record); + } else { + data.insert(name, record); + } + }); + } + + /// Runs all tests that are triggered by the [TestTrigger::Manual] trigger if they have not been run before. + pub(crate) fn run_manual_tests(&self, storage: &mut Storage) { + self.with_mut(|data| { + data.values_mut() + .filter(|record| { + record.test_case.triggers.contains(&TestTrigger::Manual) && record.pass == 0 && record.fail == 0 + }) + .for_each(|record| record.run(storage)); + }); + } + + /// Serializes all test records to a JSON string for logging or reporting purposes. + fn json(&self) -> String { + self.with_mut(|records| { + let mut json_records = String::from("["); + for record in records.values() { + json_records.push_str(&record.json()); + json_records.push(','); + } + if !records.is_empty() { + json_records.pop(); + } + json_records.push(']'); + json_records + }) + } + + /// An EFIAPI compatible event callback to run the manually triggered tests and log the current results of patina-test + extern "efiapi" fn run_tests_and_report(event: r_efi::efi::Event, mut storage: NonNull) { + // SAFETY: event callbacks are executed in series, so there exists no other mutable access to storage. 
+ let storage = unsafe { storage.as_mut() }; + + if let Some(recorder) = storage.get_service::() { + recorder.run_manual_tests(storage); + + log::info!("{}", *recorder); + log::info!(r#"{{"patina_on_system_unit_test_results":{}}}"#, recorder.json()); + } + + let _ = storage.boot_services().close_event(event); + } +} + +impl Display for Recorder { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + self.with_mut(|records| { + let mut total_passes = 0; + let mut total_fails = 0; + writeln!(f, "Patina on-system unit-test results:")?; + for (name, record) in records.iter() { + total_passes += record.pass; + total_fails += record.fail; + if record.fail == 0 && record.pass == 0 { + writeln!(f, " {name} ... not triggered")?; + continue; + } + if record.fail == 0 { + writeln!(f, " {name} ... ok ({} passes)", record.pass)?; + } else { + writeln!( + f, + " {name} ... fail ({} fails, {} passes): {}", + record.fail, + record.pass, + record.err_msg.unwrap_or("") + )?; + } + } + writeln!(f, "Patina on-system unit-test result totals: {total_passes} passes, {total_fails} fails")?; + + Ok(()) + }) + } +} + +#[cfg(test)] +#[coverage(off)] +mod tests { + extern crate std; + + use core::mem::MaybeUninit; + + use super::*; + use crate::{alloc::format, component::tests::*}; + + #[test] + fn test_recorder_records_results() { + let recorder = Recorder::default(); + + let mut tr1 = TestRecord::new(false, &TEST_CASE2, None); + tr1.pass = 2; + tr1.fail = 1; + tr1.err_msg = Some("Failure 1"); + recorder.update_record(tr1); + + let mut tr2 = TestRecord::new(false, &TEST_CASE3, None); + tr2.pass = 0; + tr2.fail = 2; + tr2.err_msg = Some("Failure 2"); + recorder.update_record(tr2); + + let mut tr3 = TestRecord::new(false, &TEST_CASE4, None); + tr3.pass = 1; + recorder.update_record(tr3); + + let output = format!("{}", recorder); + assert!(output.contains("test ... fail (1 fails, 2 passes): Failure 1")); + assert!(output.contains("test_that_fails ... 
fail (2 fails, 0 passes): Failure 2")); + assert!(output.contains("event_triggered_test ... ok (1 passes)")); + } + + #[test] + fn test_test_data_test_running() { + let mut storage = Storage::new(); + storage.add_config(1_i32); + storage.add_service(Recorder::default()); + + let test_case = &TEST_CASE1; + let mut test_data = TestRecord::new(false, test_case, None); + + test_data.run(&mut storage); + + let recorder = storage.get_service::().expect("Recorder service should be registered."); + recorder.update_record(test_data); + + let output = format!("{}", *recorder); + std::println!("{}", output); + assert!(output.contains("test ... ok (1 passes)")); + } + + #[test] + fn test_update_record_with_existing_record() { + let mut record1 = TestRecord::new(false, &TEST_CASE1, Some(|_, _| ())); + record1.pass = 1; + record1.fail = 0; + + let mut record2 = TestRecord::new(true, &TEST_CASE1, Some(|_, _| ())); + record2.pass = 0; + record2.fail = 2; + record2.err_msg = Some("Failure"); + + let recorder = Recorder::default(); + recorder.update_record(record1); + recorder.update_record(record2); + + let record = recorder.with_mut(|data| data.get(&TEST_CASE1.name).cloned().expect("Record should exist.")); + + assert!(record.debug_mode); + assert_eq!(record.pass, 1); + assert_eq!(record.fail, 2); + assert_eq!(record.err_msg, Some("Failure")); + assert!(record.debug_mode); + assert_eq!(record.callback.len(), 2); + } + + #[test] + fn test_efiapi_run_test() { + let mut storage = Storage::new(); + storage.add_config(1_i32); + + let recorder = Recorder::default(); + recorder.update_record(TestRecord::new(false, &TEST_CASE1, None)); + storage.add_service(recorder); + + let context = Box::leak(Box::new(("test", NonNull::from_ref(&storage)))); + TestRecord::run_test(core::ptr::null_mut(), context); + } + + #[test] + fn test_efiapi_run_tests_and_report() { + let bs: MaybeUninit = MaybeUninit::uninit(); + // SAFETY: This is very unsafe, because it is not initialized, however this code path 
only calls create_event + // create_event_ex, and set_timer which we will fill in with no-op functions. + let mut bs = unsafe { bs.assume_init() }; + + extern "efiapi" fn noop_close_event(_: r_efi::efi::Event) -> r_efi::efi::Status { + r_efi::efi::Status::SUCCESS + } + + bs.close_event = noop_close_event; + + let mut storage = Storage::new(); + storage.set_boot_services(StandardBootServices::new(Box::leak(Box::new(bs)))); + storage.add_config(1_i32); + + let recorder = Recorder::default(); + recorder.update_record(TestRecord::new(false, &TEST_CASE1, None)); + storage.add_service(recorder); + + Recorder::run_tests_and_report(core::ptr::null_mut(), NonNull::from_ref(&storage)); + + // Check that the test run + let recorder = storage.get_service::().expect("Recorder service should be registered."); + let output = format!("{}", *recorder); + assert!(output.contains("test ... ok (1 passes)")); + } + + #[test] + fn test_record_json_string() { + let test_case = &TEST_CASE1; + let mut record = TestRecord::new(false, test_case, None); + record.pass = 2; + record.fail = 1; + record.err_msg = Some("Failure message"); + + let json = record.json(); + assert_eq!(json, r#"{"name":"test","pass":2,"fail":1,"err_msg":"Failure message"}"#); + + record.err_msg = None; + let json = record.json(); + assert_eq!(json, r#"{"name":"test","pass":2,"fail":1,"err_msg":null}"#); + } + + #[test] + fn test_recorder_json_string() { + let recorder = Recorder::default(); + + let mut tr1 = TestRecord::new(false, &TEST_CASE2, None); + tr1.pass = 2; + tr1.fail = 1; + tr1.err_msg = Some("Failure 1"); + recorder.update_record(tr1); + + let mut tr2 = TestRecord::new(false, &TEST_CASE3, None); + tr2.pass = 0; + tr2.fail = 2; + tr2.err_msg = Some("Failure 2"); + recorder.update_record(tr2); + + // Cannot guarantee order of records, so we will just check that the JSON string contains both records in the correct format. 
+ let json = recorder.json(); + assert!(json.contains(r#"{"name":"test","pass":2,"fail":1,"err_msg":"Failure 1"}"#)); + assert!(json.contains(r#"{"name":"test_that_fails","pass":0,"fail":2,"err_msg":"Failure 2"}"#)); + assert!(json.starts_with('[') && json.ends_with(']')); + assert!(json.contains(",")); + } +} diff --git a/core/patina_debugger/src/arch/aarch64.rs b/core/patina_debugger/src/arch/aarch64.rs index 8aea7cbcb..dfc0bbef0 100644 --- a/core/patina_debugger/src/arch/aarch64.rs +++ b/core/patina_debugger/src/arch/aarch64.rs @@ -216,19 +216,11 @@ impl DebuggerArch for Aarch64Arch { } fn get_page_table() -> Result { - // TODO: Check for EL1? - let ttbr0_el2 = read_sysreg!(ttbr0_el2); - - // SAFETY: We are creating from the existing page table root, so the - // page tables should be a valid structure. Using PageAllocatorStub - // ensures that no allocations are attempted. + // SAFETY: We are operating in an exception context with interrupts disabled. No other entity is altering + // the page tables. unsafe { - patina_paging::aarch64::AArch64PageTable::from_existing( - ttbr0_el2, - patina_paging::page_allocator::PageAllocatorStub, - patina_paging::PagingType::Paging4Level, - ) - .map_err(|_| ()) + patina_paging::aarch64::AArch64PageTable::open_active(patina_paging::page_allocator::PageAllocatorStub) + .map_err(|_| ()) } } diff --git a/core/patina_debugger/src/arch/x64.rs b/core/patina_debugger/src/arch/x64.rs index a0660b6e0..6226284d5 100644 --- a/core/patina_debugger/src/arch/x64.rs +++ b/core/patina_debugger/src/arch/x64.rs @@ -10,7 +10,6 @@ use gdbstub::{ }; use patina_internal_cpu::interrupts::ExceptionContext; use patina_mtrr::Mtrr; -use patina_paging::PagingType; use super::{DebuggerArch, UefiArchRegs}; use crate::{ExceptionInfo, ExceptionType}; @@ -148,25 +147,11 @@ impl DebuggerArch for X64Arch { } fn get_page_table() -> Result { - let cr3: u64; - // SAFETY: This is simply reading the CR3 register, which is safe. 
- unsafe { asm!("mov {}, cr3", out(reg) cr3) }; - let cr4: u64; - // SAFETY: This is simply reading the CR4 register, which is safe. - unsafe { asm!("mov {}, cr4", out(reg) cr4) }; - - // Check CR4 to determine if we are using 4-level or 5-level paging. - let paging_type = { if cr4 & (1 << 12) != 0 { PagingType::Paging5Level } else { PagingType::Paging4Level } }; - - // SAFETY: The CR3 is currently being should be identity mapped and so - // should point to a valid page table. + // SAFETY: We are operating in an exception context with interrupts disabled. No other entity is altering + // the page tables. unsafe { - patina_paging::x64::X64PageTable::from_existing( - cr3, - patina_paging::page_allocator::PageAllocatorStub, - paging_type, - ) - .map_err(|_| ()) + patina_paging::x64::X64PageTable::open_active(patina_paging::page_allocator::PageAllocatorStub) + .map_err(|_| ()) } } diff --git a/core/patina_debugger/src/debugger.rs b/core/patina_debugger/src/debugger.rs index 90a48d919..3d6af3a56 100644 --- a/core/patina_debugger/src/debugger.rs +++ b/core/patina_debugger/src/debugger.rs @@ -22,7 +22,7 @@ use gdbstub::{ conn::{Connection, ConnectionExt}, stub::{GdbStubBuilder, SingleThreadStopReason, state_machine::GdbStubStateMachine}, }; -use patina::{component::service::perf_timer::ArchTimerFunctionality, serial::SerialIO}; +use patina::{serial::SerialIO, timer::ArchTimerFunctionality}; use patina_internal_cpu::interrupts::{ExceptionType, HandlerType, InterruptHandler, InterruptManager}; use spin::Mutex; diff --git a/core/patina_debugger/src/lib.rs b/core/patina_debugger/src/lib.rs index 676308646..f366f80f0 100644 --- a/core/patina_debugger/src/lib.rs +++ b/core/patina_debugger/src/lib.rs @@ -27,7 +27,7 @@ //! extern crate patina; //! # extern crate patina_internal_cpu; //! # use patina_internal_cpu::interrupts::{Interrupts, InterruptManager}; -//! # use patina::component::service::perf_timer::ArchTimerFunctionality; +//! 
# use patina::timer::ArchTimerFunctionality; //! //! static DEBUGGER: patina_debugger::PatinaDebugger = //! patina_debugger::PatinaDebugger::new(patina::serial::uart::UartNull{}) @@ -121,7 +121,7 @@ pub use debugger::PatinaDebugger; #[cfg(not(test))] use arch::{DebuggerArch, SystemArch}; -use patina::{component::service::perf_timer::ArchTimerFunctionality, serial::SerialIO}; +use patina::{serial::SerialIO, timer::ArchTimerFunctionality}; use patina_internal_cpu::interrupts::{ExceptionContext, InterruptManager}; /// Global instance of the debugger. diff --git a/core/patina_internal_cpu/Cargo.toml b/core/patina_internal_cpu/Cargo.toml index 4a435653a..aa3efcff6 100644 --- a/core/patina_internal_cpu/Cargo.toml +++ b/core/patina_internal_cpu/Cargo.toml @@ -46,3 +46,6 @@ serial_test = { workspace = true } default = [] std = [] doc = [] +alloc = [] +save_state_intel = [] +save_state_amd = [] diff --git a/core/patina_internal_cpu/README.md b/core/patina_internal_cpu/README.md index 3ad8d8bf8..31237856c 100644 --- a/core/patina_internal_cpu/README.md +++ b/core/patina_internal_cpu/README.md @@ -4,6 +4,9 @@ The `patina_internal_cpu` crate hosts core CPU functionality that Patina core co cache control, interrupt dispatch, and paging management. It is compiled as part of the monolithic Patina image and runs in `no_std` UEFI environments. +As a foundational component, this crate is intentionally designed to avoid dependencies on `alloc` and the associated +global allocator. + As an "internal" Patina crate, it is not intended for direct use by code outside of Patina core environments. ## Overview @@ -27,9 +30,6 @@ Protocol. - `EfiCpuAarch64` performs cache maintenance operations for Arm. - `EfiCpuNull` is available for documentation and host-based unit tests that do not require actual CPU services. -All implementations derive `IntoService`, allowing the Patina storage layer to register them as `Service` -instances during core bring-up. 
- ### `interrupts` `interrupts` defines the `InterruptManager` trait, handler registration (`HandlerType`). The module selects a diff --git a/core/patina_internal_cpu/src/cpu/aarch64/cpu.rs b/core/patina_internal_cpu/src/cpu/aarch64/cpu.rs index e061915c1..49cd0f6f8 100644 --- a/core/patina_internal_cpu/src/cpu/aarch64/cpu.rs +++ b/core/patina_internal_cpu/src/cpu/aarch64/cpu.rs @@ -10,7 +10,6 @@ use crate::cpu::Cpu; #[cfg(all(not(test), target_arch = "aarch64"))] use core::arch::asm; use patina::{ - component::service::IntoService, error::EfiError, pi::protocols::cpu_arch::{CpuFlushType, CpuInitType}, }; @@ -19,10 +18,10 @@ use r_efi::efi; /// Struct to implement AArch64 Cpu Init. /// /// This struct cannot be used directly. It replaces the `EfiCpu` struct when compiling for the AArch64 architecture. -#[derive(Default, IntoService)] -#[service(dyn Cpu)] +#[derive(Default)] pub struct EfiCpuAarch64; +#[allow(dead_code)] impl EfiCpuAarch64 { /// This function initializes the CPU for the AArch64 architecture. pub fn initialize(&mut self) -> Result<(), EfiError> { diff --git a/core/patina_internal_cpu/src/cpu/stub.rs b/core/patina_internal_cpu/src/cpu/stub.rs index b4eda2ad0..1ce6e0577 100644 --- a/core/patina_internal_cpu/src/cpu/stub.rs +++ b/core/patina_internal_cpu/src/cpu/stub.rs @@ -8,7 +8,6 @@ //! use crate::cpu::Cpu; use patina::{ - component::service::IntoService, error::EfiError, pi::protocols::cpu_arch::{CpuFlushType, CpuInitType}, }; @@ -17,8 +16,7 @@ use r_efi::efi; /// Struct to implement Null Cpu Init. /// /// This struct cannot be used directly. It replaces the `EfiCpu` struct when not compiling for x86_64 or AArch64 UEFI architectures. 
-#[derive(Default, Copy, Clone, IntoService)] -#[service(dyn Cpu)] +#[derive(Default, Copy, Clone)] pub struct EfiCpuStub; impl EfiCpuStub { diff --git a/core/patina_internal_cpu/src/cpu/x64/cpu.rs b/core/patina_internal_cpu/src/cpu/x64/cpu.rs index 445383198..dd810d38b 100644 --- a/core/patina_internal_cpu/src/cpu/x64/cpu.rs +++ b/core/patina_internal_cpu/src/cpu/x64/cpu.rs @@ -12,7 +12,6 @@ use crate::{cpu::Cpu, interrupts}; #[cfg(not(test))] use core::arch::asm; use patina::{ - component::service::IntoService, error::EfiError, pi::protocols::cpu_arch::{CpuFlushType, CpuInitType}, }; @@ -21,12 +20,11 @@ use r_efi::efi; /// Struct to implement X64 Cpu Init. /// /// This struct cannot be used directly. It replaces the `EfiCpu` struct when compiling for the x86_64 architecture. -#[derive(IntoService)] -#[service(dyn Cpu)] pub struct EfiCpuX64 { timer_period: u64, } +#[allow(dead_code)] impl EfiCpuX64 { /// Creates a new instance of the x86_64 implementation of the CPU trait. pub fn new() -> Self { diff --git a/core/patina_internal_cpu/src/interrupts.rs b/core/patina_internal_cpu/src/interrupts.rs index 51b2f3e34..34c69db9b 100644 --- a/core/patina_internal_cpu/src/interrupts.rs +++ b/core/patina_internal_cpu/src/interrupts.rs @@ -222,8 +222,6 @@ pub trait InterruptHandler: Sync { #[coverage(off)] #[cfg(test)] mod tests { - extern crate std; - use super::*; #[test] diff --git a/core/patina_internal_cpu/src/interrupts/aarch64/interrupt_manager.rs b/core/patina_internal_cpu/src/interrupts/aarch64/interrupt_manager.rs index 19dd7cdfa..31cb1e86f 100644 --- a/core/patina_internal_cpu/src/interrupts/aarch64/interrupt_manager.rs +++ b/core/patina_internal_cpu/src/interrupts/aarch64/interrupt_manager.rs @@ -10,10 +10,9 @@ use patina::{ base::{UEFI_PAGE_MASK, UEFI_PAGE_SIZE}, bit, - component::service::IntoService, error::EfiError, }; -use patina_paging::{PageTable, PagingType}; +use patina_paging::PageTable; use crate::interrupts::{ EfiExceptionStackTrace, EfiSystemContext, 
HandlerType, InterruptManager, aarch64::ExceptionContextAArch64, @@ -36,10 +35,10 @@ cfg_if::cfg_if! { } } /// AARCH64 Implementation of the InterruptManager. -#[derive(Default, Copy, Clone, IntoService)] -#[service(dyn InterruptManager)] +#[derive(Default, Copy, Clone)] pub struct InterruptsAarch64 {} +#[allow(dead_code)] impl InterruptsAarch64 { /// Creates a new instance of the AARCH64 implementation of the InterruptManager. pub const fn new() -> Self { @@ -201,25 +200,10 @@ extern "efiapi" fn synchronous_exception_handler(_exception_type: isize, context } fn dump_pte(far: u64) { - // Needed because attributes on expressions are not stable. - // https://github.com/rust-lang/rust/issues/15701 - #[allow(clippy::needless_late_init)] - let ttbr0_el2; - cfg_if::cfg_if! { - if #[cfg(all(not(test), target_arch = "aarch64"))] { - ttbr0_el2 = read_sysreg!(ttbr0_el2); - } else { - ttbr0_el2 = 0u64; - } - } - - // SAFETY: TTBR0 must be valid as it is the current page table base. + // SAFETY: We are in an exception handler and want to dump the page tables, there is no other active code + // modifying the page tables. if let Ok(pt) = unsafe { - patina_paging::aarch64::AArch64PageTable::from_existing( - ttbr0_el2, - patina_paging::page_allocator::PageAllocatorStub, - PagingType::Paging4Level, - ) + patina_paging::aarch64::AArch64PageTable::open_active(patina_paging::page_allocator::PageAllocatorStub) } { let _ = pt.dump_page_tables(far & !(UEFI_PAGE_MASK as u64), UEFI_PAGE_SIZE as u64); log::error!(""); diff --git a/core/patina_internal_cpu/src/interrupts/stub.rs b/core/patina_internal_cpu/src/interrupts/stub.rs index 96abbaf92..523eb2d48 100644 --- a/core/patina_internal_cpu/src/interrupts/stub.rs +++ b/core/patina_internal_cpu/src/interrupts/stub.rs @@ -7,7 +7,7 @@ //! SPDX-License-Identifier: Apache-2.0 //! 
-use patina::{component::service::IntoService, error::EfiError, pi::protocols::cpu_arch::EfiSystemContext}; +use patina::{error::EfiError, pi::protocols::cpu_arch::EfiSystemContext}; use crate::interrupts::InterruptManager; @@ -43,8 +43,7 @@ pub fn get_interrupt_state() -> Result { } /// Null Implementation of the InterruptManager. -#[derive(Default, Copy, Clone, IntoService)] -#[service(dyn InterruptManager)] +#[derive(Default, Copy, Clone)] pub struct InterruptsStub {} impl InterruptsStub { diff --git a/core/patina_internal_cpu/src/interrupts/x64/interrupt_manager.rs b/core/patina_internal_cpu/src/interrupts/x64/interrupt_manager.rs index 0c09b73bf..d5fa56374 100644 --- a/core/patina_internal_cpu/src/interrupts/x64/interrupt_manager.rs +++ b/core/patina_internal_cpu/src/interrupts/x64/interrupt_manager.rs @@ -10,13 +10,12 @@ use patina::{ base::{UEFI_PAGE_MASK, UEFI_PAGE_SIZE}, bit, - component::service::IntoService, error::EfiError, pi::protocols::cpu_arch::EfiSystemContext, }; #[cfg(target_arch = "x86_64")] use patina_mtrr::Mtrr; -use patina_paging::{PageTable, PagingType}; +use patina_paging::PageTable; use patina_stacktrace::{StackFrame, StackTrace}; use crate::interrupts::{EfiExceptionStackTrace, HandlerType, InterruptManager, x64::ExceptionContextX64}; @@ -25,10 +24,10 @@ use crate::interrupts::{EfiExceptionStackTrace, HandlerType, InterruptManager, x /// /// An x64 version of the InterruptManager for managing IDT based interrupts. /// -#[derive(Default, Copy, Clone, IntoService)] -#[service(dyn InterruptManager)] +#[derive(Default, Copy, Clone)] pub struct InterruptsX64 {} +#[allow(dead_code)] impl InterruptsX64 { /// Creates a new instance of the x64 implementation of the InterruptManager. 
pub const fn new() -> Self { @@ -116,11 +115,7 @@ extern "efiapi" fn page_fault_handler(_exception_type: isize, context: EfiSystem (x64_context as &ExceptionContextX64).dump_system_context_registers(); - let paging_type = - { if x64_context.cr4 & (1 << 12) != 0 { PagingType::Paging5Level } else { PagingType::Paging4Level } }; - - // SAFETY: CR3 and the paging type are correct as they are from the current context. - unsafe { dump_pte(x64_context.cr2, x64_context.cr3, paging_type) }; + dump_pte(x64_context.cr2); log::error!("Dumping Exception Stack Trace:"); let stack_frame = StackFrame { pc: x64_context.rip, sp: x64_context.rsp, fp: x64_context.rbp }; @@ -178,21 +173,17 @@ fn interpret_gp_fault_exception_data(exception_data: u64) { // There is no value in coverage for this function. #[coverage(off)] -/// Dumps the page table entries for the given CR2 and CR3 values. -/// -/// ## Safety +/// Dumps the page table entries for the given CR2. This uses the active page tables as they should be the same as the +/// ones at the time of the fault. /// -/// The caller is responsible for ensuring that the CR3 value is a valid and well-formed page table base address and -/// matches the paging type requested. -unsafe fn dump_pte(cr2: u64, cr3: u64, paging_type: PagingType) { - // SAFETY: Caller must ensure cr3 & paging type are correct. - if let Ok(pt) = unsafe { - patina_paging::x64::X64PageTable::from_existing( - cr3, - patina_paging::page_allocator::PageAllocatorStub, - paging_type, - ) - } { +fn dump_pte(cr2: u64) { + if let Ok(pt) = + // SAFETY: We are in an exception handler and want to dump the page tables, there is no other active code + // modifying the page tables. 
+ unsafe { + patina_paging::x64::X64PageTable::open_active(patina_paging::page_allocator::PageAllocatorStub) + } + { let _ = pt.dump_page_tables(cr2 & !(UEFI_PAGE_MASK as u64), UEFI_PAGE_SIZE as u64); } diff --git a/core/patina_internal_cpu/src/lib.rs b/core/patina_internal_cpu/src/lib.rs index 805bb7f17..d4a6214a0 100644 --- a/core/patina_internal_cpu/src/lib.rs +++ b/core/patina_internal_cpu/src/lib.rs @@ -11,8 +11,8 @@ #![cfg_attr(all(not(feature = "std"), not(test)), no_std)] #![feature(abi_x86_interrupt)] #![feature(coverage_attribute)] -extern crate alloc; pub mod cpu; pub mod interrupts; pub mod paging; +pub mod save_state; diff --git a/core/patina_internal_cpu/src/paging/aarch64.rs b/core/patina_internal_cpu/src/paging/aarch64.rs index 0788993ff..4cf136cab 100644 --- a/core/patina_internal_cpu/src/paging/aarch64.rs +++ b/core/patina_internal_cpu/src/paging/aarch64.rs @@ -8,7 +8,6 @@ //! //! SPDX-License-Identifier: Apache-2.0 //! -use alloc::boxed::Box; use patina_paging::{MemoryAttributes, PageTable, PagingType, PtError, aarch64::AArch64PageTable}; use crate::paging::{CacheAttributeValue, PatinaPageTable}; @@ -56,10 +55,8 @@ where /// Create an AArch64 paging instance under the general PatinaPageTable trait. pub fn create_cpu_aarch64_paging( page_allocator: A, -) -> Result, efi::Status> { - Ok(Box::new(EfiCpuPagingAArch64 { - paging: AArch64PageTable::new(page_allocator, PagingType::Paging4Level).unwrap(), - })) +) -> Result { + Ok(EfiCpuPagingAArch64 { paging: AArch64PageTable::new(page_allocator, PagingType::Paging4Level).unwrap() }) } #[cfg(test)] diff --git a/core/patina_internal_cpu/src/paging/null.rs b/core/patina_internal_cpu/src/paging/null.rs index 6d12b0381..ea2f17baa 100644 --- a/core/patina_internal_cpu/src/paging/null.rs +++ b/core/patina_internal_cpu/src/paging/null.rs @@ -56,6 +56,6 @@ where /// Used to specify that this architecture paging implementation is not supported. 
pub fn create_cpu_null_paging( _page_allocator: A, -) -> Result, efi::Status> { +) -> Result { Err(efi::Status::UNSUPPORTED) } diff --git a/core/patina_internal_cpu/src/paging/x64.rs b/core/patina_internal_cpu/src/paging/x64.rs index db9aadc05..227d1d2e9 100644 --- a/core/patina_internal_cpu/src/paging/x64.rs +++ b/core/patina_internal_cpu/src/paging/x64.rs @@ -9,7 +9,6 @@ //! SPDX-License-Identifier: Apache-2.0 //! use crate::paging::{CacheAttributeValue, PatinaPageTable}; -use alloc::boxed::Box; use patina::error::EfiError; use patina_mtrr::{Mtrr, create_mtrr_lib, error::MtrrError, structs::MtrrMemoryCacheType}; use patina_paging::{ @@ -136,12 +135,12 @@ fn apply_caching_attributes( /// Create an x86_64 paging instance under the general PatinaPageTable trait. pub fn create_cpu_x64_paging( page_allocator: A, -) -> Result, efi::Status> { - Ok(Box::new(EfiCpuPagingX64 { +) -> Result { + Ok(EfiCpuPagingX64 { paging: X64PageTable::new(page_allocator, PagingType::Paging4Level) .map_err(|_| efi::Status::INVALID_PARAMETER)?, mtrr: create_mtrr_lib(0), - })) + }) } fn mtrr_err_to_efi_status(err: MtrrError) -> EfiError { @@ -199,6 +198,7 @@ mod tests { attribute: MtrrMemoryCacheType, ) -> MtrrResult<()>; fn set_memory_attributes(&mut self, ranges: &[MtrrMemoryRange]) -> MtrrResult<()>; + #[allow(refining_impl_trait_internal)] fn get_memory_ranges(&self) -> MtrrResult>; fn debug_print_all_mtrrs(&self); diff --git a/core/patina_internal_cpu/src/save_state/amd.rs b/core/patina_internal_cpu/src/save_state/amd.rs new file mode 100644 index 000000000..6fe0812c3 --- /dev/null +++ b/core/patina_internal_cpu/src/save_state/amd.rs @@ -0,0 +1,558 @@ +//! AMD64 SMRAM Save State Map +//! +//! Register-to-offset lookup table for the AMD 64-bit SMRAM save state +//! layout (`AMD_SMRAM_SAVE_STATE_MAP64`). The save state area starts at +//! `SMBASE + 0xFC00` (the address stored in `CpuSaveState[CpuIndex]`). +//! All offsets are relative to that base. +//! +//! 
Reference: AMD64 Architecture Programmer's Manual Vol 2, Table 10-2; +//! MdePkg `AmdSmramSaveStateMap.h`. +//! +//! ## Key Differences from Intel +//! +//! - Segment selectors are 2 bytes (UINT16) vs Intel's 4-byte fields. +//! - GDT, IDT, and LDT limits **are** supported (Intel returns `None`). +//! - CR4 is 8 bytes (Intel stores only 4 bytes). +//! - All 8-byte registers are stored contiguously (Intel splits DT bases). +//! - IO information uses `IO_DWord` at offset 0x2C0 with a different bit +//! layout from Intel's `IOMisc`. +//! - AMD64 always operates in 64-bit mode during SMM, so the LMA +//! pseudo-register always returns 64-bit without checking EFER.LMA. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 + +use super::{ + MmSaveStateRegister, ParsedIoInfo, RegisterInfo, VendorConstants, IO_TYPE_INPUT, + IO_TYPE_OUTPUT, IO_WIDTH_UINT8, IO_WIDTH_UINT16, IO_WIDTH_UINT32, +}; + +// ============================================================================ +// Vendor Constants +// ============================================================================ + +/// AMD-specific offsets and behaviour constants. +pub static VENDOR_CONSTANTS: VendorConstants = VendorConstants { + smmrevid_offset: 0x02FC, + io_info_offset: 0x02C0, + efer_offset: 0x02D0, + rax_offset: 0x03F8, + min_rev_id_io: 0x30064, + lma_always_64: true, +}; + +// ============================================================================ +// IO_DWord Bit Definitions (AMD-specific) +// ============================================================================ + +/// IO_DWord bit 0: direction (0 = WRITE/OUT, 1 = READ/IN). +const IO_DIRECTION_IN: u32 = 1; +//const IO_DIRECTION_OUT: u32 = 0; + +/// IO_DWord bits \[5:4\]: data size encoding. 
+const IO_SIZE_BYTE: u32 = 0; +const IO_SIZE_WORD: u32 = 1; +//const IO_SIZE_RESERVED: u32 = 2; +const IO_SIZE_DWORD: u32 = 3; + +// ============================================================================ +// Register Map +// ============================================================================ +// +// AMD save state struct layout (from SMBASE + 0xFC00): +// +// +0x000 – 0x1FF: Reserved (padding). +// +0x200 – 0x25F: Segment descriptors (ES, CS, SS, DS, FS, GS) — 16 bytes each. +// +0x260 – 0x29F: System descriptors (GDTR, IDTR, LDTR, TR) — 16 bytes each. +// +0x2A0 – 0x2BF: MSRs (KernelGsBase, STAR, LSTAR, CSTAR). +// +0x2C0: IO_DWord (4 bytes). +// +0x2D0: EFER (8 bytes). +// +0x2FC: SMMRevId (4 bytes). +// +0x338 – 0x3FF: Registers (DR7, DR6, CR4, CR3, CR0, RFLAGS, RIP, R15..R8, +// RBP, RSP, RBX, RDI, RSI, RDX, RCX, RAX). +// +// Each segment descriptor is 16 bytes: +// +0: Selector (UINT16) +// +2: Attributes (UINT16) +// +4: Limit (UINT32) +// +8: BaseLoDword (UINT32) +// +C: BaseHiDword (UINT32) + +/// Looks up the AMD64 save state register info for a PI register. +/// +/// Returns `None` for pseudo-registers (IO, LMA, ProcessorId) and for +/// `LdtInfo` (not supported in AMD's save state map). +pub fn register_info(reg: MmSaveStateRegister) -> Option { + match reg { + // ================================================================ + // Descriptor Table Bases (8-byte, contiguous on AMD) + // ================================================================ + MmSaveStateRegister::GdtBase => Some(RegisterInfo { + lo_offset: 0x0268, + hi_offset: 0x026C, + native_width: 8, + }), + MmSaveStateRegister::IdtBase => Some(RegisterInfo { + lo_offset: 0x0278, + hi_offset: 0x027C, + native_width: 8, + }), + // NOTE: The C reference code has a copy-paste bug where both lo and + // hi point to `_LDTRBaseLoDword` (0x288). The correct hi offset is + // `_LDTRBaseHiDword` (0x28C). 
+ MmSaveStateRegister::LdtBase => Some(RegisterInfo { + lo_offset: 0x0288, + hi_offset: 0x028C, + native_width: 8, + }), + + // ================================================================ + // Descriptor Table Limits (supported on AMD, not on Intel) + // ================================================================ + // + // GDT and IDT limits are architecturally 16-bit, stored in UINT32 + // fields. Only the lower 2 bytes are meaningful. + MmSaveStateRegister::GdtLimit => Some(RegisterInfo { + lo_offset: 0x0264, + hi_offset: 0, + native_width: 2, + }), + MmSaveStateRegister::IdtLimit => Some(RegisterInfo { + lo_offset: 0x0274, + hi_offset: 0, + native_width: 2, + }), + // LDT limit is a system-segment limit (up to 32 bits in long mode). + MmSaveStateRegister::LdtLimit => Some(RegisterInfo { + lo_offset: 0x0284, + hi_offset: 0, + native_width: 4, + }), + + // LdtInfo is not supported. + MmSaveStateRegister::LdtInfo => None, + + // ================================================================ + // Segment Selectors (2-byte on AMD — UINT16 selectors only) + // ================================================================ + MmSaveStateRegister::Es => Some(RegisterInfo { + lo_offset: 0x0200, + hi_offset: 0, + native_width: 2, + }), + MmSaveStateRegister::Cs => Some(RegisterInfo { + lo_offset: 0x0210, + hi_offset: 0, + native_width: 2, + }), + MmSaveStateRegister::Ss => Some(RegisterInfo { + lo_offset: 0x0220, + hi_offset: 0, + native_width: 2, + }), + MmSaveStateRegister::Ds => Some(RegisterInfo { + lo_offset: 0x0230, + hi_offset: 0, + native_width: 2, + }), + MmSaveStateRegister::Fs => Some(RegisterInfo { + lo_offset: 0x0240, + hi_offset: 0, + native_width: 2, + }), + MmSaveStateRegister::Gs => Some(RegisterInfo { + lo_offset: 0x0250, + hi_offset: 0, + native_width: 2, + }), + MmSaveStateRegister::LdtrSel => Some(RegisterInfo { + lo_offset: 0x0280, + hi_offset: 0, + native_width: 2, + }), + MmSaveStateRegister::TrSel => Some(RegisterInfo { + lo_offset: 
0x0290, + hi_offset: 0, + native_width: 2, + }), + + // ================================================================ + // Debug Registers (8-byte, contiguous) + // ================================================================ + MmSaveStateRegister::Dr7 => Some(RegisterInfo { + lo_offset: 0x0338, + hi_offset: 0x033C, + native_width: 8, + }), + MmSaveStateRegister::Dr6 => Some(RegisterInfo { + lo_offset: 0x0340, + hi_offset: 0x0344, + native_width: 8, + }), + + // ================================================================ + // Extended Registers R8–R15 (8-byte, contiguous) + // + // Stored in reverse order: R8 is at the highest address (0x3B8) + // down to R15 at 0x380. + // ================================================================ + MmSaveStateRegister::R8 => Some(RegisterInfo { + lo_offset: 0x03B8, + hi_offset: 0x03BC, + native_width: 8, + }), + MmSaveStateRegister::R9 => Some(RegisterInfo { + lo_offset: 0x03B0, + hi_offset: 0x03B4, + native_width: 8, + }), + MmSaveStateRegister::R10 => Some(RegisterInfo { + lo_offset: 0x03A8, + hi_offset: 0x03AC, + native_width: 8, + }), + MmSaveStateRegister::R11 => Some(RegisterInfo { + lo_offset: 0x03A0, + hi_offset: 0x03A4, + native_width: 8, + }), + MmSaveStateRegister::R12 => Some(RegisterInfo { + lo_offset: 0x0398, + hi_offset: 0x039C, + native_width: 8, + }), + MmSaveStateRegister::R13 => Some(RegisterInfo { + lo_offset: 0x0390, + hi_offset: 0x0394, + native_width: 8, + }), + MmSaveStateRegister::R14 => Some(RegisterInfo { + lo_offset: 0x0388, + hi_offset: 0x038C, + native_width: 8, + }), + MmSaveStateRegister::R15 => Some(RegisterInfo { + lo_offset: 0x0380, + hi_offset: 0x0384, + native_width: 8, + }), + + // ================================================================ + // General-Purpose Registers (8-byte, contiguous) + // + // AMD order (highest to lowest): + // RAX, RCX, RDX, RSI, RDI, RBX, RSP, RBP + // ================================================================ + MmSaveStateRegister::Rax 
=> Some(RegisterInfo { + lo_offset: 0x03F8, + hi_offset: 0x03FC, + native_width: 8, + }), + MmSaveStateRegister::Rbx => Some(RegisterInfo { + lo_offset: 0x03D0, + hi_offset: 0x03D4, + native_width: 8, + }), + MmSaveStateRegister::Rcx => Some(RegisterInfo { + lo_offset: 0x03F0, + hi_offset: 0x03F4, + native_width: 8, + }), + MmSaveStateRegister::Rdx => Some(RegisterInfo { + lo_offset: 0x03E8, + hi_offset: 0x03EC, + native_width: 8, + }), + MmSaveStateRegister::Rsp => Some(RegisterInfo { + lo_offset: 0x03C8, + hi_offset: 0x03CC, + native_width: 8, + }), + MmSaveStateRegister::Rbp => Some(RegisterInfo { + lo_offset: 0x03C0, + hi_offset: 0x03C4, + native_width: 8, + }), + MmSaveStateRegister::Rsi => Some(RegisterInfo { + lo_offset: 0x03E0, + hi_offset: 0x03E4, + native_width: 8, + }), + MmSaveStateRegister::Rdi => Some(RegisterInfo { + lo_offset: 0x03D8, + hi_offset: 0x03DC, + native_width: 8, + }), + MmSaveStateRegister::Rip => Some(RegisterInfo { + lo_offset: 0x0378, + hi_offset: 0x037C, + native_width: 8, + }), + + // ================================================================ + // Flags and Control Registers + // ================================================================ + MmSaveStateRegister::Rflags => Some(RegisterInfo { + lo_offset: 0x0370, + hi_offset: 0x0374, + native_width: 8, + }), + MmSaveStateRegister::Cr0 => Some(RegisterInfo { + lo_offset: 0x0358, + hi_offset: 0x035C, + native_width: 8, + }), + MmSaveStateRegister::Cr3 => Some(RegisterInfo { + lo_offset: 0x0350, + hi_offset: 0x0354, + native_width: 8, + }), + // CR4 is 8 bytes on AMD (vs 4 bytes on Intel). + MmSaveStateRegister::Cr4 => Some(RegisterInfo { + lo_offset: 0x0348, + hi_offset: 0x034C, + native_width: 8, + }), + + // Pseudo-registers are not in the architectural register map. 
+ MmSaveStateRegister::Io | MmSaveStateRegister::Lma | MmSaveStateRegister::ProcessorId => { + None + } + } +} + +// ============================================================================ +// I/O Field Parsing (AMD IO_DWord) +// ============================================================================ + +/// Parses AMD's `IO_DWord` field from the SMRAM save state. +/// +/// AMD `IO_DWord` bit layout: +/// - Bit 0: Direction — 0 = WRITE (OUT), 1 = READ (IN). +/// - Bits \[3:1\]: Reserved. +/// - Bits \[5:4\]: Data size — 0 = byte, 1 = word, 3 = dword. +/// - Bits \[15:6\]: Reserved. +/// - Bits \[31:16\]: I/O port address. +/// +/// Returns `None` if the data-size encoding is invalid (value 2 is reserved). +pub fn parse_io_field(io_field: u32) -> Option { + let direction = io_field & 1; + let size_enc = (io_field >> 4) & 0x3; + let port = (io_field >> 16) & 0xFFFF; + + let io_type = if direction == IO_DIRECTION_IN { + IO_TYPE_INPUT + } else { + IO_TYPE_OUTPUT + }; + + let (io_width, byte_count) = match size_enc { + IO_SIZE_BYTE => (IO_WIDTH_UINT8, 1usize), + IO_SIZE_WORD => (IO_WIDTH_UINT16, 2usize), + IO_SIZE_DWORD => (IO_WIDTH_UINT32, 4usize), + _ => return None, // Reserved encoding. 
+ }; + + Some(ParsedIoInfo { + io_type, + io_width, + byte_count, + io_port: port, + }) +} + +// ============================================================================ +// Tests +// ============================================================================ + +#[cfg(test)] +mod tests { + use super::*; + + // ---------------------------------------------------------------- + // Register map tests + // ---------------------------------------------------------------- + + #[test] + fn test_gpr_offsets() { + let rax = register_info(MmSaveStateRegister::Rax).unwrap(); + assert_eq!(rax.lo_offset, 0x03F8); + assert_eq!(rax.hi_offset, 0x03FC); + assert_eq!(rax.native_width, 8); + + let rcx = register_info(MmSaveStateRegister::Rcx).unwrap(); + assert_eq!(rcx.lo_offset, 0x03F0); + assert_eq!(rcx.hi_offset, 0x03F4); + } + + #[test] + fn test_segment_selectors_are_2_bytes() { + let cs = register_info(MmSaveStateRegister::Cs).unwrap(); + assert_eq!(cs.lo_offset, 0x0210); + assert_eq!(cs.native_width, 2); + assert_eq!(cs.hi_offset, 0); + } + + #[test] + fn test_descriptor_table_bases_contiguous() { + let gdt = register_info(MmSaveStateRegister::GdtBase).unwrap(); + assert_eq!(gdt.lo_offset, 0x0268); + assert_eq!(gdt.hi_offset, 0x026C); + assert_eq!(gdt.native_width, 8); + // AMD uses contiguous lo/hi (unlike Intel's split layout). 
+ assert_eq!(gdt.hi_offset, gdt.lo_offset + 4); + } + + #[test] + fn test_limits_supported_on_amd() { + let gdt_limit = register_info(MmSaveStateRegister::GdtLimit).unwrap(); + assert_eq!(gdt_limit.native_width, 2); + assert_eq!(gdt_limit.lo_offset, 0x0264); + + let idt_limit = register_info(MmSaveStateRegister::IdtLimit).unwrap(); + assert_eq!(idt_limit.native_width, 2); + assert_eq!(idt_limit.lo_offset, 0x0274); + + let ldt_limit = register_info(MmSaveStateRegister::LdtLimit).unwrap(); + assert_eq!(ldt_limit.native_width, 4); + assert_eq!(ldt_limit.lo_offset, 0x0284); + } + + #[test] + fn test_cr4_is_8_bytes_on_amd() { + let cr4 = register_info(MmSaveStateRegister::Cr4).unwrap(); + assert_eq!(cr4.native_width, 8); + assert_eq!(cr4.lo_offset, 0x0348); + assert_eq!(cr4.hi_offset, 0x034C); + } + + #[test] + fn test_ldt_info_unsupported() { + assert!(register_info(MmSaveStateRegister::LdtInfo).is_none()); + } + + #[test] + fn test_pseudo_registers_return_none() { + assert!(register_info(MmSaveStateRegister::Io).is_none()); + assert!(register_info(MmSaveStateRegister::Lma).is_none()); + assert!(register_info(MmSaveStateRegister::ProcessorId).is_none()); + } + + #[test] + fn test_register_coverage() { + // All architectural registers except LdtInfo should be supported. 
+ let supported_regs = [ + MmSaveStateRegister::GdtBase, + MmSaveStateRegister::IdtBase, + MmSaveStateRegister::LdtBase, + MmSaveStateRegister::GdtLimit, + MmSaveStateRegister::IdtLimit, + MmSaveStateRegister::LdtLimit, + MmSaveStateRegister::Es, + MmSaveStateRegister::Cs, + MmSaveStateRegister::Ss, + MmSaveStateRegister::Ds, + MmSaveStateRegister::Fs, + MmSaveStateRegister::Gs, + MmSaveStateRegister::LdtrSel, + MmSaveStateRegister::TrSel, + MmSaveStateRegister::Dr7, + MmSaveStateRegister::Dr6, + MmSaveStateRegister::R8, + MmSaveStateRegister::R9, + MmSaveStateRegister::R10, + MmSaveStateRegister::R11, + MmSaveStateRegister::R12, + MmSaveStateRegister::R13, + MmSaveStateRegister::R14, + MmSaveStateRegister::R15, + MmSaveStateRegister::Rax, + MmSaveStateRegister::Rbx, + MmSaveStateRegister::Rcx, + MmSaveStateRegister::Rdx, + MmSaveStateRegister::Rsp, + MmSaveStateRegister::Rbp, + MmSaveStateRegister::Rsi, + MmSaveStateRegister::Rdi, + MmSaveStateRegister::Rip, + MmSaveStateRegister::Rflags, + MmSaveStateRegister::Cr0, + MmSaveStateRegister::Cr3, + MmSaveStateRegister::Cr4, + ]; + + for reg in &supported_regs { + assert!( + register_info(*reg).is_some(), + "Missing AMD lookup for {:?}", + reg + ); + } + } + + // ---------------------------------------------------------------- + // IO_DWord parsing tests + // ---------------------------------------------------------------- + + #[test] + fn test_parse_io_field_in_byte() { + // Direction=1 (IN), Size=0 (byte), Port=0x80 + let io_field: u32 = (0x0080 << 16) | (IO_SIZE_BYTE << 4) | IO_DIRECTION_IN; + let parsed = parse_io_field(io_field).unwrap(); + assert_eq!(parsed.io_type, IO_TYPE_INPUT); + assert_eq!(parsed.io_width, IO_WIDTH_UINT8); + assert_eq!(parsed.byte_count, 1); + assert_eq!(parsed.io_port, 0x80); + } + + #[test] + fn test_parse_io_field_out_dword() { + // Direction=0 (OUT), Size=3 (dword), Port=0xCF8 + let io_field: u32 = (0x0CF8 << 16) | (IO_SIZE_DWORD << 4) | 0; + let parsed = 
parse_io_field(io_field).unwrap(); + assert_eq!(parsed.io_type, IO_TYPE_OUTPUT); + assert_eq!(parsed.io_width, IO_WIDTH_UINT32); + assert_eq!(parsed.byte_count, 4); + assert_eq!(parsed.io_port, 0x0CF8); + } + + #[test] + fn test_parse_io_field_in_word() { + // Direction=1 (IN), Size=1 (word), Port=0x3F8 + let io_field: u32 = (0x03F8 << 16) | (IO_SIZE_WORD << 4) | IO_DIRECTION_IN; + let parsed = parse_io_field(io_field).unwrap(); + assert_eq!(parsed.io_type, IO_TYPE_INPUT); + assert_eq!(parsed.io_width, IO_WIDTH_UINT16); + assert_eq!(parsed.byte_count, 2); + assert_eq!(parsed.io_port, 0x03F8); + } + + #[test] + fn test_parse_io_field_reserved_size() { + // Direction=0, Size=2 (reserved) → None + let io_field: u32 = (0x0080 << 16) | (2 << 4) | 0; + assert!(parse_io_field(io_field).is_none()); + } + + #[test] + fn test_idt_base_hi_is_correct() { + // Verify we use the correct hi offset (0x27C), not the buggy + // C code value (0x278 = lo offset repeated). + let idt = register_info(MmSaveStateRegister::IdtBase).unwrap(); + assert_eq!(idt.lo_offset, 0x0278); + assert_eq!(idt.hi_offset, 0x027C); + assert_ne!(idt.lo_offset, idt.hi_offset, "hi must differ from lo"); + } + + #[test] + fn test_ldt_base_hi_is_correct() { + // Same bug fix for LdtBase: hi should be 0x28C, not 0x288. + let ldt = register_info(MmSaveStateRegister::LdtBase).unwrap(); + assert_eq!(ldt.lo_offset, 0x0288); + assert_eq!(ldt.hi_offset, 0x028C); + assert_ne!(ldt.lo_offset, ldt.hi_offset, "hi must differ from lo"); + } +} diff --git a/core/patina_internal_cpu/src/save_state/intel.rs b/core/patina_internal_cpu/src/save_state/intel.rs new file mode 100644 index 000000000..dc8396cd0 --- /dev/null +++ b/core/patina_internal_cpu/src/save_state/intel.rs @@ -0,0 +1,442 @@ +//! Intel x64 SMRAM Save State Map +//! +//! Register-to-offset lookup table for the Intel 64-bit SMRAM save state +//! layout (`SMRAM_SAVE_STATE_MAP64`). The save state area starts at +//! 
`SMBASE + 0x7C00` (the address stored in `CpuSaveState[CpuIndex]`). +//! +//! Reference: Intel SDM Vol 3C, Table 31-3; MdePkg `SmramSaveStateMap.h`. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 + +use super::{ + MmSaveStateRegister, ParsedIoInfo, RegisterInfo, VendorConstants, IO_TYPE_INPUT, + IO_TYPE_OUTPUT, IO_WIDTH_UINT8, IO_WIDTH_UINT16, IO_WIDTH_UINT32, +}; + +// ============================================================================ +// Vendor Constants +// ============================================================================ + +/// Intel-specific offsets and behaviour constants. +pub static VENDOR_CONSTANTS: VendorConstants = VendorConstants { + smmrevid_offset: 0x02FC, + io_info_offset: 0x03A4, + efer_offset: 0x03E0, + rax_offset: 0x035C, + min_rev_id_io: 0x30004, + lma_always_64: false, +}; + +// ============================================================================ +// IOMisc Bit Definitions (Intel-specific) +// ============================================================================ + +/// IOMisc Type field value: OUT instruction. +const IOMISC_TYPE_OUT: u32 = 0; +/// IOMisc Type field value: IN instruction. +const IOMISC_TYPE_IN: u32 = 1; + +// ============================================================================ +// Register Map +// ============================================================================ + +/// Looks up the Intel x64 save state register info for a PI register. +/// +/// Returns `None` for pseudo-registers (IO, LMA, ProcessorId) — those are +/// handled separately — and for unsupported registers (limits, LdtInfo). 
+pub fn register_info(reg: MmSaveStateRegister) -> Option { + match reg { + // Descriptor table bases (split hi/lo dwords — non-contiguous on Intel) + MmSaveStateRegister::GdtBase => Some(RegisterInfo { + lo_offset: 0x028C, + hi_offset: 0x01D0, + native_width: 8, + }), + MmSaveStateRegister::IdtBase => Some(RegisterInfo { + lo_offset: 0x0294, + hi_offset: 0x01D8, + native_width: 8, + }), + MmSaveStateRegister::LdtBase => Some(RegisterInfo { + lo_offset: 0x029C, + hi_offset: 0x01D4, + native_width: 8, + }), + + // Limits / LdtInfo — not supported in Intel 64-bit save state map. + MmSaveStateRegister::GdtLimit + | MmSaveStateRegister::IdtLimit + | MmSaveStateRegister::LdtLimit + | MmSaveStateRegister::LdtInfo => None, + + // Segment selectors (4-byte fields on Intel) + MmSaveStateRegister::Es => Some(RegisterInfo { + lo_offset: 0x03A8, + hi_offset: 0, + native_width: 4, + }), + MmSaveStateRegister::Cs => Some(RegisterInfo { + lo_offset: 0x03AC, + hi_offset: 0, + native_width: 4, + }), + MmSaveStateRegister::Ss => Some(RegisterInfo { + lo_offset: 0x03B0, + hi_offset: 0, + native_width: 4, + }), + MmSaveStateRegister::Ds => Some(RegisterInfo { + lo_offset: 0x03B4, + hi_offset: 0, + native_width: 4, + }), + MmSaveStateRegister::Fs => Some(RegisterInfo { + lo_offset: 0x03B8, + hi_offset: 0, + native_width: 4, + }), + MmSaveStateRegister::Gs => Some(RegisterInfo { + lo_offset: 0x03BC, + hi_offset: 0, + native_width: 4, + }), + MmSaveStateRegister::LdtrSel => Some(RegisterInfo { + lo_offset: 0x03C0, + hi_offset: 0, + native_width: 4, + }), + MmSaveStateRegister::TrSel => Some(RegisterInfo { + lo_offset: 0x03C4, + hi_offset: 0, + native_width: 4, + }), + + // Debug registers (8-byte, contiguous) + MmSaveStateRegister::Dr7 => Some(RegisterInfo { + lo_offset: 0x03C8, + hi_offset: 0x03CC, + native_width: 8, + }), + MmSaveStateRegister::Dr6 => Some(RegisterInfo { + lo_offset: 0x03D0, + hi_offset: 0x03D4, + native_width: 8, + }), + + // Extended registers R8–R15 (8-byte, 
contiguous, descending addresses) + MmSaveStateRegister::R8 => Some(RegisterInfo { + lo_offset: 0x0354, + hi_offset: 0x0358, + native_width: 8, + }), + MmSaveStateRegister::R9 => Some(RegisterInfo { + lo_offset: 0x034C, + hi_offset: 0x0350, + native_width: 8, + }), + MmSaveStateRegister::R10 => Some(RegisterInfo { + lo_offset: 0x0344, + hi_offset: 0x0348, + native_width: 8, + }), + MmSaveStateRegister::R11 => Some(RegisterInfo { + lo_offset: 0x033C, + hi_offset: 0x0340, + native_width: 8, + }), + MmSaveStateRegister::R12 => Some(RegisterInfo { + lo_offset: 0x0334, + hi_offset: 0x0338, + native_width: 8, + }), + MmSaveStateRegister::R13 => Some(RegisterInfo { + lo_offset: 0x032C, + hi_offset: 0x0330, + native_width: 8, + }), + MmSaveStateRegister::R14 => Some(RegisterInfo { + lo_offset: 0x0324, + hi_offset: 0x0328, + native_width: 8, + }), + MmSaveStateRegister::R15 => Some(RegisterInfo { + lo_offset: 0x031C, + hi_offset: 0x0320, + native_width: 8, + }), + + // General-purpose registers (8-byte, contiguous) + MmSaveStateRegister::Rax => Some(RegisterInfo { + lo_offset: 0x035C, + hi_offset: 0x0360, + native_width: 8, + }), + MmSaveStateRegister::Rbx => Some(RegisterInfo { + lo_offset: 0x0374, + hi_offset: 0x0378, + native_width: 8, + }), + MmSaveStateRegister::Rcx => Some(RegisterInfo { + lo_offset: 0x0364, + hi_offset: 0x0368, + native_width: 8, + }), + MmSaveStateRegister::Rdx => Some(RegisterInfo { + lo_offset: 0x036C, + hi_offset: 0x0370, + native_width: 8, + }), + MmSaveStateRegister::Rsp => Some(RegisterInfo { + lo_offset: 0x037C, + hi_offset: 0x0380, + native_width: 8, + }), + MmSaveStateRegister::Rbp => Some(RegisterInfo { + lo_offset: 0x0384, + hi_offset: 0x0388, + native_width: 8, + }), + MmSaveStateRegister::Rsi => Some(RegisterInfo { + lo_offset: 0x038C, + hi_offset: 0x0390, + native_width: 8, + }), + MmSaveStateRegister::Rdi => Some(RegisterInfo { + lo_offset: 0x0394, + hi_offset: 0x0398, + native_width: 8, + }), + MmSaveStateRegister::Rip => 
Some(RegisterInfo { + lo_offset: 0x03D8, + hi_offset: 0x03DC, + native_width: 8, + }), + + // Flags and control registers + MmSaveStateRegister::Rflags => Some(RegisterInfo { + lo_offset: 0x03E8, + hi_offset: 0x03EC, + native_width: 8, + }), + MmSaveStateRegister::Cr0 => Some(RegisterInfo { + lo_offset: 0x03F8, + hi_offset: 0x03FC, + native_width: 8, + }), + MmSaveStateRegister::Cr3 => Some(RegisterInfo { + lo_offset: 0x03F0, + hi_offset: 0x03F4, + native_width: 8, + }), + // CR4 is only 4 bytes in the Intel x64 save state map. + MmSaveStateRegister::Cr4 => Some(RegisterInfo { + lo_offset: 0x0240, + hi_offset: 0, + native_width: 4, + }), + + // Pseudo-registers are not in the architectural register map. + MmSaveStateRegister::Io | MmSaveStateRegister::Lma | MmSaveStateRegister::ProcessorId => { + None + } + } +} + +// ============================================================================ +// I/O Field Parsing (Intel IOMisc) +// ============================================================================ + +/// Parses Intel's `IOMisc` field from the SMRAM save state. +/// +/// Intel IOMisc bit layout: +/// - Bit 0: `SmiFlag` — 1 if the SMI was caused by an I/O instruction. +/// - Bits \[3:1\]: `Length` — I/O width in bytes (1, 2, or 4). +/// - Bits \[7:4\]: `Type` — 0 = OUT, 1 = IN. +/// - Bits \[31:16\]: `Port` — I/O port address. +/// +/// Returns `None` if `SmiFlag` is 0 (SMI was not caused by I/O) or the I/O +/// type is not a simple IN or OUT (e.g. string / REP I/O). +pub fn parse_io_field(io_field: u32) -> Option { + // Check SmiFlag. + let smi_flag = io_field & 1; + if smi_flag == 0 { + return None; + } + + let length = (io_field >> 1) & 0x7; + let io_type_raw = (io_field >> 4) & 0xF; + let port = (io_field >> 16) & 0xFFFF; + + // Only simple IN/OUT are supported. + let io_type = match io_type_raw { + IOMISC_TYPE_OUT => IO_TYPE_OUTPUT, + IOMISC_TYPE_IN => IO_TYPE_INPUT, + _ => return None, + }; + + // Map length to IO width enum and byte count. 
+ let (io_width, byte_count) = match length { + 1 => (IO_WIDTH_UINT8, 1usize), + 2 => (IO_WIDTH_UINT16, 2usize), + 4 => (IO_WIDTH_UINT32, 4usize), + _ => return None, + }; + + Some(ParsedIoInfo { + io_type, + io_width, + byte_count, + io_port: port, + }) +} + +// ============================================================================ +// Tests +// ============================================================================ + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_gpr_offsets() { + let rax = register_info(MmSaveStateRegister::Rax).unwrap(); + assert_eq!(rax.lo_offset, 0x035C); + assert_eq!(rax.hi_offset, 0x0360); + assert_eq!(rax.native_width, 8); + + let r15 = register_info(MmSaveStateRegister::R15).unwrap(); + assert_eq!(r15.lo_offset, 0x031C); + assert_eq!(r15.hi_offset, 0x0320); + assert_eq!(r15.native_width, 8); + } + + #[test] + fn test_segment_selectors() { + let es = register_info(MmSaveStateRegister::Es).unwrap(); + assert_eq!(es.lo_offset, 0x03A8); + assert_eq!(es.hi_offset, 0); + assert_eq!(es.native_width, 4); + } + + #[test] + fn test_descriptor_table_bases_are_split() { + let gdt = register_info(MmSaveStateRegister::GdtBase).unwrap(); + assert_eq!(gdt.lo_offset, 0x028C); + assert_eq!(gdt.hi_offset, 0x01D0); + assert_eq!(gdt.native_width, 8); + // Verify they are non-contiguous on Intel. 
+ assert_ne!(gdt.hi_offset, gdt.lo_offset + 4); + } + + #[test] + fn test_limits_unsupported_on_intel() { + assert!(register_info(MmSaveStateRegister::GdtLimit).is_none()); + assert!(register_info(MmSaveStateRegister::IdtLimit).is_none()); + assert!(register_info(MmSaveStateRegister::LdtLimit).is_none()); + assert!(register_info(MmSaveStateRegister::LdtInfo).is_none()); + } + + #[test] + fn test_cr4_is_4_bytes_on_intel() { + let cr4 = register_info(MmSaveStateRegister::Cr4).unwrap(); + assert_eq!(cr4.native_width, 4); + assert_eq!(cr4.hi_offset, 0); + } + + #[test] + fn test_pseudo_registers_return_none() { + assert!(register_info(MmSaveStateRegister::Io).is_none()); + assert!(register_info(MmSaveStateRegister::Lma).is_none()); + assert!(register_info(MmSaveStateRegister::ProcessorId).is_none()); + } + + #[test] + fn test_parse_io_field_in() { + // SmiFlag=1, Length=1 (byte), Type=1 (IN), Port=0x80 + // Bits: Port(31:16)=0x0080, Type(7:4)=1, Length(3:1)=1, SmiFlag(0)=1 + let io_field: u32 = (0x0080 << 16) | (1 << 4) | (1 << 1) | 1; + let parsed = parse_io_field(io_field).unwrap(); + assert_eq!(parsed.io_type, IO_TYPE_INPUT); + assert_eq!(parsed.io_width, IO_WIDTH_UINT8); + assert_eq!(parsed.byte_count, 1); + assert_eq!(parsed.io_port, 0x80); + } + + #[test] + fn test_parse_io_field_out() { + // SmiFlag=1, Length=4 (dword), Type=0 (OUT), Port=0xCF8 + let io_field: u32 = (0x0CF8 << 16) | (0 << 4) | (4 << 1) | 1; + let parsed = parse_io_field(io_field).unwrap(); + assert_eq!(parsed.io_type, IO_TYPE_OUTPUT); + assert_eq!(parsed.io_width, IO_WIDTH_UINT32); + assert_eq!(parsed.byte_count, 4); + assert_eq!(parsed.io_port, 0x0CF8); + } + + #[test] + fn test_parse_io_field_no_smi_flag() { + // SmiFlag=0 → should return None + let io_field: u32 = (0x0080 << 16) | (1 << 4) | (1 << 1) | 0; + assert!(parse_io_field(io_field).is_none()); + } + + #[test] + fn test_parse_io_field_string_io() { + // SmiFlag=1, Length=1, Type=4 (string, not IN/OUT) → None + let io_field: u32 = 
(0x0080 << 16) | (4 << 4) | (1 << 1) | 1; + assert!(parse_io_field(io_field).is_none()); + } + + #[test] + fn test_register_coverage() { + let architectural_regs = [ + MmSaveStateRegister::GdtBase, + MmSaveStateRegister::IdtBase, + MmSaveStateRegister::LdtBase, + MmSaveStateRegister::Es, + MmSaveStateRegister::Cs, + MmSaveStateRegister::Ss, + MmSaveStateRegister::Ds, + MmSaveStateRegister::Fs, + MmSaveStateRegister::Gs, + MmSaveStateRegister::LdtrSel, + MmSaveStateRegister::TrSel, + MmSaveStateRegister::Dr7, + MmSaveStateRegister::Dr6, + MmSaveStateRegister::R8, + MmSaveStateRegister::R9, + MmSaveStateRegister::R10, + MmSaveStateRegister::R11, + MmSaveStateRegister::R12, + MmSaveStateRegister::R13, + MmSaveStateRegister::R14, + MmSaveStateRegister::R15, + MmSaveStateRegister::Rax, + MmSaveStateRegister::Rbx, + MmSaveStateRegister::Rcx, + MmSaveStateRegister::Rdx, + MmSaveStateRegister::Rsp, + MmSaveStateRegister::Rbp, + MmSaveStateRegister::Rsi, + MmSaveStateRegister::Rdi, + MmSaveStateRegister::Rip, + MmSaveStateRegister::Rflags, + MmSaveStateRegister::Cr0, + MmSaveStateRegister::Cr3, + MmSaveStateRegister::Cr4, + ]; + + for reg in &architectural_regs { + assert!( + register_info(*reg).is_some(), + "Missing Intel lookup for {:?}", + reg + ); + } + } +} diff --git a/core/patina_internal_cpu/src/save_state/mod.rs b/core/patina_internal_cpu/src/save_state/mod.rs new file mode 100644 index 000000000..9dd591fed --- /dev/null +++ b/core/patina_internal_cpu/src/save_state/mod.rs @@ -0,0 +1,397 @@ +//! SMRAM Save State Architecture Definitions +//! +//! Provides platform-independent types and feature-gated vendor-specific +//! register maps for reading the SMRAM save state area. +//! +//! ## Vendor Selection +//! +//! The active vendor is selected at **build time** via Cargo features on the +//! `patina_internal_cpu` crate: +//! +//! - `save_state_intel` — Intel x64 SMRAM save state map. +//! - `save_state_amd` — AMD64 SMRAM save state map. +//! +//! 
Exactly one must be enabled. Re-exported items are available through the +//! `register_info()` function, which resolves to the active vendor's lookup at +//! compile time. +//! +//! ## Overview +//! +//! The common types (`MmSaveStateRegister`, `RegisterInfo`, etc.) are always +//! available. The vendor modules contain only the register-to-offset lookup +//! table and any vendor-specific constants (e.g. IO trap field layout). +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 + +// Vendor-specific submodules — exactly one is compiled in. +#[cfg(feature = "save_state_intel")] +pub mod intel; + +#[cfg(feature = "save_state_amd")] +pub mod amd; + +// ============================================================================ +// EFI_MM_SAVE_STATE_REGISTER +// ============================================================================ + +/// `EFI_MM_SAVE_STATE_REGISTER` values from the PI Specification. +/// +/// These correspond to the registers that can be read from the SMRAM save +/// state area via `EFI_MM_CPU_PROTOCOL.ReadSaveState()`. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(u64)] +pub enum MmSaveStateRegister { + // Descriptor table bases + /// GDT base address. + GdtBase = 4, + /// IDT base address. + IdtBase = 5, + /// LDT base address. + LdtBase = 6, + /// GDT limit. + GdtLimit = 7, + /// IDT limit. + IdtLimit = 8, + /// LDT limit. + LdtLimit = 9, + /// LDT information. + LdtInfo = 10, + + // Segment selectors + /// ES selector. + Es = 20, + /// CS selector. + Cs = 21, + /// SS selector. + Ss = 22, + /// DS selector. + Ds = 23, + /// FS selector. + Fs = 24, + /// GS selector. + Gs = 25, + /// LDTR selector. + LdtrSel = 26, + /// TR selector. + TrSel = 27, + + // Debug registers + /// DR7. + Dr7 = 28, + /// DR6. + Dr6 = 29, + + // Extended general-purpose registers (x86_64 only) + /// R8. + R8 = 30, + /// R9. + R9 = 31, + /// R10. + R10 = 32, + /// R11. + R11 = 33, + /// R12. 
+ R12 = 34, + /// R13. + R13 = 35, + /// R14. + R14 = 36, + /// R15. + R15 = 37, + + // General-purpose registers + /// RAX. + Rax = 38, + /// RBX. + Rbx = 39, + /// RCX. + Rcx = 40, + /// RDX. + Rdx = 41, + /// RSP. + Rsp = 42, + /// RBP. + Rbp = 43, + /// RSI. + Rsi = 44, + /// RDI. + Rdi = 45, + /// RIP. + Rip = 46, + + // Flags and control registers + /// RFLAGS. + Rflags = 51, + /// CR0. + Cr0 = 52, + /// CR3. + Cr3 = 53, + /// CR4. + Cr4 = 54, + + // Pseudo-registers + /// I/O operation information. + Io = 512, + /// Long Mode Active indicator. + Lma = 513, + /// Processor identifier (APIC ID). + ProcessorId = 514, +} + +impl MmSaveStateRegister { + /// Creates a register from a raw `EFI_MM_SAVE_STATE_REGISTER` value. + pub fn from_u64(value: u64) -> Option { + match value { + 4 => Some(Self::GdtBase), + 5 => Some(Self::IdtBase), + 6 => Some(Self::LdtBase), + 7 => Some(Self::GdtLimit), + 8 => Some(Self::IdtLimit), + 9 => Some(Self::LdtLimit), + 10 => Some(Self::LdtInfo), + 20 => Some(Self::Es), + 21 => Some(Self::Cs), + 22 => Some(Self::Ss), + 23 => Some(Self::Ds), + 24 => Some(Self::Fs), + 25 => Some(Self::Gs), + 26 => Some(Self::LdtrSel), + 27 => Some(Self::TrSel), + 28 => Some(Self::Dr7), + 29 => Some(Self::Dr6), + 30 => Some(Self::R8), + 31 => Some(Self::R9), + 32 => Some(Self::R10), + 33 => Some(Self::R11), + 34 => Some(Self::R12), + 35 => Some(Self::R13), + 36 => Some(Self::R14), + 37 => Some(Self::R15), + 38 => Some(Self::Rax), + 39 => Some(Self::Rbx), + 40 => Some(Self::Rcx), + 41 => Some(Self::Rdx), + 42 => Some(Self::Rsp), + 43 => Some(Self::Rbp), + 44 => Some(Self::Rsi), + 45 => Some(Self::Rdi), + 46 => Some(Self::Rip), + 51 => Some(Self::Rflags), + 52 => Some(Self::Cr0), + 53 => Some(Self::Cr3), + 54 => Some(Self::Cr4), + 512 => Some(Self::Io), + 513 => Some(Self::Lma), + 514 => Some(Self::ProcessorId), + _ => None, + } + } + +} + +// ============================================================================ +// Register Layout Descriptor +// 
============================================================================ + +/// Layout descriptor for a register in the SMRAM save state map. +/// +/// For 8-byte registers the value may be stored as two dwords at potentially +/// non-contiguous offsets (e.g. Intel GDT/IDT/LDT base). `lo_offset` gives +/// the low dword; `hi_offset` gives the high dword. +/// +/// For registers narrower than 8 bytes, `hi_offset` is 0 and only the low +/// dword location is used. +#[derive(Debug, Clone, Copy)] +pub struct RegisterInfo { + /// Offset of the low (or only) dword from the save state base. + pub lo_offset: u16, + /// Offset of the high dword. 0 for registers < 8 bytes. + pub hi_offset: u16, + /// Native width in bytes as stored in the save state (1, 2, 4, or 8). + pub native_width: u8, +} + +// ============================================================================ +// Vendor-Specific Constants +// ============================================================================ + +/// Vendor-specific save state field offsets and behaviour that differ between +/// Intel and AMD. Provided by the active vendor module. +pub struct VendorConstants { + /// Offset of `SMMRevId` within the save state map. + pub smmrevid_offset: u16, + /// Offset of the IO information field (Intel `IOMisc`, AMD `IO_DWord`). + pub io_info_offset: u16, + /// Offset of `IA32_EFER` / `EFER`. + pub efer_offset: u16, + /// Offset of `_RAX` (used for IO data reads). + pub rax_offset: u16, + /// Minimum `SMMRevId` that supports IO information. + pub min_rev_id_io: u32, + /// Whether the LMA pseudo-register always returns 64-bit. + /// + /// AMD64 processors always operate in 64-bit mode during SMM, so + /// they skip the EFER.LMA check and always return LMA_64BIT. + pub lma_always_64: bool, +} + +/// Parsed I/O trap information extracted from the vendor-specific IO field. +/// +/// Returned by [`parse_io_field`] after decoding the vendor's raw IO bits. 
+#[derive(Debug, Clone, Copy)] +pub struct ParsedIoInfo { + /// I/O type: [`IO_TYPE_INPUT`] (1) or [`IO_TYPE_OUTPUT`] (2). + pub io_type: u32, + /// I/O width: [`IO_WIDTH_UINT8`], [`IO_WIDTH_UINT16`], or [`IO_WIDTH_UINT32`]. + pub io_width: u32, + /// Number of bytes transferred (1, 2, or 4). + pub byte_count: usize, + /// I/O port address. + pub io_port: u32, +} + +/// Returns the [`VendorConstants`] for the active vendor (selected at build time). +#[cfg(feature = "save_state_intel")] +pub fn vendor_constants() -> &'static VendorConstants { + &intel::VENDOR_CONSTANTS +} + +/// Returns the [`VendorConstants`] for the active vendor (selected at build time). +#[cfg(feature = "save_state_amd")] +pub fn vendor_constants() -> &'static VendorConstants { + &amd::VENDOR_CONSTANTS +} + +/// Returns the [`RegisterInfo`] for a given register in the active vendor's +/// save state map. +/// +/// Returns `None` for pseudo-registers (IO, LMA, ProcessorId) and for +/// registers not supported by the active vendor's 64-bit save state layout. +#[cfg(feature = "save_state_intel")] +pub fn register_info(reg: MmSaveStateRegister) -> Option { + intel::register_info(reg) +} + +/// Returns the [`RegisterInfo`] for a given register in the active vendor's +/// save state map. +#[cfg(feature = "save_state_amd")] +pub fn register_info(reg: MmSaveStateRegister) -> Option { + amd::register_info(reg) +} + +/// Parses the vendor-specific I/O information field from the save state. +/// +/// Returns `None` if the field indicates no I/O instruction triggered the SMI +/// (Intel: `SmiFlag` not set) or if the I/O type/width is not a simple IN/OUT. +#[cfg(feature = "save_state_intel")] +pub fn parse_io_field(io_field: u32) -> Option { + intel::parse_io_field(io_field) +} + +/// Parses the vendor-specific I/O information field from the save state. 
+#[cfg(feature = "save_state_amd")] +pub fn parse_io_field(io_field: u32) -> Option { + amd::parse_io_field(io_field) +} + +// ============================================================================ +// I/O Pseudo-Register Structures +// ============================================================================ + +/// `EFI_MM_SAVE_STATE_IO_INFO` — written to the user buffer when reading +/// the IO pseudo-register. +#[repr(C)] +#[derive(Debug, Clone, Copy, Default)] +pub struct MmSaveStateIoInfo { + /// I/O data value (from RAX, zero-extended). + pub io_data: u64, + /// I/O port address. + pub io_port: u64, + /// I/O width enum (`EFI_MM_SAVE_STATE_IO_WIDTH`). + pub io_width: u32, + /// I/O type enum (`EFI_MM_SAVE_STATE_IO_TYPE`). + pub io_type: u32, +} + +/// Size of [`MmSaveStateIoInfo`] in bytes. +pub const IO_INFO_SIZE: usize = 24; + +const _: () = assert!(core::mem::size_of::() == IO_INFO_SIZE); + +// ============================================================================ +// I/O Constants (shared across vendors) +// ============================================================================ + +/// `EFI_MM_SAVE_STATE_IO_TYPE_INPUT` (1). +pub const IO_TYPE_INPUT: u32 = 1; +/// `EFI_MM_SAVE_STATE_IO_TYPE_OUTPUT` (2). +pub const IO_TYPE_OUTPUT: u32 = 2; + +/// `EFI_MM_SAVE_STATE_IO_WIDTH_UINT8` (0). +pub const IO_WIDTH_UINT8: u32 = 0; +/// `EFI_MM_SAVE_STATE_IO_WIDTH_UINT16` (1). +pub const IO_WIDTH_UINT16: u32 = 1; +/// `EFI_MM_SAVE_STATE_IO_WIDTH_UINT32` (2). +pub const IO_WIDTH_UINT32: u32 = 2; + +// ============================================================================ +// LMA Constants +// ============================================================================ + +/// IA32_EFER.LMA bit (bit 10). +pub const IA32_EFER_LMA: u64 = 1 << 10; + +/// LMA value: processor was in 32-bit mode. +pub const LMA_32BIT: u64 = 32; +/// LMA value: processor was in 64-bit mode. 
+pub const LMA_64BIT: u64 = 64; + +// ============================================================================ +// EFI_PROCESSOR_INFORMATION +// ============================================================================ + +/// Size of one `EFI_PROCESSOR_INFORMATION` entry (PI 1.7+). +/// +/// Layout: `ProcessorId`(8) + `StatusFlag`(4) + `CpuPhysicalLocation`(12) + +/// `ExtendedProcessorInformation`(24) = 48 bytes. +pub const PROCESSOR_INFO_ENTRY_SIZE: usize = 48; + +// ============================================================================ +// Tests +// ============================================================================ + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_register_from_u64() { + assert_eq!( + MmSaveStateRegister::from_u64(38), + Some(MmSaveStateRegister::Rax) + ); + assert_eq!( + MmSaveStateRegister::from_u64(512), + Some(MmSaveStateRegister::Io) + ); + assert_eq!( + MmSaveStateRegister::from_u64(514), + Some(MmSaveStateRegister::ProcessorId) + ); + assert_eq!(MmSaveStateRegister::from_u64(999), None); + assert_eq!(MmSaveStateRegister::from_u64(0), None); + } + + #[test] + fn test_io_info_struct_layout() { + assert_eq!(core::mem::size_of::(), 24); + assert_eq!(core::mem::offset_of!(MmSaveStateIoInfo, io_data), 0); + assert_eq!(core::mem::offset_of!(MmSaveStateIoInfo, io_port), 8); + assert_eq!(core::mem::offset_of!(MmSaveStateIoInfo, io_width), 16); + assert_eq!(core::mem::offset_of!(MmSaveStateIoInfo, io_type), 20); + } +} diff --git a/core/patina_internal_mm_common/Cargo.toml b/core/patina_internal_mm_common/Cargo.toml new file mode 100644 index 000000000..90a65b131 --- /dev/null +++ b/core/patina_internal_mm_common/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "patina_internal_mm_common" +version.workspace = true +repository.workspace = true +license.workspace = true +edition.workspace = true +readme = "README.md" +description = "Shared type definitions for MM supervisor and user cores." 
+ +[lints] +workspace = true + +[dependencies] +r-efi = { workspace = true } diff --git a/core/patina_internal_mm_common/README.md b/core/patina_internal_mm_common/README.md new file mode 100644 index 000000000..2c0ac22b0 --- /dev/null +++ b/core/patina_internal_mm_common/README.md @@ -0,0 +1,10 @@ +# patina_internal_mm_common + +Shared type definitions used by both the MM Supervisor Core and MM User Core. + +This crate provides the communication structures and enumerations that define +the ABI between the supervisor (ring 0) and user (ring 3) MM modules, +including: + +- `UserCommandType` — Supervisor-to-user command enumeration +- `MM_COMM_BUFFER_HOB_GUID` — Shared GUID for the communication buffer HOB diff --git a/core/patina_internal_mm_common/src/lib.rs b/core/patina_internal_mm_common/src/lib.rs new file mode 100644 index 000000000..a10e0538b --- /dev/null +++ b/core/patina_internal_mm_common/src/lib.rs @@ -0,0 +1,120 @@ +//! Shared type definitions for MM supervisor and user cores. +//! +//! This crate provides the communication structures and enumerations that define +//! the ABI between the supervisor (ring 0) and user (ring 3) MM modules. + +#![no_std] + +// ============================================================================= +// Command Types +// ============================================================================= + +/// Command types passed from the supervisor to the user core via `invoke_demoted_routine`. +/// +/// Discriminant values are part of the supervisor↔user ABI and must not change. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(u64)] +pub enum UserCommandType { + /// Initialize the user core: walk HOBs, discover drivers, dispatch. + StartUserCore = 0, + /// Handle a runtime MMI request: parse communication buffer and dispatch handlers. + UserRequest = 1, + /// Execute a procedure on an AP. 
+ UserApProcedure = 2, +} + +impl TryFrom for UserCommandType { + type Error = u64; + + fn try_from(value: u64) -> Result { + match value { + 0 => Ok(UserCommandType::StartUserCore), + 1 => Ok(UserCommandType::UserRequest), + 2 => Ok(UserCommandType::UserApProcedure), + other => Err(other), + } + } +} + +// ============================================================================= +// Syscall Indices +// ============================================================================= + +/// Syscall indices for the MM Supervisor ↔ User Core syscall interface. +/// +/// These match the definitions in SysCallLib.h and define the ABI used when +/// Ring 3 code issues a `syscall` instruction to the Ring 0 supervisor. +/// +/// ## ABI +/// +/// - RAX = call index ([`SyscallIndex`]) +/// - RDX = arg1 +/// - R8 = arg2 +/// - R9 = arg3 +/// +/// On return: +/// - RAX = result value +/// - RDX = status (EFI_STATUS) +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(u64)] +pub enum SyscallIndex { + /// Read MSR - Arg1: MSR index, Returns: MSR value + RdMsr = 0x0000, + /// Write MSR - Arg1: MSR index, Arg2: value + WrMsr = 0x0001, + /// CLI - Clear interrupts + Cli = 0x0002, + /// IO Read - Arg1: port, Arg2: width + IoRead = 0x0003, + /// IO Write - Arg1: port, Arg2: width, Arg3: value + IoWrite = 0x0004, + /// WBINVD - Write back and invalidate cache + Wbinvd = 0x0005, + /// HLT - Halt processor + Hlt = 0x0006, + /// Save State Read - Arg1: register, Arg2: CPU index + SaveStateRead = 0x0007, + /// Maximum value for legacy syscall indices + LegacyMax = 0xFFFF, + /// Allocate Pages - Arg1: alloc_type, Arg2: mem_type, Arg3: page_count + AllocPage = 0x10004, + /// Free Pages - Arg1: address, Arg2: page_count + FreePage = 0x10005, + /// Start AP Procedure - Arg1: procedure, Arg2: CPU index, Arg3: argument + StartApProc = 0x10006, + /// Save state read with extended support - Arg1: width, Arg2: buffer pointer + SaveStateRead2 = 0x10021, + /// MM memory unblocked - Arg1: address, 
Arg2: size + MmMemoryUnblocked = 0x10022, + /// MM is communication buffer - Arg1: address, Arg2: size + MmIsCommBuffer = 0x10023, +} + +impl SyscallIndex { + /// Creates a `SyscallIndex` from a raw `u64` value. + pub fn from_u64(value: u64) -> Option { + match value { + 0x0000 => Some(Self::RdMsr), + 0x0001 => Some(Self::WrMsr), + 0x0002 => Some(Self::Cli), + 0x0003 => Some(Self::IoRead), + 0x0004 => Some(Self::IoWrite), + 0x0005 => Some(Self::Wbinvd), + 0x0006 => Some(Self::Hlt), + 0x0007 => Some(Self::SaveStateRead), + 0xFFFF => Some(Self::LegacyMax), + 0x10004 => Some(Self::AllocPage), + 0x10005 => Some(Self::FreePage), + 0x10006 => Some(Self::StartApProc), + 0x10021 => Some(Self::SaveStateRead2), + 0x10022 => Some(Self::MmMemoryUnblocked), + 0x10023 => Some(Self::MmIsCommBuffer), + _ => None, + } + } + + /// Returns the raw `u64` value of this syscall index. + pub fn as_u64(self) -> u64 { + self as u64 + } +} diff --git a/core/patina_stacktrace/src/aarch64/runtime_function.rs b/core/patina_stacktrace/src/aarch64/runtime_function.rs index 91b6de3ff..11cf84176 100644 --- a/core/patina_stacktrace/src/aarch64/runtime_function.rs +++ b/core/patina_stacktrace/src/aarch64/runtime_function.rs @@ -1,4 +1,3 @@ -extern crate alloc; use super::unwind::UnwindInfo; use crate::{ byte_reader::ByteReader, @@ -263,7 +262,7 @@ impl<'a> RuntimeFunction<'a> { #[coverage(off)] mod tests { use super::*; - use alloc::vec::Vec; + use std::vec::Vec; const IMAGE_SIZE: usize = 0x2000; const PE_POINTER_OFFSET: usize = 0x3C; diff --git a/core/patina_stacktrace/src/lib.rs b/core/patina_stacktrace/src/lib.rs index b9e83738e..bdfcdeb39 100644 --- a/core/patina_stacktrace/src/lib.rs +++ b/core/patina_stacktrace/src/lib.rs @@ -169,8 +169,6 @@ #![cfg_attr(all(not(feature = "std"), not(test)), no_std)] #![feature(coverage_attribute)] -extern crate alloc; - mod byte_reader; pub mod error; mod pe; diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index ca9666def..4f8818ac7 100644 --- 
a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -38,6 +38,7 @@ - [Windbg Debugging](dev/debugging/windbg_debugging.md) - [Windbg Debugging Example](dev/debugging/windbg_example.md) - [Core Reload](dev/debugging/core_reload.md) +- [Toolchain Configuration](dev/toolchain_configuration.md) # Patina DXE Core Platform Integration diff --git a/docs/src/component/requirements.md b/docs/src/component/requirements.md index 7f25408f4..a25b05314 100644 --- a/docs/src/component/requirements.md +++ b/docs/src/component/requirements.md @@ -99,11 +99,12 @@ be used as seen below: ```rust # extern crate patina; +# extern crate patina_test; use patina::{ error::Result, component::component, - test::patina_test, }; +use patina_test::patina_test; pub struct MyComponent(u32); @@ -119,12 +120,12 @@ trait MyService { } #[patina_test] -fn test_my_component_name_for_test() -> patina::test::Result { +fn test_my_component_name_for_test() -> patina_test::error::Result { Ok(()) } #[patina_test] -fn test_my_service_name_for_test() -> patina::test::Result { +fn test_my_service_name_for_test() -> patina_test::error::Result { Ok(()) } ``` diff --git a/docs/src/dev/testing/platform.md b/docs/src/dev/testing/platform.md index da8b75b76..e73be7b19 100644 --- a/docs/src/dev/testing/platform.md +++ b/docs/src/dev/testing/platform.md @@ -1,13 +1,13 @@ # Platform Testing -Platform testing is supported through the `patina::test` module, which provides a testing framework similar to +Platform testing is supported through the `patina_test` crate, which provides a testing framework similar to the typical Rust testing framework. The key difference is that instead of tests being collected and executed on the -host system, they are collected and executed via a component (`patina::test::TestRunner`) provided by the same -crate. 
The platform must register this component with the Patina DXE Core, which will then dispatch the component +host system, they are collected and executed via a component (`patina_test::component::TestRunner`) provided by the +same crate. The platform must register this component with the Patina DXE Core, which will then dispatch the component to run all registered tests. -> **Note:** The most up-to-date documentation on the `patina::test` module can be found on -> [crates.io](https://crates.io/crates/patina). For convenience, some high-level concepts are summarized below. +> **Note:** The most up-to-date documentation on the `patina_test` crate can be found on +> [crates.io](https://crates.io/crates/patina_test). For convenience, some high-level concepts are summarized below. ## Writing On-Platform Tests @@ -16,8 +16,8 @@ following interface, where `...` can be any number of parameters that implement `patina::component::*`: ```rust -# extern crate patina; -use patina::test::{Result, patina_test}; +# extern crate patina_test; +use patina_test::{error::Result, patina_test}; #[patina_test] fn my_test(/* args */) -> Result { todo!() } @@ -28,15 +28,16 @@ platform. Any function tagged with `#[patina_test]` will be collected and execut can filter out tests, but you should also be conscious of when tests should run. Using `cfg_attr` paired with the `skip` attribute is a great way to have tests ignored for reasons like host architecture or feature flags. -> **Note:** `patina::test::Result` is simply `core::result::Result<(), &'static str>`, and you can use that +> **Note:** `patina_test::error::Result` is simply `core::result::Result<(), &'static str>`, and you can use that > instead. This example shows how to use the `skip` attribute paired with `cfg_attr` to skip a test. 
```rust +# extern crate patina_test; # extern crate patina; use patina::boot_services::StandardBootServices; -use patina::test::{Result, patina_test}; +use patina_test::{error::Result, patina_test}; #[patina_test] #[cfg_attr(target_arch = "aarch64", skip)] @@ -47,8 +48,8 @@ Next is the `should_fail` attribute, which allows you to specify that a test sho expected failure message. ```rust -# extern crate patina; -use patina::test::{Result, patina_test}; +# extern crate patina_test; +use patina_test::{error::Result, patina_test}; #[patina_test] #[should_fail] @@ -75,7 +76,8 @@ this, e.g.: ```rust # extern crate patina; -use patina::test::{Result, patina_test}; +# extern crate patina_test; +use patina_test::{error::Result, patina_test}; #[patina_test] #[on(event = patina::guids::EVENT_GROUP_END_OF_DXE)] @@ -90,8 +92,8 @@ These tests are executed every specified time interval (in units of 100ns, i.e. also used to indicate this, e.g.: ```rust -# extern crate patina; -use patina::test::{Result, patina_test}; +# extern crate patina_test; +use patina_test::{error::Result, patina_test}; #[patina_test] #[on(timer = 1_000_000)] // run every 100 ms @@ -106,9 +108,9 @@ Running all these tests on a platform is as easy as instantiating the test runne the Patina DXE Core: ```rust,no_run -# extern crate patina; +# extern crate patina_test; # extern crate patina_dxe_core; -use patina::test::TestRunner; +use patina_test::component::TestRunner; use patina_dxe_core::*; struct ExamplePlatform; @@ -131,9 +133,9 @@ customization is `fail_fast` which will immediately exit the test harness as soo default). These two customizations can only be called once. Subsequent calls will overwrite the previous value. 
```rust,no_run -# extern crate patina; +# extern crate patina_test; # extern crate patina_dxe_core; -use patina::test::TestRunner; +use patina_test::component::TestRunner; use patina_dxe_core::*; struct ExamplePlatform; diff --git a/docs/src/dev/toolchain_configuration.md b/docs/src/dev/toolchain_configuration.md new file mode 100644 index 000000000..dd32a642d --- /dev/null +++ b/docs/src/dev/toolchain_configuration.md @@ -0,0 +1,58 @@ +# Toolchain Configuration + +Patina relies on platform bin repos copying its `.cargo/config.toml` to properly set toolchain configuration. This +document focuses on the process to update and maintain Patina's toolchain configuration. + +## Background + +Patina must recommend a set of toolchain configuration options for platforms to use to have the best experience. With +different options, various features may not work as intended (e.g. if `-C force-unwind-tables` is not used, the +stack walk in a debugger will be truncated), performance or binary size may be worse, or failures could occur. + +EDK II achieves this same goal by maintaining custom build tools. One of Patina's philosophies is to use existing +tools wherever possible. In pursuit of this goal, Patina has a `.cargo/config.toml` file that is used when building +Patina in CI. It [is recommended](../integrate/patina_dxe_core_requirements.md#35-configtoml-usage) that platforms +copy this `.cargo/config.toml` to match Patina's toolchain configuration exactly. Patina enforces the same version +is being used in a `build.rs` file. + +>**Note:** Platforms still have the flexibility to change toolchain configuration as needed in this setup, they just +> need to ensure the config version is the same as what Patina is expecting. Platforms choosing to diverge from the +> known good Patina config accept the risk of unexpected behavior stemming from these modifications. + +## Updating Configuration + +There are some simple rules that must be followed when updating Patina's configuration. 
+ +1. Consider the change. Is it appropriate for all platforms? All architectures? +2. Add (or remove) the configuration option to `patina/.cargo/config.toml`. See [the rules](#configuration-adding-rules). +3. Add a comment (multiline okay) above the option describing what this option does and why. +4. Increase `PATINA_CONFIG_VERSION` by one. All config updates, no matter how trivial, must update the version. +5. Update the `PATINA_CONFIG_VERSION` defined in `patina/patina_dxe_core/build.rs` by one. This must match the + `PATINA_CONFIG_VERSION` updated in step 4. + +### Configuration Adding Rules + +The following is a set of rules to follow when adding new toolchain configuration options. + +#### Prefer Cargo Options + +Per [Rust documentation](https://doc.rust-lang.org/cargo/reference/config.html#buildrustflags) all `RUSTFLAGS` that +`Cargo` itself manages in `profile` settings (e.g. `lto`, `debug`, etc.) should be set in the relevant `profile` +section, not directly in `RUSTFLAGS`. See [the profile docs](https://doc.rust-lang.org/cargo/reference/profiles.html) +for which settings are managed there. + +#### Put RUSTFLAGS in target.\<triple\>.rustflags Section + +Per [Rust documentation](https://doc.rust-lang.org/cargo/reference/config.html#buildrustflags), there are four mutually +exclusive ways to pass RUSTFLAGS: + +```text +1. CARGO_ENCODED_RUSTFLAGS environment variable. +2. RUSTFLAGS environment variable. +3. All matching target.<triple>.rustflags and target.<cfg>.rustflags config entries joined together. +4. build.rustflags config value. +``` + +Patina only uses the third option. We do not directly set environment variables as this is brittle and can conflict +with many different scenarios. We prefer using the target triple sections because they have precedence over general +`[build]` sections. Do not put RUSTFLAGS in a `[build]` section, it will be unused.
diff --git a/docs/src/dxe_core/synchronization.md b/docs/src/dxe_core/synchronization.md index 460637b73..bcccb3be2 100644 --- a/docs/src/dxe_core/synchronization.md +++ b/docs/src/dxe_core/synchronization.md @@ -7,14 +7,15 @@ simultaneously at different "Task Priority Levels[^events_and_tpl]." Routines executing at a higher TPL may interrupt routines executing at a lower TPL. Both routines may access Patina DXE Core Services, so global state in the -Patina DXE Core, such such as the protocol database, event database, dispatcher -state, etc. must be protected against simultaneous access. +Patina DXE Core, such as the protocol database, event database, dispatcher +state, etc. must be protected against data races caused by simultaneous access +from event callbacks running at different TPL levels. The primary way this is implemented in the Patina DXE Core is via the `TplMutex` structure. [^events_and_tpl]: See [Event, Timer, and Task Priority Services](events.md#event-timer-and-task-priority-services) elsewhere in this book, as well as -the [UEFI Specification Section 7.1](https://uefi.org/specs/UEFI/2.10_A/07_Services_Boot_Services.html#event-timer-and-task-priority-services). +the [UEFI Specification Section 7.1](https://uefi.org/specs/UEFI/2.11/07_Services_Boot_Services.html#event-timer-and-task-priority-services). ## TplMutex @@ -39,7 +40,7 @@ scenarios. ```admonish warning Care must be taken when selecting the `tpl_lock_level` for a `TplMutex`. Code executing at a TPL higher than the `TplMutex` will panic if it attempts to -accquire the lock (because it will attempt to raise the TPL to a lower level, +acquire the lock (because it will attempt to raise the TPL to a lower level, which is an error). But setting a `tpl_lock_level` to a high TPL level will prevent other (unrelated) usage of that TPL, potentially reducing system responsiveness. It is recommended to set the `tpl_lock_level` as low as possible @@ -49,7 +50,7 @@ higher TPL level. 
### TplMutex - Locking and Reentrancy -The second mutual exclusion mechanism used by `TplMutex` is a flag to control +The second mutual exclusion mechanism used by `TplMutex` is a flag to control access to the lock. To acquire the `TplMutex`, the flag must be clear to indicate that the lock is not owned by any other agent. There is a significant difference between the `TplMutex` and `sync::Mutex` - while `sync::Mutex` will @@ -77,7 +78,8 @@ mutex. The `try_lock()` routine in `TplMutex` allows a lock to be attempted and fail without blocking; this can be used for scenarios where a lock might be held by -another agent but the caller can handle not acquiring the lock. +another agent in a lower TPL but the caller can handle not acquiring the lock, +or in scenarios where a call is re-entrant at the same TPL. ## TplGuard @@ -109,3 +111,123 @@ assert!(tpl_mutex1.try_lock().is_err()); //mutex1 still locked. drop(guard1); //lock is released. assert!(tpl_mutex1.try_lock().is_ok()); //mutex1 unlocked and can be acquired. ``` + +## General Guidelines for Synchronization within Patina + +This section documents some good rules of thumb for synchronization design +within Patina. + +1. If a type needs to be `Sync`/`Send`, that means it is expected to be shared +across contexts - Rust documentation refers to these contexts as threads, but +within UEFI this generally refers to event callbacks running on the same CPU +thread - see discussion above. The primary place that the Rust compiler requires +this in the Patina context is global shared state, such as the protocol/event +databases or global allocator state. The default safe way to do this is a +`TplMutex` as described above that ensures that data races and deadlock do not +occur on access to this global state. + +2. Do not use non-TPL aware synchronization primitives such as `spin::mutex` or +`spin::rwlock`. 
In the UEFI/Patina threading model, these primitives are prone +to deadlock because they assume that contention for the lock from one context +will not block execution of the context currently holding the lock. But as +described in the previous section of this chapter, that is not the case for +UEFI: if code running at a low TPL holds a lock and is interrupted by code in an +event callback at a higher TPL that tries to acquire the lock, deadlock will +occur. `TplMutex` helps to ensure this doesn't happen by panicking on an attempt +to acquire the lock in this case, helping the developer identify and resolve the +design issues that would otherwise lead to deadlock. + +3. When designing a `TplMutex` to guard shared data, select the highest TPL that +the shared state will possibly be accessed at as the TPL level associated with +the mutex. Typically this will be `TPL_NOTIFY` as that is the highest level that +the UEFI specification normally allows for general usage - see [Table 7.2](https://uefi.org/specs/UEFI/2.11/07_Services_Boot_Services.html#tpl-usage) +in the UEFI spec. Designs must guarantee the invariant that there will be no +attempts to access the `TplMutex` at a higher TPL level than associated with the +mutex, and this will be enforced with an `assert`. This is known as "TPL +Inversion," and if it were allowed, it would mean that higher TPL levels could +break mutual exclusion and cause data races. +[Table 7.3](https://uefi.org/specs/UEFI/2.11/07_Services_Boot_Services.html#tpl-restrictions) +lists the TPL restrictions associated with various core services and common protocols. + + ```admonish warning + Note: it is important to understand that the TPL level associated with a + `TplMutex` is not the same thing as the TPL level associated with an event + callback routine. 
The TPL level associated with an event callback determines + the TPL level at which the event callback is permitted to run and can be + thought of as the "ground state" TPL that the event callback executes at. The + callback is permitted to acquire a TplMutex at a higher level than the event + callback is running at, and the TPL will be raised for the duration that the + TPL guard is owned to prevent data races with event callbacks running at the + higher context. + ``` + +4. Care should be taken to ensure that `TplMutex` usages are scoped so that the +critical sections are as narrow as possible. This is especially true if +accessing shared data from a TPL context that is at a lower TPL than the +`TplMutex` lock level since holding the lock at a higher TPL for long periods +will starve event servicing at or below the `TplMutex` lock level as long as the +guard is active. + + Prefer: + + ```rust,ignore + { + let guard = my_tpl_mutex.lock(); + // TPL raised to level associated with my_tpl_mutex + guard.mutation(); + } + // mutex dropped, TPL restored to the base TPL level for the event callback. + long_running_computation(); + // re-acquire mutex + my_tpl_mutex.lock().mutation2(); + ``` + + instead of: + + ```rust,ignore + let guard = my_tpl_mutex.lock(); + guard.mutation(); + //mutex held and TPL stays high during long_running_computation + long_running_computation(); + guard.mutation2(); + ``` + +5. If the design calls for interior mutability on data that is _not_ shared between +contexts, use a standard Rust interior mutability primitive (i.e. `UnsafeCell` +and its derivatives). Do not use `TplMutex` for interior mutability on non-shared +data. A good rule of thumb is that if your usage doesn't require `Sync`/`Send`, +then you don't need a `TplMutex`. + +6. The UEFI spec APIs often use constructs like `context: *mut c_void` to share +data between contexts. 
When implementing FFI interfaces to support these API +contracts, `TplMutex` should be used to guard shared data accessed via these +context raw pointers even though the raw pointers are not required to be `Sync`/ +`Send` by the compiler. Whether data races can occur and how they are prevented +should be documented as part of the safety comments for usage of the raw context +pointer. Sometimes the `context` pointer is known to be unique to the event +callback and never accessed from other contexts, in which case a `TplMutex` is +not required. + +7. Direct `raise_tpl` and `restore_tpl` calls should be avoided. Directly +manipulating the TPL decouples the mutual exclusion primitives from the data +that is being protected and makes it hard to associate the TPL requirements with +the data synchronization model of the code. + +8. Care should be taken to avoid violating UEFI spec caller restrictions on TPL +as described in +[Table 7.3](https://uefi.org/specs/UEFI/2.11/07_Services_Boot_Services.html#tpl-restrictions) +of the UEFI spec. For example, the following usage of `TplMutex` would be an +error: + + ```rust,ignore + let my_tpl_mutex = TplMutex::::new(efi::TPL_NOTIFY, Data::new(), "my lock"); + let _guard = my_tpl_mutex.lock(); //TPL raised to NOTIFY while _guard is in scope. + let acpi_services = locate_acpi_table_protocol(); + acpi_services.install_acpi_table(); //BUG: UEFI spec requires invocation at < TPL_NOTIFY + ``` + +As with any set of guidelines, exceptions to the above may be required for +specific cases; these should include design rationale for the departure from +these rules of thumb. For example, it might be possible to use a non-TPL +synchronization primitive that only uses `try_lock` to avoid deadlock and is +designed to handle failure to acquire the lock in a non-fatal manner. 
diff --git a/docs/src/integrate/dxe_core.md b/docs/src/integrate/dxe_core.md index bb5c71850..bac29ea6d 100644 --- a/docs/src/integrate/dxe_core.md +++ b/docs/src/integrate/dxe_core.md @@ -133,6 +133,20 @@ Reference: [sbsa_dxe_core.rs](https://github.com/OpenDevicePartnership/patina-dx > While the QEMU Patina DXE Core implementations provide a good starting point, you need to modify the copied file to > suit your platform's specific requirements. +### Copy config.toml from Patina + +Copy `patina/.cargo/config.toml` to `bin/.cargo/config.toml`. This ensures that the platform is using the same +toolchain configuration as Patina has been verified with and recommends. A platform may customize the copied +`config.toml` as it wishes, but for best performance and stability, use the recommended settings. See the +[Patina DXE Core Requirements](./patina_dxe_core_requirements.md#35-configtoml-usage) for more details. + +The `patina_dxe_core` crate build will fail if the config version has changed. A platform should use this as an +indication when updating Patina to repeat this step to get the latest config. + +> **Note:** Patina will validate that the `PATINA_CONFIG_VERSION` environment variable is set to the same value as its +> `.cargo/config.toml`, so even if a platform updates toolchain configuration, it must keep the same +> `PATINA_CONFIG_VERSION`. + ## 3. Dependencies Inside your crate's Cargo.toml file, add the following, where `$(VERSION)` is replaced with the version of the diff --git a/docs/src/integrate/patina_dxe_core_requirements.md b/docs/src/integrate/patina_dxe_core_requirements.md index 77f700791..b094dd596 100644 --- a/docs/src/integrate/patina_dxe_core_requirements.md +++ b/docs/src/integrate/patina_dxe_core_requirements.md @@ -267,6 +267,14 @@ crashes due to speculative execution trying instruction fetches from memory that never be touched. For x86 platforms, these regions are still protected to prevent devices having access to executable memory. 
+#### 3.5 Config.toml Usage + +Patina relies on its `.cargo/config.toml` being copied to the platform bin wrapper repo. A `build.rs` file is used +to enforce that the correct config is used. EDK II relies on a centralized `tools_def.template` being used by custom +build tools to create consistent toolchain configuration. In order to use standard Rust tools, Patina has opted for +this simpler approach. See the [integration section](./dxe_core.md#copy-configtoml-from-patina) for instructions on +setting up a platform. + ### 4. Architectural Requirements This section details Patina requirements that are specific to a particular CPU architectural requirements. diff --git a/docs/src/integrate/patina_dxe_core_requirements_checklist.md b/docs/src/integrate/patina_dxe_core_requirements_checklist.md index 2e99409bf..cf002c40b 100644 --- a/docs/src/integrate/patina_dxe_core_requirements_checklist.md +++ b/docs/src/integrate/patina_dxe_core_requirements_checklist.md @@ -1,7 +1,7 @@ # Patina DXE Core Requirements Platform Checklist | ✅ | Requirement | Summary | Details | -|---:|---|---|---| +| ---: | --- | --- | --- | | [ ] | **1.1 Standalone MM is Used** | Traditional SMM and combined SMM/DXE modules aren’t supported, use **Standalone MM** instead. SMM-specific file types/DEPEX aren’t dispatched/evaluated. | [No Traditional SMM](https://opendevicepartnership.github.io/patina/integrate/patina_dxe_core_requirements.html#11-no-traditional-smm) | | [ ] | **1.2 A Priori Driver Dispatch Is Not Used** | Remove *A Priori* sections. Patina dispatches in **FFS listed order**. Use **DEPEX** (optionally with empty/stub protocols) to control dependencies. | [A Priori Driver Dispatch Is Not Allowed](https://opendevicepartnership.github.io/patina/integrate/patina_dxe_core_requirements.html#12-a-priori-driver-dispatch-is-not-allowed) | | [ ] | **1.3 All DXE Dispatchable Modules Have Page Aligned Sections** | All DXE images (including C-based drivers) must have **≥ 4 KB** section alignment. 
**ARM64 DXE_RUNTIME_DRIVER** images must use **64 KB**. Set linker flags accordingly (e.g., `/ALIGN:0x1000` or `-z common-page-size=0x1000`). | [Driver Section Alignment](https://opendevicepartnership.github.io/patina/integrate/patina_dxe_core_requirements.html#13-driver-section-alignment-must-be-a-multiple-of-4-kb) | @@ -14,3 +14,4 @@ | [ ] | **3.2 All DXE Code Used in the Platform Supports Native Address Width** | Patina allocates **top‑down**, so addresses may be **>4 GB**. All code must store pointers in **native-width** types (not 32‑bit ints). | [Native Address Width](https://opendevicepartnership.github.io/patina/integrate/patina_dxe_core_requirements.html#32-all-code-must-support-native-address-width) | | [ ] | **3.3 ConnectController() Is Called Explicitly** | Patina **does not** auto‑call `ConnectController()` during `StartImage()`. Drivers/platforms must call it for any handles they create/modify. | [ConnectController() Must Be Called](https://opendevicepartnership.github.io/patina/integrate/patina_dxe_core_requirements.html#33-connectcontroller-must-explicitly-be-called-for-handles-createdmodified-during-image-start) | | [ ] | **3.4 Uncached Memory is Not Executable** | Patina **does not** allow execution from uncached memory regions. Ensure all executable code resides in cached memory. | [Uncached Memory Not Executable](https://opendevicepartnership.github.io/patina/integrate/patina_dxe_core_requirements.html#34-efi_memory_uc-memory-must-be-non-executable) | +| [ ] | **3.5 Patina's Config.toml Used** | Patina enforces toolchain configuration via copying its `.cargo/config.toml` to the platform | [config.toml Must Be Copied](./patina_dxe_core_requirements.md#35-configtoml-usage) | diff --git a/docs/src/patina.md b/docs/src/patina.md index 93ddb1995..983f602b5 100644 --- a/docs/src/patina.md +++ b/docs/src/patina.md @@ -191,7 +191,7 @@ definition but many of the services are still implemented in C so it is orange. 
- Support for [Enhanced Memory Protections](https://microsoft.github.io/mu/WhatAndWhy/enhancedmemoryprotection/). - Source-level debugging support. - Built-in Brotli and EFI decompression support. -- Infrastructure (in the `patina::test` module) for on-platform execution of unit tests. +- Infrastructure (in the `patina_test` crate) for on-platform execution of unit tests. ``` admonish important The Patina DXE Core otherwise supports the normal responsibilities of a DXE Core except for the design restrictions @@ -276,11 +276,11 @@ Three main types of testing are currently supported. state of the module. Only the external interfaces are being tested. Cargo will detect and run these tests with the same command as for unit tests. More information about integration tests are available in the [cargo book entry](https://doc.rust-lang.org/rust-by-example/testing/integration_testing.html). -- **On-platform tests** are supported with code in a module called `patina::test` that provides a testing framework +- **On-platform tests** are supported with code in a crate called `patina_test` that provides a testing framework similar to the typical rust testing framework. The key difference is that instead of tests being collected and - executed on the host system, they are instead collected and executed via a component (`patina::test::TestRunner`) - provided by the same crate. The platform must register this component with the `DXE core`. The DXE core will then - dispatch this component, which will run all registered tests. + executed on the host system, they are instead collected and executed via a component + (`patina_test::component::TestRunner`) provided by the same crate. The platform must register this component with the + `DXE core`. The DXE core will then dispatch this component, which will run all registered tests. 
#### Compatibility diff --git a/patina_dxe_core/Cargo.toml b/patina_dxe_core/Cargo.toml index 659ee47b1..16ba96a04 100644 --- a/patina_dxe_core/Cargo.toml +++ b/patina_dxe_core/Cargo.toml @@ -11,6 +11,9 @@ description = "A pure rust implementation of the UEFI DXE Core." [package.metadata.docs.rs] features = ["doc"] +[lints] +workspace = true + [dependencies] bitfield-struct = { workspace = true } cfg-if = { workspace = true } @@ -33,6 +36,7 @@ patina_internal_cpu = { workspace = true } patina_debugger = { workspace = true, features = ["alloc"] } patina_internal_depex = { workspace = true} patina_performance = { workspace = true } +patina_test = { workspace = true } [dev-dependencies] # To avoid circular dependencies, cargo-release skips dev dependencies when evaluating the release order for diff --git a/patina_dxe_core/build.rs b/patina_dxe_core/build.rs new file mode 100644 index 000000000..edb4dab78 --- /dev/null +++ b/patina_dxe_core/build.rs @@ -0,0 +1,34 @@ +//! Build script for patina_dxe_core to verify toolchain configuration. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! +use std::{env, process}; + +// This must be kept in sync with .cargo/config.toml's PATINA_CONFIG_VERSION +const PATINA_CONFIG_VERSION: &str = "1"; + +fn main() { + let version = env::var_os("PATINA_CONFIG_VERSION").unwrap_or_default(); + + let rustflags = env::var_os("CARGO_ENCODED_RUSTFLAGS").unwrap_or_default(); + eprintln!("CARGO_ENCODED_RUSTFLAGS={rustflags:?}"); + + if version != PATINA_CONFIG_VERSION { + eprintln!( + "error: Incorrect PATINA_CONFIG_VERSION, expected version \"{}\", got version {:?}", + PATINA_CONFIG_VERSION, version + ); + eprintln!( + "Use Patina's latest config.toml. 
See https://opendevicepartnership.github.io/patina/dev/toolchain_configuration.html" + ); + process::exit(1); + } + + // Only rerun this when the rustflags or the config version changes + println!("cargo:rerun-if-env-changed=CARGO_ENCODED_RUSTFLAGS"); + println!("cargo:rerun-if-env-changed=PATINA_CONFIG_VERSION"); +} diff --git a/patina_dxe_core/src/allocator.rs b/patina_dxe_core/src/allocator.rs index 4b6deed21..5e24f1e9f 100644 --- a/patina_dxe_core/src/allocator.rs +++ b/patina_dxe_core/src/allocator.rs @@ -83,8 +83,8 @@ const _: () = assert!( // Private tracking guid used to generate new handles for allocator tracking // {9D1FA6E9-0C86-4F7F-A99B-DD229C9B3893} -const PRIVATE_ALLOCATOR_TRACKING_GUID: efi::Guid = - efi::Guid::from_fields(0x9d1fa6e9, 0x0c86, 0x4f7f, 0xa9, 0x9b, &[0xdd, 0x22, 0x9c, 0x9b, 0x38, 0x93]); +const PRIVATE_ALLOCATOR_TRACKING_GUID: patina::BinaryGuid = + patina::BinaryGuid::from_string("9D1FA6E9-0C86-4F7F-A99B-DD229C9B3893"); pub(crate) const DEFAULT_PAGE_ALLOCATION_GRANULARITY: usize = SIZE_4KB; @@ -559,7 +559,7 @@ impl AllocatorMap { } let (handle, _) = PROTOCOL_DB.install_protocol_interface( None, - PRIVATE_ALLOCATOR_TRACKING_GUID, + PRIVATE_ALLOCATOR_TRACKING_GUID.into_inner(), core::ptr::null_mut(), )?; Ok(handle) @@ -884,7 +884,12 @@ pub fn terminate_memory_map(map_key: usize) -> Result<(), EfiError> { pub fn install_memory_type_info_table(system_table: &mut EfiSystemTable) -> Result<(), EfiError> { let table_ptr = NonNull::from(GCD.memory_type_info_table()).cast::().as_ptr(); - config_tables::core_install_configuration_table(guids::MEMORY_TYPE_INFORMATION, table_ptr, system_table).map(|_| ()) + config_tables::core_install_configuration_table( + guids::MEMORY_TYPE_INFORMATION.into_inner(), + table_ptr, + system_table, + ) + .map(|_| ()) } fn process_hob_allocations(hob_list: &HobList) { @@ -1124,7 +1129,7 @@ pub fn init_memory_support(hob_list: &HobList) { // If memory type info HOB is available, then pre-allocate the corresponding 
buckets. if let Some(memory_type_info) = hob_list.iter().find_map(|x| { match x { - patina::pi::hob::Hob::GuidHob(hob, data) if hob.name == MEMORY_TYPE_INFO_HOB_GUID => { + patina::pi::hob::Hob::GuidHob(hob, data) if hob.name == MEMORY_TYPE_INFO_HOB_GUID.into_inner() => { let memory_type_slice_ptr = data.as_ptr() as *const EFiMemoryTypeInformation; let memory_type_slice_len = data.len() / mem::size_of::(); @@ -1287,11 +1292,12 @@ mod tests { let mut hob_list = HobList::default(); hob_list.discover_hobs(physical_hob_list); + let guid_hob = GuidHob { + header: header::Hob { r#type: GUID_EXTENSION, length: 48, reserved: 0 }, + name: MEMORY_TYPE_INFO_HOB_GUID, + }; hob_list.push(Hob::GuidHob( - &GuidHob { - header: header::Hob { r#type: GUID_EXTENSION, length: 48, reserved: 0 }, - name: MEMORY_TYPE_INFO_HOB_GUID, - }, + &guid_hob, &[ // for test, pick dynamic allocators, since state is easier to clean up for those. 0x0d, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, //0x0100 pages of PAL_CODE diff --git a/patina_dxe_core/src/allocator/fixed_size_block_allocator.rs b/patina_dxe_core/src/allocator/fixed_size_block_allocator.rs index b87c3c09e..9478e79eb 100644 --- a/patina_dxe_core/src/allocator/fixed_size_block_allocator.rs +++ b/patina_dxe_core/src/allocator/fixed_size_block_allocator.rs @@ -92,6 +92,7 @@ impl Iterator for AllocatorIterator { type Item = *mut AllocatorListNode; fn next(&mut self) -> Option<*mut AllocatorListNode> { if let Some(current) = self.current { + // SAFETY: current is a valid node pointer from the allocator list. self.current = unsafe { (*current).next }; Some(current) } else { @@ -183,6 +184,7 @@ impl FixedSizeBlockAllocator { } let heap_region: NonNull<[u8]> = NonNull::slice_from_raw_parts( + // SAFETY: alloc_node_ptr is validated above and points to a region large enough for an AllocatorListNode. 
NonNull::new(unsafe { alloc_node_ptr.add(1) }).unwrap().cast(), new_region.len() - size_of::(), ); @@ -190,6 +192,7 @@ impl FixedSizeBlockAllocator { //write the allocator node structure into the start of the range, initialize its heap with the remainder of //the range, and add the new allocator to the front of the allocator list. let node = AllocatorListNode { next: None, allocator: linked_list_allocator::Heap::empty() }; + // SAFETY: alloc_node_ptr is aligned and points to valid writable memory for an AllocatorListNode. unsafe { alloc_node_ptr.write(node); (*alloc_node_ptr).allocator.init(heap_region.cast::().as_ptr(), heap_region.len()); @@ -214,6 +217,7 @@ impl FixedSizeBlockAllocator { // appropriate size is not available. fn fallback_alloc(&mut self, layout: Layout) -> Result, FixedSizeBlockAllocatorError> { for node in AllocatorIterator::new(self.allocators) { + // SAFETY: node is a valid allocator list node pointer from the iterator. let allocator = unsafe { &mut (*node).allocator }; if let Ok(ptr) = allocator.allocate_first_fit(layout) { return Ok(NonNull::slice_from_raw_parts(ptr, layout.size())); @@ -275,8 +279,10 @@ impl FixedSizeBlockAllocator { // layout being freed is too big to be tracked as a fixed-size free block. fn fallback_dealloc(&mut self, ptr: NonNull, layout: Layout) { for node in AllocatorIterator::new(self.allocators) { + // SAFETY: node is produced by AllocatorIterator and points to a valid AllocatorListNode. let allocator = unsafe { &mut (*node).allocator }; if (allocator.bottom() <= ptr.as_ptr()) && (ptr.as_ptr() < allocator.top()) { + // SAFETY: ptr was allocated by this allocator for the given layout. unsafe { allocator.deallocate(ptr, layout) }; } } @@ -299,6 +305,7 @@ impl FixedSizeBlockAllocator { panic!("FSB deallocating block too small to store BlockListNode."); } let new_node_ptr = ptr.as_ptr() as *mut BlockListNode; + // SAFETY: new_node_ptr points to memory returned by alloc for this layout. 
unsafe { new_node_ptr.write(new_node); self.list_heads[index] = Some(&mut *new_node_ptr); @@ -354,6 +361,7 @@ impl FixedSizeBlockAllocator { /// manages. pub fn contains(&self, ptr: *mut u8) -> bool { AllocatorIterator::new(self.allocators).any(|node| { + // SAFETY: node is produced by AllocatorIterator and points to a valid AllocatorListNode. let allocator = unsafe { &mut (*node).allocator }; (allocator.bottom() <= ptr) && (ptr < allocator.top()) }) @@ -389,7 +397,7 @@ impl FixedSizeBlockAllocator { /// If the allocator does not own any memory, it will return an empty iterator. pub(crate) fn get_memory_ranges(&self) -> impl Iterator> { AllocatorIterator::new(self.allocators).map(|node| { - // This is safe because the node is a valid pointer to an AllocatorListNode + // SAFETY: node is produced by AllocatorIterator and points to a valid AllocatorListNode. let allocator = unsafe { &(*node).allocator }; allocator.bottom() as usize..allocator.top() as usize }) @@ -432,6 +440,7 @@ impl Display for FixedSizeBlockAllocator { writeln!(f, "Memory Type: {:x?}", self.memory_type())?; writeln!(f, "Allocation Ranges:")?; for node in AllocatorIterator::new(self.allocators) { + // SAFETY: node is produced by AllocatorIterator and points to a valid AllocatorListNode. let allocator = unsafe { &mut (*node).allocator }; writeln!( f, @@ -586,6 +595,7 @@ impl SpinLockedFixedSizeBlockAllocator { } /// Frees the block of pages at the given address of the given size. + /// /// ## Safety /// Caller must ensure that the given address corresponds to a valid block of pages that was allocated with /// [Self::allocate_pages] @@ -708,6 +718,7 @@ impl SpinLockedFixedSizeBlockAllocator { } } +// SAFETY: SpinLockedFixedSizeBlockAllocator serializes access and delegates to the inner allocator. 
unsafe impl GlobalAlloc for SpinLockedFixedSizeBlockAllocator { unsafe fn alloc(&self, layout: Layout) -> *mut u8 { match self.allocate(layout) { @@ -717,11 +728,13 @@ unsafe impl GlobalAlloc for SpinLockedFixedSizeBlockAllocator { } unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { if let Some(ptr) = NonNull::new(ptr) { + // SAFETY: ptr came from alloc with the same layout. unsafe { self.deallocate(ptr, layout) } } } } +// SAFETY: SpinLockedFixedSizeBlockAllocator serializes access and delegates to the inner allocator. unsafe impl Allocator for SpinLockedFixedSizeBlockAllocator { fn allocate(&self, layout: Layout) -> core::result::Result, AllocError> { let allocation = self.lock().alloc(layout); @@ -797,6 +810,7 @@ unsafe impl Allocator for SpinLockedFixedSizeBlockAllocator { } } unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + // SAFETY: ptr came from allocate with the same layout. unsafe { self.lock().dealloc(ptr, layout) } } } @@ -807,7 +821,9 @@ impl Display for SpinLockedFixedSizeBlockAllocator { } } +// SAFETY: SpinLockedFixedSizeBlockAllocator protects internal state with a spin lock. unsafe impl Sync for SpinLockedFixedSizeBlockAllocator {} +// SAFETY: SpinLockedFixedSizeBlockAllocator protects internal state with a spin lock. unsafe impl Send for SpinLockedFixedSizeBlockAllocator {} impl PageAllocator for SpinLockedFixedSizeBlockAllocator { @@ -820,7 +836,15 @@ impl PageAllocator for SpinLockedFixedSizeBlockAllocator { Self::allocate_pages(self, allocation_strategy, pages, alignment) } + /// Frees the block of pages at the given address of the given size. + /// + /// ## Safety + /// + /// Caller must ensure that the given address corresponds to a valid block of pages that was allocated with + /// [Self::allocate_pages]. unsafe fn free_pages(&self, address: usize, pages: usize) -> Result<(), EfiError> { + // SAFETY: address/pages must refer to a valid allocation owned by this allocator + // per the free_pages safety contract. 
unsafe { Self::free_pages(self, address, pages) } } @@ -876,11 +900,14 @@ mod tests { use super::*; fn init_gcd(gcd: &SpinLockedGcd, size: usize) -> u64 { + // SAFETY: Resetting the test GCD is safe in a test. unsafe { gcd.reset() }; gcd.init(48, 16); let layout = Layout::from_size_align(size, UEFI_PAGE_SIZE).unwrap(); + // SAFETY: System allocator is used with a valid layout for test memory backing. let base = unsafe { System.alloc(layout) as u64 }; + // SAFETY: init_memory_blocks is used with a valid backing allocation for tests. unsafe { gcd.init_memory_blocks(GcdMemoryType::SystemMemory, base as usize, size, efi::MEMORY_WB, efi::MEMORY_WB) .unwrap(); @@ -971,11 +998,13 @@ mod tests { let layout = Layout::from_size_align(0x8, 0x8).unwrap(); let allocation = fsb.allocate(layout).unwrap().cast::(); + // SAFETY: Allocation was returned by fsb for this layout. unsafe { fsb.deallocate(allocation, layout) }; let layout = Layout::from_size_align(0x20, 0x20).unwrap(); let allocation = fsb.allocate(layout).unwrap().cast::(); + // SAFETY: Allocation was returned by fsb for this layout. unsafe { fsb.deallocate(allocation, layout) }; }); }); @@ -1049,6 +1078,7 @@ mod tests { .unwrap(); assert!(fsb.allocators.is_some()); + // SAFETY: fsb.allocators points to valid list nodes after expand. unsafe { assert!((*fsb.allocators.unwrap()).next.is_none()); assert!((*fsb.allocators.unwrap()).allocator.bottom() as usize > base as usize); @@ -1079,6 +1109,7 @@ mod tests { )) .unwrap(); assert!(fsb.allocators.is_some()); + // SAFETY: fsb.allocators points to valid list nodes after expand. 
unsafe { assert!((*fsb.allocators.unwrap()).next.is_some()); assert!((*(*fsb.allocators.unwrap()).next.unwrap()).next.is_none()); @@ -1131,9 +1162,10 @@ mod tests { assert_eq!(NUM_ALLOCATIONS, AllocatorIterator::new(fsb.allocators).count()); let expected_free = ALLOCATION_SIZE - size_of::(); - assert!( - AllocatorIterator::new(fsb.allocators).all(|node| unsafe { (*node).allocator.free() == expected_free }) - ); + assert!(AllocatorIterator::new(fsb.allocators).all(|node| { + // SAFETY: node pointers come from AllocatorIterator over fsb.allocators. + unsafe { (*node).allocator.free() == expected_free } + })); }); } @@ -1211,6 +1243,7 @@ mod tests { ); let layout = Layout::from_size_align(0x1000, 0x10).unwrap(); + // SAFETY: fsb is initialized and used with a valid layout in tests. let allocation = unsafe { fsb.alloc(layout) }; assert!(fsb.lock().allocators.is_some()); assert!((allocation as u64) > base); @@ -1284,6 +1317,7 @@ mod tests { // Finally, we can test fallback_dealloc fsb.fallback_dealloc(allocation.cast(), layout); let expected_free = DEFAULT_PAGE_ALLOCATION_GRANULARITY - size_of::(); + // SAFETY: fsb.allocators points to a valid allocator after expand. unsafe { assert_eq!((*fsb.allocators.unwrap()).allocator.free(), expected_free); } @@ -1310,16 +1344,20 @@ mod tests { ); let layout = Layout::from_size_align(0x8, 0x8).unwrap(); + // SAFETY: fsb is initialized and used with a valid layout in tests. let allocation = unsafe { fsb.alloc(layout) }; + // SAFETY: Allocation was returned by fsb for this layout. unsafe { fsb.dealloc(allocation, layout) }; let free_block_ptr = fsb.lock().list_heads[list_index(&layout).unwrap()].take().unwrap() as *mut BlockListNode as *mut u8; assert_eq!(free_block_ptr, allocation); let layout = Layout::from_size_align(0x20, 0x20).unwrap(); + // SAFETY: fsb is initialized and used with a valid layout in tests. let allocation = unsafe { fsb.alloc(layout) }; + // SAFETY: Allocation was returned by fsb for this layout. 
unsafe { fsb.dealloc(allocation, layout) }; let free_block_ptr = fsb.lock().list_heads[list_index(&layout).unwrap()].take().unwrap() as *mut BlockListNode as *mut u8; @@ -1350,6 +1388,7 @@ mod tests { let allocation = fsb.allocate(layout).unwrap().cast::(); let allocation_ptr = allocation.as_ptr(); + // SAFETY: Allocation was returned by fsb for this layout. unsafe { fsb.deallocate(allocation, layout) }; let free_block_ptr = fsb.lock().list_heads[list_index(&layout).unwrap()].take().unwrap() as *mut BlockListNode as *mut u8; @@ -1359,6 +1398,7 @@ mod tests { let allocation = fsb.allocate(layout).unwrap().cast::(); let allocation_ptr = allocation.as_ptr(); + // SAFETY: Allocation was returned by fsb for this layout. unsafe { fsb.deallocate(allocation, layout) }; let free_block_ptr = fsb.lock().list_heads[list_index(&layout).unwrap()].take().unwrap() as *mut BlockListNode as *mut u8; @@ -1416,6 +1456,7 @@ mod tests { assert!(allocation.as_ptr() as u64 >= address); assert!((allocation.as_ptr() as u64) < address + 0x1000000); + // SAFETY: free_pages uses a valid test allocation pointer and page count. unsafe { match fsb.free_pages(0, pages) { Err(EfiError::NotFound) => {} @@ -1423,6 +1464,7 @@ mod tests { }; }; + // SAFETY: allocation and page count come from allocate_pages in this test. unsafe { fsb.free_pages(allocation.as_ptr() as usize, pages).unwrap(); }; @@ -1459,6 +1501,7 @@ mod tests { assert_eq!(allocation.as_ptr() as u64, target_address); + // SAFETY: free_pages uses a valid test allocation pointer and page count. unsafe { fsb.free_pages(allocation.as_ptr() as usize, pages).unwrap(); }; @@ -1493,6 +1536,7 @@ mod tests { .cast::(); assert!((allocation.as_ptr() as u64) < target_address); + // SAFETY: free_pages uses a valid test allocation pointer and page count. 
unsafe { fsb.free_pages(allocation.as_ptr() as usize, pages).unwrap(); }; @@ -1527,6 +1571,7 @@ mod tests { .cast::(); assert!((allocation.as_ptr() as usize + uefi_pages_to_size!(pages)) <= target_address as usize); + // SAFETY: allocation and page count come from allocate_pages in this test. unsafe { fsb.free_pages(allocation.as_ptr() as usize, pages).unwrap(); }; @@ -1576,6 +1621,7 @@ mod tests { _ => panic!("Expected INVALID_PARAMETER"), } + // SAFETY: Invalid parameters are intentionally tested (unit test). unsafe { match fsb.free_pages(0x1001, 5) { Err(EfiError::InvalidParameter) => {} @@ -1653,6 +1699,7 @@ mod tests { assert_eq!(stats.claimed_pages, uefi_size_to_pages!(TEST_MIN_EXPANSION_SIZE * 2)); //test alloc/deallocate and stats within the bucket + // SAFETY: fsb is initialized and used with a valid layout for testing. let ptr = unsafe { fsb.alloc( Layout::from_size_align(TEST_MIN_EXPANSION_SIZE - size_of::(), 0x8).unwrap(), @@ -1669,6 +1716,7 @@ mod tests { assert_eq!(stats.reserved_used, TEST_MIN_EXPANSION_SIZE + uefi_pages_to_size!(1)); assert_eq!(stats.claimed_pages, uefi_size_to_pages!(TEST_MIN_EXPANSION_SIZE * 2)); + // SAFETY: Allocation was returned by fsb for this layout. unsafe { fsb.dealloc(ptr, Layout::from_size_align(0x100, 0x8).unwrap()); } @@ -1684,6 +1732,7 @@ mod tests { assert_eq!(stats.claimed_pages, uefi_size_to_pages!(TEST_MIN_EXPANSION_SIZE * 2)); //test alloc/deallocate and stats blowing the bucket + // SAFETY: fsb is initialized and used with a valid layout in tests. let ptr = unsafe { fsb.alloc(Layout::from_size_align(TEST_MIN_EXPANSION_SIZE * 3, 0x8).unwrap()) }; //after this allocate, the basic memory map of the FSB should look like: @@ -1702,6 +1751,7 @@ mod tests { assert_eq!(stats.reserved_used, TEST_MIN_EXPANSION_SIZE + uefi_pages_to_size!(1)); assert_eq!(stats.claimed_pages, uefi_size_to_pages!(TEST_MIN_EXPANSION_SIZE * 5) + 1); + // SAFETY: Allocation was returned by fsb for this layout. 
unsafe { fsb.dealloc(ptr, Layout::from_size_align(TEST_MIN_EXPANSION_SIZE * 3, 0x8).unwrap()); } @@ -1740,6 +1790,7 @@ mod tests { assert_eq!(stats.reserved_used, TEST_MIN_EXPANSION_SIZE + uefi_pages_to_size!(5)); assert_eq!(stats.claimed_pages, uefi_size_to_pages!(TEST_MIN_EXPANSION_SIZE * 5) + 1); + // SAFETY: free_pages uses a valid test allocation pointer and page count. unsafe { fsb.free_pages(ptr as *mut u8 as usize, 0x4).unwrap(); } @@ -1798,9 +1849,11 @@ mod tests { assert_eq!(stats.reserved_used, TEST_MIN_EXPANSION_SIZE + uefi_pages_to_size!(5)); assert_eq!(stats.claimed_pages, uefi_size_to_pages!(TEST_MIN_EXPANSION_SIZE * 5) + 1 + 0x104); + // SAFETY: free_pages uses a valid test allocation pointer and page count. unsafe { fsb.free_pages(ptr1 as *mut u8 as usize, 0x4).unwrap(); } + // SAFETY: free_pages uses a valid test allocation pointer and page count. unsafe { fsb.free_pages(ptr as *mut u8 as usize, 0x104).unwrap(); } diff --git a/patina_dxe_core/src/allocator/uefi_allocator.rs b/patina_dxe_core/src/allocator/uefi_allocator.rs index fe83f4aad..89acb2083 100644 --- a/patina_dxe_core/src/allocator/uefi_allocator.rs +++ b/patina_dxe_core/src/allocator/uefi_allocator.rs @@ -119,6 +119,8 @@ where match self.allocator.allocate(allocation_info.layout) { Ok(ptr) => { let alloc_info_ptr = ptr.cast::().as_ptr(); + // SAFETY: alloc_info_ptr is within the allocation. `buffer` is a caller-provided out pointer per the + // `allocate_pool` safety contract. unsafe { alloc_info_ptr.write(allocation_info); buffer.write((ptr.as_ptr() as *mut u8 as usize + offset) as *mut c_void); @@ -196,10 +198,12 @@ where } /// Frees the block of pages at the given address of the given size. 
+ /// /// ## Safety /// Caller must ensure that the given address corresponds to a valid block of pages that was allocated with /// [Self::allocate_pages] pub unsafe fn free_pages(&self, address: usize, pages: usize) -> Result<(), EfiError> { + // SAFETY: address/pages must refer to a valid allocation from this allocator. unsafe { self.allocator.free_pages(address, pages) } } @@ -221,18 +225,22 @@ where } } +// SAFETY: UefiAllocator delegates to the inner allocator and preserves invariants. unsafe impl GlobalAlloc for UefiAllocator where A: PageAllocator + GlobalAlloc + Allocator + Display + Sync + Send, { unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 { + // SAFETY: layout comes from the global allocator contract. unsafe { self.allocator.alloc(layout) } } unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) { + // SAFETY: ptr was returned by alloc with the same layout. unsafe { self.allocator.dealloc(ptr, layout) } } } +// SAFETY: UefiAllocator delegates to the inner allocator and preserves invariants. unsafe impl Allocator for UefiAllocator where A: PageAllocator + GlobalAlloc + Allocator + Display + Sync + Send, @@ -241,6 +249,7 @@ where self.allocator.allocate(layout) } unsafe fn deallocate(&self, ptr: core::ptr::NonNull, layout: core::alloc::Layout) { + // SAFETY: ptr was returned by allocate with the same layout. unsafe { self.allocator.deallocate(ptr, layout) } } } @@ -294,11 +303,14 @@ mod tests { use super::*; fn init_gcd(gcd: &SpinLockedGcd, size: usize) -> u64 { + // SAFETY: Resetting the test GCD is safe for testing purposes. unsafe { gcd.reset() }; gcd.init(48, 16); let layout = Layout::from_size_align(size, UEFI_PAGE_SIZE).unwrap(); + // SAFETY: System allocator is used with a valid layout for test memory backing. let base = unsafe { System.alloc(layout) as u64 }; + // SAFETY: init_memory_blocks uses the test backing allocation. 
unsafe { gcd.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -361,6 +373,7 @@ mod tests { let ua = UefiAllocator::new(fsb, efi::RUNTIME_SERVICES_DATA); let mut buffer: *mut c_void = core::ptr::null_mut(); + // SAFETY: The allocator is initialized and buffer is a valid out pointer. assert!(unsafe { ua.allocate_pool(0x1000, core::ptr::addr_of_mut!(buffer)) }.is_ok()); assert!(buffer as u64 > base); assert!((buffer as u64) < base + 0x400000); @@ -373,6 +386,7 @@ mod tests { .unwrap_or_else(|err| panic!("Allocation layout error: {err:#?}")); let allocation_info: *mut AllocationInfo = ((buffer as usize) - offset) as *mut AllocationInfo; + // SAFETY: allocation_info is derived from a valid allocation. unsafe { let allocation_info = &*allocation_info; assert_eq!(allocation_info.signature, POOL_SIG); @@ -401,8 +415,10 @@ mod tests { let ua = UefiAllocator::new(fsb, efi::RUNTIME_SERVICES_DATA); let mut buffer: *mut c_void = core::ptr::null_mut(); + // SAFETY: The allocator is initialized and buffer is a valid out pointer. assert!(unsafe { ua.allocate_pool(0x1000, core::ptr::addr_of_mut!(buffer)) }.is_ok()); + // SAFETY: buffer was allocated by ua.allocate_pool above. assert!(unsafe { ua.free_pool(buffer) }.is_ok()); let (_, offset) = Layout::new::() @@ -413,12 +429,14 @@ mod tests { .unwrap_or_else(|err| panic!("Allocation layout error: {err:#?}")); let allocation_info: *mut AllocationInfo = ((buffer as usize) - offset) as *mut AllocationInfo; + // SAFETY: allocation_info is derived from a valid allocation made earlier in this test. unsafe { let allocation_info = &*allocation_info; assert_eq!(allocation_info.signature, 0); } let prev_buffer = buffer; + // SAFETY: The allocator is initialized and buffer is a valid out pointer. 
assert!(unsafe { ua.allocate_pool(0x1000, core::ptr::addr_of_mut!(buffer)) }.is_ok()); assert!(buffer as u64 > base); assert!((buffer as u64) < base + 0x400000); @@ -464,6 +482,7 @@ mod tests { } // Attempting to free should fail with InvalidParameter due to signature mismatch + // SAFETY: buffer was allocated by ua.allocate_pool in this test. assert_eq!(unsafe { ua.free_pool(buffer) }, Err(EfiError::InvalidParameter)); }); }); @@ -493,6 +512,7 @@ mod tests { assert!(buffer_address >= base); assert!(buffer_address < base + 0x400000); + // SAFETY: free_pages uses a valid allocation pointer and page count. unsafe { ua.free_pages(buffer_address as usize, 4).unwrap(); } @@ -503,6 +523,7 @@ mod tests { assert_eq!(buffer_address, buffer_address2); assert_eq!(buffer.len(), max(granularity, UEFI_PAGE_SIZE * 4)); //should be 4 pages or granularity pages in size. + // SAFETY: free_pages uses a valid allocation pointer and page count. unsafe { ua.free_pages(buffer_address2 as usize, 4).unwrap(); } @@ -541,6 +562,7 @@ mod tests { let bs_buffer_address = bs_buffer.as_ptr() as *mut u8 as efi::PhysicalAddress; let bc_buffer_address = bc_buffer.as_ptr() as *mut u8 as efi::PhysicalAddress; + // SAFETY: free_pages uses valid allocation pointers for each allocator. unsafe { assert_eq!(bs_allocator.free_pages(bc_buffer_address as usize, 4), Err(EfiError::NotFound)); assert_eq!(bc_allocator.free_pages(bs_buffer_address as usize, 4), Err(EfiError::NotFound)); @@ -568,11 +590,13 @@ mod tests { let ua = UefiAllocator::new(fsb, efi::RUNTIME_SERVICES_DATA); let layout = Layout::from_size_align(0x8, 0x8).unwrap(); + // SAFETY: ua.alloc/ua.dealloc are used with a valid layout in tests. unsafe { let a = ua.alloc(layout); ua.dealloc(a, layout) } + // SAFETY: ua.alloc returned non-null for this test allocation. 
unsafe { let a = ua.alloc(layout); ua.deallocate(NonNull::new_unchecked(a), layout); @@ -718,6 +742,7 @@ mod tests { //verify that if the reserved allocation that is not in the reserved range is freed, other allocators can //use it. + // SAFETY: reserved_page_addr is a valid allocation address for free_pages. unsafe { reserved_allocator.free_pages(reserved_page_addr as usize, 1).unwrap(); } @@ -731,6 +756,7 @@ mod tests { ); //verify that if pages are freed within the reserved range, that other allocators cannot use them. + // SAFETY: reserved_range.start is a valid allocation address for free_pages in the test harness. unsafe { reserved_allocator.free_pages(reserved_range.start as usize, 0x10).unwrap(); } diff --git a/patina_dxe_core/src/allocator/usage_tests/uefi_memory_map.rs b/patina_dxe_core/src/allocator/usage_tests/uefi_memory_map.rs index 27feb48e1..f55c84a86 100644 --- a/patina_dxe_core/src/allocator/usage_tests/uefi_memory_map.rs +++ b/patina_dxe_core/src/allocator/usage_tests/uefi_memory_map.rs @@ -165,8 +165,6 @@ mod tests { /// `patina::pi::hob`) during HOB list building in `build_custom_hob_list()`, where the /// appropriate HOB headers are automatically added. 
mod hob_config { - use r_efi::efi; - /// Configuration for a resource descriptor HOB (becomes `patina::pi::hob::ResourceDescriptor`) #[derive(Clone, Debug)] pub struct ResourceDescriptorConfig { @@ -174,7 +172,7 @@ mod tests { pub resource_attribute: u32, pub physical_start: u64, pub resource_length: u64, - pub owner: efi::Guid, + pub owner: patina::BinaryGuid, } /// Configuration for a memory allocation HOB (becomes `patina::pi::hob::MemoryAllocation`) @@ -183,7 +181,7 @@ mod tests { pub memory_type: u32, pub memory_base_address: u64, pub memory_length: u64, - pub name: efi::Guid, + pub name: patina::BinaryGuid, } } @@ -629,14 +627,7 @@ mod tests { memory_type: efi::BOOT_SERVICES_DATA, memory_base_address: SIZE_2MB as u64, memory_length: SIZE_512KB as u64, - name: efi::Guid::from_fields( - 0x4ED4BF27, - 0x4092, - 0x42E9, - 0x80, - 0x7D, - &[0x52, 0x7B, 0x1D, 0x00, 0xC9, 0xBD], - ), + name: patina::guids::HOB_MEMORY_ALLOC_STACK, }) .with_memory_allocation(MemoryAllocationConfig { memory_type: efi::BOOT_SERVICES_CODE, diff --git a/patina_dxe_core/src/component_dispatcher.rs b/patina_dxe_core/src/component_dispatcher.rs index da672f205..4da367f02 100644 --- a/patina_dxe_core/src/component_dispatcher.rs +++ b/patina_dxe_core/src/component_dispatcher.rs @@ -189,7 +189,7 @@ impl ComponentDispatcher { pub(crate) fn insert_hobs(&mut self, hob_list: &HobList<'_>) { for hob in hob_list.iter() { if let patina::pi::hob::Hob::GuidHob(guid, data) = hob { - let parser_funcs = self.storage.get_hob_parsers(&patina::OwnedGuid::from(guid.name)); + let parser_funcs = self.storage.get_hob_parsers(&guid.name); if parser_funcs.is_empty() { let (f0, f1, f2, f3, f4, &[f5, f6, f7, f8, f9, f10]) = guid.name.as_fields(); let name = alloc::format!( @@ -386,7 +386,7 @@ mod tests { length: core::mem::size_of::() as u16, reserved: 0, }, - name: *GUID1, + name: GUID1, }; let guid_hob2 = GuidHob { @@ -395,7 +395,7 @@ mod tests { length: core::mem::size_of::() as u16, reserved: 0, }, - name: 
*GUID2, + name: GUID2, }; let guid_hob3 = GuidHob { @@ -404,7 +404,7 @@ mod tests { length: core::mem::size_of::() as u16, reserved: 0, }, - name: *GUID3, + name: GUID3, }; hob_list.push(patina::pi::hob::Hob::GuidHob(&guid_hob1, hob1_bytes)); diff --git a/patina_dxe_core/src/config_tables.rs b/patina_dxe_core/src/config_tables.rs index b6c8612c0..654d73841 100644 --- a/patina_dxe_core/src/config_tables.rs +++ b/patina_dxe_core/src/config_tables.rs @@ -98,6 +98,7 @@ pub fn core_install_configuration_table( // Updating the table. Reclaim the old table (if present) so it'll get dropped by the runtime allocator. if !system_table.configuration_table.is_null() { + // SAFETY: configuration_table points to number_of_table_entries elements from the runtime allocator. unsafe { let _old_boxed_table = Box::from_raw_in( slice_from_raw_parts_mut(system_table.configuration_table, system_table.number_of_table_entries), diff --git a/patina_dxe_core/src/config_tables/memory_attributes_table.rs b/patina_dxe_core/src/config_tables/memory_attributes_table.rs index 8037bb4ea..9528c0ce0 100644 --- a/patina_dxe_core/src/config_tables/memory_attributes_table.rs +++ b/patina_dxe_core/src/config_tables/memory_attributes_table.rs @@ -61,7 +61,9 @@ impl MemoryAttributesTable { impl Debug for MemoryAttributesTable { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + // SAFETY: self.0 is a valid MAT pointer when Debug is invoked. let mat = unsafe { self.0.as_ref().expect("BAD MAT PTR") }; + // SAFETY: mat.entry points to number_of_entries descriptors in the MAT. 
let entries = unsafe { slice::from_raw_parts(mat.entry.as_ptr(), mat.number_of_entries as usize) }; writeln!(f, "MemoryAttributesTable {{")?; @@ -111,6 +113,7 @@ pub fn core_install_memory_attributes_table() { // behavior with a stack pointer that goes out of scope match core_allocate_pool(efi::BOOT_SERVICES_DATA, size_of::()) { Ok(empty_ptr) => { + // SAFETY: empty_ptr is a valid allocation for MemoryAttributesTable. if let Some(empty_mat) = unsafe { (empty_ptr as *mut efi::MemoryAttributesTable).as_mut() } { *empty_mat = efi::MemoryAttributesTable { version: 0, @@ -203,6 +206,7 @@ pub fn core_install_memory_attributes_table() { // this ends up being a large unsafe block because we have to dereference the raw pointer core_allocate_pool // gave us and convert it to a real type and back in order to install it + // SAFETY: mat_ptr is a valid allocation for MemoryAttributesTable and copy size is bounded by buffer_size. unsafe { let mat = &mut *mat_ptr; mat.version = efi::MEMORY_ATTRIBUTES_TABLE_VERSION; @@ -263,6 +267,7 @@ mod tests { test_support::with_global_lock(|| { POST_RTB.reset(); + // SAFETY: Test-only initialization under the global lock. unsafe { test_support::init_test_gcd(None); test_support::reset_allocators(); @@ -348,6 +353,7 @@ mod tests { // Check if MEMORY_ATTRIBUTES_TABLE is set after installation let mat_ptr = get_configuration_table(&efi::MEMORY_ATTRIBUTES_TABLE_GUID).unwrap(); + // SAFETY: MAT pointer comes from configuration table and is validated before dereference. 
unsafe { let mat = &*(mat_ptr.as_ptr() as *const efi::MemoryAttributesTable); diff --git a/patina_dxe_core/src/core_patina_tests/audit_tests.rs b/patina_dxe_core/src/core_patina_tests/audit_tests.rs index f79041715..d5c60c1cb 100644 --- a/patina_dxe_core/src/core_patina_tests/audit_tests.rs +++ b/patina_dxe_core/src/core_patina_tests/audit_tests.rs @@ -11,16 +11,18 @@ use crate::GCD; use alloc::vec::Vec; -use patina::{test::patina_test, u_assert}; +use patina_test::{patina_test, u_assert}; // used in the macro, but not directly referenced; causes a warning if patina tests not enabled. #[allow(unused)] +use patina::BinaryGuid; +#[allow(unused)] use r_efi::efi; // Verify that all adjacent free memory descriptors in the GCD are merged together #[patina_test] -#[on(event = efi::EVENT_GROUP_READY_TO_BOOT)] -#[on(event = efi::EVENT_GROUP_EXIT_BOOT_SERVICES)] -fn gcd_free_memory_merged_test() -> patina::test::Result { +#[on(event = BinaryGuid(efi::EVENT_GROUP_READY_TO_BOOT))] +#[on(event = BinaryGuid(efi::EVENT_GROUP_EXIT_BOOT_SERVICES))] +fn gcd_free_memory_merged_test() -> patina_test::error::Result { let mut last_desc: Option = None; let mut descs = Vec::with_capacity(GCD.memory_descriptor_count() * 2); GCD.get_memory_descriptors(&mut descs, crate::gcd::DescriptorFilter::Free).map_err(|_| "Can't get descriptors")?; diff --git a/patina_dxe_core/src/core_patina_tests/stability_tests.rs b/patina_dxe_core/src/core_patina_tests/stability_tests.rs index 7ac86b3fe..48874ee85 100644 --- a/patina_dxe_core/src/core_patina_tests/stability_tests.rs +++ b/patina_dxe_core/src/core_patina_tests/stability_tests.rs @@ -15,16 +15,15 @@ use alloc::vec::Vec; use patina::{ base::{SIZE_1GB, SIZE_2MB, SIZE_4KB}, pi::dxe_services::GcdMemoryType, - test::patina_test, - u_assert, u_assert_eq, }; use patina_paging::MemoryAttributes; +use patina_test::{patina_test, u_assert, u_assert_eq}; use r_efi::efi; /// Stability Test: Split a 2MB page into 4KB pages and verify correctness #[patina_test] 
#[on(timer = 3_000_000)] // 300ms interval -fn page_table_tests_2mb_split() -> patina::test::Result { +fn page_table_tests_2mb_split() -> patina_test::error::Result { let mut addr_vec = Vec::new(); for _ in 0..19 { addr_vec.push( @@ -157,7 +156,7 @@ fn page_table_tests_2mb_split() -> patina::test::Result { /// Stability Test: Split a 1GB page into 4KB pages and verify correctness #[patina_test] #[on(timer = 3_000_000)] // 300ms interval -fn page_table_tests_1gb_split() -> patina::test::Result { +fn page_table_tests_1gb_split() -> patina_test::error::Result { let addr = GCD.allocate_memory_space( AllocateType::TopDown(None), GcdMemoryType::SystemMemory, @@ -242,7 +241,7 @@ fn page_table_tests_1gb_split() -> patina::test::Result { /// Stability Test: Map a 2MB page, unmap it, map a 4KB region in it, pattern it, flush tlbs, and verify pattern #[patina_test] #[on(timer = 3_000_000)] // 300ms interval -fn page_table_tests_2mb_unmap() -> patina::test::Result { +fn page_table_tests_2mb_unmap() -> patina_test::error::Result { let mut addr_vec = Vec::new(); for _ in 0..19 { addr_vec.push( @@ -336,7 +335,7 @@ fn page_table_tests_2mb_unmap() -> patina::test::Result { /// Stability Test: Map a 1GB page, unmap it, map a 2MB region in it, pattern it, flush tlbs, and verify pattern #[patina_test] #[on(timer = 3_000_000)] // 300ms interval -fn page_table_tests_1gb_unmap_2mb_remap() -> patina::test::Result { +fn page_table_tests_1gb_unmap_2mb_remap() -> patina_test::error::Result { let addr = GCD.allocate_memory_space( AllocateType::TopDown(None), GcdMemoryType::SystemMemory, @@ -432,7 +431,7 @@ fn page_table_tests_1gb_unmap_2mb_remap() -> patina::test::Result { /// Stability Test: Map a 1GB page, unmap it, map a 4KB region in it, pattern it, flush tlbs, and verify pattern #[patina_test] #[on(timer = 3_000_000)] // 300ms interval -fn page_table_tests_1gb_unmap_4kb_remap() -> patina::test::Result { +fn page_table_tests_1gb_unmap_4kb_remap() -> patina_test::error::Result { let addr = 
GCD.allocate_memory_space( AllocateType::TopDown(None), GcdMemoryType::SystemMemory, diff --git a/patina_dxe_core/src/cpu.rs b/patina_dxe_core/src/cpu.rs index 1b264a741..1650bb03a 100644 --- a/patina_dxe_core/src/cpu.rs +++ b/patina_dxe_core/src/cpu.rs @@ -13,7 +13,7 @@ mod cpu_arch_protocol; mod hw_interrupt_protocol; mod perf_timer; -pub(crate) use cpu_arch_protocol::CpuArchProtocolInstaller; +pub(crate) use cpu_arch_protocol::{CpuArchProtocolInstaller, DxeCpu, DxeInterruptManager}; #[cfg(all(target_os = "uefi", target_arch = "aarch64"))] pub(crate) use hw_interrupt_protocol::HwInterruptProtocolInstaller; pub(crate) use perf_timer::PerfTimer; diff --git a/patina_dxe_core/src/cpu/cpu_arch_protocol.rs b/patina_dxe_core/src/cpu/cpu_arch_protocol.rs index 241fcb731..19986909c 100644 --- a/patina_dxe_core/src/cpu/cpu_arch_protocol.rs +++ b/patina_dxe_core/src/cpu/cpu_arch_protocol.rs @@ -13,18 +13,53 @@ use alloc::boxed::Box; use core::ffi::c_void; use patina::{ boot_services::{BootServices, StandardBootServices}, - component::{component, service::Service}, + component::{ + Storage, component, + service::{IntoService, Service}, + }, error::{EfiError, Result}, uefi_protocol::ProtocolInterface, }; use patina_internal_cpu::{ - cpu::Cpu, - interrupts::{self, ExceptionType, HandlerType, InterruptManager}, + cpu::{Cpu, EfiCpu}, + interrupts::{self, ExceptionType, HandlerType, InterruptManager, Interrupts}, }; use r_efi::efi; use patina::pi::protocols::cpu_arch::{CpuFlushType, CpuInitType, InterruptHandler, PROTOCOL_GUID, Protocol}; +#[derive(IntoService)] +#[service(dyn Cpu)] +pub(crate) struct DxeCpu(pub(crate) EfiCpu); + +impl Cpu for DxeCpu { + fn flush_data_cache(&self, start: efi::PhysicalAddress, length: u64, flush_type: CpuFlushType) -> Result<()> { + self.0.flush_data_cache(start, length, flush_type) + } + + fn init(&self, init_type: CpuInitType) -> Result<()> { + self.0.init(init_type) + } + + fn get_timer_value(&self, timer_index: u32) -> Result<(u64, u64)> { + 
self.0.get_timer_value(timer_index) + } +} + +#[derive(IntoService)] +#[service(dyn InterruptManager)] +pub(crate) struct DxeInterruptManager(pub(crate) Interrupts); + +impl InterruptManager for DxeInterruptManager { + fn register_exception_handler(&self, exception_type: ExceptionType, handler: HandlerType) -> Result<()> { + self.0.register_exception_handler(exception_type, handler) + } + + fn unregister_exception_handler(&self, exception_type: ExceptionType) -> Result<()> { + self.0.unregister_exception_handler(exception_type) + } +} + #[repr(C)] struct EfiCpuArchProtocolImpl { protocol: Protocol, @@ -34,8 +69,9 @@ struct EfiCpuArchProtocolImpl { pub(crate) interrupt_manager: Service, } +// SAFETY: EfiCpuArchProtocolImpl provides a valid protocol structure with stable GUID. unsafe impl ProtocolInterface for EfiCpuArchProtocolImpl { - const PROTOCOL_GUID: efi::Guid = PROTOCOL_GUID; + const PROTOCOL_GUID: patina::BinaryGuid = PROTOCOL_GUID; } // Helper function to convert a raw mutable pointer to a mutable reference. @@ -44,6 +80,7 @@ fn get_impl_ref<'a>(this: *const Protocol) -> &'a EfiCpuArchProtocolImpl { panic!("Null pointer passed to get_impl_ref()"); } + // SAFETY: this is non-null and points to an EfiCpuArchProtocolImpl instance. unsafe { &*(this as *const EfiCpuArchProtocolImpl) } } @@ -52,6 +89,7 @@ fn get_impl_ref_mut<'a>(this: *mut Protocol) -> &'a mut EfiCpuArchProtocolImpl { panic!("Null pointer passed to get_impl_ref_mut()"); } + // SAFETY: this is non-null and points to an EfiCpuArchProtocolImpl instance. 
unsafe { &mut *(this as *mut EfiCpuArchProtocolImpl) } } @@ -258,95 +296,177 @@ mod tests { #[test] fn test_flush_data_cache() { - let mut cpu_init = MockEfiCpuInit::new(); - cpu_init.expect_flush_data_cache().with(eq(0), eq(0), always()).returning(|_, _, _| Ok(())); - let cpu: Service = Service::mock(Box::new(cpu_init)); + with_locked_state(|| { + let mut cpu_init = MockEfiCpuInit::new(); + cpu_init.expect_flush_data_cache().with(eq(0), eq(0), always()).returning(|_, _, _| Ok(())); + let cpu: Service = Service::mock(Box::new(cpu_init)); - let im: Service = Service::mock(Box::new(MockInterruptManager::new())); + let im: Service = Service::mock(Box::new(MockInterruptManager::new())); - let protocol = EfiCpuArchProtocolImpl::new(cpu, im); + let protocol = EfiCpuArchProtocolImpl::new(cpu, im); - let status = flush_data_cache(&protocol.protocol, 0, 0, CpuFlushType::EfiCpuFlushTypeWriteBackInvalidate); - assert_eq!(status, efi::Status::SUCCESS); + let status = flush_data_cache(&protocol.protocol, 0, 0, CpuFlushType::EfiCpuFlushTypeWriteBackInvalidate); + assert_eq!(status, efi::Status::SUCCESS); + }); } #[test] fn test_enable_interrupt() { - let cpu: Service = Service::mock(Box::new(MockEfiCpuInit::new())); - let im: Service = Service::mock(Box::new(MockInterruptManager::new())); - let protocol = EfiCpuArchProtocolImpl::new(cpu, im); - - let status = enable_interrupt(&protocol.protocol); - assert_eq!(status, efi::Status::SUCCESS); + with_locked_state(|| { + let cpu: Service = Service::mock(Box::new(MockEfiCpuInit::new())); + let im: Service = Service::mock(Box::new(MockInterruptManager::new())); + let protocol = EfiCpuArchProtocolImpl::new(cpu, im); + + let status = enable_interrupt(&protocol.protocol); + assert_eq!(status, efi::Status::SUCCESS); + }); } #[test] fn test_disable_interrupt() { - let cpu: Service = Service::mock(Box::new(MockEfiCpuInit::new())); - let im: Service = Service::mock(Box::new(MockInterruptManager::new())); - let protocol = 
EfiCpuArchProtocolImpl::new(cpu, im); - - let status = disable_interrupt(&protocol.protocol); - assert_eq!(status, efi::Status::SUCCESS); + with_locked_state(|| { + let cpu: Service = Service::mock(Box::new(MockEfiCpuInit::new())); + let im: Service = Service::mock(Box::new(MockInterruptManager::new())); + let protocol = EfiCpuArchProtocolImpl::new(cpu, im); + + let status = disable_interrupt(&protocol.protocol); + assert_eq!(status, efi::Status::SUCCESS); + }); } #[test] fn test_get_interrupt_state() { - let cpu: Service = Service::mock(Box::new(MockEfiCpuInit::new())); - let im: Service = Service::mock(Box::new(MockInterruptManager::new())); - let protocol = EfiCpuArchProtocolImpl::new(cpu, im); - - let mut state = false; - let status = get_interrupt_state(&protocol.protocol, &mut state as *mut bool); - assert_eq!(status, efi::Status::SUCCESS); + with_locked_state(|| { + let cpu: Service = Service::mock(Box::new(MockEfiCpuInit::new())); + let im: Service = Service::mock(Box::new(MockInterruptManager::new())); + let protocol = EfiCpuArchProtocolImpl::new(cpu, im); + + let mut state = false; + let status = get_interrupt_state(&protocol.protocol, &mut state as *mut bool); + assert_eq!(status, efi::Status::SUCCESS); + }); } #[test] fn test_init() { - let mut cpu_init = MockEfiCpuInit::new(); - cpu_init.expect_init().with(always()).returning(|_| Ok(())); - let cpu: Service = Service::mock(Box::new(cpu_init)); + with_locked_state(|| { + let mut cpu_init = MockEfiCpuInit::new(); + cpu_init.expect_init().with(always()).returning(|_| Ok(())); + let cpu: Service = Service::mock(Box::new(cpu_init)); - let mut im: Service = Service::mock(Box::new(MockInterruptManager::new())); + let mut im: Service = Service::mock(Box::new(MockInterruptManager::new())); - let protocol = EfiCpuArchProtocolImpl::new(cpu, im); + let protocol = EfiCpuArchProtocolImpl::new(cpu, im); - let status = init(&protocol.protocol, CpuInitType::EfiCpuInit); - assert_eq!(status, efi::Status::SUCCESS); + let 
status = init(&protocol.protocol, CpuInitType::EfiCpuInit); + assert_eq!(status, efi::Status::SUCCESS); + }); } extern "efiapi" fn mock_interrupt_handler(_type: EfiExceptionType, _context: EfiSystemContext) {} #[test] fn test_register_interrupt_handler() { - let cpu: Service = Service::mock(Box::new(MockEfiCpuInit::new())); + with_locked_state(|| { + let cpu: Service = Service::mock(Box::new(MockEfiCpuInit::new())); - let mut interrupt_manager = MockInterruptManager::new(); - interrupt_manager - .expect_register_exception_handler() - .with(eq(ExceptionType::from(0_usize)), always()) - .returning(|_, _| Ok(())); - let im: Service = Service::mock(Box::new(interrupt_manager)); + let mut interrupt_manager = MockInterruptManager::new(); + interrupt_manager + .expect_register_exception_handler() + .with(eq(ExceptionType::from(0_usize)), always()) + .returning(|_, _| Ok(())); + let im: Service = Service::mock(Box::new(interrupt_manager)); - let protocol = EfiCpuArchProtocolImpl::new(cpu, im); + let protocol = EfiCpuArchProtocolImpl::new(cpu, im); - let status = register_interrupt_handler(&protocol.protocol, 0, mock_interrupt_handler); - assert_eq!(status, efi::Status::SUCCESS); + let status = register_interrupt_handler(&protocol.protocol, 0, mock_interrupt_handler); + assert_eq!(status, efi::Status::SUCCESS); + }); } #[test] fn test_get_timer_value() { - let mut cpu_init = MockEfiCpuInit::new(); - cpu_init.expect_get_timer_value().with(eq(0)).returning(|_| Ok((0, 0))); - let cpu: Service = Service::mock(Box::new(cpu_init)); + with_locked_state(|| { + let mut cpu_init = MockEfiCpuInit::new(); + cpu_init.expect_get_timer_value().with(eq(0)).returning(|_| Ok((0, 0))); + let cpu: Service = Service::mock(Box::new(cpu_init)); + + let im: Service = Service::mock(Box::new(MockInterruptManager::new())); + + let protocol = EfiCpuArchProtocolImpl::new(cpu, im); + + let mut timer_value: u64 = 0; + let mut timer_period: u64 = 0; + let status = + get_timer_value(&protocol.protocol, 0, 
&mut timer_value as *mut _, &mut timer_period as *mut _); + assert_eq!(status, efi::Status::SUCCESS); + }); + } + + // Tests for DxeCpu delegation + #[test] + fn test_dxe_cpu_flush_data_cache_delegates() { + with_locked_state(|| { + let dxe_cpu = DxeCpu(EfiCpu::default()); + let result = dxe_cpu.flush_data_cache(0x1000, 0x100, CpuFlushType::EfiCpuFlushTypeWriteBackInvalidate); + assert!(result.is_ok()); + }); + } - let im: Service = Service::mock(Box::new(MockInterruptManager::new())); + #[test] + fn test_dxe_cpu_init_delegates() { + with_locked_state(|| { + let dxe_cpu = DxeCpu(EfiCpu::default()); + let result = dxe_cpu.init(CpuInitType::EfiCpuInit); + assert!(result.is_ok()); + }); + } + + #[test] + fn test_dxe_cpu_get_timer_value_delegates() { + with_locked_state(|| { + let dxe_cpu = DxeCpu(EfiCpu::default()); + let result = dxe_cpu.get_timer_value(0); + assert_eq!(result.unwrap(), (0, 0)); + }); + } - let protocol = EfiCpuArchProtocolImpl::new(cpu, im); + // Tests for DxeInterruptManager delegation + #[test] + fn test_dxe_interrupt_manager_register_then_unregister_delegates() { + with_locked_state(|| { + let dxe_interrupt_manager = DxeInterruptManager(Interrupts::default()); + + // Register first + let result = dxe_interrupt_manager.register_exception_handler( + ExceptionType::from(0_usize), + HandlerType::UefiRoutine(mock_interrupt_handler), + ); + assert!(result.is_ok()); + + // Then unregister + let result = dxe_interrupt_manager.unregister_exception_handler(ExceptionType::from(0_usize)); + assert!(result.is_ok()); + }); + } - let mut timer_value: u64 = 0; - let mut timer_period: u64 = 0; - let status = get_timer_value(&protocol.protocol, 0, &mut timer_value as *mut _, &mut timer_period as *mut _); - assert_eq!(status, efi::Status::SUCCESS); + #[test] + fn test_dxe_interrupt_manager_unregister_then_register_delegates() { + with_locked_state(|| { + let dxe_interrupt_manager = DxeInterruptManager(Interrupts::default()); + let result = 
dxe_interrupt_manager.unregister_exception_handler(ExceptionType::from(0_usize)); + // Expecting an error because there is no handler registered yet, but the method should still be callable. + assert!(result.is_err()); + + let result = dxe_interrupt_manager.register_exception_handler( + ExceptionType::from(0_usize), + HandlerType::UefiRoutine(mock_interrupt_handler), + ); + assert!(result.is_ok()); + + // Now the unregister should succeed + let result = dxe_interrupt_manager.unregister_exception_handler(ExceptionType::from(0_usize)); + assert!(result.is_ok()); + }); } } diff --git a/patina_dxe_core/src/cpu/hw_interrupt_protocol.rs b/patina_dxe_core/src/cpu/hw_interrupt_protocol.rs index 921e49734..e5068e565 100644 --- a/patina_dxe_core/src/cpu/hw_interrupt_protocol.rs +++ b/patina_dxe_core/src/cpu/hw_interrupt_protocol.rs @@ -12,6 +12,7 @@ use arm_gic::{ gicv3::{GicCpuInterface, InterruptGroup}, }; use patina::{ + BinaryGuid, boot_services::{BootServices, StandardBootServices}, component::{component, service::Service}, guids::{HARDWARE_INTERRUPT_PROTOCOL, HARDWARE_INTERRUPT_PROTOCOL_V2}, @@ -166,7 +167,7 @@ impl<'a> EfiHardwareInterruptProtocol<'a> { } unsafe impl ProtocolInterface for EfiHardwareInterruptProtocol<'_> { - const PROTOCOL_GUID: efi::Guid = HARDWARE_INTERRUPT_PROTOCOL; + const PROTOCOL_GUID: BinaryGuid = HARDWARE_INTERRUPT_PROTOCOL; } type HardwareInterruptRegisterV2 = @@ -354,7 +355,7 @@ impl<'a> EfiHardwareInterruptV2Protocol<'a> { } unsafe impl ProtocolInterface for EfiHardwareInterruptV2Protocol<'_> { - const PROTOCOL_GUID: efi::Guid = HARDWARE_INTERRUPT_PROTOCOL_V2; + const PROTOCOL_GUID: BinaryGuid = HARDWARE_INTERRUPT_PROTOCOL_V2; } impl From for HardwareInterrupt2TriggerType { diff --git a/patina_dxe_core/src/cpu/perf_timer.rs b/patina_dxe_core/src/cpu/perf_timer.rs index 4e7353e35..a94e5186d 100644 --- a/patina_dxe_core/src/cpu/perf_timer.rs +++ b/patina_dxe_core/src/cpu/perf_timer.rs @@ -9,7 +9,8 @@ use core::sync::atomic::{AtomicU64, 
Ordering}; -use patina::component::service::{IntoService, perf_timer::ArchTimerFunctionality}; +use patina::component::service::IntoService; +use patina::timer::{self as perf_timer, ArchTimerFunctionality}; /// Performance timer implementation. #[derive(IntoService)] @@ -22,7 +23,7 @@ impl ArchTimerFunctionality for PerfTimer { /// Value of the counter (ticks). #[coverage(off)] fn cpu_count(&self) -> u64 { - arch_cpu_count() + perf_timer::arch_cpu_count() } /// Frequency of `cpu_count` increments (in Hz). @@ -30,7 +31,7 @@ impl ArchTimerFunctionality for PerfTimer { /// Otherwise, an architecture-specific method is attempted to determine the frequency. fn perf_frequency(&self) -> u64 { if self.frequency.load(Ordering::Relaxed) == 0 { - self.frequency.store(arch_perf_frequency(), Ordering::Relaxed); + self.frequency.store(perf_timer::arch_perf_frequency(), Ordering::Relaxed); } self.frequency.load(Ordering::Relaxed) } @@ -54,65 +55,6 @@ impl Default for PerfTimer { } } -/// Returns the current CPU count using architecture-specific methods. -/// -/// Skip coverage as any value could be valid, including 0. -#[coverage(off)] -fn arch_cpu_count() -> u64 { - #[cfg(target_arch = "x86_64")] - { - use core::arch::x86_64; - unsafe { x86_64::_rdtsc() } - } - #[cfg(target_arch = "aarch64")] - { - use aarch64_cpu::registers::{self, Readable}; - registers::CNTPCT_EL0.get() - } -} - -/// Returns the performance frequency using architecture-specific methods. -/// In general, the performance frequency is a configurable value that may be -/// provided by the platform. This function is a fallback when no -/// platform-specific configuration is provided. -/// -/// Skip coverage as any value could be valid, including 0. -#[coverage(off)] -pub(crate) fn arch_perf_frequency() -> u64 { - // Try to get TSC frequency from CPUID (most Intel and AMD platforms). - #[cfg(target_arch = "x86_64")] - { - use core::arch::{x86_64, x86_64::CpuidResult}; - - let CpuidResult { eax, ebx, ecx, .. 
} = x86_64::__cpuid(0x15); - if eax != 0 && ebx != 0 && ecx != 0 { - // CPUID 0x15 gives TSC_frequency = (ECX * EAX) / EBX. - // Most modern x86 platforms support this leaf. - return (ecx as u64 * ebx as u64) / eax as u64; - } - - // CPUID 0x16 gives base frequency in MHz in EAX. - // This is supported on some older x86 platforms. - // This is a nominal frequency and is less accurate for reflecting actual operating conditions. - let CpuidResult { eax, .. } = x86_64::__cpuid(0x16); - if eax != 0 { - return (eax * 1_000_000) as u64; - } - - 0 - } - - // Use CNTFRQ_EL0 for aarch64 platforms. - #[cfg(target_arch = "aarch64")] - { - use patina::read_sysreg; - read_sysreg!(CNTFRQ_EL0) - } - - #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] - 0 -} - #[cfg(test)] #[coverage(off)] mod tests { @@ -128,12 +70,12 @@ mod tests { #[test] fn test_zero_frequency_forces_arch_perf_frequency() { let timer = PerfTimer::default(); - assert_eq!(timer.perf_frequency(), arch_perf_frequency()); + assert_eq!(timer.perf_frequency(), perf_timer::arch_perf_frequency()); let timer = PerfTimer::new(); - assert_eq!(timer.perf_frequency(), arch_perf_frequency()); + assert_eq!(timer.perf_frequency(), perf_timer::arch_perf_frequency()); let timer = PerfTimer::with_frequency(0); - assert_eq!(timer.perf_frequency(), arch_perf_frequency()); + assert_eq!(timer.perf_frequency(), perf_timer::arch_perf_frequency()); } } diff --git a/patina_dxe_core/src/driver_services.rs b/patina_dxe_core/src/driver_services.rs index 7190c33a8..f156cf58b 100644 --- a/patina_dxe_core/src/driver_services.rs +++ b/patina_dxe_core/src/driver_services.rs @@ -46,8 +46,9 @@ fn get_platform_driver_override_bindings( .locate_protocol(efi::protocols::platform_driver_override::PROTOCOL_GUID) { Err(_) => return Vec::new(), + // SAFETY: Checks locate_protocol return value to determine if pointer is valid. as_mut() is used for mutable + // access which will also check if the pointer is null before allowing access. 
Ok(protocol) => unsafe { - // SAFETY: locate_protocol guarantees that if `Ok` is returned, a valid pointer is encapsulated in it. (protocol as *mut efi::protocols::platform_driver_override::Protocol).as_mut().expect("bad protocol ptr") }, }; @@ -154,7 +155,9 @@ fn authenticate_connect( PROTOCOL_DB.get_interface_for_handle(controller_handle, efi::protocols::device_path::PROTOCOL_GUID) { let device_path = device_path as *mut efi::protocols::device_path::Protocol; - if let Ok(security2_ptr) = PROTOCOL_DB.locate_protocol(patina::pi::protocols::security2::PROTOCOL_GUID) { + if let Ok(security2_ptr) = + PROTOCOL_DB.locate_protocol(patina::pi::protocols::security2::PROTOCOL_GUID.into_inner()) + { let file_path = { if !recursive { if let Some(remaining_path) = remaining_device_path { @@ -289,9 +292,11 @@ fn core_connect_single_controller( return Ok(()); } - // SAFETY: caller must ensure that the pointer contained in remaining_device_path is valid if it is Some(_). if let Some(device_path) = remaining_device_path - && unsafe { (device_path.read_unaligned()).r#type == efi::protocols::device_path::TYPE_END } + && { + // SAFETY: caller must ensure that the pointer contained in remaining_device_path is valid if it is Some(_). 
+ unsafe { (device_path.read_unaligned()).r#type == efi::protocols::device_path::TYPE_END } + } { return Ok(()); } @@ -908,7 +913,11 @@ mod tests { // Install the security2 protocol in the protocol database let (_, _) = PROTOCOL_DB - .install_protocol_interface(None, patina::pi::protocols::security2::PROTOCOL_GUID, security2_ptr) + .install_protocol_interface( + None, + patina::pi::protocols::security2::PROTOCOL_GUID.into_inner(), + security2_ptr, + ) .unwrap(); // Create a proper END device path that should be safe to process diff --git a/patina_dxe_core/src/dxe_dispatch_service.rs b/patina_dxe_core/src/dxe_dispatch_service.rs new file mode 100644 index 000000000..95dfbc2d5 --- /dev/null +++ b/patina_dxe_core/src/dxe_dispatch_service.rs @@ -0,0 +1,38 @@ +//! DXE Core Dispatch Service +//! +//! Provides the [`CoreDxeDispatch`] service implementation, which exposes +//! the PI dispatcher to components via dependency injection. This allows +//! components to trigger DXE driver dispatch passes (e.g., to interleave +//! controller connection with driver dispatch during boot). +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! +use patina::{ + component::service::{IntoService, dxe_dispatch::DxeDispatch}, + error::Result, +}; + +use crate::{Core, PlatformInfo}; + +/// DXE dispatch service backed by the PI dispatcher. +#[derive(IntoService)] +#[service(dyn DxeDispatch)] +pub(crate) struct CoreDxeDispatch(&'static Core

); + +#[coverage(off)] +impl CoreDxeDispatch

{ + pub(crate) fn new(core: &'static Core

) -> Self { + Self(core) + } +} + +#[coverage(off)] +impl DxeDispatch for CoreDxeDispatch

{ + fn dispatch(&self) -> Result { + self.0.pi_dispatcher.dispatch() + } +} diff --git a/patina_dxe_core/src/dxe_services.rs b/patina_dxe_core/src/dxe_services.rs index d6c98aaf4..8cad89644 100644 --- a/patina_dxe_core/src/dxe_services.rs +++ b/patina_dxe_core/src/dxe_services.rs @@ -26,6 +26,8 @@ extern "efiapi" fn add_memory_space( length: u64, capabilities: u64, ) -> efi::Status { + // SAFETY: The DXE Services contract requires the caller to provide a valid, non-overlapping + // memory region within the processor address space. let result = unsafe { GCD.add_memory_space(gcd_memory_type, base_address as usize, length as usize, capabilities) }; match result { @@ -393,6 +395,7 @@ impl Core

{ set_memory_space_capabilities, }; let dxe_services_system_table_ptr = &dxe_services_system_table as *const dxe_services::DxeServicesTable; + // SAFETY: dxe_services_system_table is a local value, its byte representation is valid for hashing. let crc32 = unsafe { crc32fast::hash(from_raw_parts( dxe_services_system_table_ptr as *const u8, @@ -404,7 +407,7 @@ impl Core

{ let dxe_services_system_table = Box::new(dxe_services_system_table); let _ = config_tables::core_install_configuration_table( - dxe_services::DXE_SERVICES_TABLE_GUID, + dxe_services::DXE_SERVICES_TABLE_GUID.into_inner(), Box::into_raw(dxe_services_system_table) as *mut c_void, system_table, ); @@ -491,6 +494,7 @@ mod tests { fn with_locked_state(f: F) { test_support::with_global_lock(|| { test_support::init_test_logger(); + // SAFETY: Test-only initialization under the global lock. unsafe { crate::test_support::init_test_gcd(None); crate::test_support::init_test_protocol_db(); @@ -1175,6 +1179,7 @@ mod tests { let result = get_memory_space_descriptor(base, descriptor.as_mut_ptr()); assert_eq!(result, efi::Status::SUCCESS, "Should get memory space descriptor"); + // SAFETY: get_memory_space_descriptor initialized descriptor on SUCCESS. let descriptor = unsafe { descriptor.assume_init() }; assert_eq!(descriptor.base_address, base, "Base address should match"); assert_eq!(descriptor.length, length, "Length should match"); @@ -1215,6 +1220,7 @@ mod tests { assert_eq!(result, efi::Status::SUCCESS, "Should get descriptor for address within range"); + // SAFETY: get_memory_space_descriptor initialized descriptor on SUCCESS. let descriptor = unsafe { descriptor.assume_init() }; assert_eq!(descriptor.base_address, base, "Base address should be the region base"); assert_eq!(descriptor.length, length, "Length should match the region length"); @@ -1245,6 +1251,7 @@ mod tests { assert_eq!(result, efi::Status::SUCCESS, "Should get descriptor for type {expected_type:?}"); + // SAFETY: get_memory_space_descriptor initialized descriptor on SUCCESS. 
let descriptor = unsafe { descriptor.assume_init() }; assert_eq!(descriptor.memory_type, *expected_type, "Memory type should match for {expected_type:?}"); assert_eq!(descriptor.base_address, *base, "Base address should match for type {expected_type:?}"); @@ -1284,6 +1291,7 @@ mod tests { "Should get descriptor for capabilities 0x{expected_capabilities:x}", ); + // SAFETY: get_memory_space_descriptor initialized descriptor on SUCCESS. let descriptor = unsafe { descriptor.assume_init() }; // Check that our requested capabilities are present (GCD may add additional flags) assert!( @@ -1347,6 +1355,7 @@ mod tests { // Read back and verify bits are set let mut d = core::mem::MaybeUninit::::uninit(); assert_eq!(get_memory_space_descriptor(base, d.as_mut_ptr()), efi::Status::SUCCESS); + // SAFETY: get_memory_space_descriptor initialized d on SUCCESS. let d = unsafe { d.assume_init() }; assert_eq!(d.base_address, base); assert_eq!(d.length, length); @@ -1383,12 +1392,14 @@ mod tests { // The first page should have RO set let mut d0 = core::mem::MaybeUninit::::uninit(); assert_eq!(get_memory_space_descriptor(base, d0.as_mut_ptr()), efi::Status::SUCCESS); + // SAFETY: get_memory_space_descriptor initialized d0 on SUCCESS. let d0 = unsafe { d0.assume_init() }; assert!(d0.attributes & efi::MEMORY_RO != 0); // A later page should not necessarily have RO (split expected). We only assert that RO is not set there. let mut d1 = core::mem::MaybeUninit::::uninit(); assert_eq!(get_memory_space_descriptor(base + 0x3000, d1.as_mut_ptr()), efi::Status::SUCCESS); + // SAFETY: get_memory_space_descriptor initialized d1 on SUCCESS. let d1 = unsafe { d1.assume_init() }; assert!(d1.attributes & efi::MEMORY_RO == 0, "RO should not be set on untouched pages"); }); @@ -1397,6 +1408,7 @@ mod tests { #[test] fn test_set_memory_space_attributes_not_ready() { with_locked_state(|| { + // SAFETY: Resetting the global GCD is safe under the test lock. 
unsafe { GCD.reset() }; let s = set_memory_space_attributes(0x2421000, 0x1000, efi::MEMORY_WB); assert_eq!(s, efi::Status::NOT_READY); @@ -1424,6 +1436,7 @@ mod tests { #[test] fn test_get_memory_space_map_not_ready() { with_locked_state(|| { + // SAFETY: Resetting the global GCD is safe under the test lock. unsafe { GCD.reset() }; let mut out_count: usize = 0; @@ -1437,6 +1450,7 @@ mod tests { #[test] fn test_get_memory_space_map_success_and_contents() { with_locked_state(|| { + // SAFETY: Test allocator reset is safe under the test lock. unsafe { crate::test_support::reset_allocators(); } @@ -1454,6 +1468,7 @@ mod tests { assert_eq!(out_count, expected.len()); assert!(!out_ptr.is_null()); + // SAFETY: out_ptr/out_count come from get_memory_space_map and are valid for reads. let out_slice = unsafe { core::slice::from_raw_parts(out_ptr, out_count) }; assert_eq!(out_slice, expected.as_slice()); @@ -1464,6 +1479,7 @@ mod tests { #[test] fn test_get_memory_space_map_with_additional_regions() { with_locked_state(|| { + // SAFETY: Test allocator reset is safe under the test lock. unsafe { crate::test_support::reset_allocators(); } @@ -1488,6 +1504,7 @@ mod tests { assert_eq!(out_count, expected.len()); // Verify first and last few entries match (order should be the same as GCD enumeration) + // SAFETY: out_ptr/out_count come from get_memory_space_map and are valid for reads. let out_slice = unsafe { core::slice::from_raw_parts(out_ptr, out_count) }; assert_eq!(out_slice, expected.as_slice()); @@ -1513,6 +1530,7 @@ mod tests { #[test] fn test_get_io_space_map_not_ready() { with_locked_state(|| { + // SAFETY: Resetting the global GCD is safe under the test lock. unsafe { GCD.reset() }; let mut out_count: usize = 0; @@ -1526,6 +1544,7 @@ mod tests { #[test] fn test_get_io_space_map_success_and_contents() { with_locked_state(|| { + // SAFETY: Test allocator reset is safe under the test lock. 
unsafe { crate::test_support::reset_allocators(); } @@ -1542,6 +1561,7 @@ mod tests { assert_eq!(out_count, expected.len()); assert!(!out_ptr.is_null()); + // SAFETY: out_ptr/out_count are returned by get_io_space_map and are valid for that length. let out_slice = unsafe { core::slice::from_raw_parts(out_ptr, out_count) }; assert_eq!(out_slice, expected.as_slice()); @@ -1552,6 +1572,7 @@ mod tests { #[test] fn test_get_io_space_map_with_additional_regions() { with_locked_state(|| { + // SAFETY: Test allocator reset is safe under the test lock. unsafe { crate::test_support::reset_allocators(); } @@ -1571,6 +1592,7 @@ mod tests { assert_eq!(s, efi::Status::SUCCESS); assert_eq!(out_count, expected.len()); + // SAFETY: out_ptr/out_count are returned by get_memory_space_map and are valid for that length. let out_slice = unsafe { core::slice::from_raw_parts(out_ptr, out_count) }; assert_eq!(out_slice, expected.as_slice()); @@ -1597,6 +1619,7 @@ mod tests { // Verify capabilities include the requested bits let mut d = core::mem::MaybeUninit::::uninit(); assert_eq!(get_memory_space_descriptor(base, d.as_mut_ptr()), efi::Status::SUCCESS); + // SAFETY: get_memory_space_descriptor initialized d on SUCCESS. let d = unsafe { d.assume_init() }; assert_eq!(d.base_address, base); assert!(d.capabilities & caps == caps, "Expected caps 0x{:x} to be set in 0x{:x}", caps, d.capabilities); @@ -1615,6 +1638,7 @@ mod tests { fn test_set_memory_space_capabilities_not_ready() { with_locked_state(|| { // Force GCD to an uninitialized state + // SAFETY: Resetting the global GCD is safe under the test lock. 
unsafe { GCD.reset() }; let s = set_memory_space_capabilities(0x200000, 0x1000, efi::MEMORY_WB); assert_eq!(s, efi::Status::NOT_READY, "Expected NOT_READY when GCD is reset"); @@ -1670,6 +1694,7 @@ mod tests { let mut desc = core::mem::MaybeUninit::::uninit(); let s = get_io_space_descriptor(base, desc.as_mut_ptr()); assert_eq!(s, efi::Status::SUCCESS); + // SAFETY: get_io_space_descriptor initialized desc on SUCCESS. let desc = unsafe { desc.assume_init() }; assert_eq!(desc.base_address, base); assert_eq!(desc.length, len); @@ -1687,6 +1712,7 @@ mod tests { let mut desc = core::mem::MaybeUninit::::uninit(); assert_eq!(get_io_space_descriptor(base, desc.as_mut_ptr()), efi::Status::SUCCESS); + // SAFETY: get_io_space_descriptor initialized desc on SUCCESS. let desc = unsafe { desc.assume_init() }; assert_eq!(desc.base_address, base); assert_eq!(desc.length, len); @@ -1716,6 +1742,7 @@ mod tests { fn test_add_io_space_not_ready() { with_locked_state(|| { // Force GCD to uninitialized state for IO + // SAFETY: Resetting the global GCD is safe under the test lock. unsafe { GCD.reset() }; let s = add_io_space(GcdIoType::Io, 0x1000, 0x10); assert_eq!(s, efi::Status::NOT_READY); @@ -1753,6 +1780,7 @@ mod tests { #[test] fn test_allocate_io_space_not_ready() { with_locked_state(|| { + // SAFETY: Resetting the global GCD is safe under the test lock. unsafe { GCD.reset() }; let mut out: efi::PhysicalAddress = 0; let s = allocate_io_space( @@ -1941,6 +1969,7 @@ mod tests { #[test] fn test_free_io_space_not_ready() { with_locked_state(|| { + // SAFETY: Resetting the global GCD is safe under the test lock. unsafe { GCD.reset() }; let s = free_io_space(0x1000, 0x10); assert_eq!(s, efi::Status::NOT_READY); @@ -2037,6 +2066,7 @@ mod tests { #[test] fn test_remove_io_space_not_ready() { with_locked_state(|| { + // SAFETY: Resetting the global GCD is safe under the test lock. 
unsafe { GCD.reset() }; assert_eq!(remove_io_space(0x1000, 0x10), efi::Status::NOT_READY); }); @@ -2103,6 +2133,7 @@ mod tests { static CORE: MockCore = MockCore::new(NullSectionExtractor::new()); CORE.override_instance(); // Install the FV to obtain a real handle + // SAFETY: FV buffer is owned by the test and passed as a valid pointer to the dispatcher. unsafe { CORE.pi_dispatcher.install_firmware_volume(fv.as_ptr() as u64, None).unwrap() }; // Wrapper should still surface NOT_FOUND (no pending drivers to dispatch in tests) @@ -2128,7 +2159,7 @@ mod tests { static CORE: MockCore = MockCore::new(NullSectionExtractor::new()); CORE.override_instance(); // Any GUID is fine; there are no pending drivers in this test harness - let guid = efi::Guid::from_fields(0, 0, 0, 0, 0, &[0, 0, 0, 0, 0, 0]); + let guid: efi::Guid = patina::guids::ZERO.into(); let s = MockCore::schedule_efiapi(core::ptr::null_mut(), &guid); assert_eq!(s, efi::Status::NOT_FOUND); }); @@ -2148,6 +2179,7 @@ mod tests { static CORE: MockCore = MockCore::new(NullSectionExtractor::new()); CORE.override_instance(); // Install the FV to obtain a real handle + // SAFETY: FV buffer is owned by the test and passed as a valid pointer to the dispatcher. let handle = unsafe { CORE.pi_dispatcher.install_firmware_volume(fv.as_ptr() as u64, None).unwrap() }; // Use the same GUID as the dispatcher tests; wrapper should map NotFound correctly @@ -2261,6 +2293,7 @@ mod tests { assert_eq!(st_raw.number_of_table_entries, 1); assert!(!st_raw.configuration_table.is_null()); + // SAFETY: configuration_table points to `number_of_table_entries` elements set by init_system_table. 
let entries = unsafe { core::slice::from_raw_parts(st_raw.configuration_table, st_raw.number_of_table_entries) }; @@ -2271,6 +2304,7 @@ mod tests { assert!(!entry.vendor_table.is_null(), "DXE Services vendor_table pointer should be non-null"); // Validate the contents of the installed DXE Services table + // SAFETY: vendor_table points to a DXE Services table installed by CORE.install_dxe_services_table. let dxe_tbl = unsafe { &*(entry.vendor_table as *const dxe_services::DxeServicesTable) }; // Header signature/revision should match what install_dxe_services_table sets @@ -2278,8 +2312,10 @@ mod tests { assert_eq!(dxe_tbl.header.revision, efi::BOOT_SERVICES_REVISION); // Recompute CRC32 by zeroing the field in a local copy + // SAFETY: dxe_tbl is a valid reference with signature and revision checked above. let mut copy = unsafe { core::ptr::read(dxe_tbl) }; copy.header.crc32 = 0; + // SAFETY: copy is a local value. Creating a slice from its pointer and size is valid. let crc = crc32fast::hash(unsafe { core::slice::from_raw_parts( (© as *const dxe_services::DxeServicesTable) as *const u8, diff --git a/patina_dxe_core/src/event_db.rs b/patina_dxe_core/src/event_db.rs index 3e063656b..e324df0e7 100644 --- a/patina_dxe_core/src/event_db.rs +++ b/patina_dxe_core/src/event_db.rs @@ -215,9 +215,11 @@ struct Event { period: Option, } -// SAFETY: This structure is used within a lock on a single core and is not mutated -// after creation. It is safe to share references to it. +// SAFETY: Access and mutation of Event instances is serialized by the event DB lock, +// so shared references are not concurrently accessed without synchronization. unsafe impl Sync for crate::event_db::Event {} +// SAFETY: Access and mutation of Event instances is serialized by the event DB lock, +// so moving between threads does not introduce data races. 
unsafe impl Send for crate::event_db::Event {} impl fmt::Debug for Event { @@ -808,7 +810,9 @@ impl SpinLockedEventDb { } } +// SAFETY: SpinLockedEventDb protects internal state with a lock. unsafe impl Send for SpinLockedEventDb {} +// SAFETY: SpinLockedEventDb protects internal state with a lock. unsafe impl Sync for SpinLockedEventDb {} #[cfg(test)] diff --git a/patina_dxe_core/src/events.rs b/patina_dxe_core/src/events.rs index 66e9deff2..6362cfbaf 100644 --- a/patina_dxe_core/src/events.rs +++ b/patina_dxe_core/src/events.rs @@ -135,6 +135,7 @@ extern "efiapi" fn wait_for_event( status => { // SAFETY: caller must ensure that out_index is a valid pointer if it is not null. if !out_index.is_null() { + // SAFETY: out_index is non-null and points to writable memory. unsafe { out_index.write_unaligned(index); }; @@ -301,9 +302,10 @@ extern "efiapi" fn timer_tick(time: u64) { } extern "efiapi" fn timer_available_callback(event: efi::Event, _context: *mut c_void) { - match PROTOCOL_DB.locate_protocol(timer::PROTOCOL_GUID) { + match PROTOCOL_DB.locate_protocol(timer::PROTOCOL_GUID.into_inner()) { Ok(timer_arch_ptr) => { let timer_arch_ptr = timer_arch_ptr as *mut timer::Protocol; + // SAFETY: timer_arch_ptr was successfully returned from locate_protocol. let timer_arch = unsafe { &*(timer_arch_ptr) }; (timer_arch.register_handler)(timer_arch_ptr, timer_tick); if let Err(status_err) = EVENT_DB.close_event(event) { @@ -345,7 +347,7 @@ pub fn init_events_support(st: &mut EfiSystemTable) { .expect("Failed to create timer available callback."); PROTOCOL_DB - .register_protocol_notify(timer::PROTOCOL_GUID, event) + .register_protocol_notify(timer::PROTOCOL_GUID.into_inner(), event) .expect("Failed to register protocol notify on timer arch callback."); } @@ -359,6 +361,7 @@ mod tests { fn with_locked_state(f: F) { test_support::with_global_lock(|| { test_support::init_test_logger(); + // SAFETY: Test-only initialization of global services under the global lock. 
unsafe { crate::test_support::init_test_gcd(None); crate::test_support::reset_allocators(); @@ -480,8 +483,7 @@ mod tests { fn test_create_event_ex_with_event_group() { with_locked_state(|| { let mut event: efi::Event = ptr::null_mut(); - let event_guid: efi::Guid = - efi::Guid::from_fields(0x87a2e5d9, 0xc34f, 0x4b21, 0x8e, 0x57, &[0x1a, 0xf9, 0x3c, 0x82, 0xd7, 0x6b]); + let event_guid: efi::Guid = patina::BinaryGuid::from_string("87A2E5D9-C34F-4B21-8E57-1AF93C82D76B").into(); let notify_fn: Option = Some(test_notify); let result = create_event_ex( efi::EVT_NOTIFY_SIGNAL, diff --git a/patina_dxe_core/src/filesystems.rs b/patina_dxe_core/src/filesystems.rs index 51d6a4520..bb9d7bc06 100644 --- a/patina_dxe_core/src/filesystems.rs +++ b/patina_dxe_core/src/filesystems.rs @@ -32,12 +32,14 @@ impl SimpleFile<'_> { EfiError::status_to_result(status)?; + // SAFETY: file_ptr is filled by the open call above on success. let file = unsafe { file_ptr.as_mut().ok_or(EfiError::NotFound)? }; Ok(Self { file }) } /// Opens the root of a Simple File System and returns a SimpleFile object for it. pub fn open_volume(handle: efi::Handle) -> Result { + // SAFETY: Protocol database returns a valid interface pointer for the handle. let sfs = unsafe { let sfs_protocol_ptr = PROTOCOL_DB.get_interface_for_handle(handle, efi::protocols::simple_file_system::PROTOCOL_GUID)?; @@ -50,6 +52,7 @@ impl SimpleFile<'_> { let status = (sfs.open_volume)(sfs, core::ptr::addr_of_mut!(file_system_ptr)); EfiError::status_to_result(status)?; + // SAFETY: file_system_ptr is filled by the open_volume call above on success. let root = unsafe { file_system_ptr.as_mut().ok_or(EfiError::NotFound)? 
}; Ok(Self { file: root }) diff --git a/patina_dxe_core/src/gcd.rs b/patina_dxe_core/src/gcd.rs index f4245a2fe..a6c23dcb5 100644 --- a/patina_dxe_core/src/gcd.rs +++ b/patina_dxe_core/src/gcd.rs @@ -14,6 +14,7 @@ pub use spin_locked_gcd::DescriptorFilter; use goblin::pe::section_table; +use alloc::boxed::Box; use core::{cell::Cell, ffi::c_void, ops::Range}; use patina::{ base::{align_down, align_up}, @@ -23,7 +24,7 @@ use patina::{ hob::{self, Hob, HobList, PhaseHandoffInformationTable}, }, }; -use patina_internal_cpu::paging::create_cpu_paging; +use patina_internal_cpu::paging::{PatinaPageTable, create_cpu_paging}; use r_efi::efi; #[cfg(feature = "compatibility_mode_allowed")] @@ -427,6 +428,7 @@ pub fn init_gcd(physical_hob_list: *const c_void) { let mut free_memory_attributes: u64 = 0; let mut free_memory_capabilities: u64 = 0; + // SAFETY: physical_hob_list is provided by the platform and must point to a valid HOB list. let hob_list = Hob::Handoff(unsafe { (physical_hob_list as *const PhaseHandoffInformationTable) .as_ref::<'static>() @@ -515,7 +517,8 @@ pub fn init_gcd(physical_hob_list: *const c_void) { /// in the SpinLockedGcd struct, which is covered by unit tests. pub fn init_paging(hob_list: &HobList) { let page_allocator = PagingAllocator::new(&GCD); - let page_table = create_cpu_paging(page_allocator).expect("Failed to create CPU page table"); + let page_table: Box = + Box::new(create_cpu_paging(page_allocator).expect("Failed to create CPU page table")); GCD.init_paging_with(hob_list, page_table); } @@ -632,6 +635,7 @@ pub fn add_hob_resource_descriptors_to_gcd(hob_list: &HobList) { .take_while(|r| r.is_some()) .flatten() { + // SAFETY: GCD is initialized and split_range is derived from valid HOB ranges. unsafe { GCD.add_memory_space( gcd_mem_type, @@ -1019,8 +1023,10 @@ mod tests { GCD.init(48, 16); // Add memory and MMIO regions + // SAFETY: get_memory returns a test-owned buffer sized for the requested block count. 
let mem = unsafe { crate::test_support::get_memory(spin_locked_gcd::MEMORY_BLOCK_SLICE_SIZE * 10) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); + // SAFETY: address/length come from the test buffer and are valid for initializing GCD memory blocks. unsafe { GCD.init_memory_blocks( patina::pi::dxe_services::GcdMemoryType::SystemMemory, diff --git a/patina_dxe_core/src/gcd/spin_locked_gcd.rs b/patina_dxe_core/src/gcd/spin_locked_gcd.rs index 09a70fe72..98de2b873 100644 --- a/patina_dxe_core/src/gcd/spin_locked_gcd.rs +++ b/patina_dxe_core/src/gcd/spin_locked_gcd.rs @@ -400,10 +400,13 @@ impl GCD { ..Default::default() }); - self.memory_blocks - .expand(unsafe { slice::from_raw_parts_mut::<'static>(base_address as *mut u8, MEMORY_BLOCK_SLICE_SIZE) }); + self.memory_blocks.expand( + // SAFETY: base_address/size refer to a reserved backing allocation for memory blocks. + unsafe { slice::from_raw_parts_mut::<'static>(base_address as *mut u8, MEMORY_BLOCK_SLICE_SIZE) }, + ); self.memory_blocks.add(unallocated_memory_space).map_err(|_| EfiError::OutOfResources)?; + // SAFETY: add_memory_space is called during initialization with validated parameters. let idx = unsafe { self.add_memory_space(memory_type, base_address, len, capabilities) }?; // Initialize attributes on the first block to WB + XP @@ -1073,7 +1076,7 @@ impl GCD { log::trace!(target: "allocations", "[{}] Block Index: {:#x}", function!(), idx); log::trace!(target: "allocations", "[{}] Transition:\n {:#?}", function!(), transition); - // split_state_transition does not update the key, so this is safe. + // SAFETY: split_state_transition does not update the key for this block. let new_idx = unsafe { match memory_blocks.get_with_idx_mut(idx).expect("idx valid above").split_state_transition( base_address, @@ -1107,7 +1110,7 @@ impl GCD { Ok(idx) => idx, Err(e) => { log::error!("[{}] Memory block split failed! -> Error: {:#?}", function!(), e); - // Restore the memory block to its previous state. 
The base_address (key) is not updated with the split, so this is safe. + // SAFETY: restoring the prior block state does not change the base_address key. unsafe { *memory_blocks.get_with_idx_mut(idx).expect("idx valid above") = mb_before_split; } @@ -1119,7 +1122,7 @@ impl GCD { if let Some(next_idx) = memory_blocks.next_idx(idx) { let mut next = *memory_blocks.get_with_idx(next_idx).expect("idx valid from insert"); - // base_address (they key) is not updated with the merge, so this is safe. + // SAFETY: merge does not update the base_address key for this block. unsafe { if memory_blocks.get_with_idx_mut(idx).expect("idx valid from insert").merge(&mut next) { memory_blocks.delete_with_idx(next_idx).expect("Index already verified."); @@ -1131,7 +1134,7 @@ impl GCD { if let Some(prev_idx) = memory_blocks.prev_idx(idx) { let mut block = *memory_blocks.get_with_idx(idx).expect("idx valid from insert"); - // base_address (they key) is not updated with the merge, so this is safe. + // SAFETY: merge does not update the base_address key for this block. unsafe { if memory_blocks.get_with_idx_mut(prev_idx).expect("idx valid from insert").merge(&mut block) { memory_blocks.delete_with_idx(idx).expect("Index already verified."); @@ -1171,16 +1174,24 @@ impl GCD { let prev = &mut descriptors[write_idx - 1]; if prev.r#type == current.r#type && prev.attribute == current.attribute - && prev.physical_start + (prev.number_of_pages * UEFI_PAGE_SIZE as u64) == current.physical_start + && prev.physical_start + uefi_pages_to_size!(prev.number_of_pages as usize) as u64 + == current.physical_start { // Free memory shouldn't even need to be merged because it should already be consistent and coalesced. // If this fails to be true it can cause odd behavior if applications try to allocate blocks of free // memory by address, which is a common pattern for OS loaders. - debug_assert!( - prev.r#type != efi::CONVENTIONAL_MEMORY, - "Free memory is fragmented in memory descriptors!" 
- ); - + if prev.r#type == efi::CONVENTIONAL_MEMORY { + log::error!( + "Free memory is fragmented in memory descriptors! prev: {:#x}-{:#x} (attr: {:#x}), current: {:#x}-{:#x} (attr: {:#x})", + prev.physical_start, + prev.physical_start + uefi_pages_to_size!(prev.number_of_pages as usize) as u64, + prev.attribute, + current.physical_start, + current.physical_start + uefi_pages_to_size!(current.number_of_pages as usize) as u64, + current.attribute, + ); + debug_assert!(false); + } // Merge by extending the previous descriptor prev.number_of_pages += current.number_of_pages; continue; @@ -1407,11 +1418,14 @@ impl IoGCD { fn init_io_blocks(&mut self) -> Result<(), EfiError> { ensure!(self.maximum_address != 0, EfiError::NotReady); - self.io_blocks.expand(unsafe { - Box::into_raw(vec![0_u8; IO_BLOCK_SLICE_SIZE].into_boxed_slice()) - .as_mut() - .expect("RBT given null pointer in initialization.") - }); + self.io_blocks.expand( + // SAFETY: the boxed slice is leaked to back the tree storage for its lifetime. + unsafe { + Box::into_raw(vec![0_u8; IO_BLOCK_SLICE_SIZE].into_boxed_slice()) + .as_mut() + .expect("RBT given null pointer in initialization.") + }, + ); self.io_blocks .add(IoBlock::Unallocated(dxe_services::IoSpaceDescriptor { @@ -1818,7 +1832,7 @@ impl IoGCD { log::trace!(target: "allocations", "[{}] Block Index: {:#x}", function!(), idx); log::trace!(target: "allocations", "[{}] Transition: {:?}\n", function!(), transition); - // split_state_transition does not update the key, so this is safe. + // SAFETY: split_state_transition does not update the key for this block. let new_idx = unsafe { match io_blocks.get_with_idx_mut(idx).expect("idx valid above").split_state_transition( base_address, @@ -1850,7 +1864,7 @@ impl IoGCD { Ok(idx) => idx, Err(e) => { log::error!("[{}] IO block split failed! -> Error: {:#?}", function!(), e); - // Restore the memory block to its previous state. The base_address (key) is not updated with the split, so this is safe. 
+ // SAFETY: restoring the prior block state does not change the base_address key. unsafe { *io_blocks.get_with_idx_mut(idx).expect("idx valid above") = ib_before_split; } @@ -1861,7 +1875,7 @@ impl IoGCD { // Lets see if we can merge the block with the next block if let Some(next_idx) = io_blocks.next_idx(idx) { let mut next = *io_blocks.get_with_idx(next_idx).expect("idx valid from insert"); - // base_address (they key) is not updated with the merge, so this is safe. + // SAFETY: merge does not update the base_address key for this block. unsafe { if io_blocks.get_with_idx_mut(idx).expect("idx valid from insert").merge(&mut next) { io_blocks.delete_with_idx(next_idx).expect("Index already verified."); @@ -1872,7 +1886,7 @@ impl IoGCD { // Lets see if we can merge the block with the previous block if let Some(prev_idx) = io_blocks.prev_idx(idx) { let mut block = *io_blocks.get_with_idx(idx).expect("idx valid from insert"); - // base_address (they key) is not updated with the merge, so this is safe. + // SAFETY: merge does not update the base_address key for this block. 
unsafe { if io_blocks.get_with_idx_mut(prev_idx).expect("idx valid from insert").merge(&mut block) { io_blocks.delete_with_idx(idx).expect("Index already verified."); @@ -2156,7 +2170,7 @@ impl SpinLockedGcd { "Cache attributes for memory region {base_address:#x?} of length {len:#x?} were updated to {new_cache_attributes:#x?} from {old_cache_attrs:#x?}, sending cache attributes changed event", ); - EVENT_DB.signal_group(CACHE_ATTRIBUTE_CHANGE_EVENT_GROUP); + EVENT_DB.signal_group(CACHE_ATTRIBUTE_CHANGE_EVENT_GROUP.into_inner()); } else if unmapped && old_cache_attributes.is_none() { // in this case the region was unmapped and we had no caching attributes set up log::trace!( @@ -2164,7 +2178,7 @@ impl SpinLockedGcd { "Cache attributes for memory region {base_address:#x?} of length {len:#x?} were updated to {new_cache_attributes:#x?} from an unmapped state, sending cache attributes changed event", ); - EVENT_DB.signal_group(CACHE_ATTRIBUTE_CHANGE_EVENT_GROUP); + EVENT_DB.signal_group(CACHE_ATTRIBUTE_CHANGE_EVENT_GROUP.into_inner()); } } @@ -2218,6 +2232,8 @@ impl SpinLockedGcd { io.maximum_address = 0; io.io_blocks = Rbt::new(); self.page_table.lock().take(); + // Reset memory protection policy to default state + self.memory_protection_policy.memory_allocation_default_attributes.set(efi::MEMORY_XP); } /// Adds a page table for testing purposes @@ -2314,6 +2330,7 @@ impl SpinLockedGcd { }) .expect("Did not find MemoryAllocationModule Hob for DxeCore. Use patina::guid::DXE_CORE as FFS GUID."); + // SAFETY: the DXE core HOB points to the loaded image buffer and size. let pe_info = unsafe { UefiPeInfo::parse_mapped(core::slice::from_raw_parts( dxe_core_hob.alloc_descriptor.memory_base_address as *const u8, @@ -2510,6 +2527,7 @@ impl SpinLockedGcd { len: usize, capabilities: u64, ) -> Result { + // SAFETY: caller upholds the contract for add_memory_space. 
let result = unsafe { self.memory.lock().add_memory_space(memory_type, base_address, len, capabilities) }; if result.is_ok() && let Some(callback) = self.memory_change_callback @@ -2988,7 +3006,9 @@ impl core::fmt::Debug for SpinLockedGcd { } } +// SAFETY: SpinLockedGcd uses internal locks to serialize access to shared state. unsafe impl Sync for SpinLockedGcd {} +// SAFETY: SpinLockedGcd is safe to move between threads because it owns thread-safe synchronization. unsafe impl Send for SpinLockedGcd {} /// Iterator over GCD memory descriptors within a specified range. @@ -3162,6 +3182,7 @@ mod tests { // SAFETY: GCD test operation - address comes from controlled allocation above. assert_eq!( Err(EfiError::NotReady), + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, address, MEMORY_BLOCK_SLICE_SIZE, 0) }, @@ -3170,7 +3191,9 @@ mod tests { assert_eq!(0, gcd.memory_descriptor_count()); assert_eq!( + // SAFETY: address/size come from the test buffer and are valid to initialize memory blocks. Err(EfiError::OutOfResources), + // SAFETY: address/size come from the test buffer and are valid to initialize memory blocks. unsafe { gcd.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -3189,24 +3212,32 @@ mod tests { #[test] fn test_add_memory_space_with_all_memory_type() { with_locked_state(|| { + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. let (mut gcd, _) = create_gcd(); - + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!(Ok(0), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, 0, 1, 0) }); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 
assert_eq!(Ok(3), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 1, 1, 0) }); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!(Ok(4), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Persistent, 2, 1, 0) }); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!(Ok(5), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::MoreReliable, 3, 1, 0) }); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!(Ok(6), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Unaccepted, 4, 1, 0) }); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!(Ok(7), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::MemoryMappedIo, 5, 1, 0) }); let snapshot = copy_memory_block(&gcd); assert_eq!( Err(EfiError::InvalidParameter), + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::NonExistent, 10, 1, 0) }, "Can't manually add NonExistent memory space manually." ); assert!(is_gcd_memory_slice_valid(&gcd)); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!(snapshot, copy_memory_block(&gcd)); }); } @@ -3216,12 +3247,14 @@ mod tests { with_locked_state(|| { let (mut gcd, _) = create_gcd(); let snapshot = copy_memory_block(&gcd); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 
assert_eq!(Err(EfiError::InvalidParameter), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, 0, 0) }); assert_eq!(snapshot, copy_memory_block(&gcd)); }); } + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. #[test] fn test_add_memory_space_when_memory_block_full() { @@ -3231,7 +3264,9 @@ mod tests { let mut n = 0; while gcd.memory_descriptor_count() < MEMORY_BLOCK_SLICE_LEN { + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert!( + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, addr + n, 1, n as u64) } .is_ok() ); @@ -3241,64 +3276,79 @@ mod tests { assert!(is_gcd_memory_slice_valid(&gcd)); let memory_blocks_snapshot = copy_memory_block(&gcd); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. let res = unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, addr + n, 1, n as u64) }; assert_eq!( Err(EfiError::OutOfResources), res, "Should return out of memory if there is no space in memory blocks." ); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!(memory_blocks_snapshot, copy_memory_block(&gcd),); }); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. } #[test] + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. fn test_add_memory_space_outside_processor_range() { with_locked_state(|| { let (mut gcd, _) = create_gcd(); let snapshot = copy_memory_block(&gcd); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 
assert_eq!(Err(EfiError::Unsupported), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, gcd.maximum_address + 1, 1, 0) }); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!(Err(EfiError::Unsupported), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, gcd.maximum_address, 1, 0) + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. }); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!(Err(EfiError::Unsupported), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, gcd.maximum_address - 1, 2, 0) }); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!(snapshot, copy_memory_block(&gcd)); }); } #[test] + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. fn test_add_memory_space_in_range_already_added() { with_locked_state(|| { let (mut gcd, _) = create_gcd(); // Add block to test the boundary on. + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 1000, 10, 0) }.unwrap(); let snapshot = copy_memory_block(&gcd); assert_eq!( Err(EfiError::AccessDenied), + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, 1002, 5, 0) }, "Can't add inside a range previously added." ); assert_eq!( Err(EfiError::AccessDenied), + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 
unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, 998, 5, 0) }, "Can't add partially inside a range previously added (Start)." ); assert_eq!( Err(EfiError::AccessDenied), + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, 1009, 5, 0) }, "Can't add partially inside a range previously added (End)." ); assert_eq!(snapshot, copy_memory_block(&gcd)); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. }); } @@ -3307,23 +3357,29 @@ mod tests { with_locked_state(|| { let (mut gcd, address) = create_gcd(); // Add unallocated block after allocated one. + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, address - 100, 100, 0) }.unwrap(); let snapshot = copy_memory_block(&gcd); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!( Err(EfiError::AccessDenied), + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, address, 5, 0) }, + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. "Can't add inside a range previously allocated." ); assert_eq!( Err(EfiError::AccessDenied), + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, address - 100, 200, 0) }, "Can't add partially inside a range previously allocated." 
); assert_eq!(snapshot, copy_memory_block(&gcd)); }); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. } #[test] @@ -3331,13 +3387,17 @@ mod tests { with_locked_state(|| { let (mut gcd, _) = create_gcd(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!(Ok(4), unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 1000, 10, 0) }); let block_count = gcd.memory_descriptor_count(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. // Test merging when added after + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. match unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 1010, 10, 0) } { Ok(idx) => { let mb = gcd.memory_blocks.get_with_idx(idx).unwrap(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!(1000, mb.as_ref().base_address); assert_eq!(20, mb.as_ref().length); assert_eq!(block_count, gcd.memory_descriptor_count()); @@ -3346,10 +3406,12 @@ mod tests { } // Test merging when added before + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. match unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 990, 10, 0) } { Ok(idx) => { let mb = gcd.memory_blocks.get_with_idx(idx).unwrap(); assert_eq!(990, mb.as_ref().base_address); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!(30, mb.as_ref().length); assert_eq!(block_count, gcd.memory_descriptor_count()); } @@ -3357,11 +3419,13 @@ mod tests { } assert!( + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 
unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, 1020, 10, 0) }.is_ok(), "A different memory type should note result in a merge." ); assert_eq!(block_count + 1, gcd.memory_descriptor_count()); assert!( + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, 1030, 10, 1) }.is_ok(), "A different capabilities should note result in a merge." ); @@ -3370,11 +3434,13 @@ mod tests { assert!(is_gcd_memory_slice_valid(&gcd)); }); } + // SAFETY: get_memory returns a test-owned buffer of the requested size. #[test] fn test_add_memory_space_state() { with_locked_state(|| { let (mut gcd, _) = create_gcd(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. match unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 100, 10, 123) } { Ok(idx) => { let mb = *gcd.memory_blocks.get_with_idx(idx).unwrap(); @@ -3383,6 +3449,7 @@ mod tests { assert_eq!(100, md.base_address); assert_eq!(10, md.length); assert_eq!(efi::MEMORY_RUNTIME | efi::MEMORY_ACCESS_MASK | 123, md.capabilities); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!(0, md.image_handle as usize); assert_eq!(0, md.device_handle as usize); } @@ -3397,12 +3464,14 @@ mod tests { #[test] fn test_remove_memory_space_before_memory_blocks_instantiated() { with_locked_state(|| { + // SAFETY: get_memory returns a test-owned buffer of the requested size. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE) }; let address = mem.as_ptr() as usize; let mut gcd = GCD::new(48); assert_eq!(Err(EfiError::NotFound), gcd.remove_memory_space(address, MEMORY_BLOCK_SLICE_SIZE)); }); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 
} #[test] @@ -3411,6 +3480,7 @@ mod tests { let (mut gcd, _) = create_gcd(); // Add memory space to remove in a valid area. + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert!(unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, 10, 0) }.is_ok()); let snapshot = copy_memory_block(&gcd); @@ -3430,8 +3500,10 @@ mod tests { fn test_remove_memory_space_outside_processor_range() { with_locked_state(|| { let (mut gcd, _) = create_gcd(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. // Add memory space to remove in a valid area. assert!( + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, gcd.maximum_address - 10, 10, 0) } @@ -3460,6 +3532,7 @@ mod tests { with_locked_state(|| { let (mut gcd, _) = create_gcd(); // Add memory space to remove in a valid area. + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert!(unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 100, 10, 0) }.is_ok()); let snapshot = copy_memory_block(&gcd); @@ -3488,11 +3561,13 @@ mod tests { fn test_remove_memory_space_in_range_allocated() { with_locked_state(|| { let (mut gcd, address) = create_gcd(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. let snapshot = copy_memory_block(&gcd); // Not found has a priority over the access denied because the check if the range is valid is done earlier. assert_eq!( + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. Err(EfiError::NotFound), gcd.remove_memory_space(address - 5, 10), "Can't remove memory space partially allocated." 
@@ -3520,12 +3595,15 @@ mod tests { let addr = address + MEMORY_BLOCK_SLICE_SIZE; assert!( + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, addr, 10, 0_u64) }.is_ok() ); let mut n = 1; while gcd.memory_descriptor_count() < MEMORY_BLOCK_SLICE_LEN { assert!( + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, addr + 10 + n, 1, n as u64) } .is_ok() @@ -3556,10 +3634,12 @@ mod tests { let aligned_address = if aligned_address > aligned_length { aligned_address - aligned_length } else { + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. aligned_address + aligned_length }; assert!( + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, aligned_address, aligned_length, 0) } @@ -3590,6 +3670,7 @@ mod tests { with_locked_state(|| { let (mut gcd, address) = create_gcd(); assert!( + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, address, 123) }.is_ok() ); @@ -3682,6 +3763,7 @@ mod tests { AllocateType::Address(gcd.maximum_address - 100), dxe_services::GcdMemoryType::Reserved, 0, + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 1000, 1 as _, None @@ -3699,8 +3781,11 @@ mod tests { ), ); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 
assert_eq!(snapshot, copy_memory_block(&gcd)); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. }); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. } #[test] @@ -3718,6 +3803,7 @@ mod tests { .into_iter() .enumerate() { + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(memory_type, (i + 1) * 10, 10, 0) }.unwrap(); let res = gcd.allocate_memory_space(AllocateType::Address((i + 1) * 10), memory_type, 0, 10, 1 as _, None); @@ -3735,8 +3821,11 @@ mod tests { let (mut gcd, _) = create_gcd(); // Add memory space of len 100 to multiple space. + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, 100, 0) }.unwrap(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 1000, 100, 0) }.unwrap(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, gcd.maximum_address - 100, 100, 0) } @@ -3747,6 +3836,7 @@ mod tests { // Try to allocate chunk bigger than 100. for allocate_type in [AllocateType::BottomUp(None), AllocateType::TopDown(None)] { assert_eq!( + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 
Err(EfiError::OutOfResources), gcd.allocate_memory_space( allocate_type, @@ -3787,6 +3877,7 @@ mod tests { fn test_allocate_memory_space_alignment() { with_locked_state(|| { let (mut gcd, _) = create_gcd(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x1000, 0x1000, 0) }.unwrap(); assert_eq!( @@ -3856,6 +3947,7 @@ mod tests { dxe_services::GcdMemoryType::SystemMemory, 4, 0xe0, + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 1 as _, None ), @@ -3897,6 +3989,7 @@ mod tests { fn test_allocate_memory_space_block_merging() { with_locked_state(|| { let (mut gcd, _) = create_gcd(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x1000, 0x1000, 0) }.unwrap(); for allocate_type in [AllocateType::BottomUp(None), AllocateType::TopDown(None)] { @@ -3937,6 +4030,7 @@ mod tests { None ) .is_ok(), + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. "{allocate_type:?}: A different image handle should not result in a merge." ); assert_eq!(block_count + 2, gcd.memory_descriptor_count()); @@ -3979,6 +4073,7 @@ mod tests { with_locked_state(|| { let (mut gcd, _) = create_gcd(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x100, 10, 0) }.unwrap(); let snapshot = copy_memory_block(&gcd); @@ -4030,6 +4125,7 @@ mod tests { assert_eq!(snapshot, copy_memory_block(&gcd)); }); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 
} #[test] @@ -4067,12 +4163,14 @@ mod tests { assert_eq!(snapshot, copy_memory_block(&gcd)); }); } + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. #[test] fn test_free_memory_space_outside_processor_range() { with_locked_state(|| { let (mut gcd, _) = create_gcd(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, gcd.maximum_address - 100, 100, 0) } @@ -4088,6 +4186,7 @@ mod tests { .unwrap(); let snapshot = copy_memory_block(&gcd); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. assert_eq!( Err(EfiError::Unsupported), @@ -4105,11 +4204,13 @@ mod tests { assert_eq!(snapshot, copy_memory_block(&gcd)); }); } + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. #[test] fn test_free_memory_space_in_range_not_allocated() { with_locked_state(|| { let (mut gcd, _) = create_gcd(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x3000, 0x3000, 0) }.unwrap(); gcd.allocate_memory_space( AllocateType::Address(0x3000), @@ -4124,6 +4225,7 @@ mod tests { assert_eq!(Err(EfiError::AccessDenied), gcd.free_memory_space(0x2000, 0x1000, MemoryStateTransition::Free)); assert_eq!(Err(EfiError::AccessDenied), gcd.free_memory_space(0x4000, 0x1000, MemoryStateTransition::Free)); assert_eq!(Err(EfiError::AccessDenied), gcd.free_memory_space(0, 0x1000, MemoryStateTransition::Free)); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 
}); } @@ -4132,6 +4234,7 @@ mod tests { with_locked_state(|| { let (mut gcd, _) = create_gcd(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x1000000, UEFI_PAGE_SIZE * 2, 0) } @@ -4149,6 +4252,7 @@ mod tests { let mut n = 1; while gcd.memory_descriptor_count() < MEMORY_BLOCK_SLICE_LEN { let addr = 0x2000000 + (n * UEFI_PAGE_SIZE); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, addr, UEFI_PAGE_SIZE, n as u64) } @@ -4169,6 +4273,7 @@ mod tests { with_locked_state(|| { let (mut gcd, _) = create_gcd(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x1000, 0x10000, 0) }.unwrap(); gcd.allocate_memory_space( AllocateType::Address(0x1000), @@ -4235,6 +4340,7 @@ mod tests { maximum_address: 0, allocate_memory_space_fn: GCD::allocate_memory_space_internal, free_memory_space_fn: GCD::free_memory_space, + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. prioritize_32_bit_memory: false, }; assert_eq!(Err(EfiError::NotReady), gcd.set_memory_space_attributes(0, 0x50000, 0b1111)); @@ -4259,7 +4365,7 @@ mod tests { // Test that a non-page aligned address with the runtime attribute set returns invalid parameter assert_eq!( Err(EfiError::InvalidParameter), - gcd.set_memory_space_attributes(0xFFFFFFFF, 0x1000, efi::MEMORY_RUNTIME | efi::MEMORY_WB) + gcd.set_memory_space_attributes(0xFFFFFFFF, 0x1000, efi::MEMORY_RUNTIME | efi::MEMORY_WB) // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 
); // Test that a non-page aligned size returns invalid parameter @@ -4281,8 +4387,10 @@ mod tests { #[test] fn test_set_capabilities_and_attributes() { + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. with_locked_state(|| { let (mut gcd, address) = create_gcd(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x1000, address - 0x1000, 0) } .unwrap(); @@ -4308,6 +4416,7 @@ mod tests { fn test_set_attributes_panic() { with_locked_state(|| { let (mut gcd, address) = create_gcd(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, address, 0) }.unwrap(); gcd.allocate_memory_space( @@ -4329,6 +4438,7 @@ mod tests { fn test_block_split_when_memory_blocks_full() { with_locked_state(|| { let (mut gcd, address) = create_gcd(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space( dxe_services::GcdMemoryType::SystemMemory, @@ -4657,9 +4767,11 @@ mod tests { } fn create_gcd() -> (GCD, usize) { + // SAFETY: get_memory returns a test-owned buffer of the requested size. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE) }; let address = mem.as_ptr() as usize; let mut gcd = GCD::new(48); + // SAFETY: address/size come from the test buffer and are valid to initialize memory blocks. unsafe { gcd.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -4713,6 +4825,7 @@ mod tests { assert_eq!(GCD.memory.lock().maximum_address, 0); + // SAFETY: The GCD is intentionally uninitialized to validate error handling paths. 
let add_result = unsafe { GCD.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, 100, 0) }; assert_eq!(add_result, Err(EfiError::NotReady)); @@ -4741,9 +4854,11 @@ mod tests { assert_eq!(GCD.memory.lock().maximum_address, 0); + // SAFETY: get_memory returns a test-owned buffer of the requested size. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE) }; let address = mem.as_ptr() as usize; GCD.init(48, 16); + // SAFETY: address/size come from the test buffer and are valid to initialize memory blocks. unsafe { GCD.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -4774,9 +4889,11 @@ mod tests { assert_eq!(GCD.memory.lock().maximum_address, 0); + // SAFETY: get_memory returns a test-owned buffer of the requested size. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE) }; let address = mem.as_ptr() as usize; GCD.init(48, 16); + // SAFETY: address/size come from the test buffer and are valid to initialize memory blocks. unsafe { GCD.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -4788,6 +4905,7 @@ mod tests { .unwrap(); } + // SAFETY: Adds a small test range to trigger the map-change callback. unsafe { GCD.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x1000, 0x1000, efi::MEMORY_WB) .unwrap(); @@ -4811,9 +4929,11 @@ mod tests { assert_eq!(GCD.memory.lock().maximum_address, 0); + // SAFETY: get_memory returns a test-owned buffer sized for the requested range. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE * 2) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); GCD.init(48, 16); + // SAFETY: address/size come from the test buffer and are valid to initialize memory blocks. unsafe { GCD.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -4843,7 +4963,9 @@ mod tests { GCD.init(48, 16); let layout = Layout::from_size_align(GCD_SIZE, 0x1000).unwrap(); + // SAFETY: The allocator returns a test buffer aligned to pages for GCD initialization. 
let base = unsafe { std::alloc::System.alloc(layout) as u64 }; + // SAFETY: base/size come from the test allocation and are valid for initializing memory blocks. unsafe { GCD.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -4879,6 +5001,7 @@ mod tests { }); } + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. #[test] fn allocate_top_down_should_allocate_decreasing_addresses() { with_locked_state(|| { @@ -4887,7 +5010,9 @@ mod tests { GCD.init(48, 16); let layout = Layout::from_size_align(GCD_SIZE, 0x1000).unwrap(); + // SAFETY: The allocator returns a test buffer aligned to pages for GCD initialization. let base = unsafe { std::alloc::System.alloc(layout) as u64 }; + // SAFETY: base/size come from the test allocation and are valid for initializing memory blocks. unsafe { GCD.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -4904,6 +5029,7 @@ mod tests { let allocate_result = GCD.allocate_memory_space( AllocateType::TopDown(None), dxe_services::GcdMemoryType::SystemMemory, + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 12, 0x1000, 1 as _, @@ -4928,6 +5054,7 @@ mod tests { with_locked_state(|| { let (mut gcd, _) = create_gcd(); // Increase the memory block size so allocation at 0x1000 is possible after skipping page 0 + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, 0x2000, efi::MEMORY_WB).unwrap(); } @@ -4939,6 +5066,7 @@ mod tests { 0, 0x1000, 1 as _, + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 
None, ); assert_eq!(res.unwrap(), 0x1000, "Should not be able to allocate page 0"); @@ -4955,6 +5083,7 @@ mod tests { assert_eq!(res, Err(EfiError::OutOfResources), "Should not be able to allocate page 0"); // add a new block to ensure block skipping logic works + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x2000, 0x2000, efi::MEMORY_WB) .unwrap(); @@ -4991,6 +5120,7 @@ mod tests { gcd.prioritize_32_bit_memory = true; // Test with a contiguous 8gb without a gap. + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, 2 * SIZE_4GB, 0) }.unwrap(); // make sure it prioritizes 32 bit addresses. @@ -5038,10 +5168,12 @@ mod tests { static GCD: SpinLockedGcd = SpinLockedGcd::new(None); // Initialize and add some memory + // SAFETY: get_memory returns a valid, owned buffer for the test and the size is bounded by the constant. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE) }; let address = mem.as_ptr() as usize; GCD.init(48, 16); + // SAFETY: address/size come from the test allocation and are used to initialize the GCD memory blocks. unsafe { GCD.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -5084,7 +5216,9 @@ mod tests { GCD.init(48, 16); let layout = Layout::from_size_align(GCD_SIZE, 0x1000).unwrap(); + // SAFETY: The allocator is set up to return an aligned and available test buffer for GCD initialization. let base = unsafe { std::alloc::System.alloc(layout) as u64 }; + // SAFETY: base points to the test allocation and GCD_SIZE defines the initialized range. 
unsafe { GCD.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -5109,6 +5243,7 @@ mod tests { // allocate another page let page2 = allocator .allocate_page(UEFI_PAGE_SIZE as u64, UEFI_PAGE_SIZE as u64, false) + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. .expect("Should allocate a second page"); assert!(page2 != page, "Allocated pages should be unique"); assert!( @@ -5135,7 +5270,9 @@ mod tests { GCD.init(48, 16); let layout = Layout::from_size_align(GCD_SIZE, 0x1000).unwrap(); + // SAFETY: The allocator is set up to return an aligned and available test buffer for GCD initialization. let base = unsafe { std::alloc::System.alloc(layout) as u64 }; + // SAFETY: base/size correspond to the test allocation and are safe to register with the GCD. unsafe { GCD.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -5149,18 +5286,22 @@ mod tests { let mut allocator = PagingAllocator::new(&GCD); // Exhaust all available pages + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. let mut allocated = Vec::new(); while let Ok(page) = allocator.allocate_page(UEFI_PAGE_SIZE as u64, UEFI_PAGE_SIZE as u64, false) { allocated.push(page); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. } }); } + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. #[test] fn test_get_memory_descriptors_allocated_filter() { with_locked_state(|| { let (mut gcd, _address) = create_gcd(); + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. 
unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0, 2 * SIZE_4GB, 0) }.unwrap(); gcd.allocate_memory_space( @@ -5203,12 +5344,15 @@ mod tests { with_locked_state(|| { let (mut gcd, _address) = create_gcd(); // Add MMIO and Reserved blocks + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::MemoryMappedIo, 0x2000, 0x1000, 0).unwrap(); } + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, 0x3000, 0x10000, 0).unwrap(); } + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x14000, 0x6000, 0).unwrap(); } @@ -5232,8 +5376,10 @@ mod tests { GCD.init(48, 16); // Add memory and MMIO regions + // SAFETY: get_memory returns a test-owned buffer used to seed GCD memory blocks. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE * 100) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); + // SAFETY: address/length are derived from the test buffer so the ranges are valid for GCD initialization. unsafe { GCD.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -5394,8 +5540,10 @@ mod tests { GCD.init(48, 16); // Set up memory space like other tests + // SAFETY: get_memory returns a test-owned buffer sized for the requested block count. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE * 2) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); + // SAFETY: The address/length come from the test allocation and are valid to register with the GCD. 
unsafe { GCD.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -5445,8 +5593,10 @@ mod tests { GCD.init(48, 16); // Set up memory space + // SAFETY: The GCD is prepared so that get_memory returns a valid, owned buffer for the test. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE * 2) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); + // SAFETY: The buffer range is owned by this test and can be registered as system memory. unsafe { GCD.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -5705,6 +5855,7 @@ mod tests { static GCD: SpinLockedGcd = SpinLockedGcd::new(None); GCD.init(48, 16); + // SAFETY: get_memory returns a test-owned buffer used to seed GCD memory blocks. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE * 3) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); @@ -5751,6 +5902,7 @@ mod tests { static GCD: SpinLockedGcd = SpinLockedGcd::new(None); GCD.init(48, 16); + // SAFETY: get_memory returns a test-owned buffer used to seed GCD memory blocks. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE * 3) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); @@ -5792,6 +5944,7 @@ mod tests { static GCD: SpinLockedGcd = SpinLockedGcd::new(None); GCD.init(48, 16); + // SAFETY: get_memory returns a test-owned buffer used to seed GCD memory blocks. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE * 3) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); @@ -6314,6 +6467,7 @@ mod tests { let (mut gcd, _) = create_gcd(); // Add runtime MMIO - should be counted + // SAFETY: This is a synthetic MMIO range used for test coverage only. unsafe { gcd.add_memory_space( dxe_services::GcdMemoryType::MemoryMappedIo, @@ -6339,6 +6493,7 @@ mod tests { let (mut gcd, _) = create_gcd(); // Add runtime MMIO + // SAFETY: This is a synthetic MMIO range for test bookkeeping only. 
unsafe { gcd.add_memory_space( dxe_services::GcdMemoryType::MemoryMappedIo, @@ -6352,6 +6507,7 @@ mod tests { .expect("Failed to set runtime MMIO attributes"); // Add Persistent memory + // SAFETY: This is a synthetic persistent memory range used only for test coverage. unsafe { gcd.add_memory_space( dxe_services::GcdMemoryType::Persistent, @@ -6365,6 +6521,7 @@ mod tests { .expect("Failed to set Persistent memory attributes"); // Add Reserved memory + // SAFETY: This is a synthetic reserved range used only for test coverage. unsafe { gcd.add_memory_space( dxe_services::GcdMemoryType::Reserved, @@ -6389,6 +6546,7 @@ mod tests { let (mut gcd, _) = create_gcd(); // Add non-runtime MMIO - should not be counted + // SAFETY: This is a synthetic MMIO range used only for test bookkeeping. unsafe { gcd.add_memory_space( dxe_services::GcdMemoryType::MemoryMappedIo, @@ -6411,11 +6569,14 @@ mod tests { let (mut gcd, _) = create_gcd(); // Add Persistent memory - should be counted + // SAFETY: This is a synthetic persistent memory range used only for test coverage. unsafe { gcd.add_memory_space( dxe_services::GcdMemoryType::Persistent, + // SAFETY: get_memory returns a test-owned buffer of the requested size. 0x100000000, UEFI_PAGE_SIZE * 100, + // SAFETY: address/size come from the test buffer and are valid to initialize memory blocks. efi::MEMORY_WB | efi::MEMORY_NV, ) } @@ -6428,11 +6589,13 @@ mod tests { } #[test] + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. fn test_memory_descriptor_count_for_efi_memory_map_unaccepted_memory() { with_locked_state(|| { let (mut gcd, _) = create_gcd(); // Add Unaccepted memory - should be counted + // SAFETY: This is a synthetic unaccepted memory range used only for test coverage. 
unsafe { gcd.add_memory_space( dxe_services::GcdMemoryType::Unaccepted, @@ -6455,6 +6618,7 @@ mod tests { let (mut gcd, _) = create_gcd(); // Add Reserved memory - should be counted + // SAFETY: This is a synthetic reserved range used only for test coverage. unsafe { gcd.add_memory_space(dxe_services::GcdMemoryType::Reserved, 0x90000000, UEFI_PAGE_SIZE * 20, 0) } .expect("Failed to add Reserved memory"); @@ -6470,8 +6634,10 @@ mod tests { static GCD: SpinLockedGcd = SpinLockedGcd::new(None); GCD.init(48, 16); + // SAFETY: get_memory returns a test-owned buffer of the requested size. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE) }; let address = mem.as_ptr() as usize; + // SAFETY: address/size come from the test buffer and are valid to initialize memory blocks. unsafe { GCD.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -6484,7 +6650,10 @@ mod tests { } // Add multiple memory regions with different types + // SAFETY: get_memory returns a test-owned buffer of the requested size. + // SAFETY: Test-controlled addresses and sizes are used with the GCD initialized by create_gcd or get_memory. unsafe { + // SAFETY: address/size come from the test buffer and are valid to initialize memory blocks. GCD.add_memory_space(dxe_services::GcdMemoryType::SystemMemory, 0x1000, 0x2000, efi::MEMORY_WB) .unwrap(); GCD.add_memory_space(dxe_services::GcdMemoryType::MemoryMappedIo, 0x5000, 0x1000, efi::MEMORY_UC) @@ -6544,8 +6713,11 @@ mod tests { GCD.init(48, 16); // Set up memory space + // SAFETY: get_memory returns a test-owned buffer of the requested size. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE * 100) }; + // SAFETY: address/size come from the test buffer and are valid to initialize memory blocks. let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); + // SAFETY: address/size come from the test buffer and are valid to initialize memory blocks. 
unsafe { GCD.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -6605,8 +6777,10 @@ mod tests { GCD.init(48, 16); // Set up memory space + // SAFETY: get_memory returns a test-owned buffer of the requested size. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE * 100) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); + // SAFETY: address/size come from the test buffer and are valid to initialize memory blocks. unsafe { GCD.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, @@ -6618,8 +6792,10 @@ mod tests { .unwrap(); } + // SAFETY: get_memory returns a test-owned buffer of the requested size. // Create DXE Core HOB let dxe_core_base = address + 0x1000; + // SAFETY: address/size come from the test buffer and are valid to initialize memory blocks. let dxe_core_len = 0x1000000; let dxe_core_hob = Hob::MemoryAllocationModule(&patina::pi::hob::MemoryAllocationModule { header: patina::pi::hob::header::Hob { @@ -6683,8 +6859,10 @@ mod tests { GCD.init(48, 16); // Set up memory space + // SAFETY: get_memory returns a test-owned buffer of the requested size. let mem = unsafe { get_memory(MEMORY_BLOCK_SLICE_SIZE * 100) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); + // SAFETY: address/size come from the test buffer and are valid to initialize memory blocks. unsafe { GCD.init_memory_blocks( dxe_services::GcdMemoryType::SystemMemory, diff --git a/patina_dxe_core/src/lib.rs b/patina_dxe_core/src/lib.rs index 8184f5786..848060b04 100644 --- a/patina_dxe_core/src/lib.rs +++ b/patina_dxe_core/src/lib.rs @@ -77,6 +77,7 @@ mod cpu; mod debugger_reload; mod decompress; mod driver_services; +mod dxe_dispatch_service; mod dxe_services; mod event_db; mod events; @@ -113,9 +114,9 @@ use core::{ ffi::c_void, num::NonZeroUsize, ptr::{self, NonNull}, - str::FromStr, }; +use cpu::{DxeCpu, DxeInterruptManager}; use gcd::SpinLockedGcd; use memory_manager::CoreMemoryManager; use patina::{ @@ -402,7 +403,7 @@ impl Core

{ /// Initializes the core with the given configuration, including GCD initialization, enabling allocations. /// /// Returns the relocated HOB list pointer that should be used for all subsequent operations. - fn init_memory(&self, physical_hob_list: *const c_void) -> *mut c_void { + fn init_memory(&'static self, physical_hob_list: *const c_void) -> *mut c_void { log::info!("DXE Core Crate v{DXE_CORE_VERSION}"); GCD.prioritize_32_bit_memory(P::MemoryInfo::prioritize_32_bit_memory()); @@ -472,9 +473,10 @@ impl Core

{ log::info!("GCD - After memory init:\n{GCD}"); let mut component_dispatcher = self.component_dispatcher.lock(); - component_dispatcher.add_service(cpu); - component_dispatcher.add_service(interrupt_manager); + component_dispatcher.add_service(DxeCpu(cpu)); + component_dispatcher.add_service(DxeInterruptManager(interrupt_manager)); component_dispatcher.add_service(CoreMemoryManager); + component_dispatcher.add_service(dxe_dispatch_service::CoreDxeDispatch::new(self)); component_dispatcher .add_service(cpu::PerfTimer::with_frequency(P::CpuInfo::perf_timer_frequency().unwrap_or(0))); @@ -532,10 +534,7 @@ impl Core

{ st.checksum_all(); // Install HobList configuration table - let (a, b, c, &[d0, d1, d2, d3, d4, d5, d6, d7]) = - uuid::Uuid::from_str("7739F24C-93D7-11D4-9A3A-0090273FC14D").expect("Invalid UUID format.").as_fields(); - let hob_list_guid: efi::Guid = efi::Guid::from_fields(a, b, c, d0, d1, &[d2, d3, d4, d5, d6, d7]); - config_tables::core_install_configuration_table(hob_list_guid, physical_hob_list, st) + config_tables::core_install_configuration_table(patina::guids::HOB_LIST.into_inner(), physical_hob_list, st) .expect("Unable to create configuration table due to invalid table entry."); // Install Memory Type Info configuration table. @@ -639,16 +638,17 @@ fn core_display_missing_arch_protocols() { fn call_bds() -> ! { // Enable status code capability in Firmware Performance DXE. - match protocols::PROTOCOL_DB.locate_protocol(status_code::PROTOCOL_GUID) { + match protocols::PROTOCOL_DB.locate_protocol(status_code::PROTOCOL_GUID.into_inner()) { Ok(status_code_ptr) => { if let Some(status_code_protocol_ptr) = NonNull::new(status_code_ptr) { // SAFETY: Some(status_code_protocol_ptr) guarantees that the pointer is non-NULL let status_code_protocol = unsafe { status_code_protocol_ptr.cast::().as_ref() }; + let dxe_core_guid = patina::guids::DXE_CORE.into_inner(); (status_code_protocol.report_status_code)( EFI_PROGRESS_CODE, EFI_SOFTWARE_DXE_CORE | EFI_SW_DXE_CORE_PC_HANDOFF_TO_NEXT, 0, - &patina::guids::DXE_CORE, + &dxe_core_guid, ptr::null(), ); } else { @@ -658,7 +658,7 @@ fn call_bds() -> ! 
{ Err(err) => log::error!("Unable to locate status code runtime protocol: {err:?}"), } - match protocols::PROTOCOL_DB.locate_protocol(bds::PROTOCOL_GUID) { + match protocols::PROTOCOL_DB.locate_protocol(bds::PROTOCOL_GUID.into_inner()) { Ok(bds_ptr) => { if let Some(bds_protocol_ptr) = NonNull::new(bds_ptr) { let bds_protocol_ptr = bds_protocol_ptr.cast::(); @@ -753,7 +753,7 @@ mod tests { protocols::core_install_protocol_interface( None, - patina::pi::protocols::bds::PROTOCOL_GUID, + patina::pi::protocols::bds::PROTOCOL_GUID.into_inner(), protocol as *mut _ as *mut c_void, ) .unwrap(); @@ -776,7 +776,7 @@ mod tests { with_reset_global_state(|| { protocols::core_install_protocol_interface( None, - patina::pi::protocols::bds::PROTOCOL_GUID, + patina::pi::protocols::bds::PROTOCOL_GUID.into_inner(), core::ptr::null_mut(), ) .unwrap(); @@ -827,7 +827,7 @@ mod tests { protocols::core_install_protocol_interface( None, - patina::pi::protocols::status_code::PROTOCOL_GUID, + patina::pi::protocols::status_code::PROTOCOL_GUID.into_inner(), protocol as *mut _ as *mut c_void, ) .unwrap(); @@ -850,7 +850,7 @@ mod tests { with_reset_global_state(|| { protocols::core_install_protocol_interface( None, - patina::pi::protocols::status_code::PROTOCOL_GUID, + patina::pi::protocols::status_code::PROTOCOL_GUID.into_inner(), core::ptr::null_mut(), ) .unwrap(); diff --git a/patina_dxe_core/src/memory_attributes_protocol.rs b/patina_dxe_core/src/memory_attributes_protocol.rs index 079891b89..4a1054ae3 100644 --- a/patina_dxe_core/src/memory_attributes_protocol.rs +++ b/patina_dxe_core/src/memory_attributes_protocol.rs @@ -265,9 +265,12 @@ pub(crate) fn install_memory_attributes_protocol() { MEMORY_ATTRIBUTES_PROTOCOL_INTERFACE.store(interface, Ordering::SeqCst); match PROTOCOL_DB.install_protocol_interface(None, efi::protocols::memory_attribute::PROTOCOL_GUID, interface) { - Ok((handle, _)) => unsafe { - MEMORY_ATTRIBUTES_PROTOCOL_HANDLE.store(handle, Ordering::SeqCst); - }, + Ok((handle, 
_)) => { + // SAFETY: handle is returned by protocol DB on a successful installation. + unsafe { + MEMORY_ATTRIBUTES_PROTOCOL_HANDLE.store(handle, Ordering::SeqCst); + } + } Err(e) => { log::error!("Failed to install MEMORY_ATTRIBUTES_PROTOCOL_GUID: {e:?}"); } @@ -277,6 +280,7 @@ pub(crate) fn install_memory_attributes_protocol() { #[cfg(feature = "compatibility_mode_allowed")] /// This function is called in compatibility mode to uninstall the protocol. pub(crate) fn uninstall_memory_attributes_protocol() { + // SAFETY: Reads global atomics to determine protocol handle/interface before uninstall. unsafe { match ( MEMORY_ATTRIBUTES_PROTOCOL_HANDLE.load(Ordering::SeqCst), @@ -375,6 +379,7 @@ mod tests { GCD.init(48, 16); // Add memory and MMIO regions + // SAFETY: get_memory returns a test-owned buffer sized for the requested region. let mem = unsafe { crate::test_support::get_memory(0x120000) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); @@ -418,6 +423,7 @@ mod tests { GCD.init(48, 16); // Add memory and MMIO regions + // SAFETY: get_memory returns a test-owned buffer sized for the requested region. let mem = unsafe { crate::test_support::get_memory(0x120000) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); @@ -462,6 +468,7 @@ mod tests { GCD.init(48, 16); // Add memory and MMIO regions + // SAFETY: get_memory returns a test-owned buffer sized for the requested region. let mem = unsafe { crate::test_support::get_memory(0x120000) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); @@ -507,6 +514,7 @@ mod tests { GCD.init(48, 16); // Add memory and MMIO regions + // SAFETY: get_memory returns a test-owned buffer sized for the requested region. 
let mem = unsafe { crate::test_support::get_memory(0x120000) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); @@ -562,6 +570,7 @@ mod tests { GCD.init(48, 16); // Add memory and MMIO regions + // SAFETY: get_memory returns a test-owned buffer sized for the requested region. let mem = unsafe { crate::test_support::get_memory(0x120000) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); @@ -608,6 +617,7 @@ mod tests { GCD.init(48, 16); // Add memory and MMIO regions + // SAFETY: get_memory returns a test-owned buffer sized for the requested region. let mem = unsafe { crate::test_support::get_memory(0x120000) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); @@ -656,6 +666,7 @@ mod tests { GCD.init(48, 16); // Add memory and MMIO regions + // SAFETY: get_memory returns a test-owned buffer sized for the requested region. let mem = unsafe { crate::test_support::get_memory(0x120000) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); @@ -702,6 +713,7 @@ mod tests { GCD.init(48, 16); // Add memory and MMIO regions + // SAFETY: get_memory returns a test-owned buffer sized for the requested region. let mem = unsafe { crate::test_support::get_memory(0x120000) }; let address = align_up(mem.as_ptr() as usize, 0x1000).unwrap(); diff --git a/patina_dxe_core/src/memory_manager.rs b/patina_dxe_core/src/memory_manager.rs index 5bf365e91..ea1755624 100644 --- a/patina_dxe_core/src/memory_manager.rs +++ b/patina_dxe_core/src/memory_manager.rs @@ -18,9 +18,9 @@ use patina::{ }, efi_types::EfiMemoryType, error::EfiError, - test::patina_test, - u_assert, u_assert_eq, uefi_pages_to_size, + uefi_pages_to_size, }; +use patina_test::{patina_test, u_assert, u_assert_eq}; use r_efi::efi; use crate::{ @@ -65,6 +65,7 @@ impl MemoryManager for CoreMemoryManager { match result { Ok(_) => { + // SAFETY: address/page_count come from a successful core_allocate_pages call. 
let allocation = unsafe { PageAllocation::new(address as usize, page_count, &CoreMemoryManager) .map_err(|_| MemoryError::InternalError)? @@ -76,6 +77,11 @@ impl MemoryManager for CoreMemoryManager { } } + /// Frees the block of pages at the given address of the given size. + /// + /// ## Safety + /// Caller must ensure that the given address corresponds to a valid block of pages that was allocated with + /// [Self::allocate_pages]. unsafe fn free_pages(&self, address: usize, page_count: usize) -> Result<(), MemoryError> { let result = core_free_pages(address as efi::PhysicalAddress, page_count); match result { @@ -207,7 +213,7 @@ fn allow_allocations_for_type(memory_type: EfiMemoryType) -> Result<(), MemoryEr #[patina_test] #[coverage(off)] -fn memory_manager_allocations_test(mm: Service) -> patina::test::Result { +fn memory_manager_allocations_test(mm: Service) -> patina_test::error::Result { // Allocate a page, and make sure it is accessible. let result = mm.allocate_pages(1, AllocationOptions::new()); u_assert!(result.is_ok(), "Failed to allocate single page."); @@ -222,6 +228,7 @@ fn memory_manager_allocations_test(mm: Service) -> patina::te u_assert!(result.is_ok(), "Failed to allocate single page."); let allocation = result.unwrap(); let address = allocation.into_raw_ptr::().unwrap() as usize; + // SAFETY: address was returned by allocate_pages for this manager. let result = unsafe { mm.free_pages(address, 1) }; u_assert!(result.is_ok(), "Failed to free page."); let result = mm.allocate_pages(1, AllocationOptions::new().with_strategy(PageAllocationStrategy::Address(address))); @@ -236,6 +243,7 @@ fn memory_manager_allocations_test(mm: Service) -> patina::te u_assert_eq!(allocation.page_count(), 8); let address = allocation.into_raw_ptr::().unwrap() as usize; u_assert_eq!(address % TEST_ALIGNMENT, 0, "Allocated page not correctly aligned."); + // SAFETY: address was returned by allocate_pages for this manager. 
let result = unsafe { mm.free_pages(address, 8) }; u_assert!(result.is_ok(), "Failed to free page."); @@ -272,7 +280,7 @@ fn memory_manager_allocations_test(mm: Service) -> patina::te } #[patina_test] -fn memory_manager_attributes_test(mm: Service) -> patina::test::Result { +fn memory_manager_attributes_test(mm: Service) -> patina_test::error::Result { // The default attributes for memory should be read/write. let result = mm.allocate_pages(1, AllocationOptions::new()); u_assert!(result.is_ok(), "Failed to allocate single page."); @@ -284,6 +292,7 @@ fn memory_manager_attributes_test(mm: Service) -> patina::tes u_assert_eq!(access, AccessType::ReadWrite, "Allocation did not return Read/Write access."); // Test changing the attributes to read only. + // SAFETY: address was returned by allocate_pages for this manager. let result = unsafe { mm.set_page_attributes(address, 1, AccessType::ReadOnly, None) }; u_assert!(result.is_ok(), "Failed to set page attributes."); let result = mm.get_page_attributes(address, 1); @@ -293,6 +302,7 @@ fn memory_manager_attributes_test(mm: Service) -> patina::tes u_assert_eq!(new_caching, caching, "Caching type changes unexpectedly."); // Free the page + // SAFETY: address was returned by allocate_pages for this manager. let result = unsafe { mm.free_pages(address, 1) }; u_assert!(result.is_ok(), "Failed to free page."); diff --git a/patina_dxe_core/src/misc_boot_services.rs b/patina_dxe_core/src/misc_boot_services.rs index 6377f53b3..464140856 100644 --- a/patina_dxe_core/src/misc_boot_services.rs +++ b/patina_dxe_core/src/misc_boot_services.rs @@ -44,9 +44,9 @@ impl ArchProtocolPtr { } } -// SAFETY: ArchProtocolPtr is Send/Sync because the pointer it wraps is initialized in a thread-safe manner (using -// `Once`), and the pointer itself is never used to mutate data. +// SAFETY: ArchProtocolPtr is Send because the pointer is initialized once and never mutates the pointed-to data. 
unsafe impl Send for ArchProtocolPtr {} +// SAFETY: ArchProtocolPtr is Sync because the pointer is initialized once and never mutates the pointed-to data. unsafe impl Sync for ArchProtocolPtr {} static METRONOME_ARCH_PTR: ArchProtocolPtr = ArchProtocolPtr::new(); @@ -60,8 +60,7 @@ static WATCHDOG_ARCH_PTR: ArchProtocolPtr = ArchP // Pre-EBS GUID is a Project Mu defined GUID. It should be removed in favor of the UEFI Spec defined // Before Exit Boot Services event group when all platform usage is confirmed to be transitioned to that. // { 0x5f1d7e16, 0x784a, 0x4da2, { 0xb0, 0x84, 0xf8, 0x12, 0xf2, 0x3a, 0x8d, 0xce }} -pub const PRE_EBS_GUID: efi::Guid = - efi::Guid::from_fields(0x5f1d7e16, 0x784a, 0x4da2, 0xb0, 0x84, &[0xf8, 0x12, 0xf2, 0x3a, 0x8d, 0xce]); +pub const PRE_EBS_GUID: patina::BinaryGuid = patina::BinaryGuid::from_string("5F1D7E16-784A-4DA2-B084-F812F23A8DCE"); // TODO [END]: LOCAL (TEMP) GUID DEFINITIONS (MOVE LATER) extern "efiapi" fn calculate_crc32(data: *mut c_void, data_size: usize, crc_32: *mut u32) -> efi::Status { if data.is_null() || data_size == 0 || crc_32.is_null() { @@ -137,13 +136,13 @@ extern "efiapi" fn set_watchdog_timer( // This callback is invoked when the Metronome Architectural protocol is installed. It initializes the // METRONOME_ARCH_PTR to point to the Metronome Architectural protocol interface. extern "efiapi" fn metronome_arch_available(event: efi::Event, _context: *mut c_void) { - match PROTOCOL_DB.locate_protocol(protocols::metronome::PROTOCOL_GUID) { + match PROTOCOL_DB.locate_protocol(protocols::metronome::PROTOCOL_GUID.into_inner()) { Ok(metronome_arch_ptr) => { - // SAFETY: metronome_arch_ptr is expected to be a valid pointer to the metronome protocol since it is - // associated with the metronome arch guid. 
if metronome_arch_ptr.is_null() { panic!("Located metronome protocol pointer is null."); } + // SAFETY: metronome_arch_ptr is expected to be a valid pointer to the metronome protocol since it is + // associated with the metronome arch guid. unsafe { METRONOME_ARCH_PTR.init(metronome_arch_ptr) }; if let Err(status_err) = EVENT_DB.close_event(event) { log::warn!("Could not close event for metronome_arch_available due to error {status_err:?}"); @@ -157,13 +156,13 @@ extern "efiapi" fn metronome_arch_available(event: efi::Event, _context: *mut c_ // This callback is invoked when the Watchdog Timer Architectural protocol is installed. It initializes the // WATCHDOG_ARCH_PTR to point to the Watchdog Timer Architectural protocol interface. extern "efiapi" fn watchdog_arch_available(event: efi::Event, _context: *mut c_void) { - match PROTOCOL_DB.locate_protocol(protocols::watchdog::PROTOCOL_GUID) { + match PROTOCOL_DB.locate_protocol(protocols::watchdog::PROTOCOL_GUID.into_inner()) { Ok(watchdog_arch_ptr) => { - // SAFETY: watchdog_arch_ptr is expected to be a valid pointer to the watchdog protocol since it is - // associated with the watchdog arch guid. if watchdog_arch_ptr.is_null() { panic!("Located watchdog protocol pointer is null."); } + // SAFETY: watchdog_arch_ptr is expected to be a valid pointer to the watchdog protocol since it is + // associated with the watchdog arch guid. 
unsafe { WATCHDOG_ARCH_PTR.init(watchdog_arch_ptr) }; if let Err(status_err) = EVENT_DB.close_event(event) { log::warn!("Could not close event for watchdog_arch_available due to error {status_err:?}"); @@ -179,7 +178,7 @@ pub extern "efiapi" fn exit_boot_services(_handle: efi::Handle, map_key: usize) log::info!("EBS initiated."); // Pre-exit boot services and before exit boot services are only signaled once if !EXIT_BOOT_SERVICES_CALLED.is_completed() { - EVENT_DB.signal_group(PRE_EBS_GUID); + EVENT_DB.signal_group(PRE_EBS_GUID.into_inner()); // Signal the event group before exit boot services EVENT_DB.signal_group(efi::EVENT_GROUP_BEFORE_EXIT_BOOT_SERVICES); @@ -188,9 +187,11 @@ pub extern "efiapi" fn exit_boot_services(_handle: efi::Handle, map_key: usize) } // Disable the timer - match PROTOCOL_DB.locate_protocol(protocols::timer::PROTOCOL_GUID) { + match PROTOCOL_DB.locate_protocol(protocols::timer::PROTOCOL_GUID.into_inner()) { Ok(timer_arch_ptr) => { let timer_arch_ptr = timer_arch_ptr as *mut protocols::timer::Protocol; + // SAFETY: timer_arch_ptr comes from locate_protocol and is considered valid based on the successful + // return status from locate_protocol. 
let timer_arch = unsafe { &*(timer_arch_ptr) }; (timer_arch.set_timer_period)(timer_arch_ptr, 0); } @@ -207,7 +208,7 @@ pub extern "efiapi" fn exit_boot_services(_handle: efi::Handle, map_key: usize) Err(err) => { log::error!("Failed to terminate memory map: {err:?}"); GCD.unlock_memory_space(); - EVENT_DB.signal_group(guids::EBS_FAILED); + EVENT_DB.signal_group(guids::EBS_FAILED.into_inner()); return err.into(); } } @@ -216,15 +217,17 @@ pub extern "efiapi" fn exit_boot_services(_handle: efi::Handle, map_key: usize) EVENT_DB.signal_group(efi::EVENT_GROUP_EXIT_BOOT_SERVICES); // Initialize StatusCode and send EFI_SW_BS_PC_EXIT_BOOT_SERVICES - match PROTOCOL_DB.locate_protocol(protocols::status_code::PROTOCOL_GUID) { + match PROTOCOL_DB.locate_protocol(protocols::status_code::PROTOCOL_GUID.into_inner()) { Ok(status_code_ptr) => { let status_code_ptr = status_code_ptr as *mut protocols::status_code::Protocol; + // SAFETY: status_code_ptr comes from locate_protocol and is considered valid based on the successful + // return status from locate_protocol. let status_code_protocol = unsafe { &*(status_code_ptr) }; (status_code_protocol.report_status_code)( status_code::EFI_PROGRESS_CODE, status_code::EFI_SOFTWARE_EFI_BOOT_SERVICE | status_code::EFI_SW_BS_PC_EXIT_BOOT_SERVICES, 0, - &guids::DXE_CORE, + &guids::DXE_CORE.into_inner(), core::ptr::null(), ); } @@ -244,9 +247,11 @@ pub extern "efiapi" fn exit_boot_services(_handle: efi::Handle, map_key: usize) .expect("The System Table pointer is null. This is invalid.") .clear_boot_time_services(); } - match PROTOCOL_DB.locate_protocol(protocols::runtime::PROTOCOL_GUID) { + match PROTOCOL_DB.locate_protocol(protocols::runtime::PROTOCOL_GUID.into_inner()) { Ok(rt_arch_ptr) => { let rt_arch_ptr = rt_arch_ptr as *mut protocols::runtime::Protocol; + // SAFETY: rt_arch_ptr comes from locate_protocol and is considered valid based on the successful + // return status from locate_protocol. 
let rt_arch_protocol = unsafe { &mut *(rt_arch_ptr) }; rt_arch_protocol.at_runtime.store(true, Ordering::SeqCst); } @@ -273,7 +278,7 @@ pub fn init_misc_boot_services_support(st: &mut EfiSystemTable) { .expect("Failed to create metronome available callback."); PROTOCOL_DB - .register_protocol_notify(protocols::metronome::PROTOCOL_GUID, event) + .register_protocol_notify(protocols::metronome::PROTOCOL_GUID.into_inner(), event) .expect("Failed to register protocol notify on metronome available."); //set up call back for watchdog arch protocol installation. @@ -282,7 +287,7 @@ pub fn init_misc_boot_services_support(st: &mut EfiSystemTable) { .expect("Failed to create watchdog available callback."); PROTOCOL_DB - .register_protocol_notify(protocols::watchdog::PROTOCOL_GUID, event) + .register_protocol_notify(protocols::watchdog::PROTOCOL_GUID.into_inner(), event) .expect("Failed to register protocol notify on metronome available."); } @@ -462,6 +467,7 @@ mod tests { unimplemented!() } let watchdog = protocols::watchdog::Protocol { register_handler, set_timer_period, get_timer_period }; + // SAFETY: The mock protocol lives for the duration of the test and the pointer is only used by the test. unsafe { WATCHDOG_ARCH_PTR.init(&watchdog as *const _ as *mut c_void); }; @@ -521,6 +527,7 @@ mod tests { wait_for_tick, }; + // SAFETY: The mock protocol lives for the duration of the test and the pointer is only used by the test. 
unsafe { METRONOME_ARCH_PTR.init(&metronome as *const _ as *mut c_void); } diff --git a/patina_dxe_core/src/pecoff.rs b/patina_dxe_core/src/pecoff.rs index d82cb2c13..60eb92cb7 100644 --- a/patina_dxe_core/src/pecoff.rs +++ b/patina_dxe_core/src/pecoff.rs @@ -433,9 +433,13 @@ pub fn load_resource_section(pe_info: &UefiPeInfo, image: &[u8]) -> error::Resul return Err(error::Error::Goblin(goblin::error::Error::BufferTooShort(offset, "bytes"))); } - let mut directory_entry: DirectoryEntry = resource_section.pread(core::mem::size_of::())?; + let mut directory_entry: DirectoryEntry; + + for i in 0..directory.number_of_named_entries { + let entry_offset = + core::mem::size_of::() + (i as usize) * core::mem::size_of::(); + directory_entry = resource_section.pread(entry_offset)?; - for _ in 0..directory.number_of_named_entries { if directory_entry.name_is_string() { if directory_entry.name_offset() >= size { return Err(error::Error::Goblin(goblin::error::Error::BufferTooShort( @@ -447,14 +451,15 @@ pub fn load_resource_section(pe_info: &UefiPeInfo, image: &[u8]) -> error::Resul let resource_directory_string = resource_section.pread::(directory_entry.name_offset() as usize)?; - let name_start_offset = (directory_entry.name_offset() + 1) as usize; - let name_end_offset = name_start_offset + (resource_directory_string.length * 2) as usize; + let name_start_offset = + directory_entry.name_offset() as usize + core::mem::size_of::(); + let name_end_offset = name_start_offset + (resource_directory_string.length as usize) * 2; let string_val = resource_section .get(name_start_offset..name_end_offset) .ok_or(error::Error::Goblin(goblin::error::Error::BufferTooShort(name_end_offset, "bytes")))?; - // L"HII" = [0x0, 0x48, 0x0, 0x49, 0x0, 0x49] - if resource_directory_string.length == 3 && string_val == [0x0, 0x48, 0x0, 0x49, 0x0, 0x49] { + // L"HII" in UTF-16LE = [0x48, 0x00, 0x49, 0x00, 0x49, 0x00] + if resource_directory_string.length == 3 && string_val == [0x48, 0x00, 0x49, 0x00, 
0x49, 0x00] { if directory_entry.data_is_directory() { if directory_entry.offset_to_directory() > size { return Err(error::Error::Goblin(goblin::error::Error::BufferTooShort( @@ -874,4 +879,98 @@ mod tests { Err(e) => panic!("Expected BufferTooShort error, got {e:?}"), } } + + /// Verifies that the HII resource string comparison in `load_resource_section` uses the + /// correct UTF-16LE encoding and reads from the correct offset in the resource directory. + #[test] + fn test_hii_string_uses_correct_utf16le_encoding_at_correct_offset() { + test_support::init_test_logger(); + let image = include_bytes!("../resources/test/pe32/test_image_msvc_hii.pe32"); + let image_info = UefiPeInfo::parse(image).unwrap(); + + let result = load_resource_section(&image_info, image).unwrap(); + assert!(result.is_some(), "load_resource_section should find the HII resource section"); + + // Knowing that the string is at offset 0xAA5A in test_image_msvc_hii.pe32, + // verify load_resource_section correctly identifies it by checking that the + // UTF-16LE encoding of "HII" is correct at that offset. + const HII_STRING_FILE_OFFSET: usize = 0xAA5A; + assert_eq!( + &image[HII_STRING_FILE_OFFSET..HII_STRING_FILE_OFFSET + 6], + &[0x48u8, 0x00, 0x49, 0x00, 0x49, 0x00], + "UTF-16LE for 'HII' should be [0x48, 0x00, 0x49, 0x00, 0x49, 0x00]" + ); + } + + /// Verifies that `load_resource_section` iterates past non-HII named entries to find the HII entry. + /// + /// The resource section constructed in the test has two named entries: "ABC" (first) and "HII" (second). + /// Note: This test creates a .rsrc section in-memory because there is not a known image that has multiple + /// named entries with the HII entry not being the first entry. + #[test] + fn test_load_resource_section_finds_hii_when_not_first_entry() { + test_support::init_test_logger(); + + // Build a .rsrc section with 2 named entries. 
+ // + // From the PE/COFF spec: + // + // The resource directory string area consists of Unicode strings, which are word-aligned. These + // strings are stored together after the last Resource Directory entry and before the first + // Resource Data entry. + // + // Layout: + // 0x00: Directory (16 bytes) — 2 named entries, 0 id entries + // 0x10: Entry[0] (8 bytes) — named "ABC" at string offset 0x30 + // 0x18: Entry[1] (8 bytes) — named "HII" at string offset 0x38, data at 0x40 + // 0x20: (padding, 16 bytes) + // 0x30: DirectoryString "ABC" — length=3, then "ABC" (8 bytes) + // 0x38: DirectoryString "HII" — length=3, then "HII" (8 bytes) + // 0x40: DataEntry (16 bytes) — offset_to_data=0xABCD, size=0x100 + // + // Structure definitions: + // - Directory: https://learn.microsoft.com/windows/win32/debug/pe-format#resource-directory-table + // - Entry: https://learn.microsoft.com/windows/win32/debug/pe-format#resource-directory-entries + // - DirectoryString: https://learn.microsoft.com/windows/win32/debug/pe-format#resource-directory-entries + let mut rsrc = vec![0u8; 0x50]; + + // Directory header at 0x00 + rsrc[12] = 2; // number_of_named_entries = 2 + + // Entry[0] at 0x10: "ABC" — id=0x80000030 (name_is_string + offset 0x30) + rsrc[0x10..0x14].copy_from_slice(&0x8000_0030u32.to_le_bytes()); + // data=0x00000000 (not a directory, won't be reached since "ABC" != "HII") + rsrc[0x14..0x18].copy_from_slice(&0x0000_0000u32.to_le_bytes()); + + // Entry[1] at 0x18: "HII" — id=0x80000038 (name_is_string + offset 0x38) + rsrc[0x18..0x1C].copy_from_slice(&0x8000_0038u32.to_le_bytes()); + // data=0x00000040 (not a directory, points to DataEntry at 0x40) + rsrc[0x1C..0x20].copy_from_slice(&0x0000_0040u32.to_le_bytes()); + + // DirectoryString "ABC" at 0x30: length=3 + rsrc[0x30..0x32].copy_from_slice(&3u16.to_le_bytes()); + // "ABC" (UTF-16LE) = [0x46, 0x00, 0x4F, 0x00, 0x4F, 0x00] + rsrc[0x32..0x38].copy_from_slice(&[0x46, 0x00, 0x4F, 0x00, 0x4F, 0x00]); + + // 
DirectoryString "HII" at 0x38: length=3 + rsrc[0x38..0x3A].copy_from_slice(&3u16.to_le_bytes()); + // "HII" (UTF-16LE) = [0x48, 0x00, 0x49, 0x00, 0x49, 0x00] + rsrc[0x3A..0x40].copy_from_slice(&[0x48, 0x00, 0x49, 0x00, 0x49, 0x00]); + + // DataEntry at 0x40: offset_to_data=0xABCD, size=0x100 + rsrc[0x40..0x44].copy_from_slice(&0x0000_ABCDu32.to_le_bytes()); + rsrc[0x44..0x48].copy_from_slice(&0x0000_0100u32.to_le_bytes()); + + let mut pe_info = UefiPeInfo::default(); + pe_info.sections.push(goblin::pe::section_table::SectionTable { + name: *b".rsrc\0\0\0", + virtual_size: rsrc.len() as u32, + size_of_raw_data: rsrc.len() as u32, + pointer_to_raw_data: 0, + ..Default::default() + }); + + let result = load_resource_section(&pe_info, &rsrc).unwrap(); + assert_eq!(result, Some((0xABCD, 0x100))); + } } diff --git a/patina_dxe_core/src/pi_dispatcher.rs b/patina_dxe_core/src/pi_dispatcher.rs index 515ff4f83..176ea1de4 100644 --- a/patina_dxe_core/src/pi_dispatcher.rs +++ b/patina_dxe_core/src/pi_dispatcher.rs @@ -146,7 +146,7 @@ impl PiDispatcher

{ .expect("Failed to create fv protocol installation callback."); PROTOCOL_DB - .register_protocol_notify(firmware_volume_block::PROTOCOL_GUID, event) + .register_protocol_notify(firmware_volume_block::PROTOCOL_GUID.into_inner(), event) .expect("Failed to register protocol notify on fv protocol."); // Perform image related initialization for the debugger. @@ -344,7 +344,7 @@ impl PiDispatcher

{ }; if let Some(fv_name_guid) = fv_name_guid - && self.is_fv_already_installed(fv_name_guid) + && self.is_fv_already_installed(fv_name_guid.into_inner()) { log::debug!( "Skipping FV file {:?} - FV with name GUID {:?} is already installed", @@ -412,14 +412,16 @@ impl PiDispatcher

{ /// `false` otherwise fn is_fv_already_installed(&self, fv_name_guid: efi::Guid) -> bool { // Get all handles with a FVB protocol - let fvb_handles = match PROTOCOL_DB.locate_handles(Some(firmware_volume_block::PROTOCOL_GUID)) { + let fvb_handles = match PROTOCOL_DB.locate_handles(Some(firmware_volume_block::PROTOCOL_GUID.into_inner())) { Ok(handles) => handles, Err(_) => return false, }; // Check each FVB handle to see if it has the same FV name GUID for handle in fvb_handles { - let Ok(ptr) = PROTOCOL_DB.get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID) else { + let Ok(ptr) = + PROTOCOL_DB.get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID.into_inner()) + else { continue; }; let fvb_ptr = ptr as *mut firmware_volume_block::Protocol; @@ -487,7 +489,7 @@ impl PiDispatcher

{ extern "efiapi" fn fw_vol_event_protocol_notify_efiapi(_event: efi::Event, _context: *mut c_void) { let pd = &crate::Core::

::instance().pi_dispatcher; //Note: runs at TPL_CALLBACK - match PROTOCOL_DB.locate_handles(Some(firmware_volume_block::PROTOCOL_GUID)) { + match PROTOCOL_DB.locate_handles(Some(firmware_volume_block::PROTOCOL_GUID.into_inner())) { Ok(fv_handles) => pd.add_fv_handles(fv_handles).expect("Error adding FV handles"), Err(_) => panic!("could not locate handles in protocol call back"), }; @@ -514,8 +516,9 @@ struct PendingFirmwareVolumeImage { impl PendingFirmwareVolumeImage { // authenticate the pending firmware volume via the Security Architectural Protocol fn evaluate_auth(&self) -> Result<(), EfiError> { + // SAFETY: locate_protocol returns a valid pointer when present. as_ref is used for shared access. let security_protocol = unsafe { - match PROTOCOL_DB.locate_protocol(patina::pi::protocols::security::PROTOCOL_GUID) { + match PROTOCOL_DB.locate_protocol(patina::pi::protocols::security::PROTOCOL_GUID.into_inner()) { Ok(protocol) => (protocol as *mut patina::pi::protocols::security::Protocol) .as_ref() .expect("Security Protocol should not be null"), @@ -591,7 +594,9 @@ impl DispatcherContext { for handle in new_handles { if self.processed_fvs.insert(handle) { //process freshly discovered FV - let fvb_ptr = match PROTOCOL_DB.get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID) { + let fvb_ptr = match PROTOCOL_DB + .get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID.into_inner()) + { Err(_) => { panic!( "get_interface_for_handle failed to return an interface on a handle where it should have existed" @@ -600,6 +605,8 @@ impl DispatcherContext { Ok(protocol) => protocol as *mut firmware_volume_block::Protocol, }; + // SAFETY: fvb_ptr was successfully returned from get_interface_for_handle and should point to a + // valid FVB protocol instance. The as_ref() call checks for null. 
let fvb = unsafe { fvb_ptr.as_ref().expect("get_interface_for_handle returned NULL ptr for FirmwareVolumeBlock") }; @@ -639,7 +646,7 @@ impl DispatcherContext { let file = file?; if file.file_type_raw() == ffs::file::raw::r#type::DRIVER { let file = file.clone(); - let file_name = file.name(); + let file_name = file.name().into_inner(); let sections = file.sections_with_extractor(extractor)?; let depex = sections @@ -673,6 +680,8 @@ impl DispatcherContext { }; let mut filename_nodes_buf = Vec::::with_capacity(FILENAME_NODE_SIZE + END_NODE_SIZE); // 20 bytes (filename_node + GUID) + 4 bytes (end node) + // SAFETY: filename_node is a local value. Its byte representation is valid for the size + // of the struct filename_nodes_buf.extend_from_slice(unsafe { core::slice::from_raw_parts( &filename_node as *const _ as *const u8, @@ -683,6 +692,8 @@ impl DispatcherContext { filename_nodes_buf.extend_from_slice(file_name.as_bytes()); // Copy filename_end_node into the buffer + // SAFETY: filename_end_node is a local value. Its byte representation is valid for the + // size of the struct filename_nodes_buf.extend_from_slice(unsafe { core::slice::from_raw_parts( &filename_end_node as *const _ as *const u8, @@ -715,7 +726,7 @@ impl DispatcherContext { } if file.file_type_raw() == ffs::file::raw::r#type::FIRMWARE_VOLUME_IMAGE { let file = file.clone(); - let file_name = file.name(); + let file_name = file.name().into_inner(); let sections = file.sections_with_extractor(extractor)?; @@ -778,6 +789,7 @@ impl DispatcherContext { } } +// SAFETY: DispatcherContext is owned by the dispatcher and not shared across threads without synchronization. unsafe impl Send for DispatcherContext {} #[cfg(test)] @@ -824,6 +836,7 @@ mod tests { F: Fn() + std::panic::RefUnwindSafe, { test_support::with_global_lock(|| { + // SAFETY: Test-only initialization of the protocol database occurs under the global lock. 
unsafe { test_support::init_test_protocol_db() }; f(); }) @@ -843,6 +856,7 @@ mod tests { _: *mut pi::protocols::firmware_volume_block::Protocol, addr: *mut u64, ) -> efi::Status { + // SAFETY: addr is provided by the caller and is expected to be valid for a single u64 write. unsafe { addr.write(0) }; efi::Status::SUCCESS } @@ -852,6 +866,7 @@ mod tests { _: *mut pi::protocols::firmware_volume_block::Protocol, addr: *mut u64, ) -> efi::Status { + // SAFETY: addr is provided by the caller and is expected to be valid for a single u64 write. unsafe { addr.write(GET_PHYSICAL_ADDRESS3_VALUE) }; efi::Status::SUCCESS } @@ -932,6 +947,7 @@ mod tests { assert_eq!(CORE.pi_dispatcher.dispatcher_context.lock().pending_drivers.len(), DRIVERS_IN_DXEFV); }); + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; } @@ -975,15 +991,17 @@ mod tests { // Monkey Patch get_physical_address to one that returns an error. let protocol = PROTOCOL_DB - .get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID) + .get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID.into_inner()) .expect("Failed to get FVB protocol"); let protocol = protocol as *mut firmware_volume_block::Protocol; + // SAFETY: protocol was retrieved from PROTOCOL_DB and remains valid for this test scope. unsafe { &mut *protocol }.get_physical_address = get_physical_address1; CORE.pi_dispatcher.add_fv_handles(vec![handle]).expect("Failed to add FV handle"); assert_eq!(CORE.pi_dispatcher.dispatcher_context.lock().pending_drivers.len(), 0); }); + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; } @@ -1011,15 +1029,17 @@ mod tests { // Monkey Patch get_physical_address to set address to 0. 
let protocol = PROTOCOL_DB - .get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID) + .get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID.into_inner()) .expect("Failed to get FVB protocol"); let protocol = protocol as *mut firmware_volume_block::Protocol; + // SAFETY: protocol was retrieved from PROTOCOL_DB and remains valid for this test scope. unsafe { &mut *protocol }.get_physical_address = get_physical_address2; CORE.pi_dispatcher.add_fv_handles(vec![handle]).expect("Failed to add FV handle"); assert_eq!(CORE.pi_dispatcher.dispatcher_context.lock().pending_drivers.len(), 0); }); + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; } @@ -1038,23 +1058,28 @@ mod tests { // SAFETY: fv is leaked to ensure it is not freed and remains valid for the duration of the program. let fv_phys_addr = fv_raw.expose_provenance() as u64; + // SAFETY: fv_raw is leaked for the duration of this test scope. let handle = unsafe { CORE.pi_dispatcher.fv_data.lock().install_firmware_volume(fv_phys_addr, None).unwrap() }; // Monkey Patch get_physical_address to set to a slightly invalid address. let protocol = PROTOCOL_DB - .get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID) + .get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID.into_inner()) .expect("Failed to get FVB protocol"); let protocol = protocol as *mut firmware_volume_block::Protocol; + // SAFETY: protocol was retrieved from PROTOCOL_DB and remains valid for this test scope. unsafe { &mut *protocol }.get_physical_address = get_physical_address3; + // SAFETY: Test-only mutable static is used under the global lock. unsafe { GET_PHYSICAL_ADDRESS3_VALUE = fv_phys_addr + 0x1000 }; CORE.pi_dispatcher.add_fv_handles(vec![handle]).expect("Failed to add FV handle"); + // SAFETY: Reset the test-only mutable static under the global lock. 
unsafe { GET_PHYSICAL_ADDRESS3_VALUE = 0 }; assert_eq!(CORE.pi_dispatcher.dispatcher_context.lock().pending_drivers.len(), 0); }); + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; } @@ -1084,6 +1109,7 @@ mod tests { assert_eq!(CORE.pi_dispatcher.dispatcher_context.lock().pending_firmware_volume_images.len(), 1); }); + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; } @@ -1114,6 +1140,7 @@ mod tests { CORE.pi_dispatcher.display_discovered_not_dispatched(); }); + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; } @@ -1147,6 +1174,7 @@ mod tests { assert_eq!(CORE.pi_dispatcher.dispatcher_context.lock().pending_drivers.len(), DRIVERS_IN_DXEFV); }); + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; } @@ -1204,6 +1232,7 @@ mod tests { assert_eq!(result, Err(EfiError::NotFound)); }); + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; } @@ -1238,6 +1267,7 @@ mod tests { assert_eq!(result, Err(EfiError::NotFound)); }); + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; } @@ -1262,6 +1292,7 @@ mod tests { assert!(!this.is_null()); assert_eq!(authentication_status, 0); + // SAFETY: `file` is a valid device path pointer provided by the dispatcher for this callback. unsafe { let mut node_walker = DevicePathWalker::new(file); //outer FV of NESTEDFV.Fv does not have an extended header so expect MMAP device path. 
@@ -1295,7 +1326,7 @@ mod tests { PROTOCOL_DB .install_protocol_interface( None, - patina::pi::protocols::security::PROTOCOL_GUID, + patina::pi::protocols::security::PROTOCOL_GUID.into_inner(), &security_protocol as *const _ as *mut _, ) .unwrap(); @@ -1326,18 +1357,20 @@ mod tests { static CORE: MockCore = MockCore::new(NullSectionExtractor::new()); CORE.override_instance(); + // SAFETY: fv_raw is leaked for the duration of this test scope. let _handle = unsafe { CORE.pi_dispatcher.install_firmware_volume(fv_raw.expose_provenance() as u64, None).unwrap() }; // Get the actual FV name GUID from the installed FV let actual_fv_guid = { + // SAFETY: fv_raw points to a valid FV buffer for parsing in this test. let volume = unsafe { VolumeRef::new_from_address(fv_raw.expose_provenance() as u64).unwrap() }; volume.fv_name().expect("Test FV should have a name GUID") }; // Check that the installed FV is detected assert!( - CORE.pi_dispatcher.is_fv_already_installed(actual_fv_guid), + CORE.pi_dispatcher.is_fv_already_installed(actual_fv_guid.into_inner()), "Should return true when FV is installed" ); @@ -1355,6 +1388,7 @@ mod tests { "Should return false for non-existent FV GUID" ); + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; }); } @@ -1386,6 +1420,7 @@ mod tests { let fv = fv.into_boxed_slice(); let fv_raw = Box::into_raw(fv); + // SAFETY: fv_raw is leaked for the duration of this test scope. let _handle = unsafe { CORE.pi_dispatcher.install_firmware_volume(fv_raw.expose_provenance() as u64, None).unwrap() }; @@ -1402,6 +1437,7 @@ mod tests { "Should return false when GUID doesn't match any installed FV" ); + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. 
let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; }); } @@ -1422,6 +1458,7 @@ mod tests { let fv_raw = Box::into_raw(fv); // Install the parent FV + // SAFETY: fv_raw is leaked for the duration of this test scope. let parent_handle = unsafe { CORE.pi_dispatcher.install_firmware_volume(fv_raw.expose_provenance() as u64, None).unwrap() }; @@ -1445,6 +1482,7 @@ mod tests { .expect("There should be a pending FV image"); // Extract and install the child FV separately to simulate it being already installed if let Some(section) = child_fv_sections.first() { let child_fv_data = section.try_content_as_slice().expect("Should be able to get child FV data"); + // SAFETY: child_fv_data points to a valid FV image buffer for parsing. let child_volume = unsafe { VolumeRef::new_from_address(child_fv_data.as_ptr() as u64) } .expect("Should be able to parse the child FV"); @@ -1452,6 +1490,7 @@ mod tests { // Install the child FV directly let child_fv_box: Box<[u8]> = Box::from(child_fv_data); let child_fv_raw = Box::into_raw(child_fv_box); + // SAFETY: child_fv_raw is leaked for the duration of this test scope. let _child_handle = unsafe { CORE.pi_dispatcher .install_firmware_volume(child_fv_raw.expose_provenance() as u64, Some(parent_handle)) @@ -1459,7 +1498,7 @@ mod tests { }; assert!( - CORE.pi_dispatcher.is_fv_already_installed(child_fv_guid), + CORE.pi_dispatcher.is_fv_already_installed(child_fv_guid.into_inner()), "Child FV should be detected as already installed" ); @@ -1478,10 +1517,12 @@ mod tests { "Pending FV images should be empty after dispatch skipped duplicate" ); + // SAFETY: child_fv_raw was created from Box::into_raw and is dropped only once here. let _dropped_child = unsafe { Box::from_raw(child_fv_raw) }; } } + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. 
let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; }); } @@ -1500,21 +1541,22 @@ mod tests { static CORE: MockCore = MockCore::new(NullSectionExtractor::new()); CORE.override_instance(); + // SAFETY: fv_raw is leaked for the duration of this test scope. let handle = unsafe { CORE.pi_dispatcher.install_firmware_volume(fv_raw.expose_provenance() as u64, None).unwrap() }; let protocol = PROTOCOL_DB - .get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID) + .get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID.into_inner()) .expect("Failed to get FVB protocol"); PROTOCOL_DB - .uninstall_protocol_interface(handle, firmware_volume_block::PROTOCOL_GUID, protocol) + .uninstall_protocol_interface(handle, firmware_volume_block::PROTOCOL_GUID.into_inner(), protocol) .expect("Failed to uninstall protocol"); PROTOCOL_DB .install_protocol_interface( Some(handle), - firmware_volume_block::PROTOCOL_GUID, + firmware_volume_block::PROTOCOL_GUID.into_inner(), core::ptr::null_mut::(), ) .expect("Failed to install null protocol"); @@ -1533,6 +1575,7 @@ mod tests { "Should return false when the FVB protocol is null" ); + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; }); } @@ -1551,14 +1594,16 @@ mod tests { static CORE: MockCore = MockCore::new(NullSectionExtractor::new()); CORE.override_instance(); + // SAFETY: fv_raw is leaked for the duration of this test scope. 
let handle = unsafe { CORE.pi_dispatcher.install_firmware_volume(fv_raw.expose_provenance() as u64, None).unwrap() }; let protocol = PROTOCOL_DB - .get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID) + .get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID.into_inner()) .expect("Failed to get FVB protocol"); let protocol = protocol as *mut firmware_volume_block::Protocol; // Patch get_physical_address to return an error + // SAFETY: protocol was retrieved from PROTOCOL_DB and remains valid for this test scope. unsafe { &mut *protocol }.get_physical_address = get_physical_address1; let test_guid = r_efi::efi::Guid::from_fields( @@ -1574,6 +1619,7 @@ mod tests { "Should return false when get_physical_address fails" ); + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; }); } @@ -1592,14 +1638,16 @@ mod tests { static CORE: MockCore = MockCore::new(NullSectionExtractor::new()); CORE.override_instance(); + // SAFETY: fv_raw is leaked for the duration of this test scope. let handle = unsafe { CORE.pi_dispatcher.install_firmware_volume(fv_raw.expose_provenance() as u64, None).unwrap() }; // Patch get_physical_address to return zero let protocol = PROTOCOL_DB - .get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID) + .get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID.into_inner()) .expect("Failed to get FVB protocol"); let protocol = protocol as *mut firmware_volume_block::Protocol; + // SAFETY: protocol was retrieved from PROTOCOL_DB and remains valid for this test scope. unsafe { &mut *protocol }.get_physical_address = get_physical_address2; let test_guid = r_efi::efi::Guid::from_fields( @@ -1615,6 +1663,7 @@ mod tests { "Should return false when the address is zero" ); + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. 
let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; }); } @@ -1633,6 +1682,7 @@ mod tests { static CORE: MockCore = MockCore::new(NullSectionExtractor::new()); CORE.override_instance(); + // SAFETY: fv_raw is leaked for the duration of this test scope. let handle = unsafe { CORE.pi_dispatcher.install_firmware_volume(fv_raw.expose_provenance() as u64, None).unwrap() }; @@ -1643,12 +1693,14 @@ mod tests { // Patch get_physical_address to return the invalid FV address let protocol = PROTOCOL_DB - .get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID) + .get_interface_for_handle(handle, firmware_volume_block::PROTOCOL_GUID.into_inner()) .expect("Failed to get FVB protocol"); let protocol = protocol as *mut firmware_volume_block::Protocol; + // SAFETY: protocol was retrieved from PROTOCOL_DB and remains valid for this test scope. unsafe { &mut *protocol }.get_physical_address = get_physical_address3; // Set to the address of the invalid FV data + // SAFETY: Test-only mutable static is used under the global lock. unsafe { GET_PHYSICAL_ADDRESS3_VALUE = invalid_fv_raw.expose_provenance() as u64 }; let test_guid = r_efi::efi::Guid::from_fields( @@ -1664,9 +1716,12 @@ mod tests { "Should return false when volume parsing fails" ); + // SAFETY: Reset the test-only mutable static under the global lock. unsafe { GET_PHYSICAL_ADDRESS3_VALUE = 0 }; + // SAFETY: fv_raw was created from Box::into_raw and is dropped only once here. let _dropped_fv = unsafe { Box::from_raw(fv_raw) }; + // SAFETY: invalid_fv_raw was created from Box::into_raw and is dropped only once here. 
let _dropped_invalid_fv = unsafe { Box::from_raw(invalid_fv_raw) }; }); } diff --git a/patina_dxe_core/src/pi_dispatcher/debug_image_info_table.rs b/patina_dxe_core/src/pi_dispatcher/debug_image_info_table.rs index 50d965ebd..2e5eda6f8 100644 --- a/patina_dxe_core/src/pi_dispatcher/debug_image_info_table.rs +++ b/patina_dxe_core/src/pi_dispatcher/debug_image_info_table.rs @@ -214,6 +214,7 @@ impl DebugImageInfoData { let last = self.len() - 1; if index != last { + // SAFETY: data pointers are within allocated table bounds and non-overlapping for a single element copy. unsafe { ptr::copy_nonoverlapping(data.add(last), data.add(index), 1) }; } @@ -269,8 +270,8 @@ impl Drop for DebugImageInfoData { // Deallocate the data if self.capacity > 0 { let layout = Layout::array::(self.capacity).unwrap(); + // SAFETY: Invariants of this struct ensure that `data` was allocated with this layout. unsafe { - // SAFETY: Invariants of this struct ensure that `data` was allocated with this exact layout. alloc::alloc::dealloc(self.table_mut().cast::(), layout); } } diff --git a/patina_dxe_core/src/pi_dispatcher/fv.rs b/patina_dxe_core/src/pi_dispatcher/fv.rs index 221efa407..5e5cc3ff5 100644 --- a/patina_dxe_core/src/pi_dispatcher/fv.rs +++ b/patina_dxe_core/src/pi_dispatcher/fv.rs @@ -286,7 +286,7 @@ impl FvProtocolData

{ file.fv_attributes() }; - Ok((file.name(), attributes, file.data().len(), file.file_type_raw())) + Ok((file.name().into_inner(), attributes, file.data().len(), file.file_type_raw())) } fn new_fvb_protocol(parent_handle: Option) -> Box { @@ -336,7 +336,7 @@ impl FvProtocolData

{ // install the protocol and return status core_install_protocol_interface( handle, - pi::protocols::firmware_volume_block::PROTOCOL_GUID, + pi::protocols::firmware_volume_block::PROTOCOL_GUID.into_inner(), protocol_ptr.as_ptr(), ) } @@ -358,7 +358,11 @@ impl FvProtocolData

{ self.fv_metadata.insert(protocol_ptr.addr(), metadata); // install the protocol and return status - core_install_protocol_interface(handle, pi::protocols::firmware_volume::PROTOCOL_GUID, protocol_ptr.as_ptr()) + core_install_protocol_interface( + handle, + pi::protocols::firmware_volume::PROTOCOL_GUID.into_inner(), + protocol_ptr.as_ptr(), + ) } /// Installs both the FVB and FV protocols for a firmware volume at the specified base address. @@ -410,7 +414,7 @@ impl FvProtocolData

{ let device_path_ptr = match fv.fv_name() { Some(fv_name) => { // Construct FvPiWgDevicePath - let device_path = FvPiWgDevicePath::new_fv(fv_name); + let device_path = FvPiWgDevicePath::new_fv(fv_name.into_inner()); Box::into_raw(Box::new(device_path)) as *mut c_void } None => { @@ -630,6 +634,8 @@ impl FvProtocolData

{ // SAFETY: caller must provide valid pointers for buffer_size and name_guid. They are null-checked above. let local_buffer_size = unsafe { buffer_size.read_unaligned() }; // SAFETY: caller must provide valid pointers for buffer_size and name_guid. They are null-checked above. + // SAFETY: name_guid is checked to be non-null above. The caller must ensure + // that it points to a valid GUID (as per the C interface). let name = unsafe { name_guid.read_unaligned() }; let this = Self::instance(); @@ -742,6 +748,8 @@ impl FvProtocolData

{ // SAFETY: null-checks are at the start of the routine, but caller is required to guarantee that buffer_size and // buffer are valid. let mut local_buffer_size = unsafe { buffer_size.read_unaligned() }; + // SAFETY: null-checks are at the start of the routine, but caller is required to guarantee that buffer_size and + // buffer are valid (as per the C interface). let mut local_buffer_ptr = unsafe { buffer.read_unaligned() }; if local_buffer_ptr.is_null() { @@ -924,8 +932,8 @@ mod tests { header, base_address: 0, length: 0x8000, - fv_name: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), - file_name: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), + fv_name: patina::BinaryGuid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), + file_name: patina::BinaryGuid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), } } fn gen_firmware_volume() -> hob::FirmwareVolume { @@ -1795,6 +1803,7 @@ mod tests { test_support::with_global_lock(|| { static CORE: MockCore = MockCore::new(CompositeSectionExtractor::new()); CORE.override_instance(); + // SAFETY: Initializes the test GCD state for this test scope only. unsafe { test_support::init_test_gcd(None) }; let fv_interface = MockProtocolData::new_fv_protocol(parent_handle); diff --git a/patina_dxe_core/src/pi_dispatcher/image.rs b/patina_dxe_core/src/pi_dispatcher/image.rs index 5bc2b41e9..aa0a30cb9 100644 --- a/patina_dxe_core/src/pi_dispatcher/image.rs +++ b/patina_dxe_core/src/pi_dispatcher/image.rs @@ -137,6 +137,7 @@ impl ImageStack { } } +// SAFETY: ImageStack provides a stable, owned stack buffer with valid base/limit pointers. unsafe impl Stack for ImageStack { fn base(&self) -> StackPointer { //stack grows downward, so "base" is the highest address, i.e. the ptr + size. @@ -308,6 +309,7 @@ impl PrivateImageData { )?; // update the entry point. Transmute is required here to cast the raw function address to the ImageEntryPoint function pointer type. 
+ // SAFETY: Entry point is computed from a validated PE image base and entry offset. self.entry_point = unsafe { transmute:: efi::Status>( physical_addr + self.pe_info.entry_point_offset, @@ -684,7 +686,9 @@ impl ImageData { // ImageData is accessed through a mutex guard, so it is safe to // mark it sync/send. +// SAFETY: ImageData is only accessed through the image_data mutex. unsafe impl Sync for ImageData {} +// SAFETY: ImageData is only accessed through the image_data mutex. unsafe impl Send for ImageData {} impl super::PiDispatcher

{ @@ -826,6 +830,7 @@ impl super::PiDispatcher

{ if source_size == 0 { return efi::Status::LOAD_ERROR; } + // SAFETY: source_buffer/source_size are provided by the caller and validated for non-null/size. Some(unsafe { from_raw_parts(source_buffer as *const u8, source_size) }) }; @@ -919,6 +924,7 @@ impl super::PiDispatcher

{ // will try to use unwind to clean up the co-routine stack (i.e. "drop" any // live objects). This unwind support requires std and will panic if // executed. + // SAFETY: force_reset prevents unwinding a suspended coroutine with a custom stack. unsafe { coroutine.force_reset() }; self.image_data.lock().current_running_image = previous_image; @@ -1103,6 +1109,7 @@ impl super::PiDispatcher

{ // safety note: this assumes that the top of the image_start_contexts stack // is the currently running image. if let Some(yielder) = private_data.image_start_contexts.pop() { + // SAFETY: yielder pointer is created and stored by start_image for the current context. let yielder = unsafe { &*yielder }; drop(private_data); @@ -1275,6 +1282,7 @@ fn core_load_pe_image( } fn get_file_guid_from_device_path(path: *mut efi::protocols::device_path::Protocol) -> Result { + // SAFETY: path is validated by the caller and must point to a valid device path structure. let mut walker = unsafe { DevicePathWalker::new(path) }; let file_path_node = walker.next().ok_or(EfiError::InvalidParameter)?; if file_path_node.header().r#type != efi::protocols::device_path::TYPE_MEDIA @@ -1290,18 +1298,20 @@ fn get_file_buffer_from_fw( ) -> Result<(Vec, efi::Handle), EfiError> { // Locate the handles to a device on the file_path that supports the firmware volume protocol let (remaining_file_path, handle) = - core_locate_device_path(pi::protocols::firmware_volume::PROTOCOL_GUID, file_path)?; + core_locate_device_path(pi::protocols::firmware_volume::PROTOCOL_GUID.into_inner(), file_path)?; // For FwVol File system there is only a single file name that is a GUID. let fv_name_guid = get_file_guid_from_device_path(remaining_file_path)?; // Get the firmware volume protocol - let fv_ptr = PROTOCOL_DB.get_interface_for_handle(handle, pi::protocols::firmware_volume::PROTOCOL_GUID)? + let fv_ptr = PROTOCOL_DB + .get_interface_for_handle(handle, pi::protocols::firmware_volume::PROTOCOL_GUID.into_inner())? as *mut pi::protocols::firmware_volume::Protocol; if fv_ptr.is_null() { debug_assert!(!fv_ptr.is_null(), "ERROR: get_interface_for_handle returned NULL ptr for FirmwareVolume!"); return Err(EfiError::InvalidParameter); } + // SAFETY: fv_ptr is non-null and points to a valid firmware volume protocol. 
let fw_vol = unsafe { fv_ptr.as_ref().unwrap() }; // Read image from the firmware file @@ -1322,6 +1332,7 @@ fn get_file_buffer_from_fw( EfiError::status_to_result(status)?; + // SAFETY: buffer/buffer_size are returned by read_section and are valid for that length. let section_slice = unsafe { slice::from_raw_parts(buffer, buffer_size) }; Ok((section_slice.to_vec(), handle)) } @@ -1334,6 +1345,7 @@ fn get_file_buffer_from_sfs( let mut file = SimpleFile::open_volume(handle)?; + // SAFETY: remaining_file_path is returned by core_locate_device_path and is a valid device path. for node in unsafe { DevicePathWalker::new(remaining_file_path) } { match node.header().r#type { efi::protocols::device_path::TYPE_MEDIA @@ -1379,6 +1391,7 @@ fn get_file_buffer_from_load_protocol( let (remaining_file_path, handle) = core_locate_device_path(protocol, file_path)?; let load_file = PROTOCOL_DB.get_interface_for_handle(handle, protocol)?; + // SAFETY: load_file is obtained from the protocol database and is a valid load_file protocol pointer. let load_file = unsafe { (load_file as *mut efi::protocols::load_file::Protocol).as_mut().ok_or(EfiError::Unsupported)? }; @@ -1418,8 +1431,10 @@ fn authenticate_image( from_fv: bool, authentication_status: u32, ) -> Result<(), EfiError> { + // SAFETY: Checks locate_protocol return value to determine if pointer is valid. as_ref() is used for shared access + // which will also check if the pointer is null before allowing access. let security2_protocol = unsafe { - match PROTOCOL_DB.locate_protocol(pi::protocols::security2::PROTOCOL_GUID) { + match PROTOCOL_DB.locate_protocol(pi::protocols::security2::PROTOCOL_GUID.into_inner()) { Ok(protocol) => (protocol as *mut pi::protocols::security2::Protocol).as_ref(), //If security protocol is not located, then assume it has not yet been produced and implicitly trust the //Firmware Volume. 
@@ -1427,8 +1442,10 @@ fn authenticate_image( } }; + // SAFETY: Checks locate_protocol return value to determine if pointer is valid. as_ref() is used for shared access + // which will also check if the pointer is null before allowing access. let security_protocol = unsafe { - match PROTOCOL_DB.locate_protocol(pi::protocols::security::PROTOCOL_GUID) { + match PROTOCOL_DB.locate_protocol(pi::protocols::security::PROTOCOL_GUID.into_inner()) { Ok(protocol) => (protocol as *mut pi::protocols::security::Protocol).as_ref(), //If security protocol is not located, then assume it has not yet been produced and implicitly trust the //Firmware Volume. @@ -1839,7 +1856,7 @@ mod tests { PROTOCOL_DB .install_protocol_interface( None, - pi::protocols::security::PROTOCOL_GUID, + pi::protocols::security::PROTOCOL_GUID.into_inner(), &security_protocol as *const _ as *mut _, ) .unwrap(); @@ -1890,7 +1907,7 @@ mod tests { PROTOCOL_DB .install_protocol_interface( None, - pi::protocols::security::PROTOCOL_GUID, + pi::protocols::security::PROTOCOL_GUID.into_inner(), &security_protocol as *const _ as *mut _, ) .unwrap(); @@ -1919,7 +1936,7 @@ mod tests { PROTOCOL_DB .install_protocol_interface( None, - pi::protocols::security2::PROTOCOL_GUID, + pi::protocols::security2::PROTOCOL_GUID.into_inner(), &security2_protocol as *const _ as *mut _, ) .unwrap(); @@ -1970,7 +1987,7 @@ mod tests { PROTOCOL_DB .install_protocol_interface( None, - pi::protocols::security2::PROTOCOL_GUID, + pi::protocols::security2::PROTOCOL_GUID.into_inner(), &security2_protocol as *const _ as *mut _, ) .unwrap(); @@ -2023,7 +2040,7 @@ mod tests { PROTOCOL_DB .install_protocol_interface( None, - pi::protocols::security2::PROTOCOL_GUID, + pi::protocols::security2::PROTOCOL_GUID.into_inner(), &security2_protocol as *const _ as *mut _, ) .unwrap(); @@ -2069,7 +2086,7 @@ mod tests { PROTOCOL_DB .install_protocol_interface( None, - pi::protocols::security2::PROTOCOL_GUID, + 
pi::protocols::security2::PROTOCOL_GUID.into_inner(), &security2_protocol as *const _ as *mut _, ) .unwrap(); @@ -2630,11 +2647,12 @@ mod tests { image_info.image_size = 0x2000; // Manually construct PrivateImageData with minimal required fields - // SAFETY: Allocating memory for fake image buffer to construct test data const LEN: usize = 0x2000; + // SAFETY: Allocate a page-aligned test buffer and treat it as a raw image backing store. let fake_buffer = unsafe { alloc::alloc::alloc(alloc::alloc::Layout::from_size_align(LEN, 0x1000).unwrap()) }; + // SAFETY: fake_buffer points to LEN bytes we just allocated and is valid for mutable slice creation. let slice = unsafe { core::slice::from_raw_parts_mut(fake_buffer, LEN) }; let bytes = super::Buffer::Borrowed(slice); @@ -2697,6 +2715,7 @@ mod tests { let result = test_support::with_global_lock(|| { // SAFETY: These test initialization functions require unsafe because they // manipulate global state (GCD, protocol DB, system table) + // SAFETY: Test-only initialization of global tables happens under the global lock. unsafe { test_support::init_test_gcd(None); test_support::init_test_protocol_db(); @@ -3171,6 +3190,7 @@ mod tests { let child_device_path = device_path_from_string(String::from("PCI(0,1C)/PCI(0,0)/EFI/BOOT/BOOT_X64.EFI/END")); + // SAFETY: Test-only initialization of global tables happens under the global lock. unsafe { test_support::init_test_gcd(None); test_support::init_test_protocol_db(); @@ -3200,6 +3220,7 @@ mod tests { // Validate the file path was set correctly let (_, len) = device_path_node_count(private_info.image_info.file_path).unwrap(); + // SAFETY: file_path points to a valid device path buffer of length `len` per device_path_node_count. 
let bytes = unsafe { core::slice::from_raw_parts(private_info.image_info.file_path as *const u8, len) }; assert_eq!(bytes, child_device_path.as_ref()); @@ -3207,6 +3228,7 @@ mod tests { let (_, len) = device_path_node_count(private_info.get_file_path() as *mut efi::protocols::device_path::Protocol) .unwrap(); + // SAFETY: get_file_path returns a valid device path pointer with length `len` per device_path_node_count. let bytes = unsafe { core::slice::from_raw_parts(private_info.get_file_path() as *const u8, len) }; assert_eq!(bytes, child_device_path.as_ref()); }) @@ -3224,6 +3246,7 @@ mod tests { device_path_from_string(String::from("PCI(0,1C)/PCI(0,0)/EFI/BOOT/BOOT_X64.EFI/END")); let child_filename = device_path_from_string(String::from("EFI/BOOT/BOOT_X64.EFI/END")); + // SAFETY: Test-only initialization of global tables happens under the global lock. unsafe { test_support::init_test_gcd(None); test_support::init_test_protocol_db(); @@ -3262,6 +3285,7 @@ mod tests { // Validate the file path was set correctly let (_, len) = device_path_node_count(private_info.image_info.file_path).unwrap(); + // SAFETY: file_path points to a valid device path buffer of length `len` per device_path_node_count. let bytes = unsafe { core::slice::from_raw_parts(private_info.image_info.file_path as *const u8, len) }; // IMPORTANT: This is validating that we cut off the parent device path correctly. @@ -3271,6 +3295,7 @@ mod tests { let (_, len) = device_path_node_count(private_info.get_file_path() as *mut efi::protocols::device_path::Protocol) .unwrap(); + // SAFETY: get_file_path returns a valid device path pointer with length `len` per device_path_node_count. let bytes = unsafe { core::slice::from_raw_parts(private_info.get_file_path() as *const u8, len) }; // IMPORTANT: This should always contain the full path. @@ -3319,12 +3344,14 @@ mod tests { }; let mut hobs = Vec::new(); + // SAFETY: Taking a byte view of a stack-allocated HOB struct for serialization into the test HOB list. 
hobs.extend_from_slice(unsafe { core::slice::from_raw_parts( &ma_hob as *const MemoryAllocationModule as *const u8, core::mem::size_of::(), ) }); + // SAFETY: Taking a byte view of a stack-allocated HOB header for serialization into the test HOB list. hobs.extend_from_slice(unsafe { core::slice::from_raw_parts( &end_hob as *const patina::pi::hob::header::Hob as *const u8, diff --git a/patina_dxe_core/src/protocol_db.rs b/patina_dxe_core/src/protocol_db.rs index 75133804b..5fef07d2f 100644 --- a/patina_dxe_core/src/protocol_db.rs +++ b/patina_dxe_core/src/protocol_db.rs @@ -874,7 +874,9 @@ impl SpinLockedProtocolDb { } } +// SAFETY: SpinLockedProtocolDb enforces mutual exclusion and interior mutability with locking. unsafe impl Send for SpinLockedProtocolDb {} +// SAFETY: SpinLockedProtocolDb enforces mutual exclusion and interior mutability with locking. unsafe impl Sync for SpinLockedProtocolDb {} #[cfg(test)] diff --git a/patina_dxe_core/src/protocols.rs b/patina_dxe_core/src/protocols.rs index 290eba667..9693e9c4f 100644 --- a/patina_dxe_core/src/protocols.rs +++ b/patina_dxe_core/src/protocols.rs @@ -61,6 +61,7 @@ extern "efiapi" fn install_protocol_interface( } // SAFETY: Caller must ensure that handle and protocol are valid pointers. They are null-checked above. let caller_handle = unsafe { handle.read_unaligned() }; + // SAFETY: Caller must ensure that protocol is a valid pointer. It is checked for null above. let caller_protocol = unsafe { protocol.read_unaligned() }; let caller_handle = if caller_handle.is_null() { None } else { Some(caller_handle) }; @@ -70,6 +71,7 @@ extern "efiapi" fn install_protocol_interface( Ok(handle) => handle, }; + // SAFETY: Caller must ensure that handle is a valid pointer. It is checked for null above. 
unsafe { *handle = installed_handle }; efi::Status::SUCCESS @@ -105,6 +107,8 @@ pub fn core_uninstall_protocol_interface( for usage in usages { if (usage.attributes & efi::OPEN_PROTOCOL_BY_DRIVER) != 0 { debug_assert!(usage.agent_handle.is_some()); + // SAFETY: Handles are validated by the protocol database, and controller disconnect is required + // for cleanup. unsafe { usage_close_status = core_disconnect_controller(handle, usage.agent_handle, None); if usage_close_status.is_ok() { @@ -153,6 +157,7 @@ pub fn core_uninstall_protocol_interface( } if usage_close_status.is_err() || unclosed_usages { + // SAFETY: Handle is validated above and reconnect is best-effort to restore state. unsafe { let _result = core_connect_controller(handle, Vec::new(), None, true); } @@ -171,7 +176,7 @@ extern "efiapi" fn uninstall_protocol_interface( return efi::Status::INVALID_PARAMETER; } - // SAFETY: Caller must ensure that protocol is a valid pointer. It is null-checked above. + // SAFETY: Caller must ensure that protocol is a valid pointer. It is checked for null above. let caller_protocol = unsafe { protocol.read_unaligned() }; core_uninstall_protocol_interface(handle, caller_protocol, interface) @@ -222,7 +227,7 @@ extern "efiapi" fn reinstall_protocol_interface( } } - // SAFETY: Caller must ensure that protocol is a valid pointer. It is null-checked above. + // SAFETY: Caller must ensure that protocol is a valid pointer. It is checked for null above. let protocol = unsafe { protocol.read_unaligned() }; // Call install to install the new interface and trigger any notifies @@ -238,6 +243,7 @@ extern "efiapi" fn reinstall_protocol_interface( // Connect controller so agents that were forced to release old_interface can now consume new_interface. Error // status is ignored. + // SAFETY: handle is valid and reconnect is best-effort to restore state after reinstall. 
unsafe { let _ = core_connect_controller(handle, Vec::new(), None, true); } @@ -253,10 +259,14 @@ extern "efiapi" fn register_protocol_notify( if protocol.is_null() || registration.is_null() || !EVENT_DB.is_valid(event) { return efi::Status::INVALID_PARAMETER; } - // SAFETY: Caller must ensure that protocol is a valid pointer. It is null-checked above. - match PROTOCOL_DB.register_protocol_notify(unsafe { protocol.read_unaligned() }, event) { + let protocol_guid = { + // SAFETY: Caller must ensure that protocol is a valid pointer. It is checked for null above. + unsafe { protocol.read_unaligned() } + }; + match PROTOCOL_DB.register_protocol_notify(protocol_guid, event) { Err(err) => err.into(), Ok(new_registration) => { + // SAFETY: Caller must ensure that registration is a valid pointer. It is checked for null above. unsafe { *registration = new_registration }; efi::Status::SUCCESS } @@ -286,7 +296,7 @@ extern "efiapi" fn locate_handle( if protocol.is_null() { return efi::Status::INVALID_PARAMETER; } - // SAFETY: Caller must ensure that protocol is a valid pointer. It is null-checked above. + // SAFETY: Caller must ensure that protocol is a valid pointer. It is checked for null above. PROTOCOL_DB.locate_handles(Some(unsafe { protocol.read_unaligned() })) } _ => return efi::Status::INVALID_PARAMETER, @@ -303,9 +313,9 @@ extern "efiapi" fn locate_handle( } list.shrink_to_fit(); - // SAFETY: Caller must ensure that buffer_size is a valid pointer. It is null-checked above. + // SAFETY: Caller must ensure that buffer_size is a valid pointer. It is checked for null above. let input_size = unsafe { buffer_size.read_unaligned() }; - // SAFETY: Caller must ensure that buffer_size is a valid pointer. It is null-checked above. + // SAFETY: Caller must ensure that buffer_size is a valid pointer. It is checked for null above. 
unsafe { buffer_size.write_unaligned(list.len() * size_of::()); } @@ -316,7 +326,7 @@ extern "efiapi" fn locate_handle( return efi::Status::INVALID_PARAMETER; } - // Caller must ensure that handle_buffer is valid for writes of list.len() handles. It is null-checked above. + // SAFETY: Caller must ensure that handle_buffer is valid for writes of list.len() handles. It is checked for null above. unsafe { core::ptr::copy( list.as_ptr() as *const u8, @@ -357,7 +367,7 @@ extern "efiapi" fn open_protocol( return efi::Status::INVALID_PARAMETER; } - // SAFETY: Caller must ensure that protocol is a valid pointer. It is null-checked above. + // SAFETY: Caller must ensure that protocol is a valid pointer. It is checked for null above. let protocol = unsafe { protocol.read_unaligned() }; if interface.is_null() && attributes != efi::OPEN_PROTOCOL_TEST_PROTOCOL { @@ -451,14 +461,11 @@ extern "efiapi" fn close_protocol( } }; - match PROTOCOL_DB.remove_protocol_usage( - handle, - // SAFETY: Caller must ensure that protocol is a valid pointer. It is null-checked above. - unsafe { protocol.read_unaligned() }, - Some(agent_handle), - controller_handle, - None, - ) { + let protocol_guid = { + // SAFETY: Caller must ensure that protocol is a valid pointer. It is checked for null above. + unsafe { protocol.read_unaligned() } + }; + match PROTOCOL_DB.remove_protocol_usage(handle, protocol_guid, Some(agent_handle), controller_handle, None) { Err(err) => err.into(), Ok(_) => efi::Status::SUCCESS, } @@ -474,9 +481,12 @@ extern "efiapi" fn open_protocol_information( return efi::Status::INVALID_PARAMETER; } + let protocol_guid = { + // SAFETY: Caller must ensure that protocol is a valid pointer. It is checked for null above. + unsafe { protocol.read_unaligned() } + }; let mut open_info: Vec = - // SAFETY: Caller must ensure that protocol is a valid pointer. It is null-checked above. 
- match PROTOCOL_DB.get_open_protocol_information_by_protocol(handle, unsafe { protocol.read_unaligned() }) { + match PROTOCOL_DB.get_open_protocol_information_by_protocol(handle, protocol_guid) { Err(err) => return err.into(), Ok(info) => info.into_iter().map(efi::OpenProtocolInformationEntry::from).collect(), }; @@ -519,18 +529,24 @@ unsafe extern "C" fn install_multiple_protocol_interfaces(handle: *mut efi::Hand let mut interfaces_to_install = Vec::new(); loop { //consume the protocol, break the loop if it is null. + // SAFETY: Variadic argument list is controlled by the caller and accessed in order. let protocol: *mut efi::Guid = unsafe { args.arg() }; if protocol.is_null() { break; } + // SAFETY: Variadic argument list is controlled by the caller and accessed in order. let interface: *mut c_void = unsafe { args.arg() }; + // SAFETY: protocol is checked for null above before dereferencing. if unsafe { *protocol } == efi::protocols::device_path::PROTOCOL_GUID && let Ok((remaining_path, handle)) = core_locate_device_path( efi::protocols::device_path::PROTOCOL_GUID, interface as *const efi::protocols::device_path::Protocol, ) && PROTOCOL_DB.validate_handle(handle).is_ok() - && unsafe { is_device_path_end(remaining_path) } + && { + // SAFETY: remaining_path is returned from core_locate_device_path and is a valid device path pointer. + unsafe { is_device_path_end(remaining_path) } + } { return efi::Status::ALREADY_STARTED; } @@ -545,7 +561,9 @@ unsafe extern "C" fn install_multiple_protocol_interfaces(handle: *mut efi::Hand err => { //on error, attempt to uninstall all the previously installed interfaces. best-effort, errors are ignored. for (protocol, interface) in interfaces_to_uninstall_on_error { - let _ = uninstall_protocol_interface(unsafe { *handle }, protocol, interface); + // SAFETY: handle is validated for null above. 
+ let handle_value = unsafe { *handle }; + let _ = uninstall_protocol_interface(handle_value, protocol, interface); } return err; } @@ -562,10 +580,12 @@ unsafe extern "C" fn uninstall_multiple_protocol_interfaces(handle: efi::Handle, let mut interfaces_to_uninstall = Vec::new(); loop { + // SAFETY: Variadic argument list is controlled by the caller and accessed in order. let protocol: *mut efi::Guid = unsafe { args.arg() }; if protocol.is_null() { break; } + // SAFETY: Variadic argument list is controlled by the caller and accessed in order. let interface: *mut c_void = unsafe { args.arg() }; interfaces_to_uninstall.push((protocol, interface)); } @@ -577,6 +597,7 @@ unsafe extern "C" fn uninstall_multiple_protocol_interfaces(handle: efi::Handle, _err => { //on error, attempt to re-install all the previously uninstall interfaces. best-effort, errors are ignored. for (protocol, interface) in interfaces_to_reinstall_on_error { + // SAFETY: protocol was checked for null when building interfaces_to_uninstall. let protocol = *(unsafe { protocol.as_mut().expect("previously null-checked pointer is null.") }); let _ = core_install_protocol_interface(Some(handle), protocol, interface); } @@ -669,7 +690,7 @@ extern "efiapi" fn locate_handle_buffer( if protocol.is_null() { return efi::Status::INVALID_PARAMETER; } - // SAFETY: Caller must ensure that protocol is a valid pointer. It is null-checked above. + // SAFETY: Caller must ensure that protocol is a valid pointer. It is checked for null above. unsafe { PROTOCOL_DB.locate_handles(Some(protocol.read_unaligned())) } } _ => return efi::Status::INVALID_PARAMETER, @@ -708,22 +729,30 @@ extern "efiapi" fn locate_protocol( if !registration.is_null() { if let Some(handle) = PROTOCOL_DB.next_handle_for_registration(registration) { - // SAFETY: Caller must ensure that protocol and interface are valid pointers. They are null-checked above. + let protocol_guid = { + // SAFETY: Caller must ensure that protocol is a valid pointer. 
It is checked for null above. + unsafe { protocol.read_unaligned() } + }; let i_face = PROTOCOL_DB - .get_interface_for_handle(handle, unsafe { protocol.read_unaligned() }) + .get_interface_for_handle(handle, protocol_guid) .expect("Protocol should exist on handle if it is returned for registration key."); + // SAFETY: Caller must ensure that interface is a valid pointer. It is checked for null above. unsafe { interface.write_unaligned(i_face) }; } else { return efi::Status::NOT_FOUND; } } else { - match PROTOCOL_DB.locate_protocol(unsafe { protocol.read_unaligned() }) { + let protocol_guid = { + // SAFETY: Caller must ensure that protocol is a valid pointer. It is checked for null above. + unsafe { protocol.read_unaligned() } + }; + match PROTOCOL_DB.locate_protocol(protocol_guid) { Err(err) => { - // SAFETY: Caller must ensure that interface is a valid pointer. It is null-checked above. + // SAFETY: Caller must ensure that interface is a valid pointer. It is checked for null above. unsafe { interface.write_unaligned(core::ptr::null_mut()) }; return err.into(); } - // SAFETY: Caller must ensure that interface is a valid pointer. It is null-checked above. + // SAFETY: Caller must ensure that interface is a valid pointer. It is checked for null above. Ok(i_face) => unsafe { interface.write_unaligned(i_face) }, } } @@ -753,7 +782,10 @@ pub fn core_locate_device_path( continue; } - let (remaining_path, matching_nodes) = match unsafe { remaining_device_path(temp_device_path, device_path) } { + let (remaining_path, matching_nodes) = match + // SAFETY: temp_device_path and device_path are validated before use and are device path pointers. 
+ unsafe { remaining_device_path(temp_device_path, device_path) } + { Some((remaining_path, matching_nodes)) => (remaining_path, matching_nodes as isize), None => continue, }; @@ -777,17 +809,26 @@ extern "efiapi" fn locate_device_path( device_path: *mut *mut r_efi::protocols::device_path::Protocol, device: *mut efi::Handle, ) -> efi::Status { - // SAFETY: Caller must ensure that protocol, device_path, and device are valid pointers. They are null-checked below. - if protocol.is_null() || device_path.is_null() || unsafe { device_path.read_unaligned() }.is_null() { + if protocol.is_null() || device_path.is_null() { return efi::Status::INVALID_PARAMETER; } - let (best_remaining_path, best_device) = - // SAFETY: Caller must ensure that protocol and device_path are valid pointers. They are null-checked above. - match core_locate_device_path(unsafe { protocol.read_unaligned() }, unsafe { device_path.read_unaligned() }) { - Err(err) => return err.into(), - Ok((path, device)) => (path, device), - }; + let current_device_path = { + // SAFETY: device_path is null-checked above. + unsafe { device_path.read_unaligned() } + }; + if current_device_path.is_null() { + return efi::Status::INVALID_PARAMETER; + } + + let protocol_guid = { + // SAFETY: protocol is null-checked above. + unsafe { protocol.read_unaligned() } + }; + let (best_remaining_path, best_device) = match core_locate_device_path(protocol_guid, current_device_path) { + Err(err) => return err.into(), + Ok((path, device)) => (path, device), + }; if device.is_null() { return efi::Status::INVALID_PARAMETER; } @@ -809,12 +850,14 @@ pub fn init_protocol_support(st: &mut EfiSystemTable) { //transmute here. Fixing it properly would require an upstream change in r_efi to pick up. There is also a bug in //the r_efi definition for uninstall_multiple_protocol_interfaces - per spec, the first argument is a handle, but //r_efi has it as *mut handle. + // SAFETY: Transmute bridges r_efi signature mismatch for variadic interface. 
ABI matches for efiapi/extern C. bs.install_multiple_protocol_interfaces = unsafe { let ptr = install_multiple_protocol_interfaces as *const (); core::mem::transmute::<*const (), extern "efiapi" fn(*mut *mut c_void, *mut c_void, *mut c_void) -> efi::Status>( ptr, ) }; + // SAFETY: Transmute bridges r_efi signature mismatch for variadic interface. ABI matches for efiapi/extern C. bs.uninstall_multiple_protocol_interfaces = unsafe { let ptr = uninstall_multiple_protocol_interfaces as *const (); core::mem::transmute::<*const (), extern "efiapi" fn(*mut c_void, *mut c_void, *mut c_void) -> efi::Status>(ptr) diff --git a/patina_dxe_core/src/runtime.rs b/patina_dxe_core/src/runtime.rs index 7f142493d..aa7a4f3c4 100644 --- a/patina_dxe_core/src/runtime.rs +++ b/patina_dxe_core/src/runtime.rs @@ -23,7 +23,9 @@ struct RuntimeData { runtime_events: LinkedList, } +// SAFETY: RuntimeData is only accessed behind the RUNTIME_DATA mutex. unsafe impl Sync for RuntimeData {} +// SAFETY: RuntimeData is only accessed behind the RUNTIME_DATA mutex. unsafe impl Send for RuntimeData {} static RUNTIME_DATA: Mutex = Mutex::new(RuntimeData::new()); @@ -75,20 +77,22 @@ pub fn init_runtime_support() { .expect("Failed to create runtime protocol installation callback."); PROTOCOL_DB - .register_protocol_notify(runtime::PROTOCOL_GUID, event) + .register_protocol_notify(runtime::PROTOCOL_GUID.into_inner(), event) .expect("Failed to register protocol notify on runtime protocol."); } pub fn finalize_runtime_support() { let data = RUNTIME_DATA.lock(); if !data.runtime_arch_ptr.is_null() { + // SAFETY: runtime_arch_ptr is set from the runtime protocol and checked for null. unsafe { (*data.runtime_arch_ptr).at_runtime.store(true, core::sync::atomic::Ordering::Relaxed) }; } } extern "efiapi" fn runtime_protocol_notify(_event: efi::Event, _context: *mut c_void) { log::info!("Runtime protocol installed. 
Setting up pointers."); - let ptr = PROTOCOL_DB.locate_protocol(runtime::PROTOCOL_GUID).expect("Failed to locate runtime protocol."); + let ptr = + PROTOCOL_DB.locate_protocol(runtime::PROTOCOL_GUID.into_inner()).expect("Failed to locate runtime protocol."); let mut data = RUNTIME_DATA.lock(); data.runtime_arch_ptr = ptr as *mut runtime::Protocol; data.update_protocol_lists(); diff --git a/patina_dxe_core/src/systemtables.rs b/patina_dxe_core/src/systemtables.rs index 1f651e3e0..6032530ca 100644 --- a/patina_dxe_core/src/systemtables.rs +++ b/patina_dxe_core/src/systemtables.rs @@ -47,6 +47,7 @@ impl EfiRuntimeServicesTable { let mut table_copy = unsafe { self.runtime_services.read() }; table_copy.hdr.crc32 = 0; + // SAFETY: table_copy is a valid, initialized RuntimeServices value on the stack. let tbl_slice = unsafe { from_raw_parts(&table_copy as *const _ as *const u8, size_of::()) }; table_copy.hdr.crc32 = crc32fast::hash(tbl_slice); @@ -247,6 +248,7 @@ impl EfiBootServicesTable { let mut table_copy = unsafe { self.boot_services.read() }; table_copy.hdr.crc32 = 0; + // SAFETY: table_copy is a valid, initialized BootServices value on the stack. let tbl_slice = unsafe { from_raw_parts(&table_copy as *const _ as *const u8, size_of::()) }; table_copy.hdr.crc32 = crc32fast::hash(tbl_slice); @@ -714,6 +716,8 @@ impl EfiSystemTable { /// # Safety /// The pointer must be valid and point to a properly initialized efi::SystemTable structure. pub unsafe fn from_raw_pointer(ptr: *mut efi::SystemTable) -> Self { + // SAFETY: Caller guarantees ptr is a valid SystemTable pointer with initialized pointers + // per the function safety contract. unsafe { if ptr.is_null() { panic!("Attempted to create EfiSystemTable with null System Table pointer"); @@ -734,6 +738,7 @@ impl EfiSystemTable { let mut table_copy = unsafe { self.system_table.read() }; table_copy.hdr.crc32 = 0; + // SAFETY: table_copy is a valid, initialized SystemTable value on the stack. 
let st_slice = unsafe { from_raw_parts(&table_copy as *const _ as *const u8, size_of::()) }; table_copy.hdr.crc32 = crc32fast::hash(st_slice); diff --git a/patina_dxe_core/src/test_support.rs b/patina_dxe_core/src/test_support.rs index 4f8699ac7..ff6b7ba58 100644 --- a/patina_dxe_core/src/test_support.rs +++ b/patina_dxe_core/src/test_support.rs @@ -175,7 +175,9 @@ impl PatinaPageTable for MockPageTable { } } +// SAFETY: MockPageTable uses interior mutability for test-only state and is not shared across threads in tests. unsafe impl Send for MockPageTable {} +// SAFETY: MockPageTable's interior mutability is confined to test usage where concurrent access is controlled. unsafe impl Sync for MockPageTable {} impl Default for MockPageTable { @@ -392,7 +394,7 @@ pub(crate) fn build_test_hob_list(mem_size: u64) -> *const c_void { length: core::mem::size_of::() as u16, reserved: 0x00000000, }, - owner: efi::Guid::from_fields(0, 0, 0, 0, 0, &[0u8; 6]), + owner: patina::guids::ZERO, resource_type: hob::EFI_RESOURCE_SYSTEM_MEMORY, resource_attribute: hob::TESTED_MEMORY_ATTRIBUTES | hob::EFI_RESOURCE_ATTRIBUTE_WRITE_BACK_CACHEABLE, physical_start: mem_base + 0xE0000, @@ -408,7 +410,7 @@ pub(crate) fn build_test_hob_list(mem_size: u64) -> *const c_void { length: core::mem::size_of::() as u16, reserved: 0x00000000, }, - owner: efi::Guid::from_fields(0, 0, 0, 0, 0, &[0u8; 6]), + owner: patina::guids::ZERO, resource_type: hob::EFI_RESOURCE_SYSTEM_MEMORY, resource_attribute: hob::INITIALIZED_MEMORY_ATTRIBUTES | hob::EFI_RESOURCE_ATTRIBUTE_WRITE_BACK_CACHEABLE, physical_start: mem_base + 0x190000, @@ -424,7 +426,7 @@ pub(crate) fn build_test_hob_list(mem_size: u64) -> *const c_void { length: core::mem::size_of::() as u16, reserved: 0x00000000, }, - owner: efi::Guid::from_fields(0, 0, 0, 0, 0, &[0u8; 6]), + owner: patina::guids::ZERO, resource_type: hob::EFI_RESOURCE_MEMORY_MAPPED_IO, resource_attribute: hob::EFI_RESOURCE_ATTRIBUTE_PRESENT | hob::EFI_RESOURCE_ATTRIBUTE_INITIALIZED 
@@ -442,7 +444,7 @@ pub(crate) fn build_test_hob_list(mem_size: u64) -> *const c_void { length: core::mem::size_of::() as u16, reserved: 0x00000000, }, - owner: efi::Guid::from_fields(0, 0, 0, 0, 0, &[0u8; 6]), + owner: patina::guids::ZERO, resource_type: hob::EFI_RESOURCE_FIRMWARE_DEVICE, resource_attribute: hob::EFI_RESOURCE_ATTRIBUTE_PRESENT | hob::EFI_RESOURCE_ATTRIBUTE_INITIALIZED @@ -460,7 +462,7 @@ pub(crate) fn build_test_hob_list(mem_size: u64) -> *const c_void { length: core::mem::size_of::() as u16, reserved: 0x00000000, }, - owner: efi::Guid::from_fields(0, 0, 0, 0, 0, &[0u8; 6]), + owner: patina::guids::ZERO, resource_type: hob::EFI_RESOURCE_MEMORY_RESERVED, resource_attribute: hob::EFI_RESOURCE_ATTRIBUTE_PRESENT | hob::EFI_RESOURCE_ATTRIBUTE_INITIALIZED @@ -478,7 +480,7 @@ pub(crate) fn build_test_hob_list(mem_size: u64) -> *const c_void { length: core::mem::size_of::() as u16, reserved: 0x00000000, }, - owner: efi::Guid::from_fields(0, 0, 0, 0, 0, &[0u8; 6]), + owner: patina::guids::ZERO, resource_type: hob::EFI_RESOURCE_IO, resource_attribute: hob::EFI_RESOURCE_ATTRIBUTE_PRESENT | hob::EFI_RESOURCE_ATTRIBUTE_INITIALIZED, physical_start: 0x1000, @@ -494,7 +496,7 @@ pub(crate) fn build_test_hob_list(mem_size: u64) -> *const c_void { length: core::mem::size_of::() as u16, reserved: 0x00000000, }, - owner: efi::Guid::from_fields(0, 0, 0, 0, 0, &[0u8; 6]), + owner: patina::guids::ZERO, resource_type: hob::EFI_RESOURCE_IO_RESERVED, resource_attribute: hob::EFI_RESOURCE_ATTRIBUTE_PRESENT, physical_start: 0x0000, @@ -707,7 +709,7 @@ mod tests { length: core::mem::size_of::() as u16, reserved: 0x00000000, }, - owner: efi::Guid::from_fields(0, 0, 0, 0, 0, &[0u8; 6]), + owner: patina::guids::ZERO, resource_type: hob::EFI_RESOURCE_SYSTEM_MEMORY, resource_attribute: hob::TESTED_MEMORY_ATTRIBUTES, physical_start: mem_base + 0xE0000, diff --git a/patina_dxe_core/src/tpl_mutex.rs b/patina_dxe_core/src/tpl_mutex.rs index 46019b619..b202657ad 100644 --- 
a/patina_dxe_core/src/tpl_mutex.rs +++ b/patina_dxe_core/src/tpl_mutex.rs @@ -37,10 +37,14 @@ pub struct TplGuard<'a, T: ?Sized + 'a> { mutex: &'a TplMutex, } +// SAFETY: TplMutex enforces mutual exclusion with atomic lock and TPL elevation. unsafe impl Sync for TplMutex {} +// SAFETY: TplMutex enforces mutual exclusion with atomic lock and TPL elevation. unsafe impl Send for TplMutex {} +// SAFETY: TplGuard grants exclusive access to the protected data while the lock is held. unsafe impl Sync for TplGuard<'_, T> {} +// SAFETY: TplGuard grants exclusive access to the protected data while the lock is held. unsafe impl Send for TplGuard<'_, T> {} impl TplMutex { diff --git a/patina_mm_supervisor_core/Cargo.toml b/patina_mm_supervisor_core/Cargo.toml new file mode 100644 index 000000000..b2868e714 --- /dev/null +++ b/patina_mm_supervisor_core/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "patina_mm_supervisor_core" +resolver = "2" +version.workspace = true +repository.workspace = true +license.workspace = true +edition.workspace = true +readme = "README.md" +description = "A pure Rust implementation of the MM Supervisor Core for standalone MM mode environments." 
+ +# Metadata to tell docs.rs how to build the documentation when uploading +[package.metadata.docs.rs] +features = ["doc"] + +[dependencies] +linkme = { workspace = true } +log = { workspace = true } +patina = { workspace = true } +patina_internal_cpu = { workspace = true } +patina_internal_mm_common = { workspace = true } +patina_mm_policy = { workspace = true } +patina_paging = { workspace = true, features = [ + "mm_supv" +] } +patina_stacktrace = { workspace = true } +r-efi = { workspace = true } +spin = { workspace = true } +x86_64 = { workspace = true, features = [ + "instructions", + "abi_x86_interrupt", +] } + +[dev-dependencies] +mockall = { workspace = true } +serial_test = { workspace = true } + +[features] +default = [] +std = [] +doc = [] +save_state_intel = ["patina_internal_cpu/save_state_intel"] +save_state_amd = ["patina_internal_cpu/save_state_amd"] diff --git a/patina_mm_supervisor_core/README.md b/patina_mm_supervisor_core/README.md new file mode 100644 index 000000000..f6c4183c2 --- /dev/null +++ b/patina_mm_supervisor_core/README.md @@ -0,0 +1,177 @@ +# Patina MM Supervisor Core + +A pure Rust implementation of the MM Supervisor Core for standalone MM mode environments. + +## Overview + +This crate provides the core functionality for the MM (Management Mode) Supervisor in a standalone MM environment. 
It is designed to run on x64 systems where: + +- Page tables are already set up by the pre-MM phase +- All images are loaded and ready to execute +- The BSP (Bootstrap Processor) orchestrates incoming requests +- APs (Application Processors) wait in a holding pen, checking a mailbox for work + +## Memory Model + +**This is a core component that does not use heap allocation.** All data structures use fixed-size arrays with compile-time constants provided via const generics in the `PlatformInfo` trait: + +- `MAX_CPU_COUNT` - Maximum number of CPUs supported +- `MAX_HANDLERS` - Maximum number of request handlers + +This allows the entire supervisor to be instantiated as a `static` with no runtime allocation. + +## Building a PE/COFF Binary + +### Prerequisites + +1. Install the Rust UEFI target: + ```bash + rustup target add x86_64-unknown-uefi + ``` + +2. Ensure you have the nightly toolchain (required for `#![feature(...)]`): + ```bash + rustup override set nightly + ``` + +### Build Command + +Build the example MM Supervisor binary: + +```bash +cargo build --release --target x86_64-unknown-uefi --bin example_mm_supervisor +``` + +The output PE/COFF binary will be at: +``` +target/x86_64-unknown-uefi/release/example_mm_supervisor.efi +``` + +### Entry Point + +The MM Supervisor exports `MmSupervisorMain` as its entry point, matching the EDK2 convention: + +```rust +#[unsafe(export_name = "MmSupervisorMain")] +pub extern "efiapi" fn mm_supervisor_main(hob_list: *const c_void) -> ! { + SUPERVISOR.entry_point(hob_list) +} +``` + +The MM IPL (Initial Program Loader) calls this entry point on **all processors** after: +1. Loading the supervisor image into MMRAM +2. Setting up page tables +3. Constructing the HOB list with MMRAM ranges + +## Architecture + +### Entry Point Model + +The entry point is executed on all cores simultaneously: + +1. 
**BSP (Bootstrap Processor)**: + - First CPU to arrive (determined by atomic counter) + - Performs one-time initialization + - Sets up the request handling infrastructure + - Enters the main request serving loop + +2. **APs (Application Processors)**: + - All other CPUs + - Wait for BSP initialization to complete + - Enter a holding pen and poll mailboxes for commands + +### Mailbox System + +The mailbox system provides inter-processor communication: + +- Each AP has a dedicated mailbox (cache-line aligned to avoid false sharing) +- BSP sends commands to APs via mailboxes +- APs respond with results through the same mailbox +- Supports synchronization primitives for coordinated operations + +## Usage + +### Basic Platform Implementation + +```rust +#![no_std] +#![no_main] + +use core::{ffi::c_void, panic::PanicInfo}; +use patina_mm_supervisor_core::*; + +struct MyPlatform; + +impl CpuInfo for MyPlatform { + fn ap_poll_timeout_us() -> u64 { 1000 } +} + +impl PlatformInfo for MyPlatform { + type CpuInfo = Self; + const MAX_CPU_COUNT: usize = 8; + const MAX_HANDLERS: usize = 32; +} + +// Static instance - no heap allocation required +static SUPERVISOR: MmSupervisorCore = MmSupervisorCore::new(); + +#[panic_handler] +fn panic(_info: &PanicInfo) -> ! { + loop { core::hint::spin_loop(); } +} + +#[unsafe(export_name = "MmSupervisorMain")] +pub extern "efiapi" fn mm_supervisor_main(hob_list: *const c_void) -> ! 
{ + SUPERVISOR.entry_point(hob_list) +} +``` + +### Registering Request Handlers + +Handlers must be defined as static references: + +```rust +use patina_mm_supervisor_core::*; + +struct MyHandler; + +impl RequestHandler for MyHandler { + fn guid(&self) -> r_efi::efi::Guid { + // Your handler's GUID + r_efi::efi::Guid::from_fields(0x12345678, 0x1234, 0x5678, 0x12, 0x34, &[0; 6]) + } + + fn handle(&self, context: &mut RequestContext) -> RequestResult { + // Handle the request + RequestResult::Success + } + + fn name(&self) -> &'static str { + "MyHandler" + } +} + +static MY_HANDLER: MyHandler = MyHandler; + +// Register before calling entry_point, or during BSP initialization +SUPERVISOR.register_handler(&MY_HANDLER); +``` + +### Integration with MM IPL + +The MM IPL (from EDK2/MmSupervisorPkg) loads this binary and calls the entry point. The HOB list passed contains: + +- `gEfiMmPeiMmramMemoryReserveGuid` - MMRAM ranges +- `gMmCommBufferHobGuid` - Communication buffer information +- `gMmCommonRegionHobGuid` - Common memory regions +- FV HOBs for MM driver firmware volumes + +## Example Binary + +See [bin/example_mm_supervisor.rs](bin/example_mm_supervisor.rs) for a complete example platform implementation. + +## License + +Copyright (c) Microsoft Corporation. + +SPDX-License-Identifier: Apache-2.0 diff --git a/patina_mm_supervisor_core/bin/example_mm_supervisor.rs b/patina_mm_supervisor_core/bin/example_mm_supervisor.rs new file mode 100644 index 000000000..13270a9ef --- /dev/null +++ b/patina_mm_supervisor_core/bin/example_mm_supervisor.rs @@ -0,0 +1,156 @@ +//! Example MM Supervisor Binary for QEMU Q35 +//! +//! This is an example platform binary that demonstrates how to build a PE/COFF +//! MM Supervisor using the `patina_mm_supervisor_core` crate. +//! +//! ## Building +//! +//! Build with cargo for the UEFI target: +//! ```bash +//! cargo build --release --target x86_64-unknown-uefi --bin example_mm_supervisor +//! ``` +//! +//! ## Entry Point +//! +//! 
The MM Supervisor is handed off by the MM IPL (Initial Program Loader) after: +//! - Page tables are set up +//! - The supervisor image is loaded into MMRAM +//! - A HOB list is constructed with MMRAM ranges and other configuration +//! +//! The entry point `MmSupervisorMain` is called on ALL processors simultaneously. +//! The first processor to arrive becomes the BSP, others become APs. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! +#![cfg(all(target_os = "uefi", target_arch = "x86_64"))] +#![feature(generic_const_exprs)] +#![allow(incomplete_features)] +#![no_std] +#![no_main] + +use core::{ffi::c_void, panic::PanicInfo}; +use core::sync::atomic::AtomicBool; +use patina_mm_supervisor_core::*; +// use the the uart from patina +use patina::{log::Format, serial::uart::Uart16550}; +use patina::log::SerialLogger; +use patina_stacktrace::StackTrace; + +// ============================================================================= +// Platform Configuration +// ============================================================================= + +/// Platform configuration for the example MM Supervisor. +struct ExamplePlatform; + +/// ACPI PM Timer port on QEMU Q35 (from FADT X_PM_TIMER_BLOCK). +const PM_TIMER_PORT: u16 = 0x608; + +impl CpuInfo for ExamplePlatform { + /// Override the default AP polling timeout if needed. + fn ap_poll_timeout_us() -> u64 { + 1000 // 1ms polling interval + } + + fn perf_timer_frequency() -> Option { + // SAFETY: On Q35 the PM Timer is always available at PM_TIMER_PORT. + Some(unsafe { + patina::component::service::timer::x86_64::calibrate_tsc_from_pm_timer(PM_TIMER_PORT) + }) + } +} + +impl PlatformInfo for ExamplePlatform { + type CpuInfo = Self; + + /// Maximum number of CPUs this platform supports. + /// This should match your hardware/VM configuration. 
+ const MAX_CPU_COUNT: usize = 8; +} + +// ============================================================================= +// Static Supervisor Instance +// ============================================================================= + +/// Flag indicating that advanced logger initialization is complete. +static ADV_LOGGER_INIT_COMPLETE: AtomicBool = AtomicBool::new(false); + +/// The static MM Supervisor Core instance. +/// +/// This is instantiated at compile time with no heap allocation. +static SUPERVISOR: MmSupervisorCore = MmSupervisorCore::new(); + +static LOGGER: SerialLogger = SerialLogger::new( + Format::Standard, + &[ + ("goblin", log::LevelFilter::Off), + ("gcd_measure", log::LevelFilter::Off), + ("allocations", log::LevelFilter::Off), + ("efi_memory_map", log::LevelFilter::Off), + ("mm_comm", log::LevelFilter::Off), + ("sw_mmi", log::LevelFilter::Off), + ("patina_performance", log::LevelFilter::Off), + ], + log::LevelFilter::Info, + Uart16550::Io { base: 0x402 }, +); + +// ============================================================================= +// Panic Handler +// ============================================================================= + +#[panic_handler] +fn panic(info: &PanicInfo) -> ! { + log::error!("{}", info); + + if let Err(err) = unsafe { StackTrace::dump() } { + log::error!("StackTrace: {}", err); + } + + loop {} +} + +// ============================================================================= +// Entry Point +// ============================================================================= + +/// The MM Supervisor entry point. +/// +/// This function is called by the MM IPL on ALL processors after the supervisor +/// image has been loaded into MMRAM and page tables have been configured. 
+/// +/// # Arguments +/// +/// * `hob_list` - Pointer to the HOB (Hand-Off Block) list containing: +/// - MMRAM ranges +/// - Memory allocation information +/// - Platform configuration +/// - FV (Firmware Volume) locations for MM drivers +/// +/// # Entry Convention +/// +/// - All processors enter this function simultaneously +/// - The first processor to arrive becomes the BSP +/// - Other processors become APs and enter the holding pen +/// - The function never returns (diverging `-> !`) +/// +/// # Export Name +/// +/// The export name `MmSupervisorMain` matches the EDK2 convention for +/// standalone MM supervisor entry points. The MM IPL looks for this symbol +/// when loading the supervisor. +#[unsafe(export_name = "rust_main")] +pub extern "efiapi" fn mm_supervisor_main(cpu_index: usize, hob_list: *const c_void) { + + // Initialize the advanced logger on the first CPU to arrive (BSP) + if !ADV_LOGGER_INIT_COMPLETE.swap(true, core::sync::atomic::Ordering::SeqCst) { + log::set_logger(&LOGGER).map(|()| log::set_max_level(log::LevelFilter::Trace)).unwrap(); + } + + // The entry_point handles BSP vs AP routing internally + SUPERVISOR.entry_point(cpu_index, hob_list) +} diff --git a/patina_mm_supervisor_core/src/cpu.rs b/patina_mm_supervisor_core/src/cpu.rs new file mode 100644 index 000000000..773352d1c --- /dev/null +++ b/patina_mm_supervisor_core/src/cpu.rs @@ -0,0 +1,555 @@ +//! CPU Management Module +//! +//! This module provides CPU identification and management for the MM Supervisor Core. +//! It handles BSP/AP detection, CPU registration, and state tracking. +//! +//! ## Memory Model +//! +//! This module does not perform heap allocation. All structures use fixed-size arrays +//! with compile-time constants provided via const generics. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! 
+ +use core::sync::atomic::{AtomicU32, AtomicU8, Ordering}; +use core::arch::{x86_64, x86_64::CpuidResult}; + +/// A trait to be implemented by the platform to provide CPU-related configuration. +/// +/// ## Example +/// +/// ```rust,ignore +/// use patina_mm_supervisor_core::CpuInfo; +/// +/// struct ExamplePlatform; +/// +/// impl CpuInfo for ExamplePlatform { +/// fn ap_poll_timeout_us() -> u64 { 500 } +/// } +/// ``` +#[cfg_attr(test, mockall::automock)] +pub trait CpuInfo { + /// Returns the timeout in microseconds for AP mailbox polling. + /// + /// By default, this returns 1000 (1ms) which is a reasonable polling interval. + #[inline(always)] + fn ap_poll_timeout_us() -> u64 { + 1000 + } + + /// Returns the performance counter frequency in Hz, if known by the platform. + /// + /// For example, on QEMU Q35 the platform can calibrate the TSC frequency + /// from the ACPI PM Timer and return it here. + /// + /// If `None` is returned (the default), the supervisor will attempt + /// auto-detection via CPUID. + fn perf_timer_frequency() -> Option { + None + } +} + +/// The state of an Application Processor (AP). +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(u8)] +pub enum ApState { + /// The AP has not been registered yet. + NotPresent = 0, + /// The AP is in the holding pen, waiting for work. + InHoldingPen = 1, + /// The AP is currently executing a task. + Busy = 2, + /// The AP has been halted. + Halted = 3, +} + +impl From for ApState { + fn from(value: u8) -> Self { + match value { + 0 => ApState::NotPresent, + 1 => ApState::InHoldingPen, + 2 => ApState::Busy, + 3 => ApState::Halted, + _ => ApState::NotPresent, + } + } +} + +/// Information about a registered CPU stored in a fixed-size slot. +#[repr(C)] +struct CpuSlot { + /// The CPU's APIC ID. u32::MAX means slot is unused. + cpu_id: AtomicU32, + /// Whether this CPU is the BSP (0 = AP, 1 = BSP). + is_bsp: AtomicU8, + /// Current state (for APs only). + state: AtomicU8, + /// Padding for alignment. 
+ _padding: [u8; 2], +} + +impl CpuSlot { + /// Creates a new empty CPU slot. + const fn new() -> Self { + Self { + cpu_id: AtomicU32::new(u32::MAX), + is_bsp: AtomicU8::new(0), + state: AtomicU8::new(ApState::NotPresent as u8), + _padding: [0; 2], + } + } + + /// Checks if this slot is in use. + fn is_used(&self) -> bool { + self.cpu_id.load(Ordering::Acquire) != u32::MAX + } + + /// Gets the CPU ID if the slot is used. + fn get_cpu_id(&self) -> Option { + let id = self.cpu_id.load(Ordering::Acquire); + if id == u32::MAX { + None + } else { + Some(id) + } + } +} + +/// Manager for CPU-related operations. +/// +/// Tracks registered CPUs and their states using fixed-size arrays. +/// +/// ## Const Generic Parameters +/// +/// * `MAX_CPUS` - The maximum number of CPUs that can be registered. +pub struct CpuManager { + /// CPU slots - fixed size array. + slots: [CpuSlot; MAX_CPUS], + /// Number of CPUs currently registered. + registered_count: AtomicU32, + /// The APIC ID of the BSP. + bsp_id: AtomicU32, +} + +impl CpuManager { + /// Creates a new CPU manager. + /// + /// This is a const fn and performs no heap allocation. + pub const fn new() -> Self { + Self { + slots: [const { CpuSlot::new() }; MAX_CPUS], + registered_count: AtomicU32::new(0), + bsp_id: AtomicU32::new(u32::MAX), + } + } + + /// Registers a CPU with the manager. + /// + /// # Arguments + /// + /// * `cpu_id` - The CPU's APIC ID. + /// * `is_bsp` - Whether this CPU is the BSP. + /// + /// # Returns + /// + /// The index of the registered CPU, or `None` if max CPUs reached or already registered. 
+    pub fn register_cpu(&self, cpu_id: u32, is_bsp: bool) -> Option<usize> {
+ fn find_slot(&self, cpu_id: u32) -> Option { + for (index, slot) in self.slots.iter().enumerate() { + if slot.get_cpu_id() == Some(cpu_id) { + return Some(index); + } + } + None + } + + /// Finds the slot index for a given CPU ID (public wrapper). + pub fn find_cpu_index(&self, cpu_id: u32) -> Option { + self.find_slot(cpu_id) + } + + /// Gets the APIC ID of the CPU at the given slot index. + /// + /// Returns `None` if the index is out of range or the slot is unused. + pub fn get_cpu_id_by_index(&self, index: usize) -> Option { + if index >= MAX_CPUS { + return None; + } + self.slots[index].get_cpu_id() + } + + /// Gets the AP state by slot index. + /// + /// Returns `None` if the index is out of range or the slot is unused. + pub fn get_ap_state_by_index(&self, index: usize) -> Option { + if index >= MAX_CPUS { + return None; + } + let slot = &self.slots[index]; + if slot.is_used() { + Some(ApState::from(slot.state.load(Ordering::Acquire))) + } else { + None + } + } + + /// Gets the state of an AP. + pub fn get_ap_state(&self, cpu_id: u32) -> Option { + let index = self.find_slot(cpu_id)?; + Some(ApState::from(self.slots[index].state.load(Ordering::Acquire))) + } + + /// Sets the state of an AP. + pub fn set_ap_state(&self, cpu_id: u32, state: ApState) -> bool { + let index = match self.find_slot(cpu_id) { + Some(idx) => idx, + None => return false, + }; + + let slot = &self.slots[index]; + + // Don't allow changing BSP state + if slot.is_bsp.load(Ordering::Acquire) != 0 { + log::warn!("Attempted to change BSP state, ignoring"); + return false; + } + + slot.state.store(state as u8, Ordering::Release); + true + } + + /// Iterates over all registered AP IDs. + /// + /// Calls the provided closure for each registered AP. + pub fn for_each_ap(&self, mut f: F) { + for slot in &self.slots { + if let Some(cpu_id) = slot.get_cpu_id() { + if slot.is_bsp.load(Ordering::Acquire) == 0 { + f(cpu_id); + } + } + } + } + + /// Counts APs in a specific state. 
+ pub fn count_aps_in_state(&self, state: ApState) -> usize { + let mut count = 0; + for slot in &self.slots { + if slot.is_used() + && slot.is_bsp.load(Ordering::Acquire) == 0 + && slot.state.load(Ordering::Acquire) == state as u8 + { + count += 1; + } + } + count + } +} + +impl Default for CpuManager { + fn default() -> Self { + Self::new() + } +} + +/// Gets the current CPU's APIC ID. +/// +/// On x86_64, this reads the APIC ID from the Local APIC or CPUID. +#[cfg(target_arch = "x86_64")] +pub fn get_current_cpu_id() -> u32 { + // Use CPUID to get the initial APIC ID + // CPUID function 0x01, EBX[31:24] contains the initial APIC ID + + // SAFETY: CPUID is always available on x86_64 and reading it is safe. + let CpuidResult { ebx, .. } = x86_64::__cpuid(0x01); + let cpuid_result = (ebx >> 24) & 0xff; + cpuid_result +} + +/// Gets the current CPU's APIC ID (stub for non-x86_64). +#[cfg(not(target_arch = "x86_64"))] +pub fn get_current_cpu_id() -> u32 { + 0 +} + +// ============================================================================ +// BSP Detection via IA32_APIC_BASE MSR +// ============================================================================ + +/// MSR index for IA32_APIC_BASE. +const IA32_APIC_BASE_MSR_INDEX: u32 = 0x1B; + +/// BSP flag bit in IA32_APIC_BASE MSR (bit 8). +const IA32_APIC_BSP: u64 = 1 << 8; + +/// MSR index for SMM Base Address (SMBASE). +pub const IA32_MSR_SMBASE: u32 = 0x9E; + +/// Reads a Model-Specific Register (MSR) by index. +/// +/// # Safety +/// +/// The caller must ensure the MSR index is valid and readable on the current platform. +#[cfg(target_arch = "x86_64")] +pub unsafe fn read_msr(msr: u32) -> Result { + let lo: u32; + let hi: u32; + unsafe { + core::arch::asm!( + "rdmsr", + in("ecx") msr, + out("eax") lo, + out("edx") hi, + options(nomem, nostack), + ); + } + Ok(((hi as u64) << 32) | (lo as u64)) +} + +/// Reads a Model-Specific Register (stub for non-x86_64). 
+#[cfg(not(target_arch = "x86_64"))] +pub unsafe fn read_msr(_msr: u32) -> Result { + Err("rdmsr not supported on this architecture") +} + +/// Writes a 64-bit value to a Model-Specific Register (MSR). +/// +/// # Safety +/// +/// The caller must ensure the MSR index is valid and writable on the current platform. +#[cfg(target_arch = "x86_64")] +pub unsafe fn write_msr(msr: u32, value: u64) -> Result<(), &'static str> { + let lo = value as u32; + let hi = (value >> 32) as u32; + unsafe { + core::arch::asm!( + "wrmsr", + in("ecx") msr, + in("eax") lo, + in("edx") hi, + options(nomem, nostack), + ); + } + Ok(()) +} + +/// Writes a Model-Specific Register (stub for non-x86_64). +#[cfg(not(target_arch = "x86_64"))] +pub unsafe fn write_msr(_msr: u32, _value: u64) -> Result<(), &'static str> { + Err("wrmsr not supported on this architecture") +} + +/// Checks if the current processor is the Bootstrap Processor (BSP). +/// +/// This reads the IA32_APIC_BASE MSR and checks the BSP flag (bit 8). +/// The BSP flag is set by hardware during reset and indicates which +/// processor is the bootstrap processor. +/// +/// # Returns +/// +/// `true` if this is the BSP, `false` if this is an AP. +#[cfg(target_arch = "x86_64")] +pub fn is_bsp() -> bool { + // SAFETY: Reading the IA32_APIC_BASE MSR is safe on x86_64. + let apic_base_lo: u32; + let apic_base_hi: u32; + unsafe { + core::arch::asm!( + "rdmsr", + in("ecx") IA32_APIC_BASE_MSR_INDEX, + out("eax") apic_base_lo, + out("edx") apic_base_hi, + ); + } + + let apic_base = ((apic_base_hi as u64) << 32) | (apic_base_lo as u64); + (apic_base & IA32_APIC_BSP) != 0 +} + +/// Checks if the current processor is the BSP (stub for non-x86_64). 
+#[cfg(not(target_arch = "x86_64"))] +pub fn is_bsp() -> bool { + true // Assume BSP on non-x86_64 platforms +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_cpu_manager_creation() { + let manager: CpuManager<4> = CpuManager::new(); + assert_eq!(manager.registered_count(), 0); + assert!(manager.bsp_id().is_none()); + assert_eq!(manager.max_cpus(), 4); + } + + #[test] + fn test_cpu_manager_is_const() { + // Verify we can create a static instance + static _MANAGER: CpuManager<8> = CpuManager::new(); + } + + #[test] + fn test_cpu_registration() { + let manager: CpuManager<4> = CpuManager::new(); + + // Register BSP + let bsp_idx = manager.register_cpu(0, true); + assert_eq!(bsp_idx, Some(0)); + assert_eq!(manager.bsp_id(), Some(0)); + assert!(manager.is_bsp(0)); + + // Register APs + let ap1_idx = manager.register_cpu(1, false); + assert_eq!(ap1_idx, Some(1)); + assert!(!manager.is_bsp(1)); + + let ap2_idx = manager.register_cpu(2, false); + assert_eq!(ap2_idx, Some(2)); + + assert_eq!(manager.registered_count(), 3); + } + + #[test] + fn test_duplicate_registration() { + let manager: CpuManager<4> = CpuManager::new(); + + assert!(manager.register_cpu(1, false).is_some()); + assert!(manager.register_cpu(1, false).is_none()); // Should fail - duplicate + } + + #[test] + fn test_ap_state_management() { + let manager: CpuManager<4> = CpuManager::new(); + manager.register_cpu(0, true); + manager.register_cpu(1, false); + + // Check initial state + assert_eq!(manager.get_ap_state(1), Some(ApState::InHoldingPen)); + + // Change state + assert!(manager.set_ap_state(1, ApState::Busy)); + assert_eq!(manager.get_ap_state(1), Some(ApState::Busy)); + + // Cannot change BSP state + assert!(!manager.set_ap_state(0, ApState::Halted)); + } + + #[test] + fn test_for_each_ap() { + let manager: CpuManager<4> = CpuManager::new(); + manager.register_cpu(0, true); + manager.register_cpu(1, false); + manager.register_cpu(2, false); + + let mut ap_ids = [0u32; 4]; + let 
mut count = 0; + manager.for_each_ap(|id| { + if count < 4 { + ap_ids[count] = id; + count += 1; + } + }); + + assert_eq!(count, 2); + assert!(ap_ids[..count].contains(&1)); + assert!(ap_ids[..count].contains(&2)); + } + + #[test] + fn test_max_cpu_limit() { + let manager: CpuManager<2> = CpuManager::new(); + assert!(manager.register_cpu(0, true).is_some()); + assert!(manager.register_cpu(1, false).is_some()); + assert!(manager.register_cpu(2, false).is_none()); // Should fail + } + + #[test] + fn test_count_aps_in_state() { + let manager: CpuManager<4> = CpuManager::new(); + manager.register_cpu(0, true); + manager.register_cpu(1, false); + manager.register_cpu(2, false); + + assert_eq!(manager.count_aps_in_state(ApState::InHoldingPen), 2); + assert_eq!(manager.count_aps_in_state(ApState::Busy), 0); + + manager.set_ap_state(1, ApState::Busy); + assert_eq!(manager.count_aps_in_state(ApState::InHoldingPen), 1); + assert_eq!(manager.count_aps_in_state(ApState::Busy), 1); + } +} diff --git a/patina_mm_supervisor_core/src/entry_point.asm b/patina_mm_supervisor_core/src/entry_point.asm new file mode 100644 index 000000000..3b9ad2681 --- /dev/null +++ b/patina_mm_supervisor_core/src/entry_point.asm @@ -0,0 +1,19 @@ +# +# Exception entry point logic for X64. +# +# Copyright (c) Microsoft Corporation. +# +# SPDX-License-Identifier: Apache-2.0 +# + +.section .data + +.section .text +.global rust_main +.global efi_main + +.align 8 +# Shim layer that redefines the contract between runtime module and init. +efi_main: + + jmp rust_main diff --git a/patina_mm_supervisor_core/src/lib.rs b/patina_mm_supervisor_core/src/lib.rs new file mode 100644 index 000000000..4116485af --- /dev/null +++ b/patina_mm_supervisor_core/src/lib.rs @@ -0,0 +1,2268 @@ +//! MM Supervisor Core +//! +//! A pure Rust implementation of the MM Supervisor Core for standalone MM mode environments. +//! +//! This crate provides the core functionality for running a supervisor in MM (Management Mode) +//! 
that orchestrates incoming requests on the BSP while APs wait in a holding pen. +//! +//! ## Architecture +//! +//! The entry point is executed on all cores: +//! - **BSP**: Performs one-time initialization and enters the request serving loop +//! - **APs**: Enter a holding pen and poll mailboxes for commands from BSP +//! +//! ## Memory Model +//! +//! This is a core component that manages its own memory. It does **not** use heap allocation. +//! All structures use fixed-size arrays with compile-time constants provided via const generics. +//! +//! ## Example +//! +//! ```rust,ignore +//! use patina_mm_supervisor_core::*; +//! +//! struct MyPlatform; +//! +//! impl PlatformInfo for MyPlatform { +//! type CpuInfo = Self; +//! const MAX_CPU_COUNT: usize = 8; +//! } +//! +//! impl CpuInfo for MyPlatform { +//! fn ap_poll_timeout_us() -> u64 { 1000 } +//! } +//! +//! static SUPERVISOR: MmSupervisorCore = MmSupervisorCore::new(); +//! ``` +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! 
+#![cfg_attr(all(not(feature = "std"), not(test)), no_std)] +#![cfg(target_arch = "x86_64")] +#![feature(coverage_attribute)] + +#![allow(incomplete_features)] +#![feature(generic_const_exprs)] + +mod cpu; +mod mailbox; +pub mod mm_mem; +pub mod paging_allocator; +pub mod perf_timer; +pub mod privilege_mgmt; +pub mod save_state; +pub mod supervisor_handlers; +pub mod unblock_memory; + +pub use cpu::{ApState, CpuInfo, CpuManager, get_current_cpu_id, is_bsp}; +pub use mailbox::{ApCommand, ApMailbox, ApResponse, MailboxManager}; +pub use mm_mem::{ + AllocationType, PageAllocator, PageAllocError, SmramDescriptor, + PAGE_ALLOCATOR, + SMM_SMRAM_MEMORY_GUID, MM_PEI_MMRAM_MEMORY_RESERVE_GUID, +}; +pub use paging_allocator::{ + PagingPoolAllocator, PagingAllocError, SharedPagingAllocator, + PAGING_ALLOCATOR, DEFAULT_PAGING_POOL_PAGES, +}; +pub use unblock_memory::{ + UnblockedMemoryTracker, UnblockedMemoryEntry, UnblockError, + UNBLOCKED_MEMORY_TRACKER, +}; +pub use privilege_mgmt::{ + SyscallInterface, + invoke_demoted_routine, +}; +pub use supervisor_handlers::{ + SupervisorMmiHandler, SUPERVISOR_MMI_HANDLERS, +}; + +pub use patina_internal_mm_common::UserCommandType; + +use core::{ + arch::{asm, global_asm}, ffi::c_void, num::NonZeroUsize, panic, ptr::NonNull, sync::atomic::{AtomicBool, AtomicU32, Ordering} +}; + +use patina::pi::hob::{self, Hob, PhaseHandoffInformationTable}; +use patina::base::UEFI_PAGE_SIZE; +use patina::pi::protocols::communication::EfiMmCommunicateHeader; +use patina::pi::mm_cis::EfiMmEntryContext; +use patina::management_mode::MmCommBufferStatus; +use patina::management_mode::comm_buffer_hob::{MmCommonBufferHobData, MM_COMM_BUFFER_HOB_GUID}; +use patina_paging::{MemoryAttributes, PageTable, PagingType, x64::{X64PageTable, disable_write_protection, enable_write_protection}}; +use r_efi::efi; + +use patina_mm_policy::{walk_page_table, MemDescriptorV1_0}; + +// GUID for gMmSupervisorHobMemoryAllocModuleGuid +// { 0x3efafe72, 0x3dbf, 0x4341, { 0xad, 
0x04, 0x1c, 0xb6, 0xe8, 0xb6, 0x8e, 0x5e }} +/// GUID used in MemoryAllocationModule HOBs to identify MM Supervisor module allocations. +pub const MM_SUPERVISOR_HOB_MEMORY_ALLOC_MODULE_GUID: efi::Guid = efi::Guid::from_fields( + 0x3efafe72, + 0x3dbf, + 0x4341, + 0xad, + 0x04, + &[0x1c, 0xb6, 0xe8, 0xb6, 0x8e, 0x5e], +); + +// GUID for gMmSupervisorUserGuid +// { 0x30d1cc3f, 0xc1db, 0x41ed, { 0xb1, 0x13, 0xab, 0xce, 0x21, 0xb0, 0x2b, 0xce }} +/// GUID identifying the MM Supervisor User module. +pub const MM_SUPERVISOR_USER_GUID: efi::Guid = efi::Guid::from_fields( + 0x30d1cc3f, + 0xc1db, + 0x41ed, + 0xb1, + 0x13, + &[0xab, 0xce, 0x21, 0xb0, 0x2b, 0xce], +); + +// GUID for gMmCommonRegionHobGuid +// { 0xd4ffc718, 0xfb82, 0x4274, { 0x9a, 0xfc, 0xaa, 0x8b, 0x1e, 0xef, 0x52, 0x93 } } +pub const MM_COMMON_REGION_HOB_GUID: efi::Guid = efi::Guid::from_fields( + 0xd4ffc718, + 0xfb82, + 0x4274, + 0x9a, + 0xfc, + &[0xaa, 0x8b, 0x1e, 0xef, 0x52, 0x93], +); + +/// MM Common Region HOB Data Structure +/// +/// This structure contains information about the common memory region used by the MM Supervisor. +#[repr(C, packed)] +#[derive(Debug, Clone, Copy)] +pub struct MmCommonRegionHobData { + /// Type of the common region, must be 0 to represent the MM Supervisor communication buffer region + pub region_type: u64, + /// Base address of the supervisor communication buffer region + pub addr: u64, + /// Number of pages in the supervisor communication buffer region + pub number_of_pages: u64, +} + +// GUID for gMmSupervisorPassDownHobGuid +// { 0x3f2d2d1a, 0x7c6a, 0x4e2e, { 0x91, 0x2e, 0x5c, 0x4f, 0x5b, 0x8c, 0x2a, 0x9d } } +/// GUID for the MM Supervisor PassDown HOB. 
+pub const MM_SUPV_PASS_DOWN_HOB_GUID: efi::Guid = efi::Guid::from_fields( + 0x3f2d2d1a, + 0x7c6a, + 0x4e2e, + 0x91, + 0x2e, + &[0x5c, 0x4f, 0x5b, 0x8c, 0x2a, 0x9d], +); + +/// MM Supervisor PassDown HOB Revision +pub const MM_SUPV_PASS_DOWN_HOB_REVISION: u32 = 1; + +/// MM Supervisor PassDown HOB Data Structure +/// +/// This structure contains various buffer pointers and sizes passed from +/// the PEI phase to the MM Supervisor. +#[repr(C, packed)] +#[derive(Debug, Clone, Copy)] +pub struct MmSupvPassDownHobData { + /// Revision of this HOB structure + pub revision: u32, + /// Reserved for future use + pub reserved: u32, + /// Base address of CPL3 stack for MM Supervisor + pub mm_supervisor_cpl3_stack_base: u64, + /// Per-CPU stack size for CPL3 + pub mm_supervisor_cpl3_per_core_stack_size: u64, + /// MM Supervisor CPU private data base address + pub mm_supv_cpu_private: u64, + /// Size of MM Supervisor CPU private data + pub mm_supv_cpu_private_size: u64, + /// MM Initialized buffer base address + pub mm_initialized_buffer: u64, + /// MM Supervisor firmware policy buffer base address + pub mm_supv_firmware_policy_buffer: u64, + /// Size of MM Supervisor firmware policy buffer + pub mm_supv_firmware_policy_buffer_size: u64, + /// Size of the MMI entry point structure (for validating against expected size in supervisor) + pub mmi_entrypoint_size: u64, + /// Base address of the BSP MM + pub bsp_mm_base_address: u64, +} + +/// Errors that can occur during policy initialization. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PolicyInitError { + /// The HOB list pointer is null. + NullHobList, + /// Some HOB not found. + HobNotFound, + /// Invalid PassDown HOB revision. + InvalidRevision { found: u32, expected: u32 }, + /// Firmware policy buffer is null or empty. + NullFirmwarePolicyBuffer, + /// Invalid policy data. + InvalidPolicyData, + /// Memory allocation failed for policy buffers. 
+ MemoryAllocationFailed, + /// One or more communication buffers are not properly initialized. + MissingCommunicationBuffer, +} + +use spin::{Mutex, Once}; +use patina_internal_cpu::interrupts::Interrupts; + +global_asm!(include_str!("entry_point.asm")); + +/// A trait to be implemented by the platform to provide configuration values and types to be used +/// by the MM Supervisor Core. +/// +/// ## Example +/// +/// ```rust,ignore +/// use patina_mm_supervisor_core::*; +/// +/// struct ExamplePlatform; +/// +/// impl CpuInfo for ExamplePlatform { +/// fn ap_poll_timeout_us() -> u64 { 1000 } +/// } +/// +/// impl PlatformInfo for ExamplePlatform { +/// type CpuInfo = Self; +/// const MAX_CPU_COUNT: usize = 8; +/// } +/// ``` +#[cfg_attr(test, mockall::automock(type CpuInfo = MockCpuInfo;))] +pub trait PlatformInfo: 'static { + /// The platform's CPU information and configuration. + type CpuInfo: CpuInfo; + + /// Maximum number of CPUs supported by the platform. + /// This is used to size the CPU manager and mailbox arrays. + const MAX_CPU_COUNT: usize; +} + +/// Static reference to the MM Supervisor Core instance. +/// +/// This is set during the `entry_point` call and provides global access to the supervisor. +static __SUPERVISOR: Once = Once::new(); + +/// Flag indicating that BSP one-time initialization is complete. +static BSP_INIT_COMPLETE: AtomicBool = AtomicBool::new(false); + +/// Pointer to the per-core initialized buffer from the PassDown HOB. +/// Each core has a 64-bit slot at `buffer_base + (cpu_index * 8)`. +/// A non-zero value indicates the core has completed initialization. +static MM_INITIALIZED_BUFFER: Once = Once::new(); + +/// Counter for tracking how many cores have completed their per-core init. +static PER_CORE_INIT_COUNT: AtomicU32 = AtomicU32::new(0); + +/// The policy object is initialized once during BSP initialization and provides access to the security policy +/// for the MM Supervisor. 
It is stored in a static variable for global access. +/// The policy gate is initialized from the firmware policy buffer provided in the PassDown HOB. +pub(crate) static POLICY_GATE: Once = Once::new(); + +/// Global page table instance for managing page attributes. +/// +/// Initialized during BSP init from the active CR3 register. This allows the supervisor +/// to modify page table attributes (e.g., marking supervisor pages as R/W + NX) when +/// allocating memory. +pub(crate) static PAGE_TABLE: Mutex>> = Mutex::new(None); + +/// Result of a page table ownership query. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum PageOwnership { + /// The page is user-accessible (U/S = 1, SpecialPurpose clear). + User, + /// The page is supervisor-only (U/S = 0, SpecialPurpose set). + Supervisor, +} + +/// Aligns an address and size to page boundaries for page table queries. +/// +/// Rounds the address down to the nearest page boundary and adjusts the size +/// upward so the entire original range `[address, address+size)` is covered. +/// +/// Returns `(page_aligned_address, page_aligned_size)`. +#[inline] +fn page_align_range(address: u64, size: u64) -> (u64, u64) { + const PAGE_MASK: u64 = (UEFI_PAGE_SIZE as u64) - 1; + let aligned_start = address & !PAGE_MASK; + let end = address.saturating_add(size); + let aligned_end = end.saturating_add(PAGE_MASK) & !PAGE_MASK; + (aligned_start, aligned_end.saturating_sub(aligned_start)) +} + +/// Queries the page table to determine the ownership (user vs supervisor) of an address. +/// +/// The address and size are page-aligned before querying (rounded down / up respectively). +/// +/// Checks the `Supervisor` attribute which maps to the U/S bit on X64: +/// - `Supervisor` set => `PageOwnership::Supervisor` (U/S = 0) +/// - `Supervisor` clear => `PageOwnership::User` (U/S = 1) +/// +/// Returns `None` if the page table is not initialized or the address is unmapped. 
+pub(crate) fn query_address_ownership(address: u64, size: u64) -> Option<PageOwnership> {
+static USER_ENTRY_POINT: Once = Once::new(); + +/// Pointer to the SMM_CPU_PRIVATE_DATA structure from the PassDown HOB. +/// This is used to access the SmmCoreEntryContext for user request dispatch. +static SMM_CPU_PRIVATE: Once = Once::new(); + +/// Type-erased function pointer for AP startup dispatch. +/// +/// This is set during [`MmSupervisorCore`] initialization. The function is monomorphized +/// for the concrete `PlatformInfo` type so the syscall dispatcher can invoke it without +/// knowing the platform's const-generic parameters. +/// +/// Signature: `fn(cpu_index: u64, procedure: u64, argument: u64) -> u64` +/// +/// Returns 0 on success, or an EFI status code on failure. +pub(crate) static AP_STARTUP_FN: Once u64> = Once::new(); + +/// EFI_SMM_RESERVED_SMRAM_REGION structure. +/// +/// Describes a reserved SMRAM region that cannot be used for the SMRAM heap. +/// Matches the C structure from PI specification. +#[repr(C)] +#[derive(Debug, Clone, Copy)] +pub struct EfiSmmReservedSmramRegion { + /// Starting address of the reserved SMRAM area. + pub smram_reserved_start: u64, + /// Number of bytes occupied by the reserved SMRAM area. + pub smram_reserved_size: u64, +} + +/// SMM_CPU_PRIVATE_DATA structure. +/// +/// Private structure for the SMM CPU module, passed from PEI via the PassDown HOB. +/// Matches the C `SMM_CPU_PRIVATE_DATA` layout from MpService.h. 
+/// +/// Layout (x86_64): +/// ```text +/// Offset Field +/// 0x00 signature (UINTN) +/// 0x08 smm_cpu_handle (EFI_HANDLE) +/// 0x10 processor_info (ptr) +/// 0x18 cpu_save_state_size (ptr) +/// 0x20 cpu_save_state (ptr) +/// 0x28 smm_reserved_smram_region[1] (16 bytes) +/// 0x38 smm_core_entry_context (40 bytes, inline) +/// 0x60 smm_core_entry (fn ptr) +/// 0x68 smm_user_entry (fn ptr) +/// 0x70 ap_wrapper_func (ptr) +/// 0x78 token_list (ptr) +/// 0x80 first_free_token (ptr) +/// Total: 0x88 bytes +/// ``` +#[repr(C)] +#[derive(Debug, Clone, Copy)] +pub struct SmmCpuPrivateData { + /// Signature ('scpu'). + pub signature: u64, + /// SMM CPU handle. + pub smm_cpu_handle: u64, + /// Pointer to processor information array. + pub processor_info: u64, + /// Pointer to per-CPU save state size array. + pub cpu_save_state_size: u64, + /// Pointer to per-CPU save state pointer array. + pub cpu_save_state: u64, + /// Reserved SMRAM region descriptor (single element array). + pub smm_reserved_smram_region: EfiSmmReservedSmramRegion, + /// Inline entry context structure (40 bytes). + pub smm_core_entry_context: EfiMmEntryContext, + /// Supervisor core entry point function pointer. + pub smm_core_entry: u64, + /// User core entry point function pointer. + pub smm_user_entry: u64, + /// AP wrapper function pointer. + pub ap_wrapper_func: u64, + /// Token list pointer. + pub token_list: u64, + /// First free token pointer. + pub first_free_token: u64, +} + +/// Request target derived from MM_COMM_BUFFER_STATUS. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum RequestTarget { + /// No pending request (buffer not valid). + None, + /// Request targets the Supervisor. + Supervisor, + /// Request targets the User module. 
+ User, +} + +impl From<&MmCommBufferStatus> for RequestTarget { + fn from(status: &MmCommBufferStatus) -> Self { + if status.is_comm_buffer_valid == 0 { + RequestTarget::User + } else if status.talk_to_supervisor != 0 { + RequestTarget::Supervisor + } else { + RequestTarget::User + } + } +} + + +/// The MM Supervisor Core responsible for managing the standalone MM environment. +/// +/// This struct is generic over the [`PlatformInfo`] trait, which provides platform-specific +/// configuration including compile-time constants for array sizes. +/// +/// The supervisor manages: +/// - BSP initialization and request handling +/// - AP management through the holding pen and mailbox system +/// - Request dispatching and response handling +/// +/// ## Memory Model +/// +/// This struct does not perform heap allocation. All internal structures use fixed-size +/// arrays based on the `MAX_CPU_COUNT` constant from [`PlatformInfo`]. +/// +/// ## Usage +/// +/// Create a static instance of the supervisor and call `entry_point` from all cores: +/// +/// ```rust,ignore +/// use patina_mm_supervisor_core::*; +/// +/// static SUPERVISOR: MmSupervisorCore = MmSupervisorCore::new(); +/// +/// #[no_mangle] +/// pub extern "efiapi" fn mm_entry(hob_list: *const c_void) -> ! { +/// SUPERVISOR.entry_point(hob_list) +/// } +/// ``` +pub struct MmSupervisorCore +where + [(); P::MAX_CPU_COUNT]:, +{ + /// Manager for CPU-related operations. + cpu_manager: CpuManager<{ P::MAX_CPU_COUNT }>, + /// Manager for AP mailboxes. + mailbox_manager: MailboxManager<{ P::MAX_CPU_COUNT }>, + /// Syscall interface for privilege transitions. + syscall_interface: SyscallInterface<{ P::MAX_CPU_COUNT }>, + /// Flag indicating if the core has been initialized. + initialized: AtomicBool, + /// Phantom data for the platform type. + _phantom: core::marker::PhantomData

, +} + +// SAFETY: The MmSupervisorCore is designed to be shared across threads with proper synchronization. +unsafe impl Send for MmSupervisorCore

+where + [(); P::MAX_CPU_COUNT]:, +{ +} +unsafe impl Sync for MmSupervisorCore

+where + [(); P::MAX_CPU_COUNT]:, +{ +} + +pub(crate) fn is_buffer_inside_mmram(base: u64, size: u64) -> bool { + // we will go over the page allocator to see if this region falls inside any of the MMRAM regions + mm_mem::PAGE_ALLOCATOR.is_region_inside_mmram(base, size) +} + +/// Read CR3 register. +pub(crate) fn read_cr3() -> u64 { + let mut _value = 0u64; + + #[cfg(all(not(test), target_arch = "x86_64"))] + { + // SAFETY: inline asm is inherently unsafe because Rust can't reason about it. + // In this case we are reading the CR3 register, which is a safe operation. + unsafe { + asm!("mov {}, cr3", out(reg) _value, options(nostack, preserves_flags)); + } + } + + _value +} + +use x86_64::structures::DescriptorTablePointer; + +// ============================================================================ +// SMI Handler Fixup Constants +// ============================================================================ + +/// Offset from SMBASE where the SMI handler code is located. +const SMM_HANDLER_OFFSET: u64 = 0x8000; + +/// Index into the Fixup64 array for the SMI handler IDTR pointer. +const FIXUP64_SMI_HANDLER_IDTR: usize = 5; + +/// Per-core MMI entry structure header. +/// +/// This packed structure is embedded at the end of the SMI handler binary template. +/// It contains offsets (relative to the header start) to fixup arrays that the +/// relocation code uses to patch per-CPU values into the binary. +/// +/// Layout matches the C `PER_CORE_MMI_ENTRY_STRUCT_HDR` from SeaResponder.h. +#[repr(C, packed)] +#[derive(Debug, Clone, Copy)] +struct PerCoreMmiEntryStructHdr { + /// Header version (4 for version 4). + header_version: u32, + /// Offset from header start to FixUpStruct array. + fixup_struct_offset: u8, + /// Number of FixUpStruct array entries. + fixup_struct_num: u8, + /// Offset from header start to Fixup64 array. + fixup64_offset: u8, + /// Number of Fixup64 array entries. + fixup64_num: u8, + /// Offset from header start to Fixup32 array. 
+ fixup32_offset: u8, + /// Number of Fixup32 array entries. + fixup32_num: u8, + /// Offset from header start to Fixup8 array. + fixup8_offset: u8, + /// Number of Fixup8 array entries. + fixup8_num: u8, + /// SMI entry binary version. + binary_version: u16, + /// SPL value for SMI entry binary. + spl_value: u32, + /// Reserved for future use. + reserved: u32, +} + +/// Read the current IDT Register (IDTR) via the `SIDT` instruction. +/// +/// Returns a [`DescriptorTablePointer`] containing the IDT base and limit. +fn read_idtr() -> DescriptorTablePointer { + let mut descriptor = DescriptorTablePointer { + limit: 0, + base: x86_64::VirtAddr::zero(), + }; + + #[cfg(all(not(test), target_arch = "x86_64"))] + { + // SAFETY: SIDT stores the 10-byte IDTR pseudo-descriptor to the specified + // memory location. This is a read-only operation on CPU state. + unsafe { + asm!( + "sidt [{}]", + in(reg) &mut descriptor as *mut DescriptorTablePointer, + options(nostack, preserves_flags) + ); + } + } + + descriptor +} + +// ============================================================================ +// Per-Core Initialization Status Helpers +// ============================================================================ + +/// Checks if a specific core has completed initialization. +/// +/// Reads the 64-bit slot at `mm_initialized_buffer + (cpu_index * 8)`. +/// A non-zero value indicates the core has completed initialization. +fn is_core_initialized(cpu_index: usize) -> bool { + if let Some(&buffer_base) = MM_INITIALIZED_BUFFER.get() { + if buffer_base == 0 { + return false; + } + let slot_ptr = (buffer_base as usize + cpu_index) as *const u8; + // SAFETY: The buffer is provided by the MM IPL and is guaranteed to be valid. + // Each core only reads its own slot or slots of other cores. + let value = unsafe { core::ptr::read_volatile(slot_ptr) }; + value != 0 + } else { + false + } +} + +/// Marks a specific core as initialized. 
+/// +/// Writes a non-zero value to the 64-bit slot at `mm_initialized_buffer + (cpu_index * 8)`. +fn mark_core_initialized(cpu_index: usize) { + if let Some(&buffer_base) = MM_INITIALIZED_BUFFER.get() { + if buffer_base == 0 { + log::error!("MM initialized buffer is null, cannot mark core {} as initialized", cpu_index); + return; + } + let slot_ptr = (buffer_base as usize + cpu_index) as *mut u8; + // SAFETY: The buffer is provided by the MM IPL and is guaranteed to be valid. + // Each core writes only to its own slot. + unsafe { core::ptr::write_volatile(slot_ptr, 1) }; + log::trace!("Core {} marked as initialized at 0x{:016x}", cpu_index, slot_ptr as u64); + } else { + log::error!("MM initialized buffer not set, cannot mark core {} as initialized", cpu_index); + } +} + +/// Helper function to disable the SMAP bit in EFLAGS to allow supervisor code to access user memory when needed. +pub fn disable_smap() { + #[cfg(all(not(test), target_arch = "x86_64"))] + unsafe { + asm!( + "stac", // Set AC flag to enable access to user memory + options(nostack, preserves_flags) + ); + } +} + +/// Helper function to re-enable the SMAP bit in EFLAGS after accessing user memory. +pub fn enable_smap() { + #[cfg(all(not(test), target_arch = "x86_64"))] + unsafe { + asm!( + "clac", // Clear AC flag to re-enable SMAP protections + options(nostack, preserves_flags) + ); + } +} + +#[coverage(off)] +impl MmSupervisorCore

+where + [(); P::MAX_CPU_COUNT]:, +{ + /// Creates a new instance of the MM Supervisor Core. + /// + /// This is a const fn that performs no heap allocation. + pub const fn new() -> Self { + Self { + cpu_manager: CpuManager::new(), + mailbox_manager: MailboxManager::new(), + syscall_interface: SyscallInterface::new(), + initialized: AtomicBool::new(false), + _phantom: core::marker::PhantomData, + } + } + + /// Sets the static supervisor instance for global access. + /// + /// Returns true if the address was successfully stored, false if already set. + /// Also registers the type-erased AP startup function pointer. + #[must_use] + fn set_instance(&'static self) -> bool { + let physical_address = NonNull::from_ref(self).expose_provenance(); + let stored = &physical_address == __SUPERVISOR.call_once(|| physical_address); + if stored { + // Register the monomorphized AP startup function for this platform + AP_STARTUP_FN.call_once(|| Self::start_ap_procedure_trampoline); + } + stored + } + + /// Gets the static MM Supervisor Core instance for global access. + #[allow(unused)] + pub(crate) fn instance<'a>() -> &'a Self { + // SAFETY: The pointer is guaranteed to be valid as set_instance ensures single initialization. + unsafe { + NonNull::::with_exposed_provenance( + *__SUPERVISOR.get().expect("MM Supervisor Core is not initialized."), + ) + .as_ref() + } + } + + /// The entry point for the MM Supervisor Core. + /// + /// This function is called on all cores (BSP and APs). The BSP performs initialization + /// and enters the request serving loop, while APs enter the holding pen. + /// + /// # Arguments + /// + /// * `hob_list` - Pointer to the HOB (Hand-Off Block) list passed from the pre-MM phase. + /// + /// # Panics + /// + /// Panics if: + /// - The supervisor instance was already set + /// - The HOB list pointer is null + /// + /// # Returns + /// + /// On the first call (initialization phase), this function returns after init is complete. 
+ /// On subsequent calls, BSP enters the request loop and APs enter the holding pen (neither returns). + pub fn entry_point(&'static self, cpu_index: usize, hob_list: *const c_void) { + // Get the current CPU's APIC ID + let cpu_id = cpu::get_current_cpu_id(); + + // Determine if we're BSP by checking IA32_APIC_BASE MSR + let is_bsp = cpu::is_bsp(); + + // Check if this core has already completed initialization (per-core check) + if is_core_initialized(cpu_index) { + // Subsequent entry: go directly to request loop or holding pen (does not return) + self.enter_runtime(cpu_id); + + return; + } + + // First entry: initialization phase + if is_bsp { + // BSP path: Initialize the supervisor + assert!(self.set_instance(), "MM Supervisor Core instance was already set!"); + assert!(!hob_list.is_null(), "MM Supervisor Core requires a non-null HOB list pointer."); + + log::info!("MM Supervisor Core v{}", env!("CARGO_PKG_VERSION")); + log::info!("BSP (CPU {}, index {}) starting one-time initialization...", cpu_id, cpu_index); + + // Register BSP with CPU manager + self.cpu_manager.register_cpu(cpu_id, true); + + // Perform BSP-only one-time initialization (this sets up MM_INITIALIZED_BUFFER) + self.bsp_init(hob_list); + + // Dispatch to the user level entry point discovered from the HOB list (if found) + let user_entry = match USER_ENTRY_POINT.get() { + Some(&entry) if entry != 0 => entry, + _ => { + log::error!("User entry point not configured, cannot demote"); + return; + } + }; + + let cpl3_stack = match self.syscall_interface.get_cpl3_stack(cpu_index) { + Ok(stack) => stack, + Err(e) => { + log::error!("Failed to get CPL3 stack for CPU {}: {:?}", cpu_index, e); + return; + } + }; + + // SAFETY: We are transitioning from the supervisor (CPL0) to the user module (CPL3) for the first time. + // The entry point and stack have been validated and set up during initialization, and the user module is + // will be responsible for validating any further inputs. 
+ let ret = unsafe { + invoke_demoted_routine ( + cpu_index, + user_entry, + cpl3_stack, + 3, + UserCommandType::StartUserCore as u64, + hob_list, + 0) + }; + log::info!("Returned from user entry point with value: 0x{:016x}", ret); + + // Mark BSP init as complete so APs can proceed + self.initialized.store(true, Ordering::Release); + BSP_INIT_COMPLETE.store(true, Ordering::Release); + + log::info!("BSP one-time initialization complete."); + } else { + // AP path: Wait for BSP to complete one-time initialization + log::trace!("AP (CPU {}, index {}) waiting for BSP initialization...", cpu_id, cpu_index); + + // Spin until BSP completes initialization + while !BSP_INIT_COMPLETE.load(Ordering::Acquire) { + core::hint::spin_loop(); + } + + // Register this AP with the CPU manager + self.cpu_manager.register_cpu(cpu_id, false); + } + + // All cores perform per-core initialization + self.per_core_init(cpu_id, is_bsp); + + // Mark this core as initialized in the per-core buffer + mark_core_initialized(cpu_index); + + // Track that this core has completed per-core init + let init_count = PER_CORE_INIT_COUNT.fetch_add(1, Ordering::SeqCst) + 1; + log::trace!("CPU {} (index {}) completed per-core init ({} cores initialized)", cpu_id, cpu_index, init_count); + + // BSP waits for all registered CPUs to complete per-core init before returning + if is_bsp { + let expected_cpus = self.cpu_manager.registered_count(); + while PER_CORE_INIT_COUNT.load(Ordering::Acquire) < expected_cpus as u32 { + core::hint::spin_loop(); + } + + log::info!("All {} cores completed initialization, returning to caller.", expected_cpus); + } + + // First entry returns to caller after init is complete + // (Each core has already marked itself as initialized via mark_core_initialized) + } + + /// BSP-specific initialization. + /// + /// This is called only on the BSP after basic setup is complete. 
+ fn bsp_init(&'static self, hob_list: *const c_void) { + log::info!("BSP performing one-time initialization..."); + + // Initialize the performance timer early so all subsequent code + // can use real TSC-based timeouts. + perf_timer::init(P::CpuInfo::perf_timer_frequency().unwrap_or(0)); + + let mut interrupt_manager = Interrupts::new(); + interrupt_manager.initialize().unwrap_or_else(|err| { + panic!("Failed to initialize Interrupt Manager: {:?}", err); + }); + + // Initialize the page allocator from the HOB list + // This finds all SMRAM regions and sets up memory tracking + // SAFETY: hob_list is provided by the MM IPL and is guaranteed to be valid + unsafe { + if let Err(e) = mm_mem::PAGE_ALLOCATOR.init_from_hob_list(hob_list) { + log::error!("Failed to initialize page allocator: {:?}", e); + } + } + + // Reserve pages from the page allocator for paging structures. + // This is done before paging is initialized to avoid circular dependency. + unsafe { + match mm_mem::PAGE_ALLOCATOR.allocate_pages(paging_allocator::DEFAULT_PAGING_POOL_PAGES) { + Ok(paging_pool_base) => { + log::info!( + "Reserved {} pages at 0x{:016x} for paging structures", + paging_allocator::DEFAULT_PAGING_POOL_PAGES, + paging_pool_base + ); + // Initialize the paging allocator with the reserved pool + if let Err(e) = paging_allocator::PAGING_ALLOCATOR.init( + paging_pool_base, + paging_allocator::DEFAULT_PAGING_POOL_PAGES, + ) { + log::error!("Failed to initialize paging allocator: {:?}", e); + } + } + Err(e) => { + log::error!("Failed to reserve pages for paging structures: {:?}", e); + } + } + } + + // Initialize the global page table from the active CR3. + // This allows the supervisor to modify page attributes on newly allocated pages. 
+ let cr3 = read_cr3(); + let paging_alloc = paging_allocator::SharedPagingAllocator::new(&paging_allocator::PAGING_ALLOCATOR); + let page_table = unsafe { + X64PageTable::from_existing(cr3, paging_alloc, PagingType::Paging4Level) + }.expect("Failed to create page table from active CR3"); + *PAGE_TABLE.lock() = Some(page_table); + log::info!("Page table initialized from CR3=0x{:016x}", cr3); + + // Discover the MM Supervisor User module entry point from the HOB list. + // We look for EFI_HOB_TYPE_MEMORY_ALLOCATION HOBs that have: + // - MemoryAllocationHeader.Name == gMmSupervisorHobMemoryAllocModuleGuid + // - ModuleName == gMmSupervisorUserGuid + // SAFETY: hob_list is provided by the MM IPL and is guaranteed to be valid + let user_entry_point = unsafe { self.discover_user_module_entry(hob_list) }; + if let Some(entry) = user_entry_point { + log::info!("Discovered MM User module entry point: 0x{:016x}", entry); + // Store entry point in static for use during request processing + USER_ENTRY_POINT.call_once(|| entry); + } else { + log::warn!("MM User module entry point not found in HOB list"); + } + + // Initialize the policy gate from the PassDown HOB. + // This discovers the firmware policy buffer and initializes the policy gate. + // SAFETY: hob_list is provided by the MM IPL and is guaranteed to be valid + unsafe { + if let Err(e) = self.init_policy_from_hob_list(hob_list) { + log::error!("Failed to initialize policy gate: {:?}", e); + } + } + + // Okay, we have all the hob content used, will map this to user level and don't need to keep + // the HOB list pointer anymore. + // + // Walk the HOB list to compute its total size (the list may not start + // with a PHIT, so we cannot use `end_of_hob_list`). Each HOB has a + // generic header with `{ type: u16, length: u16, ... }`. We advance + // by `length` until we hit `END_OF_HOB_LIST`, then include that + // terminal header in the total size — mirroring the C `GetHobListSize`. 
+ // + // After computing the size, page-align the range and remap as + // user-accessible + read-only + non-executable so the demoted user + // core can walk the HOBs during `StartUserCore`. + let hob_base = hob_list as u64; + let hob_list_size = { + let mut cursor = hob_list as *const u8; + loop { + // SAFETY: cursor is within the HOB list buffer; the header is at + // least `size_of::()` bytes (8). + let hdr = unsafe { &*(cursor as *const hob::header::Hob) }; + if hdr.r#type == hob::END_OF_HOB_LIST { + // Include the END_OF_HOB_LIST header itself. + break (cursor as u64 - hob_base) + core::mem::size_of::() as u64; + } + if hdr.length == 0 { + log::error!("HOB with zero length at 0x{:016x}, aborting walk", cursor as u64); + break cursor as u64 - hob_base; + } + cursor = unsafe { cursor.add(hdr.length as usize) }; + } + }; + + let page_mask: u64 = (UEFI_PAGE_SIZE as u64) - 1; + let aligned_base = hob_base & !page_mask; + let aligned_end = (hob_base + hob_list_size + page_mask) & !page_mask; + let hob_region_size = aligned_end.saturating_sub(aligned_base); + log::info!( + "HOB list at 0x{:016x} size 0x{:x}, aligned region 0x{:016x}-0x{:016x} (0x{:x} bytes)", + hob_base, hob_list_size, aligned_base, aligned_end, hob_region_size + ); + if hob_region_size > 0 { + let attrs = MemoryAttributes::ReadOnly | MemoryAttributes::ExecuteProtect; + let mut pt_guard = PAGE_TABLE.lock(); + if let Some(ref mut pt) = *pt_guard { + if let Err(e) = pt.map_memory_region(aligned_base, hob_region_size, attrs) { + log::error!( + "Failed to remap HOB list to user level at 0x{:016x} (0x{:x} bytes): {:?}", + aligned_base, hob_region_size, e + ); + panic!("Cannot continue without user access to HOB list"); + } else { + log::info!( + "Remapped HOB list 0x{:016x}-0x{:016x} as user read-only", + aligned_base, aligned_end + ); + } + } else { + log::error!("Page table not initialized, cannot remap HOB list to user level"); + panic!("Cannot continue without user access to HOB list"); + } + } + + 
log::trace!("BSP one-time initialization complete."); + } + + /// Patches the SMI handler's IDT descriptor to point to Rust interrupt handlers. + /// + /// Navigates the per-core MMI entry fixup structure embedded at the end of the + /// SMI handler binary to locate the `gSmiHandlerIdtr` pointer, then overwrites + /// it with the current IDT descriptor (base + limit). + /// + /// # Arguments + /// + /// * `mmi_entry_size` - Size of the MMI entry binary from the PassDown HOB. + fn patch_smi_handler_idt(mmbase: u64, mmi_entry_size: u64) { + if mmi_entry_size == 0 { + log::warn!("MMI entry size is 0 in PassDown HOB, cannot navigate fixup structure"); + return; + } + + // SAFETY: Reading SMBASE MSR is safe during BSP init in SMM context. + let mut smbase = unsafe { cpu::read_msr(cpu::IA32_MSR_SMBASE) }.unwrap_or_else(|err| { + panic!("Failed to read IA32_MSR_SMBASE: {:?}", err); + }); + + if smbase == 0 { + smbase = mmbase; + } + + let mmi_entry_base = smbase + SMM_HANDLER_OFFSET; + log::info!("MMI entry at 0x{:016x} with size 0x{:x}", mmi_entry_base, mmi_entry_size); + + // The last u32 in the MMI entry binary is the total fixup structure size. + let whole_struct_size_addr = mmi_entry_base + mmi_entry_size - 4; + // SAFETY: whole_struct_size_addr points into the SMI handler template in SMRAM. + let whole_struct_size = unsafe { + core::ptr::read_unaligned(whole_struct_size_addr as *const u32) + }; + + // The structure header starts before the trailing size field. + let hdr_addr = (mmi_entry_base + mmi_entry_size - 4 - whole_struct_size as u64) + as *const PerCoreMmiEntryStructHdr; + // SAFETY: hdr_addr points to the packed fixup header within the SMI handler binary. 
+ let hdr = unsafe { core::ptr::read_unaligned(hdr_addr) }; + + let hdr_version = hdr.header_version; + let f64_offset = hdr.fixup64_offset; + let f64_num = hdr.fixup64_num; + log::trace!( + "Fixup header at 0x{:016x}: version={}, fixup64_offset={}, fixup64_num={}", + hdr_addr as u64, hdr_version, f64_offset, f64_num + ); + + // Validate the Fixup64 array has the IDTR entry. + if (FIXUP64_SMI_HANDLER_IDTR as u8) >= f64_num { + log::error!( + "Fixup64 array too small: need index {} but only {} entries", + FIXUP64_SMI_HANDLER_IDTR, f64_num + ); + return; + } + + // Navigate to the Fixup64 array and read the IDTR entry. + let fixup64_base = (hdr_addr as u64 + f64_offset as u64) as *const u64; + // SAFETY: fixup64_base + index is within the fixup array in the SMI handler binary. + let idt_desc_addr = unsafe { + core::ptr::read_unaligned(fixup64_base.add(FIXUP64_SMI_HANDLER_IDTR)) + }; + + if idt_desc_addr == 0 { + log::warn!("Fixup64[{}] (SMI_HANDLER_IDTR) is null", FIXUP64_SMI_HANDLER_IDTR); + return; + } + + // Overwrite the IA32_DESCRIPTOR with our Rust IDT's base/limit. + let idt_desc_ptr = idt_desc_addr as *mut DescriptorTablePointer; + let idtr = read_idtr(); + + // SAFETY: idt_desc_ptr points to an IA32_DESCRIPTOR allocated by the C relocation + // code via AllocateCodePages(1). Both DescriptorTablePointer (packed(2)) and + // IA32_DESCRIPTOR (packed(1)) have the same 10-byte {u16, u64} layout. + unsafe { core::ptr::write_unaligned(idt_desc_ptr, idtr) }; + + log::info!( + "Patched SMI handler IDT descriptor at 0x{:016x}: base=0x{:016x}, limit=0x{:04x}", + idt_desc_addr, idtr.base.as_u64(), idtr.limit + ); + } + + /// Per-core initialization. + /// + /// This is called on every core (BSP and APs) during the first entry. + /// Use this for setting up per-CPU state like syscall MSRs, GS base, etc. 
+ fn per_core_init(&'static self, cpu_id: u32, is_bsp: bool) { + let core_type = if is_bsp { "BSP" } else { "AP" }; + log::trace!("{} (CPU {}) performing per-core initialization...", core_type, cpu_id); + + // TODO: Set up per-CPU GDT/TSS if needed + // TODO: Set up per-CPU interrupt stacks + // TODO: Initialize per-CPU data structures + + log::trace!("{} (CPU {}) per-core initialization complete.", core_type, cpu_id); + } + + /// Enter runtime mode (called on subsequent entries after init is complete). + /// + /// Implements the MP synchronization protocol: + /// 1. APs check in by setting their state to `InHoldingPen` and entering the holding pen + /// 2. BSP waits (with timeout) for all registered APs to arrive + /// 3. BSP processes the pending request via `bsp_request_loop` + /// 4. BSP broadcasts `Return` to all APs so they exit the holding pen + /// 5. BSP waits for all AP responses before returning + fn enter_runtime(&'static self, cpu_id: u32) { + let is_bsp = cpu::is_bsp(); + + if is_bsp { + log::trace!("BSP (CPU {}) waiting for APs to arrive...", cpu_id); + + // Wait for all registered APs to check in (set state to InHoldingPen) + let expected_aps = self.cpu_manager.registered_count().saturating_sub(1) as usize; + self.wait_for_ap_arrival(expected_aps); + + // All APs (or timeout) - proceed with request processing + log::trace!("BSP (CPU {}) entering request serving routine...", cpu_id); + self.bsp_request_loop(cpu_id as usize); + + // BSP is done handling the request - broadcast Return to all APs + log::trace!("BSP (CPU {}) broadcasting Return to all APs...", cpu_id); + let sent = self.mailbox_manager.broadcast_command(ApCommand::Return); + log::trace!("BSP (CPU {}) sent Return to {} APs, waiting for acknowledgement...", cpu_id, sent); + + // TODO: Wait for all APs to acknowledge the Return command + const RETURN_TIMEOUT_US: u64 = 100_000; // 100 ms + let responded = self.mailbox_manager.wait_all_responses(RETURN_TIMEOUT_US); + log::trace!( + "BSP (CPU 
{}) done: {}/{} APs acknowledged Return", + cpu_id, responded, sent + ); + } else { + // AP: check in by marking state, then enter holding pen + self.cpu_manager.set_ap_state(cpu_id, cpu::ApState::InHoldingPen); + log::trace!("AP (CPU {}) checked in, entering holding pen...", cpu_id); + self.ap_holding_pen(cpu_id); + } + } + + /// Waits for APs to arrive with a timeout. + /// + /// Spins until the expected number of APs have set their state to `InHoldingPen`, + /// or the timeout expires (whichever comes first). + fn wait_for_ap_arrival(&self, expected_aps: usize) { + if expected_aps == 0 { + return; + } + + const AP_ARRIVAL_TIMEOUT_US: u64 = 100_000; // 100 ms + + let all_arrived = perf_timer::spin_until(AP_ARRIVAL_TIMEOUT_US, || { + self.cpu_manager.count_aps_in_state(cpu::ApState::InHoldingPen) >= expected_aps + }); + + if all_arrived { + log::trace!("All {} APs arrived", expected_aps); + } else { + let arrived = self.cpu_manager.count_aps_in_state(cpu::ApState::InHoldingPen); + log::warn!( + "AP arrival timeout: {}/{} APs arrived, proceeding with available cores", + arrived, expected_aps + ); + } + } + + /// Discovers the MM Supervisor User module entry point from the HOB list. + /// + /// This function iterates through the HOB list looking for `MemoryAllocationModule` HOBs + /// that match the MM Supervisor memory allocation module GUID and have the MM Supervisor + /// User GUID as their module name. + /// + /// # Safety + /// + /// The caller must ensure that `hob_list` points to a valid HOB list. + /// + /// # Returns + /// + /// The entry point address of the user module if found, or `None` otherwise. + unsafe fn discover_user_module_entry(&self, hob_list: *const c_void) -> Option { + if hob_list.is_null() { + return None; + } + + // Get the HOB list header + let hob_list_info = unsafe { + (hob_list as *const PhaseHandoffInformationTable).as_ref()? 
+ }; + + let hob = Hob::Handoff(hob_list_info); + + // Iterate through the HOB list looking for MemoryAllocationModule HOBs + for current_hob in &hob { + if let Hob::MemoryAllocationModule(mem_alloc_mod) = current_hob { + // Check if this is an MM Supervisor module allocation + // (MemoryAllocationHeader.Name == gMmSupervisorHobMemoryAllocModuleGuid) + if mem_alloc_mod.alloc_descriptor.name == MM_SUPERVISOR_HOB_MEMORY_ALLOC_MODULE_GUID { + log::debug!( + "Found MM Supervisor module HOB: module_name={:?}, entry_point=0x{:016x}", + mem_alloc_mod.module_name, + mem_alloc_mod.entry_point + ); + + // Check if this is the User module (ModuleName == gMmSupervisorUserGuid) + if mem_alloc_mod.module_name == MM_SUPERVISOR_USER_GUID { + log::info!( + "Found MM User module: entry_point=0x{:016x}, base=0x{:016x}, size=0x{:x}", + mem_alloc_mod.entry_point, + mem_alloc_mod.alloc_descriptor.memory_base_address, + mem_alloc_mod.alloc_descriptor.memory_length + ); + return Some(mem_alloc_mod.entry_point); + } + } + } + } + + None + } + + /// Initializes the policy gate from the PassDown HOB. + /// + /// This function iterates through the HOB list looking for the PassDown HOB, + /// extracts the firmware policy buffer pointer, and initializes the policy gate. + /// + /// # Safety + /// + /// The caller must ensure that `hob_list` points to a valid HOB list. + /// + /// # Returns + /// + /// `Ok(())` if the policy gate was successfully initialized, or an error otherwise. + /// TODO: Remove the passdown hob eventually!!!!! + unsafe fn init_policy_from_hob_list(&self, hob_list: *const c_void) -> Result<(), PolicyInitError> { + if hob_list.is_null() { + return Err(PolicyInitError::NullHobList); + } + + // Get the HOB list header + let hob_list_info = unsafe { + (hob_list as *const PhaseHandoffInformationTable) + .as_ref() + .ok_or(PolicyInitError::NullHobList)? 
+ }; + + let mut supv_comm_buffer = 0 as u64; + let mut supv_comm_buffer_size = 0 as u64; + let mut supv_comm_buffer_internal = 0 as u64; + let mut user_comm_buffer = 0 as u64; + let mut user_comm_buffer_size = 0 as u64; + let mut user_comm_buffer_internal = 0 as u64; + let mut status_buffer = 0 as u64; + + let hob = Hob::Handoff(hob_list_info); + + // Walk through HOBs to find the PassDown HOB + for current_hob in &hob { + if let Hob::GuidHob(guid_hob, data) = current_hob { + if guid_hob.name == MM_SUPV_PASS_DOWN_HOB_GUID { + log::info!("Found MM Supervisor PassDown HOB"); + + // Verify data size + if data.len() < core::mem::size_of::() { + log::error!( + "PassDown HOB data too small: {} < {}", + data.len(), + core::mem::size_of::() + ); + return Err(PolicyInitError::InvalidPolicyData); + } + + // Cast to PassDown HOB data structure + let pass_down = unsafe { &*(data.as_ptr() as *const MmSupvPassDownHobData) }; + + // Copy packed struct fields to local variables to avoid unaligned access + // SAFETY: Direct access to read the addresses from the hob data. 
+ let revision = unsafe { core::ptr::addr_of!(pass_down.revision).read() }; + let mm_initialized_buffer = unsafe { core::ptr::addr_of!(pass_down.mm_initialized_buffer).read() }; + let firmware_policy_buffer = unsafe { core::ptr::addr_of!(pass_down.mm_supv_firmware_policy_buffer).read() }; + let firmware_policy_buffer_size = unsafe { core::ptr::addr_of!(pass_down.mm_supv_firmware_policy_buffer_size).read() }; + + // Extract communication buffer pointers + let cpl3_stack_buffer = unsafe { core::ptr::addr_of!(pass_down.mm_supervisor_cpl3_stack_base).read() }; + let cpl3_stack_buffer_size = unsafe { core::ptr::addr_of!(pass_down.mm_supervisor_cpl3_per_core_stack_size).read() }; + let mmi_entry_size = unsafe { core::ptr::addr_of!(pass_down.mmi_entrypoint_size).read() }; + let mmbase = unsafe { core::ptr::addr_of!(pass_down.bsp_mm_base_address).read() }; + + // Patch the SMI entry IDT descriptor to point to our interrupt handlers. + // The C relocation code patches each CPU's SMI handler template with fixup + // arrays. The Fixup64 array at index FIXUP64_SMI_HANDLER_IDTR contains the + // address of an IA32_DESCRIPTOR (gSmiHandlerIdtr). On SMI entry, the assembly + // loads that address and does `lidt [rax]`. We navigate the fixup structure + // in the BSP's SMI handler to find this pointer, then overwrite the descriptor + // with our Rust IDT's base/limit. 
+ Self::patch_smi_handler_idt(mmbase, mmi_entry_size); + + // Extract CPU private data pointer + let cpu_private = unsafe { core::ptr::addr_of!(pass_down.mm_supv_cpu_private).read() }; + + // Validate revision + if revision != MM_SUPV_PASS_DOWN_HOB_REVISION { + log::error!( + "Invalid PassDown HOB revision: {} (expected {})", + revision, + MM_SUPV_PASS_DOWN_HOB_REVISION + ); + return Err(PolicyInitError::InvalidRevision { + found: revision, + expected: MM_SUPV_PASS_DOWN_HOB_REVISION, + }); + } + + // Store the per-core initialized buffer address for use by all cores + if mm_initialized_buffer != 0 { + MM_INITIALIZED_BUFFER.call_once(|| mm_initialized_buffer); + log::info!("MM Initialized buffer set to 0x{:016x}", mm_initialized_buffer); + } else { + log::warn!("MM Initialized buffer is null in PassDown HOB"); + } + + // Store CPU private data pointer for SmmCoreEntryContext access + if cpu_private != 0 { + SMM_CPU_PRIVATE.call_once(|| cpu_private); + log::info!("SMM CPU Private data at 0x{:016x}", cpu_private); + } else { + log::warn!("SMM CPU Private data pointer is null in PassDown HOB"); + } + + log::info!( + "PassDown HOB: FirmwarePolicyBuffer=0x{:x}, Size=0x{:x}", + firmware_policy_buffer, + firmware_policy_buffer_size + ); + + // Validate firmware policy buffer + if firmware_policy_buffer == 0 + || firmware_policy_buffer_size == 0 + { + log::error!("Firmware policy buffer is null or empty"); + return Err(PolicyInitError::NullFirmwarePolicyBuffer); + } + + // Initialize the policy gate with the firmware policy buffer + let policy_ptr = firmware_policy_buffer as *const u8; + // allocate one page for the memory policy buffer which will be filled in by walk_page_table + let memory_policy_buffer = + mm_mem::PAGE_ALLOCATOR.allocate_pages(1).map_err(|e| { + log::error!("Failed to allocate page for memory policy buffer: {:?}", e); + PolicyInitError::MemoryAllocationFailed + })?; + // SAFETY: We validated that policy_ptr is non-null above and comes from + // the 
PassDown HOB which is set up by the MM IPL. + match unsafe { patina_mm_policy::PolicyGate::new(policy_ptr) } { + Ok(mut gate) => { + log::info!("Policy gate initialized successfully"); + // SAFETY: policy_ptr points to valid policy data as validated above. + unsafe { patina_mm_policy::dump_policy(policy_ptr) }; + + // Configure the memory policy buffer on the gate so that + // take_snapshot / verify_snapshot / fetch_n_update_policy + // can use it. + let mem_policy_max_count = UEFI_PAGE_SIZE as usize + / core::mem::size_of::(); + gate.set_memory_policy_buffer( + memory_policy_buffer as *mut MemDescriptorV1_0, + mem_policy_max_count, + ); + + // Store the initialized policy gate in the static variable for global access + POLICY_GATE.call_once(|| gate); + } + Err(e) => { + log::error!("Failed to create policy gate: {:?}", e); + return Err(PolicyInitError::InvalidPolicyData); + } + } + + // Init syscall interface + self.syscall_interface.init( + self.cpu_manager.max_cpus(), + cpl3_stack_buffer, + cpl3_stack_buffer_size.try_into().unwrap_or_else( + |err| panic!("Invalid CPL3 stack buffer size: {:?}", err) + ), + ).unwrap_or_else(|err| { + panic!("Failed to initialize syscall interface: {:?}", err); + }); + + // Read CR3 from hardware + let cr3: u64 = read_cr3(); + + // Walk page table and generate memory policy + let count = unsafe { + walk_page_table( + cr3, + memory_policy_buffer as *mut MemDescriptorV1_0, + UEFI_PAGE_SIZE as usize, + |base, size| is_buffer_inside_mmram(base, size), // Your MMRAM check + ) + }; + + if let Ok(descriptor_count) = count { + log::info!("Successfully generated {} memory policy descriptors", descriptor_count); + + // Initialize the unblocked memory tracker from the generated descriptors + // SAFETY: memory_policy_buffer points to valid MemDescriptorV1_0 array + // with descriptor_count entries, as we just filled it via walk_page_table + if let Err(e) = unsafe { + unblock_memory::UNBLOCKED_MEMORY_TRACKER.init_from_buffer( + 
memory_policy_buffer as *const MemDescriptorV1_0, + descriptor_count, + ) + } { + log::error!("Failed to initialize unblocked memory tracker: {:?}", e); + } else { + log::info!("Unblocked memory tracker initialized"); + // Dump regions for debugging + unblock_memory::UNBLOCKED_MEMORY_TRACKER.dump_regions(); + } + } else { + log::error!("Failed to generate memory policy descriptors: {:?}", count.err()); + } + + log::info!("Generated {} memory policy descriptors", count.unwrap_or(0)); + } else if guid_hob.name == MM_COMMON_REGION_HOB_GUID { + // This is a hob describing the supervisor communication region (user goes through a different one now) + log::info!("Found MM Common Region HOB"); + + // Cast to comm buffer HOB data structure + let supv_buffer_hob = unsafe { &*(data.as_ptr() as *const MmCommonRegionHobData) }; + supv_comm_buffer = unsafe { core::ptr::addr_of!(supv_buffer_hob.addr).read() }; + + let supv_comm_buffer_pages = unsafe { core::ptr::addr_of!(supv_buffer_hob.number_of_pages).read() }; + // safe multiplication with checked arithmetic to prevent overflow + supv_comm_buffer_size = supv_comm_buffer_pages.checked_mul(UEFI_PAGE_SIZE as u64).unwrap_or_else(|| { + panic!( + "Invalid supervisor common buffer size: {} pages * {} page size overflows", + supv_comm_buffer_pages, UEFI_PAGE_SIZE + ); + }); + + // Check to see if this region is outside of MMRAM and has the supervisor/read/write attribute + if !is_buffer_inside_mmram(supv_comm_buffer, supv_comm_buffer_size) { + match query_address_ownership(supv_comm_buffer, supv_comm_buffer_size) { + Some(PageOwnership::User) => { + panic!( + "Supervisor common buffer at 0x{:016x}-0x{:016x} is not marked as supervisor-owned", + supv_comm_buffer, + supv_comm_buffer + supv_comm_buffer_size + ); + }, + Some(PageOwnership::Supervisor) => { + // Do nothing + }, + None => { + panic!( + "Failed to query page ownership for supervisor common buffer at 0x{:016x}", + supv_comm_buffer + ); + } + }; + } + + // All checked out, 
make a copy of this supervisor to be used when handling incoming requests + supv_comm_buffer_internal = mm_mem::PAGE_ALLOCATOR.allocate_pages_with_type( + supv_comm_buffer_pages as usize, + mm_mem::AllocationType::Supervisor, + ).map_err(|e| { + log::error!("Failed to allocate internal supervisor common buffer: {:?}", e); + PolicyInitError::MemoryAllocationFailed + })?; + } else if guid_hob.name == MM_COMM_BUFFER_HOB_GUID { + // This is a hob describing the communication buffer + log::info!("Found MM Communication Buffer HOB"); + + // Cast to comm buffer HOB data structure + let comm_buffer_hob = data.as_ptr() as *mut MmCommonBufferHobData; + user_comm_buffer = unsafe { core::ptr::addr_of!((*comm_buffer_hob).physical_start).read() }; + + let user_comm_buffer_pages = unsafe { core::ptr::addr_of!((*comm_buffer_hob).number_of_pages).read() }; + // safe multiplication with checked arithmetic to prevent overflow + user_comm_buffer_size = user_comm_buffer_pages.checked_mul(UEFI_PAGE_SIZE as u64).unwrap_or_else(|| { + panic!( + "Invalid user common buffer size: {} pages * {} page size overflows", + user_comm_buffer_pages, UEFI_PAGE_SIZE + ); + }); + + // Check to see if this region is outside of MMRAM and has the supervisor/read/write attribute + if !is_buffer_inside_mmram(user_comm_buffer, user_comm_buffer_size) { + match query_address_ownership(user_comm_buffer, user_comm_buffer_size) { + Some(PageOwnership::User) => { + panic!( + "User common buffer at 0x{:016x}-0x{:016x} is not marked as user-owned", + user_comm_buffer, + user_comm_buffer + user_comm_buffer_size + ); + }, + Some(PageOwnership::Supervisor) => { + // Do nothing + }, + None => { + panic!( + "Failed to query page ownership for user common buffer at 0x{:016x}", + user_comm_buffer + ); + } + }; + } + + status_buffer = unsafe { core::ptr::addr_of!((*comm_buffer_hob).status_buffer).read() }; + + // Validate that the status buffer is also within the supervisor common buffer region (so that the user do not have 
direct access) + if !is_buffer_inside_mmram(status_buffer, core::mem::size_of::() as u64) { + match query_address_ownership(status_buffer, core::mem::size_of::() as u64) { + Some(PageOwnership::User) => { + panic!( + "User common buffer at 0x{:016x}-0x{:016x} is not marked as supervisor-exposed", + user_comm_buffer, + user_comm_buffer + user_comm_buffer_size + ); + }, + Some(PageOwnership::Supervisor) => { + // Do nothing + }, + None => { + panic!( + "Failed to query page ownership for status buffer at 0x{:016x}", + status_buffer + ); + } + }; + } + + // All checked out, make a copy of this buffer to be used when handling incoming requests + user_comm_buffer_internal = mm_mem::PAGE_ALLOCATOR.allocate_pages_with_type( + user_comm_buffer_pages as usize, + mm_mem::AllocationType::User, + ).map_err(|e| { + log::error!("Failed to allocate internal user common buffer: {:?}", e); + PolicyInitError::MemoryAllocationFailed + })?; + + // TODO: HACKHACK: this updates the hob passed to user module with the internal buffer address, which is a bit gross but it works for now. + // SAFETY: We have exclusive access to the HOB data structure at this point during initialization, and we're just updating the physical_start field to point to our internal buffer copy. 
+ unsafe { + // Disable page protection to allow writing to the HOB data structure if needed + let original_cr0 = disable_write_protection(); + + core::ptr::write_volatile( + core::ptr::addr_of_mut!((*comm_buffer_hob).physical_start), + user_comm_buffer_internal + ); + + // Restore original CR0 value to re-enable page protection + enable_write_protection(original_cr0); + } + } + } + } + + // allocate one page for the buffer that the supervisor will use to send messages to the user module + let supv_to_user_buffer = mm_mem::PAGE_ALLOCATOR.allocate_pages_with_type(1, mm_mem::AllocationType::User).map_err(|e| { + log::error!("Failed to allocate page for supervisor-to-user buffer: {:?}", e); + PolicyInitError::MemoryAllocationFailed + })?; + + // At this point, none of the following buffers may be zero. + if supv_comm_buffer == 0 || user_comm_buffer == 0 || status_buffer == 0 || supv_to_user_buffer == 0 { + log::error!("One or more communication buffers are not properly initialized"); + return Err(PolicyInitError::MissingCommunicationBuffer); + } + + // Store communication buffer configuration. + COMM_BUFFER_CONFIG.call_once(|| CommBufferConfig { + supv_comm_buffer, + supv_comm_buffer_internal, + supv_comm_buffer_size, + user_comm_buffer, + user_comm_buffer_internal, + user_comm_buffer_size, + status_buffer, + supv_to_user_buffer, + supv_to_user_buffer_size: UEFI_PAGE_SIZE as u64, + }); + log::info!( + "Comm buffers: supv=0x{:x}/0x{:x} size=0x{:x}, user=0x{:x}/0x{:x} size=0x{:x}, status=0x{:x}", + supv_comm_buffer, supv_comm_buffer_internal, supv_comm_buffer_size, + user_comm_buffer, user_comm_buffer_internal, user_comm_buffer_size, + status_buffer + ); + + + Ok(()) + } + + /// The main request serving loop for the BSP. + /// It manages other CPUs and processes pending requests from the communication buffer. 
+ /// + /// This function reads the MM_COMM_BUFFER_STATUS structure to determine if there's a pending request + /// and whether it targets the Supervisor or User module. + /// + /// - If targeting User: copies user comm buffer to internal, then demotes to user entry point + /// - If targeting Supervisor: dispatches to the request dispatcher + fn bsp_request_loop(&self, cpu_index: usize) { + // Get communication buffer configuration + let config = match COMM_BUFFER_CONFIG.get() { + Some(c) => c, + None => { + // Not yet initialized, nothing to process + return; + } + }; + + // Check status buffer for pending request + if config.status_buffer == 0 { + return; + } + + // Read the MM_COMM_BUFFER_STATUS structure + // SAFETY: status_buffer is provided by MM IPL and is guaranteed valid + let status = unsafe { + core::ptr::read_volatile(config.status_buffer as *const MmCommBufferStatus) + }; + let target = RequestTarget::from(&status); + + log::trace!( + "Processing request: valid={}, talk_to_supervisor={}, target={:?}", + status.is_comm_buffer_valid, + status.talk_to_supervisor, + target + ); + + match target { + RequestTarget::None => { + // No pending request + } + RequestTarget::User => { + // Request targets the User module + self.process_user_request(config, &status, cpu_index); + } + RequestTarget::Supervisor => { + // Request targets the Supervisor + self.process_supervisor_request(config, &status, cpu_index); + } + } + } + + /// Process a request targeting the User module. + /// + /// This function implements the user-mode MMI dispatch pathway: + /// 1. Builds a fresh `EfiMmEntryContext` with the current CPU index and CPU count + /// 2. Copies the `EfiMmEntryContext` into the supervisor-to-user data buffer + /// 3. Appends the `MmCommBufferStatus` immediately after the context + /// 4. For synchronous MMIs, copies the user comm buffer to the internal copy + /// 5. Demotes to the user entry point via `invoke_demoted_routine` + /// 6. 
On return, copies back the user comm buffer and reads the updated status + fn process_user_request(&self, config: &CommBufferConfig, status: &MmCommBufferStatus, cpu_index: usize) { + log::trace!("Processing User request..."); + + // Validate buffers + if config.user_comm_buffer == 0 || config.user_comm_buffer_internal == 0 { + log::error!("User communication buffer not configured"); + return; + } + + if config.supv_to_user_buffer == 0 { + log::error!("Supervisor-to-user data buffer not configured"); + return; + } + + // Get user entry point + let user_entry = match USER_ENTRY_POINT.get() { + Some(&entry) if entry != 0 => entry, + _ => { + log::error!("User entry point not configured, cannot demote"); + return; + } + }; + + // Demote to user entry point to process the request + let cpl3_stack = match self.syscall_interface.get_cpl3_stack(cpu_index) { + Ok(stack) => stack, + Err(e) => { + log::error!("Failed to get CPL3 stack for CPU {}: {:?}", cpu_index, e); + return; + } + }; + + // Build a fresh EfiMmEntryContext with only the fields the user actually needs. + // The legacy C structure carried pointers (mm_startup_this_ap, cpu_save_state, + // cpu_save_state_size) that are meaningless in the Rust supervisor model — the + // user module accesses those services through syscalls instead. + let entry_context = EfiMmEntryContext { + mm_startup_this_ap: 0, + currently_executing_cpu: cpu_index as u64, + number_of_cpus: self.cpu_manager.registered_count() as u64, + cpu_save_state_size: 0, + cpu_save_state: 0, + }; + + // Copy the EfiMmEntryContext + MmCommBufferStatus into the supervisor-to-user + // data buffer so the user can read processor information after demotion. 
+ let context_size = core::mem::size_of::(); + let status_size = core::mem::size_of::(); + + // Validate the supervisor-to-user buffer is large enough for context + status + if (config.supv_to_user_buffer_size as usize) < context_size + status_size { + log::error!( + "Supervisor-to-user buffer too small: {} < {} (context) + {} (status)", + config.supv_to_user_buffer_size, + context_size, + status_size + ); + return; + } + + // SAFETY: supv_to_user_buffer is valid and large enough, verified above. + unsafe { + // First disable SMAP to allow the supervisor to write to the user buffer + disable_smap(); + // Copy the EfiMmEntryContext to the start of the supervisor-to-user buffer + core::ptr::copy_nonoverlapping( + &entry_context as *const EfiMmEntryContext as *const u8, + config.supv_to_user_buffer as *mut u8, + context_size, + ); + + // Copy the MmCommBufferStatus right after the context + core::ptr::copy_nonoverlapping( + status as *const MmCommBufferStatus as *const u8, + (config.supv_to_user_buffer as *mut u8).add(context_size), + status_size, + ); + // Re-enable SMAP after writing to the user buffer + enable_smap(); + } + + // Determine whether this is synchronous or asynchronous request + let sync_mmi = status.is_comm_buffer_valid; + + if sync_mmi != 0 { + // Copy user buffer to user internal buffer for processing in Ring 3 + // SAFETY: Buffers are provided by MM IPL and are guaranteed valid + unsafe { + // Disable SMAP to allow copying from the user buffer + disable_smap(); + core::ptr::copy_nonoverlapping( + config.user_comm_buffer as *const u8, + config.user_comm_buffer_internal as *mut u8, + config.user_comm_buffer_size as usize, + ); + // Re-enable SMAP after copying from the user buffer + enable_smap(); + } + log::trace!( + "Copied {} bytes from user buffer 0x{:x} to internal 0x{:x}", + config.user_comm_buffer_size, + config.user_comm_buffer, + config.user_comm_buffer_internal + ); + } + + // Invoke the demoted user entry point with: + // arg1: 
UserCommandType::UserRequest (command type) + // arg2: supv_to_user_buffer (pointer to EfiMmEntryContext + MmCommBufferStatus) + // arg3: sizeof(EfiMmEntryContext) (size of the context portion) + let ret = unsafe { + invoke_demoted_routine( + cpu_index, + user_entry, + cpl3_stack, + 3, + UserCommandType::UserRequest as u64, + config.supv_to_user_buffer, + context_size as u64, + ) + }; + log::trace!("Returned from user request with value: 0x{}", ret); + + // Copy the response from the internal buffer back to the user buffer + // SAFETY: Buffers are provided by MM IPL and are guaranteed valid + if sync_mmi != 0 { + unsafe { + // Disable SMAP to allow copying back to the user buffer + disable_smap(); + core::ptr::copy_nonoverlapping( + config.user_comm_buffer_internal as *const u8, + config.user_comm_buffer as *mut u8, + config.user_comm_buffer_size as usize, + ); + // Re-enable SMAP after copying back to the user buffer + enable_smap(); + } + } + + // Read the updated MmCommBufferStatus back from the supervisor-to-user buffer + // (the user may have modified return_status and return_buffer_size) + // SAFETY: supv_to_user_buffer is valid and the status is at offset context_size + let returned_status = unsafe { + // Again, disable SMAP to allow reading from the user buffer + disable_smap(); + let status = core::ptr::read( + (config.supv_to_user_buffer as *const u8).add(context_size) as *const MmCommBufferStatus, + ); + // Re-enable SMAP after reading from the user buffer + enable_smap(); + status + }; + + // Write the returned status back to the supervisor's status buffer, clearing + // is_comm_buffer_valid to indicate processing is complete + // SAFETY: status_buffer is valid and writable + unsafe { + let status_ptr = config.status_buffer as *mut MmCommBufferStatus; + let mut final_status = returned_status; + final_status.is_comm_buffer_valid = 0; + core::ptr::write_volatile(status_ptr, final_status); + } + } + + /// Process a request targeting the Supervisor. 
+ /// + /// Parses the [`EfiMmCommunicateHeader`] from the supervisor communication buffer, + /// matches the header GUID against the [`SUPERVISOR_MMI_HANDLERS`] distributed slice, + /// and invokes the first matching handler. Handlers are registered at build time, + /// allowing platforms to link in additional handlers without modifying the core. + /// + /// ## Dispatch Flow + /// + /// 1. Zero the internal buffer and copy the external supervisor buffer into it + /// 2. Parse the `EfiMmCommunicateHeader` (GUID + message length) from the internal buffer + /// 3. Validate message length does not exceed the buffer size + /// 4. Iterate [`SUPERVISOR_MMI_HANDLERS`] to find a handler matching the header GUID + /// 5. Call the handler with a pointer to the data payload and mutable size + /// 6. Update the status buffer with return status and total response size + /// 7. Copy the internal buffer back to the external buffer + fn process_supervisor_request(&self, config: &CommBufferConfig, status: &MmCommBufferStatus, cpu_index: usize) { + log::trace!("Processing Supervisor request on CPU {}...", cpu_index); + + // Validate buffers + if config.supv_comm_buffer == 0 || config.supv_comm_buffer_internal == 0 { + log::error!("Supervisor communication buffer not configured"); + return; + } + + let buffer_size = config.supv_comm_buffer_size as usize; + + // Zero the internal buffer then copy the external supervisor buffer into it + // SAFETY: Buffers are provided by MM IPL and are guaranteed valid and non-overlapping + unsafe { + core::ptr::write_bytes(config.supv_comm_buffer_internal as *mut u8, 0, buffer_size); + core::ptr::copy_nonoverlapping( + config.supv_comm_buffer as *const u8, + config.supv_comm_buffer_internal as *mut u8, + buffer_size, + ); + } + + // Parse the EfiMmCommunicateHeader from the internal buffer + if buffer_size < EfiMmCommunicateHeader::size() { + log::error!( + "Supervisor buffer too small for communicate header: {} < {}", + buffer_size, + 
EfiMmCommunicateHeader::size() + ); + self.write_supv_status(config, status, efi::Status::BAD_BUFFER_SIZE, 0); + return; + } + + // SAFETY: We verified the buffer is large enough for the header. + // The header is packed so we use read_unaligned. + let header = unsafe { + core::ptr::read_unaligned(config.supv_comm_buffer_internal as *const EfiMmCommunicateHeader) + }; + + let message_length = header.message_length(); + + // Validate message length doesn't exceed the buffer + if message_length > buffer_size.saturating_sub(EfiMmCommunicateHeader::size()) { + log::error!( + "Message length 0x{:x} exceeds available buffer space 0x{:x}", + message_length, + buffer_size - EfiMmCommunicateHeader::size() + ); + self.write_supv_status(config, status, efi::Status::BAD_BUFFER_SIZE, 0); + return; + } + + // Compute pointer to the data payload (after the header) + let data_ptr = unsafe { + (config.supv_comm_buffer_internal as *mut u8).add(EfiMmCommunicateHeader::size()) + }; + let mut data_size = message_length; + + // Dispatch: iterate the SUPERVISOR_MMI_HANDLERS distributed slice to find a match + let handler_guid = header.header_guid(); + let mut dispatch_status = efi::Status::NOT_FOUND; + + for handler in SUPERVISOR_MMI_HANDLERS.iter() { + if patina::Guid::from_ref(&handler.handler_guid) == handler_guid { + log::trace!( + "Dispatching supervisor request to handler '{}' (GUID: {:?})", + handler.name, + handler.handler_guid + ); + dispatch_status = (handler.handle)(data_ptr, &mut data_size); + break; + } + } + + if dispatch_status == efi::Status::NOT_FOUND { + log::warn!( + "No handler found for supervisor request GUID: {:?}", + handler_guid + ); + } + + // Compute the total response size (header + data) for the copy-back + let total_response_size = data_size + EfiMmCommunicateHeader::size(); + + // Copy the (possibly modified) internal buffer back to the external buffer + if total_response_size <= buffer_size { + // SAFETY: Both buffers are valid and total_response_size is 
within bounds + unsafe { + core::ptr::copy_nonoverlapping( + config.supv_comm_buffer_internal as *const u8, + config.supv_comm_buffer as *mut u8, + total_response_size, + ); + } + } else { + log::error!( + "Response size 0x{:x} exceeds buffer capacity 0x{:x}", + total_response_size, + buffer_size + ); + } + log::trace!( + "Copied {} bytes from internal buffer 0x{:x} back to external 0x{:x}", + total_response_size, + config.supv_comm_buffer_internal, + config.supv_comm_buffer + ); + + // Update the status buffer with return status and response size + let return_status = if dispatch_status == efi::Status::SUCCESS { + efi::Status::SUCCESS + } else { + efi::Status::NOT_FOUND + }; + self.write_supv_status(config, status, return_status, total_response_size as u64); + } + + /// Write the supervisor status buffer after processing a supervisor request. + /// + /// Clears `is_comm_buffer_valid` and `talk_to_supervisor`, sets return status and size. + fn write_supv_status( + &self, + config: &CommBufferConfig, + _status: &MmCommBufferStatus, + return_status: efi::Status, + return_buffer_size: u64, + ) { + // SAFETY: status_buffer is valid and writable, set up by MM IPL + unsafe { + let status_ptr = config.status_buffer as *mut MmCommBufferStatus; + let updated = MmCommBufferStatus { + is_comm_buffer_valid: 0, + talk_to_supervisor: 0, + _padding: [0; 6], + return_status: return_status.as_usize() as u64, + return_buffer_size, + }; + core::ptr::write_volatile(status_ptr, updated); + } + } + + /// The holding pen for APs. + /// + /// APs wait here, polling their mailbox for commands from the BSP. + /// The loop exits when the AP receives a `Return` command. 
+ fn ap_holding_pen(&'static self, cpu_id: u32) { + log::trace!("AP (CPU {}) in holding pen, polling mailbox...", cpu_id); + + loop { + // Check mailbox for commands + if let Some(command) = self.mailbox_manager.check_mailbox(cpu_id) { + log::trace!("AP (CPU {}) received command: {:?}", cpu_id, command); + + // Execute the command + let response = self.execute_ap_command(cpu_id, &command); + + // Post the response + self.mailbox_manager.post_response(cpu_id, response); + + // Break out of the holding pen on Return + if matches!(command, ApCommand::Return) { + log::trace!("AP (CPU {}) exiting holding pen", cpu_id); + break; + } + } + } + } + + /// Execute a command received by an AP. + fn execute_ap_command(&self, cpu_id: u32, command: &ApCommand) -> ApResponse { + match *command { + ApCommand::RunProcedure { procedure, argument } => { + self.run_procedure_on_ap(cpu_id, procedure, argument) + } + ApCommand::Return => { + log::trace!("AP (CPU {}) received return command", cpu_id); + ApResponse::Success + } + } + } + + /// Run a procedure on an AP, demoting to user mode if the procedure is in user-owned range. + /// + /// This is the AP-side handler for `ApCommand::RunProcedure`. It mirrors the C + /// `ProcedureWrapper` logic: inspects the procedure pointer ownership and either + /// calls it directly (supervisor-owned) or demotes to Ring 3 (user-owned). + fn run_procedure_on_ap(&self, cpu_id: u32, procedure: u64, argument: u64) -> ApResponse { + log::trace!( + "AP (CPU {}) running procedure 0x{:x} with arg 0x{:x}", + cpu_id, procedure, argument + ); + + // Determine if the procedure is in user-owned (Ring 3) range by querying the + // page table via the centralized helper. 
+ let is_user_range = match query_address_ownership(procedure, core::mem::size_of::() as u64) { + Some(PageOwnership::User) => true, + Some(PageOwnership::Supervisor) => false, + None => { + log::error!( + "AP (CPU {}) failed to query ownership for 0x{:x} (unmapped or page table not ready)", + cpu_id, procedure + ); + return ApResponse::Error(efi::Status::DEVICE_ERROR.as_usize() as u32); + } + }; + + if is_user_range { + // Resolve the cpu_index (slot index) for this APIC ID + let cpu_index = match self.cpu_manager.find_cpu_index(cpu_id) { + Some(idx) => idx, + None => { + log::error!("AP (CPU {}) has no registered slot, cannot demote", cpu_id); + return ApResponse::Error(efi::Status::DEVICE_ERROR.as_usize() as u32); + } + }; + + // Get the CPL3 stack for this CPU + let cpl3_stack = match self.syscall_interface.get_cpl3_stack(cpu_index) { + Ok(stack) => stack, + Err(e) => { + log::error!("AP (CPU {}) failed to get CPL3 stack: {:?}", cpu_id, e); + return ApResponse::Error(efi::Status::DEVICE_ERROR.as_usize() as u32); + } + }; + + let user_entry = match USER_ENTRY_POINT.get() { + Some(&entry) if entry != 0 => entry, + _ => { + log::error!("User entry point not configured, cannot demote AP (CPU {})", cpu_id); + return ApResponse::Error(efi::Status::DEVICE_ERROR.as_usize() as u32); + } + }; + + // Demote to user mode and call the procedure + // The procedure signature is: void (EFIAPI *)(void *ProcedureArgument) + log::trace!( + "AP (CPU {}) demoting to user: proc=0x{:x}, stack=0x{:x}, arg=0x{:x}", + cpu_id, procedure, cpl3_stack, argument + ); + + let _ret = unsafe { + invoke_demoted_routine( + cpu_index, + user_entry, + cpl3_stack, + 3, + UserCommandType::UserApProcedure as u64, + procedure, + argument, + ) + }; + + log::trace!("AP (CPU {}) returned from demoted procedure: 0x{:x}", cpu_id, _ret); + ApResponse::Success + } else { + // Supervisor-owned: call directly in Ring 0 + log::trace!("AP (CPU {}) calling supervisor procedure directly at 0x{:x}", cpu_id, 
procedure); + + // SAFETY: The BSP validated the procedure pointer before dispatching. + // The procedure follows the EFI AP_PROCEDURE calling convention. + type EfiApProcedure = unsafe extern "efiapi" fn(*mut core::ffi::c_void); + let proc_fn: EfiApProcedure = unsafe { core::mem::transmute(procedure) }; + unsafe { proc_fn(argument as *mut core::ffi::c_void) }; + + ApResponse::Success + } + } + + /// Type-erased trampoline for AP startup, called from the syscall dispatcher. + /// + /// This function is monomorphized for the concrete `P: PlatformInfo` type + /// and stored as a `fn(u64, u64, u64) -> u64` in [`AP_STARTUP_FN`]. + /// + /// # Arguments + /// + /// * `cpu_index` - The EFI processor index (slot index) of the target AP + /// * `procedure` - The procedure function pointer to execute on the AP + /// * `argument` - The argument to pass to the procedure + /// + /// # Returns + /// + /// 0 on success, or an EFI status code (as u64) on failure. + fn start_ap_procedure_trampoline(cpu_index: u64, procedure: u64, argument: u64) -> u64 { + let core = Self::instance(); + core.start_ap_procedure(cpu_index, procedure, argument) + } + + /// Validate and dispatch a procedure to a specific AP. + /// + /// Performs validation checks similar to the C `InternalSmmStartupThisAp`: + /// 1. CPU index is within range of registered CPUs + /// 2. CPU at that index is present (registered) + /// 3. CPU is not the BSP + /// 4. Procedure pointer is non-null + /// 5. Sends the command via the mailbox (fails if AP is busy) + /// 6. Waits for the AP to complete (blocking) + fn start_ap_procedure(&self, cpu_index: u64, procedure: u64, argument: u64) -> u64 { + let cpu_index = cpu_index as usize; + + // 1. 
Validate CPU index is within registered count + let registered = self.cpu_manager.registered_count(); + if cpu_index >= registered { + log::error!( + "START_AP: CpuIndex({}) >= registered_count({})", + cpu_index, registered + ); + return efi::Status::INVALID_PARAMETER.as_usize() as u64; + } + + // 2. Look up the APIC ID for this index + let cpu_id = match self.cpu_manager.get_cpu_id_by_index(cpu_index) { + Some(id) => id, + None => { + log::error!("START_AP: CpuIndex({}) has no registered CPU", cpu_index); + return efi::Status::INVALID_PARAMETER.as_usize() as u64; + } + }; + + // 3. Check that the target is not the BSP + if self.cpu_manager.is_bsp(cpu_id) { + log::error!("START_AP: CpuIndex({}) is the BSP, cannot start as AP", cpu_index); + return efi::Status::INVALID_PARAMETER.as_usize() as u64; + } + + // 4. Validate procedure pointer is non-null + if procedure == 0 { + log::error!("START_AP: Null procedure pointer"); + return efi::Status::INVALID_PARAMETER.as_usize() as u64; + } + + // 5. Send the RunProcedure command to the AP via mailbox + // This will fail if the AP's mailbox is not empty (AP is busy). + let command = ApCommand::RunProcedure { procedure, argument }; + if let Err(()) = self.mailbox_manager.send_command(cpu_id, command) { + log::error!( + "START_AP: AP (CPU {}, index {}) is busy or mailbox unavailable", + cpu_id, cpu_index + ); + return efi::Status::INVALID_PARAMETER.as_usize() as u64; + } + + log::trace!( + "START_AP: Dispatched proc=0x{:x} arg=0x{:x} to CPU {} (index {})", + procedure, argument, cpu_id, cpu_index + ); + + // 6. 
Wait for the AP to complete (blocking mode) + // Use a generous timeout (10 seconds = 10_000_000 microseconds) + const AP_TIMEOUT_US: u64 = 10_000_000; + match self.mailbox_manager.wait_response(cpu_id, AP_TIMEOUT_US) { + Some(ApResponse::Success) => { + log::trace!("START_AP: AP (CPU {}) completed successfully", cpu_id); + efi::Status::SUCCESS.as_usize() as u64 + } + Some(ApResponse::Error(code)) => { + log::error!("START_AP: AP (CPU {}) returned error: 0x{:x}", cpu_id, code); + code as u64 + } + Some(ApResponse::Busy) => { + log::error!("START_AP: AP (CPU {}) reported busy", cpu_id); + efi::Status::NOT_READY.as_usize() as u64 + } + Some(ApResponse::None) | None => { + log::error!("START_AP: AP (CPU {}) timed out or no response", cpu_id); + efi::Status::TIMEOUT.as_usize() as u64 + } + } + } + + /// Get the CPU manager. + pub fn cpu_manager(&self) -> &CpuManager<{ P::MAX_CPU_COUNT }> { + &self.cpu_manager + } + + /// Get the mailbox manager. + pub fn mailbox_manager(&self) -> &MailboxManager<{ P::MAX_CPU_COUNT }> { + &self.mailbox_manager + } + + /// Send a command to a specific AP. + /// + /// Returns `Ok(())` if the command was successfully posted to the mailbox, + /// or `Err(())` if the AP is not available or the mailbox is full. + pub fn send_ap_command(&self, cpu_id: u32, command: ApCommand) -> Result<(), ()> { + self.mailbox_manager.send_command(cpu_id, command) + } + + /// Wait for a response from a specific AP. + /// + /// Returns the response from the AP, or `None` if timeout or error. + pub fn wait_ap_response(&self, cpu_id: u32, timeout_us: u64) -> Option { + self.mailbox_manager.wait_response(cpu_id, timeout_us) + } + + /// Check if the supervisor has been initialized. + pub fn is_initialized(&self) -> bool { + self.initialized.load(Ordering::Acquire) + } +} + +impl Default for MmSupervisorCore

+where + [(); P::MAX_CPU_COUNT]:, +{ + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + struct TestPlatform; + + impl CpuInfo for TestPlatform {} + + impl PlatformInfo for TestPlatform { + type CpuInfo = Self; + const MAX_CPU_COUNT: usize = 4; + } + + #[test] + fn test_cpu_info_defaults() { + assert_eq!(::ap_poll_timeout_us(), 1000); + } + + #[test] + fn test_supervisor_creation() { + let _supervisor: MmSupervisorCore = MmSupervisorCore::new(); + // Just verify it compiles and creates without panic + } + + #[test] + fn test_supervisor_is_const() { + // Verify we can create a static instance (no heap allocation) + static _SUPERVISOR: MmSupervisorCore = MmSupervisorCore::new(); + } +} diff --git a/patina_mm_supervisor_core/src/mailbox.rs b/patina_mm_supervisor_core/src/mailbox.rs new file mode 100644 index 000000000..43ca4a65e --- /dev/null +++ b/patina_mm_supervisor_core/src/mailbox.rs @@ -0,0 +1,645 @@ +//! Mailbox Module +//! +//! This module provides the mailbox infrastructure for BSP-AP communication. +//! Each AP has a dedicated mailbox that the BSP uses to send commands and receive responses. +//! +//! ## Architecture +//! +//! The mailbox system uses a simple producer-consumer model: +//! - BSP writes commands to AP mailboxes +//! - APs poll their mailboxes for commands +//! - APs write responses back +//! - BSP reads responses when ready +//! +//! ## Memory Model +//! +//! This module does not perform heap allocation. All structures use fixed-size arrays +//! with compile-time constants provided via const generics. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +use core::sync::atomic::{AtomicU32, AtomicU64, Ordering}; + +use crate::perf_timer; + +/// Commands that can be sent from BSP to APs via the mailbox. +/// +/// APs sit in a holding pen polling for commands. 
/// When no command is pending
/// the AP simply keeps spinning - there is no explicit "no-op" variant.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ApCommand {
    /// Run a procedure on the AP, with potential demotion to user mode.
    ///
    /// The AP checks buffer ownership and demotes to Ring 3 if the procedure
    /// lives in user-owned memory, otherwise calls it directly in Ring 0.
    RunProcedure {
        /// The procedure function pointer.
        procedure: u64,
        /// The argument to pass to the procedure.
        argument: u64,
    },
    /// Exit the holding pen and return to the caller.
    Return,
}

impl ApCommand {
    /// Converts the command to a u64 tag for atomic storage.
    fn to_u64(self) -> u64 {
        match self {
            ApCommand::RunProcedure { .. } => 1,
            ApCommand::Return => 2,
        }
    }

    /// Converts a u64 tag back to a command.
    ///
    /// `procedure` and `argument` are only meaningful for the `RunProcedure` tag;
    /// an unknown tag yields `None`.
    fn from_u64(tag: u64, procedure: u64, argument: u64) -> Option<Self> {
        match tag & 0xFF {
            1 => Some(ApCommand::RunProcedure { procedure, argument }),
            2 => Some(ApCommand::Return),
            _ => None,
        }
    }
}

/// Responses from APs to the BSP.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ApResponse {
    /// No response yet (mailbox empty).
    None,
    /// Command executed successfully.
    Success,
    /// Command failed with an error code.
    Error(u32),
    /// AP is busy and cannot accept commands.
    Busy,
}

impl ApResponse {
    /// Converts the response to a u64 for atomic storage.
    ///
    /// The discriminant lives in the low byte; an `Error` code is packed into
    /// the upper 32 bits.
    fn to_u64(self) -> u64 {
        match self {
            ApResponse::None => 0,
            ApResponse::Success => 1,
            ApResponse::Error(code) => 2 | ((code as u64) << 32),
            ApResponse::Busy => 3,
        }
    }

    /// Converts a u64 back to a response. Unknown discriminants decode as `None`.
    fn from_u64(value: u64) -> Self {
        let resp_type = value & 0xFF;
        match resp_type {
            0 => ApResponse::None,
            1 => ApResponse::Success,
            2 => {
                let code = (value >> 32) as u32;
                ApResponse::Error(code)
            }
            3 => ApResponse::Busy,
            _ => ApResponse::None,
        }
    }
}

/// Mailbox state flags.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u32)]
enum MailboxState {
    /// Mailbox is empty, no pending command.
    Empty = 0,
    /// Mailbox has a command waiting to be processed.
    CommandPending = 1,
    /// Command is being processed.
    Processing = 2,
    /// Response is ready for BSP to read.
    ResponseReady = 3,
}

impl From<u32> for MailboxState {
    fn from(value: u32) -> Self {
        match value {
            0 => MailboxState::Empty,
            1 => MailboxState::CommandPending,
            2 => MailboxState::Processing,
            3 => MailboxState::ResponseReady,
            // Unknown raw values are treated as Empty rather than panicking.
            _ => MailboxState::Empty,
        }
    }
}

/// A single AP's mailbox for communication with the BSP.
#[repr(C, align(64))] // Cache-line aligned to avoid false sharing
pub struct ApMailbox {
    /// Current state of the mailbox.
    state: AtomicU32,
    /// The command tag (discriminant packed into u64).
    command: AtomicU64,
    /// The procedure function pointer (for RunProcedure).
    procedure: AtomicU64,
    /// The argument to pass to the procedure (for RunProcedure).
    argument: AtomicU64,
    /// The response data (packed into u64).
    response: AtomicU64,
    /// The CPU ID this mailbox is assigned to (u32::MAX = unassigned).
    assigned_cpu: AtomicU32,
    /// Padding to ensure cache-line alignment.
    _padding: [u8; 12],
}

impl ApMailbox {
    /// Creates a new empty mailbox.
    pub const fn new() -> Self {
        Self {
            state: AtomicU32::new(MailboxState::Empty as u32),
            command: AtomicU64::new(0),
            procedure: AtomicU64::new(0),
            argument: AtomicU64::new(0),
            response: AtomicU64::new(0),
            assigned_cpu: AtomicU32::new(u32::MAX),
            _padding: [0; 12],
        }
    }

    /// Gets the current state of the mailbox.
    fn state(&self) -> MailboxState {
        self.state.load(Ordering::Acquire).into()
    }

    /// Gets the assigned CPU ID, if any.
    pub fn assigned_cpu(&self) -> Option<u32> {
        let cpu = self.assigned_cpu.load(Ordering::Acquire);
        if cpu == u32::MAX { None } else { Some(cpu) }
    }

    /// Assigns this mailbox to a CPU.
    ///
    /// Returns true if assignment succeeded, false if already assigned.
    fn assign(&self, cpu_id: u32) -> bool {
        self.assigned_cpu
            .compare_exchange(u32::MAX, cpu_id, Ordering::AcqRel, Ordering::Acquire)
            .is_ok()
    }

    /// Checks if a command is pending (called by AP).
    pub fn has_pending_command(&self) -> bool {
        self.state() == MailboxState::CommandPending
    }

    /// Gets the pending command (called by AP).
    ///
    /// Returns `Some(command)` if a command is pending, `None` otherwise.
    /// This also transitions the mailbox to the Processing state.
    pub fn take_command(&self) -> Option<ApCommand> {
        // Try to transition from CommandPending to Processing
        let result = self.state.compare_exchange(
            MailboxState::CommandPending as u32,
            MailboxState::Processing as u32,
            Ordering::AcqRel,
            Ordering::Acquire,
        );

        if result.is_ok() {
            let tag = self.command.load(Ordering::Acquire);
            let proc = self.procedure.load(Ordering::Acquire);
            let arg = self.argument.load(Ordering::Acquire);
            ApCommand::from_u64(tag, proc, arg)
        } else {
            None
        }
    }

    /// Posts a response (called by AP).
    pub fn post_response(&self, response: ApResponse) {
        self.response.store(response.to_u64(), Ordering::Release);
        self.state.store(MailboxState::ResponseReady as u32, Ordering::Release);
    }

    /// Sends a command to this mailbox (called by BSP).
    ///
    /// Returns `true` if the command was successfully posted, `false` if the mailbox is busy.
    ///
    /// The payload (command tag, procedure, argument) is written first with `Relaxed`
    /// ordering, then `state` is set to `CommandPending` with `Release` ordering.
    /// The AP acquires `state`, which guarantees it sees the fully-written payload.
    pub fn send_command(&self, command: ApCommand) -> bool {
        // Only allow sending if mailbox is empty
        let result = self.state.compare_exchange(
            MailboxState::Empty as u32,
            MailboxState::Empty as u32, // keep Empty while we fill the payload
            Ordering::AcqRel,
            Ordering::Acquire,
        );

        if result.is_ok() {
            // Write all payload fields before publishing.
            // Relaxed is fine here — the Release store to `state` below
            // will fence all prior writes.
            match command {
                ApCommand::RunProcedure { procedure, argument } => {
                    self.procedure.store(procedure, Ordering::Relaxed);
                    self.argument.store(argument, Ordering::Relaxed);
                }
                ApCommand::Return => {
                    self.procedure.store(0, Ordering::Relaxed);
                    self.argument.store(0, Ordering::Relaxed);
                }
            }
            self.command.store(command.to_u64(), Ordering::Relaxed);

            // Publish: the AP polls on `state` with Acquire, so this
            // Release ensures it sees the payload written above.
            self.state.store(MailboxState::CommandPending as u32, Ordering::Release);
            true
        } else {
            false
        }
    }

    /// Gets the response from this mailbox (called by BSP).
    ///
    /// Returns the response and clears the mailbox if a response is ready.
    pub fn get_response(&self) -> Option<ApResponse> {
        // Only read if response is ready
        let result = self.state.compare_exchange(
            MailboxState::ResponseReady as u32,
            MailboxState::Empty as u32,
            Ordering::AcqRel,
            Ordering::Acquire,
        );

        if result.is_ok() {
            let resp = self.response.load(Ordering::Acquire);
            Some(ApResponse::from_u64(resp))
        } else {
            None
        }
    }

    /// Spins until the mailbox reaches the `Empty` state, draining any pending response.
    ///
    /// This is analogous to the C code's `WaitForAllAPsNotBusy(TRUE)` which
    /// acquires+releases each AP's Busy spinlock, blocking until the AP is done.
    ///
    /// If the mailbox is in `ResponseReady`, the response is consumed to transition
    /// it back to `Empty`. If it is in `CommandPending` or `Processing`, this spins
    /// until the AP finishes and posts a response (which is then drained).
    fn drain_to_empty(&self) {
        loop {
            match self.state() {
                MailboxState::Empty => return,
                MailboxState::ResponseReady => {
                    // Consume the response to transition back to Empty.
                    let _ = self.get_response();
                }
                _ => {
                    // CommandPending or Processing — AP is still working.
                    core::hint::spin_loop();
                }
            }
        }
    }

    /// Checks if the mailbox is empty (no pending work).
    pub fn is_empty(&self) -> bool {
        self.state() == MailboxState::Empty
    }

    /// Checks if a response is ready.
    pub fn has_response(&self) -> bool {
        self.state() == MailboxState::ResponseReady
    }
}

impl Default for ApMailbox {
    fn default() -> Self {
        Self::new()
    }
}

/// Manager for all AP mailboxes.
///
/// Uses fixed-size arrays with const generic for maximum AP count.
///
/// ## Const Generic Parameters
///
/// * `MAX_APS` - The maximum number of APs that can be managed.
pub struct MailboxManager<const MAX_APS: usize> {
    /// Mailboxes - fixed size array.
    mailboxes: [ApMailbox; MAX_APS],
    /// Number of assigned mailboxes.
    assigned_count: AtomicU32,
}

impl<const MAX_APS: usize> MailboxManager<MAX_APS> {
    /// Creates a new mailbox manager.
    ///
    /// This is a const fn and performs no heap allocation.
    pub const fn new() -> Self {
        Self {
            mailboxes: [const { ApMailbox::new() }; MAX_APS],
            assigned_count: AtomicU32::new(0),
        }
    }
}

// Finds or allocates a mailbox for the specified CPU ID.
+ fn get_or_assign_mailbox(&self, cpu_id: u32) -> Option<&ApMailbox> { + // First, check if already assigned + for mailbox in &self.mailboxes { + if mailbox.assigned_cpu() == Some(cpu_id) { + return Some(mailbox); + } + } + + // Find an unassigned mailbox + for mailbox in &self.mailboxes { + if mailbox.assign(cpu_id) { + self.assigned_count.fetch_add(1, Ordering::SeqCst); + log::trace!("Assigned mailbox to CPU {}", cpu_id); + return Some(mailbox); + } + } + + log::warn!("No available mailbox for CPU {}", cpu_id); + None + } + + /// Gets the mailbox for the specified CPU ID. + fn get_mailbox(&self, cpu_id: u32) -> Option<&ApMailbox> { + for mailbox in &self.mailboxes { + if mailbox.assigned_cpu() == Some(cpu_id) { + return Some(mailbox); + } + } + None + } + + /// Sends a command to a specific AP. + pub fn send_command(&self, cpu_id: u32, command: ApCommand) -> Result<(), ()> { + let mailbox = self.get_or_assign_mailbox(cpu_id).ok_or(())?; + if mailbox.send_command(command) { + Ok(()) + } else { + Err(()) + } + } + + /// Checks for a pending command (called by AP). + pub fn check_mailbox(&self, cpu_id: u32) -> Option { + let mailbox = self.get_or_assign_mailbox(cpu_id)?; + mailbox.take_command() + } + + /// Posts a response (called by AP). + pub fn post_response(&self, cpu_id: u32, response: ApResponse) { + if let Some(mailbox) = self.get_mailbox(cpu_id) { + mailbox.post_response(response); + } + } + + /// Waits for a response from an AP with timeout. + /// + /// Returns the response, or `None` if timeout. + pub fn wait_response(&self, cpu_id: u32, timeout_us: u64) -> Option { + let mailbox = self.get_mailbox(cpu_id)?; + let mut result = None; + + perf_timer::spin_until(timeout_us, || { + if let Some(response) = mailbox.get_response() { + result = Some(response); + true + } else { + false + } + }); + + result + } + + /// Broadcasts a command to all assigned APs. 
+ /// + /// For each assigned mailbox, this first drains any pending response + /// (spinning until the mailbox is `Empty`), then sends the command. + /// This mirrors the C code's `WaitForAllAPsNotBusy(TRUE)` followed + /// by `ReleaseAllAPs()`, ensuring no AP is ever skipped. + /// + /// Returns the number of APs that received the command. + pub fn broadcast_command(&self, command: ApCommand) -> usize { + let mut success_count = 0; + + for mailbox in &self.mailboxes { + if let Some(cpu_id) = mailbox.assigned_cpu() { + // Drain any in-flight work so the mailbox is Empty. + mailbox.drain_to_empty(); + + // Mailbox is now guaranteed Empty — send_command must succeed. + let sent = mailbox.send_command(command); + debug_assert!(sent, "send_command failed after drain_to_empty for CPU {}", cpu_id); + success_count += 1; + log::trace!("Broadcast command to CPU {}", cpu_id); + } + } + + success_count + } + + /// Waits for all assigned APs to post responses, with a timeout. + /// + /// Returns the number of APs that responded within the timeout. + pub fn wait_all_responses(&self, timeout_us: u64) -> usize { + let total = self.assigned_count(); + + perf_timer::spin_until(timeout_us, || { + let mut responded = 0; + for mailbox in &self.mailboxes { + if mailbox.assigned_cpu().is_some() { + // Count APs that have already been consumed (Empty) or have response ready + if mailbox.is_empty() || mailbox.has_response() { + responded += 1; + } + } + } + responded >= total + }); + + // Drain all pending responses and count + let mut responded = 0; + for mailbox in &self.mailboxes { + if mailbox.assigned_cpu().is_some() { + if mailbox.has_response() { + let _ = mailbox.get_response(); + responded += 1; + } else if mailbox.is_empty() { + responded += 1; + } + } + } + + responded + } + + /// Gets the number of assigned mailboxes. + pub fn assigned_count(&self) -> usize { + self.assigned_count.load(Ordering::SeqCst) as usize + } + + /// Gets the maximum number of mailboxes. 
+ pub const fn max_mailboxes(&self) -> usize { + MAX_APS + } + + /// Iterates over assigned mailboxes, calling the closure for each. + pub fn for_each_assigned(&self, mut f: F) { + for mailbox in &self.mailboxes { + if let Some(cpu_id) = mailbox.assigned_cpu() { + f(cpu_id, mailbox); + } + } + } +} + +impl Default for MailboxManager { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_mailbox_creation() { + let mailbox = ApMailbox::new(); + assert!(mailbox.is_empty()); + assert!(!mailbox.has_pending_command()); + assert!(!mailbox.has_response()); + assert!(mailbox.assigned_cpu().is_none()); + } + + #[test] + fn test_mailbox_is_const() { + // Verify we can create a static mailbox + static _MAILBOX: ApMailbox = ApMailbox::new(); + } + + #[test] + fn test_command_send_receive() { + let mailbox = ApMailbox::new(); + mailbox.assign(1); + + // Send a command + assert!(mailbox.send_command(ApCommand::Return)); + assert!(mailbox.has_pending_command()); + + // Cannot send another while one is pending + assert!(!mailbox.send_command(ApCommand::Return)); + + // Take the command + let cmd = mailbox.take_command(); + assert_eq!(cmd, Some(ApCommand::Return)); + assert!(!mailbox.has_pending_command()); + + // Post response + mailbox.post_response(ApResponse::Success); + assert!(mailbox.has_response()); + + // Get response + let resp = mailbox.get_response(); + assert_eq!(resp, Some(ApResponse::Success)); + assert!(mailbox.is_empty()); + } + + #[test] + fn test_run_procedure_command() { + let mailbox = ApMailbox::new(); + mailbox.assign(1); + + let cmd = ApCommand::RunProcedure { + procedure: 0xDEAD_BEEF, + argument: 0x12345678, + }; + + assert!(mailbox.send_command(cmd)); + let received = mailbox.take_command(); + assert!(matches!( + received, + Some(ApCommand::RunProcedure { procedure: 0xDEAD_BEEF, argument: 0x12345678 }) + )); + } + + #[test] + fn test_mailbox_manager_is_const() { + // Verify we can create a static 
manager + static _MANAGER: MailboxManager<8> = MailboxManager::new(); + } + + #[test] + fn test_mailbox_manager() { + let manager: MailboxManager<4> = MailboxManager::new(); + + // Send command (implicitly assigns mailbox) + assert!(manager.send_command(1, ApCommand::Return).is_ok()); + assert_eq!(manager.assigned_count(), 1); + + // Check mailbox + let cmd = manager.check_mailbox(1); + assert_eq!(cmd, Some(ApCommand::Return)); + + // Post response + manager.post_response(1, ApResponse::Success); + + // Wait for response + let resp = manager.wait_response(1, 1000); + assert_eq!(resp, Some(ApResponse::Success)); + } + + #[test] + fn test_broadcast() { + let manager: MailboxManager<4> = MailboxManager::new(); + + // Assign mailboxes for multiple APs + manager.send_command(1, ApCommand::Return).ok(); + manager.check_mailbox(1); // Clear command + manager.post_response(1, ApResponse::Success); + manager.wait_response(1, 1); + + manager.send_command(2, ApCommand::Return).ok(); + manager.check_mailbox(2); + manager.post_response(2, ApResponse::Success); + manager.wait_response(2, 1); + + manager.send_command(3, ApCommand::Return).ok(); + manager.check_mailbox(3); + manager.post_response(3, ApResponse::Success); + manager.wait_response(3, 1); + + // Broadcast + let count = manager.broadcast_command(ApCommand::Return); + assert_eq!(count, 3); + } + + #[test] + fn test_response_encoding() { + let responses = [ + ApResponse::None, + ApResponse::Success, + ApResponse::Error(42), + ApResponse::Busy, + ]; + + for resp in responses { + let encoded = resp.to_u64(); + let decoded = ApResponse::from_u64(encoded); + assert_eq!(resp, decoded); + } + } +} diff --git a/patina_mm_supervisor_core/src/mm_mem.rs b/patina_mm_supervisor_core/src/mm_mem.rs new file mode 100644 index 000000000..fbe23ca0b --- /dev/null +++ b/patina_mm_supervisor_core/src/mm_mem.rs @@ -0,0 +1,1016 @@ +//! MM Supervisor Core Page and Pool Allocators +//! +//! 
Provides a page-granularity memory allocator and a pool allocator for the MM Supervisor Core. +//! +//! ## Page Allocator +//! +//! When the one-time initialization routine is called, it will mark the blocks reported under +//! `gEfiSmmSmramMemoryGuid` or `gEfiMmPeiMmramMemoryReserveGuid` in the HOB list accordingly. +//! Blocks that have the `EFI_ALLOCATED` bit set in the `RegionState` field will be marked as allocated, +//! indicating they are in use. All other blocks will be marked as free. +//! +//! The page allocator is fully dynamic: +//! - No fixed limit on number of SMRAM regions +//! - No fixed limit on pages per region (supports up to 4GB per region) +//! - Bookkeeping is stored in SMRAM itself +//! +//! The page allocator provides: +//! - `allocate_pages(num_pages)` - Allocate contiguous pages +//! - `free_pages(addr, num_pages)` - Free previously allocated pages +//! +//! ## Pool Allocator +//! +//! Built on top of the page allocator, the pool allocator provides smaller-granularity allocations. +//! It allocates pages from the page allocator and subdivides them for pool allocations. +//! When a pool page is exhausted, more pages are allocated as needed. +//! +//! The pool allocator implements the `GlobalAlloc` trait for use as a global allocator. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +use core::{ + cell::UnsafeCell, + ffi::c_void, + mem::size_of, + ptr, + slice, + sync::atomic::{AtomicBool, Ordering}, +}; + +use patina::pi::hob::{Hob, PhaseHandoffInformationTable}; +use patina::base::UEFI_PAGE_SIZE; +use r_efi::efi; +use spin::Mutex; +use patina_paging::{MemoryAttributes, PageTable}; + +// ============================================================================ +// Constants +// ============================================================================ + +/// Errors that can occur during page allocation. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PageAllocError { + /// The allocator has not been initialized. + NotInitialized, + /// No free pages available to satisfy the request. + OutOfMemory, + /// The requested address is not aligned to page boundary. + NotAligned, + /// The address is not within any known SMRAM region. + InvalidAddress, + /// The address was not previously allocated. + NotAllocated, + /// Too many regions to track. + TooManyRegions, +} + +/// Bits per byte. +const BITS_PER_BYTE: usize = 8; + +/// EFI_ALLOCATED bit in RegionState. +pub const EFI_ALLOCATED: u64 = 0x0000000000000010; + +// GUID for gEfiSmmSmramMemoryGuid +// { 0x6dadf1d1, 0xd4cc, 0x4910, { 0xbb, 0x6e, 0x82, 0xb1, 0xfd, 0x80, 0xff, 0x3d }} +pub const SMM_SMRAM_MEMORY_GUID: efi::Guid = efi::Guid::from_fields( + 0x6dadf1d1, + 0xd4cc, + 0x4910, + 0xbb, + 0x6e, + &[0x82, 0xb1, 0xfd, 0x80, 0xff, 0x3d], +); + +// GUID for gEfiMmPeiMmramMemoryReserveGuid +// { 0x0703f912, 0xbf8d, 0x4e2a, { 0xbe, 0x07, 0xab, 0x27, 0x25, 0x25, 0xc5, 0x92 }} +pub const MM_PEI_MMRAM_MEMORY_RESERVE_GUID: efi::Guid = efi::Guid::from_fields( + 0x0703f912, + 0xbf8d, + 0x4e2a, + 0xbe, + 0x07, + &[0xab, 0x27, 0x25, 0x25, 0xc5, 0x92], +); + +// ============================================================================ +// Error Types +// ============================================================================ + +/// Type of memory allocation - distinguishes supervisor-internal vs user/driver allocations. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(u8)] +pub enum AllocationType { + /// Supervisor-internal allocation (e.g., for core data structures). + /// These are typically never freed and may have stricter protections. + Supervisor = 0, + /// User/driver allocation (e.g., for MM driver requests). + /// These can be allocated and freed by external code. 
+ User = 1, +} + +// ============================================================================ +// SMRAM Descriptor (matches EFI_SMRAM_DESCRIPTOR) +// ============================================================================ + +/// SMRAM descriptor structure matching EFI_SMRAM_DESCRIPTOR. +#[repr(C)] +#[derive(Clone, Copy, Debug)] +pub struct SmramDescriptor { + /// Physical start address of the SMRAM region. + pub physical_start: efi::PhysicalAddress, + /// CPU start address (may differ from physical for remapping). + pub cpu_start: efi::PhysicalAddress, + /// Size of the SMRAM region in bytes. + pub physical_size: u64, + /// Region state flags (EFI_ALLOCATED, etc.). + pub region_state: u64, +} + +// ============================================================================ +// SMRAM Reserve HOB structure +// ============================================================================ + +/// SMRAM reserve descriptor count structure. +/// This is the data that immediately follows a GuidHob with SMM_SMRAM_MEMORY_GUID +/// or MM_PEI_MMRAM_MEMORY_RESERVE_GUID. +#[repr(C)] +#[derive(Clone, Copy, Debug)] +pub struct SmramReserveHobData { + /// Number of SMRAM descriptors that follow. + pub number_of_smram_regions: u32, + /// Reserved for alignment. + pub reserved: u32, + // SmramDescriptor array follows immediately after +} + +// ============================================================================ +// Memory Region Tracking (Dynamic) +// ============================================================================ + +/// Metadata for a single SMRAM region. +/// This struct is stored in the bookkeeping pages, not statically. +#[repr(C)] +#[derive(Clone, Copy, Debug)] +pub struct RegionInfo { + /// Base physical address of the region. + pub base: u64, + /// Total number of pages in this region. + pub total_pages: usize, + /// Starting bit index in the global allocation bitmap. 
+ pub bitmap_start_bit: usize, +} + +/// Internal state for the page allocator, stored in bookkeeping pages. +#[repr(C)] +struct AllocatorState { + /// Number of regions. + region_count: usize, + /// Total number of pages across all regions. + total_pages: usize, + /// Number of pages used for bookkeeping. + bookkeeping_pages: usize, + /// Base address of bookkeeping memory. + bookkeeping_base: u64, + // Followed by: + // - RegionInfo array (region_count entries) + // - Allocation bitmap (total_pages bits, rounded up to bytes) + // - Type bitmap (total_pages bits, rounded up to bytes) +} + +// ============================================================================ +// Page Allocator (Dynamic) +// ============================================================================ + +/// Page-granularity allocator for SMRAM memory. +/// +/// This allocator is fully dynamic: +/// - No fixed limit on number of SMRAM regions +/// - No fixed limit on pages per region (supports up to 4GB per region) +/// - Bookkeeping data structures are allocated from SMRAM itself +/// +/// ## Initialization +/// +/// During initialization, the allocator: +/// 1. Scans HOBs to count regions and total pages +/// 2. Calculates bookkeeping space needed +/// 3. Reserves pages from the first available region for bookkeeping +/// 4. Initializes bitmaps in the reserved pages +pub struct PageAllocator { + /// Pointer to the allocator state (stored in SMRAM). + state: UnsafeCell<*mut AllocatorState>, + /// Lock for thread safety. + lock: Mutex<()>, + /// Whether the allocator has been initialized. + initialized: AtomicBool, +} + +// SAFETY: The PageAllocator uses internal locking for thread safety. +unsafe impl Send for PageAllocator {} +unsafe impl Sync for PageAllocator {} + +impl PageAllocator { + /// Creates a new uninitialized page allocator. 
+ pub const fn new() -> Self { + Self { + state: UnsafeCell::new(ptr::null_mut()), + lock: Mutex::new(()), + initialized: AtomicBool::new(false), + } + } + + /// Calculates the number of pages needed for bookkeeping. + /// + /// Bookkeeping includes: + /// - AllocatorState header + /// - RegionInfo array + /// - Allocation bitmap (1 bit per page) + /// - Type bitmap (1 bit per page) + fn calculate_bookkeeping_pages(region_count: usize, total_pages: usize) -> usize { + let header_size = size_of::(); + let regions_size = region_count * size_of::(); + let bitmap_bytes = (total_pages + BITS_PER_BYTE - 1) / BITS_PER_BYTE; + let total_bytes = header_size + regions_size + bitmap_bytes * 2; // alloc + type bitmaps + (total_bytes + UEFI_PAGE_SIZE - 1) / UEFI_PAGE_SIZE + } + + /// Gets the regions array from the state. + unsafe fn get_regions(&self) -> &[RegionInfo] { + unsafe { + let state = *self.state.get(); + if state.is_null() { + return &[]; + } + let region_count = (*state).region_count; + let regions_ptr = (state as *const u8).add(size_of::()) as *const RegionInfo; + slice::from_raw_parts(regions_ptr, region_count) + } + } + + /// Gets the regions array mutably from the state. + unsafe fn get_regions_mut(&self) -> &mut [RegionInfo] { + unsafe { + let state = *self.state.get(); + if state.is_null() { + return &mut []; + } + let region_count = (*state).region_count; + let regions_ptr = (state as *mut u8).add(size_of::()) as *mut RegionInfo; + slice::from_raw_parts_mut(regions_ptr, region_count) + } + } + + /// Gets the allocation bitmap from the state. 
+ unsafe fn get_alloc_bitmap(&self) -> &[u8] { + unsafe { + let state = *self.state.get(); + if state.is_null() { + return &[]; + } + let region_count = (*state).region_count; + let total_pages = (*state).total_pages; + let bitmap_bytes = (total_pages + BITS_PER_BYTE - 1) / BITS_PER_BYTE; + let bitmap_ptr = (state as *const u8) + .add(size_of::()) + .add(region_count * size_of::()); + slice::from_raw_parts(bitmap_ptr, bitmap_bytes) + } + } + + /// Gets the allocation bitmap mutably from the state. + unsafe fn get_alloc_bitmap_mut(&self) -> &mut [u8] { + unsafe { + let state = *self.state.get(); + if state.is_null() { + return &mut []; + } + let region_count = (*state).region_count; + let total_pages = (*state).total_pages; + let bitmap_bytes = (total_pages + BITS_PER_BYTE - 1) / BITS_PER_BYTE; + let bitmap_ptr = (state as *mut u8) + .add(size_of::()) + .add(region_count * size_of::()); + slice::from_raw_parts_mut(bitmap_ptr, bitmap_bytes) + } + } + + /// Gets the type bitmap from the state. + unsafe fn get_type_bitmap(&self) -> &[u8] { + unsafe { + let state = *self.state.get(); + if state.is_null() { + return &[]; + } + let region_count = (*state).region_count; + let total_pages = (*state).total_pages; + let bitmap_bytes = (total_pages + BITS_PER_BYTE - 1) / BITS_PER_BYTE; + let type_bitmap_ptr = (state as *const u8) + .add(size_of::()) + .add(region_count * size_of::()) + .add(bitmap_bytes); + slice::from_raw_parts(type_bitmap_ptr, bitmap_bytes) + } + } + + /// Gets the type bitmap mutably from the state. 
+ unsafe fn get_type_bitmap_mut(&self) -> &mut [u8] { + unsafe { + let state = *self.state.get(); + if state.is_null() { + return &mut []; + } + let region_count = (*state).region_count; + let total_pages = (*state).total_pages; + let bitmap_bytes = (total_pages + BITS_PER_BYTE - 1) / BITS_PER_BYTE; + let type_bitmap_ptr = (state as *mut u8) + .add(size_of::()) + .add(region_count * size_of::()) + .add(bitmap_bytes); + slice::from_raw_parts_mut(type_bitmap_ptr, bitmap_bytes) + } + } + + /// Checks if a global bit index is allocated. + unsafe fn is_bit_allocated(&self, bit_index: usize) -> bool { + unsafe { + let bitmap = self.get_alloc_bitmap(); + let byte_index = bit_index / BITS_PER_BYTE; + let bit_offset = bit_index % BITS_PER_BYTE; + if byte_index >= bitmap.len() { + return true; // Out of bounds = allocated + } + (bitmap[byte_index] & (1 << bit_offset)) != 0 + } + } + + /// Gets the allocation type for a global bit index. + unsafe fn get_bit_type(&self, bit_index: usize) -> AllocationType { + unsafe { + let bitmap = self.get_type_bitmap(); + let byte_index = bit_index / BITS_PER_BYTE; + let bit_offset = bit_index % BITS_PER_BYTE; + if byte_index >= bitmap.len() { + return AllocationType::Supervisor; + } + if (bitmap[byte_index] & (1 << bit_offset)) != 0 { + AllocationType::User + } else { + AllocationType::Supervisor + } + } + } + + /// Sets a bit as allocated with the given type. 
+ unsafe fn set_bit_allocated(&self, bit_index: usize, alloc_type: AllocationType) { + unsafe { + let alloc_bitmap = self.get_alloc_bitmap_mut(); + let type_bitmap = self.get_type_bitmap_mut(); + let byte_index = bit_index / BITS_PER_BYTE; + let bit_offset = bit_index % BITS_PER_BYTE; + + if byte_index < alloc_bitmap.len() { + alloc_bitmap[byte_index] |= 1 << bit_offset; + match alloc_type { + AllocationType::User => { + type_bitmap[byte_index] |= 1 << bit_offset; + } + AllocationType::Supervisor => { + type_bitmap[byte_index] &= !(1 << bit_offset); + } + } + } + } + } + + /// Clears a bit (marks as free). + unsafe fn set_bit_free(&self, bit_index: usize) { + unsafe { + let alloc_bitmap = self.get_alloc_bitmap_mut(); + let type_bitmap = self.get_type_bitmap_mut(); + let byte_index = bit_index / BITS_PER_BYTE; + let bit_offset = bit_index % BITS_PER_BYTE; + + if byte_index < alloc_bitmap.len() { + alloc_bitmap[byte_index] &= !(1 << bit_offset); + type_bitmap[byte_index] &= !(1 << bit_offset); + } + } + } + + /// Finds which region contains an address and returns (region_index, page_index_in_region). + unsafe fn find_region_for_address(&self, addr: u64) -> Option<(usize, usize)> { + unsafe { + let regions = self.get_regions(); + for (i, region) in regions.iter().enumerate() { + let region_end = region.base + (region.total_pages as u64 * UEFI_PAGE_SIZE as u64); + if addr >= region.base && addr < region_end { + let page_in_region = ((addr - region.base) / UEFI_PAGE_SIZE as u64) as usize; + return Some((i, page_in_region)); + } + } + None + } + } + + /// Converts a region index and page-in-region to a global bit index. + unsafe fn region_page_to_bit(&self, region_index: usize, page_in_region: usize) -> usize { + unsafe { + let regions = self.get_regions(); + if region_index < regions.len() { + regions[region_index].bitmap_start_bit + page_in_region + } else { + 0 + } + } + } + + /// Initializes the page allocator from the HOB list. + /// + /// This function: + /// 1. 
Scans HOBs to count regions and total pages + /// 2. Finds the first non-allocated region for bookkeeping + /// 3. Reserves pages for bookkeeping structures + /// 4. Initializes the bitmaps + /// + /// # Safety + /// + /// The caller must ensure that `hob_list` points to a valid HOB list. + pub unsafe fn init_from_hob_list(&self, hob_list: *const c_void) -> Result<(), PageAllocError> { + if hob_list.is_null() { + return Err(PageAllocError::NotInitialized); + } + + let _guard = self.lock.lock(); + + // Get the HOB list iterator + let hob_list_info = unsafe { + (hob_list as *const PhaseHandoffInformationTable) + .as_ref() + .ok_or(PageAllocError::NotInitialized)? + }; + + let hob = Hob::Handoff(hob_list_info); + + // First pass: count regions and total pages + let mut region_count = 0usize; + let mut total_pages = 0usize; + let mut first_free_region_base: Option = None; + let mut first_free_region_size: u64 = 0; + + // Temporary storage for region info (we'll copy to SMRAM later) + // Using a reasonable stack limit - actual regions stored in SMRAM + const MAX_TEMP_REGIONS: usize = 256; + let mut temp_regions: [(u64, u64, bool); MAX_TEMP_REGIONS] = [(0, 0, false); MAX_TEMP_REGIONS]; + + for current_hob in &hob { + if let Hob::GuidHob(guid_hob, data) = current_hob { + if guid_hob.name == SMM_SMRAM_MEMORY_GUID + || guid_hob.name == MM_PEI_MMRAM_MEMORY_RESERVE_GUID + { + log::info!("Found SMRAM memory HOB with GUID {:?}", guid_hob.name); + + if data.len() < size_of::() { + continue; + } + + let reserve_data = unsafe { &*(data.as_ptr() as *const SmramReserveHobData) }; + let descriptor_count = reserve_data.number_of_smram_regions as usize; + + let descriptors_ptr = unsafe { + data.as_ptr().add(size_of::()) as *const SmramDescriptor + }; + + for i in 0..descriptor_count { + if region_count >= MAX_TEMP_REGIONS { + log::warn!("Too many SMRAM regions for temp storage, increase MAX_TEMP_REGIONS"); + break; + } + + let descriptor = unsafe { &*descriptors_ptr.add(i) }; + let 
pre_allocated = (descriptor.region_state & EFI_ALLOCATED) != 0; + let pages = (descriptor.physical_size as usize) / UEFI_PAGE_SIZE; + + log::info!( + "SMRAM Region {}: base=0x{:016x}, size=0x{:x}, pages={}, state=0x{:x}, allocated={}", + region_count, + descriptor.physical_start, + descriptor.physical_size, + pages, + descriptor.region_state, + pre_allocated + ); + + temp_regions[region_count] = ( + descriptor.physical_start, + descriptor.physical_size, + pre_allocated, + ); + + // Track first non-allocated region for bookkeeping + if first_free_region_base.is_none() && !pre_allocated { + first_free_region_base = Some(descriptor.physical_start); + first_free_region_size = descriptor.physical_size; + } + + total_pages += pages; + region_count += 1; + } + } + } + } + + if region_count == 0 { + log::error!("No SMRAM regions found in HOB list"); + return Err(PageAllocError::NotInitialized); + } + + // Calculate bookkeeping space needed + let bookkeeping_pages = Self::calculate_bookkeeping_pages(region_count, total_pages); + + log::info!( + "Allocator needs {} pages for bookkeeping ({} regions, {} total pages)", + bookkeeping_pages, + region_count, + total_pages + ); + + // Find space for bookkeeping + let bookkeeping_base = first_free_region_base.ok_or_else(|| { + log::error!("No free SMRAM region available for bookkeeping"); + PageAllocError::OutOfMemory + })?; + + if (bookkeeping_pages * UEFI_PAGE_SIZE) as u64 > first_free_region_size { + log::error!("First free region too small for bookkeeping"); + return Err(PageAllocError::OutOfMemory); + } + + log::info!( + "Using 0x{:016x} for bookkeeping ({} pages)", + bookkeeping_base, + bookkeeping_pages + ); + + // Initialize the state structure in SMRAM + let state_ptr = bookkeeping_base as *mut AllocatorState; + unsafe { + // Zero the bookkeeping pages first + ptr::write_bytes(bookkeeping_base as *mut u8, 0, bookkeeping_pages * UEFI_PAGE_SIZE); + + // Write the header + (*state_ptr).region_count = region_count; + 
(*state_ptr).total_pages = total_pages; + (*state_ptr).bookkeeping_pages = bookkeeping_pages; + (*state_ptr).bookkeeping_base = bookkeeping_base; + + // Store state pointer + *self.state.get() = state_ptr; + } + + // Initialize region info + let mut bitmap_start_bit = 0usize; + { + let regions = unsafe { self.get_regions_mut() }; + for (i, region) in regions.iter_mut().enumerate() { + let (base, size, _) = temp_regions[i]; + let pages = (size as usize) / UEFI_PAGE_SIZE; + region.base = base; + region.total_pages = pages; + region.bitmap_start_bit = bitmap_start_bit; + bitmap_start_bit += pages; + } + } + + // Mark pre-allocated regions and bookkeeping pages as allocated + for i in 0..region_count { + let (base, size, pre_allocated) = temp_regions[i]; + let pages = (size as usize) / UEFI_PAGE_SIZE; + + if pre_allocated { + // Mark entire region as allocated (supervisor) + let regions = unsafe { self.get_regions() }; + let start_bit = regions[i].bitmap_start_bit; + for p in 0..pages { + unsafe { self.set_bit_allocated(start_bit + p, AllocationType::Supervisor) }; + } + } else if base == bookkeeping_base { + // Mark bookkeeping pages as allocated (supervisor) + let regions = unsafe { self.get_regions() }; + let start_bit = regions[i].bitmap_start_bit; + for p in 0..bookkeeping_pages { + unsafe { self.set_bit_allocated(start_bit + p, AllocationType::Supervisor) }; + } + } + } + + self.initialized.store(true, Ordering::Release); + + log::info!( + "Page allocator initialized: {} region(s), {} total pages, {} free pages", + region_count, + total_pages, + self.free_page_count() + ); + + Ok(()) + } + + /// Allocates contiguous pages from SMRAM for supervisor use. + pub fn allocate_pages(&self, num_pages: usize) -> Result { + self.allocate_pages_with_type(num_pages, AllocationType::Supervisor) + } + + /// Allocates contiguous pages from SMRAM with the specified allocation type. 
+ /// + /// For `Supervisor` allocations, the allocated region is marked as supervisor-owned + /// data pages (R/W, non-executable) in the page table. + pub fn allocate_pages_with_type( + &self, + num_pages: usize, + alloc_type: AllocationType, + ) -> Result { + if !self.initialized.load(Ordering::Acquire) { + return Err(PageAllocError::NotInitialized); + } + + if num_pages == 0 { + return Err(PageAllocError::OutOfMemory); + } + + let allocated_addr = { + let _guard = self.lock.lock(); + + // SAFETY: We have exclusive access via the lock + unsafe { + let regions = self.get_regions(); + + let mut found: Option = None; + // Try each region + 'outer: for region in regions.iter() { + // First-fit search for contiguous pages + let mut run_start = 0usize; + let mut run_length = 0usize; + + for page_in_region in 0..region.total_pages { + let bit_index = region.bitmap_start_bit + page_in_region; + if self.is_bit_allocated(bit_index) { + run_start = page_in_region + 1; + run_length = 0; + } else { + run_length += 1; + if run_length == num_pages { + // Found a suitable run, allocate it + for p in run_start..run_start + num_pages { + let bit = region.bitmap_start_bit + p; + self.set_bit_allocated(bit, alloc_type); + } + let addr = region.base + (run_start as u64 * UEFI_PAGE_SIZE as u64); + log::trace!( + "Allocated {} {:?} page(s) at 0x{:016x}", + num_pages, + alloc_type, + addr + ); + found = Some(addr); + break 'outer; + } + } + } + } + + found + } + }; // lock is dropped here + + let addr = allocated_addr.ok_or(PageAllocError::OutOfMemory)?; + + // For supervisor allocations, update page table attributes to mark as + // supervisor-owned data pages (R/W/NX/S), otherwise they would + // default to user data (R/W/NX/U). + self.apply_data_page_attributes(addr, num_pages, alloc_type); + + Ok(addr) + } + + /// Applies supervisor page table attributes to a newly allocated region. + /// + /// Marks pages as supervisor-owned data pages: Read/Write + Non-Executable (NX). 
+ /// This ensures supervisor data cannot be executed, providing W^X enforcement. + /// + /// If the global page table is not yet initialized (e.g., during early boot), + /// this is a no-op with a warning. + fn apply_data_page_attributes(&self, addr: u64, num_pages: usize, _alloc_type: AllocationType) { + + let size = (num_pages * UEFI_PAGE_SIZE) as u64; + let mut pt_guard = crate::PAGE_TABLE.lock(); + if let Some(ref mut pt) = *pt_guard { + // Data pages: R/W (no ReadOnly) + NX (ExecuteProtect) + let mut attributes = MemoryAttributes::ExecuteProtect; + + if _alloc_type == AllocationType::Supervisor { + // For Supervisor allocations, we additionally want the U/S bit cleared (Supervisor-only). + attributes = attributes | MemoryAttributes::Supervisor; // Ensure not writable by user code + } + + if let Err(e) = pt.map_memory_region(addr, size, attributes) { + log::error!( + "Failed to set supervisor page attributes for 0x{:016x} ({} pages): {:?}", + addr, + num_pages, + e + ); + } else { + log::trace!( + "Marked 0x{:016x} ({} pages) as supervisor R/W+NX", + addr, + num_pages, + ); + } + } else { + log::warn!( + "Page table not initialized, skipping attribute update for 0x{:016x}", + addr + ); + } + } + + /// Applies restrictive page table attributes to freed pages. + /// + /// Marks pages as completely inaccessible: Supervisor + ReadProtect + ExecuteProtect (NX). + /// This prevents any read, write, or execute access to freed memory, mitigating + /// use-after-free vulnerabilities. + /// + /// If the global page table is not yet initialized (e.g., during early boot), + /// this is a no-op with a warning. + fn apply_freed_page_attributes(&self, addr: u64, num_pages: usize) { + + let size = (num_pages * UEFI_PAGE_SIZE) as u64; + let mut pt_guard = crate::PAGE_TABLE.lock(); + if let Some(ref mut pt) = *pt_guard { + // Freed pages: ReadProtect (not present) + NX (no execute) + ReadOnly (no write) + // This makes the pages completely inaccessible. 
+ if let Err(e) = pt.unmap_memory_region(addr, size) { + log::error!( + "Failed to set freed page attributes for 0x{:016x} ({} pages): {:?}", + addr, + num_pages, + e + ); + } else { + log::trace!( + "Marked 0x{:016x} ({} pages) as inaccessible (RP+NX+RO+S)", + addr, + num_pages, + ); + } + } else { + log::warn!( + "Page table not initialized, skipping freed page attribute update for 0x{:016x}", + addr + ); + } + } + + /// Frees previously allocated pages. + /// + /// After freeing, the pages are marked as inaccessible in the page table + /// (Supervisor + ReadProtect + ExecuteProtect) to prevent use-after-free. + pub fn free_pages(&self, addr: u64, num_pages: usize) -> Result<(), PageAllocError> { + if !self.initialized.load(Ordering::Acquire) { + return Err(PageAllocError::NotInitialized); + } + + if addr % UEFI_PAGE_SIZE as u64 != 0 { + return Err(PageAllocError::NotAligned); + } + + { + let _guard = self.lock.lock(); + + unsafe { + let (region_index, page_in_region) = self + .find_region_for_address(addr) + .ok_or(PageAllocError::InvalidAddress)?; + + let regions = self.get_regions(); + let region = ®ions[region_index]; + + // Verify all pages are allocated + for p in 0..num_pages { + let bit = region.bitmap_start_bit + page_in_region + p; + if !self.is_bit_allocated(bit) { + return Err(PageAllocError::NotAllocated); + } + } + + // Free the pages + for p in 0..num_pages { + let bit = region.bitmap_start_bit + page_in_region + p; + self.set_bit_free(bit); + } + + log::trace!("Freed {} page(s) at 0x{:016x}", num_pages, addr); + } + } // lock is dropped here + + // Mark freed pages as inaccessible in the page table. + self.apply_freed_page_attributes(addr, num_pages); + + Ok(()) + } + + /// Frees previously allocated pages, verifying the allocation type matches. + /// + /// After freeing, the pages are marked as inaccessible in the page table + /// (Supervisor + ReadProtect + ExecuteProtect) to prevent use-after-free. 
+ pub fn free_pages_checked( + &self, + addr: u64, + num_pages: usize, + expected_type: AllocationType, + ) -> Result<(), PageAllocError> { + if !self.initialized.load(Ordering::Acquire) { + return Err(PageAllocError::NotInitialized); + } + + if addr % UEFI_PAGE_SIZE as u64 != 0 { + return Err(PageAllocError::NotAligned); + } + + { + let _guard = self.lock.lock(); + + unsafe { + let (region_index, page_in_region) = self + .find_region_for_address(addr) + .ok_or(PageAllocError::InvalidAddress)?; + + let regions = self.get_regions(); + let region = ®ions[region_index]; + + // Verify all pages are allocated with expected type + for p in 0..num_pages { + let bit = region.bitmap_start_bit + page_in_region + p; + if !self.is_bit_allocated(bit) { + return Err(PageAllocError::NotAllocated); + } + if self.get_bit_type(bit) != expected_type { + log::warn!( + "Type mismatch at 0x{:016x}: expected {:?}, got {:?}", + addr + (p as u64 * UEFI_PAGE_SIZE as u64), + expected_type, + self.get_bit_type(bit) + ); + return Err(PageAllocError::InvalidAddress); + } + } + + // Free the pages + for p in 0..num_pages { + let bit = region.bitmap_start_bit + page_in_region + p; + self.set_bit_free(bit); + } + + log::trace!("Freed {} {:?} page(s) at 0x{:016x}", num_pages, expected_type, addr); + } + } // lock is dropped here + + // Mark freed pages as inaccessible in the page table. + self.apply_freed_page_attributes(addr, num_pages); + + Ok(()) + } + + /// Returns the total number of free pages across all regions. + pub fn free_page_count(&self) -> usize { + if !self.initialized.load(Ordering::Acquire) { + return 0; + } + + unsafe { + let state = *self.state.get(); + if state.is_null() { + return 0; + } + let total_pages = (*state).total_pages; + let mut free = 0; + for bit in 0..total_pages { + if !self.is_bit_allocated(bit) { + free += 1; + } + } + free + } + } + + /// Returns the number of pages allocated for a specific type. 
+ pub fn allocated_page_count(&self, alloc_type: AllocationType) -> usize { + if !self.initialized.load(Ordering::Acquire) { + return 0; + } + + let _guard = self.lock.lock(); + + unsafe { + let state = *self.state.get(); + if state.is_null() { + return 0; + } + let total_pages = (*state).total_pages; + let mut count = 0; + for bit in 0..total_pages { + if self.is_bit_allocated(bit) && self.get_bit_type(bit) == alloc_type { + count += 1; + } + } + count + } + } + + /// Returns the allocation type for a given address. + pub fn get_allocation_type(&self, addr: u64) -> Option { + if !self.initialized.load(Ordering::Acquire) { + return None; + } + + let _guard = self.lock.lock(); + + unsafe { + let (region_index, page_in_region) = self.find_region_for_address(addr)?; + let bit = self.region_page_to_bit(region_index, page_in_region); + if self.is_bit_allocated(bit) { + Some(self.get_bit_type(bit)) + } else { + None + } + } + } + + /// Returns whether the allocator has been initialized. + pub fn is_initialized(&self) -> bool { + self.initialized.load(Ordering::Acquire) + } + + /// Returns the total number of pages across all regions. + pub fn total_page_count(&self) -> usize { + if !self.initialized.load(Ordering::Acquire) { + return 0; + } + unsafe { + let state = *self.state.get(); + if state.is_null() { + 0 + } else { + (*state).total_pages + } + } + } + + /// Returns the number of regions. 
+ pub fn region_count(&self) -> usize { + if !self.initialized.load(Ordering::Acquire) { + return 0; + } + unsafe { + let state = *self.state.get(); + if state.is_null() { + 0 + } else { + (*state).region_count + } + } + } + + pub fn is_region_inside_mmram(&self, addr: u64, size: u64) -> bool { + if !self.initialized.load(Ordering::Acquire) { + return false; + } + + let _guard = self.lock.lock(); + + unsafe { + let regions = self.get_regions(); + for region in regions.iter() { + let region_end = region.base + (region.total_pages as u64 * UEFI_PAGE_SIZE as u64); + if addr >= region.base && (addr + size) <= region_end { + return true; + } + } + false + } + } +} + +// ============================================================================ +// Global Allocator Instance +// ============================================================================ + +/// Global page allocator instance. +/// +/// This must be initialized via `init_from_hob_list` before use. +pub static PAGE_ALLOCATOR: PageAllocator = PageAllocator::new(); + diff --git a/patina_mm_supervisor_core/src/paging_allocator.rs b/patina_mm_supervisor_core/src/paging_allocator.rs new file mode 100644 index 000000000..00f0a57e1 --- /dev/null +++ b/patina_mm_supervisor_core/src/paging_allocator.rs @@ -0,0 +1,444 @@ +//! Paging Page Allocator +//! +//! A dedicated page allocator for the paging subsystem that allocates pages for +//! page table structures (PML4, PDPT, PD, PT entries). +//! +//! ## Design +//! +//! This allocator is separate from the generic PageAllocator for two reasons: +//! +//! 1. **Bootstrap problem**: The paging subsystem needs to allocate pages for page +//! tables, but the generic PageAllocator wants to call into paging to set page +//! attributes for newly allocated pages. This creates a circular dependency. +//! +//! 2. **Security**: Page table pages require special attributes (Supervisor, RW, +//! non-executable) and should be tracked separately from general allocations. +//! +//! 
## Initialization +//! +//! The paging allocator is initialized with a reserved memory region from SMRAM. +//! This region is exclusively used for page table allocations. +//! +//! ## Integration with Paging +//! +//! After the paging subsystem is fully initialized, the generic PageAllocator can +//! optionally register a callback to apply page table attributes to newly allocated +//! pages via the paging instance. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +use core::{ + cell::UnsafeCell, + sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, +}; + +use patina_paging::{PtError, page_allocator::PageAllocator as PagingPageAllocator}; +use patina::base::UEFI_PAGE_SIZE; +use spin::Mutex; + +// ============================================================================ +// Constants +// ============================================================================ + +/// Default number of pages to reserve for page table allocations. +/// This should be sufficient for most MM environments (128 pages = 512KB). +pub const DEFAULT_PAGING_POOL_PAGES: usize = 128; + +// ============================================================================ +// Error Types +// ============================================================================ + +/// Errors that can occur during paging allocator operations. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PagingAllocError { + /// The allocator has not been initialized. + NotInitialized, + /// Already initialized. + AlreadyInitialized, + /// No free pages available to satisfy the request. + OutOfMemory, + /// Invalid alignment requested. + InvalidAlignment, + /// The pool region is too small. 
+ PoolTooSmall, +} + +// ============================================================================ +// Paging Page Allocator +// ============================================================================ + +/// A dedicated page allocator for the paging subsystem. +/// +/// This allocator uses a simple bump allocator from a reserved pool of pages. +/// It implements the `patina_paging::PageAllocator` trait to be used directly +/// by the paging crate for allocating page table structures. +/// +/// ## Thread Safety +/// +/// This allocator is thread-safe and can be used from multiple CPUs. +/// +/// ## Example +/// +/// ```rust,ignore +/// use patina_mm_supervisor_core::paging_allocator::{PagingPageAllocator, DEFAULT_PAGING_POOL_PAGES}; +/// +/// // During early init, reserve a region from SMRAM for page tables +/// let pool_base = 0x8000_0000u64; // Example base address +/// let pool_pages = DEFAULT_PAGING_POOL_PAGES; +/// +/// // Initialize the allocator +/// unsafe { +/// PAGING_ALLOCATOR.init(pool_base, pool_pages)?; +/// } +/// +/// // The allocator can now be used by the paging crate +/// ``` +pub struct PagingPoolAllocator { + /// Base address of the pool. + pool_base: AtomicU64, + /// Total number of pages in the pool. + pool_pages: AtomicUsize, + /// Current allocation offset (bump pointer) in bytes. + current_offset: AtomicUsize, + /// Number of pages allocated. + allocated_pages: AtomicUsize, + /// Whether the allocator has been initialized. + initialized: AtomicBool, + /// Lock for thread safety during allocation. + lock: Mutex<()>, +} + +// SAFETY: The PagingPoolAllocator uses internal locking for thread safety. +unsafe impl Send for PagingPoolAllocator {} +unsafe impl Sync for PagingPoolAllocator {} + +impl PagingPoolAllocator { + /// Creates a new uninitialized paging page allocator. 
+ pub const fn new() -> Self { + Self { + pool_base: AtomicU64::new(0), + pool_pages: AtomicUsize::new(0), + current_offset: AtomicUsize::new(0), + allocated_pages: AtomicUsize::new(0), + initialized: AtomicBool::new(false), + lock: Mutex::new(()), + } + } + + /// Initializes the paging allocator with a reserved memory region. + /// + /// # Arguments + /// + /// * `pool_base` - Base physical address of the reserved pool (must be page-aligned) + /// * `pool_pages` - Number of pages in the pool + /// + /// # Safety + /// + /// The caller must ensure that: + /// - `pool_base` points to a valid memory region in SMRAM + /// - The region is not used by any other allocator + /// - The region has at least `pool_pages * UEFI_PAGE_SIZE` bytes available + /// + /// # Errors + /// + /// Returns an error if already initialized or if parameters are invalid. + pub unsafe fn init(&self, pool_base: u64, pool_pages: usize) -> Result<(), PagingAllocError> { + if self.initialized.load(Ordering::Acquire) { + return Err(PagingAllocError::AlreadyInitialized); + } + + if pool_base == 0 || pool_pages == 0 { + return Err(PagingAllocError::PoolTooSmall); + } + + if pool_base % UEFI_PAGE_SIZE as u64 != 0 { + return Err(PagingAllocError::InvalidAlignment); + } + + let _guard = self.lock.lock(); + + // Zero the pool region + unsafe { + core::ptr::write_bytes(pool_base as *mut u8, 0, pool_pages * UEFI_PAGE_SIZE); + } + + self.pool_base.store(pool_base, Ordering::Release); + self.pool_pages.store(pool_pages, Ordering::Release); + self.current_offset.store(0, Ordering::Release); + self.allocated_pages.store(0, Ordering::Release); + self.initialized.store(true, Ordering::Release); + + log::info!( + "Paging allocator initialized: base=0x{:016x}, pages={} ({} KB)", + pool_base, + pool_pages, + pool_pages * UEFI_PAGE_SIZE / 1024 + ); + + Ok(()) + } + + /// Allocates a page for page table structures. 
+ /// + /// # Arguments + /// + /// * `align` - Required alignment in bytes (must be a power of 2 and >= UEFI_PAGE_SIZE) + /// * `size` - Size in bytes (must be >= UEFI_PAGE_SIZE) + /// * `is_root` - Whether this is a root page table (e.g., PML4) + /// + /// # Returns + /// + /// The physical address of the allocated page, or an error. + pub fn allocate_page_internal( + &self, + align: u64, + size: u64, + _is_root: bool, + ) -> Result { + if !self.initialized.load(Ordering::Acquire) { + return Err(PagingAllocError::NotInitialized); + } + + // Validate alignment (must be at least UEFI_PAGE_SIZE and power of 2) + let align = align.max(UEFI_PAGE_SIZE as u64); + if !align.is_power_of_two() { + return Err(PagingAllocError::InvalidAlignment); + } + + // Validate size (must be at least UEFI_PAGE_SIZE) + let size = size.max(UEFI_PAGE_SIZE as u64); + let pages_needed = ((size as usize) + UEFI_PAGE_SIZE - 1) / UEFI_PAGE_SIZE; + + let _guard = self.lock.lock(); + + let pool_base = self.pool_base.load(Ordering::Acquire); + let pool_pages = self.pool_pages.load(Ordering::Acquire); + let current_offset = self.current_offset.load(Ordering::Acquire); + + // Calculate the aligned address + let current_addr = pool_base + current_offset as u64; + let aligned_addr = (current_addr + align - 1) & !(align - 1); + let padding = (aligned_addr - current_addr) as usize; + let total_bytes = padding + (pages_needed * UEFI_PAGE_SIZE); + + // Check if we have enough space + if current_offset + total_bytes > pool_pages * UEFI_PAGE_SIZE { + log::error!( + "Paging allocator out of memory: need {} bytes, have {} bytes remaining", + total_bytes, + pool_pages * UEFI_PAGE_SIZE - current_offset + ); + return Err(PagingAllocError::OutOfMemory); + } + + // Update the offset + self.current_offset + .store(current_offset + total_bytes, Ordering::Release); + self.allocated_pages + .fetch_add(pages_needed, Ordering::Release); + + log::trace!( + "Paging allocator: allocated {} page(s) at 0x{:016x} 
(align=0x{:x})", + pages_needed, + aligned_addr, + align + ); + + Ok(aligned_addr) + } + + /// Returns whether the allocator has been initialized. + pub fn is_initialized(&self) -> bool { + self.initialized.load(Ordering::Acquire) + } + + /// Returns the number of pages allocated. + pub fn allocated_page_count(&self) -> usize { + self.allocated_pages.load(Ordering::Acquire) + } + + /// Returns the number of free pages remaining. + pub fn free_page_count(&self) -> usize { + if !self.initialized.load(Ordering::Acquire) { + return 0; + } + + let pool_pages = self.pool_pages.load(Ordering::Acquire); + let current_offset = self.current_offset.load(Ordering::Acquire); + let used_pages = (current_offset + UEFI_PAGE_SIZE - 1) / UEFI_PAGE_SIZE; + pool_pages.saturating_sub(used_pages) + } + + /// Returns the base address of the pool. + pub fn pool_base(&self) -> u64 { + self.pool_base.load(Ordering::Acquire) + } + + /// Returns the total size of the pool in bytes. + pub fn pool_size(&self) -> usize { + self.pool_pages.load(Ordering::Acquire) * UEFI_PAGE_SIZE + } +} + +// ============================================================================ +// Implementation of patina_paging::PageAllocator trait +// ============================================================================ + +impl PagingPageAllocator for PagingPoolAllocator { + /// Allocates a page for page table structures. + /// + /// This implements the `patina_paging::PageAllocator` trait. 
+ fn allocate_page(&mut self, align: u64, size: u64, is_root: bool) -> Result { + self.allocate_page_internal(align, size, is_root) + .map_err(|e| { + log::error!("Paging allocator error: {:?}", e); + match e { + PagingAllocError::NotInitialized => PtError::InvalidParameter, + PagingAllocError::AlreadyInitialized => PtError::InvalidParameter, + PagingAllocError::OutOfMemory => PtError::OutOfResources, + PagingAllocError::InvalidAlignment => PtError::InvalidParameter, + PagingAllocError::PoolTooSmall => PtError::InvalidParameter, + } + }) + } +} + +// ============================================================================ +// Wrapper for Shared Access +// ============================================================================ + +/// A wrapper around PagingPoolAllocator that allows shared (non-mutable) access +/// while still implementing the PageAllocator trait. +/// +/// This is needed because the `patina_paging::PageAllocator` trait requires `&mut self`, +/// but we want to use a global static allocator with interior mutability. +pub struct SharedPagingAllocator { + /// The underlying allocator. + inner: UnsafeCell<&'static PagingPoolAllocator>, +} + +// SAFETY: The PagingPoolAllocator uses internal locking for thread safety. +unsafe impl Send for SharedPagingAllocator {} +unsafe impl Sync for SharedPagingAllocator {} + +impl SharedPagingAllocator { + /// Creates a new shared paging allocator wrapper. 
+ pub const fn new(allocator: &'static PagingPoolAllocator) -> Self { + Self { + inner: UnsafeCell::new(allocator), + } + } +} + +impl PagingPageAllocator for SharedPagingAllocator { + fn allocate_page(&mut self, align: u64, size: u64, is_root: bool) -> Result { + // SAFETY: The underlying PagingPoolAllocator uses internal locking + let allocator = unsafe { *self.inner.get() }; + allocator + .allocate_page_internal(align, size, is_root) + .map_err(|e| { + log::error!("Paging allocator error: {:?}", e); + match e { + PagingAllocError::NotInitialized => PtError::InvalidParameter, + PagingAllocError::AlreadyInitialized => PtError::InvalidParameter, + PagingAllocError::OutOfMemory => PtError::OutOfResources, + PagingAllocError::InvalidAlignment => PtError::InvalidParameter, + PagingAllocError::PoolTooSmall => PtError::InvalidParameter, + } + }) + } +} + +// ============================================================================ +// Global Instance +// ============================================================================ + +/// Global paging page allocator instance. +/// +/// This must be initialized via `init()` before use. 
+pub static PAGING_ALLOCATOR: PagingPoolAllocator = PagingPoolAllocator::new(); + +// ============================================================================ +// Tests +// ============================================================================ + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_paging_allocator_not_initialized() { + let allocator = PagingPoolAllocator::new(); + assert!(!allocator.is_initialized()); + assert_eq!(allocator.free_page_count(), 0); + assert_eq!(allocator.allocated_page_count(), 0); + } + + #[test] + fn test_paging_allocator_init() { + let allocator = PagingPoolAllocator::new(); + + // Create a test buffer + let mut buffer = vec![0u8; 16 * UEFI_PAGE_SIZE]; + let base = buffer.as_mut_ptr() as u64; + // Align to page boundary + let aligned_base = (base + UEFI_PAGE_SIZE as u64 - 1) & !(UEFI_PAGE_SIZE as u64 - 1); + + unsafe { + assert!(allocator.init(aligned_base, 8).is_ok()); + } + + assert!(allocator.is_initialized()); + assert_eq!(allocator.free_page_count(), 8); + assert_eq!(allocator.allocated_page_count(), 0); + } + + #[test] + fn test_paging_allocator_double_init() { + let allocator = PagingPoolAllocator::new(); + + let mut buffer = vec![0u8; 16 * UEFI_PAGE_SIZE]; + let base = buffer.as_mut_ptr() as u64; + let aligned_base = (base + UEFI_PAGE_SIZE as u64 - 1) & !(UEFI_PAGE_SIZE as u64 - 1); + + unsafe { + assert!(allocator.init(aligned_base, 8).is_ok()); + assert_eq!( + allocator.init(aligned_base, 8), + Err(PagingAllocError::AlreadyInitialized) + ); + } + } + + #[test] + fn test_paging_allocator_allocate() { + let allocator = PagingPoolAllocator::new(); + + let mut buffer = vec![0u8; 32 * UEFI_PAGE_SIZE]; + let base = buffer.as_mut_ptr() as u64; + let aligned_base = (base + UEFI_PAGE_SIZE as u64 - 1) & !(UEFI_PAGE_SIZE as u64 - 1); + + unsafe { + allocator.init(aligned_base, 16).unwrap(); + } + + // Allocate a page + let result = allocator.allocate_page_internal(UEFI_PAGE_SIZE as u64, UEFI_PAGE_SIZE as u64, 
false); + assert!(result.is_ok()); + let addr = result.unwrap(); + assert_eq!(addr, aligned_base); + assert_eq!(allocator.allocated_page_count(), 1); + + // Allocate another page + let result2 = allocator.allocate_page_internal(UEFI_PAGE_SIZE as u64, UEFI_PAGE_SIZE as u64, false); + assert!(result2.is_ok()); + let addr2 = result2.unwrap(); + assert_eq!(addr2, aligned_base + UEFI_PAGE_SIZE as u64); + assert_eq!(allocator.allocated_page_count(), 2); + } +} diff --git a/patina_mm_supervisor_core/src/perf_timer.rs b/patina_mm_supervisor_core/src/perf_timer.rs new file mode 100644 index 000000000..9c5cc9e43 --- /dev/null +++ b/patina_mm_supervisor_core/src/perf_timer.rs @@ -0,0 +1,142 @@ +//! Performance Timer for the MM Supervisor Core +//! +//! Provides real-time, TSC-based timing helpers used by mailbox timeouts and +//! AP-arrival polling. The module reads the hardware counter via the shared +//! [`patina::component::service::timer`] utilities and stores a one-time +//! calibrated frequency. +//! +//! ## Initialization +//! +//! Call [`init`] once during BSP init with the platform-provided frequency +//! (or `0` to auto-detect from CPUID). +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +use patina::timer as perf_timer; +use spin::Once; + +/// Cached performance counter frequency (Hz). Initialized once during +/// BSP init and read by all cores. +static FREQUENCY: Once = Once::new(); + +/// Initializes the performance timer with a frequency value. +/// +/// If `platform_frequency` is non-zero it is used as-is. Otherwise +/// the function falls back to CPUID-based auto-detection. +/// +/// Calling this more than once is harmless — subsequent calls are no-ops. 
+pub fn init(platform_frequency: u64) {
+    FREQUENCY.call_once(|| {
+        let freq = if platform_frequency != 0 {
+            platform_frequency
+        } else {
+            perf_timer::arch_perf_frequency()
+        };
+
+        if freq == 0 {
+            log::warn!(
+                "perf_timer: unable to determine performance counter frequency; \
+                 timeouts will use iteration-count fallback"
+            );
+        } else {
+            log::info!("perf_timer: frequency = {} Hz ({:.3} GHz)", freq, freq as f64 / 1e9);
+        }
+
+        freq
+    });
+}
+
+/// Returns the current performance counter value (TSC on x86_64).
+#[inline(always)]
+pub fn ticks() -> u64 {
+    perf_timer::arch_cpu_count()
+}
+
+/// Returns the cached frequency in Hz, or `0` if not yet initialized /
+/// not determinable.
+#[inline]
+pub fn frequency() -> u64 {
+    FREQUENCY.get().copied().unwrap_or(0)
+}
+
+/// Converts a duration in microseconds to the equivalent tick count using
+/// the cached frequency.
+///
+/// Returns `None` if the frequency is unknown (0).
+#[inline]
+pub fn us_to_ticks(us: u64) -> Option<u64> {
+    let freq = frequency();
+    if freq == 0 {
+        return None;
+    }
+    // ticks = freq * us / 1_000_000
+    // Use u128 to avoid overflow for large values of freq * us.
+    Some(((freq as u128 * us as u128) / 1_000_000) as u64)
+}
+
+/// Spins until at least `timeout_us` microseconds have elapsed.
+///
+/// Returns `true` when the provided `condition` closure returns `true`
+/// before the deadline, or `false` on timeout.
+///
+/// If the performance frequency is unknown, falls back to a conservative
+/// iteration-count heuristic (`timeout_us * 10` loops).
+pub fn spin_until<F>(timeout_us: u64, mut condition: F) -> bool
+where
+    F: FnMut() -> bool,
+{
+    if let Some(deadline_ticks) = us_to_ticks(timeout_us) {
+        let start = ticks();
+        loop {
+            if condition() {
+                return true;
+            }
+            if ticks().wrapping_sub(start) >= deadline_ticks {
+                return false;
+            }
+            core::hint::spin_loop();
+        }
+    } else {
+        // Fallback: iteration-count approximation.
+ let iterations = timeout_us.saturating_mul(10); + for _ in 0..iterations { + if condition() { + return true; + } + core::hint::spin_loop(); + } + false + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_init_with_known_frequency() { + // Note: Once is global — run in isolation or accept first-writer-wins. + init(1_000_000_000); // 1 GHz + assert!(frequency() > 0); + } + + #[test] + fn test_us_to_ticks_basic() { + // For a 1 GHz counter, 1 us = 1000 ticks. + // We can't guarantee the global FREQUENCY state, so just verify None for 0. + if frequency() == 0 { + assert_eq!(us_to_ticks(1000), None); + } + } + + #[test] + fn test_spin_until_immediate_true() { + let result = spin_until(1_000, || true); + assert!(result); + } +} diff --git a/patina_mm_supervisor_core/src/privilege_mgmt/call_gate.rs b/patina_mm_supervisor_core/src/privilege_mgmt/call_gate.rs new file mode 100644 index 000000000..1d610d638 --- /dev/null +++ b/patina_mm_supervisor_core/src/privilege_mgmt/call_gate.rs @@ -0,0 +1,232 @@ +//! Call Gate and TSS Management +//! +//! This module manages call gates and Task State Segment (TSS) descriptors +//! for privilege level transitions. Call gates provide an alternative mechanism +//! (besides syscall/sysret) for Ring 3 code to transition back to Ring 0. +//! +//! ## Call Gate Usage +//! +//! 1. When invoking a demoted routine, the supervisor sets up a call gate +//! pointing to the return address. +//! +//! 2. The demoted routine in Ring 3 can return to Ring 0 by doing a far call +//! to the call gate selector. +//! +//! 3. The CPU automatically transitions to Ring 0 and jumps to the address +//! in the call gate descriptor. +//! +//! ## TSS Usage +//! +//! The TSS is used to specify the Ring 0 stack pointer (RSP0) that the CPU +//! will load when transitioning from Ring 3 to Ring 0 via an interrupt or +//! call gate. +//! 
+ +#![allow(unsafe_op_in_unsafe_fn)] + +use core::arch::{global_asm, asm}; +use x86_64::{VirtAddr, structures::tss::TaskStateSegment}; +use super::{ + CALL_GATE_OFFSET, TSS_SEL_OFFSET, TSS_DESC_OFFSET, + LONG_CS_R0, +}; + +global_asm!(include_str!("call_gate_transfer.asm")); + +// ============================================================================ +// GDT Descriptor Structures +// ============================================================================ + +/// 64-bit Call Gate Descriptor. +/// +/// A call gate allows privilege level transitions through a far call instruction. +#[repr(C, packed)] +#[derive(Debug, Clone, Copy, Default)] +pub struct CallGateDescriptor { + /// Offset bits 15:0 + pub offset_low: u16, + /// Target code segment selector + pub selector: u16, + /// Reserved (must be 0) and IST (bits 2:0) + pub ist: u8, + /// Type (0xC = 64-bit call gate) and DPL + pub type_attr: u8, + /// Offset bits 31:16 + pub offset_mid: u16, + /// Offset bits 63:32 + pub offset_high: u32, + /// Reserved (must be 0) + pub reserved: u32, +} + +impl CallGateDescriptor { + /// Sets the target offset in the descriptor. + pub fn set_offset(&mut self, offset: u64) { + self.offset_low = (offset & 0xFFFF) as u16; + self.offset_mid = ((offset >> 16) & 0xFFFF) as u16; + self.offset_high = ((offset >> 32) & 0xFFFFFFFF) as u32; + } +} + +/// 64-bit TSS Descriptor (16 bytes in 64-bit mode). +#[repr(C, packed)] +#[derive(Debug, Clone, Copy, Default)] +pub struct TssDescriptor { + /// Limit bits 15:0 + pub limit_low: u16, + /// Base bits 15:0 + pub base_low: u16, + /// Base bits 23:16 + pub base_mid_low: u8, + /// Type and attributes + pub type_attr: u8, + /// Limit bits 19:16 and flags + pub limit_flags: u8, + /// Base bits 31:24 + pub base_mid_high: u8, + /// Base bits 63:32 + pub base_high: u32, + /// Reserved + pub reserved: u32, +} + +impl TssDescriptor { + /// Sets the base address in the descriptor. 
+ pub fn set_base(&mut self, base: u64) { + self.base_low = (base & 0xFFFF) as u16; + self.base_mid_low = ((base >> 16) & 0xFF) as u8; + self.base_mid_high = ((base >> 24) & 0xFF) as u8; + self.base_high = ((base >> 32) & 0xFFFFFFFF) as u32; + } +} + +// ============================================================================ +// GDT Register +// ============================================================================ + +/// GDTR (GDT Register) structure. +#[repr(C, packed)] +#[derive(Debug, Clone, Copy, Default)] +pub struct GdtRegister { + /// Size of the GDT minus 1 + pub limit: u16, + /// Linear address of the GDT + pub base: u64, +} + +// ============================================================================ +// Standalone Functions +// ============================================================================ + +/// Gets the current GDT base address by reading the GDTR register. +/// # Safety +/// This function is safe to call as it only reads the GDTR register and does not modify +/// any state. However, it is marked unsafe because it uses inline assembly. +#[cfg(target_arch = "x86_64")] +pub unsafe fn get_current_gdt_base() -> u64 { + // Get current GDT base + let mut gdtr = GdtRegister::default(); + core::arch::asm!( + "sgdt [{}]", + in(reg) &mut gdtr, + options(nostack, preserves_flags) + ); + let gdt_base = gdtr.base; + gdt_base +} + +/// Sets up the call gate for returning from a demoted routine. +/// This function is called from assembly code (InvokeDemotedRoutine). +/// +/// # Arguments +/// +/// * `return_pointer` - Address to jump to when the call gate is invoked +/// +/// # Safety +/// +/// This modifies the GDT. 
+#[unsafe(no_mangle)]
+#[cfg(target_arch = "x86_64")]
+pub unsafe extern "efiapi" fn setup_call_gate(
+    return_pointer: u64,
+    cpl0_stack_ptr: u64,
+) {
+    // Get current GDT base
+    let gdt_base = get_current_gdt_base();
+
+    let call_gate_addr = gdt_base + CALL_GATE_OFFSET as u64;
+
+    // NOTE(review): the descriptor address is derived from TSS_SEL_OFFSET and
+    // the TSS body from TSS_DESC_OFFSET — these names look swapped relative to
+    // the GDT layout documented in mod.rs; confirm the intended layout.
+    let tss_desc_addr = gdt_base + TSS_SEL_OFFSET as u64;
+    let tss_addr = gdt_base + TSS_DESC_OFFSET as u64;
+
+    // NOTE(review): the code below clears CR4.PGE (bit 7), which only forces a
+    // global TLB flush; it does NOT disable write protection. Supervisor-mode
+    // write protection is controlled by CR0.WP (bit 16) — confirm this actually
+    // makes a read-only GDT page writable, or toggle CR0.WP instead.
+    let mut cr4: u64;
+    // SAFETY: reads and writes CR4 around the GDT descriptor update; the
+    // original CR4 value is saved here and restored below.
+    unsafe {
+        asm!("mov {}, cr4", out(reg) cr4);
+        asm!("mov cr4, {}", in(reg) cr4 & !(1 << 7)); // clears CR4.PGE — see NOTE above
+    };
+
+    // Now program the call gate descriptor for the return address
+    let call_gate = call_gate_addr as *mut CallGateDescriptor;
+
+    // Update the call gate offset
+    let mut desc = core::ptr::read_volatile(call_gate);
+    desc.set_offset(return_pointer);
+    desc.selector = LONG_CS_R0;
+    // Type = 0xC (64-bit call gate), P = 1, DPL = 3 (Ring 3 can call)
+    desc.type_attr = 0xEC;
+    core::ptr::write_volatile(call_gate, desc);
+
+
+    // Then program the TSS descriptor to point to the TSS (which contains the stack pointer for Ring 0)
+    let tss_desc = tss_desc_addr as *mut TssDescriptor;
+    let tss = tss_addr as *mut TaskStateSegment;
+
+    // Update TSS descriptor to point to the TSS
+    let mut desc = core::ptr::read_volatile(tss_desc);
+    desc.set_base(tss_addr);
+    core::ptr::write_volatile(tss_desc, desc);
+
+    // Update RSP0 in the TSS
+    let mut tss_data = core::ptr::read_volatile(tss);
+    tss_data.privilege_stack_table[0] = VirtAddr::new(cpl0_stack_ptr);
+    core::ptr::write_volatile(tss, tss_data);
+
+    // Restore the saved CR4 value (re-enables CR4.PGE)
+    unsafe {
+        asm!("mov cr4, {}", in(reg) cr4);
+    }
+
+    log::trace!("Call gate set to 0x{:016x},
CPL0 stack pointer set to 0x{:016x}", return_pointer, cpl0_stack_ptr);
+}
+
+// ============================================================================
+// Tests
+// ============================================================================
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // NOTE(review): these tests call `CallGateDescriptor::new`/`get_offset` and
+    // `TssDescriptor::new`/`get_base`, but only `set_offset`/`set_base` are
+    // defined in this module — as written the test module does not compile.
+    // Either add the missing constructors/accessors or rewrite the tests to
+    // reconstruct the values from the descriptor fields.
+    #[test]
+    fn test_call_gate_descriptor_offset() {
+        let mut desc = CallGateDescriptor::new(0x12345678_9ABCDEF0, LONG_CS_R0, 3);
+        assert_eq!(desc.get_offset(), 0x12345678_9ABCDEF0);
+
+        desc.set_offset(0xFEDCBA98_76543210);
+        assert_eq!(desc.get_offset(), 0xFEDCBA98_76543210);
+    }
+
+    #[test]
+    fn test_tss_descriptor_base() {
+        let mut desc = TssDescriptor::new(0x12345678_9ABCDEF0, 0x1000);
+        assert_eq!(desc.get_base(), 0x12345678_9ABCDEF0);
+
+        desc.set_base(0xFEDCBA98_76543210);
+        assert_eq!(desc.get_base(), 0xFEDCBA98_76543210);
+    }
+
+}
diff --git a/patina_mm_supervisor_core/src/privilege_mgmt/call_gate_transfer.asm b/patina_mm_supervisor_core/src/privilege_mgmt/call_gate_transfer.asm
new file mode 100644
index 000000000..2458e0801
--- /dev/null
+++ b/patina_mm_supervisor_core/src/privilege_mgmt/call_gate_transfer.asm
@@ -0,0 +1,323 @@
+#------------------------------------------------------------------------------
+# Copyright 2008 - 2020 ADVANCED MICRO DEVICES, INC. All Rights Reserved.
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+# Module Name:
+#
+#   call_gate_transfer.asm
+#
+# Abstract:
+#
+#   Ring 0 <-> Ring 3 transfer stubs: invoke_demoted_routine demotes execution
+#   to CPL3 via far return and comes back through a call gate; the syscall
+#   MSRs (STAR/LSTAR/EFER/GS bases) are set up and restored in situ.
+#
+# Notes:
+#
+#------------------------------------------------------------------------------
+
+.section .data
+
+.global invoke_demoted_routine
+.global setup_call_gate
+.global syscall_center
+
+# .global SetupCpl0MsrStar
+# .global RestoreCpl0MsrStar
+
+.section .text
+.align 8
+
+# Segments defined in SmiException.nasm
+.equ PROTECTED_DS, 0x20
+.equ LONG_CS_R0, 0x38
+.equ LONG_DS_R0, 0x40
+.equ LONG_CS_R3_PH, 0x4B
+.equ LONG_DS_R3, 0x53
+.equ LONG_CS_R3, 0x5B
+# Call gate *selector* (GDT byte offset 0x60 with RPL = 3); the Rust-side
+# CALL_GATE_OFFSET constant (0x60) is the raw GDT byte offset.
+.equ CALL_GATE_OFFSET, 0x63
+
+# MSR constants
+.equ MSR_IA32_EFER, 0xC0000080
+.equ MSR_IA32_EFER_SCE_MASK, 0x00000001
+
+.equ MSR_IA32_STAR, 0xC0000081
+.equ MSR_IA32_LSTAR, 0xC0000082
+.equ MSR_IA32_GS_BASE, 0xC0000101
+.equ MSR_IA32_KERNEL_GS_BASE, 0xC0000102
+
+
+.macro CHECK_RAX
+    cmp rax, 0
+    jz 4f
+.endm
+
+#------------------------------------------------------------------------------
+# /**
+#   Invoke specified routine on specified core in CPL 3.
+#
+#   @param[in]  CpuIndex     CpuIndex value of intended core, cannot be
+#                            greater than mNumberOfCpus.
+#   @param[in]  Cpl3Routine  Function pointer to demoted routine.
+#   @param[in]  ArgCount     Number of arguments needed by Cpl3Routine.
+#   @param      ...          The variable argument list whose count is defined by
+#                            ArgCount. Its contents will be accessed and populated
+#                            to the registers and/or CPL3 stack areas per EFIAPI
+#                            calling convention.
+#
+#   @retval EFI_SUCCESS      The demoted routine returns successfully.
+#   @retval Others           Errors caught by subroutines during ring transitioning
+#                            or error code returned from demoted routine.
+# **/
+# EFI_STATUS
+# EFIAPI
+# InvokeDemotedRoutine (
+#   IN UINTN                 CpuIndex,
+#   IN EFI_PHYSICAL_ADDRESS  Cpl3Routine,
+#   IN EFI_PHYSICAL_ADDRESS  Cpl3Stack,
+#   IN UINTN                 ArgCount,
+#   ...
+# ); +# Calling convention: Arg0 in RCX, Arg1 in RDX, Arg2 in R8, Arg3 in R9, more on the stack +#------------------------------------------------------------------------------ +invoke_demoted_routine: + #Preserve input parameters onto reg parameter stack area for later usage + mov [rsp + 0x20], r9 + mov [rsp + 0x18], r8 + mov [rsp + 0x10], rdx + mov [rsp + 0x08], rcx + + #Preserve nonvolatile registers, in case demoted routines mess with them + push rbp + mov rbp, rsp + #Clear the lowest 16 bit after saving rsp, to make sure the stack pointer 16byte aligned + and rsp, -16 + + push rbx + push rdi + push rsi + push r12 + push r13 + push r14 + push r15 + + #Preserve the updated rbp as we need them on return + push rbp + + mov r15, r8 + and r15, -16 + + # Set up the MSR STAR, LSTAR, EFER, GS_BASE and KERNEL_GS_BASE, in situ + mov rcx, MSR_IA32_STAR + rdmsr + push rdx + push rax + + mov edx, LONG_CS_R3_PH + shl edx, 16 + add edx, LONG_CS_R0 + wrmsr + + mov rcx, MSR_IA32_LSTAR + rdmsr + push rdx + push rax + + lea rax, syscall_center + lea rdx, syscall_center + shr rdx, 32 + wrmsr + + mov rcx, MSR_IA32_EFER + rdmsr + push rdx + push rax + + or rax, MSR_IA32_EFER_SCE_MASK + wrmsr + + mov rcx, MSR_IA32_GS_BASE + rdmsr + push rdx + push rax + + xor rdx, rdx + xor rax, rax + wrmsr + + mov rcx, MSR_IA32_KERNEL_GS_BASE + rdmsr + push rdx + push rax + + mov eax, esp + sub eax, 16 + mov rdx, rsp + sub rdx, 16 + shr rdx, 32 + wrmsr + + # This is to do the GS trick upon syscall entry + sub rsp, 8 + + # This is to do the GS trick upon syscall entry + mov rdx, rsp + sub rdx, 8 + push rdx + + # Now the stack will look like + # Current RSP <- Incoming calls will operate on top of this + # 0 <- Will be used for user stack saving + # KERNEL_GS_BASE * 2 <- Will be restored on return + # GS_BASE * 2 <- Will be restored on return + # EFER <- Will be restored on return + # LSTAR * 2 <- Will be restored on return + # STAR * 2 <- Will be restored on return + # One version of RBP <- Value 
after we pushed NV registers + # r15 + # r14 + # r13 + # r12 + # rsi + # rdi + # rbx + # ? <- Potential buffer for unaligned incoming caller + # Original RBP + # --------------- <- RSP When the caller invokes this + # rcx + # rdx + # r8 + # r9 + + #Setup call gate for return + lea rcx, [rip + 5f] + mov rdx, rsp + sub rsp, 0x20 + call setup_call_gate + add rsp, 0x20 + + #Same level far return to apply GDT change + xor rcx, rcx + mov rcx, cs + push rcx #prepare cs on the stack + lea rax, [rip + 2f] + push rax #prepare return rip on the stack + retfq + +2: + #Prepare for ds, es, fs, gs + xor rax, rax + mov ax, LONG_DS_R3 + mov ds, ax + mov es, ax + mov fs, ax + mov gs, ax + + #Prepare input arguments + mov rax, [rbp + 0x28] #Get ArgCount from stack + CHECK_RAX + mov rcx, [rbp + 0x30] #First input argument for demoted routine + dec rax + CHECK_RAX + mov rdx, [rbp + 0x38] #Second input argument for demoted routine + dec rax + CHECK_RAX + mov r8, [rbp + 0x40] #Third input argument for demoted routine + dec rax + CHECK_RAX + mov r9, [rbp + 0x48] #Forth input argument for demoted routine + dec rax + CHECK_RAX + #For further input arguments, they will be put on the stack + xor rbx, rbx #rbx=0 + mov r14, rax + shl r14, 3 #r14=8*rax + sub r15, r14 #r15-=r14, offset the stack for remainder of input arguments + sub r15, 0x20 #r15-=0x20, 4 stack parameters + and r15, -16 #finally we worry about the stack alignment in CPL3 +3: + mov r14, [rbp + 0x48 + rbx] #r14=*(rbp+0x48+rbx) + mov [r15 + 0x20 + rbx], r14 #*(r15+0x20+rbx)=r14 + add rbx, 0x08 #rbx+=0x08 + dec rax + CHECK_RAX + jmp 3b + +4: + #Demote to CPL3 by far return, it will take care of cs and ss + #Note: we did more pushes on the way, so need to compensate the calculation when grabbing earlier pushed values + sub r15, 0x08 #dummy r15 displacement, to mimic the return pointer on the stack + push LONG_DS_R3 #prepare ss on the stack + mov rax, r15 #grab Cpl3StackPtr from r15 + push rax #prepare CPL3 stack pointer on the stack 
+ push LONG_CS_R3 #prepare cs on the stack + mov rax, [rbp + 0x18] #grab routine pointer from stack + push rax #prepare routine pointer on the stack + + mov r15, CALL_GATE_OFFSET #This is our way to come back, do not mess it up + shl r15, 32 #Call gate on call far stack should be CS:rIP + + retfq + + #2000 years later... + +5: + #First offset the return far related 4 pushes (we have 0 count of arguments): + #PUSH.v old_SS // #SS on this or next pushes use SS.sel as error code + #PUSH.v old_RSP + #PUSH.v old_CS + #PUSH.v next_RIP + add rsp, 0x20 + + #Demoted routine is responsible for returning to this point by invoking call gate + #Return status should still be in rax, save it before calling other functions + push rax + + add rsp, 24 + + pop rax + pop rdx + mov rcx, MSR_IA32_KERNEL_GS_BASE + wrmsr + + pop rax + pop rdx + mov rcx, MSR_IA32_GS_BASE + wrmsr + + pop rax + pop rdx + mov rcx, MSR_IA32_EFER + wrmsr + + pop rax + pop rdx + mov rcx, MSR_IA32_LSTAR + wrmsr + + pop rax + pop rdx + mov rcx, MSR_IA32_STAR + wrmsr + + mov rax, [rsp - 13 * 8] + + xor rcx, rcx + mov cx, LONG_DS_R0 + mov ds, cx + mov es, cx + mov fs, cx + mov gs, cx + + add rsp, 0x08 #Unwind the rbp from the last net-push + #Unwind the rest of the pushes + pop r15 + pop r14 + pop r13 + pop r12 + pop rsi + pop rdi + pop rbx + mov rsp, rbp + pop rbp + + ret diff --git a/patina_mm_supervisor_core/src/privilege_mgmt/mod.rs b/patina_mm_supervisor_core/src/privilege_mgmt/mod.rs new file mode 100644 index 000000000..b8ca9ff03 --- /dev/null +++ b/patina_mm_supervisor_core/src/privilege_mgmt/mod.rs @@ -0,0 +1,242 @@ +//! Privilege Management for MM Supervisor Core +//! +//! This module manages the privilege level transitions between Ring 0 (supervisor) +//! and Ring 3 (user) in the MM environment. It provides: +//! +//! - One-time initialization of syscall/sysret MSRs +//! - Demotion of code execution to Ring 3 via `InvokeDemotedRoutine` +//! - Handling of syscall requests from Ring 3 code +//! 
- Call gate and TSS descriptor management for privilege transitions +//! +//! ## Architecture +//! +//! The privilege management follows the x86_64 syscall/sysret model: +//! +//! 1. **Initialization**: Configure MSR_IA32_STAR, MSR_IA32_LSTAR, MSR_IA32_EFER +//! to set up syscall entry points and segment selectors. +//! +//! 2. **Demotion**: Use `InvokeDemotedRoutine` to transition from Ring 0 to Ring 3. +//! This sets up call gates for return and prepares the Ring 3 stack. +//! +//! 3. **Syscall Entry**: When Ring 3 code executes `syscall`, the CPU jumps to +//! the address in MSR_IA32_LSTAR (our `SyscallCenter`), which dispatches +//! to the appropriate handler. +//! +//! 4. **Return**: Ring 3 code returns via call gate or syscall dispatcher returns +//! via `sysret`. +//! +//! ## Segment Layout (from SmiException.nasm) +//! +//! ```text +//! PROTECTED_DS = 0x20 +//! LONG_CS_R0 = 0x38 (Ring 0 code segment) +//! LONG_DS_R0 = 0x40 (Ring 0 data segment) +//! LONG_CS_R3_PH = 0x4B (Ring 3 code segment placeholder) +//! LONG_DS_R3 = 0x53 (Ring 3 data segment) +//! LONG_CS_R3 = 0x5B (Ring 3 code segment) +//! CALL_GATE_OFFSET = 0x60 (Call gate descriptor offset) +//! TSS_SEL_OFFSET = 0x70 (TSS selector offset) +//! TSS_DESC_OFFSET = 0x80 (TSS descriptor offset) +//! ``` +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +mod syscall_setup; +mod syscall_dispatcher; +mod call_gate; + +pub use syscall_setup::{ + SyscallInterface, SyscallSetupError, +}; +pub use syscall_dispatcher::{ + SyscallDispatcher, SyscallIndex, SyscallResult, +}; + +// ============================================================================ +// External Assembly Routine +// ============================================================================ + +unsafe extern "efiapi" { + /// Invokes a specified routine in CPL 3 (Ring 3). 
+ /// + /// This function transitions from Ring 0 to Ring 3, executes the demoted routine, + /// and returns back to Ring 0 through a call gate. + /// + /// # Arguments + /// + /// * `cpu_index` - CPU index value of the intended core + /// * `cpl3_routine` - Function pointer to the demoted routine + /// * `cpl3_stack` - Stack pointer for Ring 3 execution + /// * `arg_count` - Number of arguments needed by the demoted routine + /// * `...` - Variable argument list (count defined by `arg_count`), populated + /// to registers and/or CPL3 stack areas per EFIAPI calling convention + /// + /// # Returns + /// + /// * `EFI_SUCCESS` - The demoted routine returned successfully + /// * Other values - Errors from ring transitioning or the demoted routine + /// + /// # Safety + /// + /// This function modifies privilege levels and stack pointers. Callers must ensure: + /// - Valid function pointer for `cpl3_routine` + /// - Valid stack pointer for `cpl3_stack` + /// - Correct `arg_count` matching the actual arguments + pub fn invoke_demoted_routine( + cpu_index: usize, + cpl3_routine: u64, + cpl3_stack: u64, + arg_count: usize, + ... + ) -> usize; +} + +// ============================================================================ +// Segment Selector Constants +// ============================================================================ + +/// Protected mode data segment selector. +pub const PROTECTED_DS: u16 = 0x20; + +/// Long mode Ring 0 code segment selector. +pub const LONG_CS_R0: u16 = 0x38; + +/// Long mode Ring 0 data segment selector. +pub const LONG_DS_R0: u16 = 0x40; + +/// Long mode Ring 3 code segment placeholder (for STAR MSR). +pub const LONG_CS_R3_PH: u16 = 0x4B; + +/// Long mode Ring 3 data segment selector. +pub const LONG_DS_R3: u16 = 0x53; + +/// Long mode Ring 3 code segment selector. +pub const LONG_CS_R3: u16 = 0x5B; + +/// Call gate descriptor offset in GDT. +pub const CALL_GATE_OFFSET: u16 = 0x60; + +/// TSS selector offset in GDT. 
+pub const TSS_SEL_OFFSET: u16 = 0x70; + +/// TSS descriptor offset in GDT. +pub const TSS_DESC_OFFSET: u16 = 0x80; + +// ============================================================================ +// MSR Definitions +// ============================================================================ + +/// MSR_IA32_STAR - System Call Target Address Register +/// Contains the segment selectors for syscall/sysret. +pub const MSR_IA32_STAR: u32 = 0xC000_0081; + +/// MSR_IA32_LSTAR - Long Mode System Call Target Address Register +/// Contains the RIP for 64-bit syscall entry. +pub const MSR_IA32_LSTAR: u32 = 0xC000_0082; + +/// MSR_IA32_CSTAR - Compatibility Mode System Call Target Address Register +/// Contains the RIP for compatibility mode syscall (not used in pure 64-bit). +pub const MSR_IA32_CSTAR: u32 = 0xC000_0083; + +/// MSR_IA32_SFMASK - System Call Flag Mask +/// Contains flags to be cleared on syscall. +pub const MSR_IA32_SFMASK: u32 = 0xC000_0084; + +/// MSR_IA32_EFER - Extended Feature Enable Register +pub const MSR_IA32_EFER: u32 = 0xC000_0080; + +/// MSR_IA32_GS_BASE - GS Base Address Register +pub const MSR_IA32_GS_BASE: u32 = 0xC000_0101; + +/// MSR_IA32_KERNEL_GS_BASE - Kernel GS Base Address Register (swapped on swapgs) +pub const MSR_IA32_KERNEL_GS_BASE: u32 = 0xC000_0102; + +// ============================================================================ +// EFER Bits +// ============================================================================ + +/// EFER.SCE - System Call Enable bit +pub const EFER_SCE: u64 = 1 << 0; + +/// EFER.LME - Long Mode Enable bit +pub const EFER_LME: u64 = 1 << 8; + +/// EFER.LMA - Long Mode Active bit +pub const EFER_LMA: u64 = 1 << 10; + +/// EFER.NXE - No-Execute Enable bit +pub const EFER_NXE: u64 = 1 << 11; + +// ============================================================================ +// Common Types +// ============================================================================ + +/// Current privilege 
level.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+#[repr(u8)]
+pub enum PrivilegeLevel {
+    /// Ring 0 - Supervisor/Kernel mode
+    Ring0 = 0,
+    /// Ring 3 - User mode
+    Ring3 = 3,
+}
+
+impl PrivilegeLevel {
+    /// Returns the numeric value of the privilege level.
+    pub fn as_u8(self) -> u8 {
+        self as u8
+    }
+
+    /// Creates a PrivilegeLevel from a numeric value.
+    pub fn from_u8(value: u8) -> Option<Self> {
+        match value {
+            0 => Some(Self::Ring0),
+            3 => Some(Self::Ring3),
+            _ => None,
+        }
+    }
+}
+
+/// Result type for privilege management operations.
+pub type PrivilegeResult<T> = Result<T, PrivilegeError>;
+
+/// Errors that can occur during privilege management operations.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum PrivilegeError {
+    /// The privilege management system has not been initialized.
+    NotInitialized,
+    /// Already initialized (cannot re-initialize).
+    AlreadyInitialized,
+    /// Invalid CPU index.
+    InvalidCpuIndex,
+    /// Out of resources (memory allocation failed).
+    OutOfResources,
+    /// The operation is not ready (missing prerequisite).
+    NotReady,
+    /// Invalid parameter provided.
+    InvalidParameter,
+    /// Security violation detected.
+    SecurityViolation,
+    /// The syscall index is not supported.
+    UnsupportedSyscall,
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_privilege_level() {
+        assert_eq!(PrivilegeLevel::Ring0.as_u8(), 0);
+        assert_eq!(PrivilegeLevel::Ring3.as_u8(), 3);
+        assert_eq!(PrivilegeLevel::from_u8(0), Some(PrivilegeLevel::Ring0));
+        assert_eq!(PrivilegeLevel::from_u8(3), Some(PrivilegeLevel::Ring3));
+        assert_eq!(PrivilegeLevel::from_u8(1), None);
+        assert_eq!(PrivilegeLevel::from_u8(2), None);
+    }
+}
diff --git a/patina_mm_supervisor_core/src/privilege_mgmt/syscall_dispatcher.rs b/patina_mm_supervisor_core/src/privilege_mgmt/syscall_dispatcher.rs
new file mode 100644
index 000000000..b07f12926
--- /dev/null
+++ b/patina_mm_supervisor_core/src/privilege_mgmt/syscall_dispatcher.rs
@@ -0,0 +1,930 @@
+//!
Syscall Dispatcher +//! +//! This module handles syscall requests from Ring 3 code. When Ring 3 code +//! executes the `syscall` instruction, the CPU jumps to the address in +//! MSR_IA32_LSTAR (our SyscallCenter assembly stub), which then calls into +//! this dispatcher. +//! +//! ## Syscall Interface +//! +//! The syscall uses a custom calling convention: +//! - RAX: Call index (SyscallIndex) +//! - RDX: Argument 1 +//! - R8: Argument 2 +//! - R9: Argument 3 +//! - RCX: Caller return address (set by syscall instruction) +//! - R11: RFLAGS (set by syscall instruction) +//! +//! The dispatcher validates the request and dispatches to the appropriate handler. +//! +//! ## Security +//! +//! All syscall handlers must validate their arguments and check that any +//! memory pointers are within valid user-accessible regions. +//! + +use core::sync::atomic::{AtomicBool, Ordering}; +use core::arch::{global_asm, asm}; +use r_efi::efi::{AllocateType, ALLOCATE_ANY_PAGES, MemoryType, RUNTIME_SERVICES_DATA}; + +use patina_mm_policy::{AccessType, IoWidth, Instruction}; +use patina::base::UEFI_PAGE_SIZE; + +use super::{PrivilegeError, PrivilegeResult}; +use crate::{COMM_BUFFER_CONFIG, POLICY_GATE, UNBLOCKED_MEMORY_TRACKER, PageOwnership, query_address_ownership}; + +global_asm!(include_str!("syscall_entry.asm")); + +// ============================================================================ +// EFI_MM_IO_WIDTH values (from EFI spec) +// ============================================================================ + +/// MM_IO_UINT8 - 8-bit I/O access width. +const MM_IO_UINT8: u64 = 0; +/// MM_IO_UINT16 - 16-bit I/O access width. +const MM_IO_UINT16: u64 = 1; +/// MM_IO_UINT32 - 32-bit I/O access width. +const MM_IO_UINT32: u64 = 2; + +/// Converts an EFI_MM_IO_WIDTH enum value to our [`IoWidth`] type. +/// +/// The EFI spec defines: MM_IO_UINT8=0, MM_IO_UINT16=1, MM_IO_UINT32=2. 
+fn efi_io_width_to_io_width(width: u64) -> Option<IoWidth> {
+    match width {
+        MM_IO_UINT8 => Some(IoWidth::Byte),
+        MM_IO_UINT16 => Some(IoWidth::Word),
+        MM_IO_UINT32 => Some(IoWidth::Dword),
+        _ => None,
+    }
+}
+
+// ============================================================================
+// Hardware Operation Helpers
+// ============================================================================
+
+/// Reads an 8-bit value from an I/O port.
+///
+/// # Safety
+///
+/// The caller must ensure the port address is valid and access is allowed by policy.
+#[inline]
+unsafe fn io_read_u8(port: u16) -> u8 {
+    let value: u8;
+    unsafe {
+        asm!("in al, dx", out("al") value, in("dx") port, options(nomem, nostack));
+    }
+    value
+}
+
+/// Reads a 16-bit value from an I/O port.
+///
+/// # Safety
+///
+/// The caller must ensure the port address is valid and access is allowed by policy.
+#[inline]
+unsafe fn io_read_u16(port: u16) -> u16 {
+    let value: u16;
+    unsafe {
+        asm!("in ax, dx", out("ax") value, in("dx") port, options(nomem, nostack));
+    }
+    value
+}
+
+/// Reads a 32-bit value from an I/O port.
+///
+/// # Safety
+///
+/// The caller must ensure the port address is valid and access is allowed by policy.
+#[inline]
+unsafe fn io_read_u32(port: u16) -> u32 {
+    let value: u32;
+    unsafe {
+        asm!("in eax, dx", out("eax") value, in("dx") port, options(nomem, nostack));
+    }
+    value
+}
+
+/// Writes an 8-bit value to an I/O port.
+///
+/// # Safety
+///
+/// The caller must ensure the port address is valid and access is allowed by policy.
+#[inline]
+unsafe fn io_write_u8(port: u16, value: u8) {
+    unsafe {
+        asm!("out dx, al", in("dx") port, in("al") value, options(nomem, nostack));
+    }
+}
+
+/// Writes a 16-bit value to an I/O port.
+///
+/// # Safety
+///
+/// The caller must ensure the port address is valid and access is allowed by policy.
+#[inline] +unsafe fn io_write_u16(port: u16, value: u16) { + unsafe { + asm!("out dx, ax", in("dx") port, in("ax") value, options(nomem, nostack)); + } +} + +/// Writes a 32-bit value to an I/O port. +/// +/// # Safety +/// +/// The caller must ensure the port address is valid and access is allowed by policy. +#[inline] +unsafe fn io_write_u32(port: u16, value: u32) { + unsafe { + asm!("out dx, eax", in("dx") port, in("eax") value, options(nomem, nostack)); + } +} + +// ============================================================================ +// Syscall Indices +// ============================================================================ + +// ============================================================================ +// Syscall Indices (re-exported from the shared common crate) +// ============================================================================ + +pub use patina_internal_mm_common::SyscallIndex; + +// ============================================================================ +// Syscall Result +// ============================================================================ + +/// Result of a syscall operation. +#[derive(Debug, Clone, Copy)] +pub struct SyscallResult { + /// Return value (in RAX on return to Ring 3). + pub value: u64, + /// Status code (EFI_STATUS compatible). + pub status: u64, +} + +impl SyscallResult { + /// Creates a successful result with a value. + pub const fn success(value: u64) -> Self { + Self { value, status: 0 } + } + + /// Creates an error result. 
+ pub const fn error(status: u64) -> Self { + Self { value: 0, status } + } + + /// EFI_SUCCESS + pub const EFI_SUCCESS: u64 = 0; + /// EFI_INVALID_PARAMETER + pub const EFI_INVALID_PARAMETER: u64 = 0x8000_0000_0000_0002; + /// EFI_UNSUPPORTED + pub const EFI_UNSUPPORTED: u64 = 0x8000_0000_0000_0003; + /// EFI_ACCESS_DENIED + pub const EFI_ACCESS_DENIED: u64 = 0x8000_0000_0000_000F; + /// EFI_NOT_READY + pub const EFI_NOT_READY: u64 = 0x8000_0000_0000_0006; + /// EFI_OUT_OF_RESOURCES + pub const EFI_OUT_OF_RESOURCES: u64 = 0x8000_0000_0000_0009; + /// EFI_SECURITY_VIOLATION + pub const EFI_SECURITY_VIOLATION: u64 = 0x8000_0000_0000_001A; +} + +// ============================================================================ +// Syscall Context +// ============================================================================ + +/// Context for a syscall invocation. +#[derive(Debug, Clone, Copy)] +pub struct SyscallContext { + /// The syscall index (from RAX). + pub call_index: u64, + /// First argument (from RDX). + pub arg1: u64, + /// Second argument (from R8). + pub arg2: u64, + /// Third argument (from R9). + pub arg3: u64, + /// Caller return address (from RCX, set by syscall instruction). + pub caller_addr: u64, + /// Ring 3 stack pointer at syscall entry. + pub ring3_stack_ptr: u64, +} + +// ============================================================================ +// Syscall Dispatcher +// ============================================================================ + +/// The syscall dispatcher handles incoming syscalls from Ring 3. +pub struct SyscallDispatcher { + /// Whether the dispatcher has been initialized. + initialized: AtomicBool, +} + +impl SyscallDispatcher { + /// Creates a new syscall dispatcher. + pub const fn new() -> Self { + Self { + initialized: AtomicBool::new(false), + } + } + + /// Initializes the syscall dispatcher. 
+ pub fn init(&self) -> PrivilegeResult<()> { + if self.initialized.swap(true, Ordering::SeqCst) { + return Err(PrivilegeError::AlreadyInitialized); + } + + log::info!("SyscallDispatcher initialized"); + Ok(()) + } + + /// Checks if the dispatcher is initialized. + pub fn is_initialized(&self) -> bool { + self.initialized.load(Ordering::Acquire) + } + + /// Dispatches a syscall. + /// + /// This is the main entry point called from the assembly syscall handler. + /// It validates the syscall index and dispatches to the appropriate handler. + /// + /// # Arguments + /// + /// * `ctx` - The syscall context containing all arguments + /// + /// # Returns + /// + /// The result to be returned to Ring 3 in RAX. + pub fn dispatch(&self, ctx: &SyscallContext) -> SyscallResult { + // Parse the syscall index + let index = match SyscallIndex::from_u64(ctx.call_index) { + Some(idx) => idx, + None => { + log::warn!("Unknown syscall index: 0x{:x}", ctx.call_index); + return SyscallResult::error(SyscallResult::EFI_UNSUPPORTED); + } + }; + + log::trace!( + "Syscall: {:?} (0x{:x}), args: 0x{:x}, 0x{:x}, 0x{:x}", + index, + ctx.call_index, + ctx.arg1, + ctx.arg2, + ctx.arg3 + ); + + // Dispatch to the appropriate handler + match index { + SyscallIndex::RdMsr => self.handle_rdmsr(ctx), + SyscallIndex::WrMsr => self.handle_wrmsr(ctx), + SyscallIndex::Cli => self.handle_cli(ctx), + SyscallIndex::IoRead => self.handle_io_read(ctx), + SyscallIndex::IoWrite => self.handle_io_write(ctx), + SyscallIndex::Wbinvd => self.handle_wbinvd(ctx), + SyscallIndex::Hlt => self.handle_hlt(ctx), + SyscallIndex::SaveStateRead => self.handle_save_state_read(ctx), + SyscallIndex::LegacyMax => panic!("Invalid syscall index: LegacyMax is not a real syscall"), + SyscallIndex::AllocPage => self.handle_alloc_page(ctx), + SyscallIndex::FreePage => self.handle_free_page(ctx), + SyscallIndex::StartApProc => self.handle_start_ap_proc(ctx), + SyscallIndex::SaveStateRead2 => self.handle_save_state_read2(ctx), + 
SyscallIndex::MmMemoryUnblocked => self.handle_mm_memory_unblocked(ctx), + SyscallIndex::MmIsCommBuffer => self.handle_mm_is_comm_buffer(ctx), + } + } + + // ======================================================================== + // Syscall Handlers + // ======================================================================== + + /// Handles MSR read syscall. + /// + /// Validates the MSR read against firmware policy, then executes `rdmsr`. + /// - Arg1: MSR index + /// - Returns: MSR value in result.value + fn handle_rdmsr(&self, ctx: &SyscallContext) -> SyscallResult { + let msr_index = ctx.arg1 as u32; + log::trace!("RDMSR: msr=0x{:x}", msr_index); + + // Validate against policy + let gate = match POLICY_GATE.get() { + Some(g) => g, + None => { + log::error!("RDMSR: Policy gate not initialized"); + return SyscallResult::error(SyscallResult::EFI_NOT_READY); + } + }; + + if let Err(e) = gate.is_msr_allowed(msr_index, AccessType::Read) { + log::error!("RDMSR: MSR 0x{:x} blocked by policy: {:?}", msr_index, e); + return SyscallResult::error(SyscallResult::EFI_ACCESS_DENIED); + } + + // Policy allows - execute the MSR read + let value = unsafe { crate::cpu::read_msr(msr_index) }.unwrap_or_else(|e| { + log::error!("RDMSR: rdmsr failed: {}", e); + 0 + }); + log::debug!("RDMSR: MSR 0x{:x} = 0x{:x}", msr_index, value); + SyscallResult::success(value) + } + + /// Handles MSR write syscall. + /// + /// Validates the MSR write against firmware policy, then executes `wrmsr`. 
+ /// - Arg1: MSR index + /// - Arg2: Value to write + fn handle_wrmsr(&self, ctx: &SyscallContext) -> SyscallResult { + let msr_index = ctx.arg1 as u32; + let value = ctx.arg2; + log::trace!("WRMSR: msr=0x{:x}, value=0x{:x}", msr_index, value); + + // Validate against policy + let gate = match POLICY_GATE.get() { + Some(g) => g, + None => { + log::error!("WRMSR: Policy gate not initialized"); + return SyscallResult::error(SyscallResult::EFI_NOT_READY); + } + }; + + if let Err(e) = gate.is_msr_allowed(msr_index, AccessType::Write) { + log::error!("WRMSR: MSR 0x{:x} blocked by policy: {:?}", msr_index, e); + return SyscallResult::error(SyscallResult::EFI_ACCESS_DENIED); + } + + // Policy allows - execute the MSR write + if let Err(e) = unsafe { crate::cpu::write_msr(msr_index, value) } { + log::error!("WRMSR: wrmsr failed: {}", e); + return SyscallResult::error(SyscallResult::EFI_UNSUPPORTED); + } + log::debug!("WRMSR: MSR 0x{:x} written with 0x{:x}", msr_index, value); + SyscallResult::success(0) + } + + /// Handles CLI (clear interrupt flag) syscall. + /// + /// Validates the CLI instruction against firmware policy, then executes `cli`. + fn handle_cli(&self, _ctx: &SyscallContext) -> SyscallResult { + log::trace!("CLI"); + + // Validate against policy + let gate = match POLICY_GATE.get() { + Some(g) => g, + None => { + log::error!("CLI: Policy gate not initialized"); + return SyscallResult::error(SyscallResult::EFI_NOT_READY); + } + }; + + if let Err(e) = gate.is_instruction_allowed(Instruction::Cli) { + log::error!("CLI: Instruction blocked by policy: {:?}", e); + return SyscallResult::error(SyscallResult::EFI_ACCESS_DENIED); + } + + // Policy allows - disable interrupts + unsafe { asm!("cli", options(nomem, nostack)) }; + log::debug!("CLI: Interrupts disabled"); + SyscallResult::success(0) + } + + /// Handles I/O port read syscall. + /// + /// Validates the I/O read against firmware policy, then executes the `in` instruction. 
+ /// - Arg1: I/O port address + /// - Arg2: EFI_MM_IO_WIDTH (0=UINT8, 1=UINT16, 2=UINT32) + /// - Returns: Value read from the port in result.value + fn handle_io_read(&self, ctx: &SyscallContext) -> SyscallResult { + let port = ctx.arg1; + let efi_width = ctx.arg2; + log::trace!("IO_READ: port=0x{:x}, width={}", port, efi_width); + + // Convert EFI_MM_IO_WIDTH to IoWidth + let io_width = match efi_io_width_to_io_width(efi_width) { + Some(w) => w, + None => { + log::error!("IO_READ: Invalid IO width: {}", efi_width); + return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + } + }; + + // Validate against policy + let gate = match POLICY_GATE.get() { + Some(g) => g, + None => { + log::error!("IO_READ: Policy gate not initialized"); + return SyscallResult::error(SyscallResult::EFI_NOT_READY); + } + }; + + if let Err(e) = gate.is_io_allowed(port as u32, io_width, AccessType::Read) { + log::error!("IO_READ: Port 0x{:x} width {:?} blocked by policy: {:?}", port, io_width, e); + return SyscallResult::error(SyscallResult::EFI_ACCESS_DENIED); + } + + // Policy allows - execute the I/O read + let port_addr = port as u16; + let value: u64 = unsafe { + match efi_width { + MM_IO_UINT8 => io_read_u8(port_addr) as u64, + MM_IO_UINT16 => io_read_u16(port_addr) as u64, + MM_IO_UINT32 => io_read_u32(port_addr) as u64, + _ => unreachable!(), // Already validated above + } + }; + + log::debug!("IO_READ: port=0x{:x} => 0x{:x}", port, value); + SyscallResult::success(value) + } + + /// Handles I/O port write syscall. + /// + /// Validates the I/O write against firmware policy, then executes the `out` instruction. 
+ /// - Arg1: I/O port address + /// - Arg2: EFI_MM_IO_WIDTH (0=UINT8, 1=UINT16, 2=UINT32) + /// - Arg3: Value to write + fn handle_io_write(&self, ctx: &SyscallContext) -> SyscallResult { + let port = ctx.arg1; + let efi_width = ctx.arg2; + let value = ctx.arg3; + log::trace!("IO_WRITE: port=0x{:x}, width={}, value=0x{:x}", port, efi_width, value); + + // Convert EFI_MM_IO_WIDTH to IoWidth + let io_width = match efi_io_width_to_io_width(efi_width) { + Some(w) => w, + None => { + log::error!("IO_WRITE: Invalid IO width: {}", efi_width); + return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + } + }; + + // Validate against policy + let gate = match POLICY_GATE.get() { + Some(g) => g, + None => { + log::error!("IO_WRITE: Policy gate not initialized"); + return SyscallResult::error(SyscallResult::EFI_NOT_READY); + } + }; + + if let Err(e) = gate.is_io_allowed(port as u32, io_width, AccessType::Write) { + log::error!("IO_WRITE: Port 0x{:x} width {:?} blocked by policy: {:?}", port, io_width, e); + return SyscallResult::error(SyscallResult::EFI_ACCESS_DENIED); + } + + // Policy allows - execute the I/O write + let port_addr = port as u16; + unsafe { + match efi_width { + MM_IO_UINT8 => io_write_u8(port_addr, value as u8), + MM_IO_UINT16 => io_write_u16(port_addr, value as u16), + MM_IO_UINT32 => io_write_u32(port_addr, value as u32), + _ => unreachable!(), // Already validated above + } + } + + log::debug!("IO_WRITE: port=0x{:x} <= 0x{:x}", port, value); + SyscallResult::success(0) + } + + /// Handles WBINVD (write-back and invalidate cache) syscall. + /// + /// Validates the WBINVD instruction against firmware policy, then executes `wbinvd`. 
+ fn handle_wbinvd(&self, _ctx: &SyscallContext) -> SyscallResult { + log::trace!("WBINVD"); + + // Validate against policy + let gate = match POLICY_GATE.get() { + Some(g) => g, + None => { + log::error!("WBINVD: Policy gate not initialized"); + return SyscallResult::error(SyscallResult::EFI_NOT_READY); + } + }; + + if let Err(e) = gate.is_instruction_allowed(Instruction::Wbinvd) { + log::error!("WBINVD: Instruction blocked by policy: {:?}", e); + return SyscallResult::error(SyscallResult::EFI_ACCESS_DENIED); + } + + // Policy allows - write back and invalidate cache + unsafe { asm!("wbinvd", options(nomem, nostack)) }; + log::debug!("WBINVD: Cache written back and invalidated"); + SyscallResult::success(0) + } + + /// Handles HLT (halt processor) syscall. + /// + /// Validates the HLT instruction against firmware policy, then executes `hlt`. + fn handle_hlt(&self, _ctx: &SyscallContext) -> SyscallResult { + log::trace!("HLT"); + + // Validate against policy + let gate = match POLICY_GATE.get() { + Some(g) => g, + None => { + log::error!("HLT: Policy gate not initialized"); + return SyscallResult::error(SyscallResult::EFI_NOT_READY); + } + }; + + if let Err(e) = gate.is_instruction_allowed(Instruction::Hlt) { + log::error!("HLT: Instruction blocked by policy: {:?}", e); + return SyscallResult::error(SyscallResult::EFI_ACCESS_DENIED); + } + + // Policy allows - halt processor (sleep until next interrupt) + unsafe { asm!("hlt", options(nomem, nostack)) }; + log::debug!("HLT: Processor halted and resumed"); + SyscallResult::success(0) + } + + /// Handles save state read syscall (legacy). 
+ /// + /// - Arg1: User MM CPU protocol pointer + /// - Arg2: Register to be read (`EFI_MM_SAVE_STATE_REGISTER`) + /// - Arg3: CPU index to read from + fn handle_save_state_read(&self, ctx: &SyscallContext) -> SyscallResult { + log::trace!( + "SAVE_STATE_READ: protocol=0x{:x}, register={}, cpu={}", + ctx.arg1, + ctx.arg2, + ctx.arg3 + ); + + // Validate parameters + if ctx.arg1 == 0 { + log::error!("SAVE_STATE_READ: Null protocol pointer"); + return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + } + + // Delegate to save state module Phase 1 + crate::save_state::save_state_read_phase1(ctx.arg1, ctx.arg2, ctx.arg3) + } + + /// Handles page allocation syscall. + /// + /// - Arg1: Allocate type (EFI_ALLOCATE_TYPE) + /// - Arg2: Memory type (must be EfiRuntimeServicesData) + /// - Arg3: Page count + /// - Returns: Allocated physical address in result.value + fn handle_alloc_page(&self, ctx: &SyscallContext) -> SyscallResult { + let alloc_type = ctx.arg1 as AllocateType; + let mem_type = ctx.arg2 as MemoryType; + let page_count = ctx.arg3; + log::trace!("ALLOC_PAGE: alloc_type={}, mem_type={}, count={}", alloc_type, mem_type, page_count); + + // Only BSP can allocate pages (AP allocating involves page table updates) + if !crate::is_bsp() { + log::error!("ALLOC_PAGE: AP cannot allocate pages"); + return SyscallResult::error(SyscallResult::EFI_ACCESS_DENIED); + } + + if mem_type != RUNTIME_SERVICES_DATA { + log::error!("ALLOC_PAGE: Invalid memory type: {}", mem_type); + return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + } + + // Currently only AllocateAnyPages is supported by our page allocator + if alloc_type != ALLOCATE_ANY_PAGES { + log::error!("ALLOC_PAGE: Only AllocateAnyPages (0) is supported, got {}", alloc_type); + return SyscallResult::error(SyscallResult::EFI_UNSUPPORTED); + } + + if page_count == 0 { + log::error!("ALLOC_PAGE: Zero page count"); + return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + } + + // 
Allocate pages as User type (Ring 3 driver request) + match crate::PAGE_ALLOCATOR.allocate_pages_with_type( + page_count as usize, + crate::mm_mem::AllocationType::User, + ) { + Ok(addr) => { + log::trace!("ALLOC_PAGE: Allocated {} page(s) at 0x{:x}", page_count, addr); + SyscallResult::success(addr) + } + Err(e) => { + log::error!("ALLOC_PAGE: Allocation failed: {:?}", e); + SyscallResult::error(SyscallResult::EFI_OUT_OF_RESOURCES) + } + } + } + + /// Handles page free syscall. + /// + /// Mirrors the C implementation's `SMM_FREE_PAGE` case. + /// - Arg1: Physical address to free + /// - Arg2: Number of pages + fn handle_free_page(&self, ctx: &SyscallContext) -> SyscallResult { + let addr = ctx.arg1; + let page_count = ctx.arg2; + log::trace!("FREE_PAGE: addr=0x{:x}, count={}", addr, page_count); + + if page_count == 0 { + log::error!("FREE_PAGE: Zero page count"); + return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + } + + // Validate the address is page-aligned + if addr % UEFI_PAGE_SIZE as u64 != 0 { + log::error!("FREE_PAGE: Address 0x{:x} is not page-aligned", addr); + return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + } + + // Verify the range was allocated as User type (Ring 3 code should only free its own memory) + // This prevents user code from freeing supervisor-internal allocations. 
+ match crate::PAGE_ALLOCATOR.get_allocation_type(addr) { + Some(crate::mm_mem::AllocationType::User) => { + // Good - this is user-owned memory + } + Some(crate::mm_mem::AllocationType::Supervisor) => { + log::error!("FREE_PAGE: Address 0x{:x} is a supervisor allocation - access denied", addr); + return SyscallResult::error(SyscallResult::EFI_SECURITY_VIOLATION); + } + None => { + log::error!("FREE_PAGE: Address 0x{:x} is not allocated", addr); + return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + } + } + + // Free the pages, verifying they are all User allocations + match crate::PAGE_ALLOCATOR.free_pages_checked( + addr, + page_count as usize, + crate::mm_mem::AllocationType::User, + ) { + Ok(()) => { + log::debug!("FREE_PAGE: Freed {} page(s) at 0x{:x}", page_count, addr); + SyscallResult::success(0) + } + Err(e) => { + log::error!("FREE_PAGE: Free failed: {:?}", e); + SyscallResult::error(SyscallResult::EFI_SECURITY_VIOLATION) + } + } + } + + /// Handles start AP procedure syscall. + /// + /// Validates the request and delegates to the platform-specific AP startup + /// function registered during [`MmSupervisorCore`] initialization. + /// + /// Checks performed before dispatch: + /// - Procedure pointer is non-null + /// - Procedure pointer is within user-accessible memory (unblocked region) + /// - Argument pointer (if non-null) is within user-accessible memory + /// + /// The remaining validation (CPU index range, BSP check, AP busy check) and + /// the actual dispatch are handled by the registered AP startup function, + /// which has access to the CPU manager and mailbox manager. 
+ /// + /// - Arg1: Procedure function pointer + /// - Arg2: CPU index + /// - Arg3: Argument pointer + fn handle_start_ap_proc(&self, ctx: &SyscallContext) -> SyscallResult { + let procedure = ctx.arg1; + let cpu_index = ctx.arg2; + let argument = ctx.arg3; + + log::info!( + "START_AP_PROC: proc=0x{:x}, cpu={}, arg=0x{:x}", + procedure, cpu_index, argument + ); + + // 1. Validate procedure pointer is non-null + if procedure == 0 { + log::error!("START_AP_PROC: Null procedure pointer"); + return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + } + + // 2. Validate procedure pointer is within mapped memory via page table query + if crate::query_address_ownership(procedure, core::mem::size_of::() as u64).is_none() { + log::error!( + "START_AP_PROC: Procedure 0x{:x} not in mapped memory", + procedure + ); + return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + } + + // 3. Validate argument pointer (if non-null) is within mapped memory + if argument != 0 { + if crate::query_address_ownership(argument, core::mem::size_of::() as u64).is_none() { + log::error!( + "START_AP_PROC: Argument 0x{:x} not in mapped memory", + argument + ); + return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + } + } + + // 4. Delegate to the registered AP startup function + match crate::AP_STARTUP_FN.get() { + Some(start_fn) => { + log::info!( + "START_AP_PROC: Dispatching to AP startup function at {:p} for CPU {}", + *start_fn as *const (), cpu_index + ); + let status = start_fn(cpu_index, procedure, argument); + if status == 0 { + SyscallResult::success(0) + } else { + SyscallResult::error(status) + } + } + None => { + log::error!("START_AP_PROC: AP startup not initialized"); + SyscallResult::error(SyscallResult::EFI_NOT_READY) + } + } + } + + /// Handles extended save state read syscall. 
+ /// + /// - Arg1: User MM CPU protocol pointer + /// - Arg2: Width of buffer to read in bytes + /// - Arg3: User buffer to hold return data + fn handle_save_state_read2(&self, ctx: &SyscallContext) -> SyscallResult { + log::trace!( + "SAVE_STATE_READ2: protocol=0x{:x}, width={}, buffer=0x{:x}", + ctx.arg1, + ctx.arg2, + ctx.arg3 + ); + + // Validate parameters + if ctx.arg1 == 0 { + log::error!("SAVE_STATE_READ2: Null protocol pointer"); + return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + } + + // Delegate to save state module Phase 2 + crate::save_state::save_state_read_phase2(ctx.arg1, ctx.arg2, ctx.arg3) + } + + /// Handles MM memory unblocked check syscall. + /// + /// Checks if a memory range is outside MMRAM and valid (unblocked), AND + /// is within user-owned space. + /// - Arg1: Physical address + /// - Arg2: Size in bytes + /// - Returns: 1 (TRUE) if valid, 0 (FALSE) otherwise + fn handle_mm_memory_unblocked(&self, ctx: &SyscallContext) -> SyscallResult { + let addr = ctx.arg1; + let size = ctx.arg2; + log::trace!("MM_MEMORY_UNBLOCKED: addr=0x{:x}, size=0x{:x}", addr, size); + + // Check if the buffer is within an unblocked memory region + let is_valid = UNBLOCKED_MEMORY_TRACKER.is_within_unblocked_region(addr, size); + + if !is_valid { + log::trace!("MM_MEMORY_UNBLOCKED: addr=0x{:x} size=0x{:x} not in unblocked region", addr, size); + return SyscallResult::success(0); // FALSE + } + + // Additional check - verify buffer is in user-owned space + match query_address_ownership(addr, size) { + Some(owner) => { + if owner != PageOwnership::User { + log::trace!("MM_MEMORY_UNBLOCKED: addr=0x{:x} size=0x{:x} owned by {:?} - not valid", addr, size, owner); + return SyscallResult::success(0); // FALSE + } + } + None => { + log::trace!("MM_MEMORY_UNBLOCKED: addr=0x{:x} size=0x{:x} not in mapped memory", addr, size); + return SyscallResult::success(0); // FALSE + } + } + + log::trace!("MM_MEMORY_UNBLOCKED: addr=0x{:x} size=0x{:x} is valid", 
addr, size); + SyscallResult::success(1) // TRUE + } + + /// Handles MM is communication buffer check syscall. + /// + /// Verifies that a given memory range is a valid communication buffer. + /// - Arg1: Buffer address + /// - Arg2: Buffer size + /// - Returns: 1 (TRUE) if valid comm buffer, 0 (FALSE) otherwise + fn handle_mm_is_comm_buffer(&self, ctx: &SyscallContext) -> SyscallResult { + let address = ctx.arg1; + let size = ctx.arg2; + log::trace!("MM_IS_COMM_BUFFER: addr=0x{:x}, size=0x{:x}", address, size); + + let config = match COMM_BUFFER_CONFIG.get() { + Some(c) => c, + None => { + log::error!("MM_IS_COMM_BUFFER: Comm buffer config not initialized"); + return SyscallResult::success(0); // FALSE + } + }; + + let buf_start = config.user_comm_buffer_internal; + let buf_end = buf_start.saturating_add(config.user_comm_buffer_size); + let range_end = address.saturating_add(size); + + // Check that the range is non-empty and falls entirely within the user comm buffer. + let is_valid = size > 0 && address >= buf_start && range_end <= buf_end; + + log::debug!("MM_IS_COMM_BUFFER: addr=0x{:x} size=0x{:x} => {}", address, size, is_valid); + SyscallResult::success(if is_valid { 1 } else { 0 }) + } +} + +// ============================================================================ +// Global Instance +// ============================================================================ + +/// Global syscall dispatcher instance. +pub static SYSCALL_DISPATCHER: SyscallDispatcher = SyscallDispatcher::new(); + +// ============================================================================ +// C-compatible Entry Point +// ============================================================================ + +/// C-compatible syscall dispatcher entry point. +/// +/// This function is called from the assembly syscall entry stub (SyscallCenter). 
+/// +/// # Arguments +/// +/// * `call_index` - Syscall index (from RAX) +/// * `arg1` - First argument (from RDX) +/// * `arg2` - Second argument (from R8) +/// * `arg3` - Third argument (from R9) +/// * `caller_addr` - Caller return address (from RCX) +/// * `ring3_stack_ptr` - Ring 3 stack pointer +/// +/// # Returns +/// +/// The value to return in RAX. +#[unsafe(no_mangle)] +pub extern "efiapi" fn syscall_dispatcher( + call_index: u64, + arg1: u64, + arg2: u64, + arg3: u64, + caller_addr: u64, + ring3_stack_ptr: u64, +) -> u64 { + let ctx = SyscallContext { + call_index, + arg1, + arg2, + arg3, + caller_addr, + ring3_stack_ptr, + }; + + let result = SYSCALL_DISPATCHER.dispatch(&ctx); + + // For now, just return the value. In the future, we may need to handle + // error codes differently. + if result.status != 0 { + panic!("Syscall error: status=0x{:x}", result.status); + } else { + result.value + } +} + +// ============================================================================ +// Tests +// ============================================================================ + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_syscall_index_roundtrip() { + for idx in [ + SyscallIndex::RdMsr, + SyscallIndex::WrMsr, + SyscallIndex::Cli, + SyscallIndex::IoRead, + SyscallIndex::IoWrite, + ] { + assert_eq!(SyscallIndex::from_u64(idx.as_u64()), Some(idx)); + } + } + + #[test] + fn test_unknown_syscall_index() { + assert_eq!(SyscallIndex::from_u64(0xFFFF), None); + assert_eq!(SyscallIndex::from_u64(0), None); + } + + #[test] + fn test_syscall_result() { + let success = SyscallResult::success(42); + assert_eq!(success.value, 42); + assert_eq!(success.status, 0); + + let error = SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + assert_eq!(error.value, 0); + assert_eq!(error.status, SyscallResult::EFI_INVALID_PARAMETER); + } +} diff --git a/patina_mm_supervisor_core/src/privilege_mgmt/syscall_entry.asm 
b/patina_mm_supervisor_core/src/privilege_mgmt/syscall_entry.asm new file mode 100644 index 000000000..9ba1e5cf2 --- /dev/null +++ b/patina_mm_supervisor_core/src/privilege_mgmt/syscall_entry.asm @@ -0,0 +1,159 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, AMD Incorporated. All rights reserved.
+# Copyright (c) 2017, Intel Corporation. All rights reserved.
+# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: BSD-2-Clause-Patent +# +# Module Name: +# +# syscall_entry.asm +# +# Abstract: +# +# Ring 3 syscall entry stub (SyscallCenter). Switches to the supervisor +# stack and data segments, saves user state, calls syscall_dispatcher, +# then restores user state and returns to Ring 3 via sysret. +# +# Notes: +# +#------------------------------------------------------------------------------ + +.section .data +.global syscall_center +.global syscall_dispatcher + +.section .text +.align 8 + +# Segments defined in SmiException.nasm +.equ LONG_DS_R0, 0x40 +.equ LONG_DS_R3, 0x53 + +# This should be OFFSET_OF (MM_SUPV_SYSCALL_CACHE, MmSupvRsp) +.equ MM_SUPV_RSP, 0x00 +# This should be OFFSET_OF (MM_SUPV_SYSCALL_CACHE, SavedUserRsp) +.equ SAVED_USER_RSP, 0x08 + +#------------------------------------------------------------------------------ +# Caller Interface: +# UINT64 +# EFIAPI +# SysCall ( +# UINTN CallIndex, +# UINTN Arg1, +# UINTN Arg2, +# UINTN Arg3 +# ); +# +# Backend Interface: +# /// C-compatible syscall dispatcher entry point. +# /// +# /// This function is called from the assembly syscall entry stub (SyscallCenter). +# /// +# /// # Arguments +# /// +# /// * `call_index` - Syscall index (from RAX) +# /// * `arg1` - First argument (from RDX) +# /// * `arg2` - Second argument (from R8) +# /// * `arg3` - Third argument (from R9) +# /// * `caller_addr` - Caller return address (from RCX) +# /// * `ring3_stack_ptr` - Ring 3 stack pointer +# /// +# /// # Returns +# /// +# /// The value to return in RAX.
+# #[unsafe(no_mangle)] +# pub extern "efiapi" fn syscall_dispatcher( +# call_index: u64, +# arg1: u64, +# arg2: u64, +# arg3: u64, +# caller_addr: u64, +# ring3_stack_ptr: u64, +# ) -> u64; +#------------------------------------------------------------------------------ +syscall_center: +# Calling convention: CallIndex in RAX, Arg1 in RDX, Arg2 in R8, Arg3 in R9 from SysCallLib +# Architectural definition: CallerAddr in RCX, rFLAGs in R11 from x64 syscall instruction +# push CallIndex stored at top of stack + + swapgs # get kernel pointer, save user GSbase + mov gs:[SAVED_USER_RSP], rsp # save user's stack pointer + mov rsp, gs:[MM_SUPV_RSP] # set up kernel stack + + #Preserve all registers in CPL3 + push rax + push rcx + push rbp + push rdx + push r8 + push r9 + push rsi + push r12 + push rdi + push rbx + push r11 + push r10 + push r13 + push r14 + push r15 + + mov rbp, rsp + and rsp, -16 + + ## FX_SAVE_STATE_X64 FxSaveState# + sub rsp, 512 + mov rdi, rsp + .byte 0x0f, 0xae, 0x07 #fxsave [rdi] + + #Prepare for ds, es, fs, gs + xor rbx, rbx + mov bx, LONG_DS_R0 + mov ds, bx + mov es, bx + mov fs, bx + + mov rsi, gs:[SAVED_USER_RSP] # Save Ring 3 stack to RSI + push rsi # Push Ring 3 stack as Ring3Stack for syscall_dispatcher + push rcx # Push return address on stack as CallerAddr for syscall_dispatcher + mov rcx, rax + sub rsp, 0x20 + + call syscall_dispatcher + + add rsp, 0x20 + pop rcx # Restore SP to avoid stack overflow + pop rsi # Restore SI to avoid stack overflow + + #Prepare for ds, es, fs, gs + xor rbx, rbx + mov bx, LONG_DS_R3 + mov ds, bx + mov es, bx + mov fs, bx + + mov rsi, rsp + .byte 0x0f, 0xae, 0x0e # fxrstor [rsi] + add rsp, 512 + + mov rsp, rbp + + #restore registers from CPL3 stack + pop r15 + pop r14 + pop r13 + pop r10 + pop r11 + pop rbx + pop rdi + pop r12 + pop rsi + pop r9 + pop r8 + pop rdx + pop rbp + pop rcx # return rcx from stack + + add rsp, 8 # return rsp to original position + mov rsp, gs:[SAVED_USER_RSP] # restore user RSP + 
swapgs # restore user GS, save kernel pointer + .byte 0x48 # return to the long mode + sysret # RAX contains return value diff --git a/patina_mm_supervisor_core/src/privilege_mgmt/syscall_setup.rs b/patina_mm_supervisor_core/src/privilege_mgmt/syscall_setup.rs new file mode 100644 index 000000000..11a07bfec --- /dev/null +++ b/patina_mm_supervisor_core/src/privilege_mgmt/syscall_setup.rs @@ -0,0 +1,269 @@ +//! Syscall Interface Setup +//! +//! This module handles the initialization and configuration of syscall/sysret MSRs +//! for privilege level transitions. It manages per-CPU storage for MSR values and +//! the syscall cache structure used during ring transitions. +//! +//! ## MSR Configuration +//! +//! - **MSR_IA32_STAR**: Contains segment selectors for syscall/sysret +//! - Bits 47:32 = SYSRET CS and SS (LONG_CS_R3_PH << 16) +//! - Bits 31:16 = SYSCALL CS and SS (LONG_CS_R0) +//! +//! - **MSR_IA32_LSTAR**: Contains the 64-bit RIP for syscall entry (SyscallCenter) +//! +//! - **MSR_IA32_EFER**: Extended Feature Enable Register +//! - Bit 0 (SCE) must be set to enable syscall/sysret +//! +//! - **MSR_IA32_KERNEL_GS_BASE**: Used with swapgs to switch between user and kernel +//! GS base addresses, allowing access to per-CPU data in the syscall handler. +//! + +#![allow(unsafe_op_in_unsafe_fn)] + +use core::sync::atomic::{AtomicBool, Ordering}; +use spin::Mutex; + +use super::PrivilegeError; + +// ============================================================================ +// Error Types +// ============================================================================ + +/// Errors specific to syscall setup operations. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum SyscallSetupError { + /// The syscall interface has not been initialized. + NotInitialized, + /// Already initialized. + AlreadyInitialized, + /// Invalid CPU index (exceeds configured CPU count). + InvalidCpuIndex, + /// Out of resources. + OutOfResources, + /// MSR stores are not ready. 
+    NotReady,
+}
+
+impl From<SyscallSetupError> for PrivilegeError {
+    fn from(e: SyscallSetupError) -> Self {
+        match e {
+            SyscallSetupError::NotInitialized => PrivilegeError::NotInitialized,
+            SyscallSetupError::AlreadyInitialized => PrivilegeError::AlreadyInitialized,
+            SyscallSetupError::InvalidCpuIndex => PrivilegeError::InvalidCpuIndex,
+            SyscallSetupError::OutOfResources => PrivilegeError::OutOfResources,
+            SyscallSetupError::NotReady => PrivilegeError::NotReady,
+        }
+    }
+}
+
+// ============================================================================
+// Syscall Interface
+// ============================================================================
+
+/// Internal state for the syscall interface.
+///
+/// Plain (non-generic) data; the enclosing `SyscallInterface<MAX_CPUS>`
+/// carries the CPU-count bound as a const generic.
+struct SyscallInterfaceState {
+    /// Number of CPUs configured.
+    num_cpus: usize,
+    /// CPL3 stack array base address.
+    cpl3_stack_base: u64,
+    /// Per-CPU stack size.
+    stack_size: usize,
+}
+
+impl SyscallInterfaceState {
+    const fn new() -> Self {
+        Self {
+            num_cpus: 0,
+            cpl3_stack_base: 0,
+            stack_size: 0,
+        }
+    }
+}
+
+/// Syscall interface manager.
+///
+/// Manages the syscall/sysret MSR configuration for all CPUs and provides
+/// the infrastructure for Ring 0 ↔ Ring 3 transitions.
+///
+/// ## Const Generic Parameters
+///
+/// * `MAX_CPUS` - The maximum number of CPUs that can be supported.
+///   This should match `PlatformInfo::MAX_CPU_COUNT`.
+pub struct SyscallInterface<const MAX_CPUS: usize> {
+    /// Whether the interface has been initialized.
+    initialized: AtomicBool,
+    /// Internal state protected by mutex.
+    state: Mutex<SyscallInterfaceState>,
+}
+
+impl<const MAX_CPUS: usize> SyscallInterface<MAX_CPUS> {
+    /// Creates a new syscall interface.
+    pub const fn new() -> Self {
+        Self {
+            initialized: AtomicBool::new(false),
+            state: Mutex::new(SyscallInterfaceState::new()),
+        }
+    }
+
+    /// Returns the maximum number of CPUs this interface supports.
+ pub const fn max_cpus(&self) -> usize { + MAX_CPUS + } + + /// Initializes the syscall interface. + /// + /// This should be called once during BSP initialization. + /// + /// # Arguments + /// + /// * `num_cpus` - Total number of CPUs to support (must be <= MAX_CPUS) + /// * `cpl3_stack_base` - Base address of the CPL3 stack array + /// * `stack_size` - Per-CPU stack size + /// + /// # Returns + /// + /// `Ok(())` if initialization succeeded, error otherwise. + pub fn init( + &self, + num_cpus: usize, + cpl3_stack_base: u64, + stack_size: usize, + ) -> Result<(), SyscallSetupError> { + // Check if already initialized + if self.initialized.swap(true, Ordering::SeqCst) { + return Err(SyscallSetupError::AlreadyInitialized); + } + + if num_cpus == 0 || num_cpus > MAX_CPUS { + self.initialized.store(false, Ordering::SeqCst); + return Err(SyscallSetupError::InvalidCpuIndex); + } + + let mut state = self.state.lock(); + state.num_cpus = num_cpus; + state.cpl3_stack_base = cpl3_stack_base; + state.stack_size = stack_size; + + log::info!( + "SyscallInterface<{}> initialized: {} CPUs, cpl3_stack=0x{:016x}, stack_size=0x{:x}", + MAX_CPUS, + num_cpus, + cpl3_stack_base, + stack_size + ); + + Ok(()) + } + + /// Checks if the syscall interface is initialized. + pub fn is_initialized(&self) -> bool { + self.initialized.load(Ordering::Acquire) + } + + /// Gets the CPL3 stack pointer for a specific CPU. + /// + /// The stack pointer is calculated as: + /// `cpl3_stack_base + stack_size * (cpu_index + 1) - sizeof(usize)` + /// + /// This gives the top of the stack for the CPU (stacks grow downward). + /// TODO: Might just want to allocate a page on the fly before demotion and free the pointer + /// upon returning instead of messing with the pre-allocated stack array. 
+ pub fn get_cpl3_stack(&self, cpu_index: usize) -> Result { + if !self.is_initialized() { + log::error!("SyscallInterface not initialized"); + return Err(SyscallSetupError::NotInitialized); + } + + let state = self.state.lock(); + if cpu_index >= state.num_cpus { + log::error!( + "Invalid CPU index {}: exceeds configured CPU count {}", + cpu_index, + state.num_cpus + ); + return Err(SyscallSetupError::InvalidCpuIndex); + } + + // Calculate stack top: base + size * (index + 1) - sizeof(usize) + let stack_top = state.cpl3_stack_base + .wrapping_add((state.stack_size as u64) * ((cpu_index as u64) + 1)) + .wrapping_sub(core::mem::size_of::() as u64); + + Ok(stack_top) + } +} + +impl Default for SyscallInterface { + fn default() -> Self { + Self::new() + } +} + +// ============================================================================ +// Tests +// ============================================================================ + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_cpl3_stack_calculation() { + let interface: SyscallInterface<8> = SyscallInterface::new(); + + // Initialize with known values: num_cpus=4, entry=0x1000, cpl3_stack_base=0x10000, stack_size=0x4000 + interface.init(4, 0x1000, 0x10000, 0x4000).unwrap(); + + // CPU 0: base + 0x4000 * 1 - 8 = 0x10000 + 0x4000 - 8 = 0x13FF8 + assert_eq!(interface.get_cpl3_stack(0).unwrap(), 0x13FF8); + + // CPU 1: base + 0x4000 * 2 - 8 = 0x10000 + 0x8000 - 8 = 0x17FF8 + assert_eq!(interface.get_cpl3_stack(1).unwrap(), 0x17FF8); + } + + #[test] + fn test_init_twice_fails() { + let interface: SyscallInterface<8> = SyscallInterface::new(); + assert!(interface.init(4, 0x1000, 0x10000, 0x4000).is_ok()); + assert_eq!( + interface.init(4, 0x1000, 0x10000, 0x4000), + Err(SyscallSetupError::AlreadyInitialized) + ); + } + + #[test] + fn test_invalid_cpu_index() { + let interface: SyscallInterface<8> = SyscallInterface::new(); + interface.init(4, 0x1000, 0x10000, 0x4000).unwrap(); + + assert_eq!( + 
interface.get_cpl3_stack(4), + Err(SyscallSetupError::InvalidCpuIndex) + ); + assert_eq!( + interface.get_cpl3_stack(100), + Err(SyscallSetupError::InvalidCpuIndex) + ); + } + + #[test] + fn test_max_cpus_exceeded() { + let interface: SyscallInterface<4> = SyscallInterface::new(); + // Try to init with more CPUs than the const generic allows + assert_eq!( + interface.init(8, 0x1000, 0x10000, 0x4000), + Err(SyscallSetupError::InvalidCpuIndex) + ); + } + + #[test] + fn test_max_cpus_accessor() { + let interface: SyscallInterface<16> = SyscallInterface::new(); + assert_eq!(interface.max_cpus(), 16); + } +} diff --git a/patina_mm_supervisor_core/src/save_state.rs b/patina_mm_supervisor_core/src/save_state.rs new file mode 100644 index 000000000..f911863b0 --- /dev/null +++ b/patina_mm_supervisor_core/src/save_state.rs @@ -0,0 +1,758 @@ +//! Save State Read Operations for the MM Supervisor Syscall Dispatcher +//! +//! Implements the two-phase save state read protocol used by the +//! `EFI_MM_CPU_PROTOCOL.ReadSaveState()` user-space API. +//! +//! **Phase 1** (`SyscallIndex::SaveStateRead`): stores the requested register +//! and CPU index in a per-BSP holder. +//! +//! **Phase 2** (`SyscallIndex::SaveStateRead2`): validates the request against +//! the MM security policy, reads the register value from the CPU's SMRAM save +//! state area, and copies the result into the caller-supplied buffer. +//! +//! ## Security Model +//! +//! - User buffer addresses are validated via page-table ownership queries. +//! - Policy-gated registers (RAX, IO) are checked through +//! [`PolicyGate::is_save_state_read_allowed`](patina_mm_policy::PolicyGate::is_save_state_read_allowed). +//! - `PROCESSOR_ID` is always allowed (informational, not security-sensitive). +//! - Other architectural registers pass through without policy gating, matching +//! the C reference implementation's allow-list semantics. +//! +//! ## Vendor Selection +//! +//! 
The SMRAM save state layout (Intel vs AMD) is selected **at build time** +//! via Cargo features on the `patina` crate (`save_state_intel` or +//! `save_state_amd`). All vendor-specific register offsets and I/O field +//! parsing live in the SDK; this module is vendor-agnostic. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 + +use spin::Mutex; + +use patina_internal_cpu::save_state::{ + self, MmSaveStateIoInfo, MmSaveStateRegister, IO_INFO_SIZE, IO_TYPE_INPUT, IO_TYPE_OUTPUT, + LMA_32BIT, LMA_64BIT, PROCESSOR_INFO_ENTRY_SIZE, IA32_EFER_LMA, +}; +use patina_mm_policy::{SaveStateCondition, SaveStateField}; + +use crate::privilege_mgmt::SyscallResult; +use crate::{PageOwnership, POLICY_GATE, SMM_CPU_PRIVATE, SmmCpuPrivateData, query_address_ownership}; + +// ============================================================================ +// Policy Field Mapping (supervisor-specific, not in SDK) +// ============================================================================ + +/// Maps a save state register to a policy-gated [`SaveStateField`], if any. +/// +/// Only RAX and IO are subject to policy gating. All other registers are +/// either always allowed or have special handling (PROCESSOR_ID). +fn to_policy_field(reg: MmSaveStateRegister) -> Option { + match reg { + MmSaveStateRegister::Rax => Some(SaveStateField::Rax), + MmSaveStateRegister::Io => Some(SaveStateField::IoTrap), + _ => None, + } +} + +// ============================================================================ +// Two-Phase State Holder +// ============================================================================ + +/// Holds the parameters from Phase 1 until Phase 2 completes the read. +struct SaveStateAccessHolder { + /// User protocol pointer (must match across both phases). + user_protocol: u64, + /// Register to read. + register: MmSaveStateRegister, + /// CPU index to read from. 
+    cpu_index: u64,
+}
+
+/// Global holder for the in-flight two-phase save state read.
+///
+/// Only one read can be in flight at a time (enforced by the single-threaded
+/// BSP syscall dispatch model).
+static SAVE_STATE_ACCESS: Mutex<Option<SaveStateAccessHolder>> = Mutex::new(None);
+
+// ============================================================================
+// Phase 1: Store Register + CPU Index
+// ============================================================================
+
+/// Processes Phase 1 of the save state read syscall.
+///
+/// Validates and stores the register and CPU index for the subsequent Phase 2
+/// call.
+///
+/// # Arguments
+///
+/// * `protocol` - User MM CPU protocol pointer (for consistency check in Phase 2)
+/// * `register_raw` - Raw `EFI_MM_SAVE_STATE_REGISTER` value
+/// * `cpu_index` - CPU index to read the save state from
+pub fn save_state_read_phase1(
+    protocol: u64,
+    register_raw: u64,
+    cpu_index: u64,
+) -> SyscallResult {
+    // Validate register
+    let register = match MmSaveStateRegister::from_u64(register_raw) {
+        Some(r) => r,
+        None => {
+            log::error!(
+                "SAVE_STATE_READ: Unknown register value: {}",
+                register_raw
+            );
+            return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER);
+        }
+    };
+
+    // Validate CPU index against NumberOfCpus
+    let num_cpus = match get_number_of_cpus() {
+        Ok(n) => n,
+        Err(status) => return SyscallResult::error(status),
+    };
+
+    if cpu_index >= num_cpus {
+        log::error!(
+            "SAVE_STATE_READ: CPU index {} >= NumberOfCpus {}",
+            cpu_index,
+            num_cpus
+        );
+        return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER);
+    }
+
+    // Store for Phase 2
+    let mut access = SAVE_STATE_ACCESS.lock();
+    *access = Some(SaveStateAccessHolder {
+        user_protocol: protocol,
+        register,
+        cpu_index,
+    });
+
+    log::debug!(
+        "SAVE_STATE_READ: Stored register={:?}, cpu_index={} for Phase 2",
+        register,
+        cpu_index
+    );
+    SyscallResult::success(0)
+}
+
+//
============================================================================ +// Phase 2: Policy Check + Read + Copy +// ============================================================================ + +/// Processes Phase 2 of the save state read syscall. +/// +/// Validates the request against the MM security policy, reads the register +/// from the CPU's SMRAM save state area, and copies the result into the user +/// buffer. +/// +/// # Arguments +/// +/// * `protocol` - User MM CPU protocol pointer (must match Phase 1) +/// * `width` - Width of the read in bytes +/// * `buffer` - User buffer to receive the register value +pub fn save_state_read_phase2(protocol: u64, width: u64, buffer: u64) -> SyscallResult { + // Retrieve and consume the Phase 1 state + let holder = { + let mut access = SAVE_STATE_ACCESS.lock(); + match access.take() { + Some(h) => h, + None => { + log::error!("SAVE_STATE_READ2: Phase 1 not completed"); + return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + } + } + }; + + // Verify protocol matches Phase 1 + if holder.user_protocol != protocol { + log::error!( + "SAVE_STATE_READ2: Protocol mismatch: expected 0x{:x}, got 0x{:x}", + holder.user_protocol, + protocol + ); + return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + } + + // Validate width and buffer + if width == 0 || buffer == 0 { + log::error!( + "SAVE_STATE_READ2: Invalid width ({}) or null buffer", + width + ); + return SyscallResult::error(SyscallResult::EFI_INVALID_PARAMETER); + } + + let register = holder.register; + let cpu_index = holder.cpu_index; + + // Determine the actual number of bytes we'll write + let write_size = actual_write_size(register, width); + if write_size == 0 { + log::error!( + "SAVE_STATE_READ2: Unsupported width {} for register {:?}", + width, + register + ); + return SyscallResult::error(SyscallResult::EFI_UNSUPPORTED); + } + + // Validate buffer is in user-owned memory + match query_address_ownership(buffer, write_size as 
u64) { + Some(PageOwnership::User) => {} + Some(owner) => { + log::error!( + "SAVE_STATE_READ2: Buffer 0x{:x} owned by {:?}, expected User", + buffer, + owner + ); + return SyscallResult::error(SyscallResult::EFI_ACCESS_DENIED); + } + None => { + log::error!( + "SAVE_STATE_READ2: Buffer 0x{:x} not in mapped memory", + buffer + ); + return SyscallResult::error(SyscallResult::EFI_ACCESS_DENIED); + } + } + + // Special case: PROCESSOR_ID — always allowed, no policy check + if register == MmSaveStateRegister::ProcessorId { + return read_processor_id(cpu_index, buffer); + } + + // Policy check for gated registers (RAX, IO) + if let Some(policy_field) = to_policy_field(register) { + let condition = inspect_io_condition(cpu_index); + let gate = match POLICY_GATE.get() { + Some(g) => g, + None => { + log::error!("SAVE_STATE_READ2: Policy gate not initialized"); + return SyscallResult::error(SyscallResult::EFI_NOT_READY); + } + }; + + if let Err(e) = gate.is_save_state_read_allowed(policy_field, width as usize, condition) { + log::error!( + "SAVE_STATE_READ2: Policy denied read of {:?}: {:?}", + register, + e + ); + return SyscallResult::error(SyscallResult::EFI_ACCESS_DENIED); + } + } + + // Get the save state base pointer for this CPU + let save_state_base = match get_save_state_base(cpu_index) { + Ok(base) => base, + Err(status) => return SyscallResult::error(status), + }; + + // Dispatch to the appropriate read handler. + // + // SAFETY: `save_state_base` points to a valid SMRAM save state region + // (obtained from SMM_CPU_PRIVATE which is set up by PiSmmCpuDxeSmm). + // `buffer` has been validated as a user-owned region of sufficient size. 
+ let status = match register { + MmSaveStateRegister::Io => unsafe { read_io_register(save_state_base, buffer as *mut u8) }, + MmSaveStateRegister::Lma => unsafe { + read_lma_register(save_state_base, width, buffer as *mut u8) + }, + _ => unsafe { + read_architectural_register(save_state_base, register, width, buffer as *mut u8) + }, + }; + + if status == SyscallResult::EFI_SUCCESS { + log::debug!( + "SAVE_STATE_READ2: Read {:?} (cpu={}, width={}) successfully", + register, + cpu_index, + width + ); + SyscallResult::success(0) + } else { + SyscallResult::error(status) + } +} + +// ============================================================================ +// Internal Helpers +// ============================================================================ + +/// Returns the number of CPUs from the SMM CPU private data. +fn get_number_of_cpus() -> Result { + let cpu_private_addr = match SMM_CPU_PRIVATE.get() { + Some(&addr) if addr != 0 => addr, + _ => { + log::error!("SMM CPU Private data not initialized"); + return Err(SyscallResult::EFI_NOT_READY); + } + }; + + // SAFETY: cpu_private_addr is provided by MM IPL via PassDown HOB and validated + // during initialization to point to a valid SmmCpuPrivateData in SMRAM. + let cpu_private = unsafe { &*(cpu_private_addr as *const SmmCpuPrivateData) }; + Ok(cpu_private.smm_core_entry_context.number_of_cpus) +} + +/// Returns the save state base pointer for a given CPU index. +/// +/// The pointer comes from `SmmCpuPrivateData.cpu_save_state[cpu_index]`, +/// which points to SMBASE + 0x7C00 for that CPU's save state area. +fn get_save_state_base(cpu_index: u64) -> Result<*const u8, u64> { + let cpu_private_addr = match SMM_CPU_PRIVATE.get() { + Some(&addr) if addr != 0 => addr, + _ => { + log::error!("SMM CPU Private data not initialized for save state read"); + return Err(SyscallResult::EFI_NOT_READY); + } + }; + + // SAFETY: cpu_private_addr points to valid SmmCpuPrivateData in SMRAM. 
+ let cpu_private = unsafe { &*(cpu_private_addr as *const SmmCpuPrivateData) }; + let save_state_array = cpu_private.cpu_save_state; + if save_state_array == 0 { + log::error!("CpuSaveState array pointer is null"); + return Err(SyscallResult::EFI_NOT_READY); + } + + // Read the per-CPU save state pointer from the array. + // SAFETY: cpu_save_state points to a valid array of VOID* pointers in SMRAM, + // and cpu_index has been validated against NumberOfCpus. + let save_state_ptr = unsafe { + let array_ptr = save_state_array as *const u64; + *array_ptr.add(cpu_index as usize) + }; + + if save_state_ptr == 0 { + log::error!("CpuSaveState[{}] is null", cpu_index); + return Err(SyscallResult::EFI_INVALID_PARAMETER); + } + + Ok(save_state_ptr as *const u8) +} + +/// Determines the actual number of bytes that will be written to the user buffer. +/// +/// Returns 0 if the width is not supported for the given register. +fn actual_write_size(register: MmSaveStateRegister, width: u64) -> usize { + match register { + MmSaveStateRegister::Io => IO_INFO_SIZE, + MmSaveStateRegister::ProcessorId => 8, + MmSaveStateRegister::Lma => { + if width == 4 || width == 8 { + width as usize + } else { + 0 + } + } + _ => { + if let Some(info) = save_state::register_info(register) { + if width == 2 && info.native_width >= 2 { + 2 + } else if width == 4 && info.native_width >= 4 { + 4 + } else if width == 8 && info.native_width == 8 { + 8 + } else { + 0 + } + } else { + 0 + } + } + } +} + +/// Reads the PROCESSOR_ID for a given CPU and writes it to the user buffer. +/// +/// The ProcessorId (APIC ID) is read from the `EFI_PROCESSOR_INFORMATION` array +/// that was set up by PiSmmCpuDxeSmm and passed through the PassDown HOB. 
+fn read_processor_id(cpu_index: u64, buffer: u64) -> SyscallResult { + let cpu_private_addr = match SMM_CPU_PRIVATE.get() { + Some(&addr) if addr != 0 => addr, + _ => return SyscallResult::error(SyscallResult::EFI_NOT_READY), + }; + + // SAFETY: validated during initialization. + let cpu_private = unsafe { &*(cpu_private_addr as *const SmmCpuPrivateData) }; + + if cpu_private.processor_info == 0 { + log::error!("PROCESSOR_ID: ProcessorInfo array is null"); + return SyscallResult::error(SyscallResult::EFI_NOT_READY); + } + + // Read ProcessorId (first field, offset 0) from the EFI_PROCESSOR_INFORMATION + // entry at the given CPU index. + // + // SAFETY: processor_info points to a valid array of EFI_PROCESSOR_INFORMATION + // entries in firmware memory, and cpu_index has been validated. + let processor_id: u64 = unsafe { + let base = cpu_private.processor_info as *const u8; + let entry_ptr = base.add(cpu_index as usize * PROCESSOR_INFO_ENTRY_SIZE); + core::ptr::read_unaligned(entry_ptr as *const u64) + }; + + // Write the 8-byte ProcessorId to the user buffer. + // + // SAFETY: buffer is validated as user-owned with sufficient size (8 bytes). + unsafe { + core::ptr::write_unaligned(buffer as *mut u64, processor_id); + } + + log::debug!("PROCESSOR_ID: CPU {} = 0x{:x}", cpu_index, processor_id); + SyscallResult::success(0) +} + +/// Inspects the I/O condition (IN vs OUT) from the save state for policy checking. +/// +/// Reads the vendor-specific IO field from the CPU's save state and uses the +/// SDK's [`save_state::parse_io_field`] to determine whether the I/O trap was +/// caused by an IN or OUT instruction. 
+fn inspect_io_condition(cpu_index: u64) -> Option { + log::info!( + "Inspecting I/O condition for CPU {}: retrieving save state base", + cpu_index + ); + let save_state_base = match get_save_state_base(cpu_index) { + Ok(base) => base, + Err(e) => { + log::error!("Failed to get save state base for CPU {} due to {}", cpu_index, e); + return None + } + }; + + let vc = save_state::vendor_constants(); + + // Verify the save state revision supports IO info before reading the field. + // + // SAFETY: save_state_base is valid and smmrevid_offset is within the + // save state map region. + let smm_rev_id: u32 = unsafe { + core::ptr::read_volatile(save_state_base.add(vc.smmrevid_offset as usize) as *const u32) + }; + + if smm_rev_id < vc.min_rev_id_io { + log::warn!( + "inspect_io_condition: SMMRevId 0x{:x} < 0x{:x}, IO info not available for CPU {}", + smm_rev_id, + vc.min_rev_id_io, + cpu_index + ); + return None; + } + + // Read the vendor-specific IO information field (u32). + // + // SAFETY: save_state_base is valid and io_info_offset is within the + // save state map region. + let io_field: u32 = unsafe { + core::ptr::read_volatile(save_state_base.add(vc.io_info_offset as usize) as *const u32) + }; + + log::info!( + "Inspecting IO condition for CPU {}: IO field = 0x{:x}", + cpu_index, + io_field + ); + + // Use the SDK's vendor-specific parser. + match save_state::parse_io_field(io_field) { + Some(parsed) => match parsed.io_type { + IO_TYPE_INPUT => Some(SaveStateCondition::IoRead), + IO_TYPE_OUTPUT => Some(SaveStateCondition::IoWrite), + _ => Some(SaveStateCondition::IoWrite), + }, + None => { + // No valid IO info (Intel: SmiFlag not set, AMD: reserved size) — + // default to write condition (matching C behaviour). + Some(SaveStateCondition::IoWrite) + } + } +} + +/// Reads an architectural register from the Intel x64 save state map. +/// +/// # Safety +/// +/// - `save_state_base` must point to a valid `SMRAM_SAVE_STATE_MAP64` region. 
+/// - `buffer` must point to a user-owned region with at least `width` bytes. +unsafe fn read_architectural_register( + save_state_base: *const u8, + register: MmSaveStateRegister, + width: u64, + buffer: *mut u8, +) -> u64 { + let info = match save_state::register_info(register) { + Some(i) => i, + None => { + log::error!( + "Register {:?} not found in save state map", + register + ); + return SyscallResult::EFI_UNSUPPORTED; + } + }; + + if width == 2 { + if info.native_width < 2 { + return SyscallResult::EFI_UNSUPPORTED; + } + // Read the low 2 bytes (AMD segment selectors, DT limits). + let val = unsafe { + core::ptr::read_volatile(save_state_base.add(info.lo_offset as usize) as *const u16) + }; + unsafe { + core::ptr::write_unaligned(buffer as *mut u16, val); + } + } else if width == 4 { + if info.native_width < 4 { + return SyscallResult::EFI_UNSUPPORTED; + } + // Read the low 4 bytes. + let lo = unsafe { + core::ptr::read_volatile(save_state_base.add(info.lo_offset as usize) as *const u32) + }; + unsafe { + core::ptr::write_unaligned(buffer as *mut u32, lo); + } + } else if width == 8 { + if info.native_width != 8 { + return SyscallResult::EFI_UNSUPPORTED; + } + // Read lo dword then hi dword (handles both contiguous and split layouts). + let lo = unsafe { + core::ptr::read_volatile(save_state_base.add(info.lo_offset as usize) as *const u32) + }; + let hi = unsafe { + core::ptr::read_volatile(save_state_base.add(info.hi_offset as usize) as *const u32) + }; + // Write as two adjacent dwords (matching C split-register behaviour). + unsafe { + core::ptr::write_unaligned(buffer as *mut u32, lo); + core::ptr::write_unaligned((buffer as *mut u32).add(1), hi); + } + } else { + return SyscallResult::EFI_INVALID_PARAMETER; + } + + SyscallResult::EFI_SUCCESS +} + +/// Reads the IO pseudo-register and writes an `EFI_MM_SAVE_STATE_IO_INFO` +/// structure to the user buffer. 
+/// +/// The IO pseudo-register provides information about the I/O instruction that +/// triggered the SMI, including the port, width, direction, and data value. +/// +/// # Safety +/// +/// - `save_state_base` must point to a valid `SMRAM_SAVE_STATE_MAP64` region. +/// - `buffer` must point to a user-owned region with at least [`IO_INFO_SIZE`] bytes. +unsafe fn read_io_register(save_state_base: *const u8, buffer: *mut u8) -> u64 { + let vc = save_state::vendor_constants(); + + // 1. Read SMMRevId to verify IO info is available. + let smm_rev_id = unsafe { + core::ptr::read_volatile( + save_state_base.add(vc.smmrevid_offset as usize) as *const u32, + ) + }; + + if smm_rev_id < vc.min_rev_id_io { + log::error!( + "IO_READ: SMMRevId 0x{:x} < 0x{:x}, IO info not supported", + smm_rev_id, + vc.min_rev_id_io + ); + return SyscallResult::EFI_UNSUPPORTED; + } + + // 2. Read the vendor-specific IO information field and parse it. + let io_field = unsafe { + core::ptr::read_volatile( + save_state_base.add(vc.io_info_offset as usize) as *const u32, + ) + }; + + let parsed = match save_state::parse_io_field(io_field) { + Some(p) => p, + None => { + log::debug!("IO_READ: IO field 0x{:x} did not indicate a valid I/O trap", io_field); + return SyscallResult::EFI_UNSUPPORTED; + } + }; + + // 3. Read I/O data from RAX (only the significant bytes). + let io_data: u64 = unsafe { + let rax_ptr = save_state_base.add(vc.rax_offset as usize); + match parsed.byte_count { + 1 => core::ptr::read_volatile(rax_ptr as *const u8) as u64, + 2 => core::ptr::read_volatile(rax_ptr as *const u16) as u64, + 4 => core::ptr::read_volatile(rax_ptr as *const u32) as u64, + _ => 0, + } + }; + + // 4. Compose the EFI_MM_SAVE_STATE_IO_INFO structure and copy to user buffer. 
+ let io_info = MmSaveStateIoInfo { + io_data, + io_port: parsed.io_port as u64, + io_width: parsed.io_width, + io_type: parsed.io_type, + }; + + unsafe { + core::ptr::copy_nonoverlapping( + &io_info as *const MmSaveStateIoInfo as *const u8, + buffer, + IO_INFO_SIZE, + ); + } + + SyscallResult::EFI_SUCCESS +} + +/// Reads the LMA pseudo-register (processor Long Mode Active state). +/// +/// Returns `LMA_32BIT` (32) or `LMA_64BIT` (64) depending on the IA32_EFER.LMA +/// bit in the save state. +/// +/// # Safety +/// +/// - `save_state_base` must point to a valid `SMRAM_SAVE_STATE_MAP64` region. +/// - `buffer` must point to a user-owned region with at least `width` bytes. +unsafe fn read_lma_register( + save_state_base: *const u8, + width: u64, + buffer: *mut u8, +) -> u64 { + let vc = save_state::vendor_constants(); + + // AMD64 always operates in 64-bit mode during SMM. + let lma_value = if vc.lma_always_64 { + LMA_64BIT + } else { + // Read IA32_EFER from the save state. + let efer = unsafe { + core::ptr::read_volatile( + save_state_base.add(vc.efer_offset as usize) as *const u64, + ) + }; + if (efer & IA32_EFER_LMA) != 0 { + LMA_64BIT + } else { + LMA_32BIT + } + }; + + if width == 4 { + unsafe { + core::ptr::write_unaligned(buffer as *mut u32, lma_value as u32); + } + } else if width == 8 { + unsafe { + core::ptr::write_unaligned(buffer as *mut u64, lma_value); + } + } else { + return SyscallResult::EFI_INVALID_PARAMETER; + } + + SyscallResult::EFI_SUCCESS +} + +// ============================================================================ +// Tests +// ============================================================================ + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_register_from_u64() { + assert_eq!( + MmSaveStateRegister::from_u64(38), + Some(MmSaveStateRegister::Rax) + ); + assert_eq!( + MmSaveStateRegister::from_u64(512), + Some(MmSaveStateRegister::Io) + ); + assert_eq!( + MmSaveStateRegister::from_u64(514), + 
Some(MmSaveStateRegister::ProcessorId) + ); + assert_eq!(MmSaveStateRegister::from_u64(999), None); + assert_eq!(MmSaveStateRegister::from_u64(0), None); + } + + #[test] + fn test_to_policy_field() { + assert_eq!( + to_policy_field(MmSaveStateRegister::Rax), + Some(SaveStateField::Rax) + ); + assert_eq!( + to_policy_field(MmSaveStateRegister::Io), + Some(SaveStateField::IoTrap) + ); + assert_eq!(to_policy_field(MmSaveStateRegister::Rbx), None); + assert_eq!(to_policy_field(MmSaveStateRegister::ProcessorId), None); + } + + #[test] + fn test_actual_write_size() { + // IO always writes IO_INFO_SIZE + assert_eq!(actual_write_size(MmSaveStateRegister::Io, 4), IO_INFO_SIZE); + assert_eq!(actual_write_size(MmSaveStateRegister::Io, 24), IO_INFO_SIZE); + + // PROCESSOR_ID always writes 8 + assert_eq!(actual_write_size(MmSaveStateRegister::ProcessorId, 8), 8); + + // LMA supports 4 and 8 + assert_eq!(actual_write_size(MmSaveStateRegister::Lma, 4), 4); + assert_eq!(actual_write_size(MmSaveStateRegister::Lma, 8), 8); + assert_eq!(actual_write_size(MmSaveStateRegister::Lma, 3), 0); + + // RAX (native 8): supports Width=2, 4, and 8 + assert_eq!(actual_write_size(MmSaveStateRegister::Rax, 2), 2); + assert_eq!(actual_write_size(MmSaveStateRegister::Rax, 4), 4); + assert_eq!(actual_write_size(MmSaveStateRegister::Rax, 8), 8); + assert_eq!(actual_write_size(MmSaveStateRegister::Rax, 16), 0); + } + + #[test] + fn test_save_state_access_holder() { + // Test that the mutex works correctly for Phase 1/Phase 2. 
+ { + let mut access = SAVE_STATE_ACCESS.lock(); + assert!(access.is_none()); + *access = Some(SaveStateAccessHolder { + user_protocol: 0xDEAD, + register: MmSaveStateRegister::Rax, + cpu_index: 0, + }); + } + + { + let mut access = SAVE_STATE_ACCESS.lock(); + let holder = access.take().unwrap(); + assert_eq!(holder.user_protocol, 0xDEAD); + assert_eq!(holder.register, MmSaveStateRegister::Rax); + assert_eq!(holder.cpu_index, 0); + } + + { + let access = SAVE_STATE_ACCESS.lock(); + assert!(access.is_none()); + } + } +} diff --git a/patina_mm_supervisor_core/src/supervisor_handlers.rs b/patina_mm_supervisor_core/src/supervisor_handlers.rs new file mode 100644 index 000000000..9279e94d0 --- /dev/null +++ b/patina_mm_supervisor_core/src/supervisor_handlers.rs @@ -0,0 +1,763 @@ +//! Supervisor MMI Handler Registry +//! +//! This module provides the build-time handler registration mechanism for the MM Supervisor Core. +//! Handlers are registered at link time using `linkme::distributed_slice`, allowing platforms +//! to add custom supervisor handlers without modifying the core. +//! +//! ## Architecture +//! +//! The [`SUPERVISOR_MMI_HANDLERS`] distributed slice collects all handler entries across the +//! final binary. Each entry is a [`SupervisorMmiHandler`] that specifies a GUID and handler +//! function. During supervisor request processing, the core iterates the slice to find a +//! handler matching the communicate header GUID. +//! +//! ## Adding Platform-Specific Handlers +//! +//! To register a handler from a platform crate: +//! +//! ```rust,ignore +//! use patina_mm_supervisor_core::{SupervisorMmiHandler, SUPERVISOR_MMI_HANDLERS}; +//! use r_efi::efi; +//! +//! fn my_handler(comm_buffer: *mut u8, comm_buffer_size: &mut usize) -> efi::Status { +//! // Handle the request... +//! efi::Status::SUCCESS +//! } +//! +//! #[linkme::distributed_slice(SUPERVISOR_MMI_HANDLERS)] +//! static MY_HANDLER: SupervisorMmiHandler = SupervisorMmiHandler { +//! 
name: "MyPlatformHandler", +//! handler_guid: efi::Guid::from_fields( +//! 0x12345678, 0xabcd, 0xef01, +//! 0x23, 0x45, &[0x67, 0x89, 0xab, 0xcd, 0xef, 0x01] +//! ), +//! handle: my_handler, +//! }; +//! ``` +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +use r_efi::efi; + +use patina_paging::{MemoryAttributes, PageTable, PtError}; +use patina::base::UEFI_PAGE_SIZE; + +use crate::mm_mem::PAGE_ALLOCATOR; +use crate::unblock_memory::{UnblockError, UNBLOCKED_MEMORY_TRACKER}; +use crate::{ + POLICY_GATE, + is_buffer_inside_mmram, read_cr3, +}; + +use patina::management_mode::protocol::mm_supervisor_request::{ + MmSupervisorRequestHeader, + MmSupervisorVersionInfo, + RequestType, + MM_SUPERVISOR_REQUEST_HANDLER_GUID, + MmSupervisorUnblockMemoryParams, + REVISION, SIGNATURE, +}; + +use patina_mm_policy::{MemDescriptorV1_0, PolicyError}; + +// ============================================================================ +// Supervisor MMI Handler Infrastructure +// ============================================================================ + +/// A build-time registered supervisor MMI handler. +/// +/// Each entry represents a handler that the supervisor core will consider when dispatching +/// supervisor-channel requests. Handlers are matched by comparing the +/// [`EfiMmCommunicateHeader::header_guid`](crate::EfiMmCommunicateHeader::header_guid) +/// against [`handler_guid`](SupervisorMmiHandler::handler_guid). +/// +/// ## Handler Function Signature +/// +/// The [`handle`](SupervisorMmiHandler::handle) function receives: +/// - `comm_buffer`: Pointer to the data portion of the communicate buffer (after the header). +/// - `comm_buffer_size`: On input, the message length. On output, the response data length. +/// +/// The handler should return an [`efi::Status`] code. +#[derive(Debug)] +pub struct SupervisorMmiHandler { + /// Human-readable name for logging and debugging. 
+ pub name: &'static str, + /// GUID identifying the request type this handler processes. + pub handler_guid: efi::Guid, + /// The handler function. + /// + /// # Arguments + /// + /// * `comm_buffer` - Pointer to the data payload (after `EfiMmCommunicateHeader`). + /// * `comm_buffer_size` - On input, the data size; on output, the response data size. + /// + /// # Returns + /// + /// An EFI status code indicating the result of the handler. + pub handle: fn(comm_buffer: *mut u8, comm_buffer_size: &mut usize) -> efi::Status, +} + +// SAFETY: SupervisorMmiHandler contains only a &'static str, a Guid (plain data), and a fn pointer. +// All of these are inherently Sync. +unsafe impl Sync for SupervisorMmiHandler {} + +/// The global distributed slice collecting all supervisor MMI handlers. +/// +/// Handlers from the core and from platform crates are collected here at link time. +/// The supervisor dispatch loop iterates this slice to find a matching handler for +/// each incoming supervisor-channel request. +/// +/// ## Usage +/// +/// ```rust,ignore +/// use patina_mm_supervisor_core::{SupervisorMmiHandler, SUPERVISOR_MMI_HANDLERS}; +/// +/// #[linkme::distributed_slice(SUPERVISOR_MMI_HANDLERS)] +/// static MY_HANDLER: SupervisorMmiHandler = SupervisorMmiHandler { +/// name: "MyHandler", +/// handler_guid: MY_GUID, +/// handle: my_handler_fn, +/// }; +/// ``` +#[linkme::distributed_slice] +pub static SUPERVISOR_MMI_HANDLERS: [SupervisorMmiHandler]; + +// ============================================================================ +// Core Supervisor MMI Handlers +// ============================================================================ + +// GUID for gEfiDxeMmReadyToLockProtocolGuid +// { 0x60ff8964, 0xe906, 0x41d0, { 0xaf, 0xed, 0xf2, 0x41, 0xe9, 0x74, 0xe0, 0x8e } } +/// GUID for the DXE MM Ready To Lock protocol. 
+pub const EFI_DXE_MM_READY_TO_LOCK_PROTOCOL_GUID: efi::Guid = efi::Guid::from_fields( + 0x60ff8964, + 0xe906, + 0x41d0, + 0xaf, + 0xed, + &[0xf2, 0x41, 0xe9, 0x74, 0xe0, 0x8e], +); + +/// Ready-to-lock handler. +/// +/// Triggered from the non-MM environment upon DxeMmReadyToLock event. +/// After this handler runs, certain features (e.g., unblock memory) are no longer available. +#[linkme::distributed_slice(SUPERVISOR_MMI_HANDLERS)] +static READY_TO_LOCK_HANDLER: SupervisorMmiHandler = SupervisorMmiHandler { + name: "MmReadyToLock", + handler_guid: EFI_DXE_MM_READY_TO_LOCK_PROTOCOL_GUID, + handle: mm_ready_to_lock_handler, +}; + +/// Supervisor request handler. +/// +/// Handles general supervisor requests such as unblock memory, fetch policy, +/// version info, and communication buffer updates. +#[linkme::distributed_slice(SUPERVISOR_MMI_HANDLERS)] +static SUPV_REQUEST_HANDLER: SupervisorMmiHandler = SupervisorMmiHandler { + name: "MmSupvRequest", + handler_guid: MM_SUPERVISOR_REQUEST_HANDLER_GUID.into_inner(), + handle: mm_supv_request_handler, +}; + +// ============================================================================ +// Handler Implementations +// ============================================================================ + +/// MmReadyToLock handler implementation. +/// +/// Called when the DXE phase signals that MM should transition to a locked state. +/// After this runs, no new memory regions can be unblocked and the memory policy +/// snapshot stored inside `PolicyGate` is considered the reference baseline. +fn mm_ready_to_lock_handler(_comm_buffer: *mut u8, _comm_buffer_size: &mut usize) -> efi::Status { + log::info!("MmReadyToLockHandler invoked"); + + let gate = match POLICY_GATE.get() { + Some(g) => g, + None => { + log::error!("MmReadyToLock: POLICY_GATE not initialized"); + return efi::Status::NOT_READY; + } + }; + + // If already locked, this is a no-op (idempotent). 
+ if gate.is_locked() { + log::warn!("MmReadyToLock: already locked, ignoring duplicate"); + return efi::Status::SUCCESS; + } + + // Take a snapshot and mark as locked. + let cr3 = read_cr3(); + // SAFETY: cr3 points to the active PML4 table inside SMM, + // and the memory policy buffer was configured during init. + if let Err(e) = unsafe { gate.take_snapshot(cr3, is_buffer_inside_mmram) } { + log::error!("MmReadyToLock: take_snapshot failed: {:?}", e); + return efi::Status::DEVICE_ERROR; + } + + // And mark the unblock memory tracker as locked as well since unblock memory is no longer allowed after this point. + UNBLOCKED_MEMORY_TRACKER.set_core_init_complete(); + + efi::Status::SUCCESS +} + +// ============================================================================ +// Supervisor Version Constants +// ============================================================================ + +/// Supervisor version. Encodes major.minor as (major << 16) | minor. +pub const VERSION: u32 = 0x00130008; + +/// Supervisor patch level. +pub const PATCH_LEVEL: u32 = 0x00010001; + +// ============================================================================ +// MM Supervisor Request Handler +// ============================================================================ + +/// MM Supervisor request handler implementation. +/// +/// Handles structured requests from the non-MM environment, such as: +/// - [`RequestType::UnblockMem`]: Unblock memory regions +/// - [`RequestType::FetchPolicy`]: Fetch security policy +/// - [`RequestType::VersionInfo`]: Query supervisor version information +/// - [`RequestType::CommUpdate`]: Update communication buffer configuration +/// +/// The buffer is expected to contain an [`MmSupervisorRequestHeader`] at the start. +/// On return, the header's `result` field is set and any response payload follows +/// immediately after the header. 
+fn mm_supv_request_handler(comm_buffer: *mut u8, comm_buffer_size: &mut usize) -> efi::Status { + log::info!("MmSupvRequestHandler invoked (buffer_size={})", *comm_buffer_size); + + if comm_buffer.is_null() || *comm_buffer_size < MmSupervisorRequestHeader::SIZE { + log::error!( + "MmSupvRequestHandler: buffer too small ({} bytes, need at least {})", + *comm_buffer_size, + MmSupervisorRequestHeader::SIZE, + ); + return efi::Status::INVALID_PARAMETER; + } + + // SAFETY: We verified the buffer is non-null and large enough for the header. + let header = unsafe { &*(comm_buffer as *const MmSupervisorRequestHeader) }; + + // Validate signature + if header.signature != SIGNATURE { + log::error!( + "MmSupvRequestHandler: invalid signature 0x{:08X}, expected 0x{:08X}", + header.signature, + SIGNATURE, + ); + return efi::Status::INVALID_PARAMETER; + } + + // Validate revision + if header.revision > REVISION { + log::error!( + "MmSupvRequestHandler: unsupported revision {}, max supported {}", + header.revision, + REVISION, + ); + return efi::Status::UNSUPPORTED; + } + + // Dispatch by request type + let status = match RequestType::try_from(header.request) { + Ok(RequestType::VersionInfo) => { + log::info!("Processing VERSION_INFO request"); + handle_version_info(comm_buffer, comm_buffer_size) + } + Ok(RequestType::FetchPolicy) => { + log::info!("Processing FETCH_POLICY request"); + handle_fetch_policy(comm_buffer, comm_buffer_size) + } + Ok(RequestType::CommUpdate) => { + log::info!("Processing COMM_UPDATE request"); + handle_comm_update(comm_buffer, comm_buffer_size) + } + Ok(RequestType::UnblockMem) => { + log::info!("Processing UNBLOCK_MEM request"); + handle_unblock_mem(comm_buffer, comm_buffer_size) + } + Err(unknown) => { + log::warn!("MmSupvRequestHandler: unsupported request type 0x{:08X}", unknown); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + efi::Status::UNSUPPORTED + } + }; + + // Write the final status into the request header's result field. 
+ write_request_result(comm_buffer, status); + + // The handler's return value is only for indicating communication-level errors + // (e.g., interrupt is being handled or not), in this case we handled the request successfully. + efi::Status::SUCCESS +} + +/// Write an [`efi::Status`] into the request header's `result` field. +/// +/// The status is stored as its raw `usize` representation cast to `u64`, +/// matching the C `MM_SUPERVISOR_REQUEST_HEADER.Result` convention. +/// +/// # Safety +/// +/// `comm_buffer` must point to at least `MmSupervisorRequestHeader::SIZE` bytes of writable memory. +fn write_request_result(comm_buffer: *mut u8, status: efi::Status) { + // SAFETY: caller guarantees buffer is large enough for the header. + unsafe { + let header = &mut *(comm_buffer as *mut MmSupervisorRequestHeader); + header.result = status.as_usize() as u64; + } +} + +/// Handle a VERSION_INFO request. +/// +/// Writes back the response header followed by [`MmSupervisorVersionInfo`]. +fn handle_version_info(comm_buffer: *mut u8, comm_buffer_size: &mut usize) -> efi::Status { + let response_size = MmSupervisorRequestHeader::SIZE + MmSupervisorVersionInfo::SIZE; + + if *comm_buffer_size < response_size { + log::error!( + "VERSION_INFO: buffer too small for response ({} bytes, need {})", + *comm_buffer_size, + response_size, + ); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::BUFFER_TOO_SMALL; + } + + // Write version info payload after the header + let version_info = MmSupervisorVersionInfo { + version: VERSION, + patch_level: PATCH_LEVEL, + max_supervisor_request_level: RequestType::MAX_REQUEST_TYPE, + }; + + // SAFETY: We verified the buffer is large enough for header + version info. 
+ unsafe { + let payload_ptr = comm_buffer.add(MmSupervisorRequestHeader::SIZE) as *mut MmSupervisorVersionInfo; + core::ptr::write(payload_ptr, version_info); + } + + *comm_buffer_size = response_size; + log::info!( + "VERSION_INFO response: version=0x{:08X}, patch=0x{:08X}, max_level={}", + VERSION, + PATCH_LEVEL, + RequestType::MAX_REQUEST_TYPE, + ); + + efi::Status::SUCCESS +} + +/// Handle a FETCH_POLICY request. +/// +/// Returns the merged memory + firmware policy to the caller. +/// +/// ## Behaviour +/// +/// 1. **First-time call (before lock):** takes a memory policy snapshot, saves it, +/// and sets the ready-to-lock flag (whichever of `MmReadyToLock` or `FETCH_POLICY` +/// fires first performs this). +/// 2. **Subsequent calls (after lock):** re-walks the page table and compares the +/// fresh result against the saved snapshot. Any discrepancy is a security +/// violation. +/// 3. **Merges** the memory policy snapshot with the static firmware policy blob +/// from `POLICY_GATE` and writes the combined result into `comm_buffer`. +/// +/// ## Response layout +/// +/// ```text +/// |----------------------------------| +/// | MmSupervisorRequestHeader (24 B) | +/// |----------------------------------| +/// | MemDescriptorV1_0[0..N] | <- memory policy snapshot +/// |----------------------------------| +/// | SecurePolicyDataV1_0 + payload | <- firmware policy blob (raw copy) +/// |----------------------------------| +/// ``` +fn handle_fetch_policy(comm_buffer: *mut u8, comm_buffer_size: &mut usize) -> efi::Status { + log::info!("FETCH_POLICY request"); + + // -- 0. Obtain the PolicyGate ------------------------------------- + let gate = match POLICY_GATE.get() { + Some(g) => g, + None => { + log::error!("FETCH_POLICY: POLICY_GATE not initialized"); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::NOT_READY; + } + }; + + let cr3 = read_cr3(); + + // -- 1. 
Ensure we have a snapshot (lock if not yet locked) ------------ + if !gate.is_locked() { + // Policy requested prior to ready to lock - enforce lock now. + log::info!("FETCH_POLICY: not yet locked - taking snapshot and locking now"); + // SAFETY: cr3 is valid and the memory policy buffer was configured during init. + if let Err(e) = unsafe { gate.take_snapshot(cr3, is_buffer_inside_mmram) } { + log::error!("FETCH_POLICY: take_snapshot failed: {:?}", e); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::DEVICE_ERROR; + } + } else { + // -- 2. Already locked - verify that current page table matches snapshot + if let Err(status) = verify_policy_snapshot(gate, cr3) { + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return status; + } + } + + // -- 3. Write the merged policy into the comm buffer (after the header) - + let payload_capacity = match comm_buffer_size + .checked_sub(MmSupervisorRequestHeader::SIZE) + { + Some(c) => c, + None => { + log::error!("FETCH_POLICY: comm_buffer_size too small for header"); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::BUFFER_TOO_SMALL; + } + }; + + // SAFETY: comm_buffer + header offset is valid writable memory. + let dest = unsafe { comm_buffer.add(MmSupervisorRequestHeader::SIZE) }; + let payload_written = match unsafe { gate.fetch_n_update_policy(dest, payload_capacity) } { + Ok(n) => n, + Err(PolicyError::InternalError) => { + // Could be buffer-too-small, size overflow, or missing snapshot. 
+ log::error!("FETCH_POLICY: fetch_n_update_policy failed"); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::BUFFER_TOO_SMALL; + } + Err(e) => { + log::error!("FETCH_POLICY: fetch_n_update_policy unexpected error: {:?}", e); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::DEVICE_ERROR; + } + }; + + let total_response = MmSupervisorRequestHeader::SIZE + payload_written; + *comm_buffer_size = total_response; + log::info!("FETCH_POLICY: response {} bytes (header={}, payload={})", total_response, MmSupervisorRequestHeader::SIZE, payload_written); + + efi::Status::SUCCESS +} + +// ============================================================================ +// Policy Snapshot Helpers +// ============================================================================ + +/// Walks the page table and compares the result against the saved snapshot +/// inside `PolicyGate`. Allocates a temporary scratch buffer from the page +/// allocator for the fresh walk. +/// +/// Returns `Ok(())` if the tables match, or an `efi::Status` error on mismatch +/// or allocation failure. 
+fn verify_policy_snapshot( + gate: &patina_mm_policy::PolicyGate, + cr3: u64, +) -> Result<(), efi::Status> { + let saved_count = match gate.snapshot_count() { + Some(c) => c, + None => { + log::warn!("verify_policy_snapshot: no snapshot available, skipping"); + return Ok(()); + } + }; + + let desc_size = core::mem::size_of::(); + let needed_bytes = saved_count.checked_mul(desc_size).ok_or_else(|| { + log::error!("verify_policy_snapshot: descriptor count overflow"); + efi::Status::DEVICE_ERROR + })?; + let needed_pages = (needed_bytes + 0xFFF) / 0x1000; + + let scratch_base = PAGE_ALLOCATOR + .allocate_pages(needed_pages) + .map_err(|e| { + log::error!("verify_policy_snapshot: failed to allocate scratch buffer: {:?}", e); + efi::Status::OUT_OF_RESOURCES + })?; + + let scratch_ptr = scratch_base as *mut MemDescriptorV1_0; + let scratch_max_count = (needed_pages * 0x1000) / desc_size; + + // SAFETY: scratch_ptr was just allocated and scratch_max_count is correct. + let result = unsafe { + gate.verify_snapshot(cr3, is_buffer_inside_mmram, scratch_ptr, scratch_max_count) + }; + + // Free the scratch buffer regardless of the result. + let _ = PAGE_ALLOCATOR.free_pages(scratch_base, needed_pages); + + result.map_err(|e| { + log::error!("verify_policy_snapshot: snapshot verification failed: {:?}", e); + efi::Status::SECURITY_VIOLATION + }) +} + +/// Handle a COMM_UPDATE request. +/// +/// Updates the communication buffer address for future SMI entries. +fn handle_comm_update(_comm_buffer: *mut u8, comm_buffer_size: &mut usize) -> efi::Status { + log::info!("COMM_UPDATE request"); + + // We do not support dynamic communication buffer updates in this implementation, because + // we expect the runtime allocation will fall into PEI memory bin. + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + + efi::Status::ACCESS_DENIED +} + +/// Handle an UNBLOCK_MEM request. +/// +/// Unblocks a memory region so that user-mode MM drivers can access it. 
+/// +/// ## Validation (stricter than the C `ProcessUnblockPages` implementation) +/// +/// 1. **Ready-to-lock check** - reject if core init is complete (post-lock state). +/// 2. **Buffer size** - must hold header + [`MmSupervisorUnblockMemoryParams`]. +/// 3. **Zero-GUID** - the identifier GUID must be non-zero. +/// 4. **Page alignment** - `PhysicalStart` must be 4 KiB aligned. +/// 5. **Non-zero page count** - `NumberOfPages` must be > 0. +/// 6. **Overflow** - `NumberOfPages * UEFI_PAGE_SIZE` and `PhysicalStart + size` must not overflow. +/// 7. **MMRAM overlap** - region must not overlap supervisor RAM. +/// 8. **Duplicate / conflict** - checked by the [`UNBLOCKED_MEMORY_TRACKER`]. +/// 9. **Page attributes** - pages must be not-present (RP set) and not read-only. +/// 10. **Page table update** - make pages present, R/W, NX; optionally supervisor-only (SP). +fn handle_unblock_mem(comm_buffer: *mut u8, comm_buffer_size: &mut usize) -> efi::Status { + log::info!("UNBLOCK_MEM request"); + + // 1. Ready-to-lock check + // After core initialization is complete, unblock requests are rejected. + // This mirrors the C `mMmReadyToLockDone` guard. + if UNBLOCKED_MEMORY_TRACKER.is_core_init_complete() { + log::error!("UNBLOCK_MEM: rejected - core initialization already complete (post ready-to-lock)"); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::ACCESS_DENIED; + } + + // 2. Buffer size check + let min_size = MmSupervisorRequestHeader::SIZE + MmSupervisorUnblockMemoryParams::SIZE; + if *comm_buffer_size < min_size { + log::error!( + "UNBLOCK_MEM: buffer too small ({} bytes, need at least {})", + *comm_buffer_size, + min_size, + ); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::BUFFER_TOO_SMALL; + } + + // 3. Parse the payload + // SAFETY: We verified the buffer is large enough for header + params. 
+ let params = unsafe { + &*(comm_buffer.add(MmSupervisorRequestHeader::SIZE) as *const MmSupervisorUnblockMemoryParams) + }; + + let physical_start = params.memory_descriptor.physical_start; + let number_of_pages = params.memory_descriptor.number_of_pages; + let attribute = params.memory_descriptor.attribute; + let identifier_guid = params.identifier_guid; + + log::info!( + "UNBLOCK_MEM: request from {:?} - PhysicalStart=0x{:016x}, Pages={}, Attr=0x{:x}", + identifier_guid, + physical_start, + number_of_pages, + attribute, + ); + + // 4. Zero-GUID check + if *identifier_guid.as_bytes() == [0u8; 16] { + log::error!("UNBLOCK_MEM: identifier GUID is zero"); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::INVALID_PARAMETER; + } + + // 5. Page alignment check (stricter than C) + if physical_start & (UEFI_PAGE_SIZE as u64 - 1) != 0 { + log::error!( + "UNBLOCK_MEM: PhysicalStart 0x{:016x} is not page-aligned", + physical_start, + ); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::INVALID_PARAMETER; + } + + // 6. Non-zero page count + if number_of_pages == 0 { + log::error!("UNBLOCK_MEM: NumberOfPages is 0"); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::INVALID_PARAMETER; + } + + // 7. Overflow checks + let region_size = match number_of_pages.checked_mul(UEFI_PAGE_SIZE as u64) { + Some(s) => s, + None => { + log::error!( + "UNBLOCK_MEM: NumberOfPages ({}) * UEFI_PAGE_SIZE overflows u64", + number_of_pages, + ); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::INVALID_PARAMETER; + } + }; + + if physical_start.checked_add(region_size).is_none() { + log::error!( + "UNBLOCK_MEM: address range 0x{:016x} + 0x{:x} overflows", + physical_start, + region_size, + ); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::INVALID_PARAMETER; + } + + // 8. 
MMRAM overlap check + if PAGE_ALLOCATOR.is_region_inside_mmram(physical_start, region_size) { + log::error!( + "UNBLOCK_MEM: region 0x{:016x}-0x{:016x} overlaps with MMRAM", + physical_start, + physical_start + region_size, + ); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::SECURITY_VIOLATION; + } + + // 9. Duplicate / conflict check via tracker + // We use the tracker's region count to distinguish newly-added vs idempotent. + // For newly-added regions we must additionally verify page attributes and + // apply page table changes. For idempotent (exact duplicate) requests we + // can short-circuit with SUCCESS. + let is_supervisor_page = (attribute & efi::MEMORY_SP) != 0; + let track_attributes: u32 = if is_supervisor_page { + patina_mm_policy::RESOURCE_ATTR_READ | patina_mm_policy::RESOURCE_ATTR_WRITE + | 0x80000000 // high bit tag for supervisor-only tracking + } else { + patina_mm_policy::RESOURCE_ATTR_READ | patina_mm_policy::RESOURCE_ATTR_WRITE + }; + + let count_before = UNBLOCKED_MEMORY_TRACKER.region_count(); + match UNBLOCKED_MEMORY_TRACKER.unblock_memory(physical_start, region_size, track_attributes) { + Ok(()) => { + let count_after = UNBLOCKED_MEMORY_TRACKER.region_count(); + if count_after == count_before { + // Idempotent - already tracked with same attributes, nothing more to do. + log::info!( + "UNBLOCK_MEM: region 0x{:016x}-0x{:016x} already unblocked (idempotent)", + physical_start, + physical_start + region_size, + ); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::SUCCESS; + } + // Newly added - continue to verify page attributes and update page table. 
+ } + Err(UnblockError::ConflictingAttributes) => { + log::error!( + "UNBLOCK_MEM: region 0x{:016x}-0x{:016x} conflicts with existing entry", + physical_start, + physical_start + region_size, + ); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::SECURITY_VIOLATION; + } + Err(e) => { + log::error!( + "UNBLOCK_MEM: tracker rejected request for 0x{:016x}-0x{:016x}: {:?}", + physical_start, + physical_start + region_size, + e, + ); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::INVALID_PARAMETER; + } + } + + // 10. Verify current page attributes + // Pages must be not-present (ReadProtect) and NOT read-only. This ensures + // we only unblock pages that were explicitly guarded, matching the C + // `VerifyUnblockRequest` logic with an additional RO check. + { + let pt_guard = crate::PAGE_TABLE.lock(); + if let Some(ref pt) = *pt_guard { + match pt.query_memory_region(physical_start, region_size) { + Ok(current_attrs) => { + log::error!( + "UNBLOCK_MEM: pages at 0x{:016x} are already present (attrs: {:?}). \ + Only not-present pages may be unblocked.", + physical_start, + current_attrs, + ); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::SECURITY_VIOLATION; + } + Err(PtError::NoMapping) => { + // Expected case - pages are currently not present, so we can unblock them. + } + Err(e) => { + log::error!( + "UNBLOCK_MEM: failed to query page attributes for 0x{:016x}-0x{:016x}: {:?}", + physical_start, + physical_start + region_size, + e, + ); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::DEVICE_ERROR; + } + } + } else { + log::error!("UNBLOCK_MEM: page table not initialized"); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::NOT_READY; + } + } + + // 11. 
Apply page table changes + // Make the region: + // - Present (clear ReadProtect) + // - Read/Write (clear ReadOnly) + // - Non-executable (set ExecuteProtect) - data pages must be W^X + // - Optionally Supervisor-only (set Supervisor) if EFI_MEMORY_SP requested + { + let mut pt_guard = crate::PAGE_TABLE.lock(); + if let Some(ref mut pt) = *pt_guard { + let mut new_attrs = MemoryAttributes::ExecuteProtect; // NX - data pages are non-executable + if is_supervisor_page { + new_attrs = new_attrs | MemoryAttributes::Supervisor; // Supervisor-only (U/S=0) + } + + if let Err(e) = pt.map_memory_region(physical_start, region_size, new_attrs) { + log::error!( + "UNBLOCK_MEM: failed to update page table for 0x{:016x}-0x{:016x}: {:?}", + physical_start, + physical_start + region_size, + e, + ); + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + return efi::Status::DEVICE_ERROR; + } + } + // If page table is None, we already returned NOT_READY above. + } + + log::info!( + "UNBLOCK_MEM: SUCCESS - unblocked 0x{:016x}-0x{:016x} ({} pages, {})", + physical_start, + physical_start + region_size, + number_of_pages, + if is_supervisor_page { "supervisor-only" } else { "user-accessible" }, + ); + + *comm_buffer_size = MmSupervisorRequestHeader::SIZE; + efi::Status::SUCCESS +} diff --git a/patina_mm_supervisor_core/src/unblock_memory.rs b/patina_mm_supervisor_core/src/unblock_memory.rs new file mode 100644 index 000000000..cf3943449 --- /dev/null +++ b/patina_mm_supervisor_core/src/unblock_memory.rs @@ -0,0 +1,707 @@ +//! Unblocked Memory Region Management +//! +//! This module provides functionality to track and manage memory regions that have been +//! unblocked for access in the MM (Management Mode) environment, similar to `UnblockMemory.c`. +//! +//! ## Overview +//! +//! The MM Supervisor maintains a list of memory regions that have been explicitly unblocked +//! for access. By default, all memory outside MMRAM is blocked. Drivers and handlers can +//! 
request specific regions to be unblocked via the `unblock_memory` interface. +//! +//! ## Design +//! +//! - The unblocked region tracker is initialized from memory policy descriptors +//! - Regions can be dynamically added via `unblock_memory()` +//! - Access checks use `is_memory_blocked()` to validate memory access requests +//! - Duplicate unblock requests with identical attributes are allowed (idempotent) +//! - Overlapping requests with different attributes are rejected +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +use core::sync::atomic::{AtomicBool, Ordering}; +use spin::Mutex; + +use patina_mm_policy::{MemDescriptorV1_0, RESOURCE_ATTR_READ, RESOURCE_ATTR_WRITE, RESOURCE_ATTR_EXECUTE}; + +use crate::mm_mem::PAGE_ALLOCATOR; + +// ============================================================================ +// Constants +// ============================================================================ + +/// Maximum number of unblocked memory regions that can be tracked. +/// This is a conservative limit; in practice, most systems will have far fewer. +const MAX_UNBLOCKED_REGIONS: usize = 256; + +/// EFI_MEMORY_SP attribute bit - Supervisor page (kernel-only access). +pub const EFI_MEMORY_SP: u64 = 0x0000000000040000; + +// ============================================================================ +// Error Types +// ============================================================================ + +/// Errors that can occur during unblock memory operations. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum UnblockError { + /// The region tracker has not been initialized. + NotInitialized, + /// Already initialized (cannot re-initialize). + AlreadyInitialized, + /// Too many regions to track (exceeded MAX_UNBLOCKED_REGIONS). + TooManyRegions, + /// The requested region overlaps with MMRAM. 
+ OverlapsWithMmram, + /// The requested region overlaps with an existing unblocked region + /// but has different attributes. + ConflictingAttributes, + /// The requested region is already unblocked (identical request). + AlreadyUnblocked, + /// Invalid parameters (null pointer, zero length, etc.). + InvalidParameter, + /// The region's address + size would overflow. + AddressOverflow, +} + +// ============================================================================ +// Unblocked Memory Entry +// ============================================================================ + +/// A single entry in the unblocked memory region list. +#[derive(Clone, Copy, Debug, Default)] +pub struct UnblockedMemoryEntry { + /// Base address of the unblocked region. + pub base_address: u64, + /// Size of the unblocked region in bytes. + pub size: u64, + /// Memory attributes (combination of `RESOURCE_ATTR_*`). + pub attributes: u32, + /// Whether this entry is valid (in use). + pub valid: bool, +} + +impl UnblockedMemoryEntry { + /// Creates a new empty (invalid) entry. + pub const fn empty() -> Self { + Self { + base_address: 0, + size: 0, + attributes: 0, + valid: false, + } + } + + /// Creates a new valid entry from base, size, and attributes. + pub const fn new(base_address: u64, size: u64, attributes: u32) -> Self { + Self { + base_address, + size, + attributes, + valid: true, + } + } + + /// Returns the end address (exclusive) of this region. + pub fn end_address(&self) -> u64 { + self.base_address.saturating_add(self.size) + } + + /// Checks if the given range [base, base + size) is fully contained within this entry. + pub fn contains(&self, base: u64, size: u64) -> bool { + if !self.valid || size == 0 { + return false; + } + let query_end = base.saturating_add(size); + base >= self.base_address && query_end <= self.end_address() + } + + /// Checks if the given range [base, base + size) overlaps with this entry. 
+ pub fn overlaps(&self, base: u64, size: u64) -> bool { + if !self.valid || size == 0 { + return false; + } + let query_end = base.saturating_add(size); + let entry_end = self.end_address(); + + // Two ranges overlap if: start1 < end2 && start2 < end1 + base < entry_end && self.base_address < query_end + } + + /// Checks if this entry has identical base, size, and attributes as the query. + pub fn is_identical(&self, base: u64, size: u64, attributes: u32) -> bool { + self.valid + && self.base_address == base + && self.size == size + && self.attributes == attributes + } +} + +// ============================================================================ +// Unblocked Memory Tracker +// ============================================================================ + +/// Internal state for the unblocked memory tracker. +struct UnblockedMemoryState { + /// Array of unblocked memory entries. + entries: [UnblockedMemoryEntry; MAX_UNBLOCKED_REGIONS], + /// Number of valid entries in the array. + count: usize, +} + +impl UnblockedMemoryState { + /// Creates a new empty state. + const fn new() -> Self { + Self { + entries: [UnblockedMemoryEntry::empty(); MAX_UNBLOCKED_REGIONS], + count: 0, + } + } + + /// Finds an entry that exactly matches the given base and size. + fn find_exact_match(&self, base: u64, size: u64) -> Option<&UnblockedMemoryEntry> { + self.entries[..self.count].iter().find(|e| { + e.valid && e.base_address == base && e.size == size + }) + } + + /// Finds all entries that overlap with the given range. + #[allow(dead_code)] + fn find_overlapping(&self, base: u64, size: u64) -> impl Iterator { + self.entries[..self.count] + .iter() + .filter(move |e| e.overlaps(base, size)) + } + + /// Adds a new entry if there's space. 
+    fn add_entry(&mut self, base: u64, size: u64, attributes: u32) -> Result<(), UnblockError> {
+        if self.count >= MAX_UNBLOCKED_REGIONS {
+            return Err(UnblockError::TooManyRegions);
+        }
+
+        self.entries[self.count] = UnblockedMemoryEntry::new(base, size, attributes);
+        self.count += 1;
+        Ok(())
+    }
+}
+
+/// Global unblocked memory region tracker.
+///
+/// This struct manages a list of memory regions that have been unblocked for
+/// access within the MM environment. It provides thread-safe access to the
+/// region list through internal locking.
+pub struct UnblockedMemoryTracker {
+    /// Whether the tracker has been initialized.
+    initialized: AtomicBool,
+    /// Flag indicating if core initialization is complete (after which we enforce checks).
+    core_init_complete: AtomicBool,
+    /// Internal state protected by a mutex.
+    /// Fix: the mutex must name the protected type explicitly.
+    state: Mutex<UnblockedMemoryState>,
+}
+
+impl UnblockedMemoryTracker {
+    /// Creates a new unblocked memory tracker.
+    pub const fn new() -> Self {
+        Self {
+            initialized: AtomicBool::new(false),
+            core_init_complete: AtomicBool::new(false),
+            state: Mutex::new(UnblockedMemoryState::new()),
+        }
+    }
+
+    /// Initializes the tracker from an array of memory policy descriptors.
+    ///
+    /// This should be called once during BSP initialization after the memory
+    /// policy has been generated from the page table walk.
+    ///
+    /// # Arguments
+    ///
+    /// * `descriptors` - Slice of memory policy descriptors from page table walk.
+    ///   These represent the initial "unblocked" regions.
+ /// + /// # Returns + /// + /// `Ok(())` if initialization succeeded, or an error if: + /// - Already initialized + /// - Too many descriptors to track + pub fn init_from_descriptors(&self, descriptors: &[MemDescriptorV1_0]) -> Result<(), UnblockError> { + // Check if already initialized + if self.initialized.swap(true, Ordering::SeqCst) { + return Err(UnblockError::AlreadyInitialized); + } + + let mut state = self.state.lock(); + + // Add each descriptor as an unblocked region + for desc in descriptors { + if desc.size == 0 { + continue; // Skip zero-size entries + } + + // Skip regions inside MMRAM - those are supervisor-controlled, not "unblocked" + if PAGE_ALLOCATOR.is_region_inside_mmram(desc.base_address, desc.size) { + log::trace!( + "Skipping MMRAM region during unblock init: 0x{:016x} - 0x{:016x}", + desc.base_address, + desc.base_address.saturating_add(desc.size) + ); + continue; + } + + state.add_entry(desc.base_address, desc.size, desc.mem_attributes)?; + } + + log::info!( + "UnblockedMemoryTracker initialized with {} regions", + state.count + ); + + Ok(()) + } + + /// Initializes the tracker from a raw buffer of memory policy descriptors. 
+ /// + /// # Safety + /// + /// The caller must ensure: + /// - `buffer` points to a valid array of `MemDescriptorV1_0` structures + /// - `count` is the number of valid entries in the buffer + pub unsafe fn init_from_buffer( + &self, + buffer: *const MemDescriptorV1_0, + count: usize, + ) -> Result<(), UnblockError> { + if buffer.is_null() || count == 0 { + // Empty initialization is valid + if self.initialized.swap(true, Ordering::SeqCst) { + return Err(UnblockError::AlreadyInitialized); + } + log::info!("UnblockedMemoryTracker initialized with 0 regions (empty)"); + return Ok(()); + } + + // SAFETY: Caller guarantees buffer is valid for count entries + let descriptors = unsafe { core::slice::from_raw_parts(buffer, count) }; + self.init_from_descriptors(descriptors) + } + + /// Marks core initialization as complete. + /// + /// After this is called, memory access checks will be enforced. + /// Before this, all memory is considered accessible (for bootstrap). + pub fn set_core_init_complete(&self) { + self.core_init_complete.store(true, Ordering::Release); + log::info!("UnblockedMemoryTracker: Core initialization complete, enforcing checks"); + } + + /// Checks if core initialization is complete. + pub fn is_core_init_complete(&self) -> bool { + self.core_init_complete.load(Ordering::Acquire) + } + + /// Unblocks a memory region for access. + /// + /// This adds a new region to the unblocked list after validating: + /// - The region does not overlap with MMRAM + /// - The region is not already unblocked with different attributes + /// - Identical unblock requests are allowed (idempotent) + /// + /// # Arguments + /// + /// * `base` - Base address of the region to unblock + /// * `size` - Size of the region in bytes + /// * `attributes` - Memory attributes (RESOURCE_ATTR_READ | WRITE | EXECUTE) + /// + /// # Returns + /// + /// `Ok(())` if the region was successfully unblocked or already unblocked with same attributes. 
+ /// `Err(UnblockError)` if the request is invalid or conflicts with existing regions. + pub fn unblock_memory( + &self, + base: u64, + size: u64, + attributes: u32, + ) -> Result<(), UnblockError> { + // Validate parameters + if size == 0 { + return Err(UnblockError::InvalidParameter); + } + + // Check for address overflow + if base.checked_add(size).is_none() { + return Err(UnblockError::AddressOverflow); + } + + // Check if the region overlaps with MMRAM + if PAGE_ALLOCATOR.is_region_inside_mmram(base, size) { + log::error!( + "unblock_memory: Region 0x{:016x} - 0x{:016x} overlaps with MMRAM", + base, + base.saturating_add(size) + ); + return Err(UnblockError::OverlapsWithMmram); + } + + let mut state = self.state.lock(); + + // Check for existing entries that might conflict + // First, check for exact match (idempotent unblock) + if let Some(existing) = state.find_exact_match(base, size) { + if existing.attributes == attributes { + // Identical request - this is allowed (idempotent) + log::debug!( + "unblock_memory: Region 0x{:016x} - 0x{:016x} already unblocked with same attributes", + base, + base.saturating_add(size) + ); + return Ok(()); + } else { + // Same base/size but different attributes - conflict + log::error!( + "unblock_memory: Region 0x{:016x} - 0x{:016x} already unblocked with different attributes (existing: 0x{:x}, requested: 0x{:x})", + base, + base.saturating_add(size), + existing.attributes, + attributes + ); + return Err(UnblockError::ConflictingAttributes); + } + } + + // Check for partial overlaps (not allowed) + // We iterate directly without collecting to avoid heap allocation + let mut has_overlap = false; + for entry in &state.entries[..state.count] { + if entry.overlaps(base, size) { + log::error!( + "unblock_memory: Region 0x{:016x} - 0x{:016x} overlaps with existing region 0x{:016x} - 0x{:016x}", + base, + base.saturating_add(size), + entry.base_address, + entry.end_address() + ); + has_overlap = true; + // Continue to log all 
overlaps for debugging + } + } + + if has_overlap { + return Err(UnblockError::ConflictingAttributes); + } + + // No conflicts - add the new entry + state.add_entry(base, size, attributes)?; + + log::info!( + "unblock_memory: Unblocked region 0x{:016x} - 0x{:016x} with attributes 0x{:x}", + base, + base.saturating_add(size), + attributes + ); + + Ok(()) + } + + /// Checks if a memory region is blocked (i.e., NOT in the unblocked list). + /// + /// This is the inverse of checking if memory is accessible - a blocked region + /// should not be accessed by MM handlers. + /// + /// # Arguments + /// + /// * `base` - Base address of the region to check + /// * `size` - Size of the region in bytes + /// + /// # Returns + /// + /// `true` if the region is blocked (not accessible), `false` if unblocked. + /// + /// # Note + /// + /// Before core initialization is complete, this always returns `false` + /// (everything is accessible during bootstrap). + pub fn is_memory_blocked(&self, base: u64, size: u64) -> bool { + // During initialization, everything is accessible + if !self.core_init_complete.load(Ordering::Acquire) { + return false; + } + + // Zero-size queries are invalid + if size == 0 { + log::warn!("is_memory_blocked: Zero-size query for address 0x{:016x}", base); + return true; // Invalid query = blocked + } + + // Check for address overflow + if base.checked_add(size).is_none() { + log::warn!("is_memory_blocked: Address overflow for 0x{:016x} + 0x{:x}", base, size); + return true; // Invalid query = blocked + } + + let state = self.state.lock(); + + // Check if the queried region is fully contained within any unblocked entry + for entry in &state.entries[..state.count] { + if entry.contains(base, size) { + log::trace!( + "is_memory_blocked: Region 0x{:016x} - 0x{:016x} is within unblocked region 0x{:016x} - 0x{:016x}", + base, + base.saturating_add(size), + entry.base_address, + entry.end_address() + ); + return false; // Found within unblocked region + } + } + + 
log::trace!( + "is_memory_blocked: Region 0x{:016x} - 0x{:016x} is NOT within any unblocked region", + base, + base.saturating_add(size) + ); + + true // Not found in any unblocked region = blocked + } + + /// Checks if a memory region is within unblocked regions (the inverse of `is_memory_blocked`). + /// + /// This is a convenience method that returns `true` if the region is accessible. + #[inline] + pub fn is_within_unblocked_region(&self, base: u64, size: u64) -> bool { + !self.is_memory_blocked(base, size) + } + + /// Gets the current count of unblocked regions. + pub fn region_count(&self) -> usize { + self.state.lock().count + } + + /// Collects unblocked regions into a provided buffer. + /// + /// This is useful for reporting or serializing the unblocked region list. + /// + /// # Arguments + /// + /// * `start_index` - Starting index in the region list + /// * `buffer` - Buffer to fill with region descriptors + /// + /// # Returns + /// + /// The number of entries actually copied to the buffer. + pub fn collect_regions( + &self, + start_index: usize, + buffer: &mut [MemDescriptorV1_0], + ) -> usize { + let state = self.state.lock(); + + if start_index >= state.count || buffer.is_empty() { + return 0; + } + + let mut copied = 0; + for (i, entry) in state.entries[start_index..state.count].iter().enumerate() { + if i >= buffer.len() { + break; + } + if entry.valid { + buffer[i] = MemDescriptorV1_0 { + base_address: entry.base_address, + size: entry.size, + mem_attributes: entry.attributes, + reserved: 0, + }; + copied += 1; + } + } + + copied + } + + /// Dumps the unblocked regions for debugging. + pub fn dump_regions(&self) { + let state = self.state.lock(); + + log::info!("UnblockedMemoryTracker: {} regions", state.count); + for (i, entry) in state.entries[..state.count].iter().enumerate() { + if entry.valid { + let r = if (entry.attributes & RESOURCE_ATTR_READ) != 0 { "R" } else { "." 
}; + let w = if (entry.attributes & RESOURCE_ATTR_WRITE) != 0 { "W" } else { "." }; + let x = if (entry.attributes & RESOURCE_ATTR_EXECUTE) != 0 { "X" } else { "." }; + log::info!( + " [{}] 0x{:016x} - 0x{:016x} {}{}{}", + i, + entry.base_address, + entry.end_address(), + r, w, x + ); + } + } + } +} + +// ============================================================================ +// Global Instance +// ============================================================================ + +/// Global unblocked memory tracker instance. +/// +/// This is the singleton that manages all unblocked memory regions for the +/// MM Supervisor. It should be initialized during BSP initialization and +/// used for all memory access validation. +pub static UNBLOCKED_MEMORY_TRACKER: UnblockedMemoryTracker = UnblockedMemoryTracker::new(); + +// ============================================================================ +// Tests +// ============================================================================ + +#[cfg(test)] +mod tests { + use super::*; + + fn create_test_tracker() -> UnblockedMemoryTracker { + UnblockedMemoryTracker::new() + } + + #[test] + fn test_empty_entry() { + let entry = UnblockedMemoryEntry::empty(); + assert!(!entry.valid); + assert_eq!(entry.base_address, 0); + assert_eq!(entry.size, 0); + } + + #[test] + fn test_entry_contains() { + let entry = UnblockedMemoryEntry::new(0x1000, 0x1000, RESOURCE_ATTR_READ); + + // Fully contained + assert!(entry.contains(0x1000, 0x1000)); + assert!(entry.contains(0x1000, 0x800)); + assert!(entry.contains(0x1800, 0x800)); + + // Partially outside + assert!(!entry.contains(0x0800, 0x1000)); // Starts before + assert!(!entry.contains(0x1800, 0x1000)); // Ends after + + // Completely outside + assert!(!entry.contains(0x3000, 0x1000)); + } + + #[test] + fn test_entry_overlaps() { + let entry = UnblockedMemoryEntry::new(0x1000, 0x1000, RESOURCE_ATTR_READ); + + // Overlapping cases + assert!(entry.overlaps(0x1000, 0x1000)); // Exact 
match + assert!(entry.overlaps(0x0800, 0x1000)); // Starts before, ends inside + assert!(entry.overlaps(0x1800, 0x1000)); // Starts inside, ends after + assert!(entry.overlaps(0x0800, 0x2000)); // Completely contains entry + + // Non-overlapping + assert!(!entry.overlaps(0x2000, 0x1000)); // Immediately after + assert!(!entry.overlaps(0x0000, 0x1000)); // Immediately before + assert!(!entry.overlaps(0x3000, 0x1000)); // Far after + } + + #[test] + fn test_tracker_before_init_complete() { + let tracker = create_test_tracker(); + + // Before core init complete, nothing is blocked + assert!(!tracker.is_memory_blocked(0x1000, 0x1000)); + assert!(!tracker.is_memory_blocked(0x0, 0x100000)); + } + + #[test] + fn test_tracker_after_init_complete_empty() { + let tracker = create_test_tracker(); + tracker.set_core_init_complete(); + + // After init complete with no regions, everything is blocked + assert!(tracker.is_memory_blocked(0x1000, 0x1000)); + } + + #[test] + fn test_unblock_memory() { + let tracker = create_test_tracker(); + + // Unblock a region + assert!(tracker.unblock_memory(0x1000, 0x1000, RESOURCE_ATTR_READ | RESOURCE_ATTR_WRITE).is_ok()); + + tracker.set_core_init_complete(); + + // Region should be accessible + assert!(!tracker.is_memory_blocked(0x1000, 0x1000)); + assert!(!tracker.is_memory_blocked(0x1000, 0x800)); + + // Outside region should be blocked + assert!(tracker.is_memory_blocked(0x3000, 0x1000)); + } + + #[test] + fn test_idempotent_unblock() { + let tracker = create_test_tracker(); + + // First unblock + assert!(tracker.unblock_memory(0x1000, 0x1000, RESOURCE_ATTR_READ).is_ok()); + + // Identical unblock should succeed + assert!(tracker.unblock_memory(0x1000, 0x1000, RESOURCE_ATTR_READ).is_ok()); + + // Same region with different attributes should fail + assert_eq!( + tracker.unblock_memory(0x1000, 0x1000, RESOURCE_ATTR_READ | RESOURCE_ATTR_WRITE), + Err(UnblockError::ConflictingAttributes) + ); + } + + #[test] + fn 
test_overlapping_unblock_fails() { + let tracker = create_test_tracker(); + + // First unblock + assert!(tracker.unblock_memory(0x1000, 0x1000, RESOURCE_ATTR_READ).is_ok()); + + // Overlapping unblock should fail + assert_eq!( + tracker.unblock_memory(0x1800, 0x1000, RESOURCE_ATTR_READ), + Err(UnblockError::ConflictingAttributes) + ); + } + + #[test] + fn test_invalid_parameters() { + let tracker = create_test_tracker(); + tracker.set_core_init_complete(); + + // Zero size + assert_eq!( + tracker.unblock_memory(0x1000, 0, RESOURCE_ATTR_READ), + Err(UnblockError::InvalidParameter) + ); + + // Overflow + assert_eq!( + tracker.unblock_memory(u64::MAX, 0x1000, RESOURCE_ATTR_READ), + Err(UnblockError::AddressOverflow) + ); + } + + #[test] + fn test_region_count() { + let tracker = create_test_tracker(); + + assert_eq!(tracker.region_count(), 0); + + tracker.unblock_memory(0x1000, 0x1000, RESOURCE_ATTR_READ).unwrap(); + assert_eq!(tracker.region_count(), 1); + + tracker.unblock_memory(0x3000, 0x1000, RESOURCE_ATTR_WRITE).unwrap(); + assert_eq!(tracker.region_count(), 2); + } +} diff --git a/patina_mm_user_core/Cargo.toml b/patina_mm_user_core/Cargo.toml new file mode 100644 index 000000000..fbebe6002 --- /dev/null +++ b/patina_mm_user_core/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "patina_mm_user_core" +version.workspace = true +repository.workspace = true +license.workspace = true +edition.workspace = true +readme = "README.md" +description = "A pure Rust implementation of the MM User Core for standalone MM mode environments." 
+ +# Metadata to tell docs.rs how to build the documentation when uploading +[package.metadata.docs.rs] +features = ["doc"] + +# Example binary showing how to build a PE/COFF MM User Core +[[bin]] +name = "example_mm_user" +path = "bin/example_mm_user.rs" + +[dependencies] +goblin = { workspace = true, features = ["pe32", "pe64"] } +log = { workspace = true } +patina = { workspace = true } +patina_internal_depex = { workspace = true } +patina_internal_mm_common = { workspace = true } +patina_adv_logger = { workspace = true } +r-efi = { workspace = true } +spin = { workspace = true } +uuid = { workspace = true } + +[dev-dependencies] +mockall = { workspace = true } +serial_test = { workspace = true } + +[features] +default = [] +std = [] +doc = [] diff --git a/patina_mm_user_core/README.md b/patina_mm_user_core/README.md new file mode 100644 index 000000000..4697f3e16 --- /dev/null +++ b/patina_mm_user_core/README.md @@ -0,0 +1,168 @@ +# Patina MM User Core + +A pure Rust implementation of the MM User Core for standalone MM mode environments. + +## Overview + +This crate provides the core functionality for the MM (Management Mode) User Core in a standalone MM environment. It is designed to run on x64 systems where: + +- Page tables are already set up by the pre-MM phase +- All images are loaded and ready to execute +- The BSP (Bootstrap Processor) orchestrates incoming requests +- APs (Application Processors) wait in a holding pen, checking a mailbox for work + +## Memory Model + +The core can be instantiated as a `static` with no runtime allocation required for core data structures. + +## Building a PE/COFF Binary + +### Prerequisites + +1. Install the Rust UEFI target: + ```bash + rustup target add x86_64-unknown-uefi + ``` + +2. 
Ensure you have the nightly toolchain (required for `#![feature(...)]`): + ```bash + rustup override set nightly + ``` + +### Build Command + +Build the example MM User Core binary: + +```bash +cargo build --release --target x86_64-unknown-uefi --bin example_mm_user +``` + +The output PE/COFF binary will be at: +``` +target/x86_64-unknown-uefi/release/example_mm_user.efi +``` + +### Entry Point + +The MM User Core exports `user_core_main` as its entry point: + +```rust +#[cfg_attr(target_os = "uefi", unsafe(export_name = "user_core_main"))] +pub extern "efiapi" fn mm_user_main(op_code: u64, arg1: u64, arg2: u64) -> u64 { + USER_CORE.entry_point_worker(op_code, arg1, arg2) +} +``` + +The MM Supervisor Core invokes this entry point via `invoke_demoted_routine` after loading the user core image into MMRAM, passing: +1. Command type (StartUserCore, UserRequest, UserApProcedure) +2. Command-specific data pointer (HOB list for init, buffer for requests) +3. Command-specific auxiliary data + +## Architecture + +### Entry Point Model + +The entry point is executed on all cores simultaneously: + +1. **BSP (Bootstrap Processor)**: + - First CPU to arrive (determined by atomic counter) + - Performs one-time initialization + - Sets up the request handling infrastructure + - Enters the main request serving loop + +2. 
**APs (Application Processors)**: + - All other CPUs + - Wait for BSP initialization to complete + - Enter a holding pen and poll mailboxes for commands + +### Mailbox System + +The mailbox system provides inter-processor communication: + +- Each AP has a dedicated mailbox (cache-line aligned to avoid false sharing) +- BSP sends commands to APs via mailboxes +- APs respond with results through the same mailbox +- Supports synchronization primitives for coordinated operations + +## Usage + +### Basic Platform Implementation + +```rust +#![no_std] +#![no_main] + +use core::{ffi::c_void, panic::PanicInfo}; +use patina_mm_supervisor_core::*; + +struct MyPlatform; + +impl CpuInfo for MyPlatform { + fn ap_poll_timeout_us() -> u64 { 1000 } +} + + + +// Static instance - no heap allocation required +static SUPERVISOR: MmSupervisorCore = MmSupervisorCore::new(); + +#[panic_handler] +fn panic(_info: &PanicInfo) -> ! { + loop { core::hint::spin_loop(); } +} + +#[unsafe(export_name = "MmSupervisorMain")] +pub extern "efiapi" fn mm_supervisor_main(hob_list: *const c_void) -> ! { + SUPERVISOR.entry_point(hob_list) +} +``` + +### Registering Request Handlers + +Handlers must be defined as static references: + +```rust +use patina_mm_supervisor_core::*; + +struct MyHandler; + +impl RequestHandler for MyHandler { + fn guid(&self) -> r_efi::efi::Guid { + // Your handler's GUID + r_efi::efi::Guid::from_fields(0x12345678, 0x1234, 0x5678, 0x12, 0x34, &[0; 6]) + } + + fn handle(&self, context: &mut RequestContext) -> RequestResult { + // Handle the request + RequestResult::Success + } + + fn name(&self) -> &'static str { + "MyHandler" + } +} + +static MY_HANDLER: MyHandler = MyHandler; + +// Register before calling entry_point, or during BSP initialization +SUPERVISOR.register_handler(&MY_HANDLER); +``` + +### Integration with MM IPL + +The MM IPL (from EDK2/MmSupervisorPkg) loads this binary and calls the entry point. 
The HOB list passed contains: + +- `gEfiMmPeiMmramMemoryReserveGuid` - MMRAM ranges +- `gMmCommBufferHobGuid` - Communication buffer information +- `gMmCommonRegionHobGuid` - Common memory regions +- FV HOBs for MM driver firmware volumes + +## Example Binary + +See [bin/example_mm_user.rs](bin/example_mm_user.rs) for a complete example platform implementation. + +## License + +Copyright (c) Microsoft Corporation. + +SPDX-License-Identifier: Apache-2.0 diff --git a/patina_mm_user_core/bin/example_mm_user.rs b/patina_mm_user_core/bin/example_mm_user.rs new file mode 100644 index 000000000..f043d37b9 --- /dev/null +++ b/patina_mm_user_core/bin/example_mm_user.rs @@ -0,0 +1,107 @@ +//! Example MM User Core Binary +//! +//! This is an example platform binary that demonstrates how to build a PE/COFF +//! MM User Core using the `patina_mm_user_core` crate. It follows the same pattern +//! as `q35_dxe_core.rs` for the DXE Core. +//! +//! ## Building +//! +//! Build with cargo for the UEFI target: +//! ```bash +//! cargo build --release --target x86_64-unknown-uefi --bin example_mm_user +//! ``` +//! +//! ## Entry Point +//! +//! The MM User Core is invoked by the MM Supervisor Core via `invoke_demoted_routine` +//! after being loaded into MMRAM. The supervisor passes three arguments: +//! - `arg1`: Command type (StartUserCore, UserRequest, UserApProcedure) +//! - `arg2`: Command-specific data pointer (HOB list for init, buffer for requests) +//! - `arg3`: Command-specific auxiliary data +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! 
+#![cfg(all(target_os = "uefi", target_arch = "x86_64"))] +#![no_std] +#![no_main] + +use core::panic::PanicInfo; +use core::sync::atomic::AtomicBool; +use core::ffi::c_void; +use patina::{log::Format, serial::uart::Uart16550}; +use patina_adv_logger::logger::{ AdvancedLogger, TargetFilter}; +use patina_internal_mm_common::UserCommandType; +use patina_mm_user_core::MmUserCore; + +// ============================================================================= +// Static Core Instance +// ============================================================================= + +/// Flag indicating that advanced logger initialization is complete. +static ADV_LOGGER_INIT_COMPLETE: AtomicBool = AtomicBool::new(false); + +/// The static MM User Core instance. +static USER_CORE: MmUserCore = MmUserCore::new(); + +static LOGGER: AdvancedLogger = AdvancedLogger::new( + Format::Standard, + &[ + TargetFilter { target: "goblin", log_level: log::LevelFilter::Off, hw_filter_override: None }, + TargetFilter { target: "allocations", log_level: log::LevelFilter::Off, hw_filter_override: None }, + TargetFilter { target: "efi_memory_map", log_level: log::LevelFilter::Off, hw_filter_override: None }, + TargetFilter { target: "mm_comm", log_level: log::LevelFilter::Off, hw_filter_override: None }, + TargetFilter { target: "sw_mmi", log_level: log::LevelFilter::Off, hw_filter_override: None }, + TargetFilter { target: "patina_performance", log_level: log::LevelFilter::Off, hw_filter_override: None }, + ], + log::LevelFilter::Info, + Uart16550::Io { base: 0x402 }, +); + + +// ============================================================================= +// Panic Handler +// ============================================================================= + +#[panic_handler] +fn panic(_info: &PanicInfo) -> ! 
{ + loop {} +} + +// ============================================================================= +// Entry Point +// ============================================================================= + +/// The entry point for the MM User Core binary. +/// +/// Called by the MM Supervisor via `invoke_demoted_routine` with three arguments: +/// - `arg1`: Command type (0 = StartUserCore, 1 = UserRequest, 2 = UserApProcedure) +/// - `arg2`: Command-specific data (HOB list pointer for init, buffer pointer for requests) +/// - `arg3`: Command-specific auxiliary data (0 for init, context size for requests) +/// +/// Returns 0 (`EFI_SUCCESS`) on success, or a non-zero EFI status code on failure. +#[cfg_attr(target_os = "uefi", unsafe(export_name = "user_core_main"))] +pub extern "efiapi" fn mm_user_main(op_code: u64, arg1: u64, arg2: u64) -> u64 { + + // Initialize the advanced logger on the first CPU to arrive (BSP) + if !ADV_LOGGER_INIT_COMPLETE.swap(true, core::sync::atomic::Ordering::SeqCst) { + // If this is our first time here, it better be that the op_code being MmUserRequestTypeInit + if op_code != UserCommandType::StartUserCore as u64 { + // This means the BSP didn't send the expected init command first, which is a problem. + // Log an error and return failure. + panic!("MM User Core received non-init command before initialization: op_code = {}", op_code); + } + + log::set_logger(&LOGGER).map(|()| log::set_max_level(log::LevelFilter::Trace)).unwrap(); + // SAFETY: The physical_hob_list pointer is considered valid at this point as it's provided by the core + // to the entry point. + unsafe { + LOGGER.init(arg1 as *const c_void).unwrap(); + } + } + + USER_CORE.entry_point_worker(op_code, arg1, arg2) +} diff --git a/patina_mm_user_core/src/config_table.rs b/patina_mm_user_core/src/config_table.rs new file mode 100644 index 000000000..9ab57fc60 --- /dev/null +++ b/patina_mm_user_core/src/config_table.rs @@ -0,0 +1,220 @@ +//! 
Configuration Table Management for MM System Table +//! +//! Implements `MmInstallConfigurationTable` — the ability to add, modify, or +//! delete (GUID, pointer) pairs stored in the MM System Table's configuration +//! table array. +//! +//! ## Semantics +//! +//! The configuration table is an array of `EFI_CONFIGURATION_TABLE` entries +//! exposed through `EfiMmSystemTable.mm_configuration_table`. Drivers use +//! this to publish well-known data (e.g. the HOB list) that other drivers +//! can discover by iterating the table. +//! +//! Operations: +//! - **Add**: GUID not present, table pointer non-null. +//! - **Modify**: GUID already present, table pointer non-null → update pointer. +//! - **Delete**: GUID already present, table pointer null → remove entry. +//! - **Error**: GUID not present, table pointer null → `NOT_FOUND`. +//! +//! After every modification the MM System Table's `mm_configuration_table` +//! pointer and `number_of_table_entries` count are updated so the change is +//! immediately visible to all consumers. +//! +//! ## Thread Safety +//! +//! All access is serialized through a `spin::Mutex`. The configuration table +//! array pointer stored in the system table is replaced atomically (pointer- +//! sized write) so readers always see a consistent snapshot even without +//! holding the lock. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +extern crate alloc; + +use alloc::boxed::Box; +use alloc::vec::Vec; +use core::ffi::c_void; + +use r_efi::efi; +use spin::Mutex; + +use crate::mm_services::get_mm_system_table; + +// ============================================================================= +// Global instance +// ============================================================================= + +/// Global configuration table database used by the MM System Table. 
+pub static GLOBAL_CONFIG_TABLE_DB: MmConfigurationTableDb = MmConfigurationTableDb::new(); + +// ============================================================================= +// Database struct +// ============================================================================= + +/// Configuration table database for the MM System Table. +/// +/// Maintains the canonical list of `(GUID, Pointer)` pairs. After each +/// mutation the MM System Table's pointer and count are updated so all +/// consumers see the change immediately. +pub struct MmConfigurationTableDb { + inner: Mutex, +} + +struct ConfigTableInner { + /// The authoritative list of entries. + entries: Vec, + /// Raw pointer to the most recently leaked boxed‐slice that the system + /// table currently points to. `null` when no allocation exists. + leaked_ptr: *mut efi::ConfigurationTable, + /// Length of the leaked allocation (for reclaim). + leaked_len: usize, +} + +// SAFETY: All access is synchronized by the Mutex. +unsafe impl Send for MmConfigurationTableDb {} +unsafe impl Sync for MmConfigurationTableDb {} + +impl MmConfigurationTableDb { + /// Create a new, empty configuration table database. + pub const fn new() -> Self { + Self { + inner: Mutex::new(ConfigTableInner { + entries: Vec::new(), + leaked_ptr: core::ptr::null_mut(), + leaked_len: 0, + }), + } + } + + /// Install, modify, or delete a configuration table entry. + /// + /// Semantics match the PI Specification `EFI_MM_INSTALL_CONFIGURATION_TABLE`: + /// + /// | Existing? | `table` | Action | + /// |-----------|---------|----------| + /// | Yes | non-null| Modify | + /// | Yes | null | Delete | + /// | No | non-null| Add | + /// | No | null | NOT_FOUND| + pub fn install_configuration_table( + &self, + guid: &efi::Guid, + table: *mut c_void, + ) -> efi::Status { + let mut inner = self.inner.lock(); + + // Search for an existing entry with the same GUID. 
+ let existing_idx = inner.entries.iter().position(|e| e.vendor_guid == *guid); + + match (existing_idx, table.is_null()) { + // Match found, table non-null → modify + (Some(idx), false) => { + inner.entries[idx].vendor_table = table; + log::debug!( + "MmInstallConfigurationTable: modified {:?}", + guid + ); + } + // Match found, table null → delete + (Some(idx), true) => { + inner.entries.remove(idx); + log::debug!( + "MmInstallConfigurationTable: deleted {:?}", + guid + ); + } + // No match, table non-null → add + (None, false) => { + inner.entries.push(efi::ConfigurationTable { + vendor_guid: *guid, + vendor_table: table, + }); + log::debug!( + "MmInstallConfigurationTable: added {:?}", + guid + ); + } + // No match, table null → error + (None, true) => { + return efi::Status::NOT_FOUND; + } + } + + // Publish the updated table to the MM System Table. + Self::publish_to_system_table(&mut inner); + + efi::Status::SUCCESS + } + + /// Look up a configuration table entry by GUID. + /// + /// Returns the `vendor_table` pointer if found, or `None`. + #[allow(dead_code)] + pub fn get_configuration_table(&self, guid: &efi::Guid) -> Option<*mut c_void> { + let inner = self.inner.lock(); + inner + .entries + .iter() + .find(|e| e.vendor_guid == *guid) + .map(|e| e.vendor_table) + } + + // ------------------------------------------------------------------------- + // Internal helpers + // ------------------------------------------------------------------------- + + /// Re‐publish the current entry list to the MM System Table. + /// + /// Allocates a new boxed slice, updates the system table pointer, then + /// reclaims the previous allocation. 
+ fn publish_to_system_table(inner: &mut ConfigTableInner) { + let mmst = get_mm_system_table(); + if mmst.is_null() { + return; + } + + // --- Reclaim the previous allocation --------------------------------- + if !inner.leaked_ptr.is_null() { + // SAFETY: `leaked_ptr` / `leaked_len` were produced by + // `Box::into_raw` in a prior call to this function. + unsafe { + let _ = Box::from_raw(core::ptr::slice_from_raw_parts_mut( + inner.leaked_ptr, + inner.leaked_len, + )); + } + inner.leaked_ptr = core::ptr::null_mut(); + inner.leaked_len = 0; + } + + // --- Produce the new allocation and patch the MMST ------------------- + if inner.entries.is_empty() { + // SAFETY: We hold the Mutex, and `mmst` was initialised by + // `init_mm_system_table`. + unsafe { + (*mmst).number_of_table_entries = 0; + (*mmst).mm_configuration_table = core::ptr::null_mut(); + } + } else { + let boxed: Box<[efi::ConfigurationTable]> = + inner.entries.clone().into_boxed_slice(); + let len = boxed.len(); + let ptr = Box::into_raw(boxed) as *mut efi::ConfigurationTable; + + inner.leaked_ptr = ptr; + inner.leaked_len = len; + + // SAFETY: Same as above. + unsafe { + (*mmst).number_of_table_entries = len; + (*mmst).mm_configuration_table = ptr; + } + } + } +} diff --git a/patina_mm_user_core/src/core_handlers.rs b/patina_mm_user_core/src/core_handlers.rs new file mode 100644 index 000000000..4b0084a51 --- /dev/null +++ b/patina_mm_user_core/src/core_handlers.rs @@ -0,0 +1,321 @@ +//! MM Core Internal MMI Handlers +//! +//! These are the MMI handlers registered by the MM Core itself to handle +//! lifecycle events forwarded from the DXE phase. They mirror the C +//! `mMmCoreMmiHandlers[]` table in `StandaloneMmCore.c`. +//! +//! Each handler is registered with [`MmiDatabase::register_internal_handler`] +//! during startup and dispatched when the supervisor forwards the corresponding +//! GUID-tagged MMI through the communication buffer. +//! +//! ## Lifecycle Events +//! +//! 
| GUID | Handler | Description | +//! |------|---------|-------------| +//! | `MM_DISPATCH_EVENT` | [`mm_driver_dispatch_handler`] | Re-triggers driver dispatch | +//! | `MM_DXE_READY_TO_LOCK_PROTOCOL` | [`mm_ready_to_lock_handler`] | Unregisters one-shot handlers, installs lock protocol | +//! | `MM_END_OF_PEI_PROTOCOL` | [`mm_end_of_pei_handler`] | Installs end-of-PEI protocol | +//! | `EVENT_GROUP_END_OF_DXE` | [`mm_end_of_dxe_handler`] | Installs end-of-DXE protocol | +//! | `EVENT_EXIT_BOOT_SERVICES` | [`mm_exit_boot_service_handler`] | Installs exit-boot-services protocol | +//! | `EVENT_READY_TO_BOOT` | [`mm_ready_to_boot_handler`] | Installs ready-to-boot protocol | +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +use core::ffi::c_void; + +use r_efi::efi; +use spin::Mutex; + +use crate::mmi::InternalMmiHandler; +use patina::{BinaryGuid, guids}; + +// ============================================================================= +// Handler table definition +// ============================================================================= + +/// Description of a core MMI handler to be registered at startup. +struct CoreMmiHandler { + /// The handler function (native Rust signature). + handler: InternalMmiHandler, + /// The GUID that triggers this handler. + handler_type: &'static BinaryGuid, + /// Whether this handler should be unregistered during ready-to-lock. + unregister_on_lock: bool, +} + +/// Table of MMI handlers registered by the MM Core, mirroring the C `mMmCoreMmiHandlers[]`. 
+// NOTE: the number of entries here must stay in sync with the length of
+// `DISPATCH_HANDLES` below — the two are indexed in parallel.
+static CORE_MMI_HANDLERS: &[CoreMmiHandler] = &[
+    CoreMmiHandler {
+        handler: mm_driver_dispatch_handler,
+        handler_type: &guids::MM_DISPATCH_EVENT,
+        unregister_on_lock: false,
+    },
+    CoreMmiHandler {
+        handler: mm_ready_to_lock_handler,
+        handler_type: &guids::MM_DXE_READY_TO_LOCK_PROTOCOL,
+        // The lock handler unregisters itself once the lock event fires.
+        unregister_on_lock: true,
+    },
+    CoreMmiHandler {
+        handler: mm_end_of_pei_handler,
+        handler_type: &guids::MM_END_OF_PEI_PROTOCOL,
+        unregister_on_lock: false,
+    },
+    CoreMmiHandler {
+        handler: mm_end_of_dxe_handler,
+        handler_type: &guids::EVENT_GROUP_END_OF_DXE,
+        unregister_on_lock: false,
+    },
+    CoreMmiHandler {
+        handler: mm_exit_boot_service_handler,
+        handler_type: &guids::EVENT_EXIT_BOOT_SERVICES,
+        unregister_on_lock: false,
+    },
+    CoreMmiHandler {
+        handler: mm_ready_to_boot_handler,
+        handler_type: &guids::EVENT_READY_TO_BOOT,
+        unregister_on_lock: false,
+    },
+];
+
+// =============================================================================
+// Dispatch handle tracking
+// =============================================================================
+
+/// Newtype wrapper around `efi::Handle` so it can be stored in a `static Mutex`.
+///
+/// `efi::Handle` is `*mut c_void` which is `!Send`. The dispatch handles are only
+/// written by the BSP during single-threaded init and read during the ready-to-lock
+/// handler (also on the BSP), so it is safe to share them.
+/// NOTE(review): this safety argument assumes no AP ever touches these handles —
+/// confirm against the supervisor's dispatch model.
+#[derive(Clone, Copy)]
+struct SendHandle(efi::Handle);
+unsafe impl Send for SendHandle {}
+unsafe impl Sync for SendHandle {}
+
+impl SendHandle {
+    /// The null (unregistered) handle sentinel.
+    const NULL: Self = Self(core::ptr::null_mut());
+}
+
+/// Dispatch handles returned from `register_internal_handler` for each core handler.
+///
+/// Index matches the `CORE_MMI_HANDLERS` table. Populated by [`register_core_mmi_handlers`].
+// Length must equal `CORE_MMI_HANDLERS.len()` (indexed in parallel).
+static DISPATCH_HANDLES: Mutex<[SendHandle; 6]> = Mutex::new([SendHandle::NULL; 6]);
+
+// =============================================================================
+// Public API
+// =============================================================================
+
+/// Register all core MMI handlers with the global MMI database.
+///
+/// This must be called after driver dispatch (matching the C `StandaloneMmMain`
+/// ordering where handlers are registered after `MmDispatchFvs`).
+///
+/// On registration failure the corresponding slot in `DISPATCH_HANDLES` remains
+/// `NULL`; the ready-to-lock handler skips null handles, so a failed
+/// registration is logged and tolerated rather than treated as fatal.
+pub fn register_core_mmi_handlers() {
+    let mut handles = DISPATCH_HANDLES.lock();
+
+    for (i, entry) in CORE_MMI_HANDLERS.iter().enumerate() {
+        match crate::mm_services::GLOBAL_MMI_DB.register_internal_handler(
+            entry.handler,
+            Some(entry.handler_type),
+        ) {
+            Ok(handle) => {
+                // Remember the handle so lifecycle handlers can unregister later.
+                handles[i] = SendHandle(handle);
+                log::info!(
+                    "Registered core MMI handler [{}] for {:?}",
+                    i,
+                    entry.handler_type,
+                );
+            }
+            Err(status) => {
+                log::error!(
+                    "Failed to register core MMI handler [{}] for {:?}: {:?}",
+                    i,
+                    entry.handler_type,
+                    status,
+                );
+            }
+        }
+    }
+}
+
+// =============================================================================
+// Install-protocol helper
+// =============================================================================
+
+/// Install a protocol with a NULL interface on a new handle.
+///
+/// This mirrors the C pattern used in lifecycle handlers:
+/// ```c
+/// MmHandle = NULL;
+/// Status = MmInstallProtocolInterface(&MmHandle, &guid, EFI_NATIVE_INTERFACE, NULL);
+/// ```
+///
+/// Returns the status from the protocol database install, or `SUCCESS` after
+/// all pending notify callbacks have been fired.
+fn install_lifecycle_protocol(guid: &efi::Guid) -> efi::Status {
+    let (handle, pending_notifies) =
+        match crate::mm_services::GLOBAL_PROTOCOL_DB.install_protocol(
+            core::ptr::null_mut(), // NULL handle → allocate new
+            guid,
+            core::ptr::null_mut(), // NULL interface
+        ) {
+            Ok(result) => result,
+            Err(status) => return status,
+        };
+
+    log::info!(
+        "Installed lifecycle protocol {:?} on handle {:p}",
+        guid,
+        handle,
+    );
+
+    // Fire pending notify callbacks outside the protocol DB lock.
+    for notify in pending_notifies {
+        // SAFETY: `notify.function` was registered through the protocol DB by
+        // a dispatched driver; the DB hands back the pointer unmodified.
+        // NOTE(review): assumes registrants guarantee the callback stays valid
+        // for the lifetime of the registration — confirm the DB contract.
+        unsafe {
+            (notify.function)(
+                &notify.guid as *const efi::Guid,
+                notify.interface,
+                notify.handle,
+            );
+        }
+    }
+
+    efi::Status::SUCCESS
+}
+
+// =============================================================================
+// Handler implementations
+// =============================================================================
+
+/// MM Driver Dispatch Handler.
+///
+/// Re-triggers driver dispatch for any previously discovered but not-yet-dispatched
+/// drivers. Once dispatch completes, the handler unregisters itself (it is a
+/// one-shot handler).
+///
+/// Corresponds to the C `MmDriverDispatchHandler`.
+fn mm_driver_dispatch_handler(
+    _handler_type: &efi::Guid,
+    _comm_buffer: *mut c_void,
+    _comm_buffer_size: *mut usize,
+) -> efi::Status {
+    log::info!("MmDriverDispatchHandler");
+
+    // TODO: Re-dispatch any remaining undispatched drivers.
+    // Currently all drivers are dispatched during StartUserCore, so this is a no-op.
+
+    // Self-unregister (one-shot).
+    // Copy the handle out before calling into the MMI DB so the
+    // DISPATCH_HANDLES lock is not held across the unregister call.
+    let handles = DISPATCH_HANDLES.lock();
+    let dispatch_handle = handles[0].0;
+    drop(handles);
+
+    if !dispatch_handle.is_null() {
+        let _ = crate::mm_services::GLOBAL_MMI_DB.mmi_handler_unregister(dispatch_handle);
+    }
+
+    log::info!("MmDriverDispatchHandler done");
+
+    efi::Status::SUCCESS
+}
+
+/// MM Ready To Lock Handler.
+///
+/// Called when `gEfiDxeMmReadyToLockProtocolGuid` MMI is received. This:
+/// 1. Unregisters handlers marked with `unregister_on_lock` (including itself).
+/// 2. Installs the `gEfiMmReadyToLockProtocolGuid` protocol to notify MM drivers.
+///
+/// Corresponds to the C `MmReadyToLockHandler`.
+fn mm_ready_to_lock_handler(
+    _handler_type: &efi::Guid,
+    _comm_buffer: *mut c_void,
+    _comm_buffer_size: *mut usize,
+) -> efi::Status {
+    log::info!("MmReadyToLockHandler");
+
+    // Unregister handlers that are no longer needed after MM lock.
+    // Null slots are skipped (handler may have failed to register).
+    let handles = DISPATCH_HANDLES.lock();
+    for (i, entry) in CORE_MMI_HANDLERS.iter().enumerate() {
+        if entry.unregister_on_lock && !handles[i].0.is_null() {
+            let _ = crate::mm_services::GLOBAL_MMI_DB.mmi_handler_unregister(handles[i].0);
+        }
+    }
+    drop(handles);
+
+    // Install the MM Ready To Lock Protocol.
+    let status = install_lifecycle_protocol(&guids::MM_READY_TO_LOCK_PROTOCOL);
+    if status != efi::Status::SUCCESS {
+        log::error!("Failed to install MM Ready To Lock Protocol: {:?}", status);
+    }
+
+    status
+}
+
+/// MM End of PEI Handler.
+///
+/// Installs the `gEfiMmEndOfPeiProtocol` protocol.
+///
+/// Corresponds to the C `MmEndOfPeiHandler`.
+fn mm_end_of_pei_handler(
+    _handler_type: &efi::Guid,
+    _comm_buffer: *mut c_void,
+    _comm_buffer_size: *mut usize,
+) -> efi::Status {
+    log::info!("MmEndOfPeiHandler");
+
+    install_lifecycle_protocol(&guids::MM_END_OF_PEI_PROTOCOL)
+}
+
+/// MM End of DXE Handler.
+///
+/// Installs the `gEfiMmEndOfDxeProtocolGuid` protocol.
+///
+/// Corresponds to the C `MmEndOfDxeHandler`.
+fn mm_end_of_dxe_handler(
+    _handler_type: &efi::Guid,
+    _comm_buffer: *mut c_void,
+    _comm_buffer_size: *mut usize,
+) -> efi::Status {
+    log::info!("MmEndOfDxeHandler");
+
+    install_lifecycle_protocol(&guids::MM_END_OF_DXE_PROTOCOL)
+}
+
+/// MM Exit Boot Service Handler.
+///
+/// Installs the `gEfiEventExitBootServicesGuid` protocol (once).
+///
+/// Corresponds to the C `MmExitBootServiceHandler`.
+fn mm_exit_boot_service_handler(
+    _handler_type: &efi::Guid,
+    _comm_buffer: *mut c_void,
+    _comm_buffer_size: *mut usize,
+) -> efi::Status {
+    static FIRED: spin::Once<()> = spin::Once::new();
+    let mut status = efi::Status::SUCCESS;
+
+    // `status` is only written on the first invocation; any later invocation
+    // skips the closure and returns SUCCESS (one-shot semantics). Note this
+    // also means a failed install is never retried.
+    FIRED.call_once(|| {
+        status = install_lifecycle_protocol(&guids::EVENT_EXIT_BOOT_SERVICES);
+    });
+
+    status
+}
+
+/// MM Ready To Boot Handler.
+///
+/// Installs the `gEfiEventReadyToBootGuid` protocol (once).
+///
+/// Corresponds to the C `MmReadyToBootHandler`.
+fn mm_ready_to_boot_handler(
+    _handler_type: &efi::Guid,
+    _comm_buffer: *mut c_void,
+    _comm_buffer_size: *mut usize,
+) -> efi::Status {
+    static FIRED: spin::Once<()> = spin::Once::new();
+    let mut status = efi::Status::SUCCESS;
+
+    // One-shot: see mm_exit_boot_service_handler for the semantics.
+    FIRED.call_once(|| {
+        status = install_lifecycle_protocol(&guids::EVENT_READY_TO_BOOT);
+    });
+
+    status
+}
diff --git a/patina_mm_user_core/src/entry_point.asm b/patina_mm_user_core/src/entry_point.asm
new file mode 100644
index 000000000..2d5c4a3e1
--- /dev/null
+++ b/patina_mm_user_core/src/entry_point.asm
@@ -0,0 +1,33 @@
+#
+# Entry point to a Standalone MM driver.
+#
+# Copyright (c), Microsoft Corporation.
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+
+.section .data
+
+.section .text
+.global user_core_main
+.global efi_main
+
+
+.align 8
+# Shim layer that redefines the contract between runtime module and init.
+efi_main:
+
+    # By the time we get here, everything should already be running at CPL3.
+    sub rsp, 0x28
+
+    # To bootstrap this driver, call the entry point worker directly.
+    call user_core_main
+
+    # Restore the stack pointer.
+    add rsp, 0x28
+
+    # On return, the status is in rax; leave it untouched.
+    # r15 contains the call gate selector that was planned ahead.
+    push r15 # New selector to be used, which is set to call gate by the supervisor
+    .byte 0xff, 0x1c, 0x24 # call far qword [rsp] # return to ring 0 via call gate m16:32
+1:
+    jmp 1b # Code should not reach here
diff --git a/patina_mm_user_core/src/lib.rs b/patina_mm_user_core/src/lib.rs
new file mode 100644
index 000000000..ed3a43911
--- /dev/null
+++ b/patina_mm_user_core/src/lib.rs
@@ -0,0 +1,608 @@
+//! MM User Core
+//!
+//! A pure Rust implementation of the MM User Core for standalone MM mode environments.
+//!
+//! This crate provides the core functionality for a user-mode (Ring 3) MM module that is
+//! invoked by the MM Supervisor Core via privilege demotion. It implements the equivalent
+//! functionality of the C `StandaloneMmCore` — discovering drivers from HOBs, evaluating
+//! dependency expressions, dispatching drivers, and managing MMI handlers.
+//!
+//! ## Architecture
+//!
+//! The user core is invoked by the supervisor with three command types:
+//! - **StartUserCore**: One-time initialization. Walk HOBs to discover drivers and dispatch them.
+//! - **UserRequest**: Runtime MMI dispatch. Parse the communication buffer and invoke registered handlers.
+//! - **UserApProcedure**: Execute a procedure on behalf of an AP.
+//!
+//! ## Entry Protocol
+//!
+//! The supervisor calls the user core entry point with three arguments:
+//! - `arg1` (`u64`): Command type (0 = StartUserCore, 1 = UserRequest, 2 = UserApProcedure)
+//! - `arg2` (`u64`): Command-specific data pointer
+//! - `arg3` (`u64`): Command-specific size or auxiliary data
+//!
+//! ## Memory Model
+//!
+//! This crate runs in Ring 3 (user mode). It does not have direct access to supervisor
+//! resources. All supervisor services are accessed through syscalls.
+//!
+//! ## Example
+//!
+//! ```rust,ignore
+//! use patina_mm_user_core::*;
+//!
+//! static USER_CORE: MmUserCore = MmUserCore::new();
+//! ```
+//!
+//! ## License
+//!
+//! Copyright (c) Microsoft Corporation.
+//!
+//! SPDX-License-Identifier: Apache-2.0
+//!
+#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
+#![cfg(target_arch = "x86_64")]
+
+extern crate alloc;
+
+pub mod config_table;
+pub mod core_handlers;
+pub mod mm_dispatcher;
+pub mod mm_mem;
+pub mod mm_services;
+pub mod mmi;
+pub mod pool_allocator;
+pub mod protocol_db;
+
+use core::{
+    ffi::c_void,
+    mem,
+    num::NonZeroUsize,
+    ptr::NonNull,
+    sync::atomic::{AtomicBool, AtomicU64, Ordering},
+};
+
+use patina::pi::hob::{Hob, PhaseHandoffInformationTable};
+use r_efi::efi;
+use spin::Once;
+
+use crate::{
+    mm_dispatcher::MmDispatcher,
+    mmi::MmiDatabase,
+    protocol_db::ProtocolDatabase,
+};
+
+use core::arch::global_asm;
+
+// The ring-3 entry shim (`efi_main`) lives in entry_point.asm and tail-calls
+// `user_core_main`, then returns to ring 0 via a call gate in r15.
+global_asm!(include_str!("entry_point.asm"));
+
+// =============================================================================
+// GUIDs
+// =============================================================================
+
+/// GUID used in `MemoryAllocationModule` HOBs to identify MM Supervisor module allocations.
+///
+/// `gMmSupervisorHobMemoryAllocModuleGuid`
+pub const MM_SUPERVISOR_HOB_MEMORY_ALLOC_MODULE_GUID: efi::Guid = efi::Guid::from_fields(
+    0x3efafe72,
+    0x3dbf,
+    0x4341,
+    0xad,
+    0x04,
+    &[0x1c, 0xb6, 0xe8, 0xb6, 0x8e, 0x5e],
+);
+
+/// GUID identifying the MM User Core module itself.
+///
+/// `gMmSupervisorUserGuid`
+pub const MM_SUPERVISOR_USER_GUID: efi::Guid = efi::Guid::from_fields(
+    0x30d1cc3f,
+    0xc1db,
+    0x41ed,
+    0xb1,
+    0x13,
+    &[0xab, 0xce, 0x21, 0xb0, 0x2b, 0xce],
+);
+
+/// GUID identifying the MM Supervisor Core module (to be skipped during driver discovery).
+///
+/// `gMmSupervisorCoreGuid`
+pub const MM_SUPERVISOR_CORE_GUID: efi::Guid = efi::Guid::from_fields(
+    0x4e4c89dc,
+    0xa452,
+    0x4b6b,
+    0xb1,
+    0x83,
+    &[0xf1, 0x6a, 0x2a, 0x22, 0x37, 0x33],
+);
+
+/// GUID for depex data HOBs paired with driver `MemoryAllocationModule` HOBs.
+///
+/// `gMmSupervisorDepexHobGuid`
+pub const MM_SUPERVISOR_DEPEX_HOB_GUID: efi::Guid = efi::Guid::from_fields(
+    0xb17f0049,
+    0xaffd,
+    0x4530,
+    0xac,
+    0xd6,
+    &[0xe2, 0x45, 0xe1, 0x9d, 0xea, 0xf1],
+);
+
+/// Mirrors the MM_SUPV_DEPEX_HOB_DATA structure defined in the supervisor.
+#[repr(C)]
+#[derive(Debug, Clone, Copy)]
+pub struct DepexHobData {
+    pub name: efi::Guid, // Protocol GUID
+    pub depex_expression_size: u64,
+    // Zero-length trailing array: the raw depex bytes follow this header.
+    pub depex_expression: [u8; 0],
+}
+
+// The MM communication buffer HOB GUID (`gMmCommBufferHobGuid`) is
+// `MM_COMM_BUFFER_HOB_GUID`, imported from `patina` below. (Previously this
+// note was a `///` doc comment attached to the `use` item, which is rejected
+// by the `unused_doc_comments` lint — doc comments on imports are ignored.)
+use patina_internal_mm_common::UserCommandType;
+use patina::pi::protocols::communication::EfiMmCommunicateHeader;
+use patina::pi::mm_cis::EfiMmEntryContext;
+use patina::management_mode::MmCommBufferStatus;
+use patina::management_mode::comm_buffer_hob::{MM_COMM_BUFFER_HOB_GUID, MmCommonBufferHobData};
+
+// =============================================================================
+// Communication Buffer Tracking
+// =============================================================================
+
+/// Base address of the user communication buffer (discovered from HOBs).
+///
+/// The supervisor rewrites the HOB's `physical_start` to point to the internal
+/// (MMRAM-resident, user-accessible) copy of the communication buffer before
+/// invoking `StartUserCore`.
+static COMM_BUFFER_BASE: AtomicU64 = AtomicU64::new(0);
+
+/// Size in bytes of the user communication buffer.
+static COMM_BUFFER_SIZE: AtomicU64 = AtomicU64::new(0); + +// ============================================================================= +// MmUserCore +// ============================================================================= + +/// Static reference to the user core instance. +static __USER_CORE: Once = Once::new(); + +/// Useful for offline inspection (like debugging) to determine core version. +#[used] +static MM_USER_CORE_VERSION: &str = env!("CARGO_PKG_VERSION"); + +/// The MM User Core responsible for driver dispatch and MMI handling in user mode. +/// +/// Create a static instance and call [`entry_point_worker`](MmUserCore::entry_point_worker) +/// from the binary entry point. +/// +/// ## Example +/// +/// ```rust,ignore +/// static USER_CORE: MmUserCore = MmUserCore::new(); +/// +/// #[unsafe(export_name = "efi_main")] +/// pub extern "efiapi" fn _start(arg1: u64, arg2: u64, arg3: u64) -> u64 { +/// USER_CORE.entry_point_worker(arg1, arg2, arg3) +/// } +/// ``` +pub struct MmUserCore { + /// The MMI handler database. + pub mmi_db: MmiDatabase, + /// The protocol/handle database (for depex evaluation and driver services). + pub protocol_db: ProtocolDatabase, + /// The driver dispatcher. + pub dispatcher: MmDispatcher, + /// Whether the core has completed initialization. + initialized: AtomicBool, +} + +// SAFETY: MmUserCore is designed to be shared across threads with proper synchronization. +unsafe impl Send for MmUserCore {} +unsafe impl Sync for MmUserCore {} + +impl MmUserCore { + /// Creates a new instance of the MM User Core. + pub const fn new() -> Self { + Self { + mmi_db: MmiDatabase::new(), + protocol_db: ProtocolDatabase::new(), + dispatcher: MmDispatcher::new(), + initialized: AtomicBool::new(false), + } + } + + /// Sets the static user core instance for global access. + /// + /// Returns true if the address was successfully stored, false if already set. 
+ #[must_use] + fn set_instance(&'static self) -> bool { + let physical_address = NonNull::from_ref(self).expose_provenance(); + &physical_address == __USER_CORE.call_once(|| physical_address) + } + + /// Gets the static MM User Core instance for global access. + #[allow(unused)] + pub fn instance<'a>() -> &'a Self { + // SAFETY: The pointer is guaranteed to be valid as set_instance ensures single initialization. + unsafe { + NonNull::::with_exposed_provenance( + *__USER_CORE.get().expect("MM User Core is not initialized."), + ) + .as_ref() + } + } + + /// Main entry point for the MM User Core. + /// + /// This is called by the supervisor via `invoke_demoted_routine`. The arguments + /// correspond to the three parameters passed by the supervisor: + /// + /// - `arg1`: Command type ([`UserCommandType`]) + /// - `arg2`: Command-specific data pointer + /// - `arg3`: Command-specific size or auxiliary data + /// + /// Returns 0 on success, or a non-zero status on failure. + pub fn entry_point_worker(&'static self, op_code: u64, arg1: u64, arg2: u64) -> u64 { + let command = match UserCommandType::try_from(op_code) { + Ok(cmd) => cmd, + Err(unknown) => { + log::error!("Unknown command type: {}", unknown); + return efi::Status::INVALID_PARAMETER.as_usize() as u64; + } + }; + + match command { + UserCommandType::StartUserCore => { + self.handle_start_user_core(arg1 as *const c_void) + } + UserCommandType::UserRequest => { + self.handle_user_request(arg1, arg2) + } + UserCommandType::UserApProcedure => { + self.handle_user_ap_procedure(arg1, arg2) + } + } + } + + /// Handle the `StartUserCore` command. + /// + /// This is called once during initialization. The supervisor passes the HOB list + /// pointer as `arg2`. We: + /// 1. Set the static instance + /// 2. Walk HOBs to discover the communication buffer + /// 3. Walk HOBs to discover MM drivers (MemoryAllocationModule HOBs) + /// 4. Read paired depex GuidHobs for each driver + /// 5. 
Evaluate dependency expressions and dispatch drivers in order + fn handle_start_user_core(&'static self, hob_list: *const c_void) -> u64 { + if !self.set_instance() { + log::warn!("MM User Core instance was already set, skipping re-initialization."); + return efi::Status::ALREADY_STARTED.as_usize() as u64; + } + + if hob_list.is_null() { + log::error!("HOB list pointer is null."); + return efi::Status::INVALID_PARAMETER.as_usize() as u64; + } + + log::info!("MM User Core v{} starting initialization...", env!("CARGO_PKG_VERSION")); + + // Enable the heap (syscall page allocator) before doing anything that + // requires dynamic allocation (driver discovery, depex parsing, etc.). + mm_mem::SYSCALL_PAGE_ALLOCATOR.set_initialized(); + + // Parse the HOB list + let hob_list_info = unsafe { + match (hob_list as *const PhaseHandoffInformationTable).as_ref() { + Some(info) => info, + None => { + log::error!("Failed to read HOB list header."); + return efi::Status::INVALID_PARAMETER.as_usize() as u64; + } + } + }; + + let hob = Hob::Handoff(hob_list_info); + + // Discover communication buffer from HOBs + self.discover_comm_buffer(&hob); + + // Initialize the MM System Table (heap-allocated, function pointers + // route to the global protocol DB and MMI DB in mm_services). + let mm_system_table = mm_services::init_mm_system_table(); + log::info!("MM System Table initialized at {:p}", mm_system_table); + + // Publish the HOB list as a configuration table entry so dispatched + // drivers can locate it via the system table (mirrors the C + // `MmInstallConfigurationTable(&gMmCoreMmst, &gEfiHobListGuid, ...)` + // call in `InitializeMmHobList`). 
+ let status = config_table::GLOBAL_CONFIG_TABLE_DB.install_configuration_table( + &patina::guids::HOB_LIST, + hob_list as *mut c_void, + ); + if status != efi::Status::SUCCESS { + log::error!("Failed to install HOB list configuration table: {:?}", status); + } + + // Discover and dispatch MM drivers from HOBs + let dispatch_result = self.dispatcher.discover_and_dispatch_drivers( + &hob, + &self.mmi_db, + &self.protocol_db, + mm_system_table as *const _ as *const core::ffi::c_void, + ); + + match dispatch_result { + Ok(count) => { + log::info!("Successfully dispatched {} MM driver(s).", count); + } + Err(status) => { + log::error!("Driver dispatch failed: {:?}", status); + return status.as_usize() as u64; + } + } + + // Register core MMI handlers (lifecycle events like ready-to-lock, + // end-of-DXE, exit-boot-services, etc.). Matches the C ordering + // where handlers are registered after `MmDispatchFvs()`. + core_handlers::register_core_mmi_handlers(); + + self.initialized.store(true, Ordering::Release); + log::info!("MM User Core initialization complete."); + + efi::Status::SUCCESS.as_usize() as u64 + } + + /// Handle the `UserRequest` command (runtime MMI dispatch). + /// + /// The supervisor passes a pointer to a buffer containing: + /// - `EfiMmEntryContext` (at offset 0) + /// - `MmCommBufferStatus` (at offset `context_size`) + /// + /// For synchronous MMIs the supervisor has already copied the external + /// communication buffer into an internal (user-accessible) region. We: + /// 1. Validate the buffer via the `MmIsCommBuffer` syscall + /// 2. Parse the `EfiMmCommunicateHeader` to extract the handler GUID and data + /// 3. Dispatch via `mmi_manage` with the GUID and data pointer + /// + /// Asynchronous MMIs (timer, etc.) are always dispatched as root-only + /// (`mmi_manage(None, …)`). + /// + /// Mirrors the C `MmEntryPoint` flow in `StandaloneMmCore.c`. 
+ fn handle_user_request(&self, supv_to_user_buffer: u64, context_size: u64) -> u64 { + if supv_to_user_buffer == 0 { + log::error!("Supervisor-to-user buffer is null."); + return efi::Status::INVALID_PARAMETER.as_usize() as u64; + } + + // Read the EfiMmEntryContext + let entry_context = unsafe { + core::ptr::read(supv_to_user_buffer as *const EfiMmEntryContext) + }; + + mm_services::update_cpu_context(entry_context.currently_executing_cpu as usize, entry_context.number_of_cpus as usize); + + // Read MmCommBufferStatus (immediately after the context) + let comm_status = unsafe { + core::ptr::read( + (supv_to_user_buffer as *const u8).add(context_size as usize) + as *const MmCommBufferStatus, + ) + }; + + // ---- Synchronous MMI dispatch ---- + let mut sync_status = efi::Status::NOT_FOUND; + let mut return_buffer_size: u64 = 0; + + let comm_buffer_base = COMM_BUFFER_BASE.load(Ordering::Acquire); + let comm_buffer_size = COMM_BUFFER_SIZE.load(Ordering::Acquire); + + if comm_buffer_base != 0 && comm_status.is_comm_buffer_valid != 0 { + // Validate the communication buffer via a supervisor syscall. 
+ if !mm_mem::is_comm_buffer(comm_buffer_base, comm_buffer_size) { + log::error!( + "MmIsCommBuffer rejected buffer at 0x{:x} size 0x{:x}", + comm_buffer_base, + comm_buffer_size + ); + } else { + sync_status = self.dispatch_synchronous_mmi( + comm_buffer_base, + comm_buffer_size, + &mut return_buffer_size, + ); + } + } + + // ---- Asynchronous MMI dispatch (always runs) ---- + mm_services::GLOBAL_MMI_DB.mmi_manage( + None, + core::ptr::null(), + core::ptr::null_mut(), + core::ptr::null_mut(), + ); + + // Write back the updated status to the supervisor-to-user buffer + let updated_status = MmCommBufferStatus { + is_comm_buffer_valid: 0, + talk_to_supervisor: 0, + _padding: [0; 6], + return_status: if sync_status == efi::Status::SUCCESS { + efi::Status::SUCCESS.as_usize() as u64 + } else { + efi::Status::NOT_FOUND.as_usize() as u64 + }, + return_buffer_size, + }; + + unsafe { + core::ptr::write( + (supv_to_user_buffer as *mut u8).add(context_size as usize) + as *mut MmCommBufferStatus, + updated_status, + ); + } + + efi::Status::SUCCESS.as_usize() as u64 + } + + /// Parse the `EfiMmCommunicateHeader` from the communication buffer and + /// dispatch the appropriate GUID-specific MMI handler. + /// + /// Returns the dispatch status and updates `return_buffer_size` with the + /// total response size (header + data). + fn dispatch_synchronous_mmi( + &self, + comm_buffer_base: u64, + comm_buffer_size: u64, + return_buffer_size: &mut u64, + ) -> efi::Status { + let buffer_size = comm_buffer_size as usize; + + // The buffer must be large enough for at least the communicate header. + if buffer_size < EfiMmCommunicateHeader::size() { + log::error!( + "Communication buffer too small for header: {} < {}", + buffer_size, + EfiMmCommunicateHeader::size() + ); + return efi::Status::BAD_BUFFER_SIZE; + } + + // SAFETY: We verified the buffer is large enough for the header. 
+ let header = unsafe { + core::ptr::read_unaligned(comm_buffer_base as *const EfiMmCommunicateHeader) + }; + + // Determine header layout: check for V3 signature first, then fall + // back to the legacy `EfiMmCommunicateHeader`. + let (comm_guid_ptr, comm_header_size, mut data_size) = + if header.header_guid() == patina::Guid::from_ref(&patina::pi::protocols::communication3::COMMUNICATE_HEADER_V3_GUID) { + // V3 header + let v3 = unsafe { + core::ptr::read_unaligned( + comm_buffer_base as *const patina::pi::protocols::communication3::EfiMmCommunicateHeader, + ) + }; + let header_size = mem::size_of::(); + let total = v3.buffer_size as usize; + if total > buffer_size { + log::error!( + "V3 buffer_size 0x{:x} exceeds available 0x{:x}", + total, + buffer_size + ); + return efi::Status::BAD_BUFFER_SIZE; + } + // GUID to dispatch is `message_guid` in V3 + let guid_offset = core::mem::offset_of!( + patina::pi::protocols::communication3::EfiMmCommunicateHeader, + message_guid + ); + let guid_ptr = (comm_buffer_base as *const u8).wrapping_add(guid_offset) as *const efi::Guid; + (guid_ptr, header_size, total.saturating_sub(header_size)) + } else { + // Legacy header + let message_length = header.message_length(); + let total = EfiMmCommunicateHeader::size() + message_length; + if total > buffer_size { + log::error!( + "Legacy message_length 0x{:x} exceeds available 0x{:x}", + message_length, + buffer_size.saturating_sub(EfiMmCommunicateHeader::size()) + ); + return efi::Status::BAD_BUFFER_SIZE; + } + // GUID to dispatch is `header_guid` in legacy + let guid_ptr = comm_buffer_base as *const efi::Guid; + (guid_ptr, EfiMmCommunicateHeader::size(), message_length) + }; + + // Zero the remainder of the buffer past the message (matches C behaviour). + let used = comm_header_size + data_size; + if used < buffer_size { + unsafe { + core::ptr::write_bytes( + (comm_buffer_base as *mut u8).add(used), + 0, + buffer_size - used, + ); + } + } + + // Dispatch the GUID-specific handler. 
+ let comm_data_ptr = unsafe { + (comm_buffer_base as *mut u8).add(comm_header_size) as *mut c_void + }; + + let status = mm_services::GLOBAL_MMI_DB.mmi_manage( + Some(unsafe { &*comm_guid_ptr }), + core::ptr::null(), + comm_data_ptr, + &mut data_size as *mut usize, + ); + + *return_buffer_size = (data_size + comm_header_size) as u64; + status + } + + /// Handle the `UserApProcedure` command. + /// + /// The supervisor passes the procedure pointer and argument. We call the procedure + /// directly since we're already in user mode. + fn handle_user_ap_procedure(&self, procedure: u64, argument: u64) -> u64 { + if procedure == 0 { + log::error!("AP procedure pointer is null."); + return efi::Status::INVALID_PARAMETER.as_usize() as u64; + } + + log::trace!("Executing AP procedure at 0x{:016x} with arg 0x{:016x}", procedure, argument); + + // SAFETY: The supervisor has validated the procedure pointer before dispatching. + // The procedure follows the EFI AP_PROCEDURE calling convention. + type EfiApProcedure = unsafe extern "efiapi" fn(*mut c_void); + let proc_fn: EfiApProcedure = unsafe { core::mem::transmute(procedure) }; + unsafe { proc_fn(argument as *mut c_void) }; + + efi::Status::SUCCESS.as_usize() as u64 + } + + /// Discover the communication buffer address from HOBs and store it for + /// later use in `handle_user_request`. + /// + /// The supervisor rewrites the HOB's `physical_start` field to point to + /// the internal (user-accessible) copy of the buffer before invoking + /// `StartUserCore`, so the address we read here is the one we should + /// read from at runtime. 
+ fn discover_comm_buffer(&self, hob: &Hob<'_>) { + for current_hob in hob { + if let Hob::GuidHob(guid_hob, data) = current_hob { + if guid_hob.name == MM_COMM_BUFFER_HOB_GUID { + if data.len() >= mem::size_of::() { + let buffer_data = + unsafe { &*(data.as_ptr() as *const MmCommonBufferHobData) }; + let physical_start = + unsafe { core::ptr::addr_of!(buffer_data.physical_start).read_unaligned() }; + let number_of_pages = + unsafe { core::ptr::addr_of!(buffer_data.number_of_pages).read_unaligned() }; + + let buffer_size = number_of_pages.saturating_mul(4096); + + COMM_BUFFER_BASE.store(physical_start, Ordering::Release); + COMM_BUFFER_SIZE.store(buffer_size, Ordering::Release); + + log::info!( + "Found MM communication buffer: base=0x{:016x}, pages={}, size=0x{:x}", + physical_start, + number_of_pages, + buffer_size, + ); + return; + } + } + } + } + + log::warn!("No MM communication buffer HOB found — only root MMI handlers will be supported."); + } +} diff --git a/patina_mm_user_core/src/mm_dispatcher.rs b/patina_mm_user_core/src/mm_dispatcher.rs new file mode 100644 index 000000000..b4b9b7ec6 --- /dev/null +++ b/patina_mm_user_core/src/mm_dispatcher.rs @@ -0,0 +1,378 @@ +//! MM Driver Dispatcher +//! +//! This module is responsible for discovering MM drivers from HOBs and dispatching them +//! in dependency order. It follows the same pattern as the C `StandaloneMmCore` dispatcher +//! in `FwVol.c` and `Dispatcher.c`, and the Rust DXE Core's `pi_dispatcher.rs`. +//! +//! ## Driver Discovery +//! +//! MM drivers are discovered from `MemoryAllocationModule` HOBs in the HOB list. Each driver +//! HOB is identified by having `alloc_descriptor.name == MM_SUPERVISOR_HOB_MEMORY_ALLOC_MODULE_GUID`. +//! The HOB's `module_name` provides the driver GUID, and `entry_point` provides the address to call. +//! +//! Drivers that are the supervisor core or user core themselves are skipped. +//! +//! ## Depex Evaluation +//! +//! 
Each driver's `MemoryAllocationModule` HOB is followed by a `GuidHob` with +//! `name == MM_SUPERVISOR_DEPEX_HOB_GUID` containing the raw dependency expression bytes. +//! The depex is parsed and evaluated against the protocol database. +//! +//! ## Dispatch Order +//! +//! Drivers with satisfied dependencies (or `TRUE`/empty depex) are dispatched first. +//! `BEFORE`/`AFTER` associations are respected: if driver A has `BEFORE(B)`, A is +//! dispatched immediately before B. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +use alloc::{collections::BTreeMap, vec::Vec}; +use core::{cmp::Ordering, ffi::c_void}; + +use patina::{boot_services::c_ptr::CPtr, pi::hob::Hob}; +use patina_internal_depex::{AssociatedDependency, Depex}; +use r_efi::efi; +use spin::Mutex; + +use crate::{ + MM_SUPERVISOR_CORE_GUID, MM_SUPERVISOR_DEPEX_HOB_GUID, + MM_SUPERVISOR_HOB_MEMORY_ALLOC_MODULE_GUID, MM_SUPERVISOR_USER_GUID, + mmi::MmiDatabase, + protocol_db::ProtocolDatabase, + DepexHobData +}; + +// ============================================================================= +// Driver Entry +// ============================================================================= + +/// Represents a discovered MM driver pending dispatch. +#[derive(Debug)] +struct DriverEntry { + /// The GUID identifying this driver (from `MemoryAllocationModule.module_name`). + file_name: efi::Guid, + /// The entry point address of the driver. + entry_point: u64, + /// The base address of the driver image in memory. + _image_base: u64, + /// The size of the driver image in memory. + _image_size: u64, + /// The parsed dependency expression, if any. + depex: Option, +} + +/// Wrapper for `efi::Guid` that implements `Ord` for use in `BTreeMap`. 
+#[derive(Debug, Eq, PartialEq)] +struct OrdGuid(efi::Guid); + +impl PartialOrd for OrdGuid { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +impl Ord for OrdGuid { + fn cmp(&self, other: &Self) -> Ordering { + self.0.as_bytes().cmp(other.0.as_bytes()) + } +} + +// ============================================================================= +// MM Dispatcher +// ============================================================================= + +/// The MM Driver Dispatcher. +/// +/// Discovers drivers from HOBs at initialization time, evaluates their dependency +/// expressions, and dispatches them by calling their entry points. +pub struct MmDispatcher { + /// Tracks whether the dispatcher is currently executing (prevents re-entrancy). + executing: Mutex<bool>, +} + +impl MmDispatcher { + /// Creates a new `MmDispatcher`. + pub const fn new() -> Self { + Self { + executing: Mutex::new(false), + } + } + + /// Discover drivers from HOBs and dispatch them. + /// + /// This is the main entry point called during `StartUserCore`. It: + /// 1. Walks the HOB list to find `MemoryAllocationModule` HOBs with the supervisor alloc GUID + /// 2. Skips the supervisor core and user core modules + /// 3. Reads the paired depex `GuidHob` that follows each driver HOB + /// 4. Evaluates dependencies and dispatches in order + /// + /// Returns the number of drivers successfully dispatched, or an error status.
+ pub fn discover_and_dispatch_drivers( + &self, + hob: &Hob<'_>, + _mmi_db: &MmiDatabase, + protocol_db: &ProtocolDatabase, + mm_system_table: *const c_void, + ) -> Result<usize, efi::Status> { + let mut is_executing = self.executing.lock(); + if *is_executing { + return Err(efi::Status::ALREADY_STARTED); + } + *is_executing = true; + drop(is_executing); + + let drivers = self.discover_drivers(hob); + log::info!("Discovered {} MM driver(s) from HOBs.", drivers.len()); + + let dispatched = self.dispatch_drivers(drivers, protocol_db, mm_system_table); + + + *self.executing.lock() = false; + Ok(dispatched) + } + + /// Walk the HOB list and collect driver entries. + /// + /// For each `MemoryAllocationModule` HOB with the supervisor allocation GUID: + /// - Skip if the module is the supervisor core or user core + /// - Look at the next HOB for a depex `GuidHob` with `MM_SUPERVISOR_DEPEX_HOB_GUID` + /// - Create a `DriverEntry` with the parsed depex + fn discover_drivers(&self, hob: &Hob<'_>) -> Vec<DriverEntry> { + let mut drivers = Vec::new(); + + // Collect all HOBs into a vec for indexed access (we need to look ahead for depex) + let all_hobs: Vec<Hob<'_>> = hob.into_iter().collect(); + + for (index, current_hob) in all_hobs.iter().enumerate() { + if let Hob::MemoryAllocationModule(mem_alloc_mod) = current_hob { + // Check if this is an MM Supervisor module allocation + if mem_alloc_mod.alloc_descriptor.name != MM_SUPERVISOR_HOB_MEMORY_ALLOC_MODULE_GUID { + continue; + } + + let module_name = mem_alloc_mod.module_name; + + // Skip the supervisor core and user core modules + if module_name == MM_SUPERVISOR_CORE_GUID || module_name == MM_SUPERVISOR_USER_GUID { + log::info!("Skipping core module: {:?}", module_name); + continue; + } + + log::info!( + "Found MM driver: name={:?}, entry=0x{:016x}, base=0x{:016x}, size=0x{:x}", + module_name, + mem_alloc_mod.entry_point, + mem_alloc_mod.alloc_descriptor.memory_base_address, + mem_alloc_mod.alloc_descriptor.memory_length, + ); + + // Look for a paired depex
GuidHob in the next HOB + let depex: Option<Depex> = if let Some(next_hob) = all_hobs.get(index + 1) { + if let Hob::GuidHob(guid_hob, data) = next_hob { + if guid_hob.name == MM_SUPERVISOR_DEPEX_HOB_GUID { + log::debug!(" Found depex HOB ({} bytes)", data.len()); + if data.is_empty() { + None + } else { + // Check to make sure the name matches the expected depex HOB GUID before parsing + let depex_hob_data = data.as_ref() as *const [u8] as *const DepexHobData; + // SAFETY: We trust that the supervisor correctly formats the depex HOB data + let depex_hob_data = unsafe { &*depex_hob_data }; + if depex_hob_data.name != module_name { + panic!( + "Depex HOB module name {:?} does not match driver module name {:?}", + depex_hob_data.name, module_name + ); + } + // print depex_hob_data.depex_expression pointer and length + log::info!(" Parsed depex HOB {:#x?} for driver {:?} at {:#x?}: expression length = {}", + depex_hob_data.as_ptr(), + module_name, + depex_hob_data.depex_expression.as_ptr(), + depex_hob_data.depex_expression_size); + // SAFETY: depex_expression is a zero-length array (flexible array member). + // The actual bytes follow the struct in memory; use from_raw_parts with the real size.
+ let depex_bytes = unsafe { + core::slice::from_raw_parts( + depex_hob_data.depex_expression.as_ptr(), + depex_hob_data.depex_expression_size as usize, + ) + }; + Some(Depex::from(depex_bytes)) + } + } else { + log::debug!(" No depex HOB (next HOB has different GUID)"); + None + } + } else { + log::debug!(" No depex HOB (next HOB is not GuidHob)"); + None + } + } else { + log::debug!(" No depex HOB (no next HOB)"); + None + }; + + log::info!( + " Driver {:?} has depex: {:?}", + module_name, + depex + ); + + drivers.push(DriverEntry { + file_name: module_name.into_inner(), + entry_point: mem_alloc_mod.entry_point, + _image_base: mem_alloc_mod.alloc_descriptor.memory_base_address, + _image_size: mem_alloc_mod.alloc_descriptor.memory_length, + depex, + }); + } + } + + drivers + } + + /// Dispatch drivers in dependency order. + /// + /// This implements a multi-pass dispatch loop similar to the DXE Core's `PiDispatcher`: + /// 1. Evaluate each pending driver's depex against the current protocol database + /// 2. Drivers with satisfied (or absent) depexes are scheduled + /// 3. Before/After associations are handled by reordering the scheduled list + /// 4. Each scheduled driver's entry point is called + /// 5. Repeat until no more drivers can be dispatched + fn dispatch_drivers( + &self, + mut pending: Vec<DriverEntry>, + protocol_db: &ProtocolDatabase, + mm_system_table: *const c_void, + ) -> usize { + let mut total_dispatched = 0; + + loop { + // Merge protocols from both the original protocol_db (for depex evaluation) + // and the global protocol DB (populated by drivers via the MM System Table). + let mut registered_protocols = protocol_db.registered_protocols(); + registered_protocols.extend(crate::mm_services::GLOBAL_PROTOCOL_DB.registered_protocols()); + let mut scheduled = Vec::new(); + let mut still_pending = Vec::new(); + let mut associated_before: BTreeMap<OrdGuid, Vec<DriverEntry>> = BTreeMap::new(); + let mut associated_after: BTreeMap<OrdGuid, Vec<DriverEntry>> = BTreeMap::new(); + + for mut driver in pending.drain(..)
{ + let depex_satisfied = match driver.depex { + Some(ref mut depex) => depex.eval(&registered_protocols), + // No depex means the driver can be dispatched immediately + None => true, + }; + + if depex_satisfied { + scheduled.push(driver); + } else { + + // Check for Before/After associations + match driver.depex.as_ref().map(|d| d.is_associated()) { + Some(Some(AssociatedDependency::Before(guid))) => { + + associated_before + .entry(OrdGuid(guid)) + .or_default() + .push(driver); + } + Some(Some(AssociatedDependency::After(guid))) => { + + associated_after + .entry(OrdGuid(guid)) + .or_default() + .push(driver); + } + _ => { + + still_pending.push(driver); + } + } + + } + + } + + if scheduled.is_empty() { + + // No more drivers can be dispatched; move remaining to pending for logging + pending = still_pending; + break; + } + + // Build the final dispatch order respecting Before/After associations + let ordered: Vec<DriverEntry> = scheduled + .into_iter() + .flat_map(|driver| { + let filename = OrdGuid(driver.file_name); + let mut list = associated_before.remove(&filename).unwrap_or_default(); + let mut after_list = associated_after.remove(&filename).unwrap_or_default(); + list.push(driver); + list.append(&mut after_list); + list + }) + .collect(); + + // Dispatch each scheduled driver + for driver in ordered { + log::info!( + "Dispatching MM driver {:?} at entry 0x{:016x}", + driver.file_name, + driver.entry_point, + ); + + // Call the driver's entry point. + // MM driver entry signature: EFI_STATUS EFIAPI DriverEntry(EFI_HANDLE ImageHandle, EFI_MM_SYSTEM_TABLE *MmSystemTable) + // We pass a null image handle and the system table pointer.
+ type MmDriverEntryPoint = + unsafe extern "efiapi" fn(efi::Handle, *const c_void) -> efi::Status; + let entry_fn: MmDriverEntryPoint = + unsafe { core::mem::transmute(driver.entry_point) }; + + let status = unsafe { + entry_fn(core::ptr::null_mut(), mm_system_table) + }; + + if status == efi::Status::SUCCESS { + log::info!(" Driver {:?} returned SUCCESS.", driver.file_name); + total_dispatched += 1; + } else { + log::warn!( + " Driver {:?} returned status: 0x{:x}", + driver.file_name, + status.as_usize(), + ); + } + + } + + // Remaining unmatched Before/After drivers go back to pending + for (_guid, drivers) in associated_before { + still_pending.extend(drivers); + } + + for (_guid, drivers) in associated_after { + still_pending.extend(drivers); + } + + pending = still_pending; + } + + // Log any undispatched drivers + for driver in &pending { + log::warn!( + "Driver {:?} discovered but not dispatched (unsatisfied depex).", + driver.file_name, + ); + } + + total_dispatched + } +} diff --git a/patina_mm_user_core/src/mm_mem.rs b/patina_mm_user_core/src/mm_mem.rs new file mode 100644 index 000000000..64ff73eec --- /dev/null +++ b/patina_mm_user_core/src/mm_mem.rs @@ -0,0 +1,250 @@ +//! MM User Core Memory Allocator +//! +//! Provides a [`SyscallPageAllocator`] that implements [`PageAllocatorBackend`] +//! by issuing `syscall` instructions to the MM Supervisor for page allocation +//! and deallocation. +//! +//! The pool allocator, `PoolAllocator`, is wired up as the `#[global_allocator]`. +//! +//! ## Syscall ABI +//! +//! The MM Supervisor exposes page allocation via the following syscall indices +//! (defined in SysCallLib.h / `SyscallIndex` enum in the supervisor): +//! +//! | Syscall | RAX | RDX (arg1) | R8 (arg2) | R9 (arg3) | +//! |-------------|-----------|----------------|----------------|-------------| +//! | AllocPage | `0x10004` | alloc_type (0) | mem_type (6) | page_count | +//! | FreePage | `0x10005` | address | page_count | 0 | +//! +//! 
The supervisor returns: +//! - RAX: result value (allocated address for AllocPage, 0 for FreePage) +//! - RDX: EFI status (0 = success) +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +use core::sync::atomic::{AtomicBool, Ordering}; + +use crate::pool_allocator::{PageAllocError, PageAllocatorBackend, PoolAllocator}; +use patina_internal_mm_common::SyscallIndex; + +// ============================================================================ +// AllocateType constants (matching EFI_ALLOCATE_TYPE) +// ============================================================================ + +/// `AllocateAnyPages` — allocate any available pages. +const ALLOCATE_ANY_PAGES: u64 = 0; + +// ============================================================================ +// Memory type constants (matching EFI_MEMORY_TYPE) +// ============================================================================ + +/// `EfiRuntimeServicesData` — the memory type used for MM pool allocations. +const RUNTIME_SERVICES_DATA: u64 = 6; + +// ============================================================================ +// Syscall helpers +// ============================================================================ + +/// Result of a raw syscall to the supervisor. +#[derive(Debug, Clone, Copy)] +struct RawSyscallResult { + /// Value returned in RAX (e.g., allocated address). + value: u64, + /// Status returned in RDX (EFI_STATUS). + status: u64, +} + +/// Issue a `syscall` instruction to the MM Supervisor. +/// +/// # ABI +/// +/// - RAX = call_index +/// - RDX = arg1 +/// - R8 = arg2 +/// - R9 = arg3 +/// +/// On return: +/// - RAX = result value +/// - RDX = status +/// +/// # Safety +/// +/// This is inherently unsafe — it transfers control to the supervisor and +/// the arguments must be valid for the specific syscall index. 
+#[cfg(target_arch = "x86_64")] +unsafe fn raw_syscall(call_index: u64, arg1: u64, arg2: u64, arg3: u64) -> RawSyscallResult { + let value: u64; + let status: u64; + + // The `syscall` instruction uses: + // RAX = syscall number + // RCX = return address (set by CPU on syscall entry, clobbered) + // R11 = RFLAGS (set by CPU on syscall entry, clobbered) + // RDX = arg1 (also used for status return) + // R8 = arg2 + // R9 = arg3 + // + // On return from the supervisor: + // RAX = result value + // RDX = status + unsafe { + core::arch::asm!( + "syscall", + inlateout("rax") call_index => value, + inlateout("rdx") arg1 => status, + in("r8") arg2, + in("r9") arg3, + // RCX and R11 are clobbered by the `syscall` instruction. + lateout("rcx") _, + lateout("r11") _, + options(nostack), + ); + } + + RawSyscallResult { value, status } +} + +// ============================================================================ +// Communication buffer validation +// ============================================================================ + +/// Validate that a given memory range is a valid MM communication buffer by +/// issuing the `MmIsCommBuffer` syscall to the supervisor. +/// +/// Returns `true` if the supervisor confirms the range falls entirely within +/// the user communication buffer region. +pub fn is_comm_buffer(address: u64, size: u64) -> bool { + let result = unsafe { raw_syscall(SyscallIndex::MmIsCommBuffer.as_u64(), address, size, 0) }; + result.value != 0 +} + +// ============================================================================ +// SyscallPageAllocator +// ============================================================================ + +/// A page allocator backend that issues `syscall` instructions to the MM Supervisor. +/// +/// This is used as the [`PageAllocatorBackend`] for the MM User Core's +/// [`PoolAllocator`] and global allocator. 
+/// +/// ## Initialization +/// +/// Call [`SyscallPageAllocator::set_initialized`] after the user core has been +/// set up and is ready to issue syscalls (i.e., during `StartUserCore` handling, +/// before driver dispatch begins). +pub struct SyscallPageAllocator { + /// Whether the allocator has been activated. Before this is set, all + /// allocations will fail immediately. This prevents accidental allocations + /// before the syscall interface is ready. + initialized: AtomicBool, +} + +// SAFETY: SyscallPageAllocator uses an atomic flag and the syscall interface is +// re-entrant from the BSP. +unsafe impl Send for SyscallPageAllocator {} +unsafe impl Sync for SyscallPageAllocator {} + +impl SyscallPageAllocator { + /// Creates a new uninitialized syscall page allocator. + pub const fn new() -> Self { + Self { + initialized: AtomicBool::new(false), + } + } + + /// Marks the allocator as ready. Must be called after the syscall interface + /// is available (i.e., early in `StartUserCore` handling). 
+ pub fn set_initialized(&self) { + self.initialized.store(true, Ordering::Release); + log::info!("SyscallPageAllocator initialized — heap is now available."); + } +} + +impl PageAllocatorBackend for SyscallPageAllocator { + fn allocate_pages(&self, num_pages: usize) -> Result<u64, PageAllocError> { + if !self.initialized.load(Ordering::Acquire) { + return Err(PageAllocError::NotInitialized); + } + + if num_pages == 0 { + return Err(PageAllocError::OutOfMemory); + } + + let result = unsafe { + raw_syscall( + SyscallIndex::AllocPage.as_u64(), + ALLOCATE_ANY_PAGES, + RUNTIME_SERVICES_DATA, + num_pages as u64, + ) + }; + + if result.status != 0 { + log::warn!( + "SyscallPageAllocator: AllocPage({} pages) failed with status 0x{:x}", + num_pages, + result.status + ); + return Err(PageAllocError::SyscallFailed(result.status)); + } + + log::trace!( + "SyscallPageAllocator: allocated {} page(s) at 0x{:016x}", + num_pages, + result.value + ); + + Ok(result.value) + } + + fn free_pages(&self, addr: u64, num_pages: usize) -> Result<(), PageAllocError> { + if !self.initialized.load(Ordering::Acquire) { + return Err(PageAllocError::NotInitialized); + } + + unsafe { + raw_syscall( + SyscallIndex::FreePage.as_u64(), + addr, + num_pages as u64, + 0, + ) + }; + + log::trace!( + "SyscallPageAllocator: freed {} page(s) at 0x{:016x}", + num_pages, + addr + ); + + Ok(()) + } + + fn is_initialized(&self) -> bool { + self.initialized.load(Ordering::Acquire) + } +} + +// ============================================================================ +// Global Allocator Instance +// ============================================================================ + +/// Global page allocator instance for the user core. +/// +/// This issues syscalls to the supervisor for actual page allocation. +/// Call [`SYSCALL_PAGE_ALLOCATOR.set_initialized()`] during `StartUserCore` +/// to enable the heap.
+pub static SYSCALL_PAGE_ALLOCATOR: SyscallPageAllocator = SyscallPageAllocator::new(); + +/// Global pool allocator instance. +/// +/// Uses the shared [`PoolAllocator`] from `pool_allocator`, +/// backed by [`SyscallPageAllocator`] for page allocation via syscalls. +#[global_allocator] +static GLOBAL_ALLOCATOR: PoolAllocator = + PoolAllocator::new(&SYSCALL_PAGE_ALLOCATOR); diff --git a/patina_mm_user_core/src/mm_services.rs b/patina_mm_user_core/src/mm_services.rs new file mode 100644 index 000000000..190c093e9 --- /dev/null +++ b/patina_mm_user_core/src/mm_services.rs @@ -0,0 +1,792 @@ +//! MM System Table (MMST) Construction — User Core Implementation +//! +//! This module builds the concrete `EfiMmSystemTable` instance that is passed +//! to dispatched MM drivers. The *type definitions* (`EfiMmSystemTable`, +//! `MmServices` trait, `StandardMmServices`, etc.) live in the Patina SDK at +//! [`patina::mm_services`] — this module only provides the `extern "efiapi"` +//! thunk functions, the global databases they route to, and the one-time +//! `init_mm_system_table()` entry point. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +extern crate alloc; + +use alloc::{boxed::Box, vec::Vec}; +use core::ffi::c_void; + +use r_efi::efi; +use spin::Once; + +use patina::pi::mm_cis::{ + EfiMmSystemTable, MmCpuIoAccess, MmCpuIoProtocol, MmiHandlerEntryPoint, + MM_MMST_SIGNATURE, MM_SYSTEM_TABLE_REVISION, +}; +use crate::pool_allocator::PageAllocatorBackend; + +// ============================================================================= +// Global system table pointer +// ============================================================================= + +/// Wrapper around a raw pointer so it can live in a `static Once<>`. +struct SendSyncPtr(*mut EfiMmSystemTable); + +// SAFETY: The pointer is only written once (in `init_mm_system_table`) and read +// immutably afterwards. 
All mutable state behind it is protected by locks. +unsafe impl Send for SendSyncPtr {} +unsafe impl Sync for SendSyncPtr {} + +/// The heap-allocated MM System Table. Initialized once in [`init_mm_system_table`]. +static MM_SYSTEM_TABLE: Once = Once::new(); + +/// Initialize the MM System Table. +/// +/// Allocates the table on the heap and populates it with service function pointers +/// that route to the user core's databases. Must be called once during +/// `StartUserCore`, **after** the heap is available. +/// +/// Returns a raw pointer to the table suitable for passing to driver entry points. +pub fn init_mm_system_table() -> *mut EfiMmSystemTable { + MM_SYSTEM_TABLE.call_once(|| { + let table = EfiMmSystemTable { + hdr: efi::TableHeader { + signature: MM_MMST_SIGNATURE as u64, + revision: MM_SYSTEM_TABLE_REVISION, + header_size: core::mem::size_of::() as u32, + crc32: 0, + reserved: 0, + }, + mm_firmware_vendor: core::ptr::null_mut(), + mm_firmware_revision: 0, + + mm_install_configuration_table: mm_install_configuration_table_impl, + + mm_io: MmCpuIoProtocol { + mem: MmCpuIoAccess { + read: mm_io_not_available, + write: mm_io_not_available, + }, + io: MmCpuIoAccess { + read: mm_io_not_available, + write: mm_io_not_available, + }, + }, + + mm_allocate_pool: mm_allocate_pool_impl, + mm_free_pool: mm_free_pool_impl, + mm_allocate_pages: mm_allocate_pages_impl, + mm_free_pages: mm_free_pages_impl, + + mm_startup_this_ap: mm_startup_this_ap_not_available, + + currently_executing_cpu: 0, + number_of_cpus: 0, + cpu_save_state_size: core::ptr::null_mut(), + cpu_save_state: core::ptr::null_mut(), + + number_of_table_entries: 0, + mm_configuration_table: core::ptr::null_mut(), + + mm_install_protocol_interface: mm_install_protocol_interface_impl, + mm_uninstall_protocol_interface: mm_uninstall_protocol_interface_impl, + mm_handle_protocol: mm_handle_protocol_impl, + mm_register_protocol_notify: mm_register_protocol_notify_impl, + mm_locate_handle: 
mm_locate_handle_impl, + mm_locate_protocol: mm_locate_protocol_impl, + + mmi_manage: mmi_manage_impl, + mmi_handler_register: mmi_handler_register_impl, + mmi_handler_unregister: mmi_handler_unregister_impl, + }; + + let ptr = Box::into_raw(Box::new(table)); + log::info!("MM System Table allocated at {:p}", ptr); + SendSyncPtr(ptr) + }).0 +} + +/// Returns the MM System Table pointer, or null if not yet initialized. +pub fn get_mm_system_table() -> *mut EfiMmSystemTable { + MM_SYSTEM_TABLE.get().map(|p| p.0).unwrap_or(core::ptr::null_mut()) +} + +// ============================================================================= +// Service implementations — I/O (stubs) +// ============================================================================= + +unsafe extern "efiapi" fn mm_io_not_available( + _this: *const MmCpuIoAccess, + _width: usize, + _address: u64, + _count: usize, + _buffer: *mut c_void, +) -> efi::Status { + efi::Status::UNSUPPORTED +} + +// ============================================================================= +// Service implementations — Configuration Table +// ============================================================================= + +unsafe extern "efiapi" fn mm_install_configuration_table_impl( + _system_table: *const EfiMmSystemTable, + guid: *const efi::Guid, + table: *mut c_void, + _table_size: usize, +) -> efi::Status { + if guid.is_null() { + return efi::Status::INVALID_PARAMETER; + } + + let guid = unsafe { &*guid }; + crate::config_table::GLOBAL_CONFIG_TABLE_DB.install_configuration_table(guid, table) +} + +// ============================================================================= +// Service implementations — Memory services (syscall-backed) +// ============================================================================= + +extern "efiapi" fn mm_allocate_pool_impl( + _pool_type: efi::MemoryType, + size: usize, + buffer: *mut *mut c_void, +) -> efi::Status { + if buffer.is_null() || size == 0 { + return 
efi::Status::INVALID_PARAMETER; + } + + let layout = match core::alloc::Layout::from_size_align(size, 8) { + Ok(l) => l, + Err(_) => return efi::Status::INVALID_PARAMETER, + }; + + let ptr = unsafe { alloc::alloc::alloc(layout) }; + if ptr.is_null() { + return efi::Status::OUT_OF_RESOURCES; + } + + unsafe { *buffer = ptr as *mut c_void }; + efi::Status::SUCCESS +} + +extern "efiapi" fn mm_free_pool_impl( + buffer: *mut c_void, +) -> efi::Status { + if buffer.is_null() { + return efi::Status::INVALID_PARAMETER; + } + + let layout = unsafe { core::alloc::Layout::from_size_align_unchecked(1, 1) }; + unsafe { alloc::alloc::dealloc(buffer as *mut u8, layout) }; + efi::Status::SUCCESS +} + +extern "efiapi" fn mm_allocate_pages_impl( + _alloc_type: efi::AllocateType, + _memory_type: efi::MemoryType, + pages: usize, + memory: *mut efi::PhysicalAddress, +) -> efi::Status { + if memory.is_null() || pages == 0 { + return efi::Status::INVALID_PARAMETER; + } + + match crate::mm_mem::SYSCALL_PAGE_ALLOCATOR.allocate_pages(pages) { + Ok(addr) => { + unsafe { *memory = addr }; + efi::Status::SUCCESS + } + Err(_) => efi::Status::OUT_OF_RESOURCES, + } +} + +extern "efiapi" fn mm_free_pages_impl( + memory: efi::PhysicalAddress, + pages: usize, +) -> efi::Status { + if memory == 0 || pages == 0 { + return efi::Status::INVALID_PARAMETER; + } + + match crate::mm_mem::SYSCALL_PAGE_ALLOCATOR.free_pages(memory, pages) { + Ok(()) => efi::Status::SUCCESS, + Err(_) => efi::Status::INVALID_PARAMETER, + } +} + +// ============================================================================= +// Service implementations — MP service (stub) +// ============================================================================= + +unsafe extern "efiapi" fn mm_startup_this_ap_not_available( + _procedure: usize, + _cpu_number: usize, + _proc_arguments: *mut c_void, +) -> efi::Status { + efi::Status::UNSUPPORTED +} + +// ============================================================================= +// 
Service implementations — Protocol services +// ============================================================================= + +extern "efiapi" fn mm_install_protocol_interface_impl( + handle: *mut efi::Handle, + protocol: *mut efi::Guid, + _interface_type: efi::InterfaceType, + interface: *mut c_void, +) -> efi::Status { + if handle.is_null() || protocol.is_null() { + return efi::Status::INVALID_PARAMETER; + } + + let guid = unsafe { &*protocol }; + let caller_handle = unsafe { *handle }; + + match GLOBAL_PROTOCOL_DB.install_protocol(caller_handle, guid, interface) { + Ok((new_handle, pending_notifies)) => { + unsafe { *handle = new_handle }; + // Fire notifications outside the DB lock. + for notify in pending_notifies { + unsafe { + (notify.function)( + ¬ify.guid as *const efi::Guid, + notify.interface, + notify.handle, + ); + } + } + efi::Status::SUCCESS + } + Err(status) => status, + } +} + +extern "efiapi" fn mm_uninstall_protocol_interface_impl( + handle: efi::Handle, + protocol: *mut efi::Guid, + interface: *mut c_void, +) -> efi::Status { + if handle.is_null() || protocol.is_null() { + return efi::Status::INVALID_PARAMETER; + } + + let guid = unsafe { &*protocol }; + + match GLOBAL_PROTOCOL_DB.uninstall_protocol(handle, guid, interface) { + Ok(()) => efi::Status::SUCCESS, + Err(status) => status, + } +} + +extern "efiapi" fn mm_handle_protocol_impl( + handle: efi::Handle, + protocol: *mut efi::Guid, + interface: *mut *mut c_void, +) -> efi::Status { + if protocol.is_null() { + return efi::Status::INVALID_PARAMETER; + } + + if interface.is_null() { + return efi::Status::INVALID_PARAMETER; + } + // C reference: *Interface = NULL before lookup. 
+ unsafe { *interface = core::ptr::null_mut() }; + + if handle.is_null() { + return efi::Status::INVALID_PARAMETER; + } + + let guid = unsafe { &*protocol }; + + match GLOBAL_PROTOCOL_DB.handle_protocol(handle, guid) { + Some(iface) => { + unsafe { *interface = iface }; + efi::Status::SUCCESS + } + None => efi::Status::UNSUPPORTED, + } +} + +extern "efiapi" fn mm_register_protocol_notify_impl( + protocol: *const efi::Guid, + function: usize, + registration: *mut *mut c_void, +) -> efi::Status { + if protocol.is_null() || registration.is_null() { + return efi::Status::INVALID_PARAMETER; + } + + let guid = unsafe { &*protocol }; + + if function == 0 { + // Function is NULL → unregister the notification identified by *Registration. + let reg = unsafe { *registration }; + match GLOBAL_PROTOCOL_DB.unregister_protocol_notify(guid, reg) { + Ok(()) => efi::Status::SUCCESS, + Err(status) => status, + } + } else { + // Register a new notification. + // SAFETY: function is an `EFI_MM_NOTIFY_FN` function pointer passed as usize. 
+ let notify_fn: MmNotifyFn = unsafe { core::mem::transmute(function) }; + let token = GLOBAL_PROTOCOL_DB.register_protocol_notify(guid, notify_fn); + unsafe { *registration = token }; + efi::Status::SUCCESS + } +} + +extern "efiapi" fn mm_locate_handle_impl( + search_type: efi::LocateSearchType, + protocol: *mut efi::Guid, + _search_key: *mut c_void, + buffer_size: *mut usize, + buffer: *mut efi::Handle, +) -> efi::Status { + if buffer_size.is_null() { + return efi::Status::INVALID_PARAMETER; + } + + let handles = match search_type { + efi::ALL_HANDLES => GLOBAL_PROTOCOL_DB.all_handles(), + efi::BY_PROTOCOL => { + if protocol.is_null() { + return efi::Status::INVALID_PARAMETER; + } + let guid = unsafe { &*protocol }; + GLOBAL_PROTOCOL_DB.locate_handle_by_protocol(guid) + } + _ => { + log::warn!("MmLocateHandle: search type {} not yet supported", search_type); + return efi::Status::UNSUPPORTED; + } + }; + + if handles.is_empty() { + return efi::Status::NOT_FOUND; + } + + let required_size = handles.len() * core::mem::size_of::(); + let caller_size = unsafe { *buffer_size }; + unsafe { *buffer_size = required_size }; + + if caller_size < required_size { + return efi::Status::BUFFER_TOO_SMALL; + } + + if buffer.is_null() { + return efi::Status::INVALID_PARAMETER; + } + + unsafe { + core::ptr::copy_nonoverlapping(handles.as_ptr(), buffer, handles.len()); + } + efi::Status::SUCCESS +} + +extern "efiapi" fn mm_locate_protocol_impl( + protocol: *mut efi::Guid, + _registration: *mut c_void, + interface: *mut *mut c_void, +) -> efi::Status { + if protocol.is_null() || interface.is_null() { + return efi::Status::INVALID_PARAMETER; + } + + let guid = unsafe { &*protocol }; + + match GLOBAL_PROTOCOL_DB.locate_protocol(guid) { + Some(iface) => { + unsafe { *interface = iface }; + efi::Status::SUCCESS + } + None => efi::Status::NOT_FOUND, + } +} + +// ============================================================================= +// Service implementations — MMI management +// 
============================================================================= + +unsafe extern "efiapi" fn mmi_manage_impl( + handler_type: *const efi::Guid, + context: *const c_void, + comm_buffer: *mut c_void, + comm_buffer_size: *mut usize, +) -> efi::Status { + let guid = if handler_type.is_null() { + None + } else { + Some(unsafe { &*handler_type }) + }; + + GLOBAL_MMI_DB.mmi_manage(guid, context, comm_buffer, comm_buffer_size) +} + +unsafe extern "efiapi" fn mmi_handler_register_impl( + handler: MmiHandlerEntryPoint, + handler_type: *const efi::Guid, + dispatch_handle: *mut efi::Handle, +) -> efi::Status { + if dispatch_handle.is_null() { + return efi::Status::INVALID_PARAMETER; + } + + let guid = if handler_type.is_null() { + None + } else { + Some(unsafe { &*handler_type }) + }; + + match GLOBAL_MMI_DB.mmi_handler_register(handler, guid) { + Ok(handle) => { + unsafe { *dispatch_handle = handle }; + efi::Status::SUCCESS + } + Err(status) => status, + } +} + +unsafe extern "efiapi" fn mmi_handler_unregister_impl( + dispatch_handle: efi::Handle, +) -> efi::Status { + match GLOBAL_MMI_DB.mmi_handler_unregister(dispatch_handle) { + Ok(()) => efi::Status::SUCCESS, + Err(status) => status, + } +} + +// ============================================================================= +// Global databases accessed by service thunks +// ============================================================================= + +use crate::mmi::MmiDatabase; + +/// Global MMI handler database used by the system table services. +pub static GLOBAL_MMI_DB: MmiDatabase = MmiDatabase::new(); + +// ============================================================================= +// Protocol database — handle-aware, with notify support +// ============================================================================= + +/// `EFI_MM_NOTIFY_FN` — callback invoked when a protocol is installed. 
+/// +/// ```c +/// typedef EFI_STATUS (EFIAPI *EFI_MM_NOTIFY_FN)( +/// IN CONST EFI_GUID *Protocol, +/// IN VOID *Interface, +/// IN EFI_HANDLE Handle +/// ); +/// ``` +type MmNotifyFn = unsafe extern "efiapi" fn( + *const efi::Guid, + *mut c_void, + efi::Handle, +) -> efi::Status; + +/// Per-handle state: all protocol interfaces installed on one handle. +struct MmHandle { + protocols: Vec<(efi::Guid, *mut c_void)>, +} + +/// A registered protocol notification. +struct ProtocolNotifyEntry { + guid: efi::Guid, + function: MmNotifyFn, + /// Unique token returned via `*Registration`. + token: usize, +} + +/// Info collected under the lock, fired after the lock is released. +pub struct PendingNotify { + pub function: MmNotifyFn, + pub guid: efi::Guid, + pub interface: *mut c_void, + pub handle: efi::Handle, +} + +struct MmProtocolDatabaseInner { + /// All handles: (opaque id, per-handle data). + handles: Vec<(usize, MmHandle)>, + /// Registered protocol notifications. + notifications: Vec<ProtocolNotifyEntry>, + /// Next monotonic id for handle allocation (starts at 1 to avoid null). + next_handle_id: usize, + /// Next monotonic id for registration tokens (starts at 1 to avoid null). + next_registration_id: usize, +} + +/// Handle-aware protocol database with notification support. +/// +/// Mirrors the C `StandaloneMmCore` handle/protocol infrastructure +/// (`IHANDLE`, `PROTOCOL_ENTRY`, `PROTOCOL_INTERFACE`, `PROTOCOL_NOTIFY`). +pub struct MmProtocolDatabase { + inner: spin::Mutex<MmProtocolDatabaseInner>, +} + +// SAFETY: All mutable state is behind a spin::Mutex.
+unsafe impl Send for MmProtocolDatabase {} +unsafe impl Sync for MmProtocolDatabase {} + +impl MmProtocolDatabase { + pub const fn new() -> Self { + Self { + inner: spin::Mutex::new(MmProtocolDatabaseInner { + handles: Vec::new(), + notifications: Vec::new(), + next_handle_id: 1, + next_registration_id: 1, + }), + } + } + + // ----------------------------------------------------------------- + // Install / Uninstall + // ----------------------------------------------------------------- + + /// Install a protocol interface onto a handle. + /// + /// If `handle` is null a new handle is allocated. Returns the + /// (possibly new) handle and any pending notify callbacks that must + /// be invoked **after** the caller has released the lock. + pub fn install_protocol( + &self, + handle: efi::Handle, + guid: &efi::Guid, + interface: *mut c_void, + ) -> Result<(efi::Handle, Vec), efi::Status> { + let mut inner = self.inner.lock(); + + let handle_id = if handle.is_null() { + // Allocate a new handle. + let id = inner.next_handle_id; + inner.next_handle_id += 1; + inner.handles.push((id, MmHandle { protocols: Vec::new() })); + id + } else { + let id = handle as usize; + // Validate the handle exists. + if !inner.handles.iter().any(|(h, _)| *h == id) { + return Err(efi::Status::INVALID_PARAMETER); + } + // Reject duplicate: same protocol already on this handle. + let mm_handle = &inner.handles.iter().find(|(h, _)| *h == id).unwrap().1; + if mm_handle.protocols.iter().any(|(g, _)| g == guid) { + return Err(efi::Status::INVALID_PARAMETER); + } + id + }; + + // Add the protocol interface to the handle. + let mm_handle = &mut inner.handles.iter_mut().find(|(h, _)| *h == handle_id).unwrap().1; + mm_handle.protocols.push((*guid, interface)); + + // Collect pending notifications. 
+ let actual_handle = handle_id as efi::Handle; + let notifies: Vec = inner + .notifications + .iter() + .filter(|n| n.guid == *guid) + .map(|n| PendingNotify { + function: n.function, + guid: *guid, + interface, + handle: actual_handle, + }) + .collect(); + + log::debug!("MmInstallProtocolInterface: {:?} on handle {:p}", guid, actual_handle); + Ok((actual_handle, notifies)) + } + + /// Uninstall a protocol interface from a handle. + /// + /// If the handle has no remaining protocols it is removed from the + /// database (matching the C `MmUninstallProtocolInterface` behaviour). + pub fn uninstall_protocol( + &self, + handle: efi::Handle, + guid: &efi::Guid, + interface: *mut c_void, + ) -> Result<(), efi::Status> { + let mut inner = self.inner.lock(); + let id = handle as usize; + + let mm_handle = match inner.handles.iter_mut().find(|(h, _)| *h == id) { + Some((_, h)) => h, + None => return Err(efi::Status::INVALID_PARAMETER), + }; + + if let Some(pos) = mm_handle.protocols.iter().position(|(g, i)| g == guid && *i == interface) { + mm_handle.protocols.remove(pos); + } else { + return Err(efi::Status::NOT_FOUND); + } + + // Remove the handle entirely when it has no more protocols. + if inner.handles.iter().find(|(h, _)| *h == id).unwrap().1.protocols.is_empty() { + inner.handles.retain(|(h, _)| *h != id); + } + + Ok(()) + } + + // ----------------------------------------------------------------- + // Lookup + // ----------------------------------------------------------------- + + /// Look up a specific protocol on a specific handle (`HandleProtocol`). + pub fn handle_protocol( + &self, + handle: efi::Handle, + guid: &efi::Guid, + ) -> Option<*mut c_void> { + let inner = self.inner.lock(); + let id = handle as usize; + let mm_handle = &inner.handles.iter().find(|(h, _)| *h == id)?.1; + mm_handle.protocols.iter().find(|(g, _)| g == guid).map(|(_, i)| *i) + } + + /// Locate the first installed interface for a GUID across all handles. 
+ pub fn locate_protocol(&self, guid: &efi::Guid) -> Option<*mut c_void> { + let inner = self.inner.lock(); + for (_, mm_handle) in &inner.handles { + if let Some((_, iface)) = mm_handle.protocols.iter().find(|(g, _)| g == guid) { + return Some(*iface); + } + } + None + } + + /// Return all handles that support a given protocol. + pub fn locate_handle_by_protocol(&self, guid: &efi::Guid) -> Vec { + let inner = self.inner.lock(); + inner + .handles + .iter() + .filter(|(_, mm_handle)| mm_handle.protocols.iter().any(|(g, _)| g == guid)) + .map(|(id, _)| *id as efi::Handle) + .collect() + } + + /// Return all handles in the database. + pub fn all_handles(&self) -> Vec { + let inner = self.inner.lock(); + inner.handles.iter().map(|(id, _)| *id as efi::Handle).collect() + } + + // ----------------------------------------------------------------- + // Notify + // ----------------------------------------------------------------- + + /// Register a notification callback for a protocol GUID. + /// + /// If an identical `(GUID, function)` pair is already registered the + /// existing token is returned (matching the C implementation). + pub fn register_protocol_notify( + &self, + guid: &efi::Guid, + function: MmNotifyFn, + ) -> *mut c_void { + let mut inner = self.inner.lock(); + let fn_addr = function as usize; + + // De-duplicate: same GUID + same function pointer. + if let Some(existing) = inner + .notifications + .iter() + .find(|n| n.guid == *guid && (n.function as usize) == fn_addr) + { + return existing.token as *mut c_void; + } + + let token = inner.next_registration_id; + inner.next_registration_id += 1; + inner.notifications.push(ProtocolNotifyEntry { + guid: *guid, + function, + token, + }); + + token as *mut c_void + } + + /// Unregister a notification by its registration token. 
+ pub fn unregister_protocol_notify( + &self, + guid: &efi::Guid, + registration: *mut c_void, + ) -> Result<(), efi::Status> { + let mut inner = self.inner.lock(); + let token = registration as usize; + + if let Some(pos) = inner + .notifications + .iter() + .position(|n| n.guid == *guid && n.token == token) + { + inner.notifications.remove(pos); + Ok(()) + } else { + Err(efi::Status::NOT_FOUND) + } + } + + // ----------------------------------------------------------------- + // Depex helpers (backward-compatible public API) + // ----------------------------------------------------------------- + + /// Check if a protocol GUID is installed on any handle. + pub fn is_protocol_installed(&self, guid: &efi::Guid) -> bool { + let inner = self.inner.lock(); + inner + .handles + .iter() + .any(|(_, mm_handle)| mm_handle.protocols.iter().any(|(g, _)| g == guid)) + } + + /// Return all unique installed protocol GUIDs. + pub fn registered_protocols(&self) -> Vec { + let inner = self.inner.lock(); + let mut guids = Vec::new(); + for (_, mm_handle) in &inner.handles { + for (g, _) in &mm_handle.protocols { + if !guids.contains(g) { + guids.push(*g); + } + } + } + guids + } +} + +/// Global protocol database used by the system table services. +pub static GLOBAL_PROTOCOL_DB: MmProtocolDatabase = MmProtocolDatabase::new(); + +// ============================================================================= +// Helper: update CPU context from MmEntryContext +// ============================================================================= + +/// Update the system table's CPU information from a new `EfiMmEntryContext`. +/// +/// Called at the start of each `UserRequest` handling to reflect the current +/// processor state. +pub fn update_cpu_context( + currently_executing_cpu: usize, + number_of_cpus: usize, +) { + let ptr = get_mm_system_table(); + if ptr.is_null() { + return; + } + // SAFETY: The table is heap-allocated and we are the only writer of these fields. 
+ unsafe { + (*ptr).currently_executing_cpu = currently_executing_cpu; + (*ptr).number_of_cpus = number_of_cpus; + } +} diff --git a/patina_mm_user_core/src/mmi.rs b/patina_mm_user_core/src/mmi.rs new file mode 100644 index 000000000..af1a16288 --- /dev/null +++ b/patina_mm_user_core/src/mmi.rs @@ -0,0 +1,450 @@ +//! MMI (Management Mode Interrupt) Handler Database +//! +//! This module manages the registration and dispatch of MMI handlers, following the +//! same patterns as the C `Mmi.c` in `StandaloneMmPkg/Core`. +//! +//! ## Handler Types +//! +//! - **Root handlers**: Registered with `handler_type = None`. Called on every MMI regardless +//! of the communication buffer contents. Used for hardware-level interrupt sources. +//! - **GUID-specific handlers**: Registered with a specific GUID. Called only when an MMI +//! communication targets that GUID. +//! +//! ## External vs Internal Handlers +//! +//! The database supports two calling conventions: +//! - **External** (`MmiHandlerEntryPoint`): `unsafe extern "efiapi" fn` — used by drivers +//! registering through the MMST `MmiHandlerRegister` service. +//! - **Internal** (`InternalMmiHandler`): Safe Rust `fn` — used by the core's own lifecycle +//! handlers (ready-to-lock, end-of-DXE, etc.) without going through the C ABI. +//! +//! ## Dispatch Flow +//! +//! [`MmiDatabase::mmi_manage`] is the main dispatch entry point: +//! 1. If `handler_type` is `None`, iterate root handlers +//! 2. If `handler_type` is `Some(guid)`, find the `MmiEntry` for that GUID and iterate its handlers +//! 3. Each handler returns a status that determines whether dispatch continues +//! +//! **Lock safety**: The database lock is released before calling handlers and +//! re-acquired afterwards, so handlers may safely call `mmi_handler_register` or +//! `mmi_handler_unregister` without deadlocking. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! 
+ +use alloc::{vec, vec::Vec}; +use core::ffi::c_void; + +use r_efi::efi; +use spin::Mutex; + +/// EFI_WARN_INTERRUPT_SOURCE_QUIESCED — PI spec warning status code. +/// Indicates an interrupt source was quiesced. +const WARN_INTERRUPT_SOURCE_QUIESCED: efi::Status = efi::Status::from_usize(3); + +/// EFI_INTERRUPT_PENDING — PI spec status for pending interrupts. +const INTERRUPT_PENDING: efi::Status = efi::Status::from_usize(0x80000000 | 0x00000004); + +// ============================================================================= +// Handler Types +// ============================================================================= + +/// MMI handler entry point signature (external / C ABI). +/// +/// Re-exported from [`patina::pi::mm_cis::MmiHandlerEntryPoint`]. +pub use patina::pi::mm_cis::MmiHandlerEntryPoint; + +/// Signature for internal (Rust-native) MMI handlers. +/// +/// These are registered by the core itself for lifecycle events and do not go +/// through the `unsafe extern "efiapi"` calling convention. +/// +/// ## Parameters +/// +/// - `handler_type` — The GUID that triggered this handler (same as the registration GUID). +/// - `comm_buffer` — Pointer to the communication data (may be null for async MMIs). +/// - `comm_buffer_size` — Mutable pointer to the communication buffer size. +/// +/// ## Returns +/// +/// An [`efi::Status`] following the standard `MmiManage` return protocol. +pub type InternalMmiHandler = fn( + handler_type: &efi::Guid, + comm_buffer: *mut c_void, + comm_buffer_size: *mut usize, +) -> efi::Status; + +/// An MMI handler callback — either an external (C ABI) or internal (Rust) function. +#[derive(Clone, Copy)] +enum HandlerKind { + /// External handler registered by a driver through the MMST. + External(MmiHandlerEntryPoint), + /// Internal handler registered by the MM Core directly. + Internal(InternalMmiHandler), +} + +/// An MMI entry groups all handlers registered for a specific GUID. 
+#[derive(Clone)] +struct MmiEntry { + /// The handler type GUID. + handler_type: efi::Guid, + /// All handlers registered for this GUID. + handlers: Vec, +} + +/// A registered MMI handler. +#[derive(Clone, Copy)] +struct MmiHandler { + /// The handler callback. + kind: HandlerKind, + /// Monotonic ID used as the dispatch handle for unregistration. + id: usize, + /// Whether this handler is marked for removal (deferred removal during dispatch). + to_remove: bool, +} + +// ============================================================================= +// MMI Database +// ============================================================================= + +/// The MMI handler database. +/// +/// Manages root handlers (called for all MMIs) and GUID-specific handlers. +/// Thread-safe via internal `Mutex`. +pub struct MmiDatabase { + /// Internal state protected by a mutex. + inner: Mutex, +} + +struct MmiDatabaseInner { + /// Root MMI handlers (called for every MMI, regardless of GUID). + root_handlers: Vec, + /// GUID-specific MMI entries. + entries: Vec, + /// Re-entrance depth counter for `mmi_manage`. + manage_calling_depth: usize, + /// Monotonic ID counter for handler dispatch handles. + next_id: usize, +} + +impl MmiDatabase { + /// Creates a new empty `MmiDatabase`. + pub const fn new() -> Self { + Self { + inner: Mutex::new(MmiDatabaseInner { + root_handlers: Vec::new(), + entries: Vec::new(), + manage_calling_depth: 0, + next_id: 1, + }), + } + } + + // ========================================================================= + // Registration — external (driver) handlers + // ========================================================================= + + /// Register an external (C ABI) MMI handler. + /// + /// If `handler_type` is `None`, the handler is registered as a root handler. + /// If `handler_type` is `Some(guid)`, the handler is registered for that specific GUID. 
+ /// + /// Returns `Ok(dispatch_handle)` on success, where `dispatch_handle` is an opaque handle + /// that can be used to unregister the handler. + pub fn mmi_handler_register( + &self, + handler: MmiHandlerEntryPoint, + handler_type: Option<&efi::Guid>, + ) -> Result { + let mut inner = self.inner.lock(); + let id = inner.next_id; + inner.next_id += 1; + + let mmi_handler = MmiHandler { + kind: HandlerKind::External(handler), + id, + to_remove: false, + }; + + Self::insert_handler(&mut inner, handler_type, mmi_handler); + + let handle = id as efi::Handle; + log::debug!( + "Registered external MMI handler id={} for {:?}", + id, + handler_type, + ); + Ok(handle) + } + + // ========================================================================= + // Registration — internal (core) handlers + // ========================================================================= + + /// Register an internal (Rust-native) MMI handler. + /// + /// Works like [`mmi_handler_register`](Self::mmi_handler_register) but takes a safe + /// Rust function pointer instead of an `unsafe extern "efiapi" fn`. + /// + /// Returns the dispatch handle (an opaque `usize`-based ID) on success. + pub fn register_internal_handler( + &self, + handler: InternalMmiHandler, + handler_type: Option<&efi::Guid>, + ) -> Result { + let mut inner = self.inner.lock(); + let id = inner.next_id; + inner.next_id += 1; + + let mmi_handler = MmiHandler { + kind: HandlerKind::Internal(handler), + id, + to_remove: false, + }; + + Self::insert_handler(&mut inner, handler_type, mmi_handler); + + let handle = id as efi::Handle; + log::debug!( + "Registered internal MMI handler id={} for {:?}", + id, + handler_type, + ); + Ok(handle) + } + + /// Insert a handler into the appropriate list (root or GUID-specific). 
+ fn insert_handler( + inner: &mut MmiDatabaseInner, + handler_type: Option<&efi::Guid>, + handler: MmiHandler, + ) { + match handler_type { + None => { + inner.root_handlers.push(handler); + } + Some(guid) => { + if let Some(entry) = inner.entries.iter_mut().find(|e| e.handler_type == *guid) { + entry.handlers.push(handler); + } else { + inner.entries.push(MmiEntry { + handler_type: *guid, + handlers: vec![handler], + }); + } + } + } + } + + // ========================================================================= + // Unregistration + // ========================================================================= + + /// Unregister an MMI handler by its dispatch handle. + /// + /// If we are inside a dispatch (`manage_calling_depth > 0`) the handler is + /// marked for deferred removal. Otherwise it is removed immediately. + pub fn mmi_handler_unregister( + &self, + dispatch_handle: efi::Handle, + ) -> Result<(), efi::Status> { + let target_id = dispatch_handle as usize; + let mut inner = self.inner.lock(); + + // Search root handlers + for handler in inner.root_handlers.iter_mut() { + if handler.id == target_id { + handler.to_remove = true; + log::debug!("Marked root MMI handler id={} for removal.", target_id); + if inner.manage_calling_depth == 0 { + Self::cleanup_removed_handlers(&mut inner); + } + return Ok(()); + } + } + + // Search GUID-specific handlers + for entry in inner.entries.iter_mut() { + for handler in entry.handlers.iter_mut() { + if handler.id == target_id { + handler.to_remove = true; + log::debug!( + "Marked MMI handler id={} for removal (GUID: {:?}).", + target_id, + entry.handler_type, + ); + if inner.manage_calling_depth == 0 { + Self::cleanup_removed_handlers(&mut inner); + } + return Ok(()); + } + } + } + + log::warn!("MMI handler {:?} not found for unregistration.", dispatch_handle); + Err(efi::Status::NOT_FOUND) + } + + // ========================================================================= + // Dispatch + // 
========================================================================= + + /// Manage (dispatch) an MMI. + /// + /// This is the main dispatch function, equivalent to the C `MmiManage`. + /// + /// - If `handler_type` is `None`, root handlers are dispatched. + /// - If `handler_type` is `Some(guid)`, the handlers for that GUID are dispatched. + /// + /// **Lock safety**: The database lock is released before calling any handler + /// and re-acquired afterwards, so handlers may call `mmi_handler_register` / + /// `mmi_handler_unregister` without deadlocking. + /// + /// Returns: + /// - `EFI_SUCCESS` if at least one handler returned success + /// - `EFI_WARN_INTERRUPT_SOURCE_QUIESCED` if a source was quiesced + /// - `EFI_INTERRUPT_PENDING` if a handler indicated the interrupt is still pending + /// - `EFI_NOT_FOUND` if no handlers are registered for the given type + pub fn mmi_manage( + &self, + handler_type: Option<&efi::Guid>, + context: *const c_void, + comm_buffer: *mut c_void, + comm_buffer_size: *mut usize, + ) -> efi::Status { + // ----- Phase 1: snapshot handlers under the lock ----- + let handlers_snapshot = { + let mut inner = self.inner.lock(); + inner.manage_calling_depth += 1; + + match handler_type { + None => inner + .root_handlers + .iter() + .filter(|h| !h.to_remove) + .cloned() + .collect::>(), + Some(guid) => { + if let Some(entry) = inner.entries.iter().find(|e| e.handler_type == *guid) { + entry + .handlers + .iter() + .filter(|h| !h.to_remove) + .cloned() + .collect::>() + } else { + Vec::new() + } + } + } + // lock released here + }; + + let short_circuit = handler_type.is_some(); + + // ----- Phase 2: dispatch without the lock held ----- + let return_status = Self::dispatch_handler_snapshot( + &handlers_snapshot, + handler_type, + context, + comm_buffer, + comm_buffer_size, + short_circuit, + ); + + // ----- Phase 3: update depth and clean up under the lock ----- + { + let mut inner = self.inner.lock(); + inner.manage_calling_depth -= 1; + 
+ if inner.manage_calling_depth == 0 { + Self::cleanup_removed_handlers(&mut inner); + } + } + + return_status + } + + /// Dispatch a snapshot of handlers. The database lock is NOT held. + fn dispatch_handler_snapshot( + handlers: &[MmiHandler], + handler_type: Option<&efi::Guid>, + context: *const c_void, + comm_buffer: *mut c_void, + comm_buffer_size: *mut usize, + short_circuit: bool, + ) -> efi::Status { + if handlers.is_empty() { + return efi::Status::NOT_FOUND; + } + + let mut return_status = efi::Status::NOT_FOUND; + + // Provide a dummy GUID for root dispatch (handlers don't use it). + let null_guid = efi::Guid::from_fields(0, 0, 0, 0, 0, &[0; 6]); + let guid_ref = handler_type.unwrap_or(&null_guid); + + for handler in handlers { + let status = match handler.kind { + HandlerKind::External(entry_point) => { + // SAFETY: External handler follows the PI spec efiapi calling convention. + // The dispatch_handle is the monotonic ID cast to a handle. + unsafe { + entry_point( + handler.id as efi::Handle, + context, + comm_buffer, + comm_buffer_size, + ) + } + } + HandlerKind::Internal(fn_ptr) => { + fn_ptr(guid_ref, comm_buffer, comm_buffer_size) + } + }; + + match status { + efi::Status::SUCCESS => { + return_status = efi::Status::SUCCESS; + if short_circuit { + break; + } + } + s if s == INTERRUPT_PENDING => { + if short_circuit { + return INTERRUPT_PENDING; + } + if return_status != efi::Status::SUCCESS { + return_status = status; + } + } + s if s == WARN_INTERRUPT_SOURCE_QUIESCED => { + return_status = efi::Status::SUCCESS; + } + _ => { + // Other statuses are ignored per PI spec + } + } + } + + return_status + } + + // ========================================================================= + // Cleanup + // ========================================================================= + + /// Remove handlers marked with `to_remove` and clean up empty entries. 
+ fn cleanup_removed_handlers(inner: &mut MmiDatabaseInner) { + inner.root_handlers.retain(|h| !h.to_remove); + + inner.entries.retain_mut(|entry| { + entry.handlers.retain(|h| !h.to_remove); + !entry.handlers.is_empty() + }); + } +} diff --git a/patina_mm_user_core/src/pool_allocator.rs b/patina_mm_user_core/src/pool_allocator.rs new file mode 100644 index 000000000..43e5f6e55 --- /dev/null +++ b/patina_mm_user_core/src/pool_allocator.rs @@ -0,0 +1,306 @@ +//! Pool Allocator +//! +//! This module provides a trait-based page allocator abstraction and a generic pool +//! allocator for the MM User Core. +//! +//! ## Design +//! +//! The [`PageAllocatorBackend`] trait abstracts the page allocation mechanism. +//! The user core implements it by issuing `syscall` instructions that thunk +//! into the supervisor for page allocation. +//! +//! The [`PoolAllocator`] is a bump-allocator built on top of any `PageAllocatorBackend`. +//! It implements [`GlobalAlloc`] so it can be used as `#[global_allocator]`. +//! +//! ## Block Management +//! +//! Block metadata is stored **in-band** at the start of each page allocation, forming +//! an intrusive linked list. This means there is no fixed cap on the number of blocks — +//! the allocator grows dynamically as needed by requesting more pages from the backend. +//! When all allocations within a block are freed, the block is unlinked from the list +//! and the pages are returned to the backend. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +use core::{ + alloc::{GlobalAlloc, Layout}, + mem, + ptr, +}; + +use spin::Mutex; + +// ============================================================================ +// Constants +// ============================================================================ + +/// Standard UEFI page size (4 KB). +pub const PAGE_SIZE: usize = 4096; + +/// Minimum allocation size for the pool allocator. 
+const MIN_POOL_ALLOC_SIZE: usize = 16; + +// ============================================================================ +// Error Types +// ============================================================================ + +/// Errors that can occur during page allocation. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PageAllocError { + /// The allocator has not been initialized. + NotInitialized, + /// No free pages available to satisfy the request. + OutOfMemory, + /// The requested address is not aligned to page boundary. + NotAligned, + /// The address is not within any known SMRAM region. + InvalidAddress, + /// The address was not previously allocated. + NotAllocated, + /// Too many regions to track. + TooManyRegions, + /// A syscall to the supervisor failed. + SyscallFailed(u64), +} + +// ============================================================================ +// Page Allocator Backend Trait +// ============================================================================ + +/// Trait for page-granularity memory allocation. +/// +/// Implementors provide the actual page allocation mechanism. The user core +/// implements this by issuing syscalls to the supervisor. +pub trait PageAllocatorBackend: Send + Sync { + /// Allocates `num_pages` contiguous pages. + /// + /// Returns the physical base address of the allocated region on success. + fn allocate_pages(&self, num_pages: usize) -> Result; + + /// Frees `num_pages` contiguous pages starting at `addr`. + fn free_pages(&self, addr: u64, num_pages: usize) -> Result<(), PageAllocError>; + + /// Returns whether the page allocator has been initialized and is ready for use. + fn is_initialized(&self) -> bool; +} + +// ============================================================================ +// Pool Block Header (intrusive linked list node) +// ============================================================================ + +/// In-band header stored at the beginning of each pool page block. 
+/// +/// By placing the metadata inside the allocated pages themselves, we avoid +/// any fixed-size bookkeeping array. Blocks form a singly-linked list so +/// traversal, insertion, and removal are straightforward. +#[repr(C)] +struct PoolBlockHeader { + /// Pointer to the next block in the linked list (`null` if this is the tail). + next: *mut PoolBlockHeader, + /// Number of pages backing this block (includes the header). + num_pages: usize, + /// Current bump offset (in bytes from the block base). Starts just past the header. + offset: usize, + /// Number of live allocations served from this block. + alloc_count: usize, +} + +impl PoolBlockHeader { + /// Base address of this block (== address of the header itself). + fn base(&self) -> usize { + self as *const Self as usize + } + + /// Total usable capacity of this block in bytes. + fn capacity(&self) -> usize { + self.num_pages * PAGE_SIZE + } + + /// Remaining bytes available for bump allocation. + fn remaining(&self) -> usize { + self.capacity().saturating_sub(self.offset) + } + + /// Returns `true` if the given address falls within this block's page range. + fn contains(&self, addr: usize) -> bool { + addr >= self.base() && addr < self.base() + self.capacity() + } + + /// Try to bump-allocate `layout` from this block. 
+ fn try_alloc(&mut self, layout: Layout) -> Option<*mut u8> { + let current_ptr = self.base() + self.offset; + let align = layout.align().max(MIN_POOL_ALLOC_SIZE); + let aligned_ptr = (current_ptr + align - 1) & !(align - 1); + let padding = aligned_ptr - current_ptr; + let total_size = padding + layout.size(); + + if total_size > self.remaining() { + return None; + } + + self.offset += total_size; + self.alloc_count += 1; + + Some(aligned_ptr as *mut u8) + } +} + +// ============================================================================ +// Pool Allocator +// ============================================================================ + +/// Pool allocator built on top of a [`PageAllocatorBackend`]. +/// +/// This allocator provides smaller-granularity allocations by requesting +/// full pages from the backend and subdividing them via bump allocation. +pub struct PoolAllocator { + /// Reference to the underlying page allocator. + page_allocator: &'static P, + /// Head of the intrusive linked list of pool blocks. + head: Mutex<*mut PoolBlockHeader>, +} + +// SAFETY: The PoolAllocator uses internal locking (spin::Mutex) for all accesses +// to the block linked list. The raw pointer is only dereferenced under the lock. +unsafe impl Send for PoolAllocator

{} +unsafe impl Sync for PoolAllocator

{} + +impl PoolAllocator

{ + /// Creates a new pool allocator backed by the given page allocator. + pub const fn new(page_allocator: &'static P) -> Self { + Self { + page_allocator, + head: Mutex::new(ptr::null_mut()), + } + } + + /// Allocate a new page block large enough for `min_size` bytes of payload + /// and prepend it to the linked list. + fn allocate_new_block<'a>( + &self, + head: &mut *mut PoolBlockHeader, + min_size: usize, + ) -> Option<&'a mut PoolBlockHeader> { + let header_size = mem::size_of::(); + let needed = min_size + header_size; + let num_pages = ((needed + PAGE_SIZE - 1) / PAGE_SIZE).max(1); + + let base = match self.page_allocator.allocate_pages(num_pages) { + Ok(addr) => addr, + Err(e) => { + log::warn!("Pool allocator: failed to allocate {} pages: {:?}", num_pages, e); + return None; + } + }; + + // SAFETY: `base` is a freshly allocated, page-aligned region of at least + // `num_pages * PAGE_SIZE` bytes. We place our header at offset 0. + let header = unsafe { &mut *(base as *mut PoolBlockHeader) }; + header.next = *head; + header.num_pages = num_pages; + header.offset = header_size; + header.alloc_count = 0; + + *head = header as *mut PoolBlockHeader; + + log::trace!( + "Pool allocator: new block at {:#018x} ({} pages)", + base, + num_pages, + ); + + Some(header) + } +} + +unsafe impl GlobalAlloc for PoolAllocator

{ + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + if !self.page_allocator.is_initialized() { + return ptr::null_mut(); + } + + let mut head = self.head.lock(); + + // Walk the linked list, try to bump-alloc from an existing block. + { + let mut current = *head; + while !current.is_null() { + // SAFETY: `current` was written by us under the same lock. + let block = unsafe { &mut *current }; + if let Some(ptr) = block.try_alloc(layout) { + return ptr; + } + current = block.next; + } + } + + // No existing block had space — allocate a new one and retry. + if let Some(block) = self.allocate_new_block(&mut head, layout.size()) { + if let Some(ptr) = block.try_alloc(layout) { + return ptr; + } + } + + ptr::null_mut() + } + + unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { + if ptr.is_null() { + return; + } + + let mut head = self.head.lock(); + let addr = ptr as usize; + + let mut prev: *mut PoolBlockHeader = ptr::null_mut(); + let mut current = *head; + + while !current.is_null() { + // SAFETY: `current` was written by us under the same lock. + let block = unsafe { &mut *current }; + + if block.contains(addr) { + block.alloc_count = block.alloc_count.saturating_sub(1); + + // If the block is now empty, unlink it and free the pages. + if block.alloc_count == 0 { + let next = block.next; + let base = block.base() as u64; + let num_pages = block.num_pages; + + if prev.is_null() { + *head = next; + } else { + // SAFETY: `prev` is a valid block we visited earlier. 
+ unsafe { (*prev).next = next }; + } + + if let Err(e) = self.page_allocator.free_pages(base, num_pages) { + log::warn!( + "Pool allocator: failed to free block at {:#018x}: {:?}", + base, + e, + ); + } else { + log::trace!("Pool allocator: freed block at {:#018x} ({} pages)", base, num_pages); + } + } + + return; + } + + prev = current; + current = block.next; + } + + log::warn!( + "Pool allocator: dealloc called with unknown pointer {:#018x}", + addr, + ); + } +} diff --git a/patina_mm_user_core/src/protocol_db.rs b/patina_mm_user_core/src/protocol_db.rs new file mode 100644 index 000000000..d97ab2e22 --- /dev/null +++ b/patina_mm_user_core/src/protocol_db.rs @@ -0,0 +1,92 @@ +//! Protocol/Handle Database +//! +//! This module provides a simplified protocol database for the MM User Core. +//! It tracks installed protocols for depex evaluation and driver service use. +//! +//! In the DXE Core, the protocol database is a full handle-protocol mapping +//! (handles can have multiple protocols, protocols can be on multiple handles). +//! The MM User Core simplifies this to a flat set of installed protocol GUIDs +//! since MM drivers primarily need: +//! - `MmInstallProtocolInterface`: Register that a protocol is available +//! - `MmLocateProtocol`: Check if a protocol is available (for depex evaluation) +//! - `registered_protocols()`: Get the list of all installed protocols (for depex eval) +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +use alloc::vec::Vec; + +use r_efi::efi; +use spin::Mutex; + +// ============================================================================= +// Protocol Database +// ============================================================================= + +/// A simplified protocol/handle database for the MM User Core. +/// +/// Tracks installed protocol GUIDs for depex evaluation and protocol location. 
+pub struct ProtocolDatabase { + /// Internal state protected by a mutex. + inner: Mutex, +} + +struct ProtocolDatabaseInner { + /// The set of installed protocol GUIDs. + protocols: Vec, +} + +impl ProtocolDatabase { + /// Creates a new empty `ProtocolDatabase`. + pub const fn new() -> Self { + Self { + inner: Mutex::new(ProtocolDatabaseInner { + protocols: Vec::new(), + }), + } + } + + /// Install a protocol interface. + /// + /// Registers the given protocol GUID as available. Duplicate GUIDs are allowed + /// (multiple instances of the same protocol can be installed on different handles). + pub fn install_protocol(&self, protocol_guid: &efi::Guid) -> Result<(), efi::Status> { + let mut inner = self.inner.lock(); + inner.protocols.push(*protocol_guid); + log::debug!("Installed protocol: {:?}", protocol_guid); + Ok(()) + } + + /// Uninstall a protocol interface. + /// + /// Removes the first occurrence of the given protocol GUID. + pub fn uninstall_protocol(&self, protocol_guid: &efi::Guid) -> Result<(), efi::Status> { + let mut inner = self.inner.lock(); + if let Some(pos) = inner.protocols.iter().position(|g| g == protocol_guid) { + inner.protocols.remove(pos); + log::debug!("Uninstalled protocol: {:?}", protocol_guid); + Ok(()) + } else { + log::warn!("Protocol {:?} not found for uninstall.", protocol_guid); + Err(efi::Status::NOT_FOUND) + } + } + + /// Check if a protocol is installed. + pub fn is_protocol_installed(&self, protocol_guid: &efi::Guid) -> bool { + let inner = self.inner.lock(); + inner.protocols.contains(protocol_guid) + } + + /// Get the list of all installed protocol GUIDs. + /// + /// This is used by the depex evaluator to determine which dependencies are satisfied. 
+ pub fn registered_protocols(&self) -> Vec { + let inner = self.inner.lock(); + inner.protocols.clone() + } +} diff --git a/sdk/patina/Cargo.toml b/sdk/patina/Cargo.toml index 8542bfcd6..07388f0c0 100644 --- a/sdk/patina/Cargo.toml +++ b/sdk/patina/Cargo.toml @@ -27,15 +27,15 @@ name = "basic_hob_usage" [dependencies] patina_macro = { workspace = true } -fixedbitset = { workspace = true } +fixedbitset = { workspace = true, optional = true } cfg-if = { workspace = true } compile-time = { workspace = true } -goblin = { workspace = true, features = ["pe32", "pe64", "te", "alloc"]} +goblin = { workspace = true, features = ["pe32", "pe64", "te", "alloc"], optional = true } log = { workspace = true } r-efi = { workspace = true } mockall = { workspace = true, optional = true } -mu_rust_helpers = { workspace = true } +mu_rust_helpers = { workspace = true, optional = true } num-traits = { workspace = true } indoc = { workspace = true } fallible-streaming-iterator = { workspace = true } @@ -69,15 +69,11 @@ trybuild = { version = "1.0", features = ["diff"] } core = ['alloc'] std = ['alloc'] doc = ['alloc'] -alloc = [] +alloc = ["dep:goblin", "dep:mu_rust_helpers", "dep:fixedbitset"] mockall = ["dep:mockall", "std"] global_allocator = [] -default = ['alloc'] -# Opting in to the `enable_patina_tests` feature requires registering at least one test -# with the `#[patina_test]` attribute. Otherwise, a linker crash or failure will -# occur! 
-enable_patina_tests = ["patina_macro/enable_patina_tests"] -serde = ["alloc", "dep:serde", "dep:serde_json"] +default = [] +serde = ["alloc", "dep:serde", "serde/alloc", "dep:serde_json"] serde-with-yaml = ["serde", "dep:serde_yaml"] unstable = ["unstable-device-path"] diff --git a/sdk/patina/README.md b/sdk/patina/README.md index 7f8bff558..1fd875d44 100644 --- a/sdk/patina/README.md +++ b/sdk/patina/README.md @@ -15,7 +15,7 @@ Add the crate to your manifest and enable the features needed by your firmware o ```toml [dependencies] -patina = { version = "13.1.0", default-features = false, features = ["enable_patina_tests"] } +patina = { version = "13.1.0", default-features = false } ``` The crate is `no_std` unless `std` is selected. Tests or host utilities can enable `std` or `mockall` as needed. @@ -30,7 +30,6 @@ The crate is `no_std` unless `std` is selected. Tests or host utilities can enab | `doc` | Pull in items needed to build documentation. | | `mockall` | Provide mock implementations for Boot Services and other traits (implies `std`). | | `global_allocator` | Install the global allocator support used by Patina firmware images. | -| `enable_patina_tests` | Enable the `#[patina_test]` attribute and link-time test registration (requires at least one test). | | `serde` | Enable serialization support for configuration and PI data structures. | | `unstable` | Opt into experimental APIs gated behind `unstable-*` flags, including device path helpers. | | `unstable-device-path` | Activate the current device-path parsing and construction prototypes. 
| diff --git a/sdk/patina/benches/bench_guid.rs b/sdk/patina/benches/bench_guid.rs index 1f1416503..40c787266 100644 --- a/sdk/patina/benches/bench_guid.rs +++ b/sdk/patina/benches/bench_guid.rs @@ -108,7 +108,7 @@ fn bench_r_efi_eq_same(b: &mut Bencher<'_>, _input: &usize) { fn bench_r_efi_eq_different(b: &mut Bencher<'_>, _input: &usize) { let r_efi_guid1 = create_r_efi_guid(); - let r_efi_guid_different = efi::Guid::from_fields(0, 0, 0, 0, 0, &[0; 6]); + let r_efi_guid_different: efi::Guid = patina::guids::ZERO.into(); b.iter(|| r_efi_guid1 == r_efi_guid_different) } diff --git a/sdk/patina/examples/basic_guid_usage.rs b/sdk/patina/examples/basic_guid_usage.rs index ccab1f9ca..6ef1bfcea 100644 --- a/sdk/patina/examples/basic_guid_usage.rs +++ b/sdk/patina/examples/basic_guid_usage.rs @@ -194,13 +194,13 @@ fn demonstrate_practical_usage() { vec![("Performance Protocol", &PERFORMANCE_PROTOCOL), ("SMM Communication", &SMM_COMMUNICATION_PROTOCOL)]; for (name, guid) in protocols { - println!(" [INFO] Loading protocol '{}' with GUID: {}", name, Guid::from(guid)); + println!(" [INFO] Loading protocol '{}' with GUID: {}", name, guid); } // (2) Event group identification println!("\n Event Group Example:"); - println!(" [EVENT] End of DXE event signaled: {}", Guid::from(&EVENT_GROUP_END_OF_DXE)); - println!(" [EVENT] Exit Boot Services failed: {}", Guid::from(&EBS_FAILED)); + println!(" [EVENT] End of DXE event signaled: {}", EVENT_GROUP_END_OF_DXE); + println!(" [EVENT] Exit Boot Services failed: {}", EBS_FAILED); // (3) Configuration file or user input parsing println!("\n Configuration Parsing Example:"); @@ -226,14 +226,12 @@ fn demonstrate_practical_usage() { let user_input = "00000000-0000-0000-0000-000000000000"; match OwnedGuid::try_from_string(user_input) { Ok(parsed_guid) => { - let zero_guid = Guid::from(&ZERO); - println!(" User input: {}", parsed_guid); - println!(" Zero GUID: {}", zero_guid); + println!(" Zero GUID: {}", ZERO); // Direct equality 
comparison! - println!(" Direct equality: {}", parsed_guid == zero_guid); - println!(" String format match: {}", format!("{}", parsed_guid) == format!("{}", zero_guid)); + println!(" Direct equality: {}", parsed_guid == ZERO); + println!(" String format match: {}", format!("{}", parsed_guid) == format!("{}", ZERO)); } Err(e) => println!(" Error parsing user input: {}", e), } @@ -242,14 +240,13 @@ fn demonstrate_practical_usage() { println!("\n Cross-Format Equality Examples:"); let compact_result = OwnedGuid::try_from_string("00000000000000000000000000000000"); let hyphenated_result = OwnedGuid::try_from_string("00000000-0000-0000-0000-000000000000"); - let ref_zero = Guid::from(&ZERO); match (compact_result, hyphenated_result) { (Ok(compact_zero), Ok(hyphenated_zero)) => { println!(" Compact format: {}", compact_zero); println!(" Hyphenated format: {}", hyphenated_zero); - println!(" From reference: {}", ref_zero); - println!(" All equal? {}", compact_zero == hyphenated_zero && hyphenated_zero == ref_zero); + println!(" From reference: {}", ZERO); + println!(" All equal? {}", compact_zero == hyphenated_zero && hyphenated_zero == ZERO); } (Err(e1), _) => println!(" Error parsing compact format: {}", e1), (_, Err(e2)) => println!(" Error parsing hyphenated format: {}", e2), @@ -272,11 +269,9 @@ fn demonstrate_practical_usage() { // (7) Comparing different protocol GUIDs println!("\n Different Protocol Comparison Example:"); - let guid1 = Guid::from(&PERFORMANCE_PROTOCOL); - let guid2 = Guid::from(&SMM_COMMUNICATION_PROTOCOL); - println!(" Performance Protocol: {}", guid1); - println!(" SMM Communication: {}", guid2); - println!(" Are they equal? {}", guid1 == guid2); + println!(" Performance Protocol: {}", PERFORMANCE_PROTOCOL); + println!(" SMM Communication: {}", SMM_COMMUNICATION_PROTOCOL); + println!(" Are they equal? 
{}", PERFORMANCE_PROTOCOL == SMM_COMMUNICATION_PROTOCOL); println!(); } diff --git a/sdk/patina/examples/basic_hob_usage.rs b/sdk/patina/examples/basic_hob_usage.rs index d47580c8c..e40e7a16f 100644 --- a/sdk/patina/examples/basic_hob_usage.rs +++ b/sdk/patina/examples/basic_hob_usage.rs @@ -15,7 +15,7 @@ //! HOBs and their respective parsers are automatically gathered when a component is registered, and one step of Core //! initialization is to parse the HOB list and use any registered parsers to parse a GUIDed HOB. use patina::{ - Guid, OwnedGuid, + BinaryGuid, component::{IntoComponent, Storage, component, prelude::*}, }; use zerocopy_derive::FromBytes; @@ -42,7 +42,7 @@ pub struct CustomHob1 { pub struct CustomHob2(String); impl FromHob for CustomHob2 { - const HOB_GUID: OwnedGuid = Guid::from_fields(0x0, 0x0, 0x0, 0x0, 0x0, [0x00, 0x00, 0x00, 0x0, 0x0, 0x02]); + const HOB_GUID: BinaryGuid = BinaryGuid::from_string("00000000-0000-0000-0000-000000000002"); fn parse(bytes: &[u8]) -> Self { let out = String::from_utf8(bytes.to_vec()).expect("Failed to parse string from bytes"); @@ -163,7 +163,7 @@ mod util { for hob in hob_list.iter() { match hob { patina::pi::hob::Hob::GuidHob(hob, data) => { - for parser in storage.get_hob_parsers(&patina::Guid::from(hob.name)) { + for parser in storage.get_hob_parsers(&hob.name) { parser(data, storage); } } @@ -189,7 +189,7 @@ mod util { length: std::mem::size_of::() as u16, reserved: 0, }, - name: r_efi::efi::Guid::from_fields(0x0, 0x0, 0x0, 0x0, 0x0, &[0x00, 0x00, 0x00, 0x0, 0x0, 0x01]), + name: patina::BinaryGuid::from_string("00000000-0000-0000-0000-000000000001"), })); hob_list.push(patina::pi::hob::Hob::GuidHob(hob, as_slice)); } @@ -207,7 +207,7 @@ mod util { length: std::mem::size_of::() as u16, reserved: 0, }, - name: CustomHob2::HOB_GUID.to_efi_guid(), + name: CustomHob2::HOB_GUID, })); hob_list.push(patina::pi::hob::Hob::GuidHob(hob, as_slice)); } diff --git a/sdk/patina/src/base/guid.rs b/sdk/patina/src/base/guid.rs 
index 8487b8f75..c4a57f137 100644 --- a/sdk/patina/src/base/guid.rs +++ b/sdk/patina/src/base/guid.rs @@ -200,7 +200,7 @@ pub type OwnedGuid = Guid<'static>; /// println!("Header GUID: {}", header.guid.as_guid()); /// ``` #[repr(transparent)] -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct BinaryGuid(pub efi::Guid); impl BinaryGuid { @@ -212,7 +212,7 @@ impl BinaryGuid { } /// Create a BinaryGuid from a 16-byte array. - pub fn from_bytes(bytes: &[u8; 16]) -> Self { + pub const fn from_bytes(bytes: &[u8; 16]) -> Self { Self(efi::Guid::from_bytes(bytes)) } @@ -1581,6 +1581,40 @@ mod tests { assert_ne!(hasher1.finish(), hasher3.finish()); } + #[test] + fn binary_guid_ord_matches_guid_byte_ordering() { + // BinaryGuid derives Ord from efi::Guid's field-level comparison, while Guid<'a> + // implements Ord via as_bytes() byte comparison. These must produce the same ordering + // for all GUID pairs, or BTreeMap keys could silently produce different iteration order + // depending on which type is used. 
+ + let guids = [ + BinaryGuid::from_string("00000000-0000-0000-0000-000000000000"), + BinaryGuid::from_string("00000000-0000-0000-0000-000000000001"), + BinaryGuid::from_string("00000000-0000-0000-0001-000000000000"), + BinaryGuid::from_string("00000000-0000-0001-0000-000000000000"), + BinaryGuid::from_string("00000000-0001-0000-0000-000000000000"), + BinaryGuid::from_string("00000001-0000-0000-0000-000000000000"), + BinaryGuid::from_string("01000000-0000-0000-0000-000000000000"), + BinaryGuid::from_string("FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF"), + BinaryGuid::from_string("550E8400-E29B-41D4-A716-446655440000"), + BinaryGuid::from_string("550E8400-E29B-41D4-A716-446655440001"), + BinaryGuid::from_string("23C9322F-2AF2-476A-BC4C-26BC88266C71"), + ]; + + for (i, a) in guids.iter().enumerate() { + for (j, b) in guids.iter().enumerate() { + let binary_ord = a.cmp(b); + let guid_ord = a.to_owned_guid().cmp(&b.to_owned_guid()); + assert_eq!( + binary_ord, guid_ord, + "Ordering mismatch at guids[{i}] vs guids[{j}]: BinaryGuid is {:?}, Guid is {:?}", + binary_ord, guid_ord + ); + } + } + } + #[test] fn binary_guid_in_c_struct() { // Test usage in a typical C-compatible structure diff --git a/sdk/patina/src/boot_services.rs b/sdk/patina/src/boot_services.rs index c67d2d070..c19de4667 100644 --- a/sdk/patina/src/boot_services.rs +++ b/sdk/patina/src/boot_services.rs @@ -10,8 +10,6 @@ #[cfg(feature = "global_allocator")] pub mod global_allocator; -extern crate alloc; - pub mod allocation; pub mod boxed; pub mod c_ptr; @@ -1674,6 +1672,7 @@ impl BootServices for StandardBootServices { #[cfg(test)] #[coverage(off)] mod tests { + use crate::BinaryGuid; use c_ptr::CPtr; use efi::{Boolean, Char16, OpenProtocolInformationEntry, protocols::device_path}; @@ -1708,8 +1707,8 @@ mod tests { // Safety: TestProtocol provides a test protocol interface with a unique GUID for unit tests. // The GUID constant meets the requirements of the ProtocolInterface trait. 
unsafe impl ProtocolInterface for TestProtocol { - const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_bytes(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + const PROTOCOL_GUID: BinaryGuid = + BinaryGuid::from_bytes(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); } #[derive(Debug)] @@ -1718,8 +1717,8 @@ mod tests { // Safety: TestProtocolEmpty provides a test protocol interface with a unique GUID for unit tests. // The GUID constant meets the requirements of the ProtocolInterface trait. Zero-sized type is valid. unsafe impl ProtocolInterface for TestProtocolEmpty { - const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_bytes(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + const PROTOCOL_GUID: BinaryGuid = + BinaryGuid::from_bytes(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); } extern "efiapi" fn efi_allocate_pool_use_box( @@ -2964,9 +2963,9 @@ mod tests { assert_ne!(ptr::null_mut(), protocol_buffer); assert_ne!(ptr::null_mut(), protocol_buffer_count); - static PROTOCOL_GUID: efi::Guid = TestProtocol::PROTOCOL_GUID; + static PROTOCOL_GUID: BinaryGuid = TestProtocol::PROTOCOL_GUID; #[allow(unused_allocation)] - let buff = Box::new(ptr::addr_of!(PROTOCOL_GUID) as *mut efi::Guid).into_mut_ptr(); + let buff = Box::new(PROTOCOL_GUID.as_efi_guid() as *const efi::Guid as *mut efi::Guid).into_mut_ptr(); // SAFETY: Test mock - writing protocol GUID buffer pointer and count to output parameters. unsafe { diff --git a/sdk/patina/src/component.rs b/sdk/patina/src/component.rs index 3e2982e3d..e40262f75 100644 --- a/sdk/patina/src/component.rs +++ b/sdk/patina/src/component.rs @@ -136,8 +136,6 @@ //! //! SPDX-License-Identifier: Apache-2.0 //! 
-extern crate alloc; - pub mod hob; mod metadata; pub mod params; @@ -220,7 +218,7 @@ mod tests { use super::*; use crate as patina; use crate::{ - Guid, OwnedGuid, + BinaryGuid, component::{ component, hob::{FromHob, Hob}, @@ -294,8 +292,7 @@ mod tests { #[test] fn test_component_run_return_handling() { - const HOB_GUID: OwnedGuid = - Guid::from_fields(0xd4ffc718, 0xfb82, 0x4274, 0x9a, 0xfc, [0xaa, 0x8b, 0x1e, 0xef, 0x52, 0x93]); + const HOB_GUID: BinaryGuid = BinaryGuid::from_string("D4FFC718-FB82-4274-9AFC-AA8B1EEF5293"); let mut storage = storage::Storage::new(); diff --git a/sdk/patina/src/component/hob.rs b/sdk/patina/src/component/hob.rs index bc781a983..a4c40e8f0 100644 --- a/sdk/patina/src/component/hob.rs +++ b/sdk/patina/src/component/hob.rs @@ -12,7 +12,7 @@ //! use patina::{ //! error::Result, //! component::hob::{Hob, FromHob}, -//! Guid, OwnedGuid +//! BinaryGuid //! }; //! //! /// A HOB that is a simple pointer cast from byte array to a struct. @@ -34,7 +34,7 @@ //! } //! //! impl FromHob for MyComplexHobStruct { -//! const HOB_GUID: OwnedGuid = Guid::from_fields(0, 0, 0, 0, 0, [0; 6]); +//! const HOB_GUID: BinaryGuid = patina::guids::ZERO; //! //! fn parse(bytes: &[u8]) -> Self { //! Self::default() // Simple for example @@ -57,11 +57,9 @@ //! Copyright (c) Microsoft Corporation. //! //! 
SPDX-License-Identifier: Apache-2.0 -extern crate alloc; - use alloc::{borrow::Cow, boxed::Box, vec::Vec}; -use crate::OwnedGuid; +use crate::BinaryGuid; use core::{any::Any, ops::Deref}; use super::{ @@ -82,7 +80,7 @@ use super::{ /// /// ```rust /// use patina::component::hob::FromHob; -/// use patina::{Guid, OwnedGuid}; +/// use patina::BinaryGuid; /// /// #[derive(Default, Clone, Copy)] /// #[repr(C)] @@ -92,7 +90,7 @@ use super::{ /// } /// /// impl FromHob for MyConfig { -/// const HOB_GUID: OwnedGuid = Guid::from_fields(0, 0, 0, 0, 0, [0; 6]); +/// const HOB_GUID: BinaryGuid = patina::guids::ZERO; /// /// fn parse(bytes: &[u8]) -> Self { /// // SAFETY: Specification defined requirement that the byte array is this underlying C type. @@ -110,7 +108,7 @@ use super::{ /// ``` pub trait FromHob: Sized + 'static { /// The guid value associated with the guided HOB to parse. - const HOB_GUID: OwnedGuid; + const HOB_GUID: BinaryGuid; /// Registers the parsed hob with the provided [Storage] instance. 
fn register(bytes: &[u8], storage: &mut Storage) { @@ -136,7 +134,7 @@ pub use patina_macro::FromHob; /// # #[derive(Debug)] /// # struct MyStruct{ value: u32 }; /// # impl FromHob for MyStruct { -/// # const HOB_GUID: patina::OwnedGuid = patina::Guid::from_fields(0, 0, 0, 0, 0, [0; 6]); +/// # const HOB_GUID: patina::BinaryGuid = patina::guids::ZERO; /// # fn parse(bytes: &[u8]) -> Self { /// # MyStruct { value: 5 } /// # } @@ -164,12 +162,12 @@ impl<'h, T: FromHob + 'static> Hob<'h, T> { /// /// ```rust /// use patina::component::hob::{FromHob, Hob}; - /// use patina::{Guid, OwnedGuid}; + /// use patina::BinaryGuid; /// /// struct MyStruct; /// /// impl FromHob for MyStruct { - /// const HOB_GUID: OwnedGuid = Guid::from_fields(0, 0, 0, 0, 0, [0; 6]); + /// const HOB_GUID: BinaryGuid = patina::guids::ZERO; /// /// fn parse(bytes: &[u8]) -> Self { /// MyStruct @@ -249,7 +247,7 @@ unsafe impl Param for Hob<'_, T> { /// # use patina::component::hob::{FromHob, Hob}; /// # struct MyStruct(u32); /// # impl FromHob for MyStruct { -/// # const HOB_GUID: patina::OwnedGuid = patina::Guid::from_fields(0, 0, 0, 0, 0, [0; 6]); +/// # const HOB_GUID: patina::BinaryGuid = patina::guids::ZERO; /// # fn parse(bytes: &[u8]) -> Self { /// # MyStruct(5) /// # } @@ -294,7 +292,7 @@ impl<'h, T: FromHob + 'static> IntoIterator for &Hob<'h, T> { mod tests { use crate as patina; use crate::{ - Guid, OwnedGuid, + BinaryGuid, component::{IntoComponent, component}, error::{EfiError, Result}, }; @@ -307,7 +305,7 @@ mod tests { } impl FromHob for MyStruct { - const HOB_GUID: OwnedGuid = Guid::ZERO; + const HOB_GUID: BinaryGuid = patina::guids::ZERO; fn parse(_bytes: &[u8]) -> Self { MyStruct::default() diff --git a/sdk/patina/src/component/params.rs b/sdk/patina/src/component/params.rs index f27ac0db9..ec546bfc3 100644 --- a/sdk/patina/src/component/params.rs +++ b/sdk/patina/src/component/params.rs @@ -94,8 +94,6 @@ //! //! SPDX-License-Identifier: Apache-2.0 //! 
-extern crate alloc; - use core::{ cell::{Ref, RefCell, RefMut}, marker::PhantomData, diff --git a/sdk/patina/src/component/service.rs b/sdk/patina/src/component/service.rs index b24895b2e..07ddfc6fe 100644 --- a/sdk/patina/src/component/service.rs +++ b/sdk/patina/src/component/service.rs @@ -115,8 +115,6 @@ //! //! SPDX-License-Identifier: Apache-2.0 //! -extern crate alloc; - use alloc::{borrow::Cow, boxed::Box}; use core::{any::Any, cell::OnceCell, marker::PhantomData, ops::Deref}; @@ -126,8 +124,8 @@ use crate::component::{ storage::{Storage, UnsafeStorageCell}, }; +pub mod dxe_dispatch; pub mod memory; -pub mod perf_timer; pub use patina_macro::IntoService; diff --git a/sdk/patina/src/component/service/dxe_dispatch.rs b/sdk/patina/src/component/service/dxe_dispatch.rs new file mode 100644 index 000000000..2e129c967 --- /dev/null +++ b/sdk/patina/src/component/service/dxe_dispatch.rs @@ -0,0 +1,32 @@ +//! DXE Dispatch Service Definition. +//! +//! This module contains the [`DxeDispatch`] trait for services that expose +//! DXE driver dispatch capability. See [`DxeDispatch`] for the primary interface. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! +#[cfg(any(test, feature = "mockall"))] +use mockall::automock; + +use crate::error::Result; + +/// Service interface for DXE driver dispatch. +/// +/// Provides access to the PI dispatcher for components that need to trigger +/// additional driver dispatch passes beyond the core's built-in dispatch loop +/// (e.g., to interleave controller connection with driver dispatch during boot). +/// +/// Note: The DXE core already runs a PI dispatch loop automatically. This +/// service is only needed when a component must explicitly trigger a dispatch +/// pass at a specific point in its execution. +#[cfg_attr(any(test, feature = "mockall"), automock)] +pub trait DxeDispatch { + /// Performs a single DXE driver dispatch pass. 
+ /// + /// Returns `true` if any drivers were dispatched, `false` if no drivers were dispatched. + fn dispatch(&self) -> Result; +} diff --git a/sdk/patina/src/component/service/memory.rs b/sdk/patina/src/component/service/memory.rs index c15b28bca..588a893ad 100644 --- a/sdk/patina/src/component/service/memory.rs +++ b/sdk/patina/src/component/service/memory.rs @@ -869,6 +869,11 @@ mod mock { } } + /// Frees the block of pages at the given address of the given size. + /// + /// ## Safety + /// Caller must ensure that the given address corresponds to a valid block of pages that was allocated with + /// [Self::allocate_pages] unsafe fn free_pages(&self, address: usize, page_count: usize) -> Result<(), MemoryError> { let ptr = address as *mut u8; let layout = Layout::from_size_align(page_count * UEFI_PAGE_SIZE, UEFI_PAGE_SIZE).unwrap(); diff --git a/sdk/patina/src/component/service/perf_timer.rs b/sdk/patina/src/component/service/perf_timer.rs deleted file mode 100644 index 30a42f374..000000000 --- a/sdk/patina/src/component/service/perf_timer.rs +++ /dev/null @@ -1,37 +0,0 @@ -//! Arch-specific timer functionality -//! By default, this module attempts to determine the timer frequency via architecture specific methods. -//! (cpuid for x86, `CNTFRQ_EL0` for aarch64) -//! -//! Platforms can override this with a custom performance frequency by providing the Core with the correct frequency: -//! -//! -//! ```rust,ignore -//! let frequency_hz: u64 = 1_000_000_000; // Compute with platform-specific methods. -//! -//! Core::default() -//! .init_timer_frequency(Some(frequency_hz)) -//!``` -//! -//! ## License -//! -//! Copyright (c) Microsoft Corporation. -//! -//! SPDX-License-Identifier: Apache-2.0 -//! - -/// Trait that provides architecture-specific timer functionality. -/// Components that need timing functionality can request this service. -pub trait ArchTimerFunctionality: Send + Sync { - /// Value of the counter (ticks). 
- fn cpu_count(&self) -> u64; - /// Value in Hz of how often the counter increment. - fn perf_frequency(&self) -> u64; - /// Value that the performance counter starts with. - fn cpu_count_start(&self) -> u64 { - 0 - } - /// Value that the performance counter ends with before it rolls over. - fn cpu_count_end(&self) -> u64 { - u64::MAX - } -} diff --git a/sdk/patina/src/component/storage.rs b/sdk/patina/src/component/storage.rs index 9e7abfd20..96971bdb5 100644 --- a/sdk/patina/src/component/storage.rs +++ b/sdk/patina/src/component/storage.rs @@ -6,14 +6,12 @@ //! //! SPDX-License-Identifier: Apache-2.0 //! -extern crate alloc; - use crate::{ component::{metadata::MetaData, params::Param}, runtime_services::StandardRuntimeServices, }; -use crate::{OwnedGuid, boot_services::StandardBootServices}; +use crate::{BinaryGuid, boot_services::StandardBootServices}; use alloc::{borrow::Cow, boxed::Box, collections::BTreeMap, vec::Vec}; use core::{ any::{Any, TypeId}, @@ -29,7 +27,7 @@ use super::{ service::{IntoService, Service}, }; -type HobParsers = BTreeMap>; +type HobParsers = BTreeMap>; /// A vector whose elements are sparsely populated. #[derive(Debug)] @@ -433,7 +431,7 @@ impl Storage { } /// Attempts to retrieve a HOB parser from the storage. - pub fn get_hob_parsers(&self, guid: &OwnedGuid) -> Vec { + pub fn get_hob_parsers(&self, guid: &BinaryGuid) -> Vec { self.hob_parsers.get(guid).map(|type_map| type_map.values().copied().collect()).unwrap_or_default() } } diff --git a/sdk/patina/src/component/struct_component.rs b/sdk/patina/src/component/struct_component.rs index 31fa90a94..537806625 100644 --- a/sdk/patina/src/component/struct_component.rs +++ b/sdk/patina/src/component/struct_component.rs @@ -20,8 +20,6 @@ //! //! SPDX-License-Identifier: Apache-2.0 //! 
-extern crate alloc; - use crate::{ component::{ Component, diff --git a/sdk/patina/src/device_path/walker.rs b/sdk/patina/src/device_path/walker.rs index b94136aae..4b15638c4 100644 --- a/sdk/patina/src/device_path/walker.rs +++ b/sdk/patina/src/device_path/walker.rs @@ -8,8 +8,6 @@ //! //! SPDX-License-Identifier: Apache-2.0 //! -extern crate alloc; - use alloc::{boxed::Box, format, string::String, vec, vec::Vec}; use core::{mem::size_of_val, ptr::slice_from_raw_parts, slice::from_raw_parts}; use r_efi::protocols::device_path::{End, Hardware, Media}; diff --git a/sdk/patina/src/driver_binding.rs b/sdk/patina/src/driver_binding.rs index 77349435c..c933970d4 100644 --- a/sdk/patina/src/driver_binding.rs +++ b/sdk/patina/src/driver_binding.rs @@ -57,8 +57,6 @@ #[cfg(any(test, feature = "mockall"))] use mockall::automock; -extern crate alloc; - use alloc::boxed::Box; use core::{ mem::{self, ManuallyDrop}, diff --git a/sdk/patina/src/guids.rs b/sdk/patina/src/guids.rs index 06295caf0..ed4ed92dc 100644 --- a/sdk/patina/src/guids.rs +++ b/sdk/patina/src/guids.rs @@ -10,16 +10,14 @@ //! SPDX-License-Identifier: Apache-2.0 //! -use r_efi::efi; - /// Cache Attribute Change Event Group GUID /// /// The GUID for an event group signaled when the cache attributes for a memory region are changed. The event group /// is intended for architectures, such as x86, that require cache attribute changes to be propagated to all APs. 
/// -/// (`b8e477c7-26a9-4b9a-a7c9-5f8f1f3d9c7b`) -pub const CACHE_ATTRIBUTE_CHANGE_EVENT_GROUP: efi::Guid = - efi::Guid::from_fields(0xb8e477c7, 0x26a9, 0x4b9a, 0xa7, 0xc9, &[0x5f, 0x8f, 0x1f, 0x3d, 0x9c, 0x7b]); +/// (`B8E477C7-26A9-4B9A-A7C9-5F8F1F3D9C7B`) +pub const CACHE_ATTRIBUTE_CHANGE_EVENT_GROUP: crate::BinaryGuid = + crate::BinaryGuid::from_string("B8E477C7-26A9-4B9A-A7C9-5F8F1F3D9C7B"); /// DXE Core Module GUID /// @@ -31,11 +29,10 @@ pub const CACHE_ATTRIBUTE_CHANGE_EVENT_GROUP: efi::Guid = /// /// (`23C9322F-2AF2-476A-BC4C-26BC88266C71`) /// ``` -/// # use patina::{Guid, guids::DXE_CORE}; -/// # assert_eq!("23C9322F-2AF2-476A-BC4C-26BC88266C71", format!("{:?}", Guid::from_ref(&DXE_CORE))); +/// # use patina::guids::DXE_CORE; +/// # assert_eq!("23C9322F-2AF2-476A-BC4C-26BC88266C71", format!("{}", DXE_CORE)); /// ``` -pub const DXE_CORE: efi::Guid = - efi::Guid::from_fields(0x23C9322F, 0x2AF2, 0x476A, 0xBC, 0x4C, &[0x26, 0xBC, 0x88, 0x26, 0x6C, 0x71]); +pub const DXE_CORE: crate::BinaryGuid = crate::BinaryGuid::from_string("23C9322F-2AF2-476A-BC4C-26BC88266C71"); /// Exit Boot Services Failed GUID /// @@ -43,13 +40,12 @@ pub const DXE_CORE: efi::Guid = /// implementation may find that the memory map key provided does not match the current memory map key and return /// an error code. This event group will be signaled in that case just before returning to the caller. 
/// -/// (`4f6c5507-232f-4787-b95e-72f862490cb1`) +/// (`4F6C5507-232F-4787-B95E-72F862490CB1`) /// ``` -/// # use patina::{Guid, guids::EBS_FAILED}; -/// # assert_eq!("4F6C5507-232F-4787-B95E-72F862490CB1", format!("{:?}", Guid::from_ref(&EBS_FAILED))); +/// # use patina::guids::EBS_FAILED; +/// # assert_eq!("4F6C5507-232F-4787-B95E-72F862490CB1", format!("{}", EBS_FAILED)); /// ``` -pub const EBS_FAILED: efi::Guid = - efi::Guid::from_fields(0x4f6c5507, 0x232f, 0x4787, 0xb9, 0x5e, &[0x72, 0xf8, 0x62, 0x49, 0x0c, 0xb1]); +pub const EBS_FAILED: crate::BinaryGuid = crate::BinaryGuid::from_string("4F6C5507-232F-4787-B95E-72F862490CB1"); /// EDKII FPDT (Firmware Performance Data Table) extender firmware performance. /// @@ -59,32 +55,64 @@ pub const EBS_FAILED: efi::Guid = /// /// (`3B387BFD-7ABC-4CF2-A0CA-B6A16C1B1B25`) /// ``` -/// # use patina::{Guid, guids::EDKII_FPDT_EXTENDED_FIRMWARE_PERFORMANCE}; -/// # assert_eq!("3B387BFD-7ABC-4CF2-A0CA-B6A16C1B1B25", format!("{:?}", Guid::from_ref(&EDKII_FPDT_EXTENDED_FIRMWARE_PERFORMANCE))); +/// # use patina::guids::EDKII_FPDT_EXTENDED_FIRMWARE_PERFORMANCE; +/// # assert_eq!("3B387BFD-7ABC-4CF2-A0CA-B6A16C1B1B25", format!("{}", EDKII_FPDT_EXTENDED_FIRMWARE_PERFORMANCE)); /// ``` -pub const EDKII_FPDT_EXTENDED_FIRMWARE_PERFORMANCE: efi::Guid = - efi::Guid::from_fields(0x3b387bfd, 0x7abc, 0x4cf2, 0xa0, 0xca, &[0xb6, 0xa1, 0x6c, 0x1b, 0x1b, 0x25]); +pub const EDKII_FPDT_EXTENDED_FIRMWARE_PERFORMANCE: crate::BinaryGuid = + crate::BinaryGuid::from_string("3B387BFD-7ABC-4CF2-A0CA-B6A16C1B1B25"); + +/// Exit Boot Services event group GUID. +/// +/// The GUID for the event group signaled when `ExitBootServices()` is called. +/// +/// In MM, this is forwarded as an MMI to allow MM drivers to perform cleanup. +/// +/// Defined in UEFI/PI as `gEfiEventExitBootServicesGuid`. 
+/// +/// (`27ABF055-B1B8-4C26-8048-748F37BAA2DF`) +/// ``` +/// # use patina::{Guid, guids::EVENT_EXIT_BOOT_SERVICES}; +/// # assert_eq!("27ABF055-B1B8-4C26-8048-748F37BAA2DF", format!("{:?}", Guid::from_ref(&EVENT_EXIT_BOOT_SERVICES))); +/// ``` +pub const EVENT_EXIT_BOOT_SERVICES: crate::BinaryGuid = + crate::BinaryGuid::from_string("27ABF055-B1B8-4C26-8048-748F37BAA2DF"); /// End of dxe event group GUID. /// /// (`02CE967A-DD7E-4FFC-9EE7-810CF0470880`) /// ``` -/// # use patina::{Guid, guids::EVENT_GROUP_END_OF_DXE}; -/// # assert_eq!("02CE967A-DD7E-4FFC-9EE7-810CF0470880", format!("{:?}", Guid::from_ref(&EVENT_GROUP_END_OF_DXE))); +/// # use patina::guids::EVENT_GROUP_END_OF_DXE; +/// # assert_eq!("02CE967A-DD7E-4FFC-9EE7-810CF0470880", format!("{}", EVENT_GROUP_END_OF_DXE)); +/// ``` +pub const EVENT_GROUP_END_OF_DXE: crate::BinaryGuid = + crate::BinaryGuid::from_string("02CE967A-DD7E-4FFC-9EE7-810CF0470880"); + +/// Ready to Boot event group GUID. +/// +/// The GUID for the event group signaled when the platform is ready to boot. +/// +/// In MM, this is forwarded as an MMI to allow MM drivers to perform final setup. +/// +/// Defined in UEFI/PI as `gEfiEventReadyToBootGuid`. +/// +/// (`7CE88FB3-4BD7-4679-87A8-A8D8DEE50D2B`) +/// ``` +/// # use patina::{Guid, guids::EVENT_READY_TO_BOOT}; +/// # assert_eq!("7CE88FB3-4BD7-4679-87A8-A8D8DEE50D2B", format!("{:?}", Guid::from_ref(&EVENT_READY_TO_BOOT))); /// ``` -pub const EVENT_GROUP_END_OF_DXE: efi::Guid = - efi::Guid::from_fields(0x2ce967a, 0xdd7e, 0x4ffc, 0x9e, 0xe7, &[0x81, 0xc, 0xf0, 0x47, 0x8, 0x80]); +pub const EVENT_READY_TO_BOOT: crate::BinaryGuid = + crate::BinaryGuid::from_string("7CE88FB3-4BD7-4679-87A8-A8D8DEE50D2B"); /// Hardware Interrupt protocol GUID. /// This protocol provides a means of registering and unregistering interrupt handlers for AARCH64 systems. 
/// /// (`2890B3EA-053D-1643-AD0C-D64808DA3FF1`) /// ``` -/// # use patina::{Guid, guids::HARDWARE_INTERRUPT_PROTOCOL}; -/// # assert_eq!("2890B3EA-053D-1643-AD0C-D64808DA3FF1", format!("{:?}", Guid::from_ref(&HARDWARE_INTERRUPT_PROTOCOL))); +/// # use patina::guids::HARDWARE_INTERRUPT_PROTOCOL; +/// # assert_eq!("2890B3EA-053D-1643-AD0C-D64808DA3FF1", format!("{}", HARDWARE_INTERRUPT_PROTOCOL)); /// ``` -pub const HARDWARE_INTERRUPT_PROTOCOL: efi::Guid = - efi::Guid::from_fields(0x2890B3EA, 0x053D, 0x1643, 0xAD, 0x0C, &[0xD6, 0x48, 0x08, 0xDA, 0x3F, 0xF1]); +pub const HARDWARE_INTERRUPT_PROTOCOL: crate::BinaryGuid = + crate::BinaryGuid::from_string("2890B3EA-053D-1643-AD0C-D64808DA3FF1"); /// Hardware Interrupt v2 protocol GUID. /// This protocol provides a means of registering and unregistering interrupt handlers for AARCH64 systems. @@ -92,11 +120,11 @@ pub const HARDWARE_INTERRUPT_PROTOCOL: efi::Guid = /// /// (`32898322-2DA1-474A-BAAA-F3F7CF569470`) /// ``` -/// # use patina::{Guid, guids::HARDWARE_INTERRUPT_PROTOCOL_V2}; -/// # assert_eq!("32898322-2DA1-474A-BAAA-F3F7CF569470", format!("{:?}", Guid::from_ref(&HARDWARE_INTERRUPT_PROTOCOL_V2))); +/// # use patina::guids::HARDWARE_INTERRUPT_PROTOCOL_V2; +/// # assert_eq!("32898322-2DA1-474A-BAAA-F3F7CF569470", format!("{}", HARDWARE_INTERRUPT_PROTOCOL_V2)); /// ``` -pub const HARDWARE_INTERRUPT_PROTOCOL_V2: efi::Guid = - efi::Guid::from_fields(0x32898322, 0x2da1, 0x474a, 0xba, 0xaa, &[0xf3, 0xf7, 0xcf, 0x56, 0x94, 0x70]); +pub const HARDWARE_INTERRUPT_PROTOCOL_V2: crate::BinaryGuid = + crate::BinaryGuid::from_string("32898322-2DA1-474A-BAAA-F3F7CF569470"); /// Memory Type Info GUID /// @@ -109,11 +137,91 @@ pub const HARDWARE_INTERRUPT_PROTOCOL_V2: efi::Guid = /// /// (`4C19049F-4137-4DD3-9C10-8B97A83FFDFA`) /// ``` -/// # use patina::{Guid, guids::MEMORY_TYPE_INFORMATION}; -/// # assert_eq!("4C19049F-4137-4DD3-9C10-8B97A83FFDFA", format!("{:?}", Guid::from_ref(&MEMORY_TYPE_INFORMATION))); +/// # use 
patina::guids::MEMORY_TYPE_INFORMATION; +/// # assert_eq!("4C19049F-4137-4DD3-9C10-8B97A83FFDFA", format!("{}", MEMORY_TYPE_INFORMATION)); +/// ``` +pub const MEMORY_TYPE_INFORMATION: crate::BinaryGuid = + crate::BinaryGuid::from_string("4C19049F-4137-4DD3-9C10-8B97A83FFDFA"); + +/// MM Dispatch Event GUID. +/// +/// An MMI handler is registered with this GUID to trigger driver dispatch. +/// +/// When the supervisor sends an MMI with this GUID, the core attempts to +/// dispatch any previously-discovered-but-not-yet-dispatched drivers. +/// +/// Defined in StandaloneMmPkg as `gEventMmDispatchGuid`. +/// +/// (`7E6EFFFA-69B4-4C1B-A4C7-AFF9C9244FEE`) +/// ``` +/// # use patina::{Guid, guids::MM_DISPATCH_EVENT}; +/// # assert_eq!("7E6EFFFA-69B4-4C1B-A4C7-AFF9C9244FEE", format!("{:?}", Guid::from_ref(&MM_DISPATCH_EVENT))); +/// ``` +pub const MM_DISPATCH_EVENT: crate::BinaryGuid = crate::BinaryGuid::from_string("7E6EFFFA-69B4-4C1B-A4C7-AFF9C9244FEE"); + +/// DXE MM Ready To Lock Protocol GUID. +/// +/// This protocol GUID is used to signal that the DXE phase is ready to lock +/// down MM. When an MMI with this GUID is received, the MM core begins the +/// ready-to-lock sequence. +/// +/// Defined in PI as `gEfiDxeMmReadyToLockProtocolGuid`. +/// +/// (`60FF8964-E906-41D0-AFED-F241E974E08E`) +/// ``` +/// # use patina::{Guid, guids::MM_DXE_READY_TO_LOCK_PROTOCOL}; +/// # assert_eq!("60FF8964-E906-41D0-AFED-F241E974E08E", format!("{:?}", Guid::from_ref(&MM_DXE_READY_TO_LOCK_PROTOCOL))); +/// ``` +pub const MM_DXE_READY_TO_LOCK_PROTOCOL: crate::BinaryGuid = + crate::BinaryGuid::from_string("60FF8964-E906-41D0-AFED-F241E974E08E"); + +/// MM End of DXE Protocol GUID. +/// +/// This protocol is installed in the MM handle database when an End-of-DXE MMI +/// is received. MM drivers can register a protocol notification for this GUID +/// to perform actions that must happen after all DXE drivers have been dispatched +/// but before 3rd-party OpROMs execute. 
+/// +/// Defined in PI as `gEfiMmEndOfDxeProtocolGuid`. +/// +/// (`24E70042-D5C5-4260-8C39-0AD3AA32E93D`) +/// ``` +/// # use patina::{Guid, guids::MM_END_OF_DXE_PROTOCOL}; +/// # assert_eq!("24E70042-D5C5-4260-8C39-0AD3AA32E93D", format!("{:?}", Guid::from_ref(&MM_END_OF_DXE_PROTOCOL))); +/// ``` +pub const MM_END_OF_DXE_PROTOCOL: crate::BinaryGuid = + crate::BinaryGuid::from_string("24E70042-D5C5-4260-8C39-0AD3AA32E93D"); + +/// MM End of PEI Protocol GUID. +/// +/// This protocol is installed in the MM handle database when an End-of-PEI MMI +/// is received. It signals that the PEI phase has completed. +/// +/// Defined in PI as `gEfiMmEndOfPeiProtocol`. +/// +/// (`F33E1BF3-980B-4BFB-A29A-B29C86453732`) +/// ``` +/// # use patina::{Guid, guids::MM_END_OF_PEI_PROTOCOL}; +/// # assert_eq!("F33E1BF3-980B-4BFB-A29A-B29C86453732", format!("{:?}", Guid::from_ref(&MM_END_OF_PEI_PROTOCOL))); +/// ``` +pub const MM_END_OF_PEI_PROTOCOL: crate::BinaryGuid = + crate::BinaryGuid::from_string("F33E1BF3-980B-4BFB-A29A-B29C86453732"); + +/// MM Ready To Lock Protocol GUID. +/// +/// This protocol is installed in the MM handle database when the ready-to-lock +/// handler runs. MM drivers can register a protocol notification for this GUID +/// to be informed that MMRAM is about to be locked. +/// +/// Defined in PI as `gEfiMmReadyToLockProtocolGuid`. +/// +/// (`47B7FA8C-F4BD-4AF6-8200-333086F0D2C8`) +/// ``` +/// # use patina::{Guid, guids::MM_READY_TO_LOCK_PROTOCOL}; +/// # assert_eq!("47B7FA8C-F4BD-4AF6-8200-333086F0D2C8", format!("{:?}", Guid::from_ref(&MM_READY_TO_LOCK_PROTOCOL))); /// ``` -pub const MEMORY_TYPE_INFORMATION: efi::Guid = - efi::Guid::from_fields(0x4C19049F, 0x4137, 0x4DD3, 0x9C, 0x10, &[0x8B, 0x97, 0xA8, 0x3F, 0xFD, 0xFA]); +pub const MM_READY_TO_LOCK_PROTOCOL: crate::BinaryGuid = + crate::BinaryGuid::from_string("47B7FA8C-F4BD-4AF6-8200-333086F0D2C8"); /// Performance Protocol GUID. 
/// @@ -121,11 +229,11 @@ pub const MEMORY_TYPE_INFORMATION: efi::Guid = /// /// (`76B6BDFA-2ACD-4462-9E3F-CB58C969D937`) /// ``` -/// # use patina::{Guid, guids::PERFORMANCE_PROTOCOL}; -/// # assert_eq!("76B6BDFA-2ACD-4462-9E3F-CB58C969D937", format!("{:?}", Guid::from_ref(&PERFORMANCE_PROTOCOL))); +/// # use patina::guids::PERFORMANCE_PROTOCOL; +/// # assert_eq!("76B6BDFA-2ACD-4462-9E3F-CB58C969D937", format!("{}", PERFORMANCE_PROTOCOL)); /// ``` -pub const PERFORMANCE_PROTOCOL: efi::Guid = - efi::Guid::from_fields(0x76b6bdfa, 0x2acd, 0x4462, 0x9E, 0x3F, &[0xcb, 0x58, 0xC9, 0x69, 0xd9, 0x37]); +pub const PERFORMANCE_PROTOCOL: crate::BinaryGuid = + crate::BinaryGuid::from_string("76B6BDFA-2ACD-4462-9E3F-CB58C969D937"); /// EFI SMM Communication Protocol GUID as defined in the PI 1.2 specification. /// @@ -134,11 +242,11 @@ pub const PERFORMANCE_PROTOCOL: efi::Guid = /// /// (`C68ED8E2-9DC6-4CBD-9D94-DB65ACC5C332`) /// ``` -/// # use patina::{Guid, guids::SMM_COMMUNICATION_PROTOCOL}; -/// # assert_eq!("C68ED8E2-9DC6-4CBD-9D94-DB65ACC5C332", format!("{:?}", Guid::from_ref(&SMM_COMMUNICATION_PROTOCOL))); +/// # use patina::guids::SMM_COMMUNICATION_PROTOCOL; +/// # assert_eq!("C68ED8E2-9DC6-4CBD-9D94-DB65ACC5C332", format!("{}", SMM_COMMUNICATION_PROTOCOL)); /// ``` -pub const SMM_COMMUNICATION_PROTOCOL: efi::Guid = - efi::Guid::from_fields(0xc68ed8e2, 0x9dc6, 0x4cbd, 0x9d, 0x94, &[0xdb, 0x65, 0xac, 0xc5, 0xc3, 0x32]); +pub const SMM_COMMUNICATION_PROTOCOL: crate::BinaryGuid = + crate::BinaryGuid::from_string("C68ED8E2-9DC6-4CBD-9D94-DB65ACC5C332"); /// Zero GUID /// @@ -146,10 +254,10 @@ pub const SMM_COMMUNICATION_PROTOCOL: efi::Guid = /// /// (`00000000-0000-0000-0000-000000000000`) /// ``` -/// # use patina::{Guid, guids::ZERO}; -/// # assert_eq!("00000000-0000-0000-0000-000000000000", format!("{:?}", Guid::from_ref(&ZERO))); +/// # use patina::guids::ZERO; +/// # assert_eq!("00000000-0000-0000-0000-000000000000", format!("{}", ZERO)); /// ``` -pub const ZERO: 
efi::Guid = efi::Guid::from_fields(0, 0, 0, 0, 0, &[0, 0, 0, 0, 0, 0]); +pub const ZERO: crate::BinaryGuid = crate::BinaryGuid::from_string("00000000-0000-0000-0000-000000000000"); /// EFI_HOB_MEMORY_ALLOC_STACK_GUID /// @@ -158,8 +266,23 @@ pub const ZERO: efi::Guid = efi::Guid::from_fields(0, 0, 0, 0, 0, &[0, 0, 0, 0, /// /// (`4ED4BF27-4092-42E9-807D-527B1D00C9BD`) /// ``` -/// # use patina::{Guid, guids::HOB_MEMORY_ALLOC_STACK}; -/// # assert_eq!("4ED4BF27-4092-42E9-807D-527B1D00C9BD", format!("{:?}", Guid::from_ref(&HOB_MEMORY_ALLOC_STACK))); +/// # use patina::guids::HOB_MEMORY_ALLOC_STACK; +/// # assert_eq!("4ED4BF27-4092-42E9-807D-527B1D00C9BD", format!("{}", HOB_MEMORY_ALLOC_STACK)); +/// ``` +pub const HOB_MEMORY_ALLOC_STACK: crate::BinaryGuid = + crate::BinaryGuid::from_string("4ED4BF27-4092-42E9-807D-527B1D00C9BD"); + +/// EFI HOB List GUID +/// +/// The GUID used to identify the HOB list when it is installed as a configuration table entry +/// in the EFI System Table or the MM System Table. Drivers can locate the HOB list by searching +/// the configuration table for this GUID. +/// +/// Defined in the PI Specification as `gEfiHobListGuid`. 
+/// +/// (`7739F24C-93D7-11D4-9A3A-0090273FC14D`) +/// ``` +/// # use patina::{Guid, guids::HOB_LIST}; +/// # assert_eq!("7739F24C-93D7-11D4-9A3A-0090273FC14D", format!("{:?}", Guid::from_ref(&HOB_LIST))); /// ``` -pub const HOB_MEMORY_ALLOC_STACK: efi::Guid = - efi::Guid::from_fields(0x4ed4bf27, 0x4092, 0x42e9, 0x80, 0x7d, &[0x52, 0x7b, 0x1d, 0x00, 0xc9, 0xbd]); +pub const HOB_LIST: crate::BinaryGuid = crate::BinaryGuid::from_string("7739F24C-93D7-11D4-9A3A-0090273FC14D"); diff --git a/sdk/patina/src/lib.rs b/sdk/patina/src/lib.rs index 60a40cf47..2e2a0c8d7 100644 --- a/sdk/patina/src/lib.rs +++ b/sdk/patina/src/lib.rs @@ -49,6 +49,7 @@ pub mod error; pub mod guids; pub mod hash; pub mod log; +pub mod management_mode; #[cfg(any(test, feature = "alloc"))] pub mod performance; pub mod pi; @@ -56,7 +57,9 @@ pub mod pi; pub mod runtime_services; pub mod serial; #[cfg(any(test, feature = "alloc"))] -pub mod test; -#[cfg(any(test, feature = "alloc"))] pub mod tpl_mutex; +pub mod timer; +#[cfg(any(test, feature = "alloc"))] pub mod uefi_protocol; +#[cfg(any(test, feature = "alloc"))] +pub mod mm_services; diff --git a/sdk/patina/src/management_mode.rs b/sdk/patina/src/management_mode.rs new file mode 100644 index 000000000..d99a5c5ab --- /dev/null +++ b/sdk/patina/src/management_mode.rs @@ -0,0 +1,16 @@ +//! Management Mode (MM) SDK for Patina +//! +//! This crate provides the Management Mode (MM) related definitions for Patina. +//! +//! ## License +//! +//! Copyright (C) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +pub mod comm_buffer_hob; +pub mod protocol; + +// Re-export commonly used items for easier access +pub use comm_buffer_hob::MmCommBufferStatus; diff --git a/sdk/patina/src/management_mode/comm_buffer_hob.rs b/sdk/patina/src/management_mode/comm_buffer_hob.rs new file mode 100644 index 000000000..cc6db44a0 --- /dev/null +++ b/sdk/patina/src/management_mode/comm_buffer_hob.rs @@ -0,0 +1,91 @@ +//! 
Management Mode (MM) Header and Buffer HOB Definitions +//! +//! Defines the header and buffer HOB structures necessary for the MM environment to be initialized and used by components +//! dependent on MM details. +//! +//! ## MM HOB Usage +//! +//! It is expected that the MM HOB buffer will be initialized by the environment that registers services for the +//! platform. The HOBs can have platform-fixed values assigned during their initialization. It should be common +//! for at least the communication buffers to be populated as a mutable HOB during boot time. It is +//! recommended for a "MM HOB" component to handle all MM HOB details with minimal other MM related +//! dependencies and lock the HOBs so they are available for components that depend on the immutable HOB +//! to perform MM operations. +//! +//! ## License +//! +//! Copyright (C) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +use crate::BinaryGuid; +use zerocopy_derive::{FromBytes, Immutable, KnownLayout}; + +/// GUID for the MM communication buffer HOB (`gMmCommBufferHobGuid`). +/// +/// `{ 0x6c2a2520, 0x0131, 0x4aee, { 0xa7, 0x50, 0xcc, 0x38, 0x4a, 0xac, 0xe8, 0xc6 } }` +pub const MM_COMM_BUFFER_HOB_GUID: BinaryGuid = BinaryGuid::from_string("6c2a2520-0131-4aee-a750-cc384aace8c6"); + +/// MM Common Buffer HOB Data Structure. +/// +/// Describes the communication buffer region passed via HOB from PEI to MM. +#[repr(C, packed)] +#[derive(Debug, Clone, Copy)] +pub struct MmCommonBufferHobData { + /// Physical start address of the common region. + pub physical_start: u64, + /// Number of pages in the communication buffer region. + pub number_of_pages: u64, + /// Pointer to `MmCommBufferStatus` structure. + pub status_buffer: u64, +} + +/// MM Communication Buffer Status +/// +/// Shared structure between DXE and MM environments to communicate the status +/// of MM communication operations. 
This structure is written by DXE before +/// triggering an MMI and read/written by MM during MMI processing. +/// +/// This is a structure currently used in some MM Supervisor MM implementations. +#[derive(Debug, Clone, Copy, FromBytes, Immutable, KnownLayout)] +#[repr(C)] +pub struct MmCommBufferStatus { + /// Whether the data in the fixed MM communication buffer is valid when entering from non-MM to MM. + /// Must be set to TRUE before triggering MMI, will be set to FALSE by MM after processing. + pub is_comm_buffer_valid: u8, + + /// The channel used to communicate with MM. + /// FALSE = user buffer, TRUE = supervisor buffer + pub talk_to_supervisor: u8, + + /// Padding to align to 8 bytes. + /// This padding is necessary to match the structure layout defined in edk2 and mu_basecore. + pub _padding: [u8; 6], + + /// The return status when returning from MM to non-MM. + pub return_status: u64, + + /// The size in bytes of the output buffer when returning from MM to non-MM. + pub return_buffer_size: u64, +} + +impl Default for MmCommBufferStatus { + #[coverage(off)] + fn default() -> Self { + Self::new() + } +} + +impl MmCommBufferStatus { + /// Create a new mailbox status with all fields zeroed + pub const fn new() -> Self { + Self { + is_comm_buffer_valid: 0, + talk_to_supervisor: 0, + _padding: [0; 6], + return_status: 0, + return_buffer_size: 0, + } + } +} diff --git a/components/patina_mm/src/protocol.rs b/sdk/patina/src/management_mode/protocol.rs similarity index 90% rename from components/patina_mm/src/protocol.rs rename to sdk/patina/src/management_mode/protocol.rs index cf73116f5..89493e062 100644 --- a/components/patina_mm/src/protocol.rs +++ b/sdk/patina/src/management_mode/protocol.rs @@ -9,3 +9,4 @@ //! 
SPDX-License-Identifier: Apache-2.0 pub mod mm_comm_buffer_update; +pub mod mm_supervisor_request; diff --git a/components/patina_mm/src/protocol/mm_comm_buffer_update.rs b/sdk/patina/src/management_mode/protocol/mm_comm_buffer_update.rs similarity index 98% rename from components/patina_mm/src/protocol/mm_comm_buffer_update.rs rename to sdk/patina/src/management_mode/protocol/mm_comm_buffer_update.rs index 4e8ce0ea1..609669c70 100644 --- a/components/patina_mm/src/protocol/mm_comm_buffer_update.rs +++ b/sdk/patina/src/management_mode/protocol/mm_comm_buffer_update.rs @@ -10,7 +10,7 @@ //! SPDX-License-Identifier: Apache-2.0 //! -use patina::BinaryGuid; +use crate::BinaryGuid; use zerocopy_derive::{FromBytes, Immutable, IntoBytes, KnownLayout}; /// GUID for the MM Communication Buffer Update Protocol diff --git a/sdk/patina/src/management_mode/protocol/mm_supervisor_request.rs b/sdk/patina/src/management_mode/protocol/mm_supervisor_request.rs new file mode 100644 index 000000000..443495cf3 --- /dev/null +++ b/sdk/patina/src/management_mode/protocol/mm_supervisor_request.rs @@ -0,0 +1,221 @@ +//! MM Supervisor Request Protocol Definitions +//! +//! This module provides the shared protocol structures and constants for MM Supervisor +//! request handling. These types define the communication contract between the supervisor +//! and its clients (DXE, tests, etc.). +//! +//! ## Overview +//! +//! The MM Supervisor uses a structured request/response protocol. Requests are sent via +//! the MM communicate buffer and consist of an [`MmSupervisorRequestHeader`] followed by +//! request-specific payload data. The supervisor processes the request and writes back +//! a response header (with result status) followed by response-specific data. +//! +//! ## License +//! +//! Copyright (C) Microsoft Corporation. +//! +//! 
SPDX-License-Identifier: Apache-2.0 + +use crate::BinaryGuid; +use r_efi::efi; +use zerocopy::FromBytes; + +/// Signature value for the request header ('MSUP' as little-endian u32). +pub const SIGNATURE: u32 = u32::from_le_bytes([b'M', b'S', b'U', b'P']); + +/// Current revision of the request protocol. +pub const REVISION: u32 = 1; + +// GUID for gMmSupervisorRequestHandlerGuid +// { 0x8c633b23, 0x1260, 0x4ea6, { 0x83, 0xf, 0x7d, 0xdc, 0x97, 0x38, 0x21, 0x11 } } +/// GUID for the MM Supervisor Request Handler protocol. +pub const MM_SUPERVISOR_REQUEST_HANDLER_GUID: BinaryGuid = + BinaryGuid::from_string("8c633b23-1260-4ea6-830f-7ddc97382111"); + +/// MM Supervisor request header. +/// +/// This header is present at the start of every supervisor request buffer. It identifies +/// the request type and carries the result status on response. +/// +/// ## Layout +/// +/// ```text +/// Offset Size Field +/// 0x00 4 signature - Must be [`SIGNATURE`] ('MSUP' as little-endian u32) +/// 0x04 4 revision - Protocol revision, must be <= [`REVISION`] +/// 0x08 4 request - Request type (see [`RequestType`] enum) +/// 0x0C 4 reserved - Reserved for alignment, must be 0 +/// 0x10 8 result - Return status (0 = success, set by supervisor on response) +/// ``` +#[derive(Debug, Clone, Copy, zerocopy_derive::FromBytes, zerocopy_derive::IntoBytes, zerocopy_derive::Immutable)] +#[repr(C)] +pub struct MmSupervisorRequestHeader { + /// Signature to identify the request ('MSUP' as little-endian). + pub signature: u32, + /// Revision of the request protocol. + pub revision: u32, + /// The specific request type (see [`RequestType`] enum). + pub request: u32, + /// Reserved for alignment, must be 0. + pub reserved: u32, + /// Result status. The value of this field follows the [`efi::Status`] definitions. + pub result: u64, +} + +impl MmSupervisorRequestHeader { + /// Size of the header in bytes. 
+ pub const SIZE: usize = core::mem::size_of::<Self>(); + + /// Validates the header signature and revision. + pub fn is_valid(&self) -> bool { + self.signature == SIGNATURE && self.revision <= REVISION + } + + /// Reads a header from a byte slice. + /// + /// Returns `None` if the slice is too small or misaligned. + pub fn from_bytes(bytes: &[u8]) -> Option<Self> { + Self::read_from_bytes(bytes.get(..Self::SIZE)?).ok() + } +} + +/// Response from MM Supervisor version info request. +/// +/// Returned as the payload following an [`MmSupervisorRequestHeader`] when the request +/// type is [`RequestType::VersionInfo`]. +/// +/// ## Layout +/// +/// ```text +/// Offset Size Field +/// 0x00 4 version - Supervisor version +/// 0x04 4 patch_level - Supervisor patch level +/// 0x08 8 max_supervisor_request_level - Highest supported request type +/// ``` +#[derive( + Debug, + Clone, + Copy, + zerocopy_derive::FromBytes, + zerocopy_derive::IntoBytes, + zerocopy_derive::Immutable, + zerocopy_derive::KnownLayout +)] +#[repr(C)] +pub struct MmSupervisorVersionInfo { + /// Version of the MM Supervisor. + pub version: u32, + /// Patch level. + pub patch_level: u32, + /// Maximum supported supervisor request level (highest valid request type value). + pub max_supervisor_request_level: u64, +} + +impl MmSupervisorVersionInfo { + /// Size of the version info structure in bytes. + pub const SIZE: usize = core::mem::size_of::<Self>(); + + /// Reads version info from a byte slice. + /// + /// Returns `None` if the slice is too small or misaligned. + pub fn from_bytes(bytes: &[u8]) -> Option<Self> { + Self::read_from_bytes(bytes.get(..Self::SIZE)?).ok() + } +} + +/// MM Supervisor request types. +/// +/// Each variant corresponds to a specific supervisor operation. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum RequestType { + /// Request to unblock memory regions. + UnblockMem = 0x0001, + /// Request to fetch security policy. + FetchPolicy = 0x0002, + /// Request for version information.
+ VersionInfo = 0x0003, + /// Request to update communication buffer. + CommUpdate = 0x0004, +} + +impl RequestType { + /// Highest valid request type value (the largest variant discriminant). + pub const MAX_REQUEST_TYPE: u64 = Self::CommUpdate as u64; +} + +/// Tries to convert a raw u32 value into a `RequestType`. +/// +/// Returns `Err(value)` if the value does not correspond to a valid request type. +impl TryFrom<u32> for RequestType { + type Error = u32; + + fn try_from(value: u32) -> Result<Self, Self::Error> { + match value { + 0x0001 => Ok(Self::UnblockMem), + 0x0002 => Ok(Self::FetchPolicy), + 0x0003 => Ok(Self::VersionInfo), + 0x0004 => Ok(Self::CommUpdate), + other => Err(other), + } + } +} + +impl From<RequestType> for u32 { + fn from(request_type: RequestType) -> Self { + request_type as u32 + } +} + +/// Standard MM Supervisor response types. +/// +/// Each variant corresponds to a specific response status that the supervisor can return. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ResponseType { + /// Error: Invalid request index. + InvalidRequest, + /// Error: Invalid data buffer. + InvalidDataBuffer, + /// Error: Communication buffer initialization failed. + CommBufferInitError, +} + +/// Maps `ResponseType` variants to corresponding [`efi::Status`] codes, because this is how +/// the supervisor request handlers map the [`MmSupervisorRequestHeader::result`] field in the +/// response header. +impl From<ResponseType> for efi::Status { + fn from(response_type: ResponseType) -> Self { + match response_type { + ResponseType::InvalidRequest => efi::Status::INVALID_PARAMETER, + ResponseType::InvalidDataBuffer => efi::Status::BUFFER_TOO_SMALL, + ResponseType::CommBufferInitError => efi::Status::DEVICE_ERROR, + } + } +} + +/// MM Supervisor Unblock Memory Parameters. +/// +/// Matches the C `MM_SUPERVISOR_UNBLOCK_MEMORY_PARAMS` layout. The C header +/// defines this under `#pragma pack(push, 1)`, but because `efi::MemoryDescriptor` +/// (40 bytes) and `Guid` (16 bytes) are both naturally aligned, the packed +/// and natural layouts are identical (56 bytes total).
+/// +/// ## Layout +/// +/// ```text +/// Offset Size Field +/// 0x00 40 memory_descriptor - EFI_MEMORY_DESCRIPTOR (r-efi efi::MemoryDescriptor) +/// 0x28 16 identifier_guid - Requester identification GUID +/// ``` +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct MmSupervisorUnblockMemoryParams { + /// Memory descriptor identifying the region to unblock. + pub memory_descriptor: efi::MemoryDescriptor, + /// GUID identifying the requesting driver/module. + pub identifier_guid: BinaryGuid, +} + +impl MmSupervisorUnblockMemoryParams { + /// Size of this structure in bytes. + pub const SIZE: usize = core::mem::size_of::<Self>(); +} diff --git a/sdk/patina/src/mm_services.rs b/sdk/patina/src/mm_services.rs new file mode 100644 index 000000000..4176b85de --- /dev/null +++ b/sdk/patina/src/mm_services.rs @@ -0,0 +1,370 @@ +//! MM (Management Mode) Services type definitions and trait. +//! +//! This module provides the Rust definitions for the PI `EFI_MM_SYSTEM_TABLE` +//! and an `MmServices` trait that wraps the raw C function-pointer table with +//! safe Rust method signatures, following the same pattern as +//! [`boot_services::BootServices`](crate::boot_services::BootServices). +//! +//! ## Layout +//! +//! * [`EfiMmSystemTable`] — `#[repr(C)]` struct matching the C +//! `_EFI_MM_SYSTEM_TABLE` layout from `PiMmCis.h`. +//! * [`MmServices`] — Safe Rust trait exposing the system-table services. +//! * [`StandardMmServices`] — Concrete wrapper around `*mut EfiMmSystemTable` +//! that implements `MmServices` by calling through the function pointers. +//! +//! Cores (e.g., `patina_mm_user_core`) allocate an `EfiMmSystemTable`, populate +//! its function pointers with their own `extern "efiapi"` thunks, and hand the +//! raw pointer to dispatched MM drivers. Drivers that want safe access can wrap +//! it in a `StandardMmServices`. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//!
+ +use core::ffi::c_void; + +use r_efi::efi; +use spin::Once; +use crate::pi::mm_cis::{EfiMmSystemTable, MmiHandlerEntryPoint}; + +// SAFETY: The system table is allocated once and its pointer is shared read-only +// with dispatched drivers. Internal mutation goes through synchronized databases. +unsafe impl Send for EfiMmSystemTable {} +unsafe impl Sync for EfiMmSystemTable {} + +// ============================================================================= +// StandardMmServices +// ============================================================================= + +/// Wrapper around a raw `*mut EfiMmSystemTable` pointer that implements +/// [`MmServices`] by calling through the C function-pointer table. +/// +/// This is the MM equivalent of +/// [`StandardBootServices`](crate::boot_services::StandardBootServices). +pub struct StandardMmServices { + efi_mm_system_table: Once<*mut EfiMmSystemTable>, +} + +// SAFETY: The raw pointer is only written once (protected by `Once`) and the +// underlying table is not expected to change after initialisation. +unsafe impl Sync for StandardMmServices {} +unsafe impl Send for StandardMmServices {} + +impl StandardMmServices { + /// Create a new `StandardMmServices` from an existing system table pointer. + pub fn new(mm_system_table: *mut EfiMmSystemTable) -> Self { + let this = Self::new_uninit(); + this.init(mm_system_table); + this + } + + /// Create an uninitialised instance. + pub const fn new_uninit() -> Self { + Self { efi_mm_system_table: Once::new() } + } + + /// Initialise with the given system table pointer. + pub fn init(&self, mm_system_table: *mut EfiMmSystemTable) { + self.efi_mm_system_table.call_once(|| mm_system_table); + } + + /// Returns `true` if the instance has been initialised. + pub fn is_init(&self) -> bool { + self.efi_mm_system_table.is_completed() + } + + /// Returns the raw system table pointer (panics if uninitialised). 
+ pub fn as_mut_ptr(&self) -> *mut EfiMmSystemTable { + *self.efi_mm_system_table.get().expect("StandardMmServices is not initialized!") + } +} + +impl Clone for StandardMmServices { + fn clone(&self) -> Self { + if let Some(ptr) = self.efi_mm_system_table.get() { + StandardMmServices::new(*ptr) + } else { + StandardMmServices::new_uninit() + } + } +} + +impl core::fmt::Debug for StandardMmServices { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + if !self.is_init() { + return f.debug_struct("StandardMmServices").field("table", &"Not Initialized").finish(); + } + f.debug_struct("StandardMmServices").field("table", &self.as_mut_ptr()).finish() + } +} + +// ============================================================================= +// MmServices Trait +// ============================================================================= + +/// Safe Rust interface to the MM System Table services. +/// +/// This is the MM analogue of +/// [`BootServices`](crate::boot_services::BootServices). +/// Each method maps 1:1 to a function pointer in [`EfiMmSystemTable`]. +pub trait MmServices { + // ---- Memory services ------------------------------------------------ + + /// Allocate pool memory. + /// + /// PI Spec: `EFI_MM_SYSTEM_TABLE.MmAllocatePool` + fn allocate_pool(&self, pool_type: efi::MemoryType, size: usize) -> Result<*mut u8, efi::Status>; + + /// Free pool memory. + /// + /// PI Spec: `EFI_MM_SYSTEM_TABLE.MmFreePool` + fn free_pool(&self, buffer: *mut u8) -> Result<(), efi::Status>; + + /// Allocate pages. + /// + /// PI Spec: `EFI_MM_SYSTEM_TABLE.MmAllocatePages` + fn allocate_pages( + &self, + alloc_type: efi::AllocateType, + memory_type: efi::MemoryType, + pages: usize, + ) -> Result<efi::PhysicalAddress, efi::Status>; + + /// Free pages.
+ /// + /// PI Spec: `EFI_MM_SYSTEM_TABLE.MmFreePages` + fn free_pages(&self, memory: u64, pages: usize) -> Result<(), efi::Status>; + + // ---- Protocol services ---------------------------------------------- + + /// Install a protocol interface on a handle. + /// + /// PI Spec: `EFI_MM_SYSTEM_TABLE.MmInstallProtocolInterface` + /// + /// # Safety + /// + /// `interface` must be a valid pointer to the protocol structure or null. + unsafe fn install_protocol_interface( + &self, + handle: *mut efi::Handle, + protocol: &efi::Guid, + interface_type: efi::InterfaceType, + interface: *mut c_void, + ) -> Result<(), efi::Status>; + + /// Uninstall a protocol interface from a handle. + /// + /// PI Spec: `EFI_MM_SYSTEM_TABLE.MmUninstallProtocolInterface` + /// + /// # Safety + /// + /// `interface` must match the pointer that was installed. + unsafe fn uninstall_protocol_interface( + &self, + handle: efi::Handle, + protocol: &efi::Guid, + interface: *mut c_void, + ) -> Result<(), efi::Status>; + + /// Query a handle for a protocol. + /// + /// PI Spec: `EFI_MM_SYSTEM_TABLE.MmHandleProtocol` + /// + /// # Safety + /// + /// The returned pointer must be used carefully to avoid aliasing violations. + unsafe fn handle_protocol( + &self, + handle: efi::Handle, + protocol: &efi::Guid, + ) -> Result<*mut c_void, efi::Status>; + + /// Locate the first device that supports a protocol. + /// + /// PI Spec: `EFI_MM_SYSTEM_TABLE.MmLocateProtocol` + /// + /// # Safety + /// + /// The returned pointer must be used carefully to avoid aliasing violations. + unsafe fn locate_protocol( + &self, + protocol: &efi::Guid, + ) -> Result<*mut c_void, efi::Status>; + + // ---- MMI management ------------------------------------------------- + + /// Manage (dispatch) an MMI. 
+ /// + /// PI Spec: `EFI_MM_SYSTEM_TABLE.MmiManage` + fn mmi_manage( + &self, + handler_type: Option<&efi::Guid>, + context: *const c_void, + comm_buffer: *mut c_void, + comm_buffer_size: *mut usize, + ) -> efi::Status; + + /// Register an MMI handler. + /// + /// PI Spec: `EFI_MM_SYSTEM_TABLE.MmiHandlerRegister` + fn mmi_handler_register( + &self, + handler: MmiHandlerEntryPoint, + handler_type: Option<&efi::Guid>, + ) -> Result<efi::Handle, efi::Status>; + + /// Unregister an MMI handler. + /// + /// PI Spec: `EFI_MM_SYSTEM_TABLE.MmiHandlerUnRegister` + fn mmi_handler_unregister( + &self, + dispatch_handle: efi::Handle, + ) -> Result<(), efi::Status>; +} + +// ============================================================================= +// MmServices implementation for StandardMmServices +// ============================================================================= + +impl MmServices for StandardMmServices { + fn allocate_pool(&self, pool_type: efi::MemoryType, size: usize) -> Result<*mut u8, efi::Status> { + let mmst = unsafe { &*self.as_mut_ptr() }; + let mut buffer: *mut c_void = core::ptr::null_mut(); + let status = unsafe { (mmst.mm_allocate_pool)(pool_type, size, &mut buffer) }; + if status == efi::Status::SUCCESS { + Ok(buffer as *mut u8) + } else { + Err(status) + } + } + + fn free_pool(&self, buffer: *mut u8) -> Result<(), efi::Status> { + let mmst = unsafe { &*self.as_mut_ptr() }; + let status = unsafe { (mmst.mm_free_pool)(buffer as *mut c_void) }; + if status == efi::Status::SUCCESS { Ok(()) } else { Err(status) } + } + + fn allocate_pages( + &self, + alloc_type: efi::AllocateType, + memory_type: efi::MemoryType, + pages: usize, + ) -> Result<efi::PhysicalAddress, efi::Status> { + let mmst = unsafe { &*self.as_mut_ptr() }; + let mut memory: efi::PhysicalAddress = 0; + let status = unsafe { (mmst.mm_allocate_pages)(alloc_type, memory_type, pages, &mut memory) }; + if status == efi::Status::SUCCESS { Ok(memory) } else { Err(status) } + } + + fn free_pages(&self, memory: u64, pages: usize) -> Result<(),
efi::Status> { + let mmst = unsafe { &*self.as_mut_ptr() }; + let status = unsafe { (mmst.mm_free_pages)(memory, pages) }; + if status == efi::Status::SUCCESS { Ok(()) } else { Err(status) } + } + + unsafe fn install_protocol_interface( + &self, + handle: *mut efi::Handle, + protocol: &efi::Guid, + interface_type: efi::InterfaceType, + interface: *mut c_void, + ) -> Result<(), efi::Status> { + let mmst = unsafe { &*self.as_mut_ptr() }; + let status = unsafe { + (mmst.mm_install_protocol_interface)( + handle, + protocol as *const efi::Guid as *mut efi::Guid, + interface_type, + interface, + ) + }; + if status == efi::Status::SUCCESS { Ok(()) } else { Err(status) } + } + + unsafe fn uninstall_protocol_interface( + &self, + handle: efi::Handle, + protocol: &efi::Guid, + interface: *mut c_void, + ) -> Result<(), efi::Status> { + let mmst = unsafe { &*self.as_mut_ptr() }; + let status = unsafe { + (mmst.mm_uninstall_protocol_interface)( + handle, + protocol as *const efi::Guid as *mut efi::Guid, + interface, + ) + }; + if status == efi::Status::SUCCESS { Ok(()) } else { Err(status) } + } + + unsafe fn handle_protocol( + &self, + handle: efi::Handle, + protocol: &efi::Guid, + ) -> Result<*mut c_void, efi::Status> { + let mmst = unsafe { &*self.as_mut_ptr() }; + let mut interface: *mut c_void = core::ptr::null_mut(); + let status = unsafe { + (mmst.mm_handle_protocol)( + handle, + protocol as *const efi::Guid as *mut efi::Guid, + &mut interface, + ) + }; + if status == efi::Status::SUCCESS { Ok(interface) } else { Err(status) } + } + + unsafe fn locate_protocol( + &self, + protocol: &efi::Guid, + ) -> Result<*mut c_void, efi::Status> { + let mmst = unsafe { &*self.as_mut_ptr() }; + let mut interface: *mut c_void = core::ptr::null_mut(); + let status = unsafe { + (mmst.mm_locate_protocol)( + protocol as *const efi::Guid as *mut efi::Guid, + core::ptr::null_mut(), + &mut interface, + ) + }; + if status == efi::Status::SUCCESS { Ok(interface) } else { Err(status) } + } + + 
fn mmi_manage( + &self, + handler_type: Option<&efi::Guid>, + context: *const c_void, + comm_buffer: *mut c_void, + comm_buffer_size: *mut usize, + ) -> efi::Status { + let mmst = unsafe { &*self.as_mut_ptr() }; + let guid_ptr = handler_type.map_or(core::ptr::null(), |g| g as *const efi::Guid); + unsafe { (mmst.mmi_manage)(guid_ptr, context, comm_buffer, comm_buffer_size) } + } + + fn mmi_handler_register( + &self, + handler: MmiHandlerEntryPoint, + handler_type: Option<&efi::Guid>, + ) -> Result { + let mmst = unsafe { &*self.as_mut_ptr() }; + let guid_ptr = handler_type.map_or(core::ptr::null(), |g| g as *const efi::Guid); + let mut dispatch_handle: efi::Handle = core::ptr::null_mut(); + let status = unsafe { (mmst.mmi_handler_register)(handler, guid_ptr, &mut dispatch_handle) }; + if status == efi::Status::SUCCESS { Ok(dispatch_handle) } else { Err(status) } + } + + fn mmi_handler_unregister( + &self, + dispatch_handle: efi::Handle, + ) -> Result<(), efi::Status> { + let mmst = unsafe { &*self.as_mut_ptr() }; + let status = unsafe { (mmst.mmi_handler_unregister)(dispatch_handle) }; + if status == efi::Status::SUCCESS { Ok(()) } else { Err(status) } + } +} diff --git a/sdk/patina/src/performance/globals.rs b/sdk/patina/src/performance/globals.rs index dd48d0daa..1e6ae7fd3 100644 --- a/sdk/patina/src/performance/globals.rs +++ b/sdk/patina/src/performance/globals.rs @@ -8,8 +8,9 @@ //! use crate::{ boot_services::{StandardBootServices, tpl::Tpl}, - component::service::{Service, perf_timer::ArchTimerFunctionality}, + component::service::Service, performance::table::FBPT, + timer::ArchTimerFunctionality, tpl_mutex::TplMutex, }; use core::{ diff --git a/sdk/patina/src/performance/measurement.rs b/sdk/patina/src/performance/measurement.rs index b4b8cb3d0..36fd3a9d2 100644 --- a/sdk/patina/src/performance/measurement.rs +++ b/sdk/patina/src/performance/measurement.rs @@ -6,12 +6,9 @@ //! //! SPDX-License-Identifier: Apache-2.0 //! 
-extern crate alloc; - use alloc::boxed::Box; use core::{ clone::Clone, - convert::AsRef, ffi::c_void, mem, ops::BitOr, @@ -21,7 +18,7 @@ use core::{ use crate::{ boot_services::BootServices, - component::service::{Service, perf_timer::ArchTimerFunctionality}, + component::service::Service, error::EfiError, guids::EDKII_FPDT_EXTENDED_FIRMWARE_PERFORMANCE, performance::{ @@ -38,6 +35,7 @@ use crate::{ table::FirmwareBasicBootPerfTable, }, runtime_services::RuntimeServices, + timer::ArchTimerFunctionality, tpl_mutex::TplMutex, uefi_protocol::{performance_measurement::PerfAttribute, status_code::StatusCodeRuntimeProtocol}, }; @@ -55,29 +53,25 @@ pub mod event_callback { use super::*; /// Reports the Firmware Basic Boot Performance Table (FBPT) record buffer. - pub extern "efiapi" fn report_fbpt_record_buffer( - event: efi::Event, - ctx: Box<(BB, RR, &TplMutex)>, - ) where - BB: AsRef + Clone, - B: BootServices + 'static, - RR: AsRef + Clone + 'static, - R: RuntimeServices + 'static, + pub extern "efiapi" fn report_fbpt_record_buffer(event: efi::Event, ctx: Box<(B, R, &TplMutex)>) + where + B: BootServices + Clone + 'static, + R: RuntimeServices + Clone + 'static, F: FirmwareBasicBootPerfTable, { let (boot_services, runtime_services, fbpt) = *ctx; - let _ = boot_services.as_ref().close_event(event); + let _ = boot_services.close_event(event); - let Ok(fbpt_address) = fbpt.lock().report_table( - performance::table::find_previous_table_address(runtime_services.as_ref()), - boot_services.as_ref(), - ) else { + let Ok(fbpt_address) = fbpt + .lock() + .report_table(performance::table::find_previous_table_address(&runtime_services), &boot_services) + else { log::error!("Performance: Fail to report FBPT."); return; }; // SAFETY: `p` is the only mutable reference to the `StatusCodeRuntimeProtocol` in this scope. 
- let Ok(p) = (unsafe { boot_services.as_ref().locate_protocol::(None) }) else { + let Ok(p) = (unsafe { boot_services.locate_protocol::(None) }) else { log::error!("Performance: Fail to find status code protocol."); return; }; @@ -87,7 +81,7 @@ pub mod event_callback { EFI_SOFTWARE_DXE_BS_DRIVER, 0, &mu_rust_helpers::guid::CALLER_ID, - efi::Guid::clone(&EDKII_FPDT_EXTENDED_FIRMWARE_PERFORMANCE), + efi::Guid::clone(&*EDKII_FPDT_EXTENDED_FIRMWARE_PERFORMANCE), fbpt_address, ); if status.is_err() { @@ -97,7 +91,7 @@ pub mod event_callback { // SAFETY: This operation is valid because the expected configuration type of a entry with guid `EDKII_FPDT_EXTENDED_FIRMWARE_PERFORMANCE` // is a usize and the memory address is a valid and point to an FBPT. let status = unsafe { - boot_services.as_ref().install_configuration_table_unchecked( + boot_services.install_configuration_table_unchecked( &EDKII_FPDT_EXTENDED_FIRMWARE_PERFORMANCE, fbpt_address as *mut c_void, ) @@ -318,7 +312,14 @@ where return Err(EfiError::InvalidParameter.into()); }; let module_guid = caller_identifier.as_guid().ok_or(EfiError::InvalidParameter)?; - let record = DualGuidStringEventRecord::new(perf_id, 0, timestamp, *module_guid, *guid, function_string); + let record = DualGuidStringEventRecord::new( + perf_id, + 0, + timestamp, + (*module_guid).into(), + (*guid).into(), + function_string, + ); fbpt.lock().add_record(record)?; } @@ -331,7 +332,7 @@ where | KnownPerfId::PerfEvent => { let module_guid = caller_identifier.as_guid().ok_or(EfiError::InvalidParameter)?; let string = string.unwrap_or("unknown name"); - let record = DynamicStringEventRecord::new(perf_id, 0, timestamp, *module_guid, string); + let record = DynamicStringEventRecord::new(perf_id, 0, timestamp, (*module_guid).into(), string); fbpt.lock().add_record(record)?; } } @@ -428,8 +429,8 @@ impl PerformanceProperty { fn get_module_guid_from_handle( boot_services: &impl BootServices, handle: efi::Handle, -) -> Result { - let mut guid = 
efi::Guid::from_fields(0, 0, 0, 0, 0, &[0; 6]); +) -> Result { + let mut guid = crate::guids::ZERO; let loaded_image_protocol = 'find_loaded_image_protocol: { if let Ok(loaded_image_protocol) = @@ -478,7 +479,7 @@ fn get_module_guid_from_handle( unsafe { let guid_ptr = (loaded_image.file_path as *const u8) .add(mem::size_of::()) - as *const efi::Guid; + as *const crate::BinaryGuid; guid = ptr::read(guid_ptr); } }; @@ -494,7 +495,6 @@ mod tests { use crate::{self as patina, device_path::fv_types::MediaFwVolDevicePath}; - use alloc::rc::Rc; use core::{ mem::MaybeUninit, ptr, @@ -514,6 +514,8 @@ mod tests { runtime_services::MockRuntimeServices, }; + use crate::guids::EDKII_FPDT_EXTENDED_FIRMWARE_PERFORMANCE; + #[derive(IntoService)] #[service(dyn ArchTimerFunctionality)] struct MockTimer {} @@ -554,7 +556,7 @@ mod tests { boot_services .expect_install_configuration_table_unchecked() .once() - .with(predicate::eq(&EDKII_FPDT_EXTENDED_FIRMWARE_PERFORMANCE), predicate::always()) + .with(predicate::eq(&*EDKII_FPDT_EXTENDED_FIRMWARE_PERFORMANCE), predicate::always()) .return_const(Ok(())); boot_services @@ -578,7 +580,7 @@ mod tests { event_callback::report_fbpt_record_buffer( 1_usize as efi::Event, - Box::new((Rc::new(boot_services), Rc::new(runtime_services), fbpt)), + Box::new((boot_services, runtime_services, fbpt)), ); assert!(REPORT_STATUS_CODE_CALLED.load(Ordering::Relaxed)); diff --git a/sdk/patina/src/performance/record.rs b/sdk/patina/src/performance/record.rs index 0a40ee59c..eabcfe31f 100644 --- a/sdk/patina/src/performance/record.rs +++ b/sdk/patina/src/performance/record.rs @@ -632,8 +632,6 @@ mod tests { use super::*; use core::{assert_eq, slice, unreachable}; - use r_efi::efi; - use extended::{ DualGuidStringEventRecord, DynamicStringEventRecord, GuidEventRecord, GuidQwordEventRecord, GuidQwordStringEventRecord, @@ -648,7 +646,7 @@ mod tests { #[test] fn test_performance_record_buffer_push_record() { - let guid = efi::Guid::from_bytes(&[0; 16]); + let guid 
= crate::guids::ZERO; let mut performance_record_buffer = PerformanceRecordBuffer::new(); let mut size = 0; @@ -673,7 +671,7 @@ mod tests { #[test] fn test_performance_record_buffer_iter() { - let guid = efi::Guid::from_bytes(&[0; 16]); + let guid = crate::guids::ZERO; let mut performance_record_buffer = PerformanceRecordBuffer::new(); performance_record_buffer.push_record(GuidEventRecord::new(1, 0, 10, guid)).unwrap(); @@ -711,7 +709,7 @@ mod tests { #[test] fn test_performance_record_buffer_reported_table() { - let guid = efi::Guid::from_bytes(&[0; 16]); + let guid = crate::guids::ZERO; let mut performance_record_buffer = PerformanceRecordBuffer::new(); performance_record_buffer.push_record(GuidEventRecord::new(1, 0, 10, guid)).unwrap(); diff --git a/sdk/patina/src/performance/record/extended.rs b/sdk/patina/src/performance/record/extended.rs index e62451c68..cab88e4a9 100644 --- a/sdk/patina/src/performance/record/extended.rs +++ b/sdk/patina/src/performance/record/extended.rs @@ -9,8 +9,6 @@ use core::fmt::Debug; -use r_efi::efi; - use super::PerformanceRecord; use crate::performance::error::Error; @@ -28,7 +26,7 @@ pub struct GuidEventRecord { /// 64-bit value (nanosecond) describing elapsed time since the most recent deassertion of processor reset. pub timestamp: u64, /// If ProgressID < 0x10, GUID of the referenced module; otherwise, GUID of the module logging the event. - pub guid: efi::Guid, + pub guid: crate::BinaryGuid, } impl GuidEventRecord { @@ -38,7 +36,7 @@ impl GuidEventRecord { pub const REVISION: u8 = 1; /// Creates a new `GuidEventRecord`. - pub fn new(progress_id: u16, acpi_id: u32, timestamp: u64, guid: efi::Guid) -> Self { + pub fn new(progress_id: u16, acpi_id: u32, timestamp: u64, guid: crate::BinaryGuid) -> Self { Self { progress_id, acpi_id, timestamp, guid } } } @@ -75,7 +73,7 @@ pub struct DynamicStringEventRecord<'a> { /// 64-bit value (nanosecond) describing elapsed time since the most recent deassertion of processor reset. 
pub timestamp: u64, /// If ProgressID < 0x10, GUID of the referenced module; otherwise, GUID of the module logging the event. - pub guid: efi::Guid, + pub guid: crate::BinaryGuid, /// ASCII string describing the module. Padding supplied at the end if necessary with null characters (0x00). /// It may be module name, function name, or token name. pub string: &'a str, @@ -88,7 +86,7 @@ impl<'a> DynamicStringEventRecord<'a> { pub const REVISION: u8 = 1; /// Creates a new `DynamicStringEventRecord`. - pub fn new(progress_id: u16, acpi_id: u32, timestamp: u64, guid: efi::Guid, string: &'a str) -> Self { + pub fn new(progress_id: u16, acpi_id: u32, timestamp: u64, guid: crate::BinaryGuid, string: &'a str) -> Self { Self { progress_id, acpi_id, timestamp, guid, string } } } @@ -127,9 +125,9 @@ pub struct DualGuidStringEventRecord<'a> { /// 64-bit value (nanosecond) describing elapsed time since the most recent deassertion of processor reset. pub timestamp: u64, /// GUID of the module logging the event. - pub guid_1: efi::Guid, + pub guid_1: crate::BinaryGuid, /// Event or Ppi or Protocol GUID for Callback. - pub guid_2: efi::Guid, + pub guid_2: crate::BinaryGuid, /// ASCII string describing the module. /// It is the function name. pub string: &'a str, @@ -146,8 +144,8 @@ impl<'a> DualGuidStringEventRecord<'a> { progress_id: u16, acpi_id: u32, timestamp: u64, - guid_1: efi::Guid, - guid_2: efi::Guid, + guid_1: crate::BinaryGuid, + guid_2: crate::BinaryGuid, string: &'a str, ) -> Self { Self { progress_id, acpi_id, timestamp, guid_1, guid_2, string } @@ -189,7 +187,7 @@ pub struct GuidQwordEventRecord { /// 64-bit value (nanosecond) describing elapsed time since the most recent deassertion of processor reset. pub timestamp: u64, /// GUID of the module logging the event. - pub guid: efi::Guid, + pub guid: crate::BinaryGuid, /// Qword of misc data, meaning depends on the ProgressId. 
pub qword: u64, } @@ -201,7 +199,7 @@ impl GuidQwordEventRecord { pub const REVISION: u8 = 1; /// Creates a new `GuidQwordEventRecord`. - pub fn new(progress_id: u16, acpi_id: u32, timestamp: u64, guid: efi::Guid, qword: u64) -> Self { + pub fn new(progress_id: u16, acpi_id: u32, timestamp: u64, guid: crate::BinaryGuid, qword: u64) -> Self { Self { progress_id, acpi_id, timestamp, guid, qword } } } @@ -239,7 +237,7 @@ pub struct GuidQwordStringEventRecord<'a> { /// 64-bit value (nanosecond) describing elapsed time since the most recent deassertion of processor reset. pub timestamp: u64, /// GUID of the module logging the event - pub guid: efi::Guid, + pub guid: crate::BinaryGuid, /// Qword of misc data, meaning depends on the ProgressId pub qword: u64, /// ASCII string describing the module. @@ -253,7 +251,14 @@ impl<'a> GuidQwordStringEventRecord<'a> { pub const REVISION: u8 = 1; /// Creates a new `GuidQwordStringEventRecord`. - pub fn new(progress_id: u16, acpi_id: u32, timestamp: u64, guid: efi::Guid, qword: u64, string: &'a str) -> Self { + pub fn new( + progress_id: u16, + acpi_id: u32, + timestamp: u64, + guid: crate::BinaryGuid, + qword: u64, + string: &'a str, + ) -> Self { Self { progress_id, acpi_id, timestamp, guid, qword, string } } } diff --git a/sdk/patina/src/performance/record/hob.rs b/sdk/patina/src/performance/record/hob.rs index 5250626a6..a0c3727dc 100644 --- a/sdk/patina/src/performance/record/hob.rs +++ b/sdk/patina/src/performance/record/hob.rs @@ -40,8 +40,7 @@ pub struct HobPerformanceData { } impl FromHob for HobPerformanceData { - const HOB_GUID: crate::OwnedGuid = - crate::Guid::from_fields(0x3b387bfd, 0x7abc, 0x4cf2, 0xa0, 0xca, [0xb6, 0xa1, 0x6c, 0x1b, 0x1b, 0x25]); + const HOB_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("3B387BFD-7ABC-4CF2-A0CA-B6A16C1B1B25"); fn parse(bytes: &[u8]) -> HobPerformanceData { let mut offset = 0; diff --git a/sdk/patina/src/performance/table.rs b/sdk/patina/src/performance/table.rs index 
0a7911ec3..3d68e161b 100644 --- a/sdk/patina/src/performance/table.rs +++ b/sdk/patina/src/performance/table.rs @@ -32,7 +32,6 @@ use crate::{ runtime_services::RuntimeServices, }; -use r_efi::efi; use scroll::Pwrite; /// The number of extra space in byte that will be allocated when publishing the performance buffer. @@ -211,8 +210,8 @@ pub struct FirmwarePerformanceVariable { } impl FirmwarePerformanceVariable { - const ADDRESS_VARIABLE_GUID: efi::Guid = - efi::Guid::from_fields(0xc095791a, 0x3001, 0x47b2, 0x80, 0xc9, &[0xea, 0xc7, 0x31, 0x9f, 0x2f, 0xa4]); + const ADDRESS_VARIABLE_GUID: crate::BinaryGuid = + crate::BinaryGuid::from_string("C095791A-3001-47B2-80C9-EAC7319F2FA4"); } impl TryFrom> for FirmwarePerformanceVariable { @@ -380,7 +379,7 @@ mod tests { .returning(move |_, _, _| Ok(address)); let mut fbpt = FBPT::new(); - let guid = efi::Guid::from_bytes(&[0; 16]); + let guid = crate::guids::ZERO; fbpt.add_record(GuidEventRecord::new(1, 0, 10, guid)).unwrap(); fbpt.add_record(DynamicStringEventRecord::new(1, 0, 10, guid, "test")).unwrap(); @@ -437,7 +436,7 @@ mod tests { .returning(move |_, _, _| Ok(address)); let mut fbpt = FBPT::new(); - let guid = efi::Guid::from_bytes(&[0; 16]); + let guid = crate::guids::ZERO; fbpt.add_record(GuidEventRecord::new(1, 0, 10, guid)).unwrap(); fbpt.add_record(DynamicStringEventRecord::new(1, 0, 10, guid, "test")).unwrap(); @@ -465,7 +464,7 @@ mod tests { .returning(move |_, _, _| Ok(address)); let mut fbpt = FBPT::new(); - let guid = efi::Guid::from_bytes(&[0; 16]); + let guid = crate::guids::ZERO; fbpt.add_record(GuidEventRecord::new(1, 0, 10, guid)).unwrap(); fbpt.add_record(DynamicStringEventRecord::new(1, 0, 10, guid, "test")).unwrap(); diff --git a/sdk/patina/src/pi.rs b/sdk/patina/src/pi.rs index dd108358c..aa41fea47 100644 --- a/sdk/patina/src/pi.rs +++ b/sdk/patina/src/pi.rs @@ -31,12 +31,15 @@ mod boot_mode; pub mod dxe_services; pub mod error_codes; +#[cfg(any(test, feature = "alloc"))] pub mod fw_fs; pub mod 
hob; pub mod list_entry; +pub mod mm_cis; pub mod protocols; #[cfg(feature = "serde")] pub mod serializable; +pub mod spec_version; pub mod status_code; pub use boot_mode::Mode as BootMode; diff --git a/sdk/patina/src/pi/dxe_services.rs b/sdk/patina/src/pi/dxe_services.rs index a021f78ae..98dbca0eb 100644 --- a/sdk/patina/src/pi/dxe_services.rs +++ b/sdk/patina/src/pi/dxe_services.rs @@ -32,8 +32,8 @@ use r_efi::{ /// Configuration Table array. The DXE Services Table provides services for /// managing the Global Coherency Domain memory and I/O space maps, /// and dispatcher functions for managing driver execution dependencies. -pub const DXE_SERVICES_TABLE_GUID: Guid = - Guid::from_fields(0x5ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, &[0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9]); +pub const DXE_SERVICES_TABLE_GUID: crate::BinaryGuid = + crate::BinaryGuid::from_string("05AD34BA-6F02-4214-952E-4DA0398E2BB9"); /// Adds memory or memory-mapped I/O resources to the Global Coherency Domain (GCD) /// diff --git a/sdk/patina/src/pi/fw_fs.rs b/sdk/patina/src/pi/fw_fs.rs index 660cf80b9..bbd9071ec 100644 --- a/sdk/patina/src/pi/fw_fs.rs +++ b/sdk/patina/src/pi/fw_fs.rs @@ -14,9 +14,6 @@ //! //! SPDX-License-Identifier: Apache-2.0 //! - -extern crate alloc; - use core::{fmt, mem, num::Wrapping, slice}; pub mod ffs; @@ -43,7 +40,7 @@ pub use fv::{ pub use fvb::attributes::{EfiFvbAttributes2, Fvb2 as Fvb2Attributes, raw::fvb2 as Fvb2RawAttributes}; use zerocopy::FromBytes; -use crate::base::align_up; +use crate::{BinaryGuid, base::align_up}; use alloc::{boxed::Box, collections::VecDeque, vec::Vec}; use num_traits::WrappingSub; use r_efi::efi; @@ -53,26 +50,22 @@ use r_efi::efi; /// These GUIDs identify the compression algorithm or encapsulation format used in GUID-defined /// sections within the Firmware File System. Based on the PI Specification Volume 3. pub mod guid { - use r_efi::efi; - /// GUID for Brotli compressed sections. 
- pub const BROTLI_SECTION: efi::Guid = - efi::Guid::from_fields(0x3D532050, 0x5CDA, 0x4FD0, 0x87, 0x9E, &[0x0F, 0x7F, 0x63, 0x0D, 0x5A, 0xFB]); + pub const BROTLI_SECTION: crate::BinaryGuid = + crate::BinaryGuid::from_string("3D532050-5CDA-4FD0-879E-0F7F630D5AFB"); /// GUID for CRC32 checksum sections. - pub const CRC32_SECTION: efi::Guid = - efi::Guid::from_fields(0xFC1BCDB0, 0x7D31, 0x49aa, 0x93, 0x6A, &[0xA4, 0x60, 0x0D, 0x9D, 0xD0, 0x83]); + pub const CRC32_SECTION: crate::BinaryGuid = crate::BinaryGuid::from_string("FC1BCDB0-7D31-49AA-936A-A4600D9DD083"); /// GUID for LZMA compressed sections. - pub const LZMA_SECTION: efi::Guid = - efi::Guid::from_fields(0xEE4E5898, 0x3914, 0x4259, 0x9D, 0x6E, &[0xDC, 0x7B, 0xD7, 0x94, 0x03, 0xCF]); + pub const LZMA_SECTION: crate::BinaryGuid = crate::BinaryGuid::from_string("EE4E5898-3914-4259-9D6E-DC7BD79403CF"); /// GUID for LZMA F86 compressed sections. - pub const LZMA_F86_SECTION: efi::Guid = - efi::Guid::from_fields(0xD42AE6BD, 0x1352, 0x4BFB, 0x90, 0x9A, &[0xCA, 0x72, 0xA6, 0xEA, 0xE8, 0x89]); + pub const LZMA_F86_SECTION: crate::BinaryGuid = + crate::BinaryGuid::from_string("D42AE6BD-1352-4BFB-909A-CA72A6EAE889"); /// GUID for LZMA parallel compressed sections. - pub const LZMA_PARALLEL_SECTION: efi::Guid = - efi::Guid::from_fields(0xBD9921EA, 0xED91, 0x404A, 0x8B, 0x2F, &[0xB4, 0xD7, 0x24, 0x74, 0x7C, 0x8C]); + pub const LZMA_PARALLEL_SECTION: crate::BinaryGuid = + crate::BinaryGuid::from_string("BD9921EA-ED91-404A-8B2F-B4D724747C8C"); /// GUID for Tiano decompression sections. - pub const TIANO_DECOMPRESS_SECTION: efi::Guid = - efi::Guid::from_fields(0xA31280AD, 0x481E, 0x41B6, 0x95, 0xE8, &[0x12, 0x7F, 0x4C, 0x98, 0x47, 0x79]); + pub const TIANO_DECOMPRESS_SECTION: crate::BinaryGuid = + crate::BinaryGuid::from_string("A31280AD-481E-41B6-95E8-127F4C984779"); } /// Defines an interface that can be implemented to provide extraction logic for encapsulation sections. 
@@ -344,7 +337,7 @@ impl<'a> FirmwareVolume<'a> { } /// Returns the GUID name of the FV, if any. - pub fn fv_name(&self) -> Option { + pub fn fv_name(&self) -> Option { self.ext_header.as_ref().map(|ext_header| ext_header.header.fv_name) } @@ -428,7 +421,7 @@ impl fmt::Debug for FirmwareVolume<'_> { #[derive(Clone)] pub struct File<'a> { data: &'a [u8], - name: efi::Guid, + name: BinaryGuid, file_type: u8, attributes: u8, header_size: usize, @@ -588,7 +581,7 @@ impl<'a> File<'a> { } /// Returns the file name GUID. - pub fn name(&self) -> efi::Guid { + pub fn name(&self) -> BinaryGuid { self.name } @@ -1230,7 +1223,7 @@ mod unit_tests { let fv_header = fv_bytes.as_mut_ptr() as *mut fv::Header; // SAFETY: Test intentionally corrupts FV header to validate error handling unsafe { - (*fv_header).file_system_guid = efi::Guid::from_bytes(&[0xa5; 16]); + (*fv_header).file_system_guid = crate::BinaryGuid::from(efi::Guid::from_bytes(&[0xa5; 16])); }; assert_eq!(FirmwareVolume::new(&fv_bytes).unwrap_err(), efi::Status::VOLUME_CORRUPTED); diff --git a/sdk/patina/src/pi/fw_fs/ffs/file.rs b/sdk/patina/src/pi/fw_fs/ffs/file.rs index f2aabc4ed..ea4dc8a8c 100644 --- a/sdk/patina/src/pi/fw_fs/ffs/file.rs +++ b/sdk/patina/src/pi/fw_fs/ffs/file.rs @@ -10,8 +10,6 @@ //! SPDX-License-Identifier: Apache-2.0 //! -use r_efi::efi; - /// Raw FFS file constant definitions pub mod raw { /// File State Bits @@ -161,7 +159,7 @@ pub enum State { /// Firmware file header structure per PI Specification pub struct Header { /// Unique file GUID identifier - pub name: efi::Guid, + pub name: crate::BinaryGuid, /// Header checksum value pub integrity_check_header: u8, /// File checksum value diff --git a/sdk/patina/src/pi/fw_fs/ffs/guid.rs b/sdk/patina/src/pi/fw_fs/ffs/guid.rs index 6d2e624ca..8796f9cbe 100644 --- a/sdk/patina/src/pi/fw_fs/ffs/guid.rs +++ b/sdk/patina/src/pi/fw_fs/ffs/guid.rs @@ -9,19 +9,17 @@ //! SPDX-License-Identifier: Apache-2.0 //! 
-use r_efi::efi; - // {8C8CE578-8A3D-4F1C-9935-896185C32DD3} /// Firmware File System version 2 GUID identifier per PI Specification -pub const EFI_FIRMWARE_FILE_SYSTEM2_GUID: efi::Guid = - efi::Guid::from_fields(0x8c8ce578, 0x8a3d, 0x4f1c, 0x99, 0x35, &[0x89, 0x61, 0x85, 0xc3, 0x2d, 0xd3]); +pub const EFI_FIRMWARE_FILE_SYSTEM2_GUID: crate::BinaryGuid = + crate::BinaryGuid::from_string("8C8CE578-8A3D-4F1C-9935-896185C32DD3"); // {5473C07A-3DCB-4DCA-BD6F-1E9689E7349A} /// Firmware File System version 3 GUID identifier per PI Specification -pub const EFI_FIRMWARE_FILE_SYSTEM3_GUID: efi::Guid = - efi::Guid::from_fields(0x5473c07a, 0x3dcb, 0x4dca, 0xbd, 0x6f, &[0x1e, 0x96, 0x89, 0xe7, 0x34, 0x9a]); +pub const EFI_FIRMWARE_FILE_SYSTEM3_GUID: crate::BinaryGuid = + crate::BinaryGuid::from_string("5473C07A-3DCB-4DCA-BD6F-1E9689E7349A"); // {1BA0062E-C779-4582-8566-336AE8F78F09} /// GUID for the file at the top of a firmware volume -pub const EFI_FFS_VOLUME_TOP_FILE_GUID: efi::Guid = - efi::Guid::from_fields(0x1ba0062e, 0xc779, 0x4582, 0x85, 0x66, &[0x33, 0x6a, 0xe8, 0xf7, 0x8f, 0x9]); +pub const EFI_FFS_VOLUME_TOP_FILE_GUID: crate::BinaryGuid = + crate::BinaryGuid::from_string("1BA0062E-C779-4582-8566-336AE8F78F09"); diff --git a/sdk/patina/src/pi/fw_fs/ffs/section.rs b/sdk/patina/src/pi/fw_fs/ffs/section.rs index 2ab40362f..bc6555cb5 100644 --- a/sdk/patina/src/pi/fw_fs/ffs/section.rs +++ b/sdk/patina/src/pi/fw_fs/ffs/section.rs @@ -119,7 +119,6 @@ pub struct Header { /// Section header structures and definitions pub mod header { - use r_efi::efi; use zerocopy_derive::{FromBytes, KnownLayout, Unaligned}; #[repr(C, packed)] @@ -167,7 +166,7 @@ pub mod header { /// GUID-defined section header pub struct GuidDefined { /// GUID identifying the section format - pub section_definition_guid: efi::Guid, + pub section_definition_guid: crate::BinaryGuid, /// Offset to section data from start of header pub data_offset: u16, /// Section attributes @@ -191,6 +190,6 @@ pub mod header { 
/// Freeform GUID subtype section header pub struct FreeformSubtypeGuid { /// Subtype GUID identifier - pub sub_type_guid: efi::Guid, + pub sub_type_guid: crate::BinaryGuid, } } diff --git a/sdk/patina/src/pi/fw_fs/fv.rs b/sdk/patina/src/pi/fw_fs/fv.rs index c211b738c..9a95794db 100644 --- a/sdk/patina/src/pi/fw_fs/fv.rs +++ b/sdk/patina/src/pi/fw_fs/fv.rs @@ -48,7 +48,7 @@ pub struct Header { /// First 16 bytes are zeros for compatibility pub zero_vector: [u8; 16], /// File system type GUID - pub file_system_guid: r_efi::efi::Guid, + pub file_system_guid: crate::BinaryGuid, /// Total volume length in bytes pub fv_length: u64, /// Firmware volume signature @@ -85,7 +85,7 @@ pub struct BlockMapEntry { /// Extended firmware volume header pub struct ExtHeader { /// Firmware volume name GUID - pub fv_name: r_efi::efi::Guid, + pub fv_name: crate::BinaryGuid, /// Size of this extended header pub ext_header_size: u32, } diff --git a/sdk/patina/src/pi/hob.rs b/sdk/patina/src/pi/hob.rs index e28f79f82..0850dd406 100644 --- a/sdk/patina/src/pi/hob.rs +++ b/sdk/patina/src/pi/hob.rs @@ -6,16 +6,11 @@ //! HOBs describe the physical memory layout, CPU information, firmware volumes, //! and other platform-specific data that DXE needs to continue the boot process. //! -//! The HOB list is a contiguous list of HOB structures, each with a common header -//! followed by type-specific data. Typically, the PEI Foundation creates and manages -//! the HOB list during the PEI phase, and it is passed to the DXE Foundation -//! during the PEI-to-DXE handoff. -//! //! Based on the UEFI Platform Initialization Specification Volume III. //! //! ## Example //! ``` -//! use patina::pi::{BootMode, hob, hob::Hob, hob::HobList}; +//! use patina::pi::{BootMode, hob, hob::Hob}; //! use core::mem::size_of; //! //! // Generate HOBs to initialize a new HOB list @@ -32,8 +27,8 @@ //! header, //! base_address: 0, //! length: 0x0123456789abcdef, -//! 
fv_name: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), -//! file_name: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), +//! fv_name: patina::BinaryGuid::from_string("00000001-0002-0003-0405-060708090A0B"), +//! file_name: patina::BinaryGuid::from_string("00000001-0002-0003-0405-060708090A0B"), //! } //! } //! @@ -60,14 +55,6 @@ //! let capsule = gen_capsule(); //! let firmware_volume2 = gen_firmware_volume2(); //! let end_of_hob_list = gen_end_of_hoblist(); -//! -//! // Create a new empty HOB list -//! let mut hoblist = HobList::new(); -//! -//! // Push the example HOBs onto the HOB list -//! hoblist.push(Hob::Capsule(&capsule)); -//! hoblist.push(Hob::FirmwareVolume2(&firmware_volume2)); -//! hoblist.push(Hob::Handoff(&end_of_hob_list)); //! ``` //! //! ## License @@ -77,22 +64,21 @@ //! SPDX-License-Identifier: Apache-2.0 //! -use crate::{ - base::{align_down, align_up}, - pi::BootMode, -}; +use crate::pi::BootMode; use core::{ ffi::c_void, - fmt, marker::PhantomData, mem::{self, size_of}, slice, }; -use indoc::indoc; -// Expectation is someone will provide alloc -extern crate alloc; -use alloc::{boxed::Box, vec::Vec}; +// if alloc is available, export the hob list module +#[cfg(any(test, feature = "alloc"))] +pub mod hob_list; + +// export hob_list::HobList as HobList if alloc is available +#[cfg(any(test, feature = "alloc"))] +pub use hob_list::HobList; // If the target is x86_64, then EfiPhysicalAddress is u64 #[cfg(target_arch = "x86_64")] @@ -196,7 +182,7 @@ pub mod header { /// indicate additional data structures that follow this header. Well-known /// GUIDs include allocations for stack, BSP store, and module images. /// - pub name: r_efi::base::Guid, + pub name: crate::BinaryGuid, /// The base address of memory allocated by this HOB. 
/// This is the physical address where the memory allocation begins, @@ -351,7 +337,7 @@ pub struct MemoryAllocationModule { /// The GUID specifying the values of the firmware file system name /// that contains the HOB consumer phase component. /// - pub module_name: r_efi::base::Guid, // EFI_GUID + pub module_name: crate::BinaryGuid, /// The address of the memory-mapped firmware volume /// that contains the HOB consumer phase firmware file. @@ -538,7 +524,7 @@ pub struct ResourceDescriptor { /// This GUID is used by HOB consumer phase components to correlate device /// ownership of a resource. /// - pub owner: r_efi::base::Guid, + pub owner: crate::BinaryGuid, /// Resource type enumeration as defined by EFI_RESOURCE_TYPE. /// Identifies whether this resource is system memory, memory-mapped I/O, @@ -618,7 +604,7 @@ pub struct GuidHob { /// A GUID that defines the contents of this HOB. /// - pub name: r_efi::base::Guid, + pub name: crate::BinaryGuid, // Guid specific data goes here // } @@ -676,11 +662,11 @@ pub struct FirmwareVolume2 { /// The name of the firmware volume. /// - pub fv_name: r_efi::base::Guid, + pub fv_name: crate::BinaryGuid, /// The name of the firmware file which contained this firmware volume. /// - pub file_name: r_efi::base::Guid, + pub file_name: crate::BinaryGuid, } /// Details the location of a firmware volume including authentication information, @@ -723,12 +709,12 @@ pub struct FirmwareVolume3 { /// The name GUID of the firmware volume. /// Valid only if IsExtractedFv is TRUE. /// - pub fv_name: r_efi::base::Guid, + pub fv_name: crate::BinaryGuid, /// The name GUID of the firmware file which contained this firmware volume. /// Valid only if IsExtractedFv is TRUE. /// - pub file_name: r_efi::base::Guid, + pub file_name: crate::BinaryGuid, } /// Describes processor information, such as address space and I/O space capabilities. @@ -784,18 +770,6 @@ pub struct Capsule { pub length: u64, } -/// Represents a HOB list. 
-/// -/// This is a parsed Rust representation of the HOB list that provides better type safety and ergonomics but does not -/// have binary compatibility with the original PI Spec HOB list structure. -pub struct HobList<'a>(Vec>); - -impl Default for HobList<'_> { - fn default() -> Self { - HobList::new() - } -} - /// Union of all the possible HOB Types. /// #[derive(Clone, Debug)] @@ -922,494 +896,6 @@ pub unsafe fn get_pi_hob_list_size(hob_list: *const c_void) -> usize { hob_list_len } -impl<'a> HobList<'a> { - /// Instantiates a Hoblist. - pub const fn new() -> Self { - HobList(Vec::new()) - } - - /// Implements iter for Hoblist. - /// - /// # Example(s) - /// - /// ```no_run - /// use core::ffi::c_void; - /// use patina::pi::hob::HobList; - /// - /// fn example(hob_list: *const c_void) { - /// // example discovering and adding hobs to a hob list - /// let mut the_hob_list = HobList::default(); - /// the_hob_list.discover_hobs(hob_list); - /// - /// for hob in the_hob_list.iter() { - /// // ... do something with the hob(s) - /// } - /// } - /// ``` - pub fn iter(&self) -> impl Iterator> { - self.0.iter() - } - - /// Returns a mutable pointer to the underlying data. - /// - /// # Example(s) - /// - /// ```no_run - /// use core::ffi::c_void; - /// use patina::pi::hob::HobList; - /// - /// fn example(hob_list: *const c_void) { - /// // example discovering and adding hobs to a hob list - /// let mut the_hob_list = HobList::default(); - /// the_hob_list.discover_hobs(hob_list); - /// - /// let ptr: *mut c_void = the_hob_list.as_mut_ptr(); - /// // ... do something with the pointer - /// } - /// ``` - pub fn as_mut_ptr(&mut self) -> *mut T { - self.0.as_mut_ptr() as *mut T - } - - /// Returns the size of the Hoblist in bytes. 
- /// - /// # Example(s) - /// - /// ```no_run - /// use core::ffi::c_void; - /// use patina::pi::hob::HobList; - /// - /// fn example(hob_list: *const c_void) { - /// // example discovering and adding hobs to a hob list - /// let mut the_hob_list = HobList::default(); - /// the_hob_list.discover_hobs(hob_list); - /// - /// let length = the_hob_list.size(); - /// println!("size_of_hobs: {:?}", length); - /// } - pub fn size(&self) -> usize { - let mut size_of_hobs = 0; - - for hob in self.iter() { - size_of_hobs += hob.size() - } - - size_of_hobs - } - - /// Implements len for Hoblist. - /// Returns the number of hobs in the list. - /// - /// # Example(s) - /// ```no_run - /// use core::ffi::c_void; - /// use patina::pi::hob::HobList; - /// - /// fn example(hob_list: *const c_void) { - /// // example discovering and adding hobs to a hob list - /// let mut the_hob_list = HobList::default(); - /// the_hob_list.discover_hobs(hob_list); - /// - /// let length = the_hob_list.len(); - /// println!("length_of_hobs: {:?}", length); - /// } - /// ``` - pub fn len(&self) -> usize { - self.0.len() - } - - /// Implements is_empty for Hoblist. - /// Returns true if the list is empty. - /// - /// # Example(s) - /// ```no_run - /// use core::ffi::c_void; - /// use patina::pi::hob::HobList; - /// - /// fn example(hob_list: *const c_void) { - /// // example discovering and adding hobs to a hob list - /// let mut the_hob_list = HobList::default(); - /// the_hob_list.discover_hobs(hob_list); - /// - /// let is_empty = the_hob_list.is_empty(); - /// println!("is_empty: {:?}", is_empty); - /// } - /// ``` - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Implements push for Hoblist. 
- /// - /// Parameters: - /// * hob: Hob<'a> - the hob to add to the list - /// - /// # Example(s) - /// ```no_run - /// use core::{ffi::c_void, mem::size_of}; - /// use patina::pi::hob::{HobList, Hob, header, FirmwareVolume, FV}; - /// - /// fn example(hob_list: *const c_void) { - /// // example discovering and adding hobs to a hob list - /// let mut the_hob_list = HobList::default(); - /// the_hob_list.discover_hobs(hob_list); - /// - /// // example pushing a hob onto the list - /// let header = header::Hob { - /// r#type: FV, - /// length: size_of::() as u16, - /// reserved: 0, - /// }; - /// - /// let firmware_volume = FirmwareVolume { - /// header, - /// base_address: 0, - /// length: 0x0123456789abcdef, - /// }; - /// - /// let hob = Hob::FirmwareVolume(&firmware_volume); - /// the_hob_list.push(hob); - /// } - /// ``` - pub fn push(&mut self, hob: Hob<'a>) { - let cloned_hob = hob.clone(); - self.0.push(cloned_hob); - } - - /// Discovers hobs from a C style void* and adds them to a rust structure. - /// - /// # Example(s) - /// - /// ```no_run - /// use core::ffi::c_void; - /// use patina::pi::hob::HobList; - /// - /// fn example(hob_list: *const c_void) { - /// // example discovering and adding hobs to a hob list - /// let mut the_hob_list = HobList::default(); - /// the_hob_list.discover_hobs(hob_list); - /// } - /// ``` - pub fn discover_hobs(&mut self, hob_list: *const c_void) { - const NOT_NULL: &str = "Ptr should not be NULL"; - fn assert_hob_size(hob: &header::Hob) { - let hob_len = hob.length as usize; - let hob_size = mem::size_of::(); - assert_eq!( - hob_len, hob_size, - "Trying to cast hob of length {hob_len} into a pointer of size {hob_size}. Hob type: {:?}", - hob.r#type - ); - } - - let mut hob_header: *const header::Hob = hob_list as *const header::Hob; - - loop { - // SAFETY: hob_header points to valid HOB data provided by firmware. Each HOB has a valid header. 
- let current_header = unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; - match current_header.r#type { - HANDOFF => { - assert_hob_size::(current_header); - // SAFETY: HOB type is HANDOFF and size was validated. Cast to specific HOB type is valid. - let phit_hob = - unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; - self.0.push(Hob::Handoff(phit_hob)); - } - MEMORY_ALLOCATION => { - if current_header.length == mem::size_of::() as u16 { - // SAFETY: HOB type is MEMORY_ALLOCATION with correct size for Module variant. - let mem_alloc_hob = - unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; - self.0.push(Hob::MemoryAllocationModule(mem_alloc_hob)); - } else { - assert_hob_size::(current_header); - // SAFETY: HOB type is MEMORY_ALLOCATION and size was validated. - let mem_alloc_hob = unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; - self.0.push(Hob::MemoryAllocation(mem_alloc_hob)); - } - } - RESOURCE_DESCRIPTOR => { - assert_hob_size::(current_header); - // SAFETY: HOB type is RESOURCE_DESCRIPTOR and size was validated. - let resource_desc_hob = - unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; - self.0.push(Hob::ResourceDescriptor(resource_desc_hob)); - } - GUID_EXTENSION => { - // SAFETY: HOB type is GUID_EXTENSION. GuidHob header is valid, and data follows immediately after. - // Data length is calculated from HOB length minus header size. Pointer arithmetic is within HOB bounds. - let (guid_hob, data) = unsafe { - let hob = hob_header.cast::().as_ref().expect(NOT_NULL); - let data_ptr = hob_header.byte_add(mem::size_of::()) as *mut u8; - let data_len = hob.header.length as usize - mem::size_of::(); - (hob, slice::from_raw_parts(data_ptr, data_len)) - }; - self.0.push(Hob::GuidHob(guid_hob, data)); - } - FV => { - assert_hob_size::(current_header); - // SAFETY: HOB type is FV and size was validated. 
- let fv_hob = unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; - self.0.push(Hob::FirmwareVolume(fv_hob)); - } - FV2 => { - assert_hob_size::(current_header); - // SAFETY: HOB type is FV2 and size was validated. - let fv2_hob = unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; - self.0.push(Hob::FirmwareVolume2(fv2_hob)); - } - FV3 => { - assert_hob_size::(current_header); - // SAFETY: HOB type is FV3 and size was validated. - let fv3_hob = unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; - self.0.push(Hob::FirmwareVolume3(fv3_hob)); - } - CPU => { - assert_hob_size::(current_header); - // SAFETY: HOB type is CPU and size was validated. - let cpu_hob = unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; - self.0.push(Hob::Cpu(cpu_hob)); - } - UEFI_CAPSULE => { - assert_hob_size::(current_header); - // SAFETY: HOB type is UEFI_CAPSULE and size was validated. - let capsule_hob = unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; - self.0.push(Hob::Capsule(capsule_hob)); - } - RESOURCE_DESCRIPTOR2 => { - assert_hob_size::(current_header); - // SAFETY: HOB type is RESOURCE_DESCRIPTOR2 and size was validated. - let resource_desc_hob = - unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; - self.0.push(Hob::ResourceDescriptorV2(resource_desc_hob)); - } - END_OF_HOB_LIST => { - break; - } - _ => { - self.0.push(Hob::Misc(current_header.r#type)); - } - } - let next_hob = hob_header as usize + current_header.length as usize; - hob_header = next_hob as *const header::Hob; - } - } - - /// Relocates all HOBs in the list to new memory locations. - /// - /// This function creates new instances of each HOB in the list and updates the list to point to these new instances. 
- /// - /// # Example(s) - /// - /// ```no_run - /// use core::ffi::c_void; - /// use patina::pi::hob::HobList; - /// - /// fn example(hob_list: *const c_void) { - /// // example discovering and adding hobs to a hob list - /// let mut the_hob_list = HobList::default(); - /// the_hob_list.discover_hobs(hob_list); - /// - /// // relocate hobs to new memory locations - /// the_hob_list.relocate_hobs(); - /// } - /// ``` - pub fn relocate_hobs(&mut self) { - for hob in self.0.iter_mut() { - match hob { - Hob::Handoff(hob) => *hob = Box::leak(Box::new(PhaseHandoffInformationTable::clone(hob))), - Hob::MemoryAllocation(hob) => *hob = Box::leak(Box::new(MemoryAllocation::clone(hob))), - Hob::MemoryAllocationModule(hob) => *hob = Box::leak(Box::new(MemoryAllocationModule::clone(hob))), - Hob::Capsule(hob) => *hob = Box::leak(Box::new(Capsule::clone(hob))), - Hob::ResourceDescriptor(hob) => *hob = Box::leak(Box::new(ResourceDescriptor::clone(hob))), - Hob::GuidHob(hob, data) => { - *hob = Box::leak(Box::new(GuidHob::clone(hob))); - *data = Box::leak(data.to_vec().into_boxed_slice()); - } - Hob::FirmwareVolume(hob) => *hob = Box::leak(Box::new(FirmwareVolume::clone(hob))), - Hob::FirmwareVolume2(hob) => *hob = Box::leak(Box::new(FirmwareVolume2::clone(hob))), - Hob::FirmwareVolume3(hob) => *hob = Box::leak(Box::new(FirmwareVolume3::clone(hob))), - Hob::Cpu(hob) => *hob = Box::leak(Box::new(Cpu::clone(hob))), - Hob::ResourceDescriptorV2(hob) => *hob = Box::leak(Box::new(ResourceDescriptorV2::clone(hob))), - Hob::Misc(_) => (), // Data is owned in Misc (nothing to move), - }; - } - } -} - -/// Implements IntoIterator for HobList. -/// -/// Defines how it will be converted to an iterator. 
-impl<'a> IntoIterator for HobList<'a> { - type Item = Hob<'a>; - type IntoIter = > as IntoIterator>::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } -} - -impl<'a> IntoIterator for &'a HobList<'a> { - type Item = &'a Hob<'a>; - type IntoIter = core::slice::Iter<'a, Hob<'a>>; - - fn into_iter(self) -> Self::IntoIter { - self.0.iter() - } -} - -/// Implements Debug for Hoblist. -/// -/// Writes Hoblist debug information to stdio -/// -impl fmt::Debug for HobList<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - for hob in self.0.clone().into_iter() { - match hob { - Hob::Handoff(hob) => { - write!( - f, - indoc! {" - PHASE HANDOFF INFORMATION TABLE (PHIT) HOB - HOB Length: 0x{:x} - Version: 0x{:x} - Boot Mode: {} - Memory Bottom: 0x{:x} - Memory Top: 0x{:x} - Free Memory Bottom: 0x{:x} - Free Memory Top: 0x{:x} - End of HOB List: 0x{:x}\n"}, - hob.header.length, - hob.version, - hob.boot_mode, - align_up(hob.memory_bottom, 0x1000).unwrap_or(0), - align_down(hob.memory_top, 0x1000).unwrap_or(0), - align_up(hob.free_memory_bottom, 0x1000).unwrap_or(0), - align_down(hob.free_memory_top, 0x1000).unwrap_or(0), - hob.end_of_hob_list - )?; - } - Hob::MemoryAllocation(hob) => { - write!( - f, - indoc! {" - MEMORY ALLOCATION HOB - HOB Length: 0x{:x} - Memory Base Address: 0x{:x} - Memory Length: 0x{:x} - Memory Type: {:?}\n"}, - hob.header.length, - hob.alloc_descriptor.memory_base_address, - hob.alloc_descriptor.memory_length, - hob.alloc_descriptor.memory_type - )?; - } - Hob::ResourceDescriptor(hob) => { - write!( - f, - indoc! 
{" - RESOURCE DESCRIPTOR HOB - HOB Length: 0x{:x} - Resource Type: 0x{:x} - Resource Attribute Type: 0x{:x} - Resource Start Address: 0x{:x} - Resource Length: 0x{:x}\n"}, - hob.header.length, - hob.resource_type, - hob.resource_attribute, - hob.physical_start, - hob.resource_length - )?; - } - Hob::GuidHob(hob, _data) => { - let (f0, f1, f2, f3, f4, &[f5, f6, f7, f8, f9, f10]) = hob.name.as_fields(); - write!( - f, - indoc! {" - GUID HOB - Type: {:#x} - Length: {:#x}, - GUID: {{{:08x}-{:04x}-{:04x}-{:02x}{:02x}-{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}}}\n"}, - hob.header.r#type, hob.header.length, f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, - )?; - } - Hob::FirmwareVolume(hob) => { - write!( - f, - indoc! {" - FIRMWARE VOLUME (FV) HOB - HOB Length: 0x{:x} - Base Address: 0x{:x} - Length: 0x{:x}\n"}, - hob.header.length, hob.base_address, hob.length - )?; - } - Hob::FirmwareVolume2(hob) => { - write!( - f, - indoc! {" - FIRMWARE VOLUME 2 (FV2) HOB - Base Address: 0x{:x} - Length: 0x{:x}\n"}, - hob.base_address, hob.length - )?; - } - Hob::FirmwareVolume3(hob) => { - write!( - f, - indoc! {" - FIRMWARE VOLUME 3 (FV3) HOB - Base Address: 0x{:x} - Length: 0x{:x}\n"}, - hob.base_address, hob.length - )?; - } - Hob::Cpu(hob) => { - write!( - f, - indoc! {" - CPU HOB - Memory Space Size: 0x{:x} - IO Space Size: 0x{:x}\n"}, - hob.size_of_memory_space, hob.size_of_io_space - )?; - } - Hob::Capsule(hob) => { - write!( - f, - indoc! {" - CAPSULE HOB - Base Address: 0x{:x} - Length: 0x{:x}\n"}, - hob.base_address, hob.length - )?; - } - Hob::ResourceDescriptorV2(hob) => { - write!( - f, - indoc! 
{" - RESOURCE DESCRIPTOR 2 HOB - HOB Length: 0x{:x} - Resource Type: 0x{:x} - Resource Attribute Type: 0x{:x} - Resource Start Address: 0x{:x} - Resource Length: 0x{:x} - Attributes: 0x{:x}\n"}, - hob.v1.header.length, - hob.v1.resource_type, - hob.v1.resource_attribute, - hob.v1.physical_start, - hob.v1.resource_length, - hob.attributes - )?; - } - _ => (), - } - } - write!(f, "Parsed HOBs") - } -} - impl Hob<'_> { /// Returns the HOB header for this Hand-Off Block pub fn header(&self) -> header::Hob { @@ -1500,8 +986,8 @@ impl<'a> Iterator for HobIter<'a> { // Well-known GUID Extension HOB type definitions /// Memory Type Information GUID Extension Hob GUID. -pub const MEMORY_TYPE_INFO_HOB_GUID: r_efi::efi::Guid = - r_efi::efi::Guid::from_fields(0x4c19049f, 0x4137, 0x4dd3, 0x9c, 0x10, &[0x8b, 0x97, 0xa8, 0x3f, 0xfd, 0xfa]); +pub const MEMORY_TYPE_INFO_HOB_GUID: crate::BinaryGuid = + crate::BinaryGuid::from_string("4C19049F-4137-4DD3-9C10-8B97A83FFDFA"); /// Memory Type Information GUID Extension Hob structure definition. 
#[derive(Debug)] @@ -1514,30 +1000,23 @@ pub struct EFiMemoryTypeInformation { } #[cfg(test)] -mod tests { +pub(crate) mod tests { use crate::pi::{ BootMode, hob, hob::{ - Capsule, Cpu, FirmwareVolume, Hob, HobList, HobTrait, MemoryAllocation, PhaseHandoffInformationTable, - ResourceDescriptor, get_pi_hob_list_size, + Capsule, Cpu, FirmwareVolume, MemoryAllocation, PhaseHandoffInformationTable, ResourceDescriptor, + get_pi_hob_list_size, }, }; - use core::{ - ffi::c_void, - mem::{drop, forget, size_of}, - ptr, - slice::from_raw_parts, - }; + use core::{mem::size_of, slice::from_raw_parts}; - // Expectation is someone will provide alloc - extern crate alloc; - use alloc::vec::Vec; + use std::vec::Vec; // Generate a test firmware volume hob // # Returns // A FirmwareVolume hob - fn gen_firmware_volume() -> hob::FirmwareVolume { + pub(crate) fn gen_firmware_volume() -> hob::FirmwareVolume { let header = hob::header::Hob { r#type: hob::FV, length: size_of::() as u16, reserved: 0 }; hob::FirmwareVolume { header, base_address: 0, length: 0x0123456789abcdef } @@ -1546,7 +1025,7 @@ mod tests { // Generate a test firmware volume 2 hob // # Returns // A FirmwareVolume2 hob - fn gen_firmware_volume2() -> hob::FirmwareVolume2 { + pub(crate) fn gen_firmware_volume2() -> hob::FirmwareVolume2 { let header = hob::header::Hob { r#type: hob::FV2, length: size_of::() as u16, reserved: 0 }; @@ -1554,15 +1033,15 @@ mod tests { header, base_address: 0, length: 0x0123456789abcdef, - fv_name: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), - file_name: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), + fv_name: crate::BinaryGuid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), + file_name: crate::BinaryGuid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), } } // Generate a test firmware volume 3 hob // # Returns // A FirmwareVolume3 hob - fn gen_firmware_volume3() -> hob::FirmwareVolume3 { + pub(crate) fn gen_firmware_volume3() -> 
hob::FirmwareVolume3 { let header = hob::header::Hob { r#type: hob::FV3, length: size_of::() as u16, reserved: 0 }; @@ -1572,15 +1051,15 @@ mod tests { length: 0x0123456789abcdef, authentication_status: 0, extracted_fv: false.into(), - fv_name: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), - file_name: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), + fv_name: crate::BinaryGuid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), + file_name: crate::BinaryGuid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), } } // Generate a test resource descriptor hob // # Returns // A ResourceDescriptor hob - fn gen_resource_descriptor() -> hob::ResourceDescriptor { + pub(crate) fn gen_resource_descriptor() -> hob::ResourceDescriptor { let header = hob::header::Hob { r#type: hob::RESOURCE_DESCRIPTOR, length: size_of::() as u16, @@ -1589,7 +1068,7 @@ mod tests { hob::ResourceDescriptor { header, - owner: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), + owner: crate::BinaryGuid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), resource_type: hob::EFI_RESOURCE_SYSTEM_MEMORY, resource_attribute: hob::EFI_RESOURCE_ATTRIBUTE_PRESENT, physical_start: 0, @@ -1600,7 +1079,7 @@ mod tests { // Generate a test resource descriptor hob // # Returns // A ResourceDescriptor hob - fn gen_resource_descriptor_v2() -> hob::ResourceDescriptorV2 { + pub(crate) fn gen_resource_descriptor_v2() -> hob::ResourceDescriptorV2 { let mut v1 = gen_resource_descriptor(); v1.header.r#type = hob::RESOURCE_DESCRIPTOR2; v1.header.length = size_of::() as u16; @@ -1611,7 +1090,7 @@ mod tests { // Generate a test phase handoff information table hob // # Returns // A MemoryAllocation hob - fn gen_memory_allocation() -> hob::MemoryAllocation { + pub(crate) fn gen_memory_allocation() -> hob::MemoryAllocation { let header = hob::header::Hob { r#type: hob::MEMORY_ALLOCATION, length: size_of::() as u16, @@ -1619,7 +1098,7 @@ mod tests { }; let 
alloc_descriptor = hob::header::MemoryAllocation { - name: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), + name: crate::BinaryGuid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), memory_base_address: 0, memory_length: 0x0123456789abcdef, memory_type: 0, @@ -1629,7 +1108,7 @@ mod tests { hob::MemoryAllocation { header, alloc_descriptor } } - fn gen_memory_allocation_module() -> hob::MemoryAllocationModule { + pub(crate) fn gen_memory_allocation_module() -> hob::MemoryAllocationModule { let header = hob::header::Hob { r#type: hob::MEMORY_ALLOCATION, length: size_of::() as u16, @@ -1637,7 +1116,7 @@ mod tests { }; let alloc_descriptor = hob::header::MemoryAllocation { - name: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), + name: crate::BinaryGuid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), memory_base_address: 0, memory_length: 0x0123456789abcdef, memory_type: 0, @@ -1647,12 +1126,12 @@ mod tests { hob::MemoryAllocationModule { header, alloc_descriptor, - module_name: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), + module_name: crate::BinaryGuid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), entry_point: 0, } } - fn gen_capsule() -> hob::Capsule { + pub(crate) fn gen_capsule() -> hob::Capsule { let header = hob::header::Hob { r#type: hob::UEFI_CAPSULE, length: size_of::() as u16, reserved: 0 }; @@ -1666,7 +1145,7 @@ mod tests { /// so `HobTrait::as_ptr()` + `size()` correctly spans the entire HOB. /// /// Use `guid_hob_refs()` to extract typed references from the returned buffer. 
- fn gen_guid_hob() -> Vec { + pub(crate) fn gen_guid_hob() -> Vec { let data: &[u8] = &[1_u8, 2, 3, 4, 5, 6, 7, 8]; let hob = hob::GuidHob { header: hob::header::Hob { @@ -1674,7 +1153,7 @@ mod tests { length: (size_of::() + data.len()) as u16, reserved: 0, }, - name: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), + name: crate::BinaryGuid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), }; // Build a contiguous buffer: [GuidHob struct bytes | data bytes] @@ -1691,7 +1170,7 @@ mod tests { /// # Safety /// /// The buffer must have been produced by `gen_guid_hob()` and must outlive the returned references. - fn guid_hob_refs(buf: &[u8]) -> (&hob::GuidHob, &[u8]) { + pub(crate) fn guid_hob_refs(buf: &[u8]) -> (&hob::GuidHob, &[u8]) { assert!(buf.len() >= size_of::(), "Buffer too small for GuidHob"); // SAFETY: Test code - the buffer was constructed by gen_guid_hob(), so the buffer layout matches. let guid_hob = unsafe { &*(buf.as_ptr() as *const hob::GuidHob) }; @@ -1699,7 +1178,7 @@ mod tests { (guid_hob, data) } - fn gen_phase_handoff_information_table() -> hob::PhaseHandoffInformationTable { + pub(crate) fn gen_phase_handoff_information_table() -> hob::PhaseHandoffInformationTable { let header = hob::header::Hob { r#type: hob::HANDOFF, length: size_of::() as u16, @@ -1721,7 +1200,7 @@ mod tests { // Generate a test end of hoblist hob // # Returns // A PhaseHandoffInformationTable hob - fn gen_end_of_hoblist() -> hob::PhaseHandoffInformationTable { + pub(crate) fn gen_end_of_hoblist() -> hob::PhaseHandoffInformationTable { let header = hob::header::Hob { r#type: hob::END_OF_HOB_LIST, length: size_of::() as u16, @@ -1740,505 +1219,12 @@ mod tests { } } - fn gen_cpu() -> hob::Cpu { + pub(crate) fn gen_cpu() -> hob::Cpu { let header = hob::header::Hob { r#type: hob::CPU, length: size_of::() as u16, reserved: 0 }; hob::Cpu { header, size_of_memory_space: 0, size_of_io_space: 0, reserved: [0; 6] } } - // Converts the Hoblist to a C array. 
- // # Arguments - // * `hob_list` - A reference to the HobList. - // - // # Returns - // A tuple containing a pointer to the C array and the length of the C array. - pub fn to_c_array(hob_list: &hob::HobList) -> (*const c_void, usize) { - let size = hob_list.size(); - let mut c_array: Vec = Vec::with_capacity(size); - - for hob in hob_list.iter() { - // SAFETY: Test code - creating a slice from HOB pointer for serialization. - // All HOB variants must have contiguous backing memory where as_ptr() points to - // the start and size() covers the remainder. - let slice = unsafe { from_raw_parts(hob.as_ptr(), hob.size()) }; - c_array.extend_from_slice(slice); - } - - let void_ptr = c_array.as_ptr() as *const c_void; - - // in order to not call the destructor on the Vec at the end of this function, we need to forget it - forget(c_array); - - (void_ptr, size) - } - - // Implements a function to manually free a C array. - // - // # Arguments - // * `c_array_ptr` - A pointer to the C array. - // * `len` - The length of the C array. - // - // # Safety - // - // The caller must ensure that the pointer and length match a Vec originally created by to_c_array. - pub fn manually_free_c_array(c_array_ptr: *const c_void, len: usize) { - let ptr = c_array_ptr as *mut u8; - // SAFETY: Caller is responsible for ensuring the pointer and length are valid per the function contract. 
- unsafe { - drop(Vec::from_raw_parts(ptr, len, len)); - } - } - - #[test] - fn test_hoblist_empty() { - let hoblist = HobList::new(); - assert_eq!(hoblist.len(), 0); - assert!(hoblist.is_empty()); - } - - #[test] - fn test_hoblist_push() { - let mut hoblist = HobList::new(); - let resource = gen_resource_descriptor(); - hoblist.push(Hob::ResourceDescriptor(&resource)); - assert_eq!(hoblist.len(), 1); - - let firmware_volume = gen_firmware_volume(); - hoblist.push(Hob::FirmwareVolume(&firmware_volume)); - - assert_eq!(hoblist.len(), 2); - - let resource_v2 = gen_resource_descriptor_v2(); - hoblist.push(Hob::ResourceDescriptorV2(&resource_v2)); - - assert_eq!(hoblist.len(), 3); - } - - #[test] - fn test_hoblist_iterate() { - let mut hoblist = HobList::default(); - let resource = gen_resource_descriptor(); - let firmware_volume = gen_firmware_volume(); - let firmware_volume2 = gen_firmware_volume2(); - let firmware_volume3 = gen_firmware_volume3(); - let end_of_hob_list = gen_end_of_hoblist(); - let capsule = gen_capsule(); - let guid_hob_buf = gen_guid_hob(); - let (guid_hob, guid_hob_data) = guid_hob_refs(&guid_hob_buf); - let memory_allocation = gen_memory_allocation(); - let memory_allocation_module = gen_memory_allocation_module(); - - hoblist.push(Hob::ResourceDescriptor(&resource)); - hoblist.push(Hob::FirmwareVolume(&firmware_volume)); - hoblist.push(Hob::FirmwareVolume2(&firmware_volume2)); - hoblist.push(Hob::FirmwareVolume3(&firmware_volume3)); - hoblist.push(Hob::Capsule(&capsule)); - hoblist.push(Hob::GuidHob(guid_hob, guid_hob_data)); - hoblist.push(Hob::MemoryAllocation(&memory_allocation)); - hoblist.push(Hob::MemoryAllocationModule(&memory_allocation_module)); - hoblist.push(Hob::Handoff(&end_of_hob_list)); - - let mut count = 0; - hoblist.iter().for_each(|hob| { - match hob { - Hob::ResourceDescriptor(resource) => { - assert_eq!(resource.resource_type, hob::EFI_RESOURCE_SYSTEM_MEMORY); - } - Hob::MemoryAllocation(memory_allocation) => { - 
assert_eq!(memory_allocation.alloc_descriptor.memory_length, 0x0123456789abcdef); - } - Hob::MemoryAllocationModule(memory_allocation_module) => { - assert_eq!(memory_allocation_module.alloc_descriptor.memory_length, 0x0123456789abcdef); - } - Hob::Capsule(capsule) => { - assert_eq!(capsule.base_address, 0); - } - Hob::GuidHob(guid_hob, data) => { - assert_eq!(guid_hob.name, r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11])); - assert_eq!(*data, &[1_u8, 2, 3, 4, 5, 6, 7, 8]); - } - Hob::FirmwareVolume(firmware_volume) => { - assert_eq!(firmware_volume.length, 0x0123456789abcdef); - } - Hob::FirmwareVolume2(firmware_volume) => { - assert_eq!(firmware_volume.length, 0x0123456789abcdef); - } - Hob::FirmwareVolume3(firmware_volume) => { - assert_eq!(firmware_volume.length, 0x0123456789abcdef); - } - Hob::Handoff(handoff) => { - assert_eq!(handoff.memory_top, 0xdeadbeef); - } - _ => { - panic!("Unexpected hob type"); - } - } - count += 1; - }); - assert_eq!(count, 9); - } - - #[test] - fn test_hoblist_discover() { - // generate some test hobs - let resource = gen_resource_descriptor(); - let handoff = gen_phase_handoff_information_table(); - let firmware_volume = gen_firmware_volume(); - let firmware_volume2 = gen_firmware_volume2(); - let firmware_volume3 = gen_firmware_volume3(); - let capsule = gen_capsule(); - let guid_hob_buf = gen_guid_hob(); - let (guid_hob, guid_hob_data) = guid_hob_refs(&guid_hob_buf); - let memory_allocation = gen_memory_allocation(); - let memory_allocation_module = gen_memory_allocation_module(); - let cpu = gen_cpu(); - let resource_v2 = gen_resource_descriptor_v2(); - let end_of_hob_list = gen_end_of_hoblist(); - - // create a new hoblist - let mut hoblist = HobList::new(); - - // Push the resource descriptor to the hoblist - hoblist.push(Hob::ResourceDescriptor(&resource)); - hoblist.push(Hob::Handoff(&handoff)); - hoblist.push(Hob::FirmwareVolume(&firmware_volume)); - 
hoblist.push(Hob::FirmwareVolume2(&firmware_volume2)); - hoblist.push(Hob::FirmwareVolume3(&firmware_volume3)); - hoblist.push(Hob::Capsule(&capsule)); - hoblist.push(Hob::GuidHob(guid_hob, guid_hob_data)); - hoblist.push(Hob::MemoryAllocation(&memory_allocation)); - hoblist.push(Hob::MemoryAllocationModule(&memory_allocation_module)); - hoblist.push(Hob::Cpu(&cpu)); - hoblist.push(Hob::ResourceDescriptorV2(&resource_v2)); - hoblist.push(Hob::Handoff(&end_of_hob_list)); - - // assert that the hoblist has 3 hobs and they are of the correct type - - let mut count = 0; - hoblist.iter().for_each(|hob| { - match hob { - Hob::ResourceDescriptor(resource) => { - assert_eq!(resource.resource_type, hob::EFI_RESOURCE_SYSTEM_MEMORY); - } - Hob::MemoryAllocation(memory_allocation) => { - assert_eq!(memory_allocation.alloc_descriptor.memory_length, 0x0123456789abcdef); - } - Hob::MemoryAllocationModule(memory_allocation_module) => { - assert_eq!(memory_allocation_module.alloc_descriptor.memory_length, 0x0123456789abcdef); - } - Hob::Capsule(capsule) => { - assert_eq!(capsule.base_address, 0); - } - Hob::GuidHob(guid_hob, data) => { - assert_eq!(guid_hob.name, r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11])); - assert_eq!(&data[..], guid_hob_data); - } - Hob::FirmwareVolume(firmware_volume) => { - assert_eq!(firmware_volume.length, 0x0123456789abcdef); - } - Hob::FirmwareVolume2(firmware_volume) => { - assert_eq!(firmware_volume.length, 0x0123456789abcdef); - } - Hob::FirmwareVolume3(firmware_volume) => { - assert_eq!(firmware_volume.length, 0x0123456789abcdef); - } - Hob::Handoff(handoff) => { - assert_eq!(handoff.memory_top, 0xdeadbeef); - } - Hob::Cpu(cpu) => { - assert_eq!(cpu.size_of_memory_space, 0); - } - Hob::ResourceDescriptorV2(resource) => { - assert_eq!(resource.v1.header.r#type, hob::RESOURCE_DESCRIPTOR2); - assert_eq!(resource.v1.resource_type, hob::EFI_RESOURCE_SYSTEM_MEMORY); - } - _ => { - panic!("Unexpected hob type"); - } - } - count += 1; 
- }); - - assert_eq!(count, 12); - - // c_hoblist is a pointer to the hoblist - we need to manually free it later - let (c_array_hoblist, length) = to_c_array(&hoblist); - - // create a new hoblist - let mut cloned_hoblist = HobList::new(); - cloned_hoblist.discover_hobs(c_array_hoblist); - - // assert that the hoblist has 2 hobs and they are of the correct type - // we don't need to check the end of hoblist hob as it will not be 'discovered' - // by the discover_hobs function and simply end the iteration - count = 0; - hoblist.into_iter().for_each(|hob| { - match hob { - Hob::ResourceDescriptor(resource) => { - assert_eq!(resource.resource_type, hob::EFI_RESOURCE_SYSTEM_MEMORY); - } - Hob::MemoryAllocation(memory_allocation) => { - assert_eq!(memory_allocation.alloc_descriptor.memory_length, 0x0123456789abcdef); - } - Hob::MemoryAllocationModule(memory_allocation_module) => { - assert_eq!(memory_allocation_module.alloc_descriptor.memory_length, 0x0123456789abcdef); - } - Hob::Capsule(capsule) => { - assert_eq!(capsule.base_address, 0); - } - Hob::GuidHob(guid_hob, data) => { - assert_eq!(guid_hob.name, r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11])); - assert_eq!(data, &[1_u8, 2, 3, 4, 5, 6, 7, 8]); - } - Hob::FirmwareVolume(firmware_volume) => { - assert_eq!(firmware_volume.length, 0x0123456789abcdef); - } - Hob::FirmwareVolume2(firmware_volume) => { - assert_eq!(firmware_volume.length, 0x0123456789abcdef); - } - Hob::FirmwareVolume3(firmware_volume) => { - assert_eq!(firmware_volume.length, 0x0123456789abcdef); - } - Hob::Handoff(handoff) => { - assert_eq!(handoff.memory_top, 0xdeadbeef); - } - Hob::ResourceDescriptorV2(resource) => { - assert_eq!(resource.v1.header.r#type, hob::RESOURCE_DESCRIPTOR2); - assert_eq!(resource.v1.resource_type, hob::EFI_RESOURCE_SYSTEM_MEMORY); - } - Hob::Cpu(cpu) => { - assert_eq!(cpu.size_of_memory_space, 0); - } - _ => { - panic!("Unexpected hob type"); - } - } - count += 1; - }); - - assert_eq!(count, 12); - 
- // free the c array - manually_free_c_array(c_array_hoblist, length); - } - - #[test] - fn test_hob_iterator() { - // generate some test hobs - let resource = gen_resource_descriptor(); - let handoff = gen_phase_handoff_information_table(); - let firmware_volume = gen_firmware_volume(); - let firmware_volume2 = gen_firmware_volume2(); - let firmware_volume3 = gen_firmware_volume3(); - let capsule = gen_capsule(); - let guid_hob_buf = gen_guid_hob(); - let (guid_hob, guid_hob_data) = guid_hob_refs(&guid_hob_buf); - let memory_allocation = gen_memory_allocation(); - let memory_allocation_module = gen_memory_allocation_module(); - let cpu = gen_cpu(); - let end_of_hob_list = gen_end_of_hoblist(); - - // create a new hoblist - let mut hoblist = HobList::new(); - - // Push the resource descriptor to the hoblist - hoblist.push(Hob::ResourceDescriptor(&resource)); - hoblist.push(Hob::Handoff(&handoff)); - hoblist.push(Hob::FirmwareVolume(&firmware_volume)); - hoblist.push(Hob::FirmwareVolume2(&firmware_volume2)); - hoblist.push(Hob::FirmwareVolume3(&firmware_volume3)); - hoblist.push(Hob::Capsule(&capsule)); - hoblist.push(Hob::GuidHob(guid_hob, guid_hob_data)); - hoblist.push(Hob::MemoryAllocation(&memory_allocation)); - hoblist.push(Hob::MemoryAllocationModule(&memory_allocation_module)); - hoblist.push(Hob::Cpu(&cpu)); - hoblist.push(Hob::Handoff(&end_of_hob_list)); - - let (c_array_hoblist, length) = to_c_array(&hoblist); - - // SAFETY: Test code - creating a reference from C array pointer for HOB testing. 
- let hob = Hob::ResourceDescriptor(unsafe { - (c_array_hoblist as *const hob::ResourceDescriptor).as_ref::<'static>().unwrap() - }); - for h in &hob { - println!("{:?}", h.header()); - } - - manually_free_c_array(c_array_hoblist, length); - } - - #[test] - fn test_hob_iterator2() { - let resource = gen_resource_descriptor(); - let handoff = gen_phase_handoff_information_table(); - let firmware_volume = gen_firmware_volume(); - let firmware_volume2 = gen_firmware_volume2(); - let firmware_volume3 = gen_firmware_volume3(); - let capsule = gen_capsule(); - let guid_hob_buf = gen_guid_hob(); - let (guid_hob, guid_hob_data) = guid_hob_refs(&guid_hob_buf); - let memory_allocation = gen_memory_allocation(); - let memory_allocation_module = gen_memory_allocation_module(); - let cpu = gen_cpu(); - let resource_v2 = gen_resource_descriptor_v2(); - let end_of_hob_list = gen_end_of_hoblist(); - - // create a new hoblist - let mut hoblist = HobList::new(); - - // Push the resource descriptor to the hoblist - hoblist.push(Hob::ResourceDescriptor(&resource)); - hoblist.push(Hob::Handoff(&handoff)); - hoblist.push(Hob::FirmwareVolume(&firmware_volume)); - hoblist.push(Hob::FirmwareVolume2(&firmware_volume2)); - hoblist.push(Hob::FirmwareVolume3(&firmware_volume3)); - hoblist.push(Hob::Capsule(&capsule)); - hoblist.push(Hob::GuidHob(guid_hob, guid_hob_data)); - hoblist.push(Hob::MemoryAllocation(&memory_allocation)); - hoblist.push(Hob::MemoryAllocationModule(&memory_allocation_module)); - hoblist.push(Hob::Cpu(&cpu)); - hoblist.push(Hob::ResourceDescriptorV2(&resource_v2)); - hoblist.push(Hob::Handoff(&end_of_hob_list)); - - // Make sure we can iterate over a reference to a HobList without - // consuming it. 
- for hob in &hoblist { - println!("{:?}", hob.header()); - } - - for hob in hoblist { - println!("{:?}", hob.header()); - } - } - - #[test] - fn test_relocate_hobs() { - // generate some test hobs - let resource = gen_resource_descriptor(); - let handoff = gen_phase_handoff_information_table(); - let firmware_volume = gen_firmware_volume(); - let firmware_volume2 = gen_firmware_volume2(); - let firmware_volume3 = gen_firmware_volume3(); - let capsule = gen_capsule(); - let guid_hob_buf = gen_guid_hob(); - let (guid_hob, guid_hob_data) = guid_hob_refs(&guid_hob_buf); - let memory_allocation = gen_memory_allocation(); - let memory_allocation_module = gen_memory_allocation_module(); - let cpu = gen_cpu(); - let resource_v2 = gen_resource_descriptor_v2(); - let end_of_hob_list = gen_end_of_hoblist(); - - // create a new hoblist - let mut hoblist = HobList::new(); - - // Push the resource descriptor to the hoblist - hoblist.push(Hob::ResourceDescriptor(&resource)); - hoblist.push(Hob::Handoff(&handoff)); - hoblist.push(Hob::FirmwareVolume(&firmware_volume)); - hoblist.push(Hob::FirmwareVolume2(&firmware_volume2)); - hoblist.push(Hob::FirmwareVolume3(&firmware_volume3)); - hoblist.push(Hob::Capsule(&capsule)); - hoblist.push(Hob::GuidHob(guid_hob, guid_hob_data)); - hoblist.push(Hob::MemoryAllocation(&memory_allocation)); - hoblist.push(Hob::MemoryAllocationModule(&memory_allocation_module)); - hoblist.push(Hob::Cpu(&cpu)); - hoblist.push(Hob::Misc(12345)); - hoblist.push(Hob::ResourceDescriptorV2(&resource_v2)); - hoblist.push(Hob::Handoff(&end_of_hob_list)); - - let hoblist_address = hoblist.as_mut_ptr::<()>() as usize; - let hoblist_len = hoblist.len(); - hoblist.relocate_hobs(); - assert_eq!( - hoblist_address, - hoblist.as_mut_ptr::<()>() as usize, - "Only hobs need to be relocated, not the vector." 
- ); - assert_eq!(hoblist_len, hoblist.len()); - - for (i, hob) in hoblist.into_iter().enumerate() { - match hob { - Hob::ResourceDescriptor(hob) if i == 0 => { - assert_ne!(ptr::addr_of!(resource), hob); - assert_eq!(resource, *hob); - } - Hob::Handoff(hob) if i == 1 => { - assert_ne!(ptr::addr_of!(handoff), hob); - assert_eq!(handoff, *hob); - } - Hob::FirmwareVolume(hob) if i == 2 => { - assert_ne!(ptr::addr_of!(firmware_volume), hob); - assert_eq!(firmware_volume, *hob); - } - Hob::FirmwareVolume2(hob) if i == 3 => { - assert_ne!(ptr::addr_of!(firmware_volume2), hob); - assert_eq!(firmware_volume2, *hob); - } - Hob::FirmwareVolume3(hob) if i == 4 => { - assert_ne!(ptr::addr_of!(firmware_volume3), hob); - assert_eq!(firmware_volume3, *hob); - } - Hob::Capsule(hob) if i == 5 => { - assert_ne!(ptr::addr_of!(capsule), hob); - assert_eq!(capsule, *hob); - } - Hob::GuidHob(hob, hob_data) if i == 6 => { - assert_ne!(ptr::from_ref(guid_hob), ptr::from_ref(hob)); - assert_ne!(guid_hob_data.as_ptr(), hob_data.as_ptr()); - assert_eq!(guid_hob.header, hob.header); - assert_eq!(guid_hob.name, hob.name); - assert_eq!(guid_hob_data, hob_data); - } - Hob::MemoryAllocation(hob) if i == 7 => { - assert_ne!(ptr::addr_of!(memory_allocation), hob); - assert_eq!(memory_allocation.header, hob.header); - assert_eq!(memory_allocation.alloc_descriptor, hob.alloc_descriptor); - } - Hob::MemoryAllocationModule(hob) if i == 8 => { - assert_ne!(ptr::addr_of!(memory_allocation_module), hob); - assert_eq!(memory_allocation_module, *hob); - } - Hob::Cpu(hob) if i == 9 => { - assert_ne!(ptr::addr_of!(cpu), hob); - assert_eq!(cpu, *hob); - } - Hob::Misc(hob) if i == 10 => { - assert_eq!(12345, hob); - } - Hob::ResourceDescriptorV2(hob) if i == 11 => { - assert_ne!(ptr::addr_of!(resource_v2), hob); - assert_eq!(resource_v2, *hob); - } - Hob::Handoff(hob) if i == 12 => { - assert_ne!(ptr::addr_of!(end_of_hob_list), hob); - assert_eq!(end_of_hob_list, *hob); - } - _ => panic!("Hob at index: {i}."), 
- } - } - } - - #[test] - fn test_hoblist_debug_display() { - use alloc::format; - - let mut hoblist = HobList::new(); - let handoff = gen_phase_handoff_information_table(); - hoblist.push(Hob::Handoff(&handoff)); - - let debug_output = format!("{:?}", hoblist); - - assert!(debug_output.contains("PHASE HANDOFF INFORMATION TABLE")); - assert!(debug_output.contains("HOB Length:")); - assert!(debug_output.contains("Version:")); - assert!(debug_output.contains("Boot Mode:")); - assert!(debug_output.contains("Memory Bottom:")); - assert!(debug_output.contains("Memory Top:")); - assert!(debug_output.contains("Free Memory Bottom:")); - assert!(debug_output.contains("Free Memory Top:")); - assert!(debug_output.contains("End of HOB List:")); - } - #[test] fn test_get_pi_hob_list_size_single_hob() { use core::ffi::c_void; diff --git a/sdk/patina/src/pi/hob/hob_list.rs b/sdk/patina/src/pi/hob/hob_list.rs new file mode 100644 index 000000000..36b0a0849 --- /dev/null +++ b/sdk/patina/src/pi/hob/hob_list.rs @@ -0,0 +1,1163 @@ +//! Hand-Off Block List (HOB) +//! +//! The HOB list is a contiguous list of HOB structures, each with a common header +//! followed by type-specific data. Typically, the PEI Foundation creates and manages +//! the HOB list during the PEI phase, and it is passed to the DXE Foundation +//! during the PEI-to-DXE handoff. +//! +//! Based on the UEFI Platform Initialization Specification Volume III. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! 
+ +use crate::pi::hob::{ + CPU, Capsule, Cpu, END_OF_HOB_LIST, FV, FV2, FV3, FirmwareVolume, FirmwareVolume2, FirmwareVolume3, GUID_EXTENSION, + GuidHob, HANDOFF, Hob, HobTrait, MEMORY_ALLOCATION, MemoryAllocation, MemoryAllocationModule, + PhaseHandoffInformationTable, RESOURCE_DESCRIPTOR, RESOURCE_DESCRIPTOR2, ResourceDescriptor, ResourceDescriptorV2, + UEFI_CAPSULE, header, +}; +use core::{ffi::c_void, mem, slice}; + +use indoc::indoc; + +use crate::base::{align_down, align_up}; +use core::fmt; + +// Expectation is someone will provide alloc +use alloc::{boxed::Box, vec::Vec}; + +/// Represents a HOB list. +/// +/// This is a parsed Rust representation of the HOB list that provides better type safety and ergonomics but does not +/// have binary compatibility with the original PI Spec HOB list structure. +pub struct HobList<'a>(Vec>); + +impl Default for HobList<'_> { + fn default() -> Self { + HobList::new() + } +} + +impl<'a> HobList<'a> { + /// Instantiates a Hoblist. + pub const fn new() -> Self { + HobList(Vec::new()) + } + + /// Implements iter for Hoblist. + /// + /// # Example(s) + /// + /// ```no_run + /// use core::ffi::c_void; + /// use patina::pi::hob::HobList; + /// + /// fn example(hob_list: *const c_void) { + /// // example discovering and adding hobs to a hob list + /// let mut the_hob_list = HobList::default(); + /// the_hob_list.discover_hobs(hob_list); + /// + /// for hob in the_hob_list.iter() { + /// // ... do something with the hob(s) + /// } + /// } + /// ``` + pub fn iter(&self) -> impl Iterator> { + self.0.iter() + } + + /// Returns a mutable pointer to the underlying data. 
+ /// + /// # Example(s) + /// + /// ```no_run + /// use core::ffi::c_void; + /// use patina::pi::hob::HobList; + /// + /// fn example(hob_list: *const c_void) { + /// // example discovering and adding hobs to a hob list + /// let mut the_hob_list = HobList::default(); + /// the_hob_list.discover_hobs(hob_list); + /// + /// let ptr: *mut c_void = the_hob_list.as_mut_ptr(); + /// // ... do something with the pointer + /// } + /// ``` + pub fn as_mut_ptr(&mut self) -> *mut T { + self.0.as_mut_ptr() as *mut T + } + + /// Returns the size of the Hoblist in bytes. + /// + /// # Example(s) + /// + /// ```no_run + /// use core::ffi::c_void; + /// use patina::pi::hob::HobList; + /// + /// fn example(hob_list: *const c_void) { + /// // example discovering and adding hobs to a hob list + /// let mut the_hob_list = HobList::default(); + /// the_hob_list.discover_hobs(hob_list); + /// + /// let length = the_hob_list.size(); + /// println!("size_of_hobs: {:?}", length); + /// } + pub fn size(&self) -> usize { + let mut size_of_hobs = 0; + + for hob in self.iter() { + size_of_hobs += hob.size() + } + + size_of_hobs + } + + /// Implements len for Hoblist. + /// Returns the number of hobs in the list. + /// + /// # Example(s) + /// ```no_run + /// use core::ffi::c_void; + /// use patina::pi::hob::HobList; + /// + /// fn example(hob_list: *const c_void) { + /// // example discovering and adding hobs to a hob list + /// let mut the_hob_list = HobList::default(); + /// the_hob_list.discover_hobs(hob_list); + /// + /// let length = the_hob_list.len(); + /// println!("length_of_hobs: {:?}", length); + /// } + /// ``` + pub fn len(&self) -> usize { + self.0.len() + } + + /// Implements is_empty for Hoblist. + /// Returns true if the list is empty. 
+ /// + /// # Example(s) + /// ```no_run + /// use core::ffi::c_void; + /// use patina::pi::hob::HobList; + /// + /// fn example(hob_list: *const c_void) { + /// // example discovering and adding hobs to a hob list + /// let mut the_hob_list = HobList::default(); + /// the_hob_list.discover_hobs(hob_list); + /// + /// let is_empty = the_hob_list.is_empty(); + /// println!("is_empty: {:?}", is_empty); + /// } + /// ``` + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Implements push for Hoblist. + /// + /// Parameters: + /// * hob: Hob<'a> - the hob to add to the list + /// + /// # Example(s) + /// ```no_run + /// use core::{ffi::c_void, mem::size_of}; + /// use patina::pi::hob::{HobList, Hob, header, FirmwareVolume, FV}; + /// + /// fn example(hob_list: *const c_void) { + /// // example discovering and adding hobs to a hob list + /// let mut the_hob_list = HobList::default(); + /// the_hob_list.discover_hobs(hob_list); + /// + /// // example pushing a hob onto the list + /// let header = header::Hob { + /// r#type: FV, + /// length: size_of::() as u16, + /// reserved: 0, + /// }; + /// + /// let firmware_volume = FirmwareVolume { + /// header, + /// base_address: 0, + /// length: 0x0123456789abcdef, + /// }; + /// + /// let hob = Hob::FirmwareVolume(&firmware_volume); + /// the_hob_list.push(hob); + /// } + /// ``` + pub fn push(&mut self, hob: Hob<'a>) { + let cloned_hob = hob.clone(); + self.0.push(cloned_hob); + } + + /// Discovers hobs from a C style void* and adds them to a rust structure. 
+ /// + /// # Example(s) + /// + /// ```no_run + /// use core::ffi::c_void; + /// use patina::pi::hob::HobList; + /// + /// fn example(hob_list: *const c_void) { + /// // example discovering and adding hobs to a hob list + /// let mut the_hob_list = HobList::default(); + /// the_hob_list.discover_hobs(hob_list); + /// } + /// ``` + pub fn discover_hobs(&mut self, hob_list: *const c_void) { + const NOT_NULL: &str = "Ptr should not be NULL"; + fn assert_hob_size(hob: &header::Hob) { + let hob_len = hob.length as usize; + let hob_size = mem::size_of::(); + assert_eq!( + hob_len, hob_size, + "Trying to cast hob of length {hob_len} into a pointer of size {hob_size}. Hob type: {:?}", + hob.r#type + ); + } + + let mut hob_header: *const header::Hob = hob_list as *const header::Hob; + + loop { + // SAFETY: hob_header points to valid HOB data provided by firmware. Each HOB has a valid header. + let current_header = unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; + match current_header.r#type { + HANDOFF => { + assert_hob_size::(current_header); + // SAFETY: HOB type is HANDOFF and size was validated. Cast to specific HOB type is valid. + let phit_hob = + unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; + self.0.push(Hob::Handoff(phit_hob)); + } + MEMORY_ALLOCATION => { + if current_header.length == mem::size_of::() as u16 { + // SAFETY: HOB type is MEMORY_ALLOCATION with correct size for Module variant. + let mem_alloc_hob = + unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; + self.0.push(Hob::MemoryAllocationModule(mem_alloc_hob)); + } else { + assert_hob_size::(current_header); + // SAFETY: HOB type is MEMORY_ALLOCATION and size was validated. + let mem_alloc_hob = unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; + self.0.push(Hob::MemoryAllocation(mem_alloc_hob)); + } + } + RESOURCE_DESCRIPTOR => { + assert_hob_size::(current_header); + // SAFETY: HOB type is RESOURCE_DESCRIPTOR and size was validated. 
+ let resource_desc_hob = + unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; + self.0.push(Hob::ResourceDescriptor(resource_desc_hob)); + } + GUID_EXTENSION => { + // SAFETY: HOB type is GUID_EXTENSION. GuidHob header is valid, and data follows immediately after. + // Data length is calculated from HOB length minus header size. Pointer arithmetic is within HOB bounds. + let (guid_hob, data) = unsafe { + let hob = hob_header.cast::().as_ref().expect(NOT_NULL); + let data_ptr = hob_header.byte_add(mem::size_of::()) as *mut u8; + let data_len = hob.header.length as usize - mem::size_of::(); + (hob, slice::from_raw_parts(data_ptr, data_len)) + }; + self.0.push(Hob::GuidHob(guid_hob, data)); + } + FV => { + assert_hob_size::(current_header); + // SAFETY: HOB type is FV and size was validated. + let fv_hob = unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; + self.0.push(Hob::FirmwareVolume(fv_hob)); + } + FV2 => { + assert_hob_size::(current_header); + // SAFETY: HOB type is FV2 and size was validated. + let fv2_hob = unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; + self.0.push(Hob::FirmwareVolume2(fv2_hob)); + } + FV3 => { + assert_hob_size::(current_header); + // SAFETY: HOB type is FV3 and size was validated. + let fv3_hob = unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; + self.0.push(Hob::FirmwareVolume3(fv3_hob)); + } + CPU => { + assert_hob_size::(current_header); + // SAFETY: HOB type is CPU and size was validated. + let cpu_hob = unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; + self.0.push(Hob::Cpu(cpu_hob)); + } + UEFI_CAPSULE => { + assert_hob_size::(current_header); + // SAFETY: HOB type is UEFI_CAPSULE and size was validated. + let capsule_hob = unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; + self.0.push(Hob::Capsule(capsule_hob)); + } + RESOURCE_DESCRIPTOR2 => { + assert_hob_size::(current_header); + // SAFETY: HOB type is RESOURCE_DESCRIPTOR2 and size was validated. 
+ let resource_desc_hob = + unsafe { hob_header.cast::().as_ref().expect(NOT_NULL) }; + self.0.push(Hob::ResourceDescriptorV2(resource_desc_hob)); + } + END_OF_HOB_LIST => { + break; + } + _ => { + self.0.push(Hob::Misc(current_header.r#type)); + } + } + let next_hob = hob_header as usize + current_header.length as usize; + hob_header = next_hob as *const header::Hob; + } + } + + /// Relocates all HOBs in the list to new memory locations. + /// + /// This function creates new instances of each HOB in the list and updates the list to point to these new instances. + /// + /// # Example(s) + /// + /// ```no_run + /// use core::ffi::c_void; + /// use patina::pi::hob::HobList; + /// + /// fn example(hob_list: *const c_void) { + /// // example discovering and adding hobs to a hob list + /// let mut the_hob_list = HobList::default(); + /// the_hob_list.discover_hobs(hob_list); + /// + /// // relocate hobs to new memory locations + /// the_hob_list.relocate_hobs(); + /// } + /// ``` + pub fn relocate_hobs(&mut self) { + for hob in self.0.iter_mut() { + match hob { + Hob::Handoff(hob) => *hob = Box::leak(Box::new(PhaseHandoffInformationTable::clone(hob))), + Hob::MemoryAllocation(hob) => *hob = Box::leak(Box::new(MemoryAllocation::clone(hob))), + Hob::MemoryAllocationModule(hob) => *hob = Box::leak(Box::new(MemoryAllocationModule::clone(hob))), + Hob::Capsule(hob) => *hob = Box::leak(Box::new(Capsule::clone(hob))), + Hob::ResourceDescriptor(hob) => *hob = Box::leak(Box::new(ResourceDescriptor::clone(hob))), + Hob::GuidHob(hob, data) => { + *hob = Box::leak(Box::new(GuidHob::clone(hob))); + *data = Box::leak(data.to_vec().into_boxed_slice()); + } + Hob::FirmwareVolume(hob) => *hob = Box::leak(Box::new(FirmwareVolume::clone(hob))), + Hob::FirmwareVolume2(hob) => *hob = Box::leak(Box::new(FirmwareVolume2::clone(hob))), + Hob::FirmwareVolume3(hob) => *hob = Box::leak(Box::new(FirmwareVolume3::clone(hob))), + Hob::Cpu(hob) => *hob = Box::leak(Box::new(Cpu::clone(hob))), + 
Hob::ResourceDescriptorV2(hob) => *hob = Box::leak(Box::new(ResourceDescriptorV2::clone(hob))), + Hob::Misc(_) => (), // Data is owned in Misc (nothing to move), + }; + } + } +} + +/// Implements IntoIterator for HobList. +/// +/// Defines how it will be converted to an iterator. +impl<'a> IntoIterator for HobList<'a> { + type Item = Hob<'a>; + type IntoIter = > as IntoIterator>::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl<'a> IntoIterator for &'a HobList<'a> { + type Item = &'a Hob<'a>; + type IntoIter = core::slice::Iter<'a, Hob<'a>>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +/// Implements Debug for Hoblist. +/// +/// Writes Hoblist debug information to stdio +/// +impl fmt::Debug for HobList<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for hob in self.0.clone().into_iter() { + match hob { + Hob::Handoff(hob) => { + write!( + f, + indoc! {" + PHASE HANDOFF INFORMATION TABLE (PHIT) HOB + HOB Length: 0x{:x} + Version: 0x{:x} + Boot Mode: {} + Memory Bottom: 0x{:x} + Memory Top: 0x{:x} + Free Memory Bottom: 0x{:x} + Free Memory Top: 0x{:x} + End of HOB List: 0x{:x}\n"}, + hob.header.length, + hob.version, + hob.boot_mode, + align_up(hob.memory_bottom, 0x1000).unwrap_or(0), + align_down(hob.memory_top, 0x1000).unwrap_or(0), + align_up(hob.free_memory_bottom, 0x1000).unwrap_or(0), + align_down(hob.free_memory_top, 0x1000).unwrap_or(0), + hob.end_of_hob_list + )?; + } + Hob::MemoryAllocation(hob) => { + write!( + f, + indoc! {" + MEMORY ALLOCATION HOB + HOB Length: 0x{:x} + Memory Base Address: 0x{:x} + Memory Length: 0x{:x} + Memory Type: {:?}\n"}, + hob.header.length, + hob.alloc_descriptor.memory_base_address, + hob.alloc_descriptor.memory_length, + hob.alloc_descriptor.memory_type + )?; + } + Hob::ResourceDescriptor(hob) => { + write!( + f, + indoc! 
{" + RESOURCE DESCRIPTOR HOB + HOB Length: 0x{:x} + Resource Type: 0x{:x} + Resource Attribute Type: 0x{:x} + Resource Start Address: 0x{:x} + Resource Length: 0x{:x}\n"}, + hob.header.length, + hob.resource_type, + hob.resource_attribute, + hob.physical_start, + hob.resource_length + )?; + } + Hob::GuidHob(hob, _data) => { + let (f0, f1, f2, f3, f4, &[f5, f6, f7, f8, f9, f10]) = hob.name.as_fields(); + write!( + f, + indoc! {" + GUID HOB + Type: {:#x} + Length: {:#x}, + GUID: {{{:08x}-{:04x}-{:04x}-{:02x}{:02x}-{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}}}\n"}, + hob.header.r#type, hob.header.length, f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, + )?; + } + Hob::FirmwareVolume(hob) => { + write!( + f, + indoc! {" + FIRMWARE VOLUME (FV) HOB + HOB Length: 0x{:x} + Base Address: 0x{:x} + Length: 0x{:x}\n"}, + hob.header.length, hob.base_address, hob.length + )?; + } + Hob::FirmwareVolume2(hob) => { + write!( + f, + indoc! {" + FIRMWARE VOLUME 2 (FV2) HOB + Base Address: 0x{:x} + Length: 0x{:x}\n"}, + hob.base_address, hob.length + )?; + } + Hob::FirmwareVolume3(hob) => { + write!( + f, + indoc! {" + FIRMWARE VOLUME 3 (FV3) HOB + Base Address: 0x{:x} + Length: 0x{:x}\n"}, + hob.base_address, hob.length + )?; + } + Hob::Cpu(hob) => { + write!( + f, + indoc! {" + CPU HOB + Memory Space Size: 0x{:x} + IO Space Size: 0x{:x}\n"}, + hob.size_of_memory_space, hob.size_of_io_space + )?; + } + Hob::Capsule(hob) => { + write!( + f, + indoc! {" + CAPSULE HOB + Base Address: 0x{:x} + Length: 0x{:x}\n"}, + hob.base_address, hob.length + )?; + } + Hob::ResourceDescriptorV2(hob) => { + write!( + f, + indoc! 
{" + RESOURCE DESCRIPTOR 2 HOB + HOB Length: 0x{:x} + Resource Type: 0x{:x} + Resource Attribute Type: 0x{:x} + Resource Start Address: 0x{:x} + Resource Length: 0x{:x} + Attributes: 0x{:x}\n"}, + hob.v1.header.length, + hob.v1.resource_type, + hob.v1.resource_attribute, + hob.v1.physical_start, + hob.v1.resource_length, + hob.attributes + )?; + } + _ => (), + } + } + write!(f, "Parsed HOBs") + } +} + +#[cfg(test)] +mod tests { + use crate::pi::{ + hob, + hob::{ + Capsule, Cpu, FirmwareVolume, Hob, HobTrait, MemoryAllocation, PhaseHandoffInformationTable, + ResourceDescriptor, get_pi_hob_list_size, + hob_list::HobList, + tests::{ + gen_capsule, gen_cpu, gen_end_of_hoblist, gen_firmware_volume, gen_firmware_volume2, + gen_firmware_volume3, gen_guid_hob, gen_memory_allocation, gen_memory_allocation_module, + gen_phase_handoff_information_table, gen_resource_descriptor, gen_resource_descriptor_v2, + guid_hob_refs, + }, + }, + }; + + use core::{ + ffi::c_void, + mem::{drop, forget, size_of}, + ptr, + slice::from_raw_parts, + }; + + use std::vec::Vec; + + // Converts the Hoblist to a C array. + // # Arguments + // * `hob_list` - A reference to the HobList. + // + // # Returns + // A tuple containing a pointer to the C array and the length of the C array. + pub fn to_c_array(hob_list: &HobList) -> (*const c_void, usize) { + let size = hob_list.size(); + let mut c_array: Vec = Vec::with_capacity(size); + + for hob in hob_list.iter() { + // SAFETY: Test code - creating a slice from HOB pointer for serialization. + // All HOB variants must have contiguous backing memory where as_ptr() points to + // the start and size() covers the remainder. 
+ let slice = unsafe { from_raw_parts(hob.as_ptr(), hob.size()) }; + c_array.extend_from_slice(slice); + } + + let void_ptr = c_array.as_ptr() as *const c_void; + + // in order to not call the destructor on the Vec at the end of this function, we need to forget it + forget(c_array); + + (void_ptr, size) + } + + // Implements a function to manually free a C array. + // + // # Arguments + // * `c_array_ptr` - A pointer to the C array. + // * `len` - The length of the C array. + // + // # Safety + // + // The caller must ensure that the pointer and length match a Vec originally created by to_c_array. + pub fn manually_free_c_array(c_array_ptr: *const c_void, len: usize) { + let ptr = c_array_ptr as *mut u8; + // SAFETY: Caller is responsible for ensuring the pointer and length are valid per the function contract. + unsafe { + drop(Vec::from_raw_parts(ptr, len, len)); + } + } + + #[test] + fn test_hoblist_empty() { + let hoblist = HobList::new(); + assert_eq!(hoblist.len(), 0); + assert!(hoblist.is_empty()); + } + + #[test] + fn test_hoblist_push() { + let mut hoblist = HobList::new(); + let resource = gen_resource_descriptor(); + hoblist.push(Hob::ResourceDescriptor(&resource)); + assert_eq!(hoblist.len(), 1); + + let firmware_volume = gen_firmware_volume(); + hoblist.push(Hob::FirmwareVolume(&firmware_volume)); + + assert_eq!(hoblist.len(), 2); + + let resource_v2 = gen_resource_descriptor_v2(); + hoblist.push(Hob::ResourceDescriptorV2(&resource_v2)); + + assert_eq!(hoblist.len(), 3); + } + + #[test] + fn test_hoblist_iterate() { + let mut hoblist = HobList::default(); + let resource = gen_resource_descriptor(); + let firmware_volume = gen_firmware_volume(); + let firmware_volume2 = gen_firmware_volume2(); + let firmware_volume3 = gen_firmware_volume3(); + let end_of_hob_list = gen_end_of_hoblist(); + let capsule = gen_capsule(); + let guid_hob_buf = gen_guid_hob(); + let (guid_hob, guid_hob_data) = guid_hob_refs(&guid_hob_buf); + let memory_allocation = 
gen_memory_allocation(); + let memory_allocation_module = gen_memory_allocation_module(); + + hoblist.push(Hob::ResourceDescriptor(&resource)); + hoblist.push(Hob::FirmwareVolume(&firmware_volume)); + hoblist.push(Hob::FirmwareVolume2(&firmware_volume2)); + hoblist.push(Hob::FirmwareVolume3(&firmware_volume3)); + hoblist.push(Hob::Capsule(&capsule)); + hoblist.push(Hob::GuidHob(guid_hob, guid_hob_data)); + hoblist.push(Hob::MemoryAllocation(&memory_allocation)); + hoblist.push(Hob::MemoryAllocationModule(&memory_allocation_module)); + hoblist.push(Hob::Handoff(&end_of_hob_list)); + + let mut count = 0; + hoblist.iter().for_each(|hob| { + match hob { + Hob::ResourceDescriptor(resource) => { + assert_eq!(resource.resource_type, hob::EFI_RESOURCE_SYSTEM_MEMORY); + } + Hob::MemoryAllocation(memory_allocation) => { + assert_eq!(memory_allocation.alloc_descriptor.memory_length, 0x0123456789abcdef); + } + Hob::MemoryAllocationModule(memory_allocation_module) => { + assert_eq!(memory_allocation_module.alloc_descriptor.memory_length, 0x0123456789abcdef); + } + Hob::Capsule(capsule) => { + assert_eq!(capsule.base_address, 0); + } + Hob::GuidHob(guid_hob, data) => { + assert_eq!(guid_hob.name, r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11])); + assert_eq!(*data, &[1_u8, 2, 3, 4, 5, 6, 7, 8]); + } + Hob::FirmwareVolume(firmware_volume) => { + assert_eq!(firmware_volume.length, 0x0123456789abcdef); + } + Hob::FirmwareVolume2(firmware_volume) => { + assert_eq!(firmware_volume.length, 0x0123456789abcdef); + } + Hob::FirmwareVolume3(firmware_volume) => { + assert_eq!(firmware_volume.length, 0x0123456789abcdef); + } + Hob::Handoff(handoff) => { + assert_eq!(handoff.memory_top, 0xdeadbeef); + } + _ => { + panic!("Unexpected hob type"); + } + } + count += 1; + }); + assert_eq!(count, 9); + } + + #[test] + fn test_hoblist_discover() { + // generate some test hobs + let resource = gen_resource_descriptor(); + let handoff = gen_phase_handoff_information_table(); + 
let firmware_volume = gen_firmware_volume(); + let firmware_volume2 = gen_firmware_volume2(); + let firmware_volume3 = gen_firmware_volume3(); + let capsule = gen_capsule(); + let guid_hob_buf = gen_guid_hob(); + let (guid_hob, guid_hob_data) = guid_hob_refs(&guid_hob_buf); + let memory_allocation = gen_memory_allocation(); + let memory_allocation_module = gen_memory_allocation_module(); + let cpu = gen_cpu(); + let resource_v2 = gen_resource_descriptor_v2(); + let end_of_hob_list = gen_end_of_hoblist(); + + // create a new hoblist + let mut hoblist = HobList::new(); + + // Push the resource descriptor to the hoblist + hoblist.push(Hob::ResourceDescriptor(&resource)); + hoblist.push(Hob::Handoff(&handoff)); + hoblist.push(Hob::FirmwareVolume(&firmware_volume)); + hoblist.push(Hob::FirmwareVolume2(&firmware_volume2)); + hoblist.push(Hob::FirmwareVolume3(&firmware_volume3)); + hoblist.push(Hob::Capsule(&capsule)); + hoblist.push(Hob::GuidHob(guid_hob, guid_hob_data)); + hoblist.push(Hob::MemoryAllocation(&memory_allocation)); + hoblist.push(Hob::MemoryAllocationModule(&memory_allocation_module)); + hoblist.push(Hob::Cpu(&cpu)); + hoblist.push(Hob::ResourceDescriptorV2(&resource_v2)); + hoblist.push(Hob::Handoff(&end_of_hob_list)); + + // assert that the hoblist has 3 hobs and they are of the correct type + + let mut count = 0; + hoblist.iter().for_each(|hob| { + match hob { + Hob::ResourceDescriptor(resource) => { + assert_eq!(resource.resource_type, hob::EFI_RESOURCE_SYSTEM_MEMORY); + } + Hob::MemoryAllocation(memory_allocation) => { + assert_eq!(memory_allocation.alloc_descriptor.memory_length, 0x0123456789abcdef); + } + Hob::MemoryAllocationModule(memory_allocation_module) => { + assert_eq!(memory_allocation_module.alloc_descriptor.memory_length, 0x0123456789abcdef); + } + Hob::Capsule(capsule) => { + assert_eq!(capsule.base_address, 0); + } + Hob::GuidHob(guid_hob, data) => { + assert_eq!(guid_hob.name, r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 
10, 11])); + assert_eq!(&data[..], guid_hob_data); + } + Hob::FirmwareVolume(firmware_volume) => { + assert_eq!(firmware_volume.length, 0x0123456789abcdef); + } + Hob::FirmwareVolume2(firmware_volume) => { + assert_eq!(firmware_volume.length, 0x0123456789abcdef); + } + Hob::FirmwareVolume3(firmware_volume) => { + assert_eq!(firmware_volume.length, 0x0123456789abcdef); + } + Hob::Handoff(handoff) => { + assert_eq!(handoff.memory_top, 0xdeadbeef); + } + Hob::Cpu(cpu) => { + assert_eq!(cpu.size_of_memory_space, 0); + } + Hob::ResourceDescriptorV2(resource) => { + assert_eq!(resource.v1.header.r#type, hob::RESOURCE_DESCRIPTOR2); + assert_eq!(resource.v1.resource_type, hob::EFI_RESOURCE_SYSTEM_MEMORY); + } + _ => { + panic!("Unexpected hob type"); + } + } + count += 1; + }); + + assert_eq!(count, 12); + + // c_hoblist is a pointer to the hoblist - we need to manually free it later + let (c_array_hoblist, length) = to_c_array(&hoblist); + + // create a new hoblist + let mut cloned_hoblist = HobList::new(); + cloned_hoblist.discover_hobs(c_array_hoblist); + + // assert that the hoblist has 2 hobs and they are of the correct type + // we don't need to check the end of hoblist hob as it will not be 'discovered' + // by the discover_hobs function and simply end the iteration + count = 0; + hoblist.into_iter().for_each(|hob| { + match hob { + Hob::ResourceDescriptor(resource) => { + assert_eq!(resource.resource_type, hob::EFI_RESOURCE_SYSTEM_MEMORY); + } + Hob::MemoryAllocation(memory_allocation) => { + assert_eq!(memory_allocation.alloc_descriptor.memory_length, 0x0123456789abcdef); + } + Hob::MemoryAllocationModule(memory_allocation_module) => { + assert_eq!(memory_allocation_module.alloc_descriptor.memory_length, 0x0123456789abcdef); + } + Hob::Capsule(capsule) => { + assert_eq!(capsule.base_address, 0); + } + Hob::GuidHob(guid_hob, data) => { + assert_eq!(guid_hob.name, r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11])); + assert_eq!(data, &[1_u8, 2, 3, 
4, 5, 6, 7, 8]); + } + Hob::FirmwareVolume(firmware_volume) => { + assert_eq!(firmware_volume.length, 0x0123456789abcdef); + } + Hob::FirmwareVolume2(firmware_volume) => { + assert_eq!(firmware_volume.length, 0x0123456789abcdef); + } + Hob::FirmwareVolume3(firmware_volume) => { + assert_eq!(firmware_volume.length, 0x0123456789abcdef); + } + Hob::Handoff(handoff) => { + assert_eq!(handoff.memory_top, 0xdeadbeef); + } + Hob::ResourceDescriptorV2(resource) => { + assert_eq!(resource.v1.header.r#type, hob::RESOURCE_DESCRIPTOR2); + assert_eq!(resource.v1.resource_type, hob::EFI_RESOURCE_SYSTEM_MEMORY); + } + Hob::Cpu(cpu) => { + assert_eq!(cpu.size_of_memory_space, 0); + } + _ => { + panic!("Unexpected hob type"); + } + } + count += 1; + }); + + assert_eq!(count, 12); + + // free the c array + manually_free_c_array(c_array_hoblist, length); + } + + #[test] + fn test_hob_iterator() { + // generate some test hobs + let resource = gen_resource_descriptor(); + let handoff = gen_phase_handoff_information_table(); + let firmware_volume = gen_firmware_volume(); + let firmware_volume2 = gen_firmware_volume2(); + let firmware_volume3 = gen_firmware_volume3(); + let capsule = gen_capsule(); + let guid_hob_buf = gen_guid_hob(); + let (guid_hob, guid_hob_data) = guid_hob_refs(&guid_hob_buf); + let memory_allocation = gen_memory_allocation(); + let memory_allocation_module = gen_memory_allocation_module(); + let cpu = gen_cpu(); + let end_of_hob_list = gen_end_of_hoblist(); + + // create a new hoblist + let mut hoblist = HobList::new(); + + // Push the resource descriptor to the hoblist + hoblist.push(Hob::ResourceDescriptor(&resource)); + hoblist.push(Hob::Handoff(&handoff)); + hoblist.push(Hob::FirmwareVolume(&firmware_volume)); + hoblist.push(Hob::FirmwareVolume2(&firmware_volume2)); + hoblist.push(Hob::FirmwareVolume3(&firmware_volume3)); + hoblist.push(Hob::Capsule(&capsule)); + hoblist.push(Hob::GuidHob(guid_hob, guid_hob_data)); + 
hoblist.push(Hob::MemoryAllocation(&memory_allocation)); + hoblist.push(Hob::MemoryAllocationModule(&memory_allocation_module)); + hoblist.push(Hob::Cpu(&cpu)); + hoblist.push(Hob::Handoff(&end_of_hob_list)); + + let (c_array_hoblist, length) = to_c_array(&hoblist); + + // SAFETY: Test code - creating a reference from C array pointer for HOB testing. + let hob = Hob::ResourceDescriptor(unsafe { + (c_array_hoblist as *const hob::ResourceDescriptor).as_ref::<'static>().unwrap() + }); + for h in &hob { + println!("{:?}", h.header()); + } + + manually_free_c_array(c_array_hoblist, length); + } + + #[test] + fn test_hob_iterator2() { + let resource = gen_resource_descriptor(); + let handoff = gen_phase_handoff_information_table(); + let firmware_volume = gen_firmware_volume(); + let firmware_volume2 = gen_firmware_volume2(); + let firmware_volume3 = gen_firmware_volume3(); + let capsule = gen_capsule(); + let guid_hob_buf = gen_guid_hob(); + let (guid_hob, guid_hob_data) = guid_hob_refs(&guid_hob_buf); + let memory_allocation = gen_memory_allocation(); + let memory_allocation_module = gen_memory_allocation_module(); + let cpu = gen_cpu(); + let resource_v2 = gen_resource_descriptor_v2(); + let end_of_hob_list = gen_end_of_hoblist(); + + // create a new hoblist + let mut hoblist = HobList::new(); + + // Push the resource descriptor to the hoblist + hoblist.push(Hob::ResourceDescriptor(&resource)); + hoblist.push(Hob::Handoff(&handoff)); + hoblist.push(Hob::FirmwareVolume(&firmware_volume)); + hoblist.push(Hob::FirmwareVolume2(&firmware_volume2)); + hoblist.push(Hob::FirmwareVolume3(&firmware_volume3)); + hoblist.push(Hob::Capsule(&capsule)); + hoblist.push(Hob::GuidHob(guid_hob, guid_hob_data)); + hoblist.push(Hob::MemoryAllocation(&memory_allocation)); + hoblist.push(Hob::MemoryAllocationModule(&memory_allocation_module)); + hoblist.push(Hob::Cpu(&cpu)); + hoblist.push(Hob::ResourceDescriptorV2(&resource_v2)); + hoblist.push(Hob::Handoff(&end_of_hob_list)); + + // Make 
sure we can iterate over a reference to a HobList without + // consuming it. + for hob in &hoblist { + println!("{:?}", hob.header()); + } + + for hob in hoblist { + println!("{:?}", hob.header()); + } + } + + #[test] + fn test_relocate_hobs() { + // generate some test hobs + let resource = gen_resource_descriptor(); + let handoff = gen_phase_handoff_information_table(); + let firmware_volume = gen_firmware_volume(); + let firmware_volume2 = gen_firmware_volume2(); + let firmware_volume3 = gen_firmware_volume3(); + let capsule = gen_capsule(); + let guid_hob_buf = gen_guid_hob(); + let (guid_hob, guid_hob_data) = guid_hob_refs(&guid_hob_buf); + let memory_allocation = gen_memory_allocation(); + let memory_allocation_module = gen_memory_allocation_module(); + let cpu = gen_cpu(); + let resource_v2 = gen_resource_descriptor_v2(); + let end_of_hob_list = gen_end_of_hoblist(); + + // create a new hoblist + let mut hoblist = HobList::new(); + + // Push the resource descriptor to the hoblist + hoblist.push(Hob::ResourceDescriptor(&resource)); + hoblist.push(Hob::Handoff(&handoff)); + hoblist.push(Hob::FirmwareVolume(&firmware_volume)); + hoblist.push(Hob::FirmwareVolume2(&firmware_volume2)); + hoblist.push(Hob::FirmwareVolume3(&firmware_volume3)); + hoblist.push(Hob::Capsule(&capsule)); + hoblist.push(Hob::GuidHob(guid_hob, guid_hob_data)); + hoblist.push(Hob::MemoryAllocation(&memory_allocation)); + hoblist.push(Hob::MemoryAllocationModule(&memory_allocation_module)); + hoblist.push(Hob::Cpu(&cpu)); + hoblist.push(Hob::Misc(12345)); + hoblist.push(Hob::ResourceDescriptorV2(&resource_v2)); + hoblist.push(Hob::Handoff(&end_of_hob_list)); + + let hoblist_address = hoblist.as_mut_ptr::<()>() as usize; + let hoblist_len = hoblist.len(); + hoblist.relocate_hobs(); + assert_eq!( + hoblist_address, + hoblist.as_mut_ptr::<()>() as usize, + "Only hobs need to be relocated, not the vector." 
+ ); + assert_eq!(hoblist_len, hoblist.len()); + + for (i, hob) in hoblist.into_iter().enumerate() { + match hob { + Hob::ResourceDescriptor(hob) if i == 0 => { + assert_ne!(ptr::addr_of!(resource), hob); + assert_eq!(resource, *hob); + } + Hob::Handoff(hob) if i == 1 => { + assert_ne!(ptr::addr_of!(handoff), hob); + assert_eq!(handoff, *hob); + } + Hob::FirmwareVolume(hob) if i == 2 => { + assert_ne!(ptr::addr_of!(firmware_volume), hob); + assert_eq!(firmware_volume, *hob); + } + Hob::FirmwareVolume2(hob) if i == 3 => { + assert_ne!(ptr::addr_of!(firmware_volume2), hob); + assert_eq!(firmware_volume2, *hob); + } + Hob::FirmwareVolume3(hob) if i == 4 => { + assert_ne!(ptr::addr_of!(firmware_volume3), hob); + assert_eq!(firmware_volume3, *hob); + } + Hob::Capsule(hob) if i == 5 => { + assert_ne!(ptr::addr_of!(capsule), hob); + assert_eq!(capsule, *hob); + } + Hob::GuidHob(hob, hob_data) if i == 6 => { + assert_ne!(ptr::from_ref(guid_hob), ptr::from_ref(hob)); + assert_ne!(guid_hob_data.as_ptr(), hob_data.as_ptr()); + assert_eq!(guid_hob.header, hob.header); + assert_eq!(guid_hob.name, hob.name); + assert_eq!(guid_hob_data, hob_data); + } + Hob::MemoryAllocation(hob) if i == 7 => { + assert_ne!(ptr::addr_of!(memory_allocation), hob); + assert_eq!(memory_allocation.header, hob.header); + assert_eq!(memory_allocation.alloc_descriptor, hob.alloc_descriptor); + } + Hob::MemoryAllocationModule(hob) if i == 8 => { + assert_ne!(ptr::addr_of!(memory_allocation_module), hob); + assert_eq!(memory_allocation_module, *hob); + } + Hob::Cpu(hob) if i == 9 => { + assert_ne!(ptr::addr_of!(cpu), hob); + assert_eq!(cpu, *hob); + } + Hob::Misc(hob) if i == 10 => { + assert_eq!(12345, hob); + } + Hob::ResourceDescriptorV2(hob) if i == 11 => { + assert_ne!(ptr::addr_of!(resource_v2), hob); + assert_eq!(resource_v2, *hob); + } + Hob::Handoff(hob) if i == 12 => { + assert_ne!(ptr::addr_of!(end_of_hob_list), hob); + assert_eq!(end_of_hob_list, *hob); + } + _ => panic!("Hob at index: {i}."), 
+ } + } + } + + #[test] + fn test_hoblist_debug_display() { + use alloc::format; + + let mut hoblist = HobList::new(); + let handoff = gen_phase_handoff_information_table(); + hoblist.push(Hob::Handoff(&handoff)); + + let debug_output = format!("{:?}", hoblist); + + assert!(debug_output.contains("PHASE HANDOFF INFORMATION TABLE")); + assert!(debug_output.contains("HOB Length:")); + assert!(debug_output.contains("Version:")); + assert!(debug_output.contains("Boot Mode:")); + assert!(debug_output.contains("Memory Bottom:")); + assert!(debug_output.contains("Memory Top:")); + assert!(debug_output.contains("Free Memory Bottom:")); + assert!(debug_output.contains("Free Memory Top:")); + assert!(debug_output.contains("End of HOB List:")); + } + + #[test] + fn test_get_pi_hob_list_size_single_hob() { + use core::ffi::c_void; + + let end_of_list = gen_end_of_hoblist(); + + // SAFETY: The list is created in this test with a valid end-of-list marker + let size = unsafe { get_pi_hob_list_size(&end_of_list as *const _ as *const c_void) }; + + assert_eq!(size, size_of::()); + } + + #[test] + fn test_get_pi_hob_list_size_multiple_hobs() { + use core::ffi::c_void; + + // Create a HOB list with multiple HOBs in contiguous memory + let capsule = gen_capsule(); + let firmware_volume = gen_firmware_volume(); + let end_of_list = gen_end_of_hoblist(); + + let expected_size = + size_of::() + size_of::() + size_of::(); + + // This buffer will hold the contiguous HOBs + let mut buffer = Vec::new(); + + // Add a capsule HOB + // SAFETY: Creating a byte slice from a struct for test purposes. + let capsule_bytes = + unsafe { core::slice::from_raw_parts(&capsule as *const Capsule as *const u8, size_of::()) }; + buffer.extend_from_slice(capsule_bytes); + + // Add a firmware volume HOB + // SAFETY: Creating a byte slice from a struct for test purposes. 
+ let fv_bytes = unsafe { + core::slice::from_raw_parts( + &firmware_volume as *const FirmwareVolume as *const u8, + size_of::(), + ) + }; + buffer.extend_from_slice(fv_bytes); + + // Add an end-of-list HOB + // SAFETY: Creating a byte slice from a struct for test purposes. + let end_bytes = unsafe { + core::slice::from_raw_parts( + &end_of_list as *const PhaseHandoffInformationTable as *const u8, + size_of::(), + ) + }; + buffer.extend_from_slice(end_bytes); + + // SAFETY: The list is created in this test with headers and an end-of-list marker that should be valid + let size = unsafe { get_pi_hob_list_size(buffer.as_ptr() as *const c_void) }; + + assert_eq!(size, expected_size); + } + + #[test] + fn test_get_pi_hob_list_size_varied_hob_types() { + use core::ffi::c_void; + + // Create a HOB list with various HOB types + let cpu = gen_cpu(); + let resource = gen_resource_descriptor(); + let memory_alloc = gen_memory_allocation(); + let end_of_list = gen_end_of_hoblist(); + + let expected_size = size_of::() + + size_of::() + + size_of::() + + size_of::(); + + // This buffer will hold the contiguous HOBs + let mut buffer = Vec::new(); + + // SAFETY: Creating a byte slice from a struct for test purposes. + buffer.extend_from_slice(unsafe { + core::slice::from_raw_parts(&cpu as *const Cpu as *const u8, size_of::()) + }); + + // SAFETY: Creating a byte slice from a struct for test purposes. + buffer.extend_from_slice(unsafe { + core::slice::from_raw_parts( + &resource as *const ResourceDescriptor as *const u8, + size_of::(), + ) + }); + + // SAFETY: Creating a byte slice from a struct for test purposes. + buffer.extend_from_slice(unsafe { + core::slice::from_raw_parts( + &memory_alloc as *const MemoryAllocation as *const u8, + size_of::(), + ) + }); + + // SAFETY: Creating a byte slice from a struct for test purposes. 
+ buffer.extend_from_slice(unsafe { + core::slice::from_raw_parts( + &end_of_list as *const PhaseHandoffInformationTable as *const u8, + size_of::(), + ) + }); + + // SAFETY: The list is created in this test with headers and an end-of-list marker that should be valid + let size = unsafe { get_pi_hob_list_size(buffer.as_ptr() as *const c_void) }; + + assert_eq!(size, expected_size); + } +} diff --git a/sdk/patina/src/pi/mm_cis.rs b/sdk/patina/src/pi/mm_cis.rs new file mode 100644 index 000000000..7b2aff6ba --- /dev/null +++ b/sdk/patina/src/pi/mm_cis.rs @@ -0,0 +1,397 @@ +//! Management Mode (MM) Core Interface Definitions +//! +//! This module contains definitions related to the MM Core Interface as defined +//! in the UEFI Platform Initialization Specification. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +use crate::pi::spec_version; +use core::ffi::c_void; +use r_efi::{ + efi, + efi::{ + BootAllocatePages, BootAllocatePool, BootFreePages, BootFreePool, BootHandleProtocol, + BootInstallProtocolInterface, BootLocateHandle, BootLocateProtocol, BootUninstallProtocolInterface, + }, +}; + +/// MMST signature: `'S', 'M', 'S', 'T'` (same as C `MM_MMST_SIGNATURE`). +pub const MM_MMST_SIGNATURE: u32 = u32::from_le_bytes([b'S', b'M', b'S', b'T']); + +/// MMST major revision, the same as the PI Specification major revision. +pub const MM_MMST_REVISION_MAJOR: u32 = spec_version::PI_SPECIFICATION_MAJOR_REVISION; + +/// MMST minor revision, the same as the PI Specification minor revision. +pub const MM_MMST_REVISION_MINOR: u32 = spec_version::PI_SPECIFICATION_MINOR_REVISION; + +/// PI Specification version encoded as `(major << 16) | minor`. +pub const MM_SYSTEM_TABLE_REVISION: u32 = (MM_MMST_REVISION_MAJOR << 16) | MM_MMST_REVISION_MINOR; + +// +// This EFI_MM_CPU_IO_PROTOCOL is embedded in MMST, so we need to define it here to be able to parse the MMST correctly. 
+// + +/// A single MM I/O access function pointer. +/// +/// Matches the C typedef `EFI_MM_CPU_IO`: +/// ```c +/// typedef EFI_STATUS (EFIAPI *EFI_MM_CPU_IO)( +/// IN CONST EFI_MM_CPU_IO_PROTOCOL *This, +/// IN EFI_MM_IO_WIDTH Width, +/// IN UINT64 Address, +/// IN UINTN Count, +/// IN OUT VOID *Buffer +/// ); +/// ``` +pub type MmCpuIoFn = unsafe extern "efiapi" fn( + this: *const MmCpuIoAccess, + width: usize, + address: u64, + count: usize, + buffer: *mut c_void, +) -> efi::Status; + +/// MM CPU I/O access pair (Read + Write). +/// +/// Matches `EFI_MM_IO_ACCESS`. +/// ```c +/// typedef struct { +/// /// +/// /// This service provides the various modalities of memory and I/O read. +/// /// +/// EFI_MM_CPU_IO Read; +/// /// +/// /// This service provides the various modalities of memory and I/O write. +/// /// +/// EFI_MM_CPU_IO Write; +/// } EFI_MM_IO_ACCESS; +/// ``` +#[repr(C)] +#[derive(Debug, Clone, Copy)] +pub struct MmCpuIoAccess { + /// This service provides the various modalities of memory and I/O read. + pub read: MmCpuIoFn, + /// This service provides the various modalities of memory and I/O write. + pub write: MmCpuIoFn, +} + +/// The `EFI_MM_CPU_IO_PROTOCOL` embedded in the system table. +/// +/// ```c +/// typedef struct _EFI_MM_CPU_IO_PROTOCOL { +/// EFI_MM_IO_ACCESS Mem; +/// EFI_MM_IO_ACCESS Io; +/// } EFI_MM_CPU_IO_PROTOCOL; +/// ``` +#[repr(C)] +#[derive(Debug, Clone, Copy)] +pub struct MmCpuIoProtocol { + /// MMIO access pair (Read + Write). + pub mem: MmCpuIoAccess, + /// I/O port access functions. + pub io: MmCpuIoAccess, +} + +/// Adds, updates, or removes a configuration table entry from the Management Mode System Table. 
+/// +/// This function matches the C typedef `EFI_MM_INSTALL_CONFIGURATION_TABLE`: +/// ```c +/// typedef +/// EFI_STATUS +/// (EFIAPI *EFI_MM_INSTALL_CONFIGURATION_TABLE)( +/// IN CONST EFI_MM_SYSTEM_TABLE *SystemTable, +/// IN CONST EFI_GUID *Guid, +/// IN VOID *Table, +/// IN UINTN TableSize +/// ); +/// ``` +pub type MmInstallConfigurationTableFn = unsafe extern "efiapi" fn( + system_table: *const EfiMmSystemTable, + guid: *const efi::Guid, + table: *mut c_void, + table_size: usize, +) -> efi::Status; + +/// Allocates pool memory from the specified memory type. +/// +/// This function matches the C typedef `EFI_MM_ALLOCATE_POOL`, which is already defined in `r_efi::efi` as `BootAllocatePool`. +pub type MmAllocatePoolFn = BootAllocatePool; + +/// Frees pool memory. +/// +/// This function matches the C typedef `EFI_MM_FREE_POOL`, which is already defined in `r_efi::efi` as `BootFreePool`. +pub type MmFreePoolFn = BootFreePool; + +/// Allocates memory pages from the system. +/// +/// This function matches the C typedef `EFI_ALLOCATE_PAGES`, which is already defined in `r_efi::efi` as `BootAllocatePages`. +pub type MmAllocatePagesFn = BootAllocatePages; + +/// Frees memory pages. +/// +/// This function matches the C typedef `EFI_FREE_PAGES`, which is already defined in `r_efi::efi` as `BootFreePages`. +pub type MmFreePagesFn = BootFreePages; + +/// `EFI_MM_STARTUP_THIS_AP` +pub type MmStartupThisApFn = + unsafe extern "efiapi" fn(procedure: usize, cpu_number: usize, proc_arguments: *mut c_void) -> efi::Status; + +/// Installs a protocol interface on a device handle. +/// +/// This function matches the C typedef `EFI_INSTALL_PROTOCOL_INTERFACE`, which is already defined in `r_efi::efi` as `BootInstallProtocolInterface`. +pub type MmInstallProtocolInterfaceFn = BootInstallProtocolInterface; + +/// Removes a protocol interface from a device handle. 
+/// +/// This function matches the C typedef `EFI_UNINSTALL_PROTOCOL_INTERFACE`, which is already defined in `r_efi::efi` as `BootUninstallProtocolInterface`. +pub type MmUninstallProtocolInterfaceFn = BootUninstallProtocolInterface; + +/// Queries a handle to determine if it supports a specified protocol. +/// +/// This function matches the C typedef `EFI_HANDLE_PROTOCOL`, which is already defined in `r_efi::efi` as `BootHandleProtocol`. +pub type MmHandleProtocolFn = BootHandleProtocol; + +/// Register a callback function be called when a particular protocol interface is installed. +/// +/// This function matches the C typedef `EFI_MM_REGISTER_PROTOCOL_NOTIFY`: +/// ```c +/// typedef +/// EFI_STATUS +/// (EFIAPI *EFI_MM_REGISTER_PROTOCOL_NOTIFY)( +/// IN CONST EFI_GUID *Protocol, +/// IN EFI_MM_NOTIFY_FN Function, +/// OUT VOID **Registration +/// ); +/// ``` +pub type MmRegisterProtocolNotifyFn = unsafe extern "efiapi" fn( + protocol: *const efi::Guid, + function: usize, + registration: *mut *mut c_void, +) -> efi::Status; + +/// Returns an array of handles that support a specified protocol. +/// +/// This function matches the C typedef `EFI_LOCATE_HANDLE`, which is already defined in `r_efi::efi` as `BootLocateHandle`. +pub type MmLocateHandleFn = BootLocateHandle; + +/// Returns the first protocol instance that matches the given protocol. +/// +/// This function matches the C typedef `EFI_LOCATE_PROTOCOL`, which is already defined in `r_efi::efi` as `BootLocateProtocol`. +pub type MmLocateProtocolFn = BootLocateProtocol; + +/// Manage MMI of a particular type. 
+/// +/// This function matches the C typedef `EFI_MM_INTERRUPT_MANAGE`: +/// ```c +/// typedef +/// EFI_STATUS +/// (EFIAPI *EFI_MM_INTERRUPT_MANAGE)( +/// IN CONST EFI_GUID *HandlerType, +/// IN CONST VOID *Context OPTIONAL, +/// IN OUT VOID *CommBuffer OPTIONAL, +/// IN OUT UINTN *CommBufferSize OPTIONAL +/// ); +/// ``` +pub type MmiManageFn = unsafe extern "efiapi" fn( + handler_type: *const efi::Guid, + context: *const c_void, + comm_buffer: *mut c_void, + comm_buffer_size: *mut usize, +) -> efi::Status; + +/// Main entry point for an MM handler dispatch or communicate-based callback. +/// +/// This function matches the C typedef `EFI_MM_HANDLER_ENTRY_POINT`: +/// ```c +/// typedef +/// EFI_STATUS +/// (EFIAPI *EFI_MM_HANDLER_ENTRY_POINT)( +/// IN EFI_HANDLE DispatchHandle, +/// IN CONST VOID *Context OPTIONAL, +/// IN OUT VOID *CommBuffer OPTIONAL, +/// IN OUT UINTN *CommBufferSize OPTIONAL +/// ); +/// ``` +pub type MmiHandlerEntryPoint = unsafe extern "efiapi" fn( + dispatch_handle: efi::Handle, + context: *const c_void, + comm_buffer: *mut c_void, + comm_buffer_size: *mut usize, +) -> efi::Status; + +/// Registers a handler entry point for a particular MMI handler type. +/// +/// This function matches the C typedef `EFI_MM_INTERRUPT_REGISTER`: +/// ```c +/// typedef +/// EFI_STATUS +/// (EFIAPI *EFI_MM_INTERRUPT_REGISTER)( +/// IN EFI_MM_HANDLER_ENTRY_POINT Handler, +/// IN CONST EFI_GUID *HandlerType OPTIONAL, +/// OUT EFI_HANDLE *DispatchHandle +/// ); +/// ``` +pub type MmiHandlerRegisterFn = unsafe extern "efiapi" fn( + handler: MmiHandlerEntryPoint, + handler_type: *const efi::Guid, + dispatch_handle: *mut efi::Handle, +) -> efi::Status; + +/// Unregister a handler in MM. 
+/// +/// This function matches the C typedef `EFI_MM_INTERRUPT_UNREGISTER`: +/// ```c +/// typedef +/// EFI_STATUS +/// (EFIAPI *EFI_MM_INTERRUPT_UNREGISTER)( +/// IN EFI_HANDLE DispatchHandle +/// ); +/// ``` +pub type MmiHandlerUnregisterFn = unsafe extern "efiapi" fn(dispatch_handle: efi::Handle) -> efi::Status; + +/// EFI_MM_ENTRY_CONTEXT structure. +/// +/// Processor information and functionality needed by MM Foundation. +/// Matches the C `EFI_MM_ENTRY_CONTEXT` from PI specification. +/// +/// Layout (x86_64, all fields 8 bytes): +/// - `mm_startup_this_ap`: Function pointer for `EFI_MM_STARTUP_THIS_AP` +/// - `currently_executing_cpu`: Index of the processor executing the MM Foundation +/// - `number_of_cpus`: Total number of possible processors in the platform (1-based) +/// - `cpu_save_state_size`: Pointer to array of save state sizes per CPU +/// - `cpu_save_state`: Pointer to array of CPU save state pointers +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct EfiMmEntryContext { + /// Function pointer for EFI_MM_STARTUP_THIS_AP. + pub mm_startup_this_ap: u64, + /// Index of the currently executing CPU. + pub currently_executing_cpu: u64, + /// Total number of CPUs (1-based). + pub number_of_cpus: u64, + /// Pointer to array of per-CPU save state sizes. + pub cpu_save_state_size: u64, + /// Pointer to array of per-CPU save state pointers. + pub cpu_save_state: u64, +} + +/// The Management Mode System Table (MMST). +/// +/// This is the `#[repr(C)]` Rust definition of the C `EFI_MM_SYSTEM_TABLE` structure +/// from `PiMmCis.h`. The table pointer is passed as the second argument to +/// every MM driver's entry point: +/// +/// ```c +/// EFI_STATUS EFIAPI DriverEntry(EFI_HANDLE ImageHandle, EFI_MM_SYSTEM_TABLE *MmSt); +/// ``` +#[repr(C)] +pub struct EfiMmSystemTable { + /// + /// The table header for the SMST. + /// + pub hdr: efi::TableHeader, + + /// + /// A pointer to a NULL-terminated Unicode string containing the vendor name. 
+ /// It is permissible for this pointer to be NULL. + /// + pub mm_firmware_vendor: *mut u16, + /// + /// The particular revision of the firmware. + /// + pub mm_firmware_revision: u32, + + /// Function to add, update, or remove a configuration table entry from the MMST. + pub mm_install_configuration_table: MmInstallConfigurationTableFn, + + /// + /// I/O Service + /// + pub mm_io: MmCpuIoProtocol, + + /// + /// Runtime memory services + /// + /// This function matches the C typedef `EFI_MM_ALLOCATE_POOL`, which allocates pool memory from the specified memory type. + pub mm_allocate_pool: MmAllocatePoolFn, + /// This function matches the C typedef `EFI_MM_FREE_POOL`, which frees pool memory. + pub mm_free_pool: MmFreePoolFn, + /// This function matches the C typedef `EFI_ALLOCATE_PAGES`, which allocates memory pages from the system. + pub mm_allocate_pages: MmAllocatePagesFn, + /// This function matches the C typedef `EFI_FREE_PAGES`, which frees memory pages. + pub mm_free_pages: MmFreePagesFn, + + /// + /// MP service + /// + pub mm_startup_this_ap: MmStartupThisApFn, + + /// + /// CPU information records + /// + /// A number between zero and and the NumberOfCpus field. This field designates + /// which processor is executing the MM infrastructure. + /// + pub currently_executing_cpu: usize, + /// + /// The number of possible processors in the platform. This is a 1 based counter. + /// + pub number_of_cpus: usize, + /// + /// Points to an array, where each element describes the number of bytes in the + /// corresponding save state specified by CpuSaveState. There are always + /// NumberOfCpus entries in the array. + /// + pub cpu_save_state_size: *mut usize, + /// + /// Points to an array, where each element is a pointer to a CPU save state. The + /// corresponding element in CpuSaveStateSize specifies the number of bytes in the + /// save state area. There are always NumberOfCpus entries in the array. 
+ /// + pub cpu_save_state: *mut *mut c_void, + + /// + /// Extensibility table + /// + /// + /// The number of UEFI Configuration Tables in the buffer MmConfigurationTable. + /// + pub number_of_table_entries: usize, + /// + /// A pointer to the UEFI Configuration Tables. The number of entries in the table is + /// NumberOfTableEntries. + /// + pub mm_configuration_table: *mut efi::ConfigurationTable, + + /// + /// Protocol services + /// + /// + /// This function matches the C typedef `EFI_INSTALL_PROTOCOL_INTERFACE`, which installs a protocol interface on a device handle. + pub mm_install_protocol_interface: MmInstallProtocolInterfaceFn, + /// This function matches the C typedef `EFI_UNINSTALL_PROTOCOL_INTERFACE`, which removes a protocol interface from a device handle. + pub mm_uninstall_protocol_interface: MmUninstallProtocolInterfaceFn, + /// This function matches the C typedef `EFI_HANDLE_PROTOCOL`, which queries a handle to determine if it supports a specified protocol. + pub mm_handle_protocol: MmHandleProtocolFn, + /// This function matches the C typedef `EFI_MM_REGISTER_PROTOCOL_NOTIFY`, which registers a callback function be called when a particular protocol interface is installed. + pub mm_register_protocol_notify: MmRegisterProtocolNotifyFn, + /// This function matches the C typedef `EFI_LOCATE_HANDLE`, which returns an array of handles that support a specified protocol. + pub mm_locate_handle: MmLocateHandleFn, + /// This function matches the C typedef `EFI_LOCATE_PROTOCOL`, which returns the first protocol instance that matches the given protocol. + pub mm_locate_protocol: MmLocateProtocolFn, + + /// + /// MMI Management functions + /// + /// This function matches the C typedef `EFI_MM_INTERRUPT_MANAGE`, which manages MMI of a particular type. + pub mmi_manage: MmiManageFn, + /// This function matches the C typedef `EFI_MM_HANDLER_REGISTER`, which registers a handler entry point for a particular MMI handler type. 
+ pub mmi_handler_register: MmiHandlerRegisterFn, + /// This function matches the C typedef `EFI_MM_INTERRUPT_UNREGISTER`, which unregisters a handler in MM. + pub mmi_handler_unregister: MmiHandlerUnregisterFn, +} diff --git a/sdk/patina/src/pi/protocols.rs b/sdk/patina/src/pi/protocols.rs index 08977ae13..0f85ab27f 100644 --- a/sdk/patina/src/pi/protocols.rs +++ b/sdk/patina/src/pi/protocols.rs @@ -14,7 +14,9 @@ pub mod communication; pub mod communication2; pub mod communication3; pub mod cpu_arch; +#[cfg(any(test, feature = "alloc"))] pub mod firmware_volume; +#[cfg(any(test, feature = "alloc"))] pub mod firmware_volume_block; pub mod metronome; pub mod runtime; diff --git a/sdk/patina/src/pi/protocols/bds.rs b/sdk/patina/src/pi/protocols/bds.rs index 6927fec40..37e7e5bef 100644 --- a/sdk/patina/src/pi/protocols/bds.rs +++ b/sdk/patina/src/pi/protocols/bds.rs @@ -11,14 +11,11 @@ //! SPDX-License-Identifier: Apache-2.0 //! -use r_efi::efi; - /// BDS Architectural Protocol GUID /// /// # Documentation /// UEFI Platform Initialization Specification, Release 1.8, Section II-12.2.1 -pub const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0x665E3FF6, 0x46CC, 0x11d4, 0x9A, 0x38, &[0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D]); +pub const PROTOCOL_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("665E3FF6-46CC-11D4-9A38-0090273FC14D"); /// Performs Boot Device Selection (BDS) and transfers control from the DXE Foundation to the selected boot device. /// diff --git a/sdk/patina/src/pi/protocols/communication.rs b/sdk/patina/src/pi/protocols/communication.rs index 558cefae8..5658f49dc 100644 --- a/sdk/patina/src/pi/protocols/communication.rs +++ b/sdk/patina/src/pi/protocols/communication.rs @@ -11,16 +11,16 @@ //! SPDX-License-Identifier: Apache-2.0 //! +use crate::{BinaryGuid, Guid}; use core::ffi::c_void; -use r_efi::{efi, system}; +use r_efi::efi; /// MM Communication Protocol GUID. 
-pub const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0xc68ed8e2, 0x9dc6, 0x4cbd, 0x9d, 0x94, &[0xdb, 0x65, 0xac, 0xc5, 0xc3, 0x32]); +pub const PROTOCOL_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("C68ED8E2-9DC6-4CBD-9D94-DB65ACC5C332"); /// MM Initialization GUID. -pub const EFI_MM_INITIALIZATION_GUID: efi::Guid = - efi::Guid::from_fields(0x99be0d8f, 0x3548, 0x48aa, 0xb5, 0x77, &[0xfc, 0xfb, 0xa5, 0x6a, 0x67, 0xf7]); +pub const EFI_MM_INITIALIZATION_GUID: crate::BinaryGuid = + crate::BinaryGuid::from_string("99BE0D8F-3548-48AA-B577-FCFBA56A67F7"); /// Sends/receives a message for a registered handler. /// @@ -92,18 +92,54 @@ pub struct Protocol { /// MM communication header structure. pub struct EfiMmCommunicateHeader { /// To avoid confusion in interpreting frames, the communication buffer should always begin with the header. - pub header_guid: r_efi::base::Guid, + pub header_guid: BinaryGuid, /// Describes the size of Data (in bytes) and does not include the size of the header. pub message_length: usize, // Comm buffer data follows the header } -#[repr(C)] -#[derive(Copy, Clone, Debug)] -/// MM initialization header structure. -pub struct EfiMmInitializationHeader { - /// To avoid confusion in interpreting frames, the communication buffer should always begin with the header. - pub comm_header: EfiMmCommunicateHeader, - /// Describes the size of Data (in bytes) and does not include the size of the header. - pub system_table: *mut system::SystemTable, +impl EfiMmCommunicateHeader { + /// Create a new communicate header with the specified GUID and message length. + pub fn new(header_guid: Guid, message_length: usize) -> Self { + Self { header_guid: header_guid.to_efi_guid().into(), message_length } + } + + /// Returns the communicate header as a slice of bytes using safe conversion. + /// Useful if byte-level access to the header structure is needed. + /// + /// # Returns + /// + /// A slice of bytes representing the header. 
+ pub fn as_bytes(&self) -> &[u8] { + // SAFETY: EfiMmCommunicateHeader is repr(C) with well-defined layout and size + unsafe { core::slice::from_raw_parts(self as *const _ as *const u8, Self::size()) } + } + + /// Function to get the size of the header in bytes. + /// + /// # Returns + /// + /// The size of the header in bytes. + pub const fn size() -> usize { + core::mem::size_of::() + } + + /// Get the header GUID from the communication buffer. + /// + /// # Returns + /// + /// The GUID from the communication header. + pub fn header_guid(&self) -> Guid<'_> { + Guid::from_ref(&self.header_guid) + } + + /// Returns the message length from this communicate header. + /// The length represents the size of the message data that follows the header. + /// + /// # Returns + /// + /// The length in bytes of the message data (excluding the header size). + pub const fn message_length(&self) -> usize { + self.message_length + } } diff --git a/sdk/patina/src/pi/protocols/communication2.rs b/sdk/patina/src/pi/protocols/communication2.rs index 903b021b7..dd0dc7e66 100644 --- a/sdk/patina/src/pi/protocols/communication2.rs +++ b/sdk/patina/src/pi/protocols/communication2.rs @@ -16,8 +16,7 @@ use core::ffi::c_void; use r_efi::efi; /// MM Communication Protocol GUID. -pub const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0x378daedc, 0xf06b, 0x4446, 0x83, 0x14, &[0x40, 0xab, 0x93, 0x3c, 0x87, 0xa3]); +pub const PROTOCOL_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("378DAEDC-F06B-4446-8314-40AB933C87A3"); /// Sends/receives a message for a registered handler. /// diff --git a/sdk/patina/src/pi/protocols/communication3.rs b/sdk/patina/src/pi/protocols/communication3.rs index 13a1bc4fa..bbf592017 100644 --- a/sdk/patina/src/pi/protocols/communication3.rs +++ b/sdk/patina/src/pi/protocols/communication3.rs @@ -16,12 +16,11 @@ use core::ffi::c_void; use r_efi::efi; /// MM Communication Protocol GUID. 
-pub const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0xf7234a14, 0x0df2, 0x46c0, 0xad, 0x28, &[0x90, 0xe6, 0xb8, 0x83, 0xa7, 0x2f]); +pub const PROTOCOL_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("F7234A14-0DF2-46C0-AD28-90E6B883A72F"); /// MM Communicate Header V3 GUID. -pub const COMMUNICATE_HEADER_V3_GUID: efi::Guid = - efi::Guid::from_fields(0x68e8c853, 0x2ba9, 0x4dd7, 0x9a, 0xc0, &[0x91, 0xe1, 0x61, 0x55, 0xc9, 0x35]); +pub const COMMUNICATE_HEADER_V3_GUID: crate::BinaryGuid = + crate::BinaryGuid::from_string("68E8C853-2BA9-4DD7-9AC0-91E16155C935"); /// Sends/receives a message for a registered handler. /// diff --git a/sdk/patina/src/pi/protocols/cpu_arch.rs b/sdk/patina/src/pi/protocols/cpu_arch.rs index f4e99171b..b851bbbe3 100644 --- a/sdk/patina/src/pi/protocols/cpu_arch.rs +++ b/sdk/patina/src/pi/protocols/cpu_arch.rs @@ -17,8 +17,7 @@ use r_efi::efi; /// /// # Documentation /// UEFI Platform Initialization Specification, Release 1.8, Section II-12.3.1 -pub const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0x26baccb1, 0x6f42, 0x11d4, 0xbc, 0xe7, &[0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81]); +pub const PROTOCOL_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("26BACCB1-6F42-11D4-BCE7-0080C73C8881"); #[repr(C)] /// CPU cache flush types. diff --git a/sdk/patina/src/pi/protocols/firmware_volume.rs b/sdk/patina/src/pi/protocols/firmware_volume.rs index 5d257ab96..bcd390138 100644 --- a/sdk/patina/src/pi/protocols/firmware_volume.rs +++ b/sdk/patina/src/pi/protocols/firmware_volume.rs @@ -29,8 +29,7 @@ use r_efi::efi::{Guid, Handle, Status}; /// This protocol provides file-level access to firmware volumes. It abstracts /// the complexity of the firmware volume format to provide simple file-based /// read and write operations. 
-pub const PROTOCOL_GUID: Guid = - Guid::from_fields(0x220e73b6, 0x6bdb, 0x4413, 0x84, 0x5, &[0xb9, 0x74, 0xb1, 0x8, 0x61, 0x9a]); +pub const PROTOCOL_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("220E73B6-6BDB-4413-8405-B974B108619A"); /// Enumeration of write policies for firmware volume operations. pub type EfiFvWritePolicy = u32; diff --git a/sdk/patina/src/pi/protocols/firmware_volume_block.rs b/sdk/patina/src/pi/protocols/firmware_volume_block.rs index 4e6bcf54f..56e5524d9 100644 --- a/sdk/patina/src/pi/protocols/firmware_volume_block.rs +++ b/sdk/patina/src/pi/protocols/firmware_volume_block.rs @@ -14,7 +14,7 @@ //! use core::ffi::c_void; -use r_efi::efi::{Guid, Handle, Lba, Status}; +use r_efi::efi::{Handle, Lba, Status}; use crate::pi::{fw_fs::EfiFvbAttributes2, hob::EfiPhysicalAddress}; @@ -23,8 +23,7 @@ use crate::pi::{fw_fs::EfiFvbAttributes2, hob::EfiPhysicalAddress}; /// This protocol provides control over block-oriented firmware devices. /// It abstracts the block-oriented nature of firmware volumes to allow consumers /// to read, write, and erase firmware volume blocks uniformly. -pub const PROTOCOL_GUID: Guid = - Guid::from_fields(0x8f644fa9, 0xe850, 0x4db1, 0x9c, 0xe2, &[0xb, 0x44, 0x69, 0x8e, 0x8d, 0xa4]); +pub const PROTOCOL_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("8F644FA9-E850-4DB1-9CE2-0B44698E8DA4"); /// Retrieves the current attributes and capabilities of a firmware volume. 
/// diff --git a/sdk/patina/src/pi/protocols/metronome.rs b/sdk/patina/src/pi/protocols/metronome.rs index e7e51b138..b389ce3cc 100644 --- a/sdk/patina/src/pi/protocols/metronome.rs +++ b/sdk/patina/src/pi/protocols/metronome.rs @@ -18,8 +18,7 @@ use r_efi::efi; /// /// # Documentation /// UEFI Platform Initialization Specification, Release 1.8, Section II-12.4.1 -pub const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0x26baccb2, 0x6f42, 0x11d4, 0xbc, 0xe7, &[0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81]); +pub const PROTOCOL_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("26BACCB2-6F42-11D4-BCE7-0080C73C8881"); /// Waits for a specified number of ticks from a known time source in a platform. /// diff --git a/sdk/patina/src/pi/protocols/runtime.rs b/sdk/patina/src/pi/protocols/runtime.rs index 38d77f082..dbde3ad59 100644 --- a/sdk/patina/src/pi/protocols/runtime.rs +++ b/sdk/patina/src/pi/protocols/runtime.rs @@ -17,8 +17,7 @@ use crate::pi::list_entry; use r_efi::efi; /// Runtime Arch Protocol GUID. -pub const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0xb7dfb4e1, 0x052f, 0x449f, 0x87, 0xbe, &[0x98, 0x18, 0xfc, 0x91, 0xb7, 0x33]); +pub const PROTOCOL_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("B7DFB4E1-052F-449F-87BE-9818FC91B733"); /// Allows the runtime functionality of the DXE Foundation to be contained /// in a separate driver. It also provides hooks for the DXE Foundation to diff --git a/sdk/patina/src/pi/protocols/security.rs b/sdk/patina/src/pi/protocols/security.rs index 1a2f54153..fd0b25aa4 100644 --- a/sdk/patina/src/pi/protocols/security.rs +++ b/sdk/patina/src/pi/protocols/security.rs @@ -17,8 +17,7 @@ use r_efi::efi; /// Security Arch Protocol GUID. 
-pub const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0xA46423E3, 0x4617, 0x49f1, 0xB9, 0xFF, &[0xD1, 0xBF, 0xA9, 0x11, 0x58, 0x39]); +pub const PROTOCOL_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("A46423E3-4617-49F1-B9FF-D1BFA9115839"); /// The EFI_SECURITY_ARCH_PROTOCOL (SAP) is used to abstract platform-specific /// policy from the DXE core response to an attempt to use a file that returns a diff --git a/sdk/patina/src/pi/protocols/security2.rs b/sdk/patina/src/pi/protocols/security2.rs index 067c7a84d..6f7a5ebb2 100644 --- a/sdk/patina/src/pi/protocols/security2.rs +++ b/sdk/patina/src/pi/protocols/security2.rs @@ -28,8 +28,7 @@ use core::ffi::c_void; use r_efi::efi; /// Security2 Arch Protocol GUID. -pub const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0x94ab2f58, 0x1438, 0x4ef1, 0x91, 0x52, &[0x18, 0x94, 0x1a, 0x3a, 0x0e, 0x68]); +pub const PROTOCOL_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("94AB2F58-1438-4EF1-9152-18941A3A0E68"); /// The DXE Foundation uses this service to measure and/or verify a UEFI image. /// diff --git a/sdk/patina/src/pi/protocols/status_code.rs b/sdk/patina/src/pi/protocols/status_code.rs index 67e3331a2..23300b599 100644 --- a/sdk/patina/src/pi/protocols/status_code.rs +++ b/sdk/patina/src/pi/protocols/status_code.rs @@ -14,8 +14,7 @@ use r_efi::efi; /// Status Code Runtime Protocol GUID. -pub const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0xD2B2B828, 0x0826, 0x48A7, 0xB3, 0xDF, &[0x98, 0x3C, 0x00, 0x60, 0x24, 0xF0]); +pub const PROTOCOL_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("D2B2B828-0826-48A7-B3DF-983C006024F0"); /// Status Code Type Definition. /// diff --git a/sdk/patina/src/pi/protocols/timer.rs b/sdk/patina/src/pi/protocols/timer.rs index b9820d720..2ba2b57a3 100644 --- a/sdk/patina/src/pi/protocols/timer.rs +++ b/sdk/patina/src/pi/protocols/timer.rs @@ -15,8 +15,7 @@ use r_efi::efi; /// Timer Arch Protocol GUID. 
-pub const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0x26BACCB3, 0x6F42, 0x11D4, 0xBC, 0xE7, &[0x00, 0x80, 0xC7, 0x3C, 0x88, 0x81]); +pub const PROTOCOL_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("26BACCB3-6F42-11D4-BCE7-0080C73C8881"); /// A function of this type is called when a timer interrupt fires. This /// function executes at TPL_HIGH_LEVEL. The DXE Core will register a function diff --git a/sdk/patina/src/pi/protocols/watchdog.rs b/sdk/patina/src/pi/protocols/watchdog.rs index f42c8e126..c32cdd4e8 100644 --- a/sdk/patina/src/pi/protocols/watchdog.rs +++ b/sdk/patina/src/pi/protocols/watchdog.rs @@ -20,8 +20,7 @@ use r_efi::efi; /// /// # Documentation /// UEFI Platform Initialization Specification, Release 1.8, Section II-12.14.1 -pub const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0x665E3FF5, 0x46CC, 0x11d4, 0x9A, 0x38, &[0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D]); +pub const PROTOCOL_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("665E3FF5-46CC-11D4-9A38-0090273FC14D"); /// Function type definition for watchdog timer notify. pub type WatchdogTimerNotify = extern "efiapi" fn(u64); diff --git a/sdk/patina/src/pi/serializable.rs b/sdk/patina/src/pi/serializable.rs index 5570c8ea4..10550055c 100644 --- a/sdk/patina/src/pi/serializable.rs +++ b/sdk/patina/src/pi/serializable.rs @@ -27,7 +27,7 @@ use alloc::{format, string::String, vec, vec::Vec}; /// Format a GUID as a string in the standard 8-4-4-4-12 format. /// This custom implementation is necessary because `r_efi::Guid` has private fields and cannot derive `Serialize` directly. 
/// -pub fn format_guid(guid: Guid) -> String { +pub fn format_guid(guid: &Guid) -> String { let (time_low, time_mid, time_hi_and_version, clk_seq_hi_res, clk_seq_low, node) = guid.as_fields(); format!( "{:08x}-{:04x}-{:04x}-{:02x}{:02x}-{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}", diff --git a/sdk/patina/src/pi/serializable/serializable_fv.rs b/sdk/patina/src/pi/serializable/serializable_fv.rs index 77341c590..26aee91d8 100644 --- a/sdk/patina/src/pi/serializable/serializable_fv.rs +++ b/sdk/patina/src/pi/serializable/serializable_fv.rs @@ -22,7 +22,6 @@ use alloc::{ string::{String, ToString}, vec::Vec, }; -use r_efi::efi; use serde::{Deserialize, Serialize}; // This is the serialized version of the FV list. @@ -74,7 +73,7 @@ pub struct PeHeaderInfo { impl From> for FirmwareVolumeSerDe { fn from(fv: FirmwareVolume) -> Self { // Get the FV name, length, base address, and attributes - let fv_name = format_guid(fv.fv_name().unwrap_or(efi::Guid::from_bytes(&[0; 16]))); + let fv_name = format_guid(&fv.fv_name().unwrap_or(crate::guids::ZERO)); let fv_length = fv.size() as usize; let fv_attributes = fv.attributes(); let files = fv @@ -84,7 +83,7 @@ impl From> for FirmwareVolumeSerDe { let Ok(file) = file else { return None; }; - let file_name = format_guid(file.name()); + let file_name = format_guid(&file.name()); let file_length = file.size() as usize; let file_attributes = file.attributes_raw() as u32; let file_type = @@ -114,7 +113,7 @@ impl From> for FirmwareVolumeSerDe { LZMA_F86_SECTION => "LZMA F86 Compressed".to_string(), LZMA_PARALLEL_SECTION => "LZMA Parallel Compressed".to_string(), TIANO_DECOMPRESS_SECTION => "Tiano Compressed".to_string(), - _ => format_guid(guid.section_definition_guid), + _ => format_guid(&guid.section_definition_guid), }, _ => "uncompressed".to_string(), }; diff --git a/sdk/patina/src/pi/serializable/serializable_hob.rs b/sdk/patina/src/pi/serializable/serializable_hob.rs index 5a6064c42..18170953a 100644 --- 
a/sdk/patina/src/pi/serializable/serializable_hob.rs +++ b/sdk/patina/src/pi/serializable/serializable_hob.rs @@ -165,14 +165,14 @@ impl From<&Hob<'_>> for HobSerDe { }, Hob::MemoryAllocation(mem_alloc) => Self::MemoryAllocation { alloc_descriptor: MemAllocDescriptorSerDe { - name: format_guid(mem_alloc.alloc_descriptor.name), + name: format_guid(&mem_alloc.alloc_descriptor.name), memory_base_address: mem_alloc.alloc_descriptor.memory_base_address, memory_length: mem_alloc.alloc_descriptor.memory_length, memory_type: mem_alloc.alloc_descriptor.memory_type, }, }, Hob::ResourceDescriptor(resource_desc) => Self::ResourceDescriptor(ResourceDescriptorSerDe { - owner: format_guid(resource_desc.owner), + owner: format_guid(&resource_desc.owner), resource_type: resource_desc.resource_type, resource_attribute: resource_desc.resource_attribute, physical_start: resource_desc.physical_start, @@ -180,7 +180,7 @@ impl From<&Hob<'_>> for HobSerDe { }), Hob::ResourceDescriptorV2(resource_desc2) => Self::ResourceDescriptorV2 { v1: ResourceDescriptorSerDe { - owner: format_guid(resource_desc2.v1.owner), + owner: format_guid(&resource_desc2.v1.owner), resource_type: resource_desc2.v1.resource_type, resource_attribute: resource_desc2.v1.resource_attribute, physical_start: resource_desc2.v1.physical_start, @@ -188,7 +188,7 @@ impl From<&Hob<'_>> for HobSerDe { }, attributes: resource_desc2.attributes, }, - Hob::GuidHob(guid_ext, _) => Self::GuidExtension { name: format_guid(guid_ext.name) }, + Hob::GuidHob(guid_ext, _) => Self::GuidExtension { name: format_guid(&guid_ext.name) }, Hob::FirmwareVolume(fv) => Self::FirmwareVolume { base_address: fv.base_address, length: fv.length }, Hob::Cpu(cpu) => { Self::Cpu { size_of_memory_space: cpu.size_of_memory_space, size_of_io_space: cpu.size_of_io_space } @@ -363,7 +363,7 @@ mod tests { reserved: 0, }; let alloc_descriptor = hob::header::MemoryAllocation { - name: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), + name: 
crate::BinaryGuid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), memory_base_address: 0, memory_length: 0x0123456789abcdef, memory_type: 0, @@ -378,7 +378,7 @@ mod tests { }; let resource_desc_hob = hob::ResourceDescriptor { header, - owner: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), + owner: crate::BinaryGuid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), resource_type: hob::EFI_RESOURCE_SYSTEM_MEMORY, resource_attribute: hob::EFI_RESOURCE_ATTRIBUTE_PRESENT, physical_start: 0, @@ -387,7 +387,7 @@ mod tests { let mut v1 = hob::ResourceDescriptor { header, - owner: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), + owner: crate::BinaryGuid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), resource_type: hob::EFI_RESOURCE_SYSTEM_MEMORY, resource_attribute: hob::EFI_RESOURCE_ATTRIBUTE_PRESENT, physical_start: 0, @@ -405,7 +405,7 @@ mod tests { length: (size_of::() + data.len()) as u16, reserved: 0, }, - name: r_efi::efi::Guid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), + name: crate::BinaryGuid::from_fields(1, 2, 3, 4, 5, &[6, 7, 8, 9, 10, 11]), }, data, ); diff --git a/sdk/patina/src/pi/spec_version.rs b/sdk/patina/src/pi/spec_version.rs new file mode 100644 index 000000000..68d219221 --- /dev/null +++ b/sdk/patina/src/pi/spec_version.rs @@ -0,0 +1,16 @@ +//! Platform Initialization Specification Version +//! +//! This module contains definitions related to the version of the Platform Initialization (PI) +//! Specification that this implementation targets. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +/// The major revision number of the PI Specification that this implementation targets. +pub const PI_SPECIFICATION_MAJOR_REVISION: u32 = 1; +/// The minor revision number of the PI Specification that this implementation targets. 
+pub const PI_SPECIFICATION_MINOR_REVISION: u32 = 80; diff --git a/sdk/patina/src/runtime_services.rs b/sdk/patina/src/runtime_services.rs index 0c98ee7dc..15ca52b1d 100644 --- a/sdk/patina/src/runtime_services.rs +++ b/sdk/patina/src/runtime_services.rs @@ -10,8 +10,6 @@ //! ``` //! -extern crate alloc; - /// Variable-services-specific structs and utilities pub mod variable_services; @@ -468,6 +466,14 @@ impl RuntimeServices for StandardRuntimeServices { } } +/// Clone implementation for MockRuntimeServices that creates a new mock with default expectations. +#[cfg(any(test, feature = "mockall"))] +impl Clone for MockRuntimeServices { + fn clone(&self) -> Self { + Self::new() + } +} + #[cfg(test)] #[coverage(off)] pub(crate) mod test { diff --git a/sdk/patina/src/serial/uart.rs b/sdk/patina/src/serial/uart.rs index f8e1dfa75..a661ee7d5 100644 --- a/sdk/patina/src/serial/uart.rs +++ b/sdk/patina/src/serial/uart.rs @@ -28,7 +28,6 @@ impl super::SerialIO for UartNull { cfg_if::cfg_if! { if #[cfg(all(target_arch = "x86_64", any(target_os = "uefi", feature = "doc")))] { - extern crate alloc; use uart_16550::MmioSerialPort; use uart_16550::SerialPort as IoSerialPort; diff --git a/sdk/patina/src/test.rs b/sdk/patina/src/test.rs deleted file mode 100644 index 5bfe578e9..000000000 --- a/sdk/patina/src/test.rs +++ /dev/null @@ -1,860 +0,0 @@ -//! A Patina testing framework for on-platform unit testing -//! -//! This module provides a macro ([patina_test]) to register dependency injectable functions as on-platform unit tests -//! that can be discovered and executed by the [TestRunner] component. -//! -//! ## Writing Tests -//! -//! The patina test framework emulates the Rust provided testing framework as much as possible, so writing tests -//! should feel very similar to writing normal Rust unit tests with some additional configuration attributes available. -//! -//! 1. A developer should use `#[patina_test]` to mark a function as a test case, rather than `#[test]`. 
The function -//! must return a [Result] type, rather than panicking on failure, which differs from the standard Rust testing -//! framework. -//! 2. To assist with (1), this crate provides `assert` equivalent macros that return an error on failure rather than -//! panicking (See [crate::u_assert], [crate::u_assert_eq], [crate::u_assert_ne]). -//! 3. Tests can be configured with the same attributes as the standard Rust provided testing framework, such as -//! `#[should_fail]`, `#[should_fail = ""]`, and `#[skip]`. -//! 4. By default, tests are configured to run once during the boot process, but a macro attribute is provided to -//! change when/how often a test is triggered. See the [patina_test] macro documentation for more details. -//! 5. Test dependencies can be injected as function parameters, and the test framework will resolve them from the -//! component storage system. The test will not run if the dependency cannot be resolved. -//! -//! ## Running Tests -//! -//! Tests marked with `#[patina_test]` are not automatically executed by a platform. Instead, the platform must opt-in -//! to running tests by registering one or more [TestRunner] components with the Core. Each [TestRunner] component will -//! discover all test cases that match it's configuration and schedule them according to the component's configurations -//! and the test case's triggers. An overlap in test cases discovered by multiple [TestRunner] components is allowed, -//! but the test case will only be scheduled to run once based on it's triggers. The Test failure callbacks will be -//! called for each [TestRunner] that discovers the test case. `debug_mode=true` takes priority, so if any [TestRunner] -//! that discovers a test case has `debug_mode=true`, then debug messages will be enabled for that test case regardless -//! of the other [TestRunner]'s debug_mode configuration for that test case. -//! -//! ## Feature Flags -//! -//! - `patina-tests`: Will opt-in to compile any tests. -//! -//! 
## Example -//! -//! ```rust -//! use patina::test::*; -//! use patina::boot_services::StandardBootServices; -//! use patina::test::patina_test; -//! use patina::{u_assert, u_assert_eq}; -//! use patina::guids::CACHE_ATTRIBUTE_CHANGE_EVENT_GROUP; -//! -//! // Registered with the Core. -//! let test_config = patina::test::TestRunner::default() -//! .with_filter("aarch64") // Only run tests with "aarch64" in their name & path (my_crate::aarch64::test) -//! .debug_mode(true); // Allow any log messages from the test to be printed -//! -//! #[cfg_attr(target_arch = "aarch64", patina_test)] -//! fn test_case() -> Result { -//! u_assert_eq!(1, 1); -//! Ok(()) -//! } -//! -//! #[patina_test] -//! fn test_case2() -> Result { -//! u_assert_eq!(1, 1); -//! Ok(()) -//! } -//! -//! #[patina_test] -//! #[should_fail] -//! fn failing_test_case() -> Result { -//! u_assert_eq!(1, 2); -//! Ok(()) -//! } -//! -//! #[patina_test] -//! #[should_fail = "This test failed"] -//! fn failing_test_case_with_msg() -> Result { -//! u_assert_eq!(1, 2, "This test failed"); -//! Ok(()) -//! } -//! -//! #[patina_test] -//! #[skip] -//! fn skipped_test_case() -> Result { -//! todo!() -//! } -//! -//! #[patina_test] -//! #[cfg_attr(not(target_arch = "x86_64"), skip)] -//! fn x86_64_only_test_case(bs: StandardBootServices) -> Result { -//! todo!() -//! } -//! -//! #[patina_test] -//! #[on(event = CACHE_ATTRIBUTE_CHANGE_EVENT_GROUP)] -//! fn on_event_test_case() -> Result { -//! Ok(()) -//! } -//! ``` -//! -//! ## License -//! -//! Copyright (c) Microsoft Corporation. -//! -//! SPDX-License-Identifier: Apache-2.0 -//! 
-extern crate alloc; - -use core::{fmt::Display, ops::DerefMut, ptr::NonNull}; - -use alloc::{boxed::Box, collections::BTreeMap, vec::Vec}; -use patina_macro::{IntoService, component}; -use r_efi::efi::EVENT_GROUP_READY_TO_BOOT; - -use crate as patina; -use crate::{ - boot_services::{ - BootServices, StandardBootServices, - event::{EventTimerType, EventType}, - tpl::Tpl, - }, - component::Storage, - test::__private_api::{TestCase, TestTrigger}, -}; - -#[doc(hidden)] -pub use linkme; -// WARNING: this is not a part of the crate's public API and is subject to change at any time. -#[doc(hidden)] -pub mod __private_api; - -/// The result type for a test case, an alias for `Result<(), &'static str>`. -pub type Result = core::result::Result<(), &'static str>; - -pub use patina_macro::patina_test; - -/// A macro similar to [`core::assert!`] that returns an error message instead of panicking. -#[macro_export] -macro_rules! u_assert { - ($cond:expr, $msg:expr) => { - if !$cond { - return Err($msg); - } - }; - ($cond:expr) => { - u_assert!($cond, "Assertion failed"); - }; -} - -/// A macro similar to [`core::assert_eq!`] that returns an error message instead of panicking. -#[macro_export] -macro_rules! u_assert_eq { - ($left:expr, $right:expr, $msg:expr) => { - if $left != $right { - return Err($msg); - } - }; - ($left:expr, $right:expr) => { - u_assert_eq!($left, $right, concat!("assertion failed: `", stringify!($left), " == ", stringify!($right), "`")); - }; -} - -/// A macro similar to [`core::assert_ne!`] that returns an error message instead of panicking. -#[macro_export] -macro_rules! u_assert_ne { - ($left:expr, $right:expr, $msg:expr) => { - if $left == $right { - return Err($msg); - } - }; - ($left:expr, $right:expr) => { - u_assert_ne!($left, $right, concat!("assertion failed: `", stringify!($left), " != ", stringify!($right), "`")); - }; -} - -/// A private service to record test results. 
-#[derive(IntoService, Default)] -#[service(Recorder)] -struct Recorder { - records: spin::Mutex>, -} - -impl Recorder { - /// Allows updates to the test records via a closure to ensure interior mutability safety. - fn with_mut(&self, f: F) -> R - where - F: FnOnce(&mut BTreeMap<&'static str, TestRecord>) -> R, - { - let mut records = self.records.lock(); - f(records.deref_mut()) - } - - /// Registers UEFI event callbacks to log the test results at specific points in the boot process. - fn initialize(&self, storage: &mut Storage) -> patina::error::Result<()> { - // Log results at ready to boot - storage.boot_services().create_event_ex( - EventType::NOTIFY_SIGNAL, - Tpl::CALLBACK, - Some(Self::run_tests_and_report), - NonNull::from_ref(storage), - &EVENT_GROUP_READY_TO_BOOT, - )?; - - // log results at exit boot services - storage.boot_services().create_event( - EventType::SIGNAL_EXIT_BOOT_SERVICES, - Tpl::CALLBACK, - Some(Self::run_tests_and_report), - NonNull::from_ref(storage), - )?; - - Ok(()) - } - - /// Returns true if a test with the given name is already registered, false otherwise. - fn test_registered(&self, test_name: &str) -> bool { - self.with_mut(|data| data.contains_key(test_name)) - } - - // Updates an existing record or inserts a new record if it does not exist. - fn update_record(&self, record: TestRecord) { - let name = record.test_case.name; - - self.with_mut(|data| { - if let Some(existing_record) = data.get_mut(name) { - existing_record.merge(&record); - } else { - data.insert(name, record); - } - }); - } - - /// Runs all tests that are triggered by the [TestTrigger::Manual] trigger if they have not been run before. 
- fn run_manual_tests(&self, storage: &mut Storage) { - self.with_mut(|data| { - data.values_mut() - .filter(|record| { - record.test_case.triggers.contains(&TestTrigger::Manual) && record.pass == 0 && record.fail == 0 - }) - .for_each(|record| record.run(storage)); - }); - } - - /// An EFIAPI compatible event callback to run the manually triggered tests and log the current results of patina-test - extern "efiapi" fn run_tests_and_report(event: r_efi::efi::Event, mut storage: NonNull) { - // SAFETY: event callbacks are executed in series, so there exists no other mutable access to storage. - let storage = unsafe { storage.as_mut() }; - - if let Some(recorder) = storage.get_service::() { - recorder.run_manual_tests(storage); - - log::info!("{}", *recorder); - } - - let _ = storage.boot_services().close_event(event); - } -} - -impl Display for Recorder { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - self.with_mut(|records| { - let mut total_passes = 0; - let mut total_fails = 0; - writeln!(f, "Patina on-system unit-test results:")?; - for (name, record) in records.iter() { - total_passes += record.pass; - total_fails += record.fail; - if record.fail == 0 && record.pass == 0 { - writeln!(f, " {name} ... not triggered")?; - continue; - } - if record.fail == 0 { - writeln!(f, " {name} ... ok ({} passes)", record.pass)?; - } else { - writeln!( - f, - " {name} ... fail ({} fails, {} passes): {}", - record.fail, - record.pass, - record.err_msg.unwrap_or("") - )?; - } - } - writeln!(f, "Patina on-system unit-test result totals: {total_passes} passes, {total_fails} fails")?; - - Ok(()) - }) - } -} - -/// A structure containing all necessary data to execute a test at any time. -#[derive(Clone)] -struct TestRecord { - /// Whether or not to log debug messages in the test or not - debug_mode: bool, - /// The test case to execute. - test_case: &'static TestCase, - /// Callback functions to be called on test failure. 
- callback: Vec, - /// The number of times this test has executed and passed. - pass: u32, - /// The number of times this test has executed and failed. - fail: u32, - /// The error message from the most recent failure, if any. - err_msg: Option<&'static str>, -} - -impl TestRecord { - /// Creates a new instance of TestRecord. - fn new(debug_mode: bool, test_case: &'static TestCase, callback: Option) -> Self { - let callback = callback.into_iter().collect(); - Self { debug_mode, test_case, callback, pass: 0, fail: 0, err_msg: None } - } - - /// Merges another test record into this one, combining their results and callbacks. - fn merge(&mut self, other: &Self) { - assert_eq!(self.test_case.name, other.test_case.name, "Can only merge records for the same test case."); - self.debug_mode |= other.debug_mode; - self.pass += other.pass; - self.fail += other.fail; - self.callback.extend(other.callback.clone()); - if self.err_msg.is_none() && other.err_msg.is_some() { - self.err_msg = other.err_msg; - } - } - - /// Runs the test case case. - /// - /// Calls the test failure callbacks if the test fails. - fn run(&mut self, storage: &mut Storage) { - let result = self.test_case.run(storage, self.debug_mode); - - match result { - Ok(()) => self.pass += 1, - Err(msg) => { - self.fail += 1; - self.err_msg = Some(msg); - self.callback.iter().for_each(|cb| cb(self.test_case.name, msg)); - } - } - } - - /// Schedules the test to be run according to its triggers. - fn schedule_run(&self, storage: &mut Storage) -> patina::error::Result<()> { - let name = self.test_case.name; - - for trigger in self.test_case.triggers { - match trigger { - TestTrigger::Manual => { - // Do nothing. Test must be manually triggered. 
- } - TestTrigger::Event(guid) => { - storage.boot_services().create_event_ex( - EventType::NOTIFY_SIGNAL, - Tpl::CALLBACK, - Some(Self::run_test), - Box::leak(Box::new((name, NonNull::from_ref(storage)))), - guid, - )?; - } - TestTrigger::Timer(interval) => { - let event = storage.boot_services().create_event( - EventType::NOTIFY_SIGNAL | EventType::TIMER, - Tpl::CALLBACK, - Some(Self::run_test), - // We are setting up this timer to be periodic, so we need to leak it so it is available for - // multiple test runs - Box::leak(Box::new((name, NonNull::from_ref(storage)))), - )?; - - // We need to disable the timer at ReadyToBoot so it does not continue firing while a - // bootloader is running. - let _ = storage.boot_services().create_event_ex( - EventType::NOTIFY_SIGNAL, - Tpl::CALLBACK, - Some(Self::disable_timer), - NonNull::from_ref(Box::leak(Box::new((event, storage.boot_services().clone())))).as_ptr() - as *mut core::ffi::c_void, - &EVENT_GROUP_READY_TO_BOOT, - )?; - - storage.boot_services().set_timer(event, EventTimerType::Periodic, *interval)?; - } - } - } - - Ok(()) - } - - /// EFIAPI event callback to locate a specific test and run it. - extern "efiapi" fn run_test(_: r_efi::efi::Event, &(test, mut storage): &'static (&'static str, NonNull)) { - // SAFETY: Storage is a valid pointer as the pointer is generated from a static reference. - let storage = unsafe { storage.as_mut() }; - - if let Some(recorder) = storage.get_service::() { - let _ = recorder.with_mut(|records| records.get_mut(test).map(|record| record.run(storage))); - } - } - - #[coverage(off)] - /// An EFIAPI compatible event callback to disable a timer event at ReadyToBoot - extern "efiapi" fn disable_timer(rtb_event: r_efi::efi::Event, context: *mut core::ffi::c_void) { - // SAFETY: We set up the context pointer in `run_tests` to point to a valid tuple of (Event, &mut Storage). 
- let (timer_event, boot_services) = unsafe { &mut *(context as *mut (r_efi::efi::Event, StandardBootServices)) }; - let _ = boot_services.set_timer(*timer_event, EventTimerType::Cancel, 0); - let _ = boot_services.close_event(rtb_event); - } -} - -/// A component that runs all test cases marked with the `#[patina_test]` attribute when loaded by the DXE core. -#[derive(Default, Clone)] -pub struct TestRunner { - filters: Vec<&'static str>, - debug_mode: bool, - fail_callback: Option, -} - -#[component] -impl TestRunner { - /// Adds a filter that will reduce the tests ran to only those that contain the filter value in their test name. - /// - /// The `name` is not just the test name, but also the module path. For example, if a test is defined in - /// `my_crate::tests`, the name would be `my_crate::tests::test_case`. - /// - /// This filter is case-sensitive. It can be called multiple times to add multiple filters. - pub fn with_filter(mut self, filter: &'static str) -> Self { - self.filters.push(filter); - self - } - - /// Any log messages generated by the test case will be logged if this is set to true. - /// - /// Defaults to false. - pub fn debug_mode(mut self, debug_mode: bool) -> Self { - self.debug_mode = debug_mode; - self - } - - /// Attach a callback function that will be called on test failure. - /// - /// fn(test_name: &'static str, fail_msg: &'static str) - pub fn with_callback(mut self, callback: fn(&'static str, &'static str)) -> Self { - self.fail_callback = Some(callback); - self - } - - /// The entry point for the test runner component. - #[coverage(off)] - fn entry_point(self, storage: &mut Storage) -> patina::error::Result<()> { - let test_list: &'static [__private_api::TestCase] = __private_api::test_cases(); - self.register_tests(test_list, storage) - } - - /// Registers the tests to be executed by the test runner. 
- fn register_tests( - &self, - test_list: &'static [__private_api::TestCase], - storage: &mut Storage, - ) -> patina::error::Result<()> { - let recorder = match storage.get_service::() { - Some(recorder) => recorder, - None => { - let recorder = Recorder::default(); - recorder.initialize(storage)?; - storage.add_service(recorder); - storage.get_service::().expect("Recorder service should be registered.") - } - }; - - let records = test_list - .iter() - .filter(|&test_case| test_case.should_run(&self.filters)) - .map(|test_case| TestRecord::new(self.debug_mode, test_case, self.fail_callback)); - - for record in records { - // Only schedule a run if we have not already scheduled for this test. - if !recorder.test_registered(record.test_case.name) { - record.schedule_run(storage)?; - } - - recorder.update_record(record); - } - - Ok(()) - } -} - -#[cfg(test)] -#[coverage(off)] -mod tests { - use core::mem::MaybeUninit; - - use r_efi::efi::Guid; - - use super::*; - use crate::{ - boot_services::StandardBootServices, - component::{IntoComponent, Storage, params::Config}, - }; - - // A test function where we mock DxeComponentInterface to return what we want for the test. 
- fn test_function(config: Config) -> Result { - assert!(*config == 1); - Ok(()) - } - - fn test_function_fail() -> Result { - Err("Intentional Failure") - } - - fn test_function_invalid(_: &mut Storage, _: &mut Storage) -> Result { - Ok(()) - } - - #[test] - fn test_func_implements_into_component() { - let _ = super::TestRunner::default().into_component(); - } - - #[test] - fn verify_default_values() { - let config = super::TestRunner::default(); - assert_eq!(config.filters.len(), 0); - assert!(!config.debug_mode); - } - - #[test] - fn verify_config_sets_properly() { - let config = super::TestRunner::default().with_filter("aarch64").with_filter("test").debug_mode(true); - assert_eq!(config.filters.len(), 2); - assert!(config.debug_mode); - } - - // This is mirroring the logic in __private_api.rs to ensure we do properly register test cases. - #[linkme::distributed_slice] - static TEST_TESTS: [super::__private_api::TestCase]; - - #[linkme::distributed_slice(TEST_TESTS)] - static TEST_CASE1: super::__private_api::TestCase = super::__private_api::TestCase { - name: "test", - triggers: &[super::__private_api::TestTrigger::Manual], - skip: false, - should_fail: false, - fail_msg: None, - func: |storage| crate::test::__private_api::FunctionTest::new(test_function).run(storage.into()), - }; - - #[linkme::distributed_slice(TEST_TESTS)] - static TEST_CASE2: super::__private_api::TestCase = super::__private_api::TestCase { - name: "test", - triggers: &[super::__private_api::TestTrigger::Manual], - skip: true, - should_fail: false, - fail_msg: None, - func: |storage| crate::test::__private_api::FunctionTest::new(test_function).run(storage.into()), - }; - - static TEST_CASE3: super::__private_api::TestCase = super::__private_api::TestCase { - name: "test_that_fails", - triggers: &[super::__private_api::TestTrigger::Manual], - skip: false, - should_fail: false, - fail_msg: None, - func: |storage| 
crate::test::__private_api::FunctionTest::new(test_function_fail).run(storage.into()), - }; - - static TEST_CASE4: super::__private_api::TestCase = super::__private_api::TestCase { - name: "event_triggered_test", - triggers: &[super::__private_api::TestTrigger::Event(&Guid::from_bytes(&[0; 16]))], - skip: false, - should_fail: false, - fail_msg: None, - func: |storage| crate::test::__private_api::FunctionTest::new(test_function_fail).run(storage.into()), - }; - - static TEST_CASE5: super::__private_api::TestCase = super::__private_api::TestCase { - name: "timer_triggered_test", - triggers: &[super::__private_api::TestTrigger::Timer(1_000_000)], - skip: false, - should_fail: false, - fail_msg: None, - func: |storage| crate::test::__private_api::FunctionTest::new(test_function_fail).run(storage.into()), - }; - - static TEST_CASE_INVALID: super::__private_api::TestCase = super::__private_api::TestCase { - name: "invalid_test", - triggers: &[super::__private_api::TestTrigger::Event(&Guid::from_bytes(&[0; 16]))], - skip: false, - should_fail: false, - fail_msg: None, - func: |storage| crate::test::__private_api::FunctionTest::new(test_function_invalid).run(storage.into()), - }; - - #[test] - #[ignore = "Skipping test until the service for UEFI services is out, so we can mock it."] - fn test_we_can_initialize_the_component() { - let mut storage = Storage::new(); - - let mut component = super::TestRunner::default().into_component(); - component.initialize(&mut storage); - } - - #[test] - #[ignore = "Skipping test until the service for UEFI services is out, so we can mock it."] - fn test_we_can_collect_and_execute_tests() { - assert_eq!(TEST_TESTS.len(), 2); - let mut storage = Storage::new(); - storage.add_config(1_i32); - - let component = super::TestRunner::default(); - let result = component.register_tests(&TEST_TESTS, &mut storage); - assert!(result.is_ok()); - } - - #[test] - #[ignore = "Skipping test until the service for UEFI services is out, so we can mock it."] - 
fn test_handle_different_test_counts() { - let mut storage = Storage::new(); - storage.add_config(1_i32); - - let test_cases: &'static [TestCase] = Box::leak(Box::new([])); - let component = super::TestRunner::default(); - let result = component.register_tests(test_cases, &mut storage); - assert!(result.is_ok()); - - let test_cases: &'static [TestCase] = Box::leak(Box::new([TEST_CASE1])); - let result = component.register_tests(test_cases, &mut storage); - assert!(result.is_ok()); - - let test_cases: &'static [TestCase] = Box::leak(Box::new([TEST_CASE1, TEST_CASE2])); - let result = component.register_tests(test_cases, &mut storage); - assert!(result.is_ok()); - - let test_cases: &'static [TestCase] = Box::leak(Box::new([TEST_CASE1, TEST_CASE2, TEST_CASE3])); - let result = component.register_tests(test_cases, &mut storage); - assert!(result.is_ok()); - } - - #[test] - fn test_recorder_records_results() { - let recorder = Recorder::default(); - - let mut tr1 = TestRecord::new(false, &TEST_CASE2, None); - tr1.pass = 2; - tr1.fail = 1; - tr1.err_msg = Some("Failure 1"); - recorder.update_record(tr1); - - let mut tr2 = TestRecord::new(false, &TEST_CASE3, None); - tr2.pass = 0; - tr2.fail = 2; - tr2.err_msg = Some("Failure 2"); - recorder.update_record(tr2); - - let mut tr3 = TestRecord::new(false, &TEST_CASE4, None); - tr3.pass = 1; - recorder.update_record(tr3); - - let output = format!("{}", recorder); - assert!(output.contains("test ... fail (1 fails, 2 passes): Failure 1")); - assert!(output.contains("test_that_fails ... fail (2 fails, 0 passes): Failure 2")); - assert!(output.contains("event_triggered_test ... 
ok (1 passes)")); - } - - #[test] - fn test_test_data_test_running() { - let mut storage = Storage::new(); - storage.add_config(1_i32); - storage.add_service(Recorder::default()); - - let test_case = &TEST_CASE1; - let mut test_data = TestRecord::new(false, test_case, None); - - test_data.run(&mut storage); - - let recorder = storage.get_service::().expect("Recorder service should be registered."); - recorder.update_record(test_data); - - let output = format!("{}", *recorder); - println!("{}", output); - assert!(output.contains("test ... ok (1 passes)")); - } - - #[test] - #[should_panic(expected = "Callback called")] - fn test_test_failure_callback_handler() { - let test_runner = TestRunner::default().with_callback(|_, _| { - panic!("Callback called"); - }); - - let mut storage = Storage::new(); - storage.add_service(Recorder::default()); - let bs: MaybeUninit = MaybeUninit::uninit(); - - // SAFETY: This is very unsafe, because it is not initialized, however this code path only calls create_event - // and create_event_ex, which we will fill in with no-op functions. - let mut bs = unsafe { bs.assume_init() }; - extern "efiapi" fn noop_create_event( - _type: u32, - _tpl: r_efi::efi::Tpl, - _notify_function: Option, - _notify_context: *mut core::ffi::c_void, - _event: *mut r_efi::efi::Event, - ) -> r_efi::efi::Status { - r_efi::efi::Status::SUCCESS - } - - extern "efiapi" fn noop_create_event_ex( - _type: u32, - _tpl: r_efi::efi::Tpl, - _notify_function: Option, - _notify_context: *const core::ffi::c_void, - _guid: *const r_efi::efi::Guid, - _event: *mut r_efi::efi::Event, - ) -> r_efi::efi::Status { - r_efi::efi::Status::SUCCESS - } - - bs.create_event = noop_create_event; - bs.create_event_ex = noop_create_event_ex; - - storage.set_boot_services(StandardBootServices::new(Box::leak(Box::new(bs)))); - - // TEST_CASE3 is designed to fail. 
- let _ = test_runner.register_tests(Box::leak(Box::new([TEST_CASE3])), &mut storage); - storage.get_service::().unwrap().run_manual_tests(&mut storage); - } - - #[test] - fn test_filter_should_work() { - let test_runner = TestRunner::default().with_filter("triggered_test"); - - let mut storage = Storage::new(); - let bs: MaybeUninit = MaybeUninit::uninit(); - - // SAFETY: This is very unsafe, because it is not initialized, however this code path only calls create_event - // create_event_ex, and set_timer which we will fill in with no-op functions. - let mut bs = unsafe { bs.assume_init() }; - extern "efiapi" fn noop_create_event( - _type: u32, - _tpl: r_efi::efi::Tpl, - _notify_function: Option, - _notify_context: *mut core::ffi::c_void, - _event: *mut r_efi::efi::Event, - ) -> r_efi::efi::Status { - r_efi::efi::Status::SUCCESS - } - - extern "efiapi" fn noop_create_event_ex( - _type: u32, - _tpl: r_efi::efi::Tpl, - _notify_function: Option, - _notify_context: *const core::ffi::c_void, - _guid: *const r_efi::efi::Guid, - _event: *mut r_efi::efi::Event, - ) -> r_efi::efi::Status { - r_efi::efi::Status::SUCCESS - } - - extern "efiapi" fn noop_set_timer( - _event: r_efi::efi::Event, - _type: r_efi::efi::TimerDelay, - _trigger_time: u64, - ) -> r_efi::efi::Status { - r_efi::efi::Status::SUCCESS - } - - bs.create_event = noop_create_event; - bs.create_event_ex = noop_create_event_ex; - bs.set_timer = noop_set_timer; - - storage.set_boot_services(StandardBootServices::new(Box::leak(Box::new(bs)))); - - // Failure tests - assert!( - test_runner.register_tests(Box::leak(Box::new([TEST_CASE3, TEST_CASE4, TEST_CASE5])), &mut storage).is_ok() - ); - let recorder = storage.get_service::().expect("Recorder service should be registered."); - recorder.run_manual_tests(&mut storage); - - let output = format!("{}", *recorder); - - // This test is filtered out, so it should not even show up in the results. 
- assert!(!output.contains("test_that_fails")); - // This test is not filtered out, but never run, so should log as such. - println!("{}", output); - assert!(output.contains("event_triggered_test ... not triggered")); - } - - #[test] - fn test_test_with_invalid_param_combination_is_caught() { - assert_eq!( - TEST_CASE_INVALID.run(&mut Storage::new(), false), - Err("Test failed to run due to un-retrievable parameters.") - ); - } - - #[test] - fn test_update_record_with_existing_record() { - let mut record1 = TestRecord::new(false, &TEST_CASE1, Some(|_, _| ())); - record1.pass = 1; - record1.fail = 0; - - let mut record2 = TestRecord::new(true, &TEST_CASE1, Some(|_, _| ())); - record2.pass = 0; - record2.fail = 2; - record2.err_msg = Some("Failure"); - - let recorder = Recorder::default(); - recorder.update_record(record1); - recorder.update_record(record2); - - let record = recorder.with_mut(|data| data.get(&TEST_CASE1.name).cloned().expect("Record should exist.")); - - assert!(record.debug_mode); - assert_eq!(record.pass, 1); - assert_eq!(record.fail, 2); - assert_eq!(record.err_msg, Some("Failure")); - assert!(record.debug_mode); - assert_eq!(record.callback.len(), 2); - } - - #[test] - fn test_efiapi_run_test() { - let mut storage = Storage::new(); - storage.add_config(1_i32); - - let recorder = Recorder::default(); - recorder.update_record(TestRecord::new(false, &TEST_CASE1, None)); - storage.add_service(recorder); - - let context = Box::leak(Box::new(("test", NonNull::from_ref(&storage)))); - TestRecord::run_test(core::ptr::null_mut(), context); - } - - #[test] - fn test_efiapi_run_tests_and_report() { - let bs: MaybeUninit = MaybeUninit::uninit(); - // SAFETY: This is very unsafe, because it is not initialized, however this code path only calls create_event - // create_event_ex, and set_timer which we will fill in with no-op functions. 
- let mut bs = unsafe { bs.assume_init() }; - - extern "efiapi" fn noop_close_event(_: r_efi::efi::Event) -> r_efi::efi::Status { - r_efi::efi::Status::SUCCESS - } - - bs.close_event = noop_close_event; - - let mut storage = Storage::new(); - storage.set_boot_services(StandardBootServices::new(Box::leak(Box::new(bs)))); - storage.add_config(1_i32); - - let recorder = Recorder::default(); - recorder.update_record(TestRecord::new(false, &TEST_CASE1, None)); - storage.add_service(recorder); - - Recorder::run_tests_and_report(core::ptr::null_mut(), NonNull::from_ref(&storage)); - - // Check that the test run - let recorder = storage.get_service::().expect("Recorder service should be registered."); - let output = format!("{}", *recorder); - assert!(output.contains("test ... ok (1 passes)")); - } -} diff --git a/sdk/patina/src/timer/aarch64.rs b/sdk/patina/src/timer/aarch64.rs new file mode 100644 index 000000000..89c4f7ce5 --- /dev/null +++ b/sdk/patina/src/timer/aarch64.rs @@ -0,0 +1,15 @@ +//! AArch64-specific timer calibration routines. +//! +//! On AArch64 the generic timer (`CNTPCT_EL0` / `CNTFRQ_EL0`) typically +//! reports its frequency directly, so platform-specific calibration is +//! rarely required. +//! +//! This module is reserved for any future AArch64 calibration helpers that +//! may be needed on specific platforms. +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! diff --git a/sdk/patina/src/timer/mod.rs b/sdk/patina/src/timer/mod.rs new file mode 100644 index 000000000..dde53bf69 --- /dev/null +++ b/sdk/patina/src/timer/mod.rs @@ -0,0 +1,135 @@ +//! Arch-specific timer functionality +//! +//! Provides the [`ArchTimerFunctionality`] trait for components that need +//! timing services, plus portable helpers for reading the hardware performance +//! counter and determining its frequency. +//! +//! Architecture-specific calibration routines live in submodules: +//! +//! 
- [`x86_64`] – ACPI PM Timer-based TSC calibration. +//! - [`aarch64`] – (reserved for future use). +//! +//! ## Overriding the frequency +//! +//! By default, this module attempts to determine the timer frequency via architecture specific methods. +//! (cpuid for x86, `CNTFRQ_EL0` for aarch64) +//! +//! Platforms can override this with a custom performance frequency by providing the Core with the correct frequency: +//! +//! +//! ```rust,ignore +//! let frequency_hz: u64 = 1_000_000_000; // Compute with platform-specific methods. +//! +//! Core::default() +//! .init_timer_frequency(Some(frequency_hz)) +//!``` +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +#[cfg(target_arch = "x86_64")] +pub mod x86_64; + +#[cfg(target_arch = "aarch64")] +pub mod aarch64; + +/// Trait that provides architecture-specific timer functionality. +/// Components that need timing functionality can request this service. +pub trait ArchTimerFunctionality: Send + Sync { + /// Value of the counter (ticks). + fn cpu_count(&self) -> u64; + /// Value in Hz of how often the counter increment. + fn perf_frequency(&self) -> u64; + /// Value that the performance counter starts with. + fn cpu_count_start(&self) -> u64 { + 0 + } + /// Value that the performance counter ends with before it rolls over. + fn cpu_count_end(&self) -> u64 { + u64::MAX + } +} + +/// Returns the current CPU count using architecture-specific methods. +/// +/// Skip coverage as any value could be valid, including 0. +#[coverage(off)] +pub fn arch_cpu_count() -> u64 { + #[cfg(target_arch = "x86_64")] + { + use core::arch::x86_64; + unsafe { x86_64::_rdtsc() } + } + #[cfg(target_arch = "aarch64")] + { + use aarch64_cpu::registers::{self, Readable}; + registers::CNTPCT_EL0.get() + } +} + +/// Returns the performance frequency using architecture-specific methods. 
+/// In general, the performance frequency is a configurable value that may be +/// provided by the platform. This function is a fallback when no +/// platform-specific configuration is provided. +/// +/// Skip coverage as any value could be valid, including 0. +#[coverage(off)] +pub fn arch_perf_frequency() -> u64 { + // Try to get TSC frequency from CPUID (most Intel and AMD platforms). + #[cfg(target_arch = "x86_64")] + { + use core::arch::{x86_64, x86_64::CpuidResult}; + + let CpuidResult { eax, ebx, ecx, .. } = x86_64::__cpuid(0x15); + if eax != 0 && ebx != 0 && ecx != 0 { + // CPUID 0x15 gives TSC_frequency = (ECX * EAX) / EBX. + // Most modern x86 platforms support this leaf. + return (ecx as u64 * ebx as u64) / eax as u64; + } + + // CPUID 0x16 gives base frequency in MHz in EAX. + // This is supported on some older x86 platforms. + // This is a nominal frequency and is less accurate for reflecting actual operating conditions. + let CpuidResult { eax, .. } = x86_64::__cpuid(0x16); + if eax != 0 { + return (eax * 1_000_000) as u64; + } + + 0 + } + + // Use CNTFRQ_EL0 for aarch64 platforms. + #[cfg(target_arch = "aarch64")] + { + use patina::read_sysreg; + read_sysreg!(CNTFRQ_EL0) + } + + #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] + 0 +} + +#[cfg(test)] +#[coverage(off)] +mod tests { + use super::*; + + #[test] + fn test_arch_cpu_count_is_monotonic() { + let a = arch_cpu_count(); + let b = arch_cpu_count(); + // TSC / system counter should be non-decreasing. + assert!(b >= a); + } + + #[test] + fn test_arch_perf_frequency_is_non_negative() { + // On a host that supports CPUID 0x15/0x16 this should be > 0. + // On CI VMs it may be 0 — that's acceptable. + let _f = arch_perf_frequency(); + } +} diff --git a/sdk/patina/src/timer/x86_64.rs b/sdk/patina/src/timer/x86_64.rs new file mode 100644 index 000000000..61063ac3f --- /dev/null +++ b/sdk/patina/src/timer/x86_64.rs @@ -0,0 +1,106 @@ +//! x86_64-specific timer calibration routines. +//! 
+//! Provides [`calibrate_tsc_from_pm_timer`] for platforms (e.g. QEMU Q35) +//! where CPUID leaves 0x15/0x16 do not report the TSC frequency. +//! +//! ## References +//! +//! - [ACPI PM Timer](https://uefi.org/specs/ACPI/6.5/04_ACPI_Hardware_Specification.html) +//! - [FADT Table Definition](https://uefi.org/htmlspecs/ACPI_Spec_6_4_html/05_ACPI_Software_Programming_Model/ACPI_Software_Programming_Model.html#fixed-acpi-description-table-fadt) +//! +//! ## License +//! +//! Copyright (c) Microsoft Corporation. +//! +//! SPDX-License-Identifier: Apache-2.0 +//! + +/// Standard ACPI PM Timer frequency: 3.579545 MHz. +const ACPI_PM_TIMER_FREQUENCY: u64 = 3_579_545; + +/// Calibrates the TSC frequency by measuring TSC ticks over a known interval +/// of the ACPI Power Management Timer. +/// +/// This is the standard approach for platforms (e.g. QEMU Q35) where CPUID +/// leaves 0x15/0x16 do not report the TSC frequency. +/// +/// The function measures approximately 50 ms of PM Timer ticks and derives +/// the TSC frequency from the ratio of TSC delta to elapsed wall-clock time. +/// +/// # Safety +/// +/// The caller must ensure that `pm_timer_port` is a valid I/O port for the +/// ACPI PM Timer (e.g. `0x608` on Q35). Reading from an invalid port is +/// undefined behavior. +pub unsafe fn calibrate_tsc_from_pm_timer(pm_timer_port: u16) -> u64 { + use core::arch::x86_64 as arch; + + const MAX_WAIT_CYCLES: usize = 1_000_000; + + // Wait for a PM timer edge to avoid partial intervals. + let mut start_pm = unsafe { read_pm_timer(pm_timer_port) }; + let mut cycles_left = MAX_WAIT_CYCLES; + loop { + let next = unsafe { read_pm_timer(pm_timer_port) }; + if next != start_pm { + start_pm = next; + break; + } + cycles_left -= 1; + if cycles_left == 0 { + log::warn!("PM timer calibration: timeout waiting for edge"); + break; + } + } + + // Record starting TSC. + // SAFETY: `_rdtsc` reads the timestamp counter — no memory safety implications. 
+ let start_tsc = unsafe { arch::_rdtsc() }; + + // Hz / 20 ≈ 50 ms worth of PM timer ticks. + const TARGET_INTERVAL_DIVISOR: u64 = 20; + let target_ticks = (ACPI_PM_TIMER_FREQUENCY / TARGET_INTERVAL_DIVISOR) as u32; + + let mut end_pm; + cycles_left = MAX_WAIT_CYCLES; + loop { + end_pm = unsafe { read_pm_timer(pm_timer_port) }; + if end_pm.wrapping_sub(start_pm) >= target_ticks { + break; + } + cycles_left -= 1; + if cycles_left == 0 { + log::warn!("PM timer calibration: timeout waiting for target ticks"); + return ACPI_PM_TIMER_FREQUENCY; // best-effort fallback + } + } + + // Record ending TSC. + let end_tsc = unsafe { arch::_rdtsc() }; + + // Compute: frequency = delta_tsc / delta_time + // where delta_time = delta_pm / ACPI_PM_TIMER_FREQUENCY (in seconds). + let delta_pm = end_pm.wrapping_sub(start_pm) as u64; + let delta_time_ns = (delta_pm * 1_000_000_000) / ACPI_PM_TIMER_FREQUENCY; + let delta_tsc = end_tsc - start_tsc; + + (delta_tsc * 1_000_000_000) / delta_time_ns +} + +/// Reads the 32-bit ACPI PM Timer value from `port`. +/// +/// # Safety +/// +/// `port` must be a valid I/O port address for the PM Timer. +unsafe fn read_pm_timer(port: u16) -> u32 { + let value: u32; + unsafe { + core::arch::asm!( + "in eax, dx", + in("dx") port, + out("eax") value, + options(nomem, nostack, preserves_flags), + ); + } + value +} diff --git a/sdk/patina/src/tpl_mutex.rs b/sdk/patina/src/tpl_mutex.rs index 755f984ac..877697035 100644 --- a/sdk/patina/src/tpl_mutex.rs +++ b/sdk/patina/src/tpl_mutex.rs @@ -6,8 +6,6 @@ //! //! SPDX-License-Identifier: Apache-2.0 //! -extern crate alloc; - use core::{ cell::{OnceCell, UnsafeCell}, fmt::{self, Debug, Display}, diff --git a/sdk/patina/src/uefi_protocol.rs b/sdk/patina/src/uefi_protocol.rs index 4157d8f70..702e56dcc 100644 --- a/sdk/patina/src/uefi_protocol.rs +++ b/sdk/patina/src/uefi_protocol.rs @@ -8,13 +8,10 @@ //! 
pub mod decompress; -#[cfg(any(test, feature = "alloc"))] pub mod performance_measurement; pub mod status_code; -extern crate alloc; - -use r_efi::efi; +use crate::BinaryGuid; /// Define a binding between an Interface and the corresponding Guid /// @@ -23,7 +20,7 @@ use r_efi::efi; /// Make sure that the Protocol Guid interface had the same layout that the implementer of this struct. pub unsafe trait ProtocolInterface { /// The GUID of the UEFI protocol being implemented. - const PROTOCOL_GUID: efi::Guid; + const PROTOCOL_GUID: BinaryGuid; } macro_rules! impl_r_efi_protocol { @@ -33,7 +30,7 @@ macro_rules! impl_r_efi_protocol { // protocol definitions and GUIDs from the UEFI specification. The Protocol struct layout matches // the UEFI protocol interface requirements. unsafe impl ProtocolInterface for r_efi::efi::protocols::$protocol::Protocol { - const PROTOCOL_GUID: r_efi::efi::Guid = r_efi::efi::protocols::$protocol::PROTOCOL_GUID; + const PROTOCOL_GUID: BinaryGuid = BinaryGuid(r_efi::efi::protocols::$protocol::PROTOCOL_GUID); } }; } diff --git a/sdk/patina/src/uefi_protocol/decompress.rs b/sdk/patina/src/uefi_protocol/decompress.rs index 218875dc9..8bb1eaf44 100644 --- a/sdk/patina/src/uefi_protocol/decompress.rs +++ b/sdk/patina/src/uefi_protocol/decompress.rs @@ -117,6 +117,5 @@ impl Default for EfiDecompressProtocol { // The PROTOCOL_GUID matches the UEFI specification for the Decompress protocol. // The protocol structure layout matches the UEFI protocol requirements. 
unsafe impl ProtocolInterface for EfiDecompressProtocol { - const PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0xd8117cfe, 0x94A6, 0x11D4, 0x9A, 0x3A, &[0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D]); + const PROTOCOL_GUID: crate::BinaryGuid = crate::BinaryGuid::from_string("D8117CFE-94A6-11D4-9A3A-0090273FC14D"); } diff --git a/sdk/patina/src/uefi_protocol/performance_measurement.rs b/sdk/patina/src/uefi_protocol/performance_measurement.rs index 552d04ec0..8e87c46b7 100644 --- a/sdk/patina/src/uefi_protocol/performance_measurement.rs +++ b/sdk/patina/src/uefi_protocol/performance_measurement.rs @@ -20,12 +20,12 @@ use r_efi::efi; use crate::{performance::measurement::CallerIdentifier, uefi_protocol::ProtocolInterface}; /// GUID for the EDKII Performance Measurement Protocol. -pub const EDKII_PERFORMANCE_MEASUREMENT_PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0xc85d06be, 0x5f75, 0x48ce, 0xa8, 0x0f, &[0x12, 0x36, 0xba, 0x3b, 0x87, 0xb1]); +pub const EDKII_PERFORMANCE_MEASUREMENT_PROTOCOL_GUID: crate::BinaryGuid = + crate::BinaryGuid::from_string("C85D06BE-5F75-48CE-A80F-1236BA3B87B1"); /// GUID for the EDKII SMM Performance Measurement Protocol. -pub const EDKII_SMM_PERFORMANCE_MEASUREMENT_PROTOCOL_GUID: efi::Guid = - efi::Guid::from_fields(0xd56b6d73, 0x1a7b, 0x4015, 0x9b, 0xb4, &[0x7b, 0x07, 0x17, 0x29, 0xed, 0x24]); +pub const EDKII_SMM_PERFORMANCE_MEASUREMENT_PROTOCOL_GUID: crate::BinaryGuid = + crate::BinaryGuid::from_string("D56B6D73-1A7B-4015-9BB4-7B071729ED24"); /// The attribute of the measurement. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] @@ -71,5 +71,5 @@ pub struct EdkiiPerformanceMeasurement { // The PROTOCOL_GUID matches the EDK II defined value. The protocol structure layout matches the protocol // interface requirements. 
unsafe impl ProtocolInterface for EdkiiPerformanceMeasurement { - const PROTOCOL_GUID: efi::Guid = EDKII_PERFORMANCE_MEASUREMENT_PROTOCOL_GUID; + const PROTOCOL_GUID: crate::BinaryGuid = EDKII_PERFORMANCE_MEASUREMENT_PROTOCOL_GUID; } diff --git a/sdk/patina/src/uefi_protocol/status_code.rs b/sdk/patina/src/uefi_protocol/status_code.rs index 2c3171e27..f79038473 100644 --- a/sdk/patina/src/uefi_protocol/status_code.rs +++ b/sdk/patina/src/uefi_protocol/status_code.rs @@ -10,9 +10,6 @@ //! //! SPDX-License-Identifier: Apache-2.0 //! - -extern crate alloc; - use core::{mem, ptr, slice}; use r_efi::efi; @@ -35,7 +32,7 @@ pub struct StatusCodeRuntimeProtocol { // The PROTOCOL_GUID matches the PI specification. The repr(transparent) ensures that the // structure layout matches the underlying r_efi protocol definition. unsafe impl ProtocolInterface for StatusCodeRuntimeProtocol { - const PROTOCOL_GUID: efi::Guid = status_code::PROTOCOL_GUID; + const PROTOCOL_GUID: crate::BinaryGuid = status_code::PROTOCOL_GUID; } impl StatusCodeRuntimeProtocol { diff --git a/sdk/patina_ffs/src/file.rs b/sdk/patina_ffs/src/file.rs index c850bac6d..3f7982de2 100644 --- a/sdk/patina_ffs/src/file.rs +++ b/sdk/patina_ffs/src/file.rs @@ -25,7 +25,6 @@ use crate::{ use alloc::vec::Vec; use core::{fmt, iter, mem, ptr, slice::from_raw_parts}; -use r_efi::efi; /// Zero-copy view over a Firmware File System (FFS) file backed by a byte slice. /// @@ -60,7 +59,7 @@ impl<'a> FileRef<'a> { /// use patina_ffs::section::{Section, SectionHeader}; /// /// // Build a file and then parse the resulting bytes back. - /// let guid = efi::Guid::from_bytes(&[0u8; 16]); + /// let guid = patina::guids::ZERO; /// let mut file = File::new(guid, 0x07); /// let data = b"hello".to_vec(); /// let section = Section::new_from_header_with_data( @@ -155,7 +154,7 @@ impl<'a> FileRef<'a> { } /// The file name GUID from the FFS header. 
- pub fn name(&self) -> efi::Guid { + pub fn name(&self) -> patina::BinaryGuid { self.header.name } @@ -246,7 +245,7 @@ impl<'a> FileRef<'a> { /// } /// /// // Build a simple file containing a single RAW section and parse it back. - /// let guid = efi::Guid::from_bytes(&[0u8; 16]); + /// let guid = patina::guids::ZERO; /// let mut file = File::new(guid, 0x07); /// let data = b"hello".to_vec(); /// let section = Section::new_from_header_with_data( @@ -294,7 +293,7 @@ impl fmt::Debug for FileRef<'_> { /// (e.g., data checksum, large-file), select erase polarity, and then /// [`serialize`](Self::serialize) into a well-formed byte stream. pub struct File { - name: efi::Guid, + name: patina::BinaryGuid, file_type_raw: u8, attributes: u8, erase_polarity: bool, @@ -303,7 +302,7 @@ pub struct File { impl File { /// Create a new, empty FFS file builder with the given name and type. - pub fn new(name: efi::Guid, file_type_raw: u8) -> Self { + pub fn new(name: patina::BinaryGuid, file_type_raw: u8) -> Self { Self { name, file_type_raw, attributes: 0, erase_polarity: true, sections: Vec::new() } } @@ -321,7 +320,7 @@ impl File { /// use patina_ffs::file::File; /// use patina_ffs::section::{Section, SectionHeader}; /// - /// let guid = efi::Guid::from_bytes(&[0u8; 16]); + /// let guid = patina::guids::ZERO; /// let mut file = File::new(guid, 0x07); /// file.set_data_checksum(true); /// @@ -532,7 +531,7 @@ impl File { /// } /// } /// - /// let guid = efi::Guid::from_bytes(&[0u8; 16]); + /// let guid = patina::guids::ZERO; /// let mut file = File::new(guid, 0x07); /// let data = b"hello".to_vec(); /// file.sections_mut().push(Section::new_from_header_with_data( @@ -568,7 +567,7 @@ impl File { /// } /// } /// - /// let guid = efi::Guid::from_bytes(&[0u8; 16]); + /// let guid = patina::guids::ZERO; /// let mut file = File::new(guid, 0x07); /// let data = b"hello".to_vec(); /// file.sections_mut().push(Section::new_from_header_with_data( @@ -614,7 +613,7 @@ impl File { } /// The 
file name GUID set for this file. - pub fn name(&self) -> efi::Guid { + pub fn name(&self) -> patina::BinaryGuid { self.name } diff --git a/sdk/patina_ffs/src/volume.rs b/sdk/patina_ffs/src/volume.rs index 15c876d9e..b21bd85dd 100644 --- a/sdk/patina_ffs/src/volume.rs +++ b/sdk/patina_ffs/src/volume.rs @@ -18,8 +18,7 @@ use core::{ fmt, iter, mem, ptr, slice::{self, from_raw_parts}, }; -use patina::{base::align_up, log_debug_assert}; -use r_efi::efi; +use patina::{BinaryGuid, base::align_up, log_debug_assert}; use patina::pi::fw_fs::{ ffs::{self, file}, @@ -268,7 +267,7 @@ impl<'a> VolumeRef<'a> { } /// The Firmware Volume name GUID from the extended header, if available. - pub fn fv_name(&self) -> Option { + pub fn fv_name(&self) -> Option { self.ext_header().map(|(hdr, _)| hdr.fv_name) } @@ -343,7 +342,7 @@ impl<'a> VolumeRef<'a> { self.fv_header.revision } - fn file_system_guid(&self) -> efi::Guid { + fn file_system_guid(&self) -> BinaryGuid { self.fv_header.file_system_guid } } @@ -430,7 +429,7 @@ enum Capacity { /// Use this to build an FV from a block map and a list of FFS files, set attributes, /// optionally attach an extended header, and then [`serialize`](Self::serialize) to bytes. 
pub struct Volume { - file_system_guid: efi::Guid, + file_system_guid: patina::BinaryGuid, attributes: fvb::attributes::EfiFvbAttributes2, ext_header: Option<(fv::ExtHeader, Vec)>, block_map: Vec, @@ -469,7 +468,7 @@ impl Volume { /// use r_efi::efi; /// /// let mut fv = Volume::new(vec![BlockMapEntry { num_blocks: 1, length: 4096 }]); - /// fv.files_mut().push(File::new(efi::Guid::from_bytes(&[0u8; 16]), ffs::file::raw::r#type::FFS_PAD)); + /// fv.files_mut().push(File::new(patina::BinaryGuid::from_bytes(&[0u8; 16]), ffs::file::raw::r#type::FFS_PAD)); /// assert_eq!(fv.files().count(), 1); /// ``` pub fn files_mut(&mut self) -> &mut Vec { @@ -497,7 +496,7 @@ impl Volume { /// let mut fv = Volume::new(vec![BlockMapEntry { num_blocks: 4, length: 4096 }]); /// /// for (i, payload) in ["alpha", "beta", "gamma"].into_iter().enumerate() { - /// let guid = efi::Guid::from_bytes(&[i as u8; 16]); + /// let guid = patina::BinaryGuid(efi::Guid::from_bytes(&[i as u8; 16])); /// let mut file = File::new(guid, 0x07); // arbitrary file type for example /// /// let data = payload.as_bytes().to_vec(); @@ -558,7 +557,7 @@ impl Volume { // ext_header data is added as a "Pad" file let mut ext_header_pad_file = - File::new(efi::Guid::from_bytes(&[0xffu8; 16]), ffs::file::raw::r#type::FFS_PAD); + File::new(patina::BinaryGuid::from_bytes(&[0xffu8; 16]), ffs::file::raw::r#type::FFS_PAD); let ext_header_section = Section::new_from_header_with_data( section::SectionHeader::Pad( ext_hdr_data.len().try_into().map_err(|_| FirmwareFileSystemError::InvalidHeader)?, @@ -619,7 +618,8 @@ impl Volume { 0 ); - let mut pad_file = File::new(efi::Guid::from_bytes(&[0xffu8; 16]), ffs::file::raw::r#type::FFS_PAD); + let mut pad_file = + File::new(patina::BinaryGuid::from_bytes(&[0xffu8; 16]), ffs::file::raw::r#type::FFS_PAD); let pad_section = Section::new_from_header_with_data( section::SectionHeader::Pad( pad_len.try_into().map_err(|_| FirmwareFileSystemError::InvalidHeader)?, @@ -701,7 +701,7 @@ impl 
Volume { /// /// let mut fv = Volume::new(vec![BlockMapEntry { num_blocks: 1, length: 4096 }]); /// // Add an empty PAD file to keep it simple - /// fv.files_mut().push(patina_ffs::file::File::new(efi::Guid::from_bytes(&[0u8; 16]), ffs::file::raw::r#type::FFS_PAD)); + /// fv.files_mut().push(patina_ffs::file::File::new(patina::BinaryGuid::from_bytes(&[0u8; 16]), ffs::file::raw::r#type::FFS_PAD)); /// fv.compose(&Passthrough).unwrap(); /// ``` pub fn compose(&mut self, composer: &dyn SectionComposer) -> Result<(), FirmwareFileSystemError> { @@ -1079,7 +1079,7 @@ mod test { let fv_header = fv_bytes.as_mut_ptr() as *mut fv::Header; //SAFETY: Deliberately corrupting the FV header for test purposes. unsafe { - (*fv_header).file_system_guid = efi::Guid::from_bytes(&[0xa5; 16]); + (*fv_header).file_system_guid = patina::BinaryGuid::from(efi::Guid::from_bytes(&[0xa5; 16])); }; assert_eq!(VolumeRef::new(&fv_bytes).unwrap_err(), FirmwareFileSystemError::InvalidHeader); diff --git a/sdk/patina_ffs_extractors/src/crc32.rs b/sdk/patina_ffs_extractors/src/crc32.rs index e9e058909..1befbf576 100644 --- a/sdk/patina_ffs_extractors/src/crc32.rs +++ b/sdk/patina_ffs_extractors/src/crc32.rs @@ -53,7 +53,6 @@ mod tests { use super::*; use patina::pi::fw_fs::ffs::section::header::GuidDefined; use patina_ffs::section::Section; - use r_efi::efi; #[test] fn test_crc32_extractor_valid() { @@ -93,8 +92,14 @@ mod tests { #[test] fn test_crc32_extractor_unsupported_guid() { - let wrong_guid = - efi::Guid::from_fields(0x12345678, 0x1234, 0x5678, 0x12, 0x34, &[0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0]); + let wrong_guid = patina::BinaryGuid::from_fields( + 0x12345678, + 0x1234, + 0x5678, + 0x12, + 0x34, + &[0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0], + ); let content = b"Test data"; let guid_header = GuidDefined { diff --git a/sdk/patina_ffs_extractors/src/lzma.rs b/sdk/patina_ffs_extractors/src/lzma.rs index 088644d3d..76a4aa681 100644 --- a/sdk/patina_ffs_extractors/src/lzma.rs +++ 
b/sdk/patina_ffs_extractors/src/lzma.rs @@ -8,17 +8,14 @@ //! use alloc::vec::Vec; use core::result::Result; +use patina::pi::fw_fs::guid::LZMA_SECTION; use patina_ffs::{ FirmwareFileSystemError, section::{Section, SectionExtractor, SectionHeader}, }; -use r_efi::efi; use patina_lzma_rs::io::Cursor; -pub const LZMA_SECTION_GUID: efi::Guid = - efi::Guid::from_fields(0xEE4E5898, 0x3914, 0x4259, 0x9D, 0x6E, &[0xDC, 0x7B, 0xD7, 0x94, 0x03, 0xCF]); - pub const LZMA_UNKNOWN_UNPACKED_SIZE_MAGIC_VALUE: u64 = 0xFFFF_FFFF_FFFF_FFFF; /// Provides decompression for LZMA GUIDed sections. @@ -36,7 +33,7 @@ impl LzmaSectionExtractor { impl SectionExtractor for LzmaSectionExtractor { fn extract(&self, section: &Section) -> Result, FirmwareFileSystemError> { if let SectionHeader::GuidDefined(guid_header, _, _) = section.header() - && guid_header.section_definition_guid == LZMA_SECTION_GUID + && guid_header.section_definition_guid == LZMA_SECTION { let data = section.try_content_as_slice()?; @@ -116,8 +113,14 @@ mod tests { #[test] fn test_lzma_extractor_unsupported_guid() { - let wrong_guid = - efi::Guid::from_fields(0x12345678, 0x1234, 0x5678, 0x12, 0x34, &[0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0]); + let wrong_guid = patina::BinaryGuid::from_fields( + 0x12345678, + 0x1234, + 0x5678, + 0x12, + 0x34, + &[0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0], + ); let dummy_data = b"Dummy data"; let guid_header = GuidDefined { diff --git a/sdk/patina_macro/README.md b/sdk/patina_macro/README.md index 5684cd0d2..8e30208e1 100644 --- a/sdk/patina_macro/README.md +++ b/sdk/patina_macro/README.md @@ -81,7 +81,7 @@ struct FirmwareVolumeHeader { - `#[skip]` ```rust -use patina::test::{patina_test, Result}; +use patina_test::{patina_test, error::Result}; #[cfg_attr(target_arch = "x86_64", patina_test)] fn spi_smoke_test() -> Result { diff --git a/sdk/patina_macro/src/hob_macro.rs b/sdk/patina_macro/src/hob_macro.rs index 9e94fc578..792d4aea6 100644 --- a/sdk/patina_macro/src/hob_macro.rs +++ 
b/sdk/patina_macro/src/hob_macro.rs @@ -36,20 +36,18 @@ impl HobConfig { return Err(syn::Error::new(attr.span(), "Expected #[hob = \"GUID\"]")); }; - let id = match uuid::Uuid::parse_str(&nv.value.to_token_stream().to_string().replace("\"", "")) { + let guid_str = nv.value.to_token_stream().to_string().replace("\"", ""); + // Validate the GUID format + let id = match uuid::Uuid::parse_str(&guid_str) { Err(_) => return Err(syn::Error::new(attr.span(), "Invalid GUID format")), Ok(id) => id, }; - let fields = id.as_fields(); - let node: &[u8; 6] = - &fields.3[2..].try_into().map_err(|_| syn::Error::new(attr.span(), "Invalid GUID format"))?; - let (a, b, c) = (fields.0, fields.1, fields.2); - let (d0, d1) = (fields.3[0], fields.3[1]); - let [d2, d3, d4, d5, d6, d7] = *node; + // Emit the GUID in the canonical uppercase hyphenated format + let canonical = id.as_hyphenated().to_string().to_uppercase(); Ok(quote! { - patina::OwnedGuid::from_fields(#a, #b, #c, #d0, #d1, [#d2, #d3, #d4, #d5, #d6, #d7]) + patina::BinaryGuid::from_string(#canonical) }) } @@ -130,7 +128,7 @@ pub fn hob_config2(item: proc_macro2::TokenStream) -> proc_macro2::TokenStream { quote! { impl #lhs patina::component::hob::FromHob for #name #rhs #where_clause { - const HOB_GUID: patina::OwnedGuid = #hob_guid; + const HOB_GUID: patina::BinaryGuid = #hob_guid; fn parse(bytes: &[u8]) -> Self { let hob = match ::read_from_prefix(bytes) { @@ -166,17 +164,11 @@ mod tests { struct MyStruct(u32); }; - const TEST_HOB_GUID: patina::OwnedGuid = patina::OwnedGuid::from_fields( - 2347032417u32, - 37834u16, - 4562u16, - 170u8, - 13u8, - [0u8, 224u8, 152u8, 3u8, 43u8, 140u8], - ); + const TEST_HOB_GUID: patina::BinaryGuid = + patina::BinaryGuid::from_string("8BE4DF61-93CA-11D2-AA0D-00E098032B8C"); let expected = quote! 
{ impl patina::component::hob::FromHob for MyStruct { - const HOB_GUID: patina::OwnedGuid = patina::OwnedGuid::from_fields(2347032417u32, 37834u16, 4562u16, 170u8, 13u8, [0u8, 224u8, 152u8, 3u8, 43u8, 140u8]); + const HOB_GUID: patina::BinaryGuid = patina::BinaryGuid::from_string("8BE4DF61-93CA-11D2-AA0D-00E098032B8C"); fn parse(bytes: &[u8]) -> Self { let hob = match ::read_from_prefix(bytes) { Ok((hob, _)) => hob, @@ -210,18 +202,12 @@ mod tests { struct MyStruct(u32); }; - const TEST_HOB_GUID: patina::OwnedGuid = patina::OwnedGuid::from_fields( - 0xea296d92u32, - 0x0b69u16, - 0x423cu16, - 0x8cu8, - 0x28u8, - [0x33u8, 0xb4u8, 0xe0u8, 0xa9u8, 0x12u8, 0x68u8], - ); + const TEST_HOB_GUID: patina::BinaryGuid = + patina::BinaryGuid::from_string("EA296D92-0B69-423C-8C28-33B4E0A91268"); let expected = quote! { impl patina::component::hob::FromHob for MyStruct { - const HOB_GUID: patina::OwnedGuid = patina::OwnedGuid::from_fields(3928583570u32, 2921u16, 16956u16, 140u8, 40u8, [51u8, 180u8, 224u8, 169u8, 18u8, 104u8]); + const HOB_GUID: patina::BinaryGuid = patina::BinaryGuid::from_string("EA296D92-0B69-423C-8C28-33B4E0A91268"); fn parse(bytes: &[u8]) -> Self { let hob = match ::read_from_prefix(bytes) { Ok((hob, _)) => hob, @@ -258,7 +244,7 @@ mod tests { }; let expected = quote! 
{ impl patina::component::hob::FromHob for MyStruct { - const HOB_GUID: patina::OwnedGuid = patina::OwnedGuid::from_fields(2347032417u32, 37834u16, 4562u16, 170u8, 13u8, [0u8, 224u8, 152u8, 3u8, 43u8, 140u8]); + const HOB_GUID: patina::BinaryGuid = patina::BinaryGuid::from_string("8BE4DF61-93CA-11D2-AA0D-00E098032B8C"); fn parse(bytes: &[u8]) -> Self { let hob = match ::read_from_prefix(bytes) { Ok((hob, _)) => hob, diff --git a/sdk/patina_macro/src/lib.rs b/sdk/patina_macro/src/lib.rs index aa2d613dc..9a4918e40 100644 --- a/sdk/patina_macro/src/lib.rs +++ b/sdk/patina_macro/src/lib.rs @@ -107,10 +107,8 @@ pub fn hob_config(item: proc_macro::TokenStream) -> proc_macro::TokenStream { /// ## Example /// /// ```ignore -/// use patina::test::*; +/// use patina_test::{patina_test, u_assert_eq, u_assert, error::Result}; /// use patina::boot_services::StandardBootServices; -/// use patina::test::patina_test; -/// use patina::{u_assert, u_assert_eq}; /// /// #[patina_test] /// fn test_case() -> Result { diff --git a/sdk/patina_macro/src/test_macro.rs b/sdk/patina_macro/src/test_macro.rs index 45dc150a9..b50cfc792 100644 --- a/sdk/patina_macro/src/test_macro.rs +++ b/sdk/patina_macro/src/test_macro.rs @@ -56,7 +56,7 @@ fn process_attributes(item: &mut ItemFn) -> syn::Result syn::Result { Meta::NameValue(nv) if nv.path.is_ident("event") => { let value = &nv.value; return Ok(quote! { - patina::test::__private_api::TestTrigger::Event(&#value) + patina_test::__private_api::TestTrigger::Event(#value) }); } // CASE2: $[on(timer = interval_in_100ns_units)] Meta::NameValue(nv) if nv.path.is_ident("timer") => { let value = &nv.value; return Ok(quote! { - patina::test::__private_api::TestTrigger::Timer(#value) + patina_test::__private_api::TestTrigger::Timer(#value) }); } // No other cases are supported right now. _ => { return Err(syn::Error::new( meta.span(), - "Unsupported attribute key. 
See patina::test::__private_api::TestTrigger for supported keys.", + "Unsupported attribute key. See patina_test::__private_api::TestTrigger for supported keys.", )); } } @@ -184,17 +184,17 @@ fn generate_expanded_test_case( let trigger = test_case_config.get(KEY_TRIGGER).expect("All configuration should have a default value set."); let expanded = quote! { - #[patina::test::linkme::distributed_slice(patina::test::__private_api::TEST_CASES)] - #[linkme(crate = patina::test::linkme)] + #[patina_test::linkme::distributed_slice(patina_test::__private_api::TEST_CASES)] + #[linkme(crate = patina_test::linkme)] #[allow(non_upper_case_globals)] - static #struct_name: patina::test::__private_api::TestCase = - patina::test::__private_api::TestCase { + static #struct_name: patina_test::__private_api::TestCase = + patina_test::__private_api::TestCase { name: concat!(module_path!(), "::", stringify!(#fn_name)), triggers: #trigger, skip: #skip, should_fail: #should_fail, fail_msg: #fail_msg, - func: |storage| patina::test::__private_api::FunctionTest::new(#fn_name).run(storage.into()), + func: |storage| patina_test::__private_api::FunctionTest::new(#fn_name).run(storage.into()), }; #item }; @@ -232,16 +232,16 @@ mod tests { let expanded = patina_test2(stream); let expected = quote! 
{ - #[patina::test::linkme::distributed_slice(patina::test::__private_api::TEST_CASES)] - #[linkme(crate = patina::test::linkme)] + #[patina_test::linkme::distributed_slice(patina_test::__private_api::TEST_CASES)] + #[linkme(crate = patina_test::linkme)] #[allow(non_upper_case_globals)] - static __my_test_case_TestCase: patina::test::__private_api::TestCase = patina::test::__private_api::TestCase { + static __my_test_case_TestCase: patina_test::__private_api::TestCase = patina_test::__private_api::TestCase { name: concat!(module_path!(), "::", stringify!(my_test_case)), - triggers: &[patina::test::__private_api::TestTrigger::Manual], + triggers: &[patina_test::__private_api::TestTrigger::Manual], skip: false, should_fail: false, fail_msg: None, - func: |storage| patina::test::__private_api::FunctionTest::new(my_test_case).run(storage.into()), + func: |storage| patina_test::__private_api::FunctionTest::new(my_test_case).run(storage.into()), }; fn my_test_case() -> Result { assert!(true); @@ -264,17 +264,17 @@ mod tests { let expanded = patina_test2(stream); let expected = quote! 
{ - #[patina::test::linkme::distributed_slice(patina::test::__private_api::TEST_CASES)] - #[linkme(crate = patina::test::linkme)] + #[patina_test::linkme::distributed_slice(patina_test::__private_api::TEST_CASES)] + #[linkme(crate = patina_test::linkme)] #[allow(non_upper_case_globals)] - static __my_test_case_TestCase: patina::test::__private_api::TestCase = - patina::test::__private_api::TestCase { + static __my_test_case_TestCase: patina_test::__private_api::TestCase = + patina_test::__private_api::TestCase { name: concat!(module_path!(), "::", stringify!(my_test_case)), - triggers: &[patina::test::__private_api::TestTrigger::Manual], + triggers: &[patina_test::__private_api::TestTrigger::Manual], skip: true, should_fail: false, fail_msg: None, - func: |storage| patina::test::__private_api::FunctionTest::new(my_test_case).run(storage.into()), + func: |storage| patina_test::__private_api::FunctionTest::new(my_test_case).run(storage.into()), }; fn my_test_case() -> Result { assert!(true); @@ -328,7 +328,7 @@ mod tests { let tokens = parse_on_attr(&attr).unwrap(); let expected = quote! { - patina::test::__private_api::TestTrigger::Event(&patina::guids::EVENT_GROUP_END_OF_DXE) + patina_test::__private_api::TestTrigger::Event(patina::guids::EVENT_GROUP_END_OF_DXE) }; assert_eq!(tokens.to_string(), expected.to_string()); } @@ -354,7 +354,7 @@ mod tests { let tokens = parse_on_attr(&attr).unwrap(); let expected = quote! { - patina::test::__private_api::TestTrigger::Timer(1000000) + patina_test::__private_api::TestTrigger::Timer(1000000) }; assert_eq!(tokens.to_string(), expected.to_string()); } @@ -400,7 +400,7 @@ mod tests { assert_eq!(tc_cfg.get(KEY_SKIP).unwrap().to_string(), "true"); assert_eq!( tc_cfg.get(KEY_TRIGGER).unwrap().to_string(), - "& [patina :: test :: __private_api :: TestTrigger :: Manual]" + "& [patina_test :: __private_api :: TestTrigger :: Manual]" ); } @@ -436,7 +436,7 @@ mod tests { let expanded = patina_test2(stream); let expected = quote! 
{ - ::core::compile_error ! { "Unsupported attribute key. See patina::test::__private_api::TestTrigger for supported keys." } + ::core::compile_error ! { "Unsupported attribute key. See patina_test::__private_api::TestTrigger for supported keys." } }; assert_eq!(expanded.to_string(), expected.to_string()); } @@ -455,17 +455,17 @@ mod tests { let expanded = patina_test2(stream); let expected = quote! { - #[patina::test::linkme::distributed_slice(patina::test::__private_api::TEST_CASES)] - #[linkme(crate = patina::test::linkme)] + #[patina_test::linkme::distributed_slice(patina_test::__private_api::TEST_CASES)] + #[linkme(crate = patina_test::linkme)] #[allow(non_upper_case_globals)] - static __my_test_case_TestCase: patina::test::__private_api::TestCase = - patina::test::__private_api::TestCase { + static __my_test_case_TestCase: patina_test::__private_api::TestCase = + patina_test::__private_api::TestCase { name: concat!(module_path!(), "::", stringify!(my_test_case)), - triggers: &[patina::test::__private_api::TestTrigger::Event(&patina::guids::EVENT_GROUP_END_OF_DXE)], + triggers: &[patina_test::__private_api::TestTrigger::Event(patina::guids::EVENT_GROUP_END_OF_DXE)], skip: true, should_fail: true, fail_msg: Some("Expected Error"), - func: |storage| patina::test::__private_api::FunctionTest::new(my_test_case).run(storage.into()), + func: |storage| patina_test::__private_api::FunctionTest::new(my_test_case).run(storage.into()), }; fn my_test_case() -> Result { assert!(true); @@ -491,21 +491,21 @@ mod tests { let expanded = patina_test2(stream); let expected = quote! 
{ - #[patina::test::linkme::distributed_slice(patina::test::__private_api::TEST_CASES)] - #[linkme(crate = patina::test::linkme)] + #[patina_test::linkme::distributed_slice(patina_test::__private_api::TEST_CASES)] + #[linkme(crate = patina_test::linkme)] #[allow(non_upper_case_globals)] - static __my_test_case_TestCase: patina::test::__private_api::TestCase = - patina::test::__private_api::TestCase { + static __my_test_case_TestCase: patina_test::__private_api::TestCase = + patina_test::__private_api::TestCase { name: concat!(module_path!(), "::", stringify!(my_test_case)), triggers: &[ - patina::test::__private_api::TestTrigger::Event(&patina::guids::EVENT_GROUP_END_OF_DXE), - patina::test::__private_api::TestTrigger::Timer(1000000), - patina::test::__private_api::TestTrigger::Event(&patina::guids::EVENT_GROUP_READY_TO_BOOT) + patina_test::__private_api::TestTrigger::Event(patina::guids::EVENT_GROUP_END_OF_DXE), + patina_test::__private_api::TestTrigger::Timer(1000000), + patina_test::__private_api::TestTrigger::Event(patina::guids::EVENT_GROUP_READY_TO_BOOT) ], skip: true, should_fail: true, fail_msg: Some("Expected Error"), - func: |storage| patina::test::__private_api::FunctionTest::new(my_test_case).run(storage.into()), + func: |storage| patina_test::__private_api::FunctionTest::new(my_test_case).run(storage.into()), }; fn my_test_case() -> Result { assert!(true); @@ -529,22 +529,22 @@ mod tests { config.insert(KEY_SHOULD_FAIL, quote! {true}); config.insert(KEY_FAIL_MSG, quote! {Some("Expected Error")}); config.insert(KEY_SKIP, quote! {false}); - config.insert(KEY_TRIGGER, quote! { patina::test::__private_api::TestTrigger::Manual }); + config.insert(KEY_TRIGGER, quote! { patina_test::__private_api::TestTrigger::Manual }); let expanded = generate_expanded_test_case(&item, &config); let expected = quote! 
{ - #[patina::test::linkme::distributed_slice(patina::test::__private_api::TEST_CASES)] - #[linkme(crate = patina::test::linkme)] + #[patina_test::linkme::distributed_slice(patina_test::__private_api::TEST_CASES)] + #[linkme(crate = patina_test::linkme)] #[allow(non_upper_case_globals)] - static __my_test_case_TestCase: patina::test::__private_api::TestCase = - patina::test::__private_api::TestCase { + static __my_test_case_TestCase: patina_test::__private_api::TestCase = + patina_test::__private_api::TestCase { name: concat!(module_path!(), "::", stringify!(my_test_case)), - triggers: patina::test::__private_api::TestTrigger::Manual, + triggers: patina_test::__private_api::TestTrigger::Manual, skip: false, should_fail: true, fail_msg: Some("Expected Error"), - func: |storage| patina::test::__private_api::FunctionTest::new(my_test_case).run(storage.into()), + func: |storage| patina_test::__private_api::FunctionTest::new(my_test_case).run(storage.into()), }; fn my_test_case() -> Result { assert!(true);