VirtualBox

source: vbox/trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/CpuMpPei/CpuPaging.c@ 99404

最後變更：此檔案於修訂版 99404 由 vboxsync 提交於 2 年前

Devices/EFI/FirmwareNew: Update to edk2-stable202302 and make it build, bugref:4643

  • 屬性 svn:eol-style 設為 native
檔案大小: 18.7 KB
 
1/** @file
2 Basic paging support for the CPU to enable Stack Guard.
3
4Copyright (c) 2018 - 2019, Intel Corporation. All rights reserved.<BR>
5
6SPDX-License-Identifier: BSD-2-Clause-Patent
7
8**/
9
10#include <Register/Intel/Cpuid.h>
11#include <Register/Intel/Msr.h>
12#include <Library/MemoryAllocationLib.h>
13#include <Library/CpuLib.h>
14#include <Library/BaseLib.h>
15#include <Guid/MigratedFvInfo.h>
16#ifdef VBOX
17# define IN_RING0
18# include <iprt/asm.h>
19#endif
20
21#include "CpuMpPei.h"
22
23#define IA32_PG_P BIT0
24#define IA32_PG_RW BIT1
25#define IA32_PG_U BIT2
26#define IA32_PG_A BIT5
27#define IA32_PG_D BIT6
28#define IA32_PG_PS BIT7
29#define IA32_PG_NX BIT63
30
31#define PAGE_ATTRIBUTE_BITS (IA32_PG_RW | IA32_PG_P)
32#define PAGE_PROGATE_BITS (IA32_PG_D | IA32_PG_A | IA32_PG_NX | IA32_PG_U | \
33 PAGE_ATTRIBUTE_BITS)
34
35#define PAGING_PAE_INDEX_MASK 0x1FF
36#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull
37#define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull
38#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull
39#define PAGING_512G_ADDRESS_MASK_64 0x000FFF8000000000ull
40
41typedef enum {
42 PageNone = 0,
43 PageMin = 1,
44 Page4K = PageMin,
45 Page2M = 2,
46 Page1G = 3,
47 Page512G = 4,
48 PageMax = Page512G
49} PAGE_ATTRIBUTE;
50
51typedef struct {
52 PAGE_ATTRIBUTE Attribute;
53 UINT64 Length;
54 UINT64 AddressMask;
55 UINTN AddressBitOffset;
56 UINTN AddressBitLength;
57} PAGE_ATTRIBUTE_TABLE;
58
59PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
60 { PageNone, 0, 0, 0, 0 },
61 { Page4K, SIZE_4KB, PAGING_4K_ADDRESS_MASK_64, 12, 9 },
62 { Page2M, SIZE_2MB, PAGING_2M_ADDRESS_MASK_64, 21, 9 },
63 { Page1G, SIZE_1GB, PAGING_1G_ADDRESS_MASK_64, 30, 9 },
64 { Page512G, SIZE_512GB, PAGING_512G_ADDRESS_MASK_64, 39, 9 },
65};
66
67EFI_PEI_NOTIFY_DESCRIPTOR mPostMemNotifyList[] = {
68 {
69 (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
70 &gEfiPeiMemoryDiscoveredPpiGuid,
71 MemoryDiscoveredPpiNotifyCallback
72 }
73};
74
75#ifdef VBOX
76/**
77 Safe page table entry write function, make 104% sure the compiler won't
78 split up the access (fatal if modifying entries for current code or data).
79
80 @param[in] PageEntry The page table entry to modify.*
81 @param[in] CurrentPageEntry The old page table value (for cmpxchg8b).
82 @param[in] NewPageEntry What to write.
83**/
84static VOID SafePageTableEntryWrite64 (UINT64 volatile *PageEntry, UINT64 CurrentPageEntry, UINT64 NewPageEntry)
85{
86# ifdef VBOX
87 ASMAtomicWriteU64(PageEntry, NewPageEntry); RT_NOREF(CurrentPageEntry);
88# else
89 for (;;) {
90 UINT64 CurValue = InterlockedCompareExchange64(PageEntry, CurrentPageEntry, NewPageEntry);
91 if (CurValue == CurrentPageEntry)
92 return;
93 CurrentPageEntry = CurValue;
94 }
95# endif
96}
97#endif
98
99/**
100 The function will check if IA32 PAE is supported.
101
102 @retval TRUE IA32 PAE is supported.
103 @retval FALSE IA32 PAE is not supported.
104
105**/
106BOOLEAN
107IsIa32PaeSupported (
108 VOID
109 )
110{
111 UINT32 RegEax;
112 CPUID_VERSION_INFO_EDX RegEdx;
113
114 AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
115 if (RegEax >= CPUID_VERSION_INFO) {
116 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
117 if (RegEdx.Bits.PAE != 0) {
118 return TRUE;
119 }
120 }
121
122 return FALSE;
123}
124
125/**
126 This API provides a way to allocate memory for page table.
127
128 @param Pages The number of 4 KB pages to allocate.
129
130 @return A pointer to the allocated buffer or NULL if allocation fails.
131
132**/
133VOID *
134AllocatePageTableMemory (
135 IN UINTN Pages
136 )
137{
138 VOID *Address;
139
140 Address = AllocatePages (Pages);
141 if (Address != NULL) {
142 ZeroMem (Address, EFI_PAGES_TO_SIZE (Pages));
143 }
144
145 return Address;
146}
147
148/**
149 Get the address width supported by current processor.
150
151 @retval 32 If processor is in 32-bit mode.
152 @retval 36-48 If processor is in 64-bit mode.
153
154**/
155UINTN
156GetPhysicalAddressWidth (
157 VOID
158 )
159{
160 UINT32 RegEax;
161
162 if (sizeof (UINTN) == 4) {
163 return 32;
164 }
165
166 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
167 if (RegEax >= CPUID_VIR_PHY_ADDRESS_SIZE) {
168 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &RegEax, NULL, NULL, NULL);
169 RegEax &= 0xFF;
170 if (RegEax > 48) {
171 return 48;
172 }
173
174 return (UINTN)RegEax;
175 }
176
177 return 36;
178}
179
180/**
181 Get the type of top level page table.
182
183 @retval Page512G PML4 paging.
184 @retval Page1G PAE paging.
185
186**/
187PAGE_ATTRIBUTE
188GetPageTableTopLevelType (
189 VOID
190 )
191{
192 MSR_IA32_EFER_REGISTER MsrEfer;
193
194 MsrEfer.Uint64 = AsmReadMsr64 (MSR_CORE_IA32_EFER);
195
196 return (MsrEfer.Bits.LMA == 1) ? Page512G : Page1G;
197}
198
199/**
200 Return page table entry matching the address.
201
202 @param[in] Address The address to be checked.
203 @param[out] PageAttributes The page attribute of the page entry.
204
205 @return The page entry.
206**/
207VOID *
208GetPageTableEntry (
209 IN PHYSICAL_ADDRESS Address,
210 OUT PAGE_ATTRIBUTE *PageAttribute
211 )
212{
213 INTN Level;
214 UINTN Index;
215 UINT64 *PageTable;
216 UINT64 AddressEncMask;
217
218 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
219 PageTable = (UINT64 *)(UINTN)(AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64);
220 for (Level = (INTN)GetPageTableTopLevelType (); Level > 0; --Level) {
221 Index = (UINTN)RShiftU64 (Address, mPageAttributeTable[Level].AddressBitOffset);
222 Index &= PAGING_PAE_INDEX_MASK;
223
224 //
225 // No mapping?
226 //
227 if (PageTable[Index] == 0) {
228 *PageAttribute = PageNone;
229 return NULL;
230 }
231
232 //
233 // Page memory?
234 //
235 if (((PageTable[Index] & IA32_PG_PS) != 0) || (Level == PageMin)) {
236 *PageAttribute = (PAGE_ATTRIBUTE)Level;
237 return &PageTable[Index];
238 }
239
240 //
241 // Page directory or table
242 //
243 PageTable = (UINT64 *)(UINTN)(PageTable[Index] &
244 ~AddressEncMask &
245 PAGING_4K_ADDRESS_MASK_64);
246 }
247
248 *PageAttribute = PageNone;
249 return NULL;
250}
251
252/**
253 This function splits one page entry to smaller page entries.
254
255 @param[in] PageEntry The page entry to be splitted.
256 @param[in] PageAttribute The page attribute of the page entry.
257 @param[in] SplitAttribute How to split the page entry.
258 @param[in] Recursively Do the split recursively or not.
259
260 @retval RETURN_SUCCESS The page entry is splitted.
261 @retval RETURN_INVALID_PARAMETER If target page attribute is invalid
262 @retval RETURN_OUT_OF_RESOURCES No resource to split page entry.
263**/
264RETURN_STATUS
265SplitPage (
266#ifdef VBOX
267 IN UINT64 volatile *PageEntry,
268#else
269 IN UINT64 *PageEntry,
270#endif
271 IN PAGE_ATTRIBUTE PageAttribute,
272 IN PAGE_ATTRIBUTE SplitAttribute,
273 IN BOOLEAN Recursively
274 )
275{
276#ifdef VBOX
277 UINT64 CurrentPageEntry;
278#endif
279 UINT64 BaseAddress;
280 UINT64 *NewPageEntry;
281 UINTN Index;
282 UINT64 AddressEncMask;
283 PAGE_ATTRIBUTE SplitTo;
284
285 if ((SplitAttribute == PageNone) || (SplitAttribute >= PageAttribute)) {
286 ASSERT (SplitAttribute != PageNone);
287 ASSERT (SplitAttribute < PageAttribute);
288 return RETURN_INVALID_PARAMETER;
289 }
290
291 NewPageEntry = AllocatePageTableMemory (1);
292 if (NewPageEntry == NULL) {
293 ASSERT (NewPageEntry != NULL);
294 return RETURN_OUT_OF_RESOURCES;
295 }
296
297 //
298 // One level down each step to achieve more compact page table.
299 //
300 SplitTo = PageAttribute - 1;
301 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
302 mPageAttributeTable[SplitTo].AddressMask;
303#ifdef VBOX
304 CurrentPageEntry = *PageEntry;
305 BaseAddress = CurrentPageEntry &
306#else
307 BaseAddress = *PageEntry &
308#endif
309 ~PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
310 mPageAttributeTable[PageAttribute].AddressMask;
311 for (Index = 0; Index < SIZE_4KB / sizeof (UINT64); Index++) {
312 NewPageEntry[Index] = BaseAddress | AddressEncMask |
313#ifdef VBOX
314 (CurrentPageEntry & PAGE_PROGATE_BITS);
315#else
316 ((*PageEntry) & PAGE_PROGATE_BITS);
317#endif
318
319 if (SplitTo != PageMin) {
320 NewPageEntry[Index] |= IA32_PG_PS;
321 }
322
323 if (Recursively && (SplitTo > SplitAttribute)) {
324 SplitPage (&NewPageEntry[Index], SplitTo, SplitAttribute, Recursively);
325 }
326
327 BaseAddress += mPageAttributeTable[SplitTo].Length;
328 }
329
330#ifdef VBOX
331 SafePageTableEntryWrite64 (PageEntry, CurrentPageEntry,
332 (UINT64)(UINTN)NewPageEntry | AddressEncMask | PAGE_ATTRIBUTE_BITS);
333#else
334 (*PageEntry) = (UINT64)(UINTN)NewPageEntry | AddressEncMask | PAGE_ATTRIBUTE_BITS;
335#endif
336
337 return RETURN_SUCCESS;
338}
339
340/**
341 This function modifies the page attributes for the memory region specified
342 by BaseAddress and Length from their current attributes to the attributes
343 specified by Attributes.
344
345 Caller should make sure BaseAddress and Length is at page boundary.
346
347 @param[in] BaseAddress Start address of a memory region.
348 @param[in] Length Size in bytes of the memory region.
349 @param[in] Attributes Bit mask of attributes to modify.
350
351 @retval RETURN_SUCCESS The attributes were modified for the memory
352 region.
353 @retval RETURN_INVALID_PARAMETER Length is zero; or,
354 Attributes specified an illegal combination
355 of attributes that cannot be set together; or
356 Addressis not 4KB aligned.
357 @retval RETURN_OUT_OF_RESOURCES There are not enough system resources to modify
358 the attributes.
359 @retval RETURN_UNSUPPORTED Cannot modify the attributes of given memory.
360
361**/
362RETURN_STATUS
363EFIAPI
364ConvertMemoryPageAttributes (
365 IN PHYSICAL_ADDRESS BaseAddress,
366 IN UINT64 Length,
367 IN UINT64 Attributes
368 )
369{
370#ifdef VBOX
371 UINT64 volatile *PageEntry;
372 UINT64 CurrentPageEntry;
373#else
374 UINT64 *PageEntry;
375#endif
376 PAGE_ATTRIBUTE PageAttribute;
377 RETURN_STATUS Status;
378 EFI_PHYSICAL_ADDRESS MaximumAddress;
379
380 if ((Length == 0) ||
381 ((BaseAddress & (SIZE_4KB - 1)) != 0) ||
382 ((Length & (SIZE_4KB - 1)) != 0))
383 {
384 ASSERT (Length > 0);
385 ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0);
386 ASSERT ((Length & (SIZE_4KB - 1)) == 0);
387
388 return RETURN_INVALID_PARAMETER;
389 }
390
391 MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_UINT32;
392 if ((BaseAddress > MaximumAddress) ||
393 (Length > MaximumAddress) ||
394 (BaseAddress > MaximumAddress - (Length - 1)))
395 {
396 return RETURN_UNSUPPORTED;
397 }
398
399 //
400 // Below logic is to check 2M/4K page to make sure we do not waste memory.
401 //
402 while (Length != 0) {
403 PageEntry = GetPageTableEntry (BaseAddress, &PageAttribute);
404 if (PageEntry == NULL) {
405 return RETURN_UNSUPPORTED;
406 }
407
408 if (PageAttribute != Page4K) {
409 Status = SplitPage (PageEntry, PageAttribute, Page4K, FALSE);
410 if (RETURN_ERROR (Status)) {
411 return Status;
412 }
413
414 //
415 // Do it again until the page is 4K.
416 //
417 continue;
418 }
419
420 //
421 // Just take care of 'present' bit for Stack Guard.
422 //
423#ifdef VBOX
424 CurrentPageEntry = *PageEntry;
425 if ((CurrentPageEntry & IA32_PG_P) != (Attributes & IA32_PG_P))
426 SafePageTableEntryWrite64 (PageEntry, CurrentPageEntry,
427 (CurrentPageEntry & ~(UINT64)IA32_PG_P) | (Attributes & IA32_PG_P));
428#else
429 if ((Attributes & IA32_PG_P) != 0) {
430 *PageEntry |= (UINT64)IA32_PG_P;
431 } else {
432 *PageEntry &= ~((UINT64)IA32_PG_P);
433 }
434#endif
435
436 //
437 // Convert success, move to next
438 //
439 BaseAddress += SIZE_4KB;
440 Length -= SIZE_4KB;
441 }
442
443 return RETURN_SUCCESS;
444}
445
446/**
447 Get maximum size of page memory supported by current processor.
448
449 @param[in] TopLevelType The type of top level page entry.
450
451 @retval Page1G If processor supports 1G page and PML4.
452 @retval Page2M For all other situations.
453
454**/
455PAGE_ATTRIBUTE
456GetMaxMemoryPage (
457 IN PAGE_ATTRIBUTE TopLevelType
458 )
459{
460 UINT32 RegEax;
461 UINT32 RegEdx;
462
463 if (TopLevelType == Page512G) {
464 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
465 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
466 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
467 if ((RegEdx & BIT26) != 0) {
468 return Page1G;
469 }
470 }
471 }
472
473 return Page2M;
474}
475
476/**
477 Create PML4 or PAE page table.
478
479 @return The address of page table.
480
481**/
482UINTN
483CreatePageTable (
484 VOID
485 )
486{
487 RETURN_STATUS Status;
488 UINTN PhysicalAddressBits;
489 UINTN NumberOfEntries;
490 PAGE_ATTRIBUTE TopLevelPageAttr;
491 UINTN PageTable;
492 PAGE_ATTRIBUTE MaxMemoryPage;
493 UINTN Index;
494 UINT64 AddressEncMask;
495 UINT64 *PageEntry;
496 EFI_PHYSICAL_ADDRESS PhysicalAddress;
497
498 TopLevelPageAttr = (PAGE_ATTRIBUTE)GetPageTableTopLevelType ();
499 PhysicalAddressBits = GetPhysicalAddressWidth ();
500 NumberOfEntries = (UINTN)1 << (PhysicalAddressBits -
501 mPageAttributeTable[TopLevelPageAttr].AddressBitOffset);
502
503 PageTable = (UINTN)AllocatePageTableMemory (1);
504 if (PageTable == 0) {
505 return 0;
506 }
507
508 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
509 AddressEncMask &= mPageAttributeTable[TopLevelPageAttr].AddressMask;
510 MaxMemoryPage = GetMaxMemoryPage (TopLevelPageAttr);
511 PageEntry = (UINT64 *)PageTable;
512
513 PhysicalAddress = 0;
514 for (Index = 0; Index < NumberOfEntries; ++Index) {
515 *PageEntry = PhysicalAddress | AddressEncMask | PAGE_ATTRIBUTE_BITS;
516
517 //
518 // Split the top page table down to the maximum page size supported
519 //
520 if (MaxMemoryPage < TopLevelPageAttr) {
521 Status = SplitPage (PageEntry, TopLevelPageAttr, MaxMemoryPage, TRUE);
522 ASSERT_EFI_ERROR (Status);
523 }
524
525 if (TopLevelPageAttr == Page1G) {
526 //
527 // PDPTE[2:1] (PAE Paging) must be 0. SplitPage() might change them to 1.
528 //
529 *PageEntry &= ~(UINT64)(IA32_PG_RW | IA32_PG_U);
530 }
531
532 PageEntry += 1;
533 PhysicalAddress += mPageAttributeTable[TopLevelPageAttr].Length;
534 }
535
536 return PageTable;
537}
538
539/**
540 Setup page tables and make them work.
541
542**/
543VOID
544EnablePaging (
545 VOID
546 )
547{
548 UINTN PageTable;
549
550 PageTable = CreatePageTable ();
551 ASSERT (PageTable != 0);
552 if (PageTable != 0) {
553 AsmWriteCr3 (PageTable);
554 AsmWriteCr4 (AsmReadCr4 () | BIT5); // CR4.PAE
555 AsmWriteCr0 (AsmReadCr0 () | BIT31); // CR0.PG
556 }
557}
558
559/**
560 Get the base address of current AP's stack.
561
562 This function is called in AP's context and assumes that whole calling stacks
563 (till this function) consumed by AP's wakeup procedure will not exceed 4KB.
564
565 PcdCpuApStackSize must be configured with value taking the Guard page into
566 account.
567
568 @param[in,out] Buffer The pointer to private data buffer.
569
570**/
571VOID
572EFIAPI
573GetStackBase (
574 IN OUT VOID *Buffer
575 )
576{
577 EFI_PHYSICAL_ADDRESS StackBase;
578
579 StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)&StackBase;
580 StackBase += BASE_4KB;
581 StackBase &= ~((EFI_PHYSICAL_ADDRESS)BASE_4KB - 1);
582 StackBase -= PcdGet32 (PcdCpuApStackSize);
583
584 *(EFI_PHYSICAL_ADDRESS *)Buffer = StackBase;
585}
586
587/**
588 Setup stack Guard page at the stack base of each processor. BSP and APs have
589 different way to get stack base address.
590
591**/
592VOID
593SetupStackGuardPage (
594 VOID
595 )
596{
597 EFI_PEI_HOB_POINTERS Hob;
598 EFI_PHYSICAL_ADDRESS StackBase;
599 UINTN NumberOfProcessors;
600 UINTN Bsp;
601 UINTN Index;
602
603 //
604 // One extra page at the bottom of the stack is needed for Guard page.
605 //
606 if (PcdGet32 (PcdCpuApStackSize) <= EFI_PAGE_SIZE) {
607 DEBUG ((DEBUG_ERROR, "PcdCpuApStackSize is not big enough for Stack Guard!\n"));
608 ASSERT (FALSE);
609 }
610
611 MpInitLibGetNumberOfProcessors (&NumberOfProcessors, NULL);
612 MpInitLibWhoAmI (&Bsp);
613 for (Index = 0; Index < NumberOfProcessors; ++Index) {
614 StackBase = 0;
615
616 if (Index == Bsp) {
617 Hob.Raw = GetHobList ();
618 while ((Hob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION, Hob.Raw)) != NULL) {
619 if (CompareGuid (
620 &gEfiHobMemoryAllocStackGuid,
621 &(Hob.MemoryAllocationStack->AllocDescriptor.Name)
622 ))
623 {
624 StackBase = Hob.MemoryAllocationStack->AllocDescriptor.MemoryBaseAddress;
625 break;
626 }
627
628 Hob.Raw = GET_NEXT_HOB (Hob);
629 }
630 } else {
631 //
632 // Ask AP to return is stack base address.
633 //
634 MpInitLibStartupThisAP (GetStackBase, Index, NULL, 0, (VOID *)&StackBase, NULL);
635 }
636
637 ASSERT (StackBase != 0);
638 //
639 // Set Guard page at stack base address.
640 //
641 ConvertMemoryPageAttributes (StackBase, EFI_PAGE_SIZE, 0);
642 DEBUG ((
643 DEBUG_INFO,
644 "Stack Guard set at %lx [cpu%lu]!\n",
645 (UINT64)StackBase,
646 (UINT64)Index
647 ));
648 }
649
650 //
651 // Publish the changes of page table.
652 //
653 CpuFlushTlb ();
654}
655
656/**
657 Enable/setup stack guard for each processor if PcdCpuStackGuard is set to TRUE.
658
659 Doing this in the memory-discovered callback is to make sure the Stack Guard
660 feature to cover as most PEI code as possible.
661
662 @param[in] PeiServices General purpose services available to every PEIM.
663 @param[in] NotifyDescriptor The notification structure this PEIM registered on install.
664 @param[in] Ppi The memory discovered PPI. Not used.
665
666 @retval EFI_SUCCESS The function completed successfully.
667 @retval others There's error in MP initialization.
668**/
669EFI_STATUS
670EFIAPI
671MemoryDiscoveredPpiNotifyCallback (
672 IN EFI_PEI_SERVICES **PeiServices,
673 IN EFI_PEI_NOTIFY_DESCRIPTOR *NotifyDescriptor,
674 IN VOID *Ppi
675 )
676{
677 EFI_STATUS Status;
678 BOOLEAN InitStackGuard;
679 EDKII_MIGRATED_FV_INFO *MigratedFvInfo;
680 EFI_PEI_HOB_POINTERS Hob;
681
682 //
683 // Paging must be setup first. Otherwise the exception TSS setup during MP
684 // initialization later will not contain paging information and then fail
685 // the task switch (for the sake of stack switch).
686 //
687 InitStackGuard = FALSE;
688 Hob.Raw = NULL;
689 if (IsIa32PaeSupported ()) {
690 Hob.Raw = GetFirstGuidHob (&gEdkiiMigratedFvInfoGuid);
691 InitStackGuard = PcdGetBool (PcdCpuStackGuard);
692 }
693
694 if (InitStackGuard || (Hob.Raw != NULL)) {
695 EnablePaging ();
696 }
697
698 Status = InitializeCpuMpWorker ((CONST EFI_PEI_SERVICES **)PeiServices);
699 ASSERT_EFI_ERROR (Status);
700
701 if (InitStackGuard) {
702 SetupStackGuardPage ();
703 }
704
705 while (Hob.Raw != NULL) {
706 MigratedFvInfo = GET_GUID_HOB_DATA (Hob);
707
708 //
709 // Enable #PF exception, so if the code access SPI after disable NEM, it will generate
710 // the exception to avoid potential vulnerability.
711 //
712 ConvertMemoryPageAttributes (MigratedFvInfo->FvOrgBase, MigratedFvInfo->FvLength, 0);
713
714 Hob.Raw = GET_NEXT_HOB (Hob);
715 Hob.Raw = GetNextGuidHob (&gEdkiiMigratedFvInfoGuid, Hob.Raw);
716 }
717
718 CpuFlushTlb ();
719
720 return Status;
721}
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette