|
84 | 84 | #define ACPI_DEVFLAG_LINT1 0x80 |
85 | 85 | #define ACPI_DEVFLAG_ATSDIS 0x10000000 |
86 | 86 |
|
| 87 | +#define LOOP_TIMEOUT 100000 |
87 | 88 | /* |
88 | 89 | * ACPI table definitions |
89 | 90 | * |
@@ -388,6 +389,10 @@ static void iommu_disable(struct amd_iommu *iommu) |
388 | 389 | iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); |
389 | 390 | iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); |
390 | 391 |
|
| 392 | + /* Disable IOMMU GA_LOG */ |
| 393 | + iommu_feature_disable(iommu, CONTROL_GALOG_EN); |
| 394 | + iommu_feature_disable(iommu, CONTROL_GAINT_EN); |
| 395 | + |
391 | 396 | /* Disable IOMMU hardware itself */ |
392 | 397 | iommu_feature_disable(iommu, CONTROL_IOMMU_EN); |
393 | 398 | } |
@@ -673,6 +678,99 @@ static void __init free_ppr_log(struct amd_iommu *iommu) |
673 | 678 | free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); |
674 | 679 | } |
675 | 680 |
|
/*
 * Free the guest vAPIC (GA) log buffer and its tail pointer page,
 * if either was allocated.  Safe to call on a partially-initialized
 * IOMMU; compiled out entirely when interrupt remapping is disabled.
 */
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	if (iommu->ga_log)
		free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));

	if (iommu->ga_log_tail)
		free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}
| 692 | + |
/*
 * Enable the guest vAPIC (GA) log on @iommu and wait for the hardware
 * to report it running.
 *
 * Returns 0 on success (or when the log is already running, or when
 * CONFIG_IRQ_REMAP is disabled), -EINVAL if no GA log was allocated or
 * the hardware did not start the log within the timeout.
 */
static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;

	if (!iommu->ga_log)
		return -EINVAL;

	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	/* Check if already running */
	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
		return 0;

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
		/*
		 * Give the hardware time to set GALOG_RUN instead of
		 * hard-spinning on the MMIO register; without a delay the
		 * LOOP_TIMEOUT iterations can elapse before slow hardware
		 * reports the log as running.  NOTE(review): needs
		 * <linux/delay.h> — confirm it is already included.
		 */
		udelay(10);
	}

	if (i >= LOOP_TIMEOUT)
		return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
	return 0;
}
| 721 | + |
#ifdef CONFIG_IRQ_REMAP
/*
 * Allocate the guest vAPIC (GA) log buffer plus the tail-pointer page
 * and program their addresses into the IOMMU MMIO registers.  A no-op
 * (returning 0) unless vAPIC guest interrupt mode is selected.
 *
 * Returns 0 on success, -ENOMEM if either allocation fails (all
 * partially-allocated buffers are freed before returning).
 */
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					       get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	/* Program log base (with encoded size) and tail address registers */
	entry = (u64)virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	entry = ((u64)virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));

	/* Reset hardware head/tail pointers to the start of the log */
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	return 0;
err_out:
	free_ga_log(iommu);
	/* Allocation failure: report -ENOMEM (not -EINVAL), consistent
	 * with the other log-allocation paths (e.g. alloc_ppr_log). */
	return -ENOMEM;
}
#endif /* CONFIG_IRQ_REMAP */
| 755 | + |
/*
 * Initialize guest-interrupt (GA) support for @iommu.
 *
 * GASup has already been checked against the IVRS table; full vAPIC
 * mode additionally requires the GAMSup feature bit, so fall back to
 * legacy GA mode when it is absent, then set up the GA log.
 *
 * Returns the result of iommu_init_ga_log(), or 0 when interrupt
 * remapping is not configured.
 */
static int iommu_init_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	return iommu_init_ga_log(iommu);
#else
	return 0;
#endif /* CONFIG_IRQ_REMAP */
}
| 773 | + |
676 | 774 | static void iommu_enable_gt(struct amd_iommu *iommu) |
677 | 775 | { |
678 | 776 | if (!iommu_feature(iommu, FEATURE_GT)) |
@@ -1146,6 +1244,7 @@ static void __init free_iommu_one(struct amd_iommu *iommu) |
1146 | 1244 | free_command_buffer(iommu); |
1147 | 1245 | free_event_buffer(iommu); |
1148 | 1246 | free_ppr_log(iommu); |
| 1247 | + free_ga_log(iommu); |
1149 | 1248 | iommu_unmap_mmio_space(iommu); |
1150 | 1249 | } |
1151 | 1250 |
|
@@ -1438,6 +1537,7 @@ static int iommu_init_pci(struct amd_iommu *iommu) |
1438 | 1537 | { |
1439 | 1538 | int cap_ptr = iommu->cap_ptr; |
1440 | 1539 | u32 range, misc, low, high; |
| 1540 | + int ret; |
1441 | 1541 |
|
1442 | 1542 | iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid), |
1443 | 1543 | iommu->devid & 0xff); |
@@ -1494,13 +1594,9 @@ static int iommu_init_pci(struct amd_iommu *iommu) |
1494 | 1594 | if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu)) |
1495 | 1595 | return -ENOMEM; |
1496 | 1596 |
|
1497 | | - /* Note: We have already checked GASup from IVRS table. |
1498 | | - * Now, we need to make sure that GAMSup is set. |
1499 | | - */ |
1500 | | - if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) && |
1501 | | - !iommu_feature(iommu, FEATURE_GAM_VAPIC)) |
1502 | | - amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; |
1503 | | - |
| 1597 | + ret = iommu_init_ga(iommu); |
| 1598 | + if (ret) |
| 1599 | + return ret; |
1504 | 1600 |
|
1505 | 1601 | if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) |
1506 | 1602 | amd_iommu_np_cache = true; |
@@ -1667,6 +1763,8 @@ static int iommu_init_msi(struct amd_iommu *iommu) |
1667 | 1763 | if (iommu->ppr_log != NULL) |
1668 | 1764 | iommu_feature_enable(iommu, CONTROL_PPFINT_EN); |
1669 | 1765 |
|
| 1766 | + iommu_ga_log_enable(iommu); |
| 1767 | + |
1670 | 1768 | return 0; |
1671 | 1769 | } |
1672 | 1770 |
|
|
0 commit comments