diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000000..1b1899293e783 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,5 @@ +# Specify reviewers for all files except the CODEOWNERS file +* @rabara @ngboonkhai @tiensung @mtham2021 @hoyin0722 @tanmaykathpalia @kuhanh @rohangt07 @skarnan + +# Specify reviewers for the CODEOWNERS file +/.github/CODEOWNERS @vvijayak-devops diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000000..6625e0f70e70c --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,13 @@ +Description: + +Impact Analysis: + +What is the scope of the change? + + +Purpose of the change? + + +Reach of the change? + +Regression Test result: diff --git a/.github/workflows/ci-pipeline.yml b/.github/workflows/ci-pipeline.yml new file mode 100644 index 0000000000000..f17a306fce27f --- /dev/null +++ b/.github/workflows/ci-pipeline.yml @@ -0,0 +1,264 @@ +name: altera-linux-ci-pipeline +############################################################################### +# Altera SOCFPGA linux Build Pipeline +############################################################################### + +on: + pull_request: + branches: [ "socfpga-*" ] + types: [opened, synchronize, reopened, review_requested] + + # Allows to run this workflow manually from the Actions tab + workflow_dispatch: + +defaults: + run: + shell: bash + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: +#This action extracts the new commits and performs checkpatch checks on it. 
+ ci-checkpatch: + runs-on: [self-hosted,pg-embedded-runner] + container: + image: amr-registry.sc.altera.com/pse-pswe-software-ba/embedded_coverity:ubuntu20.04.0_6-new-proxy-inbuilt + options: -v /mnt/nfs_share/site/proj/psg:/p/psg + # Check patch only can run at pull request as it is hard to determine the commit during the push event. + env: + GITHUB_EVENT_NAME: ${{ github.event_name }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 # This will pull the whole repo, otherwise cannot compare commit. + ref: ${{ github.event.pull_request.base.ref }} + + - name: Checkpatch.pl + run: | + git config --global --add safe.directory /__w/${{ github.event.repository.name }}/${{ github.event.repository.name }} + git clone https://${{ secrets.GIT_USER }}:${{ secrets.GIT_TOKEN }}@github.com/intel-sandbox/application.devops.github.pr.workflow + cd application.devops.github.pr.workflow + git checkout master + bash workflow_Checkpatch.sh ../ ${{ github.event.repository.name }} ${{ secrets.GITHUB_TOKEN }} ${{ github.event.pull_request.number }} ${{ github.head_ref }} ${{ github.base_ref }} + + ci-coverity: + runs-on: [self-hosted,pg-embedded-runner] + container: + image: amr-registry.sc.altera.com/pse-pswe-software-ba/embedded_coverity:ubuntu20.04.0_6-new-proxy-inbuilt + env: + # Disable the warnings of Security violation. We need to check why this is trigger later, plaintext password. 
+ EC_DISABLE_VAL: project,system + + steps: + - uses: actions/checkout@v4 + - name : Download coverity script + run: | + git config --global credential.helper store + git clone https://${{ secrets.GIT_USER }}:${{ secrets.GIT_TOKEN }}@github.com/intel-sandbox/application.devops.github.pr.workflow + cd application.devops.github.pr.workflow + git checkout master + + - name : Setup Coverity + uses: intel-innersource/frameworks.actions.setup-coverity@v1 + env: + https_proxy: proxy-dmz.intel.com:912 # If the self-hosted runner has issues with the proxy + with: + version: '2022.3.1' + + - name: Run Coverity Build + run: | + cd application.devops.github.pr.workflow + bash workflow_Coverity.sh ${{ github.workspace }} ${{ github.event.repository.name }} linux-socfpga-github-workflow ${{ github.head_ref }} ${{ github.base_ref }} + + - name: linux-coverity-prechange-report + uses: actions/upload-artifact@v4 + with: + name: linux-coverity-prechange-report + path: linux_output_defect_prechange.json + retention-days: 1 + + - name: linux-coverity-postchange-report + uses: actions/upload-artifact@v4 + with: + name: linux-coverity-postchange-report + path: linux_output_defect_postchange.json + retention-days: 1 + + ci-coverity-parse-result: + needs: ci-coverity + runs-on: [self-hosted,pg-embedded-runner] + container: + image: amr-registry.sc.altera.com/pse-pswe-software-ba/embedded_coverity:ubuntu20.04.0_6-new-proxy-inbuilt + + steps: + - name: Download prechange coverity result + uses: actions/download-artifact@v4 + with: + name: linux-coverity-prechange-report + - run: | + ls + pwd + rm -rf /tmp/coverity-result-json + mkdir /tmp/coverity-result-json + cp linux_output_defect_prechange.json /tmp/coverity-result-json/ + ls /tmp/coverity-result-json/* + + - name: Download postchange coverity result + uses: actions/download-artifact@v4 + with: + name: linux-coverity-postchange-report + - run: | + cp linux_output_defect_postchange.json /tmp/coverity-result-json/ + ls 
/tmp/coverity-result-json/* + + - uses: actions/checkout@v4 + - name : Download coverity parser script + run: | + git config --global credential.helper store + git clone https://${{ secrets.GIT_USER }}:${{ secrets.GIT_TOKEN }}@github.com/intel-sandbox/application.devops.github.pr.workflow + cd application.devops.github.pr.workflow + git checkout master + + - name : Run Coverity Parser + run: | + cd application.devops.github.pr.workflow + mv /tmp/coverity-result-json/* . + rm -rf /tmp/coverity-result-json + bash workflow_Coverity_Parser.sh ${{ github.workspace }} ${{ github.event.repository.name }} ${{ secrets.GITHUB_TOKEN }} ${{ github.event.pull_request.number }} + + - name: linux-coverity-report + if: always() + uses: actions/upload-artifact@v4 + with: + name: linux-coverity-report + path: | + linux_output_defect.csv + retention-days: 1 + +#This action performs a build test using an ubuntu container. + ci-build: + # The type of runner that the job will run on + runs-on: [self-hosted,pg-embedded-runner] + container: + image: amr-registry.sc.altera.com/pse-pswe-software-ba/embedded_coverity:ubuntu20.04.0_6-new-proxy-inbuilt + env: + # Disable the warnings of Security violation. We need to check why this is trigger later, plaintext password. 
+ EC_DISABLE_VAL: project,system + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + - uses: actions/checkout@v4 + - name: Linux Build + run: | + DIR=$(pwd) + git config --global credential.helper store + git clone https://${{ secrets.GIT_USER }}:${{ secrets.GIT_TOKEN }}@github.com/intel-sandbox/application.devops.github.pr.workflow + cd application.devops.github.pr.workflow + git checkout master + bash workflow_Build.sh $DIR ${{ github.event.repository.name }} linux-socfpga-github-workflow + + - name: linux-image + uses: actions/upload-artifact@v4 + with: + name: linux-image + path: ${{ github.workspace }}/linux_output.tar + retention-days: 1 + +#This action runs the simic regression test + ci-verification: + runs-on: [self-hosted,linux-arc-runner] + needs: ci-build + env: + # Disable the warnings of Security violation. We need to check why this is trigger later, plaintext password. + EC_DISABLE_VAL: project,system + GITHUB_EVENT_NAME: ${{ github.event_name }} + steps: + - name: Cleaning workspace + run: | + rm -rf application.devops.github.pr.workflow + rm -rf ${GITHUB_WORKSPACE}/.git* + + - name: Download linux artifacts + uses: actions/download-artifact@v4 + with: + name: linux-image + - run: | + tar -xvf linux_output.tar + ls + pwd + + - name: Simics Regtest + run: | + git clone https://github.com/intel-sandbox/application.devops.github.pr.workflow + cd application.devops.github.pr.workflow + git checkout master + bash workflow_Regtest.sh ${{ github.workspace }} ${{ github.event.repository.name }} ${{ github.event.pull_request.number }} + +#This action runs the on-board regression test + ci-on-board-verification: + runs-on: [self-hosted,linux-arc-runner] + needs: ci-build + env: + # Disable the warnings of Security violation. We need to check why this is trigger later, plaintext password. 
+ EC_DISABLE_VAL: project,system + GITHUB_EVENT_NAME: ${{ github.event_name }} + steps: + - name: Cleaning workspace + run: | + rm -rf application.devops.github.pr.workflow + rm -rf ${GITHUB_WORKSPACE}/.git* + + - name: Download linux artifacts + uses: actions/download-artifact@v4 + with: + name: linux-image + - run: | + tar -xvf linux_output.tar + ls + pwd + + - name: On-board Regtest + run: | + git clone https://github.com/intel-sandbox/application.devops.github.pr.workflow + cd application.devops.github.pr.workflow + git checkout master + bash workflow_On_Board_Regtest.sh ${{ github.workspace }} ${{ github.event.repository.name }} ${{ github.event.pull_request.number }} + +#This action checks if the branch has been released and if so block PR until approved by repo Admin + ci-check-tag: + runs-on: [self-hosted,pg-embedded-runner] + container: + image: amr-registry.sc.altera.com/pse-pswe-software-ba/embedded_coverity:ubuntu20.04.0_6-new-proxy-inbuilt + options: -v /mnt/nfs_share/site/proj/psg:/p/psg + env: + GITHUB_EVENT_NAME: ${{ github.event_name }} + steps: + - uses: actions/checkout@v4 + - name : Download check_tag script + run: | + git config --global credential.helper store + git clone https://${{ secrets.GIT_USER }}:${{ secrets.GIT_TOKEN }}@github.com/intel-sandbox/application.devops.github.pr.workflow + cd application.devops.github.pr.workflow + git checkout master + + - name: Check_Tag + run: | + cd application.devops.github.pr.workflow + bash workflow_Check_Tag.sh ${{ github.workspace }} ${{ github.event.repository.name }} ${{ github.event.pull_request.number }} ${{ github.base_ref }} ${{ secrets.GITHUB_TOKEN }} + +#This is the commit gate that ensures all required checks are passing before the PR is allowed to be merged. 
+ ci-check-pr-status: + runs-on: [self-hosted,pg-embedded-runner] + container: + image: amr-registry.sc.altera.com/pse-pswe-software-ba/embedded_coverity:ubuntu20.04.0_6-new-proxy-inbuilt + options: -v /mnt/nfs_share/site/proj/psg:/p/psg + needs: [ci-coverity-parse-result, ci-verification, ci-checkpatch, ci-check-tag, ci-on-board-verification] + steps: + - name: On failure + if: ${{ needs.ci-coverity-parse-result.result != 'success' || needs.ci-verification.result != 'success' || needs.ci-checkpatch.result != 'success' || needs.ci-check-tag.result != 'success' || needs.ci-on-board-verification.result != 'success'}} + run: | + echo "Status check has failed." + exit 1 diff --git a/Documentation/devicetree/bindings/arm/altera/fpga-dma.txt b/Documentation/devicetree/bindings/arm/altera/fpga-dma.txt new file mode 100644 index 0000000000000..a08e9010d4e52 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/altera/fpga-dma.txt @@ -0,0 +1,25 @@ +Altera FPGA DMA FIFO driver + +Required properties: +- compatible : "altr,fpga-dma"; + +- reg : CSR and DATA register resource definitions (address and length). + +- reg-names : Names of the register resources. Should be "csr", "data". + +- dmas : DMA request lines. Should be <&pdma 0 &pdma 1> + +- dma-names : Names of DMA request lines. Should be "tx", "rx". 
+ +Example: + + fpgadma: fifo { + #address-cells = <1>; + #size-cells = <1>; + compatible = "altr,fpga-dma"; + reg = <0xff230000 0x20>, <0xc0011000 0x400>; + reg-names = "csr", "data"; + dmas = <&pdma 0 &pdma 1>; + dma-names = "tx", "rx"; + }; + diff --git a/Documentation/devicetree/bindings/arm/intel,socfpga.yaml b/Documentation/devicetree/bindings/arm/intel,socfpga.yaml index 2ee0c740eb56d..6b9bf11cb5259 100644 --- a/Documentation/devicetree/bindings/arm/intel,socfpga.yaml +++ b/Documentation/devicetree/bindings/arm/intel,socfpga.yaml @@ -20,12 +20,18 @@ properties: - intel,n5x-socdk - intel,socfpga-agilex-n6000 - intel,socfpga-agilex-socdk + - intel,socfpga-agilex5-socdk - const: intel,socfpga-agilex - description: Agilex5 boards items: - enum: - intel,socfpga-agilex5-socdk - const: intel,socfpga-agilex5 + - description: Agilex3 boards + items: + - enum: + - intel,socfpga-agilex3-socdk + - const: intel,socfpga-agilex3 additionalProperties: true diff --git a/Documentation/devicetree/bindings/clock/intel,agilex5.yaml b/Documentation/devicetree/bindings/clock/intel,agilex5.yaml new file mode 100644 index 0000000000000..035feba8cdd1c --- /dev/null +++ b/Documentation/devicetree/bindings/clock/intel,agilex5.yaml @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/intel,agilex5.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Intel SoCFPGA Agilex5 platform clock controller binding + +maintainers: + - Teh Wen Ping + +description: + The Intel Agilex5 Clock controller is an integrated clock controller, which + generates and supplies to all modules. 
+ +properties: + compatible: + const: intel,agilex5-clkmgr + + '#clock-cells': + const: 1 + + reg: + maxItems: 1 + +required: + - compatible + - reg + - '#clock-cells' + +additionalProperties: false + +examples: + # Clock controller node: + - | + clkmgr: clock-controller@10d10000 { + compatible = "intel,agilex5-clkmgr"; + reg = <0x10d10000 0x1000>; + #clock-cells = <1>; + }; +... diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml index 525f5f3932f54..f94e89cafef34 100644 --- a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml +++ b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml @@ -76,6 +76,11 @@ properties: $ref: /schemas/types.yaml#/definitions/uint32 enum: [0, 1, 2, 3, 4, 5, 6] + snps,dma-40-bit-mask: + description: | + Sets dma bit-mask to 40 bits + type: boolean + snps,priority: description: | Channel priority specifier associated with the DMA channels. diff --git a/Documentation/devicetree/bindings/edac/altr,socfpga-ecc-manager.yaml b/Documentation/devicetree/bindings/edac/altr,socfpga-ecc-manager.yaml new file mode 100644 index 0000000000000..fbed3278b9458 --- /dev/null +++ b/Documentation/devicetree/bindings/edac/altr,socfpga-ecc-manager.yaml @@ -0,0 +1,384 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +# Copyright (C) 2025 Altera Corporation +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/edac/altr,socfpga-ecc-manager.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Altera SoCFPGA ECC Manager + +maintainers: + - Matthew Gerlach + +description: + This binding describes the device tree nodes required for the Altera SoCFPGA + ECC Manager for the Cyclone5, Arria5, Arria10, Stratix10, and Agilex chip + families. 
+ +properties: + + compatible: + oneOf: + - items: + - const: altr,socfpga-s10-ecc-manager + - const: altr,socfpga-a10-ecc-manager + - const: altr,socfpga-a10-ecc-manager + - const: altr,socfpga-ecc-manager + + "#address-cells": + const: 1 + + "#size-cells": + const: 1 + + interrupts: + minItems: 1 + maxItems: 7 + + interrupt-names: + items: + - const: global_sbe + - enum: [global_dbe, io96b0, io96b1, sdm_qspi_sbe, sdm_qspi_dbe, sdm_seu] + - enum: [global_dbe, io96b0, io96b1, sdm_qspi_sbe, sdm_qspi_dbe, sdm_seu] + - enum: [global_dbe, io96b0, io96b1, sdm_qspi_sbe, sdm_qspi_dbe, sdm_seu] + - enum: [global_dbe, io96b0, io96b1, sdm_qspi_sbe, sdm_qspi_dbe, sdm_seu] + - enum: [global_dbe, io96b0, io96b1, sdm_qspi_sbe, sdm_qspi_dbe, sdm_seu] + - enum: [global_dbe, io96b0, io96b1, sdm_qspi_sbe, sdm_qspi_dbe, sdm_seu] + + interrupt-controller: true + + "#interrupt-cells": + const: 2 + + ranges: true + + altr,sysmgr-syscon: + $ref: /schemas/types.yaml#/definitions/phandle + description: phandle to Stratix10 System Manager Block with the ECC manager registers + + sdramedac: + type: object + additionalProperties: false + + properties: + compatible: + enum: + - altr,sdram-edac-a10 + - altr,sdram-edac-s10 + + interrupts: + minItems: 1 + maxItems: 2 + + altr,sdr-syscon: + $ref: /schemas/types.yaml#/definitions/phandle + description: phandle to SDRAM parent + + required: + - compatible + - interrupts + - altr,sdr-syscon + +patternProperties: + "^ocram-ecc@[a-f0-9]+$": + type: object + additionalProperties: false + + properties: + compatible: + oneOf: + - items: + - const: altr,socfpga-s10-ocram-ecc + - const: altr,socfpga-a10-ocram-ecc + - const: altr,socfpga-a10-ocram-ecc + - const: altr,socfpga-ocram-ecc + + reg: + maxItems: 1 + + interrupts: + minItems: 1 + maxItems: 2 + + iram: + $ref: /schemas/types.yaml#/definitions/phandle + description: phandle to OCRAM parent + + altr,ecc-parent: + $ref: /schemas/types.yaml#/definitions/phandle + description: phandle to OCRAM parent + + 
required: + - compatible + - reg + - interrupts + + "^(usb[0-9]-(ecc|[tr]x-ecc|cache-ecc)@[a-f0-9]+)$": + type: object + additionalProperties: false + + properties: + compatible: + oneOf: + - items: + - const: altr,socfpga-s10-usb-ecc + - const: altr,socfpga-usb-ecc + - const: altr,socfpga-usb-ecc + - const: altr,socfpga-usb3-ecc + + reg: + maxItems: 1 + + interrupts: + minItems: 1 + maxItems: 2 + + altr,ecc-parent: + $ref: /schemas/types.yaml#/definitions/phandle + description: phandle to USB parent + + required: + - compatible + - reg + - interrupts + - altr,ecc-parent + + "^emac[0-9]-[t,r]x-ecc@[a-f0-9]+$": + type: object + additionalProperties: false + + properties: + compatible: + oneOf: + - items: + - const: altr,socfpga-s10-eth-mac-ecc + - const: altr,socfpga-eth-mac-ecc + - const: altr,socfpga-eth-mac-ecc + + reg: + maxItems: 1 + + interrupts: + minItems: 1 + maxItems: 2 + + altr,ecc-parent: + $ref: /schemas/types.yaml#/definitions/phandle + description: phandle to ethernet parent + + required: + - compatible + - reg + - interrupts + - altr,ecc-parent + + "^sdmmc[a-f]-ecc@[a-f0-9]+$": + type: object + additionalProperties: false + + properties: + compatible: + oneOf: + - items: + - const: altr,socfpga-s10-sdmmc-ecc + - const: altr,socfpga-sdmmc-ecc + - const: altr,socfpga-sdmmc-ecc + + reg: + maxItems: 1 + + interrupts: + minItems: 2 + maxItems: 4 + + altr,ecc-parent: + $ref: /schemas/types.yaml#/definitions/phandle + description: phandle to SD/MMC parent + + required: + - compatible + - reg + - interrupts + - altr,ecc-parent + + "^sdm-qspi-ecc@[a-f0-9]+$": + type: object + additionalProperties: false + + properties: + compatible: + items: + - const: altr,socfpga-sdm-qspi-ecc + + reg: + maxItems: 1 + + required: + - compatible + - reg + + "^io96b[0-9]-ecc@[a-f0-9]+$": + type: object + additionalProperties: false + + properties: + compatible: + items: + - enum: + - altr,socfpga-io96b0-ecc + - altr,socfpga-io96b1-ecc + + reg: + maxItems: 1 + + required: + - 
compatible + - reg + + "^cram-seu@[a-f0-9]+$": + type: object + additionalProperties: false + + properties: + compatible: + items: + - const: altr,socfpga-cram-seu + + reg: + maxItems: 1 + + required: + - compatible + - reg + + "^l2-ecc@[a-f0-9]+$": + type: object + additionalProperties: false + + properties: + compatible: + enum: + - altr,socfpga-a10-l2-ecc + - altr,socfpga-l2-ecc + + reg: + maxItems: 1 + + interrupts: + maxItems: 2 + + required: + - compatible + - reg + - interrupts + + "^dma-ecc@[a-f0-9]+$": + type: object + additionalProperties: false + + properties: + compatible: + const: altr,socfpga-dma-ecc + reg: + maxItems: 1 + + interrupts: + maxItems: 2 + + altr,ecc-parent: + $ref: /schemas/types.yaml#/definitions/phandle + description: phandle to SD/MMC parent + + required: + - compatible + - reg + - interrupts + - altr,ecc-parent + +if: + properties: + compatible: + contains: + const: altr,socfpga-ecc-manager +then: + required: + - compatible + - "#address-cells" + - "#size-cells" + - ranges + +else: + required: + - compatible + - "#address-cells" + - "#size-cells" + - interrupts + - interrupt-controller + - "#interrupt-cells" + - ranges + - altr,sysmgr-syscon + +additionalProperties: false + +examples: + - | + #include + #include + eccmgr { + compatible = "altr,socfpga-s10-ecc-manager", + "altr,socfpga-a10-ecc-manager"; + altr,sysmgr-syscon = <&sysmgr>; + #address-cells = <1>; + #size-cells = <1>; + interrupts = ; + interrupt-controller; + #interrupt-cells = <2>; + ranges; + + sdramedac { + compatible = "altr,sdram-edac-s10"; + altr,sdr-syscon = <&sdr>; + interrupts = <16 IRQ_TYPE_LEVEL_HIGH>; + }; + + ocram-ecc@ff8cc000 { + compatible = "altr,socfpga-s10-ocram-ecc", + "altr,socfpga-a10-ocram-ecc"; + reg = <0xff8cc000 0x100>; + altr,ecc-parent = <&ocram>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH>; + }; + + usb0-ecc@ff8c4000 { + compatible = "altr,socfpga-s10-usb-ecc", + "altr,socfpga-usb-ecc"; + reg = <0xff8c4000 0x100>; + altr,ecc-parent = <&usb0>; + 
interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; + }; + + emac0-rx-ecc@ff8c0000 { + compatible = "altr,socfpga-s10-eth-mac-ecc", + "altr,socfpga-eth-mac-ecc"; + reg = <0xff8c0000 0x100>; + altr,ecc-parent = <&gmac0>; + interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; + }; + + emac0-tx-ecc@ff8c0400 { + compatible = "altr,socfpga-s10-eth-mac-ecc", + "altr,socfpga-eth-mac-ecc"; + reg = <0xff8c0400 0x100>; + altr,ecc-parent = <&gmac0>; + interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; + }; + + sdmmca-ecc@ff8c8c00 { + compatible = "altr,socfpga-s10-sdmmc-ecc", + "altr,socfpga-sdmmc-ecc"; + reg = <0xff8c8c00 0x100>; + altr,ecc-parent = <&mmc>; + interrupts = <14 IRQ_TYPE_LEVEL_HIGH>, + <15 IRQ_TYPE_LEVEL_HIGH>; + }; + }; diff --git a/Documentation/devicetree/bindings/edac/socfpga-eccmgr.txt b/Documentation/devicetree/bindings/edac/socfpga-eccmgr.txt deleted file mode 100644 index 8f52206cfd2a1..0000000000000 --- a/Documentation/devicetree/bindings/edac/socfpga-eccmgr.txt +++ /dev/null @@ -1,383 +0,0 @@ -Altera SoCFPGA ECC Manager -This driver uses the EDAC framework to implement the SOCFPGA ECC Manager. -The ECC Manager counts and corrects single bit errors and counts/handles -double bit errors which are uncorrectable. - -Cyclone5 and Arria5 ECC Manager -Required Properties: -- compatible : Should be "altr,socfpga-ecc-manager" -- #address-cells: must be 1 -- #size-cells: must be 1 -- ranges : standard definition, should translate from local addresses - -Subcomponents: - -L2 Cache ECC -Required Properties: -- compatible : Should be "altr,socfpga-l2-ecc" -- reg : Address and size for ECC error interrupt clear registers. -- interrupts : Should be single bit error interrupt, then double bit error - interrupt. Note the rising edge type. - -On Chip RAM ECC -Required Properties: -- compatible : Should be "altr,socfpga-ocram-ecc" -- reg : Address and size for ECC error interrupt clear registers. -- iram : phandle to On-Chip RAM definition. 
-- interrupts : Should be single bit error interrupt, then double bit error - interrupt. Note the rising edge type. - -Example: - - eccmgr: eccmgr@ffd08140 { - compatible = "altr,socfpga-ecc-manager"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - - l2-ecc@ffd08140 { - compatible = "altr,socfpga-l2-ecc"; - reg = <0xffd08140 0x4>; - interrupts = <0 36 1>, <0 37 1>; - }; - - ocram-ecc@ffd08144 { - compatible = "altr,socfpga-ocram-ecc"; - reg = <0xffd08144 0x4>; - iram = <&ocram>; - interrupts = <0 178 1>, <0 179 1>; - }; - }; - -Arria10 SoCFPGA ECC Manager -The Arria10 SoC ECC Manager handles the IRQs for each peripheral -in a shared register instead of individual IRQs like the Cyclone5 -and Arria5. Therefore the device tree is different as well. - -Required Properties: -- compatible : Should be "altr,socfpga-a10-ecc-manager" -- altr,sysgr-syscon : phandle to Arria10 System Manager Block - containing the ECC manager registers. -- #address-cells: must be 1 -- #size-cells: must be 1 -- interrupts : Should be single bit error interrupt, then double bit error - interrupt. -- interrupt-controller : boolean indicator that ECC Manager is an interrupt controller -- #interrupt-cells : must be set to 2. -- ranges : standard definition, should translate from local addresses - -Subcomponents: - -L2 Cache ECC -Required Properties: -- compatible : Should be "altr,socfpga-a10-l2-ecc" -- reg : Address and size for ECC error interrupt clear registers. -- interrupts : Should be single bit error interrupt, then double bit error - interrupt, in this order. - -On-Chip RAM ECC -Required Properties: -- compatible : Should be "altr,socfpga-a10-ocram-ecc" -- reg : Address and size for ECC block registers. -- interrupts : Should be single bit error interrupt, then double bit error - interrupt, in this order. - -Ethernet FIFO ECC -Required Properties: -- compatible : Should be "altr,socfpga-eth-mac-ecc" -- reg : Address and size for ECC block registers. 
-- altr,ecc-parent : phandle to parent Ethernet node. -- interrupts : Should be single bit error interrupt, then double bit error - interrupt, in this order. - -NAND FIFO ECC -Required Properties: -- compatible : Should be "altr,socfpga-nand-ecc" -- reg : Address and size for ECC block registers. -- altr,ecc-parent : phandle to parent NAND node. -- interrupts : Should be single bit error interrupt, then double bit error - interrupt, in this order. - -DMA FIFO ECC -Required Properties: -- compatible : Should be "altr,socfpga-dma-ecc" -- reg : Address and size for ECC block registers. -- altr,ecc-parent : phandle to parent DMA node. -- interrupts : Should be single bit error interrupt, then double bit error - interrupt, in this order. - -USB FIFO ECC -Required Properties: -- compatible : Should be "altr,socfpga-usb-ecc" -- reg : Address and size for ECC block registers. -- altr,ecc-parent : phandle to parent USB node. -- interrupts : Should be single bit error interrupt, then double bit error - interrupt, in this order. - -QSPI FIFO ECC -Required Properties: -- compatible : Should be "altr,socfpga-qspi-ecc" -- reg : Address and size for ECC block registers. -- altr,ecc-parent : phandle to parent QSPI node. -- interrupts : Should be single bit error interrupt, then double bit error - interrupt, in this order. - -SDMMC FIFO ECC -Required Properties: -- compatible : Should be "altr,socfpga-sdmmc-ecc" -- reg : Address and size for ECC block registers. -- altr,ecc-parent : phandle to parent SD/MMC node. -- interrupts : Should be single bit error interrupt, then double bit error - interrupt, in this order for port A, and then single bit error interrupt, - then double bit error interrupt in this order for port B. 
- -Example: - - eccmgr: eccmgr@ffd06000 { - compatible = "altr,socfpga-a10-ecc-manager"; - altr,sysmgr-syscon = <&sysmgr>; - #address-cells = <1>; - #size-cells = <1>; - interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>, - <0 0 IRQ_TYPE_LEVEL_HIGH>; - interrupt-controller; - #interrupt-cells = <2>; - ranges; - - l2-ecc@ffd06010 { - compatible = "altr,socfpga-a10-l2-ecc"; - reg = <0xffd06010 0x4>; - interrupts = <0 IRQ_TYPE_LEVEL_HIGH>, - <32 IRQ_TYPE_LEVEL_HIGH>; - }; - - ocram-ecc@ff8c3000 { - compatible = "altr,socfpga-a10-ocram-ecc"; - reg = <0xff8c3000 0x90>; - interrupts = <1 IRQ_TYPE_LEVEL_HIGH>, - <33 IRQ_TYPE_LEVEL_HIGH> ; - }; - - emac0-rx-ecc@ff8c0800 { - compatible = "altr,socfpga-eth-mac-ecc"; - reg = <0xff8c0800 0x400>; - altr,ecc-parent = <&gmac0>; - interrupts = <4 IRQ_TYPE_LEVEL_HIGH>, - <36 IRQ_TYPE_LEVEL_HIGH>; - }; - - emac0-tx-ecc@ff8c0c00 { - compatible = "altr,socfpga-eth-mac-ecc"; - reg = <0xff8c0c00 0x400>; - altr,ecc-parent = <&gmac0>; - interrupts = <5 IRQ_TYPE_LEVEL_HIGH>, - <37 IRQ_TYPE_LEVEL_HIGH>; - }; - - nand-buf-ecc@ff8c2000 { - compatible = "altr,socfpga-nand-ecc"; - reg = <0xff8c2000 0x400>; - altr,ecc-parent = <&nand>; - interrupts = <11 IRQ_TYPE_LEVEL_HIGH>, - <43 IRQ_TYPE_LEVEL_HIGH>; - }; - - nand-rd-ecc@ff8c2400 { - compatible = "altr,socfpga-nand-ecc"; - reg = <0xff8c2400 0x400>; - altr,ecc-parent = <&nand>; - interrupts = <13 IRQ_TYPE_LEVEL_HIGH>, - <45 IRQ_TYPE_LEVEL_HIGH>; - }; - - nand-wr-ecc@ff8c2800 { - compatible = "altr,socfpga-nand-ecc"; - reg = <0xff8c2800 0x400>; - altr,ecc-parent = <&nand>; - interrupts = <12 IRQ_TYPE_LEVEL_HIGH>, - <44 IRQ_TYPE_LEVEL_HIGH>; - }; - - dma-ecc@ff8c8000 { - compatible = "altr,socfpga-dma-ecc"; - reg = <0xff8c8000 0x400>; - altr,ecc-parent = <&pdma>; - interrupts = <10 IRQ_TYPE_LEVEL_HIGH>, - <42 IRQ_TYPE_LEVEL_HIGH>; - - usb0-ecc@ff8c8800 { - compatible = "altr,socfpga-usb-ecc"; - reg = <0xff8c8800 0x400>; - altr,ecc-parent = <&usb0>; - interrupts = <2 IRQ_TYPE_LEVEL_HIGH>, - <34 
IRQ_TYPE_LEVEL_HIGH>; - }; - - qspi-ecc@ff8c8400 { - compatible = "altr,socfpga-qspi-ecc"; - reg = <0xff8c8400 0x400>; - altr,ecc-parent = <&qspi>; - interrupts = <14 IRQ_TYPE_LEVEL_HIGH>, - <46 IRQ_TYPE_LEVEL_HIGH>; - }; - - sdmmc-ecc@ff8c2c00 { - compatible = "altr,socfpga-sdmmc-ecc"; - reg = <0xff8c2c00 0x400>; - altr,ecc-parent = <&mmc>; - interrupts = <15 IRQ_TYPE_LEVEL_HIGH>, - <47 IRQ_TYPE_LEVEL_HIGH>, - <16 IRQ_TYPE_LEVEL_HIGH>, - <48 IRQ_TYPE_LEVEL_HIGH>; - }; - }; - -Stratix10 SoCFPGA ECC Manager (ARM64) -The Stratix10 SoC ECC Manager handles the IRQs for each peripheral -in a shared register similar to the Arria10. However, Stratix10 ECC -requires access to registers that can only be read from Secure Monitor -with SMC calls. Therefore the device tree is slightly different. Note -that only 1 interrupt is sent in Stratix10 because the double bit errors -are treated as SErrors in ARM64 instead of IRQs in ARM32. - -Required Properties: -- compatible : Should be "altr,socfpga-s10-ecc-manager" -- altr,sysgr-syscon : phandle to Stratix10 System Manager Block - containing the ECC manager registers. -- interrupts : Should be single bit error interrupt. -- interrupt-controller : boolean indicator that ECC Manager is an interrupt controller -- #interrupt-cells : must be set to 2. -- #address-cells: must be 1 -- #size-cells: must be 1 -- ranges : standard definition, should translate from local addresses - -Subcomponents: - -SDRAM ECC -Required Properties: -- compatible : Should be "altr,sdram-edac-s10" -- interrupts : Should be single bit error interrupt. - -On-Chip RAM ECC -Required Properties: -- compatible : Should be "altr,socfpga-s10-ocram-ecc" -- reg : Address and size for ECC block registers. -- altr,ecc-parent : phandle to parent OCRAM node. -- interrupts : Should be single bit error interrupt. - -Ethernet FIFO ECC -Required Properties: -- compatible : Should be "altr,socfpga-s10-eth-mac-ecc" -- reg : Address and size for ECC block registers. 
-- altr,ecc-parent : phandle to parent Ethernet node. -- interrupts : Should be single bit error interrupt. - -NAND FIFO ECC -Required Properties: -- compatible : Should be "altr,socfpga-s10-nand-ecc" -- reg : Address and size for ECC block registers. -- altr,ecc-parent : phandle to parent NAND node. -- interrupts : Should be single bit error interrupt. - -DMA FIFO ECC -Required Properties: -- compatible : Should be "altr,socfpga-s10-dma-ecc" -- reg : Address and size for ECC block registers. -- altr,ecc-parent : phandle to parent DMA node. -- interrupts : Should be single bit error interrupt. - -USB FIFO ECC -Required Properties: -- compatible : Should be "altr,socfpga-s10-usb-ecc" -- reg : Address and size for ECC block registers. -- altr,ecc-parent : phandle to parent USB node. -- interrupts : Should be single bit error interrupt. - -SDMMC FIFO ECC -Required Properties: -- compatible : Should be "altr,socfpga-s10-sdmmc-ecc" -- reg : Address and size for ECC block registers. -- altr,ecc-parent : phandle to parent SD/MMC node. -- interrupts : Should be single bit error interrupt for port A - and then single bit error interrupt for port B. 
- -Example: - - eccmgr { - compatible = "altr,socfpga-s10-ecc-manager"; - altr,sysmgr-syscon = <&sysmgr>; - #address-cells = <1>; - #size-cells = <1>; - interrupts = <0 15 4>; - interrupt-controller; - #interrupt-cells = <2>; - ranges; - - sdramedac { - compatible = "altr,sdram-edac-s10"; - interrupts = <16 IRQ_TYPE_LEVEL_HIGH>; - }; - - ocram-ecc@ff8cc000 { - compatible = "altr,socfpga-s10-ocram-ecc"; - reg = ; - altr,ecc-parent = <&ocram>; - interrupts = <1 IRQ_TYPE_LEVEL_HIGH>; - }; - - emac0-rx-ecc@ff8c0000 { - compatible = "altr,socfpga-s10-eth-mac-ecc"; - reg = <0xff8c0000 0x100>; - altr,ecc-parent = <&gmac0>; - interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; - }; - - emac0-tx-ecc@ff8c0400 { - compatible = "altr,socfpga-s10-eth-mac-ecc"; - reg = <0xff8c0400 0x100>; - altr,ecc-parent = <&gmac0>; - interrupts = <5 IRQ_TYPE_LEVEL_HIGH>' - }; - - nand-buf-ecc@ff8c8000 { - compatible = "altr,socfpga-s10-nand-ecc"; - reg = <0xff8c8000 0x100>; - altr,ecc-parent = <&nand>; - interrupts = <11 IRQ_TYPE_LEVEL_HIGH>; - }; - - nand-rd-ecc@ff8c8400 { - compatible = "altr,socfpga-s10-nand-ecc"; - reg = <0xff8c8400 0x100>; - altr,ecc-parent = <&nand>; - interrupts = <13 IRQ_TYPE_LEVEL_HIGH>; - }; - - nand-wr-ecc@ff8c8800 { - compatible = "altr,socfpga-s10-nand-ecc"; - reg = <0xff8c8800 0x100>; - altr,ecc-parent = <&nand>; - interrupts = <12 IRQ_TYPE_LEVEL_HIGH>; - }; - - dma-ecc@ff8c9000 { - compatible = "altr,socfpga-s10-dma-ecc"; - reg = <0xff8c9000 0x100>; - altr,ecc-parent = <&pdma>; - interrupts = <10 IRQ_TYPE_LEVEL_HIGH>; - - usb0-ecc@ff8c4000 { - compatible = "altr,socfpga-s10-usb-ecc"; - reg = <0xff8c4000 0x100>; - altr,ecc-parent = <&usb0>; - interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; - }; - - sdmmc-ecc@ff8c8c00 { - compatible = "altr,socfpga-s10-sdmmc-ecc"; - reg = <0xff8c8c00 0x100>; - altr,ecc-parent = <&mmc>; - interrupts = <14 IRQ_TYPE_LEVEL_HIGH>, - <15 IRQ_TYPE_LEVEL_HIGH>; - }; - }; diff --git a/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt 
b/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt index 6eff1afd8daf9..dceba40dd57e9 100644 --- a/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt +++ b/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt @@ -23,7 +23,7 @@ Required properties: The svc node has the following mandatory properties, must be located under the firmware node. -- compatible: "intel,stratix10-svc" or "intel,agilex-svc" +- compatible: "intel,stratix10-svc" or "intel,agilex-svc" or "intel,agilex5-svc" - method: smc or hvc smc - Secure Monitor Call hvc - Hypervisor Call @@ -32,6 +32,15 @@ the firmware node. Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt for details +Optional properties: +------------------- +The svc node has the following optional properties, must be located under +the firmware node. + +- interrupts: specifies the interrupt number for mailbox doorbell interrupt. + +- altr,smmu_enable_quirk: set when SMMU is enabled for the platform. + Example: ------- @@ -53,5 +62,6 @@ Example: compatible = "intel,stratix10-svc"; method = "smc"; memory-region = <&service_reserved>; + interrupts = ; }; }; diff --git a/Documentation/devicetree/bindings/fpga/altera-partial-reconfig.txt b/Documentation/devicetree/bindings/fpga/altera-partial-reconfig.txt new file mode 100644 index 0000000000000..bbbb9cdb3da7d --- /dev/null +++ b/Documentation/devicetree/bindings/fpga/altera-partial-reconfig.txt @@ -0,0 +1,12 @@ +Altera Partial Reconfiguration IP Core + +Required properties: +- compatible : should contain "altr,pr-ip-core" +- reg : base address and size for memory mapped io. 
+ +Example: + + fpga_mgr: fpga-mgr@ff20c000 { + compatible = "altr,pr-ip-core"; + reg = <0xff20c000 0x8>; + }; diff --git a/Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt b/Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt index 0f874137ca469..fc23b48743ac0 100644 --- a/Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt +++ b/Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt @@ -5,7 +5,13 @@ The fpga_mgr node has the following mandatory property, must be located under firmware/svc node. - compatible : should contain "intel,stratix10-soc-fpga-mgr" or - "intel,agilex-soc-fpga-mgr" + "intel,agilex-soc-fpga-mgr" or "intel,agilex5-soc-fpga-mgr" + +Optional properties: +The fpga_mgr node has the following optional properties, must be located under +the firmware/svc node. + +- altr,smmu_enable_quirk: set when SMMU is enabled for the platform. Example: diff --git a/Documentation/devicetree/bindings/hwmon/intel,soc64-hwmon.yaml b/Documentation/devicetree/bindings/hwmon/intel,soc64-hwmon.yaml new file mode 100644 index 0000000000000..b17075fae3557 --- /dev/null +++ b/Documentation/devicetree/bindings/hwmon/intel,soc64-hwmon.yaml @@ -0,0 +1,285 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/hwmon/intel,soc64-hwmon.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Intel Hardware monitor SOC FPGA + +maintainers: + - Ang Tien Sung + +description: | + The Intel SoC FPGA hardware monitor unit provides on-chip voltage and + temperature sensors. You can use these sensors to monitor external + voltages and on-chip operating conditions such as internal power rails + and on-chip junction temperatures. + + The specific sensor configurations vary for each device family and + each device within a family does not offer all potential sensor + options. 
The information below attempts to illustrate the super set of + possible sensor options that are potentially available within each + device family, but the user should check the documentation for the + specific device they are using to verify which sensor options it + actually provides. + + Stratix 10 Device Family + + Stratix 10 Voltage Sensors + + page 0, channel 2 = 0.8V VCC + page 0, channel 3 = 1.0V VCCIO + page 0, channel 6 = 0.9V VCCERAM + + Stratix 10 Temperature Sensors + + page 0, channel 0 = main die + page 0, channel 1 = tile bottom left + page 0, channel 2 = tile middle left + page 0, channel 3 = tile top left + page 0, channel 4 = tile bottom right + page 0, channel 5 = tile middle right + page 0, channel 6 = tile top right + page 0, channel 7 = hbm2 bottom + page 0, channel 8 = hbm2 top + + Agilex Device Family + + Agilex Voltage Sensors + + page 0, channel 2 = 0.8V VCC + page 0, channel 3 = 1.8V VCCIO_SDM + page 0, channel 4 = 1.8V VCCPT + page 0, channel 5 = 1.2V VCCRCORE + page 0, channel 6 = 0.9V VCCH + page 0, channel 7 = 0.8V VCCL + + Agilex Temperature Sensors + + page 0, channel 0 = main die sdm max + page 0, channel 1 = main die sdm 1 + + page 1, channel 0 = main die corner bottom left max + page 1, channel 1 = main die corner bottom left 1 + page 1, channel 2 = main die corner bottom left 2 + + page 2, channel 0 = main die corner top left max + page 2, channel 1 = main die corner top left 1 + page 2, channel 2 = main die corner top left 2 + + page 3, channel 0 = main die corner bottom right max + page 3, channel 1 = main die corner bottom right 1 + page 3, channel 2 = main die corner bottom right 2 + + page 4, channel 0 = main die corner top right max + page 4, channel 1 = main die corner top right 1 + page 4, channel 2 = main die corner top right 2 + + page 5, channel 0 = tile die bottom left max + page 5, channel 1 = tile die bottom left 1 + page 5, channel 6..2 = tile die bottom left 6..2 R-tile only + page 5, channel 5..2 = tile die 
bottom left 5..2 F-tile only + page 5, channel 4..2 = tile die bottom left 4..2 E-tile only + + page 7, channel 0 = tile die top left max + page 7, channel 1 = tile die top left 1 + page 7, channel 6..2 = tile die top left 6..2 R-tile only + page 7, channel 5..2 = tile die top left 5..2 F-tile only + page 7, channel 4..2 = tile die top left 4..2 E-tile only + + page 8, channel 0 = tile die bottom right max + page 8, channel 1 = tile die bottom right 1 + page 8, channel 6..2 = tile die bottom right 6..2 R-tile only + page 8, channel 5..2 = tile die bottom right 5..2 F-tile only + page 8, channel 4..2 = tile die bottom right 4..2 E-tile only + + page 10, channel 0 = tile die top right max + page 10, channel 1 = tile die top right 1 + page 10, channel 6..2 = tile die top right 6..2 R-tile only + page 10, channel 5..2 = tile die top right 5..2 F-tile only + page 10, channel 4..2 = tile die top right 4..2 E-tile only + + N5X Device Family + + N5X Voltage Sensors + + page 0, channel 2 = 0.8V VDD + page 0, channel 3 = 0.8V VDD_SDM + page 0, channel 4 = 1.8V VCCADC + page 0, channel 5 = 1.8V VCCPD + page 0, channel 6 = 1.8V VCCIO_SDM + page 0, channel 7 = 0.8V VDD_HPS + + N5X Temperature Sensors + + page 0, channel 0 = main die + +properties: + + compatible: + const: intel,soc64-hwmon + + temperature: + description: + The temperature node specifies mappings of temperature sensor diodes on + the SoC FPGA main die and tile die. + type: object + properties: + '#address-cells': + const: 1 + '#size-cells': + const: 0 + patternProperties: + "^input(@[0-9a-f]+)?$": + description: + The input node specifies each individual temperature sensor. + type: object + properties: + reg: + description: + The temperature sensor address format contains a page number and + a channel number to identify a specific temperature sensor. The + page number selects the region of the device that the sensor + resides. The channel number selects the temperature sensor diode + in the page. 
The page number is defined in the upper 16-bits of + the reg value while the channel number is defined in the lower + 16-bits of the reg value. Channel 0 is represented by the value 0 + and channel 1 is represented by the value 1, and so on. + label: + description: + A label to describe the sensor. + required: + - reg + additionalProperties: false + required: + - '#address-cells' + - '#size-cells' + additionalProperties: false + + voltage: + description: + The voltage node specifies mappings of voltage sensors on the SoC FPGA + analog to digital converter of the Secure Device Manager (SDM). + type: object + properties: + '#address-cells': + const: 1 + '#size-cells': + const: 0 + patternProperties: + "^input(@[0-9a-f]+)?$": + description: + The input node specifies each individual voltage sensor. + type: object + properties: + reg: + description: + The voltage sensor address format contains a channel number to + identify a specific voltage sensor. The channel number is defined + in the lower 16-bits of the reg value. Channel 0 is represented by + the value 0 and channel 1 is represented by the value 1, and so + on. + label: + description: + A label to describe the sensor. 
+ required: + - reg + additionalProperties: false + required: + - '#address-cells' + - '#size-cells' + additionalProperties: false + +required: + - compatible + +additionalProperties: false + +examples: + - | + temp_volt { + compatible = "intel,soc64-hwmon"; + voltage { + #address-cells = <1>; + #size-cells = <0>; + input@2 { + label = "0.8V VCC"; + reg = <2>; + }; + + input@3 { + label = "1.8V VCCIO_SDM"; + reg = <3>; + }; + + input@4 { + label = "1.8V VCCPT"; + reg = <4>; + }; + + input@5 { + label = "1.2V VCCCRCORE"; + reg = <5>; + }; + + input@6 { + label = "0.9V VCCH"; + reg = <6>; + }; + + input@7 { + label = "0.8V VCCL"; + reg = <7>; + }; + }; + + temperature { + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + label = "Main Die SDM"; + reg = <0x0>; + }; + + input@10001 { + label = "Main Die corner bottom left 1"; + reg = <0x10001>; + }; + + input@10002 { + label = "Main Die corner bottom left 2"; + reg = <0x10002>; + }; + + input@20001 { + label = "Main Die corner top left 1"; + reg = <0x20001>; + }; + + input@20002 { + label = "Main Die corner top left 2"; + reg = <0x20002>; + }; + + input@30001 { + label = "Main Die corner bottom right 1"; + reg = <0x30001>; + }; + + input@30002 { + label = "Main Die corner bottom right 2"; + reg = <0x30002>; + }; + + input@40001 { + label = "Main Die corner top right 1 HPS"; + reg = <0x40001>; + }; + + input@40002 { + label = "Main Die corner top right 2"; + reg = <0x40002>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml index 60035a787e5c0..1858b4967c693 100644 --- a/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml +++ b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml @@ -37,6 +37,8 @@ properties: items: - const: thead,th1520-i2c - const: snps,designware-i2c + - description: Intel's SoCFPGA I2C controller + const: intel,socfpga-i2c reg: minItems: 1 diff --git 
a/Documentation/devicetree/bindings/misc/altera-hwmutex.txt b/Documentation/devicetree/bindings/misc/altera-hwmutex.txt new file mode 100644 index 0000000000000..6a583d08ece43 --- /dev/null +++ b/Documentation/devicetree/bindings/misc/altera-hwmutex.txt @@ -0,0 +1,22 @@ +Altera hardware mutex +Altera hardware mutex can provide hardware assistance for synchronization and +mutual exclusion between processors in asymmetric/symmetric multiprocessing +(AMP/SMP) system or multi processes/threads in uniprocessor system. + +Required properties: +- compatible : "altr,hwmutex-1.0". +- reg : physical base address of the mutex and length of memory mapped + region. + +Example: + mutex0: mutex0@0x100 { + compatible = "altr,hwmutex-1.0"; + reg = <0x100 0x8>; + }; + +Example of mutex's client node that includes mutex phandle. + mclient0: mclient0@0x200 { + compatible = "client-1.0"; + reg = <0x200 0x10>; + mutex = <&mutex0>; + }; diff --git a/Documentation/devicetree/bindings/misc/altera-interrupt-latency-counter.txt b/Documentation/devicetree/bindings/misc/altera-interrupt-latency-counter.txt new file mode 100644 index 0000000000000..09f6820576163 --- /dev/null +++ b/Documentation/devicetree/bindings/misc/altera-interrupt-latency-counter.txt @@ -0,0 +1,49 @@ +Altera Interrupt Latency Counter soft IP +Altera Interrupt Latency Counter IP core driver provides a sysfs interface +for user to obtain interrupt latency values from Altera Interrupt Latency +Counter soft IP. 
+ +The sysfs interface is located at path, +/sys/bus/platform/devices/{addr}.ilc/ilc_data/{int_#} +with +- {addr} = the base address of the soft ip +- {int_#} = the interrupt number + +Example use case: +# cat /sys/bus/platform/devices/c0010000.ilc/ilc_data/40 + +Required properties: +- compatible : + - "altr,ilc-1.0" +- reg : + - physical base address of the soft ip and length of memory mapped region +- interrupt-parent : + - interrupt source phandle similar to the interrupt source node +- interrupts : + - interrupt number. The interrupt specifier format depends on the interrupt + controller parent + +Altera specific properties: +- altr,sw-fifo-depth : + - define software fifo depth needed to record latency values + +Note: +- For edge triggered interrupt, the order of loading the ILC driver relative + to driver of the actual interrupt source affects the meaning of the ILC + values. If the ILC driver is loaded first, then the count values represent + the time to the start of the interrupt handler of the interrupt source. + If the order is switched, then the counts represent the time to finish the + interrupt handler for the interrupt source. + +- The driver for the interrupt source must be changed to request a shared irq. 
+ +Example: + interrupt_latency_counter_0: intc@0x10000000 { + compatible = "altr,ilc-1.0"; + reg = <0x10000000 0x00000100>; + interrupt-parent = < &interrupt_parent >; + interrupts = < 0 1 4 >; + altr,sw-fifo-depth = < 32 >; + }; + + diff --git a/Documentation/devicetree/bindings/misc/altera_sysid.txt b/Documentation/devicetree/bindings/misc/altera_sysid.txt new file mode 100644 index 0000000000000..c3bbd576b74b6 --- /dev/null +++ b/Documentation/devicetree/bindings/misc/altera_sysid.txt @@ -0,0 +1,11 @@ +Altera Sysid IP core driver + +Required properties: +- compatible: altr,sysid-1.0 + +Example: + +sysid_qsys: sysid@0x10000 { + compatible = "altr,sysid-1.0"; + reg = < 0x10000 0x00000008 >; +}; diff --git a/Documentation/devicetree/bindings/misc/intel,agilex5-soc-fcs-config.yaml b/Documentation/devicetree/bindings/misc/intel,agilex5-soc-fcs-config.yaml new file mode 100644 index 0000000000000..0e3b8ab160fe9 --- /dev/null +++ b/Documentation/devicetree/bindings/misc/intel,agilex5-soc-fcs-config.yaml @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/misc/intel,agilex5-soc-fcs-config.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Altera SoC FPGA Security features + +maintainers: + - Balsundar Ponnusamy + - Sagar Khadgi + - Santosh Male + - Mahesh Rao + +properties: + compatible: + enum: + - intel,agilex5-soc-fcs-config + - intel,agilex-soc-fcs-config + +additionalProperties: false + +required: + - compatible + +examples: + - | + fcs_config: fcs-config { + compatible = "intel,agilex5-soc-fcs-config"; + }; diff --git a/Documentation/devicetree/bindings/mmc/cdns,sdhci.yaml b/Documentation/devicetree/bindings/mmc/cdns,sdhci.yaml index 6c40611405a08..63b617756e843 100644 --- a/Documentation/devicetree/bindings/mmc/cdns,sdhci.yaml +++ b/Documentation/devicetree/bindings/mmc/cdns,sdhci.yaml @@ -16,6 +16,7 @@ properties: - amd,pensando-elba-sd4hc - microchip,mpfs-sd4hc - 
socionext,uniphier-sd4hc + - intel,agilex5-sd4hc - const: cdns,sd4hc reg: @@ -113,6 +114,373 @@ properties: minimum: 0 maximum: 0x7f + # COMBOPHY value generated from Cadence script: + # They are used to program the PHY based on different SD speed selected. + + cdns,phy-use-ext-lpbk-dqs: + description: | + Bit to choose lpbk_dqs to capture data for reads. It is valid + when 'use_phony_dqs' and 'use_lpbk_dqs' fields are set high. + 0 - use internal lpbk_dqs (mem_rebar_ipad) for data + capture. + 1 - use external lpbk_dqs (lpbk_dqs connected to the + lpbk_dqs_IO PAD) for data capture. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 1 + + cdns,phy-use-lpbk-dqs: + description: | + Bit to choose lpbk_dqs to capture data for reads. It is valid + when 'use_phony_dqs' is set high. + 0 - Use phony DQS for data capture. + 1 - Use lpbk_dqs for data capture. Recommended setting + for SD/eMMC controller. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 1 + + cdns,phy-use-phony-dqs: + description: | + Bit to choose lpbk_dqs or phony DQS (generated in the control + slice logic) or DQS from the device to capture data for reads. + 0 - Use DQS from device for data capture. + 1 - Use phony DQS or lpbk_dqs for data capture. Bit 21 + of the phy_dqs_timing_reg is used the choose the source + signal. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 1 + + cdns,phy-use-phony-dqs-cmd: + description: | + Bit to choose phony DQS (or lpbk_dqs) from the control slice + logic or DQS from the device to capture command data for + reads. + 0 - Use DQS from device for command data capture. + 1 - Use phony DQS or lpbk_dqs for command data capture. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 1 + + cdns,phy-io-mask-always-on: + description: | + Defines if the IO mask for DATA/CMD is always enabled. + 0 = disable ie. start/end defines the IO mask functionality. 
+ Recommended setting for SD/eMMC controller. + 1 = IO mask is always ON + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 1 + + cdns,phy-io-mask-end: + description: | + Adjusts the ending point of the DQ/CMD pad input mask + enable. Defines the delay after dfi_wrdata_en/dfi_wrcmd_en + goes high when the mask is disabled (data/cmd are blocked + and 1'b1 are passed to PHY) + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x7 + + cdns,phy-io-mask-start: + description: | + Adjusts the starting point of the DQ/CMD pad input mask + enable. Defines the delay after dfi_wrdata_en/dfi_wrcmd_en + goes low when the mask is enabled (data/cmd are passed to + PHY). + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x7 + + cdns,phy-data-select-oe-end: + description: | + Adjusts the ending point of the DQ pad output enable window. + Lower numbers pull the falling edge earlier in time and larger + numbers cause the falling edge to be delayed. Each bit changes + the output enable time by a 1/2 cycle resolution. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x7 + + cdns,phy-sync-method: + description: | + Adjusts the ending point of the DQ pad output enable window. + Lower numbers pull the falling edge earlier in time and larger + numbers cause the falling edge to be delayed. Each bit changes + the output enable time by a 1/2 cycle resolution. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,phy-sw-half-cycle-shift: + description: | + 1'b0 - No effect. + 1'b1 - Adds a half clock delay to the write data path + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,phy-rd-del-sel: + description: | + Defines the read data delay. Holds the number of cycles + to delay the dfi_rddata_en signal prior to enabling the read + FIFO. After this delay, the read pointers begin incrementing + the read FIFO. 
If 'sync_method' is set high the value of this + field must take into account the synchronization time of the + pointers in the entry FIFO (adding three clock cycles should + be sufficient) + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x40 + + cdns,phy-underrun-suppress: + description: | + This field turns off the generation of the underrun signal when + 'sync_method' is set high. Recommended value is zero with + an exception for Cadence SD/eMMC controller for which this + field needs to be set high. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,phy-gate-cfg-always-on: + description: | + This parameter causes the gate to be always on. Recommended + setting for SD/eMMC controller is 1. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,phy-param-dll-bypass-mode: + description: | + DLL bypass mode control. Controls the bypass mode of + the master and slave DLLs. The param_dll_bypass_mode is + intended to be used only for debug. + 0 - Normal operational mode. DLL functioning in normal + mode of operation where the slave delay line settings are + used as fractional delay of the master delay line encoder + reading of the number of delays in one cycle. + 1 - Bypass mode on. Delays are defined in + phy_dll_slave_ctrl_reg. Master DLL is disabled with only + 1 delay element in its delay line. The slave delay + lines decode delays in absolute delay elements rather than + as fractional delays. The dll_lock field (bit [0]) of the + phy_dll_obs_reg_0 parameter will be forced high. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,phy-param-phase-detect-sel: + description: | + Selects the number of delay elements to be inserted between + the phase detect flip-flops. Defaults to 0x0 although the + recommended value is 2 elements but if a lock condition is + not detected, the user should increase the number of delay + elements. 
+ $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x7 + + cdns,phy-param-dll-start-point: + description: | + This value is the initial delay value for the DLL. This value + is also used as the increment value if the initial value is less + than a half-clock cycle. This field should be set such that it + is not greater than 7/8ths of a clock period given the worst + case element delay. For example, if the frequency is 200MHz + (5ns cycle time) with a worst case element 80ps delay, this + field should be set to = 5 * (7/8) / .080 = 54 elements. This + calculation helps determine the start point which achieves the + fastest lock. However, a small value such as 0x04 may be used + instead to ensure that the DLL does not lock on a harmonic. + Note that with a small value like this, the initial lock time will + be longer. Value smaller than 0x04 may cause no lock by DLL. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x7 + + cdns,phy-read-dqs-cmd-delay: + description: | + Controls the read command DQS delay which adjusts the + timing in 1/256th of the clock period when in normal DLL + locked mode. In bypass mode, this field directly programs the + number of delay elements. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x100 + + cdns,phy-clk-wrdqs-delay: + description: | + Controls the clk_wrdqs delay line which adjusts the write DQS + timing in 1/256th steps of the clock period in normal DLL + locked mode. In bypass mode, this field directly programs the + number of delay elements. clk_wrdqs delay line is used to + adjust the write CMD bit timing. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x100 + + cdns,phy-clk-wr-delay: + description: | + Controls the clk_wr delay line which adjusts the write DQ bit + timing in 1/256th steps of the clock period in normal DLL + locked mode. In bypass mode, this field directly programs the + number of delay elements. 
+ $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x100 + + cdns,phy-read-dqs-delay: + description: | + Controls the read DQS delay which adjusts the timing in + 1/256th of the clock period when in normal DLL locked mode. + In bypass mode, this field directly programs the number of + delay elements. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x100 + + cdns,phy-phony-dqs-timing: + description: | + The timing of assertion of phony DQS to the data slices. If + the extended_read_mode is disabled the value should be zero. + If the extended_read_mode is enabled the value should match + the width of the rebar pulse in terms of clock PHY clock cycles + reduced by 1. e.g. if rebar pulse width is 4 clock cycles the + value of this field should be 3. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x40 + + cdns,hrs09-rddata-en: + description: | + If 1, dfi_rddata_en is forced to 1, else host logic controls the signal. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,hrs09-rdcmd-en: + description: | + If 1, dfi_rdcmd_en is forced to 1, else host logic controls the signal. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,hrs09-extended-wr-mode: + description: | + Controls sdphy_param_extended_wr_mode port. None of the software resets clear this register. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,hrs09-extended-rd-mode: + description: | + Controls sdphy_param_extended_rd_mode port. + None of the software resets clear this register. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,hrs10-hcsdclkadj: + description: | + This field allows to adjust flow control mechanism which disables SDCLK. + With value 0, the clock (dfi_webar/dfi_webar_high) will be disabled + right after end bit of the data block. 
Increasing this value will + cause that clock signal is to be disabled earlier with SDCLK period step. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0xf + + cdns,hrs16-wrdata1-sdclk-dly: + description: | + Value in this field defines a delay of the dfi_wrdata[15:8] signal. + The delay is equal value * t_SDCLK/2. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,hrs16-wrdata0-sdclk-dly: + description: | + Value in this field defines a delay of the dfi_wrdata[7:0] signal. + The delay is equal value * t_SDCLK/2. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,hrs16-wrcmd1-sdclk-dly: + description: | + Value in this field defines a delay of the dfi_wrcmd signal. + The delay is equal value * t_SDCLK/2. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,hrs16-wrcmd0-sdclk-dly: + description: | + Value in this field defines a delay of the dfi_wrcmd signal. + The delay is equal value * t_SDCLK/2. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,hrs16-wrdata1-dly: + description: | + Value in this field defines a delay of the dfi_wrdata[15:8] signal. + The delay is equal value * t_SDMCLK. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,hrs16-wrdata0-dly: + description: | + Value in this field defines a delay of the dfi_wrdata[7:0] signal. + The delay is equal value * t_SDMCLK. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,hrs16-wrcmd1-dly: + description: | + Value in this field defines a delay of the dfi_wrcmd[1] signal. + The delay is equal value * t_SDMCLK. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,hrs16-wrcmd0-dly: + description: | + Value in this field defines a delay of the dfi_wrcmd[0] signal. + The delay is equal value * t_SDMCLK. 
+ $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,hrs07-rw-compensate: + description: | + According to delays between PAD and dfi_rddata, dfi_wrdata and PAD + and to Read Wait timing requirements, the signal dat[2] should be set + to 0 earlier than controller read the end bit of read data. + Designer should update this register with delay of data path count + in sdmclk clock cycles. If the value is greater than 10 and value of field + SDCLK Frequency Select (concatenation of SRS11.SDCFSH, SRS11.SDCFSL) + is equal 0, then io_mask_start parameter in PHY register + phy_dq_timing_reg should be set with value equal (RW_COMPENSATE-10)*2. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + + cdns,hrs07-idelay-val: + description: | + Designer should update this register with delay value of IO with + appropriate input delay. Delay is count in half of period of sdmclk. + If sdmclk is working at 200MHz frequency, then 1 is 2.5 ns. + This value will be used to compensate delay of DAT line when controller + is reading Card Interrupt. + $ref: "/schemas/types.yaml#/definitions/uint32" + minimum: 0 + maximum: 0x1 + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/net/altera_qse.txt b/Documentation/devicetree/bindings/net/altera_qse.txt new file mode 100644 index 0000000000000..7a7ca3219f7c4 --- /dev/null +++ b/Documentation/devicetree/bindings/net/altera_qse.txt @@ -0,0 +1,112 @@ +* Altera Quad-Speed Ethernet MAC driver (QSE) + +Required properties: +- compatible: Should be "altr,qse-msgdma-2.0" for MSGDMA with prefetcher based + implementations. +- reg: Address and length of the register set for the device. It contains + the information of registers in the same order as described by reg-names +- reg-names: Should contain the reg names +- interrupts: Should contain the QSE interrupts and its mode. 
+- interrupt-names: Should contain the interrupt names + "rx_irq": DMA Rx dispatcher interrupt + "tx_irq": DMA Tx dispatcher interrupt +- rx-fifo-depth: MAC receive FIFO buffer depth in bytes +- tx-fifo-depth: MAC transmit FIFO buffer depth in bytes +- rx-fifo-almost-full: Value that indicates RX FIFO buffer is getting full +- rx-fifo-almost-empty: Value that indicates RX FIFO buffer is getting empty +- phy-mode: See ethernet.txt in the same directory. +- sfp: See sff,sfp.txt in the same directory. +- dma-coherent: Present if DMA operations are coherent. + +- altr,has-supplementary-unicast: + If present, QSE supports additional unicast addresses. + Otherwise additional unicast addresses are not supported. +- altr,has-ptp: + If present, QSE supports 1588 timestamping. Currently only + supported with the msgdma prefetcher. +- altr,tx-pma-delay-ns: + MAC Tx PMA digital delay in nanoseconds. +- altr,rx-pma-delay-ns: + MAC RX PMA digital delay in nanoseconds. +- altr,tx-pma-delay-fns: + MAC TX PMA digital delay in fractional nanoseconds. +- altr,rx-pma-delay-fns: + MAC RX PMA digital delay in fractional nanoseconds. +- tx-poll-freq: + Optional cycle count for Tx prefetcher to poll descriptor + list. If not present, defaults to 128, which at 125MHz is + roughly 1usec. +- rx-poll-freq: + Optional cycle count for Rx prefetcher to poll descriptor + list. If not present, defaults to 128, which at 125MHz is + roughly 1usec. + +Required registers by compatibility string: +- "altr,qse-msgdma-2.0" + "control_port": MAC configuration space region. + "xcvr_ctrl": PHY transceiver (XCVR) address space region. + "tx_csr": DMA Tx dispatcher control and status space region. + "tx_pref": DMA Tx prefetcher configuration space region. + "rx_csr" : DMA Rx dispatcher control and status space region. + "rx_pref": DMA Rx prefetcher configuration space region. + "tod_ctrl": Time of Day Control register only required when + timestamping support is enabled. 
Timestamping is + only supported with the msgdma-2.0 implementation. + "rx_fifo": RX FIFO address space region. + "phy_reconfig_csr": PHY reconfiguration controller address space region. + "chan_ready": Channel ready address space region. + +Optional properties: +- local-mac-address: See ethernet.txt in the same directory. +- max-frame-size: See ethernet.txt in the same directory. + +Example: + + qse_0_qse: ethernet@0x100020000 { + compatible = "altr,qse-msgdma-2.0"; + reg-names = "control_port", "xcvr_ctrl", "tod_ctrl", + "tx_csr", "tx_pref", "rx_csr", "rx_pref", + "rx_fifo", "phy_reconfig_csr", "chan_ready"; + reg = <0x00000001 0x00020000 0x00001000>, + <0x00000001 0x00022000 0x00002000>, + <0x00000001 0x00026000 0x00000040>, + <0x00000001 0x00000420 0x00000020>, + <0x00000001 0x00000400 0x00000020>, + <0x00000001 0x00000520 0x00000020>, + <0x00000001 0x00000500 0x00000020>, + <0x00000001 0x00000540 0x00000020>, + <0x00000001 0x00030100 0x00000010>, + <0x00000001 0x00000330 0x00000010>; + dma-coherent; + phy-mode = "10gbase-kr"; + sfp = <&sfp_eth0>; + clocks = <&ptp_ctrl_10G_clk>; + clock-names = "tod_clk"; + interrupt-parent = <&intc>; + interrupt-names = "tx_irq", "rx_irq"; + interrupts = <0 21 4>, + <0 22 4>; + rx-fifo-depth = <0x4000>; + tx-fifo-depth = <0x4000>; + rx-fifo-almost-full = <0x10000>; + rx-fifo-almost-empty = <0x8000>; + local-mac-address = [00 00 00 00 00 00]; + altr,tx-pma-delay-ns = <0xb>; + altr,rx-pma-delay-ns = <0xb>; + altr,tx-pma-delay-fns = <0>; + altr,rx-pma-delay-fns = <0>; + altr,has-ptp; + }; + + sfp_eth0: sfp-eth0 { + compatible = "sff,sfp"; + i2c-bus = <&i2c0>; + los-gpio = <&sfp_gpio 0 GPIO_ACTIVE_HIGH>; + mod-def0-gpio = <&sfp_gpio 2 GPIO_ACTIVE_LOW>; + maximum-power-milliwatt = <1000>; + pinctrl-names = "default"; + pinctrl-0 = <&sfp_gpio>; + tx-disable-gpio = <&sfp_gpio 0 GPIO_ACTIVE_HIGH>; + tx-fault-gpio = <&sfp_gpio 1 GPIO_ACTIVE_HIGH>; + rate-select0-gpio = <&sfp_gpio 2 GPIO_ACTIVE_HIGH>; + }; \ No newline at end of file 
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml index 4e2ba1bf788c9..d00c240c53271 100644 --- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml +++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml @@ -106,19 +106,39 @@ properties: interrupts: minItems: 1 - items: - - description: Combined signal for various interrupt events - - description: The interrupt to manage the remote wake-up packet detection - - description: The interrupt that occurs when Rx exits the LPI state - - description: The interrupt that occurs when HW safety error triggered + maxItems: 19 + description: + DWMAC supports following interrupts, + * Combined signal for various interrupt events + * The interrupt to manage the remote wake-up packet detection + * The interrupt that occurs when Rx exits the LPI state + * Individual signal for per channel interrupt events + Except combined interrupt, all interrupts are optional. If per channel + interrupts are enabled there should be interrupt entries for every tx and + rx channels. 
interrupt-names: minItems: 1 items: - const: macirq - - enum: [eth_wake_irq, eth_lpi, sfty] - - enum: [eth_wake_irq, eth_lpi, sfty] - - enum: [eth_wake_irq, eth_lpi, sfty] + - pattern: '^(eth_wake_irq|eth_lpi|macirq_[tr]x[0-7])$' + - pattern: '^(eth_lpi|macirq_[tr]x[0-7])$' + - pattern: '^macirq_[tr]x[0-7]$' + - pattern: '^macirq_[tr]x[0-7]$' + - pattern: '^macirq_[tr]x[0-7]$' + - pattern: '^macirq_[tr]x[0-7]$' + - pattern: '^macirq_[tr]x[0-7]$' + - pattern: '^macirq_[tr]x[0-7]$' + - pattern: '^macirq_[tr]x[0-7]$' + - pattern: '^macirq_[tr]x[0-7]$' + - pattern: '^macirq_[tr]x[0-7]$' + - pattern: '^macirq_[tr]x[0-7]$' + - pattern: '^macirq_[tr]x[0-7]$' + - pattern: '^macirq_[tr]x[0-7]$' + - pattern: '^macirq_[tr]x[0-7]$' + - pattern: '^macirq_[tr]x[0-7]$' + - pattern: '^macirq_[tr]x[0-7]$' + - pattern: '^macirq_[tr]x[0-7]$' clocks: minItems: 1 @@ -144,17 +164,18 @@ properties: resets: minItems: 1 + maxItems: 8 + additionalItems: true items: - - description: GMAC stmmaceth reset - - description: AHB reset + - description: MAC Reset signal reset-names: - oneOf: - - items: - - enum: [stmmaceth, ahb] - - items: - - const: stmmaceth - - const: ahb + minItems: 1 + maxItems: 8 + additionalItems: true + contains: + enum: + - stmmaceth power-domains: maxItems: 1 @@ -394,6 +415,10 @@ properties: type: boolean description: TX checksum offload is unsupported by the TX queue. + snps,tbs-enable: + type: boolean + description: Enable Time Based Scheduling(TBS) for TX queue. + allOf: - if: required: @@ -489,6 +514,11 @@ properties: description: Enable gating of the MAC TX clock during TX low-power mode + snps,multi-irq-en: + $ref: /schemas/types.yaml#/definitions/flag + description: + Enable interrupt for each TX or RX channel. + snps,multicast-filter-bins: + $ref: /schemas/types.yaml#/definitions/uint32 + description: @@ -519,6 +549,21 @@ properties: Enables the TSO feature otherwise it will be managed by MAC HW capability register.
+ snps,rx-vlan-offload: + $ref: /schemas/types.yaml#/definitions/flag + description: + Enable hardware-accelerated RX VLAN stripping. + + altr,smtg-hub: + $ref: /schemas/types.yaml#/definitions/flag + description: + Enable Synchronized Multidrop Timestamp Gathering(SMTG). + + snps,pagepool-tx-buf-quirk: + $ref: /schemas/types.yaml#/definitions/flag + description: + Enable use of page pool buffers instead of streaming buffers for transmit + mdio: $ref: mdio.yaml# unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt index 612a8e8abc887..99c78a969805f 100644 --- a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt +++ b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt @@ -34,7 +34,7 @@ Required properties: Example: -gmii_to_sgmii_converter: phy@100000240 { +gmii_to_sgmii_converter: phy@0x100000240 { compatible = "altr,gmii-to-sgmii-2.0"; reg = <0x00000001 0x00000240 0x00000008>, <0x00000001 0x00000200 0x00000040>; diff --git a/Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml b/Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml index 52533fccc134a..c956fbf229d08 100644 --- a/Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml +++ b/Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml @@ -15,6 +15,9 @@ properties: enum: - altr,pcie-root-port-1.0 - altr,pcie-root-port-2.0 + - altr,pcie-root-port-3.0-f-tile + - altr,pcie-root-port-3.0-p-tile + - altr,pcie-root-port-3.0-r-tile reg: items: diff --git a/Documentation/devicetree/bindings/tty/newhaven_lcd.txt b/Documentation/devicetree/bindings/tty/newhaven_lcd.txt new file mode 100644 index 0000000000000..5ff0438640d69 --- /dev/null +++ b/Documentation/devicetree/bindings/tty/newhaven_lcd.txt @@ -0,0 +1,21 @@ +* TTY on a Newhaven NHD-0216K3Z-NSW-BBW LCD connected to I2C + +Required properties: +- compatible: Should be "newhaven,nhd-0216k3z-nsw-bbw"; +- reg: i2c
address +- height: should be 2 lines +- width: should be 16 characters +- brightness: backlight brightness. Range is 1 to 8, where + 1=OFF and 8=maximum brightness. + +Example: + +&i2c0 { + lcd: lcd@28 { + compatible = "newhaven,nhd-0216k3z-nsw-bbw"; + reg = <0x28>; + height = <2>; + width = <16>; + brightness = <8>; + }; + diff --git a/Documentation/devicetree/bindings/usb/intel,socfpga-dwc3.yaml b/Documentation/devicetree/bindings/usb/intel,socfpga-dwc3.yaml new file mode 100644 index 0000000000000..d0f9dc451538b --- /dev/null +++ b/Documentation/devicetree/bindings/usb/intel,socfpga-dwc3.yaml @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/usb/intel,socfpga-dwc3.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Intel SoCFPGA DWC3 USB controller + +maintainers: + - Adrian Ng Ho Yin + +properties: + compatible: + const: intel,agilex5-dwc3 + + reg: + maxItems: 1 + + clocks: + maxItems: 2 + + ranges: true + + resets: + maxItems: 2 + + reset-names: + items: + - const: dwc3 + - const: dwc3-ecc + + '#address-cells': + enum: [ 1, 2 ] + + '#size-cells': + enum: [ 1, 2 ] + +# Required child node: + +patternProperties: + "^usb@[0-9a-f]+$": + $ref: snps,dwc3.yaml# + +required: + - compatible + - reg + - clocks + - resets + - ranges + +additionalProperties: false + +examples: + - | + #include + #include + #include + #include + + usb1@11000000 { + compatible = "intel,agilex5-dwc3"; + reg = <0x11000000 0x100000>; + ranges; + clocks = <&clkmgr AGILEX5_USB31_SUSPEND_CLK>, + <&clkmgr AGILEX5_USB31_BUS_CLK_EARLY>; + resets = <&rst USB1_RESET>, <&rst USB1_OCP_RESET>; + reset-names = "dwc3", "dwc3-ecc"; + #address-cells = <1>; + #size-cells = <1>; + + usb@11000000 { + compatible = "snps,dwc3"; + reg = <0x11000000 0x100000>; + interrupts = ; + dr_mode = "host"; + }; + }; diff --git a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml index 1cd0ca90127d9..496d4fcccd757 100644 --- a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml +++ b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml @@ -464,6 +464,11 @@ properties: description: Enable USB remote wakeup. + snps,dma_set_40_bit_mask_quirk: + description: + Set dma bit-mask to 40 bits. + type: boolean + unevaluatedProperties: false required: diff --git a/Documentation/devicetree/bindings/video/altr,vip-fb2.txt b/Documentation/devicetree/bindings/video/altr,vip-fb2.txt new file mode 100644 index 0000000000000..296988aec88d2 --- /dev/null +++ b/Documentation/devicetree/bindings/video/altr,vip-fb2.txt @@ -0,0 +1,22 @@ +Altera Video and Image Processing(VIP) Frame Buffer II bindings + +Required properties: +- compatible: "altr,vip-frame-buffer-2.0" +- reg: Physical base address and length of the framebuffer controller's + registers. +- max-width: The width of the framebuffer in pixels. +- max-height: The height of the framebuffer in pixels. +- bits-per-symbol: only "8" is currently supported +- mem-port-width = the bus width of the avalon master port on the frame reader + +Example: + +dp_0_frame_buf: vip@0x100000280 { + compatible = "altr,vip-frame-buffer-2.0"; + reg = <0x00000001 0x00000280 0x00000040>; + altr,max-width = <1920>; + altr,max-height = <1080>; + altr,bits-per-symbol = <8>; + altr,mem-port-width = <128>; +}; + diff --git a/Documentation/devicetree/configfs-overlays.txt b/Documentation/devicetree/configfs-overlays.txt new file mode 100644 index 0000000000000..185d85ef52e49 --- /dev/null +++ b/Documentation/devicetree/configfs-overlays.txt @@ -0,0 +1,31 @@ +Howto use the configfs overlay interface. + +A device-tree configfs entry is created in /config/device-tree/overlays +and and it is manipulated using standard file system I/O. 
+Note that this is a debug level interface, for use by developers and +not necessarily something accessed by normal users due to the +security implications of having direct access to the kernel's device tree. + +* To create an overlay you mkdir the directory: + + # mkdir /config/device-tree/overlays/foo + +* Either you echo the overlay firmware file to the path property file. + + # echo foo.dtbo >/config/device-tree/overlays/foo/path + +* Or you cat the contents of the overlay to the dtbo file + + # cat foo.dtbo >/config/device-tree/overlays/foo/dtbo + +The overlay file will be applied, and devices will be created/destroyed +as required. + +To remove it simply rmdir the directory. + + # rmdir /config/device-tree/overlays/foo + +The rationale for the dual interface (firmware & direct copy) is that each is +better suited to different use patterns. The firmware interface is what's +intended to be used by hardware managers in the kernel, while the copy interface +make sense for developers (since it avoids problems with namespaces). diff --git a/Documentation/fpga/debugfs.txt b/Documentation/fpga/debugfs.txt new file mode 100644 index 0000000000000..b01950f76e20d --- /dev/null +++ b/Documentation/fpga/debugfs.txt @@ -0,0 +1,39 @@ +FPGA Manager DebugFS interface for FPGA reprogramming. + +Alan Tull 2016 + +Each FPGA gets its own directory such as /fpga_manager/fpga0 and +three files: + + - [RW] flags: flags as defined in fpga-mgr.h. For example: + + $ echo 1 > /sys/kernel/debug/fpga_manager/fpga0/flags + + - [RW] config_complete_timeout_us: time out in microseconds to wait for + FPGA to go to operating state after + region has been programmed. + + $ echo 4 > /sys/kernel/debug/fpga_manager/fpga0/config_complete_timeout_us + + - [RW] firmware_name: Name of an FPGA image firmware file. Writing initiates + a complete FPGA programming cycle. Note that the image + file must be in a directory on the firmware search path + such as /lib/firmware. 
+ + $ echo image.rbf > /sys/kernel/debug/fpga_manager/fpga0/firmware_name + + - [WO] image: Raw FPGA image data. Writing the FPGA image data will + initiate a complete FPGA programming cycle. Data must + be written in one chunk, for example: + + $ dd bs=10M if=./image.rbf of=/sys/kernel/debug/fpga_manager/fpga0/image + (where image.rbf < 10M) + +To program the FPGA, write the flags (if needed), then use either the +firmware_name or image file to program. + +This interface does not handle bridges or loading/unloading of soft IP device +drivers. This makes it really easy to mess things up by doing things like +reprogramming the hardware out from under a driver or reprogramming while a +bridge is enabled, causing gunk to go out on a cpu bus. It should go without +saying that this interface is for debug only. Not intended for production use. diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst index ea3b5be8fe4f7..bf715f6e3952e 100644 --- a/Documentation/hwmon/index.rst +++ b/Documentation/hwmon/index.rst @@ -218,6 +218,7 @@ Hardware Monitoring Kernel Drivers smsc47b397 smsc47m192 smsc47m1 + soc64-hwmon sparx5-temp spd5118 stpddc60 diff --git a/Documentation/hwmon/soc64-hwmon.rst b/Documentation/hwmon/soc64-hwmon.rst new file mode 100644 index 0000000000000..6605f201db113 --- /dev/null +++ b/Documentation/hwmon/soc64-hwmon.rst @@ -0,0 +1,28 @@ +.. SPDX-License-Identifier: GPL-2.0 +Kernel driver soc64-hwmon +========================= + +Supported chips: + + * Intel N5X + +Author: Kris Chaplin + +Description +----------- + +This driver supports hardware monitoring for 64-Bit SoC FPGA and eASIC devices +based around the Secure Device Manager and Stratix 10 Service layer. + +The following sensor types are supported + + * temperature + * voltage + + +Usage Notes +----------- + +The driver relies on a device tree node to enumerate support present on the +specific device. 
See Documentation/devicetree/bindings/hwmon/soc64-hwmon.txt +for details of the device-tree node. diff --git a/MAINTAINERS b/MAINTAINERS index de04c7ba8571b..35b9b0efab795 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3036,6 +3036,11 @@ M: Dinh Nguyen S: Maintained F: drivers/clk/socfpga/ +ARM/SOCFPGA EDAC BINDINGS +M: Matthew Gerlach +S: Maintained +F: Documentation/devicetree/bindings/edac/altr,socfpga-ecc-manager.yaml + ARM/SOCFPGA EDAC SUPPORT M: Dinh Nguyen S: Maintained diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga.dtsi b/arch/arm/boot/dts/intel/socfpga/socfpga.dtsi index 35be14150f416..b02786ff72a0f 100644 --- a/arch/arm/boot/dts/intel/socfpga/socfpga.dtsi +++ b/arch/arm/boot/dts/intel/socfpga/socfpga.dtsi @@ -87,7 +87,7 @@ }; }; - base_fpga_region { + base_fpga_region: base-fpga-region { compatible = "fpga-region"; fpga-mgr = <&fpgamgr0>; @@ -670,7 +670,7 @@ i2c0: i2c@ffc04000 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc04000 0x1000>; resets = <&rst I2C0_RESET>; clocks = <&l4_sp_clk>; @@ -681,7 +681,7 @@ i2c1: i2c@ffc05000 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc05000 0x1000>; resets = <&rst I2C1_RESET>; clocks = <&l4_sp_clk>; @@ -692,7 +692,7 @@ i2c2: i2c@ffc06000 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc06000 0x1000>; resets = <&rst I2C2_RESET>; clocks = <&l4_sp_clk>; @@ -703,7 +703,7 @@ i2c3: i2c@ffc07000 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc07000 0x1000>; resets = <&rst I2C3_RESET>; clocks = <&l4_sp_clk>; diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi 
b/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi index b108265e9bde4..2994ad36ae541 100644 --- a/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi +++ b/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi @@ -77,6 +77,7 @@ clock-names = "apb_pclk"; resets = <&rst DMA_RESET>, <&rst DMA_OCP_RESET>; reset-names = "dma", "dma-ocp"; + microcode-cached; }; }; @@ -557,7 +558,7 @@ i2c0: i2c@ffc02200 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc02200 0x100>; interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>; clocks = <&l4_sp_clk>; @@ -568,7 +569,7 @@ i2c1: i2c@ffc02300 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc02300 0x100>; interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>; clocks = <&l4_sp_clk>; @@ -579,7 +580,7 @@ i2c2: i2c@ffc02400 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc02400 0x100>; interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>; clocks = <&l4_sp_clk>; @@ -590,7 +591,7 @@ i2c3: i2c@ffc02500 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc02500 0x100>; interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>; clocks = <&l4_sp_clk>; @@ -601,7 +602,7 @@ i2c4: i2c@ffc02600 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc02600 0x100>; interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH>; clocks = <&l4_sp_clk>; diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga_arria10_socdk.dtsi b/arch/arm/boot/dts/intel/socfpga/socfpga_arria10_socdk.dtsi index ec7365444a3b8..98dc3f00c6420 100644 --- a/arch/arm/boot/dts/intel/socfpga/socfpga_arria10_socdk.dtsi +++ 
b/arch/arm/boot/dts/intel/socfpga/socfpga_arria10_socdk.dtsi @@ -121,6 +121,10 @@ compatible = "altr,a10sr-reset"; #reset-cells = <1>; }; + + ps_alarm { + compatible = "altr,a10sr-hwmon"; + }; }; }; @@ -147,6 +151,14 @@ vref-supply = <&ref_033v>; }; + lcd: lcd@28 { + compatible = "newhaven,nhd-0216k3z-nsw-bbw"; + reg = <0x28>; + height = <2>; + width = <16>; + brightness = <8>; + }; + eeprom@51 { compatible = "atmel,24c32"; reg = <0x51>; @@ -158,6 +170,11 @@ reg = <0x68>; }; + max@4c { + compatible = "max1619"; + reg = <0x4c>; + }; + ltc@5c { compatible = "ltc2977"; reg = <0x5c>; diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga_arria10_socdk_nand.dts b/arch/arm/boot/dts/intel/socfpga/socfpga_arria10_socdk_nand.dts index a662df319a840..5f9b98b0b393a 100644 --- a/arch/arm/boot/dts/intel/socfpga/socfpga_arria10_socdk_nand.dts +++ b/arch/arm/boot/dts/intel/socfpga/socfpga_arria10_socdk_nand.dts @@ -18,9 +18,9 @@ label = "Boot and fpga data"; reg = <0x0 0x02500000>; }; - partition@1c00000 { + partition@2500000 { label = "Root Filesystem - JFFS2"; - reg = <0x02500000 0x05500000>; + reg = <0x02500000 0x3db00000>; }; }; }; diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga_arria5_socdk.dts b/arch/arm/boot/dts/intel/socfpga/socfpga_arria5_socdk.dts index 7342f5942b0d0..495cb09cd9c91 100644 --- a/arch/arm/boot/dts/intel/socfpga/socfpga_arria5_socdk.dts +++ b/arch/arm/boot/dts/intel/socfpga/socfpga_arria5_socdk.dts @@ -95,6 +95,14 @@ i2c-sda-falling-time-ns = <5000>; i2c-scl-falling-time-ns = <5000>; + lcd: lcd@28 { + compatible = "newhaven,nhd-0216k3z-nsw-bbw"; + reg = <0x28>; + height = <2>; + width = <16>; + brightness = <8>; + }; + eeprom@51 { compatible = "atmel,24c32"; reg = <0x51>; diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_socdk.dts b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_socdk.dts index d37a982e85719..3366b58444f7b 100644 --- a/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_socdk.dts +++ 
b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_socdk.dts @@ -99,6 +99,14 @@ i2c-sda-falling-time-ns = <5000>; i2c-scl-falling-time-ns = <5000>; + lcd: lcd@28 { + compatible = "newhaven,nhd-0216k3z-nsw-bbw"; + reg = <0x28>; + height = <2>; + width = <16>; + brightness = <8>; + }; + eeprom@51 { compatible = "atmel,24c32"; reg = <0x51>; diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig index 294906c8f16e8..a5b5b569155e4 100644 --- a/arch/arm/configs/socfpga_defconfig +++ b/arch/arm/configs/socfpga_defconfig @@ -39,20 +39,24 @@ CONFIG_PCIE_ALTERA=y CONFIG_PCIE_ALTERA_MSI=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DMA_CMA=y CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y -CONFIG_MTD_M25P80=y CONFIG_MTD_RAW_NAND=y CONFIG_MTD_NAND_DENALI_DT=y CONFIG_MTD_SPI_NOR=y # CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set CONFIG_OF_OVERLAY=y +CONFIG_OF_CONFIGFS=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=2 CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_BLK_DEV_NVME=m CONFIG_SRAM=y +CONFIG_ALTERA_SYSID=m +CONFIG_ALTERA_ILC=m CONFIG_EEPROM_AT24=y CONFIG_SCSI=y # CONFIG_SCSI_PROC_FS is not set @@ -76,6 +80,7 @@ CONFIG_TOUCHSCREEN_STMPE=y # CONFIG_SERIO_SERPORT is not set CONFIG_SERIO_AMBAKMI=y CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_NEWHAVEN_LCD=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=2 @@ -95,6 +100,7 @@ CONFIG_GPIO_SYSFS=y CONFIG_GPIO_ALTERA=y CONFIG_GPIO_DWAPB=y CONFIG_GPIO_ALTERA_A10SR=y +CONFIG_SENSORS_ALTERA_A10SR=y CONFIG_SENSORS_MAX1619=y CONFIG_PMBUS=y CONFIG_SENSORS_LTC2978=y @@ -106,6 +112,9 @@ CONFIG_MFD_ALTERA_SYSMGR=y CONFIG_MFD_STMPE=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_FB=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_USB=y CONFIG_USB_STORAGE=y CONFIG_USB_DWC2=y @@ -133,6 +142,7 @@ CONFIG_FPGA_BRIDGE=y CONFIG_SOCFPGA_FPGA_BRIDGE=y CONFIG_ALTERA_FREEZE_BRIDGE=y CONFIG_FPGA_REGION=y 
+CONFIG_OF_FPGA_REGION=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -160,3 +170,4 @@ CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set CONFIG_FUNCTION_TRACER=y CONFIG_DEBUG_USER=y +CONFIG_HAVE_ARM_ARCH_TIMER=y diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig index eb72c240c2486..2d88be47d3fb9 100644 --- a/arch/arm/mach-socfpga/Kconfig +++ b/arch/arm/mach-socfpga/Kconfig @@ -19,6 +19,7 @@ menuconfig ARCH_INTEL_SOCFPGA select PL310_ERRATA_753970 if PL310 select PL310_ERRATA_769419 select RESET_CONTROLLER + select HAVE_ARM_ARCH_TIMER if ARCH_INTEL_SOCFPGA config SOCFPGA_SUSPEND @@ -27,3 +28,8 @@ config SOCFPGA_SUSPEND Select this if you want to enable Suspend-to-RAM on SOCFPGA platforms. endif +config FPGADMA + tristate "FPGA DMA FIFO driver" + depends on DMA_ENGINE + help + Sample FPGA DMA driver, for testing with special FPGA FIFO image diff --git a/arch/arm/mach-socfpga/Makefile b/arch/arm/mach-socfpga/Makefile index 9ec31fad71362..01ad570d7c8e2 100644 --- a/arch/arm/mach-socfpga/Makefile +++ b/arch/arm/mach-socfpga/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_SMP) += headsmp.o platsmp.o obj-$(CONFIG_SOCFPGA_SUSPEND) += pm.o self-refresh.o obj-$(CONFIG_EDAC_ALTERA_L2C) += l2_cache.o obj-$(CONFIG_EDAC_ALTERA_OCRAM) += ocram.o +obj-$(CONFIG_FPGADMA) += fpga-dma.o diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h index 18f01190dcfd4..fb96bd78700da 100644 --- a/arch/arm/mach-socfpga/core.h +++ b/arch/arm/mach-socfpga/core.h @@ -39,4 +39,7 @@ extern unsigned long socfpga_cpu1start_addr; #define SOCFPGA_SCU_VIRT_BASE 0xfee00000 +/* Clock manager defines */ +#define SOCFPGA_ENABLE_PLL_REG 0xA0 + #endif diff --git a/arch/arm/mach-socfpga/fpga-dma.c b/arch/arm/mach-socfpga/fpga-dma.c new file mode 100644 index 0000000000000..23ed0a0d8e28f --- /dev/null +++ b/arch/arm/mach-socfpga/fpga-dma.c @@ -0,0 +1,689 @@ +/* + * FPGA DMA transfer module + * + * Copyright Altera Corporation (C) 2014. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/****************************************************************************/ + +static unsigned int max_burst_words = 16; +module_param(max_burst_words, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(max_burst_words, "Size of a burst in words " + "(in this case a word is 64 bits)"); + +static int timeout = 1000; +module_param(timeout, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 1000), " + "Pass -1 for infinite timeout"); + +#define ALT_FPGADMA_DATA_WRITE 0x00 +#define ALT_FPGADMA_DATA_READ 0x08 + +#define ALT_FPGADMA_CSR_WR_WTRMK 0x00 +#define ALT_FPGADMA_CSR_RD_WTRMK 0x04 +#define ALT_FPGADMA_CSR_BURST 0x08 +#define ALT_FPGADMA_CSR_FIFO_STATUS 0x0C +#define ALT_FPGADMA_CSR_DATA_WIDTH 0x10 +#define ALT_FPGADMA_CSR_FIFO_DEPTH 0x14 +#define ALT_FPGADMA_CSR_FIFO_CLEAR 0x18 +#define ALT_FPGADMA_CSR_ZERO 0x1C + +#define ALT_FPGADMA_CSR_BURST_TX_SINGLE (1 << 0) +#define ALT_FPGADMA_CSR_BURST_TX_BURST (1 << 1) +#define ALT_FPGADMA_CSR_BURST_RX_SINGLE (1 << 2) +#define ALT_FPGADMA_CSR_BURST_RX_BURST (1 << 3) + +#define ALT_FPGADMA_FIFO_FULL (1 << 25) +#define ALT_FPGADMA_FIFO_EMPTY (1 << 24) +#define 
ALT_FPGADMA_FIFO_USED_MASK ((1 << 24)-1) + +struct fpga_dma_pdata { + + struct platform_device *pdev; + + struct dentry *root; + + unsigned int data_reg_phy; + void __iomem *data_reg; + void __iomem *csr_reg; + + unsigned int fifo_size_bytes; + unsigned int fifo_depth; + unsigned int data_width; + unsigned int data_width_bytes; + unsigned char *read_buf; + unsigned char *write_buf; + + struct dma_chan *txchan; + struct dma_chan *rxchan; + dma_addr_t tx_dma_addr; + dma_addr_t rx_dma_addr; + dma_cookie_t rx_cookie; + dma_cookie_t tx_cookie; +}; + +static DECLARE_COMPLETION(dma_read_complete); +static DECLARE_COMPLETION(dma_write_complete); + +#define IS_DMA_READ (true) +#define IS_DMA_WRITE (false) + +static int fpga_dma_dma_start_rx(struct platform_device *pdev, + unsigned datalen, unsigned char *databuf, + u32 burst_size); +static int fpga_dma_dma_start_tx(struct platform_device *pdev, + unsigned datalen, unsigned char *databuf, + u32 burst_size); + +/* --------------------------------------------------------------------- */ + +static void dump_csr(struct fpga_dma_pdata *pdata) +{ + dev_info(&pdata->pdev->dev, "ALT_FPGADMA_CSR_WR_WTRMK %08x\n", + readl(pdata->csr_reg + ALT_FPGADMA_CSR_WR_WTRMK)); + dev_info(&pdata->pdev->dev, "ALT_FPGADMA_CSR_RD_WTRMK %08x\n", + readl(pdata->csr_reg + ALT_FPGADMA_CSR_RD_WTRMK)); + dev_info(&pdata->pdev->dev, "ALT_FPGADMA_CSR_BURST %08x\n", + readl(pdata->csr_reg + ALT_FPGADMA_CSR_BURST)); + dev_info(&pdata->pdev->dev, "ALT_FPGADMA_CSR_FIFO_STATUS %08x\n", + readl(pdata->csr_reg + ALT_FPGADMA_CSR_FIFO_STATUS)); + dev_info(&pdata->pdev->dev, "ALT_FPGADMA_CSR_DATA_WIDTH %08x\n", + readl(pdata->csr_reg + ALT_FPGADMA_CSR_DATA_WIDTH)); + dev_info(&pdata->pdev->dev, "ALT_FPGADMA_CSR_FIFO_DEPTH %08x\n", + readl(pdata->csr_reg + ALT_FPGADMA_CSR_FIFO_DEPTH)); + dev_info(&pdata->pdev->dev, "ALT_FPGADMA_CSR_ZERO %08x\n", + readl(pdata->csr_reg + ALT_FPGADMA_CSR_ZERO)); +} + +/* 
--------------------------------------------------------------------- */ + +static void recalc_burst_and_words(struct fpga_dma_pdata *pdata, + int *burst_size, int *num_words) +{ + /* adjust size and maxburst so that total bytes transferred + is a multiple of burst length and width */ + if (*num_words < max_burst_words) { + /* we have only a few words left, make it our burst size */ + *burst_size = *num_words; + } else { + /* here we may not transfer all words to FIFO, but next + call will pick them up... */ + *num_words = max_burst_words * (*num_words / max_burst_words); + *burst_size = max_burst_words; + } +} + +static int word_to_bytes(struct fpga_dma_pdata *pdata, int num_bytes) +{ + return (num_bytes + pdata->data_width_bytes - 1) + / pdata->data_width_bytes; +} + +static ssize_t dbgfs_write_dma(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct fpga_dma_pdata *pdata = file->private_data; + int ret = 0; + int bytes_to_transfer; + int num_words; + u32 burst_size; + int pad_index; + + *ppos = 0; + + /* get user data into kernel buffer */ + bytes_to_transfer = simple_write_to_buffer(pdata->write_buf, + pdata->fifo_size_bytes, ppos, + user_buf, count); + pad_index = bytes_to_transfer; + + num_words = word_to_bytes(pdata, bytes_to_transfer); + recalc_burst_and_words(pdata, &burst_size, &num_words); + /* we sometimes send more than asked for, padded with zeros */ + bytes_to_transfer = num_words * pdata->data_width_bytes; + for (; pad_index < bytes_to_transfer; pad_index++) + pdata->write_buf[pad_index] = 0; + + ret = fpga_dma_dma_start_tx(pdata->pdev, + bytes_to_transfer, pdata->write_buf, + burst_size); + if (ret) { + dev_err(&pdata->pdev->dev, "Error starting TX DMA %d\n", ret); + return ret; + } + + if (!wait_for_completion_timeout(&dma_write_complete, + msecs_to_jiffies(timeout))) { + dev_err(&pdata->pdev->dev, "Timeout waiting for TX DMA!\n"); + dev_err(&pdata->pdev->dev, + "count %d burst_size %d num_words %d 
bytes_to_transfer %d\n", + count, burst_size, num_words, bytes_to_transfer); + dmaengine_terminate_all(pdata->txchan); + return -ETIMEDOUT; + } + + return bytes_to_transfer; +} + +static ssize_t dbgfs_read_dma(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct fpga_dma_pdata *pdata = file->private_data; + int ret; + int num_words; + int num_bytes; + u32 burst_size; + + num_words = readl(pdata->csr_reg + ALT_FPGADMA_CSR_FIFO_STATUS); + num_words &= ALT_FPGADMA_FIFO_USED_MASK; + + num_bytes = num_words * pdata->data_width_bytes; + if (num_bytes > count) { + dev_dbg(&pdata->pdev->dev, + "dbgfs_read_dma num_bytes %d > count %d\n", + num_bytes, count); + num_bytes = count; + num_words = num_bytes / (pdata->data_width_bytes); + } + if (num_bytes > pdata->fifo_size_bytes) { + dev_dbg(&pdata->pdev->dev, + "dbgfs_read_dma num_bytes %d > pdata->fifo_size_bytes %d\n", + num_bytes, pdata->fifo_size_bytes); + num_bytes = pdata->fifo_size_bytes; + num_words = num_bytes / (pdata->data_width_bytes); + } + + recalc_burst_and_words(pdata, &burst_size, &num_words); + num_bytes = num_words * pdata->data_width_bytes; + + if (num_bytes > 0) { + ret = fpga_dma_dma_start_rx(pdata->pdev, num_bytes, + pdata->read_buf, burst_size); + if (ret) { + dev_err(&pdata->pdev->dev, + "Error starting RX DMA %d\n", ret); + return ret; + } + + if (!wait_for_completion_timeout(&dma_read_complete, + msecs_to_jiffies(timeout))) { + dev_err(&pdata->pdev->dev, + "Timeout waiting for RX DMA!\n"); + dmaengine_terminate_all(pdata->rxchan); + return -ETIMEDOUT; + } + *ppos = 0; + } + return simple_read_from_buffer(user_buf, count, ppos, + pdata->read_buf, num_bytes); +} + +static const struct file_operations dbgfs_dma_fops = { + .write = dbgfs_write_dma, + .read = dbgfs_read_dma, + .open = simple_open, + .llseek = no_llseek, +}; + +/* --------------------------------------------------------------------- */ + +static ssize_t dbgfs_read_csr(struct file *file, char __user *user_buf, 
+ size_t count, loff_t *ppos) +{ + struct fpga_dma_pdata *pdata = file->private_data; + dump_csr(pdata); + return 0; +} + +static const struct file_operations dbgfs_csr_fops = { + .read = dbgfs_read_csr, + .open = simple_open, + .llseek = no_llseek, +}; + +/* --------------------------------------------------------------------- */ + +static ssize_t dbgfs_write_clear(struct file *file, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct fpga_dma_pdata *pdata = file->private_data; + writel(1, pdata->csr_reg + ALT_FPGADMA_CSR_FIFO_CLEAR); + return count; +} + +static const struct file_operations dbgfs_clear_fops = { + .write = dbgfs_write_clear, + .open = simple_open, + .llseek = no_llseek, +}; + +/* --------------------------------------------------------------------- */ + +static ssize_t dbgfs_write_wrwtrmk(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct fpga_dma_pdata *pdata = file->private_data; + char buf[32]; + unsigned long val; + int ret; + + memset(buf, 0, sizeof(buf)); + + if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) + return -EFAULT; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + writel(val, pdata->csr_reg + ALT_FPGADMA_CSR_WR_WTRMK); + return count; +} + +static const struct file_operations dbgfs_wrwtrmk_fops = { + .write = dbgfs_write_wrwtrmk, + .open = simple_open, + .llseek = no_llseek, +}; + +/* --------------------------------------------------------------------- */ + +static ssize_t dbgfs_write_rdwtrmk(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct fpga_dma_pdata *pdata = file->private_data; + char buf[32]; + int ret; + unsigned long val; + + memset(buf, 0, sizeof(buf)); + + if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) + return -EFAULT; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + writel(val, pdata->csr_reg + ALT_FPGADMA_CSR_RD_WTRMK); + return count; +} + +static 
const struct file_operations dbgfs_rdwtrmk_fops = { + .write = dbgfs_write_rdwtrmk, + .open = simple_open, + .llseek = no_llseek, +}; + +/* --------------------------------------------------------------------- */ + +static int fpga_dma_register_dbgfs(struct fpga_dma_pdata *pdata) +{ + struct dentry *d; + + d = debugfs_create_dir("fpga_dma", NULL); + if (IS_ERR(d)) + return PTR_ERR(d); + if (!d) { + dev_err(&pdata->pdev->dev, "Failed to initialize debugfs\n"); + return -ENOMEM; + } + + pdata->root = d; + + debugfs_create_file("dma", S_IWUSR | S_IRUGO, pdata->root, pdata, + &dbgfs_dma_fops); + + debugfs_create_file("csr", S_IRUGO, pdata->root, pdata, + &dbgfs_csr_fops); + + debugfs_create_file("clear", S_IWUSR, pdata->root, pdata, + &dbgfs_clear_fops); + + debugfs_create_file("wrwtrmk", S_IWUSR, pdata->root, pdata, + &dbgfs_wrwtrmk_fops); + + debugfs_create_file("rdwtrmk", S_IWUSR, pdata->root, pdata, + &dbgfs_rdwtrmk_fops); + + return 0; +} + +/* --------------------------------------------------------------------- */ + +static void fpga_dma_dma_rx_done(void *arg) +{ + complete(&dma_read_complete); +} + +static void fpga_dma_dma_tx_done(void *arg) +{ + complete(&dma_write_complete); +} + +static void fpga_dma_dma_cleanup(struct platform_device *pdev, + unsigned datalen, bool do_read) +{ + struct fpga_dma_pdata *pdata = platform_get_drvdata(pdev); + if (do_read) + dma_unmap_single(&pdev->dev, pdata->rx_dma_addr, + datalen, DMA_FROM_DEVICE); + else + dma_unmap_single(&pdev->dev, pdata->tx_dma_addr, + datalen, DMA_TO_DEVICE); +} + +static int fpga_dma_dma_start_rx(struct platform_device *pdev, + unsigned datalen, unsigned char *databuf, + u32 burst_size) +{ + struct fpga_dma_pdata *pdata = platform_get_drvdata(pdev); + struct dma_chan *dmachan; + struct dma_slave_config dmaconf; + struct dma_async_tx_descriptor *dmadesc = NULL; + + int num_words; + + num_words = word_to_bytes(pdata, datalen); + + dmachan = pdata->rxchan; + memset(&dmaconf, 0, sizeof(dmaconf)); + 
dmaconf.direction = DMA_DEV_TO_MEM; + dmaconf.src_addr = pdata->data_reg_phy + ALT_FPGADMA_DATA_READ; + dmaconf.src_addr_width = 8; + dmaconf.src_maxburst = burst_size; + + pdata->rx_dma_addr = dma_map_single(&pdev->dev, + databuf, datalen, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, pdata->rx_dma_addr)) { + dev_err(&pdev->dev, "dma_map_single for RX failed\n"); + return -EINVAL; + } + + /* set up slave config */ + dmaengine_slave_config(dmachan, &dmaconf); + + /* get dmadesc */ + dmadesc = dmaengine_prep_slave_single(dmachan, + pdata->rx_dma_addr, + datalen, + dmaconf.direction, + DMA_PREP_INTERRUPT); + if (!dmadesc) { + fpga_dma_dma_cleanup(pdev, datalen, IS_DMA_READ); + return -ENOMEM; + } + dmadesc->callback = fpga_dma_dma_rx_done; + dmadesc->callback_param = pdata; + + /* start DMA */ + pdata->rx_cookie = dmaengine_submit(dmadesc); + if (dma_submit_error(pdata->rx_cookie)) + dev_err(&pdev->dev, "rx_cookie error on dmaengine_submit\n"); + dma_async_issue_pending(dmachan); + + return 0; +} + +static int fpga_dma_dma_start_tx(struct platform_device *pdev, + unsigned datalen, unsigned char *databuf, + u32 burst_size) +{ + struct fpga_dma_pdata *pdata = platform_get_drvdata(pdev); + struct dma_chan *dmachan; + struct dma_slave_config dmaconf; + struct dma_async_tx_descriptor *dmadesc = NULL; + + int num_words; + + num_words = word_to_bytes(pdata, datalen); + + dmachan = pdata->txchan; + memset(&dmaconf, 0, sizeof(dmaconf)); + dmaconf.direction = DMA_MEM_TO_DEV; + dmaconf.dst_addr = pdata->data_reg_phy + ALT_FPGADMA_DATA_WRITE; + dmaconf.dst_addr_width = 8; + dmaconf.dst_maxburst = burst_size; + pdata->tx_dma_addr = dma_map_single(&pdev->dev, + databuf, datalen, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, pdata->tx_dma_addr)) { + dev_err(&pdev->dev, "dma_map_single for TX failed\n"); + return -EINVAL; + } + + /* set up slave config */ + dmaengine_slave_config(dmachan, &dmaconf); + + /* get dmadesc */ + dmadesc = dmaengine_prep_slave_single(dmachan, 
+ pdata->tx_dma_addr, + datalen, + dmaconf.direction, + DMA_PREP_INTERRUPT); + if (!dmadesc) { + fpga_dma_dma_cleanup(pdev, datalen, IS_DMA_WRITE); + return -ENOMEM; + } + dmadesc->callback = fpga_dma_dma_tx_done; + dmadesc->callback_param = pdata; + + /* start DMA */ + pdata->tx_cookie = dmaengine_submit(dmadesc); + if (dma_submit_error(pdata->tx_cookie)) + dev_err(&pdev->dev, "tx_cookie error on dmaengine_submit\n"); + dma_async_issue_pending(dmachan); + + return 0; +} + +static void fpga_dma_dma_shutdown(struct fpga_dma_pdata *pdata) +{ + if (pdata->txchan) { + dmaengine_terminate_all(pdata->txchan); + dma_release_channel(pdata->txchan); + } + if (pdata->rxchan) { + dmaengine_terminate_all(pdata->rxchan); + dma_release_channel(pdata->rxchan); + } + pdata->rxchan = pdata->txchan = NULL; +} + +static int fpga_dma_dma_init(struct fpga_dma_pdata *pdata) +{ + struct platform_device *pdev = pdata->pdev; + + pdata->txchan = dma_request_slave_channel(&pdev->dev, "tx"); + if (pdata->txchan) + dev_dbg(&pdev->dev, "TX channel %s %d selected\n", + dma_chan_name(pdata->txchan), pdata->txchan->chan_id); + else + dev_err(&pdev->dev, "could not get TX dma channel\n"); + + pdata->rxchan = dma_request_slave_channel(&pdev->dev, "rx"); + if (pdata->rxchan) + dev_dbg(&pdev->dev, "RX channel %s %d selected\n", + dma_chan_name(pdata->rxchan), pdata->rxchan->chan_id); + else + dev_err(&pdev->dev, "could not get RX dma channel\n"); + + if (!pdata->rxchan && !pdata->txchan) + /* both channels not there, maybe it's + bcs dma isn't loaded... 
*/ + return -EPROBE_DEFER; + + if (!pdata->rxchan || !pdata->txchan) + return -ENOMEM; + + return 0; +} + +/* --------------------------------------------------------------------- */ + +static void __iomem *request_and_map(struct platform_device *pdev, + const struct resource *res) +{ + void __iomem *ptr; + + if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), + pdev->name)) { + dev_err(&pdev->dev, "unable to request %s\n", res->name); + return NULL; + } + + ptr = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); + if (!ptr) + dev_err(&pdev->dev, "ioremap_nocache of %s failed!", res->name); + + return ptr; +} + +static int fpga_dma_remove(struct platform_device *pdev) +{ + struct fpga_dma_pdata *pdata = platform_get_drvdata(pdev); + dev_dbg(&pdev->dev, "fpga_dma_remove\n"); + debugfs_remove_recursive(pdata->root); + fpga_dma_dma_shutdown(pdata); + return 0; +} + +static int fpga_dma_probe(struct platform_device *pdev) +{ + struct resource *csr_reg, *data_reg; + struct fpga_dma_pdata *pdata; + int ret; + + pdata = devm_kzalloc(&pdev->dev, sizeof(struct fpga_dma_pdata), + GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + csr_reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); + data_reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "data"); + if (!csr_reg || !data_reg) { + dev_err(&pdev->dev, "registers not completely defined\n"); + return -EINVAL; + } + + pdata->csr_reg = request_and_map(pdev, csr_reg); + if (!pdata->csr_reg) + return -ENOMEM; + + pdata->data_reg = request_and_map(pdev, data_reg); + if (!pdata->data_reg) + return -ENOMEM; + pdata->data_reg_phy = data_reg->start; + + /* read HW and calculate fifo size in bytes */ + pdata->fifo_depth = readl(pdata->csr_reg + ALT_FPGADMA_CSR_FIFO_DEPTH); + pdata->data_width = readl(pdata->csr_reg + ALT_FPGADMA_CSR_DATA_WIDTH); + /* 64-bit bus to FIFO */ + pdata->data_width_bytes = pdata->data_width / sizeof(u64); + pdata->fifo_size_bytes = pdata->fifo_depth * 
pdata->data_width_bytes; + + pdata->read_buf = devm_kzalloc(&pdev->dev, pdata->fifo_size_bytes, + GFP_KERNEL); + if (!pdata->read_buf) + return -ENOMEM; + + pdata->write_buf = devm_kzalloc(&pdev->dev, pdata->fifo_size_bytes, + GFP_KERNEL); + if (!pdata->write_buf) + return -ENOMEM; + + ret = fpga_dma_register_dbgfs(pdata); + if (ret) + return ret; + + pdata->pdev = pdev; + platform_set_drvdata(pdev, pdata); + + ret = fpga_dma_dma_init(pdata); + if (ret) { + fpga_dma_remove(pdev); + return ret; + } + + /* OK almost ready, set up the watermarks */ + /* we may need to tweak this for single/burst, etc */ + writel(pdata->fifo_depth - max_burst_words, + pdata->csr_reg + ALT_FPGADMA_CSR_WR_WTRMK); + /* we use read watermark of 0 so that rx_burst line + is always asserted, i.e. no single-only requests */ + writel(0, pdata->csr_reg + ALT_FPGADMA_CSR_RD_WTRMK); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id fpga_dma_of_match[] = { + {.compatible = "altr,fpga-dma",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, fpga_dma_of_match); +#endif + +static struct platform_driver fpga_dma_driver = { + .probe = fpga_dma_probe, + .remove = fpga_dma_remove, + .driver = { + .name = "fpga_dma", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(fpga_dma_of_match), + }, +}; + +static int __init fpga_dma_init(void) +{ + return platform_driver_probe(&fpga_dma_driver, fpga_dma_probe); +} + +static void __exit fpga_dma_exit(void) +{ + platform_driver_unregister(&fpga_dma_driver); +} + +late_initcall(fpga_dma_init); +module_exit(fpga_dma_exit); + +MODULE_AUTHOR("Graham Moore (Altera)"); +MODULE_DESCRIPTION("Altera FPGA DMA Example Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c index 4332af2d8b862..5cbce7480eec3 100644 --- a/arch/arm/mach-socfpga/socfpga.c +++ b/arch/arm/mach-socfpga/socfpga.c @@ -18,6 +18,7 @@ void __iomem *sys_manager_base_addr; void __iomem *rst_manager_base_addr; void __iomem 
*sdr_ctl_base_addr; unsigned long socfpga_cpu1start_addr; +void __iomem *clkmgr_base_addr; static void __init socfpga_sysmgr_init(void) { @@ -38,6 +39,10 @@ static void __init socfpga_sysmgr_init(void) np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr"); rst_manager_base_addr = of_iomap(np, 0); + np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr"); + clkmgr_base_addr = of_iomap(np, 0); + WARN_ON(!clkmgr_base_addr); + np = of_find_compatible_node(NULL, NULL, "altr,sdr-ctl"); sdr_ctl_base_addr = of_iomap(np, 0); } @@ -69,6 +74,9 @@ static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd) { u32 temp; + /* Turn on all periph PLL clocks */ + writel(0xffff, clkmgr_base_addr + SOCFPGA_ENABLE_PLL_REG); + temp = readl(rst_manager_base_addr + SOCFPGA_RSTMGR_CTRL); if (mode == REBOOT_WARM) diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index 0def0b0daaf73..dee14eeea978a 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi @@ -6,6 +6,7 @@ /dts-v1/; #include #include +#include #include / { @@ -74,10 +75,21 @@ compatible = "intel,stratix10-svc"; method = "smc"; memory-region = <&service_reserved>; + interrupts = ; + interrupt-parent = <&intc>; fpga_mgr: fpga-mgr { compatible = "intel,stratix10-soc-fpga-mgr"; }; + + fcs: fcs { + compatible = "intel,stratix10-soc-fcs"; + platform = "stratix10"; + }; + + temp_volt: hwmon { + compatible = "intel,soc64-hwmon"; + }; }; }; @@ -265,7 +277,7 @@ i2c0: i2c@ffc02800 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc02800 0x100>; interrupts = <0 103 4>; resets = <&rst I2C0_RESET>; @@ -276,7 +288,7 @@ i2c1: i2c@ffc02900 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc02900 
0x100>; interrupts = <0 104 4>; resets = <&rst I2C1_RESET>; @@ -287,8 +299,8 @@ i2c2: i2c@ffc02a00 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; reg = <0xffc02a00 0x100>; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; interrupts = <0 105 4>; resets = <&rst I2C2_RESET>; clocks = <&clkmgr STRATIX10_L4_SP_CLK>; @@ -298,7 +310,7 @@ i2c3: i2c@ffc02b00 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc02b00 0x100>; interrupts = <0 106 4>; resets = <&rst I2C3_RESET>; @@ -309,7 +321,7 @@ i2c4: i2c@ffc02c00 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc02c00 0x100>; interrupts = <0 107 4>; resets = <&rst I2C4_RESET>; diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_qse.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10_qse.dtsi new file mode 100644 index 0000000000000..ca5d307564fc4 --- /dev/null +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_qse.dtsi @@ -0,0 +1,116 @@ +/* +* Add this piece of dtsi fragment as #include "socfpga_stratix10_qse.dtsi" +* in the file socfpga_stratix10_socdk.dts. Compile it in the kernel along with +* socfpga_stratix10.dtsi. 
+*/ + +/{ + soc { + clocks { + ptp_ctrl_10G_clk: ptp_ctrl_10G_clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <125000000>; + clock-output-names = "ptp_ctrl_10G_clk-clk"; + }; + }; + + i2c0: i2c@ffc02800 { + status = "okay"; + }; + + s10_hps_bridges: bridge@80000000 { + compatible = "simple-bus"; + reg = <0x80000000 0x60000000>, + <0xf9000000 0x00100000>; + reg-names = "axi_h2f", "axi_h2f_lw"; + #address-cells = <0x2>; + #size-cells = <0x1>; + ranges = <0x00000000 0x00000000 0x80000000 0x00040000>, + <0x00000001 0x00020000 0xf9020000 0x00001000>, + <0x00000001 0x00022000 0xf9022000 0x00002000>, + <0x00000001 0x00030000 0xf9030000 0x00000040>, + <0x00000001 0x00002020 0xf9002020 0x00000020>, + <0x00000001 0x00002000 0xf9002000 0x00000020>, + <0x00000001 0x00002120 0xf9002120 0x00000020>, + <0x00000001 0x00002100 0xf9002100 0x00000020>, + <0x00000001 0x00002140 0xf9002140 0x00000020>, + <0x00000001 0x00030100 0xf9030100 0x00000010>, + <0x00000001 0x00000300 0xf9000300 0x00000010>, + <0x00000001 0x00000310 0xf9000310 0x00000010>; + + qse_0_qse: ethernet@0x100020000 { + compatible = "altr,qse-msgdma-2.0"; + reg-names = "control_port", "xcvr_ctrl", "tod_ctrl", + "tx_csr", "tx_pref", "rx_csr", "rx_pref", + "rx_fifo", "phy_reconfig_csr"; + reg = <0x00000001 0x00020000 0x00001000>, + <0x00000001 0x00022000 0x00002000>, + <0x00000001 0x00030000 0x00000040>, + <0x00000001 0x00002020 0x00000020>, + <0x00000001 0x00002000 0x00000020>, + <0x00000001 0x00002120 0x00000020>, + <0x00000001 0x00002100 0x00000020>, + <0x00000001 0x00002140 0x00000020>, + <0x00000001 0x00030100 0x00000010>; + dma-coherent; + phy-mode = "10gbase-r"; + sfp = <&sfp_eth0>; + clocks = <&ptp_ctrl_10G_clk>; + clock-names = "tod_clk"; + interrupt-parent = <&intc>; + interrupt-names = "tx_irq", "rx_irq"; + interrupts = <0 21 4>, + <0 22 4>; + rx-fifo-depth = <0x20000>; + tx-fifo-depth = <0x1000>; + rx-fifo-almost-full = <0x10000>; + rx-fifo-almost-empty = <0x8000>; + 
local-mac-address = [00 00 00 00 00 00]; + altr,tx-pma-delay-ns = <0xD>; + altr,rx-pma-delay-ns = <0x8>; + altr,tx-pma-delay-fns = <0x24D>; + altr,rx-pma-delay-fns = <0x3E97>; + altr,has-ptp; + status = "okay"; + }; + + sfp_eth0: sfp-eth0 { + compatible = "sff,sfp"; + i2c-bus = <&i2c0>; + los-gpio = <&mge_10g_status_pio 0 GPIO_ACTIVE_HIGH>; + mod-def0-gpio = <&mge_10g_status_pio 2 GPIO_ACTIVE_LOW>; + maximum-power-milliwatt = <1000>; + pinctrl-names = "default"; + pinctrl-0 = <&sfp_ctrl_pio &mge_10g_status_pio>; + tx-disable-gpio = <&sfp_ctrl_pio 0 GPIO_ACTIVE_HIGH>; + tx-fault-gpio = <&mge_10g_status_pio 1 GPIO_ACTIVE_HIGH>; + rate-select0-gpio = <&sfp_ctrl_pio 2 GPIO_ACTIVE_HIGH>; + }; + + sfp_ctrl_pio: gpio@300 { + compatible = "altr,pio-1.0"; + reg = <0x00000001 0x00000300 0x10>; + interrupt-parent = <&intc>; + interrupts = <0 23 4>; + altr,gpio-bank-width = <4>; + altr,interrupt-type = <2>; + altr,interrupt_type = <2>; + #gpio-cells = <2>; + gpio-controller; + }; + + mge_10g_status_pio: gpio@310 { + compatible = "altr,pio-1.0"; + reg = <0x00000001 0x00000310 0x10>; + interrupt-parent = <&intc>; + interrupts = <0 24 4>; + altr,gpio-bank-width = <4>; + altr,interrupt-type = <2>; + altr,interrupt_type = <2>; + #gpio-cells = <2>; + gpio-controller; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts index 4eee777ef1a14..5ab63daf61fef 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts @@ -190,11 +190,14 @@ cdns,tsd2d-ns = <50>; cdns,tchsh-ns = <4>; cdns,tslch-ns = <4>; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; + rsu-handle = <&qspi_boot>; qspi_boot: partition@0 { label = "Boot and fpga data"; @@ -208,3 +211,35 @@ }; }; }; + +&temp_volt { + voltage { + #address-cells = <1>; + #size-cells = <0>; + input@2 
{ + label = "0.8V VCC"; + reg = <2>; + }; + + input@3 { + label = "1.0V VCCIO"; + reg = <3>; + }; + + input@6 { + label = "0.9V VCCERAM"; + reg = <6>; + }; + }; + + temperature { + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + label = "Main Die SDM"; + reg = <0x0>; + }; + }; +}; + diff --git a/arch/arm64/boot/dts/intel/Makefile b/arch/arm64/boot/dts/intel/Makefile index d39cfb723f5b6..cf71020080720 100644 --- a/arch/arm64/boot/dts/intel/Makefile +++ b/arch/arm64/boot/dts/intel/Makefile @@ -1,7 +1,28 @@ # SPDX-License-Identifier: GPL-2.0-only dtb-$(CONFIG_ARCH_INTEL_SOCFPGA) += socfpga_agilex_n6000.dtb \ socfpga_agilex_socdk.dtb \ + socfpga_agilex_socdk_atfboot.dtb \ socfpga_agilex_socdk_nand.dtb \ + socfpga_agilex_bittware.dtb \ + socfpga_agilex3_socdk.dtb \ socfpga_agilex5_socdk.dtb \ + socfpga_agilex_n6010.dtb \ + socfpga_agilex5_socdk.dtb \ + socfpga_agilex5_socdk_b0.dtb \ + socfpga_agilex5_socdk_debug.dtb \ + socfpga_agilex5_socdk_emmc.dtb \ + socfpga_agilex5_socdk_nand.dtb \ + socfpga_agilex5_socdk_nand_b0.dtb \ + socfpga_agilex5_socdk_nand_b0_atfboot.dtb \ + socfpga_agilex5_socdk_swvp_b0.dtb \ + socfpga_agilex5_socdk_tsn_cfg2.dtb \ + socfpga_agilex5_socdk_tsn_cfg2_b0.dtb \ + socfpga_agilex5_socdk_modular.dtb \ + socfpga_agilex7m_socdk.dtb \ + socfpga_agilex7f_socdk_pcie_root_port.dtb \ + socfpga_agilex7i_socdk_pcie_root_port.dtb \ + socfpga_agilex7m_socdk_pcie_root_port.dtb \ + socfpga_agilex7f_socdk_multiqspi.dtb \ socfpga_n5x_socdk.dtb dtb-$(CONFIG_ARCH_KEEMBAY) += keembay-evm.dtb +dtb-$(CONFIG_ARCH_DM) += socfpga_dm_simics.dtb diff --git a/arch/arm64/boot/dts/intel/fm87_ftile_10g_2port_ptp.dtsi b/arch/arm64/boot/dts/intel/fm87_ftile_10g_2port_ptp.dtsi new file mode 100644 index 0000000000000..c835c331f07c0 --- /dev/null +++ b/arch/arm64/boot/dts/intel/fm87_ftile_10g_2port_ptp.dtsi @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(C) 2022, Intel Corporation + */ + +/* Add this piece of dtsi fragment as #include 
"fm87_ftile_25g_ptp.dtsi" + * in the file socfpga_fm87_ftile_25g_ptp.dts. Compile it in the kernel along with + * socfpga_agilex.dtsi + */ + +/{ + soc { + agilex_hps_bridges: bus@88000000 { + compatible = "simple-bus"; + reg = <0x80000000 0x60000000>, + <0xf9000000 0x00100000>; + reg-names = "axi_h2f", "axi_h2f_lw"; + #address-cells = <2>; + #size-cells = <1>; + ranges = <0x00000000 0x00000000 0xf9000000 0x00001000>, + <0x00000001 0x00000000 0x80000000 0x00040000>, + <0x00000001 0x04040050 0x84040050 0x00000010>, + <0x00000001 0x04040040 0x84040040 0x00000010>; + + + qsfp_eth0: qsfp-eth0 { + compatible = "sff,qsfp"; + i2c-bus = <&i2c0>; + qsfpdd_initmode-gpio = <&qsfpdd_ctrl_pio 1 GPIO_ACTIVE_HIGH>; + qsfpdd_modseln-gpio = <&qsfpdd_ctrl_pio 2 GPIO_ACTIVE_LOW>; + qsfpdd_modprsn-gpio = <&qsfpdd_status_pio 0 GPIO_ACTIVE_LOW>; + qsfpdd_resetn-gpio = <&qsfpdd_ctrl_pio 0 GPIO_ACTIVE_HIGH>; + qsfpdd_intn-gpio = <&qsfpdd_status_pio 1 GPIO_ACTIVE_LOW>; + agilex_hps_spim = <&qsfpdd_ctrl_pio 3 GPIO_ACTIVE_HIGH>; + maximum-power-milliwatt = <1000>; + status = "disable"; +/* status = "okay"; */ + }; + + qsfpdd_status_pio: gpio@4040050 { + compatible = "altr,pio-1.0"; + reg = <0x00000001 0x04040050 0x10>; + interrupt-parent = <&intc>; + interrupts = <0 22 4>; + altr,gpio-bank-width = <4>; + altr,interrupt-type = <2>; + + altr,interrupt_type = <2>; + #gpio-cells = <2>; + gpio-controller; + status = "okay"; + /*status = "disable";*/ + }; + + qsfpdd_ctrl_pio: gpio@4040040 { + compatible = "altr,pio-1.0"; + reg = <0x00000001 0x04040040 0x10>; + interrupt-parent = <&intc>; + interrupts = <0 23 4>; + altr,gpio-bank-width = <4>; + altr,interrupt-type = <2>; + altr,interrupt_type = <2>; + #gpio-cells = <2>; + gpio-controller; + status = "okay"; + /*status = "disable"; */ + }; + + }; + clocks { + tod_in_clock: tod_in_clock { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <156250000>; + clock-output-names = "tod_in_clock"; + }; + }; + + ptp_clockcleaner: 
ptp_clockcleaner { + compatible = "intel, freq-steering-zl-i2c"; + dpll-name = "zl30733"; + interface = "i2c"; + bus-num = <1>; + bus-address = <0x70>; + }; + + tod_0_clk: tod_0_clk { + compatible = "intel, tod"; + reg-names = "tod_ctrl", + "pps_ctrl"; + reg = <0x84040000 0x00000040>, + <0x84040100 0x00000040>; + interrupt-parent = <&intc>; + interrupt-names = "pps_irq"; + interrupts = <0 19 4>; + clocks = <&tod_in_clock>; + clock-names = "tod_clock"; + status = "okay"; + altr,has-ptp-clockcleaner; + clock-cleaner = <&ptp_clockcleaner>; + }; + + hssiss_0_hssiss: hssiss_0_hssiss { + compatible = "intel, hssiss-1.0"; + reg-names = "sscsr"; + reg = <0x88000000 0x04000000>; + reset-mode ="reg"; + }; + hssi_0_eth: hssi_0_eth@88000000 { + reg-names = "tx_pref" , + "tx_csr" , + "tx_fifo" , + "rx_pref" , + "rx_csr" , + "rx_fifo" ; + + reg = <0x8c480000 0x00000020>, + <0x8c480020 0x00000020>, + <0x8c480040 0x00000020>, + <0x8c480080 0x00000020>, + <0x8c4800A0 0x00000020>, + <0x8c4800C0 0x00000010>; + + compatible = "altr,hssi-ftile-1.0"; + tile_chan = <0x8>; + hssi_port = <0x8>; + phy-mode = "10gbase-r"; + tod = <&tod_0_clk>; + hssiss = <&hssiss_0_hssiss>; + pma_type = <0x0>; // FGT - 0x00, FHT = 0x1000 + altr,tx-pma-delay-ns = <0xD>; + altr,rx-pma-delay-ns = <0x8>; + altr,tx-pma-delay-fns = <0x24D>; + altr,rx-pma-delay-fns = <0x3E97>; + altr,tx-external-phy-delay-ns = <0x0>; + altr,rx-external-phy-delay-ns = <0x0>; + fec-cw-pos-rx = <0x0>; + fec-type="no-fec"; + interrupt-parent = <&intc>; + interrupt-names = "tx_irq", "rx_irq"; + interrupts = <0 24 4>, <0 25 4>; + qsfp-lane = <0x0>; + rx-fifo-depth = <0x4000>; + tx-fifo-depth = <0x1000>; + rx-fifo-almost-full = <0x2000>; + rx-fifo-almost-empty = <0x1000>; + altr,has-ptp; + ptp_accu_mode = "Advanced"; + ptp_tx_routing_adj = <0xDE9F>; //56,991 + ptp_rx_routing_adj = <0xD625>; //54,821 + status = "okay"; + fixed-link { + speed =<10000>; + full-duplex; + }; + }; + hssi_1_eth: hssi_1_eth@88000000 { + reg-names = "tx_pref" , + 
"tx_csr" , + "tx_fifo" , + "rx_pref" , + "rx_csr" , + "rx_fifo" ; + + reg = <0x8c4C0000 0x00000020>, + <0x8c4C0020 0x00000020>, + <0x8c4C0040 0x00000020>, + <0x8c4C0080 0x00000020>, + <0x8c4C00A0 0x00000020>, + <0x8c4C00C0 0x00000010>; + + compatible = "altr,hssi-ftile-1.0"; + tile_chan = <0x9>; + hssi_port = <0x9>; + phy-mode = "10gbase-r"; + tod = <&tod_0_clk>; + hssiss = <&hssiss_0_hssiss>; + pma_type = <0x0>; // FGT - 0x00, FHT = 0x1000 + altr,tx-pma-delay-ns = <0xD>; + altr,rx-pma-delay-ns = <0x8>; + altr,tx-pma-delay-fns = <0x24D>; + altr,rx-pma-delay-fns = <0x3E97>; + altr,tx-external-phy-delay-ns = <0x0>; + altr,rx-external-phy-delay-ns = <0x0>; + fec-cw-pos-rx = <0x0>; + fec-type="no-fec"; + interrupt-parent = <&intc>; + interrupt-names = "tx_irq", "rx_irq"; + interrupts = <0 26 4>, <0 27 4>; + qsfp-lane = <0x0>; + rx-fifo-depth = <0x4000>; + tx-fifo-depth = <0x1000>; + rx-fifo-almost-full = <0x2000>; + rx-fifo-almost-empty = <0x1000>; + altr,has-ptp; + ptp_accu_mode = "Advanced"; + ptp_tx_routing_adj = <0xDE3C>; //56,892 + ptp_rx_routing_adj = <0xD73F>; //55,103 + status = "okay"; + fixed-link { + speed =<10000>; + full-duplex; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi index 2a5eeb21da474..45b56cb4a3162 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi +++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi @@ -65,10 +65,26 @@ compatible = "intel,agilex-svc"; method = "smc"; memory-region = <&service_reserved>; + interrupts = ; + interrupt-parent = <&intc>; fpga_mgr: fpga-mgr { compatible = "intel,agilex-soc-fpga-mgr"; }; + + fcs_config: fcs-config { + compatible = "intel,agilex-soc-fcs-config"; + }; + + fcs: fcs { + compatible = "intel,agilex-soc-fcs"; + platform = "agilex"; + status = "disabled"; + }; + + temp_volt: hwmon { + compatible = "intel,soc64-hwmon"; + }; }; }; @@ -101,10 +117,13 @@ compatible = "arm,gic-400", "arm,cortex-a15-gic"; #interrupt-cells = <3>; 
interrupt-controller; + interrupt-parent = <&intc>; reg = <0x0 0xfffc1000 0x0 0x1000>, <0x0 0xfffc2000 0x0 0x2000>, <0x0 0xfffc4000 0x0 0x2000>, <0x0 0xfffc6000 0x0 0x2000>; + /* VGIC maintenance interrupt */ + interrupts = ; }; clocks { @@ -149,7 +168,7 @@ compatible = "usb-nop-xceiv"; }; - soc@0 { + soc0: soc { #address-cells = <1>; #size-cells = <1>; compatible = "simple-bus"; @@ -260,7 +279,7 @@ i2c0: i2c@ffc02800 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc02800 0x100>; interrupts = ; resets = <&rst I2C0_RESET>; @@ -271,7 +290,7 @@ i2c1: i2c@ffc02900 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc02900 0x100>; interrupts = ; resets = <&rst I2C1_RESET>; @@ -282,7 +301,7 @@ i2c2: i2c@ffc02a00 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc02a00 0x100>; interrupts = ; resets = <&rst I2C2_RESET>; @@ -293,7 +312,7 @@ i2c3: i2c@ffc02b00 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc02b00 0x100>; interrupts = ; resets = <&rst I2C3_RESET>; @@ -304,7 +323,7 @@ i2c4: i2c@ffc02c00 { #address-cells = <1>; #size-cells = <0>; - compatible = "snps,designware-i2c"; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; reg = <0xffc02c00 0x100>; interrupts = ; resets = <&rst I2C4_RESET>; @@ -438,7 +457,6 @@ ; stream-match-mask = <0x7ff0>; clocks = <&clkmgr AGILEX_MPU_CCU_CLK>, - <&clkmgr AGILEX_L3_MAIN_FREE_CLK>, <&clkmgr AGILEX_L4_MAIN_CLK>; status = "disabled"; }; @@ -454,6 +472,8 @@ reg-io-width = <4>; num-cs = <4>; clocks = <&clkmgr AGILEX_L4_MAIN_CLK>; + dmas = <&pdma 16>, <&pdma 17>; + dma-names ="tx", "rx"; status = "disabled"; }; @@ -468,6 +488,8 @@ 
reg-io-width = <4>; num-cs = <4>; clocks = <&clkmgr AGILEX_L4_MAIN_CLK>; + dmas = <&pdma 20>, <&pdma 21>; + dma-names ="tx", "rx"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex3_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_agilex3_socdk.dts new file mode 100644 index 0000000000000..38af7c651bff2 --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex3_socdk.dts @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025, Altera Corporation + */ +#include "socfpga_agilex5.dtsi" + +/ { + model = "SoCFPGA Agilex3 SoCDK"; + compatible = "intel,socfpga-agilex3-socdk", "intel,socfpga-agilex3"; + + aliases { + serial0 = &uart0; + ethernet0 = &gmac0; + ethernet1 = &gmac1; + ethernet2 = &gmac2; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + leds { + compatible = "gpio-leds"; + + led0 { + label = "hps_led0"; + gpios = <&porta 1 GPIO_ACTIVE_HIGH>; + }; + + led1 { + label = "hps_led1"; + gpios = <&porta 12 GPIO_ACTIVE_HIGH>; + }; + + }; + + memory { + device_type = "memory"; + /* We expect the bootloader to fill in the reg */ + reg = <0x0 0x80000000 0x0 0x0>; + }; +}; + +&cpu2 { + status = "disabled"; +}; + +&cpu3 { + status = "disabled"; +}; + +&gpio0 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&gmac2 { + status = "okay"; + phy-mode = "rgmii"; + phy-handle = <&emac2_phy0>; + max-frame-size = <9000>; + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + emac2_phy0: ethernet-phy@0 { + reg = <0>; + rxc-skew-ps = <0>; + rxdv-skew-ps = <0>; + rxd0-skew-ps = <0>; + rxd1-skew-ps = <0>; + rxd2-skew-ps = <0>; + rxd3-skew-ps = <0>; + txc-skew-ps = <0>; + txen-skew-ps = <60>; + txd0-skew-ps = <60>; + txd1-skew-ps = <60>; + txd2-skew-ps = <60>; + txd3-skew-ps = <60>; + }; + }; +}; + +&i3c0 { + status = "okay"; +}; + +&i3c1 { + status = "okay"; +}; + +&mmc { + status = "okay"; + bus-width = <4>; + no-1-8-v; + cap-sd-highspeed; + cap-mmc-highspeed; + 
disable-wp; + sd-uhs-sdr50; + cdns,phy-use-ext-lpbk-dqs = <1>; + cdns,phy-use-lpbk-dqs = <1>; + cdns,phy-use-phony-dqs = <1>; + cdns,phy-use-phony-dqs-cmd = <1>; + cdns,phy-io-mask-always-on = <0>; + cdns,phy-io-mask-end = <5>; + cdns,phy-io-mask-start = <0>; + cdns,phy-data-select-oe-end = <1>; + cdns,phy-sync-method = <1>; + cdns,phy-sw-half-cycle-shift = <0>; + cdns,phy-rd-del-sel = <52>; + cdns,phy-underrun-suppress = <1>; + cdns,phy-gate-cfg-always-on = <1>; + cdns,phy-param-dll-bypass-mode = <1>; + cdns,phy-param-phase-detect-sel = <2>; + cdns,phy-param-dll-start-point = <254>; + cdns,phy-read-dqs-cmd-delay = <0>; + cdns,phy-clk-wrdqs-delay = <0>; + cdns,phy-clk-wr-delay = <0>; + cdns,phy-read-dqs-delay = <0>; + cdns,phy-phony-dqs-timing = <0>; + cdns,hrs09-rddata-en = <1>; + cdns,hrs09-rdcmd-en = <1>; + cdns,hrs09-extended-wr-mode = <1>; + cdns,hrs09-extended-rd-mode = <1>; + cdns,hrs10-hcsdclkadj = <3>; + cdns,hrs16-wrdata1-sdclk-dly = <0>; + cdns,hrs16-wrdata0-sdclk-dly = <0>; + cdns,hrs16-wrcmd1-sdclk-dly = <0>; + cdns,hrs16-wrcmd0-sdclk-dly = <0>; + cdns,hrs16-wrdata1-dly = <0>; + cdns,hrs16-wrdata0-dly = <0>; + cdns,hrs16-wrcmd1-dly = <0>; + cdns,hrs16-wrcmd0-dly = <0>; + cdns,hrs07-rw-compensate = <10>; + cdns,hrs07-idelay-val = <0>; +}; + +&osc1 { + clock-frequency = <25000000>; +}; + +&pmu0 { + cpus = <&cpu0>, <&cpu1>; +}; + +&qspi { + status = "okay"; + cdns,fifo-depth = <0x400>; + flash@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <100000000>; + m25p,fast-read; + cdns,read-delay = <2>; + cdns,tshsl-ns = <50>; + cdns,tsd2d-ns = <50>; + cdns,tchsh-ns = <4>; + cdns,tslch-ns = <4>; + spi-tx-bus-width=<4>; + spi-rx-bus-width=<4>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + rsu-handle = <&qspi_boot>; + + qspi_boot: partition@0 { + label = "u-boot"; + reg = <0x0 0x00600000>; + }; + + root: partition@4200000 { + label = "root"; + 
reg = <0x00600000 0x03a00000>; + }; + }; + }; +}; + +&smmu { + status = "okay"; +}; + +&temp_volt { + voltage { + #address-cells = <1>; + #size-cells = <0>; + input@2 { + label = "0.8V VCC"; + reg = <2>; + }; + + input@3 { + label = "1.8V VCCIO_SDM"; + reg = <3>; + }; + + input@4 { + label = "1.8V VCCPT"; + reg = <4>; + }; + + input@5 { + label = "1.2V VCCCRCORE"; + reg = <5>; + }; + + input@6 { + label = "0.9V VCCH"; + reg = <6>; + }; + + input@7 { + label = "0.8V VCCL"; + reg = <7>; + }; + }; + + temperature { + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + label = "Main Die SDM"; + reg = <0x0>; + }; + + input@10001 { + label = "Main Die corner bottom left max"; + reg = <0x10000>; + }; + + input@30001 { + label = "Main Die corner bottom right max"; + reg = <0x30000>; + }; + + input@40001 { + label = "Main Die corner top right max"; + reg = <0x40000>; + }; + }; +}; + +&uart0 { + status = "okay"; +}; + +&usb0 { + status = "okay"; + disable-over-current; +}; + +&usb31 { + status = "okay"; + dr_mode = "host"; + maximum-speed = "high-speed"; +}; + +&watchdog0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex5.dtsi index 1162978329c16..6661540c87aab 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex5.dtsi +++ b/arch/arm64/boot/dts/intel/socfpga_agilex5.dtsi @@ -37,6 +37,7 @@ reg = <0x0>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&L2>; }; cpu1: cpu@1 { @@ -44,6 +45,7 @@ reg = <0x100>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&L2>; }; cpu2: cpu@2 { @@ -51,6 +53,7 @@ reg = <0x200>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&L2>; }; cpu3: cpu@3 { @@ -58,7 +61,19 @@ reg = <0x300>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&L2>; }; + + L2: l2-cache { + compatible = "cache"; + cache-level = <2>; + next-level-cache = <&L3>; + }; + + L3: l3-cache { + compatible = "cache"; + }; + }; psci 
{ @@ -75,13 +90,17 @@ #address-cells = <2>; #size-cells = <2>; interrupt-controller; + interrupt-parent = <&intc>; #redistributor-regions = <1>; redistributor-stride = <0x0 0x20000>; + /* VGIC maintenance interrupt */ + interrupts = ; its: msi-controller@1d040000 { compatible = "arm,gic-v3-its"; reg = <0x0 0x1d040000 0x0 0x20000>; msi-controller; + dma-32bit-quirk; #msi-cells = <1>; }; }; @@ -133,6 +152,101 @@ compatible = "usb-nop-xceiv"; }; + pmu0: pmu { + compatible = "arm,armv8-pmuv3"; + interrupt-parent = <&intc>; + interrupts = ; + cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>; + }; + + pmu0_tcu: pmu-tcu@16002000 { + compatible = "arm,smmu-v3-pmcg"; + reg = <0x0 0x16002000 0x0 0x1000>, + <0x0 0x16022000 0x0 0x1000>; + interrupt-parent = <&intc>; + interrupts = ; + }; + + pmu0_tbu0: pmu-tbu@16042000 { + compatible = "arm,smmu-v3-pmcg"; + reg = <0x0 0x16042000 0x0 0x1000>, + <0x0 0x16052000 0x0 0x1000>; + interrupt-parent = <&intc>; + interrupts = ; + }; + + pmu0_tbu1: pmu-tbu@16062000 { + compatible = "arm,smmu-v3-pmcg"; + reg = <0x0 0x16062000 0x0 0x1000>, + <0x0 0x16072000 0x0 0x1000>; + interrupt-parent = <&intc>; + interrupts = ; + }; + + pmu0_tbu2: pmu-tbu@16082000 { + compatible = "arm,smmu-v3-pmcg"; + reg = <0x0 0x16082000 0x0 0x1000>, + <0x0 0x16092000 0x0 0x1000>; + interrupt-parent = <&intc>; + interrupts = ; + }; + + pmu0_tbu3: pmu-tbu@160A2000 { + compatible = "arm,smmu-v3-pmcg"; + reg = <0x0 0x160A2000 0x0 0x1000>, + <0x0 0x160B2000 0x0 0x1000>; + interrupt-parent = <&intc>; + interrupts = ; + }; + + pmu0_tbu4: pmu-tbu@160C2000 { + compatible = "arm,smmu-v3-pmcg"; + reg = <0x0 0x160C2000 0x0 0x1000>, + <0x0 0x160D2000 0x0 0x1000>; + interrupt-parent = <&intc>; + interrupts = ; + }; + + pmu0_tbu5: pmu-tbu@160E2000 { + compatible = "arm,smmu-v3-pmcg"; + reg = <0x0 0x160E2000 0x0 0x1000>, + <0x0 0x160F2000 0x0 0x1000>; + interrupt-parent = <&intc>; + interrupts = ; + }; + + firmware { + svc { + compatible = "intel,agilex5-svc"; + method = "smc"; + 
memory-region = <&service_reserved>; + iommus = <&smmu 10>; + interrupts = ; + interrupt-parent = <&intc>; + altr,smmu_enable_quirk; + + fpga_mgr: fpga-mgr { + compatible = "intel,agilex5-soc-fpga-mgr"; + altr,smmu_enable_quirk; + }; + + temp_volt: hwmon { + compatible = "intel,soc64-hwmon"; + }; + + fcs_config: fcs-config { + compatible = "intel,agilex5-soc-fcs-config"; + }; + }; + }; + + fpga-region { + compatible = "fpga-region"; + #address-cells = <0x2>; + #size-cells = <0x2>; + fpga-mgr = <&fpga_mgr>; + }; + soc: soc@0 { compatible = "simple-bus"; ranges = <0 0 0 0xffffffff>; @@ -147,6 +261,471 @@ #clock-cells = <1>; }; + gmac2: ethernet@10830000 { + compatible = "altr,socfpga-stmmac-a10-s10", + "snps,dwxgmac-2.10", + "snps,dwxgmac"; + reg = <0x10830000 0x3500>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + interrupt-names = "macirq", + "macirq_tx0", + "macirq_tx1", + "macirq_tx2", + "macirq_tx3", + "macirq_tx4", + "macirq_tx5", + "macirq_tx6", + "macirq_tx7", + "macirq_rx0", + "macirq_rx1", + "macirq_rx2", + "macirq_rx3", + "macirq_rx4", + "macirq_rx5", + "macirq_rx6", + "macirq_rx7"; + resets = <&rst EMAC2_RESET>, <&rst EMAC2_OCP_RESET>; + reset-names = "stmmaceth", "stmmaceth-ocp"; + clocks = <&clkmgr AGILEX5_EMAC2_CLK>, + <&clkmgr AGILEX5_EMAC_PTP_CLK>; + clock-names = "stmmaceth", "ptp_ref"; + mac-address = [00 00 00 00 00 00]; + tx-fifo-depth = <32768>; + rx-fifo-depth = <16384>; + snps,multicast-filter-bins = <64>; + snps,perfect-filter-entries = <64>; + snps,axi-config = <&stmmac_axi_emac2_setup>; + snps,mtl-rx-config = <&mtl_rx_emac2_setup>; + snps,mtl-tx-config = <&mtl_tx_emac2_setup>; + snps,pbl = <32>; + snps,pblx8; + snps,multi-irq-en; + snps,tso; + snps,rx-vlan-offload; + snps,pagepool-tx-buf-quirk; + altr,sysmgr-syscon = <&sysmgr 0x4c 0>; + altr,smtg-hub; + iommus = <&smmu 3>; + snps,clk-csr = <0>; + dma-coherent; + status = "disabled"; + + stmmac_axi_emac2_setup: stmmac-axi-config { + snps,wr_osr_lmt = <31>; 
+ snps,rd_osr_lmt = <31>; + snps,blen = <0 0 0 32 16 8 4>; + }; + + mtl_rx_emac2_setup: rx-queues-config { + snps,rx-queues-to-use = <8>; + snps,rx-sched-sp; + queue0 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x0>; + }; + queue1 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x1>; + }; + queue2 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x2>; + }; + queue3 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x3>; + }; + queue4 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x4>; + }; + queue5 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x5>; + }; + queue6 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x6>; + }; + queue7 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x7>; + }; + }; + + mtl_tx_emac2_setup: tx-queues-config { + snps,tx-queues-to-use = <8>; + snps,tx-queues-with-coe = <2>; + snps,tx-sched-wrr; + queue0 { + snps,weight = <0x09>; + snps,dcb-algorithm; + }; + queue1 { + snps,weight = <0x0A>; + snps,dcb-algorithm; + }; + queue2 { + snps,weight = <0x0B>; + snps,coe-unsupported; + snps,dcb-algorithm; + }; + queue3 { + snps,weight = <0x0C>; + snps,coe-unsupported; + snps,dcb-algorithm; + }; + queue4 { + snps,weight = <0x0D>; + snps,coe-unsupported; + snps,dcb-algorithm; + }; + queue5 { + snps,weight = <0x0E>; + snps,coe-unsupported; + snps,dcb-algorithm; + }; + queue6 { + snps,weight = <0x0F>; + snps,coe-unsupported; + snps,dcb-algorithm; + snps,tbs-enable; + }; + queue7 { + snps,weight = <0x10>; + snps,coe-unsupported; + snps,dcb-algorithm; + snps,tbs-enable; + }; + }; + }; + + gmac0: ethernet@10810000 { + compatible = "altr,socfpga-stmmac-a10-s10", + "snps,dwxgmac-2.10", + "snps,dwxgmac"; + reg = <0x10810000 0x3500>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + interrupt-names = "macirq", + "macirq_tx0", + "macirq_tx1", + "macirq_tx2", + "macirq_tx3", + "macirq_tx4", + "macirq_tx5", + "macirq_tx6", + "macirq_tx7", + "macirq_rx0", + "macirq_rx1", + "macirq_rx2", + 
"macirq_rx3", + "macirq_rx4", + "macirq_rx5", + "macirq_rx6", + "macirq_rx7"; + resets = <&rst EMAC0_RESET>, <&rst EMAC0_OCP_RESET>; + reset-names = "stmmaceth", "stmmaceth-ocp"; + clocks = <&clkmgr AGILEX5_EMAC0_CLK>, + <&clkmgr AGILEX5_EMAC_PTP_CLK>; + clock-names = "stmmaceth", "ptp_ref"; + mac-address = [00 00 00 00 00 00]; + tx-fifo-depth = <32768>; + rx-fifo-depth = <16384>; + snps,multicast-filter-bins = <64>; + snps,perfect-filter-entries = <64>; + snps,axi-config = <&stmmac_axi_emac0_setup>; + snps,mtl-rx-config = <&mtl_rx_emac0_setup>; + snps,mtl-tx-config = <&mtl_tx_emac0_setup>; + snps,pbl = <32>; + snps,pblx8; + snps,multi-irq-en; + snps,tso; + snps,rx-vlan-offload; + snps,pagepool-tx-buf-quirk; + altr,sysmgr-syscon = <&sysmgr 0x44 0>; + altr,smtg-hub; + iommus = <&smmu 1>; + snps,clk-csr = <0>; + dma-coherent; + status = "disabled"; + + stmmac_axi_emac0_setup: stmmac-axi-config { + snps,wr_osr_lmt = <31>; + snps,rd_osr_lmt = <31>; + snps,blen = <0 0 0 32 16 8 4>; + }; + + mtl_rx_emac0_setup: rx-queues-config { + snps,rx-queues-to-use = <8>; + snps,rx-sched-sp; + queue0 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x0>; + }; + queue1 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x1>; + }; + queue2 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x2>; + }; + queue3 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x3>; + }; + queue4 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x4>; + }; + queue5 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x5>; + }; + queue6 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x6>; + }; + queue7 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x7>; + }; + }; + + mtl_tx_emac0_setup: tx-queues-config { + snps,tx-queues-to-use = <8>; + snps,tx-queues-with-coe = <2>; + snps,tx-sched-wrr; + queue0 { + snps,weight = <0x09>; + snps,dcb-algorithm; + }; + queue1 { + snps,weight = <0x0A>; + snps,dcb-algorithm; + }; + queue2 { + snps,weight = <0x0B>; + snps,coe-unsupported; + 
snps,dcb-algorithm; + }; + queue3 { + snps,weight = <0x0C>; + snps,coe-unsupported; + snps,dcb-algorithm; + }; + queue4 { + snps,weight = <0x0D>; + snps,coe-unsupported; + snps,dcb-algorithm; + }; + queue5 { + snps,weight = <0x0E>; + snps,coe-unsupported; + snps,dcb-algorithm; + }; + queue6 { + snps,weight = <0x0F>; + snps,coe-unsupported; + snps,dcb-algorithm; + snps,tbs-enable; + }; + queue7 { + snps,weight = <0x10>; + snps,coe-unsupported; + snps,dcb-algorithm; + snps,tbs-enable; + }; + }; + }; + + gmac1: ethernet@10820000 { + compatible = "altr,socfpga-stmmac-a10-s10", + "snps,dwxgmac-2.10", + "snps,dwxgmac"; + reg = <0x10820000 0x3500>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + interrupt-names = "macirq", + "macirq_tx0", + "macirq_tx1", + "macirq_tx2", + "macirq_tx3", + "macirq_tx4", + "macirq_tx5", + "macirq_tx6", + "macirq_tx7", + "macirq_rx0", + "macirq_rx1", + "macirq_rx2", + "macirq_rx3", + "macirq_rx4", + "macirq_rx5", + "macirq_rx6", + "macirq_rx7"; + resets = <&rst EMAC1_RESET>, <&rst EMAC1_OCP_RESET>; + reset-names = "stmmaceth", "stmmaceth-ocp"; + clocks = <&clkmgr AGILEX5_EMAC1_CLK>, + <&clkmgr AGILEX5_EMAC_PTP_CLK>; + clock-names = "stmmaceth", "ptp_ref"; + mac-address = [00 00 00 00 00 00]; + tx-fifo-depth = <32768>; + rx-fifo-depth = <16384>; + snps,multicast-filter-bins = <64>; + snps,perfect-filter-entries = <64>; + snps,axi-config = <&stmmac_axi_emac1_setup>; + snps,mtl-rx-config = <&mtl_rx_emac1_setup>; + snps,mtl-tx-config = <&mtl_tx_emac1_setup>; + snps,pbl = <32>; + snps,pblx8; + snps,multi-irq-en; + snps,tso; + snps,rx-vlan-offload; + snps,pagepool-tx-buf-quirk; + altr,sysmgr-syscon = <&sysmgr 0x48 0>; + altr,smtg-hub; + iommus = <&smmu 2>; + snps,clk-csr = <0>; + dma-coherent; + status = "disabled"; + + stmmac_axi_emac1_setup: stmmac-axi-config { + snps,wr_osr_lmt = <31>; + snps,rd_osr_lmt = <31>; + snps,blen = <0 0 0 32 16 8 4>; + }; + + mtl_rx_emac1_setup: rx-queues-config { + 
snps,rx-queues-to-use = <8>; + snps,rx-sched-sp; + queue0 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x0>; + }; + queue1 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x1>; + }; + queue2 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x2>; + }; + queue3 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x3>; + }; + queue4 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x4>; + }; + queue5 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x5>; + }; + queue6 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x6>; + }; + queue7 { + snps,dcb-algorithm; + snps,map-to-dma-channel = <0x7>; + }; + }; + + mtl_tx_emac1_setup: tx-queues-config { + snps,tx-queues-to-use = <8>; + snps,tx-queues-with-coe = <2>; + snps,tx-sched-wrr; + queue0 { + snps,weight = <0x09>; + snps,dcb-algorithm; + }; + queue1 { + snps,weight = <0x0A>; + snps,dcb-algorithm; + }; + queue2 { + snps,weight = <0x0B>; + snps,coe-unsupported; + snps,dcb-algorithm; + }; + queue3 { + snps,weight = <0x0C>; + snps,coe-unsupported; + snps,dcb-algorithm; + }; + queue4 { + snps,weight = <0x0D>; + snps,coe-unsupported; + snps,dcb-algorithm; + }; + queue5 { + snps,weight = <0x0E>; + snps,coe-unsupported; + snps,dcb-algorithm; + }; + queue6 { + snps,weight = <0x0F>; + snps,coe-unsupported; + snps,dcb-algorithm; + snps,tbs-enable; + }; + queue7 { + snps,weight = <0x10>; + snps,coe-unsupported; + snps,dcb-algorithm; + snps,tbs-enable; + }; + }; + }; + i2c0: i2c@10c02800 { compatible = "snps,designware-i2c"; reg = <0x10c02800 0x100>; @@ -222,7 +801,27 @@ status = "disabled"; }; - gpio1: gpio@10c03300 { + gpio0: gpio@10c03200 { + compatible = "snps,dw-apb-gpio"; + reg = <0x10c03200 0x100>; + #address-cells = <1>; + #size-cells = <0>; + resets = <&rst GPIO0_RESET>; + status = "disabled"; + + porta: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + reg = <0>; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <24>; + interrupt-controller; + #interrupt-cells = <2>; + 
interrupts = ; + }; + }; + + gpio1: gpio@10C03300 { compatible = "snps,dw-apb-gpio"; reg = <0x10c03300 0x100>; #address-cells = <1>; @@ -242,17 +841,36 @@ }; }; + mmc: mmc0@10808000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "intel,agilex5-sd4hc", "cdns,sd4hc"; + reg = <0x10808000 0x1000>; + interrupts = ; + fifo-depth = <0x800>; + resets = <&rst SDMMC_RESET>; + reset-names = "reset"; + clocks = <&clkmgr AGILEX5_L4_MP_CLK>, <&clkmgr AGILEX5_SDMCLK>; + clock-names = "biu", "ciu"; + iommus = <&smmu 5>; + dma-coherent; + status = "disabled"; + }; + nand: nand-controller@10b80000 { compatible = "cdns,hp-nfc"; reg = <0x10b80000 0x10000>, - <0x10840000 0x10000>; + <0x10840000 0x1000>; reg-names = "reg", "sdma"; #address-cells = <1>; #size-cells = <0>; interrupts = ; clocks = <&clkmgr AGILEX5_NAND_NF_CLK>; cdns,board-delay-ps = <4830>; + clock-names = "nf_clk"; + iommus = <&smmu 4>; status = "disabled"; + dma-coherent; }; ocram: sram@0 { @@ -274,10 +892,14 @@ #dma-cells = <1>; dma-channels = <4>; snps,dma-masters = <1>; - snps,data-width = <2>; + snps,data-width = <3>; snps,block-size = <32767 32767 32767 32767>; snps,priority = <0 1 2 3>; snps,axi-max-burst-len = <8>; + snps,dma-40-bit-mask; + iommus = <&smmu 8>; + status = "okay"; + dma-coherent; }; dmac1: dma-controller@10dc0000 { @@ -291,10 +913,14 @@ #dma-cells = <1>; dma-channels = <4>; snps,dma-masters = <1>; - snps,data-width = <2>; + snps,data-width = <3>; snps,block-size = <32767 32767 32767 32767>; snps,priority = <0 1 2 3>; snps,axi-max-burst-len = <8>; + snps,dma-40-bit-mask; + iommus = <&smmu 9>; + status = "okay"; + dma-coherent; }; rst: rstmgr@10d11000 { @@ -303,6 +929,18 @@ #reset-cells = <1>; }; + smmu: iommu@16000000 { + compatible = "arm,smmu-v3"; + reg = <0x16000000 0x30000>; + interrupts = , + , + ; + interrupt-names = "eventq", "gerror", "priq"; + dma-coherent; + #iommu-cells = <1>; + status = "disabled"; + }; + spi0: spi@10da4000 { compatible = "snps,dw-apb-ssi"; reg = 
<0x10da4000 0x1000>; @@ -314,10 +952,9 @@ reg-io-width = <4>; num-cs = <4>; clocks = <&clkmgr AGILEX5_L4_MAIN_CLK>; - dmas = <&dmac0 2>, <&dmac0 3>; - dma-names = "tx", "rx"; + dmas = <&dmac0 16>, <&dmac0 17>; + dma-names ="tx", "rx"; status = "disabled"; - }; spi1: spi@10da5000 { @@ -331,6 +968,8 @@ reg-io-width = <4>; num-cs = <4>; clocks = <&clkmgr AGILEX5_L4_MAIN_CLK>; + dmas = <&dmac0 20>, <&dmac0 21>; + dma-names ="tx", "rx"; status = "disabled"; }; @@ -403,9 +1042,148 @@ reset-names = "dwc2", "dwc2-ecc"; clocks = <&clkmgr AGILEX5_USB2OTG_HCLK>; clock-names = "otg"; + iommus = <&smmu 6>; + status = "disabled"; + }; + + usb31: usb1@11000000 { + compatible = "intel,agilex5-dwc3", "snps,dwc3"; + reg = <0x11000000 0x100000>; + ranges; + #address-cells = <1>; + #size-cells = <1>; + interrupts = ; + clocks = <&clkmgr AGILEX5_USB31_SUSPEND_CLK>, + <&clkmgr AGILEX5_USB31_BUS_CLK_EARLY>; + resets = <&rst USB1_RESET>, <&rst USB1_OCP_RESET>; + reset-names = "dwc3", "dwc3-ecc"; + iommus = <&smmu 7>; + phys = <&usbphy0>, <&usbphy0>; + phy-names = "usb2-phy", "usb3-phy"; + maximum-speed = "super-speed"; + snps,dis_u2_susphy_quirk; + snps,dis_u3_susphy_quirk; + snps,dma_set_40_bit_mask_quirk; status = "disabled"; }; + eccmgr { + compatible = "altr,socfpga-a10-ecc-manager"; + altr,sysmgr-syscon = <&sysmgr>; + #address-cells = <1>; + #size-cells = <1>; + interrupts = , + , + , + , + , + , + ; + interrupt-names = "global_sbe", "global_dbe", "io96b0" , "io96b1", + "sdm_qspi_sbe", "sdm_qspi_dbe", "sdm_seu"; + interrupt-controller; + #interrupt-cells = <2>; + ranges; + + ocram-ecc@108cc000 { + compatible = "altr,socfpga-a10-ocram-ecc"; + reg = <0x108cc000 0x100>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH>, <33 IRQ_TYPE_LEVEL_HIGH>; + }; + + usb0-ecc@108c4000 { + compatible = "altr,socfpga-usb-ecc"; + reg = <0x108c4000 0x100>; + altr,ecc-parent = <&usb0>; + interrupts = <2 IRQ_TYPE_LEVEL_HIGH>, <34 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; + }; + + emac0-rx-ecc@108c0000 { + 
compatible = "altr,socfpga-eth-mac-ecc"; + reg = <0x108c0000 0x100>; + altr,ecc-parent = <&gmac0>; + interrupts = <4 IRQ_TYPE_LEVEL_HIGH>, <38 IRQ_TYPE_LEVEL_HIGH>; + }; + + emac0-tx-ecc@108c0400 { + compatible = "altr,socfpga-eth-mac-ecc"; + reg = <0x108c0400 0x100>; + altr,ecc-parent = <&gmac0>; + interrupts = <5 IRQ_TYPE_LEVEL_HIGH>, <37 IRQ_TYPE_LEVEL_HIGH>; + }; + + emac1-rx-ecc@108c0800 { + compatible = "altr,socfpga-eth-mac-ecc"; + reg = <0x108c0800 0x100>; + altr,ecc-parent = <&gmac1>; + interrupts = <6 IRQ_TYPE_LEVEL_HIGH>, <38 IRQ_TYPE_LEVEL_HIGH>; + }; + + emac1-tx-ecc@108c0c00 { + compatible = "altr,socfpga-eth-mac-ecc"; + reg = <0x108c0c00 0x100>; + altr,ecc-parent = <&gmac1>; + interrupts = <7 IRQ_TYPE_LEVEL_HIGH>, <39 IRQ_TYPE_LEVEL_HIGH>; + }; + + emac2-rx-ecc@108c1000 { + compatible = "altr,socfpga-eth-mac-ecc"; + reg = <0x108c1000 0x100>; + altr,ecc-parent = <&gmac2>; + interrupts = <8 IRQ_TYPE_LEVEL_HIGH>, <40 IRQ_TYPE_LEVEL_HIGH>; + }; + + emac2-tx-ecc@108c1400 { + compatible = "altr,socfpga-eth-mac-ecc"; + reg = <0x108c1400 0x100>; + altr,ecc-parent = <&gmac2>; + interrupts = <9 IRQ_TYPE_LEVEL_HIGH>, <41 IRQ_TYPE_LEVEL_HIGH>; + }; + + usb1-rx-ecc@108c4400 { + compatible = "altr,socfpga-usb3-ecc"; + reg = <0x108c4400 0x100>; + altr,ecc-parent = <&usb31>; + interrupts = <12 IRQ_TYPE_LEVEL_HIGH>, <44 IRQ_TYPE_LEVEL_HIGH>; + }; + + usb1-tx-ecc@108c4800 { + compatible = "altr,socfpga-usb3-ecc"; + reg = <0x108c4800 0x100>; + altr,ecc-parent = <&usb31>; + interrupts = <11 IRQ_TYPE_LEVEL_HIGH>, <43 IRQ_TYPE_LEVEL_HIGH>; + }; + + usb1-cache-ecc@108c4c00 { + compatible = "altr,socfpga-usb3-ecc"; + reg = <0x108c4c00 0x100>; + altr,ecc-parent = <&usb31>; + interrupts = <3 IRQ_TYPE_LEVEL_HIGH>, <35 IRQ_TYPE_LEVEL_HIGH>; + }; + + io96b0-ecc@18400000 { + compatible = "altr,socfpga-io96b0-ecc"; + reg = <0x18400000 0x1000>; + }; + + io96b1-ecc@18800000 { + compatible = "altr,socfpga-io96b1-ecc"; + reg = <0x18800000 0x1000>; + status = "disabled"; + }; + + 
sdm-qspi-ecc@10a22000 { + compatible = "altr,socfpga-sdm-qspi-ecc"; + reg = <0x10a22000 0x100>; + }; + + cram-seu@0 { + compatible = "altr,socfpga-cram-seu"; + reg = <0x0 0x0>; + }; + }; + watchdog0: watchdog@10d00200 { compatible = "snps,dw-wdt"; reg = <0x10d00200 0x100>; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk.dts index c533e5a3a6106..30c4d7347ee8b 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk.dts +++ b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk.dts @@ -10,17 +10,151 @@ aliases { serial0 = &uart0; + ethernet0 = &gmac0; + ethernet1 = &gmac1; + ethernet2 = &gmac2; }; chosen { stdout-path = "serial0:115200n8"; + bootargs = "console=uart8250,mmio32,0x10c02000,115200n8 \ + root=/dev/ram0 rw initrd=0x10000000 init=/sbin/init \ + ramdisk_size=10000000 earlycon=uart8250,mmio32,0x10c02000,115200n8 \ + panic=-1 rootfstype=ext3"; }; + + leds { + compatible = "gpio-leds"; + + hps1 { + label = "hps_led1"; + gpios = <&porta 11 GPIO_ACTIVE_HIGH>; + }; + + }; + + memory { + device_type = "memory"; + /* We expect the bootloader to fill in the reg */ + reg = <0 0x80000000 0 0x80000000>; + }; +}; + +&gpio0 { + status = "okay"; }; &gpio1 { status = "okay"; }; +&gmac0 { + status = "disabled"; + phy-mode = "rgmii"; + phy-handle = <&emac0_phy0>; + + max-frame-size = <9000>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + emac0_phy0: ethernet-phy@0 { + reg = <0>; + }; + }; +}; + +&gmac1 { + status = "disabled"; + phy-mode = "rgmii"; + mac-mode = "gmii"; + phy-handle = <&emac1_phy0>; + + max-frame-size = <9000>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + emac1_phy0: ethernet-phy@0 { + reg = <0>; + }; + }; +}; + +&gmac2 { + status = "okay"; + phy-mode = "rgmii"; + phy-handle = <&emac2_phy0>; + + max-frame-size = <9000>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = 
"snps,dwmac-mdio"; + emac2_phy0: ethernet-phy@0 { + reg = <0>; + }; + }; +}; + +&i3c0 { + status = "okay"; +}; + +&i3c1 { + status = "okay"; +}; + +&mmc { + status = "okay"; + bus-width = <4>; + no-1-8-v; + cap-sd-highspeed; + disable-wp; + sd-uhs-sdr50; + sdhci-caps = <0x00000000 0x0000c800>; + sdhci-caps-mask = <0x00002000 0x0000ff00>; + cdns,phy-use-ext-lpbk-dqs = <1>; + cdns,phy-use-lpbk-dqs = <1>; + cdns,phy-use-phony-dqs = <1>; + cdns,phy-use-phony-dqs-cmd = <1>; + cdns,phy-io-mask-always-on = <0>; + cdns,phy-io-mask-end = <5>; + cdns,phy-io-mask-start = <0>; + cdns,phy-data-select-oe-end = <1>; + cdns,phy-sync-method = <1>; + cdns,phy-sw-half-cycle-shift = <0>; + cdns,phy-rd-del-sel = <52>; + cdns,phy-underrun-suppress = <1>; + cdns,phy-gate-cfg-always-on = <1>; + cdns,phy-param-dll-bypass-mode = <1>; + cdns,phy-param-phase-detect-sel = <2>; + cdns,phy-param-dll-start-point = <254>; + cdns,phy-read-dqs-cmd-delay = <0>; + cdns,phy-clk-wrdqs-delay = <0>; + cdns,phy-clk-wr-delay = <0>; + cdns,phy-read-dqs-delay = <0>; + cdns,phy-phony-dqs-timing = <0>; + cdns,hrs09-rddata-en = <1>; + cdns,hrs09-rdcmd-en = <1>; + cdns,hrs09-extended-wr-mode = <1>; + cdns,hrs09-extended-rd-mode = <1>; + cdns,hrs10-hcsdclkadj = <3>; + cdns,hrs16-wrdata1-sdclk-dly = <0>; + cdns,hrs16-wrdata0-sdclk-dly = <0>; + cdns,hrs16-wrcmd1-sdclk-dly = <0>; + cdns,hrs16-wrcmd0-sdclk-dly = <0>; + cdns,hrs16-wrdata1-dly = <0>; + cdns,hrs16-wrdata0-dly = <0>; + cdns,hrs16-wrcmd1-dly = <0>; + cdns,hrs16-wrcmd0-dly = <0>; + cdns,hrs07-rw-compensate = <10>; + cdns,hrs07-idelay-val = <0>; +}; + &osc1 { clock-frequency = <25000000>; }; @@ -34,6 +168,128 @@ disable-over-current; }; +&usb31 { + status = "okay"; + dr_mode = "host"; +}; + +&smmu { + status = "okay"; +}; + &watchdog0 { status = "okay"; }; +&watchdog1 { + status = "okay"; +}; +&watchdog2 { + status = "okay"; +}; +&watchdog3 { + status = "okay"; +}; +&watchdog4 { + status = "okay"; + disable-over-current; +}; + +&qspi { + status = "okay"; + 
flash@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "micron,mt25qu02g", "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <100000000>; + + m25p,fast-read; + cdns,page-size = <256>; + cdns,block-size = <16>; + cdns,read-delay = <2>; + cdns,tshsl-ns = <50>; + cdns,tsd2d-ns = <50>; + cdns,tchsh-ns = <4>; + cdns,tslch-ns = <4>; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + rsu-handle = <&qspi_boot>; + + qspi_boot: partition@0 { + label = "u-boot"; + reg = <0x0 0x04200000>; + }; + + root: partition@4200000 { + label = "root"; + reg = <0x04200000 0x0be00000>; + }; + }; + }; +}; + +&temp_volt { + voltage { + #address-cells = <1>; + #size-cells = <0>; + input@2 { + label = "0.8V VCC"; + reg = <2>; + }; + + input@3 { + label = "1.8V VCCIO_SDM"; + reg = <3>; + }; + + input@4 { + label = "1.8V VCCPT"; + reg = <4>; + }; + + input@5 { + label = "1.2V VCCCRCORE"; + reg = <5>; + }; + + input@6 { + label = "0.9V VCCH"; + reg = <6>; + }; + + input@7 { + label = "0.8V VCCL"; + reg = <7>; + }; + }; + + temperature { + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + label = "Main Die SDM"; + reg = <0x0>; + }; + + input@10001 { + label = "Main Die corner bottom left max"; + reg = <0x10000>; + }; + + input@30001 { + label = "Main Die corner bottom right max"; + reg = <0x30000>; + }; + + input@40001 { + label = "Main Die corner top right max"; + reg = <0x40000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_b0.dts b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_b0.dts new file mode 100644 index 0000000000000..e7251051b3aac --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_b0.dts @@ -0,0 +1,292 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023, Intel Corporation + */ +#include "socfpga_agilex5.dtsi" + +/ { + model = "SoCFPGA Agilex5 SoCDK"; + compatible = "intel,socfpga-agilex5-socdk", 
"intel,socfpga-agilex5"; + + aliases { + serial0 = &uart0; + ethernet0 = &gmac0; + ethernet1 = &gmac1; + ethernet2 = &gmac2; + }; + + chosen { + stdout-path = "serial0:115200n8"; + bootargs = "console=uart8250,mmio32,0x10c02000,115200n8 \ + root=/dev/ram0 rw initrd=0x10000000 init=/sbin/init \ + ramdisk_size=10000000 earlycon=uart8250,mmio32,0x10c02000,115200n8 \ + panic=-1 nosmp rootfstype=ext3"; + }; + + leds { + compatible = "gpio-leds"; + + hps1 { + label = "hps_led1"; + gpios = <&porta 11 GPIO_ACTIVE_HIGH>; + }; + + }; + + memory { + device_type = "memory"; + /* We expect the bootloader to fill in the reg */ + reg = <0x0 0x0 0x0 0x0>; + }; +}; + +&gpio0 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&gmac0 { + status = "disabled"; + phy-mode = "rgmii"; + phy-handle = <&emac0_phy0>; + + max-frame-size = <9000>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + emac0_phy0: ethernet-phy@0 { + reg = <0>; + }; + }; +}; + +&gmac1 { + status = "disabled"; + phy-mode = "rgmii"; + mac-mode = "gmii"; + phy-handle = <&emac1_phy0>; + + max-frame-size = <9000>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + emac1_phy0: ethernet-phy@0 { + reg = <0>; + }; + }; +}; + +&gmac2 { + status = "okay"; + phy-mode = "rgmii"; + phy-handle = <&emac2_phy0>; + + max-frame-size = <9000>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + emac2_phy0: ethernet-phy@0 { + reg = <0>; + }; + }; +}; + +&i3c0 { + status = "okay"; +}; + +&i3c1 { + status = "okay"; +}; + +&mmc { + status = "okay"; + bus-width = <4>; + no-1-8-v; + cap-sd-highspeed; + cap-mmc-highspeed; + disable-wp; + sd-uhs-sdr50; + cdns,phy-use-ext-lpbk-dqs = <1>; + cdns,phy-use-lpbk-dqs = <1>; + cdns,phy-use-phony-dqs = <1>; + cdns,phy-use-phony-dqs-cmd = <1>; + cdns,phy-io-mask-always-on = <0>; + cdns,phy-io-mask-end = <5>; + cdns,phy-io-mask-start = <0>; + cdns,phy-data-select-oe-end 
= <1>; + cdns,phy-sync-method = <1>; + cdns,phy-sw-half-cycle-shift = <0>; + cdns,phy-rd-del-sel = <52>; + cdns,phy-underrun-suppress = <1>; + cdns,phy-gate-cfg-always-on = <1>; + cdns,phy-param-dll-bypass-mode = <1>; + cdns,phy-param-phase-detect-sel = <2>; + cdns,phy-param-dll-start-point = <254>; + cdns,phy-read-dqs-cmd-delay = <0>; + cdns,phy-clk-wrdqs-delay = <0>; + cdns,phy-clk-wr-delay = <0>; + cdns,phy-read-dqs-delay = <0>; + cdns,phy-phony-dqs-timing = <0>; + cdns,hrs09-rddata-en = <1>; + cdns,hrs09-rdcmd-en = <1>; + cdns,hrs09-extended-wr-mode = <1>; + cdns,hrs09-extended-rd-mode = <1>; + cdns,hrs10-hcsdclkadj = <3>; + cdns,hrs16-wrdata1-sdclk-dly = <0>; + cdns,hrs16-wrdata0-sdclk-dly = <0>; + cdns,hrs16-wrcmd1-sdclk-dly = <0>; + cdns,hrs16-wrcmd0-sdclk-dly = <0>; + cdns,hrs16-wrdata1-dly = <0>; + cdns,hrs16-wrdata0-dly = <0>; + cdns,hrs16-wrcmd1-dly = <0>; + cdns,hrs16-wrcmd0-dly = <0>; + cdns,hrs07-rw-compensate = <10>; + cdns,hrs07-idelay-val = <0>; +}; + +&osc1 { + clock-frequency = <25000000>; +}; + +&uart0 { + status = "okay"; +}; + +&usb0 { + status = "okay"; + disable-over-current; +}; + +&usb31 { + status = "okay"; + dr_mode = "host"; +}; + +&smmu { + status = "okay"; +}; + +&watchdog0 { + status = "okay"; +}; +&watchdog1 { + status = "okay"; +}; +&watchdog2 { + status = "okay"; +}; +&watchdog3 { + status = "okay"; +}; +&watchdog4 { + status = "okay"; + disable-over-current; +}; + +&qspi { + status = "okay"; + flash@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "micron,mt25qu02g", "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <100000000>; + + m25p,fast-read; + cdns,page-size = <256>; + cdns,block-size = <16>; + cdns,read-delay = <2>; + cdns,tshsl-ns = <50>; + cdns,tsd2d-ns = <50>; + cdns,tchsh-ns = <4>; + cdns,tslch-ns = <4>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + rsu-handle = <&qspi_boot>; + + qspi_boot: partition@0 { + label = "u-boot"; + reg = <0x0 0x04200000>; 
+ }; + + root: partition@4200000 { + label = "root"; + reg = <0x04200000 0x0BE00000>; + }; + }; + }; +}; + +&temp_volt { + voltage { + #address-cells = <1>; + #size-cells = <0>; + input@2 { + label = "0.8V VCC"; + reg = <2>; + }; + + input@3 { + label = "1.8V VCCIO_SDM"; + reg = <3>; + }; + + input@4 { + label = "1.8V VCCPT"; + reg = <4>; + }; + + input@5 { + label = "1.2V VCCCRCORE"; + reg = <5>; + }; + + input@6 { + label = "0.9V VCCH"; + reg = <6>; + }; + + input@7 { + label = "0.8V VCCL"; + reg = <7>; + }; + }; + + temperature { + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + label = "Main Die SDM"; + reg = <0x0>; + }; + + input@10001 { + label = "Main Die corner bottom left max"; + reg = <0x10000>; + }; + + input@30001 { + label = "Main Die corner bottom right max"; + reg = <0x30000>; + }; + + input@40001 { + label = "Main Die corner top right max"; + reg = <0x40000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_debug.dts b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_debug.dts new file mode 100644 index 0000000000000..d4871ee76e7e0 --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_debug.dts @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2023, Intel Corporation + */ +#include "socfpga_agilex5_socdk.dts" + +/ { + model = "SoCFPGA Agilex5 SoCDK - debug daughter card"; + compatible = "intel,socfpga-agilex5-socdk", "intel,socfpga-agilex"; + + leds { + hps0 { + label = "hps_led0"; + gpios = <&portb 12 GPIO_ACTIVE_HIGH>; + }; + hps1 { + status = "disabled"; + }; + }; +}; + +&gpio0 { + status = "disabled"; +}; + +&gpio1 { + status = "okay"; +}; + +&gmac0 { + status = "okay"; +}; + +&gmac2 { + status = "disabled"; +}; + +&spi0 { + status= "okay"; + spidev@0{ + compatible = "rohm,dh2228fv"; + reg = <0>; + spi-max-frequency = <10000000>; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_emmc.dts b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_emmc.dts new 
file mode 100644 index 0000000000000..fb5590e1f92f7 --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_emmc.dts @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024, Intel Corporation + */ +#include "socfpga_agilex5_socdk_nand.dts" + +/ { + model = "SoCFPGA Agilex5 SoCDK eMMC"; + compatible = "intel,socfpga-agilex5-socdk", "intel,socfpga-agilex"; +}; + +&mmc { + status = "okay"; + bus-width = <8>; + no-1-8-v; + cap-sd-highspeed; + cap-mmc-highspeed; + disable-wp; + sd-uhs-sdr50; + sdhci-caps = <0x00000000 0x0000c800>; + sdhci-caps-mask = <0x00002000 0x0000ff00>; + no-sdio; + cdns,phy-use-ext-lpbk-dqs = <1>; + cdns,phy-use-lpbk-dqs = <1>; + cdns,phy-use-phony-dqs = <1>; + cdns,phy-use-phony-dqs-cmd = <1>; + cdns,phy-io-mask-always-on = <0>; + cdns,phy-io-mask-end = <5>; + cdns,phy-io-mask-start = <0>; + cdns,phy-data-select-oe-end = <1>; + cdns,phy-sync-method = <1>; + cdns,phy-sw-half-cycle-shift = <0>; + cdns,phy-rd-del-sel = <52>; + cdns,phy-underrun-suppress = <1>; + cdns,phy-gate-cfg-always-on = <1>; + cdns,phy-param-dll-bypass-mode = <1>; + cdns,phy-param-phase-detect-sel = <2>; + cdns,phy-param-dll-start-point = <254>; + cdns,phy-read-dqs-cmd-delay = <0>; + cdns,phy-clk-wrdqs-delay = <0>; + cdns,phy-clk-wr-delay = <0>; + cdns,phy-read-dqs-delay = <0>; + cdns,phy-phony-dqs-timing = <0>; + cdns,hrs09-rddata-en = <1>; + cdns,hrs09-rdcmd-en = <1>; + cdns,hrs09-extended-wr-mode = <1>; + cdns,hrs09-extended-rd-mode = <1>; + cdns,hrs10-hcsdclkadj = <3>; + cdns,hrs16-wrdata1-sdclk-dly = <0>; + cdns,hrs16-wrdata0-sdclk-dly = <0>; + cdns,hrs16-wrcmd1-sdclk-dly = <0>; + cdns,hrs16-wrcmd0-sdclk-dly = <0>; + cdns,hrs16-wrdata1-dly = <0>; + cdns,hrs16-wrdata0-dly = <0>; + cdns,hrs16-wrcmd1-dly = <0>; + cdns,hrs16-wrcmd0-dly = <0>; + cdns,hrs07-rw-compensate = <10>; + cdns,hrs07-idelay-val = <0>; +}; + +&nand { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_modular.dts 
b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_modular.dts new file mode 100644 index 0000000000000..4449ecf6f5daf --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_modular.dts @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2025, Altera Corporation + */ +#include "socfpga_agilex5_socdk.dts" + +/ { + model = "SoCFPGA Agilex5 SoCDK - Modular development kit"; + + leds { + led0 { + label = "hps_led0"; + gpios = <&porta 0x0 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + }; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_nand.dts b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_nand.dts new file mode 100644 index 0000000000000..140e1c6011b58 --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_nand.dts @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2023, Intel Corporation + */ +#include "socfpga_agilex5.dtsi" + +/ { + model = "SoCFPGA Agilex5 SoCDK"; + compatible = "intel,socfpga-agilex5-socdk", "intel,socfpga-agilex"; + + aliases { + serial0 = &uart0; + ethernet0 = &gmac0; + ethernet2 = &gmac2; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + leds { + compatible = "gpio-leds"; + hps0 { + label = "hps_led0"; + gpios = <&porta 6 GPIO_ACTIVE_HIGH>; + }; + + hps1 { + label = "hps_led1"; + gpios = <&porta 7 GPIO_ACTIVE_HIGH>; + }; + }; + + memory { + device_type = "memory"; + reg = <0x0 0x80000000 0x0 0x80000000>; + #address-cells = <0x2>; + #size-cells = <0x2>; + u-boot,dm-pre-reloc; + }; +}; + +&gpio0 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&i2c0 { + status = "okay"; +}; + +&i3c0 { + status = "okay"; +}; + +&i3c1 { + status = "okay"; +}; + +&gmac0 { + status = "okay"; + phy-mode = "rgmii"; + phy-handle = <&emac0_phy0>; + max-frame-size = <9000>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + emac0_phy0: ethernet-phy@0 { + reg = <0>; + }; + }; +}; + +&gmac2 { + 
status = "disabled"; + phy-mode = "rgmii"; + phy-handle = <&emac2_phy0>; + max-frame-size = <9000>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + emac2_phy0: ethernet-phy@0 { + reg = <0>; + }; + }; +}; + +&osc1 { + clock-frequency = <25000000>; +}; + +&uart0 { + status = "okay"; +}; + +&watchdog0 { + status = "okay"; +}; + +&watchdog1 { + status = "okay"; +}; + +&watchdog2 { + status = "okay"; +}; + +&watchdog3 { + status = "okay"; +}; + +&watchdog4 { + status = "okay"; +}; + +&smmu { + status = "okay"; +}; + +&nand { + status = "okay"; + disable-ecc; + + flash@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + nand-bus-width = <8>; + + partition@0 { + label = "u-boot"; + reg = <0 0x200000>; + }; + partition@200000 { + label = "root"; + reg = <0x200000 0xffe00000>; + }; + }; +}; + +&temp_volt { + voltage { + #address-cells = <1>; + #size-cells = <0>; + input@2 { + label = "0.8V VCC"; + reg = <2>; + }; + + input@3 { + label = "1.8V VCCIO_SDM"; + reg = <3>; + }; + + input@4 { + label = "1.8V VCCPT"; + reg = <4>; + }; + + input@5 { + label = "1.2V VCCCRCORE"; + reg = <5>; + }; + + input@6 { + label = "0.9V VCCH"; + reg = <6>; + }; + + input@7 { + label = "0.8V VCCL"; + reg = <7>; + }; + }; + + temperature { + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + label = "Main Die SDM"; + reg = <0x0>; + }; + + input@10001 { + label = "Main Die corner bottom left max"; + reg = <0x10000>; + }; + + input@30001 { + label = "Main Die corner bottom right max"; + reg = <0x30000>; + }; + + input@40001 { + label = "Main Die corner top right max"; + reg = <0x40000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_nand_b0.dts b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_nand_b0.dts new file mode 100644 index 0000000000000..807ed69d90c6e --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_nand_b0.dts @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 
Copyright (C) 2023, Intel Corporation + */ +#include "socfpga_agilex5.dtsi" + +/ { + model = "SoCFPGA Agilex5 SoCDK"; + compatible = "intel,socfpga-agilex5-socdk", "intel,socfpga-agilex"; + + aliases { + serial0 = &uart0; + ethernet0 = &gmac0; + ethernet2 = &gmac2; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + leds { + compatible = "gpio-leds"; + hps0 { + label = "hps_led0"; + gpios = <&porta 6 GPIO_ACTIVE_HIGH>; + }; + + hps1 { + label = "hps_led1"; + gpios = <&porta 7 GPIO_ACTIVE_HIGH>; + }; + }; + + memory { + device_type = "memory"; + reg = <0x0 0x80000000 0x0 0x80000000>; + #address-cells = <0x2>; + #size-cells = <0x2>; + u-boot,dm-pre-reloc; + }; +}; + +&gpio0 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&i2c0 { + status = "okay"; +}; + +&i3c0 { + status = "okay"; +}; + +&i3c1 { + status = "okay"; +}; + +&gmac0 { + status = "okay"; + phy-mode = "rgmii"; + phy-handle = <&emac0_phy0>; + max-frame-size = <9000>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + emac0_phy0: ethernet-phy@0 { + reg = <0>; + }; + }; +}; + +&gmac2 { + status = "disabled"; + phy-mode = "rgmii"; + phy-handle = <&emac2_phy0>; + max-frame-size = <9000>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + emac2_phy0: ethernet-phy@0 { + reg = <0>; + }; + }; +}; + +&osc1 { + clock-frequency = <25000000>; +}; + +&uart0 { + status = "okay"; +}; + +&watchdog0 { + status = "okay"; +}; + +&watchdog1 { + status = "okay"; +}; + +&watchdog2 { + status = "okay"; +}; + +&watchdog3 { + status = "okay"; +}; + +&watchdog4 { + status = "okay"; +}; + +&smmu { + status = "okay"; +}; + +&nand { + status = "okay"; + reg = <0x10b80000 0x10000>, + <0x10840000 0x10000>; + + flash@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + nand-bus-width = <8>; + + partition@0 { + label = "u-boot"; + reg = <0 0x200000>; + }; + partition@200000 { + label = "root"; + reg = <0x200000 
0xffe00000>; + }; + }; +}; + +&temp_volt { + voltage { + #address-cells = <1>; + #size-cells = <0>; + input@2 { + label = "0.8V VCC"; + reg = <2>; + }; + + input@3 { + label = "1.8V VCCIO_SDM"; + reg = <3>; + }; + + input@4 { + label = "1.8V VCCPT"; + reg = <4>; + }; + + input@5 { + label = "1.2V VCCCRCORE"; + reg = <5>; + }; + + input@6 { + label = "0.9V VCCH"; + reg = <6>; + }; + + input@7 { + label = "0.8V VCCL"; + reg = <7>; + }; + }; + + temperature { + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + label = "Main Die SDM"; + reg = <0x0>; + }; + + input@10001 { + label = "Main Die corner bottom left max"; + reg = <0x10000>; + }; + + input@30001 { + label = "Main Die corner bottom right max"; + reg = <0x30000>; + }; + + input@40001 { + label = "Main Die corner top right max"; + reg = <0x40000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_nand_b0_atfboot.dts b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_nand_b0_atfboot.dts new file mode 100644 index 0000000000000..221be09ea1969 --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_nand_b0_atfboot.dts @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2025, Altera Corporation. All rights reserved. 
+ */ + +#include "socfpga_agilex5_socdk_nand_b0.dts" + +/ { + model = "SoCFPGA Agilex5 SoCDK"; + compatible = "intel,socfpga-agilex5-socdk", "intel,socfpga-agilex5"; + + chosen { + stdout-path = "serial0:115200n8"; + bootargs = "console=uart8250,mmio32,0x10c02000,115200n8 \ + earlycon=uart8250,mmio32,0x10c02000,115200n8 \ + panic=-1 \ + root=ubi0:rootfs rw rootwait rootfstype=ubifs ubi.mtd=2"; + }; +}; + +&nand { + status = "okay"; + /delete-node/ flash@0; + + flash@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + nand-bus-width = <8>; + + partition@0 { + label = "u-boot"; + reg = <0 0x200000>; + }; + + partition@200000 { + label = "fip"; + reg = <0x200000 0x03200000>; + }; + + partition@3400000 { + label = "root"; + reg = <0x03400000 0xfcc00000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_swvp_b0.dts b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_swvp_b0.dts new file mode 100755 index 0000000000000..e8165905750c8 --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_swvp_b0.dts @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2023, Intel Corporation + */ +#include "socfpga_agilex5.dtsi" + +/ { + model = "SoCFPGA Agilex5 SoCDK"; + compatible = "intel,socfpga-agilex5-socdk", "intel,socfpga-agilex"; + + aliases { + serial0 = &uart0; + ethernet0 = &gmac0; + ethernet2 = &gmac2; + }; + + chosen { + stdout-path = "serial0:115200n8"; + bootargs = "console=uart8250,mmio32,0x10c02000,115200n8 \ + root=/dev/ram0 rw initrd=0x10000000 init=/sbin/init \ + ramdisk_size=10000000 earlycon=uart8250,mmio32,0x10c02000,115200n8 \ + panic=-1 nosmp rootfstype=ext3"; + }; + + leds { + compatible = "gpio-leds"; + hps0 { + label = "hps_led0"; + gpios = <&portb 20 GPIO_ACTIVE_HIGH>; + }; + + hps1 { + label = "hps_led1"; + gpios = <&portb 19 GPIO_ACTIVE_HIGH>; + }; + + hps2 { + label = "hps_led2"; + gpios = <&portb 21 GPIO_ACTIVE_HIGH>; + }; + }; + + memory { + device_type = "memory"; + /* We expect 
the bootloader to fill in the reg */ + reg = <0x0 0x0 0x0 0x0>; + }; + + extcon_usb1: extcon_usb1 { + compatible = "linux,extcon-usb-gpio"; + id-gpio = <&portb 23 GPIO_ACTIVE_HIGH>; + }; +}; + +&pmu0 { + status = "disabled"; +}; + +&pmu0_tcu { + status = "disabled"; +}; + +&pmu0_tbu0 { + status = "disabled"; +}; + +&pmu0_tbu1 { + status = "disabled"; +}; + +&pmu0_tbu2 { + status = "disabled"; +}; + +&pmu0_tbu3 { + status = "disabled"; +}; + +&pmu0_tbu4 { + status = "disabled"; +}; + +&pmu0_tbu5 { + status = "disabled"; +}; + +&gpio1 { + status = "okay"; +}; + +&gmac0 { + status = "okay"; + phy-mode = "rgmii"; + phy-handle = <&emac0_phy0>; + + max-frame-size = <9000>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + emac0_phy0: ethernet-phy@0 { + reg = <0>; + }; + }; +}; + +&gmac2 { + status = "okay"; + phy-mode = "rgmii"; + phy-handle = <&emac2_phy0>; + + max-frame-size = <9000>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + emac2_phy0: ethernet-phy@0 { + reg = <0>; + }; + }; +}; + +&mmc { + status = "okay"; + bus-width = <4>; + sd-uhs-sdr50; + no-sdio; + cdns,phy-use-ext-lpbk-dqs = <1>; + cdns,phy-use-lpbk-dqs = <1>; + cdns,phy-use-phony-dqs = <1>; + cdns,phy-use-phony-dqs-cmd = <1>; + cdns,phy-io-mask-always-on = <0>; + cdns,phy-io-mask-end = <5>; + cdns,phy-io-mask-start = <0>; + cdns,phy-data-select-oe-end = <1>; + cdns,phy-sync-method = <1>; + cdns,phy-sw-half-cycle-shift = <0>; + cdns,phy-rd-del-sel = <52>; + cdns,phy-underrun-suppress = <1>; + cdns,phy-gate-cfg-always-on = <1>; + cdns,phy-param-dll-bypass-mode = <1>; + cdns,phy-param-phase-detect-sel = <2>; + cdns,phy-param-dll-start-point = <254>; + cdns,phy-read-dqs-cmd-delay = <0>; + cdns,phy-clk-wrdqs-delay = <0>; + cdns,phy-clk-wr-delay = <0>; + cdns,phy-read-dqs-delay = <0>; + cdns,phy-phony-dqs-timing = <0>; + cdns,hrs09-rddata-en = <1>; + cdns,hrs09-rdcmd-en = <1>; + cdns,hrs09-extended-wr-mode = <1>; + 
cdns,hrs09-extended-rd-mode = <1>; + cdns,hrs10-hcsdclkadj = <3>; + cdns,hrs16-wrdata1-sdclk-dly = <0>; + cdns,hrs16-wrdata0-sdclk-dly = <0>; + cdns,hrs16-wrcmd1-sdclk-dly = <0>; + cdns,hrs16-wrcmd0-sdclk-dly = <0>; + cdns,hrs16-wrdata1-dly = <0>; + cdns,hrs16-wrdata0-dly = <0>; + cdns,hrs16-wrcmd1-dly = <0>; + cdns,hrs16-wrcmd0-dly = <0>; + cdns,hrs07-rw-compensate = <10>; + cdns,hrs07-idelay-val = <0>; +}; + +&i2c0 { + status = "okay"; +}; + +&i2c1 { + status = "okay"; +}; + +&i2c2 { + status = "okay"; +}; + +&i2c3 { + status = "okay"; +}; + +&i2c4 { + status = "okay"; +}; + +&i3c0 { + status = "okay"; +}; + +&i3c1 { + status = "okay"; +}; + +&osc1 { + clock-frequency = <25000000>; +}; + +&uart0 { + status = "okay"; +}; + +&usb0 { + status = "okay"; + disable-over-current; +}; + +&usb31 { + status = "okay"; + extcon = <&extcon_usb1>; + dr_mode = "host"; +}; + +&watchdog0 { + status = "okay"; +}; + +&watchdog1 { + status = "okay"; +}; + +&watchdog2 { + status = "okay"; +}; + +&watchdog3 { + status = "okay"; +}; + +&watchdog4 { + status = "okay"; +}; + +&qspi { + status = "okay"; + flash@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "micron,mt25qu02g", "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <100000000>; + + m25p,fast-read; + cdns,page-size = <256>; + cdns,block-size = <16>; + cdns,read-delay = <2>; + cdns,tshsl-ns = <50>; + cdns,tsd2d-ns = <50>; + cdns,tchsh-ns = <4>; + cdns,tslch-ns = <4>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + qspi_boot: partition@0 { + label = "u-boot"; + reg = <0x0 0x04200000>; + }; + + root: partition@4200000 { + label = "root"; + reg = <0x04200000 0x0BE00000>; + }; + }; + }; +}; + +&nand { + status = "okay"; + reg = <0x10b80000 0x10000>, + <0x10840000 0x10000>; + + flash@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + nand-bus-width = <8>; + + partition@0 { + label = "u-boot"; + reg = <0 0x200000>; + }; + partition@200000 { + label = 
"root"; + reg = <0x200000 0xffe00000>; + }; + }; +}; + +&spi0 { + status = "okay"; + + flash: m25p128@0 { + compatible = "st,m25p80"; + reg = <0>; + spi-max-frequency = <25000000>; + m25p,fast-read; + status = "okay"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "spi_flash_part0"; + reg = <0x0 0x100000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_tsn_cfg2.dts b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_tsn_cfg2.dts new file mode 100644 index 0000000000000..0bf1c92099fdc --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_tsn_cfg2.dts @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024, Intel Corporation + */ +#include "socfpga_agilex5_socdk.dts" + +&gmac1 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_tsn_cfg2_b0.dts b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_tsn_cfg2_b0.dts new file mode 100644 index 0000000000000..6a121cfea6b1d --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex5_socdk_tsn_cfg2_b0.dts @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2025, Altera Corporation + */ +#include "socfpga_agilex5_socdk_b0.dts" + +&gmac1 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex7f_socdk_multiqspi.dts b/arch/arm64/boot/dts/intel/socfpga_agilex7f_socdk_multiqspi.dts new file mode 100644 index 0000000000000..339b7606c862e --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex7f_socdk_multiqspi.dts @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024, Intel Corporation + */ + +#include "socfpga_agilex_socdk.dts" + +&qspi { + status = "okay"; + /delete-node/ flash@0; + + flash@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "micron,mt25qu02g", "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <25000000>; + + m25p,fast-read; + cdns,page-size = <256>; + cdns,block-size = <16>; + cdns,read-delay = <2>; + 
cdns,tshsl-ns = <50>; + cdns,tsd2d-ns = <50>; + cdns,tchsh-ns = <4>; + cdns,tslch-ns = <4>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + rsu-handle = <&qspi_boot0>; + + qspi_boot0: partition@0 { + label = "u-boot"; + reg = <0x0 0x10000000>; + }; + }; + }; + + flash@1 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "micron,mt25qu02g", "jedec,spi-nor"; + reg = <1>; + spi-max-frequency = <25000000>; + + m25p,fast-read; + cdns,page-size = <256>; + cdns,block-size = <16>; + cdns,read-delay = <2>; + cdns,tshsl-ns = <50>; + cdns,tsd2d-ns = <50>; + cdns,tchsh-ns = <4>; + cdns,tslch-ns = <4>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + rsu-handle = <&qspi_boot1>; + + qspi_boot1: partition@0 { + label = "u-boot"; + reg = <0x0 0x10000000>; + }; + }; + }; + + flash@2 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "micron,mt25qu02g", "jedec,spi-nor"; + reg = <2>; + spi-max-frequency = <25000000>; + + m25p,fast-read; + cdns,page-size = <256>; + cdns,block-size = <16>; + cdns,read-delay = <2>; + cdns,tshsl-ns = <50>; + cdns,tsd2d-ns = <50>; + cdns,tchsh-ns = <4>; + cdns,tslch-ns = <4>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + rsu-handle = <&qspi_boot2>; + + qspi_boot2: partition@0 { + label = "u-boot"; + reg = <0x0 0x10000000>; + }; + }; + }; + + flash@3 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "micron,mt25qu02g", "jedec,spi-nor"; + reg = <3>; + spi-max-frequency = <25000000>; + + m25p,fast-read; + cdns,page-size = <256>; + cdns,block-size = <16>; + cdns,read-delay = <2>; + cdns,tshsl-ns = <50>; + cdns,tsd2d-ns = <50>; + cdns,tchsh-ns = <4>; + cdns,tslch-ns = <4>; + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + rsu-handle = <&qspi_boot3>; + + qspi_boot3: partition@0 { + label = "u-boot"; + reg = <0x0 0x10000000>; + }; 
+ }; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex7f_socdk_pcie_root_port.dts b/arch/arm64/boot/dts/intel/socfpga_agilex7f_socdk_pcie_root_port.dts new file mode 100644 index 0000000000000..0afee81537754 --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex7f_socdk_pcie_root_port.dts @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024, Intel Corporation + */ + +#include "socfpga_agilex_socdk.dts" +#include "socfpga_agilex_pcie_root_port.dtsi" + +&pcie_0_pcie_aglx { + status = "okay"; + compatible = "altr,pcie-root-port-3.0-p-tile"; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex7i_socdk_pcie_root_port.dts b/arch/arm64/boot/dts/intel/socfpga_agilex7i_socdk_pcie_root_port.dts new file mode 100644 index 0000000000000..a6f68d3b32fa1 --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex7i_socdk_pcie_root_port.dts @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024, Intel Corporation + */ + +#include "socfpga_agilex_socdk.dts" +#include "socfpga_agilex_pcie_root_port.dtsi" + +&pcie_0_pcie_aglx { + status = "okay"; + compatible = "altr,pcie-root-port-3.0-f-tile"; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex7m_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_agilex7m_socdk.dts new file mode 100644 index 0000000000000..179c41e1baa79 --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex7m_socdk.dts @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +// Copyright (c) 2023 Intel Corporation + +#include "socfpga_agilex_socdk.dts" + +/ { + model = "SoCFPGA Agilex7-M SoCDK"; + compatible = "intel,socfpga-agilex-socdk", "intel,socfpga-agilex"; +}; + +&qspi { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex7m_socdk_pcie_root_port.dts b/arch/arm64/boot/dts/intel/socfpga_agilex7m_socdk_pcie_root_port.dts new file mode 100644 index 0000000000000..84b9f14ded433 --- /dev/null +++ 
b/arch/arm64/boot/dts/intel/socfpga_agilex7m_socdk_pcie_root_port.dts @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024, Intel Corporation + */ + +#include "socfpga_agilex7m_socdk.dts" +#include "socfpga_agilex_pcie_root_port.dtsi" + +&pcie_0_pcie_aglx { + status = "okay"; + compatible = "altr,pcie-root-port-3.0-r-tile"; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_bittware.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_bittware.dts new file mode 100644 index 0000000000000..e41c6be93043a --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex_bittware.dts @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019, Intel Corporation + */ +#include "socfpga_agilex_bittware.dtsi" +#include "socfpga_agilex_pcie_root_port.dtsi" +#include "fm87_ftile_10g_2port_ptp.dtsi" + +/ { + model = "SoCFPGA Agilex BittWare"; + + aliases { + serial0 = &uart0; + ethernet0 = &gmac0; + ethernet1 = &gmac1; + ethernet2 = &gmac2; + }; + + chosen { + stdout-path = "serial0:921600n8"; + }; + + leds { + compatible = "gpio-leds"; + led0 { + label = "hps_led0"; + gpios = <&portb 20 GPIO_ACTIVE_HIGH>; + }; + + led1 { + label = "hps_led1"; + gpios = <&portb 19 GPIO_ACTIVE_HIGH>; + }; + + led2 { + label = "hps_led2"; + gpios = <&portb 21 GPIO_ACTIVE_HIGH>; + }; + }; + + memory { + device_type = "memory"; + /* We expect the bootloader to fill in the reg */ + reg = <0 0 0 0>; + }; +}; + +&gpio1 { + status = "okay"; +}; + +&gmac2 { + status = "okay"; + phy-mode = "gmii"; /* gmii from rgmii */ + phy-handle = <&phy0>; + + max-frame-size = <9000>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + phy0: ethernet-phy@0 { + reg = <4>; + + txd0-skew-ps = <0>; /* -420ps */ + txd1-skew-ps = <0>; /* -420ps */ + txd2-skew-ps = <0>; /* -420ps */ + txd3-skew-ps = <0>; /* -420ps */ + rxd0-skew-ps = <420>; /* 0ps */ + rxd1-skew-ps = <420>; /* 0ps */ + rxd2-skew-ps = <420>; /* 0ps */ + rxd3-skew-ps = 
<420>; /* 0ps */ + txen-skew-ps = <0>; /* -420ps */ + txc-skew-ps = <900>; /* 0ps */ + rxdv-skew-ps = <420>; /* 0ps */ + rxc-skew-ps = <1680>; /* 780ps */ + }; + }; +}; + +&nand { + status = "okay"; + nand-bus-width = <8>; + + flash@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + nand-bus-width = <8>; + + partition@0 { + label = "u-boot"; + reg = <0 0x200000>; + }; + partition@200000 { + label = "root"; + reg = <0x200000 0x1fe00000>; + }; + }; +}; + +&osc1 { + clock-frequency = <25000000>; +}; + +&uart0 { + status = "okay"; +}; + +&usb0 { + status = "okay"; + disable-over-current; +}; + +&watchdog0 { + status = "okay"; +}; + +&temp_volt { + voltage { + #address-cells = <1>; + #size-cells = <0>; + input@2 { + label = "0.8V VCC"; + reg = <2>; + }; + + input@3 { + label = "1.8V VCCIO_SDM"; + reg = <3>; + }; + + input@4 { + label = "1.8V VCCPT"; + reg = <4>; + }; + + input@5 { + label = "1.2V VCCCRCORE"; + reg = <5>; + }; + + input@6 { + label = "0.9V VCCH"; + reg = <6>; + }; + + input@7 { + label = "0.8V VCCL"; + reg = <7>; + }; + }; + + temperature { + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + label = "Main Die SDM"; + reg = <0x0>; + }; + + input@10000 { + label = "Main Die corner bottom left max"; + reg = <0x10000>; + }; + + input@20000 { + label = "Main Die corner top left max"; + reg = <0x20000>; + }; + + input@30000 { + label = "Main Die corner bottom right max"; + reg = <0x30000>; + }; + + input@40000 { + label = "Main Die corner top right max"; + reg = <0x40000>; + }; + }; +}; + +&pcie_0_pcie_aglx { + status = "okay"; + compatible = "altr,pcie-root-port-3.0-f-tile"; +}; + +&gmac0 { + status = "okay"; + phy-mode = "gmii"; /* gmii from rgmii */ + //phy-handle = <&phy0>; /* added after tcpdump */ + + fixed-link { + speed = <1000>; + full-duplex; + pause; + asym-pause; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_bittware.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex_bittware.dtsi new file mode 100644 index 
0000000000000..a353beef6454a --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex_bittware.dtsi @@ -0,0 +1,709 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019, Intel Corporation + */ + +/dts-v1/; +#include +#include +#include +#include + +/ { + compatible = "intel,socfpga-agilex"; + #address-cells = <2>; + #size-cells = <2>; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + service_reserved: svcbuffer@0 { + compatible = "shared-dma-pool"; + reg = <0x0 0x0 0x0 0x2000000>; + alignment = <0x1000>; + no-map; + }; + service_reserved1: svcbuffer@1 { + compatible = "shared-dma-pool"; + reusable; + reg = <0x0 0x40000000 0x0 0x40000000>; + alignment = <0x1000>; + }; + service_reserved2: svcbuffer@2 { + compatible = "shared-dma-pool"; + no-map-fixup; + reg = <0x20 0x80000000 0x0 0x80000000>; + alignment = <0x1000>; + }; + }; + udmabuf@0 { + compatible = "ikwzm,u-dma-buf"; + device-name = "udmabuf0"; + size = <0x40000000>; // 1GiB + memory-region = <&service_reserved1>; + }; + + udmabuf@1 { + compatible = "ikwzm,u-dma-buf"; + device-name = "udmabuf1"; + size = <0x80000000>; // 2GiB + memory-region = <&service_reserved2>; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: cpu@0 { + compatible = "arm,cortex-a53"; + device_type = "cpu"; + enable-method = "psci"; + reg = <0x0>; + }; + + cpu1: cpu@1 { + compatible = "arm,cortex-a53"; + device_type = "cpu"; + enable-method = "psci"; + reg = <0x1>; + }; + + cpu2: cpu@2 { + compatible = "arm,cortex-a53"; + device_type = "cpu"; + enable-method = "psci"; + reg = <0x2>; + }; + + cpu3: cpu@3 { + compatible = "arm,cortex-a53"; + device_type = "cpu"; + enable-method = "psci"; + reg = <0x3>; + }; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = , + , + , + ; + interrupt-affinity = <&cpu0>, + <&cpu1>, + <&cpu2>, + <&cpu3>; + interrupt-parent = <&intc>; + }; + + psci { + compatible = "arm,psci-0.2"; + method = "smc"; + }; + + intc: 
interrupt-controller@fffc1000 { + compatible = "arm,gic-400", "arm,cortex-a15-gic"; + #interrupt-cells = <3>; + interrupt-controller; + reg = <0x0 0xfffc1000 0x0 0x1000>, + <0x0 0xfffc2000 0x0 0x2000>, + <0x0 0xfffc4000 0x0 0x2000>, + <0x0 0xfffc6000 0x0 0x2000>; + }; + + clocks { + cb_intosc_hs_div2_clk: cb-intosc-hs-div2-clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + }; + + cb_intosc_ls_clk: cb-intosc-ls-clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + }; + + f2s_free_clk: f2s-free-clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + }; + + osc1: osc1 { + #clock-cells = <0>; + compatible = "fixed-clock"; + }; + + qspi_clk: qspi-clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <200000000>; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupt-parent = <&intc>; + interrupts = , + , + , + ; + }; + + usbphy0: usbphy { + #phy-cells = <0>; + compatible = "usb-nop-xceiv"; + }; + + soc0: soc { + #address-cells = <1>; + #size-cells = <1>; + compatible = "simple-bus"; + device_type = "soc"; + interrupt-parent = <&intc>; + ranges = <0 0 0 0xffffffff>; + + base_fpga_region { + #address-cells = <0x2>; + #size-cells = <0x2>; + compatible = "fpga-region"; + fpga-mgr = <&fpga_mgr>; + }; + + clkmgr: clock-controller@ffd10000 { + compatible = "intel,agilex-clkmgr"; + reg = <0xffd10000 0x1000>; + #clock-cells = <1>; + }; + + gmac0: ethernet@ff800000 { + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; + reg = <0xff800000 0x2000>; + interrupts = ; + interrupt-names = "macirq"; + mac-address = [00 00 00 00 00 00]; + resets = <&rst EMAC0_RESET>, <&rst EMAC0_OCP_RESET>; + reset-names = "stmmaceth", "ahb"; + tx-fifo-depth = <16384>; + rx-fifo-depth = <16384>; + snps,multicast-filter-bins = <256>; + iommus = <&smmu 1>; + altr,sysmgr-syscon = <&sysmgr 0x44 0>; + clocks = <&clkmgr AGILEX_EMAC0_CLK>, <&clkmgr AGILEX_EMAC_PTP_CLK>; + clock-names = "stmmaceth", "ptp_ref"; + status = 
"disabled"; + }; + + gmac1: ethernet@ff802000 { + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; + reg = <0xff802000 0x2000>; + interrupts = ; + interrupt-names = "macirq"; + mac-address = [00 00 00 00 00 00]; + resets = <&rst EMAC1_RESET>, <&rst EMAC1_OCP_RESET>; + reset-names = "stmmaceth", "ahb"; + tx-fifo-depth = <16384>; + rx-fifo-depth = <16384>; + snps,multicast-filter-bins = <256>; + iommus = <&smmu 2>; + altr,sysmgr-syscon = <&sysmgr 0x48 0>; + clocks = <&clkmgr AGILEX_EMAC1_CLK>, <&clkmgr AGILEX_EMAC_PTP_CLK>; + clock-names = "stmmaceth", "ptp_ref"; + status = "disabled"; + }; + + gmac2: ethernet@ff804000 { + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; + reg = <0xff804000 0x2000>; + interrupts = ; + interrupt-names = "macirq"; + mac-address = [00 00 00 00 00 00]; + resets = <&rst EMAC2_RESET>, <&rst EMAC2_OCP_RESET>; + reset-names = "stmmaceth", "ahb"; + tx-fifo-depth = <16384>; + rx-fifo-depth = <16384>; + snps,multicast-filter-bins = <256>; + iommus = <&smmu 3>; + altr,sysmgr-syscon = <&sysmgr 0x4c 0>; + clocks = <&clkmgr AGILEX_EMAC2_CLK>, <&clkmgr AGILEX_EMAC_PTP_CLK>; + clock-names = "stmmaceth", "ptp_ref"; + status = "disabled"; + }; + + gpio0: gpio@ffc03200 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dw-apb-gpio"; + reg = <0xffc03200 0x100>; + resets = <&rst GPIO0_RESET>; + status = "disabled"; + + porta: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <24>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = ; + }; + }; + + gpio1: gpio@ffc03300 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dw-apb-gpio"; + reg = <0xffc03300 0x100>; + resets = <&rst GPIO1_RESET>; + status = "disabled"; + + portb: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <24>; + reg = <0>; + 
interrupt-controller; + #interrupt-cells = <2>; + interrupts = ; + }; + }; + + i2c0: i2c@ffc02800 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; + reg = <0xffc02800 0x100>; + interrupts = ; + resets = <&rst I2C0_RESET>; + clocks = <&clkmgr AGILEX_L4_SP_CLK>; + status = "disabled"; + }; + + i2c1: i2c@ffc02900 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; + reg = <0xffc02900 0x100>; + interrupts = ; + resets = <&rst I2C1_RESET>; + clocks = <&clkmgr AGILEX_L4_SP_CLK>; + status = "disabled"; + }; + + i2c2: i2c@ffc02a00 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; + reg = <0xffc02a00 0x100>; + interrupts = ; + resets = <&rst I2C2_RESET>; + clocks = <&clkmgr AGILEX_L4_SP_CLK>; + status = "disabled"; + }; + + i2c3: i2c@ffc02b00 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; + reg = <0xffc02b00 0x100>; + interrupts = ; + resets = <&rst I2C3_RESET>; + clocks = <&clkmgr AGILEX_L4_SP_CLK>; + status = "disabled"; + }; + + i2c4: i2c@ffc02c00 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "intel,socfpga-i2c", "snps,designware-i2c"; + reg = <0xffc02c00 0x100>; + interrupts = ; + resets = <&rst I2C4_RESET>; + clocks = <&clkmgr AGILEX_L4_SP_CLK>; + status = "disabled"; + }; + + mmc: mmc@ff808000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "altr,socfpga-dw-mshc"; + reg = <0xff808000 0x1000>; + interrupts = ; + fifo-depth = <0x400>; + resets = <&rst SDMMC_RESET>; + reset-names = "reset"; + clocks = <&clkmgr AGILEX_L4_MP_CLK>, + <&clkmgr AGILEX_SDMMC_CLK>; + clock-names = "biu", "ciu"; + iommus = <&smmu 5>; + altr,sysmgr-syscon = <&sysmgr 0x28 4>; + status = "disabled"; + }; + + nand: nand-controller@ffb90000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "altr,socfpga-denali-nand"; + reg = <0xffb90000 
0x10000>, + <0xffb80000 0x1000>; + reg-names = "nand_data", "denali_reg"; + interrupts = ; + clocks = <&clkmgr AGILEX_NAND_CLK>, + <&clkmgr AGILEX_NAND_X_CLK>, + <&clkmgr AGILEX_NAND_ECC_CLK>; + clock-names = "nand", "nand_x", "ecc"; + resets = <&rst NAND_RESET>, <&rst NAND_OCP_RESET>; + status = "disabled"; + }; + + ocram: sram@ffe00000 { + compatible = "mmio-sram"; + reg = <0xffe00000 0x40000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0xffe00000 0x40000>; + }; + + pdma: dma-controller@ffda0000 { + compatible = "arm,pl330", "arm,primecell"; + reg = <0xffda0000 0x1000>; + interrupts = , + , + , + , + , + , + , + , + ; + #dma-cells = <1>; + resets = <&rst DMA_RESET>, <&rst DMA_OCP_RESET>; + reset-names = "dma", "dma-ocp"; + clocks = <&clkmgr AGILEX_L4_MAIN_CLK>; + clock-names = "apb_pclk"; + }; + + pinctrl0: pinctrl@ffd13000 { + compatible = "pinctrl-single"; + #pinctrl-cells = <1>; + reg = <0xffd13000 0xa0>; + pinctrl-single,register-width = <32>; + pinctrl-single,function-mask = <0x0000000f>; + }; + + pinctrl1: pinconf@ffd13100 { + compatible = "pinctrl-single"; + #pinctrl-cells = <1>; + reg = <0xffd13100 0x20>; + pinctrl-single,register-width = <32>; + }; + + rst: rstmgr@ffd11000 { + compatible = "altr,stratix10-rst-mgr", "altr,rst-mgr"; + reg = <0xffd11000 0x100>; + #reset-cells = <1>; + }; + + smmu: iommu@fa000000 { + compatible = "arm,mmu-500", "arm,smmu-v2"; + reg = <0xfa000000 0x40000>; + #global-interrupts = <2>; + #iommu-cells = <1>; + interrupt-parent = <&intc>; + /* Global Secure Fault */ + interrupts = , + /* Global Non-secure Fault */ + , + /* Non-secure Context Interrupts (32) */ + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + stream-match-mask = <0x7ff0>; + clocks = <&clkmgr AGILEX_MPU_CCU_CLK>, + <&clkmgr AGILEX_L3_MAIN_FREE_CLK>, + <&clkmgr AGILEX_L4_MAIN_CLK>; + status = "disabled"; + }; + + spi0: spi@ffda4000 { + compatible = "snps,dw-apb-ssi"; + 
#address-cells = <1>; + #size-cells = <0>; + reg = <0xffda4000 0x1000>; + interrupts = ; + resets = <&rst SPIM0_RESET>; + reset-names = "spi"; + reg-io-width = <4>; + num-cs = <4>; + clocks = <&clkmgr AGILEX_L4_MAIN_CLK>; + status = "disabled"; + }; + + spi1: spi@ffda5000 { + compatible = "snps,dw-apb-ssi"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xffda5000 0x1000>; + interrupts = ; + resets = <&rst SPIM1_RESET>; + reset-names = "spi"; + reg-io-width = <4>; + num-cs = <4>; + clocks = <&clkmgr AGILEX_L4_MAIN_CLK>; + status = "disabled"; + }; + + sysmgr: sysmgr@ffd12000 { + compatible = "altr,sys-mgr-s10","altr,sys-mgr"; + reg = <0xffd12000 0x500>; + }; + + timer0: timer0@ffc03000 { + compatible = "snps,dw-apb-timer"; + interrupts = ; + reg = <0xffc03000 0x100>; + clocks = <&clkmgr AGILEX_L4_SP_CLK>; + clock-names = "timer"; + }; + + timer1: timer1@ffc03100 { + compatible = "snps,dw-apb-timer"; + interrupts = ; + reg = <0xffc03100 0x100>; + clocks = <&clkmgr AGILEX_L4_SP_CLK>; + clock-names = "timer"; + }; + + timer2: timer2@ffd00000 { + compatible = "snps,dw-apb-timer"; + interrupts = ; + reg = <0xffd00000 0x100>; + clocks = <&clkmgr AGILEX_L4_SP_CLK>; + clock-names = "timer"; + }; + + timer3: timer3@ffd00100 { + compatible = "snps,dw-apb-timer"; + interrupts = ; + reg = <0xffd00100 0x100>; + clocks = <&clkmgr AGILEX_L4_SP_CLK>; + clock-names = "timer"; + }; + + uart0: serial@ffc02000 { + compatible = "snps,dw-apb-uart"; + reg = <0xffc02000 0x100>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + resets = <&rst UART0_RESET>; + status = "disabled"; + clocks = <&clkmgr AGILEX_L4_SP_CLK>; + }; + + uart1: serial@ffc02100 { + compatible = "snps,dw-apb-uart"; + reg = <0xffc02100 0x100>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + resets = <&rst UART1_RESET>; + clocks = <&clkmgr AGILEX_L4_SP_CLK>; + status = "disabled"; + }; + + usb0: usb@ffb00000 { + compatible = "intel,socfpga-agilex-hsotg", "snps,dwc2"; + reg = <0xffb00000 
0x40000>; + interrupts = ; + phys = <&usbphy0>; + phy-names = "usb2-phy"; + resets = <&rst USB0_RESET>, <&rst USB0_OCP_RESET>; + reset-names = "dwc2", "dwc2-ecc"; + clocks = <&clkmgr AGILEX_USB_CLK>; + clock-names = "otg"; + iommus = <&smmu 6>; + status = "disabled"; + }; + + usb1: usb@ffb40000 { + compatible = "intel,socfpga-agilex-hsotg", "snps,dwc2"; + reg = <0xffb40000 0x40000>; + interrupts = ; + phys = <&usbphy0>; + phy-names = "usb2-phy"; + resets = <&rst USB1_RESET>, <&rst USB1_OCP_RESET>; + reset-names = "dwc2", "dwc2-ecc"; + iommus = <&smmu 7>; + clocks = <&clkmgr AGILEX_USB_CLK>; + status = "disabled"; + }; + + watchdog0: watchdog@ffd00200 { + compatible = "snps,dw-wdt"; + reg = <0xffd00200 0x100>; + interrupts = ; + resets = <&rst WATCHDOG0_RESET>; + clocks = <&clkmgr AGILEX_L4_SYS_FREE_CLK>; + status = "disabled"; + }; + + watchdog1: watchdog@ffd00300 { + compatible = "snps,dw-wdt"; + reg = <0xffd00300 0x100>; + interrupts = ; + resets = <&rst WATCHDOG1_RESET>; + clocks = <&clkmgr AGILEX_L4_SYS_FREE_CLK>; + status = "disabled"; + }; + + watchdog2: watchdog@ffd00400 { + compatible = "snps,dw-wdt"; + reg = <0xffd00400 0x100>; + interrupts = ; + resets = <&rst WATCHDOG2_RESET>; + clocks = <&clkmgr AGILEX_L4_SYS_FREE_CLK>; + status = "disabled"; + }; + + watchdog3: watchdog@ffd00500 { + compatible = "snps,dw-wdt"; + reg = <0xffd00500 0x100>; + interrupts = ; + resets = <&rst WATCHDOG3_RESET>; + clocks = <&clkmgr AGILEX_L4_SYS_FREE_CLK>; + status = "disabled"; + }; + + sdr: sdr@f8011100 { + compatible = "altr,sdr-ctl", "syscon"; + reg = <0xf8011100 0xc0>; + }; + + eccmgr { + compatible = "altr,socfpga-s10-ecc-manager", + "altr,socfpga-a10-ecc-manager"; + altr,sysmgr-syscon = <&sysmgr>; + #address-cells = <1>; + #size-cells = <1>; + interrupts = ; + interrupt-controller; + #interrupt-cells = <2>; + ranges; + + sdramedac { + compatible = "altr,sdram-edac-s10"; + altr,sdr-syscon = <&sdr>; + interrupts = <16 IRQ_TYPE_LEVEL_HIGH>; + }; + + ocram-ecc@ff8cc000 { + 
compatible = "altr,socfpga-s10-ocram-ecc", + "altr,socfpga-a10-ocram-ecc"; + reg = <0xff8cc000 0x100>; + altr,ecc-parent = <&ocram>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH>; + }; + + usb0-ecc@ff8c4000 { + compatible = "altr,socfpga-s10-usb-ecc", + "altr,socfpga-usb-ecc"; + reg = <0xff8c4000 0x100>; + altr,ecc-parent = <&usb0>; + interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; + }; + + emac0-rx-ecc@ff8c0000 { + compatible = "altr,socfpga-s10-eth-mac-ecc", + "altr,socfpga-eth-mac-ecc"; + reg = <0xff8c0000 0x100>; + altr,ecc-parent = <&gmac0>; + interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; + }; + + emac0-tx-ecc@ff8c0400 { + compatible = "altr,socfpga-s10-eth-mac-ecc", + "altr,socfpga-eth-mac-ecc"; + reg = <0xff8c0400 0x100>; + altr,ecc-parent = <&gmac0>; + interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; + }; + + sdmmca-ecc@ff8c8c00 { + compatible = "altr,socfpga-s10-sdmmc-ecc", + "altr,socfpga-sdmmc-ecc"; + reg = <0xff8c8c00 0x100>; + altr,ecc-parent = <&mmc>; + interrupts = <14 IRQ_TYPE_LEVEL_HIGH>, + <15 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + + qspi: spi@ff8d2000 { + compatible = "intel,socfpga-qspi", "cdns,qspi-nor"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xff8d2000 0x100>, + <0xff900000 0x100000>; + interrupts = ; + cdns,fifo-depth = <128>; + cdns,fifo-width = <4>; + cdns,trigger-address = <0x00000000>; + clocks = <&qspi_clk>; + + status = "disabled"; + }; + + firmware { + svc { + compatible = "intel,agilex-svc"; + method = "smc"; + memory-region = <&service_reserved>; + + fpga_mgr: fpga-mgr { + compatible = "intel,agilex-soc-fpga-mgr"; + }; + + fcs: fcs { + compatible = "intel,agilex-soc-fcs"; + platform = "agilex"; + }; + + temp_volt: hwmon { + compatible = "intel,soc64-hwmon"; + }; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_etile.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex_etile.dtsi new file mode 100644 index 0000000000000..ebdf8037eb850 --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex_etile.dtsi @@ -0,0 +1,127 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(C) 2022, Intel Corporation + */ + +/* Add this piece of dtsi fragment as #include "socfpga_agilex_etile.dtsi" + * in the file socfpga_agilex_socdk.dts. Compile it in the kernel along with + * socfpga_agilex.dtsi in single channel design rsfec and tod_ctrl should be mapped + * for fec-cw-pos-rx + */ + +/{ + soc { + clocks { + ptp_ctrl_10G_clk: ptp_ctrl_10G_clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <156250000>; + clock-output-names = "ptp_ctrl_10G_clk-clk"; + }; + }; + + agilex_hps_bridges: bridge@80000000 { + compatible = "simple-bus"; + reg = <0x80000000 0x60000000>, + <0xf9000000 0x00100000>; + reg-names = "axi_h2f", "axi_h2f_lw"; + #address-cells = <0x2>; + #size-cells = <0x1>; + ranges = <0x00000000 0x00000000 0xf9000000 0x00100000>, + <0x00000001 0x02220010 0x82220010 0x00000010>, + <0x00000001 0x02220020 0x82220020 0x00000010>, + <0x00000001 0x02221000 0x82221000 0x00000800>, + <0x00000001 0x0220C000 0x8220C000 0x00004000>, + <0x00000001 0x02180000 0x82180000 0x00080000>, + <0x00000001 0x02220040 0x82220040 0x00000020>, + <0x00000001 0x022103C0 0x822103C0 0x00000020>, + <0x00000001 0x02210700 0x82210700 0x00000020>, + <0x00000001 0x02210720 0x82210720 0x00000020>, + <0x00000001 0x02210B00 0x82210B00 0x00000020>, + <0x00000001 0x02210B20 0x82210B20 0x00000020>, + <0x00000001 0x02210B40 0x82210B40 0x00000020>, + <0x00000001 0x00000300 0xf9000300 0x00000010>, + <0x00000001 0x02000000 0x82000000 0x00000010>; + + etile_0_etile: ethernet@0x102001000 { + compatible = "altr,etile-msgdma-2.0"; + reg-names = "rsfec", "eth_reconfig", "xcvr", "tod_ctrl", "tod_pio", + "tx_pref", "tx_csr", "rx_pref", "rx_csr", + "rx_fifo"; + reg = <0x00000001 0x02221000 0x00000800>, + <0x00000001 0x0220C000 0x00004000>, + <0x00000001 0x02180000 0x00080000>, + <0x00000001 0x02220040 0x00000040>, + <0x00000001 0x022103C0 0x00000020>, + <0x00000001 0x02210700 0x00000020>, + <0x00000001 0x02210720 
0x00000020>, + <0x00000001 0x02210B00 0x00000020>, + <0x00000001 0x02210B20 0x00000020>, + <0x00000001 0x02210B40 0x00000020>; + //dma-coherent; + phy-mode = "25gbase-r"; + qsfp = <&qsfp_eth0>; + clocks = <&ptp_ctrl_10G_clk>; + clock-names = "tod_clk"; + interrupt-parent = <&intc>; + interrupt-names = "tx_irq", "rx_irq"; + interrupts = <0 24 4>, + <0 25 4>; + rx-fifo-depth = <0x4000>; + tx-fifo-depth = <0x1000>; + rx-fifo-almost-full = <0x2000>; + rx-fifo-almost-empty = <0x1000>; + //local-mac-address = [fa b1 0a 12 72 44]; + altr,tx-pma-delay-ns = <0xD>; + altr,rx-pma-delay-ns = <0x8>; + altr,tx-pma-delay-fns = <0x24D>; + altr,rx-pma-delay-fns = <0x3E97>; + altr,tx-external-phy-delay-ns = <0x0>; + altr,rx-external-phy-delay-ns = <0x0>; + fec-type = "kr-fec"; + fec-cw-pos-rx = <3>; + altr,has-ptp; + status = "okay"; + }; + + qsfp_eth0: qsfp-eth0 { + compatible = "sff,qsfp"; + i2c-bus = <&i2c0>; + qsfpdd_initmode-gpio = <&qsfpdd_status_pio_1 0 GPIO_ACTIVE_HIGH>; + qsfpdd_modseln-gpio = <&qsfpdd_status_pio_1 2 GPIO_ACTIVE_LOW>; + qsfpdd_modprsn-gpio = <&qsfpdd_status_pio 0 GPIO_ACTIVE_LOW>; + qsfpdd_resetn-gpio = <&qsfpdd_status_pio_1 1 GPIO_ACTIVE_LOW>; + qsfpdd_intn-gpio = <&qsfpdd_status_pio 1 GPIO_ACTIVE_LOW>; + maximum-power-milliwatt = <1000>; + status = "okay"; + }; + + qsfpdd_status_pio: gpio@2220010 { + compatible = "altr,pio-1.0"; + reg = <0x00000001 0x02220010 0x10>; + interrupt-parent = <&intc>; + interrupts = <0 23 4>; + altr,gpio-bank-width = <4>; + altr,interrupt-type = <2>; + altr,interrupt_type = <2>; + #gpio-cells = <2>; + gpio-controller; + status = "okay"; + }; + + qsfpdd_status_pio_1: gpio@2220020 { + compatible = "altr,pio-1.0"; + reg = <0x00000001 0x02220020 0x10>; + interrupt-parent = <&intc>; + interrupts = <0 23 4>; + altr,gpio-bank-width = <4>; + altr,interrupt-type = <2>; + altr,interrupt_type = <2>; + #gpio-cells = <2>; + gpio-controller; + status = "okay"; + }; + }; + + }; +}; diff --git 
a/arch/arm64/boot/dts/intel/socfpga_agilex_n6000.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_n6000.dts index d22de06e98396..f45a76bff82ad 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex_n6000.dts +++ b/arch/arm64/boot/dts/intel/socfpga_agilex_n6000.dts @@ -64,3 +64,69 @@ &fpga_mgr { status = "disabled"; }; + +&temp_volt { + voltage { + #address-cells = <1>; + #size-cells = <0>; + input@2 { + label = "0.8V VCC"; + reg = <2>; + }; + + input@3 { + label = "1.8V VCCIO_SDM"; + reg = <3>; + }; + + input@4 { + label = "1.8V VCCPT"; + reg = <4>; + }; + + input@5 { + label = "1.2V VCCCRCORE"; + reg = <5>; + }; + + input@6 { + label = "0.9V VCCH"; + reg = <6>; + }; + + input@7 { + label = "0.8V VCCL"; + reg = <7>; + }; + }; + + temperature { + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + label = "Main Die SDM"; + reg = <0x0>; + }; + + input@10000 { + label = "Main Die corner bottom left max"; + reg = <0x10000>; + }; + + input@20000 { + label = "Main Die corner top left max"; + reg = <0x20000>; + }; + + input@30000 { + label = "Main Die corner bottom right max"; + reg = <0x30000>; + }; + + input@40000 { + label = "Main Die corner top right max"; + reg = <0x40000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_n6010.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_n6010.dts new file mode 100644 index 0000000000000..55fc6f224c66a --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex_n6010.dts @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021, Intel Corporation + */ +#include "socfpga_agilex.dtsi" + +/ { + model = "SoCFPGA Agilex n6010"; + + aliases { + serial0 = &uart0; + ethernet0 = &gmac0; + ethernet1 = &gmac1; + ethernet2 = &gmac2; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + memory { + device_type = "memory"; + /* We expect the bootloader to fill in the reg */ + reg = <0 0 0 0>; + }; + + soc { + clocks { + osc1 { + clock-frequency = <25000000>; + }; + }; + agilex_hps_bridges: 
bridge@80000000 { + compatible = "simple-bus"; + reg = <0x80000000 0x60000000>, + <0xf9000000 0x00100000>; + reg-names = "axi_h2f", "axi_h2f_lw"; + #address-cells = <0x2>; + #size-cells = <0x1>; + ranges = <0x00000000 0x00000000 0xf9000000 0x00001000>, + <0x00000001 0x02001000 0x82001000 0x00000800>, + <0x00000001 0x02080000 0x82080000 0x00004000>, + <0x00000001 0x02100000 0x82100000 0x00080000>, + <0x00000001 0x02000040 0x82000040 0x00000020>, + <0x00000001 0x02000800 0x82000800 0x00000020>, + <0x00000001 0x02000820 0x82000820 0x00000020>, + <0x00000001 0x02000900 0x82000900 0x00000020>, + <0x00000001 0x02000920 0x82000920 0x00000020>, + <0x00000001 0x02000940 0x82000940 0x00000020>, + <0x00000001 0x00000300 0xf9000300 0x00000010>, + <0x00000001 0x02000000 0x82000000 0x00000010>; + + uio_cp_eng@0xf9000000 { + compatible = "generic-uio"; + reg = <0x00000000 0x00000000 0x00001000>; + status = "okay"; + }; + }; + }; +}; + +&uart0 { + status = "okay"; +}; + +&spi0 { + status = "okay"; + + spidev: spidev@0 { + status = "okay"; + compatible = "linux,spidev"; + spi-max-frequency = <25000000>; + reg = <0>; + }; +}; + +&watchdog0 { + status = "okay"; +}; + +&fpga_mgr { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_pcie_root_port.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex_pcie_root_port.dtsi new file mode 100644 index 0000000000000..d8ed797d08bb6 --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex_pcie_root_port.dtsi @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024, Intel Corporation + */ +&soc0 { + aglx_hps_bridges: bridge@80000000 { + compatible = "simple-bus"; + reg = <0x80000000 0x20200000>, + <0xf9000000 0x00100000>; + reg-names = "axi_h2f", "axi_h2f_lw"; + #address-cells = <0x2>; + #size-cells = <0x1>; + ranges = <0x00000000 0x00000000 0x80000000 0x00040000>, + <0x00000000 0x10000000 0x90100000 0x0ff00000>, + <0x00000000 0x20000000 0xa0000000 0x00200000>, + <0x00000001 0x00010000 
0xf9010000 0x00008000>, + <0x00000001 0x00018000 0xf9018000 0x00000080>, + <0x00000001 0x00018080 0xf9018080 0x00000010>; + + pcie_0_pcie_aglx: pcie@200000000 { + reg = <0x00000000 0x10000000 0x10000000>, + <0x00000001 0x00010000 0x00008000>, + <0x00000000 0x20000000 0x00200000>; + reg-names = "Txs", "Cra", "Hip"; + interrupt-parent = <&intc>; + interrupts = ; + interrupt-controller; + #interrupt-cells = <0x1>; + device_type = "pci"; + bus-range = <0x0000000 0x000000ff>; + ranges = <0x82000000 0x00000000 0x00100000 0x00000000 0x10000000 0x00000000 0x0ff00000>; + msi-parent = <&pcie_0_msi_irq>; + #address-cells = <0x3>; + #size-cells = <0x2>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &pcie_0_pcie_aglx 0x1>, + <0x0 0x0 0x0 0x2 &pcie_0_pcie_aglx 0x2>, + <0x0 0x0 0x0 0x3 &pcie_0_pcie_aglx 0x3>, + <0x0 0x0 0x0 0x4 &pcie_0_pcie_aglx 0x4>; + status = "disabled"; + }; + + pcie_0_msi_irq: msi@10008080 { + compatible = "altr,msi-1.0"; + reg = <0x00000001 0x00018080 0x00000010>, + <0x00000001 0x00018000 0x00000080>; + reg-names = "csr", "vector_slave"; + interrupt-parent = <&intc>; + interrupts = ; + msi-controller; + num-vectors = <0x20>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts index b31cfa6b802d9..fc0a62678a2f7 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts +++ b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts @@ -16,7 +16,7 @@ }; chosen { - stdout-path = "serial0:115200n8"; + stdout-path = "serial0:921600n8"; }; leds { @@ -116,11 +116,14 @@ cdns,tsd2d-ns = <50>; cdns,tchsh-ns = <4>; cdns,tslch-ns = <4>; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; + rsu-handle = <&qspi_boot>; qspi_boot: partition@0 { label = "Boot and fpga data"; @@ -134,3 +137,69 @@ }; }; }; + +&temp_volt { + voltage { + #address-cells = <1>; + #size-cells = <0>; + 
input@2 { + label = "0.8V VCC"; + reg = <2>; + }; + + input@3 { + label = "1.8V VCCIO_SDM"; + reg = <3>; + }; + + input@4 { + label = "1.8V VCCPT"; + reg = <4>; + }; + + input@5 { + label = "1.2V VCCCRCORE"; + reg = <5>; + }; + + input@6 { + label = "0.9V VCCH"; + reg = <6>; + }; + + input@7 { + label = "0.8V VCCL"; + reg = <7>; + }; + }; + + temperature { + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + label = "Main Die SDM"; + reg = <0x0>; + }; + + input@10000 { + label = "Main Die corner bottom left max"; + reg = <0x10000>; + }; + + input@20000 { + label = "Main Die corner top left max"; + reg = <0x20000>; + }; + + input@30000 { + label = "Main Die corner bottom right max"; + reg = <0x30000>; + }; + + input@40000 { + label = "Main Die corner top right max"; + reg = <0x40000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_atfboot.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_atfboot.dts new file mode 100644 index 0000000000000..0ec32b701ded8 --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_atfboot.dts @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024, Intel Corporation + */ +#include "socfpga_agilex_socdk.dts" + +/ { + model = "SoCFPGA Agilex SoCDK"; + compatible = "intel,socfpga-agilex-socdk", "intel,socfpga-agilex"; + + chosen { + stdout-path = "serial0:115200n8"; + bootargs = "console=ttys0,115200 root=/dev/ram0 rw \ + initrd=0x10000000 init=/sbin/init ramdisk_size=10000000 \ + earlycon panic=-1 rootfstype=ext3"; + }; + + memory { + device_type = "memory"; + /* We expect the bootloader to fill in the reg */ + reg = <0 0x00000000 0 0x80000000>; + #address-cells = <0x2>; + #size-cells = <0x2>; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts index 0f9020bd0c52a..bdf010bd814f9 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts +++ 
b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts @@ -114,3 +114,70 @@ &watchdog0 { status = "okay"; }; + +&temp_volt { + voltage { + #address-cells = <1>; + #size-cells = <0>; + input@2 { + label = "0.8V VCC"; + reg = <2>; + }; + + input@3 { + label = "1.8V VCCIO_SDM"; + reg = <3>; + }; + + input@4 { + label = "1.8V VCCPT"; + reg = <4>; + }; + + input@5 { + label = "1.2V VCCCRCORE"; + reg = <5>; + }; + + input@6 { + label = "0.9V VCCH"; + reg = <6>; + }; + + input@7 { + label = "0.8V VCCL"; + reg = <7>; + }; + }; + + temperature { + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + label = "Main Die SDM"; + reg = <0x0>; + }; + + input@10000 { + label = "Main Die corner bottom left max"; + reg = <0x10000>; + }; + + input@20000 { + label = "Main Die corner top left max"; + reg = <0x20000>; + }; + + input@30000 { + label = "Main Die corner bottom right max"; + reg = <0x30000>; + }; + + input@40000 { + label = "Main Die corner top right max"; + reg = <0x40000>; + }; + }; +}; + diff --git a/arch/arm64/boot/dts/intel/socfpga_dm.dtsi b/arch/arm64/boot/dts/intel/socfpga_dm.dtsi new file mode 100644 index 0000000000000..70996ab30cc9f --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_dm.dtsi @@ -0,0 +1,466 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Intel Corporation + */ + +/dts-v1/; +#include +#include +#include + +/ { + compatible = "intel,socfpga-agilex"; + #address-cells = <2>; + #size-cells = <2>; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + service_reserved: svcbuffer@0 { + compatible = "shared-dma-pool"; + reg = <0x0 0x0 0x0 0x2000000>; + alignment = <0x1000>; + no-map; + }; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: cpu@0 { + compatible = "arm,cortex-a53"; + device_type = "cpu"; + enable-method = "psci"; + reg = <0x0>; + }; + + cpu1: cpu@1 { + compatible = "arm,cortex-a53"; + device_type = "cpu"; + enable-method = "psci"; + reg = <0x1>; + }; + + cpu2: cpu@2 { 
+ compatible = "arm,cortex-a53"; + device_type = "cpu"; + enable-method = "psci"; + reg = <0x2>; + }; + + cpu3: cpu@3 { + compatible = "arm,cortex-a53"; + device_type = "cpu"; + enable-method = "psci"; + reg = <0x3>; + }; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = <0 170 4>, + <0 171 4>, + <0 172 4>, + <0 173 4>; + interrupt-affinity = <&cpu0>, + <&cpu1>, + <&cpu2>, + <&cpu3>; + interrupt-parent = <&intc>; + }; + + psci { + compatible = "arm,psci-0.2"; + method = "smc"; + }; + + intc: intc@fffc1000 { + compatible = "arm,gic-400", "arm,cortex-a15-gic"; + #interrupt-cells = <3>; + interrupt-controller; + reg = <0x0 0xfffc1000 0x0 0x1000>, + <0x0 0xfffc2000 0x0 0x2000>, + <0x0 0xfffc4000 0x0 0x2000>, + <0x0 0xfffc6000 0x0 0x2000>; + }; + + soc { + #address-cells = <1>; + #size-cells = <1>; + compatible = "simple-bus"; + device_type = "soc"; + interrupt-parent = <&intc>; + ranges = <0 0 0 0xffffffff>; + +/* Use fixed frequency clocks in Simics - remove clk manager for initial DT */ + clocks { + wdt_clk: wdt-clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <100000000>; + }; + }; + + gmac0: ethernet@ff800000 { + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; + reg = <0xff800000 0x2000>; + interrupts = <0 90 4>; + interrupt-names = "macirq"; + mac-address = [00 00 00 00 00 00]; + resets = <&rst EMAC0_RESET>, <&rst EMAC0_OCP_RESET>; + reset-names = "stmmaceth", "stmmaceth-ocp"; + tx-fifo-depth = <16384>; + rx-fifo-depth = <16384>; + snps,multicast-filter-bins = <256>; + iommus = <&smmu 1>; + altr,sysmgr-syscon = <&sysmgr 0x44 0>; + clock-frequency = <250000000>; + status = "disabled"; + }; + + gmac1: ethernet@ff802000 { + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; + reg = <0xff802000 0x2000>; + interrupts = <0 91 4>; + interrupt-names = "macirq"; + mac-address = [00 00 00 00 00 00]; + resets = <&rst EMAC1_RESET>, <&rst EMAC1_OCP_RESET>; + reset-names = 
"stmmaceth", "stmmaceth-ocp"; + tx-fifo-depth = <16384>; + rx-fifo-depth = <16384>; + snps,multicast-filter-bins = <256>; + iommus = <&smmu 2>; + altr,sysmgr-syscon = <&sysmgr 0x48 8>; + clock-frequency = <250000000>; + status = "disabled"; + }; + + gmac2: ethernet@ff804000 { + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; + reg = <0xff804000 0x2000>; + interrupts = <0 92 4>; + interrupt-names = "macirq"; + mac-address = [00 00 00 00 00 00]; + resets = <&rst EMAC2_RESET>, <&rst EMAC2_OCP_RESET>; + reset-names = "stmmaceth", "stmmaceth-ocp"; + tx-fifo-depth = <16384>; + rx-fifo-depth = <16384>; + snps,multicast-filter-bins = <256>; + iommus = <&smmu 3>; + altr,sysmgr-syscon = <&sysmgr 0x4c 16>; + clock-frequency = <250000000>; + status = "disabled"; + }; + + gpio0: gpio@ffc03200 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dw-apb-gpio"; + reg = <0xffc03200 0x100>; + resets = <&rst GPIO0_RESET>; + clock-frequency = <100000000>; + status = "disabled"; + + porta: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <24>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = <0 110 4>; + }; + }; + + gpio1: gpio@ffc03300 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dw-apb-gpio"; + reg = <0xffc03300 0x100>; + resets = <&rst GPIO1_RESET>; + clock-frequency = <100000000>; + status = "disabled"; + + portb: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <24>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = <0 111 4>; + }; + }; + + i2c0: i2c@ffc02800 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0xffc02800 0x100>; + interrupts = <0 103 4>; + resets = <&rst I2C0_RESET>; + clock-frequency = <100000000>; + status = "disabled"; + }; + + i2c1: i2c@ffc02900 { + 
#address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0xffc02900 0x100>; + interrupts = <0 104 4>; + resets = <&rst I2C1_RESET>; + clock-frequency = <100000000>; + status = "disabled"; + }; + + i2c2: i2c@ffc02a00 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0xffc02a00 0x100>; + interrupts = <0 105 4>; + resets = <&rst I2C2_RESET>; + clock-frequency = <100000000>; + status = "disabled"; + }; + + i2c3: i2c@ffc02b00 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0xffc02b00 0x100>; + interrupts = <0 106 4>; + resets = <&rst I2C3_RESET>; + clock-frequency = <100000000>; + status = "disabled"; + }; + + i2c4: i2c@ffc02c00 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0xffc02c00 0x100>; + interrupts = <0 107 4>; + resets = <&rst I2C4_RESET>; + clock-frequency = <100000000>; + status = "disabled"; + }; + + mmc: dwmmc0@ff808000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "altr,socfpga-dw-mshc"; + reg = <0xff808000 0x1000>; + interrupts = <0 96 4>; + fifo-depth = <0x400>; + resets = <&rst SDMMC_RESET>; + reset-names = "reset"; + iommus = <&smmu 5>; + clock-frequency = <100000000>; + status = "disabled"; + }; + + ocram: sram@ffe00000 { + compatible = "mmio-sram"; + reg = <0xffe00000 0x40000>; + status = "disabled"; + }; + + pdma: pdma@ffda0000 { + compatible = "arm,pl330", "arm,primecell"; + reg = <0xffda0000 0x1000>; + interrupts = <0 81 4>, + <0 82 4>, + <0 83 4>, + <0 84 4>, + <0 85 4>, + <0 86 4>, + <0 87 4>, + <0 88 4>, + <0 89 4>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; + resets = <&rst DMA_RESET>, <&rst DMA_OCP_RESET>; + reset-names = "dma", "dma-ocp"; + clock-frequency = <100000000>; + status = "disabled"; + }; + + rst: rstmgr@ffd11000 { + #reset-cells = <1>; + compatible = "altr,stratix10-rst-mgr"; + reg = <0xffd11000 0x100>; + }; + + smmu: 
iommu@fa000000 { + compatible = "arm,mmu-500", "arm,smmu-v2"; + reg = <0xfa000000 0x40000>; + #global-interrupts = <2>; + #iommu-cells = <1>; + interrupt-parent = <&intc>; + interrupts = <0 128 4>, /* Global Secure Fault */ + <0 129 4>, /* Global Non-secure Fault */ + /* Non-secure Context Interrupts (32) */ + <0 138 4>, <0 139 4>, <0 140 4>, <0 141 4>, + <0 142 4>, <0 143 4>, <0 144 4>, <0 145 4>, + <0 146 4>, <0 147 4>, <0 148 4>, <0 149 4>, + <0 150 4>, <0 151 4>, <0 152 4>, <0 153 4>, + <0 154 4>, <0 155 4>, <0 156 4>, <0 157 4>, + <0 158 4>, <0 159 4>, <0 160 4>, <0 161 4>, + <0 162 4>, <0 163 4>, <0 164 4>, <0 165 4>, + <0 166 4>, <0 167 4>, <0 168 4>, <0 169 4>; + stream-match-mask = <0x7ff0>; + status = "disabled"; + }; + + spi0: spi@ffda4000 { + compatible = "snps,dw-apb-ssi"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xffda4000 0x1000>; + interrupts = <0 99 4>; + resets = <&rst SPIM0_RESET>; + reg-io-width = <4>; + num-cs = <4>; + clock-frequency = <100000000>; + status = "disabled"; + }; + + spi1: spi@ffda5000 { + compatible = "snps,dw-apb-ssi"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xffda5000 0x1000>; + interrupts = <0 100 4>; + resets = <&rst SPIM1_RESET>; + reg-io-width = <4>; + num-cs = <4>; + clock-frequency = <100000000>; + status = "disabled"; + }; + sysmgr: sysmgr@ffd12000 { + compatible = "altr,sys-mgr-s10","altr,sys-mgr"; + reg = <0xffd12000 0x500>; + status = "disabled"; + }; + + /* Local timer */ + timer { + compatible = "arm,armv8-timer"; + interrupts = <1 13 0xf08>, + <1 14 0xf08>, + <1 11 0xf08>, + <1 10 0xf08>; + clock-frequency = <200000000>; + }; + + timer0: timer0@ffc03000 { + compatible = "snps,dw-apb-timer"; + interrupts = <0 113 4>; + reg = <0xffc03000 0x100>; + clock-frequency = <100000000>; + clock-names = "timer"; + status = "disabled"; + }; + + timer1: timer1@ffc03100 { + compatible = "snps,dw-apb-timer"; + interrupts = <0 114 4>; + reg = <0xffc03100 0x100>; + clock-frequency = <100000000>; + 
clock-names = "timer"; + status = "disabled"; + }; + + timer2: timer2@ffd00000 { + compatible = "snps,dw-apb-timer"; + interrupts = <0 115 4>; + reg = <0xffd00000 0x100>; + clock-frequency = <100000000>; + clock-names = "timer"; + status = "disabled"; + }; + + timer3: timer3@ffd00100 { + compatible = "snps,dw-apb-timer"; + interrupts = <0 116 4>; + reg = <0xffd00100 0x100>; + clock-frequency = <100000000>; + clock-names = "timer"; + status = "disabled"; + }; + + uart0: serial0@ffc02000 { + compatible = "snps,dw-apb-uart"; + reg = <0xffc02000 0x100>; + interrupts = <0 108 4>; + reg-shift = <2>; + reg-io-width = <4>; + resets = <&rst UART0_RESET>; + clock-frequency = <100000000>; + status = "disabled"; + }; + + uart1: serial1@ffc02100 { + compatible = "snps,dw-apb-uart"; + reg = <0xffc02100 0x100>; + interrupts = <0 109 4>; + reg-shift = <2>; + reg-io-width = <4>; + resets = <&rst UART1_RESET>; + clock-frequency = <100000000>; + status = "disabled"; + }; + + watchdog0: watchdog@ffd00200 { + compatible = "snps,dw-wdt"; + reg = <0xffd00200 0x100>; + interrupts = <0 117 4>; + resets = <&rst WATCHDOG0_RESET>; + clocks = <&wdt_clk>; + status = "disabled"; + }; + + watchdog1: watchdog@ffd00300 { + compatible = "snps,dw-wdt"; + reg = <0xffd00300 0x100>; + interrupts = <0 118 4>; + resets = <&rst WATCHDOG1_RESET>; + clocks = <&wdt_clk>; + status = "disabled"; + }; + + watchdog2: watchdog@ffd00400 { + compatible = "snps,dw-wdt"; + reg = <0xffd00400 0x100>; + interrupts = <0 125 4>; + resets = <&rst WATCHDOG2_RESET>; + clocks = <&wdt_clk>; + status = "disabled"; + }; + + watchdog3: watchdog@ffd00500 { + compatible = "snps,dw-wdt"; + reg = <0xffd00500 0x100>; + interrupts = <0 126 4>; + resets = <&rst WATCHDOG3_RESET>; + clocks = <&wdt_clk>; + status = "disabled"; + }; + + firmware { + svc { + compatible = "intel,agilex-svc"; + method = "smc"; + memory-region = <&service_reserved>; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_dm_simics.dts 
b/arch/arm64/boot/dts/intel/socfpga_dm_simics.dts new file mode 100644 index 0000000000000..66dccaacaa1ac --- /dev/null +++ b/arch/arm64/boot/dts/intel/socfpga_dm_simics.dts @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Intel Corporation + */ +#include "socfpga_dm.dtsi" + +/ { + model = "SoCFPGA Diamond Mesa Simics"; + + aliases { + serial0 = &uart0; + }; + + chosen { + bootargs = "earlycon"; + stdout-path = "serial0:4800n8"; + }; + + memory { + device_type = "memory"; + /* We expect the bootloader to fill in the reg */ + reg = <0 0 0 0x4000000>; + }; +}; + +&uart0 { + clock-frequency = <76800>; + status = "okay"; +}; + +&mmc { + clock-frequency = <50000000>; + cap-sd-highspeed; + broken-cd; + bus-width = <4>; + status = "okay"; +}; + +&watchdog0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts index 7952c7f47cc2f..7de6fa1a5aad8 100644 --- a/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts +++ b/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts @@ -93,11 +93,14 @@ cdns,tsd2d-ns = <50>; cdns,tchsh-ns = <4>; cdns,tslch-ns = <4>; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; + rsu-handle = <&qspi_boot>; qspi_boot: partition@0 { label = "Boot and fpga data"; @@ -124,3 +127,59 @@ &watchdog0 { status = "okay"; }; + +&temp_volt { + voltage { + #address-cells = <1>; + #size-cells = <0>; + input@2 { + label = "0.8V VDD"; + reg = <2>; + }; + + input@3 { + label = "0.8V VDD_SDM"; + reg = <3>; + }; + + input@4 { + label = "1.8V VCCADC"; + reg = <4>; + }; + + input@5 { + label = "1.8V VCCPD"; + reg = <5>; + }; + + input@6 { + label = "1.8V VCCIO_SDM"; + reg = <6>; + }; + + input@7 { + label = "0.8V VDD_HPS"; + reg = <7>; + }; + }; + + temperature { + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + label = "Main Die SDM"; + reg = <0x0>; + }; + }; +}; + +&fcs { 
+ compatible = "intel,n5x-soc-fcs"; + platform = "n5x"; + status = "okay"; +}; + +&fcs_config { + status = "disabled"; +}; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 8fe7dbae33bf9..be09e54877bd7 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -43,6 +43,7 @@ CONFIG_ARCH_BCM2835=y CONFIG_ARCH_BCM_IPROC=y CONFIG_ARCH_BCMBCA=y CONFIG_ARCH_BRCMSTB=y +CONFIG_ARCH_DM=y CONFIG_ARCH_BERLIN=y CONFIG_ARCH_EXYNOS=y CONFIG_ARCH_SPARX5=y @@ -116,6 +117,7 @@ CONFIG_ACPI_APEI_MEMORY_FAILURE=y CONFIG_ACPI_APEI_EINJ=y CONFIG_VIRTUALIZATION=y CONFIG_KVM=y +CONFIG_CRYPTO_DEV_INTEL_FCS=m CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -282,7 +284,7 @@ CONFIG_MTD_NAND_BRCMNAND=m CONFIG_MTD_NAND_FSL_IFC=y CONFIG_MTD_NAND_QCOM=y CONFIG_MTD_SPI_NOR=y -CONFIG_MTD_UBI=m +CONFIG_MTD_UBI=y CONFIG_MTD_HYPERBUS=m CONFIG_HBMC_AM654=m CONFIG_BLK_DEV_LOOP=y @@ -294,6 +296,8 @@ CONFIG_QCOM_FASTRPC=m CONFIG_SRAM=y CONFIG_PCI_ENDPOINT_TEST=m CONFIG_EEPROM_AT24=m +CONFIG_ALTERA_SYSID=m +CONFIG_ALTERA_ILC=m CONFIG_EEPROM_AT25=m CONFIG_UACCE=m # CONFIG_SCSI_PROC_FS is not set @@ -372,8 +376,8 @@ CONFIG_SMC91X=y CONFIG_SMSC911X=y CONFIG_SNI_AVE=y CONFIG_SNI_NETSEC=y -CONFIG_STMMAC_ETH=m CONFIG_DWMAC_TEGRA=m +CONFIG_STMMAC_ETH=y CONFIG_TI_K3_AM65_CPSW_NUSS=y CONFIG_TI_ICSSG_PRUETH=m CONFIG_QCOM_IPA=m @@ -505,7 +509,7 @@ CONFIG_VIRTIO_CONSOLE=y CONFIG_IPMI_HANDLER=m CONFIG_IPMI_DEVICE_INTERFACE=m CONFIG_IPMI_SI=m -CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM=m CONFIG_HW_RANDOM_VIRTIO=y CONFIG_TCG_TPM=y CONFIG_TCG_TIS=m @@ -1244,8 +1248,10 @@ CONFIG_RENESAS_USB_DMAC=m CONFIG_RZ_DMAC=y CONFIG_TI_K3_UDMA=y CONFIG_TI_K3_UDMA_GLUE_LAYER=y -CONFIG_VFIO=y -CONFIG_VFIO_PCI=y +CONFIG_UIO=y +CONFIG_UIO_PDRV_GENIRQ=y +CONFIG_VFIO=m +CONFIG_VFIO_PCI=m CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BALLOON=y CONFIG_VIRTIO_MMIO=y @@ -1657,12 +1663,15 @@ CONFIG_AUTOFS_FS=y CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_OVERLAY_FS=m +CONFIG_OF_OVERLAY=y 
+CONFIG_OF_CONFIGFS=y CONFIG_VFAT_FS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_HUGETLBFS=y CONFIG_CONFIGFS_FS=y CONFIG_EFIVAR_FS=y -CONFIG_UBIFS_FS=m +CONFIG_UBIFS_FS=y +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=n CONFIG_SQUASHFS=y CONFIG_PSTORE_RAM=m CONFIG_NFS_FS=y @@ -1704,7 +1713,7 @@ CONFIG_CRYPTO_DEV_HISI_HPRE=m CONFIG_CRYPTO_DEV_HISI_TRNG=m CONFIG_CRYPTO_DEV_SA2UL=m CONFIG_DMA_RESTRICTED_POOL=y -CONFIG_CMA_SIZE_MBYTES=32 +CONFIG_CMA_SIZE_MBYTES=2 CONFIG_PRINTK_TIME=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y @@ -1723,3 +1732,85 @@ CONFIG_CORESIGHT_STM=m CONFIG_CORESIGHT_CPU_DEBUG=m CONFIG_CORESIGHT_CTI=m CONFIG_MEMTEST=y +CONFIG_SENSORS_SOC64=m +CONFIG_ALTERA_SOCFPGA_CONFIG=m +CONFIG_STRICT_DEVMEM=n +CONFIG_IO_STRICT_DEVMEM=n +CONFIG_HUGETLB_PAGE=y +##MM includes +CONFIG_BLK_DEV_NVME=y +#CONFIG_NVME_MULTIPATH=y +#CONFIG_NVME_VERBOSE_ERRORS=y +#CONFIG_NVME_HWMON=y +#CONFIG_NVME_FC=y +CONFIG_DUMMY=m + +########################################## +# For Flannel enable config flags +# For NFT tables, STP, VXLAN and Wireguard +######################################### +CONFIG_CFS_BANDWIDTH=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_UDP_TUNNEL=m +CONFIG_NF_DUP_NETDEV=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_TABLES_BRIDGE=y +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NF_TABLES_IPV6=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CT=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_HASH=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_MASQ_IPV4=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_NAT=m +CONFIG_NFT_NUMGEN=m 
+CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REDIR_IPV4=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_STP=m +CONFIG_VXLAN=m +CONFIG_WIREGUARD=m +CONFIG_PHYLIB=y +CONFIG_FIXED_PHY=y +CONFIG_DWMAC_SOCFPGA=y +# CONFIG_DWMAC_ETH_FLOW_CTRL is not set +# Ethernet +# CONFIG_PTP_1588_CLOCK is not set +# CONFIG_NET_PTP_CLASSIFY is not set +# CONFIG_PTP_1588_CLOCK_API is not set +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO=y +CONFIG_STMMAC_DEBUG=y +CONFIG_DWMAC_DEBUG=y +CONFIG_NETDEV_DEBUG=y diff --git a/arch/arm64/configs/dm_simics_defconfig b/arch/arm64/configs/dm_simics_defconfig new file mode 100644 index 0000000000000..854c0ce8750d3 --- /dev/null +++ b/arch/arm64/configs/dm_simics_defconfig @@ -0,0 +1,139 @@ +CONFIG_SYSVIPC=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=16 +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_USER_NS=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_ARCH_DM=y +CONFIG_ARM64_VA_BITS_48=y +CONFIG_SECCOMP=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_COMPAT=y +CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y +CONFIG_CPU_FREQ=y +CONFIG_CPUFREQ_DT=y +CONFIG_INTEL_STRATIX10_SERVICE=y +CONFIG_INTEL_STRATIX10_RSU=m +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM64_CE=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +CONFIG_CRYPTO_SHA512_ARM64_CE=m +CONFIG_CRYPTO_SHA3_ARM64=m +CONFIG_CRYPTO_SM3_ARM64_CE=m +CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_CRYPTO_AES_ARM64_BS=m +CONFIG_CRYPTO_DEV_INTEL_FCS=m +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_KSM=y +CONFIG_MEMORY_FAILURE=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CMA=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_FW_LOADER_USER_HELPER=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +CONFIG_SIMPLE_PM_BUS=y +CONFIG_MTD=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_M25P80=y +CONFIG_MTD_RAW_NAND=y +CONFIG_MTD_NAND_DENALI_DT=y +CONFIG_MTD_NAND_MARVELL=y +CONFIG_MTD_NAND_QCOM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_SPI_CADENCE_QUADSPI=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_VIRTIO_BLK=y +CONFIG_SRAM=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_MIRROR=m +CONFIG_DM_ZERO=m +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_XILINX_PS_UART=y +CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_SYSCON_REBOOT_MODE=y +CONFIG_WATCHDOG=y +CONFIG_DW_WATCHDOG=y +CONFIG_MFD_ALTERA_SYSMGR=y +CONFIG_MMC=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_ARMMMCI=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_OF_ARASAN=m +CONFIG_MMC_SDHCI_CADENCE=m +CONFIG_MMC_SDHCI_F_SDH30=m +CONFIG_MMC_DW=y +CONFIG_MMC_DW_EXYNOS=m +CONFIG_MMC_DW_HI3798CV200=m +CONFIG_MMC_DW_K3=m +CONFIG_MMC_SDHCI_XENON=m +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_MMIO=y +CONFIG_CLK_QORIQ=y +CONFIG_HWSPINLOCK=y +CONFIG_ARM_SMMU=y +CONFIG_ARM_SMMU_V3=y +CONFIG_MEMORY=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=m +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_SQUASHFS=y 
+CONFIG_NLS_CODEPAGE_437=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_KERNEL=y +# CONFIG_SCHED_DEBUG is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_FTRACE is not set +CONFIG_MEMTEST=y diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 3b3f6b56e7330..81657073b2675 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -878,6 +878,9 @@ static void __noreturn local_cpu_stop(unsigned int cpu) cpu_park_loop(); } +#ifndef CONFIG_EDAC_ALTERA_ARM64_WARM_RESET +/* In ECC_DBE_WARM_RESET case, use EDAC panic_smp_self_stop() */ + /* * We need to implement panic_smp_self_stop() for parallel panic() calls, so * that cpu_online_mask gets correctly updated and smp_send_stop() can skip @@ -887,6 +890,7 @@ void __noreturn panic_smp_self_stop(void) { local_cpu_stop(smp_processor_id()); } +#endif static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) { diff --git a/drivers/clk/socfpga/Kconfig b/drivers/clk/socfpga/Kconfig index 0cf16b894efb3..e82c0cda3245d 100644 --- a/drivers/clk/socfpga/Kconfig +++ b/drivers/clk/socfpga/Kconfig @@ -4,7 +4,7 @@ config CLK_INTEL_SOCFPGA default ARCH_INTEL_SOCFPGA help Support for the clock controllers present on Intel SoCFPGA and eASIC - devices like Aria, Cyclone, Stratix 10, Agilex and N5X eASIC. + devices like Aria, Cyclone, Stratix 10, Agilex, N5X eASIC and Agilex5. 
if CLK_INTEL_SOCFPGA @@ -13,7 +13,7 @@ config CLK_INTEL_SOCFPGA32 default ARM && ARCH_INTEL_SOCFPGA config CLK_INTEL_SOCFPGA64 - bool "Intel Stratix / Agilex / N5X clock controller support" if COMPILE_TEST && (!ARM64 || !ARCH_INTEL_SOCFPGA) + bool "Intel Stratix / Agilex / N5X clock / Agilex5 controller support" if COMPILE_TEST && (!ARM64 || !ARCH_INTEL_SOCFPGA) default ARM64 && ARCH_INTEL_SOCFPGA endif # CLK_INTEL_SOCFPGA diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile index e8dfce339c915..a1ea2b988eaf4 100644 --- a/drivers/clk/socfpga/Makefile +++ b/drivers/clk/socfpga/Makefile @@ -3,4 +3,4 @@ obj-$(CONFIG_CLK_INTEL_SOCFPGA32) += clk.o clk-gate.o clk-pll.o clk-periph.o \ clk-pll-a10.o clk-periph-a10.o clk-gate-a10.o obj-$(CONFIG_CLK_INTEL_SOCFPGA64) += clk-s10.o \ clk-pll-s10.o clk-periph-s10.o clk-gate-s10.o \ - clk-agilex.o + clk-agilex.o clk-agilex5.o diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c index 8dd94f64756b9..7f8756fb4901a 100644 --- a/drivers/clk/socfpga/clk-agilex.c +++ b/drivers/clk/socfpga/clk-agilex.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (C) 2019, Intel Corporation + * Copyright (C) 2019-2020, Intel Corporation */ #include #include diff --git a/drivers/clk/socfpga/clk-agilex5.c b/drivers/clk/socfpga/clk-agilex5.c new file mode 100644 index 0000000000000..cd01050db4be9 --- /dev/null +++ b/drivers/clk/socfpga/clk-agilex5.c @@ -0,0 +1,847 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022, Intel Corporation + */ +#include +#include +#include +#include +#include + +#include + +#include "stratix10-clk.h" + +static const struct clk_parent_data pll_mux[] = { + { + .fw_name = "osc1", + .name = "osc1", + }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, + { + .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", + }, +}; + +static const struct clk_parent_data boot_mux[] = { + { + .fw_name = "osc1", + .name = "osc1", 
+ }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, +}; + +static const struct clk_parent_data core0_free_mux[] = { + { + .fw_name = "main_pll_c1", + .name = "main_pll_c1", + }, + { + .fw_name = "peri_pll_c0", + .name = "peri_pll_c0", + }, + { + .fw_name = "osc1", + .name = "osc1", + }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, + { + .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", + }, +}; + +static const struct clk_parent_data core1_free_mux[] = { + { + .fw_name = "main_pll_c1", + .name = "main_pll_c1", + }, + { + .fw_name = "peri_pll_c0", + .name = "peri_pll_c0", + }, + { + .fw_name = "osc1", + .name = "osc1", + }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, + { + .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", + }, +}; + +static const struct clk_parent_data core2_free_mux[] = { + { + .fw_name = "main_pll_c0", + .name = "main_pll_c0", + }, + { + .fw_name = "osc1", + .name = "osc1", + }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, + { + .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", + }, +}; + +static const struct clk_parent_data core3_free_mux[] = { + { + .fw_name = "main_pll_c0", + .name = "main_pll_c0", + }, + { + .fw_name = "osc1", + .name = "osc1", + }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, + { + .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", + }, +}; + +static const struct clk_parent_data dsu_free_mux[] = { + { + .fw_name = "main_pll_c2", + .name = "main_pll_c2", + }, + { + .fw_name = "peri_pll_c0", + .name = "peri_pll_c0", + }, + { + .fw_name = "osc1", + .name = "osc1", + }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, + { + .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", + }, +}; + +static const struct clk_parent_data noc_free_mux[] = { + { + .fw_name = "main_pll_c3", + .name = "main_pll_c3", + }, + { 
+ .fw_name = "peri_pll_c1", + .name = "peri_pll_c1", + }, + { + .fw_name = "osc1", + .name = "osc1", + }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, + { + .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", + }, +}; + +static const struct clk_parent_data emaca_free_mux[] = { + { + .fw_name = "main_pll_c1", + .name = "main_pll_c1", + }, + { + .fw_name = "peri_pll_c3", + .name = "peri_pll_c3", + }, + { + .fw_name = "osc1", + .name = "osc1", + }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, + { + .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", + }, +}; + +static const struct clk_parent_data emacb_free_mux[] = { + { + .fw_name = "main_pll_c1", + .name = "main_pll_c1", + }, + { + .fw_name = "peri_pll_c3", + .name = "peri_pll_c3", + }, + { + .fw_name = "osc1", + .name = "osc1", + }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, + { + .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", + }, +}; + +static const struct clk_parent_data emac_ptp_free_mux[] = { + { + .fw_name = "main_pll_c3", + .name = "main_pll_c3", + }, + { + .fw_name = "peri_pll_c3", + .name = "peri_pll_c3", + }, + { + .fw_name = "osc1", + .name = "osc1", + }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, + { + .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", + }, +}; + +static const struct clk_parent_data gpio_db_free_mux[] = { + { + .fw_name = "main_pll_c3", + .name = "main_pll_c3", + }, + { + .fw_name = "peri_pll_c1", + .name = "peri_pll_c1", + }, + { + .fw_name = "osc1", + .name = "osc1", + }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, + { + .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", + }, +}; + +static const struct clk_parent_data psi_ref_free_mux[] = { + { + .fw_name = "main_pll_c1", + .name = "main_pll_c1", + }, + { + .fw_name = "peri_pll_c3", + .name = "peri_pll_c3", + }, + { + .fw_name = "osc1", 
+ .name = "osc1", + }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, + { + .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", + }, +}; + +static const struct clk_parent_data usb31_free_mux[] = { + { + .fw_name = "main_pll_c3", + .name = "main_pll_c3", + }, + { + .fw_name = "peri_pll_c2", + .name = "peri_pll_c2", + }, + { + .fw_name = "osc1", + .name = "osc1", + }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, + { + .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", + }, +}; + +static const struct clk_parent_data s2f_usr0_free_mux[] = { + { + .fw_name = "main_pll_c1", + .name = "main_pll_c1", + }, + { + .fw_name = "peri_pll_c3", + .name = "peri_pll_c3", + }, + { + .fw_name = "osc1", + .name = "osc1", + }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, + { + .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", + }, +}; + +static const struct clk_parent_data s2f_usr1_free_mux[] = { + { + .fw_name = "main_pll_c1", + .name = "main_pll_c1", + }, + { + .fw_name = "peri_pll_c3", + .name = "peri_pll_c3", + }, + { + .fw_name = "osc1", + .name = "osc1", + }, + { + .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", + }, + { + .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", + }, +}; + +static const struct clk_parent_data core0_mux[] = { + { + .fw_name = "core0_free_clk", + .name = "core0_free_clk", + }, + { + .fw_name = "boot_clk", + .name = "boot_clk", + }, +}; + +static const struct clk_parent_data core1_mux[] = { + { + .fw_name = "core1_free_clk", + .name = "core1_free_clk", + }, + { + .fw_name = "boot_clk", + .name = "boot_clk", + }, +}; + +static const struct clk_parent_data core2_mux[] = { + { + .fw_name = "core2_free_clk", + .name = "core2_free_clk", + }, + { + .fw_name = "boot_clk", + .name = "boot_clk", + }, +}; + +static const struct clk_parent_data core3_mux[] = { + { + .fw_name = "core3_free_clk", + .name = "core3_free_clk", + }, + 
{ + .fw_name = "boot_clk", + .name = "boot_clk", + }, +}; + +static const struct clk_parent_data dsu_mux[] = { + { + .fw_name = "dsu_free_clk", + .name = "dsu_free_clk", + }, + { + .fw_name = "boot_clk", + .name = "boot_clk", + }, +}; + +static const struct clk_parent_data emac_mux[] = { + { + .fw_name = "emaca_free_clk", + .name = "emaca_free_clk", + }, + { + .fw_name = "emacb_free_clk", + .name = "emacb_free_clk", + }, + { + .fw_name = "boot_clk", + .name = "boot_clk", + }, +}; + +static const struct clk_parent_data noc_mux[] = { + { + .fw_name = "noc_free_clk", + .name = "noc_free_clk", + }, + { + .fw_name = "boot_clk", + .name = "boot_clk", + }, +}; + +static const struct clk_parent_data s2f_user0_mux[] = { + { + .fw_name = "s2f_user0_free_clk", + .name = "s2f_user0_free_clk", + }, + { + .fw_name = "boot_clk", + .name = "boot_clk", + }, +}; + +static const struct clk_parent_data s2f_user1_mux[] = { + { + .fw_name = "s2f_user1_free_clk", + .name = "s2f_user1_free_clk", + }, + { + .fw_name = "boot_clk", + .name = "boot_clk", + }, +}; + +static const struct clk_parent_data psi_mux[] = { + { + .fw_name = "psi_ref_free_clk", + .name = "psi_ref_free_clk", + }, + { + .fw_name = "boot_clk", + .name = "boot_clk", + }, +}; + +static const struct clk_parent_data gpio_db_mux[] = { + { + .fw_name = "gpio_db_free_clk", + .name = "gpio_db_free_clk", + }, + { + .fw_name = "boot_clk", + .name = "boot_clk", + }, +}; + +static const struct clk_parent_data emac_ptp_mux[] = { + { + .fw_name = "emac_ptp_free_clk", + .name = "emac_ptp_free_clk", + }, + { + .fw_name = "boot_clk", + .name = "boot_clk", + }, +}; + +static const struct clk_parent_data usb31_mux[] = { + { + .fw_name = "usb31_free_clk", + .name = "usb31_free_clk", + }, + { + .fw_name = "boot_clk", + .name = "boot_clk", + }, +}; + +/* + * TODO - Clocks in AO (always on) controller + * 2 main PLLs only + */ +static const struct stratix10_pll_clock agilex5_pll_clks[] = { + { AGILEX5_BOOT_CLK, "boot_clk", boot_mux, 
ARRAY_SIZE(boot_mux), 0, + 0x0 }, + { AGILEX5_MAIN_PLL_CLK, "main_pll", pll_mux, ARRAY_SIZE(pll_mux), 0, + 0x48 }, + { AGILEX5_PERIPH_PLL_CLK, "periph_pll", pll_mux, ARRAY_SIZE(pll_mux), 0, + 0x9C }, +}; + +static const struct stratix10_perip_c_clock agilex5_main_perip_c_clks[] = { + { AGILEX5_MAIN_PLL_C0_CLK, "main_pll_c0", "main_pll", NULL, 1, 0, + 0x5C }, + { AGILEX5_MAIN_PLL_C1_CLK, "main_pll_c1", "main_pll", NULL, 1, 0, + 0x60 }, + { AGILEX5_MAIN_PLL_C2_CLK, "main_pll_c2", "main_pll", NULL, 1, 0, + 0x64 }, + { AGILEX5_MAIN_PLL_C3_CLK, "main_pll_c3", "main_pll", NULL, 1, 0, + 0x68 }, + { AGILEX5_PERIPH_PLL_C0_CLK, "peri_pll_c0", "periph_pll", NULL, 1, 0, + 0xB0 }, + { AGILEX5_PERIPH_PLL_C1_CLK, "peri_pll_c1", "periph_pll", NULL, 1, 0, + 0xB4 }, + { AGILEX5_PERIPH_PLL_C2_CLK, "peri_pll_c2", "periph_pll", NULL, 1, 0, + 0xB8 }, + { AGILEX5_PERIPH_PLL_C3_CLK, "peri_pll_c3", "periph_pll", NULL, 1, 0, + 0xBC }, +}; + +/* Non-SW clock-gated enabled clocks */ +static const struct stratix10_perip_cnt_clock agilex5_main_perip_cnt_clks[] = { + { AGILEX5_CORE0_FREE_CLK, "core0_free_clk", NULL, core0_free_mux, + ARRAY_SIZE(core0_free_mux), 0, 0x0104, 0, 0, 0}, + { AGILEX5_CORE1_FREE_CLK, "core1_free_clk", NULL, core1_free_mux, + ARRAY_SIZE(core1_free_mux), 0, 0x0104, 0, 0, 0}, + { AGILEX5_CORE2_FREE_CLK, "core2_free_clk", NULL, core2_free_mux, + ARRAY_SIZE(core2_free_mux), 0, 0x010C, 0, 0, 0}, + { AGILEX5_CORE3_FREE_CLK, "core3_free_clk", NULL, core3_free_mux, + ARRAY_SIZE(core3_free_mux), 0, 0x0110, 0, 0, 0}, + { AGILEX5_DSU_FREE_CLK, "dsu_free_clk", NULL, dsu_free_mux, + ARRAY_SIZE(dsu_free_mux), 0, 0x0100, 0, 0, 0}, + { AGILEX5_NOC_FREE_CLK, "noc_free_clk", NULL, noc_free_mux, + ARRAY_SIZE(noc_free_mux), 0, 0x40, 0, 0, 0 }, + { AGILEX5_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, + ARRAY_SIZE(emaca_free_mux), 0, 0xD4, 0, 0x88, 0 }, + { AGILEX5_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, + ARRAY_SIZE(emacb_free_mux), 0, 0xD8, 0, 0x88, 1 }, + { 
AGILEX5_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, + emac_ptp_free_mux, ARRAY_SIZE(emac_ptp_free_mux), 0, 0xDC, 0, 0x88, + 2 }, + { AGILEX5_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux, + ARRAY_SIZE(gpio_db_free_mux), 0, 0xE0, 0, 0x88, 3 }, + { AGILEX5_S2F_USER0_FREE_CLK, "s2f_user0_free_clk", NULL, + s2f_usr0_free_mux, ARRAY_SIZE(s2f_usr0_free_mux), 0, 0xE8, 0, 0x30, + 2 }, + { AGILEX5_S2F_USER1_FREE_CLK, "s2f_user1_free_clk", NULL, + s2f_usr1_free_mux, ARRAY_SIZE(s2f_usr1_free_mux), 0, 0xEC, 0, 0x88, + 5 }, + { AGILEX5_PSI_REF_FREE_CLK, "psi_ref_free_clk", NULL, psi_ref_free_mux, + ARRAY_SIZE(psi_ref_free_mux), 0, 0xF0, 0, 0x88, 6 }, + { AGILEX5_USB31_FREE_CLK, "usb31_free_clk", NULL, usb31_free_mux, + ARRAY_SIZE(usb31_free_mux), 0, 0xF8, 0, 0x88, 7}, +}; + +/* SW Clock gate enabled clocks */ +static const struct stratix10_gate_clock agilex5_gate_clks[] = { + + /* TODO HW Managed Clocks list */ + + /* TODO SW Managed Clocks list */ + + /* Main PLL0 Begin */ + /* MPU clocks */ + { AGILEX5_CORE0_CLK, "core0_clk", NULL, core0_mux, + ARRAY_SIZE(core0_mux), 0, 0x24, 8, 0, 0, 0, 0x30, 5, 0 }, + { AGILEX5_CORE1_CLK, "core1_clk", NULL, core1_mux, + ARRAY_SIZE(core1_mux), 0, 0x24, 9, 0, 0, 0, 0x30, 5, 0 }, + { AGILEX5_CORE2_CLK, "core2_clk", NULL, core2_mux, + ARRAY_SIZE(core2_mux), 0, 0x24, 10, 0, 0, 0, 0x30, 6, 0 }, + { AGILEX5_CORE3_CLK, "core3_clk", NULL, core3_mux, + ARRAY_SIZE(core3_mux), 0, 0x24, 11, 0, 0, 0, 0x30, 7, 0 }, + { AGILEX5_MPU_CLK, "dsu_clk", NULL, dsu_mux, ARRAY_SIZE(dsu_mux), 0, 0, + 0, 0, 0, 0, 0x34, 4, 0 }, + { AGILEX5_MPU_PERIPH_CLK, "mpu_periph_clk", NULL, dsu_mux, + ARRAY_SIZE(dsu_mux), 0, 0, 0, 0x44, 20, 2, 0x34, 4, 0 }, + { AGILEX5_MPU_CCU_CLK, "mpu_ccu_clk", NULL, dsu_mux, + ARRAY_SIZE(dsu_mux), 0, 0, 0, 0x44, 18, 2, 0x34, 4, 0 }, + + /* ANGTS TODO l4 main clk has no divider now. To check. 
*/ + { AGILEX5_L4_MAIN_CLK, "l4_main_clk", NULL, noc_mux, + ARRAY_SIZE(noc_mux), 0, 0x24, 1, 0, 0, 0, 0, 0, 0 }, + { AGILEX5_L4_MP_CLK, "l4_mp_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, + 0x24, 2, 0x44, 4, 2, 0x30, 1, 0 }, + { AGILEX5_L4_SYS_FREE_CLK, "l4_sys_free_clk", NULL, noc_mux, + ARRAY_SIZE(noc_mux), 0, 0, 0, 0x44, 2, 2, 0x30, 1, 0 }, + { AGILEX5_L4_SP_CLK, "l4_sp_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), + CLK_IS_CRITICAL, 0x24, 3, 0x44, 6, 2, 0x30, 1, 0 }, + + /* Core sight clocks*/ + { AGILEX5_CS_AT_CLK, "cs_at_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, + 0x24, 4, 0x44, 24, 2, 0x30, 1, 0 }, + { AGILEX5_CS_TRACE_CLK, "cs_trace_clk", NULL, noc_mux, + ARRAY_SIZE(noc_mux), 0, 0x24, 4, 0x44, 26, 2, 0x30, 1, 0 }, + { AGILEX5_CS_PDBG_CLK, "cs_pdbg_clk", "cs_at_clk", NULL, 1, 0, 0x24, 4, + 0x44, 28, 1, 0, 0, 0 }, + /* Main PLL0 End */ + + /* Main Peripheral PLL1 Begin */ + { AGILEX5_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), + 0, 0x7C, 0, 0, 0, 0, 0x94, 26, 0 }, + { AGILEX5_EMAC1_CLK, "emac1_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), + 0, 0x7C, 1, 0, 0, 0, 0x94, 27, 0 }, + { AGILEX5_EMAC2_CLK, "emac2_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), + 0, 0x7C, 2, 0, 0, 0, 0x94, 28, 0 }, + { AGILEX5_EMAC_PTP_CLK, "emac_ptp_clk", NULL, emac_ptp_mux, + ARRAY_SIZE(emac_ptp_mux), 0, 0x7C, 3, 0, 0, 0, 0x88, 2, 0 }, + { AGILEX5_GPIO_DB_CLK, "gpio_db_clk", NULL, gpio_db_mux, + ARRAY_SIZE(gpio_db_mux), 0, 0x7C, 4, 0x98, 0, 16, 0x88, 3, 1 }, + /* Main Peripheral PLL1 End */ + + /* Peripheral clocks */ + { AGILEX5_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_user0_mux, + ARRAY_SIZE(s2f_user0_mux), 0, 0x24, 6, 0, 0, 0, 0x30, 2, 0 }, + { AGILEX5_S2F_USER1_CLK, "s2f_user1_clk", NULL, s2f_user1_mux, + ARRAY_SIZE(s2f_user1_mux), 0, 0x7C, 6, 0, 0, 0, 0x88, 5, 0 }, + { AGILEX5_PSI_REF_CLK, "psi_ref_clk", NULL, psi_mux, + ARRAY_SIZE(psi_mux), 0, 0x7C, 7, 0, 0, 0, 0x88, 6, 0 }, + { AGILEX5_USB31_SUSPEND_CLK, "usb31_suspend_clk", NULL, usb31_mux, + 
ARRAY_SIZE(usb31_mux), 0, 0x7C, 25, 0, 0, 0, 0x88, 7, 0 }, + { AGILEX5_USB31_BUS_CLK_EARLY, "usb31_bus_clk_early", "l4_main_clk", + NULL, 1, 0, 0x7C, 25, 0, 0, 0, 0, 0, 0 }, + { AGILEX5_USB2OTG_HCLK, "usb2otg_hclk", "l4_mp_clk", NULL, 1, 0, 0x7C, + 8, 0, 0, 0, 0, 0, 0 }, + { AGILEX5_SPIM_0_CLK, "spim_0_clk", "l4_mp_clk", NULL, 1, 0, 0x7C, 9, + 0, 0, 0, 0, 0, 0 }, + { AGILEX5_SPIM_1_CLK, "spim_1_clk", "l4_mp_clk", NULL, 1, 0, 0x7C, 11, + 0, 0, 0, 0, 0, 0 }, + { AGILEX5_SPIS_0_CLK, "spis_0_clk", "l4_sp_clk", NULL, 1, 0, 0x7C, 12, + 0, 0, 0, 0, 0, 0 }, + { AGILEX5_SPIS_1_CLK, "spis_1_clk", "l4_sp_clk", NULL, 1, 0, 0x7C, 13, + 0, 0, 0, 0, 0, 0 }, + { AGILEX5_DMA_CORE_CLK, "dma_core_clk", "l4_mp_clk", NULL, 1, 0, 0x7C, + 14, 0, 0, 0, 0, 0, 0 }, + { AGILEX5_DMA_HS_CLK, "dma_hs_clk", "l4_mp_clk", NULL, 1, 0, 0x7C, 14, + 0, 0, 0, 0, 0, 0 }, + { AGILEX5_I3C_0_CORE_CLK, "i3c_0_core_clk", "l4_mp_clk", NULL, 1, 0, + 0x7C, 18, 0, 0, 0, 0, 0, 0 }, + { AGILEX5_I3C_1_CORE_CLK, "i3c_1_core_clk", "l4_mp_clk", NULL, 1, 0, + 0x7C, 19, 0, 0, 0, 0, 0, 0 }, + { AGILEX5_I2C_0_PCLK, "i2c_0_pclk", "l4_sp_clk", NULL, 1, 0, 0x7C, 15, + 0, 0, 0, 0, 0, 0 }, + { AGILEX5_I2C_1_PCLK, "i2c_1_pclk", "l4_sp_clk", NULL, 1, 0, 0x7C, 16, + 0, 0, 0, 0, 0, 0 }, + { AGILEX5_I2C_EMAC0_PCLK, "i2c_emac0_pclk", "l4_sp_clk", NULL, 1, 0, + 0x7C, 17, 0, 0, 0, 0, 0, 0 }, + { AGILEX5_I2C_EMAC1_PCLK, "i2c_emac1_pclk", "l4_sp_clk", NULL, 1, 0, + 0x7C, 22, 0, 0, 0, 0, 0, 0 }, + { AGILEX5_I2C_EMAC2_PCLK, "i2c_emac2_pclk", "l4_sp_clk", NULL, 1, 0, + 0x7C, 27, 0, 0, 0, 0, 0, 0 }, + { AGILEX5_UART_0_PCLK, "uart_0_pclk", "l4_sp_clk", NULL, 1, 0, 0x7C, 20, + 0, 0, 0, 0, 0, 0 }, + { AGILEX5_UART_1_PCLK, "uart_1_pclk", "l4_sp_clk", NULL, 1, 0, 0x7C, 21, + 0, 0, 0, 0, 0, 0 }, + { AGILEX5_SPTIMER_0_PCLK, "sptimer_0_pclk", "l4_sp_clk", NULL, 1, 0, + 0x7C, 23, 0, 0, 0, 0, 0, 0 }, + { AGILEX5_SPTIMER_1_PCLK, "sptimer_1_pclk", "l4_sp_clk", NULL, 1, 0, + 0x7C, 24, 0, 0, 0, 0, 0, 0 }, + + /*NAND, SD/MMC and SoftPHY overall clocking*/ 
+ { AGILEX5_DFI_CLK, "dfi_clk", "l4_mp_clk", NULL, 1, 0, 0, 0, 0x44, 16, + 2, 0, 0, 0 }, + { AGILEX5_NAND_NF_CLK, "nand_nf_clk", "dfi_clk", NULL, 1, 0, 0x7C, 10, + 0, 0, 0, 0, 0, 0 }, + { AGILEX5_NAND_BCH_CLK, "nand_bch_clk", "l4_mp_clk", NULL, 1, 0, 0x7C, + 10, 0, 0, 0, 0, 0, 0 }, + { AGILEX5_SDMMC_SDPHY_REG_CLK, "sdmmc_sdphy_reg_clk", "l4_mp_clk", NULL, + 1, 0, 0x7C, 5, 0, 0, 0, 0, 0, 0 }, + { AGILEX5_SDMCLK, "sdmclk", "dfi_clk", NULL, 1, 0, 0x7C, 5, 0, 0, 0, 0, + 0, 0 }, + { AGILEX5_SOFTPHY_REG_PCLK, "softphy_reg_pclk", "l4_mp_clk", NULL, 1, 0, + 0x7C, 26, 0, 0, 0, 0, 0, 0 }, + { AGILEX5_SOFTPHY_PHY_CLK, "softphy_phy_clk", "l4_mp_clk", NULL, 1, 0, + 0x7C, 26, 0x44, 16, 2, 0, 0, 0 }, + { AGILEX5_SOFTPHY_CTRL_CLK, "softphy_ctrl_clk", "dfi_clk", NULL, 1, 0, + 0x7C, 26, 0, 0, 0, 0, 0, 0 }, +}; + +static int +agilex5_clk_register_c_perip(const struct stratix10_perip_c_clock *clks, + int nums, struct stratix10_clock_data *data) +{ + struct clk_hw *hw_clk; + void __iomem *base = data->base; + int i; + + for (i = 0; i < nums; i++) { + hw_clk = s10_register_periph(&clks[i], base); + if (IS_ERR(hw_clk)) { + pr_err("%s: failed to register clock %s\n", __func__, + clks[i].name); + continue; + } + data->clk_data.hws[clks[i].id] = hw_clk; + } + return 0; +} + +static int +agilex5_clk_register_cnt_perip(const struct stratix10_perip_cnt_clock *clks, + int nums, struct stratix10_clock_data *data) +{ + struct clk_hw *hw_clk; + void __iomem *base = data->base; + int i; + + for (i = 0; i < nums; i++) { + hw_clk = s10_register_cnt_periph(&clks[i], base); + if (IS_ERR(hw_clk)) { + pr_err("%s: failed to register clock %s\n", __func__, + clks[i].name); + continue; + } + data->clk_data.hws[clks[i].id] = hw_clk; + } + + return 0; +} + +static int agilex5_clk_register_gate(const struct stratix10_gate_clock *clks, + int nums, + struct stratix10_clock_data *data) +{ + struct clk_hw *hw_clk; + void __iomem *base = data->base; + int i; + + for (i = 0; i < nums; i++) { + hw_clk = 
agilex_register_gate(&clks[i], base); + if (IS_ERR(hw_clk)) { + pr_err("%s: failed to register clock %s\n", __func__, + clks[i].name); + continue; + } + data->clk_data.hws[clks[i].id] = hw_clk; + } + + return 0; +} + +static int agilex5_clk_register_pll(const struct stratix10_pll_clock *clks, + int nums, struct stratix10_clock_data *data) +{ + struct clk_hw *hw_clk; + void __iomem *base = data->base; + int i; + + for (i = 0; i < nums; i++) { + hw_clk = agilex5_register_pll(&clks[i], base); + if (IS_ERR(hw_clk)) { + pr_err("%s: failed to register clock %s\n", __func__, + clks[i].name); + continue; + } + data->clk_data.hws[clks[i].id] = hw_clk; + } + + return 0; +} + +static int agilex5_clkmgr_init(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct stratix10_clock_data *clk_data; + struct resource *res; + void __iomem *base; + int i, num_clks; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + num_clks = AGILEX5_NUM_CLKS; + + clk_data = devm_kzalloc( + dev, struct_size(clk_data, clk_data.hws, num_clks), GFP_KERNEL); + if (!clk_data) + return -ENOMEM; + + for (i = 0; i < num_clks; i++) + clk_data->clk_data.hws[i] = ERR_PTR(-ENOENT); + + clk_data->base = base; + clk_data->clk_data.num = num_clks; + + agilex5_clk_register_pll(agilex5_pll_clks, ARRAY_SIZE(agilex5_pll_clks), + clk_data); + + agilex5_clk_register_c_perip(agilex5_main_perip_c_clks, + ARRAY_SIZE(agilex5_main_perip_c_clks), + clk_data); + + agilex5_clk_register_cnt_perip(agilex5_main_perip_cnt_clks, + ARRAY_SIZE(agilex5_main_perip_cnt_clks), + clk_data); + + agilex5_clk_register_gate(agilex5_gate_clks, + ARRAY_SIZE(agilex5_gate_clks), clk_data); + + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data->clk_data); + return 0; +} + +static int agilex5_clkmgr_probe(struct platform_device *pdev) +{ + int (*probe_func)(struct platform_device 
*init_func); + + probe_func = of_device_get_match_data(&pdev->dev); + if (!probe_func) + return -ENODEV; + return probe_func(pdev); +} + +static const struct of_device_id agilex5_clkmgr_match_table[] = { + { .compatible = "intel,agilex5-clkmgr", .data = agilex5_clkmgr_init }, + {} +}; + +static struct platform_driver agilex5_clkmgr_driver = { + .probe = agilex5_clkmgr_probe, + .driver = { + .name = "agilex5-clkmgr", + .suppress_bind_attrs = true, + .of_match_table = agilex5_clkmgr_match_table, + }, +}; + +static int __init agilex5_clk_init(void) +{ + return platform_driver_register(&agilex5_clkmgr_driver); +} +core_initcall(agilex5_clk_init); diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c index 1d82737befd33..26f61430fb523 100644 --- a/drivers/clk/socfpga/clk-pll-s10.c +++ b/drivers/clk/socfpga/clk-pll-s10.c @@ -175,6 +175,14 @@ static const struct clk_ops agilex_clk_pll_ops = { .prepare = clk_pll_prepare, }; +/* TODO need to fix, Agilex5 SM requires change */ +static const struct clk_ops agilex5_clk_pll_ops = { + /* TODO This may require a custom Agilex5 implementation */ + .recalc_rate = agilex_clk_pll_recalc_rate, + .get_parent = clk_pll_get_parent, + .prepare = clk_pll_prepare, +}; + static const struct clk_ops clk_pll_ops = { .recalc_rate = clk_pll_recalc_rate, .get_parent = clk_pll_get_parent, @@ -304,3 +312,43 @@ struct clk_hw *n5x_register_pll(const struct stratix10_pll_clock *clks, } return hw_clk; } + +struct clk_hw *agilex5_register_pll(const struct stratix10_pll_clock *clks, + void __iomem *reg) +{ + struct clk_hw *hw_clk; + struct socfpga_pll *pll_clk; + struct clk_init_data init; + const char *name = clks->name; + int ret; + + pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL); + if (WARN_ON(!pll_clk)) + return NULL; + + pll_clk->hw.reg = reg + clks->offset; + + if (streq(name, SOCFPGA_BOOT_CLK)) + init.ops = &clk_boot_ops; + else + init.ops = &agilex5_clk_pll_ops; + + init.name = name; + init.flags = clks->flags; + + 
init.num_parents = clks->num_parents; + init.parent_names = NULL; + init.parent_data = clks->parent_data; + pll_clk->hw.hw.init = &init; + + pll_clk->hw.bit_idx = SOCFPGA_PLL_POWER; + hw_clk = &pll_clk->hw.hw; + + ret = clk_hw_register(NULL, hw_clk); + if (ret) { + kfree(pll_clk); + return ERR_PTR(ret); + } + return hw_clk; +} + diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h index 83fe4eb3133cb..beb018515c090 100644 --- a/drivers/clk/socfpga/stratix10-clk.h +++ b/drivers/clk/socfpga/stratix10-clk.h @@ -79,6 +79,8 @@ struct clk_hw *agilex_register_pll(const struct stratix10_pll_clock *clks, void __iomem *reg); struct clk_hw *n5x_register_pll(const struct stratix10_pll_clock *clks, void __iomem *reg); +struct clk_hw *agilex5_register_pll(const struct stratix10_pll_clock *clks, + void __iomem *reg); struct clk_hw *s10_register_periph(const struct stratix10_perip_c_clock *clks, void __iomem *reg); struct clk_hw *n5x_register_periph(const struct n5x_perip_c_clock *clks, diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 03733101e2317..a3b63a66dc199 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -266,6 +266,17 @@ struct ate_acpi_oem_info { u32 oem_revision; }; +int get_ptp_clocksource_id(enum clocksource_ids *cs_id) +{ + if (cs_id) + *cs_id = clocksource_counter.id; + else + return -EOPNOTSUPP; + + return 0; +} +EXPORT_SYMBOL_GPL(get_ptp_clocksource_id); + #ifdef CONFIG_FSL_ERRATUM_A008585 /* * The number of retries is an arbitrary value well beyond the highest number diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 08b1238bcd7b3..f8789f532c33d 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -273,6 +273,20 @@ config CRYPTO_DEV_TALITOS2 Say 'Y' here to use the Freescale Security Engine (SEC) version 2 and following as found on MPC83xx, MPC85xx, etc ... 
+config CRYPTO_DEV_INTEL_FCS + tristate "Intel FPGA Crypto Service support" + depends on INTEL_STRATIX10_SERVICE + select HW_RANDOM + help + Support crypto services on Intel SoCFPGA platforms. The crypto + services include security certificate, image boot validation, + security key cancellation, get provision data, random number + generation and secure data object storage services. If intel_fcs + is built as loadable module, then need to insmod hw_random.ko + before insmod intel_fcs.ko. + + Say Y here if you want Intel FCS support + config CRYPTO_DEV_PPC4XX tristate "Driver AMCC PPC4xx crypto accelerator" depends on PPC && 4xx diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index ad4ccef67d124..07b4661e7b410 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -19,6 +19,8 @@ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o +obj-$(CONFIG_CRYPTO_DEV_INTEL_FCS) += intel_fcs.o +intel_fcs-objs := intel_fcs_main.o intel_fcs_smmu.o obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/ obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o diff --git a/drivers/crypto/intel_fcs_main.c b/drivers/crypto/intel_fcs_main.c new file mode 100644 index 0000000000000..cb45de9629fe7 --- /dev/null +++ b/drivers/crypto/intel_fcs_main.c @@ -0,0 +1,4043 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020, Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "intel_fcs_smmu.h" + +#define RANDOM_NUMBER_SIZE 32 +#define RANDOM_NUMBER_EXT_SIZE 4080 +#define RANDOM_NUMBER_EXT_OFFSET 12 +#define FILE_NAME_SIZE 32 +#define PS_BUF_SIZE 64 +#define SMMU_BUF_SIZE 128 +#define SHA384_SIZE 48 
+#define INVALID_STATUS 0xFFFFFFFF +#define INVALID_ID 0xFFFFFFFF +#define ASYNC_POLL_SERVICE 0x00004F4E + +#define MIN_SDOS_BUF_SZ 16 +#define MAX_SDOS_BUF_SZ 32768 +#define DEC_MIN_SZ 72 +#define DEC_MAX_SZ 32712 +#define ENC_MIN_SZ 120 +#define ENC_MAX_SZ 32760 + +#define SUBKEY_CMD_MAX_SZ 4092 +#define SUBKEY_RSP_MAX_SZ 820 +#define MEASUREMENT_CMD_MAX_SZ 4092 +#define MEASUREMENT_RSP_MAX_SZ 4092 +#define CERTIFICATE_RSP_MAX_SZ 4096 + +#define CRYPTO_EXPORTED_KEY_OBJECT_MAX_SZ 364 +#define CRYPTO_GET_KEY_INFO_MAX_SZ 144 + +#define CRYPTO_ECC_PARAM_SZ 4 +#define CRYPTO_ECC_DIGEST_SZ_OFFSET 4 + +#define AES_CRYPT_CMD_MAX_SZ SZ_4M /* set 4 Mb for now */ +#define AES_BUFFER_CMD_MAX_SZ 0xE600000 /* set 230 Mb */ +#define HMAC_CMD_MAX_SZ 0x1D600000 /* set 470 Mb */ +#define ECDSA_CMD_MAX_SZ 0x1D600000 /* set 470 Mb */ +#define SMMU_MAX_ALLOC_SZ 0x1E000000 /* set 480 Mb */ +#define AES_CRYPT_MODE_ECB 0 +#define AES_CRYPT_MODE_CBC 1 +#define AES_CRYPT_MODE_CTR 2 +#define AES_CRYPT_PARAM_SIZE_ECB 12 +#define AES_CRYPT_PARAM_SIZE_CBC_CTR 28 + +#define FCS_REQUEST_TIMEOUT (msecs_to_jiffies(SVC_FCS_REQUEST_TIMEOUT_MS)) +#define FCS_COMPLETED_TIMEOUT (msecs_to_jiffies(SVC_COMPLETED_TIMEOUT_MS)) +#define SIGMA_SESSION_ID_ONE 0x1 +#define SIGMA_UNKNOWN_SESSION 0xffffffff + +#define SRC_BUFFER_STARTING_L2_IDX 17 +#define get_buffer_addr(a)(a*2*1024*1024) + +#define SDM_SMMU_FW_MIN_VER 0x2722C +#define ATF_SMMU_FW_MAJOR_VER 0x2 +#define ATF_SMMU_FW_MIN_VER 0x1 +#define AGILEX_PLATFORM "agilex" +#define AGILEX_PLATFORM_STR_LEN 6 + +#define SDOS_DECRYPTION_ERROR_102 0x102 +#define SDOS_DECRYPTION_ERROR_103 0x103 + +/*SDM required minimun 8 bytes of data for crypto service*/ +#define CRYPTO_SERVICE_MIN_DATA_SIZE 8 + +/** + * struct socfpga_fcs_data - FCS platform data structure. + * @hwrng Flag to indicate support for HW random number generator. 
+ */ +struct socfpga_fcs_data { + bool have_hwrng; +}; + +static char *source_ptr; + +typedef void (*fcs_callback)(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data); + +static void fcs_atf_version_smmu_check_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct intel_fcs_priv *priv = client->priv; + + priv->status = data->status; + if (data->status == BIT(SVC_STATUS_OK)) { + if ((*((unsigned int *)data->kaddr1) > ATF_SMMU_FW_MAJOR_VER) || + ((*((unsigned int *)data->kaddr1) == ATF_SMMU_FW_MAJOR_VER) && + (*((unsigned int *)data->kaddr2) >= ATF_SMMU_FW_MIN_VER))) + priv->status = 0; + else + priv->status = -1; + } else if (data->status == BIT(SVC_STATUS_ERROR)) { + priv->status = *((unsigned int *)data->kaddr1); + dev_err(client->dev, "mbox_error=0x%x\n", priv->status); + } + + complete(&priv->completion); +} + +static void fcs_fw_version_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct intel_fcs_priv *priv = client->priv; + + priv->status = -1; + if (data->status == BIT(SVC_STATUS_OK)) { + if (*((unsigned int *)data->kaddr1) > SDM_SMMU_FW_MIN_VER) + priv->status = 0; + } else { + dev_err(client->dev, "Failed to get FW version %lu\n", + BIT(data->status)); + } + + complete(&priv->completion); +} + +static void fcs_data_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct intel_fcs_priv *priv = client->priv; + + if ((data->status == BIT(SVC_STATUS_OK)) || + (data->status == BIT(SVC_STATUS_COMPLETED))) { + priv->status = 0; + priv->kbuf = data->kaddr2; + priv->size = *((unsigned int *)data->kaddr3); + } else if (data->status == BIT(SVC_STATUS_ERROR)) { + priv->status = *((unsigned int *)data->kaddr1); + dev_err(client->dev, "error, mbox_error=0x%x\n", priv->status); + priv->kbuf = data->kaddr2; + priv->size = (data->kaddr3) ? 
+ *((unsigned int *)data->kaddr3) : 0; + } else if ((data->status == BIT(SVC_STATUS_BUSY)) || + (data->status == BIT(SVC_STATUS_NO_RESPONSE))) { + priv->status = 0; + priv->kbuf = NULL; + priv->size = 0; + } else { + dev_err(client->dev, "rejected, invalid param\n"); + priv->status = -EINVAL; + priv->kbuf = NULL; + priv->size = 0; + } + + complete(&priv->completion); +} + +static void fcs_vab_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct intel_fcs_priv *priv = client->priv; + + if (data->status == BIT(SVC_STATUS_ERROR)) { + priv->status = *((unsigned int *)data->kaddr1); + dev_err(client->dev, "mbox_error=0x%x\n", priv->status); + } else if (data->status == BIT(SVC_STATUS_BUSY)) { + priv->status = -ETIMEDOUT; + dev_err(client->dev, "timeout to get completed status\n"); + } else if (data->status == BIT(SVC_STATUS_INVALID_PARAM)) { + priv->status = -EINVAL; + dev_err(client->dev, "request rejected\n"); + } else if (data->status == BIT(SVC_STATUS_OK)) { + priv->status = 0; + } else if (data->status == BIT(SVC_STATUS_NO_SUPPORT)) { + priv->status = -EINVAL; + dev_err(client->dev, "firmware doesn't support...\n"); + } else { + priv->status = -EINVAL; + dev_err(client->dev, "rejected, invalid param\n"); + } + + complete(&priv->completion); +} + +static void fcs_chipid_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct intel_fcs_priv *priv = client->priv; + + priv->status = data->status; + if (data->status == BIT(SVC_STATUS_OK)) { + priv->status = 0; + priv->cid_low = *((unsigned int *)data->kaddr2); + priv->cid_high = *((unsigned int *)data->kaddr3); + } else if (data->status == BIT(SVC_STATUS_ERROR)) { + priv->status = *((unsigned int *)data->kaddr1); + dev_err(client->dev, "mbox_error=0x%x\n", priv->status); + } + + complete(&priv->completion); +} + +static void fcs_attestation_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct 
intel_fcs_priv *priv = client->priv; + + priv->status = data->status; + if (data->status == BIT(SVC_STATUS_OK)) { + priv->status = 0; + priv->kbuf = data->kaddr2; + priv->size = (data->kaddr3 != NULL) ? + *((unsigned int *)data->kaddr3) : 0; + } else if (data->status == BIT(SVC_STATUS_ERROR)) { + priv->status = *((unsigned int *)data->kaddr1); + dev_err(client->dev, "mbox_error=0x%x\n", priv->status); + } + + complete(&priv->completion); +} + +static void fcs_crypto_sessionid_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct intel_fcs_priv *priv = client->priv; + + priv->status = data->status; + if (data->status == BIT(SVC_STATUS_OK)) { + priv->status = 0; + priv->sid = *((unsigned int *)data->kaddr2); + } else if (data->status == BIT(SVC_STATUS_ERROR)) { + priv->status = *((unsigned int *)data->kaddr1); + dev_err(client->dev, "mbox_error=0x%x\n", priv->status); + } + + complete(&priv->completion); +} + +static void fcs_hwrng_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct intel_fcs_priv *priv = client->priv; + + priv->status = 0; + priv->kbuf = NULL; + priv->size = 0; + + if ((data->status == BIT(SVC_STATUS_OK)) || + (data->status == BIT(SVC_STATUS_COMPLETED))) { + priv->kbuf = data->kaddr2; + priv->size = *((unsigned int *)data->kaddr3); + } + + complete(&priv->completion); +} + +static void fcs_mbox_send_cmd_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct intel_fcs_priv *priv = client->priv; + + if (data->status == BIT(SVC_STATUS_OK)) { + priv->status = 0; + priv->size = *((unsigned int *)data->kaddr2); + } else if (data->status == BIT(SVC_STATUS_ERROR)) { + priv->status = *((unsigned int *)data->kaddr1); + dev_err(client->dev, "mbox_error=0x%x\n", priv->status); + } else if (data->status == BIT(SVC_STATUS_INVALID_PARAM)) { + priv->status = -EINVAL; + dev_err(client->dev, "request rejected\n"); + } else { + priv->status = 
-EINVAL; + dev_err(client->dev, "rejected, invalid param\n"); + } + + complete(&priv->completion); +} + +static void fcs_sdos_data_poll_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct intel_fcs_priv *priv = client->priv; + + if ((data->status == BIT(SVC_STATUS_OK)) || + (data->status == BIT(SVC_STATUS_COMPLETED))) { + priv->status = (data->kaddr1) ? + *((unsigned int *)data->kaddr1) : 0; + priv->kbuf = data->kaddr2; + priv->size = *((unsigned int *)data->kaddr3); + } else if (data->status == BIT(SVC_STATUS_ERROR)) { + priv->status = *((unsigned int *)data->kaddr1); + dev_err(client->dev, "error, mbox_error=0x%x\n", priv->status); + priv->kbuf = data->kaddr2; + priv->size = (data->kaddr3) ? + *((unsigned int *)data->kaddr3) : 0; + } else if ((data->status == BIT(SVC_STATUS_BUSY)) || + (data->status == BIT(SVC_STATUS_NO_RESPONSE))) { + priv->status = 0; + priv->kbuf = NULL; + priv->size = 0; + } else { + dev_err(client->dev, "rejected, invalid param\n"); + priv->status = -EINVAL; + priv->kbuf = NULL; + priv->size = 0; + } + + complete(&priv->completion); +} + +static void fcs_sdos_data_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct intel_fcs_priv *priv = client->priv; + + priv->status = data->status; + if (data->status == BIT(SVC_STATUS_OK)) { + priv->status = *((unsigned int *)data->kaddr1); + priv->kbuf = data->kaddr2; + priv->size = *((unsigned int *)data->kaddr3); + } else if (data->status == BIT(SVC_STATUS_ERROR)) { + priv->status = *((unsigned int *)data->kaddr1); + dev_err(client->dev, "mbox_error=0x%x\n", priv->status); + } + + complete(&priv->completion); +} + +static int fcs_request_service(struct intel_fcs_priv *priv, + void *msg, unsigned long timeout) +{ + struct stratix10_svc_client_msg *p_msg = + (struct stratix10_svc_client_msg *)msg; + int ret; + + reinit_completion(&priv->completion); + + ret = stratix10_svc_send(priv->chan, p_msg); + if (ret) + return 
-EINVAL; + + ret = wait_for_completion_timeout(&priv->completion, + timeout); + if (!ret) { + dev_err(priv->client.dev, + "timeout waiting for SMC call\n"); + ret = -ETIMEDOUT; + } else + ret = 0; + + return ret; +} + +static void fcs_free_memory(struct intel_fcs_priv *priv, + void *buf1, void *buf2, void *buf3) +{ + if (buf1) + stratix10_svc_free_memory(priv->chan, buf1); + + if (buf2) + stratix10_svc_free_memory(priv->chan, buf2); + + if (buf3) + stratix10_svc_free_memory(priv->chan, buf3); +} + +static void fcs_close_services(struct intel_fcs_priv *priv, + void *sbuf, void *dbuf) +{ + fcs_free_memory(priv, sbuf, dbuf, NULL); + stratix10_svc_done(priv->chan); + mutex_unlock(&priv->lock); +} + +static long fcs_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct intel_fcs_dev_ioctl *data; + struct intel_fcs_priv *priv; + struct device *dev; + struct stratix10_svc_client_msg *msg; + const struct firmware *fw; + char filename[FILE_NAME_SIZE]; + size_t tsz, rsz, datasz, ud_sz; + uint32_t sid; + uint32_t kuid; + uint32_t cid; + void *s_buf; + void *d_buf; + void *ps_buf; + void *iv_field_buf; + void *input_file_pointer; + void *output_file_pointer; + unsigned int buf_sz, in_sz, out_sz; + uint32_t remaining_size, data_size, total_out_size; + uint32_t sign_size; + int ret = 0; + int i; + int timeout; + phys_addr_t src_addr; + phys_addr_t dst_addr; + + priv = container_of(file->private_data, struct intel_fcs_priv, miscdev); + dev = priv->client.dev; + mutex_lock(&priv->lock); + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) { + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL); + if (!msg) { + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + switch (cmd) { + case INTEL_FCS_DEV_VALIDATION_REQUEST: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + /* for 
bitstream */ + dev_dbg(dev, "file_name=%s, status=%d\n", + (char *)data->com_paras.s_request.src, data->status); + scnprintf(filename, FILE_NAME_SIZE, "%s", + (char *)data->com_paras.s_request.src); + ret = request_firmware(&fw, filename, priv->client.dev); + if (ret) { + dev_err(dev, "error requesting firmware %s\n", + (char *)data->com_paras.s_request.src); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + dev_dbg(dev, "FW size=%ld\n", fw->size); + s_buf = stratix10_svc_allocate_memory(priv->chan, fw->size); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed to allocate VAB buffer\n"); + release_firmware(fw); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + memcpy(s_buf, fw->data, fw->size); + + msg->payload_length = fw->size; + release_firmware(fw); + + msg->command = COMMAND_FCS_REQUEST_SERVICE; + msg->payload = s_buf; + priv->client.receive_cb = fcs_vab_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + dev_dbg(dev, "fcs_request_service ret=%d\n", ret); + if (!ret && !priv->status) { + /* to query the complete status */ + msg->command = COMMAND_POLL_SERVICE_STATUS; + priv->client.receive_cb = fcs_data_callback; + ret = fcs_request_service(priv, (void *)msg, + FCS_COMPLETED_TIMEOUT); + dev_dbg(dev, "fcs_request_service ret=%d\n", ret); + if (!ret && !priv->status) + data->status = 0; + else + data->status = priv->status; + } else + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, s_buf, NULL); + break; + + case INTEL_FCS_DEV_SEND_CERTIFICATE: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.c_request.size == 0 || + data->com_paras.c_request.addr == NULL) { + dev_err(dev, "Invalid VAB request param\n"); + mutex_unlock(&priv->lock); + return -EFAULT; 
+ } + + dev_dbg(dev, "Test=%d, Size=%d; Address=0x%p\n", + data->com_paras.c_request.test.test_word, + data->com_paras.c_request.size, + data->com_paras.c_request.addr); + + /* Allocate memory for certificate + test word */ + tsz = sizeof(struct intel_fcs_cert_test_word); + datasz = data->com_paras.c_request.size + tsz; + + s_buf = stratix10_svc_allocate_memory(priv->chan, datasz); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed to allocate VAB buffer\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + ps_buf = stratix10_svc_allocate_memory(priv->chan, PS_BUF_SIZE); + if (IS_ERR(ps_buf)) { + dev_err(dev, "failed to allocate p-status buf\n"); + stratix10_svc_free_memory(priv->chan, s_buf); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + /* Copy the test word */ + memcpy(s_buf, &data->com_paras.c_request.test, tsz); + + /* Copy in the certificate data (skipping over the test word) */ + ret = copy_from_user(s_buf + tsz, + data->com_paras.c_request.addr, + data->com_paras.c_request.size); + if (ret) { + dev_err(dev, "failed copy buf ret=%d\n", ret); + fcs_free_memory(priv, s_buf, ps_buf, NULL); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + msg->payload_length = datasz; + msg->command = COMMAND_FCS_SEND_CERTIFICATE; + msg->payload = s_buf; + priv->client.receive_cb = fcs_vab_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + dev_dbg(dev, "fcs_request_service ret=%d\n", ret); + if (!ret && !priv->status) { + /* to query the complete status */ + msg->payload = ps_buf; + msg->payload_length = PS_BUF_SIZE; + msg->command = COMMAND_POLL_SERVICE_STATUS; + priv->client.receive_cb = fcs_data_callback; + ret = fcs_request_service(priv, (void *)msg, + FCS_COMPLETED_TIMEOUT); + dev_dbg(dev, "request service ret=%d\n", ret); + if (!ret && !priv->status) { + data->status = 0; + data->mbox_status = 0; + } else { + if (priv->kbuf) { + data->com_paras.c_request.c_status = + (*(u32 *)priv->kbuf); + data->mbox_status = priv->status; 
+ pr_info("data->mbox_status:0x%x\n", data->mbox_status); + } else + data->com_paras.c_request.c_status = + INVALID_STATUS; + } + } else + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, s_buf, ps_buf); + break; + + case INTEL_FCS_DEV_COUNTER_SET_PREAUTHORIZED: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + msg->command = COMMAND_FCS_COUNTER_SET_PREAUTHORIZED; + msg->arg[0] = data->com_paras.i_request.counter_type; + msg->arg[1] = data->com_paras.i_request.counter_value; + msg->arg[2] = data->com_paras.i_request.test.test_word; + priv->client.receive_cb = fcs_vab_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret) { + dev_err(dev, "failed to send the request,ret=%d\n", + ret); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + + data->status = priv->status; + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + fcs_close_services(priv, NULL, NULL); + break; + + case INTEL_FCS_DEV_RANDOM_NUMBER_GEN: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + s_buf = stratix10_svc_allocate_memory(priv->chan, + RANDOM_NUMBER_SIZE); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed to allocate RNG buffer\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + msg->command = COMMAND_FCS_RANDOM_NUMBER_GEN; + msg->payload = s_buf; + msg->payload_length = RANDOM_NUMBER_SIZE; + priv->client.receive_cb = fcs_data_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + + if (!ret && !priv->status) { + if (!priv->kbuf) { + dev_err(dev, "failure on kbuf\n"); + 
fcs_close_services(priv, s_buf, NULL); + return -EFAULT; + } + + for (i = 0; i < 8; i++) + dev_dbg(dev, "output_data[%d]=%d\n", i, + *((int *)priv->kbuf + i)); + + for (i = 0; i < 8; i++) + data->com_paras.rn_gen.rndm[i] = + *((int *)priv->kbuf + i); + data->status = priv->status; + + } else { + /* failed to get RNG */ + data->status = priv->status; + } + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, s_buf, NULL); + break; + + case INTEL_FCS_DEV_GET_PROVISION_DATA: + if (copy_from_user(data, (void __user *)arg, + sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.gp_data.size == 0 || + data->com_paras.gp_data.addr == NULL) { + dev_err(dev, "Invalid provision request param\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + s_buf = stratix10_svc_allocate_memory(priv->chan, + data->com_paras.gp_data.size); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed allocate provision buffer\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + msg->command = COMMAND_FCS_GET_PROVISION_DATA; + msg->payload = NULL; + msg->payload_length = 0; + priv->client.receive_cb = fcs_vab_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + /* to query the complete status */ + msg->arg[0] = ASYNC_POLL_SERVICE; + msg->payload = s_buf; + msg->payload_length = data->com_paras.gp_data.size; + msg->command = COMMAND_POLL_SERVICE_STATUS_ASYNC; + priv->client.receive_cb = fcs_data_callback; + + timeout = 100; + while (timeout != 0) { + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + dev_dbg(dev, "request service ret=%d\n", ret); + + if (!ret && !priv->status) { + if (priv->size) { + if (!priv->kbuf) { + dev_err(dev, "failure on kbuf\n"); + fcs_close_services(priv, s_buf, NULL); + return -EFAULT; + } + + 
data->com_paras.gp_data.size = priv->size; + ret = copy_to_user(data->com_paras.gp_data.addr, + priv->kbuf, priv->size); + if (ret) { + dev_err(dev, "failure on copy_to_user\n"); + fcs_close_services(priv, s_buf, NULL); + return -EFAULT; + } + break; + } + } else { + data->com_paras.gp_data.addr = NULL; + data->com_paras.gp_data.size = 0; + break; + } + timeout--; + mdelay(500); + } + } else { + data->com_paras.gp_data.addr = NULL; + data->com_paras.gp_data.size = 0; + } + + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + fcs_close_services(priv, s_buf, NULL); + return -EFAULT; + } + + fcs_close_services(priv, s_buf, NULL); + break; + + case INTEL_FCS_DEV_DATA_ENCRYPTION: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.d_encryption.src_size < DEC_MIN_SZ || + data->com_paras.d_encryption.src_size > DEC_MAX_SZ) { + dev_err(dev, "Invalid SDOS Buffer src size:%d\n", + data->com_paras.d_encryption.src_size); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.d_encryption.dst_size < ENC_MIN_SZ || + data->com_paras.d_encryption.dst_size > ENC_MAX_SZ) { + dev_err(dev, "Invalid SDOS Buffer dst size:%d\n", + data->com_paras.d_encryption.dst_size); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.d_encryption.src == NULL || + data->com_paras.d_encryption.dst == NULL) { + dev_err(dev, "Invalid SDOS Buffer pointer\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + /* allocate buffer for both source and destination */ + s_buf = stratix10_svc_allocate_memory(priv->chan, + MAX_SDOS_BUF_SZ); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed allocate encrypt src buf\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + d_buf = stratix10_svc_allocate_memory(priv->chan, + MAX_SDOS_BUF_SZ); + if 
(IS_ERR(d_buf)) { + dev_err(dev, "failed allocate encrypt dst buf\n"); + stratix10_svc_free_memory(priv->chan, s_buf); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + ps_buf = stratix10_svc_allocate_memory(priv->chan, PS_BUF_SIZE); + if (IS_ERR(ps_buf)) { + dev_err(dev, "failed allocate p-status buffer\n"); + fcs_free_memory(priv, s_buf, d_buf, NULL); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + ret = copy_from_user(s_buf, + data->com_paras.d_encryption.src, + data->com_paras.d_encryption.src_size); + if (ret) { + dev_err(dev, "failure on copy_from_user\n"); + fcs_free_memory(priv, ps_buf, s_buf, d_buf); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + msg->command = COMMAND_FCS_DATA_ENCRYPTION; + msg->payload = s_buf; + msg->payload_length = + data->com_paras.d_encryption.src_size; + msg->payload_output = d_buf; + msg->payload_length_output = + data->com_paras.d_encryption.dst_size; + priv->client.receive_cb = fcs_vab_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + msg->payload = ps_buf; + msg->payload_length = PS_BUF_SIZE; + msg->command = COMMAND_POLL_SERVICE_STATUS; + + priv->client.receive_cb = fcs_data_callback; + ret = fcs_request_service(priv, (void *)msg, + FCS_COMPLETED_TIMEOUT); + dev_dbg(dev, "request service ret=%d\n", ret); + + if (!ret && !priv->status) { + if (!priv->kbuf) { + dev_err(dev, "failure on kbuf\n"); + fcs_free_memory(priv, ps_buf, s_buf, d_buf); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + buf_sz = *(unsigned int *)priv->kbuf; + data->com_paras.d_encryption.dst_size = buf_sz; + data->status = 0; + ret = copy_to_user(data->com_paras.d_encryption.dst, + d_buf, buf_sz); + if (ret) { + dev_err(dev, "failure on copy_to_user\n"); + fcs_free_memory(priv, ps_buf, s_buf, d_buf); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + } else { + data->com_paras.d_encryption.dst = NULL; + data->com_paras.d_encryption.dst_size = 0; + 
data->status = priv->status; + } + } else { + data->com_paras.d_encryption.dst = NULL; + data->com_paras.d_encryption.dst_size = 0; + data->status = priv->status; + } + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_free_memory(priv, ps_buf, s_buf, d_buf); + fcs_close_services(priv, NULL, NULL); + break; + + case INTEL_FCS_DEV_DATA_DECRYPTION: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.d_encryption.src_size < ENC_MIN_SZ || + data->com_paras.d_encryption.src_size > ENC_MAX_SZ) { + dev_err(dev, "Invalid SDOS Buffer src size:%d\n", + data->com_paras.d_encryption.src_size); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.d_encryption.dst_size < DEC_MIN_SZ || + data->com_paras.d_encryption.dst_size > DEC_MAX_SZ) { + dev_err(dev, "Invalid SDOS Buffer dst size:%d\n", + data->com_paras.d_encryption.dst_size); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.d_encryption.src == NULL || + data->com_paras.d_encryption.dst == NULL) { + dev_err(dev, "Invalid SDOS Buffer pointer\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + /* allocate buffer for both source and destination */ + s_buf = stratix10_svc_allocate_memory(priv->chan, + MAX_SDOS_BUF_SZ); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed allocate decrypt src buf\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + d_buf = stratix10_svc_allocate_memory(priv->chan, + MAX_SDOS_BUF_SZ); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate decrypt dst buf\n"); + stratix10_svc_free_memory(priv->chan, s_buf); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + ps_buf = stratix10_svc_allocate_memory(priv->chan, + PS_BUF_SIZE); + if (IS_ERR(ps_buf)) { + dev_err(dev, "failed allocate p-status buffer\n"); + fcs_free_memory(priv, 
s_buf, d_buf, NULL); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + ret = copy_from_user(s_buf, + data->com_paras.d_decryption.src, + data->com_paras.d_decryption.src_size); + if (ret) { + dev_err(dev, "failure on copy_from_user\n"); + fcs_free_memory(priv, ps_buf, s_buf, d_buf); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + msg->command = COMMAND_FCS_DATA_DECRYPTION; + msg->payload = s_buf; + msg->payload_length = + data->com_paras.d_decryption.src_size; + msg->payload_output = d_buf; + msg->payload_length_output = + data->com_paras.d_decryption.dst_size; + priv->client.receive_cb = fcs_vab_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + msg->command = COMMAND_POLL_SERVICE_STATUS; + msg->payload = ps_buf; + msg->payload_length = PS_BUF_SIZE; + priv->client.receive_cb = fcs_sdos_data_poll_callback; + ret = fcs_request_service(priv, (void *)msg, + FCS_COMPLETED_TIMEOUT); + dev_dbg(dev, "request service ret=%d\n", ret); + if (!ret && + (!priv->status || + priv->status == SDOS_DECRYPTION_ERROR_102 || + priv->status == SDOS_DECRYPTION_ERROR_103)) { + if (!priv->kbuf) { + dev_err(dev, "failure on kbuf\n"); + fcs_free_memory(priv, ps_buf, s_buf, d_buf); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + buf_sz = *((unsigned int *)priv->kbuf); + data->com_paras.d_decryption.dst_size = buf_sz; + data->status = priv->status; + ret = copy_to_user(data->com_paras.d_decryption.dst, + d_buf, buf_sz); + if (ret) { + dev_err(dev, "failure on copy_to_user\n"); + fcs_free_memory(priv, ps_buf, s_buf, d_buf); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + } else { + data->com_paras.d_decryption.dst = NULL; + data->com_paras.d_decryption.dst_size = 0; + data->status = priv->status; + } + } else { + data->com_paras.d_decryption.dst = NULL; + data->com_paras.d_decryption.dst_size = 0; + data->status = priv->status; + } + + if (copy_to_user((void __user *)arg, data, 
sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_free_memory(priv, ps_buf, s_buf, d_buf); + fcs_close_services(priv, NULL, NULL); + break; + + case INTEL_FCS_DEV_PSGSIGMA_TEARDOWN: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + sid = data->com_paras.tdown.sid; + if ((sid != SIGMA_SESSION_ID_ONE) && + (sid != SIGMA_UNKNOWN_SESSION)) { + dev_err(dev, "Invalid session ID:%d\n", sid); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + msg->command = COMMAND_FCS_PSGSIGMA_TEARDOWN; + msg->arg[0] = sid; + priv->client.receive_cb = fcs_vab_callback; + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret) { + dev_err(dev, "failed to send the request,ret=%d\n", + ret); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + + data->status = priv->status; + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + fcs_close_services(priv, NULL, NULL); + break; + + case INTEL_FCS_DEV_CHIP_ID: + msg->command = COMMAND_FCS_GET_CHIP_ID; + priv->client.receive_cb = fcs_chipid_callback; + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret) { + dev_err(dev, "failed to send the request,ret=%d\n", + ret); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + data->status = priv->status; + data->com_paras.c_id.chip_id_low = priv->cid_low; + data->com_paras.c_id.chip_id_high = priv->cid_high; + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + fcs_close_services(priv, NULL, NULL); + break; + + case INTEL_FCS_DEV_ATTESTATION_SUBKEY: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if 
(data->com_paras.subkey.cmd_data_sz > SUBKEY_CMD_MAX_SZ) { + dev_err(dev, "Invalid subkey CMD size %d\n", + data->com_paras.subkey.cmd_data_sz); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.subkey.rsp_data_sz > SUBKEY_RSP_MAX_SZ) { + dev_err(dev, "Invalid subkey RSP size %d\n", + data->com_paras.subkey.rsp_data_sz); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.subkey.cmd_data == NULL || + data->com_paras.subkey.rsp_data == NULL) { + dev_err(dev, "Invalid subkey data pointer\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + /* allocate buffer for both soruce and destination */ + rsz = sizeof(struct intel_fcs_attestation_resv_word); + datasz = data->com_paras.subkey.cmd_data_sz + rsz; + + s_buf = stratix10_svc_allocate_memory(priv->chan, + SUBKEY_CMD_MAX_SZ + + rsz); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed allocate subkey CMD buf\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + d_buf = stratix10_svc_allocate_memory(priv->chan, + SUBKEY_RSP_MAX_SZ); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate subkey RSP buf\n"); + stratix10_svc_free_memory(priv->chan, s_buf); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + /* copy the reserve word first then command payload */ + memcpy(s_buf, &data->com_paras.subkey.resv.resv_word, rsz); + + /* Copy user data from user space to kernel space */ + ret = copy_from_user(s_buf + rsz, + data->com_paras.subkey.cmd_data, + data->com_paras.subkey.cmd_data_sz); + if (ret) { + dev_err(dev, "failure on copy_from_user\n"); + fcs_free_memory(priv, s_buf, d_buf, NULL); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + msg->command = COMMAND_FCS_ATTESTATION_SUBKEY; + msg->payload = s_buf; + msg->payload_length = datasz; + msg->payload_output = d_buf; + msg->payload_length_output = SUBKEY_RSP_MAX_SZ; + priv->client.receive_cb = fcs_attestation_callback; + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && 
!priv->status) { + if (priv->size > SUBKEY_RSP_MAX_SZ) { + dev_err(dev, + "returned size is incorrect\n"); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + + memcpy(data->com_paras.subkey.rsp_data, + priv->kbuf, priv->size); + data->com_paras.subkey.rsp_data_sz = priv->size; + data->status = priv->status; + + } else { + data->com_paras.subkey.rsp_data = NULL; + data->com_paras.subkey.rsp_data_sz = 0; + data->status = priv->status; + } + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + + fcs_close_services(priv, s_buf, d_buf); + break; + + case INTEL_FCS_DEV_ATTESTATION_MEASUREMENT: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.measurement.cmd_data_sz > MEASUREMENT_CMD_MAX_SZ) { + dev_err(dev, "Invalid measurement CMD size %d\n", + data->com_paras.measurement.cmd_data_sz); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.measurement.rsp_data_sz > MEASUREMENT_RSP_MAX_SZ) { + dev_err(dev, "Invalid measurement RSP size %d\n", + data->com_paras.measurement.rsp_data_sz); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.measurement.cmd_data == NULL || + data->com_paras.measurement.rsp_data == NULL) { + dev_err(dev, "Invalid measurement data pointer\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + /* allocate buffer for both soruce and destination */ + rsz = sizeof(struct intel_fcs_attestation_resv_word); + datasz = data->com_paras.measurement.cmd_data_sz + rsz; + + s_buf = stratix10_svc_allocate_memory(priv->chan, + MEASUREMENT_CMD_MAX_SZ + + rsz); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed allocate measurement CMD buf\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + d_buf = stratix10_svc_allocate_memory(priv->chan, + 
MEASUREMENT_RSP_MAX_SZ); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate measurement RSP buf\n"); + stratix10_svc_free_memory(priv->chan, s_buf); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + /* copy the reserve word first then command payload */ + memcpy(s_buf, &data->com_paras.measurement.resv.resv_word, rsz); + + /* Copy user data from user space to kernel space */ + ret = copy_from_user(s_buf + rsz, + data->com_paras.measurement.cmd_data, + data->com_paras.measurement.cmd_data_sz); + if (ret) { + dev_err(dev, "failure on copy_from_user\n"); + fcs_free_memory(priv, s_buf, d_buf, NULL); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + msg->command = COMMAND_FCS_ATTESTATION_MEASUREMENTS; + msg->payload = s_buf; + msg->payload_length = datasz; + msg->payload_output = d_buf; + msg->payload_length_output = MEASUREMENT_RSP_MAX_SZ; + priv->client.receive_cb = fcs_attestation_callback; + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + if (priv->size > MEASUREMENT_RSP_MAX_SZ) { + dev_err(dev, + "returned size is incorrect\n"); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + + memcpy(data->com_paras.measurement.rsp_data, + priv->kbuf, priv->size); + data->com_paras.measurement.rsp_data_sz = priv->size; + data->status = priv->status; + } else { + data->com_paras.measurement.rsp_data = NULL; + data->com_paras.measurement.rsp_data_sz = 0; + data->status = priv->status; + } + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, s_buf, d_buf); + break; + + case INTEL_FCS_DEV_ATTESTATION_GET_CERTIFICATE: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.certificate.rsp_data_sz > CERTIFICATE_RSP_MAX_SZ) { + dev_err(dev, "Invalid certificate 
RSP size %d\n", + data->com_paras.certificate.rsp_data_sz); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + d_buf = stratix10_svc_allocate_memory(priv->chan, + CERTIFICATE_RSP_MAX_SZ); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate certificate RSP buf\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + msg->command = COMMAND_FCS_ATTESTATION_CERTIFICATE; + msg->payload = NULL; + msg->payload_length = 0; + msg->payload_output = d_buf; + msg->payload_length_output = CERTIFICATE_RSP_MAX_SZ; + msg->arg[0] = data->com_paras.certificate.c_request & 0x00ff; + priv->client.receive_cb = fcs_attestation_callback; + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + if (priv->size > CERTIFICATE_RSP_MAX_SZ) { + dev_err(dev, + "returned size is incorrect\n"); + fcs_close_services(priv, NULL, d_buf); + return -EFAULT; + } + + memcpy(data->com_paras.certificate.rsp_data, + priv->kbuf, priv->size); + data->com_paras.certificate.rsp_data_sz = priv->size; + data->status = priv->status; + } else { + data->com_paras.certificate.rsp_data = NULL; + data->com_paras.certificate.rsp_data_sz = 0; + data->status = priv->status; + } + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, NULL, d_buf); + break; + + case INTEL_FCS_DEV_ATTESTATION_CERTIFICATE_RELOAD: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + msg->command = COMMAND_FCS_ATTESTATION_CERTIFICATE_RELOAD; + msg->arg[0] = data->com_paras.c_reload.c_request & 0x00ff; + priv->client.receive_cb = fcs_vab_callback; + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (ret) { + dev_err(dev, "failed to send the request,ret=%d\n", + ret); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + + 
data->status = priv->status; + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + fcs_close_services(priv, NULL, NULL); + break; + + case INTEL_FCS_DEV_GET_ROM_PATCH_SHA384: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + s_buf = stratix10_svc_allocate_memory(priv->chan, + SHA384_SIZE); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed to allocate RNG buffer\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + msg->command = COMMAND_FCS_GET_ROM_PATCH_SHA384; + msg->payload = s_buf; + msg->payload_length = SHA384_SIZE; + priv->client.receive_cb = fcs_data_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + + if (!ret && !priv->status) { + if (!priv->kbuf) { + dev_err(dev, "failure on kbuf\n"); + fcs_close_services(priv, s_buf, NULL); + return -EFAULT; + } + + if (priv->size > SHA384_SIZE) { + dev_err(dev, "returned size is incorrect\n"); + fcs_close_services(priv, s_buf, NULL); + return -EFAULT; + } + + for (i = 0; i < 12; i++) + dev_dbg(dev, "output_data[%d]=%d\n", i, + *((int *)priv->kbuf + i)); + for (i = 0; i < 12; i++) + data->com_paras.sha384.checksum[i] = + *((int *)priv->kbuf + i); + data->status = priv->status; + + } else { + /* failed to get SHA */ + data->status = priv->status; + } + + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, s_buf, NULL); + break; + + case INTEL_FCS_DEV_CRYPTO_OPEN_SESSION: + msg->command = COMMAND_FCS_CRYPTO_OPEN_SESSION; + priv->client.receive_cb = fcs_crypto_sessionid_callback; + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret) { + dev_err(dev, "failed to send the cmd=%d,ret=%d\n", + COMMAND_FCS_CRYPTO_OPEN_SESSION, ret); + fcs_close_services(priv, NULL, NULL); 
+ return -EFAULT; + } + + data->status = priv->status; + data->com_paras.s_session.sid = priv->sid; + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + fcs_close_services(priv, NULL, NULL); + break; + + case INTEL_FCS_DEV_CRYPTO_CLOSE_SESSION: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + msg->command = COMMAND_FCS_CRYPTO_CLOSE_SESSION; + msg->arg[0] = data->com_paras.s_session.sid; + priv->client.receive_cb = fcs_vab_callback; + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret) { + dev_err(dev, "failed to send the request,ret=%d\n", + ret); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + + data->status = priv->status; + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + fcs_close_services(priv, NULL, NULL); + break; + + case INTEL_FCS_DEV_CRYPTO_IMPORT_KEY: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.k_import.obj_data_sz == 0 || + data->com_paras.k_import.obj_data == NULL) { + dev_err(dev, "Invalid key import request param\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + /* Allocate memory for header + key object */ + tsz = sizeof(struct fcs_crypto_key_header); + datasz = data->com_paras.k_import.obj_data_sz + tsz; + + s_buf = stratix10_svc_allocate_memory(priv->chan, datasz); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed to allocate key import buffer\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + ps_buf = stratix10_svc_allocate_memory(priv->chan, PS_BUF_SIZE); + if (IS_ERR(ps_buf)) { + dev_err(dev, "failed allocate p-status buffer\n"); + fcs_free_memory(priv, s_buf, NULL, NULL); + 
mutex_unlock(&priv->lock); + return -ENOMEM; + } + + /* copy session ID from the header */ + memcpy(s_buf, &data->com_paras.k_import.hd.sid, sizeof(uint32_t)); + ret = copy_from_user(s_buf + tsz, + data->com_paras.k_import.obj_data, + data->com_paras.k_import.obj_data_sz); + if (ret) { + dev_err(dev, "failed copy buf ret=%d\n", ret); + fcs_free_memory(priv, ps_buf, s_buf, NULL); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + msg->payload = s_buf; + msg->payload_length = datasz; + msg->command = COMMAND_FCS_CRYPTO_IMPORT_KEY; + priv->client.receive_cb = fcs_vab_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + dev_dbg(dev, "request service ret=%d\n", ret); + + if (!ret && !priv->status) { + /* to query the complete status */ + msg->payload = ps_buf; + msg->payload_length = PS_BUF_SIZE; + msg->command = COMMAND_POLL_SERVICE_STATUS; + priv->client.receive_cb = fcs_data_callback; + ret = fcs_request_service(priv, (void *)msg, + FCS_COMPLETED_TIMEOUT); + dev_dbg(dev, "request service ret=%d\n", ret); + if (!ret && !priv->status) + data->status = 0; + else { + data->status = priv->status; + if (priv->kbuf) + data->status |= ((*(u32 *)priv->kbuf) & 0xFF) + << 16; + } + } else { + data->status = priv->status; + } + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + fcs_close_services(priv, s_buf, ps_buf); + return -EFAULT; + } + + fcs_close_services(priv, s_buf, ps_buf); + break; + + case INTEL_FCS_DEV_CRYPTO_EXPORT_KEY: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.k_object.obj_data_sz > + CRYPTO_EXPORTED_KEY_OBJECT_MAX_SZ) { + dev_err(dev, "Invalid key object size %d\n", + data->com_paras.k_object.obj_data_sz); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + d_buf = stratix10_svc_allocate_memory(priv->chan, + 
CRYPTO_EXPORTED_KEY_OBJECT_MAX_SZ); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate key object buf\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + msg->command = COMMAND_FCS_CRYPTO_EXPORT_KEY; + msg->payload = NULL; + msg->payload_length = 0; + msg->payload_output = d_buf; + msg->payload_length_output = CRYPTO_EXPORTED_KEY_OBJECT_MAX_SZ; + msg->arg[0] = data->com_paras.k_object.sid; + msg->arg[1] = data->com_paras.k_object.kid; + priv->client.receive_cb = fcs_attestation_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + if (priv->size > CRYPTO_EXPORTED_KEY_OBJECT_MAX_SZ) { + dev_err(dev, "returned size %d is incorrect\n", + priv->size); + fcs_close_services(priv, NULL, d_buf); + return -EFAULT; + } + + memcpy(data->com_paras.k_object.obj_data, + priv->kbuf, priv->size); + data->com_paras.k_object.obj_data_sz = priv->size; + } else { + data->com_paras.k_object.obj_data = NULL; + data->com_paras.k_object.obj_data_sz = 0; + } + + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, NULL, d_buf); + break; + + case INTEL_FCS_DEV_CRYPTO_REMOVE_KEY: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + msg->command = COMMAND_FCS_CRYPTO_REMOVE_KEY; + msg->arg[0] = data->com_paras.k_object.sid; + msg->arg[1] = data->com_paras.k_object.kid; + priv->client.receive_cb = fcs_vab_callback; + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret) { + dev_err(dev, "failed to send the request,ret=%d\n", + ret); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + data->status = priv->status; + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + 
fcs_close_services(priv, NULL, NULL); + break; + + case INTEL_FCS_DEV_CRYPTO_GET_KEY_INFO: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.k_object.obj_data_sz > CRYPTO_GET_KEY_INFO_MAX_SZ) { + dev_err(dev, "Invalid key object size %d\n", + data->com_paras.k_object.obj_data_sz); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + d_buf = stratix10_svc_allocate_memory(priv->chan, + CRYPTO_GET_KEY_INFO_MAX_SZ); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate key object buf\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + msg->command = COMMAND_FCS_CRYPTO_GET_KEY_INFO; + msg->payload = NULL; + msg->payload_length = 0; + msg->payload_output = d_buf; + msg->payload_length_output = CRYPTO_GET_KEY_INFO_MAX_SZ; + msg->arg[0] = data->com_paras.k_object.sid; + msg->arg[1] = data->com_paras.k_object.kid; + priv->client.receive_cb = fcs_attestation_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + if (priv->size > CRYPTO_GET_KEY_INFO_MAX_SZ) { + dev_err(dev, "returned size %d is incorrect\n", + priv->size); + fcs_close_services(priv, NULL, d_buf); + return -EFAULT; + } + + memcpy(data->com_paras.k_object.obj_data, + priv->kbuf, priv->size); + data->com_paras.k_object.obj_data_sz = priv->size; + } else { + data->com_paras.k_object.obj_data = NULL; + data->com_paras.k_object.obj_data_sz = 0; + } + + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, NULL, d_buf); + break; + + case INTEL_FCS_DEV_CRYPTO_AES_CRYPT: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user data\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if ((data->com_paras.a_crypt.cpara.bmode 
== AES_CRYPT_MODE_ECB) && + (data->com_paras.a_crypt.cpara_size != AES_CRYPT_PARAM_SIZE_ECB)) { + dev_err(dev, "AES param size incorrect. Block mode=%d, size=%d\n", + data->com_paras.a_crypt.cpara.bmode, + data->com_paras.a_crypt.cpara_size); + mutex_unlock(&priv->lock); + return -EFAULT; + } else if (((data->com_paras.a_crypt.cpara.bmode == AES_CRYPT_MODE_CBC) || + (data->com_paras.a_crypt.cpara.bmode == AES_CRYPT_MODE_CTR)) && + (data->com_paras.a_crypt.cpara_size != AES_CRYPT_PARAM_SIZE_CBC_CTR)) { + dev_err(dev, "AES param size incorrect. Block mode=%d, size=%d\n", + data->com_paras.a_crypt.cpara.bmode, + data->com_paras.a_crypt.cpara_size); + mutex_unlock(&priv->lock); + return -EFAULT; + } else if (data->com_paras.a_crypt.cpara.bmode > AES_CRYPT_MODE_CTR) { + dev_err(dev, "Unknown AES block mode. Block mode=%d\n", + data->com_paras.a_crypt.cpara.bmode); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + iv_field_buf = stratix10_svc_allocate_memory(priv->chan, 28); + if (IS_ERR(iv_field_buf)) { + dev_err(dev, "failed allocate iv_field buf\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + sid = data->com_paras.a_crypt.sid; + cid = data->com_paras.a_crypt.cid; + kuid = data->com_paras.a_crypt.kuid; + + memcpy(iv_field_buf, &data->com_paras.a_crypt.cpara.bmode, 1); + memcpy(iv_field_buf + 1, &data->com_paras.a_crypt.cpara.aes_mode, 1); + memcpy(iv_field_buf + 12, data->com_paras.a_crypt.cpara.iv_field, 16); + + msg->command = COMMAND_FCS_CRYPTO_AES_CRYPT_INIT; + msg->payload = iv_field_buf; + msg->payload_length = data->com_paras.a_crypt.cpara_size; + msg->payload_output = NULL; + msg->payload_length_output = 0; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = kuid; + + priv->client.receive_cb = fcs_vab_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret || priv->status) { + dev_err(dev, "failed to send the cmd=%d,ret=%d\n", + COMMAND_FCS_CRYPTO_AES_CRYPT_INIT, + ret); + fcs_close_services(priv, 
iv_field_buf, NULL); + return -EFAULT; + } + + fcs_free_memory(priv, iv_field_buf, NULL, NULL); + + s_buf = stratix10_svc_allocate_memory(priv->chan, + data->com_paras.a_crypt.src_size);; + if (IS_ERR(s_buf)) { + dev_err(dev, "failed allocate source buf\n"); + fcs_close_services(priv, NULL, NULL); + return -ENOMEM; + } + + d_buf = stratix10_svc_allocate_memory(priv->chan, + data->com_paras.a_crypt.src_size); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate destation buf\n"); + fcs_close_services(priv, s_buf, NULL); + return -ENOMEM; + } + + ret = copy_from_user(s_buf, data->com_paras.a_crypt.src, + data->com_paras.a_crypt.src_size); + if (ret) { + dev_err(dev, "failure on copy_from_user\n"); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + + ps_buf = stratix10_svc_allocate_memory(priv->chan, PS_BUF_SIZE); + if (IS_ERR(ps_buf)) { + dev_err(dev, "failed to allocate p-status buf\n"); + fcs_close_services(priv, s_buf, d_buf); + return -ENOMEM; + } + + msg->command = COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->payload = s_buf; + msg->payload_length = data->com_paras.a_crypt.src_size; + msg->payload_output = d_buf; + msg->payload_length_output = data->com_paras.a_crypt.dst_size; + priv->client.receive_cb = fcs_attestation_callback; + + while (remaining_size > 0) { + if (remaining_size > AES_CRYPT_CMD_MAX_SZ) { + msg->command = COMMAND_FCS_CRYPTO_AES_CRYPT_UPDATE; + data_size = AES_CRYPT_CMD_MAX_SZ; + dev_dbg(dev, "AES crypt update. data_size=%d\n", data_size); + } else { + msg->command = COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE; + data_size = remaining_size; + dev_dbg(dev, "AES crypt finalize. 
data_size=%d\n", data_size); + } + + ret = copy_from_user(s_buf, input_file_pointer, data_size); + + if (ret) { + dev_err(dev, "failure on copy_from_user s_buf\n"); + fcs_free_memory(priv, s_buf, d_buf, ps_buf); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->payload = s_buf; + msg->payload_length = data_size; + msg->payload_output = d_buf; + msg->payload_length_output = data_size; + priv->client.receive_cb = fcs_attestation_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + /* to query the complete status */ + msg->payload = ps_buf; + msg->payload_length = PS_BUF_SIZE; + msg->command = COMMAND_POLL_SERVICE_STATUS; + priv->client.receive_cb = fcs_data_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_COMPLETED_TIMEOUT); + if (!ret && !priv->status) { + if (!priv->kbuf || priv->size != 16) { + dev_err(dev, "unregconize response\n"); + fcs_free_memory(priv, s_buf, d_buf, ps_buf); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + + buf_sz = ((u32 *)priv->kbuf)[3]; + + ret = copy_to_user(output_file_pointer, d_buf, buf_sz); + + total_out_size += buf_sz; + + if (ret) { + dev_err(dev, "failure on copy_to_user\n"); + fcs_free_memory(priv, s_buf, d_buf, ps_buf); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + } + } else { + data->com_paras.a_crypt.dst = NULL; + data->com_paras.a_crypt.dst_size = 0; + dev_err(dev, "unregconize response. ret=%d. status=%d\n", + ret, priv->status); + break; + } + + remaining_size -= data_size; + if (remaining_size == 0) { + dev_dbg(dev, "AES crypt finish sending\n"); + data->com_paras.a_crypt.dst_size = total_out_size; + break; + } else { + input_file_pointer += data_size; + output_file_pointer += data_size; + dev_dbg(dev, "Complete one update. 
Remaining size = %d\n", + remaining_size); + } + } + + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + fcs_free_memory(priv, s_buf, d_buf, ps_buf); + fcs_close_services(priv, NULL, NULL); + break; + + case INTEL_FCS_DEV_CRYPTO_GET_DIGEST: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + sid = data->com_paras.s_mac_data.sid; + cid = data->com_paras.s_mac_data.cid; + kuid = data->com_paras.s_mac_data.kuid; + + msg->command = COMMAND_FCS_CRYPTO_GET_DIGEST_INIT; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = kuid; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = data->com_paras.s_mac_data.sha_op_mode | + (data->com_paras.s_mac_data.sha_digest_sz << + CRYPTO_ECC_DIGEST_SZ_OFFSET); + + priv->client.receive_cb = fcs_vab_callback; + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret || priv->status) { + dev_err(dev, "failed to send the cmd=%d,ret=%d, status=%d\n", + COMMAND_FCS_CRYPTO_GET_DIGEST_INIT, ret, priv->status); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + + input_file_pointer = data->com_paras.s_mac_data.src; + remaining_size = data->com_paras.s_mac_data.src_size; + + s_buf = stratix10_svc_allocate_memory(priv->chan, + AES_CRYPT_CMD_MAX_SZ); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed allocate source buf\n"); + fcs_close_services(priv, NULL, NULL); + return -ENOMEM; + } + + d_buf = stratix10_svc_allocate_memory(priv->chan, + AES_CRYPT_CMD_MAX_SZ); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate destation buf\n"); + fcs_close_services(priv, s_buf, NULL); + return -ENOMEM; + } + + while (remaining_size > 0) { + if (remaining_size > AES_CRYPT_CMD_MAX_SZ) { + msg->command = COMMAND_FCS_CRYPTO_GET_DIGEST_UPDATE; + data_size = AES_CRYPT_CMD_MAX_SZ; + dev_dbg(dev, "Crypto 
get digest update. data_size=%d\n", + data_size); + } else { + msg->command = COMMAND_FCS_CRYPTO_GET_DIGEST_FINALIZE; + data_size = remaining_size; + dev_dbg(dev, "Crypto get digest finalize. data_size=%d\n", + data_size); + } + + memcpy(s_buf, input_file_pointer, data_size); + + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->payload = s_buf; + msg->payload_length = data_size; + msg->payload_output = d_buf; + msg->payload_length_output = AES_CRYPT_CMD_MAX_SZ; + priv->client.receive_cb = fcs_attestation_callback; + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + if (priv->size > AES_CRYPT_CMD_MAX_SZ) { + dev_err(dev, "returned size %d is incorrect\n", + priv->size); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + } else { + data->com_paras.s_mac_data.dst = NULL; + data->com_paras.s_mac_data.dst_size = 0; + dev_err(dev, "unregconize response. ret=%d. status=%d\n", + ret, priv->status); + break; + } + + remaining_size -= data_size; + if (remaining_size == 0) { + dev_dbg(dev, "Crypto get digest finish sending\n"); + memcpy(data->com_paras.s_mac_data.dst, priv->kbuf, priv->size); + data->com_paras.s_mac_data.dst_size = priv->size; + break; + } else { + input_file_pointer += data_size; + dev_dbg(dev, "Complete update. 
Remaining size = %d\n", + remaining_size); + } + } + + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, s_buf, d_buf); + break; + + case INTEL_FCS_DEV_CRYPTO_MAC_VERIFY: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + sid = data->com_paras.s_mac_data.sid; + cid = data->com_paras.s_mac_data.cid; + kuid = data->com_paras.s_mac_data.kuid; + out_sz = data->com_paras.s_mac_data.dst_size; + ud_sz = data->com_paras.s_mac_data.userdata_sz; + + msg->command = COMMAND_FCS_CRYPTO_MAC_VERIFY_INIT; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = kuid; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = data->com_paras.s_mac_data.sha_op_mode | + (data->com_paras.s_mac_data.sha_digest_sz << + CRYPTO_ECC_DIGEST_SZ_OFFSET); + priv->client.receive_cb = fcs_vab_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret || priv->status) { + dev_err(dev, "failed to send the cmd=%d,ret=%d, status=%d\n", + COMMAND_FCS_CRYPTO_MAC_VERIFY_INIT, ret, priv->status); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + + input_file_pointer = data->com_paras.s_mac_data.src; + remaining_size = data->com_paras.s_mac_data.src_size; + sign_size = data->com_paras.s_mac_data.src_size + - data->com_paras.s_mac_data.userdata_sz; + + s_buf = stratix10_svc_allocate_memory(priv->chan, + AES_CRYPT_CMD_MAX_SZ); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed allocate source buf\n"); + fcs_close_services(priv, NULL, NULL); + return -ENOMEM; + } + + d_buf = stratix10_svc_allocate_memory(priv->chan, out_sz); + if (!d_buf) { + dev_err(dev, "failed allocate destation buf\n"); + fcs_close_services(priv, s_buf, NULL); + return -ENOMEM; + } + + while (remaining_size > 0) { + if (remaining_size > 
AES_CRYPT_CMD_MAX_SZ) { + /* Finalize stage require minimun 8bytes data size */ + if ((remaining_size - AES_CRYPT_CMD_MAX_SZ) >= + (CRYPTO_SERVICE_MIN_DATA_SIZE + sign_size)) { + data_size = AES_CRYPT_CMD_MAX_SZ; + ud_sz = AES_CRYPT_CMD_MAX_SZ; + dev_dbg(dev, "Update full. data_size=%d, ud_sz=%ld\n", + data_size, ud_sz); + } else { + data_size = (remaining_size - CRYPTO_SERVICE_MIN_DATA_SIZE - + sign_size); + ud_sz = (remaining_size - CRYPTO_SERVICE_MIN_DATA_SIZE - + sign_size); + dev_dbg(dev, "Update partial. data_size=%d, ud_sz=%ld\n", + data_size, ud_sz); + } + msg->command = COMMAND_FCS_CRYPTO_MAC_VERIFY_UPDATE; + } else { + data_size = remaining_size; + ud_sz = remaining_size - sign_size; + msg->command = COMMAND_FCS_CRYPTO_MAC_VERIFY_FINALIZE; + dev_dbg(dev, "Finalize. data_size=%d, ud_sz=%ld\n", data_size, + ud_sz); + } + + memcpy(s_buf, input_file_pointer, data_size); + + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = ud_sz; + msg->payload = s_buf; + msg->payload_length = data_size; + msg->payload_output = d_buf; + msg->payload_length_output = out_sz; + priv->client.receive_cb = fcs_attestation_callback; + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + if (priv->size > out_sz) { + dev_err(dev, "returned size %d is incorrect\n", + priv->size); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + } else { + data->com_paras.s_mac_data.dst = NULL; + data->com_paras.s_mac_data.dst_size = 0; + dev_err(dev, "unregconize response. ret=%d. status=%d\n", + ret, priv->status); + break; + } + + remaining_size -= data_size; + if (remaining_size == 0) { + dev_dbg(dev, "Crypto get verify finish sending\n"); + memcpy(data->com_paras.s_mac_data.dst, priv->kbuf, priv->size); + data->com_paras.s_mac_data.dst_size = priv->size; + break; + } else { + input_file_pointer += data_size; + dev_dbg(dev, "Complete one update. 
Remaining size = %d\n", + remaining_size); + } + } + + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, s_buf, d_buf); + break; + + case INTEL_FCS_DEV_CRYPTO_ECDSA_HASH_SIGNING: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + sid = data->com_paras.ecdsa_data.sid; + cid = data->com_paras.ecdsa_data.cid; + kuid = data->com_paras.ecdsa_data.kuid; + in_sz = data->com_paras.ecdsa_data.src_size; + out_sz = data->com_paras.ecdsa_data.dst_size; + + msg->command = COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_INIT; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = kuid; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = data->com_paras.ecdsa_data.ecc_algorithm & 0xF; + priv->client.receive_cb = fcs_vab_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret || priv->status) { + dev_err(dev, "failed to send the cmd=%d,ret=%d, status=%d\n", + COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_INIT, + ret, priv->status); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + + s_buf = stratix10_svc_allocate_memory(priv->chan, in_sz); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed allocate source buf\n"); + fcs_close_services(priv, NULL, NULL); + return -ENOMEM; + } + + d_buf = stratix10_svc_allocate_memory(priv->chan, out_sz); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate destation buf\n"); + fcs_close_services(priv, s_buf, NULL); + return -ENOMEM; + } + + memcpy(s_buf, data->com_paras.ecdsa_data.src, + data->com_paras.ecdsa_data.src_size); + + msg->command = COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_FINALIZE; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->payload = s_buf; + msg->payload_length = in_sz; + msg->payload_output = d_buf; + msg->payload_length_output = out_sz; + 
priv->client.receive_cb = fcs_attestation_callback; + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + if (priv->size > out_sz) { + dev_err(dev, "returned size %d is incorrect\n", + priv->size); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + + memcpy(data->com_paras.ecdsa_data.dst, + priv->kbuf, priv->size); + data->com_paras.ecdsa_data.dst_size = priv->size; + } else { + data->com_paras.ecdsa_data.dst = NULL; + data->com_paras.ecdsa_data.dst_size = 0; + } + + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, s_buf, d_buf); + break; + + case INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + sid = data->com_paras.ecdsa_data.sid; + cid = data->com_paras.ecdsa_data.cid; + kuid = data->com_paras.ecdsa_data.kuid; + in_sz = data->com_paras.ecdsa_data.src_size; + out_sz = data->com_paras.ecdsa_data.dst_size; + + msg->command = COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_INIT; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = kuid; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = data->com_paras.ecdsa_data.ecc_algorithm & 0xF; + priv->client.receive_cb = fcs_vab_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret || priv->status) { + dev_err(dev, "failed to send the cmd=%d,ret=%d, status=%d\n", + COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_INIT, + ret, priv->status); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + + input_file_pointer = data->com_paras.ecdsa_data.src; + + remaining_size = data->com_paras.ecdsa_data.src_size; + + s_buf = stratix10_svc_allocate_memory(priv->chan, + AES_CRYPT_CMD_MAX_SZ); + if (IS_ERR(s_buf)) { + 
dev_err(dev, "failed allocate source buf\n"); + fcs_close_services(priv, NULL, NULL); + return -ENOMEM; + } + + d_buf = stratix10_svc_allocate_memory(priv->chan, out_sz); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate destation buf\n"); + fcs_close_services(priv, s_buf, NULL); + return -ENOMEM; + } + + while (remaining_size > 0) { + if (remaining_size > AES_CRYPT_CMD_MAX_SZ) { + msg->command = + COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE; + data_size = AES_CRYPT_CMD_MAX_SZ; + dev_dbg(dev, "ECDSA data sign update stage. data_size=%d\n", + data_size); + } else { + msg->command = + COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE; + data_size = remaining_size; + dev_dbg(dev, "ECDSA data sign finalize stage. data_size=%d\n", + data_size); + } + + memcpy(s_buf, input_file_pointer, data_size); + + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->payload = s_buf; + msg->payload_length = data_size; + msg->payload_output = d_buf; + msg->payload_length_output = out_sz; + priv->client.receive_cb = fcs_attestation_callback; + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + if (priv->size > out_sz) { + dev_err(dev, "returned size %d is incorrect\n", + priv->size); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + } else { + data->com_paras.ecdsa_data.dst = NULL; + data->com_paras.ecdsa_data.dst_size = 0; + dev_err(dev, "unregconize response. ret=%d. status=%d\n", + ret, priv->status); + break; + } + + remaining_size -= data_size; + if (remaining_size == 0) { + dev_dbg(dev, "ECDSA data sign finish sending\n"); + memcpy(data->com_paras.ecdsa_data.dst, priv->kbuf, priv->size); + data->com_paras.ecdsa_data.dst_size = priv->size; + break; + } else { + input_file_pointer += data_size; + dev_dbg(dev, "Complete update. 
Remaining size = %d\n", + remaining_size); + } + } + + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, s_buf, d_buf); + break; + + case INTEL_FCS_DEV_CRYPTO_ECDSA_HASH_VERIFY: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + sid = data->com_paras.ecdsa_data.sid; + cid = data->com_paras.ecdsa_data.cid; + kuid = data->com_paras.ecdsa_data.kuid; + in_sz = data->com_paras.ecdsa_data.src_size; + out_sz = data->com_paras.ecdsa_data.dst_size; + + msg->command = COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_INIT; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = kuid; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = data->com_paras.ecdsa_data.ecc_algorithm & 0xF; + priv->client.receive_cb = fcs_vab_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret || priv->status) { + dev_err(dev, "failed to send the cmd=%d,ret=%d, status=%d\n", + COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_INIT, + ret, priv->status); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + + s_buf = stratix10_svc_allocate_memory(priv->chan, in_sz); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed allocate source buf\n"); + fcs_close_services(priv, NULL, NULL); + return -ENOMEM; + } + + d_buf = stratix10_svc_allocate_memory(priv->chan, out_sz); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate destation buf\n"); + fcs_close_services(priv, s_buf, NULL); + return -ENOMEM; + } + + memcpy(s_buf, data->com_paras.ecdsa_data.src, + data->com_paras.ecdsa_data.src_size); + + msg->command = COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_FINALIZE; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->payload = s_buf; + msg->payload_length = in_sz; + msg->payload_output = d_buf; + msg->payload_length_output = out_sz; + 
priv->client.receive_cb = fcs_attestation_callback; + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + if (priv->size > out_sz) { + dev_err(dev, "returned size %d is incorrect\n", + priv->size); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + + memcpy(data->com_paras.ecdsa_data.dst, + priv->kbuf, priv->size); + data->com_paras.ecdsa_data.dst_size = priv->size; + } else { + data->com_paras.ecdsa_data.dst = NULL; + data->com_paras.ecdsa_data.dst_size = 0; + } + + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, s_buf, d_buf); + + break; + + case INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + sid = data->com_paras.ecdsa_sha2_data.sid; + cid = data->com_paras.ecdsa_sha2_data.cid; + kuid = data->com_paras.ecdsa_sha2_data.kuid; + in_sz = data->com_paras.ecdsa_sha2_data.src_size; + out_sz = data->com_paras.ecdsa_sha2_data.dst_size; + ud_sz = data->com_paras.ecdsa_sha2_data.userdata_sz; + + msg->command = COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_INIT; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = kuid; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = data->com_paras.ecdsa_sha2_data.ecc_algorithm & 0xF; + priv->client.receive_cb = fcs_vab_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret || priv->status) { + dev_err(dev, "failed to send the cmd=%d,ret=%d, status=%d\n", + COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_INIT, + ret, priv->status); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + + input_file_pointer = data->com_paras.ecdsa_sha2_data.src; + remaining_size = data->com_paras.ecdsa_sha2_data.src_size; + sign_size = 
data->com_paras.ecdsa_sha2_data.src_size - + data->com_paras.ecdsa_sha2_data.userdata_sz; + + s_buf = stratix10_svc_allocate_memory(priv->chan, + AES_CRYPT_CMD_MAX_SZ); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed allocate source buf\n"); + fcs_close_services(priv, NULL, NULL); + return -ENOMEM; + } + + d_buf = stratix10_svc_allocate_memory(priv->chan, out_sz); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate destation buf\n"); + fcs_close_services(priv, s_buf, NULL); + return -ENOMEM; + } + + while (remaining_size > 0) { + if (remaining_size > AES_CRYPT_CMD_MAX_SZ) { + /* Finalize stage require minimun 8bytes data size */ + if ((remaining_size - AES_CRYPT_CMD_MAX_SZ) >= + (CRYPTO_SERVICE_MIN_DATA_SIZE + sign_size)) { + data_size = AES_CRYPT_CMD_MAX_SZ; + ud_sz = AES_CRYPT_CMD_MAX_SZ; + dev_dbg(dev, "Update full. data_size=%d, ud_sz=%ld\n", + data_size, ud_sz); + } else { + data_size = (remaining_size - CRYPTO_SERVICE_MIN_DATA_SIZE - + sign_size); + ud_sz = (remaining_size - CRYPTO_SERVICE_MIN_DATA_SIZE - + sign_size); + dev_dbg(dev, "Update partial. data_size=%d, ud_sz=%ld\n", + data_size, ud_sz); + } + msg->command = COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_UPDATE; + } else { + data_size = remaining_size; + ud_sz = remaining_size - sign_size; + msg->command = COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_FINALIZE; + dev_dbg(dev, "Finalize. 
data_size=%d, ud_sz=%ld\n", data_size, + ud_sz); + } + + memcpy(s_buf, input_file_pointer, data_size); + + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = ud_sz; + msg->payload = s_buf; + msg->payload_length = data_size; + msg->payload_output = d_buf; + msg->payload_length_output = out_sz; + priv->client.receive_cb = fcs_attestation_callback; + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + if (priv->size > out_sz) { + dev_err(dev, "returned size %d is incorrect\n", + priv->size); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + } else { + data->com_paras.ecdsa_sha2_data.dst = NULL; + data->com_paras.ecdsa_sha2_data.dst_size = 0; + dev_err(dev, "unregconize response. ret=%d. status=%d\n", + ret, priv->status); + break; + } + + remaining_size -= data_size; + if (remaining_size == 0) { + dev_dbg(dev, "ECDSA data verify finish sending\n"); + memcpy(data->com_paras.ecdsa_sha2_data.dst, priv->kbuf, + priv->size); + data->com_paras.ecdsa_sha2_data.dst_size = priv->size; + break; + } else { + input_file_pointer += data_size; + dev_dbg(dev, "Complete one update. 
Remaining size = %d\n", + remaining_size); + } + } + + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, s_buf, d_buf); + break; + + case INTEL_FCS_DEV_CRYPTO_ECDSA_GET_PUBLIC_KEY: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + sid = data->com_paras.ecdsa_data.sid; + cid = data->com_paras.ecdsa_data.cid; + kuid = data->com_paras.ecdsa_data.kuid; + out_sz = data->com_paras.ecdsa_data.dst_size; + + msg->command = COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_INIT; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = kuid; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = data->com_paras.ecdsa_data.ecc_algorithm & 0xF; + priv->client.receive_cb = fcs_vab_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret || priv->status) { + dev_err(dev, "failed to send the cmd=%d,ret=%d, status=%d\n", + COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_INIT, + ret, priv->status); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + + d_buf = stratix10_svc_allocate_memory(priv->chan, out_sz); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate destation buf\n"); + fcs_close_services(priv, NULL, NULL); + return -ENOMEM; + } + + msg->command = COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_FINALIZE; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->payload = NULL; + msg->payload_length = 0; + msg->payload_output = d_buf; + msg->payload_length_output = out_sz; + priv->client.receive_cb = fcs_attestation_callback; + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + if (priv->size > out_sz) { + dev_err(dev, "returned size %d is incorrect\n", + priv->size); + fcs_close_services(priv, NULL, d_buf); + return -EFAULT; + } + + 
memcpy(data->com_paras.ecdsa_data.dst, + priv->kbuf, priv->size); + data->com_paras.ecdsa_data.dst_size = priv->size; + } else { + data->com_paras.ecdsa_data.dst = NULL; + data->com_paras.ecdsa_data.dst_size = 0; + } + + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, NULL, d_buf); + break; + + case INTEL_FCS_DEV_CRYPTO_ECDH_REQUEST: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.ecdsa_data.src_size == 0 || + data->com_paras.ecdsa_data.src == NULL) { + dev_err(dev, "Invalid ECDH request src param\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.ecdsa_data.dst_size == 0 || + data->com_paras.ecdsa_data.dst == NULL) { + dev_err(dev, "Invalid ECDH request dst param\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + sid = data->com_paras.ecdsa_data.sid; + cid = data->com_paras.ecdsa_data.cid; + kuid = data->com_paras.ecdsa_data.kuid; + in_sz = data->com_paras.ecdsa_data.src_size; + out_sz = data->com_paras.ecdsa_data.dst_size; + + msg->command = COMMAND_FCS_CRYPTO_ECDH_REQUEST_INIT; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = kuid; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = data->com_paras.ecdsa_data.ecc_algorithm & 0xF; + priv->client.receive_cb = fcs_vab_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret || priv->status) { + dev_err(dev, "failed to send the cmd=%d,ret=%d, status=%d\n", + COMMAND_FCS_CRYPTO_ECDH_REQUEST_INIT, + ret, priv->status); + }; + + s_buf = stratix10_svc_allocate_memory(priv->chan, in_sz); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed allocate source buf\n"); + fcs_close_services(priv, NULL, NULL); + return -ENOMEM; + } + + d_buf = 
stratix10_svc_allocate_memory(priv->chan, out_sz); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate destation buf\n"); + fcs_close_services(priv, s_buf, NULL); + return -ENOMEM; + } + + /* Copy user data from user space to kernel space */ + ret = copy_from_user(s_buf, + data->com_paras.ecdsa_data.src, + data->com_paras.ecdsa_data.src_size); + if (ret) { + dev_err(dev, "failed copy buf ret=%d\n", ret); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + + msg->command = COMMAND_FCS_CRYPTO_ECDH_REQUEST_FINALIZE; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->payload = s_buf; + msg->payload_length = in_sz; + msg->payload_output = d_buf; + msg->payload_length_output = out_sz; + priv->client.receive_cb = fcs_attestation_callback; + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + if (priv->size > out_sz) { + dev_err(dev, "returned size %d is incorrect\n", + priv->size); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + + memcpy(data->com_paras.ecdsa_data.dst, + priv->kbuf, priv->size); + data->com_paras.ecdsa_data.dst_size = priv->size; + } else { + data->com_paras.ecdsa_data.dst = NULL; + data->com_paras.ecdsa_data.dst_size = 0; + } + + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, s_buf, d_buf); + break; + + case INTEL_FCS_DEV_RANDOM_NUMBER_GEN_EXT: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + sid = data->com_paras.rn_gen_ext.sid; + cid = data->com_paras.rn_gen_ext.cid; + out_sz = data->com_paras.rn_gen_ext.rng_sz; + buf_sz = RANDOM_NUMBER_EXT_SIZE + RANDOM_NUMBER_EXT_OFFSET; + + d_buf = stratix10_svc_allocate_memory(priv->chan, buf_sz); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed to allocate RNG_EXT output 
buf\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + msg->command = COMMAND_FCS_RANDOM_NUMBER_GEN_EXT; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = out_sz; + priv->client.receive_cb = fcs_attestation_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + dev_dbg(dev, "request service ret=%d\n", ret); + + timeout = 100; + if (!ret && !priv->status) { + /* to query the complete status */ + msg->arg[0] = ASYNC_POLL_SERVICE; + msg->payload = d_buf; + msg->payload_length = buf_sz; + msg->command = COMMAND_POLL_SERVICE_STATUS_ASYNC; + priv->client.receive_cb = fcs_data_callback; + + while (timeout != 0) { + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + dev_dbg(dev, "request service ret=%d\n", ret); + + if (!ret && !priv->status) { + if (priv->size == out_sz + RANDOM_NUMBER_EXT_OFFSET) { + memcpy(data->com_paras.rn_gen_ext.rng_data, + priv->kbuf + RANDOM_NUMBER_EXT_OFFSET, + out_sz); + data->com_paras.rn_gen_ext.rng_sz = out_sz; + break; + } + } else { + data->com_paras.rn_gen_ext.rng_data = NULL; + data->com_paras.rn_gen_ext.rng_sz = 0; + break; + } + timeout--; + mdelay(500); + } + } + + if (priv->status == 0 && timeout == 0) + data->status = -ETIMEDOUT; + else + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + fcs_close_services(priv, NULL, d_buf); + return -EFAULT; + } + + fcs_close_services(priv, NULL, d_buf); + break; + + case INTEL_FCS_DEV_SDOS_DATA_EXT: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.data_sdos_ext.src_size == 0 || + data->com_paras.data_sdos_ext.src == NULL) { + dev_err(dev, "Invalid SDOS request src param\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.data_sdos_ext.dst_size == 0 || + 
data->com_paras.data_sdos_ext.dst == NULL) { + dev_err(dev, "Invalid SDOS request dst param\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + sid = data->com_paras.data_sdos_ext.sid; + cid = data->com_paras.data_sdos_ext.cid; + in_sz = data->com_paras.data_sdos_ext.src_size; + + s_buf = stratix10_svc_allocate_memory(priv->chan, in_sz); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed allocate source buf\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + d_buf = stratix10_svc_allocate_memory(priv->chan, AES_CRYPT_CMD_MAX_SZ); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate destation buf\n"); + fcs_free_memory(priv, s_buf, NULL, NULL); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + /* Copy user data from user space to kernel space */ + ret = copy_from_user(s_buf, + data->com_paras.data_sdos_ext.src, + data->com_paras.data_sdos_ext.src_size); + if (ret) { + dev_err(dev, "failed copy buf ret=%d\n", ret); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + + msg->command = COMMAND_FCS_SDOS_DATA_EXT; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = data->com_paras.data_sdos_ext.op_mode; + msg->payload = s_buf; + msg->payload_length = in_sz; + msg->payload_output = d_buf; + msg->payload_length_output = AES_CRYPT_CMD_MAX_SZ; + priv->client.receive_cb = fcs_sdos_data_callback; + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && + (!priv->status || + priv->status == SDOS_DECRYPTION_ERROR_102 || + priv->status == SDOS_DECRYPTION_ERROR_103)) { + if (priv->size > AES_CRYPT_CMD_MAX_SZ) { + dev_err(dev, "returned size %d is incorrect\n", + priv->size); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + + memcpy(data->com_paras.data_sdos_ext.dst, + priv->kbuf, priv->size); + data->com_paras.data_sdos_ext.dst_size = priv->size; + } else { + data->com_paras.data_sdos_ext.dst = NULL; + data->com_paras.data_sdos_ext.dst_size = 0; + } + + data->status = priv->status; + + if 
(copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + fcs_close_services(priv, s_buf, d_buf); + + break; + + case INTEL_FCS_DEV_MBOX_SEND: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.mbox_send_cmd.cmd_data_sz % 4) { + dev_err(dev, "Command data size (%d) is not 4 byte align\n", + data->com_paras.mbox_send_cmd.cmd_data_sz); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.mbox_send_cmd.rsp_data_sz % 4) { + dev_err(dev, "Respond data size (%d) is not 4 byte align\n", + data->com_paras.mbox_send_cmd.rsp_data_sz); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + if (data->com_paras.mbox_send_cmd.cmd_data_sz) { + s_buf = stratix10_svc_allocate_memory(priv->chan, + data->com_paras.mbox_send_cmd.cmd_data_sz); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed allocate source CMD buf\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + } else { + s_buf = NULL; + } + + if (data->com_paras.mbox_send_cmd.rsp_data_sz) { + d_buf = stratix10_svc_allocate_memory(priv->chan, + data->com_paras.mbox_send_cmd.rsp_data_sz); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate destination RSP buf\n"); + fcs_free_memory(priv, s_buf, NULL, NULL); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + } else { + d_buf = NULL; + } + + if (s_buf != NULL) { + /* Copy user data from user space to kernel space */ + ret = copy_from_user(s_buf, + data->com_paras.mbox_send_cmd.cmd_data, + data->com_paras.mbox_send_cmd.cmd_data_sz); + if (ret) { + dev_err(dev, "failed copy buf ret=%d\n", ret); + fcs_free_memory(priv, s_buf, d_buf, NULL); + mutex_unlock(&priv->lock); + return -EFAULT; + } + } + + msg->command = COMMAND_MBOX_SEND_CMD; + msg->arg[0] = data->com_paras.mbox_send_cmd.mbox_cmd; + msg->arg[1] = 
data->com_paras.mbox_send_cmd.urgent; + msg->payload = s_buf; + msg->payload_length = data->com_paras.mbox_send_cmd.cmd_data_sz; + msg->payload_output = d_buf; + msg->payload_length_output = data->com_paras.mbox_send_cmd.rsp_data_sz; + priv->client.receive_cb = fcs_mbox_send_cmd_callback; + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + + if (!ret && !priv->status) { + if (priv->size > data->com_paras.mbox_send_cmd.rsp_data_sz) { + dev_err(dev, "Resp data size (%d) bigger than dest size\n", + priv->size); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + + data->com_paras.mbox_send_cmd.rsp_data_sz = priv->size; + + if (data->com_paras.mbox_send_cmd.rsp_data_sz) { + ret = copy_to_user(data->com_paras.mbox_send_cmd.rsp_data, d_buf, + data->com_paras.mbox_send_cmd.rsp_data_sz); + + if (ret) { + dev_err(dev, "failure on copy_to_user\n"); + fcs_close_services(priv, s_buf, d_buf); + return -EFAULT; + } + } + } else { + data->com_paras.mbox_send_cmd.rsp_data = NULL; + data->com_paras.mbox_send_cmd.rsp_data_sz = 0; + } + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, s_buf, d_buf); + break; + + case INTEL_FCS_DEV_CHECK_SMMU_ENABLED: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + msg->command = COMMAND_SMC_SVC_VERSION; + priv->client.receive_cb = fcs_atf_version_smmu_check_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + + if (!ret && !priv->status) + data->status = -1; + else { + mutex_unlock(&priv->lock); + return -EFAULT; + } + + data->status = priv->status; + + msg->command = COMMAND_FIRMWARE_VERSION; + priv->client.receive_cb = fcs_fw_version_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + + 
data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, NULL, NULL); + break; + + case INTEL_FCS_DEV_CRYPTO_AES_CRYPT_SMMU: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user data\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + iv_field_buf = stratix10_svc_allocate_memory(priv->chan, 28); + if (IS_ERR(iv_field_buf)) { + dev_err(dev, "failed allocate iv_field buf\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + sid = data->com_paras.a_crypt.sid; + cid = data->com_paras.a_crypt.cid; + kuid = data->com_paras.a_crypt.kuid; + + memcpy(iv_field_buf, &data->com_paras.a_crypt.cpara.bmode, 1); + memcpy(iv_field_buf + 1, &data->com_paras.a_crypt.cpara.aes_mode, 1); + memcpy(iv_field_buf + 12, data->com_paras.a_crypt.cpara.iv_field, 16); + + msg->command = COMMAND_FCS_CRYPTO_AES_CRYPT_INIT; + msg->payload = iv_field_buf; + msg->payload_length = data->com_paras.a_crypt.cpara_size; + msg->payload_output = NULL; + msg->payload_length_output = 0; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = kuid; + + priv->client.receive_cb = fcs_vab_callback; + + if (data->com_paras.a_crypt.init == true) { + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret || priv->status) { + dev_err(dev, "failed to send the cmd=%d,ret=%d\n", + COMMAND_FCS_CRYPTO_AES_CRYPT_INIT, + ret); + fcs_close_services(priv, iv_field_buf, NULL); + return -EFAULT; + } + } + fcs_free_memory(priv, iv_field_buf, NULL, NULL); + + remaining_size = data->com_paras.a_crypt.src_size; + + ps_buf = stratix10_svc_allocate_memory(priv->chan, PS_BUF_SIZE); + if (IS_ERR(ps_buf)) { + dev_err(dev, "failed to allocate p-status buf\n"); + fcs_close_services(priv, NULL, NULL); + return -ENOMEM; + } + + if (remaining_size > AES_BUFFER_CMD_MAX_SZ) { + msg->command = 
COMMAND_FCS_CRYPTO_AES_CRYPT_UPDATE_SMMU; + data_size = AES_BUFFER_CMD_MAX_SZ; + dev_dbg(dev, "AES crypt update. data_size=%d\n", data_size); + } else { + msg->command = COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE_SMMU; + data_size = remaining_size; + dev_dbg(dev, "AES crypt finalize. data_size=%d\n", data_size); + } + + src_addr = get_buffer_addr(SRC_BUFFER_STARTING_L2_IDX); + dst_addr = get_buffer_addr((SRC_BUFFER_STARTING_L2_IDX + + data->com_paras.a_crypt.buffer_offset)); + + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->payload = &src_addr; + msg->payload_length = data_size; + msg->payload_output = &dst_addr; + msg->payload_length_output = data_size; + priv->client.receive_cb = fcs_attestation_callback; + + context_bank_enable(priv); + + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + /* to query the complete status */ + msg->payload = ps_buf; + msg->payload_length = PS_BUF_SIZE; + msg->command = COMMAND_POLL_SERVICE_STATUS; + priv->client.receive_cb = fcs_data_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_COMPLETED_TIMEOUT); + if (!ret && !priv->status) { + if (!priv->kbuf || priv->size != 16) { + dev_err(dev, "unregconize response\n"); + context_bank_disable(priv); + fcs_close_services(priv, ps_buf, NULL); + return -EFAULT; + } + } + } else { + data->com_paras.a_crypt.dst = NULL; + data->com_paras.a_crypt.dst_size = 0; + dev_err(dev, "unregconize response. ret=%d. 
status=%d\n", + ret, priv->status); + context_bank_disable(priv); + fcs_close_services(priv, ps_buf, NULL); + return -EFAULT; + } + + context_bank_disable(priv); + invalidate_smmu_tlb_entries(priv); + + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, ps_buf, NULL); + break; + + case INTEL_FCS_DEV_CRYPTO_GET_DIGEST_SMMU: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + sid = data->com_paras.s_mac_data.sid; + cid = data->com_paras.s_mac_data.cid; + kuid = data->com_paras.s_mac_data.kuid; + + msg->command = COMMAND_FCS_CRYPTO_GET_DIGEST_INIT; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = kuid; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = data->com_paras.s_mac_data.sha_op_mode | + (data->com_paras.s_mac_data.sha_digest_sz << + CRYPTO_ECC_DIGEST_SZ_OFFSET); + + priv->client.receive_cb = fcs_vab_callback; + if (data->com_paras.s_mac_data.init == true) { + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret || priv->status) { + dev_err(dev, "failed to send the cmd=%d,ret=%d, status=%d\n", + COMMAND_FCS_CRYPTO_GET_DIGEST_INIT, ret, priv->status); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + } + + remaining_size = data->com_paras.s_mac_data.src_size; + + d_buf = stratix10_svc_allocate_memory(priv->chan, + AES_CRYPT_CMD_MAX_SZ); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate destation buf\n"); + fcs_close_services(priv, NULL, NULL); + return -ENOMEM; + } + + ps_buf = stratix10_svc_allocate_memory(priv->chan, SMMU_BUF_SIZE); + if (IS_ERR(ps_buf)) { + dev_err(dev, "failed to allocate p-status buf\n"); + fcs_close_services(priv, d_buf, NULL); + return -ENOMEM; + } + + if (remaining_size > HMAC_CMD_MAX_SZ) { + msg->command = 
COMMAND_FCS_CRYPTO_GET_DIGEST_UPDATE_SMMU; + data_size = HMAC_CMD_MAX_SZ; + dev_dbg(dev, "Crypto get digest update. data_size=%d\n", + data_size); + } else { + msg->command = COMMAND_FCS_CRYPTO_GET_DIGEST_FINALIZE_SMMU; + data_size = remaining_size; + dev_dbg(dev, "Crypto get digest finalize. data_size=%d\n", + data_size); + } + + src_addr = get_buffer_addr(SRC_BUFFER_STARTING_L2_IDX); + + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->payload = &src_addr; + msg->payload_length = data_size; + msg->payload_output = d_buf; + msg->payload_length_output = AES_CRYPT_CMD_MAX_SZ; + priv->client.receive_cb = fcs_attestation_callback; + + context_bank_enable(priv); + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + /* to query the complete status */ + msg->payload = ps_buf; + msg->payload_length = SMMU_BUF_SIZE; + msg->command = COMMAND_POLL_SERVICE_STATUS; + priv->client.receive_cb = fcs_data_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_COMPLETED_TIMEOUT); + if (!ret && !priv->status) { + if (!priv->kbuf) { + dev_err(dev, "unregconize response\n"); + context_bank_disable(priv); + fcs_close_services(priv, d_buf, ps_buf); + return -EFAULT; + } + } + } else { + data->com_paras.s_mac_data.dst = NULL; + data->com_paras.s_mac_data.dst_size = 0; + dev_err(dev, "unregconize response. ret=%d. 
status=%d\n", + ret, priv->status); + context_bank_disable(priv); + fcs_close_services(priv, d_buf, ps_buf); + return -EFAULT; + } + + remaining_size -= data_size; + if (remaining_size == 0) { + dev_dbg(dev, "Crypto get digest finish sending\n"); + memcpy(data->com_paras.s_mac_data.dst, priv->kbuf, priv->size); + data->com_paras.s_mac_data.dst_size = priv->size; + } + + context_bank_disable(priv); + invalidate_smmu_tlb_entries(priv); + + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, ps_buf, d_buf); + break; + + case INTEL_FCS_DEV_CRYPTO_MAC_VERIFY_SMMU: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + sid = data->com_paras.s_mac_data.sid; + cid = data->com_paras.s_mac_data.cid; + kuid = data->com_paras.s_mac_data.kuid; + out_sz = data->com_paras.s_mac_data.dst_size; + ud_sz = data->com_paras.s_mac_data.userdata_sz; + + msg->command = COMMAND_FCS_CRYPTO_MAC_VERIFY_INIT; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = kuid; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = data->com_paras.s_mac_data.sha_op_mode | + (data->com_paras.s_mac_data.sha_digest_sz << + CRYPTO_ECC_DIGEST_SZ_OFFSET); + priv->client.receive_cb = fcs_vab_callback; + + if (data->com_paras.s_mac_data.init == true) { + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret || priv->status) { + dev_err(dev, "failed to send the cmd=%d,ret=%d, status=%d\n", + COMMAND_FCS_CRYPTO_MAC_VERIFY_INIT, ret, priv->status); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + } + + remaining_size = data->com_paras.s_mac_data.src_size; + + d_buf = stratix10_svc_allocate_memory(priv->chan, out_sz); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate destation buf\n"); + fcs_close_services(priv, NULL, NULL); 
+ return -ENOMEM; + } + + ps_buf = stratix10_svc_allocate_memory(priv->chan, SMMU_BUF_SIZE); + if (IS_ERR(ps_buf)) { + dev_err(dev, "failed to allocate p-status buf\n"); + fcs_close_services(priv, d_buf, NULL); + return -ENOMEM; + } + + if (remaining_size > HMAC_CMD_MAX_SZ) { + if (data->com_paras.s_mac_data.userdata_sz >= HMAC_CMD_MAX_SZ) { + data_size = HMAC_CMD_MAX_SZ; + ud_sz = HMAC_CMD_MAX_SZ; + } else { + data_size = data->com_paras.s_mac_data.userdata_sz; + ud_sz = data->com_paras.s_mac_data.userdata_sz; + } + msg->command = COMMAND_FCS_CRYPTO_MAC_VERIFY_UPDATE_SMMU; + } else { + data_size = remaining_size; + ud_sz = data->com_paras.s_mac_data.userdata_sz; + memcpy(d_buf, (source_ptr+ud_sz), (remaining_size-ud_sz)); + msg->command = COMMAND_FCS_CRYPTO_MAC_VERIFY_FINALIZE_SMMU; + dev_dbg(dev, "Finalize. data_size=%d, ud_sz=%ld\n", data_size, + ud_sz); + + } + + src_addr = get_buffer_addr(SRC_BUFFER_STARTING_L2_IDX); + + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = ud_sz; + msg->payload = &src_addr; + msg->payload_length = data_size; + msg->payload_output = d_buf; + msg->payload_length_output = out_sz; + priv->client.receive_cb = fcs_attestation_callback; + + context_bank_enable(priv); + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + /* to query the complete status */ + msg->payload = ps_buf; + msg->payload_length = SMMU_BUF_SIZE; + msg->command = COMMAND_POLL_SERVICE_STATUS; + priv->client.receive_cb = fcs_data_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_COMPLETED_TIMEOUT); + if (!ret && !priv->status) { + if (!priv->kbuf) { + dev_err(dev, "unregconize response\n"); + context_bank_disable(priv); + fcs_close_services(priv, d_buf, ps_buf); + return -EFAULT; + } + } + } else { + data->com_paras.s_mac_data.dst = NULL; + data->com_paras.s_mac_data.dst_size = 0; + dev_err(dev, "unregconize response. ret=%d. 
status=%d\n", + ret, priv->status); + context_bank_disable(priv); + fcs_close_services(priv, ps_buf, d_buf); + return -EFAULT; + } + + remaining_size -= data_size; + if (remaining_size == 0) { + dev_dbg(dev, "Crypto get verify finish sending\n"); + memcpy(data->com_paras.s_mac_data.dst, priv->kbuf, priv->size); + data->com_paras.s_mac_data.dst_size = priv->size; + } + + context_bank_disable(priv); + invalidate_smmu_tlb_entries(priv); + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, ps_buf, d_buf); + break; + + case INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_SMMU: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + sid = data->com_paras.ecdsa_data.sid; + cid = data->com_paras.ecdsa_data.cid; + kuid = data->com_paras.ecdsa_data.kuid; + in_sz = data->com_paras.ecdsa_data.src_size; + out_sz = data->com_paras.ecdsa_data.dst_size; + + msg->command = COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_INIT; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = kuid; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = data->com_paras.ecdsa_data.ecc_algorithm & 0xF; + priv->client.receive_cb = fcs_vab_callback; + + if (data->com_paras.ecdsa_data.init == true) { + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret || priv->status) { + dev_err(dev, "failed to send the cmd=%d,ret=%d, status=%d\n", + COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_INIT, + ret, priv->status); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + } + + remaining_size = data->com_paras.ecdsa_data.src_size; + + d_buf = stratix10_svc_allocate_memory(priv->chan, out_sz); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate destation buf\n"); + fcs_close_services(priv, NULL, NULL); + return -ENOMEM; + } + + ps_buf = 
stratix10_svc_allocate_memory(priv->chan, SMMU_BUF_SIZE); + if (IS_ERR(ps_buf)) { + dev_err(dev, "failed to allocate p-status buf\n"); + fcs_close_services(priv, d_buf, NULL); + return -ENOMEM; + } + + if (remaining_size > ECDSA_CMD_MAX_SZ) { + msg->command = + COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE_SMMU; + data_size = ECDSA_CMD_MAX_SZ; + dev_dbg(dev, "ECDSA data sign update stage. data_size=%d\n", + data_size); + } else { + msg->command = + COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE_SMMU; + data_size = remaining_size; + dev_dbg(dev, "ECDSA data sign finalize stage. data_size=%d\n", + data_size); + } + + src_addr = get_buffer_addr(SRC_BUFFER_STARTING_L2_IDX); + + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->payload = &src_addr; + msg->payload_length = data_size; + msg->payload_output = d_buf; + msg->payload_length_output = out_sz; + priv->client.receive_cb = fcs_attestation_callback; + + context_bank_enable(priv); + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + msg->payload = ps_buf; + msg->payload_length = SMMU_BUF_SIZE; + msg->command = COMMAND_POLL_SERVICE_STATUS; + priv->client.receive_cb = fcs_data_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_COMPLETED_TIMEOUT); + if (!ret && !priv->status) { + if (!priv->kbuf) { + dev_err(dev, "unregconize response\n"); + context_bank_disable(priv); + fcs_close_services(priv, d_buf, ps_buf); + return -EFAULT; + } + } + } else { + data->com_paras.ecdsa_data.dst = NULL; + data->com_paras.ecdsa_data.dst_size = 0; + dev_err(dev, "unregconize response. ret=%d. 
status=%d\n", + ret, priv->status); + context_bank_disable(priv); + fcs_close_services(priv, d_buf, ps_buf); + return -EFAULT; + } + + remaining_size -= data_size; + if (remaining_size == 0) { + dev_dbg(dev, "ECDSA data sign finish sending\n"); + memcpy(data->com_paras.ecdsa_data.dst, priv->kbuf, priv->size); + data->com_paras.ecdsa_data.dst_size = priv->size; + } + + context_bank_disable(priv); + invalidate_smmu_tlb_entries(priv); + + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, ps_buf, d_buf); + break; + + case INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_SMMU: + if (copy_from_user(data, (void __user *)arg, sizeof(*data))) { + dev_err(dev, "failure on copy_from_user\n"); + mutex_unlock(&priv->lock); + return -EFAULT; + } + + sid = data->com_paras.ecdsa_sha2_data.sid; + cid = data->com_paras.ecdsa_sha2_data.cid; + kuid = data->com_paras.ecdsa_sha2_data.kuid; + in_sz = data->com_paras.ecdsa_sha2_data.src_size; + out_sz = data->com_paras.ecdsa_sha2_data.dst_size; + ud_sz = data->com_paras.ecdsa_sha2_data.userdata_sz; + + msg->command = COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_INIT; + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = kuid; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = data->com_paras.ecdsa_sha2_data.ecc_algorithm & 0xF; + priv->client.receive_cb = fcs_vab_callback; + + if (data->com_paras.ecdsa_sha2_data.init == true) { + ret = fcs_request_service(priv, (void *)msg, + FCS_REQUEST_TIMEOUT); + if (ret || priv->status) { + dev_err(dev, "failed to send the cmd=%d,ret=%d, status=%d\n", + COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_INIT, + ret, priv->status); + fcs_close_services(priv, NULL, NULL); + return -EFAULT; + } + } + + remaining_size = data->com_paras.ecdsa_sha2_data.src_size; + + d_buf = stratix10_svc_allocate_memory(priv->chan, SMMU_BUF_SIZE); + if (IS_ERR(d_buf)) { + dev_err(dev, "failed allocate 
destation buf\n"); + fcs_close_services(priv, NULL, NULL); + return -ENOMEM; + } + + ps_buf = stratix10_svc_allocate_memory(priv->chan, SMMU_BUF_SIZE); + if (IS_ERR(ps_buf)) { + dev_err(dev, "failed to allocate p-status buf\n"); + fcs_close_services(priv, d_buf, NULL); + return -ENOMEM; + } + + if (remaining_size > ECDSA_CMD_MAX_SZ) { + if (data->com_paras.s_mac_data.userdata_sz >= ECDSA_CMD_MAX_SZ) { + data_size = ECDSA_CMD_MAX_SZ; + ud_sz = ECDSA_CMD_MAX_SZ; + } else { + data_size = data->com_paras.ecdsa_sha2_data.userdata_sz; + ud_sz = data->com_paras.ecdsa_sha2_data.userdata_sz; + } + msg->command = COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_UPDATE_SMMU; + } else { + data_size = remaining_size; + ud_sz = data->com_paras.ecdsa_sha2_data.userdata_sz; + memcpy(d_buf, (source_ptr+ud_sz), (remaining_size-ud_sz)); + msg->command = COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_FINALIZE_SMMU; + dev_dbg(dev, "Finalize. data_size=%d, ud_sz=%ld\n", data_size, + ud_sz); + } + + src_addr = get_buffer_addr(SRC_BUFFER_STARTING_L2_IDX); + + msg->arg[0] = sid; + msg->arg[1] = cid; + msg->arg[2] = ud_sz; + msg->payload = &src_addr; + msg->payload_length = data_size; + msg->payload_output = d_buf; + msg->payload_length_output = out_sz; + priv->client.receive_cb = fcs_attestation_callback; + + context_bank_enable(priv); + + ret = fcs_request_service(priv, (void *)msg, + 10 * FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + /* to query the complete status */ + msg->payload = ps_buf; + msg->payload_length = SMMU_BUF_SIZE; + msg->command = COMMAND_POLL_SERVICE_STATUS; + priv->client.receive_cb = fcs_data_callback; + + ret = fcs_request_service(priv, (void *)msg, + FCS_COMPLETED_TIMEOUT); + if (!ret && !priv->status) { + if (!priv->kbuf) { + dev_err(dev, "unregconize response\n"); + context_bank_disable(priv); + fcs_close_services(priv, d_buf, ps_buf); + return -EFAULT; + } + } + } else { + data->com_paras.ecdsa_sha2_data.dst = NULL; + data->com_paras.ecdsa_sha2_data.dst_size = 0; + 
dev_err(dev, "unregconize response. ret=%d. status=%d\n", + ret, priv->status); + context_bank_disable(priv); + fcs_close_services(priv, d_buf, ps_buf); + return -EFAULT; + } + + remaining_size -= data_size; + if (remaining_size == 0) { + dev_dbg(dev, "ECDSA data verify finish sending\n"); + memcpy(data->com_paras.ecdsa_sha2_data.dst, priv->kbuf, + priv->size); + data->com_paras.ecdsa_sha2_data.dst_size = priv->size; + } + + context_bank_disable(priv); + invalidate_smmu_tlb_entries(priv); + data->status = priv->status; + + if (copy_to_user((void __user *)arg, data, sizeof(*data))) { + dev_err(dev, "failure on copy_to_user\n"); + ret = -EFAULT; + } + + fcs_close_services(priv, d_buf, ps_buf); + break; + + default: + mutex_unlock(&priv->lock); + dev_warn(dev, "shouldn't be here [0x%x]\n", cmd); + break; + } + + return ret; +} + +static int fcs_open(struct inode *inode, struct file *file) +{ + pr_debug("%s\n", __func__); + + return 0; +} + +static int fcs_close(struct inode *inode, struct file *file) +{ + + pr_debug("%s\n", __func__); + + return 0; +} + +static int fcs_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct stratix10_svc_client_msg *msg; + struct intel_fcs_priv *priv; + struct device *dev; + void *s_buf; + int ret = 0; + size_t size = 0; + + priv = (struct intel_fcs_priv *)rng->priv; + dev = priv->client.dev; + mutex_lock(&priv->lock); + msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL); + if (!msg) { + dev_err(dev, "failed to allocate msg buffer\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + s_buf = stratix10_svc_allocate_memory(priv->chan, + RANDOM_NUMBER_SIZE); + if (IS_ERR(s_buf)) { + dev_err(dev, "failed to allocate random number buffer\n"); + mutex_unlock(&priv->lock); + return -ENOMEM; + } + + msg->command = COMMAND_FCS_RANDOM_NUMBER_GEN; + msg->payload = s_buf; + msg->payload_length = RANDOM_NUMBER_SIZE; + priv->client.receive_cb = fcs_hwrng_callback; + + ret = fcs_request_service(priv, (void *)msg, + 
FCS_REQUEST_TIMEOUT); + if (!ret && !priv->status) { + if (priv->size && priv->kbuf) { + if (max > priv->size) + size = priv->size; + else + size = max; + + memcpy((uint8_t *)buf, (uint8_t *)priv->kbuf, size); + } + } + + fcs_close_services(priv, s_buf, NULL); + + if (size == 0) + return -ENOTSUPP; + + return size; +} + +static int fcs_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long size, off; + struct page *page; + + if (!source_ptr) { + pr_err("vmalloc failed mmap %s", __func__); + return -ENOMEM; + } + + size = vma->vm_end - vma->vm_start; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vm_flags_set(vma, VM_DONTEXPAND); + for (off = 0; off < size; off += PAGE_SIZE) { + page = vmalloc_to_page(source_ptr + off); + if (vm_insert_page(vma, vma->vm_start + off, page)) + pr_err("vm_insert_page() failed"); + + } + + return 0; +} + +static const struct file_operations fcs_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = fcs_ioctl, + .open = fcs_open, + .release = fcs_close, + .mmap = fcs_mmap +}; + +static int fcs_driver_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct intel_fcs_priv *priv; + int ret, i; + const char *platform; + struct stratix10_svc_client_msg msg; + unsigned long off; + int l2_idx = SRC_BUFFER_STARTING_L2_IDX; + int l3_idx = 0; + uint64_t phys; + unsigned long pfn; + struct page *page; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->client.dev = dev; + priv->client.receive_cb = NULL; + priv->client.priv = priv; + priv->kbuf = NULL; + priv->size = 0; + priv->status = INVALID_STATUS; + priv->cid_low = INVALID_ID; + priv->cid_high = INVALID_ID; + priv->sid = INVALID_ID; + + mutex_init(&priv->lock); + priv->chan = stratix10_svc_request_channel_byname(&priv->client, + SVC_CLIENT_FCS); + if (IS_ERR(priv->chan)) { + dev_err(dev, "couldn't get service channel %s\n", + SVC_CLIENT_FCS); + return PTR_ERR(priv->chan); + } + + 
priv->miscdev.minor = MISC_DYNAMIC_MINOR; + priv->miscdev.name = "fcs"; + priv->miscdev.fops = &fcs_fops; + + init_completion(&priv->completion); + + ret = misc_register(&priv->miscdev); + if (ret) { + dev_err(dev, "can't register on minor=%d\n", + MISC_DYNAMIC_MINOR); + goto release_channel; + } + + priv->p_data = of_device_get_match_data(dev); + if (!priv->p_data) + goto cleanup; + + ret = of_property_read_string(dev->of_node, "platform", &platform); + if (ret) { + dev_err(dev, "can't find platform"); + goto cleanup; + } + + /* Proceed only if platform is agilex as + * register addresses are platform specific + */ + if (!strncmp(platform, AGILEX_PLATFORM, AGILEX_PLATFORM_STR_LEN)) { + + msg.command = COMMAND_SMC_SVC_VERSION; + priv->client.receive_cb = fcs_atf_version_smmu_check_callback; + + ret = stratix10_svc_send(priv->chan, &msg); + if (ret) + return -EINVAL; + + ret = wait_for_completion_timeout(&priv->completion, + FCS_REQUEST_TIMEOUT); + if (!ret) { + dev_err(priv->client.dev, "timeout waiting for SMC call\n"); + ret = -ETIMEDOUT; + return ret; + } + + /* Program registers only if ATF support programming + * SMMU secure register addresses + */ + if (priv->status == 0) { + l1_table = kmalloc((sizeof(uint64_t)*512), GFP_KERNEL); + if (!l1_table) + return -ENOMEM; + l2_table = kmalloc((sizeof(uint64_t)*512), GFP_KERNEL); + if (!l2_table) + return -ENOMEM; + + memcpy(l1_table, smmu_sdm_el3_l1_table, (sizeof(uint64_t)*512)); + memcpy(l2_table, smmu_sdm_el3_l2_table, (sizeof(uint64_t)*512)); + + for (i = 0; i < 512; i++) { + l3_tables[i] = kmalloc((sizeof(uint64_t)*512), GFP_KERNEL); + if (!l3_tables[i]) + return -ENOMEM; + memcpy(l3_tables[i], smmu_sdm_l3_def_table, (sizeof(uint64_t)*512)); + } + + if (source_ptr) + vfree(source_ptr); + + source_ptr = vmalloc_user(SMMU_MAX_ALLOC_SZ); + if (!source_ptr) { + pr_err("vmalloc failed probe %s", __func__); + return -ENOMEM; + } + + for (off = 0; off < SMMU_MAX_ALLOC_SZ; off += PAGE_SIZE) { + page = 
vmalloc_to_page(source_ptr + off); + pfn = page_to_pfn(page); + phys = __pa(pfn_to_kaddr(pfn)) + offset_in_page(source_ptr + off); + + if (l3_idx >= 512) { + l2_idx++; + l3_idx = 0; + } + fill_l3_table(phys, l2_idx, l3_idx); + l3_idx++; + } + + intel_fcs_smmu_init(priv); + stratix10_svc_done(priv->chan); + } + } + + /* only register the HW RNG if the platform supports it! */ + if (priv->p_data->have_hwrng) { + /* register hwrng device */ + priv->rng.name = "intel-rng"; + priv->rng.read = fcs_rng_read; + priv->rng.priv = (unsigned long)priv; + + ret = hwrng_register(&priv->rng); + if (ret) { + dev_err(dev, "can't register RNG device (%d)\n", ret); + return ret; + } + } else { + /* Notes of registering /dev/hwrng: + * 1 For now, /dev/hwrng is not supported on Agilex devices + * due to hardware implementation. + * 2 It means On Agilex devices, /dev/hwrng is a dummy node + * without HW backend. You can get the HW RNG function by + * IOCTL command provided from this driver on Agilex devices. + * 3 In the future, it may be implemented in a different way. 
+ */ + dev_notice(dev, "/dev/hwrng is not supported on Agilex devices.\n"); + } + + platform_set_drvdata(pdev, priv); + + return 0; + +cleanup: + misc_deregister(&priv->miscdev); +release_channel: + stratix10_svc_free_channel(priv->chan); + return -ENODEV; +} + +static void fcs_driver_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct intel_fcs_priv *priv = platform_get_drvdata(pdev); + struct stratix10_svc_client_msg msg; + int i, ret; + const char *platform; + + ret = of_property_read_string(dev->of_node, "platform", &platform); + if (ret) + goto no_platform; + + if (!strncmp(platform, AGILEX_PLATFORM, AGILEX_PLATFORM_STR_LEN)) { + msg.command = COMMAND_SMC_SVC_VERSION; + priv->client.receive_cb = fcs_atf_version_smmu_check_callback; + + ret = stratix10_svc_send(priv->chan, &msg); + + ret = wait_for_completion_timeout(&priv->completion, + FCS_REQUEST_TIMEOUT); + + /* Program registers only if ATF support programming + * SMMU secure register addresses + */ + if (priv->status == 0) { + kfree(l1_table); + kfree(l2_table); + + for (i = 0; i < 512; i++) + kfree(l3_tables[i]); + + if (source_ptr) + vfree(source_ptr); + + context_bank_disable(priv); + } + } + +no_platform: + if (priv->p_data->have_hwrng) + hwrng_unregister(&priv->rng); + misc_deregister(&priv->miscdev); + stratix10_svc_free_channel(priv->chan); +} + +/* Note: /dev/hwrng is not supported on Agilex devices now! 
*/ +static const struct socfpga_fcs_data agilex_fcs_data = { + .have_hwrng = false, +}; + +static const struct socfpga_fcs_data n5x_fcs_data = { + .have_hwrng = true, +}; + +static const struct socfpga_fcs_data s10_fcs_data = { + .have_hwrng = true, +}; + +static const struct of_device_id fcs_of_match[] = { + {.compatible = "intel,stratix10-soc-fcs", + .data = &s10_fcs_data + }, + {.compatible = "intel,agilex-soc-fcs", + .data = &agilex_fcs_data + }, + {.compatible = "intel,n5x-soc-fcs", + .data = &n5x_fcs_data + }, + {}, +}; + +static struct platform_driver fcs_driver = { + .probe = fcs_driver_probe, + .remove = fcs_driver_remove, + .driver = { + .name = "intel-fcs", + .of_match_table = of_match_ptr(fcs_of_match), + }, +}; + +MODULE_DEVICE_TABLE(of, fcs_of_match); + +static int __init fcs_init(void) +{ + struct device_node *fw_np; + struct device_node *np; + int ret; + + fw_np = of_find_node_by_name(NULL, "svc"); + if (!fw_np) + return -ENODEV; + + of_node_get(fw_np); + np = of_find_matching_node(fw_np, fcs_of_match); + if (!np) { + of_node_put(fw_np); + return -ENODEV; + } + + of_node_put(np); + ret = of_platform_populate(fw_np, fcs_of_match, NULL, NULL); + of_node_put(fw_np); + if (ret) + return ret; + + return platform_driver_register(&fcs_driver); +} + +static void __exit fcs_exit(void) +{ + return platform_driver_unregister(&fcs_driver); +} + +module_init(fcs_init); +module_exit(fcs_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel FGPA Crypto Services Driver"); +MODULE_AUTHOR("Richard Gong "); diff --git a/drivers/crypto/intel_fcs_smmu.c b/drivers/crypto/intel_fcs_smmu.c new file mode 100644 index 0000000000000..dc31da02b9afc --- /dev/null +++ b/drivers/crypto/intel_fcs_smmu.c @@ -0,0 +1,3130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022, Intel Corporation + */ + + +#include "intel_fcs_smmu.h" + +#define SSD0_REG_ADDR 0xFA004000 +#define SMR8_REG_ADDR 0xFA000820 +#define S2CR8_REG_ADDR 0xFA000c20 +#define CB8_SCTRLR_REG_ADDR 
0xFA028000 +#define CBAR8_REG_ADDR 0xFA001020 +#define TCR_LPAE_REG_ADDR 0xFA028030 +#define CB8_TTBR0_LOW_REG_ADDR 0xFA028020 +#define CB8_TTBR0_HIGH_REG_ADDR 0xFA028024 +#define CB8_PRRR_MIR0_REG_ADDR 0xFA028038 +#define CB8_PRRR_MIR1_REG_ADDR 0xFA02803C +#define TCR2_REG_ADDR 0xFA028010 + +#define INVALIDATE_TBU 0xFFFFFFFF +#define TRANSLATION_TABLE_LOWER_MASK 0xFFFFFFFF +#define TRANSLATION_TABLE_UPPER_BIT_SHIFT 32 +#define TRANSLATION_CONTEXT_BANK_ENABLE 0x1 +#define TRANSLATION_CONTEXT_BANK_DISABLE 0x0 +#define SSD0_VAL 0xF +#define SMR8_VAL 0xFC00000A +#define S2CR8_VAL 0x0b000008 +#define CB8_SCTLR_VAL 0x00000001 +#define CBAR8_VAL 0x00010100 +#define TCR2_VAL 0x00010002 + +#define FCS_SMMU_REQUEST_TIMEOUT (msecs_to_jiffies(SVC_FCS_REQUEST_TIMEOUT_MS)) + +#define PAGE_4KB (0x1000) +#define BLOCK_2MB (0x200000) +#define BLOCK_1GB (0x40000000ul) + +#define SMMU_LOWER_NOT_GLOBAL(bit) (((bit) & 0x1) << 11) +#define SMMU_LOWER_ACCESS_FLAG(flag) (((flag) & 0x1) << 10) +#define SMMU_LOWER_SHARABILITY(sh) (((sh) & 0x3) << 8) +#define SMMU_LOWER_ACCESS_PERM(ap) (((ap) & 0x3) << 6) +#define SMMU_LOWER_NON_SECURE(ns) (((ns) & 0x1) << 5) +#define SMMU_LOWER_ATTR_INDEX(attr) (((attr) & 0x7) << 2) + +#define SMMU_LOWER_SHARABILITY_E_NON (0x0) +#define SMMU_LOWER_SHARABILITY_E_OUTER (0x2) +#define SMMU_LOWER_SHARABILITY_E_INNER (0x3) + +#define SMMU_LOWER_ACCESS_EL3_RW (0 << 2) +#define SMMU_LOWER_ACCESS_EL3_R0 (1 << 2) + +// Attributes to be used only with Table entries +#define SMMU_UPPER_TABLE_DEFAULT ((unsigned long) 0UL) +#define SMMU_UPPER_TABLE_S ((unsigned long) 0UL << 63) +#define SMMU_UPPER_TABLE_NS ((unsigned long) 1UL << 63) +#define SMMU_UPPER_TABLE_AP(ap) ((unsigned long) ((ap) & (0x3)) << 61) +#define SMMU_UPPER_TABLE_XN ((unsigned long) 1UL << 60) +#define SMMU_UPPER_TABLE_PXN ((unsigned long) 1UL << 59) + +// Attributes to be used only with Block entries +#define SMMU_UPPER_BLOCK_DEFAULT ((unsigned long) 0) +#define SMMU_UPPER_BLOCK_UXN_XN (1ul << 54) 
+#define SMMU_UPPER_BLOCK_PXN (1ul << 53) +#define SMMU_UPPER_BLOCK_CONTIGUOUS (1ul << 52) + +// Attributes to be used only with Block entries. No option for Table entries. +#define SMMU_LOWER_MEMORY_EL3 (SMMU_LOWER_ATTR_INDEX(2) | SMMU_LOWER_ACCESS_FLAG(1) | SMMU_LOWER_ACCESS_PERM(0b00)) +#define SMMU_LOWER_MEMORY_EL2 (SMMU_LOWER_ATTR_INDEX(2) | SMMU_LOWER_ACCESS_FLAG(1) | SMMU_LOWER_ACCESS_PERM(0b00)) +/* + * Refer to tiny paragraph in D5.5.1 in AARCHv8 Spec that doesn't + * allow privlidged execution in EL1, when AP is 0b10 + */ +#define SMMU_LOWER_MEMORY_EL0_RW (SMMU_LOWER_ATTR_INDEX(2) | SMMU_LOWER_ACCESS_FLAG(1) | SMMU_LOWER_NON_SECURE(0) | SMMU_LOWER_ACCESS_PERM(0b01)) +//Enable Read/Write Access to Memory in EL0 +#define SMMU_LOWER_MEMORY_EL1_EX (SMMU_LOWER_ATTR_INDEX(2) | SMMU_LOWER_ACCESS_FLAG(1) | SMMU_LOWER_NON_SECURE(0) | SMMU_LOWER_ACCESS_PERM(0b00)) +//Enable Read/Write Access to Memory in EL0 +#define SMMU_LOWER_MEMORY_EL1_RO (SMMU_LOWER_ATTR_INDEX(2) | SMMU_LOWER_ACCESS_FLAG(1) | SMMU_LOWER_ACCESS_PERM(0b10)) +//Enable Read/Write Access to Memory in EL0 +#define SMMU_LOWER_MEMORY_EL1_EL0_RO (SMMU_LOWER_ATTR_INDEX(2) | SMMU_LOWER_ACCESS_FLAG(1) | SMMU_LOWER_ACCESS_PERM(0b11)) + + +#define SMMU_LOWER_MEMORY (SMMU_LOWER_ATTR_INDEX(2) | SMMU_LOWER_ACCESS_FLAG(1) | SMMU_LOWER_ACCESS_PERM(0b00)) +#define SMMU_LOWER_DEVICE (SMMU_LOWER_ATTR_INDEX(1) | SMMU_LOWER_ACCESS_FLAG(1)) +#define SMMU_LOWER_DEVICE_EL0_RW (SMMU_LOWER_ATTR_INDEX(1) | SMMU_LOWER_ACCESS_FLAG(1) | SMMU_LOWER_NON_SECURE(0) | SMMU_LOWER_ACCESS_PERM(0b01)) +#define SMMU_LOWER_STRONG (SMMU_LOWER_ATTR_INDEX(0) | SMMU_LOWER_ACCESS_FLAG(1)) +#define SMMU_LOWER_MEMORY_NONSECURE (SMMU_LOWER_NON_SECURE(1) | SMMU_LOWER_ATTR_INDEX(2) | SMMU_LOWER_ACCESS_FLAG(1)) + +/* +* Shareability for non Memory does not apply. Those locations are automatically marked outer +* shareable. 
+*/ +#define SMMU_LOWER_MEMORY_SHARED_INNER (SMMU_LOWER_MEMORY | SMMU_LOWER_SHARABILITY(SMMU_LOWER_SHARABILITY_E_INNER)) +#define SMMU_LOWER_MEMORY_SHARED_OUTER (SMMU_LOWER_MEMORY | SMMU_LOWER_SHARABILITY(SMMU_LOWER_SHARABILITY_E_OUTER)) +#define SMMU_LOWER_MEMORY_SHARED_OUTER_EL0_RW (SMMU_LOWER_MEMORY_EL0_RW | SMMU_LOWER_SHARABILITY(SMMU_LOWER_SHARABILITY_E_OUTER)) +#define SMMU_LOWER_MEMORY_SHARED_OUTER_EL1_EX (SMMU_LOWER_MEMORY_EL1_EX | SMMU_LOWER_SHARABILITY(SMMU_LOWER_SHARABILITY_E_OUTER)) +#define SMMU_LOWER_MEMORY_SHARED_OUTER_EL1_RO (SMMU_LOWER_MEMORY_EL1_RO | SMMU_LOWER_SHARABILITY(SMMU_LOWER_SHARABILITY_E_OUTER)) +#define SMMU_LOWER_MEMORY_SHARED_OUTER_EL1_EL0_RO (SMMU_LOWER_MEMORY_EL1_EL0_RO | SMMU_LOWER_SHARABILITY(SMMU_LOWER_SHARABILITY_E_OUTER)) +#define SMMU_LOWER_MEMORY_SHARED_OUTER_NS (SMMU_LOWER_MEMORY_NONSECURE | SMMU_LOWER_SHARABILITY(SMMU_LOWER_SHARABILITY_E_OUTER)) + +/* +* LSb 11: Table Descriptor +* LSb 01: Block Entry +* LSb 10: Table Entry +* LSb 00: Ignored +*/ + + +/* +* Level 0 Entries +*/ + +// For L0, output address is [47-39] + +#define SMMU_L0_FAULT_ENTRY(...) ((unsigned long)(0)) +/* + * Emit the possible address so if the fault is transformed into block, + * it would have a default address. Also make the definition interchangeable with BLOCK. + */ +#define SMMU_L1_FAULT_ENTRY(addr, ...) ((unsigned long)(((unsigned long)(addr) & 0xffffc0000000) | 0)) +/* + * Emit the possible address so if the fault is transformed into block, + * it would have a default address. Also make the definition interchangeable with BLOCK. + */ +#define SMMU_L2_FAULT_ENTRY(addr, ...) ((unsigned long)(((unsigned long)(addr) & 0xffffffe00000) | 0)) +/* + * Emit the possible address so if the fault is transformed into block, + * it would have a default address. Also make the definition interchangeable with BLOCK. + */ +#define SMMU_L3_FAULT_ENTRY(addr, ...) 
((unsigned long)(((unsigned long)(addr) & 0xfffffffff000) | 0)) + +//mask off lower 12 bits and or it with table descriptor, OR in upper attributes +#define SMMU_L0_TABLE_ENTRY(table, upper) ((unsigned long)(((upper) & (0xffful << 52)) | ((unsigned long)(table) & 0xfffffffff000) | 3)) +// For L1, next level table is [47-12] => [31-12]. See diagram D4-16. +#define SMMU_L1_TABLE_ENTRY(table, upper) ((unsigned long)(((upper) & (0xffful << 52)) | ((unsigned long)(table) & 0xfffffffff000) | 3)) +// For L2, next level table is [47-12] => [31-12]. See diagram D4-16. Also, L2 table does not have upper attributes. +#define SMMU_L2_TABLE_ENTRY(table, upper) ((unsigned long)(((upper) & (0xffful << 52)) | ((unsigned long)(table) & 0xfffffffff000) | 3)) + +//mask off lower 16 bits and or it with table descriptor, OR in upper attributes +#define SMMU_L0_TABLE_ENTRY_V64(table, upper) ((unsigned long)(((upper) & (0xffful << 52)) | ((unsigned long)(table) & 0xffffffff0000) | 3)) +// For L1, next level table is [47-12] => [31-12]. See diagram D4-16. +#define SMMU_L1_TABLE_ENTRY_V64(table, upper) ((unsigned long)(((upper) & (0xffful << 52)) | ((unsigned long)(table) & 0xffffffff0000) | 3)) +// For L2, next level table is [47-12] => [31-12]. See diagram D4-16. Also, L2 table does not have upper attributes. +#define SMMU_L2_TABLE_ENTRY_V64(table, upper) ((unsigned long)(((upper) & (0xffful << 52)) | ((unsigned long)(table) & 0xffffffff0000) | 3)) + +// For L1, output address is [47-30] => [38-30]. See diagram D4-16. +#define SMMU_L1_BLOCK_ENTRY(block, upper, lower) ((unsigned long)(((upper) & (0xffful << 52)) | ((unsigned long)(block) & 0xffffc0000000) | ((lower) & (0x3ff << 2)) | 1)) +// For L2, output address is [47-21] => [31-21]. See diagram D4-16. +#define SMMU_L2_BLOCK_ENTRY(block, upper, lower) ((unsigned long)(((upper) & (0xffful << 52)) | ((unsigned long)(block) & 0xffffffe00000) | ((lower) & (0x3ff << 2)) | 1)) +// For L3, output address is [47-12] => [31-12]. 
See diagram D4-17. +#define SMMU_L3_BLOCK_ENTRY(block, upper, lower) ((unsigned long)(((upper) & (0xffful << 52)) | ((unsigned long)(block) & 0xfffffffff000) | ((lower) & (0x3ff << 2)) | 3)) + +// For L2, output address is [47-29] => [31-21]. See diagram D4-16. +#define SMMU_L2_BLOCK_ENTRY_V64(block, upper, lower) ((unsigned long)(((upper) & (0xffful << 52)) | ((unsigned long)(block) & 0xffffe0000000) | ((lower) & (0x3ff << 2)) | 1)) +// For L3, output address is [47-16] => [31-12]. See diagram D4-17. +#define SMMU_L3_BLOCK_ENTRY_V64(block, upper, lower) ((unsigned long)(((upper) & (0xffful << 52)) | ((unsigned long)(block) & 0xffffffff0000) | ((lower) & (0x3ff << 2)) | 1)) + +//Ensure Lower Bites are Table, Block Descriptor, or Fault +#define SMMU_L0_IS_FAULT(entry) (((entry) & 0x1ul) == 0x0ul) +#define SMMU_L0_IS_TABLE(entry) (((entry) & 0x3ul) == 0x3ul) + +#define SMMU_L1_IS_FAULT(entry) (((entry) & 0x1ul) == 0x0ul) +#define SMMU_L1_IS_BLOCK(entry) (((entry) & 0x3ul) == 0x1ul) +#define SMMU_L1_IS_TABLE(entry) (((entry) & 0x3ul) == 0x3ul) + +#define SMMU_L2_IS_FAULT(entry) (((entry) & 0x1ul) == 0x0ul) +#define SMMU_L2_IS_BLOCK(entry) (((entry) & 0x3ul) == 0x1ul) +#define SMMU_L2_IS_TABLE(entry) (((entry) & 0x3ul) == 0x3ul) + +#define SMMU_L3_IS_FAULT(entry) (((entry) & 0x1ul) == 0x0ul) +#define SMMU_L3_IS_BLOCK(entry) (((entry) & 0x3ul) == 0x1ul) + +#define TCR_RES1 ((1 << 31) | (0 << 23)) // Reserved, set as 1. +#define TCR_PS_1TiB (24 << 16) // Physical Address Size of 1 TiB maximum. +#define TCR_TG0_4KiB (0x0 << 14) // Granule size of 4 KiB +#define TCR_SH0_OUTER (0x2 << 12) // Shareability for TTBR0 +#define TCR_ORGN0_WBA (0x1 << 10) // Outer cacheablility +#define TCR_IRGN0_WBA (0x1 << 8) // Inner cacheability +#define TCR_T0SZ_1TiB (24 << 0) // Size offset of memory region as 2^(64 - val), set for 1 TiB. 
+ +#define GEN_MAIR_INDEX(index, value) (((value) & 0xff) << (8 * index)) + +/* + * Strongly ordered memory: + * - no Gather, no Reorder, no Early write acknowledgements. + * #define MAIR_ATTR_DEVICE_nGnRnE (0x00) + * + * Device memory: + * - no Gather, no Reorder, Early write acknowledgements. + * This is suitable for performant device memory. + * #define MAIR_ATTR_DEVICE_nGnRE (0x04) + * + * Cacheable memory, most performant options: + * - Inner and Outer are: Write Back Allocate for Reads and Writes. + */ + +#define OUTER_WB (1 << 6) // Write back +#define OUTER_NONTRANS (1 << 7) // non-transient (hint to the cache, ignored on Cortex-A53) +#define OUTER_WALLOC (1 << 4) // Write allocate +#define OUTER_RALLOC (1 << 5) // Read allocate +#define INNER_WB (1 << 2) +#define INNER_NONTRANS (1 << 3) +#define INNER_WALLOC (1 << 0) +#define INNER_RALLOC (1 << 1) + +#define MAIR_ATTR_MEMORY_IO_WBRWA (OUTER_WB | OUTER_NONTRANS | OUTER_WALLOC | OUTER_RALLOC |\ + INNER_WB | INNER_NONTRANS | INNER_WALLOC | INNER_RALLOC) + +// Default MAIR attribute +#define MAIR_ATTR_DEFAULT MAIR_ATTR_DEVICE_nGnRnE + +uint64_t *l1_table; +uint64_t *l2_table; +uint64_t *l3_tables[512]; + +uint64_t smmu_sdm_el3_l1_table[512]__attribute__ ((aligned(0x1000))) = { + + SMMU_L1_BLOCK_ENTRY((0 * BLOCK_1GB), (SMMU_UPPER_BLOCK_DEFAULT), SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L1_BLOCK_ENTRY((1 * BLOCK_1GB), (SMMU_UPPER_BLOCK_DEFAULT), 0), + SMMU_L1_BLOCK_ENTRY((2 * BLOCK_1GB), (SMMU_UPPER_BLOCK_DEFAULT), 0), + SMMU_L1_BLOCK_ENTRY((3 * BLOCK_1GB), (SMMU_UPPER_BLOCK_DEFAULT), 0), + SMMU_L1_BLOCK_ENTRY((4 * BLOCK_1GB), (SMMU_UPPER_BLOCK_DEFAULT), 0), + SMMU_L1_BLOCK_ENTRY((5 * BLOCK_1GB), (SMMU_UPPER_BLOCK_DEFAULT), 0), + SMMU_L1_BLOCK_ENTRY((6 * BLOCK_1GB), (SMMU_UPPER_BLOCK_DEFAULT), 0), + SMMU_L1_BLOCK_ENTRY((7 * BLOCK_1GB), (SMMU_UPPER_BLOCK_DEFAULT), 0), + SMMU_L1_BLOCK_ENTRY((8 * BLOCK_1GB), (SMMU_UPPER_BLOCK_DEFAULT), 0), + SMMU_L1_BLOCK_ENTRY((9 * BLOCK_1GB), (SMMU_UPPER_BLOCK_DEFAULT), 0), + + 
SMMU_L1_FAULT_ENTRY((10 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((11 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((12 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((13 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((14 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((15 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((16 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((17 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((18 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((19 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((20 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((21 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((22 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((23 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((24 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((25 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((26 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((27 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((28 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((29 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((30 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((31 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((32 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((33 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((34 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((35 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((36 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((37 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((38 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((39 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((40 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((41 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((42 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((43 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((44 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((45 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((46 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((47 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((48 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((49 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((50 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((51 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((52 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((53 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((54 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((55 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((56 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((57 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((58 
* BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((59 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((60 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((61 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((62 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((63 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((64 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((65 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((66 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((67 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((68 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((69 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((70 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((71 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((72 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((73 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((74 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((75 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((76 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((77 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((78 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((79 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((80 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((81 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((82 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((83 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((84 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((85 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((86 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((87 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((88 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((89 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((90 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((91 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((92 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((93 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((94 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((95 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((96 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((97 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((98 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((99 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((100 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((101 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((102 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((103 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((104 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((105 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((106 * BLOCK_1GB)), 
+ SMMU_L1_FAULT_ENTRY((107 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((108 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((109 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((110 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((111 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((112 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((113 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((114 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((115 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((116 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((117 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((118 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((119 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((120 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((121 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((122 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((123 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((124 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((125 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((126 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((127 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((128 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((129 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((130 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((131 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((132 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((133 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((134 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((135 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((136 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((137 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((138 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((139 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((140 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((141 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((142 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((143 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((144 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((145 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((146 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((147 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((148 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((149 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((150 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((151 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((152 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((153 * BLOCK_1GB)), + 
SMMU_L1_FAULT_ENTRY((154 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((155 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((156 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((157 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((158 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((159 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((160 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((161 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((162 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((163 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((164 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((165 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((166 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((167 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((168 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((169 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((170 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((171 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((172 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((173 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((174 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((175 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((176 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((177 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((178 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((179 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((180 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((181 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((182 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((183 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((184 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((185 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((186 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((187 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((188 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((189 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((190 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((191 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((192 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((193 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((194 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((195 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((196 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((197 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((198 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((199 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((200 * BLOCK_1GB)), + 
SMMU_L1_FAULT_ENTRY((201 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((202 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((203 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((204 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((205 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((206 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((207 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((208 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((209 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((210 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((211 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((212 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((213 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((214 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((215 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((216 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((217 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((218 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((219 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((220 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((221 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((222 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((223 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((224 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((225 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((226 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((227 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((228 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((229 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((230 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((231 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((232 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((233 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((234 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((235 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((236 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((237 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((238 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((239 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((240 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((241 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((242 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((243 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((244 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((245 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((246 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((247 * BLOCK_1GB)), + 
SMMU_L1_FAULT_ENTRY((248 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((249 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((250 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((251 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((252 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((253 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((254 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((255 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((256 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((257 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((258 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((259 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((260 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((261 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((262 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((263 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((264 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((265 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((266 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((267 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((268 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((269 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((270 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((271 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((272 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((273 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((274 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((275 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((276 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((277 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((278 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((279 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((280 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((281 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((282 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((283 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((284 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((285 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((286 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((287 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((288 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((289 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((290 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((291 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((292 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((293 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((294 * BLOCK_1GB)), + 
SMMU_L1_FAULT_ENTRY((295 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((296 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((297 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((298 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((299 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((300 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((301 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((302 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((303 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((304 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((305 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((306 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((307 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((308 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((309 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((310 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((311 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((312 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((313 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((314 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((315 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((316 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((317 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((318 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((319 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((320 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((321 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((322 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((323 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((324 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((325 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((326 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((327 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((328 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((329 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((330 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((331 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((332 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((333 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((334 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((335 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((336 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((337 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((338 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((339 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((340 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((341 * BLOCK_1GB)), + 
SMMU_L1_FAULT_ENTRY((342 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((343 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((344 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((345 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((346 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((347 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((348 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((349 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((350 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((351 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((352 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((353 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((354 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((355 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((356 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((357 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((358 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((359 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((360 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((361 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((362 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((363 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((364 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((365 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((366 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((367 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((368 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((369 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((370 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((371 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((372 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((373 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((374 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((375 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((376 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((377 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((378 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((379 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((380 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((381 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((382 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((383 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((384 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((385 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((386 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((387 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((388 * BLOCK_1GB)), + 
SMMU_L1_FAULT_ENTRY((389 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((390 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((391 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((392 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((393 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((394 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((395 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((396 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((397 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((398 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((399 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((400 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((401 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((402 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((403 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((404 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((405 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((406 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((407 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((408 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((409 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((410 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((411 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((412 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((413 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((414 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((415 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((416 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((417 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((418 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((419 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((420 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((421 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((422 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((423 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((424 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((425 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((426 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((427 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((428 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((429 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((430 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((431 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((432 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((433 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((434 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((435 * BLOCK_1GB)), + 
SMMU_L1_FAULT_ENTRY((436 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((437 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((438 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((439 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((440 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((441 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((442 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((443 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((444 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((445 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((446 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((447 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((448 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((449 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((450 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((451 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((452 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((453 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((454 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((455 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((456 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((457 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((458 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((459 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((460 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((461 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((462 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((463 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((464 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((465 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((466 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((467 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((468 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((469 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((470 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((471 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((472 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((473 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((474 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((475 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((476 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((477 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((478 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((479 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((480 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((481 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((482 * BLOCK_1GB)), + 
SMMU_L1_FAULT_ENTRY((483 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((484 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((485 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((486 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((487 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((488 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((489 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((490 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((491 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((492 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((493 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((494 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((495 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((496 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((497 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((498 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((499 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((500 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((501 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((502 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((503 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((504 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((505 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((506 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((507 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((508 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((509 * BLOCK_1GB)), + + SMMU_L1_FAULT_ENTRY((510 * BLOCK_1GB)), + SMMU_L1_FAULT_ENTRY((511 * BLOCK_1GB)), + +}; + +uint64_t smmu_sdm_el3_l2_table[512] __attribute__ ((aligned(0x1000))) = { + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (0 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (1 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (2 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (3 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (4 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (5 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + 
SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (6 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (7 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), +//8 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (8 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (9 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (10 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (11 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (12 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (13 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (14 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (15 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), +//16 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (16 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (17 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (18 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (19 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (20 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (21 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (22 * BLOCK_2MB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (23 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), +//24 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (24 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (25 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (26 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (27 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (28 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (29 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (30 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (31 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), +//32 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (32 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (33 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (34 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (35 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (36 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (37 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (38 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + 
(39 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//40 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (40 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (41 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (42 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (43 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (44 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (45 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (46 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (47 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//48 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (48 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (49 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (50 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (51 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (52 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (53 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (54 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (55 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//56 + 
SMMU_L2_BLOCK_ENTRY(0x0000000000 + (56 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (57 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (58 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (59 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (60 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (61 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (62 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (63 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//64 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (64 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (65 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (66 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (67 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (68 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (69 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (70 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (71 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//72 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (72 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + 
SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (73 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (74 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (75 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (76 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (77 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (78 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (79 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//80 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (80 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (81 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (82 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (83 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (84 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (85 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (86 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (87 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//88 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (88 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (89 * BLOCK_2MB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (90 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (91 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (92 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (93 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (94 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (95 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//96 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (96 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (97 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (98 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (99 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (100 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (101 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (102 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (103 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//104 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (104 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (105 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + 
SMMU_L2_BLOCK_ENTRY(0x0000000000 + (106 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (107 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (108 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (109 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (110 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (111 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//112 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (112 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (113 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (114 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (115 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (116 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (117 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (118 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (119 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//120 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (120 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (121 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (122 * BLOCK_2MB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (123 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (124 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (125 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (126 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (127 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//128 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (128 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (129 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (130 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (131 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (132 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (133 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (134 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (135 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//136 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (136 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (137 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (138 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + 
SMMU_L2_BLOCK_ENTRY(0x0000000000 + (139 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (140 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (141 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (142 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (143 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//144 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (144 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (145 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (146 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (147 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (148 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (149 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (150 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (151 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//152 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (152 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (153 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (154 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (155 * BLOCK_2MB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (156 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (157 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (158 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (159 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//160 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (160 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (161 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (162 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (163 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (164 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (165 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (166 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (167 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//168 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (168 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (169 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (170 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (171 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + 
SMMU_L2_BLOCK_ENTRY(0x0000000000 + (172 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (173 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (174 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (175 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//176 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (176 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (177 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (178 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (179 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (180 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (181 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (182 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (183 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//184 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (184 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (185 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (186 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (187 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (188 * BLOCK_2MB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (189 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (190 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (191 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//192 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (192 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (193 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (194 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (195 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (196 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (197 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (198 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (199 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//200 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (200 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (201 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (202 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (203 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (204 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + 
SMMU_L2_BLOCK_ENTRY(0x0000000000 + (205 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (206 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (207 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//208 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (208 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (209 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (210 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (211 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (212 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (213 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (214 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (215 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//216 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (216 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (217 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (218 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (219 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (220 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (221 * BLOCK_2MB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (222 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (223 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//224 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (224 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (225 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (226 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (227 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (228 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (229 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (230 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (231 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//232 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (232 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (233 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (234 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (235 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (236 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (237 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + 
SMMU_L2_BLOCK_ENTRY(0x0000000000 + (238 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (239 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//240 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (240 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (241 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (242 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (243 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (244 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (245 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (246 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (247 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//248 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (248 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (249 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (250 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (251 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (252 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (253 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (254 * BLOCK_2MB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (255 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//256 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (256 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (257 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (258 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (259 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (260 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (261 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (262 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (263 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//264 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (264 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (265 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (266 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (267 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (268 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (269 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (270 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + 
SMMU_L2_BLOCK_ENTRY(0x0000000000 + (271 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//272 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (272 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (273 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (274 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (275 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (276 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (277 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (278 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (279 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//280 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (280 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (281 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (282 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (283 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (284 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (285 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (286 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (287 * BLOCK_2MB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//288 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (288 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (289 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (290 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (291 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (292 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (293 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (294 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (295 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//296 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (296 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (297 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (298 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (299 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (300 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (301 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (302 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (303 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//304 + 
SMMU_L2_BLOCK_ENTRY(0x0000000000 + (304 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (305 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (306 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (307 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (308 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (309 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (310 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (311 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//312 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (312 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (313 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (314 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (315 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (316 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (317 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (318 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (319 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//320 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (320 * BLOCK_2MB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (321 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (322 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (323 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (324 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (325 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (326 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (327 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//328 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (328 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (329 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (330 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (331 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (332 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (333 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (334 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (335 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//336 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (336 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + 
SMMU_L2_BLOCK_ENTRY(0x0000000000 + (337 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (338 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (339 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (340 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (341 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (342 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (343 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//344 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (344 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (345 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (346 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (347 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (348 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (349 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (350 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (351 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//352 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (352 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (353 * BLOCK_2MB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (354 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (355 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (356 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (357 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (358 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (359 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//360 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (360 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (361 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (362 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (363 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (364 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (365 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (366 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (367 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//368 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (368 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (369 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + 
SMMU_L2_BLOCK_ENTRY(0x0000000000 + (370 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (371 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (372 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (373 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (374 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (375 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//376 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (376 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (377 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (378 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (379 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (380 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (381 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (382 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (383 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//384 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (384 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (385 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (386 * BLOCK_2MB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (387 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (388 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (389 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (390 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (391 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//392 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (392 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (393 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (394 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (395 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (396 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (397 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (398 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (399 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//400 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (400 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (401 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (402 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + 
SMMU_L2_BLOCK_ENTRY(0x0000000000 + (403 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (404 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (405 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (406 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (407 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//408 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (408 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (409 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (410 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (411 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (412 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (413 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (414 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (415 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//416 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (416 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (417 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (418 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (419 * BLOCK_2MB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (420 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (421 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (422 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (423 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//424 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (424 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (425 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (426 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (427 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (428 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (429 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (430 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (431 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//432 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (432 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (433 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (434 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (435 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + 
SMMU_L2_BLOCK_ENTRY(0x0000000000 + (436 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (437 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (438 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (439 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//440 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (440 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (441 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (442 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (443 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (444 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (445 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (446 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (447 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//448 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (448 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (449 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (450 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (451 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (452 * BLOCK_2MB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (453 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (454 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (455 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//456 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (456 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (457 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (458 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (459 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (460 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (461 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (462 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (463 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//464 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (464 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (465 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (466 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (467 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (468 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + 
SMMU_L2_BLOCK_ENTRY(0x0000000000 + (469 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (470 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (471 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//472 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (472 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (473 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (474 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (475 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (476 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (477 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (478 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (479 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//480 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (480 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (481 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (482 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (483 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (484 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (485 * BLOCK_2MB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (486 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (487 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//488 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (488 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (489 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (490 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (491 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (492 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (493 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (494 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (495 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//496 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (496 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (497 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (498 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (499 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (500 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (501 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + 
SMMU_L2_BLOCK_ENTRY(0x0000000000 + (502 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (503 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +//504 + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (504 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (505 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (506 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (507 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (508 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (509 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (510 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L2_BLOCK_ENTRY(0x0000000000 + (511 * BLOCK_2MB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER) +}; + +uint64_t smmu_sdm_l3_def_table[512] __attribute__ ((aligned(0x1000))) = { + SMMU_L3_BLOCK_ENTRY(0x00000000 + (0 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (1 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (2 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (3 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (4 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (5 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (6 
* PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (7 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (8 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (9 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (10 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (11 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (12 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (13 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (14 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (15 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (16 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (17 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (18 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (19 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (20 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (21 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (22 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (23 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + 
SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (24 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (25 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (26 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (27 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (28 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (29 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (30 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (31 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (32 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (33 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (34 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (35 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (36 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (37 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (38 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (39 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (40 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + 
SMMU_L3_BLOCK_ENTRY(0x00000000 + (41 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (42 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (43 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (44 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (45 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (46 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (47 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (48 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (49 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (50 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (51 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (52 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (53 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (54 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (55 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (56 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (57 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (58 * 
PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (59 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (60 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (61 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (62 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (63 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (64 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (65 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (66 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (67 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (68 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (69 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (70 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (71 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (72 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (73 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (74 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (75 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + 
SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (76 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (77 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (78 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (79 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (80 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (81 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (82 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (83 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (84 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (85 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (86 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (87 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (88 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (89 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (90 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (91 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (92 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + 
SMMU_L3_BLOCK_ENTRY(0x00000000 + (93 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (94 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (95 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (96 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (97 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (98 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (99 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (100 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (101 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (102 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (103 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (104 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (105 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (106 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (107 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (108 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (109 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 
+ (110 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (111 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (112 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (113 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (114 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (115 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (116 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (117 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (118 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (119 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (120 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (121 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (122 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (123 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (124 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (125 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (126 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (127 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (128 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (129 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (130 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (131 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (132 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (133 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (134 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (135 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (136 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (137 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (138 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (139 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (140 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (141 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (142 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (143 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (144 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (145 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (146 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (147 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (148 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (149 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (150 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (151 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (152 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (153 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (154 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (155 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (156 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (157 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (158 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (159 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (160 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (161 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (162 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (163 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (164 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (165 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (166 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (167 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (168 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (169 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (170 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (171 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (172 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (173 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (174 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (175 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (176 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (177 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (178 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (179 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (180 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (181 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (182 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (183 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (184 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (185 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (186 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (187 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (188 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (189 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (190 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (191 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (192 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (193 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (194 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (195 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (196 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (197 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (198 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (199 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (200 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (201 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (202 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (203 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (204 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (205 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (206 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (207 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (208 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (209 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (210 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (211 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (212 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (213 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (214 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (215 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (216 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (217 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (218 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (219 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (220 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (221 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (222 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (223 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (224 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (225 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (226 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (227 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (228 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (229 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (230 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (231 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (232 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (233 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (234 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (235 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (236 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (237 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (238 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (239 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (240 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (241 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (242 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (243 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (244 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (245 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (246 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (247 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (248 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (249 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (250 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (251 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (252 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (253 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (254 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (255 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (256 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (257 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (258 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (259 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (260 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (261 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (262 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (263 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (264 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (265 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (266 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (267 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (268 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (269 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (270 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (271 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (272 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (273 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (274 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (275 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (276 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (277 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (278 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (279 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (280 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (281 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (282 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (283 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (284 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (285 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (286 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (287 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (288 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (289 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (290 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (291 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (292 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (293 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (294 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (295 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (296 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (297 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (298 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (299 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (300 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (301 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (302 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (303 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (304 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (305 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (306 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (307 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (308 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (309 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (310 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (311 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (312 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (313 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (314 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (315 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (316 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (317 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (318 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (319 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (320 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (321 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (322 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (323 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (324 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (325 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (326 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (327 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (328 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (329 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (330 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (331 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (332 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (333 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (334 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (335 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (336 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (337 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (338 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (339 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (340 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (341 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (342 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (343 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (344 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (345 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (346 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (347 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (348 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (349 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (350 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (351 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (352 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (353 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (354 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (355 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (356 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (357 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (358 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (359 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (360 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (361 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (362 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (363 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (364 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (365 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (366 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (367 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (368 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (369 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (370 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (371 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (372 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (373 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (374 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (375 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (376 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (377 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (378 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (379 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (380 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (381 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (382 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (383 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (384 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (385 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (386 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (387 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (388 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (389 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (390 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (391 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (392 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (393 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (394 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (395 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (396 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (397 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (398 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (399 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (400 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (401 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (402 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (403 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (404 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (405 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (406 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (407 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (408 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (409 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (410 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (411 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (412 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (413 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (414 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (415 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (416 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (417 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (418 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (419 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (420 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (421 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (422 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (423 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (424 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (425 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (426 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (427 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (428 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (429 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (430 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (431 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (432 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (433 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (434 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (435 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (436 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (437 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (438 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (439 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (440 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (441 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (442 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (443 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (444 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (445 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (446 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (447 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (448 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (449 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (450 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (451 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (452 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (453 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (454 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (455 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (456 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (457 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (458 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (459 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (460 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (461 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (462 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (463 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (464 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (465 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (466 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (467 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (468 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (469 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (470 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (471 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (472 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (473 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (474 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (475 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (476 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (477 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (478 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (479 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (480 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (481 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (482 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (483 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (484 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (485 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (486 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (487 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (488 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (489 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (490 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (491 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (492 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (493 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (494 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (495 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (496 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (497 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (498 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (499 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (500 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (501 * PAGE_4KB), 
(SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (502 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (503 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (504 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (505 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (506 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (507 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (508 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (509 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + + SMMU_L3_BLOCK_ENTRY(0x00000000 + (510 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + SMMU_L3_BLOCK_ENTRY(0x00000000 + (511 * PAGE_4KB), (SMMU_UPPER_BLOCK_DEFAULT), + SMMU_LOWER_MEMORY_SHARED_OUTER), + +}; + +static void write_secure_reg_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct intel_fcs_priv *priv = client->priv; + + if (data->status == BIT(SVC_STATUS_OK)) + dev_dbg(client->dev, "write to secure reg successful"); + else if (data->status == BIT(SVC_STATUS_ERROR)) + dev_dbg(client->dev, "write to secure reg failed"); + else + dev_err(client->dev, "unhandled exception"); + + complete(&priv->completion); +} + +int smmu_program_reg(struct intel_fcs_priv *priv, uint32_t reg_add, uint32_t reg_value) +{ + struct stratix10_svc_chan *chan = priv->chan; + struct stratix10_svc_client_msg msg; + int ret; + + msg.command = COMMAND_WRITE_TO_SECURE_REG; + + msg.arg[0] = reg_add; + msg.arg[1] = reg_value; + + 
priv->client.receive_cb = write_secure_reg_callback; + + ret = stratix10_svc_send(chan, &msg); + + ret = wait_for_completion_timeout(&priv->completion, + FCS_SMMU_REQUEST_TIMEOUT); + + return ret; +} + +void intel_fcs_smmu_init(struct intel_fcs_priv *priv) +{ + uint64_t ttbr_paddr_l1; + uint64_t ttbr_paddr_l1_lower; + uint64_t ttbr_paddr_l1_upper; + uint64_t ttbr_paddr_l2; + uint32_t tcr_value = 0; + uint32_t mair_value = 0; + + tcr_value = (uint32_t) (TCR_RES1 | TCR_PS_1TiB | TCR_TG0_4KiB | TCR_IRGN0_WBA | + TCR_T0SZ_1TiB | TCR_SH0_OUTER | TCR_ORGN0_WBA); + + mair_value = (uint32_t)GEN_MAIR_INDEX(0, MAIR_ATTR_DEVICE_nGnRnE) | + GEN_MAIR_INDEX(1, MAIR_ATTR_DEVICE_nGnRE) | + GEN_MAIR_INDEX(2, MAIR_ATTR_MEMORY_IO_WBRWA); + + ttbr_paddr_l2 = virt_to_phys(l2_table); + l1_table[0] = SMMU_L1_TABLE_ENTRY(ttbr_paddr_l2, SMMU_UPPER_TABLE_DEFAULT); + + ttbr_paddr_l1 = virt_to_phys(l1_table); + ttbr_paddr_l1_lower = ttbr_paddr_l1 & TRANSLATION_TABLE_LOWER_MASK; + ttbr_paddr_l1_upper = ttbr_paddr_l1 >> TRANSLATION_TABLE_UPPER_BIT_SHIFT; + + //SSD0_REG + smmu_program_reg(priv, SSD0_REG_ADDR, SSD0_VAL); + + //SMR8 + smmu_program_reg(priv, SMR8_REG_ADDR, SMR8_VAL); + + //S2CR8 + smmu_program_reg(priv, S2CR8_REG_ADDR, S2CR8_VAL); + + //CBAR8 + smmu_program_reg(priv, CBAR8_REG_ADDR, CBAR8_VAL); + + //TCR_LPAE + smmu_program_reg(priv, TCR_LPAE_REG_ADDR, 0x00000000); + smmu_program_reg(priv, TCR_LPAE_REG_ADDR, tcr_value); + + //CB8_TTBR0_LOW + smmu_program_reg(priv, CB8_TTBR0_LOW_REG_ADDR, ttbr_paddr_l1_lower); + //CB8_TTBR0_HIGH + smmu_program_reg(priv, CB8_TTBR0_HIGH_REG_ADDR, ttbr_paddr_l1_upper); + + //CB8_PRRR_MIR0 + smmu_program_reg(priv, CB8_PRRR_MIR0_REG_ADDR, mair_value); + + //CB8_PRRR_MIR1 + smmu_program_reg(priv, CB8_PRRR_MIR1_REG_ADDR, 0x00000000); + + //TCR2 + smmu_program_reg(priv, TCR2_REG_ADDR, TCR2_VAL); + +} + +void context_bank_enable(struct intel_fcs_priv *priv) +{ + //Disable translation context bank CB8_SCTLR + smmu_program_reg(priv, CB8_SCTRLR_REG_ADDR, 
TRANSLATION_CONTEXT_BANK_ENABLE); +} + +void context_bank_disable(struct intel_fcs_priv *priv) +{ + //Disable translation context bank CB8_SCTLR + smmu_program_reg(priv, CB8_SCTRLR_REG_ADDR, TRANSLATION_CONTEXT_BANK_DISABLE); +} + +void fill_l3_table(uint64_t phys, int l2_idx, int l3_idx) +{ + uint64_t *l3_table_ptr = 0; + + if (l3_idx == 0) + l2_table[l2_idx] = SMMU_L2_TABLE_ENTRY(virt_to_phys(l3_tables[l2_idx]), SMMU_UPPER_TABLE_DEFAULT); + + l3_table_ptr = l3_tables[l2_idx]; + l3_table_ptr[l3_idx] = SMMU_L3_BLOCK_ENTRY(phys, (SMMU_UPPER_BLOCK_DEFAULT), SMMU_LOWER_MEMORY_SHARED_OUTER); +} + +void invalidate_smmu_tlb_entries(struct intel_fcs_priv *priv) +{ + smmu_program_reg(priv, 0xFA000060, INVALIDATE_TBU); + smmu_program_reg(priv, 0xFA000070, INVALIDATE_TBU); +} diff --git a/drivers/crypto/intel_fcs_smmu.h b/drivers/crypto/intel_fcs_smmu.h new file mode 100644 index 0000000000000..3b057f5e65201 --- /dev/null +++ b/drivers/crypto/intel_fcs_smmu.h @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022, Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifndef INTEL_FCS_SMMU_H +#define INTEL_FCS_SMMU_H + +extern uint64_t smmu_sdm_l3_def_table[512]; +extern uint64_t smmu_sdm_el3_l1_table[512]; +extern uint64_t smmu_sdm_el3_l2_table[512]; +extern uint64_t *l2_table; +extern uint64_t *l1_table; +extern uint64_t *l3_tables[512]; + +struct intel_fcs_priv { + struct stratix10_svc_chan *chan; + struct stratix10_svc_client client; + struct completion completion; + struct mutex lock; + struct miscdevice miscdev; + unsigned int status; + void *kbuf; + unsigned int size; + unsigned int cid_low; + unsigned int cid_high; + unsigned int sid; + struct hwrng rng; + const struct socfpga_fcs_data *p_data; +}; + +int smmu_program_reg(struct 
intel_fcs_priv *priv, uint32_t reg_add, uint32_t reg_value); +int smmu_read_reg(struct intel_fcs_priv *priv, uint32_t reg_add); +void invalidate_smmu_tlb_entries(struct intel_fcs_priv *priv); +void intel_fcs_smmu_init(struct intel_fcs_priv *priv); +void context_bank_disable(struct intel_fcs_priv *priv); +void context_bank_enable(struct intel_fcs_priv *priv); +void fill_l3_table(uint64_t phys, int l2_idx, int l3_idx); + +#endif diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index fffafa86d964e..5721904d9da76 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -271,7 +271,12 @@ static void axi_dma_hw_init(struct axi_dma_chip *chip) axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL); axi_chan_disable(&chip->dw->chan[i]); } - ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64)); + + if (device_property_read_bool(chip->dev, "snps,dma-40-bit-mask")) + ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(40)); + else + ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64)); + if (ret) dev_warn(chip->dev, "Unable to set coherent mask\n"); } @@ -435,8 +440,6 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan, return; } - axi_dma_enable(chan->chip); - config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL; config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL; config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 82a9fe88ad54c..ee40b26dc61f1 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -473,6 +473,8 @@ struct pl330_dmac { /* Size of MicroCode buffers for each channel. */ unsigned mcbufsz; + /* True if microcode must reside in cached memory. */ + bool microcode_cached; /* ioremap'ed address of PL330 registers. 
*/ void __iomem *base; /* Populated by the PL330 core driver during pl330_add */ @@ -1911,19 +1913,45 @@ static int dmac_alloc_threads(struct pl330_dmac *pl330) return 0; } +static void *alloc_pl330_microcode_mem(struct pl330_dmac *pl330) +{ + int chans = pl330->pcfg.num_chan; + + if (pl330->microcode_cached) { + pl330->mcode_cpu = kzalloc(chans * pl330->mcbufsz, + GFP_KERNEL); + pl330->mcode_bus = virt_to_phys(pl330->mcode_cpu); + } else + pl330->mcode_cpu = + dma_alloc_coherent(pl330->ddma.dev, + chans * pl330->mcbufsz, + &pl330->mcode_bus, GFP_KERNEL); + + return pl330->mcode_cpu; +} + +static void free_pl330_microcode_mem(struct pl330_dmac *pl330) +{ + int chans = pl330->pcfg.num_chan; + + if (pl330->microcode_cached) + kfree(pl330->mcode_cpu); + else + dma_free_attrs(pl330->ddma.dev, + chans * pl330->mcbufsz, + pl330->mcode_cpu, pl330->mcode_bus, + DMA_ATTR_PRIVILEGED); +} + static int dmac_alloc_resources(struct pl330_dmac *pl330) { - int chans = pl330->pcfg.num_chan; int ret; /* * Alloc MicroCode buffer for 'chans' Channel threads. 
* A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) */ - pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev, - chans * pl330->mcbufsz, - &pl330->mcode_bus, GFP_KERNEL, - DMA_ATTR_PRIVILEGED); + pl330->mcode_cpu = alloc_pl330_microcode_mem(pl330); if (!pl330->mcode_cpu) { dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n", __func__, __LINE__); @@ -1934,10 +1962,7 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330) if (ret) { dev_err(pl330->ddma.dev, "%s:%d Can't to create channels for DMAC!\n", __func__, __LINE__); - dma_free_attrs(pl330->ddma.dev, - chans * pl330->mcbufsz, - pl330->mcode_cpu, pl330->mcode_bus, - DMA_ATTR_PRIVILEGED); + free_pl330_microcode_mem(pl330); return ret; } @@ -2016,9 +2041,7 @@ static void pl330_del(struct pl330_dmac *pl330) /* Free DMAC resources */ dmac_free_threads(pl330); - dma_free_attrs(pl330->ddma.dev, - pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu, - pl330->mcode_bus, DMA_ATTR_PRIVILEGED); + free_pl330_microcode_mem(pl330); } /* forward declaration */ @@ -3028,7 +3051,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pl330->mcbufsz = 0; - /* get quirk */ + if (adev->dev.of_node) + pl330->microcode_cached = + of_property_read_bool(adev->dev.of_node, + "microcode-cached"); + + /* get quirk */ for (i = 0; i < ARRAY_SIZE(of_quirks); i++) if (of_property_read_bool(np, of_quirks[i].quirk)) pl330->quirks |= of_quirks[i].id; diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 81af6c344d6ba..631c31e080614 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -398,6 +398,15 @@ config EDAC_ALTERA Altera SOCs. This is the global enable for the various Altera peripherals. +config EDAC_ALTERA_ARM64_WARM_RESET + bool "Altera ARM64 Peripheral Warm Reset" + depends on EDAC_ALTERA=y && ARM64 + help + Support for Warm Reset on peripheral FIFO double bit errors + on SoCFPGA ARM64 platforms. Otherwise a peripheral FIFO DBE + will cause a cold reset. 
SDRAM and OCRAM DBEs always cause + a cold reset. + config EDAC_ALTERA_SDRAM bool "Altera SDRAM ECC" depends on EDAC_ALTERA=y @@ -407,6 +416,16 @@ config EDAC_ALTERA_SDRAM preloader must initialize the SDRAM before loading the kernel. +config EDAC_ALTERA_IO96B + bool "Altera I096B ECC" + depends on EDAC_ALTERA=y && ARM64 + help + Support for SERR and DERR detection and correction on the + IO96B memory controller interface for Altera SoCFPGA. + + I096B memory controller provides dedicated mailbox registers + for error injection and error information. + config EDAC_ALTERA_L2C bool "Altera L2 Cache ECC" depends on EDAC_ALTERA=y && CACHE_L2X0 @@ -457,6 +476,17 @@ config EDAC_ALTERA_QSPI Support for error detection and correction on the Altera QSPI FIFO Memory for Altera SoCs. +config EDAC_ALTERA_SDM_QSPI + bool "Altera SDM QSPI FIFO ECC" + depends on EDAC_ALTERA=y && ARM64 && SPI_CADENCE_QUADSPI + help + Support for error detection and correction on the + Secure Device Manager (SDM) QSPI FIFO Memory that HPS + access on Agilex5 onwards platform. + + SDM QSPI ECC is always in secure mode, so access to register + is thru ATF using ARM Secure Monitor Call(SMC). + config EDAC_ALTERA_SDMMC bool "Altera SDMMC FIFO ECC" depends on EDAC_ALTERA=y && MMC_DW @@ -464,6 +494,13 @@ config EDAC_ALTERA_SDMMC Support for error detection and correction on the Altera SDMMC FIFO Memory for Altera SoCs. +config EDAC_ALTERA_CRAM_SEU + bool "Altera CRAM SEU" + depends on EDAC_ALTERA=y && 64BIT + help + Support for Single Event Upset(SEU) for FPGA Configuration RAM + on Altera SoCFPGA. 
+ config EDAC_SIFIVE bool "Sifive platform EDAC driver" depends on EDAC=y && SIFIVE_CCACHE diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index fe89f5c4837f4..b37806b7c0e7f 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -99,7 +99,7 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id) if (status & priv->ecc_stat_ce_mask) { regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset, &err_addr); - if (priv->ecc_uecnt_offset) + if (priv->ecc_cecnt_offset) regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset, &err_count); edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count, @@ -349,7 +349,8 @@ static int altr_sdram_probe(struct platform_device *pdev) } /* Arria10 has a 2nd IRQ */ - irq2 = platform_get_irq(pdev, 1); + if (of_machine_is_compatible("altr,socfpga-arria10")) + irq2 = platform_get_irq(pdev, 1); layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; layers[0].size = 1; @@ -657,6 +658,45 @@ static const struct file_operations altr_edac_a10_device_inject_fops __maybe_unu .llseek = generic_file_llseek, }; +#if IS_ENABLED(CONFIG_EDAC_ALTERA_CRAM_SEU) +static ssize_t __maybe_unused +altr_edac_seu_trig(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos); + +static const struct file_operations +altr_edac_cram_inject_fops __maybe_unused = { + .open = simple_open, + .write = altr_edac_seu_trig, + .llseek = generic_file_llseek, +}; +#endif + +#ifdef CONFIG_EDAC_ALTERA_IO96B +static ssize_t __maybe_unused +altr_edac_io96b_device_trig(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos); + +static const struct file_operations +altr_edac_io96b_inject_fops __maybe_unused = { + .open = simple_open, + .write = altr_edac_io96b_device_trig, + .llseek = generic_file_llseek, +}; +#endif + +#if IS_ENABLED(CONFIG_EDAC_ALTERA_SDM_QSPI) +static ssize_t __maybe_unused +altr_edac_sdm_qspi_device_trig(struct file *file, const char __user *user_buf, + size_t count, loff_t 
*ppos); + +static const struct file_operations +altr_edac_sdm_qspi_device_inject_fops __maybe_unused = { + .open = simple_open, + .write = altr_edac_sdm_qspi_device_trig, + .llseek = generic_file_llseek, +}; +#endif + static ssize_t __maybe_unused altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos); @@ -1005,9 +1045,6 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask, } } - /* Interrupt mode set to every SBERR */ - regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST, - ALTR_A10_ECC_INTMODE); /* Enable ECC */ ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base + ALTR_A10_ECC_CTRL_OFST)); @@ -1125,6 +1162,33 @@ static const struct edac_device_prv_data s10_sdramecc_data = { }; #endif /* CONFIG_EDAC_ALTERA_SDRAM */ +/************************IO96B EDAC *************************************/ + +#ifdef CONFIG_EDAC_ALTERA_IO96B +static DEFINE_MUTEX(io96b_mb_mutex); + +static int altr_agilex5_io96b_ecc_init(struct altr_edac_device_dev *device) +{ + u32 ecc_status; + + ecc_status = readl(device->base + IO96B_ECC_ENABLE_INFO_OFST); + ecc_status &= GENMASK(1, 0); + + if (!ecc_status) { + edac_printk(KERN_ERR, EDAC_DEVICE, + "%s: No ECC present or ECC disabled.\n", + device->edac_dev_name); + return -ENODEV; + } + return 0; +} + +static const struct edac_device_prv_data agilex5_io96b_data = { + .setup = altr_agilex5_io96b_ecc_init, + .inject_fops = &altr_edac_io96b_inject_fops, +}; +#endif /* CONFIG_EDAC_ALTERA_IO96B */ + /*********************** OCRAM EDAC Device Functions *********************/ #ifdef CONFIG_EDAC_ALTERA_OCRAM @@ -1190,8 +1254,14 @@ altr_check_ocram_deps_init(struct altr_edac_device_dev *device) /* Verify OCRAM has been initialized */ if (!ecc_test_bits(ALTR_A10_ECC_INITCOMPLETEA, - (base + ALTR_A10_ECC_INITSTAT_OFST))) - return -ENODEV; + (base + ALTR_A10_ECC_INITSTAT_OFST))) { + if (!ecc_test_bits(ALTR_A10_ECC_EN, + (base + ALTR_A10_ECC_CTRL_OFST))) + ecc_set_bits(ALTR_A10_ECC_EN, + (base + 
ALTR_A10_ECC_CTRL_OFST)); + else + return -ENODEV; + } /* Enable IRQ on Single Bit Error */ writel(ALTR_A10_ECC_SERRINTEN, (base + ALTR_A10_ECC_ERRINTENS_OFST)); @@ -1454,8 +1524,76 @@ static const struct edac_device_prv_data a10_usbecc_data = { .inject_fops = &altr_edac_a10_device_inject2_fops, }; +static int __init socfpga_init_usb3_ecc(struct altr_edac_device_dev *device) +{ + writel(ALTR_A10_ECC_EN, device->base + ALTR_A10_ECC_CTRL_OFST); + writel(ALTR_A10_ECC_SERRINTEN, device->base + ALTR_A10_ECC_ERRINTENS_OFST); + + return 0; +} + +static const struct edac_device_prv_data agilex5_usb3ecc_data = { + .setup = socfpga_init_usb3_ecc, + .ce_clear_mask = ALTR_A10_ECC_SERRPENA, + .ue_clear_mask = ALTR_A10_ECC_DERRPENA, + .ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL, + .ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST, + .ce_set_mask = ALTR_A10_ECC_TSERRA, + .ue_set_mask = ALTR_A10_ECC_TDERRA, + .set_err_ofst = ALTR_A10_ECC_INTTEST_OFST, + .ecc_irq_handler = altr_edac_a10_ecc_irq, + .inject_fops = &altr_edac_a10_device_inject2_fops, +}; + #endif /* CONFIG_EDAC_ALTERA_USB */ +#if IS_ENABLED(CONFIG_EDAC_ALTERA_CRAM_SEU) +static irqreturn_t seu_irq_handler(int irq, void *dev_id) +{ + struct altr_edac_device_dev *dci = dev_id; + struct arm_smccc_res result; + + arm_smccc_smc(INTEL_SIP_SMC_SEU_ERR_STATUS, 0, + 0, 0, 0, 0, 0, 0, &result); + + if ((u32)result.a0) { + edac_printk(KERN_ERR, EDAC_DEVICE, + "SEU %s: Count=0x%X, SecAddr=0x%X, ErrData=0x%X\n", + ((u32)result.a2 & BIT(28)) == 0 ? 
"UE" : "CE", + (u32)result.a0, (u32)result.a1, (u32)result.a2); + + if ((u32)result.a2 & BIT(28)) + edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name); + else + edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name); + } + return IRQ_HANDLED; +} + +static ssize_t __maybe_unused +altr_edac_seu_trig(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + u8 trig_type; + struct arm_smccc_res result; + + if (!user_buf || get_user(trig_type, user_buf)) + return -EFAULT; + + if (trig_type == ALTR_UE_TRIGGER_CHAR) + arm_smccc_smc(INTEL_SIP_SMC_SAFE_INJECT_SEU_ERR, + ((uint64_t)SEU_SAFE_INJECT_DB_UE_MSB << 32) | + SEU_SAFE_INJECT_DB_UE_LSB, + 2, 0, 0, 0, 0, 0, &result); + else + arm_smccc_smc(INTEL_SIP_SMC_SAFE_INJECT_SEU_ERR, + SEU_SAFE_INJECT_SB_CE, 2, 0, 0, 0, + 0, 0, &result); + + return count; +} +#endif + /********************** QSPI Device Functions **********************/ #ifdef CONFIG_EDAC_ALTERA_QSPI @@ -1486,6 +1624,104 @@ static const struct edac_device_prv_data a10_qspiecc_data = { #endif /* CONFIG_EDAC_ALTERA_QSPI */ +#if IS_ENABLED(CONFIG_EDAC_ALTERA_SDM_QSPI) + +static ssize_t __maybe_unused +altr_edac_sdm_qspi_device_trig(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct edac_device_ctl_info *edac_dci = file->private_data; + struct altr_edac_device_dev *drvdata = edac_dci->pvt_info; + unsigned long flags; + u8 trig_type; + struct arm_smccc_res result; + + if (!user_buf || get_user(trig_type, user_buf)) + return -EFAULT; + + local_irq_save(flags); + if (trig_type == ALTR_UE_TRIGGER_CHAR) + arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, + drvdata->sdm_qspi_addr + ALTR_A10_ECC_INTTEST_OFST, + ALTR_A10_ECC_TDERRA, 0, 0, 0, 0, 0, &result); + else + arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, + drvdata->sdm_qspi_addr + ALTR_A10_ECC_INTTEST_OFST, + ALTR_A10_ECC_TSERRA, 0, 0, 0, 0, 0, &result); + + /* Ensure the interrupt test bits are set */ + wmb(); + local_irq_restore(flags); + + return 
count; +} + +static int __init socfpga_init_sdm_qspi_ecc(struct altr_edac_device_dev *device) +{ + struct arm_smccc_res result; + u32 read_reg; + int limit = ALTR_A10_ECC_INIT_WATCHDOG_10US; + + /* Disable ECC */ + arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, + device->sdm_qspi_addr + ALTR_A10_ECC_ERRINTENR_OFST, + ALTR_A10_ECC_SERRINTEN, 0, 0, 0, 0, 0, &result); + + arm_smccc_smc(INTEL_SIP_SMC_REG_READ, + device->sdm_qspi_addr + ALTR_A10_ECC_CTRL_OFST, + 0, 0, 0, 0, 0, 0, &result); + read_reg = (unsigned int)result.a1 & 0x00; + + arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, + device->sdm_qspi_addr + ALTR_A10_ECC_CTRL_OFST, + read_reg, 0, 0, 0, 0, 0, &result); + + /* Ensure all writes complete */ + wmb(); + arm_smccc_smc(INTEL_SIP_SMC_REG_READ, + device->sdm_qspi_addr + ALTR_A10_ECC_CTRL_OFST, + 0, 0, 0, 0, 0, 0, &result); + read_reg = (unsigned int)result.a1 | ALTR_A10_ECC_INITA; + + arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, + device->sdm_qspi_addr + ALTR_A10_ECC_CTRL_OFST, + read_reg, 0, 0, 0, 0, 0, &result); + + while (limit--) { + arm_smccc_smc(INTEL_SIP_SMC_REG_READ, + device->sdm_qspi_addr + ALTR_A10_ECC_INITSTAT_OFST, + 0, 0, 0, 0, 0, 0, &result); + + if ((unsigned int)result.a1 & ALTR_A10_ECC_INITCOMPLETEA) + break; + udelay(1); + } + if (limit <= 0) + return -EBUSY; + + /* Enable ECC */ + arm_smccc_smc(INTEL_SIP_SMC_REG_READ, + device->sdm_qspi_addr + ALTR_A10_ECC_CTRL_OFST, + 0, 0, 0, 0, 0, 0, &result); + read_reg = (unsigned int)result.a1 | ALTR_A10_ECC_SERRINTEN; + + arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, + device->sdm_qspi_addr + ALTR_A10_ECC_CTRL_OFST, + read_reg, 0, 0, 0, 0, 0, &result); + + arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, + device->sdm_qspi_addr + ALTR_A10_ECC_ERRINTEN_OFST, + ALTR_A10_ECC_SERRINTEN, 0, 0, 0, 0, 0, &result); + return 0; +} + +static const struct edac_device_prv_data a10_sdmqspiecc_data = { + .setup = socfpga_init_sdm_qspi_ecc, + .inject_fops = &altr_edac_sdm_qspi_device_inject_fops, +}; + +#endif /* CONFIG_EDAC_ALTERA_SDM_QSPI */ + 
/********************* SDMMC Device Functions **********************/ #ifdef CONFIG_EDAC_ALTERA_SDMMC @@ -1712,20 +1948,76 @@ static const struct of_device_id altr_edac_a10_device_of_match[] = { #endif #ifdef CONFIG_EDAC_ALTERA_USB { .compatible = "altr,socfpga-usb-ecc", .data = &a10_usbecc_data }, + { .compatible = "altr,socfpga-usb3-ecc", .data = &agilex5_usb3ecc_data }, #endif #ifdef CONFIG_EDAC_ALTERA_QSPI { .compatible = "altr,socfpga-qspi-ecc", .data = &a10_qspiecc_data }, #endif +#if IS_ENABLED(CONFIG_EDAC_ALTERA_SDM_QSPI) + { .compatible = "altr,socfpga-sdm-qspi-ecc", + .data = &a10_sdmqspiecc_data }, +#endif #ifdef CONFIG_EDAC_ALTERA_SDMMC { .compatible = "altr,socfpga-sdmmc-ecc", .data = &a10_sdmmcecca_data }, #endif #ifdef CONFIG_EDAC_ALTERA_SDRAM { .compatible = "altr,sdram-edac-s10", .data = &s10_sdramecc_data }, +#endif +#ifdef CONFIG_EDAC_ALTERA_IO96B + { .compatible = "altr,socfpga-io96b0-ecc", .data = &agilex5_io96b_data }, + { .compatible = "altr,socfpga-io96b1-ecc", .data = &agilex5_io96b_data }, #endif {}, }; MODULE_DEVICE_TABLE(of, altr_edac_a10_device_of_match); +/* + * The IO96B EDAC Device Functions differ from the rest of the + * ECC peripherals. + */ + +#ifdef CONFIG_EDAC_ALTERA_IO96B +static ssize_t __maybe_unused +altr_edac_io96b_device_trig(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct edac_device_ctl_info *edac_dci = file->private_data; + struct altr_edac_device_dev *drvdata = edac_dci->pvt_info; + u8 trig_type; + u32 val; + struct arm_smccc_res result; + + if (!user_buf || get_user(trig_type, user_buf)) + return -EFAULT; + + mutex_lock(&io96b_mb_mutex); + if (readl(drvdata->base + IO96B_CMD_RESP_STATUS_OFST)) + writel(0, drvdata->base + IO96B_CMD_RESP_STATUS_OFST); + + arm_smccc_smc(INTEL_SIP_SMC_IO96B_INJECT_ECC_ERR, + (trig_type == ALTR_UE_TRIGGER_CHAR) ? 
+ IO96B_DBE_SYNDROME : IO96B_SBE_SYNDROME, + IO96B_CMD_TRIG_ECC_ENJECT_OP, 0, 0, 0, 0, 0, &result); + + writel(IO06B_ECC_SCRUB_INTERVAL, drvdata->base + IO96B_CMD_PARAM_0_OFST); + writel(IO06B_ECC_SCRUB_LEN, drvdata->base + IO96B_CMD_PARAM_1_OFST); + writel(IO06B_ECC_SCRUB_FULL_MEM, drvdata->base + IO96B_CMD_PARAM_2_OFST); + writel(IO96B_CMD_ECC_SCRUB_MODE_0, drvdata->base + IO96B_CMD_REQ_OFST); + + if (readl_relaxed_poll_timeout(drvdata->base + IO96B_ECC_SCRUB_STAT0_OFST, + val, !(val & IO96B_ECC_SCRUB_COMPLETE), + IO96B_ECC_SCRUB_POLL_US, + IO96B_ECC_SCRUB_TIMEOUT)) + edac_printk(KERN_ALERT, EDAC_DEVICE, + "IO96B ECC Scrubing timeout - Try again.\n"); + + writel(0, drvdata->base + IO96B_CMD_RESP_STATUS_OFST); + mutex_unlock(&io96b_mb_mutex); + + return count; +} +#endif + /* * The Arria10 EDAC Device Functions differ from the Cyclone5/Arria5 * because 2 IRQs are shared among the all ECC peripherals. The ECC @@ -1749,9 +2041,9 @@ altr_edac_a10_device_trig(struct file *file, const char __user *user_buf, local_irq_save(flags); if (trig_type == ALTR_UE_TRIGGER_CHAR) - writel(priv->ue_set_mask, set_addr); + writew(priv->ue_set_mask, set_addr); else - writel(priv->ce_set_mask, set_addr); + writew(priv->ce_set_mask, set_addr); /* Ensure the interrupt test bits are set */ wmb(); @@ -1781,7 +2073,7 @@ altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf, local_irq_save(flags); if (trig_type == ALTR_UE_TRIGGER_CHAR) { - writel(priv->ue_set_mask, set_addr); + writew(priv->ue_set_mask, set_addr); } else { /* Setup read/write of 4 bytes */ writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST); @@ -1823,6 +2115,89 @@ altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf, return count; } +static irqreturn_t io96b_irq_handler(int irq, void *dev_id) +{ + struct altr_edac_device_dev *dci = dev_id; + u32 err_word0; + u32 err_word1; + u32 cnt = 0; + u32 ecc_error_status; + u16 rbuf_overflow; + u16 err_count = 0; + bool dbe = false; + 
enum io96b_error_type error_type; + u32 err_queue = IO96B_ECC_ERR_ENTRIES_OFST; + + ecc_error_status = readl(dci->base + IO96B_ECC_ERR_REG_OFST); + rbuf_overflow = (ecc_error_status & GENMASK(31, 16)) >> 16; + err_count = ecc_error_status & GENMASK(15, 0); + + if (!rbuf_overflow) { + while (cnt < err_count) { + err_word0 = readl(dci->base + err_queue); + err_word1 = readl(dci->base + (err_queue + 4)); + + error_type = (err_word0 & GENMASK(9, 6)) >> 6; + if (error_type == ECC_SINGLE_DBE || error_type == ECC_MULTI_DBE || + error_type == ECC_WRITE_LINK_DBE || + error_type == ECC_READ_LINK_DBE || + error_type == ECC_READ_LINK_RMW_DBE) { + edac_printk(KERN_ERR, EDAC_DEVICE, + "%s: DBE: word0:0x%08X, word1:0x%08X\n", + dci->edac_dev_name, err_word0, err_word1); + dbe = true; + } else { + edac_printk(KERN_ERR, EDAC_DEVICE, + "%s: SBE: word0:0x%08X, word1:0x%08X\n", + dci->edac_dev_name, err_word0, err_word1); + edac_device_handle_ce(dci->edac_dev, 0, 0, + dci->edac_dev_name); + } + cnt++; + err_queue += 8; + } + if (dbe) + panic("\nEDAC:IO96B[Uncorrectable errors]\n"); + } else { + err_word0 = readl(dci->base + err_queue); err_word1 = readl(dci->base + (err_queue + 4)); error_type = (err_word0 & GENMASK(9, 6)) >> 6; + if (error_type == ECC_SINGLE_DBE || error_type == ECC_MULTI_DBE || + error_type == ECC_WRITE_LINK_DBE || + error_type == ECC_READ_LINK_DBE || + error_type == ECC_READ_LINK_RMW_DBE) { + panic("\nEDAC: UE: %s: word0:0x%08X, word1:0x%08X\n", + dci->edac_dev_name, err_word0, err_word1); + } else { + edac_printk(KERN_ERR, EDAC_DEVICE, + "%s: Buffer Overflow SBE:0x%08X\n", + dci->edac_dev_name, rbuf_overflow); + edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name); + } + } + + /* Clear queue of logged errors */ + writel(IO96B_ECC_ERROR_QUEUE_CLEAR, dci->base + IO96B_CMD_REQ_OFST); + return IRQ_HANDLED; +} + +static irqreturn_t sdm_qspi_irq_handler(int irq, void *dev_id) +{ + struct altr_edac_device_dev *dci = dev_id; + struct arm_smccc_res result; + + if (irq == dci->sdm_qspi_sb_irq) { + arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, + dci->sdm_qspi_addr + 
ALTR_A10_ECC_INTSTAT_OFST, + ALTR_A10_ECC_SERRPENA, 0, 0, 0, 0, 0, &result); + edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name); + } else { + arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, + dci->sdm_qspi_addr + ALTR_A10_ECC_INTSTAT_OFST, + ALTR_A10_ECC_DERRPENA, 0, 0, 0, 0, 0, &result); + edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name); + } + return IRQ_HANDLED; +} + static void altr_edac_a10_irq_handler(struct irq_desc *desc) { int dberr, bit, sm_offset, irq_status; @@ -1880,6 +2255,84 @@ static int get_s10_sdram_edac_resource(struct device_node *np, return ret; } +#if IS_ENABLED(CONFIG_EDAC_ALTERA_CRAM_SEU) +static int altr_edac_device_add(struct altr_arria10_edac *edac, + struct platform_device *pdev, char *ecc_name) +{ + struct edac_device_ctl_info *dci; + struct altr_edac_device_dev *altdev; + int edac_idx; + int seu_irq; + int rc = 0; + + seu_irq = platform_get_irq_byname(pdev, "sdm_seu"); + if (seu_irq < 0) { + dev_warn(&pdev->dev, "no %s IRQ defined\n", "sdm_seu"); + return 0; + } + + edac_idx = edac_device_alloc_index(); + dci = edac_device_alloc_ctl_info(sizeof(*altdev), ecc_name, + 1, ecc_name, 1, 0, edac_idx); + if (!dci) { + edac_printk(KERN_ERR, EDAC_DEVICE, + "%s: Unable to allocate EDAC device\n", ecc_name); + rc = -ENOMEM; + goto err_release_group; + } + + altdev = dci->pvt_info; + dci->dev = edac->dev; + altdev->edac_dev_name = ecc_name; + altdev->edac_idx = edac_idx; + altdev->edac = edac; + altdev->edac_dev = dci; + altdev->ddev = *edac->dev; + dci->dev = &altdev->ddev; + dci->ctl_name = "Altera ECC Manager"; + dci->mod_name = ecc_name; + dci->dev_name = ecc_name; + + altdev->seu_irq = seu_irq; + rc = devm_request_threaded_irq(edac->dev, altdev->seu_irq, NULL, + seu_irq_handler, IRQF_ONESHOT, + ecc_name, altdev); + if (rc) { + edac_printk(KERN_ERR, EDAC_DEVICE, "No SEU IRQ resource\n"); + goto err_release_group1; + } + + rc = edac_device_add_device(dci); + if (rc) { + dev_err(edac->dev, "edac_device_add_device failed\n"); + 
rc = -ENOMEM; + goto err_release_group1; + } + + if (IS_ENABLED(CONFIG_EDAC_DEBUG)) { + altdev->debugfs_dir = edac_debugfs_create_dir(ecc_name); + if (!altdev->debugfs_dir) { + rc = -EBUSY; + goto err_release_group1; + } + + if (!edac_debugfs_create_file("altr_trigger", 0200, + altdev->debugfs_dir, dci, + &altr_edac_cram_inject_fops)) + debugfs_remove_recursive(altdev->debugfs_dir); + } + return 0; + +err_release_group1: + edac_device_free_ctl_info(dci); +err_release_group: + edac_printk(KERN_ERR, EDAC_DEVICE, + "%s:Error setting up EDAC device: %d\n", ecc_name, rc); + + return rc; +} +#endif + static int altr_edac_a10_device_add(struct altr_arria10_edac *edac, struct device_node *np) { @@ -1887,8 +2340,12 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac, struct altr_edac_device_dev *altdev; char *ecc_name = (char *)np->name; struct resource res; + struct device_node *root_node; int edac_idx; int rc = 0; + bool sdm_qspi_ecc = false; + bool io96b0_ecc = false; + bool io96b1_ecc = false; const struct edac_device_prv_data *prv; /* Get matching node and check for valid result */ const struct of_device_id *pdev_id = @@ -1906,11 +2363,17 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac, if (!devres_open_group(edac->dev, altr_edac_a10_device_add, GFP_KERNEL)) return -ENOMEM; - - if (of_device_is_compatible(np, "altr,sdram-edac-s10")) + if (of_device_is_compatible(np, "altr,socfpga-io96b0-ecc")) { + io96b0_ecc = true; + } else if (of_device_is_compatible(np, "altr,socfpga-io96b1-ecc")) { + io96b1_ecc = true; + } else if (of_device_is_compatible(np, "altr,socfpga-sdm-qspi-ecc")) { + sdm_qspi_ecc = true; + } else if (of_device_is_compatible(np, "altr,sdram-edac-s10")) { rc = get_s10_sdram_edac_resource(np, &res); - else + } else { rc = of_address_to_resource(np, 0, &res); + } if (rc < 0) { edac_printk(KERN_ERR, EDAC_DEVICE, @@ -1942,10 +2405,29 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac, dci->mod_name = 
ecc_name; dci->dev_name = ecc_name; - altdev->base = devm_ioremap_resource(edac->dev, &res); - if (IS_ERR(altdev->base)) { - rc = PTR_ERR(altdev->base); - goto err_release_group1; + if (io96b0_ecc || io96b1_ecc) { + rc = of_address_to_resource(np, 0, &res); + if (rc) + goto err_release_group1; + + altdev->base = ioremap(res.start, resource_size(&res)); + if (IS_ERR(altdev->base)) { + rc = PTR_ERR(altdev->base); + goto err_release_group1; + } + } else if (sdm_qspi_ecc) { + altdev->sdm_qspi_addr = + (u32)of_translate_address(np, + of_get_address(np, + 0, + NULL, + NULL)); + } else { + altdev->base = devm_ioremap_resource(edac->dev, &res); + if (IS_ERR(altdev->base)) { + rc = PTR_ERR(altdev->base); + goto err_release_group1; + } } /* Check specific dependencies for the module */ @@ -1955,43 +2437,99 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac, goto err_release_group1; } - altdev->sb_irq = irq_of_parse_and_map(np, 0); - if (!altdev->sb_irq) { - edac_printk(KERN_ERR, EDAC_DEVICE, "Error allocating SBIRQ\n"); - rc = -ENODEV; - goto err_release_group1; - } - rc = devm_request_irq(edac->dev, altdev->sb_irq, prv->ecc_irq_handler, - IRQF_ONESHOT | IRQF_TRIGGER_HIGH, - ecc_name, altdev); - if (rc) { - edac_printk(KERN_ERR, EDAC_DEVICE, "No SBERR IRQ resource\n"); - goto err_release_group1; - } + if (sdm_qspi_ecc) { + altdev->sdm_qspi_sb_irq = altdev->edac->sdm_qspi_sb_irq; + rc = devm_request_threaded_irq(edac->dev, altdev->sdm_qspi_sb_irq, NULL, + sdm_qspi_irq_handler, IRQF_ONESHOT, + ecc_name, altdev); + if (rc) { + edac_printk(KERN_ERR, EDAC_DEVICE, "No SDM QSPI SBE IRQ resource\n"); + goto err_release_group1; + } + altdev->sdm_qspi_db_irq = altdev->edac->sdm_qspi_db_irq; + rc = devm_request_threaded_irq(edac->dev, altdev->sdm_qspi_db_irq, NULL, + sdm_qspi_irq_handler, IRQF_ONESHOT, + ecc_name, altdev); + if (rc) { + edac_printk(KERN_ERR, EDAC_DEVICE, "No SDM QSPI DBE IRQ resource\n"); + goto err_release_group1; + } + } else if (io96b0_ecc) { + 
altdev->io96b0_irq = altdev->edac->io96b0_irq; + rc = devm_request_threaded_irq(edac->dev, altdev->io96b0_irq, NULL, + io96b_irq_handler, IRQF_ONESHOT, + ecc_name, altdev); + if (rc) { + edac_printk(KERN_ERR, EDAC_DEVICE, "No IO96B0 IRQ resource\n"); + goto err_release_group1; + } + } else if (io96b1_ecc) { + altdev->io96b1_irq = altdev->edac->io96b1_irq; + rc = devm_request_threaded_irq(edac->dev, altdev->io96b1_irq, NULL, + io96b_irq_handler, IRQF_ONESHOT, + ecc_name, altdev); + if (rc) { + edac_printk(KERN_ERR, EDAC_DEVICE, "No IO96B1 IRQ resource\n"); + goto err_release_group1; + } + } else { + altdev->sb_irq = irq_of_parse_and_map(np, 0); + if (!altdev->sb_irq) { + edac_printk(KERN_ERR, EDAC_DEVICE, "Error allocating SBIRQ\n"); + rc = -ENODEV; + goto err_release_group1; + } + rc = devm_request_irq(edac->dev, altdev->sb_irq, prv->ecc_irq_handler, + IRQF_ONESHOT | IRQF_TRIGGER_HIGH, ecc_name, altdev); + if (rc) { + edac_printk(KERN_ERR, EDAC_DEVICE, "No SBERR IRQ resource\n"); + goto err_release_group1; + } #ifdef CONFIG_64BIT - /* Use IRQ to determine SError origin instead of assigning IRQ */ - rc = of_property_read_u32_index(np, "interrupts", 0, &altdev->db_irq); - if (rc) { - edac_printk(KERN_ERR, EDAC_DEVICE, - "Unable to parse DB IRQ index\n"); - goto err_release_group1; - } + root_node = of_root; + if (of_device_is_compatible(root_node, "intel,socfpga-agilex5")) { + altdev->db_irq = irq_of_parse_and_map(np, 1); + if (!altdev->db_irq) { + edac_printk(KERN_ERR, EDAC_DEVICE, + "Error allocating DBIRQ\n"); + rc = -ENODEV; + goto err_release_group1; + } + rc = devm_request_irq(edac->dev, altdev->db_irq, + prv->ecc_irq_handler, + IRQF_ONESHOT | IRQF_TRIGGER_HIGH, + ecc_name, altdev); + if (rc) { + edac_printk(KERN_ERR, EDAC_DEVICE, + "No DBERR IRQ resource\n"); + goto err_release_group1; + } + } else { + /* Use IRQ to determine SError origin instead of assigning IRQ */ + rc = of_property_read_u32_index(np, "interrupts", 0, + &altdev->db_irq); + if (rc) { + 
edac_printk(KERN_ERR, EDAC_DEVICE, + "Unable to parse DB IRQ index\n"); + goto err_release_group1; + } + } #else - altdev->db_irq = irq_of_parse_and_map(np, 1); - if (!altdev->db_irq) { - edac_printk(KERN_ERR, EDAC_DEVICE, "Error allocating DBIRQ\n"); - rc = -ENODEV; - goto err_release_group1; - } - rc = devm_request_irq(edac->dev, altdev->db_irq, prv->ecc_irq_handler, - IRQF_ONESHOT | IRQF_TRIGGER_HIGH, - ecc_name, altdev); - if (rc) { - edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n"); - goto err_release_group1; - } + altdev->db_irq = irq_of_parse_and_map(np, 1); + if (!altdev->db_irq) { + edac_printk(KERN_ERR, EDAC_DEVICE, "Error allocating DBIRQ\n"); + rc = -ENODEV; + goto err_release_group1; + } + rc = devm_request_irq(edac->dev, altdev->db_irq, prv->ecc_irq_handler, + IRQF_ONESHOT | IRQF_TRIGGER_HIGH, ecc_name, altdev); + if (rc) { + edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n"); + goto err_release_group1; + } #endif + } rc = edac_device_add_device(dci); if (rc) { @@ -2058,6 +2596,18 @@ static const struct irq_domain_ops a10_eccmgr_ic_ops = { /* panic routine issues reboot on non-zero panic_timeout */ extern int panic_timeout; +#ifdef CONFIG_EDAC_ALTERA_ARM64_WARM_RESET +/* EL3 SMC call to setup CPUs for warm reset */ +void panic_smp_self_stop(void) +{ + struct arm_smccc_res result; + + arm_smccc_smc(INTEL_SIP_SMC_ECC_DBE, S10_WARM_RESET_WFI_FLAG, + S10_WARM_RESET_WFI_FLAG, 0, 0, 0, 0, 0, &result); + cpu_park_loop(); +} +#endif + /* * The double bit error is handled through SError which is fatal. This is * called as a panic notifier to printout ECC error info as part of the panic. 
@@ -2089,8 +2639,9 @@ static int s10_edac_dberr_handler(struct notifier_block *this, regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_ADDR_OFST, err_addr); edac_printk(KERN_ERR, EDAC_DEVICE, - "EDAC: [Fatal DBE on %s @ 0x%08X]\n", - ed->edac_dev_name, err_addr); + "EDAC: [Fatal DBE on %s [CPU=%d] @ 0x%08X]\n", + ed->edac_dev_name, raw_smp_processor_id(), + err_addr); break; } /* Notify the System through SMC. Reboot delay = 1 second */ @@ -2108,6 +2659,7 @@ static int altr_edac_a10_probe(struct platform_device *pdev) { struct altr_arria10_edac *edac; struct device_node *child; + struct device_node *root_node; edac = devm_kzalloc(&pdev->dev, sizeof(*edac), GFP_KERNEL); if (!edac) @@ -2127,6 +2679,10 @@ static int altr_edac_a10_probe(struct platform_device *pdev) return PTR_ERR(edac->ecc_mgr_map); } + /* Set irq mask for DDR SBE to avoid any pending irq before registration */ + regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST, + (BIT(16) | BIT(17))); + edac->irq_chip.name = pdev->dev.of_node->name; edac->irq_chip.irq_mask = a10_eccmgr_irq_mask; edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask; @@ -2145,9 +2701,21 @@ static int altr_edac_a10_probe(struct platform_device *pdev) altr_edac_a10_irq_handler, edac); + root_node = of_root; + if (of_device_is_compatible(root_node, "intel,socfpga-agilex5")) { + edac->db_irq = platform_get_irq_byname(pdev, "global_dbe"); + if (edac->db_irq < 0) + return edac->db_irq; + + irq_set_chained_handler_and_data(edac->db_irq, + altr_edac_a10_irq_handler, + edac); + } + #ifdef CONFIG_64BIT { int dberror, err_addr; + struct arm_smccc_res result; edac->panic_notifier.notifier_call = s10_edac_dberr_handler; atomic_notifier_chain_register(&panic_notifier_list, @@ -2157,18 +2725,63 @@ static int altr_edac_a10_probe(struct platform_device *pdev) regmap_read(edac->ecc_mgr_map, S10_SYSMGR_UE_VAL_OFST, &dberror); if (dberror) { - regmap_read(edac->ecc_mgr_map, S10_SYSMGR_UE_ADDR_OFST, - &err_addr); - edac_printk(KERN_ERR, 
EDAC_DEVICE, - "Previous Boot UE detected[0x%X] @ 0x%X\n", - dberror, err_addr); + /* Bit-31 is set if previous DDR UE happened */ + if (dberror & BIT(31)) { + /* Read previous DDR UE info */ + arm_smccc_smc(INTEL_SIP_SMC_SEU_ERR_STATUS, 0, + 0, 0, 0, 0, 0, 0, &result); + + if (!(int)result.a0) { + edac_printk(KERN_ERR, EDAC_DEVICE, + "Previous DDR UE:Count=0x%X,Address=0x%X,ErrorData=0x%X\n" + , (unsigned int)result.a1, (unsigned int)result.a2 + , (unsigned int)result.a3); + } else { + edac_printk(KERN_ERR, EDAC_DEVICE, + "INTEL_SIP_SMC_SEU_ERR_STATUS failed\n"); + } + } else { + regmap_read(edac->ecc_mgr_map, S10_SYSMGR_UE_ADDR_OFST, + &err_addr); + edac_printk(KERN_ERR, EDAC_DEVICE, + "Previous Boot UE detected[0x%X] @ 0x%X\n", + dberror, err_addr); + } /* Reset the sticky registers */ regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_VAL_OFST, 0); regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_ADDR_OFST, 0); } + +#ifdef CONFIG_EDAC_ALTERA_IO96B + edac->io96b0_irq = platform_get_irq_byname(pdev, "io96b0"); + if (edac->io96b0_irq < 0) { + dev_err(&pdev->dev, "No io96b0 IRQ resource\n"); + return edac->io96b0_irq; + } + edac->io96b1_irq = platform_get_irq_byname(pdev, "io96b1"); + if (edac->io96b1_irq < 0) { + dev_err(&pdev->dev, "No io96b1 IRQ resource\n"); + return edac->io96b1_irq; + } +#endif + +#if IS_ENABLED(CONFIG_EDAC_ALTERA_SDM_QSPI) + edac->sdm_qspi_sb_irq = platform_get_irq_byname(pdev, "sdm_qspi_sbe"); + if (edac->sdm_qspi_sb_irq < 0) { + dev_err(&pdev->dev, "no %s IRQ defined\n", "sdm_qspi_sbe"); + return edac->sdm_qspi_sb_irq; + } + + edac->sdm_qspi_db_irq = platform_get_irq_byname(pdev, "sdm_qspi_dbe"); + if (edac->sdm_qspi_db_irq < 0) { + dev_err(&pdev->dev, "no %s IRQ defined\n", "sdm_qspi_dbe"); + return edac->sdm_qspi_db_irq; + } +#endif } + #else edac->db_irq = platform_get_irq(pdev, 1); if (edac->db_irq < 0) @@ -2184,6 +2797,10 @@ static int altr_edac_a10_probe(struct platform_device *pdev) if (of_match_node(altr_edac_a10_device_of_match, child)) 
altr_edac_a10_device_add(edac, child); +#if IS_ENABLED(CONFIG_EDAC_ALTERA_CRAM_SEU) + else if (of_device_is_compatible(child, "altr,socfpga-cram-seu")) + altr_edac_device_add(edac, pdev, (char *)child->name); +#endif #ifdef CONFIG_EDAC_ALTERA_SDRAM else if (of_device_is_compatible(child, "altr,sdram-edac-a10")) @@ -2192,7 +2809,6 @@ static int altr_edac_a10_probe(struct platform_device *pdev) NULL, &pdev->dev); #endif } - return 0; } diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h index 3727e72c8c2e7..26ead13a881e2 100644 --- a/drivers/edac/altera_edac.h +++ b/drivers/edac/altera_edac.h @@ -350,8 +350,54 @@ struct altr_sdram_mc_data { #define ECC_READ_EOVR 0x2 #define ECC_READ_EDOVR 0x3 -struct altr_edac_device_dev; +/* DRAM and OCRAM require cold reset */ +#define S10_COLD_RESET_MASK 0x30002 +#define S10_WARM_RESET_WFI_FLAG BIT(31) + +/* Single Event Upset Defines */ +#define SEU_SAFE_INJECT_SB_CE 0x30000 +#define SEU_SAFE_INJECT_DB_UE_MSB 0x20 +#define SEU_SAFE_INJECT_DB_UE_LSB 0x30001 + +/************ IO96B ECC defines *******/ +#define IO96B_ECC_ENABLE_INFO_OFST 0x240 +#define IO96B_ECC_SCRUB_STAT0_OFST 0x244 +#define IO96B_ECC_ERR_REG_OFST 0x300 +#define IO96B_ECC_ERR_ENTRIES_OFST 0x310 + +#define IO96B_CMD_RESP_STATUS_OFST 0x45C +#define IO96B_CMD_RESP_DATA_0_OFST 0x458 +#define IO96B_CMD_RESP_DATA_1_OFST 0x454 +#define IO96B_CMD_RESP_DATA_2_OFST 0x450 +#define IO96B_CMD_REQ_OFST 0x43C +#define IO96B_CMD_PARAM_0_OFST 0x438 +#define IO96B_CMD_PARAM_1_OFST 0x434 +#define IO96B_CMD_PARAM_2_OFST 0x430 + +#define IO96B_CMD_TRIG_ECC_ENJECT_OP 0x20040109 +#define IO96B_CMD_ECC_SCRUB_MODE_0 0x20040202 +#define IO96B_ECC_ERROR_QUEUE_CLEAR 0x20040110 + +#define IO06B_ECC_SCRUB_INTERVAL 0x14 +#define IO06B_ECC_SCRUB_LEN 0x100 +#define IO06B_ECC_SCRUB_FULL_MEM 0x1 + +#define IO96B_SBE_SYNDROME 0xF4 +#define IO96B_DBE_SYNDROME 0xFF + +#define IO96B_ECC_SCRUB_TIMEOUT 400000 +#define IO96B_ECC_SCRUB_POLL_US 500 +#define IO96B_ECC_SCRUB_COMPLETE BIT(1) + 
+enum io96b_error_type { + ECC_SINGLE_DBE = 2, + ECC_MULTI_DBE = 3, + ECC_WRITE_LINK_DBE = 0xa, + ECC_READ_LINK_DBE = 0xc, + ECC_READ_LINK_RMW_DBE +}; +struct altr_edac_device_dev; struct edac_device_prv_data { int (*setup)(struct altr_edac_device_dev *device); int ce_clear_mask; @@ -382,6 +428,12 @@ struct altr_edac_device_dev { struct edac_device_ctl_info *edac_dev; struct device ddev; int edac_idx; + int io96b0_irq; + int io96b1_irq; + int sdm_qspi_sb_irq; + int sdm_qspi_db_irq; + u32 sdm_qspi_addr; + int seu_irq; }; struct altr_arria10_edac { @@ -393,6 +445,10 @@ struct altr_arria10_edac { struct irq_chip irq_chip; struct list_head a10_ecc_devices; struct notifier_block panic_notifier; + int io96b0_irq; + int io96b1_irq; + int sdm_qspi_sb_irq; + int sdm_qspi_db_irq; }; #endif /* #ifndef _ALTERA_EDAC_H */ diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index d7416166fd8a4..455c104c9d34d 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -485,7 +485,7 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv) clearval = readl(base + ECC_CLR_OFST) | ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT | ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT; - writel(clearval, base + ECC_CLR_OFST); + writel(0x00, base + ECC_CLR_OFST); spin_unlock_irqrestore(&priv->reglock, flags); diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c index e20cee9c2d320..808f5f96477b7 100644 --- a/drivers/firmware/stratix10-rsu.c +++ b/drivers/firmware/stratix10-rsu.c @@ -14,11 +14,9 @@ #include #include #include +#include -#define RSU_STATE_MASK GENMASK_ULL(31, 0) -#define RSU_VERSION_MASK GENMASK_ULL(63, 32) -#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0) -#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32) +#define RSU_ERASE_SIZE_MASK GENMASK_ULL(63, 32) #define RSU_DCMF0_MASK GENMASK_ULL(31, 0) #define RSU_DCMF1_MASK GENMASK_ULL(63, 32) #define RSU_DCMF2_MASK GENMASK_ULL(31, 0) @@ -34,9 +32,15 @@ #define 
INVALID_DCMF_VERSION 0xFF #define INVALID_DCMF_STATUS 0xFFFFFFFF #define INVALID_SPT_ADDRESS 0x0 +#define INVALID_DEVICE_INFO 0x0 -#define RSU_GET_SPT_CMD 0x5A -#define RSU_GET_SPT_RESP_LEN (4 * sizeof(unsigned int)) +#define RSU_RETRY_SLEEP_MS (1U) +#define RSU_ASYNC_MSG_RETRY (3U) + +struct flash_device_info { + unsigned int size; + unsigned int erase_size; +}; typedef void (*rsu_callback)(struct stratix10_svc_client *client, struct stratix10_svc_cb_data *data); @@ -60,6 +64,8 @@ typedef void (*rsu_callback)(struct stratix10_svc_client *client, * @dcmf_status.dcmf1: dcmf1 status * @dcmf_status.dcmf2: dcmf2 status * @dcmf_status.dcmf3: dcmf3 status + * @device_info.size: flash size + * @device_info.erase_size: flash erase size * @retry_counter: the current image's retry counter * @max_retry: the preset max retry value * @spt0_address: address of spt0 @@ -94,52 +100,39 @@ struct stratix10_rsu_priv { unsigned int dcmf3; } dcmf_status; + struct flash_device_info device_info[4]; + unsigned int retry_counter; unsigned int max_retry; unsigned long spt0_address; unsigned long spt1_address; - - unsigned int *get_spt_response_buf; }; +typedef void (*rsu_async_callback)(struct device *dev, + struct stratix10_rsu_priv *priv, struct stratix10_svc_cb_data *data); + /** - * rsu_status_callback() - Status callback from Intel Service Layer - * @client: pointer to service client + * rsu_async_status_callback() - Status callback from Intel Service Layer + * @dev: pointer to device object + * @priv: pointer to priv object * @data: pointer to callback data structure * - * Callback from Intel service layer for RSU status request. Status is - * only updated after a system reboot, so a get updated status call is - * made during driver probe. + * Callback from rsu_async_send() to get the system rsu error status. 
*/ -static void rsu_status_callback(struct stratix10_svc_client *client, - struct stratix10_svc_cb_data *data) +static void rsu_async_status_callback(struct device *dev, + struct stratix10_rsu_priv *priv, + struct stratix10_svc_cb_data *data) { - struct stratix10_rsu_priv *priv = client->priv; - struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1; - - if (data->status == BIT(SVC_STATUS_OK)) { - priv->status.version = FIELD_GET(RSU_VERSION_MASK, - res->a2); - priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2); - priv->status.fail_image = res->a1; - priv->status.current_image = res->a0; - priv->status.error_location = - FIELD_GET(RSU_ERROR_LOCATION_MASK, res->a3); - priv->status.error_details = - FIELD_GET(RSU_ERROR_DETAIL_MASK, res->a3); - } else { - dev_err(client->dev, "COMMAND_RSU_STATUS returned 0x%lX\n", - res->a0); - priv->status.version = 0; - priv->status.state = 0; - priv->status.fail_image = 0; - priv->status.current_image = 0; - priv->status.error_location = 0; - priv->status.error_details = 0; - } - - complete(&priv->completion); + struct arm_smccc_1_2_regs *res = (struct arm_smccc_1_2_regs *)data->kaddr1; + + priv->status.current_image = res->a2; + priv->status.fail_image = res->a3; + priv->status.state = res->a4; + priv->status.version = res->a5; + priv->status.error_location = res->a7; + priv->status.error_details = res->a8; + priv->retry_counter = res->a9; } /** @@ -163,33 +156,6 @@ static void rsu_command_callback(struct stratix10_svc_client *client, complete(&priv->completion); } -/** - * rsu_retry_callback() - Callback from Intel service layer for getting - * the current image's retry counter from the firmware - * @client: pointer to client - * @data: pointer to callback data structure - * - * Callback from Intel service layer for retry counter, which is used by - * user to know how many times the images is still allowed to reload - * itself before giving up and starting RSU fail-over flow. 
- */ -static void rsu_retry_callback(struct stratix10_svc_client *client, - struct stratix10_svc_cb_data *data) -{ - struct stratix10_rsu_priv *priv = client->priv; - unsigned int *counter = (unsigned int *)data->kaddr1; - - if (data->status == BIT(SVC_STATUS_OK)) - priv->retry_counter = *counter; - else if (data->status == BIT(SVC_STATUS_NO_SUPPORT)) - dev_warn(client->dev, "Secure FW doesn't support retry\n"); - else - dev_err(client->dev, "Failed to get retry counter %lu\n", - BIT(data->status)); - - complete(&priv->completion); -} - /** * rsu_max_retry_callback() - Callback from Intel service layer for getting * the max retry value from the firmware @@ -270,36 +236,65 @@ static void rsu_dcmf_status_callback(struct stratix10_svc_client *client, complete(&priv->completion); } -static void rsu_get_spt_callback(struct stratix10_svc_client *client, - struct stratix10_svc_cb_data *data) +/** + * rsu_get_device_info_callback() - Callback from Intel service layer for getting + * the QSPI device info + * @client: pointer to client + * @data: pointer to callback data structure + * + * Callback from Intel service layer for QSPI device info + */ +static void rsu_get_device_info_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) { struct stratix10_rsu_priv *priv = client->priv; - unsigned long *mbox_err = (unsigned long *)data->kaddr1; - unsigned long *resp_len = (unsigned long *)data->kaddr2; - - if (data->status != BIT(SVC_STATUS_OK) || (*mbox_err) || - (*resp_len != RSU_GET_SPT_RESP_LEN)) - goto error; + struct arm_smccc_1_2_regs *res = (struct arm_smccc_1_2_regs *)data->kaddr1; - priv->spt0_address = priv->get_spt_response_buf[0]; - priv->spt0_address <<= 32; - priv->spt0_address |= priv->get_spt_response_buf[1]; - - priv->spt1_address = priv->get_spt_response_buf[2]; - priv->spt1_address <<= 32; - priv->spt1_address |= priv->get_spt_response_buf[3]; - - goto complete; + if (data->status == BIT(SVC_STATUS_OK)) { + 
priv->device_info[0].size = res->a1; + priv->device_info[0].erase_size = + FIELD_GET(RSU_ERASE_SIZE_MASK, res->a1); + priv->device_info[1].size = res->a2; + priv->device_info[1].erase_size = + FIELD_GET(RSU_ERASE_SIZE_MASK, res->a2); + priv->device_info[2].size = res->a3; + priv->device_info[2].erase_size = + FIELD_GET(RSU_ERASE_SIZE_MASK, res->a3); + priv->device_info[3].size = res->a4; + priv->device_info[3].erase_size = + FIELD_GET(RSU_ERASE_SIZE_MASK, res->a4); -error: - dev_err(client->dev, "failed to get SPTs\n"); + } else { + dev_err(client->dev, "COMMAND_RSU_GET_DEVICE_INFO returned 0x%lX\n", + res->a0); + priv->device_info[0].size = INVALID_DEVICE_INFO; + priv->device_info[1].size = INVALID_DEVICE_INFO; + priv->device_info[2].size = INVALID_DEVICE_INFO; + priv->device_info[3].size = INVALID_DEVICE_INFO; + priv->device_info[0].erase_size = INVALID_DEVICE_INFO; + priv->device_info[1].erase_size = INVALID_DEVICE_INFO; + priv->device_info[2].erase_size = INVALID_DEVICE_INFO; + priv->device_info[3].erase_size = INVALID_DEVICE_INFO; + } -complete: - stratix10_svc_free_memory(priv->chan, priv->get_spt_response_buf); - priv->get_spt_response_buf = NULL; complete(&priv->completion); } +/** + * rsu_async_get_spt_table_callback() - Callback to be used by the rsu_async_send() + * to retrieve the SPT table information. 
+ * @dev: pointer to device object + * @priv: pointer to priv object + * @data: pointer to callback data structure + */ +static void rsu_async_get_spt_table_callback(struct device *dev, + struct stratix10_rsu_priv *priv, + struct stratix10_svc_cb_data *data) +{ + priv->spt0_address = *((unsigned long *)data->kaddr1); + priv->spt1_address = *((unsigned long *)data->kaddr2); +} + /** * rsu_send_msg() - send a message to Intel service layer * @priv: pointer to rsu private data @@ -321,7 +316,9 @@ static int rsu_send_msg(struct stratix10_rsu_priv *priv, struct stratix10_svc_client_msg msg; int ret; - mutex_lock(&priv->lock); + if (!mutex_trylock(&priv->lock)) + return -EAGAIN; + reinit_completion(&priv->completion); priv->client.receive_cb = callback; @@ -329,14 +326,6 @@ static int rsu_send_msg(struct stratix10_rsu_priv *priv, if (arg) msg.arg[0] = arg; - if (command == COMMAND_MBOX_SEND_CMD) { - msg.arg[1] = 0; - msg.payload = NULL; - msg.payload_length = 0; - msg.payload_output = priv->get_spt_response_buf; - msg.payload_length_output = RSU_GET_SPT_RESP_LEN; - } - ret = stratix10_svc_send(priv->chan, &msg); if (ret < 0) goto status_done; @@ -362,6 +351,99 @@ static int rsu_send_msg(struct stratix10_rsu_priv *priv, return ret; } +/** + * soc64_async_callback() - Callback from Intel service layer for async requests + * @ptr: pointer to the completion object + */ +static void soc64_async_callback(void *ptr) +{ + if (ptr) + complete(ptr); +} + +/** + * rsu_send_async_msg() - send an async message to Intel service layer + * @dev: pointer to device object + * @priv: pointer to rsu private data + * @command: RSU status or update command + * @arg: the request argument, notify status + * @callback: function pointer for the callback (status or update) + */ +static int rsu_send_async_msg(struct device *dev, struct stratix10_rsu_priv *priv, + enum stratix10_svc_command_code command, + unsigned long arg, + rsu_async_callback callback) +{ + struct stratix10_svc_client_msg msg = 
{0}; + struct stratix10_svc_cb_data data = {0}; + struct completion completion; + int status, index, ret; + void *handle = NULL; + + msg.command = command; + msg.arg[0] = arg; + + init_completion(&completion); + + for (index = 0; index < RSU_ASYNC_MSG_RETRY; index++) { + status = stratix10_svc_async_send(priv->chan, &msg, + &handle, soc64_async_callback, + &completion); + if (status == 0) + break; + dev_warn(dev, "Failed to send async message\n"); + msleep(RSU_RETRY_SLEEP_MS); + } + + if (status && !handle) { + dev_err(dev, "Failed to send async message\n"); + if (msg.payload_output) + stratix10_svc_free_memory(priv->chan, msg.payload_output); + return -ETIMEDOUT; + } + + ret = wait_for_completion_io_timeout(&completion, RSU_TIMEOUT); + if (ret > 0) + dev_dbg(dev, "Received async interrupt\n"); + else if (ret == 0) + dev_warn(dev, "Timeout occurred. Trying to poll the response\n"); + + for (index = 0; index < RSU_ASYNC_MSG_RETRY; index++) { + status = stratix10_svc_async_poll(priv->chan, handle, &data); + if (status == -EAGAIN) { + dev_dbg(dev, "Async message is still in progress\n"); + } else if (status < 0) { + dev_alert(dev, "Failed to poll async message\n"); + ret = -ETIMEDOUT; + } else if (status == 0) { + ret = 0; + break; + } + msleep(RSU_RETRY_SLEEP_MS); + } + + if (ret) { + dev_err(dev, "Failed to get async response\n"); + goto status_done; + } + + if (data.status == 0) { + ret = 0; + if (callback) + callback(dev, priv, &data); + } else { + dev_err(dev, "%s returned 0x%x from SDM\n", __func__, + data.status); + ret = -EFAULT; + } + +status_done: + if (msg.payload_output) + stratix10_svc_free_memory(priv->chan, msg.payload_output); + stratix10_svc_async_done(priv->chan, handle); + return ret; +} + /* * This driver exposes some optional features of the Intel Stratix 10 SoC FPGA. 
* The sysfs interfaces exposed here are FPGA Remote System Update (RSU) @@ -454,8 +536,7 @@ static ssize_t max_retry_show(struct device *dev, if (!priv) return -ENODEV; - return scnprintf(buf, sizeof(priv->max_retry), - "0x%08x\n", priv->max_retry); + return scnprintf(buf, PAGE_SIZE, "0x%08x\n", priv->max_retry); } static ssize_t dcmf0_show(struct device *dev, @@ -574,7 +655,9 @@ static ssize_t reboot_image_store(struct device *dev, ret = rsu_send_msg(priv, COMMAND_RSU_UPDATE, address, rsu_command_callback); - if (ret) { + if (ret == -EAGAIN) + return 0; + else if (ret) { dev_err(dev, "Error, RSU update returned %i\n", ret); return ret; } @@ -597,30 +680,139 @@ static ssize_t notify_store(struct device *dev, if (ret) return ret; - ret = rsu_send_msg(priv, COMMAND_RSU_NOTIFY, - status, rsu_command_callback); + ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_NOTIFY, status, NULL); if (ret) { dev_err(dev, "Error, RSU notify returned %i\n", ret); return ret; } /* to get the updated state */ - ret = rsu_send_msg(priv, COMMAND_RSU_STATUS, - 0, rsu_status_callback); + ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_STATUS, 0, + rsu_async_status_callback); if (ret) { dev_err(dev, "Error, getting RSU status %i\n", ret); return ret; } - ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback); - if (ret) { - dev_err(dev, "Error, getting RSU retry %i\n", ret); - return ret; - } - return count; } +static ssize_t size0_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stratix10_rsu_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + + if (priv->device_info[0].size == INVALID_DEVICE_INFO) + return -EIO; + + return scnprintf(buf, PAGE_SIZE, "0x%08x\n", priv->device_info[0].size); +} + +static ssize_t size1_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stratix10_rsu_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + + if (priv->device_info[1].size == 
INVALID_DEVICE_INFO) + return -EIO; + + return scnprintf(buf, PAGE_SIZE, "0x%08x\n", priv->device_info[1].size); +} + +static ssize_t size2_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stratix10_rsu_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + + if (priv->device_info[2].size == INVALID_DEVICE_INFO) + return -EIO; + + return scnprintf(buf, PAGE_SIZE, "0x%08x\n", priv->device_info[2].size); +} + +static ssize_t size3_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stratix10_rsu_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + + if (priv->device_info[3].size == INVALID_DEVICE_INFO) + return -EIO; + + return scnprintf(buf, PAGE_SIZE, "0x%08x\n", priv->device_info[3].size); +} + +static ssize_t erase_size0_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stratix10_rsu_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + + if (priv->device_info[0].erase_size == INVALID_DEVICE_INFO) + return -EIO; + + return scnprintf(buf, PAGE_SIZE, "0x%08x\n", + priv->device_info[0].erase_size); +} + +static ssize_t erase_size1_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stratix10_rsu_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + + if (priv->device_info[1].erase_size == INVALID_DEVICE_INFO) + return -EIO; + + return scnprintf(buf, PAGE_SIZE, "0x%08x\n", + priv->device_info[1].erase_size); +} + +static ssize_t erase_size2_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stratix10_rsu_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + + if (priv->device_info[2].erase_size == INVALID_DEVICE_INFO) + return -EIO; + + return scnprintf(buf, PAGE_SIZE, "0x%08x\n", + priv->device_info[2].erase_size); +} + +static ssize_t erase_size3_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct 
stratix10_rsu_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + + if (priv->device_info[3].erase_size == INVALID_DEVICE_INFO) + return -EIO; + + return scnprintf(buf, PAGE_SIZE, "0x%08x\n", + priv->device_info[3].erase_size); +} + static ssize_t spt0_address_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -665,6 +857,14 @@ static DEVICE_ATTR_RO(dcmf0_status); static DEVICE_ATTR_RO(dcmf1_status); static DEVICE_ATTR_RO(dcmf2_status); static DEVICE_ATTR_RO(dcmf3_status); +static DEVICE_ATTR_RO(size0); +static DEVICE_ATTR_RO(size1); +static DEVICE_ATTR_RO(size2); +static DEVICE_ATTR_RO(size3); +static DEVICE_ATTR_RO(erase_size0); +static DEVICE_ATTR_RO(erase_size1); +static DEVICE_ATTR_RO(erase_size2); +static DEVICE_ATTR_RO(erase_size3); static DEVICE_ATTR_WO(reboot_image); static DEVICE_ATTR_WO(notify); static DEVICE_ATTR_RO(spt0_address); @@ -687,6 +887,14 @@ static struct attribute *rsu_attrs[] = { &dev_attr_dcmf1_status.attr, &dev_attr_dcmf2_status.attr, &dev_attr_dcmf3_status.attr, + &dev_attr_size0.attr, + &dev_attr_size1.attr, + &dev_attr_size2.attr, + &dev_attr_size3.attr, + &dev_attr_erase_size0.attr, + &dev_attr_erase_size1.attr, + &dev_attr_erase_size2.attr, + &dev_attr_erase_size3.attr, &dev_attr_reboot_image.attr, &dev_attr_notify.attr, &dev_attr_spt0_address.attr, @@ -727,6 +935,14 @@ static int stratix10_rsu_probe(struct platform_device *pdev) priv->max_retry = INVALID_RETRY_COUNTER; priv->spt0_address = INVALID_SPT_ADDRESS; priv->spt1_address = INVALID_SPT_ADDRESS; + priv->device_info[0].size = INVALID_DEVICE_INFO; + priv->device_info[1].size = INVALID_DEVICE_INFO; + priv->device_info[2].size = INVALID_DEVICE_INFO; + priv->device_info[3].size = INVALID_DEVICE_INFO; + priv->device_info[0].erase_size = INVALID_DEVICE_INFO; + priv->device_info[1].erase_size = INVALID_DEVICE_INFO; + priv->device_info[2].erase_size = INVALID_DEVICE_INFO; + priv->device_info[3].erase_size = INVALID_DEVICE_INFO; 
mutex_init(&priv->lock); priv->chan = stratix10_svc_request_channel_byname(&priv->client, @@ -737,12 +953,19 @@ static int stratix10_rsu_probe(struct platform_device *pdev) return PTR_ERR(priv->chan); } + ret = stratix10_svc_add_async_client(priv->chan, false); + if (ret) { + dev_err(dev, "failed to add async client\n"); + stratix10_svc_free_channel(priv->chan); + return ret; + } + init_completion(&priv->completion); platform_set_drvdata(pdev, priv); /* get the initial state from firmware */ - ret = rsu_send_msg(priv, COMMAND_RSU_STATUS, - 0, rsu_status_callback); + ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_STATUS, 0, + rsu_async_status_callback); if (ret) { dev_err(dev, "Error, getting RSU status %i\n", ret); stratix10_svc_free_channel(priv->chan); @@ -763,12 +986,6 @@ static int stratix10_rsu_probe(struct platform_device *pdev) stratix10_svc_free_channel(priv->chan); } - ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback); - if (ret) { - dev_err(dev, "Error, getting RSU retry %i\n", ret); - stratix10_svc_free_channel(priv->chan); - } - ret = rsu_send_msg(priv, COMMAND_RSU_MAX_RETRY, 0, rsu_max_retry_callback); if (ret) { @@ -776,18 +993,19 @@ static int stratix10_rsu_probe(struct platform_device *pdev) stratix10_svc_free_channel(priv->chan); } - priv->get_spt_response_buf = - stratix10_svc_allocate_memory(priv->chan, RSU_GET_SPT_RESP_LEN); + /* get QSPI device info from firmware */ + ret = rsu_send_msg(priv, COMMAND_RSU_GET_DEVICE_INFO, + 0, rsu_get_device_info_callback); + if (ret) { + dev_err(dev, "Error, getting QSPI Device Info %i\n", ret); + stratix10_svc_free_channel(priv->chan); + } - if (IS_ERR(priv->get_spt_response_buf)) { - dev_err(dev, "failed to allocate get spt buffer\n"); - } else { - ret = rsu_send_msg(priv, COMMAND_MBOX_SEND_CMD, - RSU_GET_SPT_CMD, rsu_get_spt_callback); - if (ret) { - dev_err(dev, "Error, getting SPT table %i\n", ret); - stratix10_svc_free_channel(priv->chan); - } + ret = rsu_send_async_msg(dev, priv, 
COMMAND_RSU_GET_SPT_TABLE, 0, + rsu_async_get_spt_table_callback); + if (ret) { + dev_err(dev, "Error, getting SPT table %i\n", ret); + stratix10_svc_free_channel(priv->chan); } return ret; diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index 528f37417aea4..7f0306bcb19c5 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -1,17 +1,21 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (C) 2017-2018, Intel Corporation + * Copyright (C) 2017-2024, Intel Corporation */ +#include #include #include #include +#include +#include #include #include #include #include #include #include +#include #include #include #include @@ -19,6 +23,13 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include /** * SVC_NUM_DATA_IN_FIFO - number of struct stratix10_svc_data in the FIFO @@ -29,19 +40,64 @@ * from the secure world for FPGA manager to reuse, or to free the buffer(s) * when all bit-stream data had be send. * - * FPGA_CONFIG_STATUS_TIMEOUT_SEC - poll the FPGA configuration status, - * service layer will return error to FPGA manager when timeout occurs, - * timeout is set to 30 seconds (30 * 1000) at Intel Stratix10 SoC. + * FPGA_CONFIG_POLL_INTERVAL_MS_FAST - interval for polling the service status + * at secure world for fast response commands. Interval is set to 20ms. + * + * FPGA_CONFIG_POLL_INTERVAL_MS_SLOW - interval for polling the service status + * at secure world for slow response commands. Interval is set to 500ms. + * + * FPGA_CONFIG_POLL_COUNT_FAST - number of count for polling service status for + * fast response commands. Count is set to 50 (50*20ms=1sec) + * + * FPGA_CONFIG_POLL_COUNT_SLOW - number of count for polling service status for + * slow response commands. 
Count is set to 58 (58*500ms=29sec) */ -#define SVC_NUM_DATA_IN_FIFO 32 -#define SVC_NUM_CHANNEL 3 -#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200 +#define SVC_NUM_DATA_IN_FIFO 8 +#define SVC_NUM_CHANNEL 4 +#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 2000 #define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30 -#define BYTE_TO_WORD_SIZE 4 +#define FPGA_CONFIG_POLL_INTERVAL_MS_FAST 20 +#define FPGA_CONFIG_POLL_INTERVAL_MS_SLOW 500 +#define FPGA_CONFIG_POLL_COUNT_FAST 50 +#define FPGA_CONFIG_POLL_COUNT_SLOW 58 +#define AGILEX5_SDM_DMA_ADDR_OFFSET 0x80000000 +#define BYTE_TO_WORD_SIZE 4 +#define IOMMU_LIMIT_ADDR 0x20000000 +#define IOMMU_STARTING_ADDR 0x0 +#define ENABLE_REMAPPER false +#define DISABLE_REMAPPER true /* stratix10 service layer clients */ #define STRATIX10_RSU "stratix10-rsu" -#define INTEL_FCS "intel-fcs" + +/*Maximum number of SDM client IDs.*/ +#define MAX_SDM_CLIENT_IDS 16 +/*Client ID for SIP Service Version 1.*/ +#define SIP_SVC_V1_CLIENT_ID 0x1 +/*Maximum number of SDM job IDs.*/ +#define MAX_SDM_JOB_IDS 16 +/*Number of bits used for asynchronous transaction hashing.*/ +#define ASYNC_TRX_HASH_BITS 3 +/*Total number of transaction IDs (client IDs times job IDs).*/ +#define TOTAL_TRANSACTION_IDS (MAX_SDM_CLIENT_IDS * MAX_SDM_JOB_IDS) + +/*Minimum major version of the ATF for Asynchronous transactions.*/ +#define ASYNC_ATF_MINIMUM_MAJOR_VERSION 0x3 +/*Minimum minor version of the ATF for Asynchronous transactions.*/ +#define ASYNC_ATF_MINIMUM_MINOR_VERSION 0x0 + +/*Macro to extract the job ID from a transaction ID.*/ +#define STRATIX10_GET_JOBID(transaction_id) ((transaction_id) & 0xf) +/*Macro to set a transaction ID using a client ID and a transaction ID.*/ +#define STRATIX10_SET_TRANSACTIONID(clientid, transaction_id) \ + ((((clientid) & 0xf) << 4) | ((transaction_id) & 0xf)) + +/* Macro to set a transaction ID for SIP SMC using the lower 8 bits of the transaction ID.*/ +#define STRATIX10_SIP_SMC_SET_TRANSACTIONID_X1(transaction_id) \ + ((transaction_id) & 0xff) + 
+/* Macro to get the SDM mailbox error status */ +#define STRATIX10_GET_SDM_STATUS_CODE(status) ((status) & 0x3ff) typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, @@ -52,6 +108,7 @@ struct stratix10_svc_chan; /** * struct stratix10_svc - svc private data * @stratix10_svc_rsu: pointer to stratix10 RSU device + * @intel_svc_fcs: pointer to fcs client device */ struct stratix10_svc { struct platform_device *stratix10_svc_rsu; @@ -118,7 +175,72 @@ struct stratix10_svc_data { size_t size_output; u32 command; u32 flag; - u64 arg[3]; + u64 arg[6]; +}; + +/** + * struct stratix10_svc_async_handler - Asynchronous handler for Stratix 10 service layer + * @transaction_id: Unique identifier for the transaction + * @achan: Pointer to the asynchronous channel structure + * @cb_arg: Argument to be passed to the callback function + * @cb: Callback function to be called upon completion + * @msg: Pointer to the client message structure + * @input_handle: DMA handle for the input buffer + * @output_handle: DMA handle for the output buffer + * @next: Node in the hash list + * + * This structure is used to handle asynchronous transactions in the + * Stratix 10 service layer. It maintains the necessary information + * for processing and completing asynchronous requests. 
+ */ + +struct stratix10_svc_async_handler { + u8 transaction_id; + struct stratix10_async_chan *achan; + void *cb_arg; + async_callback_t cb; + struct stratix10_svc_client_msg *msg; + dma_addr_t input_handle, output_handle; + struct hlist_node next; + struct arm_smccc_1_2_regs res; +}; + +/** + * struct stratix10_async_chan - Structure representing an asynchronous channel + * @async_client_id: Unique client identifier for the asynchronous operation + * @job_id_pool: Pointer to the job ID pool associated with this channel + */ + +struct stratix10_async_chan { + unsigned long async_client_id; + struct stratix10_sip_id_pool *job_id_pool; +}; + +/** + * struct stratix10_async_ctrl - Control structure for Stratix 10 asynchronous operations + * @irq: Interrupt request number associated with the asynchronous control + * @initialized: Flag indicating whether the control structure has been initialized + * @invoke_fn: Function pointer for invoking Stratix 10 service calls to EL3 secure firmware + * @async_id_pool: Pointer to the ID pool used for asynchronous operations + * @common_achan_refcount: Atomic reference count for the common asynchronous channel usage + * @common_async_chan: Pointer to the common asynchronous channel structure + * @trx_list_wr_lock: Spinlock for protecting the transaction list write operations + * @async_work: Work structure for scheduling asynchronous work + * @trx_list: Hash table for managing asynchronous transactions + */ + +struct stratix10_async_ctrl { + int irq; + bool initialized; + void (*invoke_fn)(struct stratix10_async_ctrl *actrl, + const struct arm_smccc_1_2_regs *args, struct arm_smccc_1_2_regs *res); + struct stratix10_sip_id_pool *async_id_pool; + atomic_t common_achan_refcount; + struct stratix10_async_chan *common_async_chan; + /* spinlock to protect the writes to trx_list hash table */ + spinlock_t trx_list_wr_lock; + struct work_struct async_work; + DECLARE_HASHTABLE(trx_list, ASYNC_TRX_HASH_BITS); }; /** @@ -130,10 +252,15 @@ 
struct stratix10_svc_data { * @node: list management * @genpool: memory pool pointing to the memory region * @task: pointer to the thread task which handles SMC or HVC call - * @svc_fifo: a queue for storing service message data * @complete_status: state for completion - * @svc_fifo_lock: protect access to service message data queue * @invoke_fn: function to issue secure monitor call or hypervisor call + * @sdm_lock: only allows a single command single response to SDM + * @domain: pointer to allocated iommu domain + * @is_smmu_enabled: flag to indicate whether is smmu_enabled for device + * @sdm_dma_addr_offset: dma addr offset to append to the IOVA sent to SDM + * @carveout: iova_domain used to allocate iova addr that is accessible by SDM + * @svc: manages the list of client svc drivers + * @actrl: async control structure * * This struct is used to create communication channels for service clients, to * handle secure monitor or hypervisor call. @@ -145,11 +272,19 @@ struct stratix10_svc_controller { int num_active_client; struct list_head node; struct gen_pool *genpool; - struct task_struct *task; - struct kfifo svc_fifo; struct completion complete_status; - spinlock_t svc_fifo_lock; svc_invoke_fn *invoke_fn; + struct mutex *sdm_lock; + struct iommu_domain *domain; + bool is_smmu_enabled; + dma_addr_t sdm_dma_addr_offset; + struct { + struct iova_domain domain; + unsigned long shift; + unsigned long limit; + } carveout; + struct stratix10_svc *svc; + struct stratix10_async_ctrl actrl; }; /** @@ -158,6 +293,9 @@ struct stratix10_svc_controller { * @scl: pointer to service client which owns the channel * @name: service client name associated with the channel * @lock: protect access to the channel + * @task: pointer to the thread task which handles SMC or HVC call + * @svc_fifo: svc fifo circular buffer + * @svc_fifo_lock: svc fifo lock * * This struct is used by service client to communicate with service layer, each * service client has its own channel created by 
service controller. @@ -166,11 +304,214 @@ struct stratix10_svc_chan { struct stratix10_svc_controller *ctrl; struct stratix10_svc_client *scl; char *name; + struct task_struct *task; + /* Separate fifo for every channel */ + struct kfifo svc_fifo; + spinlock_t svc_fifo_lock; spinlock_t lock; + struct stratix10_async_chan *async_chan; +}; + +/** + * struct stratix10_sip_id_pool - Structure representing a pool of IDs for + * asynchronous operations. + * @head: The head index of the ID pool. + * @size: The total size of the ID pool. + * @id_mask: Pointer to an array representing the mask of allocated IDs. + * @lock: Mutex lock to protect access to the ID pool. + */ +struct stratix10_sip_id_pool { + unsigned long head; + unsigned long size; + unsigned long *id_mask; + /* protects id pool*/ + struct mutex lock; }; static LIST_HEAD(svc_ctrl); static LIST_HEAD(svc_data_mem); +static DEFINE_MUTEX(svc_mem_lock); +static DEFINE_MUTEX(svc_async_lock); + +/** + * stratix10_id_pool_create - Create a new ID pool for Stratix10 async operation + * @size: The size of the ID pool to create + * + * This function allocates and initializes a new ID pool structure for + * Stratix10 async operations. It allocates memory for the ID pool structure + * and the associated bitmaps for ID management. If any allocation fails, it + * cleans up and returns NULL. + * + * Return: Pointer to the newly created ID pool structure, or NULL on failure. 
+ */ +static struct stratix10_sip_id_pool *stratix10_id_pool_create(unsigned long size) +{ + struct stratix10_sip_id_pool *id_pool = NULL; + + if (size == 0) + return NULL; + + id_pool = kzalloc(sizeof(*id_pool), GFP_KERNEL); + if (!id_pool) + return NULL; + + id_pool->size = size; + + id_pool->id_mask = bitmap_zalloc(size, GFP_KERNEL); + if (!id_pool->id_mask) { + kfree(id_pool); + return NULL; + } + + id_pool->head = 0; + + mutex_init(&id_pool->lock); + + return id_pool; +} + +/** + * stratix10_id_pool_destroy - Destroy an ID pool for Stratix10 async operation + * @id_pool: Pointer to the ID pool structure + * + * This function destroys an ID pool for Stratix10 async operations. It first + * checks if the ID pool is valid, then frees the associated bitmap and the + * ID pool structure itself. Finally, it returns 0. + * + * Return: 0 on success, -EINVAL if the ID pool is invalid. + */ +static int stratix10_id_pool_destroy(struct stratix10_sip_id_pool *id_pool) +{ + if (!id_pool) + return -EINVAL; + + mutex_lock(&id_pool->lock); + + if (id_pool->id_mask) + bitmap_free(id_pool->id_mask); + + mutex_unlock(&id_pool->lock); + mutex_destroy(&id_pool->lock); + + kfree(id_pool); + + return 0; +} + +/** + * stratix10_reserve_id - Reserve an ID in the ID pool + * @id_pool: Pointer to the ID pool structure + * @id: The ID to be reserved + * + * This function reserves an ID in the given ID pool. It first checks if the + * ID pool is valid and if the ID is within the valid range. If the ID is + * already set, it returns an error. Otherwise, it reserves the ID and + * returns 0. + * + * Return: + * 0 on success, + * -EINVAL if the ID pool is invalid, the ID is out of range, or the ID is + * already reserved. 
+ */ +static int stratix10_reserve_id(struct stratix10_sip_id_pool *id_pool, unsigned long id) +{ + if (!id_pool) + return -EINVAL; + + if (id >= id_pool->size) + return -EINVAL; + + mutex_lock(&id_pool->lock); + + if (test_bit(id, id_pool->id_mask)) { + mutex_unlock(&id_pool->lock); + return -EINVAL; + } + set_bit(id, id_pool->id_mask); + + mutex_unlock(&id_pool->lock); + return 0; +} + +/** + * stratix10_allocate_id - Allocate an ID from the ID pool + * @id_pool: Pointer to the ID pool structure + * + * This function allocates an ID from the given ID pool. It searches for the + * next available ID in the pool, marks it as allocated, and returns it. + * + * Return: + * A non-negative integer representing the allocated ID on success + * -EINVAL if the id_pool is NULL + * -ENOMEM if no IDs are available in the pool + */ +static int stratix10_allocate_id(struct stratix10_sip_id_pool *id_pool) +{ + int id; + unsigned long tries = 0; + + if (!id_pool) + return -EINVAL; + + if (id_pool->head >= id_pool->size) + return -ENOMEM; + + mutex_lock(&id_pool->lock); + + do { + id_pool->head = find_next_zero_bit(id_pool->id_mask, + id_pool->size, id_pool->head); + if (id_pool->head >= id_pool->size) { + id_pool->head = 0; + tries++; + } + /* cycle through the whole bitmap at least once*/ + } while (tries < 2 && test_bit(id_pool->head, id_pool->id_mask)); + + if (tries >= 2) { + mutex_unlock(&id_pool->lock); + return -ENOMEM; + } + + set_bit(id_pool->head, id_pool->id_mask); + id = id_pool->head; + id_pool->head = (id_pool->head + 1) % id_pool->size; + mutex_unlock(&id_pool->lock); + return id; +} + +/** + * stratix10_deallocate_id - Deallocate an ID in the ID pool + * @id_pool: Pointer to the ID pool structure + * @id: The ID to be deallocated + * + * This function deallocates an ID in the given ID pool. It first checks if the + * ID pool is valid and if the ID is within the valid range. If the ID is not + * set, it returns an error. Otherwise, it clears the ID and returns 0. 
+ * + * Return: + * 0 on success, + * -EINVAL if the ID pool is invalid, the ID is out of range, or the ID is + * not set. + */ +static int stratix10_deallocate_id(struct stratix10_sip_id_pool *id_pool, unsigned long id) +{ + if (!id_pool) + return -EINVAL; + + if (id >= id_pool->size) + return -EINVAL; + + mutex_lock(&id_pool->lock); + if (!test_bit(id, id_pool->id_mask)) { + mutex_unlock(&id_pool->lock); + return -EINVAL; + } + clear_bit(id, id_pool->id_mask); + mutex_unlock(&id_pool->lock); + + return 0; +} /** * svc_pa_to_va() - translate physical address to virtual address @@ -184,11 +525,15 @@ static void *svc_pa_to_va(unsigned long addr) struct stratix10_svc_data_mem *pmem; pr_debug("claim back P-addr=0x%016x\n", (unsigned int)addr); + mutex_lock(&svc_mem_lock); list_for_each_entry(pmem, &svc_data_mem, node) - if (pmem->paddr == addr) + if (pmem->paddr == addr) { + mutex_unlock(&svc_mem_lock); return pmem->vaddr; + } /* physical address is not found */ + mutex_unlock(&svc_mem_lock); return NULL; } @@ -207,6 +552,8 @@ static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl, { struct arm_smccc_res res; unsigned long timeout; + void *buf_claim_addr[4] = {NULL}; + int buf_claim_count = 0; reinit_completion(&ctrl->complete_status); timeout = msecs_to_jiffies(FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS); @@ -218,26 +565,79 @@ static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl, if (res.a0 == INTEL_SIP_SMC_STATUS_OK) { if (!res.a1) { + /* Transaction of 4 blocks are now done */ complete(&ctrl->complete_status); + cb_data->status = BIT(SVC_STATUS_BUFFER_DONE); + cb_data->kaddr1 = buf_claim_addr[0]; + cb_data->kaddr2 = buf_claim_addr[1]; + cb_data->kaddr3 = buf_claim_addr[2]; + cb_data->kaddr4 = buf_claim_addr[3]; + p_data->chan->scl->receive_cb(p_data->chan->scl, + cb_data); break; } - cb_data->status = BIT(SVC_STATUS_BUFFER_DONE); - cb_data->kaddr1 = svc_pa_to_va(res.a1); - cb_data->kaddr2 = (res.a2) ? 
- svc_pa_to_va(res.a2) : NULL; - cb_data->kaddr3 = (res.a3) ? - svc_pa_to_va(res.a3) : NULL; - p_data->chan->scl->receive_cb(p_data->chan->scl, - cb_data); - } else { - pr_debug("%s: secure world busy, polling again\n", - __func__); + if (buf_claim_count < 4) { + buf_claim_addr[buf_claim_count] + = svc_pa_to_va(res.a1); + buf_claim_count++; + } + if ((res.a2) && (buf_claim_count < 4)) { + buf_claim_addr[buf_claim_count] + = svc_pa_to_va(res.a2); + buf_claim_count++; + } + if ((res.a3) && (buf_claim_count < 4)) { + buf_claim_addr[buf_claim_count] + = svc_pa_to_va(res.a3); + buf_claim_count++; + } + } + } while (res.a0 == INTEL_SIP_SMC_STATUS_OK || res.a0 == INTEL_SIP_SMC_STATUS_BUSY || wait_for_completion_timeout(&ctrl->complete_status, timeout)); } +/** + * svc_cmd_poll_status() - poll for status + * @p_data: pointer to service data structure + * @ctrl: pointer to service layer controller + * @res: pointer to store response + * @poll_count: pointer to poll count value + * @poll_interval_in_ms: interval value in milliseconds + * + * Check whether the service at secure world has completed, and then inform the + * response. 
+ */ +static void svc_cmd_poll_status(struct stratix10_svc_data *p_data, + struct stratix10_svc_controller *ctrl, + struct arm_smccc_res *res, + int *poll_count, int poll_interval_in_ms) +{ + unsigned long a0, a1, a2; + + a0 = INTEL_SIP_SMC_FPGA_CONFIG_ISDONE; + a1 = (unsigned long)p_data->paddr; + a2 = (unsigned long)p_data->size; + + if (p_data->command == COMMAND_POLL_SERVICE_STATUS) + a0 = INTEL_SIP_SMC_SERVICE_COMPLETED; + + while (*poll_count) { + ctrl->invoke_fn(a0, a1, a2, 0, 0, 0, 0, 0, res); + if ((res->a0 == INTEL_SIP_SMC_STATUS_OK) || + (res->a0 == INTEL_SIP_SMC_STATUS_ERROR) || + (res->a0 == INTEL_SIP_SMC_STATUS_REJECTED)) + break; + + /* + * request is still in progress, go to sleep then + * poll again + */ + msleep(poll_interval_in_ms); + (*poll_count)--; + } +} + /** * svc_thread_cmd_config_status() - check configuration status * @ctrl: pointer to service layer controller @@ -252,8 +652,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl, struct stratix10_svc_cb_data *cb_data) { struct arm_smccc_res res; - int count_in_sec; - unsigned long a0, a1, a2; + int poll_count; cb_data->kaddr1 = NULL; cb_data->kaddr2 = NULL; @@ -262,34 +661,22 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl, pr_debug("%s: polling config status\n", __func__); - a0 = INTEL_SIP_SMC_FPGA_CONFIG_ISDONE; - a1 = (unsigned long)p_data->paddr; - a2 = (unsigned long)p_data->size; - - if (p_data->command == COMMAND_POLL_SERVICE_STATUS) - a0 = INTEL_SIP_SMC_SERVICE_COMPLETED; - - count_in_sec = FPGA_CONFIG_STATUS_TIMEOUT_SEC; - while (count_in_sec) { - ctrl->invoke_fn(a0, a1, a2, 0, 0, 0, 0, 0, &res); - if ((res.a0 == INTEL_SIP_SMC_STATUS_OK) || - (res.a0 == INTEL_SIP_SMC_STATUS_ERROR) || - (res.a0 == INTEL_SIP_SMC_STATUS_REJECTED)) - break; - - /* - * request is still in progress, wait one second then - * poll again - */ - msleep(1000); - count_in_sec--; + poll_count = FPGA_CONFIG_POLL_COUNT_FAST; + 
svc_cmd_poll_status(p_data, ctrl, &res, &poll_count, + FPGA_CONFIG_POLL_INTERVAL_MS_FAST); + /* Increased poll interval if response is still not ready */ + if (!poll_count) { + poll_count = FPGA_CONFIG_POLL_COUNT_SLOW; + svc_cmd_poll_status(p_data, ctrl, &res, &poll_count, + FPGA_CONFIG_POLL_INTERVAL_MS_SLOW); } - if (!count_in_sec) { + if (!poll_count) { pr_err("%s: poll status timeout\n", __func__); cb_data->status = BIT(SVC_STATUS_BUSY); } else if (res.a0 == INTEL_SIP_SMC_STATUS_OK) { cb_data->status = BIT(SVC_STATUS_COMPLETED); + cb_data->kaddr1 = (res.a1) ? &res.a1 : NULL; cb_data->kaddr2 = (res.a2) ? svc_pa_to_va(res.a2) : NULL; cb_data->kaddr3 = (res.a3) ? &res.a3 : NULL; @@ -329,6 +716,22 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data, case COMMAND_FCS_SEND_CERTIFICATE: case COMMAND_FCS_DATA_ENCRYPTION: case COMMAND_FCS_DATA_DECRYPTION: + case COMMAND_FCS_GET_PROVISION_DATA: + case COMMAND_FCS_PSGSIGMA_TEARDOWN: + case COMMAND_FCS_COUNTER_SET_PREAUTHORIZED: + case COMMAND_FCS_ATTESTATION_CERTIFICATE_RELOAD: + case COMMAND_FCS_CRYPTO_CLOSE_SESSION: + case COMMAND_FCS_CRYPTO_IMPORT_KEY: + case COMMAND_FCS_CRYPTO_REMOVE_KEY: + case COMMAND_FCS_CRYPTO_AES_CRYPT_INIT: + case COMMAND_FCS_CRYPTO_GET_DIGEST_INIT: + case COMMAND_FCS_CRYPTO_MAC_VERIFY_INIT: + case COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_INIT: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_INIT: + case COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_INIT: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_INIT: + case COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_INIT: + case COMMAND_FCS_CRYPTO_ECDH_REQUEST_INIT: cb_data->status = BIT(SVC_STATUS_OK); break; case COMMAND_RECONFIG_DATA_SUBMIT: @@ -341,13 +744,23 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data, case COMMAND_RSU_MAX_RETRY: case COMMAND_RSU_DCMF_STATUS: case COMMAND_FIRMWARE_VERSION: + case COMMAND_HWMON_READTEMP: + case COMMAND_HWMON_READVOLT: + case COMMAND_READ_SECURE_REG: cb_data->status = 
BIT(SVC_STATUS_OK); cb_data->kaddr1 = &res.a1; break; case COMMAND_SMC_SVC_VERSION: + case COMMAND_WRITE_TO_SECURE_REG: + cb_data->status = BIT(SVC_STATUS_OK); + cb_data->kaddr1 = &res.a1; + cb_data->kaddr2 = &res.a2; + break; + case COMMAND_SMC_ATF_BUILD_VER: cb_data->status = BIT(SVC_STATUS_OK); cb_data->kaddr1 = &res.a1; cb_data->kaddr2 = &res.a2; + cb_data->kaddr3 = &res.a3; break; case COMMAND_RSU_DCMF_VERSION: cb_data->status = BIT(SVC_STATUS_OK); @@ -355,8 +768,10 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data, cb_data->kaddr2 = &res.a2; break; case COMMAND_FCS_RANDOM_NUMBER_GEN: - case COMMAND_FCS_GET_PROVISION_DATA: case COMMAND_POLL_SERVICE_STATUS: + case COMMAND_POLL_SERVICE_STATUS_ASYNC: + case COMMAND_FCS_GET_ROM_PATCH_SHA384: + case COMMAND_FCS_SDOS_DATA_EXT: cb_data->status = BIT(SVC_STATUS_OK); cb_data->kaddr1 = &res.a1; cb_data->kaddr2 = svc_pa_to_va(res.a2); @@ -369,13 +784,63 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data, res.a2 = res.a2 * BYTE_TO_WORD_SIZE; cb_data->kaddr2 = &res.a2; break; + case COMMAND_FCS_GET_CHIP_ID: + cb_data->status = BIT(SVC_STATUS_OK); + cb_data->kaddr2 = &res.a2; + cb_data->kaddr3 = &res.a3; + break; + case COMMAND_FCS_ATTESTATION_SUBKEY: + case COMMAND_FCS_ATTESTATION_MEASUREMENTS: + case COMMAND_FCS_ATTESTATION_CERTIFICATE: + case COMMAND_FCS_CRYPTO_EXPORT_KEY: + case COMMAND_FCS_CRYPTO_GET_KEY_INFO: + case COMMAND_FCS_CRYPTO_AES_CRYPT_UPDATE: + case COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE: + case COMMAND_FCS_CRYPTO_AES_CRYPT_UPDATE_SMMU: + case COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE_SMMU: + case COMMAND_FCS_CRYPTO_GET_DIGEST_UPDATE: + case COMMAND_FCS_CRYPTO_GET_DIGEST_FINALIZE: + case COMMAND_FCS_CRYPTO_GET_DIGEST_UPDATE_SMMU: + case COMMAND_FCS_CRYPTO_GET_DIGEST_FINALIZE_SMMU: + case COMMAND_FCS_CRYPTO_MAC_VERIFY_UPDATE: + case COMMAND_FCS_CRYPTO_MAC_VERIFY_FINALIZE: + case COMMAND_FCS_CRYPTO_MAC_VERIFY_UPDATE_SMMU: + case 
COMMAND_FCS_CRYPTO_MAC_VERIFY_FINALIZE_SMMU: + case COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_FINALIZE: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE_SMMU: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE_SMMU: + case COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_FINALIZE: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_UPDATE: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_FINALIZE: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_UPDATE_SMMU: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_FINALIZE_SMMU: + case COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_FINALIZE: + case COMMAND_FCS_CRYPTO_ECDH_REQUEST_FINALIZE: + case COMMAND_FCS_RANDOM_NUMBER_GEN_EXT: + cb_data->status = BIT(SVC_STATUS_OK); + cb_data->kaddr2 = svc_pa_to_va(res.a2); + cb_data->kaddr3 = &res.a3; + break; + case COMMAND_FCS_CRYPTO_OPEN_SESSION: + cb_data->status = BIT(SVC_STATUS_OK); + cb_data->kaddr2 = &res.a2; + break; + case COMMAND_RSU_GET_DEVICE_INFO: + cb_data->status = BIT(SVC_STATUS_OK); + cb_data->kaddr1 = &res; + cb_data->kaddr2 = NULL; + cb_data->kaddr3 = NULL; + break; default: pr_warn("it shouldn't happen\n"); break; } pr_debug("%s: call receive_cb\n", __func__); - p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data); + if (p_data->chan->scl->receive_cb) + p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data); } /** @@ -390,13 +855,14 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data, */ static int svc_normal_to_secure_thread(void *data) { - struct stratix10_svc_controller - *ctrl = (struct stratix10_svc_controller *)data; - struct stratix10_svc_data *pdata; - struct stratix10_svc_cb_data *cbdata; + struct stratix10_svc_chan *chan = (struct stratix10_svc_chan *)data; + struct stratix10_svc_controller *ctrl = chan->ctrl; + struct stratix10_svc_data *pdata = NULL; + struct stratix10_svc_cb_data *cbdata = NULL; struct arm_smccc_res res; unsigned long 
a0, a1, a2, a3, a4, a5, a6, a7; int ret_fifo = 0; + bool sdm_lock_owned = false; pdata = kmalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) @@ -418,12 +884,13 @@ static int svc_normal_to_secure_thread(void *data) a6 = 0; a7 = 0; - pr_debug("smc_hvc_shm_thread is running\n"); + pr_debug("%s: %s: Thread is running!\n", __func__, chan->name); while (!kthread_should_stop()) { - ret_fifo = kfifo_out_spinlocked(&ctrl->svc_fifo, - pdata, sizeof(*pdata), - &ctrl->svc_fifo_lock); + + ret_fifo = kfifo_out_spinlocked(&chan->svc_fifo, + pdata, sizeof(*pdata), + &chan->svc_fifo_lock); if (!ret_fifo) continue; @@ -432,6 +899,16 @@ static int svc_normal_to_secure_thread(void *data) (unsigned int)pdata->paddr, pdata->command, (unsigned int)pdata->size); + /* SDM can only process one command at a time */ + if (sdm_lock_owned == false) { + /* Must not do mutex re-lock */ + pr_debug("%s: %s: Thread is waiting for mutex!\n", + __func__, chan->name); + mutex_lock(ctrl->sdm_lock); + } + + sdm_lock_owned = true; + switch (pdata->command) { case COMMAND_RECONFIG_DATA_CLAIM: svc_thread_cmd_data_claim(ctrl, pdata, cbdata); @@ -487,6 +964,16 @@ static int svc_normal_to_secure_thread(void *data) a1 = 0; a2 = 0; break; + case COMMAND_RSU_DCMF_STATUS: + a0 = INTEL_SIP_SMC_RSU_DCMF_STATUS; + a1 = 0; + a2 = 0; + break; + case COMMAND_RSU_GET_DEVICE_INFO: + a0 = INTEL_SIP_SMC_RSU_GET_DEVICE_INFO; + a1 = 0; + a2 = 0; + break; /* for FCS */ case COMMAND_FCS_DATA_ENCRYPTION: @@ -520,77 +1007,494 @@ static int svc_normal_to_secure_thread(void *data) a1 = (unsigned long)pdata->paddr; a2 = (unsigned long)pdata->size; break; + case COMMAND_FCS_COUNTER_SET_PREAUTHORIZED: + a0 = INTEL_SIP_SMC_FCS_COUNTER_SET_PREAUTHORIZED; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = pdata->arg[2]; + break; case COMMAND_FCS_GET_PROVISION_DATA: a0 = INTEL_SIP_SMC_FCS_GET_PROVISION_DATA; - a1 = (unsigned long)pdata->paddr; + a1 = 0; a2 = 0; break; - - /* for polling */ - case COMMAND_POLL_SERVICE_STATUS: - a0 = 
INTEL_SIP_SMC_SERVICE_COMPLETED; + case COMMAND_FCS_PSGSIGMA_TEARDOWN: + a0 = INTEL_SIP_SMC_FCS_PSGSIGMA_TEARDOWN; + a1 = pdata->arg[0]; + a2 = 0; + break; + case COMMAND_FCS_GET_CHIP_ID: + a0 = INTEL_SIP_SMC_FCS_CHIP_ID; + a1 = 0; + a2 = 0; + break; + case COMMAND_FCS_ATTESTATION_SUBKEY: + a0 = INTEL_SIP_SMC_FCS_ATTESTATION_SUBKEY; a1 = (unsigned long)pdata->paddr; a2 = (unsigned long)pdata->size; + a3 = (unsigned long)pdata->paddr_output; + a4 = (unsigned long)pdata->size_output; break; - case COMMAND_RSU_DCMF_STATUS: - a0 = INTEL_SIP_SMC_RSU_DCMF_STATUS; - a1 = 0; + case COMMAND_FCS_ATTESTATION_MEASUREMENTS: + a0 = INTEL_SIP_SMC_FCS_ATTESTATION_MEASUREMENTS; + a1 = (unsigned long)pdata->paddr; + a2 = (unsigned long)pdata->size; + a3 = (unsigned long)pdata->paddr_output; + a4 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_ATTESTATION_CERTIFICATE: + a0 = INTEL_SIP_SMC_FCS_GET_ATTESTATION_CERTIFICATE; + a1 = pdata->arg[0]; + a2 = (unsigned long)pdata->paddr_output; + a3 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_ATTESTATION_CERTIFICATE_RELOAD: + a0 = INTEL_SIP_SMC_FCS_CREATE_CERTIFICATE_ON_RELOAD; + a1 = pdata->arg[0]; a2 = 0; break; - case COMMAND_SMC_SVC_VERSION: - a0 = INTEL_SIP_SMC_SVC_VERSION; + /* for crypto service */ + case COMMAND_FCS_CRYPTO_OPEN_SESSION: + a0 = INTEL_SIP_SMC_FCS_OPEN_CRYPTO_SERVICE_SESSION; a1 = 0; a2 = 0; break; - case COMMAND_MBOX_SEND_CMD: - a0 = INTEL_SIP_SMC_MBOX_SEND_CMD; + case COMMAND_FCS_CRYPTO_CLOSE_SESSION: + a0 = INTEL_SIP_SMC_FCS_CLOSE_CRYPTO_SERVICE_SESSION; a1 = pdata->arg[0]; - a2 = (unsigned long)pdata->paddr; - a3 = (unsigned long)pdata->size / BYTE_TO_WORD_SIZE; - a4 = pdata->arg[1]; - a5 = (unsigned long)pdata->paddr_output; - a6 = (unsigned long)pdata->size_output / BYTE_TO_WORD_SIZE; - break; - default: - pr_warn("it shouldn't happen\n"); + a2 = 0; break; - } - pr_debug("%s: before SMC call -- a0=0x%016x a1=0x%016x", - __func__, - (unsigned int)a0, - (unsigned int)a1); - 
pr_debug(" a2=0x%016x\n", (unsigned int)a2); - pr_debug(" a3=0x%016x\n", (unsigned int)a3); - pr_debug(" a4=0x%016x\n", (unsigned int)a4); - pr_debug(" a5=0x%016x\n", (unsigned int)a5); - ctrl->invoke_fn(a0, a1, a2, a3, a4, a5, a6, a7, &res); - - pr_debug("%s: after SMC call -- res.a0=0x%016x", - __func__, (unsigned int)res.a0); - pr_debug(" res.a1=0x%016x, res.a2=0x%016x", - (unsigned int)res.a1, (unsigned int)res.a2); - pr_debug(" res.a3=0x%016x\n", (unsigned int)res.a3); - - if (pdata->command == COMMAND_RSU_STATUS) { - if (res.a0 == INTEL_SIP_SMC_RSU_ERROR) - cbdata->status = BIT(SVC_STATUS_ERROR); - else - cbdata->status = BIT(SVC_STATUS_OK); - - cbdata->kaddr1 = &res; - cbdata->kaddr2 = NULL; - cbdata->kaddr3 = NULL; - pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata); - continue; - } - switch (res.a0) { - case INTEL_SIP_SMC_STATUS_OK: - svc_thread_recv_status_ok(pdata, cbdata, res); + /* for service key management */ + case COMMAND_FCS_CRYPTO_IMPORT_KEY: + a0 = INTEL_SIP_SMC_FCS_IMPORT_CRYPTO_SERVICE_KEY; + a1 = (unsigned long)pdata->paddr; + a2 = (unsigned long)pdata->size; break; - case INTEL_SIP_SMC_STATUS_BUSY: - switch (pdata->command) { - case COMMAND_RECONFIG_DATA_SUBMIT: + case COMMAND_FCS_CRYPTO_EXPORT_KEY: + a0 = INTEL_SIP_SMC_FCS_EXPORT_CRYPTO_SERVICE_KEY; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr_output; + a4 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_REMOVE_KEY: + a0 = INTEL_SIP_SMC_FCS_REMOVE_CRYPTO_SERVICE_KEY; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + break; + case COMMAND_FCS_CRYPTO_GET_KEY_INFO: + a0 = INTEL_SIP_SMC_FCS_GET_CRYPTO_SERVICE_KEY_INFO; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr_output; + a4 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_AES_CRYPT_INIT: + a0 = INTEL_SIP_SMC_FCS_AES_CRYPTO_INIT; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = pdata->arg[2]; + a4 = (unsigned long)pdata->paddr; 
+ a5 = (unsigned long)pdata->size; + break; + case COMMAND_FCS_CRYPTO_AES_CRYPT_UPDATE: + a0 = INTEL_SIP_SMC_FCS_AES_CRYPTO_UPDATE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE: + a0 = INTEL_SIP_SMC_FCS_AES_CRYPTO_FINALIZE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_AES_CRYPT_UPDATE_SMMU: + a0 = INTEL_SIP_SMC_FCS_AES_CRYPTO_UPDATE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE_SMMU: + a0 = INTEL_SIP_SMC_FCS_AES_CRYPTO_FINALIZE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_GET_DIGEST_INIT: + a0 = INTEL_SIP_SMC_FCS_GET_DIGEST_INIT; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = pdata->arg[2]; + a4 = pdata->arg[3]; + a5 = pdata->arg[4]; + break; + case COMMAND_FCS_CRYPTO_GET_DIGEST_UPDATE: + a0 = INTEL_SIP_SMC_FCS_GET_DIGEST_UPDATE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_GET_DIGEST_FINALIZE: + a0 = INTEL_SIP_SMC_FCS_GET_DIGEST_FINALIZE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned 
long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_GET_DIGEST_UPDATE_SMMU: + a0 = INTEL_SIP_SMC_FCS_GET_DIGEST_SMMU_UPDATE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_GET_DIGEST_FINALIZE_SMMU: + a0 = INTEL_SIP_SMC_FCS_GET_DIGEST_SMMU_FINALIZE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_MAC_VERIFY_INIT: + a0 = INTEL_SIP_SMC_FCS_MAC_VERIFY_INIT; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = pdata->arg[2]; + a4 = pdata->arg[3]; + a5 = pdata->arg[4]; + break; + case COMMAND_FCS_CRYPTO_MAC_VERIFY_UPDATE: + a0 = INTEL_SIP_SMC_FCS_MAC_VERIFY_UPDATE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + a7 = pdata->arg[2]; + break; + case COMMAND_FCS_CRYPTO_MAC_VERIFY_FINALIZE: + a0 = INTEL_SIP_SMC_FCS_MAC_VERIFY_FINALIZE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + a7 = pdata->arg[2]; + break; + case COMMAND_FCS_CRYPTO_MAC_VERIFY_UPDATE_SMMU: + a0 = INTEL_SIP_SMC_FCS_MAC_VERIFY_SMMU_UPDATE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + a7 = pdata->arg[2]; + break; + case COMMAND_FCS_CRYPTO_MAC_VERIFY_FINALIZE_SMMU: + a0 = INTEL_SIP_SMC_FCS_MAC_VERIFY_SMMU_FINALIZE; + a1 = pdata->arg[0]; 
+ a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + a7 = pdata->arg[2]; + break; + case COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_INIT: + a0 = INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNING_INIT; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = pdata->arg[2]; + a4 = pdata->arg[3]; + a5 = pdata->arg[4]; + break; + case COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_FINALIZE: + a0 = INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNING_FINALIZE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_INIT: + a0 = INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_INIT; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = pdata->arg[2]; + a4 = pdata->arg[3]; + a5 = pdata->arg[4]; + break; + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE: + a0 = INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_UPDATE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE: + a0 = INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_FINALIZE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE_SMMU: + a0 = INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_SMMU_UPDATE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + break; + case 
COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE_SMMU: + a0 = INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_SMMU_FINALIZE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_INIT: + a0 = INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNATURE_VERIFY_INIT; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = pdata->arg[2]; + a4 = pdata->arg[3]; + a5 = pdata->arg[4]; + break; + case COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_FINALIZE: + a0 = INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNATURE_VERIFY_FINALIZE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_INIT: + a0 = INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_INIT; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = pdata->arg[2]; + a4 = pdata->arg[3]; + a5 = pdata->arg[4]; + break; + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_UPDATE: + a0 = INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_UPDATE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + a7 = pdata->arg[2]; + break; + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_FINALIZE: + a0 = INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_FINALIZE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + a7 = pdata->arg[2]; + break; + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_UPDATE_SMMU: + a0 = INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_SMMU_UPDATE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = 
(unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + a7 = pdata->arg[2]; + break; + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_FINALIZE_SMMU: + a0 = INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_SMMU_FINALIZE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + a7 = pdata->arg[2]; + break; + case COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_INIT: + a0 = INTEL_SIP_SMC_FCS_ECDSA_GET_PUBLIC_KEY_INIT; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = pdata->arg[2]; + a4 = pdata->arg[3]; + a5 = pdata->arg[4]; + break; + case COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_FINALIZE: + a0 = INTEL_SIP_SMC_FCS_ECDSA_GET_PUBLIC_KEY_FINALIZE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr_output; + a4 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_CRYPTO_ECDH_REQUEST_INIT: + a0 = INTEL_SIP_SMC_FCS_ECDH_INIT; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = pdata->arg[2]; + a4 = pdata->arg[3]; + a5 = pdata->arg[4]; + break; + case COMMAND_FCS_CRYPTO_ECDH_REQUEST_FINALIZE: + a0 = INTEL_SIP_SMC_FCS_ECDH_FINALIZE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = (unsigned long)pdata->paddr; + a4 = (unsigned long)pdata->size; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_RANDOM_NUMBER_GEN_EXT: + a0 = INTEL_SIP_SMC_FCS_RANDOM_NUMBER_EXT; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = pdata->arg[2]; + break; + case COMMAND_FCS_SDOS_DATA_EXT: + a0 = INTEL_SIP_SMC_FCS_CRYPTION_EXT; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + a3 = pdata->arg[2]; + a4 = (unsigned long)pdata->paddr; + a5 = (unsigned long)pdata->size; + a6 = (unsigned long)pdata->paddr_output; + a7 = (unsigned long)pdata->size_output; + break; + /* for HWMON */ 
+ case COMMAND_HWMON_READTEMP: + a0 = INTEL_SIP_SMC_HWMON_READTEMP; + a1 = pdata->arg[0]; + a2 = 0; + break; + case COMMAND_HWMON_READVOLT: + a0 = INTEL_SIP_SMC_HWMON_READVOLT; + a1 = pdata->arg[0]; + a2 = 0; + break; + /* for polling */ + case COMMAND_POLL_SERVICE_STATUS: + case COMMAND_POLL_SERVICE_STATUS_ASYNC: + a0 = INTEL_SIP_SMC_SERVICE_COMPLETED; + a1 = (unsigned long)pdata->paddr; + a2 = (unsigned long)pdata->size; + a3 = pdata->arg[0]; + break; + case COMMAND_SMC_SVC_VERSION: + a0 = INTEL_SIP_SMC_SVC_VERSION; + a1 = 0; + a2 = 0; + break; + case COMMAND_SMC_ATF_BUILD_VER: + a0 = INTEL_SIP_SMC_ATF_BUILD_VER; + a1 = 0; + a2 = 0; + a3 = 0; + break; + case COMMAND_FCS_GET_ROM_PATCH_SHA384: + a0 = INTEL_SIP_SMC_FCS_GET_ROM_PATCH_SHA384; + a1 = (unsigned long)pdata->paddr; + a2 = 0; + break; + case COMMAND_MBOX_SEND_CMD: + a0 = INTEL_SIP_SMC_MBOX_SEND_CMD; + a1 = pdata->arg[0]; + a2 = (unsigned long)pdata->paddr; + a3 = (unsigned long)pdata->size / BYTE_TO_WORD_SIZE; + a4 = pdata->arg[1]; + a5 = (unsigned long)pdata->paddr_output; + a6 = (unsigned long)pdata->size_output / BYTE_TO_WORD_SIZE; + break; + case COMMAND_WRITE_TO_SECURE_REG: + a0 = INTEL_SIP_SMC_REG_WRITE; + a1 = pdata->arg[0]; + a2 = pdata->arg[1]; + break; + case COMMAND_READ_SECURE_REG: + a0 = INTEL_SIP_SMC_REG_READ; + a1 = pdata->arg[0]; + break; + default: + pr_warn("it shouldn't happen\n"); + break; + } + pr_debug("%s: %s: before SMC call -- a0=0x%016x a1=0x%016x", + __func__, chan->name, + (unsigned int)a0, + (unsigned int)a1); + pr_debug(" a2=0x%016x\n", (unsigned int)a2); + pr_debug(" a3=0x%016x\n", (unsigned int)a3); + pr_debug(" a4=0x%016x\n", (unsigned int)a4); + pr_debug(" a5=0x%016x\n", (unsigned int)a5); + ctrl->invoke_fn(a0, a1, a2, a3, a4, a5, a6, a7, &res); + + pr_debug("%s: %s: after SMC call -- res.a0=0x%016x", + __func__, chan->name, (unsigned int)res.a0); + pr_debug(" res.a1=0x%016x, res.a2=0x%016x", + (unsigned int)res.a1, (unsigned int)res.a2); + pr_debug(" res.a3=0x%016x\n", 
(unsigned int)res.a3); + + if (pdata->command == COMMAND_RSU_STATUS) { + if (res.a0 == INTEL_SIP_SMC_RSU_ERROR) + cbdata->status = BIT(SVC_STATUS_ERROR); + else + cbdata->status = BIT(SVC_STATUS_OK); + + cbdata->kaddr1 = &res; + cbdata->kaddr2 = NULL; + cbdata->kaddr3 = NULL; + pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata); + mutex_unlock(ctrl->sdm_lock); + sdm_lock_owned = false; + continue; + } + + switch (res.a0) { + case INTEL_SIP_SMC_STATUS_OK: + svc_thread_recv_status_ok(pdata, cbdata, res); + break; + case INTEL_SIP_SMC_STATUS_BUSY: + switch (pdata->command) { + case COMMAND_RECONFIG_DATA_SUBMIT: svc_thread_cmd_data_claim(ctrl, pdata, cbdata); break; @@ -599,6 +1503,14 @@ static int svc_normal_to_secure_thread(void *data) svc_thread_cmd_config_status(ctrl, pdata, cbdata); break; + case COMMAND_POLL_SERVICE_STATUS_ASYNC: + cbdata->status = BIT(SVC_STATUS_BUSY); + cbdata->kaddr1 = NULL; + cbdata->kaddr2 = NULL; + cbdata->kaddr3 = NULL; + pdata->chan->scl->receive_cb(pdata->chan->scl, + cbdata); + break; default: pr_warn("it shouldn't happen\n"); break; @@ -615,6 +1527,55 @@ static int svc_normal_to_secure_thread(void *data) case COMMAND_FCS_DATA_DECRYPTION: case COMMAND_FCS_RANDOM_NUMBER_GEN: case COMMAND_MBOX_SEND_CMD: + case COMMAND_FCS_PSGSIGMA_TEARDOWN: + case COMMAND_FCS_GET_CHIP_ID: + case COMMAND_FCS_ATTESTATION_SUBKEY: + case COMMAND_FCS_ATTESTATION_MEASUREMENTS: + case COMMAND_FCS_COUNTER_SET_PREAUTHORIZED: + case COMMAND_FCS_ATTESTATION_CERTIFICATE: + case COMMAND_FCS_ATTESTATION_CERTIFICATE_RELOAD: + case COMMAND_FCS_GET_ROM_PATCH_SHA384: + case COMMAND_FCS_CRYPTO_OPEN_SESSION: + case COMMAND_FCS_CRYPTO_CLOSE_SESSION: + case COMMAND_FCS_CRYPTO_IMPORT_KEY: + case COMMAND_FCS_CRYPTO_EXPORT_KEY: + case COMMAND_FCS_CRYPTO_REMOVE_KEY: + case COMMAND_FCS_CRYPTO_GET_KEY_INFO: + case COMMAND_FCS_CRYPTO_AES_CRYPT_INIT: + case COMMAND_FCS_CRYPTO_AES_CRYPT_UPDATE: + case COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE: + case 
COMMAND_FCS_CRYPTO_AES_CRYPT_UPDATE_SMMU: + case COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE_SMMU: + case COMMAND_FCS_CRYPTO_GET_DIGEST_INIT: + case COMMAND_FCS_CRYPTO_GET_DIGEST_UPDATE: + case COMMAND_FCS_CRYPTO_GET_DIGEST_FINALIZE: + case COMMAND_FCS_CRYPTO_GET_DIGEST_UPDATE_SMMU: + case COMMAND_FCS_CRYPTO_GET_DIGEST_FINALIZE_SMMU: + case COMMAND_FCS_CRYPTO_MAC_VERIFY_INIT: + case COMMAND_FCS_CRYPTO_MAC_VERIFY_UPDATE: + case COMMAND_FCS_CRYPTO_MAC_VERIFY_FINALIZE: + case COMMAND_FCS_CRYPTO_MAC_VERIFY_UPDATE_SMMU: + case COMMAND_FCS_CRYPTO_MAC_VERIFY_FINALIZE_SMMU: + case COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_INIT: + case COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_FINALIZE: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_INIT: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE_SMMU: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE_SMMU: + case COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_INIT: + case COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_FINALIZE: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_INIT: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_UPDATE: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_FINALIZE: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_UPDATE_SMMU: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_FINALIZE_SMMU: + case COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_INIT: + case COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_FINALIZE: + case COMMAND_FCS_CRYPTO_ECDH_REQUEST_INIT: + case COMMAND_FCS_CRYPTO_ECDH_REQUEST_FINALIZE: + case COMMAND_FCS_RANDOM_NUMBER_GEN_EXT: + case COMMAND_FCS_SDOS_DATA_EXT: cbdata->status = BIT(SVC_STATUS_INVALID_PARAM); cbdata->kaddr1 = NULL; cbdata->kaddr2 = NULL; @@ -634,28 +1595,37 @@ static int svc_normal_to_secure_thread(void *data) cbdata->kaddr3 = (res.a3) ? 
&res.a3 : NULL; pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata); break; - default: - pr_warn("Secure firmware doesn't support...\n"); - - /* - * be compatible with older version firmware which - * doesn't support newer RSU commands - */ - if ((pdata->command != COMMAND_RSU_UPDATE) && - (pdata->command != COMMAND_RSU_STATUS)) { - cbdata->status = - BIT(SVC_STATUS_NO_SUPPORT); + case INTEL_SIP_SMC_STATUS_NO_RESPONSE: + switch (pdata->command) { + case COMMAND_POLL_SERVICE_STATUS_ASYNC: + cbdata->status = BIT(SVC_STATUS_NO_RESPONSE); cbdata->kaddr1 = NULL; cbdata->kaddr2 = NULL; cbdata->kaddr3 = NULL; - pdata->chan->scl->receive_cb( - pdata->chan->scl, cbdata); + pdata->chan->scl->receive_cb(pdata->chan->scl, + cbdata); + break; + default: + pr_warn("it shouldn't receive no response\n"); + break; } break; + default: + pr_warn("Secure firmware doesn't support...\n"); + + cbdata->status = BIT(SVC_STATUS_NO_SUPPORT); + cbdata->kaddr1 = NULL; + cbdata->kaddr2 = NULL; + cbdata->kaddr3 = NULL; + if (pdata->chan->scl->receive_cb) + pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata); + break; } } - + pr_debug("%s: %s: Exit thread\n", __func__, chan->name); + if (sdm_lock_owned == true) + mutex_unlock(ctrl->sdm_lock); kfree(cbdata); kfree(pdata); @@ -923,91 +1893,1390 @@ struct stratix10_svc_chan *stratix10_svc_request_channel_byname( EXPORT_SYMBOL_GPL(stratix10_svc_request_channel_byname); /** - * stratix10_svc_free_channel() - free service channel - * @chan: service channel to be freed + * stratix10_svc_add_async_client - Add an asynchronous client to the Stratix10 service channel. + * @chan: Pointer to the Stratix10 service channel structure. + * @use_unique_clientid: Boolean flag indicating whether to use a unique client ID. * - * This function is used by service client to free a service channel. + * This function adds an asynchronous client to the specified Stratix10 service channel. 
+ * If the `use_unique_clientid` flag is set to true, a unique client ID is allocated for + * the asynchronous channel. Otherwise, a common asynchronous channel is used. + * + * Return: 0 on success, or a negative error code on failure: + * -EINVAL if the channel is NULL or the async controller is not initialized. + * -EALREADY if the async channel is already allocated. + * -ENOMEM if memory allocation fails. + * Other negative values if ID allocation fails. */ -void stratix10_svc_free_channel(struct stratix10_svc_chan *chan) +int stratix10_svc_add_async_client(struct stratix10_svc_chan *chan, + bool use_unique_clientid) { - unsigned long flag; + int ret = 0; + struct stratix10_async_chan *achan; - spin_lock_irqsave(&chan->lock, flag); - chan->scl = NULL; - chan->ctrl->num_active_client--; - module_put(chan->ctrl->dev->driver->owner); - spin_unlock_irqrestore(&chan->lock, flag); + if (!chan) + return -EINVAL; + + struct stratix10_svc_controller *ctrl = chan->ctrl; + struct stratix10_async_ctrl *actrl = &ctrl->actrl; + + if (!actrl->initialized) { + dev_err(ctrl->dev, "Async controller not initialized\n"); + return -EINVAL; + } + + if (chan->async_chan) { + dev_err(ctrl->dev, "async channel already allocated\n"); + return -EALREADY; + } + + if (use_unique_clientid) { + achan = kzalloc(sizeof(*achan), GFP_KERNEL); + if (!achan) + return -ENOMEM; + + achan->job_id_pool = stratix10_id_pool_create(MAX_SDM_JOB_IDS); + if (!achan->job_id_pool) { + dev_err(ctrl->dev, "Failed to create job id pool\n"); + kfree(achan); + return -ENOMEM; + } + + ret = stratix10_allocate_id(actrl->async_id_pool); + if (ret < 0) { + dev_err(ctrl->dev, + "Failed to allocate async client id\n"); + stratix10_id_pool_destroy(achan->job_id_pool); + kfree(achan); + return ret; + } + achan->async_client_id = ret; + chan->async_chan = achan; + } else { + if (atomic_read(&actrl->common_achan_refcount) == 0) { + achan = kzalloc(sizeof(*achan), GFP_KERNEL); + if (!achan) + return -ENOMEM; + + 
achan->job_id_pool = + stratix10_id_pool_create(MAX_SDM_JOB_IDS); + if (!achan->job_id_pool) { + dev_err(ctrl->dev, + "Failed to create job id pool\n"); + kfree(achan); + return -ENOMEM; + } + + ret = stratix10_allocate_id(actrl->async_id_pool); + if (ret < 0) { + dev_err(ctrl->dev, + "Failed to allocate async client id\n"); + stratix10_id_pool_destroy(achan->job_id_pool); + kfree(achan); + return ret; + } + achan->async_client_id = ret; + actrl->common_async_chan = achan; + dev_info(ctrl->dev, + "Common async channel allocated with id %ld\n", + achan->async_client_id); + } + chan->async_chan = actrl->common_async_chan; + atomic_inc(&actrl->common_achan_refcount); + } + + return 0; } -EXPORT_SYMBOL_GPL(stratix10_svc_free_channel); +EXPORT_SYMBOL_GPL(stratix10_svc_add_async_client); /** - * stratix10_svc_send() - send a message data to the remote - * @chan: service channel assigned to the client - * @msg: message data to be sent, in the format of - * "struct stratix10_svc_client_msg" + * stratix10_svc_remove_async_client - Remove an asynchronous client from + * the Stratix10 service channel. + * @chan: Pointer to the Stratix10 service channel structure. * - * This function is used by service client to add a message to the service - * layer driver's queue for being sent to the secure world. + * This function removes an asynchronous client associated with the given service channel. + * It checks if the channel and the asynchronous channel are valid, and then proceeds to + * decrement the reference count for the common asynchronous channel if applicable. If the + * reference count reaches zero, it destroys the job ID pool and deallocates the asynchronous + * client ID. For non-common asynchronous channels, it directly destroys the job ID pool, + * deallocates the asynchronous client ID, and frees the memory allocated for the asynchronous + * channel. * - * Return: 0 for success, -ENOMEM or -ENOBUFS on error. 
+ * Return: 0 on success, -EINVAL if the channel or asynchronous channel is invalid. */ -int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg) +int stratix10_svc_remove_async_client(struct stratix10_svc_chan *chan) { - struct stratix10_svc_client_msg - *p_msg = (struct stratix10_svc_client_msg *)msg; - struct stratix10_svc_data_mem *p_mem; - struct stratix10_svc_data *p_data; - int ret = 0; - unsigned int cpu = 0; + if (!chan) + return -EINVAL; - p_data = kzalloc(sizeof(*p_data), GFP_KERNEL); - if (!p_data) - return -ENOMEM; + struct stratix10_svc_controller *ctrl = chan->ctrl; + struct stratix10_async_ctrl *actrl = &ctrl->actrl; + struct stratix10_async_chan *achan = chan->async_chan; - /* first client will create kernel thread */ - if (!chan->ctrl->task) { - chan->ctrl->task = - kthread_create_on_node(svc_normal_to_secure_thread, - (void *)chan->ctrl, - cpu_to_node(cpu), - "svc_smc_hvc_thread"); - if (IS_ERR(chan->ctrl->task)) { - dev_err(chan->ctrl->dev, - "failed to create svc_smc_hvc_thread\n"); - kfree(p_data); - return -EINVAL; - } - kthread_bind(chan->ctrl->task, cpu); - wake_up_process(chan->ctrl->task); + if (!achan) { + dev_err(ctrl->dev, "async channel not allocated\n"); + return -EINVAL; } - pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__, - p_msg->payload, p_msg->command, - (unsigned int)p_msg->payload_length); - - if (list_empty(&svc_data_mem)) { + if (achan == actrl->common_async_chan) { + atomic_dec(&actrl->common_achan_refcount); + if (atomic_read(&actrl->common_achan_refcount) == 0) { + stratix10_id_pool_destroy(achan->job_id_pool); + stratix10_deallocate_id(actrl->async_id_pool, achan->async_client_id); + } + } else { + stratix10_id_pool_destroy(achan->job_id_pool); + stratix10_deallocate_id(actrl->async_id_pool, achan->async_client_id); + kfree(achan); + } + chan->async_chan = NULL; + + return 0; +} +EXPORT_SYMBOL_GPL(stratix10_svc_remove_async_client); + +static inline struct stratix10_svc_data_mem 
*stratix10_get_memobj(void *vaddr) +{ + struct stratix10_svc_data_mem *pmem = NULL; + + if (!vaddr) + return NULL; + + mutex_lock(&svc_mem_lock); + list_for_each_entry(pmem, &svc_data_mem, node) + if (pmem->vaddr == vaddr) { + mutex_unlock(&svc_mem_lock); + return pmem; + } + mutex_unlock(&svc_mem_lock); + return NULL; +} + +static inline int stratix10_dma_map_buffer(struct stratix10_svc_controller *ctrl, + dma_addr_t *handle, void *buffer, + enum dma_data_direction dir) +{ + int ret = 0; + struct stratix10_svc_data_mem *pmem; + + if (!handle || !buffer) + return -EINVAL; + + pmem = stratix10_get_memobj(buffer); + if (!pmem) { + dev_err(ctrl->dev, "Invalid payload memory\n"); + return -ENOENT; + } + + *handle = dma_map_single(ctrl->dev, pmem->vaddr, pmem->size, dir); + ret = dma_mapping_error(ctrl->dev, *handle); + if (ret) { + dev_err(ctrl->dev, "Failed to map payload memory\n"); + return ret; + } + + return 0; +} + +static inline void stratix10_dma_unmap_buffer(struct stratix10_svc_controller *ctrl, + dma_addr_t *handle, void *buffer, + enum dma_data_direction dir) +{ + struct stratix10_svc_data_mem *pmem; + + if (!*handle || !buffer) + return; + + pmem = stratix10_get_memobj(buffer); + if (!pmem) { + dev_err(ctrl->dev, "Invalid payload memory\n"); + return; + } + + if (*handle != 0) { + dma_unmap_single(ctrl->dev, *handle, pmem->size, dir); + *handle = 0; + } +} + +static inline unsigned long +stratix10_get_physical_address(struct stratix10_svc_controller *ctrl, + void *buffer) +{ + struct stratix10_svc_data_mem *pmem; + + if (!ctrl || !buffer) { + return (unsigned long)NULL; + } + + pmem = stratix10_get_memobj(buffer); + if (!pmem) { + dev_err(ctrl->dev, "Invalid payload memory\n"); + WARN_ON_ONCE(1); + return (unsigned long)NULL; + } + + return ((ctrl->is_smmu_enabled) ? 
virt_to_phys(buffer) : pmem->paddr); +} + +static inline unsigned long +stratix10_get_smmu_remapped_address(struct stratix10_svc_controller *ctrl, + void *buffer) +{ + struct stratix10_svc_data_mem *pmem; + + if (!ctrl || !buffer) { + return (unsigned long)NULL; + } + + if (!ctrl->is_smmu_enabled) + return (unsigned long)NULL; + + pmem = stratix10_get_memobj(buffer); + if (!pmem) { + dev_err(ctrl->dev, "Invalid payload memory\n"); + WARN_ON_ONCE(1); + return (unsigned long)NULL; + } + + return pmem->paddr; +} + +/** + * stratix10_svc_async_send - Send an asynchronous message to the Stratix10 service + * @chan: Pointer to the service channel structure + * @msg: Pointer to the message to be sent + * @handler: Pointer to the handler for the asynchronous message used by caller for later reference. + * @cb: Callback function to be called upon completion + * @cb_arg: Argument to be passed to the callback function + * + * This function sends an asynchronous message to the SDM mailbox in EL3 secure + * firmware. It performs various checks and setups, including allocating a job ID, + * setting up the transaction ID, and mapping the payload memory for DMA. + * The function handles different commands by setting up the appropriate + * arguments for the SMC call. If the SMC call is successful, the handler + * is set up and the function returns 0. If the SMC call fails, appropriate + * error handling is performed, including deallocating the job ID and unmapping + * the DMA memory. 
+ * + * Return: 0 on success, negative error code on failure + */ +int stratix10_svc_async_send(struct stratix10_svc_chan *chan, void *msg, void **handler, + async_callback_t cb, void *cb_arg) +{ + int ret = 0; + struct stratix10_svc_async_handler *handle = NULL; + struct stratix10_svc_client_msg *p_msg = + (struct stratix10_svc_client_msg *)msg; + struct arm_smccc_1_2_regs args = { 0 }, res = { 0 }; + + if (!chan || !msg || !handler) + return -EINVAL; + + struct stratix10_async_chan *achan = chan->async_chan; + struct stratix10_svc_controller *ctrl = chan->ctrl; + struct stratix10_async_ctrl *actrl = &ctrl->actrl; + + if (!actrl->initialized) { + dev_err(ctrl->dev, "Async controller not initialized\n"); + return -EINVAL; + } + + if (!achan) { + dev_err(ctrl->dev, "Async channel not allocated\n"); + return -EINVAL; + } + + handle = + kzalloc(sizeof(struct stratix10_svc_async_handler), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + ret = stratix10_allocate_id(achan->job_id_pool); + if (ret < 0) { + dev_err(ctrl->dev, "Failed to allocate job id\n"); + kfree(handle); + return -ENOMEM; + } + + handle->transaction_id = + STRATIX10_SET_TRANSACTIONID(achan->async_client_id, ret); + handle->cb = cb; + handle->msg = p_msg; + handle->cb_arg = cb_arg; + handle->achan = achan; + + /*set the transaction jobid in args.a1*/ + args.a1 = + STRATIX10_SIP_SMC_SET_TRANSACTIONID_X1(handle->transaction_id); + + switch (p_msg->command) { + case COMMAND_HWMON_READTEMP: + args.a0 = INTEL_SIP_SMC_ASYNC_HWMON_READTEMP; + args.a2 = p_msg->arg[0]; + break; + case COMMAND_HWMON_READVOLT: + args.a0 = INTEL_SIP_SMC_ASYNC_HWMON_READVOLT; + args.a2 = p_msg->arg[0]; + break; + case COMMAND_FCS_CRYPTO_OPEN_SESSION: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_OPEN_CS_SESSION; + break; + case COMMAND_FCS_CRYPTO_CLOSE_SESSION: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_CLOSE_CS_SESSION; + args.a2 = p_msg->arg[0]; + break; + case COMMAND_FCS_CRYPTO_IMPORT_KEY: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_IMPORT_CS_KEY; + 
args.a2 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a3 = (unsigned long)p_msg->payload_length; + break; + case COMMAND_FCS_CRYPTO_EXPORT_KEY: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_EXPORT_CS_KEY; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a5 = (unsigned long)p_msg->payload_length_output; + break; + case COMMAND_FCS_CRYPTO_REMOVE_KEY: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_REMOVE_CS_KEY; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + break; + case COMMAND_FCS_CRYPTO_GET_KEY_INFO: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_GET_CS_KEY_INFO; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a5 = (unsigned long)p_msg->payload_length_output; + break; + case COMMAND_FCS_RANDOM_NUMBER_GEN_EXT: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_RANDOM_NUMBER_EXT; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a5 = (unsigned long)p_msg->payload_length_output; + break; + case COMMAND_FCS_GET_PROVISION_DATA: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_GET_PROVISION_DATA; + args.a2 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a3 = (unsigned long)p_msg->payload_length_output; + break; + case COMMAND_FCS_SEND_CERTIFICATE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_SEND_CERTIFICATE; + args.a2 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a3 = (unsigned long)p_msg->payload_length; + break; + case COMMAND_FCS_COUNTER_SET_PREAUTHORIZED: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_CNTR_SET_PREAUTH; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = p_msg->arg[2]; + break; + case COMMAND_FCS_CRYPTO_HKDF_REQUEST: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_HKDF_REQUEST; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = p_msg->arg[2]; + args.a5 = stratix10_get_physical_address(ctrl, 
p_msg->payload); + args.a6 = p_msg->arg[3]; + args.a7 = p_msg->arg[4]; + break; + case COMMAND_FCS_CRYPTO_CREATE_KEY: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_CREATE_CRYPTO_SERVICE_KEY; + args.a2 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a3 = (unsigned long)p_msg->payload_length; + break; + case COMMAND_GET_IDCODE: + args.a0 = INTEL_SIP_SMC_ASYNC_GET_IDCODE; + break; + case COMMAND_FCS_CRYPTO_GET_DEVICE_IDENTITY: + args.a0 = INTEL_SIP_SMC_ASYNC_GET_DEVICE_IDENTITY; + args.a2 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a3 = (unsigned long)p_msg->payload_length_output; + break; + case COMMAND_QSPI_OPEN: + args.a0 = INTEL_SIP_SMC_ASYNC_QSPI_OPEN; + break; + case COMMAND_QSPI_CLOSE: + args.a0 = INTEL_SIP_SMC_ASYNC_QSPI_CLOSE; + break; + case COMMAND_QSPI_SET_CS: + args.a0 = INTEL_SIP_SMC_ASYNC_QSPI_SET_CS; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = p_msg->arg[2]; + break; + case COMMAND_QSPI_READ: + args.a0 = INTEL_SIP_SMC_ASYNC_QSPI_READ; + args.a2 = p_msg->arg[0]; + args.a3 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a4 = (unsigned long)p_msg->payload_length_output; + break; + case COMMAND_QSPI_WRITE: + args.a0 = INTEL_SIP_SMC_ASYNC_QSPI_WRITE; + args.a2 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a3 = (unsigned long)p_msg->payload_length; + break; + case COMMAND_QSPI_ERASE: + args.a0 = INTEL_SIP_SMC_ASYNC_QSPI_ERASE; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + break; + case COMMAND_FCS_MCTP_SEND: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_MCTP; + args.a2 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a3 = (unsigned long)p_msg->payload_length; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a5 = (unsigned long)p_msg->payload_length_output; + break; + case COMMAND_FCS_CRYPTO_GET_DIGEST_INIT: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_GET_DIGEST_INIT; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = 
p_msg->arg[2]; + args.a5 = p_msg->arg[3]; + args.a6 = p_msg->arg[4]; + break; + case COMMAND_FCS_CRYPTO_GET_DIGEST_UPDATE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_GET_DIGEST_UPDATE; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a5 = p_msg->payload_length; + args.a6 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a7 = p_msg->payload_length_output; + args.a8 = stratix10_get_smmu_remapped_address(ctrl, p_msg->payload); + break; + case COMMAND_FCS_CRYPTO_GET_DIGEST_FINALIZE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_GET_DIGEST_FINALIZE; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a5 = p_msg->payload_length; + args.a6 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a7 = p_msg->payload_length_output; + args.a8 = stratix10_get_smmu_remapped_address(ctrl, p_msg->payload); + break; + case COMMAND_FCS_CRYPTO_MAC_VERIFY_INIT: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_MAC_VERIFY_INIT; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = p_msg->arg[2]; + args.a5 = p_msg->arg[3]; + args.a6 = p_msg->arg[4]; + break; + case COMMAND_FCS_CRYPTO_MAC_VERIFY_UPDATE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_MAC_VERIFY_UPDATE; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a5 = p_msg->payload_length; + args.a6 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a7 = p_msg->payload_length_output; + args.a8 = p_msg->arg[2]; + args.a9 = stratix10_get_smmu_remapped_address(ctrl, p_msg->payload); + break; + case COMMAND_FCS_CRYPTO_MAC_VERIFY_FINALIZE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_MAC_VERIFY_FINALIZE; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a5 = p_msg->payload_length; + args.a6 = 
stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a7 = p_msg->payload_length_output; + args.a8 = p_msg->arg[2]; + args.a9 = stratix10_get_smmu_remapped_address(ctrl, p_msg->payload); + break; + case COMMAND_FCS_CRYPTO_AES_CRYPT_INIT: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_AES_CRYPT_INIT; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = p_msg->arg[2]; + args.a5 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a6 = p_msg->payload_length; + break; + case COMMAND_FCS_CRYPTO_AES_CRYPT_UPDATE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_AES_CRYPT_UPDATE; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a5 = p_msg->payload_length; + args.a6 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a7 = p_msg->payload_length_output; + args.a8 = p_msg->arg[2]; + args.a9 = stratix10_get_smmu_remapped_address(ctrl, p_msg->payload); + args.a10 = stratix10_get_smmu_remapped_address(ctrl, p_msg->payload_output); + break; + case COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_AES_CRYPT_FINALIZE; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a5 = p_msg->payload_length; + args.a6 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a7 = p_msg->payload_length_output; + args.a8 = p_msg->arg[2]; + args.a9 = stratix10_get_smmu_remapped_address(ctrl, p_msg->payload); + args.a10 = stratix10_get_smmu_remapped_address(ctrl, p_msg->payload_output); + break; + case COMMAND_FCS_GET_CHIP_ID: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_CHIP_ID; + break; + case COMMAND_FCS_ATTESTATION_CERTIFICATE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_GET_ATTESTATION_CERT; + args.a2 = p_msg->arg[0]; + args.a3 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a4 = (unsigned long)p_msg->payload_length_output; + break; + case 
COMMAND_FCS_ATTESTATION_CERTIFICATE_RELOAD: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_CREATE_CERT_ON_RELOAD; + args.a2 = p_msg->arg[0]; + break; + case COMMAND_FCS_SDOS_DATA_EXT: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_CRYPTION_EXT; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = p_msg->arg[2]; + args.a5 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a6 = p_msg->payload_length; + args.a7 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a8 = p_msg->payload_length_output; + args.a9 = p_msg->arg[3]; + args.a10 = stratix10_get_smmu_remapped_address(ctrl, p_msg->payload); + args.a11 = stratix10_get_smmu_remapped_address(ctrl, p_msg->payload_output); + break; + case COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_INIT: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_ECDSA_GET_PUBKEY_INIT; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = p_msg->arg[2]; + args.a5 = p_msg->arg[3]; + args.a6 = p_msg->arg[4]; + break; + case COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_FINALIZE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_ECDSA_GET_PUBKEY_FINALIZE; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a5 = p_msg->payload_length_output; + break; + case COMMAND_FCS_CRYPTO_ECDH_REQUEST_INIT: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_ECDH_REQUEST_INIT; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = p_msg->arg[2]; + args.a5 = p_msg->arg[3]; + args.a6 = p_msg->arg[4]; + break; + case COMMAND_FCS_CRYPTO_ECDH_REQUEST_FINALIZE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_ECDH_REQUEST_FINALIZE; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a5 = p_msg->payload_length; + args.a6 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a7 = p_msg->payload_length_output; + break; + case COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_INIT: + args.a0 = 
INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIG_VERIFY_INIT; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = p_msg->arg[2]; + args.a5 = p_msg->arg[3]; + args.a6 = p_msg->arg[4]; + break; + case COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_FINALIZE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIG_VERIFY_FINALIZE; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a5 = p_msg->payload_length; + args.a6 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a7 = p_msg->payload_length_output; + break; + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_INIT: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_INIT; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = p_msg->arg[2]; + args.a5 = p_msg->arg[3]; + args.a6 = p_msg->arg[4]; + break; + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_UPDATE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_UPDATE; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a5 = p_msg->payload_length; + args.a6 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a7 = p_msg->payload_length_output; + args.a8 = p_msg->arg[2]; + args.a9 = stratix10_get_smmu_remapped_address(ctrl, p_msg->payload); + break; + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_FINALIZE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_FINALIZE; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a5 = p_msg->payload_length; + args.a6 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a7 = p_msg->payload_length_output; + args.a8 = p_msg->arg[2]; + args.a9 = stratix10_get_smmu_remapped_address(ctrl, p_msg->payload); + break; + case COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_INIT: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIGN_INIT; + args.a2 = p_msg->arg[0]; 
+ args.a3 = p_msg->arg[1]; + args.a4 = p_msg->arg[2]; + args.a5 = p_msg->arg[3]; + args.a6 = p_msg->arg[4]; + break; + case COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_FINALIZE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIGN_FINALIZE; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a5 = p_msg->payload_length; + args.a6 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a7 = p_msg->payload_length_output; + break; + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_INIT: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIGN_INIT; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = p_msg->arg[2]; + args.a5 = p_msg->arg[3]; + args.a6 = p_msg->arg[4]; + break; + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIGN_UPDATE; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a5 = p_msg->payload_length; + args.a6 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a7 = p_msg->payload_length_output; + args.a8 = stratix10_get_smmu_remapped_address(ctrl, p_msg->payload); + break; + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE: + args.a0 = INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIGN_FINALIZE; + args.a2 = p_msg->arg[0]; + args.a3 = p_msg->arg[1]; + args.a4 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a5 = p_msg->payload_length; + args.a6 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a7 = p_msg->payload_length_output; + args.a8 = stratix10_get_smmu_remapped_address(ctrl, p_msg->payload); + break; + case COMMAND_RSU_GET_SPT_TABLE: + args.a0 = INTEL_SIP_SMC_ASYNC_RSU_GET_SPT; + break; + case COMMAND_MBOX_SEND_CMD: + args.a0 = INTEL_SIP_SMC_ASYNC_MBOX_SEND; + args.a2 = p_msg->arg[0]; + args.a3 = stratix10_get_physical_address(ctrl, p_msg->payload); + args.a4 = 
p_msg->payload_length; + args.a5 = stratix10_get_physical_address(ctrl, p_msg->payload_output); + args.a6 = p_msg->payload_length_output; + break; + case COMMAND_RSU_STATUS: + args.a0 = INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS; + break; + case COMMAND_RSU_NOTIFY: + args.a0 = INTEL_SIP_SMC_ASYNC_RSU_NOTIFY; + args.a2 = p_msg->arg[0]; + break; + + default: + dev_err(ctrl->dev, "Invalid command ,%d", p_msg->command); + ret = -EINVAL; + goto deallocate_id; + } + + if (p_msg->payload && p_msg->payload_length > 0 && + ctrl->is_smmu_enabled && !is_vmalloc_addr(p_msg->payload)) { + ret = stratix10_dma_map_buffer(ctrl, &handle->input_handle, + p_msg->payload, DMA_TO_DEVICE); + if (ret) + goto dma_unmap_buffer; + } + + if (p_msg->payload_output && p_msg->payload_length_output > 0 && + ctrl->is_smmu_enabled && !is_vmalloc_addr(p_msg->payload_output)) { + ret = stratix10_dma_map_buffer(ctrl, &handle->output_handle, + p_msg->payload_output, DMA_FROM_DEVICE); + if (ret) + goto dma_unmap_buffer; + } + + /** + * There is a chance that during the execution of async_send() in one core, + * An interrupt might be received in another core, so to mitigate this we are + * adding the handle to the DB and then send the smc call, if the smc call + * is rejected or busy then we will deallocate the handle for the client + * to retry again. 
+ */ + spin_lock(&actrl->trx_list_wr_lock); + hash_add_rcu(actrl->trx_list, &handle->next, handle->transaction_id); + spin_unlock(&actrl->trx_list_wr_lock); + synchronize_rcu(); + + actrl->invoke_fn(actrl, &args, &res); + + switch (res.a0) { + case INTEL_SIP_SMC_STATUS_OK: + dev_dbg(ctrl->dev, + "Async message sent with transaction_id 0x%02x\n", + handle->transaction_id); + *handler = handle; + return 0; + case INTEL_SIP_SMC_STATUS_BUSY: + dev_warn(ctrl->dev, "Mailbox is busy, try after some time\n"); + ret = -EAGAIN; + break; + case INTEL_SIP_SMC_STATUS_REJECTED: + dev_err(ctrl->dev, "Async message rejected\n"); + ret = -EBADF; + break; + default: + dev_err(ctrl->dev, + "Failed to send async message ,got status as %ld\n", + res.a0); + ret = -EIO; + } + + spin_lock(&actrl->trx_list_wr_lock); + hash_del_rcu(&handle->next); + spin_unlock(&actrl->trx_list_wr_lock); + synchronize_rcu(); + +dma_unmap_buffer: + stratix10_dma_unmap_buffer(ctrl, &handle->input_handle, p_msg->payload, + DMA_TO_DEVICE); + stratix10_dma_unmap_buffer(ctrl, &handle->output_handle, p_msg->payload_output, + DMA_FROM_DEVICE); +deallocate_id: + stratix10_deallocate_id(achan->job_id_pool, + STRATIX10_GET_JOBID(handle->transaction_id)); + kfree(handle); + return ret; +} +EXPORT_SYMBOL_GPL(stratix10_svc_async_send); + +/** + * stratix10_svc_async_prepare_response - Prepare the response data for an asynchronous transaction. + * @chan: Pointer to the service channel structure. + * @handler: Pointer to the asynchronous handler structure. + * @data: Pointer to the callback data structure. + * + * This function prepares the response data for an asynchronous transaction. It + * extracts the response data from the SMC response structure and stores it in + * the callback data structure. The function also logs the completion of the + * asynchronous transaction. 
+ * + * Return: 0 on success, -ENOENT if the command is invalid + */ +static int stratix10_svc_async_prepare_response(struct stratix10_svc_chan *chan, + struct stratix10_svc_async_handler *handle, + struct stratix10_svc_cb_data *data) +{ + struct stratix10_svc_client_msg *p_msg = + (struct stratix10_svc_client_msg *)handle->msg; + struct stratix10_svc_controller *ctrl = chan->ctrl; + + stratix10_dma_unmap_buffer(ctrl, &handle->input_handle, p_msg->payload, + DMA_TO_DEVICE); + stratix10_dma_unmap_buffer(ctrl, &handle->output_handle, p_msg->payload_output, + DMA_FROM_DEVICE); + + data->status = STRATIX10_GET_SDM_STATUS_CODE(handle->res.a1); + + switch (p_msg->command) { + case COMMAND_FCS_CRYPTO_CLOSE_SESSION: + case COMMAND_FCS_COUNTER_SET_PREAUTHORIZED: + case COMMAND_QSPI_OPEN: + case COMMAND_QSPI_CLOSE: + case COMMAND_QSPI_SET_CS: + case COMMAND_QSPI_WRITE: + case COMMAND_QSPI_ERASE: + case COMMAND_FCS_ATTESTATION_CERTIFICATE_RELOAD: + case COMMAND_RSU_NOTIFY: + break; + case COMMAND_HWMON_READTEMP: + case COMMAND_HWMON_READVOLT: + case COMMAND_FCS_CRYPTO_OPEN_SESSION: + case COMMAND_FCS_CRYPTO_IMPORT_KEY: + case COMMAND_FCS_CRYPTO_REMOVE_KEY: + case COMMAND_FCS_CRYPTO_GET_KEY_INFO: + case COMMAND_FCS_SEND_CERTIFICATE: + case COMMAND_FCS_GET_PROVISION_DATA: + case COMMAND_FCS_CRYPTO_HKDF_REQUEST: + case COMMAND_FCS_CRYPTO_CREATE_KEY: + case COMMAND_FCS_MCTP_SEND: + case COMMAND_FCS_CRYPTO_GET_DIGEST_UPDATE: + case COMMAND_FCS_CRYPTO_MAC_VERIFY_UPDATE: + case COMMAND_FCS_CRYPTO_AES_CRYPT_UPDATE: + case COMMAND_FCS_CRYPTO_GET_DIGEST_FINALIZE: + case COMMAND_FCS_CRYPTO_MAC_VERIFY_FINALIZE: + case COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE: + case COMMAND_FCS_ATTESTATION_CERTIFICATE: + case COMMAND_FCS_RANDOM_NUMBER_GEN_EXT: + case COMMAND_FCS_CRYPTO_EXPORT_KEY: + case COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_FINALIZE: + case COMMAND_FCS_CRYPTO_ECDH_REQUEST_FINALIZE: + case COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_FINALIZE: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_UPDATE: + 
case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_FINALIZE: + case COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_FINALIZE: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE: + case COMMAND_FCS_CRYPTO_GET_DEVICE_IDENTITY: + case COMMAND_FCS_SDOS_DATA_EXT: + case COMMAND_QSPI_READ: + case COMMAND_MBOX_SEND_CMD: + data->kaddr1 = (void *)&handle->res.a2; + break; + case COMMAND_GET_IDCODE: + case COMMAND_FCS_GET_CHIP_ID: + case COMMAND_RSU_GET_SPT_TABLE: + data->kaddr1 = (void *)&handle->res.a2; + data->kaddr2 = (void *)&handle->res.a3; + break; + case COMMAND_RSU_STATUS: + /* COMMAND_RSU_STATUS has more elements than the cb_data + * can acomodate, so passing the response structure to the + * response function to be handled before done command is + * executed by the client. + */ + data->kaddr1 = (void *)&handle->res; + break; + + default: + dev_alert(ctrl->dev, "Invalid command ,%d", p_msg->command); + return -ENOENT; + } + dev_dbg(ctrl->dev, "Async message completed transaction_id 0x%02x", + handle->transaction_id); + return 0; +} + +/** + * stratix10_svc_async_poll - Polls the status of an asynchronous transaction. + * @chan: Pointer to the service channel structure. + * @tx_handle: Handle to the transaction being polled. + * @data: Pointer to the callback data structure. + * + * This function polls the status of an asynchronous transaction identified by the + * given transaction handle. It ensures that the necessary structures are initialized + * and valid before proceeding with the poll operation. The function sets up the + * necessary arguments for the SMC call, invokes the call, and prepares the response + * data if the call is successful. If the call fails, the function sets the status + * to SVC_STATUS_ERROR and returns an error code. 
+ * + * Return: 0 on success, -EINVAL if any input parameter is invalid, -EAGAIN if the + * transaction is still in progress, or other negative error codes on failure, + * -EPERM if the command is invalid. + */ +int stratix10_svc_async_poll(struct stratix10_svc_chan *chan, void *tx_handle, + struct stratix10_svc_cb_data *data) +{ + int ret; + struct arm_smccc_1_2_regs args = { 0 }; + + if (!chan || !tx_handle || !data) + return -EINVAL; + + struct stratix10_svc_controller *ctrl = chan->ctrl; + struct stratix10_async_ctrl *actrl = &ctrl->actrl; + struct stratix10_async_chan *achan = chan->async_chan; + + if (!achan) { + dev_err(ctrl->dev, "Async channel not allocated\n"); + return -EINVAL; + } + + struct stratix10_svc_async_handler *handle = + (struct stratix10_svc_async_handler *)tx_handle; + if (!hash_hashed(&handle->next)) { + dev_err(ctrl->dev, "Invalid transaction handler\n"); + return -EINVAL; + } + + /** + * For certain operations like AES there are 2/3 stages of function + * called init, setup and finalize. But the init stage could be combined + * with setup or final stage. So for init we will do a non mailbox + * command instructing the ATF to store the context for the next two + * stages. And for these init stages poll command won't be supported. 
+ */ + switch (handle->msg->command) { + case COMMAND_FCS_CRYPTO_MAC_VERIFY_INIT: + case COMMAND_FCS_CRYPTO_GET_DIGEST_INIT: + case COMMAND_FCS_CRYPTO_AES_CRYPT_INIT: + case COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_INIT: + case COMMAND_FCS_CRYPTO_ECDH_REQUEST_INIT: + case COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_INIT: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_INIT: + case COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_INIT: + case COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_INIT: + return -EPERM; + default: + break; + }; + + args.a0 = INTEL_SIP_SMC_ASYNC_POLL; + args.a1 = + STRATIX10_SIP_SMC_SET_TRANSACTIONID_X1(handle->transaction_id); + + actrl->invoke_fn(actrl, &args, &handle->res); + + /*clear data for response*/ + memset(data, 0, sizeof(*data)); + + if (handle->res.a0 == INTEL_SIP_SMC_STATUS_OK) { + ret = stratix10_svc_async_prepare_response(chan, handle, data); + if (ret) { + dev_err(ctrl->dev, "Error in preparation of response,%d\n", ret); + WARN_ON_ONCE(1); + } + return 0; + } else if (handle->res.a0 == INTEL_SIP_SMC_STATUS_BUSY) { + dev_dbg(ctrl->dev, "async message is still in progress\n"); + return -EAGAIN; + } + + dev_err(ctrl->dev, + "Failed to poll async message ,got status as %ld\n", + handle->res.a0); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(stratix10_svc_async_poll); + +/** + * stratix10_svc_async_done - Completes an asynchronous transaction. + * @chan: Pointer to the service channel structure. + * @tx_handle: Handle to the transaction being completed. + * + * This function completes an asynchronous transaction identified by the given + * transaction handle. It ensures that the necessary structures are initialized + * and valid before proceeding with the completion operation. The function + * deallocates the transaction ID, frees the memory allocated for the handler, + * and removes the handler from the transaction list. + * + * Return: 0 on success, -EINVAL if any input parameter is invalid, or other + * negative error codes on failure. 
+ */ +int stratix10_svc_async_done(struct stratix10_svc_chan *chan, void *tx_handle) +{ + if (!chan || !tx_handle) + return -EINVAL; + + struct stratix10_svc_controller *ctrl = chan->ctrl; + struct stratix10_async_chan *achan = chan->async_chan; + + if (!achan) { + dev_err(ctrl->dev, "async channel not allocated\n"); + return -EINVAL; + } + + struct stratix10_svc_async_handler *handle = + (struct stratix10_svc_async_handler *)tx_handle; + if (!hash_hashed(&handle->next)) { + dev_err(ctrl->dev, "Invalid transaction handle\n"); + return -EINVAL; + } + + struct stratix10_async_ctrl *actrl = &ctrl->actrl; + + spin_lock(&actrl->trx_list_wr_lock); + hash_del_rcu(&handle->next); + spin_unlock(&actrl->trx_list_wr_lock); + synchronize_rcu(); + stratix10_deallocate_id(achan->job_id_pool, STRATIX10_GET_JOBID(handle->transaction_id)); + stratix10_dma_unmap_buffer(ctrl, &handle->input_handle, handle->msg->payload, + DMA_TO_DEVICE); + stratix10_dma_unmap_buffer(ctrl, &handle->output_handle, handle->msg->payload_output, + DMA_FROM_DEVICE); + kfree(handle); + return 0; +} +EXPORT_SYMBOL_GPL(stratix10_svc_async_done); + +static inline void stratix10_smc_1_2(struct stratix10_async_ctrl *actrl, + const struct arm_smccc_1_2_regs *args, + struct arm_smccc_1_2_regs *res) +{ + struct stratix10_svc_controller *ctrl = + container_of(actrl, struct stratix10_svc_controller, actrl); + ktime_t t1, t0; + + mutex_lock(&svc_async_lock); + dev_dbg(ctrl->dev, "args->a0=0x%016lx", args->a0); + dev_dbg(ctrl->dev, "args->a1=0x%016lx, args->a2=0x%016lx,", args->a1, args->a2); + dev_dbg(ctrl->dev, "args->a3=0x%016lx, args->a4=0x%016lx,", args->a3, args->a4); + dev_dbg(ctrl->dev, "args->a5=0x%016lx, args->a6=0x%016lx,", args->a5, args->a6); + dev_dbg(ctrl->dev, "args->a7=0x%016lx, args->a8=0x%016lx,", args->a7, args->a8); + dev_dbg(ctrl->dev, "args->a9=0x%016lx, args->a10=0x%016lx,", args->a9, args->a10); + dev_dbg(ctrl->dev, "args->a11=0x%016lx, args->a12=0x%016lx,", args->a11, args->a12); + t0 = 
ktime_get(); + arm_smccc_1_2_smc(args, res); + t1 = ktime_get(); + dev_dbg(ctrl->dev, "Duration is %lld ns", ktime_to_ns(ktime_sub(t1, t0))); + dev_dbg(ctrl->dev, "res->a0=0x%016lx", res->a0); + dev_dbg(ctrl->dev, "res->a1=0x%016lx, res->a2=0x%016lx,", res->a1, res->a2); + dev_dbg(ctrl->dev, "res->a3=0x%016lx, res->a4=0x%016lx,", res->a3, res->a4); + dev_dbg(ctrl->dev, "res->a5=0x%016lx, res->a6=0x%016lx,", res->a5, res->a6); + mutex_unlock(&svc_async_lock); +} + +static irqreturn_t stratix10_svc_async_irq_handler(int irq, void *dev_id) +{ + struct stratix10_svc_controller *ctrl = dev_id; + struct stratix10_async_ctrl *actrl = &ctrl->actrl; + + schedule_work(&actrl->async_work); + disable_irq_nosync(actrl->irq); + return IRQ_HANDLED; +} + +/** + * stratix10_async_workqueue_handler - Handles asynchronous workqueue tasks + * @work: Pointer to the work_struct representing the work to be handled + * + * This function is the handler for the asynchronous workqueue. It performs + * the following tasks: + * - Invokes the asynchronous polling on interrupt supervisory call. + * - On success,it retrieves the bitmap of pending transactions from mailbox + * fifo in ATF. + * - It processes each pending transaction by calling the corresponding + * callback function. + * - Measures the time taken to handle the transactions and logs the information. + * + * The function ensures that the IRQ is enabled after processing the transactions + * and logs the total time taken to handle the transactions along with the number + * of transactions handled and the CPU on which the handler ran. 
+ */ +static void stratix10_async_workqueue_handler(struct work_struct *work) +{ + unsigned long tid = 0, transaction_id = 0; + ktime_t t0, t1; + struct stratix10_svc_async_handler *handler; + struct stratix10_async_ctrl *actrl = + container_of(work, struct stratix10_async_ctrl, async_work); + struct stratix10_svc_controller *ctrl = + container_of(actrl, struct stratix10_svc_controller, actrl); + DECLARE_BITMAP(pend_on_irq, TOTAL_TRANSACTION_IDS); + u64 bitmap_array[4]; + struct arm_smccc_1_2_regs + args = { .a0 = INTEL_SIP_SMC_ASYNC_POLL_ON_IRQ }, + res; + t0 = ktime_get(); + + actrl->invoke_fn(actrl, &args, &res); + if (res.a0 == INTEL_SIP_SMC_STATUS_OK) { + bitmap_array[0] = res.a1; + bitmap_array[1] = res.a2; + bitmap_array[2] = res.a3; + bitmap_array[3] = res.a4; + bitmap_from_arr64(pend_on_irq, bitmap_array, + TOTAL_TRANSACTION_IDS); + rcu_read_lock(); + do { + transaction_id = find_next_bit(pend_on_irq, + TOTAL_TRANSACTION_IDS, + transaction_id); + if (transaction_id >= TOTAL_TRANSACTION_IDS) + break; + hash_for_each_possible_rcu_notrace(actrl->trx_list, + handler, next, + transaction_id) { + if (handler->transaction_id == transaction_id) { + handler->cb(handler->cb_arg); + tid++; + break; + } + } + transaction_id++; + } while (transaction_id < TOTAL_TRANSACTION_IDS); + rcu_read_unlock(); + } + t1 = ktime_get(); + dev_dbg(ctrl->dev, + "Async workqueue handled total time %lldns for %ld transactions on CPU%d\n", + ktime_to_ns(ktime_sub(t1, t0)), tid, smp_processor_id()); + enable_irq(actrl->irq); +} + +/** + * stratix10_svc_async_init - Initialize the Stratix 10 service controller + * for asynchronous operations. + * @controller: Pointer to the Stratix 10 service controller structure. + * + * This function initializes the asynchronous service controller by setting up + * the necessary data structures, initializing the transaction list, and + * registering the IRQ handler for asynchronous transactions. 
+ * + * Return: 0 on success, -EINVAL if the controller is NULL or already initialized, + * -ENOMEM if memory allocation fails, -EADDRINUSE if the client ID is already + * reserved, or other negative error codes on failure. + */ +static int stratix10_svc_async_init(struct stratix10_svc_controller *controller) +{ + int ret, irq; + struct arm_smccc_res res; + + if (!controller) + return -EINVAL; + + struct stratix10_async_ctrl *actrl = &controller->actrl; + + if (actrl->initialized) + return -EINVAL; + + struct device *dev = controller->dev; + struct device_node *node = dev->of_node; + + controller->invoke_fn(INTEL_SIP_SMC_SVC_VERSION, 0, 0, 0, 0, 0, 0, 0, &res); + if (res.a0 != INTEL_SIP_SMC_STATUS_OK && + !(res.a1 > ASYNC_ATF_MINIMUM_MAJOR_VERSION || + (res.a1 == ASYNC_ATF_MINIMUM_MAJOR_VERSION && + res.a2 >= ASYNC_ATF_MINIMUM_MINOR_VERSION))) { + dev_err(dev, + "Intel Service Layer Driver: ATF version is not compatible for async operation\n"); + return -EINVAL; + } + + actrl->invoke_fn = stratix10_smc_1_2; + + actrl->async_id_pool = stratix10_id_pool_create(MAX_SDM_CLIENT_IDS); + if (!actrl->async_id_pool) + return -ENOMEM; + + ret = stratix10_reserve_id(actrl->async_id_pool, SIP_SVC_V1_CLIENT_ID); + if (ret < 0) { + dev_err(dev, + "Intel Service Layer Driver: Error on reserving SIP_SVC_V1_CLIENT_ID\n"); + stratix10_id_pool_destroy(actrl->async_id_pool); + actrl->invoke_fn = NULL; + return -EADDRINUSE; + } + + spin_lock_init(&actrl->trx_list_wr_lock); + hash_init(actrl->trx_list); + atomic_set(&actrl->common_achan_refcount, 0); + + irq = of_irq_get(node, 0); + if (irq < 0) { + dev_err(dev, "Failed to get IRQ, falling back to polling mode\n"); + } else { + ret = devm_request_any_context_irq(dev, irq, stratix10_svc_async_irq_handler, + IRQF_NO_AUTOEN, "stratix10_svc", controller); + if (ret == 0) { + dev_alert(dev, + "Registered IRQ %d for sip async operations\n", + irq); + actrl->irq = irq; + INIT_WORK(&actrl->async_work, stratix10_async_workqueue_handler); + 
enable_irq(actrl->irq); + } + } + + actrl->initialized = true; + return 0; +} + +/** + * stratix10_svc_async_exit - Clean up and exit the asynchronous service controller + * @ctrl: Pointer to the stratix10_svc_controller structure + * + * This function performs the necessary cleanup for the asynchronous service + * controller. It checks if the controller is valid and if it has been + * initialized. If the controller has an IRQ assigned, it frees the IRQ and + * flushes any pending asynchronous work. It then locks the transaction list + * and safely removes and deallocates each handler in the list. The function + * also removes any asynchronous clients associated with the controller's + * channels and destroys the asynchronous ID pool. Finally, it resets the + * asynchronous ID pool and invoke function pointers to NULL. + * + * Return: 0 on success, -EINVAL if the controller is invalid or not initialized. + */ +static int stratix10_svc_async_exit(struct stratix10_svc_controller *ctrl) +{ + int i; + struct hlist_node *tmp; + struct stratix10_svc_async_handler *handler; + + if (!ctrl) + return -EINVAL; + + struct stratix10_async_ctrl *actrl = &ctrl->actrl; + + if (!actrl->initialized) + return -EINVAL; + + actrl->initialized = false; + + if (actrl->irq > 0) { + free_irq(actrl->irq, ctrl); + flush_work(&actrl->async_work); + actrl->irq = 0; + } + + spin_lock(&actrl->trx_list_wr_lock); + hash_for_each_safe(actrl->trx_list, i, tmp, handler, next) { + stratix10_deallocate_id(handler->achan->job_id_pool, + STRATIX10_GET_JOBID(handler->transaction_id)); + hash_del_rcu(&handler->next); + kfree(handler); + } + spin_unlock(&actrl->trx_list_wr_lock); + + for (i = 0; i < SVC_NUM_CHANNEL; i++) { + if (ctrl->chans[i].async_chan) { + stratix10_svc_remove_async_client(&ctrl->chans[i]); + ctrl->chans[i].async_chan = NULL; + } + } + + stratix10_id_pool_destroy(actrl->async_id_pool); + actrl->async_id_pool = NULL; + actrl->invoke_fn = NULL; + + return 0; +} + +/** + * 
stratix10_svc_free_channel() - free service channel + * @chan: service channel to be freed + * + * This function is used by service client to free a service channel. + */ +void stratix10_svc_free_channel(struct stratix10_svc_chan *chan) +{ + unsigned long flag; + + spin_lock_irqsave(&chan->lock, flag); + chan->scl = NULL; + chan->ctrl->num_active_client--; + module_put(chan->ctrl->dev->driver->owner); + spin_unlock_irqrestore(&chan->lock, flag); +} +EXPORT_SYMBOL_GPL(stratix10_svc_free_channel); + +/** + * stratix10_svc_send() - send a message data to the remote + * @chan: service channel assigned to the client + * @msg: message data to be sent, in the format of + * "struct stratix10_svc_client_msg" + * + * This function is used by service client to add a message to the service + * layer driver's queue for being sent to the secure world. + * + * Return: 0 for success, -ENOMEM or -ENOBUFS on error. + */ +int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg) +{ + struct stratix10_svc_client_msg + *p_msg = (struct stratix10_svc_client_msg *)msg; + struct stratix10_svc_data_mem *p_mem; + struct stratix10_svc_data *p_data; + int ret = 0; + unsigned int cpu = 0; + phys_addr_t *src_addr; + phys_addr_t *dst_addr; + + p_data = kzalloc(sizeof(*p_data), GFP_KERNEL); + if (!p_data) + return -ENOMEM; + + /* first client will create kernel thread */ + if (!chan->task) { + chan->task = + kthread_create_on_node(svc_normal_to_secure_thread, + (void *)chan, + cpu_to_node(cpu), + "svc_smc_hvc_thread"); + if (IS_ERR(chan->task)) { + dev_err(chan->ctrl->dev, + "failed to create svc_smc_hvc_thread\n"); + kfree(p_data); + return -EINVAL; + } + kthread_bind(chan->task, cpu); + wake_up_process(chan->task); + } + + pr_debug("%s: %s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__, + chan->name, p_msg->payload, p_msg->command, + (unsigned int)p_msg->payload_length); + + if (!list_empty(&svc_data_mem)) { if (p_msg->command == COMMAND_RECONFIG) { struct 
stratix10_svc_command_config_type *ct = (struct stratix10_svc_command_config_type *) p_msg->payload; p_data->flag = ct->flags; - } - } else { - list_for_each_entry(p_mem, &svc_data_mem, node) - if (p_mem->vaddr == p_msg->payload) { - p_data->paddr = p_mem->paddr; - p_data->size = p_msg->payload_length; - break; - } - if (p_msg->payload_output) { + } else if (p_msg->command == COMMAND_FCS_CRYPTO_AES_CRYPT_UPDATE_SMMU || + p_msg->command == COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE_SMMU){ + src_addr = (phys_addr_t *)p_msg->payload; + p_data->paddr = *src_addr; + p_data->size = p_msg->payload_length; + dst_addr = (phys_addr_t *)p_msg->payload_output; + p_data->paddr_output = *dst_addr; + p_data->size_output = p_msg->payload_length_output; + } else if ( + p_msg->command == + COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE_SMMU || + p_msg->command == + COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE_SMMU || + p_msg->command == + COMMAND_FCS_CRYPTO_GET_DIGEST_UPDATE_SMMU || + p_msg->command == + COMMAND_FCS_CRYPTO_GET_DIGEST_FINALIZE_SMMU || + p_msg->command == + COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_UPDATE_SMMU || + p_msg->command == + COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_FINALIZE_SMMU || + p_msg->command == + COMMAND_FCS_CRYPTO_MAC_VERIFY_UPDATE_SMMU || + p_msg->command == + COMMAND_FCS_CRYPTO_MAC_VERIFY_FINALIZE_SMMU) { + src_addr = (phys_addr_t *)p_msg->payload; + p_data->paddr = *src_addr; + p_data->size = p_msg->payload_length; + mutex_lock(&svc_mem_lock); list_for_each_entry(p_mem, &svc_data_mem, node) if (p_mem->vaddr == p_msg->payload_output) { - p_data->paddr_output = - p_mem->paddr; - p_data->size_output = - p_msg->payload_length_output; + p_data->paddr_output = p_mem->paddr; + p_data->size_output = p_msg->payload_length_output; break; } + mutex_unlock(&svc_mem_lock); + } else { + mutex_lock(&svc_mem_lock); + list_for_each_entry(p_mem, &svc_data_mem, node) + if (p_mem->vaddr == p_msg->payload) { + p_data->paddr = p_mem->paddr; + p_data->size = 
p_msg->payload_length; + if(p_msg->command == COMMAND_RECONFIG_DATA_SUBMIT && chan->ctrl->is_smmu_enabled) + p_data->paddr += chan->ctrl->sdm_dma_addr_offset; + } + mutex_unlock(&svc_mem_lock); + if (p_msg->payload_output) { + mutex_lock(&svc_mem_lock); + list_for_each_entry(p_mem, &svc_data_mem, node) + if (p_mem->vaddr == p_msg->payload_output) { + p_data->paddr_output = + (p_msg->command == COMMAND_MBOX_SEND_CMD + && chan->ctrl->is_smmu_enabled) ? + virt_to_phys(p_mem->vaddr) : p_mem->paddr; + p_data->size_output = + p_msg->payload_length_output; + break; + } + mutex_unlock(&svc_mem_lock); + } } } @@ -1015,14 +3284,20 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg) p_data->arg[0] = p_msg->arg[0]; p_data->arg[1] = p_msg->arg[1]; p_data->arg[2] = p_msg->arg[2]; - p_data->size = p_msg->payload_length; + p_data->arg[3] = p_msg->arg[3]; + p_data->arg[4] = p_msg->arg[4]; + p_data->arg[5] = p_msg->arg[5]; p_data->chan = chan; - pr_debug("%s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n", __func__, - (unsigned int)p_data->paddr, p_data->command, - (unsigned int)p_data->size); - ret = kfifo_in_spinlocked(&chan->ctrl->svc_fifo, p_data, - sizeof(*p_data), - &chan->ctrl->svc_fifo_lock); + pr_debug("%s: %s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n", + __func__, + chan->name, + (unsigned int)p_data->paddr, + p_data->command, + (unsigned int)p_data->size); + + ret = kfifo_in_spinlocked(&chan->svc_fifo, p_data, + sizeof(*p_data), + &chan->svc_fifo_lock); kfree(p_data); @@ -1043,12 +3318,22 @@ EXPORT_SYMBOL_GPL(stratix10_svc_send); */ void stratix10_svc_done(struct stratix10_svc_chan *chan) { - /* stop thread when thread is running AND only one active client */ - if (chan->ctrl->task && chan->ctrl->num_active_client <= 1) { - pr_debug("svc_smc_hvc_shm_thread is stopped\n"); - kthread_stop(chan->ctrl->task); - chan->ctrl->task = NULL; + /* stop thread when thread is running */ + if (chan->task) { + + if (!IS_ERR(chan->task)) { + struct task_struct 
*task_to_stop = chan->task; + + chan->task = NULL; + pr_debug("%s: %s: svc_smc_hvc_shm_thread is stopping\n", + __func__, chan->name); + kthread_stop(task_to_stop); + } + + chan->task = NULL; } + pr_debug("%s: %s: svc_smc_hvc_shm_thread has stopped\n", + __func__, chan->name); } EXPORT_SYMBOL_GPL(stratix10_svc_done); @@ -1066,29 +3351,78 @@ void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan, size_t size) { struct stratix10_svc_data_mem *pmem; - unsigned long va; + unsigned long va_gen_pool; phys_addr_t pa; struct gen_pool *genpool = chan->ctrl->genpool; - size_t s = roundup(size, 1 << genpool->min_alloc_order); + size_t s; + void *va; + int ret; + struct iova *alloc; + dma_addr_t dma_addr; - pmem = devm_kzalloc(chan->ctrl->dev, sizeof(*pmem), GFP_KERNEL); + pmem = kzalloc(sizeof(*pmem), GFP_KERNEL); if (!pmem) return ERR_PTR(-ENOMEM); - va = gen_pool_alloc(genpool, s); - if (!va) - return ERR_PTR(-ENOMEM); + mutex_lock(&svc_mem_lock); + + if (chan->ctrl->is_smmu_enabled == true) { + s = PAGE_ALIGN(size); + va = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | __GFP_DMA, get_order(s)); + if (!va) { + pr_debug("%s get_free_pages_failes\n", __func__); + kfree(pmem); + mutex_unlock(&svc_mem_lock); + return ERR_PTR(-ENOMEM); + } - memset((void *)va, 0, s); - pa = gen_pool_virt_to_phys(genpool, va); + alloc = alloc_iova(&chan->ctrl->carveout.domain, + s >> chan->ctrl->carveout.shift, + chan->ctrl->carveout.limit >> chan->ctrl->carveout.shift, + true); + + dma_addr = iova_dma_addr(&chan->ctrl->carveout.domain, alloc); + + ret = iommu_map(chan->ctrl->domain, dma_addr, virt_to_phys(va), + s, IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO | IOMMU_CACHE, + GFP_KERNEL); + if (ret < 0) { + pr_debug("%s IOMMU map failed\n", __func__); + free_iova(&chan->ctrl->carveout.domain, + iova_pfn(&chan->ctrl->carveout.domain, + dma_addr)); + free_pages((unsigned long)va, get_order(size)); + kfree(pmem); + mutex_unlock(&svc_mem_lock); + return ERR_PTR(-ENOMEM); + } + + 
pmem->paddr = dma_addr; + } else { + s = roundup(size, 1 << genpool->min_alloc_order); - pmem->vaddr = (void *)va; - pmem->paddr = pa; + va_gen_pool = gen_pool_alloc(genpool, s); + if (!va_gen_pool) { + kfree(pmem); + mutex_unlock(&svc_mem_lock); + return ERR_PTR(-ENOMEM); + } + + va = (void *)va_gen_pool; + + memset(va, 0, s); + pa = gen_pool_virt_to_phys(genpool, va_gen_pool); + + pmem->paddr = pa; + } + + pmem->vaddr = va; pmem->size = s; list_add_tail(&pmem->node, &svc_data_mem); - pr_debug("%s: va=%p, pa=0x%016x\n", __func__, - pmem->vaddr, (unsigned int)pmem->paddr); + pr_debug("%s: %s: va=%p, pa=0x%016x\n", __func__, + chan->name, pmem->vaddr, (unsigned int)pmem->paddr); + mutex_unlock(&svc_mem_lock); return (void *)va; } EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory); @@ -1104,25 +3438,79 @@ void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr) { struct stratix10_svc_data_mem *pmem; + if (!chan || !kaddr) + return; + + mutex_lock(&svc_mem_lock); + + if (list_empty(&svc_data_mem)) { + mutex_unlock(&svc_mem_lock); + return; + } + list_for_each_entry(pmem, &svc_data_mem, node) if (pmem->vaddr == kaddr) { - gen_pool_free(chan->ctrl->genpool, - (unsigned long)kaddr, pmem->size); - pmem->vaddr = NULL; + if (chan->ctrl->is_smmu_enabled) { + iommu_unmap(chan->ctrl->domain, pmem->paddr, pmem->size); + free_iova(&chan->ctrl->carveout.domain, + iova_pfn(&chan->ctrl->carveout.domain, + pmem->paddr)); + free_pages((unsigned long)pmem->vaddr, get_order(pmem->size)); + } else { + gen_pool_free(chan->ctrl->genpool, + (unsigned long)kaddr, pmem->size); + pmem->vaddr = NULL; + } list_del(&pmem->node); + kfree(pmem); + mutex_unlock(&svc_mem_lock); return; } - - list_del(&svc_data_mem); + mutex_unlock(&svc_mem_lock); } EXPORT_SYMBOL_GPL(stratix10_svc_free_memory); +static int smp_psci_offline_cpus(void) +{ + int cpu, ret; + /* Iterate over all online CPUs except the CPU currently running the code*/ + for_each_online_cpu(cpu) { + if (cpu == 
smp_processor_id()) + continue; // don't offline self + + ret = remove_cpu(cpu); // equivalent to cpu_down(cpu) + if (ret) + pr_warn("Failed to offline CPU%d: %d\n", cpu, ret); + } + return 0; +} + +static int psci_cpu_off_reboot_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + switch (action) { + case SYS_POWER_OFF: + case SYS_RESTART: + smp_psci_offline_cpus(); // Powers down secondary CPUs + break; + } + return NOTIFY_OK; +} + +static struct notifier_block psci_reboot_nb = { + .notifier_call = psci_cpu_off_reboot_notifier, + .priority = INT_MAX, // Make this one of the last notifiers called +}; + static const struct of_device_id stratix10_svc_drv_match[] = { {.compatible = "intel,stratix10-svc"}, {.compatible = "intel,agilex-svc"}, + {.compatible = "intel,agilex5-svc"}, {}, }; +static DEFINE_MUTEX(mailbox_lock); + static int stratix10_svc_drv_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1131,10 +3519,13 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) struct gen_pool *genpool; struct stratix10_svc_sh_memory *sh_memory; struct stratix10_svc *svc; + struct device_node *node = pdev->dev.of_node; + struct arm_smccc_res res; svc_invoke_fn *invoke_fn; size_t fifo_size; int ret; + unsigned long order; /* get SMC or HVC function */ invoke_fn = get_invoke_func(dev); @@ -1173,32 +3564,129 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) controller->num_active_client = 0; controller->chans = chans; controller->genpool = genpool; - controller->task = NULL; controller->invoke_fn = invoke_fn; + controller->is_smmu_enabled = false; + controller->sdm_dma_addr_offset = 0x0; init_completion(&controller->complete_status); - fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO; - ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL); - if (ret) { - dev_err(dev, "failed to allocate FIFO\n"); - goto err_destroy_pool; + if (of_device_is_compatible(node, 
"intel,agilex-svc") + || of_device_is_compatible(node, "intel,stratix10-svc")) { + ret = register_reboot_notifier(&psci_reboot_nb); + WARN_ON(ret); + } + + if (of_device_is_compatible(node, "intel,agilex5-svc")) { + if (iommu_present(&platform_bus_type) && + device_property_read_bool(dev, "altr,smmu_enable_quirk")) { + controller->is_smmu_enabled = true; + controller->sdm_dma_addr_offset = AGILEX5_SDM_DMA_ADDR_OFFSET; + pr_debug("Intel Service Layer Driver: IOMMU Present\n"); + controller->domain = iommu_get_dma_domain(dev); + + if (!controller->domain) { + pr_debug("Intel Service Layer Driver: Error IOMMU domain\n"); + ret = -ENODEV; + goto err_destroy_pool; + } else { + ret = iova_cache_get(); + if (ret < 0) { + pr_debug("Intel Service Layer Driver: IOVA cache failed\n"); + iommu_domain_free(controller->domain); + ret = -ENODEV; + goto err_destroy_pool; + } + ret = iommu_attach_device(controller->domain, dev); + if (ret) { + pr_debug("Intel Service Layer Driver: Error IOMMU attach failed\n"); + iova_cache_put(); + iommu_domain_free(controller->domain); + ret = -ENODEV; + goto err_destroy_pool; + } + } + + order = __ffs(controller->domain->pgsize_bitmap); + init_iova_domain(&controller->carveout.domain, 1UL << order, + IOMMU_STARTING_ADDR); + + controller->carveout.shift = iova_shift(&controller->carveout.domain); + controller->carveout.limit = IOMMU_LIMIT_ADDR - PAGE_SIZE; + } else { + pr_debug("Intel Service Layer Driver: IOMMU Not Present\n"); + ret = -ENODEV; + goto err_destroy_pool; + } + + /* when controller->is_smmu_enabled is set to true the SDM remapper will be bypassed*/ + controller->invoke_fn(INTEL_SIP_SMC_SDM_REMAPPER_CONFIG, + controller->is_smmu_enabled? 
DISABLE_REMAPPER: ENABLE_REMAPPER, 0, 0, 0, 0, 0, 0, &res); + if (res.a0 != INTEL_SIP_SMC_STATUS_OK) { + pr_info("Failed to configure remapper!\n"); + ret = -ENODEV; + goto err_destroy_pool; + } } - spin_lock_init(&controller->svc_fifo_lock); + + ret = stratix10_svc_async_init(controller); + if (ret) + pr_debug("Intel Service Layer Driver: Error on stratix10_svc_async_init %d\n", ret); + + /* This mutex is used to block threads from utilizing + * SDM to prevent out of order command tx. + * And is only used for sync calls to SDM(v1 API's) + */ + controller->sdm_lock = &mailbox_lock; + + fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO; chans[0].scl = NULL; chans[0].ctrl = controller; chans[0].name = SVC_CLIENT_FPGA; spin_lock_init(&chans[0].lock); + ret = kfifo_alloc(&chans[0].svc_fifo, fifo_size, GFP_KERNEL); + if (ret) { + dev_err(dev, "failed to allocate FIFO 0\n"); + return ret; + } + spin_lock_init(&chans[0].svc_fifo_lock); chans[1].scl = NULL; chans[1].ctrl = controller; chans[1].name = SVC_CLIENT_RSU; spin_lock_init(&chans[1].lock); + ret = kfifo_alloc(&chans[1].svc_fifo, fifo_size, GFP_KERNEL); + if (ret) { + dev_err(dev, "failed to allocate FIFO 1\n"); + return ret; + } + spin_lock_init(&chans[1].svc_fifo_lock); chans[2].scl = NULL; chans[2].ctrl = controller; chans[2].name = SVC_CLIENT_FCS; spin_lock_init(&chans[2].lock); + ret = kfifo_alloc(&chans[2].svc_fifo, fifo_size, GFP_KERNEL); + if (ret) { + dev_err(dev, "failed to allocate FIFO 2\n"); + return ret; + } + spin_lock_init(&chans[2].svc_fifo_lock); + + chans[3].scl = NULL; + chans[3].ctrl = controller; + chans[3].name = SVC_CLIENT_HWMON; + spin_lock_init(&chans[3].lock); + ret = kfifo_alloc(&chans[3].svc_fifo, fifo_size, GFP_KERNEL); + if (ret) { + dev_err(dev, "failed to allocate FIFO 3\n"); + return ret; + } + spin_lock_init(&chans[3].svc_fifo_lock); + + chans[3].scl = NULL; + chans[3].ctrl = controller; + chans[3].name = SVC_CLIENT_HWMON; + spin_lock_init(&chans[3].lock); 
list_add_tail(&controller->node, &svc_ctrl); platform_set_drvdata(pdev, controller); @@ -1207,63 +3695,65 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL); if (!svc) { ret = -ENOMEM; - goto err_free_kfifo; + return ret; } + controller->svc = svc; + svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0); if (!svc->stratix10_svc_rsu) { dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU); - ret = -ENOMEM; - goto err_free_kfifo; + return -ENOMEM; } ret = platform_device_add(svc->stratix10_svc_rsu); - if (ret) { - platform_device_put(svc->stratix10_svc_rsu); - goto err_free_kfifo; - } - - svc->intel_svc_fcs = platform_device_alloc(INTEL_FCS, 1); - if (!svc->intel_svc_fcs) { - dev_err(dev, "failed to allocate %s device\n", INTEL_FCS); - ret = -ENOMEM; - goto err_unregister_dev; - } - - ret = platform_device_add(svc->intel_svc_fcs); - if (ret) { - platform_device_put(svc->intel_svc_fcs); - goto err_unregister_dev; - } + if (ret) + goto err_put_device; - dev_set_drvdata(dev, svc); + ret = of_platform_default_populate(dev_of_node(dev), NULL, dev); + if (ret) + goto err_put_device; pr_info("Intel Service Layer Driver Initialized\n"); return 0; -err_unregister_dev: - platform_device_unregister(svc->stratix10_svc_rsu); -err_free_kfifo: - kfifo_free(&controller->svc_fifo); +err_put_device: + platform_device_put(svc->stratix10_svc_rsu); err_destroy_pool: gen_pool_destroy(genpool); + return ret; } static void stratix10_svc_drv_remove(struct platform_device *pdev) { - struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev); + int i; struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev); + struct stratix10_svc *svc = ctrl->svc; + + of_platform_depopulate(ctrl->dev); + + if (ctrl->domain) { + put_iova_domain(&ctrl->carveout.domain); + iova_cache_put(); + iommu_detach_device(ctrl->domain, &pdev->dev); + iommu_domain_free(ctrl->domain); + } 
platform_device_unregister(svc->intel_svc_fcs); platform_device_unregister(svc->stratix10_svc_rsu); - kfifo_free(&ctrl->svc_fifo); - if (ctrl->task) { - kthread_stop(ctrl->task); - ctrl->task = NULL; + stratix10_svc_async_exit(ctrl); + + for (i = 0; i < SVC_NUM_CHANNEL; i++) { + if (ctrl->chans[i].task) { + kthread_stop(ctrl->chans[i].task); + ctrl->chans[i].task = NULL; + } + kfifo_free(&ctrl->chans[i].svc_fifo); } + if (ctrl->genpool) gen_pool_destroy(ctrl->genpool); list_del(&ctrl->node); diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index 37b35f58f0dfb..a5349e8f07249 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig @@ -10,6 +10,13 @@ menuconfig FPGA kernel. The FPGA framework adds an FPGA manager class and FPGA manager drivers. +config FPGA_MGR_DEBUG_FS + bool "FPGA Manager DebugFS" + depends on FPGA && DEBUG_FS + help + Say Y here if you want to expose a DebugFS interface for the + FPGA Manager Framework. + if FPGA config FPGA_MGR_SOCFPGA @@ -47,7 +54,7 @@ config FPGA_MGR_ALTERA_PS_SPI config FPGA_MGR_ALTERA_CVP tristate "Altera CvP FPGA Manager" - depends on PCI + depends on PCI && FPGA help FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V, Arria 10 and Stratix10 Altera FPGAs using the CvP interface over PCIe. 
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile index aeb89bb13517e..65c7b3cb09e51 100644 --- a/drivers/fpga/Makefile +++ b/drivers/fpga/Makefile @@ -4,7 +4,7 @@ # # Core FPGA Manager Framework -obj-$(CONFIG_FPGA) += fpga-mgr.o +obj-$(CONFIG_FPGA) += fpga-mgr.o fpga-mgr-debugfs.o # FPGA Manager Drivers obj-$(CONFIG_FPGA_MGR_ALTERA_CVP) += altera-cvp.o diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index 6b09144324453..0064fbbaa1868 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c @@ -24,6 +24,9 @@ /* Vendor Specific Extended Capability Registers */ #define VSE_PCIE_EXT_CAP_ID 0x0 #define VSE_PCIE_EXT_CAP_ID_VAL 0x000b /* 16bit */ +#define VSE_PCIE_SPECIFIC_HEADER 0x4 /* VSEC ID, Revision, length*/ +#define VSEC_LENGTH_BIT_OFFSET 20 /* Bit 20 to 31 */ +#define AGILEX5_VSEC_LENGTH 0x60 /* Agilex5 only */ #define VSE_CVP_STATUS 0x1c /* 32bit */ #define VSE_CVP_STATUS_CFG_RDY BIT(18) /* CVP_CONFIG_READY */ @@ -51,8 +54,9 @@ #define V1_VSEC_OFFSET 0x200 /* Vendor Specific Offset V1 */ /* V2 Defines */ #define VSE_CVP_TX_CREDITS 0x49 /* 8bit */ +#define VSE_CVP_AG5_TX_CREDITS 0x5C /* 8bit credits for Agilex5*/ -#define V2_CREDIT_TIMEOUT_US 20000 +#define V2_CREDIT_TIMEOUT_US 40000 #define V2_CHECK_CREDIT_US 10 #define V2_POLL_TIMEOUT_US 1000000 #define V2_USER_TIMEOUT_US 500000 @@ -61,11 +65,18 @@ #define DRV_NAME "altera-cvp" #define ALTERA_CVP_MGR_NAME "Altera CvP FPGA Manager" +#define SOCFPGA_CVP_V1_OTHERS 0x1 +#define SOCFPGA_CVP_V2_OTHERS 0x2 +#define SOCFPGA_CVP_V2_AGILEX5 0x3 /* Write block sizes */ #define ALTERA_CVP_V1_SIZE 4 #define ALTERA_CVP_V2_SIZE 4096 +/* Tear-down retry */ +#define CVP_TEARDOWN_MAX_RETRY 10 +/* Sleep duration before polling CVP status for CVP recovery */ +#define CVP_STATUS_POLL_SLEEP 50 /* Optional CvP config error status check for debugging */ static bool altera_cvp_chkcfg; @@ -80,7 +91,9 @@ struct altera_cvp_conf { u8 numclks; u32 sent_packets; u32 vsec_offset; + u8 *send_buf; const 
struct cvp_priv *priv; + u32 device_family_type; }; struct cvp_priv { @@ -231,18 +244,33 @@ static int altera_cvp_v2_wait_for_credit(struct fpga_manager *mgr, u32 timeout = V2_CREDIT_TIMEOUT_US / V2_CHECK_CREDIT_US; struct altera_cvp_conf *conf = mgr->priv; int ret; - u8 val; + u32 val; + u32 credit_mask = 0xFF; + u32 vse_cvp_tx_credits_offset = VSE_CVP_TX_CREDITS; + + if (conf->device_family_type == SOCFPGA_CVP_V2_AGILEX5) { + vse_cvp_tx_credits_offset = VSE_CVP_AG5_TX_CREDITS; + credit_mask = 0xFFF; + } do { - ret = altera_read_config_byte(conf, VSE_CVP_TX_CREDITS, &val); + /* READ DWORD is required for Agilex5 but READ BYTE is required for non-Agilex5 */ + if (conf->device_family_type == SOCFPGA_CVP_V2_AGILEX5) { + ret = altera_read_config_dword(conf, vse_cvp_tx_credits_offset, &val); + } else { + ret = altera_read_config_byte(conf, vse_cvp_tx_credits_offset, (u8 *) &val); + } + if (ret) { dev_err(&conf->pci_dev->dev, "Error reading CVP Credit Register\n"); return ret; } + val = val & credit_mask; + /* Return if there is space in FIFO */ - if (val - (u8)conf->sent_packets) + if (val - conf->sent_packets) return 0; ret = altera_cvp_chk_error(mgr, blocks * ALTERA_CVP_V2_SIZE); @@ -297,6 +325,9 @@ static int altera_cvp_teardown(struct fpga_manager *mgr, val &= ~VSE_CVP_PROG_CTRL_CONFIG; altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val); + /* Sleep before polling for CFG_RDY from CVP_STATUS */ + usleep_range(CVP_STATUS_POLL_SLEEP, CVP_STATUS_POLL_SLEEP + 1); + /* * STEP 14 * - set CVP_NUMCLKS to 1 and then issue CVP_DUMMY_WR dummy @@ -308,12 +339,39 @@ static int altera_cvp_teardown(struct fpga_manager *mgr, /* STEP 15 - poll CVP_CONFIG_READY bit for 0 with 10us timeout */ ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0, conf->priv->poll_time_us); - if (ret) + if (ret) { dev_err(&mgr->dev, "CFG_RDY == 0 timeout\n"); + goto error_path; + } return ret; + +error_path: + /* reset CVP_MODE and HIP_CLK_SEL bit */ + altera_read_config_dword(conf, 
VSE_CVP_MODE_CTRL, &val); + val &= ~VSE_CVP_MODE_CTRL_HIP_CLK_SEL; + val &= ~VSE_CVP_MODE_CTRL_CVP_MODE; + altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val); + + return -EAGAIN; } +static int altera_cvp_recovery(struct fpga_manager *mgr, + struct fpga_image_info *info) +{ + int ret = 0, retry = 0; + + for (retry = 0; retry < CVP_TEARDOWN_MAX_RETRY; retry++) { + ret = altera_cvp_teardown(mgr, info); + if (!ret) + break; + dev_warn(&mgr->dev, + "%s: [%d] Tear-down failed. Retrying\n", + __func__, + retry); + } + return ret; +} static int altera_cvp_write_init(struct fpga_manager *mgr, struct fpga_image_info *info, const char *buf, size_t count) @@ -346,7 +404,7 @@ static int altera_cvp_write_init(struct fpga_manager *mgr, if (val & VSE_CVP_STATUS_CFG_RDY) { dev_warn(&mgr->dev, "CvP already started, tear down first\n"); - ret = altera_cvp_teardown(mgr, info); + ret = altera_cvp_recovery(mgr, info); if (ret) return ret; } @@ -388,6 +446,9 @@ static int altera_cvp_write_init(struct fpga_manager *mgr, val |= VSE_CVP_PROG_CTRL_CONFIG; altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val); + /* Sleep before polling for CFG_RDY from CVP_STATUS */ + usleep_range(CVP_STATUS_POLL_SLEEP, CVP_STATUS_POLL_SLEEP + 1); + /* STEP 5 - poll CVP_CONFIG READY for 1 with timeout */ ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, VSE_CVP_STATUS_CFG_RDY, @@ -452,7 +513,11 @@ static int altera_cvp_write(struct fpga_manager *mgr, const char *buf, } len = min(conf->priv->block_size, remaining); - altera_cvp_send_block(conf, data, len); + /* Copy the requested host data into the transmit buffer */ + + memcpy(conf->send_buf, data, len); + altera_cvp_send_block(conf, (const u32 *)conf->send_buf, + conf->priv->block_size); data += len / sizeof(u32); done += len; remaining -= len; @@ -487,14 +552,17 @@ static int altera_cvp_write_complete(struct fpga_manager *mgr, u32 mask, val; int ret; - ret = altera_cvp_teardown(mgr, info); + ret = altera_cvp_recovery(mgr, info); if (ret) 
return ret; - /* STEP 16 - check CVP_CONFIG_ERROR_LATCHED bit */ - altera_read_config_dword(conf, VSE_UNCOR_ERR_STATUS, &val); - if (val & VSE_UNCOR_ERR_CVP_CFG_ERR) { - dev_err(&mgr->dev, "detected CVP_CONFIG_ERROR_LATCHED!\n"); + /* + * STEP 16 - If bitstream error (truncated/miss-matched), + * we shall exit here. + */ + ret = altera_read_config_dword(conf, VSE_CVP_STATUS, &val); + if (ret || (val & VSE_CVP_STATUS_CFG_ERR)) { + dev_err(&mgr->dev, "CVP_CONFIG_ERROR!\n"); return -EPROTO; } @@ -510,6 +578,8 @@ static int altera_cvp_write_complete(struct fpga_manager *mgr, conf->priv->user_time_us); if (ret) dev_err(&mgr->dev, "PLD_CLK_IN_USE|USERMODE timeout\n"); + else + dev_notice(&mgr->dev, "CVP write completed successfully.\n"); return ret; } @@ -560,7 +630,7 @@ static int altera_cvp_probe(struct pci_dev *pdev, static void altera_cvp_remove(struct pci_dev *pdev); static struct pci_device_id altera_cvp_id_tbl[] = { - { PCI_VDEVICE(ALTERA, PCI_ANY_ID) }, + { PCI_VDEVICE(ALTERA, 0x00) }, { } }; MODULE_DEVICE_TABLE(pci, altera_cvp_id_tbl); @@ -637,10 +707,23 @@ static int altera_cvp_probe(struct pci_dev *pdev, conf->pci_dev = pdev; conf->write_data = altera_cvp_write_data_iomem; - if (conf->vsec_offset == V1_VSEC_OFFSET) + /* To differentiate the target SOCFPGA */ + if (conf->vsec_offset == V1_VSEC_OFFSET) { conf->priv = &cvp_priv_v1; - else + conf->device_family_type = SOCFPGA_CVP_V1_OTHERS; + dev_notice(&pdev->dev, "V1 target SOCFPGA detected.\n"); + } else { + /* Agilex7, Stratix10, Agilex5*/ conf->priv = &cvp_priv_v2; + pci_read_config_dword(pdev, offset + VSE_PCIE_SPECIFIC_HEADER, ®val); + if ((regval >> VSEC_LENGTH_BIT_OFFSET) == AGILEX5_VSEC_LENGTH) { + conf->device_family_type = SOCFPGA_CVP_V2_AGILEX5; + dev_notice(&pdev->dev, "V2 target SOCFPGA Agilex5 detected.\n"); + } else { + conf->device_family_type = SOCFPGA_CVP_V2_OTHERS; + dev_notice(&pdev->dev, "V2 target SOCFPGA detected.\n"); + } + } conf->map = pci_iomap(pdev, CVP_BAR, 0); if (!conf->map) { @@ 
-660,6 +743,13 @@ static int altera_cvp_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, mgr); + /* Allocate the 4096 block size transmit buffer */ + conf->send_buf = devm_kzalloc(&pdev->dev, conf->priv->block_size, GFP_KERNEL); + if (!conf->send_buf) { + ret = -ENOMEM; + fpga_mgr_unregister(mgr); + goto err_unmap; + } return 0; err_unmap: diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c index 44061cb16f877..f8a6901f46be2 100644 --- a/drivers/fpga/altera-freeze-bridge.c +++ b/drivers/fpga/altera-freeze-bridge.c @@ -52,7 +52,7 @@ static int altera_freeze_br_req_ack(struct altera_freeze_br_data *priv, if (illegal) { dev_err(dev, "illegal request detected 0x%x", illegal); - writel(1, csr_illegal_req_addr); + writel(illegal, csr_illegal_req_addr); illegal = readl(csr_illegal_req_addr); if (illegal) diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c index 8ef395b49bf8a..22fe0921e5cab 100644 --- a/drivers/fpga/fpga-bridge.c +++ b/drivers/fpga/fpga-bridge.c @@ -290,7 +290,7 @@ static ssize_t name_show(struct device *dev, { struct fpga_bridge *bridge = to_fpga_bridge(dev); - return sprintf(buf, "%s\n", bridge->name); + return scnprintf(buf, PAGE_SIZE, "%s\n", bridge->name); } static ssize_t state_show(struct device *dev, diff --git a/drivers/fpga/fpga-mgr-debugfs.c b/drivers/fpga/fpga-mgr-debugfs.c new file mode 100644 index 0000000000000..6a47cc6326ca7 --- /dev/null +++ b/drivers/fpga/fpga-mgr-debugfs.c @@ -0,0 +1,235 @@ +/* + * FPGA Manager DebugFS + * + * Copyright (C) 2016 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ +#include +#include +#include +#include + +#if IS_ENABLED(CONFIG_FPGA_MGR_DEBUG_FS) + +static struct dentry *fpga_mgr_debugfs_root; + +struct fpga_mgr_debugfs { + struct dentry *debugfs_dir; + struct fpga_image_info *info; +}; + +static ssize_t fpga_mgr_firmware_write_file(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct fpga_manager *mgr = file->private_data; + struct fpga_mgr_debugfs *debugfs = mgr->debugfs; + char *buf; + int ret; + + ret = fpga_mgr_lock(mgr); + if (ret) { + dev_err(&mgr->dev, "FPGA manager is busy\n"); + return -EBUSY; + } + + buf = devm_kzalloc(&mgr->dev, count, GFP_KERNEL); + if (!buf) { + fpga_mgr_unlock(mgr); + return -ENOMEM; + } + + if (copy_from_user(buf, user_buf, count)) { + fpga_mgr_unlock(mgr); + devm_kfree(&mgr->dev, buf); + return -EFAULT; + } + + buf[count] = 0; + if (buf[count - 1] == '\n') + buf[count - 1] = 0; + + /* Release previous firmware name (if any). Save current one. 
*/ + if (debugfs->info->firmware_name) + devm_kfree(&mgr->dev, debugfs->info->firmware_name); + debugfs->info->firmware_name = buf; + + ret = fpga_mgr_load(mgr, debugfs->info); + if (ret) + dev_err(&mgr->dev, + "fpga_mgr_load returned with value %d\n", ret); + + fpga_mgr_unlock(mgr); + + return count; +} + +static ssize_t fpga_mgr_firmware_read_file(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct fpga_manager *mgr = file->private_data; + struct fpga_mgr_debugfs *debugfs = mgr->debugfs; + char *buf; + int ret; + + if (!debugfs->info->firmware_name) + return 0; + + buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = snprintf(buf, PAGE_SIZE, "%s\n", debugfs->info->firmware_name); + if (ret < 0) { + kfree(buf); + return ret; + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); + kfree(buf); + + return ret; +} + +static const struct file_operations fpga_mgr_firmware_fops = { + .open = simple_open, + .read = fpga_mgr_firmware_read_file, + .write = fpga_mgr_firmware_write_file, + .llseek = default_llseek, +}; + +static ssize_t fpga_mgr_image_write_file(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct fpga_manager *mgr = file->private_data; + struct fpga_mgr_debugfs *debugfs = mgr->debugfs; + char *buf; + int ret; + + dev_info(&mgr->dev, "writing %zu bytes to %s\n", count, mgr->name); + + ret = fpga_mgr_lock(mgr); + if (ret) { + dev_err(&mgr->dev, "FPGA manager is busy\n"); + return -EBUSY; + } + + buf = kzalloc(count, GFP_KERNEL); + if (!buf) { + fpga_mgr_unlock(mgr); + return -ENOMEM; + } + + if (copy_from_user(buf, user_buf, count)) { + fpga_mgr_unlock(mgr); + kfree(buf); + return -EFAULT; + } + + /* If firmware interface was previously used, forget it. 
*/ + if (debugfs->info->firmware_name) + devm_kfree(&mgr->dev, debugfs->info->firmware_name); + debugfs->info->firmware_name = NULL; + + debugfs->info->buf = buf; + debugfs->info->count = count; + + ret = fpga_mgr_load(mgr, debugfs->info); + if (ret) + dev_err(&mgr->dev, + "fpga_mgr_buf_load returned with value %d\n", ret); + + fpga_mgr_unlock(mgr); + + debugfs->info->buf = NULL; + debugfs->info->count = 0; + + kfree(buf); + + return count; +} + +static const struct file_operations fpga_mgr_image_fops = { + .open = simple_open, + .write = fpga_mgr_image_write_file, + .llseek = default_llseek, +}; + +void fpga_mgr_debugfs_add(struct fpga_manager *mgr) +{ + struct fpga_mgr_debugfs *debugfs; + struct fpga_image_info *info; + + if (!fpga_mgr_debugfs_root) + return; + + debugfs = kzalloc(sizeof(*debugfs), GFP_KERNEL); + if (!debugfs) + return; + + info = fpga_image_info_alloc(&mgr->dev); + if (!info) { + kfree(debugfs); + return; + } + debugfs->info = info; + + debugfs->debugfs_dir = debugfs_create_dir(dev_name(&mgr->dev), + fpga_mgr_debugfs_root); + + debugfs_create_file("firmware_name", 0600, debugfs->debugfs_dir, mgr, + &fpga_mgr_firmware_fops); + + debugfs_create_file("image", 0200, debugfs->debugfs_dir, mgr, + &fpga_mgr_image_fops); + + debugfs_create_u32("flags", 0600, debugfs->debugfs_dir, &info->flags); + + debugfs_create_u32("config_complete_timeout_us", 0600, + debugfs->debugfs_dir, + &info->config_complete_timeout_us); + + mgr->debugfs = debugfs; +} + +void fpga_mgr_debugfs_remove(struct fpga_manager *mgr) +{ + struct fpga_mgr_debugfs *debugfs = mgr->debugfs; + + if (!fpga_mgr_debugfs_root) + return; + + debugfs_remove_recursive(debugfs->debugfs_dir); + + /* this function also frees debugfs->info->firmware_name */ + fpga_image_info_free(debugfs->info); + + kfree(debugfs); +} + +void fpga_mgr_debugfs_init(void) +{ + fpga_mgr_debugfs_root = debugfs_create_dir("fpga_manager", NULL); + if (!fpga_mgr_debugfs_root) + pr_warn("fpga_mgr: Failed to create debugfs 
root\n"); +} + +void fpga_mgr_debugfs_uninit(void) +{ + debugfs_remove_recursive(fpga_mgr_debugfs_root); +} + +#endif /* CONFIG_FPGA_MGR_DEBUG_FS */ diff --git a/drivers/fpga/fpga-mgr-debugfs.h b/drivers/fpga/fpga-mgr-debugfs.h new file mode 100644 index 0000000000000..1248a265b11f3 --- /dev/null +++ b/drivers/fpga/fpga-mgr-debugfs.h @@ -0,0 +1,35 @@ +/* + * FPGA Manager DebugFS + * + * Copyright (C) 2016 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ +#ifndef _LINUX_FPGA_MGR_DEBUGFS_H +#define _LINUX_FPGA_MGR_DEBUGFS_H + +void fpga_mgr_debugfs_add(struct fpga_manager *mgr); +void fpga_mgr_debugfs_remove(struct fpga_manager *mgr); +void fpga_mgr_debugfs_init(void); +void fpga_mgr_debugfs_uninit(void); + +#if !IS_ENABLED(CONFIG_FPGA_MGR_DEBUG_FS) + +void fpga_mgr_debugfs_add(struct fpga_manager *mgr) {} +void fpga_mgr_debugfs_remove(struct fpga_manager *mgr) {} +void fpga_mgr_debugfs_init(void) {} +void fpga_mgr_debugfs_uninit(void) {} + +#endif /* CONFIG_FPGA_MGR_DEBUG_FS */ + +#endif /*_LINUX_FPGA_MGR_DEBUGFS_H */ diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c index 0f4035b089a2e..dee4bbeebc401 100644 --- a/drivers/fpga/fpga-mgr.c +++ b/drivers/fpga/fpga-mgr.c @@ -17,6 +17,7 @@ #include #include #include +#include "fpga-mgr-debugfs.h" static DEFINE_IDA(fpga_mgr_ida); static const struct class fpga_mgr_class; @@ -843,6 +844,10 @@ __fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info * return ERR_PTR(ret); } + fpga_mgr_debugfs_add(mgr); + + dev_info(&mgr->dev, "%s registered\n", mgr->name); + return mgr; error_device: @@ -894,6 +899,8 @@ void fpga_mgr_unregister(struct fpga_manager *mgr) { dev_info(&mgr->dev, "%s %s\n", __func__, mgr->name); + fpga_mgr_debugfs_remove(mgr); + /* * If the low level driver provides a method for putting fpga into * a desired state upon unregister, do it. 
@@ -993,11 +1000,14 @@ static int __init fpga_mgr_class_init(void) { pr_info("FPGA manager framework\n"); + fpga_mgr_debugfs_init(); + return class_register(&fpga_mgr_class); } static void __exit fpga_mgr_class_exit(void) { + fpga_mgr_debugfs_uninit(); class_unregister(&fpga_mgr_class); ida_destroy(&fpga_mgr_ida); } diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c index 8526a5a86f0cb..a82d5c7c3f462 100644 --- a/drivers/fpga/of-fpga-region.c +++ b/drivers/fpga/of-fpga-region.c @@ -220,15 +220,25 @@ of_fpga_region_parse_ov(struct fpga_region *region, info->overlay = overlay; - /* Read FPGA region properties from the overlay */ - if (of_property_read_bool(overlay, "partial-fpga-config")) - info->flags |= FPGA_MGR_PARTIAL_RECONFIG; + /* + * Read FPGA region properties from the overlay. + * + * First check the integrity of the bitstream. If the + * authentication is passed, the user can perform other + * operations. + */ + if (of_property_read_bool(overlay, "authenticate-fpga-config")) { + info->flags |= FPGA_MGR_BITSTREAM_AUTHENTICATE; + } else { + if (of_property_read_bool(overlay, "partial-fpga-config")) + info->flags |= FPGA_MGR_PARTIAL_RECONFIG; - if (of_property_read_bool(overlay, "external-fpga-config")) - info->flags |= FPGA_MGR_EXTERNAL_CONFIG; + if (of_property_read_bool(overlay, "external-fpga-config")) + info->flags |= FPGA_MGR_EXTERNAL_CONFIG; - if (of_property_read_bool(overlay, "encrypted-fpga-config")) - info->flags |= FPGA_MGR_ENCRYPTED_BITSTREAM; + if (of_property_read_bool(overlay, "encrypted-fpga-config")) + info->flags |= FPGA_MGR_ENCRYPTED_BITSTREAM; + } if (!of_property_read_string(overlay, "firmware-name", &firmware_name)) { diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c index 2c0def7d7cbb1..916c0c556c7ef 100644 --- a/drivers/fpga/stratix10-soc.c +++ b/drivers/fpga/stratix10-soc.c @@ -11,6 +11,7 @@ #include #include #include +#include /* * FPGA programming requires a higher level of privilege 
(EL3), per the SoC @@ -25,6 +26,10 @@ #define S10_BUFFER_TIMEOUT (msecs_to_jiffies(SVC_RECONFIG_BUFFER_TIMEOUT_MS)) #define S10_RECONFIG_TIMEOUT (msecs_to_jiffies(SVC_RECONFIG_REQUEST_TIMEOUT_MS)) +#define INVALID_FIRMWARE_VERSION 0xFFFF +typedef void (*s10_callback)(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data); + /* * struct s10_svc_buf * buf: virtual address of buf provided by service layer @@ -32,6 +37,7 @@ */ struct s10_svc_buf { char *buf; + dma_addr_t dma_addr; unsigned long lock; }; @@ -41,11 +47,14 @@ struct s10_priv { struct completion status_return_completion; struct s10_svc_buf svc_bufs[NUM_SVC_BUFS]; unsigned long status; + unsigned int fw_version; + bool is_smmu_enabled; }; static int s10_svc_send_msg(struct s10_priv *priv, enum stratix10_svc_command_code command, - void *payload, u32 payload_length) + void *payload, u32 payload_length, + s10_callback callback) { struct stratix10_svc_chan *chan = priv->chan; struct device *dev = priv->client.dev; @@ -58,6 +67,7 @@ static int s10_svc_send_msg(struct s10_priv *priv, msg.command = command; msg.payload = payload; msg.payload_length = payload_length; + priv->client.receive_cb = callback; ret = stratix10_svc_send(chan, &msg); dev_dbg(dev, "stratix10_svc_send returned status %d\n", ret); @@ -94,16 +104,16 @@ static bool s10_free_buffers(struct fpga_manager *mgr) } /* - * Returns count of how many buffers are not in use. + * Returns count of how many buffers are not in locked state. 
*/ -static uint s10_free_buffer_count(struct fpga_manager *mgr) +static uint s10_get_unlocked_buffer_count(struct fpga_manager *mgr) { struct s10_priv *priv = mgr->priv; uint num_free = 0; uint i; for (i = 0; i < NUM_SVC_BUFS; i++) - if (!priv->svc_bufs[i].buf) + if (!priv->svc_bufs[i].lock) num_free++; return num_free; @@ -126,6 +136,8 @@ static void s10_unlock_bufs(struct s10_priv *priv, void *kaddr) for (i = 0; i < NUM_SVC_BUFS; i++) if (priv->svc_bufs[i].buf == kaddr) { + if (priv->is_smmu_enabled == true) + dma_unmap_single(priv->client.dev, priv->svc_bufs[i].dma_addr, SVC_BUF_SIZE, DMA_TO_DEVICE); clear_bit_unlock(SVC_BUF_LOCK, &priv->svc_bufs[i].lock); return; @@ -134,6 +146,29 @@ static void s10_unlock_bufs(struct s10_priv *priv, void *kaddr) WARN(1, "Unknown buffer returned from service layer %p\n", kaddr); } +/* + * s10_fw_version_callback - callback for the version of running firmware + * @client: service layer client struct + * @data: message from service layer + */ +static void s10_fw_version_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct s10_priv *priv = client->priv; + unsigned int *version = (unsigned int *)data->kaddr1; + + if (data->status == BIT(SVC_STATUS_OK)) + priv->fw_version = *version; + else if (data->status == BIT(SVC_STATUS_NO_SUPPORT)) + dev_warn(client->dev, + "FW doesn't support bitstream authentication\n"); + else + dev_err(client->dev, "Failed to get FW version %lu\n", + BIT(data->status)); + + complete(&priv->status_return_completion); +} + /* * s10_receive_callback - callback for service layer to use to provide client * (this driver) messages received through the mailbox. 
@@ -147,6 +182,7 @@ static void s10_receive_callback(struct stratix10_svc_client *client, u32 status; int i; + pr_debug("%s data %x\n", __func__, data->status); WARN_ONCE(!data, "%s: stratix10_svc_rc_data = NULL", __func__); status = data->status; @@ -163,6 +199,7 @@ static void s10_receive_callback(struct stratix10_svc_client *client, s10_unlock_bufs(priv, data->kaddr1); s10_unlock_bufs(priv, data->kaddr2); s10_unlock_bufs(priv, data->kaddr3); + s10_unlock_bufs(priv, data->kaddr4); } complete(&priv->status_return_completion); @@ -179,7 +216,6 @@ static int s10_ops_write_init(struct fpga_manager *mgr, struct s10_priv *priv = mgr->priv; struct device *dev = priv->client.dev; struct stratix10_svc_command_config_type ctype; - char *kbuf; uint i; int ret; @@ -187,45 +223,49 @@ static int s10_ops_write_init(struct fpga_manager *mgr, if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) { dev_dbg(dev, "Requesting partial reconfiguration.\n"); ctype.flags |= BIT(COMMAND_RECONFIG_FLAG_PARTIAL); + } else if (info->flags & FPGA_MGR_BITSTREAM_AUTHENTICATE) { + if (priv->fw_version == INVALID_FIRMWARE_VERSION) { + dev_err(dev, "FW doesn't support\n"); + return -EINVAL; + } + + dev_dbg(dev, "Requesting bitstream authentication.\n"); + ctype.flags |= BIT(COMMAND_AUTHENTICATE_BITSTREAM); } else { dev_dbg(dev, "Requesting full reconfiguration.\n"); } reinit_completion(&priv->status_return_completion); ret = s10_svc_send_msg(priv, COMMAND_RECONFIG, - &ctype, sizeof(ctype)); + &ctype, sizeof(ctype), + s10_receive_callback); if (ret < 0) - goto init_done; + goto init_error; ret = wait_for_completion_timeout( &priv->status_return_completion, S10_RECONFIG_TIMEOUT); if (!ret) { dev_err(dev, "timeout waiting for RECONFIG_REQUEST\n"); ret = -ETIMEDOUT; - goto init_done; + goto init_error; } ret = 0; if (!test_and_clear_bit(SVC_STATUS_OK, &priv->status)) { ret = -ETIMEDOUT; - goto init_done; + goto init_error; } - /* Allocate buffers from the service layer's pool. 
*/ + /* Init buffer lock */ for (i = 0; i < NUM_SVC_BUFS; i++) { - kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE); - if (IS_ERR(kbuf)) { - s10_free_buffers(mgr); - ret = PTR_ERR(kbuf); - goto init_done; - } - - priv->svc_bufs[i].buf = kbuf; priv->svc_bufs[i].lock = 0; } -init_done: + goto init_done; + +init_error: stratix10_svc_done(priv->chan); +init_done: return ret; } @@ -259,8 +299,11 @@ static int s10_send_buf(struct fpga_manager *mgr, const char *buf, size_t count) svc_buf = priv->svc_bufs[i].buf; memcpy(svc_buf, buf, xfer_sz); + if (priv->is_smmu_enabled == true) + priv->svc_bufs[i].dma_addr = dma_map_single(dev, svc_buf, SVC_BUF_SIZE, DMA_TO_DEVICE); + ret = s10_svc_send_msg(priv, COMMAND_RECONFIG_DATA_SUBMIT, - svc_buf, xfer_sz); + svc_buf, xfer_sz, s10_receive_callback); if (ret < 0) { dev_err(dev, "Error while sending data to service layer (%d)", ret); @@ -288,7 +331,7 @@ static int s10_ops_write(struct fpga_manager *mgr, const char *buf, * Loop waiting for buffers to be returned. When a buffer is returned, * reuse it to send more data or free if if all data has been sent. 
*/ - while (count > 0 || s10_free_buffer_count(mgr) != NUM_SVC_BUFS) { + while (true) { reinit_completion(&priv->status_return_completion); if (count > 0) { @@ -299,23 +342,17 @@ static int s10_ops_write(struct fpga_manager *mgr, const char *buf, count -= sent; buf += sent; } else { - if (s10_free_buffers(mgr)) + if (s10_get_unlocked_buffer_count(mgr) == NUM_SVC_BUFS) return 0; ret = s10_svc_send_msg( priv, COMMAND_RECONFIG_DATA_CLAIM, - NULL, 0); + NULL, 0, s10_receive_callback); if (ret < 0) break; } - /* - * If callback hasn't already happened, wait for buffers to be - * returned from service layer - */ - wait_status = 1; /* not timed out */ - if (!priv->status) - wait_status = wait_for_completion_timeout( + wait_status = wait_for_completion_timeout( &priv->status_return_completion, S10_BUFFER_TIMEOUT); @@ -339,8 +376,8 @@ static int s10_ops_write(struct fpga_manager *mgr, const char *buf, } } - if (!s10_free_buffers(mgr)) - dev_err(dev, "%s not all buffers were freed\n", __func__); + if (ret < 0) + stratix10_svc_done(priv->chan); return ret; } @@ -353,12 +390,16 @@ static int s10_ops_write_complete(struct fpga_manager *mgr, unsigned long timeout; int ret; - timeout = usecs_to_jiffies(info->config_complete_timeout_us); + /* The time taken to process this is close to 600ms + * This MUST be increased over 1 second + */ + timeout = S10_RECONFIG_TIMEOUT; do { reinit_completion(&priv->status_return_completion); - ret = s10_svc_send_msg(priv, COMMAND_RECONFIG_STATUS, NULL, 0); + ret = s10_svc_send_msg(priv, COMMAND_RECONFIG_STATUS, + NULL, 0, s10_receive_callback); if (ret < 0) break; @@ -400,15 +441,24 @@ static int s10_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct s10_priv *priv; struct fpga_manager *mgr; - int ret; + int ret, i; + struct device_node *node = pdev->dev.of_node; + char *kbuf; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + priv->fw_version = INVALID_FIRMWARE_VERSION; priv->client.dev = 
dev; - priv->client.receive_cb = s10_receive_callback; + priv->client.receive_cb = NULL; priv->client.priv = priv; + priv->is_smmu_enabled = false; + + if (of_device_is_compatible(node, "intel,agilex5-soc-fpga-mgr")) + priv->is_smmu_enabled = device_property_read_bool(dev, + "altr,smmu_enable_quirk"); + priv->chan = stratix10_svc_request_channel_byname(&priv->client, SVC_CLIENT_FPGA); @@ -428,6 +478,40 @@ static int s10_probe(struct platform_device *pdev) goto probe_err; } + /* get the running firmware version */ + ret = s10_svc_send_msg(priv, COMMAND_FIRMWARE_VERSION, + NULL, 0, s10_fw_version_callback); + if (ret) { + dev_err(dev, "couldn't get firmware version\n"); + stratix10_svc_done(priv->chan); + goto probe_err; + } + + ret = wait_for_completion_timeout( + &priv->status_return_completion, S10_RECONFIG_TIMEOUT); + if (!ret) { + dev_err(dev, "timeout waiting for firmware version\n"); + stratix10_svc_done(priv->chan); + ret = -ETIMEDOUT; + goto probe_err; + } + + ret = 0; + + /* Allocate buffers from the service layer's pool. 
*/ + for (i = 0; i < NUM_SVC_BUFS; i++) { + kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE); + if (IS_ERR(kbuf)) { + s10_free_buffers(mgr); + ret = PTR_ERR(kbuf); + goto probe_err; + } + + priv->svc_bufs[i].buf = kbuf; + priv->svc_bufs[i].lock = 0; + } + + stratix10_svc_done(priv->chan); platform_set_drvdata(pdev, mgr); return 0; @@ -440,6 +524,13 @@ static void s10_remove(struct platform_device *pdev) { struct fpga_manager *mgr = platform_get_drvdata(pdev); struct s10_priv *priv = mgr->priv; + int i; + + for (i = 0; i < NUM_SVC_BUFS; i++) { + if (priv->svc_bufs[i].buf) + stratix10_svc_free_memory(priv->chan, + priv->svc_bufs[i].buf); + } fpga_mgr_unregister(mgr); stratix10_svc_free_channel(priv->chan); @@ -448,6 +539,7 @@ static void s10_remove(struct platform_device *pdev) static const struct of_device_id s10_of_match[] = { {.compatible = "intel,stratix10-soc-fpga-mgr"}, {.compatible = "intel,agilex-soc-fpga-mgr"}, + {.compatible = "intel,agilex5-soc-fpga-mgr"}, {}, }; diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 58480a3f4683f..c4355a1048748 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -277,6 +277,14 @@ config SENSORS_AS370 This driver can also be built as a module. If so, the module will be called as370-hwmon. +config SENSORS_ALTERA_A10SR + bool "Altera Arria10 System Status" + depends on MFD_ALTERA_A10SR + help + If you say yes here you get support for the power ready status + for the Arria10's external power supplies on the Arria10 DevKit. + These values are read over the SPI bus from the Arria10 System + Resource chip. config SENSORS_ASC7621 tristate "Andigilog aSC7621" @@ -2005,6 +2013,16 @@ config SENSORS_SMSC47M192 This driver can also be built as a module. If so, the module will be called smsc47m192. 
+config SENSORS_SOC64 + tristate "64-bit SoC FPGA Hardware monitoring features" + depends on INTEL_STRATIX10_SERVICE + help + If you say yes here you get support for the temperature and + voltage sensors of 64-bit SoC FPGA devices. + + This driver can also be built as a module. If so, the module + will be called soc64-hwmon + config SENSORS_SMSC47B397 tristate "SMSC LPC47B397-NC" depends on HAS_IOPORT diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 9554d2fdcf7bb..ee7d54c0f83ce 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -47,6 +47,7 @@ obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o obj-$(CONFIG_SENSORS_AHT10) += aht10.o +obj-$(CONFIG_SENSORS_ALTERA_A10SR) += altera-a10sr-hwmon.o obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o obj-$(CONFIG_SENSORS_AQUACOMPUTER_D5NEXT) += aquacomputer_d5next.o obj-$(CONFIG_SENSORS_ARM_SCMI) += scmi-hwmon.o @@ -206,6 +207,7 @@ obj-$(CONFIG_SENSORS_SMPRO) += smpro-hwmon.o obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o +obj-$(CONFIG_SENSORS_SOC64) += soc64-hwmon.o obj-$(CONFIG_SENSORS_SPARX5) += sparx5-temp.o obj-$(CONFIG_SENSORS_SPD5118) += spd5118.o obj-$(CONFIG_SENSORS_STTS751) += stts751.o diff --git a/drivers/hwmon/altera-a10sr-hwmon.c b/drivers/hwmon/altera-a10sr-hwmon.c new file mode 100644 index 0000000000000..584cc48a9b1bb --- /dev/null +++ b/drivers/hwmon/altera-a10sr-hwmon.c @@ -0,0 +1,406 @@ +/* + * Copyright Intel Corporation (C) 2017-2018. All Rights Reserved + * Copyright Altera Corporation (C) 2014-2016. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + * HW Monitor driver for Altera Arria10 MAX5 System Resource Chip + * Adapted from DA9052 + */ + +#include +#include +#include +#include +#include + +#define ALTR_A10SR_1V0_BIT_POS ALTR_A10SR_PG1_1V0_SHIFT +#define ALTR_A10SR_0V95_BIT_POS ALTR_A10SR_PG1_0V95_SHIFT +#define ALTR_A10SR_0V9_BIT_POS ALTR_A10SR_PG1_0V9_SHIFT +#define ALTR_A10SR_10V_BIT_POS ALTR_A10SR_PG1_10V_SHIFT +#define ALTR_A10SR_5V0_BIT_POS ALTR_A10SR_PG1_5V0_SHIFT +#define ALTR_A10SR_3V3_BIT_POS ALTR_A10SR_PG1_3V3_SHIFT +#define ALTR_A10SR_2V5_BIT_POS ALTR_A10SR_PG1_2V5_SHIFT +#define ALTR_A10SR_1V8_BIT_POS ALTR_A10SR_PG1_1V8_SHIFT +#define ALTR_A10SR_OP_FLAG_BIT_POS ALTR_A10SR_PG1_OP_FLAG_SHIFT +/* 2nd register needs an offset of 8 to get to 2nd register */ +#define ALTR_A10SR_FBC2MP_BIT_POS (8 + ALTR_A10SR_PG2_FBC2MP_SHIFT) +#define ALTR_A10SR_FAC2MP_BIT_POS (8 + ALTR_A10SR_PG2_FAC2MP_SHIFT) +#define ALTR_A10SR_FMCBVADJ_BIT_POS (8 + ALTR_A10SR_PG2_FMCBVADJ_SHIFT) +#define ALTR_A10SR_FMCAVADJ_BIT_POS (8 + ALTR_A10SR_PG2_FMCAVADJ_SHIFT) +#define ALTR_A10SR_HL_VDDQ_BIT_POS (8 + ALTR_A10SR_PG2_HL_VDDQ_SHIFT) +#define ALTR_A10SR_HL_VDD_BIT_POS (8 + ALTR_A10SR_PG2_HL_VDD_SHIFT) +#define ALTR_A10SR_HL_HPS_BIT_POS (8 + ALTR_A10SR_PG2_HL_HPS_SHIFT) +#define ALTR_A10SR_HPS_BIT_POS (8 + ALTR_A10SR_PG2_HPS_SHIFT) +/* 3rd register needs an offset of 16 to get to 3rd register */ +#define ALTR_A10SR_PCIE_WAKE_BIT_POS (16 + ALTR_A10SR_PG3_PCIE_WAKE_SHIFT) +#define ALTR_A10SR_PCIE_PR_BIT_POS (16 + ALTR_A10SR_PG3_PCIE_PR_SHIFT) +#define ALTR_A10SR_FMCB_PR_BIT_POS (16 + ALTR_A10SR_PG3_FMCB_PR_SHIFT) +#define ALTR_A10SR_FMCA_PR_BIT_POS (16 + 
ALTR_A10SR_PG3_FMCA_PR_SHIFT) +#define ALTR_A10SR_FILE_PR_BIT_POS (16 + ALTR_A10SR_PG3_FILE_PR_SHIFT) +#define ALTR_A10SR_BF_PR_BIT_POS (16 + ALTR_A10SR_PG3_BF_PR_SHIFT) +#define ALTR_A10SR_10V_FAIL_BIT_POS (16 + ALTR_A10SR_PG3_10V_FAIL_SHIFT) +#define ALTR_A10SR_FAM2C_BIT_POS (16 + ALTR_A10SR_PG3_FAM2C_SHIFT) +/* FMCA/B & PCIE Enables need an offset of 24 */ +#define ALTR_A10SR_FMCB_AUXEN_POS (24 + ALTR_A10SR_FMCB_AUXEN_SHIFT) +#define ALTR_A10SR_FMCB_EN_POS (24 + ALTR_A10SR_FMCB_EN_SHIFT) +#define ALTR_A10SR_FMCA_AUXEN_POS (24 + ALTR_A10SR_FMCA_AUXEN_SHIFT) +#define ALTR_A10SR_FMCA_EN_POS (24 + ALTR_A10SR_FMCA_EN_SHIFT) +#define ALTR_A10SR_PCIE_AUXEN_POS (24 + ALTR_A10SR_PCIE_AUXEN_SHIFT) +#define ALTR_A10SR_PCIE_EN_POS (24 + ALTR_A10SR_PCIE_EN_SHIFT) +/* HPS Resets need an offset of 32 */ +#define ALTR_A10SR_HPS_RST_UART_POS (32 + ALTR_A10SR_HPS_UARTA_RSTN_SHIFT) +#define ALTR_A10SR_HPS_RST_WARM_POS (32 + ALTR_A10SR_HPS_WARM_RSTN_SHIFT) +#define ALTR_A10SR_HPS_RST_WARM1_POS (32 + ALTR_A10SR_HPS_WARM_RST1N_SHIFT) +#define ALTR_A10SR_HPS_RST_COLD_POS (32 + ALTR_A10SR_HPS_COLD_RSTN_SHIFT) +#define ALTR_A10SR_HPS_RST_NPOR_POS (32 + ALTR_A10SR_HPS_NPOR_SHIFT) +#define ALTR_A10SR_HPS_RST_NRST_POS (32 + ALTR_A10SR_HPS_NRST_SHIFT) +#define ALTR_A10SR_HPS_RST_ENET_POS (32 + ALTR_A10SR_HPS_ENET_RSTN_SHIFT) +#define ALTR_A10SR_HPS_RST_ENETINT_POS (32 + ALTR_A10SR_HPS_ENET_INTN_SHIFT) +/* Peripheral Resets need an offset of 40 */ +#define ALTR_A10SR_PER_RST_USB_POS (40 + ALTR_A10SR_USB_RST_SHIFT) +#define ALTR_A10SR_PER_RST_BQSPI_POS (40 + ALTR_A10SR_BQSPI_RST_N_SHIFT) +#define ALTR_A10SR_PER_RST_FILE_POS (40 + ALTR_A10SR_FILE_RST_N_SHIFT) +#define ALTR_A10SR_PER_RST_PCIE_POS (40 + ALTR_A10SR_PCIE_PERST_N_SHIFT) +/* HWMON - Read Entire Register */ +#define ALTR_A10SR_ENTIRE_REG (88) +#define ALTR_A10SR_ENTIRE_REG_MASK (0xFF) +#define ALTR_A10SR_VERSION (0 + ALTR_A10SR_ENTIRE_REG) +#define ALTR_A10SR_LED (1 + ALTR_A10SR_ENTIRE_REG) +#define ALTR_A10SR_PB (2 + 
ALTR_A10SR_ENTIRE_REG) +#define ALTR_A10SR_PBF (3 + ALTR_A10SR_ENTIRE_REG) +#define ALTR_A10SR_PG1 (4 + ALTR_A10SR_ENTIRE_REG) +#define ALTR_A10SR_PG2 (5 + ALTR_A10SR_ENTIRE_REG) +#define ALTR_A10SR_PG3 (6 + ALTR_A10SR_ENTIRE_REG) +#define ALTR_A10SR_FMCAB (7 + ALTR_A10SR_ENTIRE_REG) +#define ALTR_A10SR_HPS_RST (8 + ALTR_A10SR_ENTIRE_REG) +#define ALTR_A10SR_PER_RST (9 + ALTR_A10SR_ENTIRE_REG) +#define ALTR_A10SR_SFPA (10 + ALTR_A10SR_ENTIRE_REG) +#define ALTR_A10SR_SFPB (11 + ALTR_A10SR_ENTIRE_REG) +#define ALTR_A10SR_I2C_MASTER (12 + ALTR_A10SR_ENTIRE_REG) +#define ALTR_A10SR_WARM_RST (13 + ALTR_A10SR_ENTIRE_REG) +#define ALTR_A10SR_WARM_RST_KEY (14 + ALTR_A10SR_ENTIRE_REG) +#define ALTR_A10SR_PMBUS (15 + ALTR_A10SR_ENTIRE_REG) + +/** + * struct altr_a10sr_hwmon - Altera Max5 HWMON device private data structure + * @device: hwmon class. + * @regmap: the regmap from the parent device. + */ +struct altr_a10sr_hwmon { + struct regmap *regmap; +}; + +static ssize_t altr_a10sr_read_status(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct altr_a10sr_hwmon *hwmon = dev_get_drvdata(dev); + int val, ret, index = to_sensor_dev_attr(devattr)->index; + int mask = ALTR_A10SR_REG_BIT_MASK(index); + unsigned char reg = ALTR_A10SR_PWR_GOOD1_REG + + ALTR_A10SR_REG_OFFSET(index); + + /* Check if this is an entire register read */ + if (index >= ALTR_A10SR_ENTIRE_REG) { + reg = ((index - ALTR_A10SR_ENTIRE_REG) << 1); + mask = ALTR_A10SR_ENTIRE_REG_MASK; + } + + ret = regmap_read(hwmon->regmap, reg, &val); + if (ret < 0) + return ret; + + if (mask == ALTR_A10SR_ENTIRE_REG_MASK) + val = val & mask; + else + val = !!(val & mask); + + return scnprintf(buf, 5, "%d\n", val); +} + +static ssize_t set_enable(struct device *dev, + struct device_attribute *dev_attr, + const char *buf, size_t count) +{ + unsigned long val; + struct altr_a10sr_hwmon *hwmon = dev_get_drvdata(dev); + int ret, index = to_sensor_dev_attr(dev_attr)->index; + int mask = 
ALTR_A10SR_REG_BIT_MASK(index); + unsigned char reg = (ALTR_A10SR_PWR_GOOD1_REG & WRITE_REG_MASK) + + ALTR_A10SR_REG_OFFSET(index); + int res = kstrtol(buf, 10, &val); + + if (res < 0) + return res; + + /* Check if this is an entire register write */ + if (index >= ALTR_A10SR_ENTIRE_REG) { + reg = ((index - ALTR_A10SR_ENTIRE_REG) << 1); + mask = ALTR_A10SR_ENTIRE_REG_MASK; + } + + ret = regmap_update_bits(hwmon->regmap, reg, mask, val); + if (ret < 0) + return ret; + + return count; +} + +/* First Power Good Register Bits */ +static SENSOR_DEVICE_ATTR(1v0_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_1V0_BIT_POS); +static SENSOR_DEVICE_ATTR(0v95_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_0V95_BIT_POS); +static SENSOR_DEVICE_ATTR(0v9_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_0V9_BIT_POS); +static SENSOR_DEVICE_ATTR(5v0_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_5V0_BIT_POS); +static SENSOR_DEVICE_ATTR(3v3_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_3V3_BIT_POS); +static SENSOR_DEVICE_ATTR(2v5_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_2V5_BIT_POS); +static SENSOR_DEVICE_ATTR(1v8_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_1V8_BIT_POS); +static SENSOR_DEVICE_ATTR(opflag_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_OP_FLAG_BIT_POS); +/* Second Power Good Register Bits */ +static SENSOR_DEVICE_ATTR(fbc2mp_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_FBC2MP_BIT_POS); +static SENSOR_DEVICE_ATTR(fac2mp_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_FAC2MP_BIT_POS); +static SENSOR_DEVICE_ATTR(fmcbvadj_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_FMCBVADJ_BIT_POS); +static SENSOR_DEVICE_ATTR(fmcavadj_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_FMCAVADJ_BIT_POS); +static SENSOR_DEVICE_ATTR(hl_vddq_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_HL_VDDQ_BIT_POS); +static 
SENSOR_DEVICE_ATTR(hl_vdd_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_HL_VDD_BIT_POS); +static SENSOR_DEVICE_ATTR(hlhps_vdd_alarm, S_IRUGO, altr_a10sr_read_status, + NULL, ALTR_A10SR_HL_HPS_BIT_POS); +static SENSOR_DEVICE_ATTR(hps_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_HPS_BIT_POS); +/* Third Power Good Register Bits */ +static SENSOR_DEVICE_ATTR(pcie_wake_input, S_IRUGO, altr_a10sr_read_status, + NULL, ALTR_A10SR_PCIE_WAKE_BIT_POS); +static SENSOR_DEVICE_ATTR(pcie_pr_input, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_PCIE_PR_BIT_POS); +static SENSOR_DEVICE_ATTR(fmcb_pr_input, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_FMCB_PR_BIT_POS); +static SENSOR_DEVICE_ATTR(fmca_pr_input, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_FMCA_PR_BIT_POS); +static SENSOR_DEVICE_ATTR(file_pr_input, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_FILE_PR_BIT_POS); +static SENSOR_DEVICE_ATTR(bf_pr_input, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_BF_PR_BIT_POS); +static SENSOR_DEVICE_ATTR(10v_alarm, S_IRUGO, altr_a10sr_read_status, + NULL, ALTR_A10SR_10V_FAIL_BIT_POS); +static SENSOR_DEVICE_ATTR(fam2c_alarm, S_IRUGO, altr_a10sr_read_status, NULL, + ALTR_A10SR_FAM2C_BIT_POS); +/* Peripheral Enable bits */ +static SENSOR_DEVICE_ATTR(fmcb_aux_en, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_FMCB_AUXEN_POS); +static SENSOR_DEVICE_ATTR(fmcb_en, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_FMCB_EN_POS); +static SENSOR_DEVICE_ATTR(fmca_aux_en, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_FMCA_AUXEN_POS); +static SENSOR_DEVICE_ATTR(fmca_en, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_FMCA_EN_POS); +static SENSOR_DEVICE_ATTR(pcie_aux_en, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_PCIE_AUXEN_POS); +static SENSOR_DEVICE_ATTR(pcie_en, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, + 
ALTR_A10SR_PCIE_EN_POS); +/* HPS Reset bits */ +static SENSOR_DEVICE_ATTR(hps_uart_rst, S_IRUGO, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_HPS_RST_UART_POS); +static SENSOR_DEVICE_ATTR(hps_warm_rst, S_IRUGO, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_HPS_RST_WARM_POS); +static SENSOR_DEVICE_ATTR(hps_warm1_rst, S_IRUGO, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_HPS_RST_WARM1_POS); +static SENSOR_DEVICE_ATTR(hps_cold_rst, S_IRUGO, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_HPS_RST_COLD_POS); +static SENSOR_DEVICE_ATTR(hps_npor, S_IRUGO, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_HPS_RST_NPOR_POS); +static SENSOR_DEVICE_ATTR(hps_nrst, S_IRUGO, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_HPS_RST_NRST_POS); +static SENSOR_DEVICE_ATTR(hps_enet_rst, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_HPS_RST_ENET_POS); +static SENSOR_DEVICE_ATTR(hps_enet_int, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_HPS_RST_ENETINT_POS); +/* Peripheral Reset bits */ +static SENSOR_DEVICE_ATTR(usb_reset, S_IRUGO | S_IWUSR, altr_a10sr_read_status, + set_enable, ALTR_A10SR_PER_RST_USB_POS); +static SENSOR_DEVICE_ATTR(bqspi_resetn, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_PER_RST_BQSPI_POS); +static SENSOR_DEVICE_ATTR(file_resetn, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_PER_RST_FILE_POS); +static SENSOR_DEVICE_ATTR(pcie_perstn, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_PER_RST_PCIE_POS); +/* Entire Byte Read */ +static SENSOR_DEVICE_ATTR(max5_version, S_IRUGO, altr_a10sr_read_status, + NULL, ALTR_A10SR_VERSION); +static SENSOR_DEVICE_ATTR(max5_led, S_IRUGO, altr_a10sr_read_status, + NULL, ALTR_A10SR_LED); +static SENSOR_DEVICE_ATTR(max5_button, S_IRUGO, altr_a10sr_read_status, + NULL, ALTR_A10SR_PB); +static SENSOR_DEVICE_ATTR(max5_button_irq, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, 
ALTR_A10SR_PBF); +static SENSOR_DEVICE_ATTR(max5_pg1, S_IRUGO, altr_a10sr_read_status, + NULL, ALTR_A10SR_PG1); +static SENSOR_DEVICE_ATTR(max5_pg2, S_IRUGO, altr_a10sr_read_status, + NULL, ALTR_A10SR_PG2); +static SENSOR_DEVICE_ATTR(max5_pg3, S_IRUGO, altr_a10sr_read_status, + NULL, ALTR_A10SR_PG3); +static SENSOR_DEVICE_ATTR(max5_fmcab, S_IRUGO, altr_a10sr_read_status, + NULL, ALTR_A10SR_FMCAB); +static SENSOR_DEVICE_ATTR(max5_hps_resets, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_HPS_RST); +static SENSOR_DEVICE_ATTR(max5_per_resets, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_PER_RST); +static SENSOR_DEVICE_ATTR(max5_sfpa, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, ALTR_A10SR_SFPA); +static SENSOR_DEVICE_ATTR(max5_sfpb, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, ALTR_A10SR_SFPB); +static SENSOR_DEVICE_ATTR(max5_i2c_master, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_I2C_MASTER); +static SENSOR_DEVICE_ATTR(max5_pmbus, S_IRUGO | S_IWUSR, + altr_a10sr_read_status, set_enable, + ALTR_A10SR_PMBUS); + +static struct attribute *altr_a10sr_attrs[] = { + /* First Power Good Register */ + &sensor_dev_attr_opflag_alarm.dev_attr.attr, + &sensor_dev_attr_1v8_alarm.dev_attr.attr, + &sensor_dev_attr_2v5_alarm.dev_attr.attr, + &sensor_dev_attr_1v0_alarm.dev_attr.attr, + &sensor_dev_attr_3v3_alarm.dev_attr.attr, + &sensor_dev_attr_5v0_alarm.dev_attr.attr, + &sensor_dev_attr_0v9_alarm.dev_attr.attr, + &sensor_dev_attr_0v95_alarm.dev_attr.attr, + /* Second Power Good Register */ + &sensor_dev_attr_hps_alarm.dev_attr.attr, + &sensor_dev_attr_hlhps_vdd_alarm.dev_attr.attr, + &sensor_dev_attr_hl_vdd_alarm.dev_attr.attr, + &sensor_dev_attr_hl_vddq_alarm.dev_attr.attr, + &sensor_dev_attr_fmcavadj_alarm.dev_attr.attr, + &sensor_dev_attr_fmcbvadj_alarm.dev_attr.attr, + &sensor_dev_attr_fac2mp_alarm.dev_attr.attr, + &sensor_dev_attr_fbc2mp_alarm.dev_attr.attr, + /* Third Power Good 
Register */ + &sensor_dev_attr_pcie_wake_input.dev_attr.attr, + &sensor_dev_attr_pcie_pr_input.dev_attr.attr, + &sensor_dev_attr_fmcb_pr_input.dev_attr.attr, + &sensor_dev_attr_fmca_pr_input.dev_attr.attr, + &sensor_dev_attr_file_pr_input.dev_attr.attr, + &sensor_dev_attr_bf_pr_input.dev_attr.attr, + &sensor_dev_attr_10v_alarm.dev_attr.attr, + &sensor_dev_attr_fam2c_alarm.dev_attr.attr, +/* Peripheral Enable Register */ + &sensor_dev_attr_fmcb_aux_en.dev_attr.attr, + &sensor_dev_attr_fmcb_en.dev_attr.attr, + &sensor_dev_attr_fmca_aux_en.dev_attr.attr, + &sensor_dev_attr_fmca_en.dev_attr.attr, + &sensor_dev_attr_pcie_aux_en.dev_attr.attr, + &sensor_dev_attr_pcie_en.dev_attr.attr, + /* HPS Reset bits */ + &sensor_dev_attr_hps_uart_rst.dev_attr.attr, + &sensor_dev_attr_hps_warm_rst.dev_attr.attr, + &sensor_dev_attr_hps_warm1_rst.dev_attr.attr, + &sensor_dev_attr_hps_cold_rst.dev_attr.attr, + &sensor_dev_attr_hps_npor.dev_attr.attr, + &sensor_dev_attr_hps_nrst.dev_attr.attr, + &sensor_dev_attr_hps_enet_rst.dev_attr.attr, + &sensor_dev_attr_hps_enet_int.dev_attr.attr, + /* Peripheral Reset bits */ + &sensor_dev_attr_usb_reset.dev_attr.attr, + &sensor_dev_attr_bqspi_resetn.dev_attr.attr, + &sensor_dev_attr_file_resetn.dev_attr.attr, + &sensor_dev_attr_pcie_perstn.dev_attr.attr, + /* Byte Value Register */ + &sensor_dev_attr_max5_version.dev_attr.attr, + &sensor_dev_attr_max5_led.dev_attr.attr, + &sensor_dev_attr_max5_button.dev_attr.attr, + &sensor_dev_attr_max5_button_irq.dev_attr.attr, + &sensor_dev_attr_max5_pg1.dev_attr.attr, + &sensor_dev_attr_max5_pg2.dev_attr.attr, + &sensor_dev_attr_max5_pg3.dev_attr.attr, + &sensor_dev_attr_max5_fmcab.dev_attr.attr, + &sensor_dev_attr_max5_hps_resets.dev_attr.attr, + &sensor_dev_attr_max5_per_resets.dev_attr.attr, + &sensor_dev_attr_max5_sfpa.dev_attr.attr, + &sensor_dev_attr_max5_sfpb.dev_attr.attr, + &sensor_dev_attr_max5_i2c_master.dev_attr.attr, + &sensor_dev_attr_max5_pmbus.dev_attr.attr, + NULL +}; + 
+ATTRIBUTE_GROUPS(altr_a10sr); + +static int altr_a10sr_hwmon_probe(struct platform_device *pdev) +{ + struct altr_a10sr_hwmon *hwmon; + struct device *hwmon_dev; + struct altr_a10sr *a10sr = dev_get_drvdata(pdev->dev.parent); + + hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL); + if (!hwmon) + return -ENOMEM; + + hwmon->regmap = a10sr->regmap; + + hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev, + "a10sr_hwmon", hwmon, + altr_a10sr_groups); + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static const struct of_device_id altr_a10sr_hwmon_of_match[] = { + { .compatible = "altr,a10sr-hwmon" }, + { }, +}; +MODULE_DEVICE_TABLE(of, altr_a10sr_hwmon_of_match); + +static struct platform_driver altr_a10sr_hwmon_driver = { + .probe = altr_a10sr_hwmon_probe, + .driver = { + .name = "altr_a10sr_hwmon", + .of_match_table = of_match_ptr(altr_a10sr_hwmon_of_match), + }, +}; + +module_platform_driver(altr_a10sr_hwmon_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Thor Thayer "); +MODULE_DESCRIPTION("HW Monitor driver for Altera Arria10 System Resource Chip"); diff --git a/drivers/hwmon/soc64-hwmon.c b/drivers/hwmon/soc64-hwmon.c new file mode 100644 index 0000000000000..ed6542f764d50 --- /dev/null +++ b/drivers/hwmon/soc64-hwmon.c @@ -0,0 +1,470 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 64-bit SoC FPGA hardware monitoring features + * + * Copyright (c) 2021 - 2024 Intel Corporation. 
All rights reserved + * + * Author: Kris Chaplin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HWMON_TIMEOUT (msecs_to_jiffies(SVC_HWMON_REQUEST_TIMEOUT_MS)) + +#define ETEMP_INACTIVE 0x80000000 +#define ETEMP_TOO_OLD 0x80000001 +#define ETEMP_NOT_PRESENT 0x80000002 +#define ETEMP_TIMEOUT 0x80000003 +#define ETEMP_CORRUPT 0x80000004 +#define ETEMP_BUSY 0x80000005 +#define ETEMP_NOT_INITIALIZED 0x800000FF + +#define SOC64_HWMON_MAXSENSORS 16 +#define SOC64_HWMON_TEMPERATURE "temperature" +#define SOC64_HWMON_VOLTAGE "voltage" + +#define HWMON_RETRY_SLEEP_MS (1U) +#define HWMON_ASYNC_MSG_RETRY (3U) + +struct soc64_hwmon_priv { + struct stratix10_svc_chan *chan; + struct stratix10_svc_client client; + int temperature; + int voltage; + int temperature_channels; + int voltage_channels; + const char *soc64_volt_chan_names[SOC64_HWMON_MAXSENSORS]; + const char *soc64_temp_chan_names[SOC64_HWMON_MAXSENSORS]; + u32 soc64_volt_chan[SOC64_HWMON_MAXSENSORS]; + u32 soc64_temp_chan[SOC64_HWMON_MAXSENSORS]; +}; + + +static umode_t soc64_is_visible(const void *dev, + enum hwmon_sensor_types type, + u32 attr, int chan) +{ + const struct soc64_hwmon_priv *priv = dev; + + switch (type) { + case hwmon_temp: + if (chan < priv->temperature_channels) + return 0444; + + return 0; + case hwmon_in: + if (chan < priv->voltage_channels) + return 0444; + + return 0; + + default: + return 0; + } +} + +static void soc64_async_callback(void *ptr) +{ + if (ptr) + complete(ptr); +} + +static int soc64_async_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int chan, long *val) +{ + unsigned long ret; + int status, index; + struct soc64_hwmon_priv *priv = dev_get_drvdata(dev); + void *handle = NULL; + struct completion completion; + struct stratix10_svc_cb_data data; + struct stratix10_svc_client_msg msg = {0}; + + init_completion(&completion); + + switch (type) { + case hwmon_temp: + if (chan > 15) + return -EOPNOTSUPP; 
+ + /* To support Page at upper word and channel at lower word */ + msg.arg[0] = + (((u64)1 << (priv->soc64_temp_chan[chan] & 0xFFFF)) + + (priv->soc64_temp_chan[chan] & 0xFFF0000)); + msg.command = COMMAND_HWMON_READTEMP; + + for (index = 0; index < HWMON_ASYNC_MSG_RETRY; index++) { + status = stratix10_svc_async_send(priv->chan, &msg, + &handle, soc64_async_callback, + &completion); + if (status == 0) + break; + dev_warn(dev, "Failed to send async message\n"); + msleep(HWMON_RETRY_SLEEP_MS); + } + + if (status && !handle) + return -ETIMEDOUT; + + ret = wait_for_completion_io_timeout(&completion, + (HWMON_TIMEOUT)); + if (ret > 0) + dev_dbg(dev, "Received async interrupt\n"); + else if (ret == 0) + dev_dbg(dev, + "Timeout occurred.trying to poll the response\n"); + + for (index = 0; index < HWMON_ASYNC_MSG_RETRY; index++) { + status = stratix10_svc_async_poll(priv->chan, handle, + &data); + if (status == -EAGAIN) { + dev_dbg(dev, + "Async message is still in progress\n"); + } else if (status < 0) { + dev_alert(dev, + "Failed to poll async message\n"); + ret = -ETIMEDOUT; + } else if (status == 0) { + ret = 0; + break; + } + msleep(HWMON_RETRY_SLEEP_MS); + } + + if (ret) { + dev_err(dev, "Failed to get async response\n"); + goto status_done; + } + + if (data.status == 0) { + priv->temperature = *((unsigned long *)data.kaddr1); + } else { + dev_err(dev, "%s returned 0x%p\n", __func__, + data.kaddr1); + goto status_done; + } + + *val = ((long)(priv->temperature)) * 1000 / 256; + + switch (priv->temperature) { + case ETEMP_INACTIVE: + case ETEMP_NOT_PRESENT: + case ETEMP_CORRUPT: + case ETEMP_NOT_INITIALIZED: + ret = -EOPNOTSUPP; + break; + + case ETEMP_TIMEOUT: + case ETEMP_BUSY: + case ETEMP_TOO_OLD: + ret = -EAGAIN; + break; + default: + ret = 0; + break; + } + + break; + + case hwmon_in: // Read voltage + if (chan > 15) + return -EOPNOTSUPP; // Channel outside of range + + msg.arg[0] = ((u64)1 << priv->soc64_volt_chan[chan]); + msg.command = 
COMMAND_HWMON_READVOLT; + + for (index = 0; index < HWMON_ASYNC_MSG_RETRY; index++) { + status = stratix10_svc_async_send(priv->chan, &msg, + &handle, soc64_async_callback, + &completion); + if (status == 0) + break; + msleep(HWMON_RETRY_SLEEP_MS); + } + + if (status && !handle) + return -ETIMEDOUT; + + ret = wait_for_completion_io_timeout(&completion, HWMON_TIMEOUT); + if (ret > 0) + dev_dbg(dev, "received async interrupt\n"); + else if (ret == 0) + dev_err(dev, + "timeout occurred ,waiting for async message, trying for polling\n"); + + for (index = 0; index < HWMON_ASYNC_MSG_RETRY; index++) { + status = stratix10_svc_async_poll(priv->chan, handle, + &data); + if (status == -EAGAIN) { + dev_dbg(dev, + "async message is still in progress\n"); + ret = -EAGAIN; + } else if (status < 0) { + dev_alert(dev, + "Failed to poll async message\n"); + ret = -ETIMEDOUT; + } else if (status == 0) { + dev_dbg(dev, "async response received\n"); + ret = 0; + break; + } + msleep(HWMON_RETRY_SLEEP_MS); + } + + if (ret) { + dev_err(dev, "Failed to get async response\n"); + goto status_done; + } + + if (data.status == 0) { + priv->voltage = *((unsigned long *)data.kaddr1); + } else { + dev_err(dev, "%s returned 0x%p\n", __func__, + data.kaddr1); + ret = -EFAULT; + goto status_done; + } + + *val = ((long)(priv->voltage)) * 1000 / 65536; + ret = 0; + break; + + default: + return -EOPNOTSUPP; + } + +status_done: + stratix10_svc_async_done(priv->chan, handle); + return ret; +} + +static int soc64_read_string(struct device *dev, + enum hwmon_sensor_types type, u32 attr, + int chan, const char **str) +{ + struct soc64_hwmon_priv *priv = dev_get_drvdata(dev); + + switch (type) { + case hwmon_in: + *str = priv->soc64_volt_chan_names[chan]; + return 0; + case hwmon_temp: + *str = priv->soc64_temp_chan_names[chan]; + return 0; + default: + return -EOPNOTSUPP; + } +} + + +static const struct hwmon_ops soc64_ops = { + .is_visible = soc64_is_visible, + .read = soc64_async_read, + .read_string = 
soc64_read_string, +}; + +static const struct hwmon_channel_info *soc64_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL), + HWMON_CHANNEL_INFO(in, + HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, + HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, + HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, + HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, + HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, + HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, + HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, + HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL), + NULL +}; + +static const struct hwmon_chip_info soc64_chip_info = { + .ops = &soc64_ops, + .info = soc64_info, +}; + +static int soc64_add_channel(struct device *dev, const char *type, + u32 val, const char *label, + struct soc64_hwmon_priv *priv) +{ + if (!strcmp(type, SOC64_HWMON_TEMPERATURE)) { + if (priv->temperature_channels >= SOC64_HWMON_MAXSENSORS) { + dev_warn(dev, + "Cant add temp node %s, too many channels", + label); + return 0; + } + + priv->soc64_temp_chan_names[priv->temperature_channels] = label; + priv->soc64_temp_chan[priv->temperature_channels] = val; + priv->temperature_channels++; + return 0; + } + + if (!strcmp(type, SOC64_HWMON_VOLTAGE)) { + if (priv->voltage_channels >= SOC64_HWMON_MAXSENSORS) { + dev_warn(dev, + "Cant add voltage node %s, too many channels", + label); + return 0; + } + + 
priv->soc64_volt_chan_names[priv->voltage_channels] = label; + priv->soc64_volt_chan[priv->voltage_channels] = val; + priv->voltage_channels++; + return 0; + } + + dev_warn(dev, "unsupported sensor type %s", type); + return 0; +} + +static int soc64_probe_child_from_dt(struct device *dev, + struct device_node *child, + struct soc64_hwmon_priv *priv) +{ + u32 val; + int ret; + struct device_node *grandchild; + const char *label; + const char *type; + + of_property_read_string(child, "name", &type); + for_each_child_of_node(child, grandchild) { + + ret = of_property_read_u32(grandchild, "reg", &val); + if (ret) { + dev_err(dev, "missing reg property of %pOFn\n", + grandchild); + return ret; + } + ret = of_property_read_string(grandchild, "label", + &label); + if (ret) { + dev_err(dev, "missing label propoerty of %pOFn\n", + grandchild); + return ret; + } + + soc64_add_channel(dev, type, val, label, priv); + } + + return 0; +} + +static int soc64_probe_from_dt(struct device *dev, + struct soc64_hwmon_priv *priv) +{ + const struct device_node *np = dev->of_node; + struct device_node *child; + int ret; + + /* Compatible with non-DT platforms */ + if (!np) + return 0; + + for_each_child_of_node(np, child) { + ret = soc64_probe_child_from_dt(dev, child, priv); + if (ret) { + of_node_put(child); + return ret; + } + } + + return 0; +} + +static int soc64_hwmon_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device *hwmon_dev; + struct soc64_hwmon_priv *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->client.dev = dev; + priv->client.receive_cb = NULL; + priv->client.priv = priv; + priv->temperature_channels = 0; + priv->voltage_channels = 0; + + ret = soc64_probe_from_dt(dev, priv); + if (ret) { + dev_err(dev, "Unable to probe from device tree\n"); + return ret; + } + + priv->chan = stratix10_svc_request_channel_byname(&priv->client, + SVC_CLIENT_HWMON); + if 
(IS_ERR(priv->chan)) { + dev_err(dev, "couldn't get service channel %s defering probe...\n", + SVC_CLIENT_HWMON); + return -EPROBE_DEFER; + } + + dev_info(dev, "Initialized %d temperature and %d voltage channels", + priv->temperature_channels, priv->voltage_channels); + + hwmon_dev = devm_hwmon_device_register_with_info(dev, "soc64hwmon", + priv, + &soc64_chip_info, + NULL); + + ret = stratix10_svc_add_async_client(priv->chan, false); + if (ret) { + dev_err(dev, "failed to enable async client hwmon client\n"); + stratix10_svc_free_channel(priv->chan); + hwmon_device_unregister(hwmon_dev); + return PTR_ERR(priv->chan); + } + + platform_set_drvdata(pdev, priv); + + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static void soc64_hwmon_remove(struct platform_device *pdev) +{ + struct soc64_hwmon_priv *priv = platform_get_drvdata(pdev); + + stratix10_svc_remove_async_client(priv->chan); + stratix10_svc_free_channel(priv->chan); +} + +static const struct of_device_id soc64_of_match[] = { + { .compatible = "intel,soc64-hwmon" }, + {}, +}; +MODULE_DEVICE_TABLE(of, soc64_of_match); + +static struct platform_driver soc64_hwmon_driver = { + .driver = { + .name = "soc64-hwmon", + .of_match_table = soc64_of_match, + }, + .probe = soc64_hwmon_probe, + .remove = soc64_hwmon_remove, +}; +module_platform_driver(soc64_hwmon_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("64-bit SoC FPGA hardware monitoring features"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 2d32896d06734..aa19915bfd2d8 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -310,7 +310,8 @@ struct dw_i2c_dev { #define MODEL_BAIKAL_BT1 BIT(9) #define MODEL_AMD_NAVI_GPU BIT(10) #define MODEL_WANGXUN_SP BIT(11) -#define MODEL_MASK GENMASK(11, 8) +#define MODEL_SOCFPGA BIT(12) +#define MODEL_MASK GENMASK(12, 8) /* * Enable UCSI interrupt by writing 0xd at register diff 
--git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 28188c6d0555e..86c0605f3fff1 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -939,10 +939,26 @@ static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap) i2c_dw_init_master(dev); } -static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev) +static int i2c_socfpga_scl_recovery(struct i2c_adapter *adap) +{ + struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; + + bri->prepare_recovery(adap); + bri->unprepare_recovery(adap); + + return 0; +} + +static int i2c_dw_init_socfpga_recovery_info(struct dw_i2c_dev *dev, + struct i2c_bus_recovery_info *rinfo) +{ + rinfo->recover_bus = i2c_socfpga_scl_recovery; + return 1; +} + +static int i2c_dw_init_generic_recovery_info(struct dw_i2c_dev *dev, + struct i2c_bus_recovery_info *rinfo) { - struct i2c_bus_recovery_info *rinfo = &dev->rinfo; - struct i2c_adapter *adap = &dev->adapter; struct gpio_desc *gpio; gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH); @@ -968,13 +984,34 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev) } rinfo->recover_bus = i2c_generic_scl_recovery; - rinfo->prepare_recovery = i2c_dw_prepare_recovery; - rinfo->unprepare_recovery = i2c_dw_unprepare_recovery; - adap->bus_recovery_info = rinfo; dev_info(dev->dev, "running with gpio recovery mode! scl%s", rinfo->sda_gpiod ? 
",sda" : ""); + return 1; +} + +static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev) +{ + struct i2c_bus_recovery_info *rinfo = &dev->rinfo; + struct i2c_adapter *adap = &dev->adapter; + int ret; + + switch (dev->flags & MODEL_MASK) { + case MODEL_SOCFPGA: + ret = i2c_dw_init_socfpga_recovery_info(dev, rinfo); + break; + default: + ret = i2c_dw_init_generic_recovery_info(dev, rinfo); + break; + } + if (ret <= 0) + return ret; + + rinfo->prepare_recovery = i2c_dw_prepare_recovery; + rinfo->unprepare_recovery = i2c_dw_unprepare_recovery; + adap->bus_recovery_info = rinfo; + return 0; } diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 2d0c7348e4917..2484ffe044e99 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -339,6 +339,7 @@ static const struct of_device_id dw_i2c_of_match[] = { { .compatible = "snps,designware-i2c", }, { .compatible = "mscc,ocelot-i2c", .data = (void *)MODEL_MSCC_OCELOT }, { .compatible = "baikal,bt1-sys-i2c", .data = (void *)MODEL_BAIKAL_BT1 }, + { .compatible = "intel,socfpga-i2c", .data = (void *)MODEL_SOCFPGA }, {} }; MODULE_DEVICE_TABLE(of, dw_i2c_of_match); diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 53ab814b676ff..838dcc793f8f1 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -942,6 +942,9 @@ static int i3c_master_rstdaa_locked(struct i3c_master_controller *master, ret = i3c_master_send_ccc_cmd_locked(master, &cmd); i3c_ccc_cmd_dest_cleanup(&dest); + if (ret) + ret = cmd.err; + return ret; } @@ -997,6 +1000,9 @@ static int i3c_master_enec_disec_locked(struct i3c_master_controller *master, ret = i3c_master_send_ccc_cmd_locked(master, &cmd); i3c_ccc_cmd_dest_cleanup(&dest); + if (ret) + ret = cmd.err; + return ret; } diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index dbcd3984f2578..a5b6ea42b9dd6 100644 --- a/drivers/i3c/master/dw-i3c-master.c 
+++ b/drivers/i3c/master/dw-i3c-master.c @@ -572,6 +572,14 @@ static int dw_i3c_clk_cfg(struct dw_i3c_master *master) if (hcnt < SCL_I3C_TIMING_CNT_MIN) hcnt = SCL_I3C_TIMING_CNT_MIN; + /* set back to THIGH_MAX_NS, after disable spike filter */ + if (!master->first_broadcast) { + lcnt = SCL_I3C_TIMING_LCNT(readl(master->regs + SCL_I3C_OD_TIMING)); + scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | lcnt; + writel(scl_timing, master->regs + SCL_I3C_OD_TIMING); + return 0; + } + lcnt = DIV_ROUND_UP(core_rate, master->base.bus.scl_rate.i3c) - hcnt; if (lcnt < SCL_I3C_TIMING_CNT_MIN) lcnt = SCL_I3C_TIMING_CNT_MIN; @@ -591,6 +599,8 @@ static int dw_i3c_clk_cfg(struct dw_i3c_master *master) lcnt = max_t(u8, DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period), lcnt); + /* first broadcast thigh to 200ns, to disable spike filter */ + hcnt = DIV_ROUND_UP(I3C_BUS_THIGH_INIT_OD_MIN_NS, core_period); scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt); writel(scl_timing, master->regs + SCL_I3C_OD_TIMING); master->i3c_od_timing = scl_timing; @@ -664,6 +674,9 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m) if (ret) goto rpm_out; + /* first broadcast to disable spike filter */ + master->first_broadcast = true; + switch (bus->mode) { case I3C_BUS_MODE_MIXED_FAST: case I3C_BUS_MODE_MIXED_LIMITED: @@ -1488,6 +1501,14 @@ static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id) dw_i3c_master_end_xfer_locked(master, status); if (status & INTR_TRANSFER_ERR_STAT) writel(INTR_TRANSFER_ERR_STAT, master->regs + INTR_STATUS); + /* set back to THIGH_MAX_NS, after disable spike filter */ + if (master->first_broadcast) { + master->first_broadcast = false; + int ret = dw_i3c_clk_cfg(master); + + if (ret) + return ret; + } spin_unlock(&master->xferqueue.lock); if (status & INTR_IBI_THLD_STAT) diff --git a/drivers/i3c/master/dw-i3c-master.h b/drivers/i3c/master/dw-i3c-master.h index 219ff815d3a73..7c0c4fd14aaa4 100644 --- a/drivers/i3c/master/dw-i3c-master.h +++ 
b/drivers/i3c/master/dw-i3c-master.h @@ -69,6 +69,8 @@ struct dw_i3c_master { const struct dw_i3c_platform_ops *platform_ops; struct work_struct hj_work; + + bool first_broadcast; }; struct dw_i3c_platform_ops { diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 0d3a889b1905c..f2ba7292ed9b3 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -51,6 +51,7 @@ #define RD_LOCAL_MEMRESERVE_DONE BIT(2) static u32 lpi_id_bits; +static bool dma_32bit_flag; /* * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to @@ -2306,6 +2307,7 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser, u32 alloc_pages, psz; struct page *page; void *base; + gfp_t flags = GFP_KERNEL | __GFP_ZERO; psz = baser->psz; alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); @@ -2317,7 +2319,10 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser, order = get_order(GITS_BASER_PAGES_MAX * psz); } - page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); + if (dma_32bit_flag) + flags |= GFP_DMA32; + + page = alloc_pages_node(its->numa_node, flags, order); if (!page) return -ENOMEM; @@ -3290,6 +3295,7 @@ static bool its_alloc_table_entry(struct its_node *its, struct page *page; u32 esz, idx; __le64 *table; + gfp_t flags = GFP_KERNEL | __GFP_ZERO; /* Don't allow device id that exceeds single, flat table limit */ esz = GITS_BASER_ENTRY_SIZE(baser->val); @@ -3303,9 +3309,12 @@ static bool its_alloc_table_entry(struct its_node *its, table = baser->base; + if (dma_32bit_flag) + flags |= GFP_DMA32; + /* Allocate memory for 2nd level table */ if (!table[idx]) { - page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + page = alloc_pages_node(its->numa_node, flags, get_order(baser->psz)); if (!page) return false; @@ -5127,8 +5136,11 @@ static int __init its_probe_one(struct its_node *its) struct page *page; u32 ctlr; int err; + gfp_t flags = GFP_KERNEL | 
__GFP_ZERO; its_enable_quirks(its); + if (dma_32bit_flag) + flags |= GFP_DMA32; if (is_v4(its)) { if (!(its->typer & GITS_TYPER_VMOVP)) { @@ -5160,7 +5172,7 @@ static int __init its_probe_one(struct its_node *its) } } - page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + page = alloc_pages_node(its->numa_node, flags, get_order(ITS_CMD_QUEUE_SZ)); if (!page) { err = -ENOMEM; @@ -5476,6 +5488,8 @@ static int __init its_of_probe(struct device_node *node) continue; } + if (of_property_read_bool(np, "dma-32bit-quirk")) + dma_32bit_flag = true; its = its_node_init(&res, &np->fwnode, of_node_to_nid(np)); if (!its) @@ -5710,6 +5724,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, bool has_v4 = false; bool has_v4_1 = false; int err; + dma_32bit_flag = false; gic_rdists = rdists; diff --git a/drivers/mfd/altera-a10sr.c b/drivers/mfd/altera-a10sr.c index d53e433ab5c1d..2a3427e99eea2 100644 --- a/drivers/mfd/altera-a10sr.c +++ b/drivers/mfd/altera-a10sr.c @@ -19,6 +19,10 @@ #include static const struct mfd_cell altr_a10sr_subdev_info[] = { + { + .name = "altr_a10sr_hwmon", + .of_compatible = "altr,a10sr-hwmon", + }, { .name = "altr_a10sr_gpio", .of_compatible = "altr,a10sr-gpio", diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 3fe7e2a9bd294..e3f1a72e94684 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -467,6 +467,12 @@ config DW_XDATA_PCIE If unsure, say N. +config ALTERA_HWMUTEX + tristate "Altera Hardware Mutex" + help + This option enables device driver support for Altera Hardware Mutex. + Say Y here if you want to use the Altera hardware mutex support. + config PCI_ENDPOINT_TEST depends on PCI select CRC32 @@ -540,25 +546,10 @@ config VCPU_STALL_DETECTOR If you do not intend to run this kernel as a guest, say N. 
-config TMR_MANAGER - tristate "Select TMR Manager" - depends on MICROBLAZE && MB_MANAGER +config ALTERA_SYSID + tristate "Altera System ID" help - This option enables the driver developed for TMR Manager. - The Triple Modular Redundancy(TMR) manager provides support for - fault detection. - - Say N here unless you know what you are doing. - -config TMR_INJECT - tristate "Select TMR Inject" - depends on TMR_MANAGER && FAULT_INJECTION_DEBUG_FS - help - This option enables the driver developed for TMR Inject. - The Triple Modular Redundancy(TMR) Inject provides - fault injection. - - Say N here unless you know what you are doing. + This enables Altera System ID soft core driver. config TPS6594_ESM tristate "TI TPS6594 Error Signal Monitor support" @@ -610,6 +601,28 @@ config MARVELL_CN10K_DPI To compile this driver as a module, choose M here: the module will be called mrvl_cn10k_dpi. +config ALTERA_ILC + tristate "Altera Interrupt Latency Counter driver" + help + This enables the Interrupt Latency Counter driver for the Altera + SOCFPGA platform. + +config ALTERA_SOCFPGA_CONFIG + tristate "Altera FPGA FCS configuration support" + depends on INTEL_STRATIX10_SERVICE + help + Altera socfpga FCS security features driver. + +config ALTERA_SOCFPGA_FCS_DEBUG + bool "Altera FPGA FCS debug driver support" + depends on ALTERA_SOCFPGA_CONFIG + help + This option enables debug option for the fcs driver and also enables + generic mailbox command support. This option can be used to debug + socfpga fcs drivers. + + Generic mailbox command support can be enabled only using this option. 
+ source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index a9f94525e1819..f4035a0f856f0 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -43,6 +43,9 @@ obj-$(CONFIG_PCH_PHUB) += pch_phub.o obj-y += ti-st/ obj-y += lis3lv02d/ obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/ +obj-$(CONFIG_ALTERA_HWMUTEX) += altera_hwmutex.o +obj-$(CONFIG_ALTERA_ILC) += altera_ilc.o +obj-$(CONFIG_ALTERA_SYSID) += altera_sysid.o obj-$(CONFIG_INTEL_MEI) += mei/ obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/ obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o @@ -72,3 +75,5 @@ obj-$(CONFIG_TPS6594_PFSM) += tps6594-pfsm.o obj-$(CONFIG_NSM) += nsm.o obj-$(CONFIG_MARVELL_CN10K_DPI) += mrvl_cn10k_dpi.o obj-y += keba/ +obj-$(CONFIG_ALTERA_SOCFPGA_CONFIG) += altera-fcs-config.o +altera-fcs-config-y := socfpga-config.o socfpga_fcs_plat.o socfpga_fcs_hal.o diff --git a/drivers/misc/altera_hwmutex.c b/drivers/misc/altera_hwmutex.c new file mode 100644 index 0000000000000..5004fe92e94b4 --- /dev/null +++ b/drivers/misc/altera_hwmutex.c @@ -0,0 +1,320 @@ +/* + * Copyright Altera Corporation (C) 2013. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "altera_hwmutex" + + +static DEFINE_SPINLOCK(list_lock); /* protect mutex_list */ +static LIST_HEAD(mutex_list); + +/* Mutex Registers */ +#define MUTEX_REG 0x0 + +#define MUTEX_REG_VALUE_MASK 0xFFFF +#define MUTEX_REG_OWNER_OFFSET 16 +#define MUTEX_REG_OWNER_MASK 0xFFFF +#define MUTEX_GET_OWNER(reg) \ + ((reg >> MUTEX_REG_OWNER_OFFSET) & MUTEX_REG_OWNER_MASK) + +/** + * altera_mutex_request - Retrieves a pointer to an acquired mutex device + * structure + * @mutex_np: The pointer to mutex device node + * + * Returns a pointer to the mutex device structure associated with the + * supplied device node, or NULL if no corresponding mutex device was + * found. + */ +struct altera_mutex *altera_mutex_request(struct device_node *mutex_np) +{ + struct altera_mutex *mutex; + + spin_lock(&list_lock); + list_for_each_entry(mutex, &mutex_list, list) { + if (mutex_np == mutex->pdev->dev.of_node) { + if (!mutex->requested) { + mutex->requested = true; + spin_unlock(&list_lock); + return mutex; + } else { + pr_info("Mutex device is in use.\n"); + spin_unlock(&list_lock); + return NULL; + } + } + } + spin_unlock(&list_lock); + pr_info("Mutex device not found!\n"); + return NULL; +} +EXPORT_SYMBOL(altera_mutex_request); + +/** + * altera_mutex_free - Free the mutex + * @mutex: the mutex + * + * Return 0 if success. Otherwise, returns non-zero. 
+ */ +int altera_mutex_free(struct altera_mutex *mutex) +{ + if (!mutex || !mutex->requested) + return -EINVAL; + + spin_lock(&list_lock); + mutex->requested = false; + spin_unlock(&list_lock); + + return 0; +} +EXPORT_SYMBOL(altera_mutex_free); + +static int __mutex_trylock(struct altera_mutex *mutex, u16 owner, u16 value) +{ + u32 read; + int ret = 0; + u32 data = (owner << MUTEX_REG_OWNER_OFFSET) | value; + + mutex_lock(&mutex->lock); + __raw_writel(data, mutex->regs + MUTEX_REG); + read = __raw_readl(mutex->regs + MUTEX_REG); + if (read != data) + ret = -1; + + mutex_unlock(&mutex->lock); + return ret; +} + +/** + * altera_mutex_lock - Acquires a hardware mutex, wait until it can get it. + * @mutex: the mutex to be acquired + * @owner: owner ID + * @value: the new non-zero value to write to mutex + * + * Returns 0 if mutex was successfully locked. Otherwise, returns non-zero. + * + * The mutex must later on be released by the same owner that acquired it. + * This function is not ISR callable. + */ +int altera_mutex_lock(struct altera_mutex *mutex, u16 owner, u16 value) +{ + if (!mutex || !mutex->requested) + return -EINVAL; + + while (__mutex_trylock(mutex, owner, value) != 0) + ; + + return 0; +} +EXPORT_SYMBOL(altera_mutex_lock); + +/** + * altera_mutex_trylock - Tries once to lock the hardware mutex and returns + * immediately + * @mutex: the mutex to be acquired + * @owner: owner ID + * @value: the new non-zero value to write to mutex + * + * Returns 0 if mutex was successfully locked. Otherwise, returns non-zero. + * + * The mutex must later on be released by the same owner that acquired it. + * This function is not ISR callable. 
+ */ +int altera_mutex_trylock(struct altera_mutex *mutex, u16 owner, u16 value) +{ + if (!mutex || !mutex->requested) + return -EINVAL; + + return __mutex_trylock(mutex, owner, value); +} +EXPORT_SYMBOL(altera_mutex_trylock); + +/** + * altera_mutex_unlock - Unlock a mutex that has been locked by this owner + * previously that was locked on the + * altera_mutex_lock. Upon release, the value stored + * in the mutex is set to zero. + * @mutex: the mutex to be released + * @owner: Owner ID + * + * Returns 0 if mutex was successfully unlocked. Otherwise, returns + * non-zero. + * + * This function is not ISR callable. + */ +int altera_mutex_unlock(struct altera_mutex *mutex, u16 owner) +{ + u32 reg; + + if (!mutex || !mutex->requested) + return -EINVAL; + + mutex_lock(&mutex->lock); + + __raw_writel(owner << MUTEX_REG_OWNER_OFFSET, + mutex->regs + MUTEX_REG); + + reg = __raw_readl(mutex->regs + MUTEX_REG); + if (reg & MUTEX_REG_VALUE_MASK) { + /* Unlock failed */ + dev_dbg(&mutex->pdev->dev, + "Unlock mutex failed, owner %d and expected owner %d\n", + owner, MUTEX_GET_OWNER(reg)); + mutex_unlock(&mutex->lock); + return -EINVAL; + } + + mutex_unlock(&mutex->lock); + return 0; +} +EXPORT_SYMBOL(altera_mutex_unlock); + +/** + * altera_mutex_owned - Determines if this owner owns the mutex + * @mutex: the mutex to be queried + * @owner: Owner ID + * + * Returns 1 if the owner owns the mutex. Otherwise, returns zero. 
+ */ +int altera_mutex_owned(struct altera_mutex *mutex, u16 owner) +{ + u32 reg; + u16 actual_owner; + int ret = 0; + + if (!mutex || !mutex->requested) + return ret; + + mutex_lock(&mutex->lock); + reg = __raw_readl(mutex->regs + MUTEX_REG); + actual_owner = MUTEX_GET_OWNER(reg); + if (actual_owner == owner) + ret = 1; + + mutex_unlock(&mutex->lock); + return ret; +} +EXPORT_SYMBOL(altera_mutex_owned); + +/** + * altera_mutex_is_locked - Determines if the mutex is locked + * @mutex: the mutex to be queried + * + * Returns 1 if the mutex is locked, 0 if unlocked. + */ +int altera_mutex_is_locked(struct altera_mutex *mutex) +{ + u32 reg; + int ret = 0; + + if (!mutex || !mutex->requested) + return ret; + + mutex_lock(&mutex->lock); + reg = __raw_readl(mutex->regs + MUTEX_REG); + reg &= MUTEX_REG_VALUE_MASK; + if (reg) + ret = 1; + + mutex_unlock(&mutex->lock); + return ret; +} +EXPORT_SYMBOL(altera_mutex_is_locked); + +static int altera_mutex_probe(struct platform_device *pdev) +{ + struct altera_mutex *mutex; + struct resource *regs; + + mutex = devm_kzalloc(&pdev->dev, sizeof(struct altera_mutex), + GFP_KERNEL); + if (!mutex) + return -ENOMEM; + + mutex->pdev = pdev; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + return -ENXIO; + + mutex->regs = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(mutex->regs)) + return PTR_ERR(mutex->regs); + + mutex_init(&mutex->lock); + + spin_lock(&list_lock); + list_add_tail(&mutex->list, &mutex_list); + spin_unlock(&list_lock); + + platform_set_drvdata(pdev, mutex); + + return 0; +} + +static int altera_mutex_remove(struct platform_device *pdev) +{ + struct altera_mutex *mutex = platform_get_drvdata(pdev); + + spin_lock(&list_lock); + if (mutex) + list_del(&mutex->list); + spin_unlock(&list_lock); + + platform_set_drvdata(pdev, NULL); + return 0; +} + +static const struct of_device_id altera_mutex_match[] = { + { .compatible = "altr,hwmutex-1.0" }, + { /* Sentinel */ } +}; + 
+MODULE_DEVICE_TABLE(of, altera_mutex_match); + +static struct platform_driver altera_mutex_platform_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = altera_mutex_match, + }, + .remove = altera_mutex_remove, +}; + +static int __init altera_mutex_init(void) +{ + return platform_driver_probe(&altera_mutex_platform_driver, + altera_mutex_probe); +} + +static void __exit altera_mutex_exit(void) +{ + platform_driver_unregister(&altera_mutex_platform_driver); +} + +module_init(altera_mutex_init); +module_exit(altera_mutex_exit); + +MODULE_AUTHOR("Ley Foon Tan "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Altera Hardware Mutex driver"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/misc/altera_ilc.c b/drivers/misc/altera_ilc.c new file mode 100644 index 0000000000000..9e04291156d72 --- /dev/null +++ b/drivers/misc/altera_ilc.c @@ -0,0 +1,298 @@ +/* + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "altera_ilc" +#define CTRL_REG 0x80 +#define FREQ_REG 0x84 +#define STP_REG 0x88 +#define VLD_REG 0x8C +#define ILC_MAX_PORTS 32 +#define ILC_FIFO_DEFAULT 32 +#define ILC_ENABLE 0x01 +#define CHAR_SIZE 10 +#define POLL_INTERVAL 1 +#define GET_PORT_COUNT(_val) ((_val & 0x7C) >> 2) +#define GET_VLD_BIT(_val, _offset) (((_val) >> _offset) & 0x1) + +struct altera_ilc { + struct platform_device *pdev; + void __iomem *regs; + unsigned int port_count; + unsigned int irq; + unsigned int channel_offset; + unsigned int interrupt_channels[ILC_MAX_PORTS]; + struct kfifo kfifos[ILC_MAX_PORTS]; + struct device_attribute dev_attr[ILC_MAX_PORTS]; + struct delayed_work ilc_work; + char sysfs[ILC_MAX_PORTS][CHAR_SIZE]; + u32 fifo_depth; +}; + +static int ilc_irq_lookup(struct altera_ilc *ilc, int irq) +{ + int i; + for (i = 0; i < ilc->port_count; i++) { + if (irq == platform_get_irq(ilc->pdev, i)) + return i; + } + return -EPERM; +} + +static ssize_t ilc_show_counter(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret, i, id, fifo_len; + unsigned int fifo_buf[ILC_MAX_PORTS]; + char temp[10]; + struct altera_ilc *ilc = dev_get_drvdata(dev); + + fifo_len = 0; + ret = kstrtouint(attr->attr.name, 0, &id); + + for (i = 0; i < ilc->port_count; i++) { + if (id == (ilc->interrupt_channels[i])) { + /*Check for kfifo length*/ + fifo_len = kfifo_len(&ilc->kfifos[i]) + /sizeof(unsigned int); + if (fifo_len <= 0) { + dev_info(&ilc->pdev->dev, "Fifo for interrupt %s is empty\n", + attr->attr.name); + return 0; + } + /*Read from kfifo*/ + ret = kfifo_out(&ilc->kfifos[i], &fifo_buf, + kfifo_len(&ilc->kfifos[i])); + } + } + + for (i = 0; i < fifo_len; i++) { + sprintf(temp, "%u\n", fifo_buf[i]); + strcat(buf, temp); + } + + strcat(buf, "\0"); + + return strlen(buf); +} + +static struct attribute *altera_ilc_attrs[ILC_MAX_PORTS]; + +struct attribute_group 
altera_ilc_attr_group = {
+	.name = "ilc_data",
+	.attrs = altera_ilc_attrs,
+};
+
+static void ilc_work(struct work_struct *work)
+{
+	unsigned int ilc_value, ret, stp_reg;
+	/* must be signed: ilc_irq_lookup() returns -EPERM on failure */
+	int offset;
+	struct altera_ilc *ilc =
+		container_of(work, struct altera_ilc, ilc_work.work);
+
+	offset = ilc_irq_lookup(ilc, ilc->irq);
+	if (offset < 0) {
+		dev_err(&ilc->pdev->dev, "Unable to lookup irq number\n");
+		return;
+	}
+
+	if (GET_VLD_BIT(readl(ilc->regs + VLD_REG), offset)) {
+		/*Read counter register*/
+		ilc_value = readl(ilc->regs + (offset) * 4);
+
+		/*Putting value into kfifo*/
+		ret = kfifo_in((&ilc->kfifos[offset]),
+			(unsigned int *)&ilc_value, sizeof(ilc_value));
+
+		/*Clearing stop register*/
+		stp_reg = readl(ilc->regs + STP_REG);
+		writel((~(0x1 << offset) & stp_reg), ilc->regs + STP_REG);
+
+		return;
+	}
+
+	/*Start workqueue to poll data valid*/
+	schedule_delayed_work(&ilc->ilc_work, msecs_to_jiffies(POLL_INTERVAL));
+}
+
+static irqreturn_t ilc_interrupt_handler(int irq, void *p)
+{
+	unsigned int stp_reg;
+	/* must be signed: ilc_irq_lookup() returns -EPERM on failure */
+	int offset;
+
+	struct altera_ilc *ilc = (struct altera_ilc *)p;
+
+	/*Update ILC struct*/
+	ilc->irq = irq;
+
+	dev_dbg(&ilc->pdev->dev, "Interrupt %u triggered\n",
+		ilc->irq);
+
+	offset = ilc_irq_lookup(ilc, irq);
+	if (offset < 0) {
+		dev_err(&ilc->pdev->dev, "Unable to lookup irq number\n");
+		/* Not one of our lines (IRQF_SHARED) */
+		return IRQ_NONE;
+	}
+
+	/*Setting stop register*/
+	stp_reg = readl(ilc->regs + STP_REG);
+	writel((0x1 << offset)|stp_reg, ilc->regs + STP_REG);
+
+	/*Start workqueue to poll data valid*/
+	schedule_delayed_work(&ilc->ilc_work, 0);
+
+	/* We claimed and handled this interrupt (work scheduled) */
+	return IRQ_HANDLED;
+}
+
+static int altera_ilc_probe(struct platform_device *pdev)
+{
+	struct altera_ilc *ilc;
+	struct resource *regs;
+	struct device_node *np = pdev->dev.of_node;
+	int ret, i;
+
+	ilc = devm_kzalloc(&pdev->dev, sizeof(struct altera_ilc),
+		GFP_KERNEL);
+	if (!ilc)
+		return -ENOMEM;
+
+	ilc->pdev = pdev;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs)
+		return -ENXIO;
+
+	
ilc->regs = devm_ioremap_resource(&pdev->dev, regs); + if (!ilc->regs) + return -EADDRNOTAVAIL; + + ilc->port_count = GET_PORT_COUNT(readl(ilc->regs + CTRL_REG)); + if (ilc->port_count <= 0) { + dev_warn(&pdev->dev, "No interrupt connected to ILC\n"); + return -EPERM; + } + + /*Check for fifo depth*/ + ret = of_property_read_u32(np, "altr,sw-fifo-depth", + &(ilc->fifo_depth)); + if (ret) { + dev_warn(&pdev->dev, "Fifo depth undefined\n"); + dev_warn(&pdev->dev, "Setting fifo depth to default value (32)\n"); + ilc->fifo_depth = ILC_FIFO_DEFAULT; + } + + /*Initialize Kfifo*/ + for (i = 0; i < ilc->port_count; i++) { + ret = kfifo_alloc(&ilc->kfifos[i], (ilc->fifo_depth * + sizeof(unsigned int)), GFP_KERNEL); + if (ret) { + dev_err(&pdev->dev, "Kfifo failed to initialize\n"); + return ret; + } + } + + /*Register each of the IRQs*/ + for (i = 0; i < ilc->port_count; i++) { + ilc->interrupt_channels[i] = platform_get_irq(pdev, i); + + ret = devm_request_irq(&pdev->dev, (ilc->interrupt_channels[i]), + ilc_interrupt_handler, IRQF_SHARED, "ilc_0", + (void *)(ilc)); + + if (ret < 0) + dev_warn(&pdev->dev, "Failed to register interrupt handler"); + } + + /*Setup sysfs interface*/ + for (i = 0; (i < ilc->port_count); i++) { + sprintf(ilc->sysfs[i], "%d", (ilc->interrupt_channels[i])); + ilc->dev_attr[i].attr.name = ilc->sysfs[i]; + ilc->dev_attr[i].attr.mode = S_IRUGO; + ilc->dev_attr[i].show = ilc_show_counter; + altera_ilc_attrs[i] = &ilc->dev_attr[i].attr; + altera_ilc_attrs[i+1] = NULL; + } + ret = sysfs_create_group(&pdev->dev.kobj, &altera_ilc_attr_group); + + /*Initialize workqueue*/ + INIT_DELAYED_WORK(&ilc->ilc_work, ilc_work); + + /*Global enable ILC softIP*/ + writel(ILC_ENABLE, ilc->regs + CTRL_REG); + + platform_set_drvdata(pdev, ilc); + + dev_info(&pdev->dev, "Driver successfully loaded\n"); + + return 0; +} + +static void altera_ilc_remove(struct platform_device *pdev) +{ + int i; + struct altera_ilc *ilc = platform_get_drvdata(pdev); + + /*Remove sysfs 
interface*/ + sysfs_remove_group(&pdev->dev.kobj, &altera_ilc_attr_group); + + /*Free up kfifo memory*/ + for (i = 0; i < ilc->port_count; i++) + kfifo_free(&ilc->kfifos[i]); + + platform_set_drvdata(pdev, NULL); +} + +static const struct of_device_id altera_ilc_match[] = { + { .compatible = "altr,ilc-1.0" }, + { /* Sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, altera_ilc_match); + +static struct platform_driver altera_ilc_platform_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(altera_ilc_match), + }, + .remove = altera_ilc_remove, +}; + +static int __init altera_ilc_init(void) +{ + return platform_driver_probe(&altera_ilc_platform_driver, + altera_ilc_probe); +} + +static void __exit altera_ilc_exit(void) +{ + platform_driver_unregister(&altera_ilc_platform_driver); +} + +module_init(altera_ilc_init); +module_exit(altera_ilc_exit); + +MODULE_AUTHOR("Chee Nouk Phoon "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Altera Interrupt Latency Counter Driver"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/misc/altera_sysid.c b/drivers/misc/altera_sysid.c new file mode 100644 index 0000000000000..6f91bb0cea445 --- /dev/null +++ b/drivers/misc/altera_sysid.c @@ -0,0 +1,140 @@ +/* + * Copyright Altera Corporation (C) 2013. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * + * Credit: + * Walter Goossens + */ + +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "altera_sysid" + +struct altera_sysid { + void __iomem *regs; +}; + +/* System ID Registers*/ +#define SYSID_REG_ID (0x0) +#define SYSID_REG_TIMESTAMP (0x4) + +static ssize_t altera_sysid_show_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct altera_sysid *sysid = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", readl(sysid->regs + SYSID_REG_ID)); +} + +static ssize_t altera_sysid_show_timestamp(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned int reg; + struct tm timestamp; + struct altera_sysid *sysid = dev_get_drvdata(dev); + + reg = readl(sysid->regs + SYSID_REG_TIMESTAMP); + + time64_to_tm(reg, 0, ×tamp); + + return sprintf(buf, "%u (%u-%u-%u %u:%u:%u UTC)\n", reg, + (unsigned int)(timestamp.tm_year + 1900), + timestamp.tm_mon + 1, timestamp.tm_mday, timestamp.tm_hour, + timestamp.tm_min, timestamp.tm_sec); +} + +static DEVICE_ATTR(id, S_IRUGO, altera_sysid_show_id, NULL); +static DEVICE_ATTR(timestamp, S_IRUGO, altera_sysid_show_timestamp, NULL); + +static struct attribute *altera_sysid_attrs[] = { + &dev_attr_id.attr, + &dev_attr_timestamp.attr, + NULL, +}; + +struct attribute_group altera_sysid_attr_group = { + .name = "sysid", + .attrs = altera_sysid_attrs, +}; + +static int altera_sysid_probe(struct platform_device *pdev) +{ + struct altera_sysid *sysid; + struct resource *regs; + + sysid = devm_kzalloc(&pdev->dev, sizeof(struct altera_sysid), + GFP_KERNEL); + if (!sysid) + return -ENOMEM; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + return -ENXIO; + + sysid->regs = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(sysid->regs)) + return PTR_ERR(sysid->regs); + + platform_set_drvdata(pdev, sysid); + + return sysfs_create_group(&pdev->dev.kobj, &altera_sysid_attr_group); +} + +static void altera_sysid_remove(struct platform_device 
*pdev) +{ + sysfs_remove_group(&pdev->dev.kobj, &altera_sysid_attr_group); + + platform_set_drvdata(pdev, NULL); +} + +static const struct of_device_id altera_sysid_match[] = { + { .compatible = "altr,sysid-1.0" }, + { /* Sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, altera_sysid_match); + +static struct platform_driver altera_sysid_platform_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(altera_sysid_match), + }, + .probe = altera_sysid_probe, + .remove = altera_sysid_remove, +}; + +static int __init altera_sysid_init(void) +{ + return platform_driver_register(&altera_sysid_platform_driver); +} + +static void __exit altera_sysid_exit(void) +{ + platform_driver_unregister(&altera_sysid_platform_driver); +} + +module_init(altera_sysid_init); +module_exit(altera_sysid_exit); + +MODULE_AUTHOR("Ley Foon Tan "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Altera System ID driver"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/misc/socfpga-config.c b/drivers/misc/socfpga-config.c new file mode 100644 index 0000000000000..1b2ee3715b818 --- /dev/null +++ b/drivers/misc/socfpga-config.c @@ -0,0 +1,1820 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2025, Altera + */ + +#include +#include +#include +#include +#include + +// Define the store function for the session_id attribute +static ssize_t open_session_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + ret = copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context)); + if (ret) { + pr_err("Failed to copy context from user space ret: %d\n", ret); + ret = -EFAULT; + goto out; + } + + ret = hal_session_open(k_ctx); + if (ret) + pr_err("Failed to open session\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +// Define the store function for the session_id attribute +static ssize_t close_session_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_session_close(k_ctx); + if (ret) + pr_err("Failed to close session\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t import_key_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_import_key(k_ctx); + if (ret) + pr_err("Failed to import key\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t export_key_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_export_key(k_ctx); + if (ret) + pr_err("Failed to export key\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t remove_key_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_remove_key(k_ctx); + if (ret) + pr_err("Failed to remove key\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t key_info_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_get_key_info(k_ctx); + if (ret) + pr_err("Failed to get key info\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t create_key_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_create_key(k_ctx); + if (ret) + pr_err("Failed to create key\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t hkdf_req_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_hkdf_request(k_ctx); + if (ret) + pr_err("Failed to hkdf request\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t prov_data_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_get_provision_data(k_ctx); + if (ret) + pr_err("Failed to get provision data\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t ctr_set_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_counter_set(k_ctx); + if (ret) + pr_err("Failed to set counter\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t ctr_set_preauth_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_counter_set_preauth(k_ctx); + if (ret) + pr_err("Failed to set counter preauth\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t context_info_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + } + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t mac_verify_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_mac_verify(k_ctx); + if (ret) + pr_err("Failed to mac verify\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t aes_crypt_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_aes_crypt(k_ctx); + if (ret) + pr_err("Failed to crypt data\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t ecdh_req_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_ecdh_req(k_ctx); + if (ret) + pr_err("Failed to perform ecdh request\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t chip_id_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_get_chip_id(k_ctx); + if (ret) + pr_err("Failed to get chip id\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t atstn_cert_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_attestation_get_certificate(k_ctx); + if (ret) + pr_err("Failed to get attestation cert\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t atstn_cert_reload_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_attestation_certificate_reload(k_ctx); + if (ret) + pr_err("Failed to get attestation cert\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t mctp_req_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_mctp_request(k_ctx); + if (ret) + pr_err("Failed to send mctp request\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t qspi_open_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_qspi_open(k_ctx); + if (ret) + pr_err("Failed to open qspi\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t qspi_close_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_qspi_close(k_ctx); + if (ret) + pr_err("Failed to close qspi\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t qspi_cs_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_qspi_cs(k_ctx); + if (ret) + pr_err("Failed to set qspi cs\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t qspi_read_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_qspi_read(k_ctx); + if (ret) + pr_err("Failed to read qspi\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t qspi_write_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_qspi_write(k_ctx); + if (ret) + pr_err("Failed to write qspi\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t qspi_erase_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_qspi_erase(k_ctx); + if (ret) + pr_err("Failed to erase qspi\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t jtag_idcode_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_jtag_idcode(k_ctx); + if (ret) + pr_err("Failed to get jtag idcode\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t device_identity_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_get_device_identity(k_ctx); + if (ret) + pr_err("Failed to get device identity\n"); + +out: + + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t ecdsa_get_pubkey_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_ecdsa_get_pubkey(k_ctx); + if (ret) + pr_err("Failed to get ecdsa public key\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t ecsda_hash_sign_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_ecdsa_hash_sign(k_ctx); + if (ret) + pr_err("Failed to sign hash\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t ecdsa_hash_verify_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_ecdsa_hash_verify(k_ctx); + if (ret) + pr_err("Failed to verify hash\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t ecdsa_sha2_data_sign_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_ecdsa_sha2_data_sign(k_ctx); + if (ret) + pr_err("Failed to sign data\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t ecdsa_sha2data_verify_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_ecdsa_sha2_data_verify(k_ctx); + if (ret) + pr_err("Failed to verify data\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t hps_image_validate_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_hps_img_validate(k_ctx); + if (ret) + pr_err("Failed to validate HPS image\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t atf_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int version[3]; + + hal_get_atf_version(version); + return sprintf(buf, "%u.%u.%u\n", version[0], version[1], version[2]); +} + +static ssize_t sdos_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_sdos_crypt(k_ctx); + if (ret) + pr_err("Failed to perform SDOS operation\n"); + +out: + + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +#ifdef CONFIG_ALTERA_SOCFPGA_FCS_DEBUG +static ssize_t generic_mbox_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + int ret = buf_size; + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *const k_ctx = hal_get_fcs_cmd_ctx(); + + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_generic_mbox(k_ctx); + if (ret) + pr_err("Failed to send generic mbox\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} +#endif + +static ssize_t aes_crypt_init_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_aes_streaming_init(k_ctx); + if (ret) + pr_err("AES crypt streaming init failed\n"); +out: + hal_release_fcs_cmd_ctx(k_ctx); + return ret; +} + +static ssize_t aes_crypt_update_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_aes_streaming_update(k_ctx); + if (ret) + pr_err("AES crypt update failed\n"); +out: + hal_release_fcs_cmd_ctx(k_ctx); + return ret; +} + +static ssize_t aes_crypt_final_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_aes_streaming_final(k_ctx); + if (ret) + pr_err("AES crypt final failed\n"); +out: + hal_release_fcs_cmd_ctx(k_ctx); + return ret; +} + +static ssize_t ecdsa_data_sign_init_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_ecdsa_data_sign_streaming_init(k_ctx); + if (ret) + pr_err("Failed to perform ecdsa data sign init\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + return ret; +} + +static ssize_t ecdsa_data_sign_up_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_ecdsa_data_sign_streaming_update(k_ctx); + if (ret) + pr_err("Failed to perform ecdsa data sign update\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + return ret; +} + +static ssize_t ecdsa_data_sign_final_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_ecdsa_data_sign_streaming_final(k_ctx); + if (ret) + pr_err("ECDSA SHA2 Data sign final stage failed\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + return ret; +} + +static ssize_t ecdsa_data_verify_init_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_ecdsa_data_verify_streaming_init(k_ctx); + if (ret) + pr_err("Failed to perform ecdsa data sign init\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + return ret; +} + +static ssize_t ecdsa_data_verify_up_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_ecdsa_data_verify_streaming_update(k_ctx); + if (ret) + pr_err("Failed to perform ecdsa data verify update\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + return ret; +} + +static ssize_t ecdsa_data_verify_final_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_ecdsa_data_verify_streaming_final(k_ctx); + if (ret) + pr_err("ECDSA SHA2 Data verify final stage failed\n"); + +out: + hal_release_fcs_cmd_ctx(k_ctx); + return ret; +} + +static ssize_t get_digest_init_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_digest_streaming_init(k_ctx); + if (ret) + pr_err("Failed to perform get digest init\n"); +out: + hal_release_fcs_cmd_ctx(k_ctx); + return ret; +} + +static ssize_t get_digest_update_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_digest_streaming_update(k_ctx); + if (ret) + pr_err("Failed to perform get digest update\n"); +out: + hal_release_fcs_cmd_ctx(k_ctx); + return ret; +} + +static ssize_t get_digest_final_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_digest_streaming_final(k_ctx); + if (ret) + pr_err("Failed to perform get digest final\n"); +out: + hal_release_fcs_cmd_ctx(k_ctx); + return ret; +} + +static ssize_t get_digest_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_get_digest(k_ctx); + if (ret) + pr_err("Failed to perform get digest update\n"); +out: + hal_release_fcs_cmd_ctx(k_ctx); + return ret; +} + +static ssize_t mac_verify_init_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_mac_verify_streaming_init(k_ctx); + if (ret) + pr_err("Failed to mac verify init\n"); +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t mac_verify_update_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_mac_verify_streaming_update(k_ctx); + if (ret) + pr_err("Failed to mac verify update\n"); +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t mac_verify_final_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed get context. 
Context is in use\n"); + ret = -EFAULT; + goto out; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_mac_verify_streaming_final(k_ctx); + if (ret) + pr_err("Failed to mac verify final\n"); +out: + hal_release_fcs_cmd_ctx(k_ctx); + + return ret; +} + +static ssize_t get_rng_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t buf_size) +{ + struct fcs_cmd_context *const u_ctx = *(struct fcs_cmd_context **)buf; + struct fcs_cmd_context *k_ctx; + int ret = buf_size; + + k_ctx = hal_get_fcs_cmd_ctx(); + if (!k_ctx) { + pr_err("Failed to get context, context is in use\n"); + ret = -EFAULT; + return ret; + } + + if (copy_from_user(k_ctx, u_ctx, sizeof(struct fcs_cmd_context))) { + pr_err("Failed to copy context from user space\n"); + ret = -EFAULT; + goto out; + } + + ret = hal_random_number(k_ctx); + if (ret) + pr_err("Failed to get random number\n"); +out: + hal_release_fcs_cmd_ctx(k_ctx); + return ret; +} + +static DEVICE_ATTR_WO(open_session); +static DEVICE_ATTR_WO(close_session); +static DEVICE_ATTR_WO(context_info); +static DEVICE_ATTR_WO(import_key); +static DEVICE_ATTR_WO(export_key); +static DEVICE_ATTR_WO(remove_key); +static DEVICE_ATTR_WO(key_info); +static DEVICE_ATTR_WO(create_key); +static DEVICE_ATTR_WO(hkdf_req); +static DEVICE_ATTR_WO(prov_data); +static DEVICE_ATTR_WO(ctr_set); +static DEVICE_ATTR_WO(ctr_set_preauth); +static DEVICE_ATTR_WO(mac_verify); +static DEVICE_ATTR_WO(aes_crypt); +static DEVICE_ATTR_WO(ecdh_req); +static DEVICE_ATTR_WO(chip_id); +static DEVICE_ATTR_WO(atstn_cert); +static DEVICE_ATTR_WO(atstn_cert_reload); +static DEVICE_ATTR_WO(mctp_req); +static DEVICE_ATTR_WO(jtag_idcode); +static DEVICE_ATTR_WO(device_identity); +static DEVICE_ATTR_WO(qspi_open); +static DEVICE_ATTR_WO(qspi_close); +static DEVICE_ATTR_WO(qspi_cs); +static DEVICE_ATTR_WO(qspi_read); +static 
DEVICE_ATTR_WO(qspi_write); +static DEVICE_ATTR_WO(qspi_erase); +static DEVICE_ATTR_WO(ecdsa_get_pubkey); +static DEVICE_ATTR_WO(ecsda_hash_sign); +static DEVICE_ATTR_WO(ecdsa_hash_verify); +static DEVICE_ATTR_WO(ecdsa_sha2_data_sign); +static DEVICE_ATTR_WO(ecdsa_sha2data_verify); +static DEVICE_ATTR_WO(hps_image_validate); +static DEVICE_ATTR_RO(atf_version); +static DEVICE_ATTR_WO(sdos); +#ifdef CONFIG_ALTERA_SOCFPGA_FCS_DEBUG +static DEVICE_ATTR_WO(generic_mbox); +#endif +static DEVICE_ATTR_WO(aes_crypt_init); +static DEVICE_ATTR_WO(aes_crypt_update); +static DEVICE_ATTR_WO(aes_crypt_final); +static DEVICE_ATTR_WO(ecdsa_data_sign_init); +static DEVICE_ATTR_WO(ecdsa_data_sign_up); +static DEVICE_ATTR_WO(ecdsa_data_sign_final); +static DEVICE_ATTR_WO(ecdsa_data_verify_init); +static DEVICE_ATTR_WO(ecdsa_data_verify_up); +static DEVICE_ATTR_WO(ecdsa_data_verify_final); +static DEVICE_ATTR_WO(get_digest_init); +static DEVICE_ATTR_WO(get_digest_update); +static DEVICE_ATTR_WO(get_digest_final); +static DEVICE_ATTR_WO(get_digest); +static DEVICE_ATTR_WO(mac_verify_init); +static DEVICE_ATTR_WO(mac_verify_update); +static DEVICE_ATTR_WO(mac_verify_final); +static DEVICE_ATTR_WO(get_rng); + +static struct attribute *fcs_config_attrs[] = { + &dev_attr_open_session.attr, + &dev_attr_close_session.attr, + &dev_attr_context_info.attr, + &dev_attr_import_key.attr, + &dev_attr_export_key.attr, + &dev_attr_remove_key.attr, + &dev_attr_key_info.attr, + &dev_attr_create_key.attr, + &dev_attr_hkdf_req.attr, + &dev_attr_prov_data.attr, + &dev_attr_ctr_set.attr, + &dev_attr_ctr_set_preauth.attr, + &dev_attr_mac_verify.attr, + &dev_attr_aes_crypt.attr, + &dev_attr_ecdh_req.attr, + &dev_attr_chip_id.attr, + &dev_attr_atstn_cert.attr, + &dev_attr_atstn_cert_reload.attr, + &dev_attr_mctp_req.attr, + &dev_attr_jtag_idcode.attr, + &dev_attr_device_identity.attr, + &dev_attr_qspi_open.attr, + &dev_attr_qspi_close.attr, + &dev_attr_qspi_cs.attr, + &dev_attr_qspi_read.attr, + 
&dev_attr_qspi_write.attr, + &dev_attr_qspi_erase.attr, + &dev_attr_ecdsa_get_pubkey.attr, + &dev_attr_ecsda_hash_sign.attr, + &dev_attr_ecdsa_hash_verify.attr, + &dev_attr_ecdsa_sha2_data_sign.attr, + &dev_attr_ecdsa_sha2data_verify.attr, + &dev_attr_hps_image_validate.attr, + &dev_attr_atf_version.attr, + &dev_attr_sdos.attr, +#ifdef CONFIG_ALTERA_SOCFPGA_FCS_DEBUG + &dev_attr_generic_mbox.attr, +#endif + &dev_attr_aes_crypt_init.attr, + &dev_attr_aes_crypt_update.attr, + &dev_attr_aes_crypt_final.attr, + &dev_attr_ecdsa_data_sign_init.attr, + &dev_attr_ecdsa_data_sign_up.attr, + &dev_attr_ecdsa_data_sign_final.attr, + &dev_attr_ecdsa_data_verify_init.attr, + &dev_attr_ecdsa_data_verify_up.attr, + &dev_attr_ecdsa_data_verify_final.attr, + &dev_attr_get_digest_init.attr, + &dev_attr_get_digest_update.attr, + &dev_attr_get_digest_final.attr, + &dev_attr_get_digest.attr, + &dev_attr_mac_verify_init.attr, + &dev_attr_mac_verify_update.attr, + &dev_attr_mac_verify_final.attr, + &dev_attr_get_rng.attr, + NULL +}; + +static struct attribute_group fcs_group = { + .attrs = fcs_config_attrs, +}; + +static const struct attribute_group *fcs_groups[] = { + &fcs_group, + NULL, +}; + +struct kobject *sysfs_kobj; + +static int fcs_driver_probe(struct platform_device *pdev) +{ + int ret; + struct device *dev = &pdev->dev; + + sysfs_kobj = kobject_create_and_add("fcs_sysfs", kernel_kobj); + if (!sysfs_kobj) { + pr_err("Failed to create and add kobject\n"); + return -ENOMEM; + } + + ret = sysfs_create_groups(sysfs_kobj, fcs_groups); + if (ret) { + dev_err(dev, "Failed to create sysfs groups\n"); + kobject_put(sysfs_kobj); + return ret; + } + + ret = hal_fcs_init(dev); + if (ret) { + dev_err(dev, "Failed to initialize FCS HAL\n"); + sysfs_remove_groups(sysfs_kobj, fcs_groups); + kobject_put(sysfs_kobj); + return ret; + } + + pr_info("FCS config probing successfully completed"); + + return ret; +} + +static const struct of_device_id fcs_of_match[] = { + { .compatible = 
"intel,agilex5-soc-fcs-config" }, + { .compatible = "intel,agilex-soc-fcs-config" }, + {}, +}; + +static struct platform_driver fcs_driver = { + .probe = fcs_driver_probe, + .driver = { + .name = "socfpga-config", + .of_match_table = of_match_ptr(fcs_of_match), + }, +}; + +MODULE_DEVICE_TABLE(of, fcs_of_match); + +static int __init fcs_config_init(void) +{ + struct device_node *fw_np; + struct device_node *np; + int ret; + + fw_np = of_find_node_by_name(NULL, "firmware"); + if (!fw_np) + return -ENODEV; + + of_node_get(fw_np); + np = of_find_matching_node(fw_np, fcs_of_match); + if (!np) { + of_node_put(fw_np); + return -ENODEV; + } + + of_node_put(np); + ret = of_platform_populate(fw_np, fcs_of_match, NULL, NULL); + of_node_put(fw_np); + if (ret) + return ret; + + ret = platform_driver_register(&fcs_driver); + if (ret) + pr_err("Failed to register platform driver\n"); + + return ret; +} + +static void __exit fcs_config_exit(void) +{ + /* Remove sysfs groups */ + sysfs_remove_groups(sysfs_kobj, fcs_groups); + + /* Remove the kobject */ + if (sysfs_kobj) + kobject_put(sysfs_kobj); + + return platform_driver_unregister(&fcs_driver); +} + +module_init(fcs_config_init); +module_exit(fcs_config_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Altera socfpga config driver"); +MODULE_AUTHOR("Balsundar Ponnusamy, Santosh Male, Sagar Khadgi"); diff --git a/drivers/misc/socfpga_fcs_hal.c b/drivers/misc/socfpga_fcs_hal.c new file mode 100644 index 0000000000000..d001d087d6945 --- /dev/null +++ b/drivers/misc/socfpga_fcs_hal.c @@ -0,0 +1,4312 @@ +// SPDX-License-Identifier: GPL-2.0-or-later OR MIT +/* + * Copyright (C) 2025 Altera Corporation + */ + +#include +#include "socfpga_fcs_plat.h" + +#define RANDOM_NUMBER_EXT_HDR_SIZE 12 +#define FCS_CRYPTO_KEY_HEADER_SIZE 12 +#define CERTIFICATE_RSP_MAX_SZ 4096 +#define MBOX_SEND_RSP_MAX_SZ 4096 +#define QSPI_READ_LEN_MAX 4096 +#define MCTP_MAX_LEN 4096 +#define DEVICE_IDENTITY_MAX_LEN 4096 +#define QSPI_GET_INFO_LEN 36 + 
+#define HKDF_REQ_SZ_MAX 4096 + +#define DIGEST_CMD_MAX_SZ SZ_4M +#define CRYPTO_DIGEST_MAX_SZ SZ_4M +#define MAC_CMD_MAX_SZ SZ_4M +#define FCS_ECC_PUBKEY_LEN SZ_4M +#define FCS_ECDSA_HASH_SIGN_MAX_LEN SZ_4M +#define FCS_ECDSA_HSHA2_DATA_SIGN_BLOCK_SZ SZ_4M +#define FCS_ECDSA_SHA2_DATA_VERIFY_BLOCK_SZ SZ_4M +#define FCS_ECDSA_DATA_SIGN_VERIFY_MAX_LEN SZ_4M + +#define FCS_ECDSA_SHA2_DATA_VERIFY_RSP_SZ SZ_4M + +#define WORDS_TO_BYTES_SIZE 4 /* 4 bytes in a word */ + +#define CRYPTO_SERVICE_MIN_DATA_SIZE 8 + +#define OWNER_ID_OFFSET 12 +#define OWNER_ID_SIZE 8 + +#define RESPONSE_HEADER_SIZE 12 + +/*SDM required minimum 8 bytes of data for crypto service*/ +#define DIGEST_SERVICE_MIN_DATA_SIZE 8 +#define FCS_AES_IV_SZ 16 +#define FCS_POLL_STATUS_LEN 4 +#define FCS_AES_REQUEST_TIMEOUT (10 * FCS_REQUEST_TIMEOUT) +#define FCS_CRYPTO_BLOCK_SZ (4 * 1024 * 1024) +#define FCS_AES_CRYPT_BLOCK_SZ FCS_CRYPTO_BLOCK_SZ +#define AES_PARAMS_CRYPT_OFFSET 1 +#define AES_PARAMS_TAG_LEN_OFFSET 2 +#define AES_PARAMS_IV_TYPE_OFFSET 4 +#define AES_PARAMS_AAD_LEN_OFFSET 8 +#define FCS_AES_PARAMS_ECB_SZ 12 +#define GCM_TAG_LEN 16 +#define GCM_AAD_ALIGN 16 +#define GCM_DATA_ALIGN 16 +#define NON_GCM_DATA_ALIGN 32 +#define FCS_STATUS_LEN 4 +#define FCS_ECDSA_CRYPTO_BLOCK_SZ FCS_CRYPTO_BLOCK_SZ + +/* AES GCM tag length */ +#define GCS_TAG_LEN_32 4 +#define GCS_TAG_LEN_64 8 +#define GCS_TAG_LEN_96 12 +#define GCS_TAG_LEN_128 16 +#define MAP_GCM_TAG_32_TO_SDM_TAG 0 +#define MAP_GCM_TAG_64_TO_SDM_TAG 1 +#define MAP_GCM_TAG_96_TO_SDM_TAG 2 +#define MAP_GCM_TAG_128_TO_SDM_TAG 3 + +/* HKDF input payload size with 1st and 2nd input */ +#define HKDF_INPUT_DATA_SIZE 80 + +#define FCS_MAX_RESP_MS 50 +#define SDOS_DECRYPTION_REPROVISION_KEY_WARN 0x102 +#define SDOS_DECRYPTION_NOT_LATEST_KEY_WARN 0x103 + +static struct socfpga_fcs_priv *priv; + +FCS_HAL_INT hal_session_close(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, 
sizeof(struct fcs_cmd_context)); + + ret = fcs_plat_uuid_compare(&priv->uuid_id, + &k_ctx->close_session.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("Session UUID Mismatch ret: %d\n", ret); + return ret; + } + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_CLOSE_SESSION, FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_CLOSE_SESSION, ret); + return ret; + } + + fcs_plat_uuid_clear(priv); + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to close session ret: %d\n", + ret); + } + + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mail box status code to user ret: %d\n", + ret); + } + + + return ret; +} +EXPORT_SYMBOL(hal_session_close); + +FCS_HAL_INT hal_session_open(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_OPEN_SESSION, FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_OPEN_SESSION, ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to open session ret: %d\n", ret); + goto copy_mbox_status; + } + + fcs_plat_uuid_generate(priv); + + memcpy(&priv->session_id, &priv->resp, sizeof(priv->session_id)); + + ret = fcs_plat_copy_to_user(k_ctx->open_session.suuid, &priv->uuid_id, + sizeof(FCS_HAL_UUID)); + if (ret) { + LOG_ERR("Failed to copy session ID to user suuid addr: %p ret: %d\n", + k_ctx->open_session.suuid, ret); + goto copy_mbox_status; + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(k_ctx->error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mail box status code to user ret: %d\n", + ret); + } + + return ret; +} +EXPORT_SYMBOL(hal_session_open); + +FCS_HAL_VOID hal_get_atf_version(FCS_HAL_U32 *version) +{ + fcs_plat_memcpy(version, priv->atf_version, 
sizeof(priv->atf_version)); +} +EXPORT_SYMBOL(hal_get_atf_version); + +FCS_HAL_INT hal_import_key(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_UINT sbuf_size; + FCS_HAL_VOID *s_buf = NULL; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + ret = fcs_plat_uuid_compare(&priv->uuid_id, &k_ctx->import_key.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("Session UUID Mismatch ret: %d\n", ret); + return ret; + } + + sbuf_size = k_ctx->import_key.key_len + FCS_CRYPTO_KEY_HEADER_SIZE; + + s_buf = priv->plat_data->svc_alloc_memory(priv, sbuf_size); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for source buffer ret: %d\n", + ret); + return ret; + } + + /* Copy the session ID into the source buffer */ + fcs_plat_memcpy(s_buf, &priv->session_id, sizeof(FCS_HAL_U32)); + + /* Copy the key data from user space to the source buffer */ + ret = fcs_plat_copy_from_user(s_buf + FCS_CRYPTO_KEY_HEADER_SIZE, + k_ctx->import_key.key, + k_ctx->import_key.key_len); + if (ret) { + LOG_ERR("Failed to copy data from user to kernel source buffer ret: %d\n", + ret); + goto free_mem; + } + + k_ctx->import_key.key = s_buf; + k_ctx->import_key.key_len = sbuf_size; + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_CRYPTO_IMPORT_KEY, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_IMPORT_KEY, ret); + ret = -EFAULT; + goto free_mem; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to import key ret: %d\n", ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.import_key.status, &priv->resp, 1); + if (ret) { + LOG_ERR("Failed to copy import key response data to user ret: %d\n", + ret); + goto copy_mbox_status; + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mbox error code to user ret: 
%d\n", + ret); + ret = -EFAULT; + } +free_mem: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_import_key); + +FCS_HAL_INT hal_export_key(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *d_buf = NULL; + FCS_HAL_UINT key_len = CRYPTO_EXPORTED_KEY_OBJECT_MAX_SZ; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + ret = fcs_plat_uuid_compare(&priv->uuid_id, &k_ctx->export_key.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("Session UUID Mismatch ret: %d\n", ret); + return ret; + } + + d_buf = priv->plat_data->svc_alloc_memory(priv, + key_len + FCS_STATUS_LEN); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for key object ret: %d\n", + ret); + return ret; + } + + k_ctx->export_key.key = d_buf; + k_ctx->export_key.key_len = &key_len; + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_CRYPTO_EXPORT_KEY, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_EXPORT_KEY, ret); + goto free_dest; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to export key ret: %d\n", ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.export_key.key, d_buf + FCS_STATUS_LEN, + priv->resp - FCS_STATUS_LEN); + if (ret) { + LOG_ERR("Failed to copy key to user ret: %d\n", ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.export_key.key_len, &priv->resp, + sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy key length to user ret: %d\n", ret); + goto copy_mbox_status; + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mail box status code to user ret: %d\n", + ret); + } +free_dest: + priv->plat_data->svc_free_memory(priv, d_buf); + + return ret; +} +EXPORT_SYMBOL(hal_export_key); + +FCS_HAL_INT hal_remove_key(struct 
fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + ret = fcs_plat_uuid_compare(&priv->uuid_id, &k_ctx->remove_key.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("Session UUID Mismatch ret: %d\n", ret); + return ret; + } + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_CRYPTO_REMOVE_KEY, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_REMOVE_KEY, ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to remove key ret: %d\n", ret); + } + + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mail box status code to user ret: %d\n", + ret); + } + + return ret; +} +EXPORT_SYMBOL(hal_remove_key); + +FCS_HAL_INT hal_get_key_info(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_UINT info_len = CRYPTO_KEY_INFO_MAX_SZ; + FCS_HAL_VOID *d_buf = NULL; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + ret = fcs_plat_uuid_compare(&priv->uuid_id, &k_ctx->key_info.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch ret: %d\n", ret); + return ret; + } + + /* Allocate memory for the key info kernel buffer */ + d_buf = priv->plat_data->svc_alloc_memory(priv, CRYPTO_KEY_INFO_MAX_SZ); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for key info kernel buffer ret: %d\n", + ret); + return ret; + } + + k_ctx->key_info.info = d_buf; + k_ctx->key_info.info_len = &info_len; + + /* Send the request to get key info */ + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_GET_KEY_INFO, FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_GET_KEY_INFO, ret); + goto free_dest; + } + + /* Check if there was a mailbox error during key info 
retrieval */ + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to get key info ret: %d\n", ret); + goto copy_mbox_status; + } + + /* Copy the key info from kernel space to user space */ + ret = fcs_plat_copy_to_user(ctx.key_info.info, d_buf, priv->resp); + if (ret) { + LOG_ERR("Failed to copy key info to user ret: %d\n", ret); + goto copy_mbox_status; + } + + /* Copy the key info length from kernel space to user space */ + ret = fcs_plat_copy_to_user(ctx.key_info.info_len, &priv->resp, + sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy key info length to user ret: %d\n", + ret); + goto copy_mbox_status; + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } +free_dest: + priv->plat_data->svc_free_memory(priv, d_buf); + + return ret; +} +EXPORT_SYMBOL(hal_get_key_info); + +FCS_HAL_INT hal_create_key(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *s_buf = NULL; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + /* Compare the session UUIDs to check for a match */ + ret = fcs_plat_uuid_compare(&priv->uuid_id, &k_ctx->create_key.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch ret: %d\n", ret); + return ret; + } + + /* Calculate the total key length including the header */ + k_ctx->create_key.key_len = + FCS_CRYPTO_KEY_HEADER_SIZE + k_ctx->create_key.key_len; + + /* Allocate memory for the key object */ + s_buf = priv->plat_data->svc_alloc_memory(priv, + k_ctx->create_key.key_len); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for key object ret: %d\n", + ret); + return ret; + } + + /* Copy the session ID into the source buffer */ + fcs_plat_memcpy(s_buf, &priv->session_id, sizeof(FCS_HAL_U32)); + + /* Copy the key object data from user space to the source 
buffer */ + ret = fcs_plat_copy_from_user(s_buf + FCS_CRYPTO_KEY_HEADER_SIZE, + k_ctx->create_key.key, + k_ctx->create_key.key_len); + if (ret) { + LOG_ERR("Failed to copy data from user to kernel source buffer ret: %d\n", + ret); + goto free_mem; + } + + k_ctx->create_key.key = s_buf; + + /* Send the request to create key */ + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_CRYPTO_CREATE_KEY, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_CREATE_KEY, ret); + goto free_mem; + } + + /* Check if there was a mailbox error during key creation */ + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to create key ret: %d\n", ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.create_key.status, &priv->resp, 1); + if (ret) { + LOG_ERR("Failed to copy create key status to user ret: %d\n", + ret); + goto copy_mbox_status; + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } +free_mem: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_create_key); + +FCS_HAL_INT hal_get_provision_data(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_U32 data_len = CRYPTO_PROVISION_DATA_MAX_SZ; + FCS_HAL_VOID *d_buf = NULL; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + d_buf = priv->plat_data->svc_alloc_memory(priv, data_len); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for provision data ret: %d\n", + ret); + return ret; + } + + k_ctx->prov_data.data = d_buf; + k_ctx->prov_data.data_len = &data_len; + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_GET_PROVISION_DATA, FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_GET_PROVISION_DATA, ret); + 
goto free_dest; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, get provision data request Failed ret: %d\n", + ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.prov_data.data, d_buf, priv->resp); + if (ret) { + LOG_ERR("Failed to copy provision data to user ret: %d\n", ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.prov_data.data_len, &priv->resp, + sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy provision data length to user ret: %d\n", + ret); + goto copy_mbox_status; + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + +free_dest: + priv->plat_data->svc_free_memory(priv, d_buf); + + return ret; +} +EXPORT_SYMBOL(hal_get_provision_data); + +FCS_HAL_INT hal_counter_set(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT tsz, datasz; + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *s_buf = NULL; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + /* Allocate memory for certificate + test word */ + tsz = sizeof(FCS_HAL_U32); + datasz = ctx.ctr_set.ccert_len + tsz; + + s_buf = priv->plat_data->svc_alloc_memory(priv, datasz); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for counter set kernel buffer ret: %d\n", + ret); + return ret; + } + + ret = fcs_plat_copy_from_user(s_buf, &k_ctx->ctr_set.cache, tsz); + if (ret) { + LOG_ERR("Failed to copy cache to kernel buffer ret: %d\n", ret); + goto free_mem; + } + + ret = fcs_plat_copy_from_user(s_buf + tsz, k_ctx->ctr_set.ccert, + k_ctx->ctr_set.ccert_len); + if (ret) { + LOG_ERR("Failed to copy certificate to kernel buffer ret: %d\n", + ret); + goto free_mem; + } + + k_ctx->ctr_set.ccert = s_buf; + k_ctx->ctr_set.ccert_len = datasz; + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_COUNTER_SET, + FCS_REQUEST_TIMEOUT); + 
if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_COUNTER_SET, ret); + goto free_mem; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to set counter ret: %d\n", ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.ctr_set.status, &priv->resp, 1); + if (ret) { + LOG_ERR("Failed to copy counter set status to user ret: %d\n", + ret); + goto copy_mbox_status; + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } +free_mem: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_counter_set); + +FCS_HAL_INT hal_counter_set_preauth(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_COUNTER_SET_PREAUTHORIZED, FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_COUNTER_SET_PREAUTHORIZED, ret); + ret = -EFAULT; + goto unmap; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to set counter preauthorized ret: %d\n", + ret); + goto copy_mbox_status; + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + +unmap: + return ret; +} +EXPORT_SYMBOL(hal_counter_set_preauth); + +FCS_HAL_INT hal_random_number(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *s_buf = NULL; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + /* Compare the session UUIDs to check for a match. 
Here suuid is set + * through hal_store_context + */ + ret = fcs_plat_uuid_compare(&priv->uuid_id, &k_ctx->rng.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch ret: %d\n", ret); + return ret; + } + + k_ctx->rng.rng_len = ctx.rng.rng_len; + + s_buf = priv->plat_data->svc_alloc_memory( + priv, k_ctx->rng.rng_len + RANDOM_NUMBER_EXT_HDR_SIZE); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for rng kernel source buffer ret: %d\n", + ret); + return ret; + } + + k_ctx->rng.rng = s_buf; + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_RANDOM_NUMBER_GEN, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_RANDOM_NUMBER_GEN, ret); + goto free_mem; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, generate random number request failed ret: %d\n", + ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.rng.rng, + k_ctx->rng.rng + RANDOM_NUMBER_EXT_HDR_SIZE, + ctx.rng.rng_len); + if (ret) + LOG_ERR("Failed to copy random number to user ret: %d\n", ret); + +copy_mbox_status: + ret = fcs_plat_copy_to_user(k_ctx->error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mail box status code to user ret: %d\n", + ret); + } +free_mem: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_random_number); + +FCS_HAL_INT hal_hkdf_request(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *s_buf = NULL, *src_ptr = NULL; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + /* Compare the session UUIDs to check for a match. 
Here suuid is set + * through hal_store_context + */ + ret = fcs_plat_uuid_compare(&priv->uuid_id, &k_ctx->hkdf_req.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch ret: %d\n", ret); + return ret; + } + + /* Allocate memory for the shared secret kernel buffer */ + s_buf = priv->plat_data->svc_alloc_memory(priv, FCS_KDK_MAX_SZ); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for HKDF kernel buffer ret: %d\n", + ret); + return ret; + } + + src_ptr = s_buf; + + fcs_plat_memset(src_ptr, 0, HKDF_REQ_SZ_MAX); + + ret = fcs_plat_copy_from_user(src_ptr, &ctx.hkdf_req.ikm_len, + sizeof(ctx.hkdf_req.ikm_len)); + if (ret) { + LOG_ERR("Failed to copy HKDF salt length from user to kernel buffer ret: %d\n", + ret); + goto free_mem; + } + src_ptr += sizeof(ctx.hkdf_req.ikm_len); + + ret = fcs_plat_copy_from_user(src_ptr, ctx.hkdf_req.ikm, + ctx.hkdf_req.ikm_len); + if (ret) { + LOG_ERR("Failed to copy HKDF info from user to kernel buffer ret: %d\n", + ret); + goto free_mem; + } + src_ptr += HKDF_INPUT_DATA_SIZE; + + ret = fcs_plat_copy_from_user(src_ptr, &ctx.hkdf_req.info_len, + sizeof(ctx.hkdf_req.info_len)); + if (ret) { + LOG_ERR("Failed to copy HKDF salt from user to kernel buffer ret: %d\n", + ret); + goto free_mem; + } + src_ptr += sizeof(ctx.hkdf_req.info_len); + + ret = fcs_plat_copy_from_user(src_ptr, ctx.hkdf_req.info, + ctx.hkdf_req.info_len); + if (ret) { + LOG_ERR("Failed to copy HKDF salt from user to kernel buffer ret: %d\n", + ret); + goto free_mem; + } + src_ptr += HKDF_INPUT_DATA_SIZE; + + ret = fcs_plat_copy_from_user(src_ptr, + ctx.hkdf_req.output_key_obj, + ctx.hkdf_req.output_key_obj_len); + if (ret) { + LOG_ERR("Failed to copy HKDF output key obj from user ret: %d\n", + ret); + goto free_mem; + } + + k_ctx->hkdf_req.ikm = s_buf; + + ret = priv->plat_data->svc_send_request(priv, + FCS_DEV_CRYPTO_HKDF_REQUEST, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + 
FCS_DEV_CRYPTO_HKDF_REQUEST, ret); + goto free_mem; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to perform HKDF ret: %d\n", ret); + } + + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + + ret = fcs_plat_copy_to_user(ctx.hkdf_req.hkdf_resp, &priv->resp, + sizeof(priv->resp)); + if (ret) + LOG_ERR("Failed to copy HKDF status to user ret: %d\n", ret); + +free_mem: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_hkdf_request); + +static FCS_HAL_INT hal_digest_init(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_GET_DIGEST_INIT, FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_GET_DIGEST_INIT, ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to initialize digest ret: %d\n", + ret); + } + + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + return ret; +} + +static FCS_HAL_INT hal_digest_update(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *d_buf = NULL; + struct fcs_cmd_context ctx; + FCS_HAL_U32 ldigest_len = DIGEST_CMD_MAX_SZ; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + k_ctx->dgst.digest_len = &ldigest_len; + + + d_buf = priv->plat_data->svc_alloc_memory(priv, DIGEST_CMD_MAX_SZ); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for digest output kernel buffer. 
ret: %d\n", + ret); + return ret; + } + + k_ctx->dgst.digest = d_buf; + + /* Send the request to perform digest */ + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_GET_DIGEST_UPDATE, + 10 * FCS_REQUEST_TIMEOUT); + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to perform digest ret: %d\n", + ret); + goto copy_mbox_status; + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + + priv->plat_data->svc_free_memory(priv, d_buf); + return ret; +} + +static FCS_HAL_INT hal_digest_final(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *d_buf = NULL; + struct fcs_cmd_context ctx; + FCS_HAL_U32 ldigest_len = DIGEST_CMD_MAX_SZ; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + d_buf = priv->plat_data->svc_alloc_memory(priv, DIGEST_CMD_MAX_SZ); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for digest output kernel buffer ret: %d\n", + ret); + goto free_src; + } + + k_ctx->dgst.digest = d_buf; + k_ctx->dgst.digest_len = &ldigest_len; + + /* Send the request to finalize digest */ + ret = priv->plat_data->svc_send_request(priv, + FCS_DEV_CRYPTO_GET_DIGEST_FINAL, + 10 * FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_GET_DIGEST_FINAL, ret); + goto free_dst; + } + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to finalize digest ret: %d\n", + ret); + goto copy_mbox_status; + } + + priv->resp -= RESPONSE_HEADER_SIZE; + + /* Copy the digest output from kernel space to user space */ + ret = fcs_plat_copy_to_user(ctx.dgst.digest, + k_ctx->dgst.digest + RESPONSE_HEADER_SIZE, + priv->resp); + if (ret) { + LOG_ERR("Failed to copy digest output to user ret: %d\n", ret); + goto copy_mbox_status; + } + + /* Copy the digest output length from kernel 
space to user space */ + ret = fcs_plat_copy_to_user(ctx.dgst.digest_len, &priv->resp, + sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy digest output length to user ret: %d\n", + ret); + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + +free_dst: + priv->plat_data->svc_free_memory(priv, d_buf); +free_src: + priv->plat_data->svc_free_memory(priv, k_ctx->dgst.src); + + return ret; +} + +FCS_HAL_INT hal_mac_verify(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *s_buf = NULL, *d_buf = NULL; + struct fcs_cmd_context ctx; + FCS_HAL_VOID *input_buffer; + FCS_HAL_U32 remaining_size; + FCS_HAL_U32 sign_size; + FCS_HAL_U32 data_size; + FCS_HAL_U32 ud_sz, out_sz = 32; + FCS_HAL_U32 update_stage; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + /* Compare the session UUIDs to check for a match. 
Here suuid is set + * through hal_store_context + */ + ret = fcs_plat_uuid_compare(&priv->uuid_id, &k_ctx->rng.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch ret: %d\n", ret); + return ret; + } + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_MAC_VERIFY_INIT, FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_MAC_VERIFY_INIT, ret); + return ret; + } + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to initialize digest ret: %d\n", + ret); + return ret; + } + + input_buffer = k_ctx->mac_verify.src; + remaining_size = k_ctx->mac_verify.src_size; + sign_size = + k_ctx->mac_verify.src_size - k_ctx->mac_verify.user_data_size; + + /* Allocate memory for the input data kernel buffer */ + s_buf = priv->plat_data->svc_alloc_memory(priv, MAC_CMD_MAX_SZ); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for mac input data kernel buffer ret: %d\n", + ret); + return ret; + } + + d_buf = priv->plat_data->svc_alloc_memory(priv, MAC_CMD_MAX_SZ); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for mac output kernel buffer ret: %d\n", + ret); + goto free_s_buf; + } + + while (remaining_size > 0) { + if (remaining_size > MAC_CMD_MAX_SZ) { + /* Finalize stage require minimum 8bytes data size */ + if ((remaining_size - MAC_CMD_MAX_SZ) >= + (DIGEST_SERVICE_MIN_DATA_SIZE + sign_size)) { + data_size = CRYPTO_DIGEST_MAX_SZ; + ud_sz = CRYPTO_DIGEST_MAX_SZ; + LOG_DBG("Update full. data_size=%d, ud_sz=%d\n", + data_size, ud_sz); + } else { + /* Partial stage */ + data_size = remaining_size - + DIGEST_SERVICE_MIN_DATA_SIZE - + sign_size; + ud_sz = remaining_size - + DIGEST_SERVICE_MIN_DATA_SIZE - + sign_size; + LOG_DBG("Update partial. data_size=%d, ud_sz=%d\n", + data_size, ud_sz); + } + update_stage = 1; + } else { + data_size = remaining_size; + ud_sz = remaining_size - sign_size; + LOG_ERR("Finalize. 
data_size=%d, ud_sz=%d\n", data_size, + ud_sz); + update_stage = 0; + } + + /* Copy the user space input data to the input data kernel buffer */ + ret = fcs_plat_copy_from_user(s_buf, input_buffer, data_size); + if (ret) { + LOG_ERR("Failed to copy input data from user to kernel buffer ret: %d\n", + ret); + goto free_dest; + } + + k_ctx->mac_verify.src = s_buf; + k_ctx->mac_verify.src_size = data_size; + k_ctx->mac_verify.dst = d_buf; + k_ctx->mac_verify.dst_size = &out_sz; + k_ctx->mac_verify.user_data_size = ud_sz; + + if (update_stage == 1) { + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_MAC_VERIFY_UPDATE, + 100 * FCS_REQUEST_TIMEOUT); + + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_MAC_VERIFY_UPDATE, ret); + goto free_dest; + } + update_stage = 0; + } else { + /* Finalize stage */ + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_MAC_VERIFY_FINAL, + 100 * FCS_REQUEST_TIMEOUT); + + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_MAC_VERIFY_FINAL, ret); + goto free_dest; + } + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to Update digest verify ret: %d\n", + ret); + goto copy_mbox_status; + } + + remaining_size -= data_size; + if (remaining_size == 0) { + priv->resp -= RESPONSE_HEADER_SIZE; + + ret = fcs_plat_copy_to_user( + ctx.mac_verify.dst, + d_buf + RESPONSE_HEADER_SIZE, priv->resp); + if (ret) { + LOG_ERR("Failed to copy MAC verify data to user ret: %d\n", + ret); + goto free_dest; + } + ret = fcs_plat_copy_to_user(ctx.mac_verify.dst_size, + &priv->resp, + sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy MAC verify data size to user ret: %d\n", + ret); + goto free_dest; + } + } else { + input_buffer += data_size; + } + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + 
+free_dest: + priv->plat_data->svc_free_memory(priv, d_buf); +free_s_buf: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_mac_verify); + +static FCS_HAL_INT hal_aes_crypt_init(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_CHAR *aes_parms = NULL; + FCS_HAL_UINT aes_parms_len = FCS_AES_PARAMS_ECB_SZ + FCS_AES_IV_SZ; + + aes_parms = priv->plat_data->svc_alloc_memory(priv, aes_parms_len); + if (IS_ERR(aes_parms)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for AES parameters ret: %d\n", + ret); + return ret; + } + + fcs_plat_memset(aes_parms, 0, aes_parms_len); + fcs_plat_memcpy(aes_parms, &k_ctx->aes.mode, 1); + fcs_plat_memcpy(aes_parms + AES_PARAMS_CRYPT_OFFSET, &k_ctx->aes.crypt, 1); + fcs_plat_memcpy(aes_parms + AES_PARAMS_TAG_LEN_OFFSET, &k_ctx->aes.tag_len, 2); + fcs_plat_memcpy(aes_parms + AES_PARAMS_IV_TYPE_OFFSET, &k_ctx->aes.iv_source, 1); + fcs_plat_memcpy(aes_parms + AES_PARAMS_AAD_LEN_OFFSET, &k_ctx->aes.aad_len, 4); + + LOG_DBG("AES init: mode: %d, ENC/DEC: %d, tag_len: %d iv_src: %d aad_len: %d\n", + k_ctx->aes.mode, k_ctx->aes.crypt, k_ctx->aes.tag_len, + k_ctx->aes.iv_source, k_ctx->aes.aad_len); + + k_ctx->aes.ip_len = FCS_AES_PARAMS_ECB_SZ; + if (k_ctx->aes.mode != FCS_AES_BLOCK_MODE_ECB) { + ret = fcs_plat_copy_from_user(aes_parms + FCS_AES_PARAMS_ECB_SZ, + k_ctx->aes.iv, FCS_AES_IV_SZ); + if (ret) { + LOG_ERR("Failed to copy iv from user to kernel buffer ret: %d\n", + ret); + goto free_mem; + } + k_ctx->aes.ip_len += FCS_AES_IV_SZ; + } + k_ctx->aes.input = aes_parms; + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_AES_CRYPT_INIT, FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_AES_CRYPT_INIT, ret); + goto free_mem; + } + +free_mem: + priv->plat_data->svc_free_memory(priv, aes_parms); + + return ret; +} + +static FCS_HAL_INT hal_aes_crypt_update_final(FCS_HAL_CHAR *ip_ptr, FCS_HAL_UINT src_len, + 
FCS_HAL_CHAR *aad, FCS_HAL_UINT aad_size, + FCS_HAL_CHAR *tag, FCS_HAL_UINT src_tag_len, + FCS_HAL_UINT dst_tag_len, FCS_HAL_CHAR *op_ptr, + FCS_HAL_UINT mode, + struct fcs_cmd_context *const k_ctx, + FCS_HAL_INT command) +{ + FCS_HAL_INT ret = 0, pad1 = 0, pad2 = 0, s_buf_size = 0, d_buf_size = 0; + FCS_HAL_CHAR *s_buf = NULL, *s_buf_wr_ptr = NULL, *d_buf = NULL; + + if (mode == FCS_AES_BLOCK_MODE_GCM || + mode == FCS_AES_BLOCK_MODE_GHASH) { + pad1 = (aad_size % GCM_AAD_ALIGN) ? + (GCM_AAD_ALIGN - (aad_size % GCM_AAD_ALIGN)) : 0; + pad2 = (src_len % GCM_DATA_ALIGN) ? + (GCM_DATA_ALIGN - (src_len % GCM_DATA_ALIGN)) : 0; + + s_buf_size = aad_size + pad1 + src_len + pad2 + src_tag_len; + d_buf_size = src_len + pad2 + dst_tag_len; + + if (s_buf_size > FCS_AES_CRYPT_BLOCK_SZ || + d_buf_size > FCS_AES_CRYPT_BLOCK_SZ) { + LOG_ERR("Invalid size request. Maximum buffer size supported is %d bytes\n", + FCS_AES_CRYPT_BLOCK_SZ); + return -EINVAL; + } + + LOG_DBG("AES GCM: aadlen:%d, pad1:%d, srcln:%d, pad2:%d, srctag:%d, dsttag:%d\n", + aad_size, pad1, src_len, pad2, src_tag_len, + dst_tag_len); + } else { + pad2 = (src_len % NON_GCM_DATA_ALIGN) ? + (NON_GCM_DATA_ALIGN - (aad_size % NON_GCM_DATA_ALIGN)) : + 0; + s_buf_size = src_len + pad2; + d_buf_size = src_len + pad2; + + if (s_buf_size > FCS_AES_CRYPT_BLOCK_SZ) { + LOG_ERR("Invalid size request. 
Maximum buffer size supported is %d bytes\n", + FCS_AES_CRYPT_BLOCK_SZ); + return -EINVAL; + } + aad_size = 0; + src_tag_len = 0; + dst_tag_len = 0; + } + + s_buf = priv->plat_data->svc_alloc_memory(priv, FCS_AES_CRYPT_BLOCK_SZ); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for AES source buffer ret: %d\n", ret); + return ret; + } + s_buf_wr_ptr = s_buf; + + LOG_DBG("AES Update/final s_buf = %p, s_buf_size = %d\n", s_buf, s_buf_size); + + if (aad_size) { + if (aad) { + LOG_DBG("AES Update/final copy AAD at %p aad_size = %d\n", + s_buf, aad_size); + + ret = fcs_plat_copy_from_user(s_buf, aad, aad_size); + if (ret) { + LOG_ERR("Failed to copy AAD data to svc buffer ret: %d\n", + ret); + goto free_src; + } + + fcs_plat_memset(s_buf + aad_size, 0, pad1); + } else { + LOG_ERR("Invalid AAD data buffer address %d\n", ret); + ret = -EINVAL; + goto free_src; + } + + aad_size += pad1; + s_buf_wr_ptr = s_buf_wr_ptr + aad_size; + } + + LOG_DBG("AES Update/final copy Data at %p data_size = %d\n", + s_buf_wr_ptr, src_len); + + ret = fcs_plat_copy_from_user(s_buf_wr_ptr, ip_ptr, src_len); + if (ret) { + LOG_ERR("Failed to copy AES data to svc buffer ret: %d\n", ret); + goto free_src; + } + + s_buf_wr_ptr = s_buf_wr_ptr + src_len; + fcs_plat_memset(s_buf_wr_ptr, 0, pad2); + s_buf_wr_ptr = s_buf_wr_ptr + pad2; + + if (src_tag_len) { + if (tag) { + LOG_DBG("AES Update/final Tag value at %p tag_size = %d\n", + s_buf_wr_ptr, src_tag_len); + ret = fcs_plat_copy_from_user(s_buf_wr_ptr, tag, + src_tag_len); + if (ret) { + LOG_ERR("Failed to copy Tag data to svc buffer ret: %d\n", + ret); + goto free_src; + } + } else { + LOG_ERR("Invalid TAG data buffer address %d\n", ret); + ret = -EINVAL; + goto free_src; + } + } + + d_buf = priv->plat_data->svc_alloc_memory(priv, FCS_AES_CRYPT_BLOCK_SZ); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for AES destination buffer ret: %d\n", + ret); + goto free_src; + } + + k_ctx->aes.ip_len = 
s_buf_size; + if (mode == FCS_AES_BLOCK_MODE_GHASH) + *k_ctx->aes.op_len = 0; + else + *k_ctx->aes.op_len = d_buf_size; + + k_ctx->aes.input = s_buf; + k_ctx->aes.output = d_buf; + k_ctx->aes.input_pad = pad2; + + /* Send the AES crypt request */ + ret = priv->plat_data->svc_send_request(priv, command, + FCS_AES_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", command, ret); + goto free_dst; + } + + /* Copy the mailbox status code to the user */ + ret = fcs_plat_copy_to_user(k_ctx->error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + goto free_dst; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to perform AES crypt Mbox status: 0x%x\n", + priv->status); + goto free_dst; + } + + + if (mode != FCS_AES_BLOCK_MODE_GHASH) { + LOG_DBG("AES copy Data to destination buffer %p data_size = %d\n", + op_ptr, src_len + pad2); + /* Copy the destination buffer to the user space */ + ret = fcs_plat_copy_to_user(op_ptr, d_buf, src_len + pad2); + if (ret) + LOG_ERR("Failed to copy AES data from kernel to user buffer ret: %d\n", + ret); + } + + if (dst_tag_len) { + if (tag) { + LOG_DBG("AES copy tag value to Tag buffer %p data_size = %d\n", + tag, dst_tag_len); + ret = fcs_plat_copy_to_user(tag, d_buf + src_len + pad2, + dst_tag_len); + if (ret) { + LOG_ERR("Failed to copy TAG value to tag buffer ret: %d\n", + ret); + goto free_dst; + } + } else { + LOG_ERR("Invalid TAG data buffer address %d\n", ret); + goto free_dst; + } + } + + LOG_DBG("AES Update/final Success\n"); + +free_dst: + priv->plat_data->svc_free_memory(priv, d_buf); +free_src: + priv->plat_data->svc_free_memory(priv, s_buf); + return ret; +} + +FCS_HAL_INT hal_aes_crypt(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + FCS_HAL_CHAR *ip_ptr = NULL, *op_ptr = NULL; + FCS_HAL_UINT ip_len = 0, op_len = 0, src_len = 0; + 
FCS_HAL_UINT total_op_len = 0; + FCS_HAL_UINT pad1 = 0, aad_size = 0, src_tag_len = 0, dst_tag_len = 0; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + /* Compare the session UUIDs to check for a match. Here suuid is set + * through hal_store_context + */ + ret = fcs_plat_uuid_compare(&priv->uuid_id, &ctx.aes.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch ret: %d\n", ret); + return ret; + } + + /* Initialize the AES crypt */ + ret = hal_aes_crypt_init(k_ctx); + if (ret) { + LOG_ERR("Failed to perform AES crypt init ret: %d\n", ret); + return ret; + } + + /* Calculate AAD data padding length. AAD data shall be 16 bytes aligned. + * Applicable for only GCM for other modes aad_len will be 0 hence pad1 will be 0 + */ + aad_size = k_ctx->aes.aad_len; + pad1 = (aad_size % GCM_AAD_ALIGN) ? + GCM_AAD_ALIGN - (k_ctx->aes.aad_len % GCM_AAD_ALIGN) : 0; + + ip_len = (ctx.aes.ip_len + aad_size + pad1); + k_ctx->aes.op_len = &op_len; + ip_ptr = ctx.aes.input; + op_ptr = ctx.aes.output; + k_ctx->aes.input_pad = 0; + + while (ip_len > FCS_AES_CRYPT_BLOCK_SZ) { + src_len = FCS_AES_CRYPT_BLOCK_SZ - (aad_size + pad1); + + ret = hal_aes_crypt_update_final(ip_ptr, src_len, + k_ctx->aes.aad, aad_size, + NULL, src_tag_len, dst_tag_len, + op_ptr, k_ctx->aes.mode, k_ctx, + FCS_DEV_CRYPTO_AES_CRYPT_UPDATE); + if (ret) { + LOG_ERR("Failed to perform AES crypt update ret: %d\n", + ret); + return ret; + } + + ip_ptr += src_len; + op_ptr += src_len; + ip_len -= (src_len + aad_size + pad1); + total_op_len += src_len; + aad_size = 0; + pad1 = 0; + } + + if (k_ctx->aes.mode == FCS_AES_BLOCK_MODE_GCM || + k_ctx->aes.mode == FCS_AES_BLOCK_MODE_GHASH) { + if (ip_len > (FCS_AES_CRYPT_BLOCK_SZ - GCM_TAG_LEN)) { + src_len = FCS_AES_CRYPT_BLOCK_SZ - GCM_TAG_LEN - + (aad_size + pad1); + + ret = hal_aes_crypt_update_final(ip_ptr, src_len, + k_ctx->aes.aad, aad_size, + NULL, src_tag_len, dst_tag_len, + op_ptr, k_ctx->aes.mode, k_ctx, + 
FCS_DEV_CRYPTO_AES_CRYPT_UPDATE); + if (ret) { + LOG_ERR("Failed to perform AES crypt update ret: %d\n", + ret); + return ret; + } + + ip_ptr += src_len; + op_ptr += src_len; + ip_len -= (src_len + aad_size + pad1); + total_op_len += src_len; + aad_size = 0; + pad1 = 0; + } + + if (k_ctx->aes.crypt == FCS_AES_ENCRYPT) { + src_tag_len = 0; + dst_tag_len = GCM_TAG_LEN; + } else { + src_tag_len = GCM_TAG_LEN; + dst_tag_len = 0; + } + } + + if (ip_len) { + src_len = ip_len - (aad_size + pad1); + + ret = hal_aes_crypt_update_final(ip_ptr, src_len, + k_ctx->aes.aad, aad_size, + k_ctx->aes.tag, src_tag_len, dst_tag_len, + op_ptr, k_ctx->aes.mode, k_ctx, + FCS_DEV_CRYPTO_AES_CRYPT_FINAL); + if (ret) { + LOG_ERR("Failed to perform AES crypt update ret: %d\n", + ret); + return ret; + } + + if (k_ctx->aes.mode != FCS_AES_BLOCK_MODE_GHASH) + total_op_len += src_len; + else + total_op_len = 0; + } + + /* Copy the destination buffer to the user space */ + ret = fcs_plat_copy_to_user(ctx.aes.op_len, &total_op_len, + sizeof(ctx.aes.op_len)); + if (ret) { + LOG_ERR("Failed to copy AES data from kernel to user buffer ret: %d\n", + ret); + } + + return ret; +} +EXPORT_SYMBOL(hal_aes_crypt); + +FCS_HAL_INT hal_ecdh_req(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *s_buf = NULL, *d_buf = NULL; + struct fcs_cmd_context ctx; + FCS_HAL_UINT d_buf_len = 0; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + if ((ctx.ecdh_req.ecc_curve == FCS_ECC_CURVE_NIST_P256 && + ctx.ecdh_req.pubkey_len != FCS_ECDH_P256_PUBKEY_LEN) || + (ctx.ecdh_req.ecc_curve == FCS_ECC_CURVE_NIST_P384 && + ctx.ecdh_req.pubkey_len != FCS_ECDH_P384_PUBKEY_LEN) || + (ctx.ecdh_req.ecc_curve == FCS_ECC_CURVE_BRAINPOOL_P256 && + ctx.ecdh_req.pubkey_len != FCS_ECDH_BP256_PUBKEY_LEN) || + (ctx.ecdh_req.ecc_curve == FCS_ECC_CURVE_BRAINPOOL_P384 && + ctx.ecdh_req.pubkey_len != FCS_ECDH_BP384_PUBKEY_LEN)) { + ret = -EINVAL; + LOG_ERR("Invalid shared secret length ret: %d\n", 
ret); + return ret; + } + + /* Compare the session UUIDs to check for a match. Here suuid is set + * through hal_store_context + */ + ret = fcs_plat_uuid_compare(&priv->uuid_id, &ctx.ecdh_req.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch ret: %d\n", ret); + return ret; + } + + /* Allocate memory for the source buffer */ + s_buf = priv->plat_data->svc_alloc_memory(priv, + ctx.ecdh_req.pubkey_len); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDH source buffer ret: %d\n", + ret); + return ret; + } + + /* Copy the user space source data to the source buffer */ + ret = fcs_plat_copy_from_user(s_buf, ctx.ecdh_req.pubkey, + ctx.ecdh_req.pubkey_len); + if (ret) { + LOG_ERR("Failed to copy ECDH data from user to kernel buffer ret: %d\n", + ret); + goto free_src; + } + + /* 1 byte for format indicator + pk len bytes for X coordinate + pk len + * bytes for Y coordinate + */ + d_buf_len = ctx.ecdh_req.pubkey_len >> 1; + /* Allocate memory for the destination buffer */ + d_buf = priv->plat_data->svc_alloc_memory(priv, d_buf_len + 12); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDH destination buffer ret: %d\n", + ret); + goto free_src; + } + + k_ctx->ecdh_req.pubkey = s_buf; + k_ctx->ecdh_req.sh_secret = d_buf; + k_ctx->ecdh_req.sh_secret_len = &d_buf_len; + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_ECDH_REQUEST_INIT, FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_ECDH_REQUEST_INIT, ret); + goto free_dst; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to perform ECDH ret: %d\n", ret); + goto copy_mbox_status; + } + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_ECDH_REQUEST_FINALIZE, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_ECDH_REQUEST_FINALIZE, ret); + goto copy_mbox_status; + } + + if 
(priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to perform ECDH ret: %d\n", ret); + goto copy_mbox_status; + } + + priv->resp -= RESPONSE_HEADER_SIZE; + + ret = fcs_plat_copy_to_user(ctx.ecdh_req.sh_secret, + d_buf + RESPONSE_HEADER_SIZE, priv->resp); + if (ret) + LOG_ERR("Failed to copy ECDH data to user ret: %d\n", ret); + + ret = fcs_plat_copy_to_user(ctx.ecdh_req.sh_secret_len, &priv->resp, + sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy ECDH data length to user ret: %d\n", + ret); + } +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + +free_dst: + priv->plat_data->svc_free_memory(priv, d_buf); +free_src: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_ecdh_req); + +FCS_HAL_INT hal_get_chip_id(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_CHIP_ID, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", FCS_DEV_CHIP_ID, + ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, get chip ID request failed ret: %d\n", + ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.chip_id.chip_id_lo, &priv->chip_id_lo, + sizeof(priv->chip_id_lo)); + if (ret) { + LOG_ERR("Failed to copy chip ID to user ret: %d\n", ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.chip_id.chip_id_hi, &priv->chip_id_hi, + sizeof(priv->chip_id_hi)); + if (ret) + LOG_ERR("Failed to copy chip ID to user ret: %d\n", ret); + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + 
ret); + } + + return ret; +} +EXPORT_SYMBOL(hal_get_chip_id); + +FCS_HAL_INT hal_attestation_get_certificate(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_U32 cert_len = CERTIFICATE_RSP_MAX_SZ; + FCS_HAL_VOID *d_buf = NULL; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + d_buf = priv->plat_data->svc_alloc_memory(priv, cert_len); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for certificate kernel buffer ret: %d\n", + ret); + return ret; + } + + k_ctx->attestation_cert.cert = d_buf; + k_ctx->attestation_cert.cert_size = &cert_len; + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_ATTESTATION_GET_CERTIFICATE, + 10 * FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_ATTESTATION_GET_CERTIFICATE, ret); + goto free_dest; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, get attestation certificate request failed ret: %d\n", + ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.attestation_cert.cert, d_buf, + priv->resp); + if (ret) { + LOG_ERR("Failed to copy attestation certificate to user ret: %d\n", + ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.attestation_cert.cert_size, &priv->resp, + sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy attestation certificate length to user ret: %d\n", + ret); + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } +free_dest: + priv->plat_data->svc_free_memory(priv, d_buf); + + return ret; +} +EXPORT_SYMBOL(hal_attestation_get_certificate); + +FCS_HAL_INT +hal_attestation_certificate_reload(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + 
ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_ATTESTATION_CERTIFICATE_RELOAD, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_ATTESTATION_CERTIFICATE_RELOAD, ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, attestation certificate reload request failed ret: %d\n", + ret); + } + + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + + return ret; +} +EXPORT_SYMBOL(hal_attestation_certificate_reload); + +FCS_HAL_INT hal_mctp_request(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_UINT mctp_len = MCTP_MAX_LEN; + FCS_HAL_VOID *s_buf = NULL, *d_buf = NULL; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + if (ctx.mctp.mctp_req_len > MCTP_MAX_LEN) { + LOG_ERR("MCTP data length %d is Invalid, must be less than %d\n", + ctx.mctp.mctp_req_len, MCTP_MAX_LEN); + return -EINVAL; + } + + s_buf = priv->plat_data->svc_alloc_memory(priv, ctx.mctp.mctp_req_len); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for source buffer ret: %d\n", + ret); + return ret; + } + + ret = fcs_plat_copy_from_user(s_buf, ctx.mctp.mctp_req, + ctx.mctp.mctp_req_len); + if (ret) { + LOG_ERR("Failed to copy data from user to kernel buffer ret: %d\n", + ret); + goto free_mem; + } + + d_buf = priv->plat_data->svc_alloc_memory(priv, mctp_len); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for destination buffer ret: %d\n", + ret); + goto free_mem; + } + + k_ctx->mctp.mctp_req = s_buf; + k_ctx->mctp.mctp_resp = d_buf; + k_ctx->mctp.mctp_resp_len = &mctp_len; + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_MCTP_REQUEST, + 10 * FCS_REQUEST_TIMEOUT); + + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, MCTP request failed ret: 
%d\n", ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.mctp.mctp_resp, d_buf, priv->resp); + if (ret) { + LOG_ERR("Failed to copy MCTP response to user ret: %d\n", ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.mctp.mctp_resp_len, &priv->resp, + sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy MCTP response size to user ret: %d\n", + ret); + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + + priv->plat_data->svc_free_memory(priv, d_buf); + +free_mem: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_mctp_request); + +FCS_HAL_INT hal_jtag_idcode(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_U32 ret; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_GET_IDCODE, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_GET_IDCODE, ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to get JTAG IDCODE ret: %d\n", + ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.jtag_id.jtag_idcode, &priv->resp, + sizeof(priv->resp)); + if (ret) + LOG_ERR("Failed to copy JTAG IDCODE to user ret: %d\n", ret); + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + + return ret; +} +EXPORT_SYMBOL(hal_jtag_idcode); + +FCS_HAL_INT hal_get_device_identity(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_U32 ret; + struct fcs_cmd_context ctx; + FCS_HAL_VOID *d_buf = NULL; + FCS_HAL_U32 devid_len = DEVICE_IDENTITY_MAX_LEN; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + 
+ d_buf = priv->plat_data->svc_alloc_memory(priv, devid_len); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for Device Identity kernel buffer ret: %d\n", + ret); + return ret; + } + + k_ctx->device_identity.identity = d_buf; + k_ctx->device_identity.identity_len = &devid_len; + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_GET_DEVICE_IDENTITY, FCS_REQUEST_TIMEOUT); + + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_GET_DEVICE_IDENTITY, ret); + goto free_dest; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to get Device Identity ret: %d\n", + ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.device_identity.identity, d_buf, + priv->resp); + if (ret) { + LOG_ERR("Failed to copy Device Identity to user ret: %d\n", + ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.device_identity.identity_len, + &priv->resp, sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy Device Identity length to user ret: %d\n", + ret); + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + +free_dest: + priv->plat_data->svc_free_memory(priv, d_buf); + + return 0; +} +EXPORT_SYMBOL(hal_get_device_identity); + +FCS_HAL_INT hal_qspi_open(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + + if (k_ctx) + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_QSPI_OPEN, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", FCS_DEV_QSPI_OPEN, + ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to open QSPI ret: %d\n", ret); + } + + if (k_ctx) { + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + 
sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + } + + return 0; +} +EXPORT_SYMBOL(hal_qspi_open); + +FCS_HAL_INT hal_qspi_close(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + + if (k_ctx) + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_QSPI_CLOSE, + FCS_REQUEST_TIMEOUT); + + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_QSPI_CLOSE, ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to close QSPI ret: %d\n", ret); + } + + if (k_ctx) { + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + } + + return 0; +} +EXPORT_SYMBOL(hal_qspi_close); + +FCS_HAL_INT hal_qspi_cs(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + + if (k_ctx) + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_QSPI_CS, + FCS_REQUEST_TIMEOUT); + + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", FCS_DEV_QSPI_CS, + ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to perform QSPI CS ret: %d\n", + ret); + } + + if (k_ctx) { + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + } + + return 0; +} +EXPORT_SYMBOL(hal_qspi_cs); + +FCS_HAL_INT hal_qspi_read(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + FCS_HAL_VOID *d_buf = NULL; + FCS_HAL_U32 resp_len = QSPI_READ_LEN_MAX; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + d_buf = priv->plat_data->svc_alloc_memory(priv, resp_len); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory 
for QSPI read kernel buffer ret: %d\n", + ret); + return ret; + } + + + k_ctx->qspi_read.qspi_data = d_buf; + k_ctx->qspi_read.qspi_data_len = &resp_len; + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_QSPI_READ, + FCS_REQUEST_TIMEOUT); + + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", FCS_DEV_QSPI_READ, + ret); + goto free_dest; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to read QSPI ret: %d\n", ret); + goto copy_mbox_status; + } + + /* requested size and response is not matching */ + if (ctx.qspi_read.qspi_len != priv->resp / 4) { + LOG_ERR("QSPI read req and resp size is not matching resp_size:0x%x\n", + priv->resp); + ret = -EFAULT; + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.qspi_read.qspi_data, d_buf, priv->resp); + if (ret) { + LOG_ERR("Failed to copy QSPI read data to user ret: %d\n", ret); + goto copy_mbox_status; + } + +copy_mbox_status: + if (fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status))) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + +free_dest: + priv->plat_data->svc_free_memory(priv, d_buf); + + return 0; +} +EXPORT_SYMBOL(hal_qspi_read); + +FCS_HAL_INT hal_qspi_write(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + FCS_HAL_VOID *s_buf = NULL; + FCS_HAL_U32 s_buf_sz = 0; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + /* s_buf_sz--> (number of words * 4) + 4 bytes for qspi addr + 4 bytes qspi write len */ + s_buf_sz = ctx.qspi_write.qspi_len * WORDS_TO_BYTES_SIZE + 8; + + s_buf = priv->plat_data->svc_alloc_memory(priv, s_buf_sz); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for QSPI write kernel buffer ret: %d\n", + ret); + return ret; + } + + ret = fcs_plat_copy_from_user(s_buf, &ctx.qspi_write.qspi_addr, 4); + if (ret) { + LOG_ERR("Failed to copy QSPI write address from user to kernel buffer ret: %d\n", + 
ret); + goto free_src_mem; + } + + ret = fcs_plat_copy_from_user(s_buf + 4, &ctx.qspi_write.qspi_len, 4); + if (ret) { + LOG_ERR("Failed to copy QSPI write length from user to kernel buffer ret: %d\n", + ret); + goto free_src_mem; + } + + ret = fcs_plat_copy_from_user(s_buf + 8, ctx.qspi_write.qspi_data, + ctx.qspi_write.qspi_len * 4); + if (ret) { + LOG_ERR("Failed to copy data from user to kernel buffer ret: %d\n", + ret); + goto free_src_mem; + } + + k_ctx->qspi_write.qspi_data = s_buf; + k_ctx->qspi_write.qspi_data_len = &s_buf_sz; + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_QSPI_WRITE, + FCS_REQUEST_TIMEOUT); + + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_QSPI_WRITE, ret); + goto free_src_mem; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to write QSPI ret: %d\n", ret); + } + + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + +free_src_mem: + priv->plat_data->svc_free_memory(priv, s_buf); + + return 0; +} +EXPORT_SYMBOL(hal_qspi_write); + +FCS_HAL_INT hal_qspi_erase(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_QSPI_ERASE, + FCS_REQUEST_TIMEOUT); + + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_QSPI_ERASE, ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to erase QSPI ret: %d\n", ret); + } + + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + + return 0; +} +EXPORT_SYMBOL(hal_qspi_erase); + +FCS_HAL_INT hal_sdos_crypt(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_VOID *s_buf = NULL, *d_buf = 
NULL; + struct fcs_cmd_context ctx; + FCS_HAL_U32 output_size; + FCS_HAL_U64 owner_id; + FCS_HAL_INT ret = 0; + FCS_HAL_CHAR *temp; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + if (ctx.sdos.op_mode) + output_size = SDOS_ENCRYPTED_MAX_SZ; + else + output_size = SDOS_DECRYPTED_MAX_SZ; + + /* Compare the session UUIDs to check for a match. Here suuid is set + * through hal_store_context + */ + ret = fcs_plat_uuid_compare(&priv->uuid_id, &k_ctx->rng.suuid); + if (!ret) { + LOG_ERR("Session UUID Mismatch ret: %d\n", ret); + ret = -EINVAL; + return ret; + } + + s_buf = priv->plat_data->svc_alloc_memory(priv, k_ctx->sdos.src_size); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for SDOS input data kernel buffer ret: %d\n", + ret); + return ret; + } + + k_ctx->sdos.dst_size = &output_size; + + d_buf = priv->plat_data->svc_alloc_memory(priv, *k_ctx->sdos.dst_size); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for SDOS output kernel buffer ret: %d\n", + ret); + goto free_sbuf; + } + + /* Copy the user space input data to the input data kernel buffer */ + ret = fcs_plat_copy_from_user(s_buf, k_ctx->sdos.src, + k_ctx->sdos.src_size); + if (ret) { + LOG_ERR("Failed to copy SDOS data from user to kernel buffer ret: %d\n", + ret); + goto free_dbuf; + } + + /* Get Owner ID from buf */ + temp = (uint8_t *)s_buf; + memcpy(&owner_id, temp + OWNER_ID_OFFSET, OWNER_ID_SIZE); + + k_ctx->sdos.own = owner_id; + + + k_ctx->sdos.src = s_buf; + k_ctx->sdos.dst = d_buf; + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_SDOS_DATA_EXT, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_SDOS_DATA_EXT, ret); + goto free_dbuf; + } + if ((priv->status) && + (priv->status != SDOS_DECRYPTION_REPROVISION_KEY_WARN) && + (priv->status != SDOS_DECRYPTION_NOT_LATEST_KEY_WARN)) { + LOG_ERR("Failed to perform SDOS operation ret: %d Mailbox Status = %d\n", + ret, 
priv->status); + ret = -EIO; + goto copy_mbox_status; + } + + /* Copy the encrypted/decrypted output from kernel space to user space */ + ret = fcs_plat_copy_to_user(ctx.sdos.dst, d_buf, priv->resp); + if (ret) { + LOG_ERR("Failed to copy encrypted output to user ret: %d\n", + ret); + goto copy_mbox_status; + } + + /* Copy the encrypted output length from kernel space to user space */ + ret = fcs_plat_copy_to_user(ctx.sdos.dst_size, &priv->resp, + sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy encrypted output length to user ret: %d\n", + ret); + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } +free_dbuf: + priv->plat_data->svc_free_memory(priv, d_buf); +free_sbuf: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_sdos_crypt); + +FCS_HAL_INT hal_ecdsa_get_pubkey(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_UINT pubkey_len = FCS_ECC_PUBKEY_LEN; + FCS_HAL_VOID *d_buf = NULL; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + /* Compare the session UUIDs to check for a match. 
Here suuid is set + * through hal_store_context + */ + ret = fcs_plat_uuid_compare(&priv->uuid_id, + &k_ctx->ecdsa_pub_key.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch while requesting pubkey ret: %d\n", + ret); + return ret; + } + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_ECDSA_GET_PUBLIC_KEY_INIT, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_ECDSA_GET_PUBLIC_KEY_INIT, ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Failed to get public key with mbox status:0x%X\n", + priv->status); + return ret; + } + + d_buf = priv->plat_data->svc_alloc_memory(priv, pubkey_len); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA public key kernel buffer ret: %d\n", + ret); + return ret; + } + + k_ctx->ecdsa_pub_key.pubkey = d_buf; + k_ctx->ecdsa_pub_key.pubkey_len = &pubkey_len; + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_ECDSA_GET_PUBLIC_KEY_FINALIZE, + 10 * FCS_REQUEST_TIMEOUT); + + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_ECDSA_GET_PUBLIC_KEY_FINALIZE, ret); + goto free_dest; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, get ECDSA public key request failed ret: %d\n", + ret); + goto copy_mbox_status; + } + + priv->resp -= RESPONSE_HEADER_SIZE; + + ret = fcs_plat_copy_to_user(ctx.ecdsa_pub_key.pubkey, + d_buf + RESPONSE_HEADER_SIZE, priv->resp); + if (ret) { + LOG_ERR("Failed to copy ECDSA public key to user ret: %d\n", + ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.ecdsa_pub_key.pubkey_len, &priv->resp, + sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy ECDSA public key length to user ret: %d\n", + ret); + } + +copy_mbox_status: + if (fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status))) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } 
+ +free_dest: + priv->plat_data->svc_free_memory(priv, d_buf); + + return ret; +} +EXPORT_SYMBOL(hal_ecdsa_get_pubkey); + +FCS_HAL_INT hal_ecdsa_hash_sign(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *s_buf = NULL, *d_buf = NULL; + FCS_HAL_UINT hash_len = FCS_ECDSA_HASH_SIGN_MAX_LEN; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + /* Compare the session UUIDs to check for a match. Here suuid is set + * through hal_store_context + */ + ret = fcs_plat_uuid_compare(&priv->uuid_id, + &k_ctx->ecdsa_hash_sign.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch ret: %d\n", ret); + return ret; + } + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_ECDSA_HASH_SIGNING_INIT, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_ECDSA_HASH_SIGNING_INIT, ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("ECDSA Hash sign initialization failed mbox status:0x%X\n", + priv->status); + return ret; + } + + s_buf = priv->plat_data->svc_alloc_memory(priv, + ctx.ecdsa_hash_sign.src_len); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA hash sign ret: %d\n", + ret); + return ret; + } + + /* Copy the user space input data to the input data kernel buffer */ + ret = fcs_plat_copy_from_user(s_buf, ctx.ecdsa_hash_sign.src, + ctx.ecdsa_hash_sign.src_len); + if (ret) { + LOG_ERR("Failed to copy ECDSA sign data from user to kernel buffer ret: %d\n", + ret); + goto free_sbuf; + } + + d_buf = priv->plat_data->svc_alloc_memory(priv, hash_len); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA hash sign dst buffer ret: %d\n", + ret); + goto free_sbuf; + } + + + k_ctx->ecdsa_hash_sign.src = s_buf; + k_ctx->ecdsa_hash_sign.dst = d_buf; + k_ctx->ecdsa_hash_sign.dst_len = &hash_len; + + ret = priv->plat_data->svc_send_request( + priv, 
FCS_DEV_CRYPTO_ECDSA_HASH_SIGNING_FINALIZE, + 10 * FCS_REQUEST_TIMEOUT); + + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_ECDSA_HASH_SIGNING_FINALIZE, ret); + goto free_dbuf; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("ECDSA Hash sign initialization failed mbox status:0x%X\n", + priv->status); + goto copy_mbox_status; + } + + priv->resp -= RESPONSE_HEADER_SIZE; + + ret = fcs_plat_copy_to_user(ctx.ecdsa_hash_sign.dst_len, &priv->resp, + priv->resp); + if (ret) { + LOG_ERR("Failed to copy ECDSA sign data length to user ret: %d\n", + ret); + } + + ret = fcs_plat_copy_to_user(ctx.ecdsa_hash_sign.dst, + d_buf + RESPONSE_HEADER_SIZE, priv->resp); + if (ret) { + LOG_ERR("Failed to copy ECDSA sign data to user ret: %d\n", + ret); + } + +copy_mbox_status: + if (fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status))) { + LOG_ERR("Failed to copy mailbox status code to user\n"); + } + +free_dbuf: + priv->plat_data->svc_free_memory(priv, d_buf); +free_sbuf: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_ecdsa_hash_sign); + +FCS_HAL_INT hal_ecdsa_hash_verify(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *s_buf = NULL, *d_buf = NULL; + FCS_HAL_U32 total_sz; + FCS_HAL_UINT hash_len = FCS_ECDSA_HASH_SIGN_MAX_LEN; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + /* Compare the session UUIDs to check for a match. 
Here suuid is set + * through hal_store_context + */ + ret = fcs_plat_uuid_compare(&priv->uuid_id, + &k_ctx->ecdsa_hash_verify.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch ret: %d\n", ret); + return ret; + } + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_ECDSA_HASH_VERIFY_INIT, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_ECDSA_HASH_VERIFY_INIT, ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Failed to initialize ECDSA verify, mbox status:0x%X\n", + priv->status); + return ret; + } + + total_sz = ctx.ecdsa_hash_verify.src_len + + ctx.ecdsa_hash_verify.signature_len + + ctx.ecdsa_hash_verify.pubkey_len; + + s_buf = priv->plat_data->svc_alloc_memory(priv, total_sz); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA verify src buffer ret: %d\n", + ret); + return ret; + } + + /* Copy the user space input data to the input data kernel buffer */ + ret = fcs_plat_copy_from_user(s_buf, ctx.ecdsa_hash_verify.src, + ctx.ecdsa_hash_verify.src_len); + if (ret) { + LOG_ERR("Failed to copy ECDSA verify user data from user to sbuf ret: %d\n", + ret); + goto free_sbuf; + } + + /* Copy the user space signature data to the input data kernel buffer */ + ret = fcs_plat_copy_from_user(s_buf + ctx.ecdsa_hash_verify.src_len, + ctx.ecdsa_hash_verify.signature, + ctx.ecdsa_hash_verify.signature_len); + if (ret) { + LOG_ERR("Failed to copy ECDSA verify signature from user to sbuf ret: %d\n", + ret); + goto free_sbuf; + } + + if (ctx.ecdsa_hash_verify.key_id == 0) { + /* Copy the user space public key data to the input data kernel buffer */ + ret = fcs_plat_copy_from_user( + s_buf + ctx.ecdsa_hash_verify.src_len + + ctx.ecdsa_hash_verify.signature_len, + ctx.ecdsa_hash_verify.pubkey, + ctx.ecdsa_hash_verify.pubkey_len); + if (ret) { + LOG_ERR("ECDSA verify: copy from user failed for public key ret:%d\n", + ret); + goto 
free_sbuf; + } + } + + d_buf = priv->plat_data->svc_alloc_memory(priv, hash_len); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA verify dst buffer ret: %d\n", + ret); + goto free_sbuf; + } + + k_ctx->ecdsa_hash_verify.src = s_buf; + k_ctx->ecdsa_hash_verify.src_len = total_sz; + k_ctx->ecdsa_hash_verify.dst = d_buf; + k_ctx->ecdsa_hash_verify.dst_len = &hash_len; + + ret = priv->plat_data->svc_send_request( + priv, + FCS_DEV_CRYPTO_ECDSA_HASH_VERIFY_FINALIZE, + 10 * FCS_REQUEST_TIMEOUT); + + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_ECDSA_HASH_VERIFY_FINALIZE, ret); + goto free_dbuf; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Failed to perform ECDSA verify mbox status:0x%X\n", + priv->status); + goto copy_mbox_status; + } + + priv->resp -= RESPONSE_HEADER_SIZE; + + ret = fcs_plat_copy_to_user(ctx.ecdsa_hash_verify.dst_len, &priv->resp, + priv->resp); + if (ret) { + LOG_ERR("Failed to copy ECDSA verify data length to user ret: %d\n", + ret); + } + + ret = fcs_plat_copy_to_user(ctx.ecdsa_hash_verify.dst, + d_buf + RESPONSE_HEADER_SIZE, priv->resp); + if (ret) { + LOG_ERR("Failed to copy ECDSA verify data to user ret: %d\n", + ret); + } + +copy_mbox_status: + if (copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status))) { + LOG_ERR("Failed to copy mailbox status code to user\n"); + } + +free_dbuf: + priv->plat_data->svc_free_memory(priv, d_buf); +free_sbuf: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_ecdsa_hash_verify); + +static FCS_HAL_INT +hal_ecdsa_sha2data_sign_upfinal(FCS_HAL_VOID *src, FCS_HAL_U32 src_len, + FCS_HAL_VOID *dst, FCS_HAL_U32 dst_len, + struct fcs_cmd_context *const k_ctx, + FCS_HAL_U32 command) +{ + FCS_HAL_INT ret = 0; + + k_ctx->ecdsa_sha2_data_sign.src_len = src_len; + + ret = fcs_plat_copy_from_user(k_ctx->ecdsa_sha2_data_sign.src, src, + src_len); + if (ret) { + LOG_ERR("Failed to copy ECDSA sign data 
from user to src buffer ret: %d\n", + ret); + return ret; + } + + ret = priv->plat_data->svc_send_request(priv, command, + 10 * FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", command, ret); + goto goto_ret; + } + + ret = fcs_plat_copy_to_user(k_ctx->error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + goto goto_ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Failed to perform ECDSA sha2 data sign mbox status: 0x%x\n", + priv->status); + } + +goto_ret: + return ret; +} + +static FCS_HAL_INT +hal_ecdsa_sha2_data_sign_init(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_INIT, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_INIT, ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Failed to initialize ECDSA sign ret: %d\n", ret); + return ret; + } + + return ret; +} + +FCS_HAL_INT hal_ecdsa_sha2_data_sign(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *s_buf = NULL, *d_buf = NULL; + FCS_HAL_VOID *ip_ptr = NULL; + FCS_HAL_U32 s_buf_sz, d_buf_sz; + FCS_HAL_U32 remaining_sz; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + /* Compare the session UUIDs to check for a match. 
+ * Here suuid is set through hal_store_context + */ + ret = fcs_plat_uuid_compare(&priv->uuid_id, + &k_ctx->ecdsa_sha2_data_sign.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch while performing sha2 data sign ret: %d\n", + ret); + return ret; + } + + ret = hal_ecdsa_sha2_data_sign_init(k_ctx); + if (ret) { + LOG_ERR("Failed to initialize ECDSA sign ret: %d\n", ret); + return ret; + } + + s_buf_sz = (ctx.ecdsa_sha2_data_sign.src_len > + FCS_ECDSA_HSHA2_DATA_SIGN_BLOCK_SZ) ? + FCS_ECDSA_HSHA2_DATA_SIGN_BLOCK_SZ : + ctx.ecdsa_sha2_data_sign.src_len; + + s_buf = priv->plat_data->svc_alloc_memory(priv, s_buf_sz); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA sign src buffer ret: %d\n", + ret); + return ret; + } + + d_buf_sz = FCS_ECDSA_HASH_SIGN_MAX_LEN; + + d_buf = priv->plat_data->svc_alloc_memory(priv, d_buf_sz); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA sign dst buffer ret: %d\n", + ret); + goto free_sbuf; + } + + + remaining_sz = ctx.ecdsa_sha2_data_sign.src_len; + ip_ptr = ctx.ecdsa_sha2_data_sign.src; + + k_ctx->ecdsa_sha2_data_sign.src = s_buf; + k_ctx->ecdsa_sha2_data_sign.dst = d_buf; + k_ctx->ecdsa_sha2_data_sign.dst_len = &d_buf_sz; + + /** + * Perform the update and final stage of ECDSA SHA-2 data signing. + * + * This function processes the input data in blocks of size + * FCS_ECDSA_HSHA2_DATA_SIGN_BLOCK_SZ. For each block, it calls + * hal_ecdsa_sha2data_sign_upfinal to perform the cryptographic update. + * + * if the remaining_sz is less than FCS_ECDSA_HSHA2_DATA_SIGN_BLOCK_SZ, + * the final block is processed by sending the final command to the SDM. 
+ */ + while (remaining_sz > FCS_ECDSA_HSHA2_DATA_SIGN_BLOCK_SZ) { + ret = hal_ecdsa_sha2data_sign_upfinal( + ip_ptr, s_buf_sz, d_buf, d_buf_sz, k_ctx, + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE); + if (ret) { + LOG_ERR("Failed to perform SHA2 data sign update ret: %d\n", + ret); + goto copy_mbox_status; + } + remaining_sz -= FCS_ECDSA_HSHA2_DATA_SIGN_BLOCK_SZ; + ip_ptr += FCS_ECDSA_HSHA2_DATA_SIGN_BLOCK_SZ; + } + + ret = hal_ecdsa_sha2data_sign_upfinal( + ip_ptr, remaining_sz, d_buf, d_buf_sz, k_ctx, + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE); + if (ret) { + LOG_ERR("Failed to perform ECDSA SHA2 Data Signing final ret: %d\n", + ret); + goto copy_mbox_status; + } + + priv->resp -= RESPONSE_HEADER_SIZE; + + ret = fcs_plat_copy_to_user(ctx.ecdsa_sha2_data_sign.dst_len, + &priv->resp, sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy ECDSA sign data length to user ret: %d\n", + ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.ecdsa_sha2_data_sign.dst, + d_buf + RESPONSE_HEADER_SIZE, priv->resp); + if (ret) { + LOG_ERR("Failed to copy ECDSA sign data to user ret: %d\n", + ret); + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + + priv->plat_data->svc_free_memory(priv, d_buf); +free_sbuf: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_ecdsa_sha2_data_sign); + +static FCS_HAL_INT hal_ecdsa_sha2data_verify_upfinal( + FCS_HAL_VOID *ip_ptr, FCS_HAL_U32 ip_len, FCS_HAL_CHAR *signature, + FCS_HAL_U32 signature_len, FCS_HAL_CHAR *pubkey, FCS_HAL_U32 pubkey_len, + FCS_HAL_VOID *dst, FCS_HAL_U32 dst_len, + struct fcs_cmd_context *const k_ctx, FCS_HAL_U32 command) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_U32 copy_sz = ip_len; + + ret = fcs_plat_copy_from_user(k_ctx->ecdsa_sha2_data_verify.src, ip_ptr, + ip_len); + if (ret) { + LOG_ERR("Failed to 
copy ECDSA sign data from user to kernel buffer ret: %d\n", + ret); + return ret; + } + + if (command == FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_FINALIZE) { + copy_sz += signature_len + pubkey_len; + + ret = fcs_plat_copy_from_user( + k_ctx->ecdsa_sha2_data_verify.src + ip_len, signature, + signature_len); + if (ret) { + LOG_ERR("ECDSA sha2verify: signature from user failed ret: %d\n", + ret); + return ret; + } + + if (k_ctx->ecdsa_sha2_data_verify.key_id == 0) { + ret = fcs_plat_copy_from_user( + k_ctx->ecdsa_sha2_data_verify.src + ip_len + + signature_len, + pubkey, pubkey_len); + if (ret) { + LOG_ERR("ECDSA sha2verify: pubkey from user failed ret: %d\n", + ret); + return ret; + } + } + } + + ret = priv->plat_data->svc_send_request(priv, command, + 10 * FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", command, ret); + goto goto_ret; + } + + ret = fcs_plat_copy_to_user(k_ctx->error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + goto goto_ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Failed to perform ECDSA sha2 data verify mbox status:%x\n", + priv->status); + } + +goto_ret: + return ret; +} + +static FCS_HAL_INT +hal_ecdsa_sha2_data_verify_init(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_INIT, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_INIT, ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to initialize ECDSA verify ret: %d\n", + ret); + return ret; + } + + return ret; +} + +FCS_HAL_INT hal_ecdsa_sha2_data_verify(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *s_buf = NULL, *d_buf = NULL; + FCS_HAL_U32 s_buf_sz, d_buf_sz = FCS_ECDSA_SHA2_DATA_VERIFY_RSP_SZ; + 
FCS_HAL_U32 remaining_sz; + FCS_HAL_VOID *ip_ptr = NULL; + FCS_HAL_U32 command; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + /* Compare the session UUIDs to check for a match. Here suuid is set + * through hal_store_context + */ + ret = fcs_plat_uuid_compare(&priv->uuid_id, + &k_ctx->ecdsa_sha2_data_verify.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch in sha2 data verify request ret: %d\n", + ret); + return ret; + } + + ret = hal_ecdsa_sha2_data_verify_init(k_ctx); + if (ret) { + LOG_ERR("Failed to initialize ECDSA verify ret: %d\n", ret); + return ret; + } + + remaining_sz = ctx.ecdsa_sha2_data_verify.src_len + + ctx.ecdsa_sha2_data_verify.signature_len + + ctx.ecdsa_sha2_data_verify.pubkey_len; + + s_buf_sz = (remaining_sz > FCS_ECDSA_SHA2_DATA_VERIFY_BLOCK_SZ) ? + FCS_ECDSA_SHA2_DATA_VERIFY_BLOCK_SZ : + remaining_sz; + + s_buf = priv->plat_data->svc_alloc_memory(priv, s_buf_sz); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA sha2verify src buffer ret: %d\n", + ret); + return ret; + } + + d_buf = priv->plat_data->svc_alloc_memory(priv, d_buf_sz); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA sha2verify dst buffer ret: %d\n", + ret); + goto free_sbuf; + } + + k_ctx->ecdsa_sha2_data_verify.src = s_buf; + k_ctx->ecdsa_sha2_data_verify.dst = d_buf; + k_ctx->ecdsa_sha2_data_verify.dst_len = &d_buf_sz; + + ip_ptr = ctx.ecdsa_sha2_data_verify.src; + + /** + * Perform the update and final stage of ECDSA SHA-2 data verification. + * + * This function processes the input data in blocks of size + * FCS_ECDSA_SHA2_DATA_VERIFY_BLOCK_SZ. For each block, it calls + * hal_ecdsa_sha2data_verify_upfinal to perform the cryptographic update. + * + * if the remaining_sz is less than FCS_ECDSA_SHA2_DATA_VERIFY_BLOCK_SZ, + * the final block is processed by sending the final command to the SDM. 
+ */ + + /* Final stage requires minimum 8-bytes of source buffer to be sent */ + + while (remaining_sz > 0) { + if (remaining_sz > FCS_ECDSA_SHA2_DATA_VERIFY_BLOCK_SZ) { + if ((remaining_sz - + FCS_ECDSA_SHA2_DATA_VERIFY_BLOCK_SZ) >= + (CRYPTO_SERVICE_MIN_DATA_SIZE + + ctx.ecdsa_sha2_data_verify.signature_len + + ctx.ecdsa_sha2_data_verify.pubkey_len)) { + k_ctx->ecdsa_sha2_data_verify.src_len = + FCS_ECDSA_SHA2_DATA_VERIFY_BLOCK_SZ; + k_ctx->ecdsa_sha2_data_verify.user_data_sz = + FCS_ECDSA_SHA2_DATA_VERIFY_BLOCK_SZ; + } else { + k_ctx->ecdsa_sha2_data_verify.src_len = + remaining_sz - + CRYPTO_SERVICE_MIN_DATA_SIZE - + ctx.ecdsa_sha2_data_verify + .signature_len - + ctx.ecdsa_sha2_data_verify.pubkey_len; + k_ctx->ecdsa_sha2_data_verify.user_data_sz = + remaining_sz - + CRYPTO_SERVICE_MIN_DATA_SIZE - + ctx.ecdsa_sha2_data_verify + .signature_len - + ctx.ecdsa_sha2_data_verify.pubkey_len; + } + + command = FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_UPDATE; + } else { + k_ctx->ecdsa_sha2_data_verify.src_len = remaining_sz; + k_ctx->ecdsa_sha2_data_verify.user_data_sz = + remaining_sz - + ctx.ecdsa_sha2_data_verify.signature_len - + ctx.ecdsa_sha2_data_verify.pubkey_len; + command = + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_FINALIZE; + } + + ret = hal_ecdsa_sha2data_verify_upfinal( + ip_ptr, k_ctx->ecdsa_sha2_data_verify.user_data_sz, + ctx.ecdsa_sha2_data_verify.signature, + ctx.ecdsa_sha2_data_verify.signature_len, + ctx.ecdsa_sha2_data_verify.pubkey, + ctx.ecdsa_sha2_data_verify.pubkey_len, d_buf, d_buf_sz, + k_ctx, command); + if (ret) { + LOG_ERR("Failed to perform SHA2 Data verify final ret: %d\n", + ret); + goto free_mem; + } + + ip_ptr += k_ctx->ecdsa_sha2_data_verify.src_len; + remaining_sz -= k_ctx->ecdsa_sha2_data_verify.src_len; + } + + + priv->resp -= RESPONSE_HEADER_SIZE; + + ret = fcs_plat_copy_to_user(ctx.ecdsa_sha2_data_verify.dst, + d_buf + RESPONSE_HEADER_SIZE, priv->resp); + if (ret) { + LOG_ERR("Failed to copy ECDSA verify data to user ret: %d\n", + 
ret); + goto free_mem; + } + + ret = fcs_plat_copy_to_user(ctx.ecdsa_sha2_data_verify.dst_len, + &priv->resp, sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy ECDSA verify data length to user ret: %d\n", + ret); + } + + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + +free_mem: + priv->plat_data->svc_free_memory(priv, d_buf); +free_sbuf: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_ecdsa_sha2_data_verify); + +FCS_HAL_INT hal_hps_img_validate(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *s_buf = NULL; + struct fcs_cmd_context ctx; + FCS_HAL_UINT s_buf_len = 0, tsz = 0; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + tsz = sizeof(ctx.hps_img_validate.test); + s_buf_len = ctx.hps_img_validate.vab_cert_len + tsz; + + /* Allocate memory for the source buffer */ + s_buf = priv->plat_data->svc_alloc_memory(priv, s_buf_len); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for HPS image buffer ret: %d\n", + ret); + return ret; + } + + fcs_plat_memcpy(s_buf, &ctx.hps_img_validate.test, tsz); + + /* Copy the user space source data to the source buffer */ + ret = fcs_plat_copy_from_user(s_buf + tsz, + ctx.hps_img_validate.vab_cert, + ctx.hps_img_validate.vab_cert_len); + if (ret) { + LOG_ERR("Failed to copy HPS image validat from user to kernel buffer ret: %d\n", + ret); + goto free_mem; + } + + k_ctx->hps_img_validate.vab_cert = s_buf; + k_ctx->hps_img_validate.vab_cert_len = s_buf_len; + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_HPS_IMG_VALIDATE_REQUEST, FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_HPS_IMG_VALIDATE_REQUEST, ret); + goto free_mem; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, failed to perform HPS image 
validation ret: %d\n", + ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.hps_img_validate.resp, &priv->resp, + sizeof(FCS_HAL_U32)); + if (ret) { + LOG_ERR("Failed to copy Image validation response to user ret: %d\n", + ret); + goto copy_mbox_status; + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + +free_mem: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_hps_img_validate); + +#ifdef CONFIG_ALTERA_SOCFPGA_FCS_DEBUG +FCS_HAL_INT hal_generic_mbox(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_INT resp_size = 0, max_res_size = MBOX_SEND_RSP_MAX_SZ; + FCS_HAL_VOID *s_buf = NULL, *d_buf = NULL; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + if (ctx.mbox.cmd_data_sz % 4) { + pr_err("Command data size %d is Invalid, must be 4 byte aligned\n", + ctx.mbox.cmd_data_sz); + return -EINVAL; + } + + ret = fcs_plat_copy_from_user(&resp_size, ctx.mbox.resp_data_sz, + sizeof(FCS_HAL_INT)); + if (ret) { + pr_err("Failed to copy response data size from user to kernel buffer ret: %d\n", + ret); + return ret; + } + + if (k_ctx->mbox.cmd_data_sz) { + s_buf = priv->plat_data->svc_alloc_memory(priv, + ctx.mbox.cmd_data_sz); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + pr_err("Failed to allocate memory for generic mailbox source buffer ret: %d\n", + ret); + return ret; + } + + k_ctx->mbox.cmd_data = s_buf; + + ret = fcs_plat_copy_from_user(s_buf, ctx.mbox.cmd_data, + ctx.mbox.cmd_data_sz); + if (ret) { + pr_err("Failed to copy generic mailbox data from user to kernel buffer ret: %d\n", + ret); + goto free_mem; + } + } + + if (k_ctx->mbox.resp_data_sz) { + d_buf = priv->plat_data->svc_alloc_memory(priv, max_res_size); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + pr_err("Failed to allocate memory for 
generic mailbox destination buffer ret: %d\n", + ret); + goto free_src; + } + + k_ctx->mbox.resp_data = d_buf; + k_ctx->mbox.resp_data_sz = &max_res_size; + } + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_MBOX_SEND, + FCS_MAX_RESP_MS * + FCS_COMPLETED_TIMEOUT); + if (ret) { + pr_err("Failed to send the cmd=%d,ret=%d\n", FCS_DEV_MBOX_SEND, + ret); + goto free_mem; + } + + if (priv->status) { + ret = -EIO; + pr_err("Mailbox error, generic mailbox request failed ret: %d\n", + ret); + goto copy_mbox_status; + } + + if (d_buf) { + ret = fcs_plat_copy_to_user(ctx.mbox.resp_data, d_buf, + priv->resp); + if (ret) { + pr_err("Failed to copy generic mailbox response to user ret: %d\n", + ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.mbox.resp_data_sz, + &priv->resp, + sizeof(priv->resp)); + if (ret) { + pr_err("Failed to copy generic mailbox response size to user ret: %d\n", + ret); + } + } + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + pr_err("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + +free_mem: + if (d_buf) + priv->plat_data->svc_free_memory(priv, d_buf); +free_src: + if (s_buf) + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_generic_mbox); +#endif + +FCS_HAL_INT hal_aes_streaming_init(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + + if (k_ctx->aes.mode == FCS_AES_BLOCK_MODE_GHASH || + k_ctx->aes.mode == FCS_AES_BLOCK_MODE_GCM) { + if (k_ctx->aes.tag_len == GCS_TAG_LEN_32) { + k_ctx->aes.tag_len = MAP_GCM_TAG_32_TO_SDM_TAG; + } else if (k_ctx->aes.tag_len == GCS_TAG_LEN_64) { + k_ctx->aes.tag_len = MAP_GCM_TAG_64_TO_SDM_TAG; + } else if (k_ctx->aes.tag_len == GCS_TAG_LEN_96) { + k_ctx->aes.tag_len = MAP_GCM_TAG_96_TO_SDM_TAG; + } else if (k_ctx->aes.tag_len == GCS_TAG_LEN_128) { + k_ctx->aes.tag_len = MAP_GCM_TAG_128_TO_SDM_TAG; + } else { + ret = -EINVAL; + LOG_ERR("Invalid 
tag length in AES streaming request ret: %d\n", + ret); + return ret; + } + } + + /* Compare the session UUIDs to check for a match.*/ + ret = fcs_plat_uuid_compare(&priv->uuid_id, + &k_ctx->ecdsa_sha2_data_verify.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch in aes crypt request ret: %d\n", + ret); + return ret; + } + + /* Initialize the AES crypt */ + ret = hal_aes_crypt_init(k_ctx); + if (ret) + LOG_ERR("Failed to perform AES crypt init ret: %d\n", ret); + + return ret; +} +EXPORT_SYMBOL(hal_aes_streaming_init); + +FCS_HAL_INT hal_aes_streaming_update(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + FCS_HAL_U32 op_len = 0; + + /* Compare the session UUIDs to check for a match.*/ + ret = fcs_plat_uuid_compare(&priv->uuid_id, + &k_ctx->ecdsa_sha2_data_verify.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch in aes crypt request ret: %d\n", + ret); + return ret; + } + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + k_ctx->aes.op_len = &op_len; + + ret = hal_aes_crypt_update_final(k_ctx->aes.input, k_ctx->aes.ip_len, + k_ctx->aes.aad, k_ctx->aes.aad_len, + NULL, 0, 0, k_ctx->aes.output, + k_ctx->aes.mode, k_ctx, + FCS_DEV_CRYPTO_AES_CRYPT_UPDATE); + if (ret) + LOG_ERR("Failed to perform AES crypt update ret: %d\n", ret); + + if (k_ctx->aes.mode != FCS_AES_BLOCK_MODE_GHASH) { + ret = fcs_plat_copy_to_user(ctx.aes.op_len, &op_len, sizeof(op_len)); + if (ret) + LOG_ERR("Failed to copy AES data from kernel to user buffer ret: %d\n", + ret); + } + + return ret; +} +EXPORT_SYMBOL(hal_aes_streaming_update); + +FCS_HAL_INT hal_aes_streaming_final(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + FCS_HAL_UINT src_tag_len = 0, dst_tag_len = 0; + FCS_HAL_U32 op_len = 0; + + /* Compare the session UUIDs to check for a match.*/ + ret = fcs_plat_uuid_compare(&priv->uuid_id, + &k_ctx->ecdsa_sha2_data_verify.suuid); + if 
(!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch in aes crypt request ret: %d\n", + ret); + return ret; + } + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + if (k_ctx->aes.crypt == FCS_AES_ENCRYPT) { + src_tag_len = 0; + dst_tag_len = GCM_TAG_LEN; + } else { + src_tag_len = GCM_TAG_LEN; + dst_tag_len = 0; + } + + k_ctx->aes.op_len = &op_len; + + ret = hal_aes_crypt_update_final(k_ctx->aes.input, k_ctx->aes.ip_len, + k_ctx->aes.aad, k_ctx->aes.aad_len, + k_ctx->aes.tag, src_tag_len, + dst_tag_len, k_ctx->aes.output, + k_ctx->aes.mode, k_ctx, + FCS_DEV_CRYPTO_AES_CRYPT_FINAL); + if (ret) { + LOG_ERR("Failed to perform AES crypt finalize ret: %d\n", ret); + return ret; + } + + if (k_ctx->aes.mode == FCS_AES_BLOCK_MODE_GCM) + op_len -= dst_tag_len; + else if (k_ctx->aes.mode == FCS_AES_BLOCK_MODE_GHASH) + op_len = 0; + + /* Copy the output tag to the user buffer */ + if (k_ctx->aes.mode != FCS_AES_BLOCK_MODE_GHASH) { + ret = fcs_plat_copy_to_user(ctx.aes.op_len, &op_len, + sizeof(op_len)); + if (ret) + LOG_ERR("Failed to copy AES data from kernel to user buffer ret: %d\n", + ret); + } + + return ret; +} +EXPORT_SYMBOL(hal_aes_streaming_final); + +FCS_HAL_INT +hal_ecdsa_data_sign_streaming_init(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret; + + /* Compare the session UUIDs to check for a match. 
+ */ + ret = fcs_plat_uuid_compare(&priv->uuid_id, + &k_ctx->ecdsa_sha2_data_verify.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch in sha2 data verify request ret: %d\n", + ret); + return ret; + } + ret = hal_ecdsa_sha2_data_sign_init(k_ctx); + if (ret) { + LOG_ERR("Failed to initialize ECDSA sign ret: %d\n", ret); + return ret; + } + + return ret; +} +EXPORT_SYMBOL(hal_ecdsa_data_sign_streaming_init); + +FCS_HAL_INT +hal_ecdsa_data_sign_streaming_update(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + FCS_HAL_VOID *s_buf = NULL; + FCS_HAL_U32 s_buf_sz = 0; + FCS_HAL_VOID *d_buf = NULL; + FCS_HAL_U32 d_buf_sz = 0; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + s_buf_sz = k_ctx->ecdsa_sha2_data_sign.src_len; + s_buf = priv->plat_data->svc_alloc_memory(priv, s_buf_sz); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA sign src buffer ret: %d\n", + ret); + return ret; + } + + d_buf_sz = FCS_ECDSA_HASH_SIGN_MAX_LEN; + d_buf = priv->plat_data->svc_alloc_memory(priv, d_buf_sz); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA sign dst buffer ret: %d\n", + ret); + goto free_src; + } + + k_ctx->ecdsa_sha2_data_sign.src = s_buf; + k_ctx->ecdsa_sha2_data_sign.dst = d_buf; + k_ctx->ecdsa_sha2_data_sign.src_len = s_buf_sz; + k_ctx->ecdsa_sha2_data_sign.dst_len = &d_buf_sz; + ret = hal_ecdsa_sha2data_sign_upfinal( + ctx.ecdsa_sha2_data_sign.src, s_buf_sz, d_buf, d_buf_sz, k_ctx, + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE); + if (ret) { + LOG_ERR("Failed to perform ECDSA sha2 data sign update ret: %d\n", + ret); + return ret; + } + + priv->plat_data->svc_free_memory(priv, d_buf); +free_src: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_ecdsa_data_sign_streaming_update); + +FCS_HAL_INT +hal_ecdsa_data_sign_streaming_final(struct fcs_cmd_context *const k_ctx) +{ + 
FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + FCS_HAL_VOID *s_buf = NULL; + FCS_HAL_U32 s_buf_sz = 0; + FCS_HAL_VOID *d_buf = NULL; + FCS_HAL_U32 d_buf_sz = 0; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + s_buf_sz = k_ctx->ecdsa_sha2_data_sign.src_len; + s_buf = priv->plat_data->svc_alloc_memory(priv, s_buf_sz); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA sign src buffer ret: %d\n", + ret); + return ret; + } + + d_buf_sz = FCS_ECDSA_HASH_SIGN_MAX_LEN; + d_buf = priv->plat_data->svc_alloc_memory(priv, d_buf_sz); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA sign dst buffer ret: %d\n", + ret); + goto free_src; + } + + k_ctx->ecdsa_sha2_data_sign.src = s_buf; + k_ctx->ecdsa_sha2_data_sign.dst = d_buf; + k_ctx->ecdsa_sha2_data_sign.src_len = s_buf_sz; + k_ctx->ecdsa_sha2_data_sign.dst_len = &d_buf_sz; + + ret = hal_ecdsa_sha2data_sign_upfinal( + ctx.ecdsa_sha2_data_sign.src, s_buf_sz, d_buf, d_buf_sz, k_ctx, + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE); + if (ret) { + LOG_ERR("Failed to perform ECDSA sha2 data sign finalize ret: %d\n", + ret); + goto free_dst; + } + + priv->resp -= RESPONSE_HEADER_SIZE; + + ret = fcs_plat_copy_to_user(ctx.ecdsa_sha2_data_sign.dst_len, + &priv->resp, sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy ECDSA sign data length to user ret: %d\n", + ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.ecdsa_sha2_data_sign.dst, + d_buf + RESPONSE_HEADER_SIZE, priv->resp); + if (ret) + LOG_ERR("Failed to copy ECDSA sign data to user ret: %d\n", + ret); + +copy_mbox_status: + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + +free_dst: + priv->plat_data->svc_free_memory(priv, d_buf); +free_src: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} 
+EXPORT_SYMBOL(hal_ecdsa_data_sign_streaming_final); + +FCS_HAL_INT +hal_ecdsa_data_verify_streaming_init(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret; + + /* Compare the session UUIDs to check for a match. + */ + ret = fcs_plat_uuid_compare(&priv->uuid_id, + &k_ctx->ecdsa_sha2_data_verify.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch in sha2 data verify request ret: %d\n", + ret); + return ret; + } + + ret = hal_ecdsa_sha2_data_verify_init(k_ctx); + if (ret) { + LOG_ERR("Failed to initialize ECDSA verify ret: %d\n", ret); + return ret; + } + + return ret; +} +EXPORT_SYMBOL(hal_ecdsa_data_verify_streaming_init); + +FCS_HAL_INT +hal_ecdsa_data_verify_streaming_update(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + FCS_HAL_VOID *s_buf = NULL; + FCS_HAL_VOID *d_buf = NULL; + FCS_HAL_U32 d_buf_sz = 0; + FCS_HAL_CHAR *ip_ptr = NULL; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + s_buf = priv->plat_data->svc_alloc_memory( + priv, ctx.ecdsa_sha2_data_verify.src_len); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA sign src buffer ret: %d\n", + ret); + return ret; + } + + d_buf_sz = FCS_ECDSA_HASH_SIGN_MAX_LEN; + d_buf = priv->plat_data->svc_alloc_memory(priv, d_buf_sz); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA sign dst buffer ret: %d\n", + ret); + goto free_sbuf; + } + + ip_ptr = k_ctx->ecdsa_sha2_data_verify.src; + k_ctx->ecdsa_sha2_data_verify.src = s_buf; + k_ctx->ecdsa_sha2_data_verify.dst = d_buf; + k_ctx->ecdsa_sha2_data_verify.dst_len = &d_buf_sz; + + ret = hal_ecdsa_sha2data_verify_upfinal( + ip_ptr, ctx.ecdsa_sha2_data_verify.user_data_sz, NULL, 0, NULL, + 0, d_buf, d_buf_sz, k_ctx, + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_UPDATE); + if (ret) + LOG_ERR("Failed to perform ECDSA sha2 data verify update ret: %d\n", + ret); + + priv->plat_data->svc_free_memory(priv, d_buf); 
+free_sbuf: + priv->plat_data->svc_free_memory(priv, s_buf); + return ret; +} +EXPORT_SYMBOL(hal_ecdsa_data_verify_streaming_update); + +FCS_HAL_INT +hal_ecdsa_data_verify_streaming_final(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + FCS_HAL_VOID *s_buf = NULL; + FCS_HAL_U32 s_buf_sz = 0; + FCS_HAL_VOID *d_buf = NULL; + FCS_HAL_U32 d_buf_sz = 0; + FCS_HAL_CHAR *ip_ptr = NULL; + FCS_HAL_U32 ip_len = 0; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + s_buf_sz = ctx.ecdsa_sha2_data_verify.src_len; + s_buf = priv->plat_data->svc_alloc_memory(priv, s_buf_sz); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA sign src buffer ret: %d\n", + ret); + return ret; + } + + d_buf_sz = FCS_ECDSA_HASH_SIGN_MAX_LEN; + d_buf = priv->plat_data->svc_alloc_memory(priv, d_buf_sz); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for ECDSA sign dst buffer ret: %d\n", + ret); + goto free_src; + } + + ip_ptr = k_ctx->ecdsa_sha2_data_verify.src; + ip_len = ctx.ecdsa_sha2_data_verify.user_data_sz; + + k_ctx->ecdsa_sha2_data_verify.src = s_buf; + k_ctx->ecdsa_sha2_data_verify.src_len = s_buf_sz; + k_ctx->ecdsa_sha2_data_verify.dst = d_buf; + k_ctx->ecdsa_sha2_data_verify.src_len = s_buf_sz; + k_ctx->ecdsa_sha2_data_verify.dst_len = &d_buf_sz; + + ret = hal_ecdsa_sha2data_verify_upfinal( + ip_ptr, ip_len, k_ctx->ecdsa_sha2_data_verify.signature, + k_ctx->ecdsa_sha2_data_verify.signature_len, + k_ctx->ecdsa_sha2_data_verify.pubkey, + k_ctx->ecdsa_sha2_data_verify.pubkey_len, d_buf, d_buf_sz, + k_ctx, FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_FINALIZE); + if (ret) { + LOG_ERR("Failed to perform ECDSA sha2 data sign finalize ret: %d\n", + ret); + goto free_dst; + } + + priv->resp -= RESPONSE_HEADER_SIZE; + + ret = fcs_plat_copy_to_user(ctx.ecdsa_sha2_data_verify.dst_len, + &priv->resp, sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy ECDSA verify data 
length to user ret: %d\n", + ret); + goto free_dst; + } + + ret = fcs_plat_copy_to_user(ctx.ecdsa_sha2_data_verify.dst, + d_buf + RESPONSE_HEADER_SIZE, priv->resp); + if (ret) { + LOG_ERR("Failed to copy ECDSA verify data to user ret: %d\n", + ret); + } + +free_dst: + priv->plat_data->svc_free_memory(priv, d_buf); +free_src: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_ecdsa_data_verify_streaming_final); + +FCS_HAL_INT hal_digest_streaming_init(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + ret = fcs_plat_uuid_compare(&priv->uuid_id, &k_ctx->dgst.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch ret: %d\n", ret); + return ret; + } + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_GET_DIGEST_INIT, FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_GET_DIGEST_INIT, ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to initialize digest ret: %d\n", + ret); + } + + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + return ret; + } + return ret; +} +EXPORT_SYMBOL(hal_digest_streaming_init); + +FCS_HAL_INT hal_digest_streaming_update(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *d_buf = NULL, *s_buf = NULL; + struct fcs_cmd_context ctx; + FCS_HAL_U32 ldigest_len = DIGEST_CMD_MAX_SZ; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + k_ctx->dgst.digest_len = &ldigest_len; + + s_buf = priv->plat_data->svc_alloc_memory(priv, DIGEST_CMD_MAX_SZ); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for digest input data kernel buffer ret: %d\n", + ret); + return ret; + } + + ret = 
fcs_plat_copy_from_user(s_buf, k_ctx->dgst.src, + k_ctx->dgst.src_len); + if (ret) { + LOG_ERR("Failed to copy data from user ret: %d\n", ret); + goto free_sbuf; + } + k_ctx->dgst.src = s_buf; + + d_buf = priv->plat_data->svc_alloc_memory(priv, + *k_ctx->dgst.digest_len); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for digest output kernel buffer. ret: %d\n", + ret); + goto free_sbuf; + } + k_ctx->dgst.digest = d_buf; + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_GET_DIGEST_UPDATE, + 10 * FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_GET_DIGEST_UPDATE, ret); + goto free_dest; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to perform digest ret: %d\n", + ret); + } + + ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + +free_dest: + priv->plat_data->svc_free_memory(priv, d_buf); +free_sbuf: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_digest_streaming_update); + +FCS_HAL_INT hal_digest_streaming_final(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *d_buf = NULL, *s_buf = NULL; + struct fcs_cmd_context ctx; + FCS_HAL_U32 ldigest_len = DIGEST_CMD_MAX_SZ; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + s_buf = priv->plat_data->svc_alloc_memory(priv, DIGEST_CMD_MAX_SZ); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for digest input data kernel buffer ret: %d\n", + ret); + return ret; + } + + ret = fcs_plat_copy_from_user(s_buf, k_ctx->dgst.src, + k_ctx->dgst.src_len); + if (ret) { + LOG_ERR("Failed to copy data from user ret: %d\n", ret); + goto free_sbuf; + } + + k_ctx->dgst.src = s_buf; + + d_buf = priv->plat_data->svc_alloc_memory(priv, DIGEST_CMD_MAX_SZ); + if (IS_ERR(d_buf)) { + 
ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for digest output kernel buffer ret: %d\n", + ret); + goto free_sbuf; + } + + k_ctx->dgst.digest = d_buf; + k_ctx->dgst.digest_len = &ldigest_len; + + ret = priv->plat_data->svc_send_request(priv, + FCS_DEV_CRYPTO_GET_DIGEST_FINAL, + 10 * FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_GET_DIGEST_FINAL, ret); + goto copy_mbox_status; + } + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to finalize digest ret: %d\n", + ret); + goto copy_mbox_status; + } + + /* response length includes a header; strip it before copying the digest back */ priv->resp -= RESPONSE_HEADER_SIZE; + + ret = fcs_plat_copy_to_user(ctx.dgst.digest, + k_ctx->dgst.digest + RESPONSE_HEADER_SIZE, + priv->resp); + if (ret) { + LOG_ERR("Failed to copy digest output to user ret: %d\n", ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.dgst.digest_len, &priv->resp, + sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy digest output length to user ret: %d\n", + ret); + } + +copy_mbox_status: + /* NOTE(review): on the -EIO paths a successful status copy resets 'ret' to 0, so the mailbox error is not propagated -- confirm intended */ ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + + priv->plat_data->svc_free_memory(priv, d_buf); + +free_sbuf: + priv->plat_data->svc_free_memory(priv, s_buf); + + return ret; +} +EXPORT_SYMBOL(hal_digest_streaming_final); + +/* Compute a digest over a large source buffer by chunking: one init, one update per CRYPTO_DIGEST_MAX_SZ chunk, and a final on the remainder. */ FCS_HAL_INT hal_get_digest(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + FCS_HAL_U32 remaining_bytes = 0, bytes_transfered = 0; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + ret = hal_digest_init(k_ctx); + if (ret) { + LOG_ERR("Failed to initialize digest ret: %d\n", ret); + return ret; + } + + remaining_bytes = k_ctx->dgst.src_len; + while (remaining_bytes > 0) { + if (remaining_bytes > CRYPTO_DIGEST_MAX_SZ) { + k_ctx->dgst.src_len = CRYPTO_DIGEST_MAX_SZ; + ret = hal_digest_update(k_ctx); + } else { + 
k_ctx->dgst.src_len = remaining_bytes; + ret = hal_digest_final(k_ctx); + } + if (ret) { + LOG_ERR("Failed to perform digest ret: %d\n", ret); + return ret; + } + + /* advance the caller's source pointer by what was just consumed and restore the output fields from the saved context */ remaining_bytes -= k_ctx->dgst.src_len; + bytes_transfered += k_ctx->dgst.src_len; + k_ctx->dgst.src = ctx.dgst.src + bytes_transfered; + k_ctx->dgst.digest = ctx.dgst.digest; + k_ctx->dgst.digest_len = ctx.dgst.digest_len; + } + + return ret; +} +EXPORT_SYMBOL(hal_get_digest); + +/* Start a streaming MAC-verify session: check the caller's session UUID, then issue FCS_DEV_CRYPTO_MAC_VERIFY_INIT. */ FCS_HAL_INT hal_mac_verify_streaming_init(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + struct fcs_cmd_context ctx; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + /* NOTE(review): the session UUID is read from the rng member of the context for a MAC-verify request -- confirm suuid is a field shared across request types */ ret = fcs_plat_uuid_compare(&priv->uuid_id, &k_ctx->rng.suuid); + if (!ret) { + ret = -EINVAL; + LOG_ERR("session UUID Mismatch ret: %d\n", ret); + return ret; + } + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_MAC_VERIFY_INIT, FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_MAC_VERIFY_INIT, ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to initialize digest ret: %d\n", + ret); + } + + /* NOTE(review): a successful copy below overwrites the -EIO set above -- confirm intended */ ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } + + return ret; +} +EXPORT_SYMBOL(hal_mac_verify_streaming_init); + +/* Feed one chunk of user data into an in-progress streaming MAC-verify via FCS_DEV_CRYPTO_MAC_VERIFY_UPDATE. */ FCS_HAL_INT hal_mac_verify_streaming_update(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *s_buf = NULL, *d_buf = NULL; + struct fcs_cmd_context ctx; + FCS_HAL_U32 data_size = MAC_CMD_MAX_SZ; + /* assumes the verify output fits in 32 bytes -- TODO confirm */ FCS_HAL_U32 out_sz = 32; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + s_buf = priv->plat_data->svc_alloc_memory(priv, MAC_CMD_MAX_SZ); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for mac input data kernel buffer ret: %d\n", + ret); + return ret; + } + + d_buf = priv->plat_data->svc_alloc_memory(priv, 
MAC_CMD_MAX_SZ); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for mac output kernel buffer ret: %d\n", + ret); + goto free_s_buf; + } + + /* NOTE(review): copies a full MAC_CMD_MAX_SZ (data_size) regardless of how much the caller supplied -- confirm src is always that large */ ret = fcs_plat_copy_from_user(s_buf, k_ctx->mac_verify.src, data_size); + if (ret) { + LOG_ERR("Failed to copy input data from user to kernel buffer ret: %d\n", + ret); + goto free_dest; + } + + k_ctx->mac_verify.src = s_buf; + k_ctx->mac_verify.dst = d_buf; + k_ctx->mac_verify.dst_size = &out_sz; + + ret = priv->plat_data->svc_send_request( + priv, FCS_DEV_CRYPTO_MAC_VERIFY_UPDATE, + 100 * FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_MAC_VERIFY_UPDATE, ret); + goto free_dest; + } + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to perform MAC verify ret: %d\n", + ret); + } + + /* NOTE(review): a successful copy below overwrites the -EIO set above -- confirm intended */ ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } +free_dest: + priv->plat_data->svc_free_memory(priv, d_buf); +free_s_buf: + priv->plat_data->svc_free_memory(priv, s_buf); + return ret; +} +EXPORT_SYMBOL(hal_mac_verify_streaming_update); + +/* Finalize a streaming MAC-verify: stage the final source buffer and issue FCS_DEV_CRYPTO_MAC_VERIFY_FINAL. */ FCS_HAL_INT hal_mac_verify_streaming_final(struct fcs_cmd_context *const k_ctx) +{ + FCS_HAL_INT ret = 0; + FCS_HAL_VOID *s_buf = NULL, *d_buf = NULL; + struct fcs_cmd_context ctx; + FCS_HAL_U32 data_size = MAC_CMD_MAX_SZ; + FCS_HAL_U32 out_sz = 32; + + fcs_plat_memcpy(&ctx, k_ctx, sizeof(struct fcs_cmd_context)); + + s_buf = priv->plat_data->svc_alloc_memory(priv, MAC_CMD_MAX_SZ); + if (IS_ERR(s_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for mac input data kernel buffer ret: %d\n", + ret); + return ret; + } + + d_buf = priv->plat_data->svc_alloc_memory(priv, MAC_CMD_MAX_SZ); + if (IS_ERR(d_buf)) { + ret = -ENOMEM; + LOG_ERR("Failed to allocate memory for mac output kernel buffer ret: %d\n", + ret); + goto free_s_buf; + } + + data_size = + k_ctx->mac_verify.src_size - 
k_ctx->mac_verify.user_data_size; + + /* NOTE(review): data_size computed above is never used below; the copy uses the full src_size -- confirm */ ret = fcs_plat_copy_from_user(s_buf, k_ctx->mac_verify.src, + k_ctx->mac_verify.src_size); + if (ret) { + LOG_ERR("Failed to copy input data from user to kernel buffer ret: %d\n", + ret); + goto free_dest; + } + + k_ctx->mac_verify.src = s_buf; + k_ctx->mac_verify.dst = d_buf; + k_ctx->mac_verify.dst_size = &out_sz; + + ret = priv->plat_data->svc_send_request(priv, + FCS_DEV_CRYPTO_MAC_VERIFY_FINAL, + 100 * FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_CRYPTO_MAC_VERIFY_FINAL, ret); + goto free_dest; + } + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to perform MAC verify ret: %d\n", + ret); + goto copy_mbox_status; + } + + /* NOTE(review): unlike hal_digest_streaming_final, priv->resp is not reduced by RESPONSE_HEADER_SIZE before this copy even though the source is offset by the header -- confirm the length is correct */ ret = fcs_plat_copy_to_user( + ctx.mac_verify.dst, + k_ctx->mac_verify.dst + RESPONSE_HEADER_SIZE, priv->resp); + if (ret) { + LOG_ERR("Failed to copy digest output to user ret: %d\n", ret); + goto copy_mbox_status; + } + + ret = fcs_plat_copy_to_user(ctx.mac_verify.dst_size, &priv->resp, + sizeof(priv->resp)); + if (ret) { + LOG_ERR("Failed to copy digest output length to user ret: %d\n", + ret); + } + +copy_mbox_status: + /* NOTE(review): a successful status copy here clears 'ret', dropping any earlier -EIO -- confirm intended */ ret = fcs_plat_copy_to_user(ctx.error_code_addr, &priv->status, + sizeof(priv->status)); + if (ret) { + LOG_ERR("Failed to copy mailbox status code to user ret: %d\n", + ret); + } +free_dest: + priv->plat_data->svc_free_memory(priv, d_buf); +free_s_buf: + priv->plat_data->svc_free_memory(priv, s_buf); + return ret; +} +EXPORT_SYMBOL(hal_mac_verify_streaming_final); + +/* Acquire the shared command context; takes the device mutex, which hal_release_fcs_cmd_ctx() drops. */ struct fcs_cmd_context *hal_get_fcs_cmd_ctx(void) +{ + fcs_plat_mutex_lock(priv); + return &priv->k_ctx; +} +EXPORT_SYMBOL(hal_get_fcs_cmd_ctx); + +/* Zero the command context contents (does not release the mutex). */ FCS_HAL_VOID hal_destroy_fcs_cmd_ctx(struct fcs_cmd_context *const k_ctx) +{ + fcs_plat_memset(k_ctx, 0, sizeof(struct fcs_cmd_context)); +} +EXPORT_SYMBOL(hal_destroy_fcs_cmd_ctx); + +/* Release the device mutex taken by hal_get_fcs_cmd_ctx(); k_ctx itself is unused here. */ FCS_HAL_VOID hal_release_fcs_cmd_ctx(struct fcs_cmd_context *const k_ctx) +{ + fcs_plat_mutex_unlock(priv); +} 
+EXPORT_SYMBOL(hal_release_fcs_cmd_ctx); + +/* Query the secure-firmware (ATF) build version via FCS_DEV_ATF_VERSION and signal service-task completion. */ static FCS_HAL_INT hal_read_version_from_atf(FCS_HAL_VOID) +{ + FCS_HAL_INT ret = 0; + + ret = priv->plat_data->svc_send_request(priv, FCS_DEV_ATF_VERSION, + FCS_REQUEST_TIMEOUT); + if (ret) { + LOG_ERR("Failed to send the cmd=%d,ret=%d\n", + FCS_DEV_ATF_VERSION, ret); + return ret; + } + + if (priv->status) { + ret = -EIO; + LOG_ERR("Mailbox error, Failed to read ATF version ret: %d\n", + ret); + } + + priv->plat_data->svc_task_done(priv); + + return ret; +} + +/* Allocate the module-wide private state (devm-managed) and run platform init; the ATF version read is a follow-on step. */ FCS_HAL_INT hal_fcs_init(FCS_HAL_DEV *dev) +{ + FCS_HAL_INT ret; + + priv = devm_kzalloc(dev, sizeof(struct socfpga_fcs_priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = fcs_plat_init(dev, priv); + if (ret) { + LOG_ERR("Failed to initialize platform data ret: %d\n", ret); + return ret; + } + + /* NOTE(review): return value ignored -- init succeeds even if the version read fails; confirm best-effort is intended */ hal_read_version_from_atf(); + + return ret; +} + +/* Tear down platform state; priv is devm-allocated, so only the pointer is cleared here. */ FCS_HAL_VOID hal_fcs_cleanup(void) +{ + fcs_plat_cleanup(priv); + priv = NULL; +} diff --git a/drivers/misc/socfpga_fcs_plat.c b/drivers/misc/socfpga_fcs_plat.c new file mode 100644 index 0000000000000..ec6572df95c00 --- /dev/null +++ b/drivers/misc/socfpga_fcs_plat.c @@ -0,0 +1,910 @@ +// SPDX-License-Identifier: GPL-2.0-or-later OR MIT +/* + * Copyright (C) 2024 Intel Corporation + */ + +/* NOTE(review): header-style include guard in a .c source file, and the bare '#include' lines below have no target (possibly lost during extraction) -- confirm against the original patch */ #ifndef __SOCFPA_HAL_LL_H +#define __SOCFPA_HAL_LL_H + +#include +#include "socfpga_fcs_plat.h" + +#include +#include + +#define INVALID_STATUS 0xFFFFFFFF +#define INVALID_ID 0xFFFFFFFF + +#define DIGEST_PARAM_SZ 4 +#define DIGEST_SZ_OFFSET 4 +#define CRYPTO_ECC_PARAM_SZ 4 + +#define MSG_RETRY 3 +#define RETRY_SLEEP_MS 1 +#define TIMEOUT 1000 + +#define FCS_SVC_CLIENT_NAME "socfpga-fcs" + +FCS_HAL_VOID fcs_plat_reinit_completion(FCS_HAL_COMPLETION *completion) +{ + reinit_completion(completion); +} + +/* Service-channel callback for the ATF version request: on OK captures three version words; on ERROR records the mailbox error; always completes priv->completion. */ static void fcs_atf_version_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct socfpga_fcs_priv *priv = client->priv; + + priv->status = data->status; + if (data->status == 
BIT(SVC_STATUS_OK)) { + priv->status = 0; + priv->atf_version[0] = *((FCS_HAL_UINT *)data->kaddr1); + priv->atf_version[1] = *((FCS_HAL_UINT *)data->kaddr2); + priv->atf_version[2] = *((FCS_HAL_UINT *)data->kaddr3); + } else if (data->status == BIT(SVC_STATUS_ERROR)) { + priv->status = *((FCS_HAL_UINT *)data->kaddr1); + dev_err(client->dev, "mbox_error=0x%x\n", priv->status); + } + + complete(&priv->completion); +} + +/* Copy to a destination that may be user- or kernel-space: access_ok() selects copy_to_user(); otherwise the pointer is treated as kernel memory and memcpy'd. Returns 0 or -EFAULT. */ FCS_HAL_INT fcs_plat_copy_to_user(FCS_HAL_VOID *dst, FCS_HAL_VOID *src, FCS_HAL_SIZE size) +{ + if (access_ok(dst, size)) { + if (copy_to_user(dst, src, size)) { + pr_err("Failed to copy data to user-space\n"); + return -EFAULT; + } + } else { + /* NOTE(review): an invalid user pointer that fails access_ok() would be memcpy'd here -- confirm callers only pass kernel pointers on this path */ fcs_plat_memcpy(dst, src, size); + } + + return 0; +} + +/* Mirror of fcs_plat_copy_to_user for the copy-from direction. */ FCS_HAL_INT fcs_plat_copy_from_user(FCS_HAL_VOID *dst, FCS_HAL_VOID *src, FCS_HAL_SIZE size) +{ + if (access_ok(src, size)) { + if (copy_from_user(dst, src, size)) { + pr_err("Failed to copy data from user-space\n"); + return -EFAULT; + } + } else { + fcs_plat_memcpy(dst, src, size); + } + + return 0; +} + +/* Thin wrappers over kernel primitives used by the HAL layer. */ FCS_HAL_VOID fcs_plat_memset(FCS_HAL_VOID *dst, FCS_HAL_U8 val, FCS_HAL_SIZE size) +{ + memset(dst, val, size); +} + +FCS_HAL_VOID fcs_plat_memcpy(FCS_HAL_VOID *dst, FCS_HAL_VOID *src, FCS_HAL_SIZE size) +{ + memcpy(dst, src, size); +} + +/* Returns 0 on timeout, otherwise remaining jiffies (wait_for_completion_timeout semantics). */ FCS_HAL_INT fcs_plat_wait_for_completion(FCS_HAL_COMPLETION *completion, FCS_HAL_ULONG timeout) +{ + return wait_for_completion_timeout(completion, timeout); +} + +FCS_HAL_VOID fcs_plat_mutex_lock(struct socfpga_fcs_priv *priv) +{ + mutex_lock(&priv->lock); +} + +FCS_HAL_VOID fcs_plat_mutex_unlock(struct socfpga_fcs_priv *priv) +{ + mutex_unlock(&priv->lock); +} + +FCS_HAL_VOID *fcs_plat_alloc_mem(FCS_HAL_SIZE size) +{ + return kmalloc(size, GFP_KERNEL); +} + +FCS_HAL_VOID fcs_plat_free_mem(FCS_HAL_VOID *ptr) +{ + kfree(ptr); +} + +/* True when the two UUIDs are equal. */ FCS_HAL_BOOL fcs_plat_uuid_compare(FCS_HAL_UUID *uuid1, FCS_HAL_UUID *uuid2) +{ + return uuid_equal(uuid1, uuid2); +} + +FCS_HAL_VOID fcs_plat_uuid_copy(FCS_HAL_UUID *dst, FCS_HAL_UUID 
*src) +{ + uuid_copy(dst, src); +} + +FCS_HAL_VOID fcs_plat_uuid_generate(struct socfpga_fcs_priv *priv) +{ + uuid_gen(&priv->uuid_id); +} + +/* Clear both the session UUID and the numeric session id. */ FCS_HAL_VOID fcs_plat_uuid_clear(struct socfpga_fcs_priv *priv) +{ + memset(&priv->uuid_id, 0, sizeof(FCS_HAL_UUID)); + memset(&priv->session_id, 0, sizeof(FCS_HAL_U32)); +} + +/* Free up to three service buffers; NULL arguments are skipped. */ FCS_HAL_VOID fcs_plat_free_svc_memory(struct socfpga_fcs_priv *priv, + void *buf1, void *buf2, void *buf3) +{ + if (buf1) + stratix10_svc_free_memory(priv->chan, buf1); + + if (buf2) + stratix10_svc_free_memory(priv->chan, buf2); + + if (buf3) + stratix10_svc_free_memory(priv->chan, buf3); +} + +static FCS_HAL_VOID *plat_sip_svc_allocate_memory(struct socfpga_fcs_priv *priv, + size_t size) +{ + return stratix10_svc_allocate_memory(priv->chan, size); +} + +static FCS_HAL_VOID plat_sip_svc_free_memory(struct socfpga_fcs_priv *priv, + void *buf) +{ + stratix10_svc_free_memory(priv->chan, buf); +} + +static FCS_HAL_VOID plat_sip_svc_task_done(struct socfpga_fcs_priv *priv) +{ + stratix10_svc_done(priv->chan); +} + +/* Completes the completion object passed as callback context, if any. */ static void soc64_async_callback(void *ptr) +{ + if (ptr) + complete(ptr); +} + +/* Translate an fcs_command_code plus the staged priv->k_ctx into a service-client message and dispatch it over the SiP/SVC channel. */ static FCS_HAL_INT plat_sip_svc_send_request(struct socfpga_fcs_priv *priv, + enum fcs_command_code command, + FCS_HAL_ULONG timeout) +{ + FCS_HAL_BOOL no_async_poll = false; + FCS_HAL_INT ret = 0; + int status, index; + void *handle = NULL; + struct stratix10_svc_cb_data data; + struct completion completion; + struct fcs_cmd_context *k_ctx = &priv->k_ctx; + FCS_SVC_CLIENT_MSG *msg = + kzalloc(sizeof(FCS_SVC_CLIENT_MSG), GFP_KERNEL); + + if (!msg) { + /* NOTE(review): 'ret' is still 0 when this is logged */ pr_err("failed to allocate memory for svc client message ret: %d\n", + ret); + return -ENOMEM; + } + + priv->status = 0; + priv->resp = 0; + + switch (command) { + case FCS_DEV_CRYPTO_OPEN_SESSION: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_OPEN_SESSION\n"); + msg->command = COMMAND_FCS_CRYPTO_OPEN_SESSION; + break; + + case FCS_DEV_CRYPTO_CLOSE_SESSION: + pr_debug("Sending command: 
COMMAND_FCS_CRYPTO_CLOSE_SESSION with session_id: 0x%x\n", + priv->session_id); + msg->arg[0] = priv->session_id; + msg->command = COMMAND_FCS_CRYPTO_CLOSE_SESSION; + break; + + case FCS_DEV_ATF_VERSION: + pr_debug("Sending command: COMMAND_SMC_ATF_BUILD_VER\n"); + msg->command = COMMAND_SMC_ATF_BUILD_VER; + priv->client.receive_cb = fcs_atf_version_callback; + break; + + case FCS_DEV_RANDOM_NUMBER_GEN: + pr_debug("Sending command: COMMAND_FCS_RANDOM_NUMBER_GEN_EXT with session_id: 0x%x, context_id: 0x%x\n", + priv->session_id, k_ctx->rng.context_id); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->rng.context_id; + msg->command = COMMAND_FCS_RANDOM_NUMBER_GEN_EXT; + msg->payload_output = k_ctx->rng.rng; + msg->payload_length_output = k_ctx->rng.rng_len; + break; + + case FCS_DEV_CRYPTO_IMPORT_KEY: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_IMPORT_KEY\n"); + pr_debug("Key: %*ph\n", k_ctx->import_key.key_len, + k_ctx->import_key.key); + msg->payload = k_ctx->import_key.key; + msg->payload_length = k_ctx->import_key.key_len; + msg->command = COMMAND_FCS_CRYPTO_IMPORT_KEY; + break; + + case FCS_DEV_CRYPTO_EXPORT_KEY: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_EXPORT_KEY with session_id: 0x%x, key_id: 0x%x\n", + priv->session_id, k_ctx->export_key.key_id); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->export_key.key_id; + msg->payload_output = k_ctx->export_key.key; + msg->payload_length_output = *k_ctx->export_key.key_len; + msg->command = COMMAND_FCS_CRYPTO_EXPORT_KEY; + break; + + case FCS_DEV_CRYPTO_REMOVE_KEY: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_REMOVE_KEY with session_id: 0x%x, key_id: 0x%x\n", + priv->session_id, k_ctx->remove_key.key_id); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->remove_key.key_id; + msg->command = COMMAND_FCS_CRYPTO_REMOVE_KEY; + break; + + case FCS_DEV_CRYPTO_GET_KEY_INFO: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_GET_KEY_INFO with session_id: 0x%x, key_id: 0x%x\n", + 
priv->session_id, k_ctx->key_info.key_id); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->key_info.key_id; + msg->payload_output = k_ctx->key_info.info; + msg->payload_length_output = *k_ctx->key_info.info_len; + msg->command = COMMAND_FCS_CRYPTO_GET_KEY_INFO; + break; + + case FCS_DEV_CRYPTO_CREATE_KEY: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_CREATE_KEY\n"); + pr_debug("Key: %*ph\n", k_ctx->create_key.key_len, + k_ctx->create_key.key); + msg->payload = k_ctx->create_key.key; + msg->payload_length = k_ctx->create_key.key_len; + msg->command = COMMAND_FCS_CRYPTO_CREATE_KEY; + break; + + case FCS_DEV_CRYPTO_HKDF_REQUEST: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_HKDF_REQUEST with session_id: 0x%x, steptype: 0x%x, macmode: 0x%x, key_id: 0x%x\n", + priv->session_id, k_ctx->hkdf_req.step_type, + k_ctx->hkdf_req.mac_mode, k_ctx->hkdf_req.key_id); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->hkdf_req.step_type; + msg->arg[2] = k_ctx->hkdf_req.mac_mode; + msg->arg[3] = k_ctx->hkdf_req.key_id; + msg->arg[4] = k_ctx->hkdf_req.output_key_obj_len; + msg->payload = k_ctx->hkdf_req.ikm; + msg->command = COMMAND_FCS_CRYPTO_HKDF_REQUEST; + break; + + case FCS_DEV_GET_PROVISION_DATA: + pr_debug("Sending command: COMMAND_FCS_GET_PROVISION_DATA\n"); + msg->payload_output = k_ctx->prov_data.data; + msg->payload_length_output = *k_ctx->prov_data.data_len; + msg->command = COMMAND_FCS_GET_PROVISION_DATA; + break; + + case FCS_DEV_COUNTER_SET: + pr_debug("Sending command: COMMAND_FCS_SEND_CERTIFICATE\n"); + msg->payload = k_ctx->ctr_set.ccert; + msg->payload_length = k_ctx->ctr_set.ccert_len; + msg->command = COMMAND_FCS_SEND_CERTIFICATE; + break; + + case FCS_DEV_COUNTER_SET_POLL_SERVICE: + pr_debug("Sending command: COMMAND_POLL_SERVICE_STATUS\n"); + msg->payload = k_ctx->ctr_set.status; + msg->payload_length = *k_ctx->ctr_set.status_len; + msg->command = COMMAND_POLL_SERVICE_STATUS; + break; + + case FCS_DEV_COUNTER_SET_PREAUTHORIZED: + 
pr_debug("Sending command: COMMAND_FCS_COUNTER_SET_PREAUTHORIZED with ctr_type: 0x%x, ctr_val: 0x%x, test: 0x%x\n", + k_ctx->ctr_set_preauth.ctr_type, + k_ctx->ctr_set_preauth.ctr_val, + k_ctx->ctr_set_preauth.test); + msg->arg[0] = k_ctx->ctr_set_preauth.ctr_type; + msg->arg[1] = k_ctx->ctr_set_preauth.ctr_val; + msg->arg[2] = k_ctx->ctr_set_preauth.test; + msg->command = COMMAND_FCS_COUNTER_SET_PREAUTHORIZED; + break; + + case FCS_DEV_CRYPTO_GET_DIGEST_INIT: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_GET_DIGEST_INIT with session_id: 0x%x, context_id: 0x%x, key_id: 0x%x,sha_op_mode: 0x%x, sha_digest_sz: 0x%x\n", + priv->session_id, k_ctx->dgst.context_id, + k_ctx->dgst.key_id, k_ctx->dgst.sha_op_mode, + k_ctx->dgst.sha_digest_sz); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->dgst.context_id; + msg->arg[2] = k_ctx->dgst.key_id; + msg->arg[3] = DIGEST_PARAM_SZ; + msg->arg[4] = k_ctx->dgst.sha_op_mode | + (k_ctx->dgst.sha_digest_sz << DIGEST_SZ_OFFSET); + msg->command = COMMAND_FCS_CRYPTO_GET_DIGEST_INIT; + no_async_poll = true; + break; + + case FCS_DEV_CRYPTO_GET_DIGEST_UPDATE: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_GET_DIGEST_UPDATE with session_id: 0x%x, context_id: 0x%x, src_len: 0x%x\n", + priv->session_id, k_ctx->dgst.context_id, + k_ctx->dgst.src_len); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->dgst.context_id; + msg->payload = k_ctx->dgst.src; + msg->payload_length = k_ctx->dgst.src_len; + msg->payload_output = k_ctx->dgst.digest; + msg->payload_length_output = *k_ctx->dgst.digest_len; + msg->command = COMMAND_FCS_CRYPTO_GET_DIGEST_UPDATE; + break; + + case FCS_DEV_CRYPTO_GET_DIGEST_FINAL: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_GET_DIGEST_FINALIZE with session_id: 0x%x, context_id: 0x%x, src_len: 0x%x\n", + priv->session_id, k_ctx->dgst.context_id, + k_ctx->dgst.src_len); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->dgst.context_id; + msg->payload = k_ctx->dgst.src; + msg->payload_length = 
k_ctx->dgst.src_len; + msg->payload_output = k_ctx->dgst.digest; + msg->payload_length_output = *k_ctx->dgst.digest_len; + msg->command = COMMAND_FCS_CRYPTO_GET_DIGEST_FINALIZE; + break; + + case FCS_DEV_CRYPTO_MAC_VERIFY_INIT: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_MAC_VERIFY_INIT with session_id: 0x%x, context_id: 0x%x, key_id: 0x%x, sha_op_mode: 0x%x, sha_digest_sz: 0x%x\n", + priv->session_id, k_ctx->mac_verify.context_id, + k_ctx->mac_verify.key_id, k_ctx->mac_verify.sha_op_mode, + k_ctx->mac_verify.sha_digest_sz); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->mac_verify.context_id; + msg->arg[2] = k_ctx->mac_verify.key_id; + msg->arg[3] = DIGEST_PARAM_SZ; + msg->arg[4] = + k_ctx->mac_verify.sha_op_mode | + (k_ctx->mac_verify.sha_digest_sz << DIGEST_SZ_OFFSET); + msg->command = COMMAND_FCS_CRYPTO_MAC_VERIFY_INIT; + no_async_poll = true; + break; + + case FCS_DEV_CRYPTO_MAC_VERIFY_UPDATE: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_MAC_VERIFY_UPDATE with session_id: 0x%x, context_id: 0x%x, user_data_size: 0x%x\n", + priv->session_id, k_ctx->mac_verify.context_id, + k_ctx->mac_verify.user_data_size); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->mac_verify.context_id; + msg->arg[2] = k_ctx->mac_verify.user_data_size; + msg->payload = k_ctx->mac_verify.src; + msg->payload_length = k_ctx->mac_verify.src_size; + msg->payload_output = k_ctx->mac_verify.dst; + msg->payload_length_output = *k_ctx->mac_verify.dst_size; + msg->command = COMMAND_FCS_CRYPTO_MAC_VERIFY_UPDATE; + break; + + case FCS_DEV_CRYPTO_MAC_VERIFY_FINAL: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_MAC_VERIFY_FINALIZE with session_id: 0x%x, context_id: 0x%x, user_data_size: 0x%x\n", + priv->session_id, k_ctx->mac_verify.context_id, + k_ctx->mac_verify.user_data_size); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->mac_verify.context_id; + msg->arg[2] = k_ctx->mac_verify.user_data_size; + msg->payload = k_ctx->mac_verify.src; + msg->payload_length = 
k_ctx->mac_verify.src_size; + msg->payload_output = k_ctx->mac_verify.dst; + msg->payload_length_output = *k_ctx->mac_verify.dst_size; + msg->command = COMMAND_FCS_CRYPTO_MAC_VERIFY_FINALIZE; + break; + + case FCS_DEV_CRYPTO_AES_CRYPT_INIT: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_AES_CRYPT_INIT with session_id: 0x%x, cid: 0x%x, kid: 0x%x\n", + priv->session_id, k_ctx->aes.cid, k_ctx->aes.kid); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->aes.cid; + msg->arg[2] = k_ctx->aes.kid; + msg->payload = k_ctx->aes.input; + msg->payload_length = k_ctx->aes.ip_len; + msg->payload_output = NULL; + msg->payload_length_output = 0; + msg->command = COMMAND_FCS_CRYPTO_AES_CRYPT_INIT; + no_async_poll = true; + break; + + case FCS_DEV_CRYPTO_AES_CRYPT_UPDATE: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_AES_CRYPT_UPDATE with session_id:0x%x, cid: 0x%x, kid: 0x%x src = %p, dst = %p, src_size = %d dst_size= %d\n", + priv->session_id, k_ctx->aes.cid, k_ctx->aes.kid, k_ctx->aes.input, + k_ctx->aes.output, k_ctx->aes.ip_len, *k_ctx->aes.op_len); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->aes.cid; + + if (k_ctx->aes.mode == FCS_AES_BLOCK_MODE_GCM || + k_ctx->aes.mode == FCS_AES_BLOCK_MODE_GHASH) + msg->arg[2] = k_ctx->aes.input_pad; + else + msg->arg[2] = 0; + + msg->payload = k_ctx->aes.input; + msg->payload_length = k_ctx->aes.ip_len; + msg->payload_output = k_ctx->aes.output; + msg->payload_length_output = *k_ctx->aes.op_len; + msg->command = COMMAND_FCS_CRYPTO_AES_CRYPT_UPDATE; + break; + + case FCS_DEV_CRYPTO_AES_CRYPT_FINAL: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE with session_id: 0x%x, cid: 0x%x, kid: 0x%x src = %p, dst = %p, src_size = %d dst_size= %d\n", + priv->session_id, k_ctx->aes.cid, k_ctx->aes.kid, k_ctx->aes.input, + k_ctx->aes.output, k_ctx->aes.ip_len, *k_ctx->aes.op_len); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->aes.cid; + + if (k_ctx->aes.mode == FCS_AES_BLOCK_MODE_GCM || + k_ctx->aes.mode 
== FCS_AES_BLOCK_MODE_GHASH) + msg->arg[2] = k_ctx->aes.input_pad; + else + msg->arg[2] = 0; + + msg->payload = k_ctx->aes.input; + msg->payload_length = k_ctx->aes.ip_len; + msg->payload_output = k_ctx->aes.output; + msg->payload_length_output = *k_ctx->aes.op_len; + msg->command = COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE; + break; + + case FCS_DEV_CRYPTO_ECDH_REQUEST_INIT: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_ECDH_REQUEST_INIT with session_id: 0x%x, cid: 0x%x, kid: 0x%x, ecc_curve: 0x%x\n", + priv->session_id, k_ctx->ecdh_req.cid, + k_ctx->ecdh_req.kid, k_ctx->ecdh_req.ecc_curve); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->ecdh_req.cid; + msg->arg[2] = k_ctx->ecdh_req.kid; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = k_ctx->ecdh_req.ecc_curve & FCS_ECC_CURVE_MASK; + msg->command = COMMAND_FCS_CRYPTO_ECDH_REQUEST_INIT; + no_async_poll = true; + break; + + case FCS_DEV_CRYPTO_ECDH_REQUEST_FINALIZE: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_ECDH_REQUEST_FINALIZE with session_id: 0x%x, cid: 0x%x, kid: 0x%x, pubkey_len: 0x%x\n", + priv->session_id, k_ctx->ecdh_req.cid, + k_ctx->ecdh_req.kid, k_ctx->ecdh_req.pubkey_len); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->ecdh_req.cid; + msg->arg[2] = k_ctx->ecdh_req.kid; + msg->payload = k_ctx->ecdh_req.pubkey; + msg->payload_length = k_ctx->ecdh_req.pubkey_len; + msg->payload_output = k_ctx->ecdh_req.sh_secret; + msg->payload_length_output = *k_ctx->ecdh_req.sh_secret_len; + msg->command = COMMAND_FCS_CRYPTO_ECDH_REQUEST_FINALIZE; + break; + + case FCS_DEV_CHIP_ID: + pr_debug("Sending command: COMMAND_FCS_GET_CHIP_ID\n"); + msg->command = COMMAND_FCS_GET_CHIP_ID; + break; + + case FCS_DEV_ATTESTATION_GET_CERTIFICATE: + pr_debug("Sending command: COMMAND_FCS_ATTESTATION_CERTIFICATE with cert_request: 0x%x\n", + k_ctx->attestation_cert.cert_request); + msg->payload = NULL; + msg->payload_length = 0; + msg->payload_output = k_ctx->attestation_cert.cert; + 
msg->payload_length_output = *k_ctx->attestation_cert.cert_size; + msg->arg[0] = k_ctx->attestation_cert.cert_request; + msg->command = COMMAND_FCS_ATTESTATION_CERTIFICATE; + break; + + case FCS_DEV_ATTESTATION_CERTIFICATE_RELOAD: + pr_debug("Sending command: COMMAND_FCS_ATTESTATION_CERTIFICATE_RELOAD with cert_request: 0x%x\n", + k_ctx->attestation_cert_reload.cert_request & 0xff); + msg->arg[0] = k_ctx->attestation_cert_reload.cert_request & + 0xff; + msg->command = COMMAND_FCS_ATTESTATION_CERTIFICATE_RELOAD; + break; + + case FCS_DEV_MCTP_REQUEST: + pr_debug("Sending command: COMMAND_FCS_MCTP_SEND with mctp_req_len: 0x%x\n", + k_ctx->mctp.mctp_req_len); + msg->command = COMMAND_FCS_MCTP_SEND; + msg->payload = k_ctx->mctp.mctp_req; + msg->payload_length = k_ctx->mctp.mctp_req_len; + msg->payload_output = k_ctx->mctp.mctp_resp; + msg->payload_length_output = *k_ctx->mctp.mctp_resp_len; + break; + + case FCS_DEV_GET_IDCODE: + pr_debug("Sending command: COMMAND_GET_IDCODE\n"); + msg->command = COMMAND_GET_IDCODE; + break; + + case FCS_DEV_GET_DEVICE_IDENTITY: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_GET_DEVICE_IDENTITY\n"); + msg->command = COMMAND_FCS_CRYPTO_GET_DEVICE_IDENTITY; + msg->payload_output = k_ctx->device_identity.identity; + msg->payload_length_output = + *k_ctx->device_identity.identity_len; + break; + + case FCS_DEV_QSPI_OPEN: + pr_debug("Sending command: COMMAND_QSPI_OPEN\n"); + msg->command = COMMAND_QSPI_OPEN; + break; + + case FCS_DEV_QSPI_CLOSE: + pr_debug("Sending command: COMMAND_QSPI_CLOSE\n"); + msg->command = COMMAND_QSPI_CLOSE; + break; + + case FCS_DEV_QSPI_CS: + pr_debug("Sending command: COMMAND_QSPI_SET_CS with chipsel: 0x%x\n", + k_ctx->qspi_cs.chipsel); + msg->command = COMMAND_QSPI_SET_CS; + msg->arg[0] = (k_ctx->qspi_cs.chipsel >> 28) & 0xF; + msg->arg[1] = (k_ctx->qspi_cs.chipsel >> 27) & 0x1; + msg->arg[2] = (k_ctx->qspi_cs.chipsel >> 26) & 0x1; + break; + + case FCS_DEV_QSPI_READ: + pr_debug("Sending command: 
COMMAND_QSPI_READ with qspi_addr: 0x%x, qspi_len: 0x%x\n", + k_ctx->qspi_read.qspi_addr, k_ctx->qspi_read.qspi_len); + msg->command = COMMAND_QSPI_READ; + msg->arg[0] = k_ctx->qspi_read.qspi_addr; + msg->payload_output = k_ctx->qspi_read.qspi_data; + msg->payload_length_output = k_ctx->qspi_read.qspi_len * 4; + break; + + case FCS_DEV_QSPI_WRITE: + pr_debug("Sending command: COMMAND_QSPI_WRITE with qspi_data_len: 0x%x\n", + *k_ctx->qspi_write.qspi_data_len); + msg->command = COMMAND_QSPI_WRITE; + msg->payload = k_ctx->qspi_write.qspi_data; + msg->payload_length = *k_ctx->qspi_write.qspi_data_len; + break; + + case FCS_DEV_QSPI_ERASE: + pr_debug("Sending command: COMMAND_QSPI_ERASE with qspi_addr: 0x%x, len: 0x%x\n", + k_ctx->qspi_erase.qspi_addr, k_ctx->qspi_erase.len); + msg->command = COMMAND_QSPI_ERASE; + msg->arg[0] = k_ctx->qspi_erase.qspi_addr; + msg->arg[1] = k_ctx->qspi_erase.len * 4; + break; + + case FCS_DEV_SDOS_DATA_EXT: + pr_debug("Sending command: COMMAND_FCS_SDOS_DATA_EXT with session_id: 0x%x, context_id: 0x%x, op_mode: 0x%x, own: 0x%llx\n", + priv->session_id, k_ctx->sdos.context_id, + k_ctx->sdos.op_mode, k_ctx->sdos.own); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->sdos.context_id; + msg->arg[2] = k_ctx->sdos.op_mode; + msg->arg[3] = k_ctx->sdos.own; + msg->payload = k_ctx->sdos.src; + msg->payload_length = k_ctx->sdos.src_size; + msg->payload_output = k_ctx->sdos.dst; + msg->payload_length_output = *k_ctx->sdos.dst_size; + msg->command = COMMAND_FCS_SDOS_DATA_EXT; + break; + + case FCS_DEV_CRYPTO_ECDSA_GET_PUBLIC_KEY_INIT: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_INIT with session_id: 0x%x, context_id: 0x%x, key_id: 0x%x,ecc_curve: 0x%x\n", + priv->session_id, k_ctx->ecdsa_pub_key.context_id, + k_ctx->ecdsa_pub_key.key_id, + k_ctx->ecdsa_pub_key.ecc_curve & FCS_ECC_CURVE_MASK); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->ecdsa_pub_key.context_id; + msg->arg[2] = k_ctx->ecdsa_pub_key.key_id; + 
msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = k_ctx->ecdsa_pub_key.ecc_curve & + FCS_ECC_CURVE_MASK; + msg->command = COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_INIT; + no_async_poll = true; + break; + + case FCS_DEV_CRYPTO_ECDSA_GET_PUBLIC_KEY_FINALIZE: + pr_debug("Sending command: ECDSA_GET_PUBLIC_KEY_FINALIZE with session_id: 0x%x, context_id: 0x%x\n", + priv->session_id, k_ctx->ecdsa_pub_key.context_id); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->ecdsa_pub_key.context_id; + msg->payload = NULL; + msg->payload_length = 0; + msg->payload_output = k_ctx->ecdsa_pub_key.pubkey; + msg->payload_length_output = *k_ctx->ecdsa_pub_key.pubkey_len; + msg->command = COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_FINALIZE; + break; + + case FCS_DEV_CRYPTO_ECDSA_HASH_SIGNING_INIT: + pr_debug("Sending command: ECDSA_HASH_SIGNING_INIT with session_id: 0x%x, context_id: 0x%x, key_id: 0x%x, ecc_curve: 0x%x\n", + priv->session_id, k_ctx->ecdsa_hash_sign.context_id, + k_ctx->ecdsa_hash_sign.key_id, + k_ctx->ecdsa_hash_sign.ecc_curve & FCS_ECC_CURVE_MASK); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->ecdsa_hash_sign.context_id; + msg->arg[2] = k_ctx->ecdsa_hash_sign.key_id; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = k_ctx->ecdsa_hash_sign.ecc_curve & + FCS_ECC_CURVE_MASK; + msg->command = COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_INIT; + no_async_poll = true; + break; + + case FCS_DEV_CRYPTO_ECDSA_HASH_SIGNING_FINALIZE: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_FINALIZE with session_id: 0x%x, context_id: 0x%x, src_len: 0x%x\n", + priv->session_id, k_ctx->ecdsa_hash_sign.context_id, + k_ctx->ecdsa_hash_sign.src_len); + + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->ecdsa_hash_sign.context_id; + msg->payload = k_ctx->ecdsa_hash_sign.src; + msg->payload_length = k_ctx->ecdsa_hash_sign.src_len; + msg->payload_output = k_ctx->ecdsa_hash_sign.dst; + msg->payload_length_output = *k_ctx->ecdsa_hash_sign.dst_len; + msg->command = 
COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_FINALIZE; + break; + + case FCS_DEV_CRYPTO_ECDSA_HASH_VERIFY_INIT: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_INIT with session_id: 0x%x, context_id: 0x%x, key_id: 0x%x, ecc_curve: 0x%x\n", + priv->session_id, k_ctx->ecdsa_hash_verify.context_id, + k_ctx->ecdsa_hash_verify.key_id, + k_ctx->ecdsa_hash_verify.ecc_curve & + FCS_ECC_CURVE_MASK); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->ecdsa_hash_verify.context_id; + msg->arg[2] = k_ctx->ecdsa_hash_verify.key_id; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = k_ctx->ecdsa_hash_verify.ecc_curve & + FCS_ECC_CURVE_MASK; + msg->command = COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_INIT; + no_async_poll = true; + break; + + case FCS_DEV_CRYPTO_ECDSA_HASH_VERIFY_FINALIZE: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_FINALIZE with session_id: 0x%x, context_id: 0x%x, src_len: 0x%x\n", + priv->session_id, k_ctx->ecdsa_hash_verify.context_id, + k_ctx->ecdsa_hash_verify.src_len); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->ecdsa_hash_verify.context_id; + msg->payload = k_ctx->ecdsa_hash_verify.src; + msg->payload_length = k_ctx->ecdsa_hash_verify.src_len; + msg->payload_output = k_ctx->ecdsa_hash_verify.dst; + msg->payload_length_output = *k_ctx->ecdsa_hash_verify.dst_len; + msg->command = COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_FINALIZE; + break; + + case FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_INIT: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_INIT with session_id: 0x%x, context_id: 0x%x, key_id: 0x%x, ecc_curve: 0x%x\n", + priv->session_id, + k_ctx->ecdsa_sha2_data_sign.context_id, + k_ctx->ecdsa_sha2_data_sign.key_id, + k_ctx->ecdsa_sha2_data_sign.ecc_curve & + FCS_ECC_CURVE_MASK); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->ecdsa_sha2_data_sign.context_id; + msg->arg[2] = k_ctx->ecdsa_sha2_data_sign.key_id; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = 
k_ctx->ecdsa_sha2_data_sign.ecc_curve & + FCS_ECC_CURVE_MASK; + msg->command = COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_INIT; + no_async_poll = true; + break; + + case FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE with session_id: 0x%x, context_id: 0x%x, src_len: 0x%x\n", + priv->session_id, + k_ctx->ecdsa_sha2_data_sign.context_id, + k_ctx->ecdsa_sha2_data_sign.src_len); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->ecdsa_sha2_data_sign.context_id; + msg->payload = k_ctx->ecdsa_sha2_data_sign.src; + msg->payload_length = k_ctx->ecdsa_sha2_data_sign.src_len; + msg->payload_output = k_ctx->ecdsa_sha2_data_sign.dst; + msg->payload_length_output = + *k_ctx->ecdsa_sha2_data_sign.dst_len; + msg->command = + COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE; + break; + + case FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE with session_id: 0x%x, context_id: 0x%x, src_len: 0x%x\n", + priv->session_id, + k_ctx->ecdsa_sha2_data_sign.context_id, + k_ctx->ecdsa_sha2_data_sign.src_len); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->ecdsa_sha2_data_sign.context_id; + msg->payload = k_ctx->ecdsa_sha2_data_sign.src; + msg->payload_length = k_ctx->ecdsa_sha2_data_sign.src_len; + msg->payload_output = k_ctx->ecdsa_sha2_data_sign.dst; + msg->payload_length_output = + *k_ctx->ecdsa_sha2_data_sign.dst_len; + msg->command = + COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE; + break; + + case FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_INIT: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_INIT with session_id: 0x%x, context_id: 0x%x, key_id: 0x%x, ecc_curve: 0x%x\n", + priv->session_id, + k_ctx->ecdsa_sha2_data_verify.context_id, + k_ctx->ecdsa_sha2_data_verify.key_id, + k_ctx->ecdsa_sha2_data_verify.ecc_curve & + FCS_ECC_CURVE_MASK); + msg->arg[0] = priv->session_id; + msg->arg[1] = 
k_ctx->ecdsa_sha2_data_verify.context_id; + msg->arg[2] = k_ctx->ecdsa_sha2_data_verify.key_id; + msg->arg[3] = CRYPTO_ECC_PARAM_SZ; + msg->arg[4] = k_ctx->ecdsa_sha2_data_verify.ecc_curve & + FCS_ECC_CURVE_MASK; + msg->command = COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_INIT; + no_async_poll = true; + break; + + case FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_FINALIZE: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_FINALIZE with session_id: 0x%x, context_id: 0x%x, user_data_sz: 0x%x\n", + priv->session_id, + k_ctx->ecdsa_sha2_data_verify.context_id, + k_ctx->ecdsa_sha2_data_verify.user_data_sz); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->ecdsa_sha2_data_verify.context_id; + msg->arg[2] = k_ctx->ecdsa_sha2_data_verify.user_data_sz; + msg->payload = k_ctx->ecdsa_sha2_data_verify.src; + msg->payload_length = k_ctx->ecdsa_sha2_data_verify.src_len; + msg->payload_output = k_ctx->ecdsa_sha2_data_verify.dst; + msg->payload_length_output = + *k_ctx->ecdsa_sha2_data_verify.dst_len; + msg->command = COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_FINALIZE; + break; + + case FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_UPDATE: + pr_debug("Sending command: COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_UPDATE with session_id: 0x%x, context_id: 0x%x, user_data_sz: 0x%x\n", + priv->session_id, + k_ctx->ecdsa_sha2_data_verify.context_id, + k_ctx->ecdsa_sha2_data_verify.user_data_sz); + msg->arg[0] = priv->session_id; + msg->arg[1] = k_ctx->ecdsa_sha2_data_verify.context_id; + msg->arg[2] = k_ctx->ecdsa_sha2_data_verify.user_data_sz; + msg->payload = k_ctx->ecdsa_sha2_data_verify.src; + msg->payload_length = k_ctx->ecdsa_sha2_data_verify.src_len; + msg->payload_output = k_ctx->ecdsa_sha2_data_verify.dst; + msg->payload_length_output = + *k_ctx->ecdsa_sha2_data_verify.dst_len; + msg->command = COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_UPDATE; + break; + + case FCS_DEV_HPS_IMG_VALIDATE_REQUEST: + pr_debug("Sending command: COMMAND_FCS_SEND_CERTIFICATE with vab_cert_len: 0x%x\n", + 
k_ctx->hps_img_validate.vab_cert_len); + msg->payload = k_ctx->hps_img_validate.vab_cert; + msg->payload_length = k_ctx->hps_img_validate.vab_cert_len; + msg->command = COMMAND_FCS_SEND_CERTIFICATE; + break; + +#ifdef CONFIG_ALTERA_SOCFPGA_FCS_DEBUG + case FCS_DEV_MBOX_SEND: + msg->command = COMMAND_MBOX_SEND_CMD; + msg->arg[0] = k_ctx->mbox.mbox_cmd; + msg->payload = k_ctx->mbox.cmd_data; + msg->payload_length = k_ctx->mbox.cmd_data_sz; + msg->payload_output = k_ctx->mbox.resp_data; + msg->payload_length_output = *k_ctx->mbox.resp_data_sz; + break; +#endif + + default: + pr_err("Unknown command: 0x%x\n", command); + ret = -EINVAL; + break; + } + + if (command == FCS_DEV_ATF_VERSION) { + reinit_completion(&priv->completion); + + ret = stratix10_svc_send(priv->chan, msg); + if (ret) { + pr_err("failed to send message to service channel\n"); + goto fun_ret; + } + + if (!wait_for_completion_timeout(&priv->completion, timeout)) { + pr_err("svc timeout to get completed status\n"); + ret = -ETIMEDOUT; + } +fun_ret: + kfree(msg); + return ret; + } + + init_completion(&completion); + + for (index = 0; index < MSG_RETRY; index++) { + status = stratix10_svc_async_send(priv->chan, msg, &handle, + soc64_async_callback, + &completion); + if (status == 0) + break; + msleep(RETRY_SLEEP_MS); + } + + if (!handle || status != 0) { + pr_err("Failed to send async message\n"); + return -ETIMEDOUT; + } + + if (!no_async_poll) { + ret = wait_for_completion_io_timeout(&completion, (TIMEOUT)); + if (ret > 0) + pr_debug("Received async interrupt\n"); + else + pr_err("timeout occurred while waiting for async message\n"); + + ret = stratix10_svc_async_poll(priv->chan, handle, &data); + if (ret) { + pr_err("Failed to poll async message\n"); + goto out; + } + + priv->status = data.status; + + if (data.kaddr1) { + if (command == FCS_DEV_CHIP_ID) { + priv->chip_id_lo = + *((FCS_HAL_UINT *)data.kaddr1); + priv->chip_id_hi = + *((FCS_HAL_UINT *)data.kaddr2); + } else { + priv->resp = *((FCS_HAL_U32 
*)data.kaddr1); + } + } else { + priv->resp = 0; + } + } + +out: + stratix10_svc_async_done(priv->chan, handle); + kfree(msg); + + return ret; +} + +FCS_HAL_INT fcs_plat_init(FCS_HAL_DEV *dev, struct socfpga_fcs_priv *priv) +{ + mutex_init(&priv->lock); + FCS_HAL_S32 ret = 0; + + priv->plat_data = + kmalloc(sizeof(struct socfpga_fcs_service_ops), GFP_KERNEL); + if (!priv->plat_data) { + pr_err("Failed to allocate memory for priv->plat_data\n"); + return -ENOMEM; + } + + priv->dev = dev; + priv->client.dev = dev; + priv->client.receive_cb = NULL; + priv->client.priv = priv; + + priv->chan = stratix10_svc_request_channel_byname(&priv->client, + SVC_CLIENT_FCS); + if (IS_ERR(priv->chan)) { + pr_err("couldn't get service channel %s\n", SVC_CLIENT_FCS); + return -EPROBE_DEFER; + } + + ret = stratix10_svc_add_async_client(priv->chan, true); + if (ret) { + pr_err("Failed to add async client\n"); + return ret; + } + + init_completion(&priv->completion); + + priv->plat_data->svc_send_request = plat_sip_svc_send_request; + priv->plat_data->svc_alloc_memory = plat_sip_svc_allocate_memory; + priv->plat_data->svc_free_memory = plat_sip_svc_free_memory; + priv->plat_data->svc_task_done = plat_sip_svc_task_done; + + return 0; +} + +FCS_HAL_VOID fcs_plat_cleanup(struct socfpga_fcs_priv *priv) +{ + stratix10_svc_free_channel(priv->chan); +} + +#endif /* __SOCFPA_HAL_LL_H */ diff --git a/drivers/misc/socfpga_fcs_plat.h b/drivers/misc/socfpga_fcs_plat.h new file mode 100644 index 0000000000000..1123d60bc2247 --- /dev/null +++ b/drivers/misc/socfpga_fcs_plat.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later OR MIT */ +/* + * Copyright (C) 2025 Altera + */ + +#ifndef SOCFPGA_FCS_PLAT_H_ +#define SOCFPGA_FCS_PLAT_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#define FCS_STATUS_OK SVC_STATUS_OK +#define FCS_STATUS_BUFFER_SUBMITTED SVC_STATUS_BUFFER_SUBMITTED +#define FCS_STATUS_BUFFER_DONE SVC_STATUS_BUFFER_DONE +#define FCS_STATUS_COMPLETED 
SVC_STATUS_COMPLETED +#define FCS_STATUS_BUSY SVC_STATUS_BUSY +#define FCS_STATUS_ERROR SVC_STATUS_ERROR +#define FCS_STATUS_NO_SUPPORT SVC_STATUS_NO_SUPPORT +#define FCS_STATUS_INVALID_PARAM SVC_STATUS_INVALID_PARAM +#define FCS_STATUS_NO_RESPONSE SVC_STATUS_NO_RESPONSE +#define FCS_ASYNC_POLL_SERVICE 0x00004F4E + +/** + * struct socfpga_fcs_service_ops - Service operations for SoCFPGA FCS + * @svc_send_request: Function pointer for sending a request + * @svc_alloc_memory: Function pointer for allocating memory + * @svc_free_memory: Function pointer for freeing allocated memory + * @svc_task_done: Function pointer for marking a task as done + * + * This structure defines the service operations for the SoCFPGA FCS (FPGA + * Crypto Service). Each member is a function pointer to the respective + * operation required for handling FCS services. + */ +struct socfpga_fcs_service_ops { + FCS_HAL_INT(*svc_send_request) + (struct socfpga_fcs_priv *priv, enum fcs_command_code command, + FCS_HAL_ULONG timeout); + FCS_HAL_VOID *(*svc_alloc_memory)(struct socfpga_fcs_priv *priv, + size_t size); + FCS_HAL_VOID (*svc_free_memory) + (struct socfpga_fcs_priv *priv, void *buf); + FCS_HAL_VOID (*svc_task_done)(struct socfpga_fcs_priv *priv); +}; + +/** + * @brief Function to complete the platform operation. + * + * This function is used to complete the platform operation by providing the + * completion structure. + * + * @param completion Pointer to the completion structure. + */ +FCS_HAL_VOID fcs_plat_complete(FCS_HAL_COMPLETION *completion); + +/** + * @brief Reinitializes the given completion structure. + * + * This function reinitializes the completion structure pointed to by the + * `completion` parameter. It is typically used to reset the state of the + * completion structure so that it can be reused. + * + * @param completion Pointer to the completion structure to be reinitialized. 
+ * + * @return FCS_HAL_VOID + */ +FCS_HAL_VOID fcs_plat_reinit_completion(FCS_HAL_COMPLETION *completion); + +FCS_HAL_INT fcs_plat_wait_for_completion(FCS_HAL_COMPLETION *completion, + FCS_HAL_ULONG timeout); +FCS_HAL_VOID fcs_plat_mutex_lock(struct socfpga_fcs_priv *priv); +FCS_HAL_VOID fcs_plat_mutex_unlock(struct socfpga_fcs_priv *priv); +FCS_HAL_VOID *fcs_plat_alloc_mem(FCS_HAL_SIZE size); +FCS_HAL_VOID fcs_plat_free_mem(FCS_HAL_VOID *ptr); +FCS_HAL_BOOL fcs_plat_uuid_compare(FCS_HAL_UUID *uuid1, FCS_HAL_UUID *uuid2); +FCS_HAL_VOID fcs_plat_uuid_copy(FCS_HAL_UUID *dst, FCS_HAL_UUID *src); +FCS_HAL_VOID fcs_plat_uuid_generate(struct socfpga_fcs_priv *priv); +FCS_HAL_VOID fcs_plat_free_svc_memory(struct socfpga_fcs_priv *priv, void *buf1, + void *buf2, void *buf3); +FCS_HAL_INT fcs_plat_init(struct device *dev, struct socfpga_fcs_priv *priv); +FCS_HAL_VOID fcs_plat_cleanup(struct socfpga_fcs_priv *priv); +FCS_HAL_VOID fcs_plat_uuid_clear(struct socfpga_fcs_priv *priv); +FCS_HAL_INT fcs_plat_copy_to_user(FCS_HAL_VOID *dst, FCS_HAL_VOID *src, + FCS_HAL_SIZE size); +FCS_HAL_INT fcs_plat_copy_from_user(FCS_HAL_VOID *dst, FCS_HAL_VOID *src, + FCS_HAL_SIZE size); +FCS_HAL_VOID fcs_plat_memcpy(FCS_HAL_VOID *dst, FCS_HAL_VOID *src, + FCS_HAL_SIZE size); +FCS_HAL_VOID fcs_plat_memset(FCS_HAL_VOID *dst, FCS_HAL_U8 val, + FCS_HAL_SIZE size); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index be1505e8c536e..61ba269583e09 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -18,6 +18,7 @@ /* HRS - Host Register Set (specific to Cadence) */ #define SDHCI_CDNS_HRS04 0x10 /* PHY access port */ +#define SDHCI_CDNS_HRS05 0x14 /* PHY data access port */ #define SDHCI_CDNS_HRS04_ACK BIT(26) #define SDHCI_CDNS_HRS04_RD BIT(25) #define SDHCI_CDNS_HRS04_WR BIT(24) @@ -36,8 +37,43 @@ #define SDHCI_CDNS_HRS06_MODE_MMC_HS400 0x5 #define 
SDHCI_CDNS_HRS06_MODE_MMC_HS400ES 0x6 +/* PHY specific register */ +/* HRS register to set after SDMMC reset */ +#define SDHCI_CDNS_HRS00 0x0 +#define SDHCI_CDNS_HRS07 0x1C /* IO_DELAY_INFO_REG */ +#define SDHCI_CDNS_HRS07_RW_COMPENSATE GENMASK(20, 16) /* RW_COMPENSATE */ +#define SDHCI_CDNS_HRS07_IDELAY_VAL GENMASK(4, 0) /* IDELAY_VAL */ +/* TODO: check DV dfi_init val=9 */ +#define SDHCI_CDNS_HRS07_RW_COMPENSATE_DATA 0x9 +/* TODO: check DV dfi_init val=8 ; DDR Mode */ +#define SDHCI_CDNS_HRS07_RW_COMPENSATE_DATA_DDR 0x8 +#define SDHCI_CDNS_HRS07_IDELAY_VAL_DATA 0x0 + +#define SDHCI_CDNS_HRS09 0x024 +#define SDHCI_CDNS_HRS09_PHY_SW_RESET BIT(0) /* PHY_SW_RESET */ +#define SDHCI_CDNS_HRS09_PHY_INIT_COMPLETE BIT(1) /* PHY_INIT_COMPLETE */ +#define SDHCI_CDNS_HRS09_RDDATA_EN BIT(16) /* RDDATA_EN */ +#define SDHCI_CDNS_HRS09_RDCMD_EN BIT(15) /* RDCMD_EN */ +#define SDHCI_CDNS_HRS09_EXTENDED_WR_MODE BIT(3) /* EXTENDED_WR_MODE */ +#define SDHCI_CDNS_HRS09_EXTENDED_RD_MODE BIT(2) /* EXTENDED_RD_MODE */ + +#define SDHCI_CDNS_HRS10 0x28 /* PHY reset port */ +#define SDHCI_CDNS_HRS10_HCSDCLKADJ GENMASK(19, 16) /* HCSDCLKADJ */ +#define SDHCI_CDNS_HRS10_HCSDCLKADJ_DATA 0x0 /* HCSDCLKADJ DATA */ +/* HCSDCLKADJ DATA; DDR Mode */ +#define SDHCI_CDNS_HRS10_HCSDCLKADJ_DATA_DDR 0x2 +#define SDHCI_CDNS_HRS16 0x40 /* CMD_DATA_OUTPUT */ + /* SRS - Slot Register Set (SDHCI-compatible) */ -#define SDHCI_CDNS_SRS_BASE 0x200 +#define SDHCI_CDNS_SRS_BASE 0x200 +#define SDHCI_CDNS_SRS09 0x224 +#define SDHCI_CDNS_SRS10 0x228 +#define SDHCI_CDNS_SRS11 0x22c +#define SDHCI_CDNS_SRS12 0x230 +#define SDHCI_CDNS_SRS13 0x234 +#define SDHCI_CDNS_SRS09_CI BIT(16) +#define SDHCI_CDNS_SRS13_DATA 0xffffffff +#define SD_HOST_CLK 200000000 /* PHY */ #define SDHCI_CDNS_PHY_DLY_SD_HS 0x00 @@ -52,6 +88,51 @@ #define SDHCI_CDNS_PHY_DLY_SDCLK 0x0b #define SDHCI_CDNS_PHY_DLY_HSMMC 0x0c #define SDHCI_CDNS_PHY_DLY_STROBE 0x0d +/* PHY register values */ +#define PHY_DQ_TIMING_REG 0x2000 +#define 
PHY_DQS_TIMING_REG 0x2004 +#define PHY_GATE_LPBK_CTRL_REG 0x2008 +#define PHY_DLL_MASTER_CTRL_REG 0x200C +#define PHY_DLL_SLAVE_CTRL_REG 0x2010 +#define PHY_CTRL_REG 0x2080 +#define USE_EXT_LPBK_DQS BIT(22) +#define USE_LPBK_DQS BIT(21) +#define USE_PHONY_DQS BIT(20) +#define USE_PHONY_DQS_CMD BIT(19) +#define SYNC_METHOD BIT(31) +#define SW_HALF_CYCLE_SHIFT BIT(28) +#define RD_DEL_SEL GENMASK(24, 19) +#define RD_DEL_SEL_DATA 0x34 +#define GATE_CFG_ALWAYS_ON BIT(6) +#define UNDERRUN_SUPPRESS BIT(18) +#define PARAM_DLL_BYPASS_MODE BIT(23) +#define PARAM_PHASE_DETECT_SEL GENMASK(22, 20) +#define PARAM_DLL_START_POINT GENMASK(7, 0) +#define PARAM_PHASE_DETECT_SEL_DATA 0x2 +#define PARAM_DLL_START_POINT_DATA 0x4 +#define PARAM_DLL_START_POINT_DATA_SDR50 254 + +#define READ_DQS_CMD_DELAY GENMASK(31, 24) +#define CLK_WRDQS_DELAY GENMASK(23, 16) +#define CLK_WR_DELAY GENMASK(15, 8) +#define READ_DQS_DELAY GENMASK(7, 0) +#define READ_DQS_CMD_DELAY_DATA 0x0 +#define CLK_WRDQS_DELAY_DATA 0x0 +#define CLK_WR_DELAY_DATA 0x0 +#define READ_DQS_DELAY_DATA 0x0 + +#define PHONY_DQS_TIMING GENMASK(9, 4) +#define PHONY_DQS_TIMING_DATA 0x0 + +#define IO_MASK_ALWAYS_ON BIT(31) +#define IO_MASK_END GENMASK(29, 27) +#define IO_MASK_START GENMASK(26, 24) +#define DATA_SELECT_OE_END GENMASK(2, 0) +#define IO_MASK_END_DATA 0x5 +/* DDR Mode */ +#define IO_MASK_END_DATA_DDR 0x2 +#define IO_MASK_START_DATA 0x0 +#define DATA_SELECT_OE_END_DATA 0x1 /* * The tuned val register is 6 bit-wide, but not the whole of the range is @@ -61,8 +142,9 @@ #define SDHCI_CDNS_MAX_TUNING_LOOP 40 struct sdhci_cdns_phy_param { - u8 addr; - u8 data; + u32 addr; + u32 data; + u32 offset; }; struct sdhci_cdns_priv { @@ -78,7 +160,8 @@ struct sdhci_cdns_priv { struct sdhci_cdns_phy_cfg { const char *property; - u8 addr; + u32 addr; + u32 offset; }; struct sdhci_cdns_drv_data { @@ -98,6 +181,42 @@ static const struct sdhci_cdns_phy_cfg sdhci_cdns_phy_cfgs[] = { { "cdns,phy-dll-delay-sdclk", SDHCI_CDNS_PHY_DLY_SDCLK, 
}, { "cdns,phy-dll-delay-sdclk-hsmmc", SDHCI_CDNS_PHY_DLY_HSMMC, }, { "cdns,phy-dll-delay-strobe", SDHCI_CDNS_PHY_DLY_STROBE, }, + { "cdns,phy-use-ext-lpbk-dqs", PHY_DQS_TIMING_REG, 22,}, + { "cdns,phy-use-lpbk-dqs", PHY_DQS_TIMING_REG, 21,}, + { "cdns,phy-use-phony-dqs", PHY_DQS_TIMING_REG, 20,}, + { "cdns,phy-use-phony-dqs-cmd", PHY_DQS_TIMING_REG, 19,}, + { "cdns,phy-io-mask-always-on", PHY_DQ_TIMING_REG, 31,}, + { "cdns,phy-io-mask-end", PHY_DQ_TIMING_REG, 27,}, + { "cdns,phy-io-mask-start", PHY_DQ_TIMING_REG, 24,}, + { "cdns,phy-data-select-oe-end", PHY_DQ_TIMING_REG, 0,}, + { "cdns,phy-sync-method", PHY_GATE_LPBK_CTRL_REG, 31,}, + { "cdns,phy-sw-half-cycle-shift", PHY_GATE_LPBK_CTRL_REG, 28,}, + { "cdns,phy-rd-del-sel", PHY_GATE_LPBK_CTRL_REG, 19,}, + { "cdns,phy-underrun-suppress", PHY_GATE_LPBK_CTRL_REG, 18,}, + { "cdns,phy-gate-cfg-always-on", PHY_GATE_LPBK_CTRL_REG, 6,}, + { "cdns,phy-param-dll-bypass-mode", PHY_DLL_MASTER_CTRL_REG, 23,}, + { "cdns,phy-param-phase-detect-sel", PHY_DLL_MASTER_CTRL_REG, 20,}, + { "cdns,phy-param-dll-start-point", PHY_DLL_MASTER_CTRL_REG, 0,}, + { "cdns,phy-read-dqs-cmd-delay", PHY_DLL_SLAVE_CTRL_REG, 24,}, + { "cdns,phy-clk-wrdqs-delay", PHY_DLL_SLAVE_CTRL_REG, 16,}, + { "cdns,phy-clk-wr-delay", PHY_DLL_SLAVE_CTRL_REG, 8,}, + { "cdns,phy-read-dqs-delay", PHY_DLL_SLAVE_CTRL_REG, 0,}, + { "cdns,phy-phony-dqs-timing", PHY_CTRL_REG, 4,}, + { "cdns,hrs09-rddata-en", SDHCI_CDNS_HRS09, 16,}, + { "cdns,hrs09-rdcmd-en", SDHCI_CDNS_HRS09, 15,}, + { "cdns,hrs09-extended-wr-mode", SDHCI_CDNS_HRS09, 3,}, + { "cdns,hrs09-extended-rd-mode", SDHCI_CDNS_HRS09, 2,}, + { "cdns,hrs10-hcsdclkadj", SDHCI_CDNS_HRS10, 16,}, + { "cdns,hrs16-wrdata1-sdclk-dly", SDHCI_CDNS_HRS16, 28,}, + { "cdns,hrs16-wrdata0-sdclk-dly", SDHCI_CDNS_HRS16, 24,}, + { "cdns,hrs16-wrcmd1-sdclk-dly", SDHCI_CDNS_HRS16, 20,}, + { "cdns,hrs16-wrcmd0-sdclk-dly", SDHCI_CDNS_HRS16, 16,}, + { "cdns,hrs16-wrdata1-dly", SDHCI_CDNS_HRS16, 12,}, + { "cdns,hrs16-wrdata0-dly", 
SDHCI_CDNS_HRS16, 8,}, + { "cdns,hrs16-wrcmd1-dly", SDHCI_CDNS_HRS16, 4,}, + { "cdns,hrs16-wrcmd0-dly", SDHCI_CDNS_HRS16, 0,}, + { "cdns,hrs07-rw-compensate", SDHCI_CDNS_HRS07, 16,}, + { "cdns,hrs07-idelay-val", SDHCI_CDNS_HRS07, 0,}, }; static inline void cdns_writel(struct sdhci_cdns_priv *priv, u32 val, @@ -138,6 +257,206 @@ static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv, return ret; } +static int sdhci_cdns_write_phy_reg_mask(struct sdhci_cdns_priv *priv, + u32 addr, u32 data, u32 mask) +{ + u32 tmp; + + tmp = addr; + + /* get PHY address */ + writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS04); + + /* read current PHY register value, before write */ + tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS05); + + tmp &= ~mask; + tmp |= data; + + /* write operation */ + writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS05); + + /* re-read PHY address */ + writel(addr, priv->hrs_addr + SDHCI_CDNS_HRS04); + + /* re-read current PHY register value, check */ + tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS05); + + return 0; +} + +static u32 sdhci_cdns_read_phy_reg(struct sdhci_cdns_priv *priv, + u32 addr) +{ + u32 tmp; + + tmp = addr; + + /* get PHY address */ + writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS04); + + /* read current PHY register value, before write */ + tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS05); + + return tmp; +} + +static int sdhci_cdns_dfi_phy_val(struct sdhci_cdns_priv *priv, u32 reg) +{ + int i; + u32 tmp; + + tmp = 0; + + for (i = 0; i < priv->nr_phy_params; i++) { + if (priv->phy_params[i].addr == reg) + tmp |= priv->phy_params[i].data << priv->phy_params[i].offset; + } + + return tmp; +} + +static int sdhci_cdns_combophy_init_sd_dfi_init(struct sdhci_cdns_priv *priv) +{ + int ret = 0; + u32 mask = 0x0; + u32 tmp = 0; + + writel(0x0, priv->hrs_addr + SDHCI_CDNS_SRS11); + writel(1<<0, priv->hrs_addr + SDHCI_CDNS_HRS00); + while ((readl(priv->hrs_addr + SDHCI_CDNS_HRS00) & 1<<0) == 1) + + tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS09) & ~1; + 
writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS09); + + tmp = (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19); + ret = sdhci_cdns_write_phy_reg_mask(priv, PHY_DQS_TIMING_REG, tmp, tmp); + + tmp = (1 << 31) | (0 << 28) | (52 << 19) | (1 << 18) | (1 << 6); + mask = SYNC_METHOD | SW_HALF_CYCLE_SHIFT | RD_DEL_SEL | UNDERRUN_SUPPRESS | + GATE_CFG_ALWAYS_ON; + ret = sdhci_cdns_write_phy_reg_mask(priv, PHY_GATE_LPBK_CTRL_REG, tmp, + mask); + + tmp = (1 << 23) | (2 << 20) | (4 << 0); + mask = PARAM_DLL_BYPASS_MODE | PARAM_PHASE_DETECT_SEL | + PARAM_DLL_START_POINT; + ret = sdhci_cdns_write_phy_reg_mask(priv, PHY_DLL_MASTER_CTRL_REG, tmp, + mask); + + tmp = (0 << 24) | (0 << 16) | (0 << 8) | (0 << 0); + mask = READ_DQS_CMD_DELAY | CLK_WRDQS_DELAY | CLK_WR_DELAY | READ_DQS_DELAY; + ret = sdhci_cdns_write_phy_reg_mask(priv, PHY_DLL_SLAVE_CTRL_REG, tmp, + mask); + + writel(0x2080, priv->hrs_addr + SDHCI_CDNS_HRS04); + tmp &= ~(0x3f << 4); + mask = PHONY_DQS_TIMING; + ret = sdhci_cdns_write_phy_reg_mask(priv, PHY_CTRL_REG, tmp, mask); + + tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS09) | 1; + writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS09); + + while (~readl(priv->hrs_addr + SDHCI_CDNS_HRS09) & (1 << 1)) + + tmp = sdhci_cdns_read_phy_reg(priv, PHY_DQ_TIMING_REG) & 0x07FFFF8; + tmp |= (0 << 31) | (0 << 27) | (0 << 24) | (1 << 0); + mask = IO_MASK_ALWAYS_ON | IO_MASK_END | IO_MASK_START | DATA_SELECT_OE_END; + + ret = sdhci_cdns_write_phy_reg_mask(priv, PHY_DQ_TIMING_REG, tmp, mask); + + tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS09) & 0xFFFE7FF3; + + tmp |= (1 << 16) | (1 << 15) | (1 << 3) | (1 << 2); + writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS09); + + tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS10) & 0xFFF0FFFF; + + tmp |= (0 << 16); + writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS10); + + tmp = (0 << 28) | (0 << 24) | (0 << 20) | (0 << 16) | (0 << 12) | (1 << 8) | + (0 << 4) | (1 << 0); + writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS16); + + tmp = (9 << 16) | (0 << 0); + writel(tmp, 
priv->hrs_addr + SDHCI_CDNS_HRS07); + + return ret; +} + +static int sdhci_cdns_combophy_init_sd_gen(struct sdhci_cdns_priv *priv) +{ + u32 tmp; + int ret = 0; + u32 mask = 0x0; + + /* step 1, switch on DLL_RESET */ + tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS09) & ~1; + writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS09); + + /* step 2, program PHY_DQS_TIMING_REG */ + tmp = sdhci_cdns_dfi_phy_val(priv, PHY_DQS_TIMING_REG); + ret = sdhci_cdns_write_phy_reg_mask(priv, PHY_DQS_TIMING_REG, tmp, tmp); + + /* step 3, program PHY_GATE_LPBK_CTRL_REG */ + tmp = sdhci_cdns_dfi_phy_val(priv, PHY_GATE_LPBK_CTRL_REG); + mask = SYNC_METHOD | SW_HALF_CYCLE_SHIFT | RD_DEL_SEL | UNDERRUN_SUPPRESS | + GATE_CFG_ALWAYS_ON; + ret = sdhci_cdns_write_phy_reg_mask(priv, PHY_GATE_LPBK_CTRL_REG, tmp, + mask); + + /* step 4, program PHY_DLL_MASTER_CTRL_REG */ + tmp = sdhci_cdns_dfi_phy_val(priv, PHY_DLL_MASTER_CTRL_REG); + mask = PARAM_DLL_BYPASS_MODE | PARAM_PHASE_DETECT_SEL | + PARAM_DLL_START_POINT; + ret = sdhci_cdns_write_phy_reg_mask(priv, PHY_DLL_MASTER_CTRL_REG, tmp, + mask); + + /* step 5, program PHY_DLL_SLAVE_CTRL_REG */ + tmp = sdhci_cdns_dfi_phy_val(priv, PHY_DLL_SLAVE_CTRL_REG); + mask = READ_DQS_CMD_DELAY | CLK_WRDQS_DELAY | CLK_WR_DELAY | READ_DQS_DELAY; + ret = sdhci_cdns_write_phy_reg_mask(priv, PHY_DLL_SLAVE_CTRL_REG, tmp, + mask); + + /* step 7, switch off DLL_RESET */ + tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS09) | 1; + writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS09); + + /* step 8, polling PHY_INIT_COMPLETE */ + while (~readl(priv->hrs_addr + SDHCI_CDNS_HRS09) & (1 << 1)) + /* polling for PHY_INIT_COMPLETE bit */ + + /* step 9, program PHY_DQ_TIMING_REG */ + tmp = sdhci_cdns_read_phy_reg(priv, PHY_DQ_TIMING_REG) & 0x07FFFF8; + tmp |= sdhci_cdns_dfi_phy_val(priv, PHY_DQ_TIMING_REG); + mask = IO_MASK_ALWAYS_ON | IO_MASK_END | IO_MASK_START | DATA_SELECT_OE_END; + ret = sdhci_cdns_write_phy_reg_mask(priv, PHY_DQ_TIMING_REG, tmp, mask); + + /* step 10, program HRS09, 
register 42 */ + tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS09) & 0xFFFE7FF3; + + tmp |= sdhci_cdns_dfi_phy_val(priv, SDHCI_CDNS_HRS09); + writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS09); + + /* step 11, program HRS10, register 43 */ + tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS10) & 0xFFF0FFFF; + tmp |= sdhci_cdns_dfi_phy_val(priv, SDHCI_CDNS_HRS10); + writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS10); + + /* step 12, program HRS16, register 48 */ + tmp = sdhci_cdns_dfi_phy_val(priv, SDHCI_CDNS_HRS16); + writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS16); + + /* step 13, program HRS07, register 40 */ + tmp = sdhci_cdns_dfi_phy_val(priv, SDHCI_CDNS_HRS07); + writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS07); + /* end of combophy init */ + + return ret; +} + + static unsigned int sdhci_cdns_phy_param_count(struct device_node *np) { unsigned int count = 0; @@ -165,6 +484,7 @@ static void sdhci_cdns_phy_param_parse(struct device_node *np, p->addr = sdhci_cdns_phy_cfgs[i].addr; p->data = val; + p->offset = sdhci_cdns_phy_cfgs[i].offset; p++; } } @@ -174,6 +494,9 @@ static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv) int ret, i; for (i = 0; i < priv->nr_phy_params; i++) { + if (priv->phy_params[i].offset) + break; + ret = sdhci_cdns_write_phy_reg(priv, priv->phy_params[i].addr, priv->phy_params[i].data); if (ret) @@ -410,8 +733,19 @@ static int elba_drv_init(struct platform_device *pdev) return 0; } +static void sdhci_cdns_set_clock(struct sdhci_host *host, unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_cdns_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = sdhci_cdns_combophy_init_sd_gen(priv); + + sdhci_set_clock(host, clock); +} + static const struct sdhci_ops sdhci_cdns_ops = { - .set_clock = sdhci_set_clock, + .set_clock = sdhci_cdns_set_clock, .get_timeout_clock = sdhci_cdns_get_timeout_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, @@ -426,6 +760,13 @@ static const struct 
sdhci_cdns_drv_data sdhci_cdns_uniphier_drv_data = { }, }; +static const struct sdhci_cdns_drv_data sdhci_cdns_pltfm_data = { + .pltfm_data = { + .ops = &sdhci_cdns_ops, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + }, +}; + static const struct sdhci_cdns_drv_data sdhci_elba_drv_data = { .init = elba_drv_init, .pltfm_data = { @@ -439,6 +780,13 @@ static const struct sdhci_cdns_drv_data sdhci_cdns_drv_data = { }, }; +static const struct sdhci_cdns_drv_data sdhci_cdns_agilex5_pltfm_data = { + .pltfm_data = { + .ops = &sdhci_cdns_ops, + .quirks2 = SDHCI_QUIRK2_40_BIT_DMA_MASK, + }, +}; + static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios) { @@ -543,6 +891,10 @@ static int sdhci_cdns_probe(struct platform_device *pdev) host->mmc_host_ops.card_hw_reset = sdhci_cdns_mmc_hw_reset; } + ret = sdhci_cdns_combophy_init_sd_dfi_init(priv); + if (ret) + goto free; + ret = sdhci_add_host(host); if (ret) goto free; @@ -596,6 +948,10 @@ static const struct of_device_id sdhci_cdns_match[] = { .data = &sdhci_elba_drv_data, }, { .compatible = "cdns,sd4hc" }, + { + .compatible = "intel,agilex5-sd4hc", + .data = &sdhci_cdns_agilex5_pltfm_data, + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sdhci_cdns_match); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 4b91c9e966357..1f85b77e1afea 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -4062,14 +4062,22 @@ static int sdhci_set_dma_mask(struct sdhci_host *host) /* Try 64-bit mask if hardware is capable of it */ if (host->flags & SDHCI_USE_64_BIT_DMA) { - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); - if (ret) { - pr_warn("%s: Failed to set 64-bit DMA mask.\n", - mmc_hostname(mmc)); - host->flags &= ~SDHCI_USE_64_BIT_DMA; + if (host->quirks2 & SDHCI_QUIRK2_40_BIT_DMA_MASK) { + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); + if (ret) + pr_warn("%s: Failed to set 40-bit DMA mask.\n", + mmc_hostname(mmc)); } + else { + ret = 
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (ret) + pr_warn("%s: Failed to set 64-bit DMA mask.\n", + mmc_hostname(mmc)); + } + + if (ret) + host->flags &= ~SDHCI_USE_64_BIT_DMA; } - /* 32-bit mask as default & fallback */ if (ret) { ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index f531b617f28d7..2095f6172ec43 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -484,6 +484,8 @@ struct sdhci_host { #define SDHCI_QUIRK2_USE_32BIT_BLK_CNT (1<<18) /* Issue CMD and DATA reset together */ #define SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER (1<<19) +/* Set if controller supports 64-bit DMA but requires 40-bit DMA mask */ +#define SDHCI_QUIRK2_40_BIT_DMA_MASK (1<<20) int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index fca54e21a164f..09d76c75bdcdc 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -199,6 +199,7 @@ /* Common settings. */ #define COMMON_SET 0x1008 +#define OPR_MODE_NVDDR BIT(0) /* 16 bit device connected to the NAND Flash interface. */ #define COMMON_SET_DEVICE_16BIT BIT(8) @@ -211,12 +212,20 @@ #define SKIP_BYTES_OFFSET_VALUE GENMASK(23, 0) /* Timings configuration. 
*/ +#define TOGGLE_TIMINGS_0 0x1014 +#define TOGGLE_TIMINGS_1 0x1018 + #define ASYNC_TOGGLE_TIMINGS 0x101c #define ASYNC_TOGGLE_TIMINGS_TRH GENMASK(28, 24) #define ASYNC_TOGGLE_TIMINGS_TRP GENMASK(20, 16) #define ASYNC_TOGGLE_TIMINGS_TWH GENMASK(12, 8) #define ASYNC_TOGGLE_TIMINGS_TWP GENMASK(4, 0) +#define SYNC_TIMINGS 0x1020 +#define SYNC_TCKWR GENMASK(21, 16) +#define SYNC_TWRCK GENMASK(13, 8) +#define SYNC_TCAD GENMASK(5, 0) + #define TIMINGS0 0x1024 #define TIMINGS0_TADL GENMASK(31, 24) #define TIMINGS0_TCCS GENMASK(23, 16) @@ -226,6 +235,7 @@ #define TIMINGS1 0x1028 #define TIMINGS1_TRHZ GENMASK(31, 24) #define TIMINGS1_TWB GENMASK(23, 16) +#define TIMINGS1_TCWAW GENMASK(15, 8) #define TIMINGS1_TVDLY GENMASK(7, 0) #define TIMINGS2 0x102c @@ -243,14 +253,23 @@ /* Register controlling DQ related timing. */ #define PHY_DQ_TIMING 0x2000 +#define PHY_DQ_TIMING_OE_END GENMASK(2, 0) +#define PHY_DQ_TIMING_OE_START GENMASK(6, 4) +#define PHY_DQ_TIMING_TSEL_END GENMASK(11, 8) +#define PHY_DQ_TIMING_TSEL_START GENMASK(15, 12) + /* Register controlling DSQ related timing. */ #define PHY_DQS_TIMING 0x2004 #define PHY_DQS_TIMING_DQS_SEL_OE_END GENMASK(3, 0) +#define PHY_DQS_TIMING_DQS_SEL_OE_START GENMASK(7, 4) +#define PHY_DQS_TIMING_DQS_SEL_TSEL_END GENMASK(11, 8) #define PHY_DQS_TIMING_PHONY_DQS_SEL BIT(16) #define PHY_DQS_TIMING_USE_PHONY_DQS BIT(20) /* Register controlling the gate and loopback control related timing. */ #define PHY_GATE_LPBK_CTRL 0x2008 +#define PHY_GATE_LPBK_CTRL_GATE_CFG GENMASK(3, 0) +#define PHY_GATE_LPBK_CTRL_GATE_CFG_CLOSE GENMASK(5, 4) #define PHY_GATE_LPBK_CTRL_RDS GENMASK(24, 19) /* Register holds the control for the master DLL logic. */ @@ -260,6 +279,12 @@ /* Register holds the control for the slave DLL logic. */ #define PHY_DLL_SLAVE_CTRL 0x2010 +/* Register controls the DQS related timing. 
*/ +#define PHY_IE_TIMING 0x2014 +#define PHY_IE_TIMING_DQS_IE_START GENMASK(10, 8) +#define PHY_IE_TIMING_DQ_IE_START GENMASK(18, 16) +#define PHY_IE_TIMING_IE_ALWAYS_ON BIT(20) + /* This register handles the global control settings for the PHY. */ #define PHY_CTRL 0x2080 #define PHY_CTRL_SDR_DQS BIT(14) @@ -375,15 +400,41 @@ #define BCH_MAX_NUM_CORR_CAPS 8 #define BCH_MAX_NUM_SECTOR_SIZES 2 +/* NVDDR mode specific parameters and register values based on cadence specs */ +#define NVDDR_PHY_RD_DELAY 29 +#define NVDDR_PHY_RD_DELAY_MAX 31 +#define NVDDR_GATE_CFG_OPT 14 +#define NVDDR_GATE_CFG_MIN 7 +#define NVDDR_GATE_CFG_MAX 15 +#define NVDDR_DATA_SEL_OE_START 1 +#define NVDDR_DATA_SEL_OE_START_MAX 7 +#define NVDDR_DATA_SEL_OE_END 6 +#define NVDDR_DATA_SEL_OE_END_MIN 4 +#define NVDDR_DATA_SEL_OE_END_MAX 15 +#define NVDDR_RS_HIGH_WAIT_CNT 7 +#define NVDDR_RS_IDLE_CNT 7 +#define NVDDR_TCWAW_DELAY 250000 +#define NVDDR_TVDLY_DELAY 500000 +#define NVDDR_TOGGLE_TIMINGS_0 0x00000301 +#define NVDDR_TOGGLE_TIMINGS_1 0x0a060102 +#define NVDDR_ASYNC_TOGGLE_TIMINGS 0 +#define NVDDR_PHY_CTRL 0x00004000 +#define NVDDR_PHY_TSEL 0 +#define NVDDR_PHY_DLL_MASTER_CTRL 0x00140004 +#define NVDDR_PHY_DLL_SLAVE_CTRL 0x00003c3c + struct cadence_nand_timings { u32 async_toggle_timings; + u32 sync_timings; u32 timings0; u32 timings1; u32 timings2; u32 dll_phy_ctrl; u32 phy_ctrl; + u32 phy_dq_timing; u32 phy_dqs_timing; u32 phy_gate_lpbk_ctrl; + u32 phy_ie_timing; }; /* Command DMA descriptor. */ @@ -505,6 +556,7 @@ struct cdns_nand_ctrl { unsigned long assigned_cs; struct list_head chips; u8 bch_metadata_size; + bool is_ecc; }; struct cdns_nand_chip { @@ -1279,6 +1331,19 @@ cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr, else ctype = CDMA_CT_WR; + /* Workaround to force disable ECC while using the Command DMA mode. 
+ * if chip->ecc.engine_type=NAND_ECC_ENGINE_TYPE_NONE is used to disable + * the ECC then Generic work mode and Slave DMA interface is used by nand + * framework for page read/write and we don't want that because Agilex5 + * A0 silicon NAND SDMA address range is 4K, while the page size on some + * NAND Flash device could be more. + * So keeping chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST to + * use Command DMA mode for page read/write and do not enable ecc here. + */ + + if(!cdns_ctrl->is_ecc) + with_ecc = false; + cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc); dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir); @@ -2345,11 +2410,9 @@ static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min, return (trp_cnt + 1) * clk_period + trhoh_min - trea_max; } -static int -cadence_nand_setup_interface(struct nand_chip *chip, int chipnr, - const struct nand_interface_config *conf) +static int cadence_nand_setup_sdr_interface(struct nand_chip *chip, + const struct nand_sdr_timings *sdr) { - const struct nand_sdr_timings *sdr; struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); struct cadence_nand_timings *t = &cdns_chip->timings; @@ -2370,13 +2433,8 @@ cadence_nand_setup_interface(struct nand_chip *chip, int chipnr, u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0; u32 sampling_point; - sdr = nand_get_sdr_timings(conf); - if (IS_ERR(sdr)) - return PTR_ERR(sdr); - memset(t, 0, sizeof(*t)); /* Sampling point calculation. 
*/ - if (cdns_ctrl->caps2.is_phy_type_dll) phony_dqs_mod = 2; else @@ -2633,10 +2691,218 @@ cadence_nand_setup_interface(struct nand_chip *chip, int chipnr, PHY_DLL_MASTER_CTRL_BYPASS_MODE); dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0); } + return 0; +} + +static int +cadence_nand_setup_nvddr_interface(struct nand_chip *chip, + const struct nand_nvddr_timings *nvddr) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + struct cadence_nand_timings *t = &cdns_chip->timings; + u32 board_delay = cdns_ctrl->board_delay; + u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL, + cdns_ctrl->nf_clk_rate); + u32 ddr_clk_ctrl_period = clk_period * 2; + u32 if_skew = cdns_ctrl->caps1->if_skew; + u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt; + u32 twrck_cnt, tcad_cnt, tckwr_cnt = 0; + u32 tfeat_cnt, trhz_cnt, tvdly_cnt, tcwaw_cnt; + u32 trhw_cnt, twb_cnt, twhr_cnt; + u32 oe_start, oe_end, oe_end_dqsd; + u32 rd_del_sel = 0; + u32 dqs_driven_by_device, dqs_toogle_by_device, gate_open_delay; + u32 dll_phy_gate_open_delay, gate_close_delay, ie_start; + u32 dll_phy_rd_delay; + u32 reg; + + memset(t, 0, sizeof(*t)); + twrck_cnt = calc_cycl(nvddr->tWRCK_min, ddr_clk_ctrl_period); + tcad_cnt = calc_cycl(nvddr->tCAD_min, ddr_clk_ctrl_period); + + reg = FIELD_PREP(SYNC_TWRCK, twrck_cnt); + reg |= FIELD_PREP(SYNC_TCAD, tcad_cnt); + t->sync_timings = reg; + dev_dbg(cdns_ctrl->dev, "SYNC_TIMINGS_NVDDR\t%08x\n", reg); + + tadl_cnt = calc_cycl((nvddr->tADL_min + if_skew), ddr_clk_ctrl_period); + tccs_cnt = calc_cycl((nvddr->tCCS_min + if_skew), ddr_clk_ctrl_period); + twhr_cnt = calc_cycl((nvddr->tWHR_min + if_skew), ddr_clk_ctrl_period); + trhw_cnt = calc_cycl((nvddr->tRHW_min + if_skew), ddr_clk_ctrl_period); + reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt); + reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt); + reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt); + reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt); 
+ t->timings0 = reg; + dev_dbg(cdns_ctrl->dev, "TIMINGS0_NVDDR\t%08x\n", reg); + + twb_cnt = calc_cycl((nvddr->tWB_max + board_delay), + ddr_clk_ctrl_period); + /* + * Because of the two stage syncflop the value must be increased by 3 + * first value is related with sync, second value is related + * with output if delay. + */ + twb_cnt = twb_cnt + 3 + 5; + tvdly_cnt = calc_cycl(NVDDR_TVDLY_DELAY + if_skew, ddr_clk_ctrl_period); + tcwaw_cnt = calc_cycl(NVDDR_TCWAW_DELAY, ddr_clk_ctrl_period); + trhz_cnt = 1; + reg = FIELD_PREP(TIMINGS1_TWB, twb_cnt); + reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt); + reg |= FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt); + reg |= FIELD_PREP(TIMINGS1_TCWAW, tcwaw_cnt); + t->timings1 = reg; + dev_dbg(cdns_ctrl->dev, "TIMINGS1_NVDDR\t%08x\n", reg); + tfeat_cnt = calc_cycl(nvddr->tFEAT_max, ddr_clk_ctrl_period); + if (tfeat_cnt < twb_cnt) + tfeat_cnt = twb_cnt; + + tceh_cnt = calc_cycl(nvddr->tCEH_min, ddr_clk_ctrl_period); + tcs_cnt = calc_cycl((nvddr->tCS_min + if_skew), ddr_clk_ctrl_period); + reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt); + reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt); + reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt); + t->timings2 = reg; + dev_dbg(cdns_ctrl->dev, "TIMINGS2_NVDDR\t%08x\n", reg); + + reg = FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, NVDDR_RS_HIGH_WAIT_CNT); + reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, NVDDR_RS_IDLE_CNT); + t->dll_phy_ctrl = reg; + dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_NVDDR\t%08x\n", reg); + + reg = PHY_CTRL_SDR_DQS; + t->phy_ctrl = reg; + dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_NVDDR\t%08x\n", reg); + + dqs_driven_by_device = (nvddr->tDQSD_max + board_delay) / 1000 + + if_skew; + dqs_toogle_by_device = (nvddr->tDQSCK_max + board_delay) / 1000 - + if_skew; + gate_open_delay = dqs_toogle_by_device / (clk_period / 1000); + if (dqs_toogle_by_device > clk_period / 1000) { + if (gate_open_delay > NVDDR_GATE_CFG_OPT) + dll_phy_gate_open_delay = NVDDR_GATE_CFG_MAX; + else + dll_phy_gate_open_delay 
= gate_open_delay + 1; + gate_close_delay = 0; + } else { + twrck_cnt = calc_cycl(dqs_driven_by_device * 1000, clk_period); + dll_phy_gate_open_delay = 1; + gate_close_delay = 0; + + reg = FIELD_PREP(SYNC_TCKWR, tckwr_cnt); + reg |= FIELD_PREP(SYNC_TWRCK, twrck_cnt); + reg |= FIELD_PREP(SYNC_TCAD, tcad_cnt); + t->sync_timings = reg; + dev_dbg(cdns_ctrl->dev, "SYNC_TIMINGS_NVDDR\t%08x\n", reg); + } + + if (dll_phy_gate_open_delay > NVDDR_GATE_CFG_MIN) + ie_start = NVDDR_GATE_CFG_MIN; + else + ie_start = dll_phy_gate_open_delay; + + dll_phy_rd_delay = ((nvddr->tDQSCK_max + board_delay) + + (clk_period / 2)) / clk_period; + if (dll_phy_rd_delay <= NVDDR_PHY_RD_DELAY) + rd_del_sel = dll_phy_rd_delay + 2; + else + rd_del_sel = NVDDR_PHY_RD_DELAY_MAX; + + reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_GATE_CFG, dll_phy_gate_open_delay); + reg |= FIELD_PREP(PHY_GATE_LPBK_CTRL_GATE_CFG_CLOSE, gate_close_delay); + reg |= FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel); + t->phy_gate_lpbk_ctrl = reg; + dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_NVDDR\t%08x\n", reg); + + oe_end_dqsd = ((nvddr->tDQSD_max / 1000) / ((clk_period / 2) / 1000)) + + NVDDR_DATA_SEL_OE_END_MIN; + oe_end = (NVDDR_DATA_SEL_OE_END_MIN + oe_end_dqsd) / 2; + if (oe_end > NVDDR_DATA_SEL_OE_END_MAX) + oe_end = NVDDR_DATA_SEL_OE_END_MAX; + + oe_start = ((nvddr->tDQSHZ_max / 1000) / ((clk_period / 2) / 1000)) + 1; + if (oe_start > NVDDR_DATA_SEL_OE_START_MAX) + oe_start = NVDDR_DATA_SEL_OE_START_MAX; + + reg = FIELD_PREP(PHY_DQ_TIMING_OE_END, NVDDR_DATA_SEL_OE_END); + reg |= FIELD_PREP(PHY_DQ_TIMING_OE_START, NVDDR_DATA_SEL_OE_START); + reg |= FIELD_PREP(PHY_DQ_TIMING_TSEL_END, NVDDR_DATA_SEL_OE_END); + reg |= FIELD_PREP(PHY_DQ_TIMING_TSEL_START, NVDDR_DATA_SEL_OE_START); + t->phy_dq_timing = reg; + dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_NVDDR\t%08x\n", reg); + + reg = FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, oe_end); + reg |= FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_START, oe_start); + reg |= 
FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_TSEL_END, oe_end); + t->phy_dqs_timing = reg; + dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_NVDDR\t%08x\n", reg); + + reg = FIELD_PREP(PHY_IE_TIMING_DQS_IE_START, ie_start); + reg |= FIELD_PREP(PHY_IE_TIMING_DQ_IE_START, ie_start); + reg |= FIELD_PREP(PHY_IE_TIMING_IE_ALWAYS_ON, 0); + t->phy_ie_timing = reg; + dev_dbg(cdns_ctrl->dev, "PHY_IE_TIMING_REG_NVDDR\t%08x\n", reg); + + reg = readl_relaxed(cdns_ctrl->reg + DLL_PHY_CTRL); + reg &= ~(DLL_PHY_CTRL_DLL_RST_N | + DLL_PHY_CTRL_EXTENDED_RD_MODE | + DLL_PHY_CTRL_EXTENDED_WR_MODE); + writel_relaxed(reg, cdns_ctrl->reg + DLL_PHY_CTRL); + writel_relaxed(OPR_MODE_NVDDR, cdns_ctrl->reg + COMMON_SET); + writel_relaxed(NVDDR_TOGGLE_TIMINGS_0, + cdns_ctrl->reg + TOGGLE_TIMINGS_0); + writel_relaxed(NVDDR_TOGGLE_TIMINGS_1, + cdns_ctrl->reg + TOGGLE_TIMINGS_1); + writel_relaxed(NVDDR_ASYNC_TOGGLE_TIMINGS, + cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS); + writel_relaxed(t->sync_timings, cdns_ctrl->reg + SYNC_TIMINGS); + writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0); + writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1); + writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2); + writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL); + writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL); + writel_relaxed(NVDDR_PHY_TSEL, cdns_ctrl->reg + PHY_TSEL); + writel_relaxed(t->phy_dq_timing, cdns_ctrl->reg + PHY_DQ_TIMING); + writel_relaxed(t->phy_dqs_timing, cdns_ctrl->reg + PHY_DQS_TIMING); + writel_relaxed(t->phy_gate_lpbk_ctrl, + cdns_ctrl->reg + PHY_GATE_LPBK_CTRL); + writel_relaxed(NVDDR_PHY_DLL_MASTER_CTRL, + cdns_ctrl->reg + PHY_DLL_MASTER_CTRL); + writel_relaxed(NVDDR_PHY_DLL_SLAVE_CTRL, + cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL); + writel_relaxed(t->phy_ie_timing, cdns_ctrl->reg + PHY_IE_TIMING); + writel_relaxed((reg | DLL_PHY_CTRL_DLL_RST_N), + cdns_ctrl->reg + DLL_PHY_CTRL); return 0; } +static int +cadence_nand_setup_interface(struct nand_chip *chip, int chipnr, + const struct 
nand_interface_config *conf) +{ + int ret = 0; + + if (nand_interface_is_sdr(conf)) { + const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf); + + if (IS_ERR(sdr)) + return PTR_ERR(sdr); + + ret = cadence_nand_setup_sdr_interface(chip, sdr); + } else if (chipnr >= 0) { + const struct nand_nvddr_timings *nvddr = nand_get_nvddr_timings(conf); + + if (IS_ERR(nvddr)) + return PTR_ERR(nvddr); + + ret = cadence_nand_setup_nvddr_interface(chip, nvddr); + } + return ret; +} + static int cadence_nand_attach_chip(struct nand_chip *chip) { struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); @@ -2871,7 +3137,7 @@ cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl) static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) { dma_cap_mask_t mask; - struct dma_device *dma_dev = cdns_ctrl->dmac->device; + struct dma_device *dma_dev; int ret; cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev, @@ -2915,6 +3181,7 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) } } + dma_dev = cdns_ctrl->dmac->device; cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma, cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0); @@ -2972,8 +3239,10 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl) { cadence_nand_chips_cleanup(cdns_ctrl); - dma_unmap_resource(cdns_ctrl->dmac->device->dev, cdns_ctrl->io.iova_dma, - cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0); + if (cdns_ctrl->dmac) + dma_unmap_resource(cdns_ctrl->dmac->device->dev, + cdns_ctrl->io.iova_dma, cdns_ctrl->io.size, + DMA_BIDIRECTIONAL, 0); cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl); kfree(cdns_ctrl->buf); dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc), @@ -3058,6 +3327,11 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev) } cdns_ctrl->board_delay = val; + if(of_property_read_bool(ofdev->dev.of_node, "disable-ecc")) + 
cdns_ctrl->is_ecc = false; + else + cdns_ctrl->is_ecc = true; + ret = cadence_nand_init(cdns_ctrl); if (ret) return ret; diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index 9d6e85bf227b9..aebc2c2a3e3ba 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -3250,7 +3250,8 @@ static void spi_nor_put_device(struct mtd_info *mtd) else dev = nor->dev; - module_put(dev->driver->owner); + if (dev && dev->driver && dev->driver->owner) + module_put(dev->driver->owner); } static void spi_nor_restore(struct spi_nor *nor) diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c index ef1edd0add70e..10d2ceb7eb0b0 100644 --- a/drivers/mtd/spi-nor/gigadevice.c +++ b/drivers/mtd/spi-nor/gigadevice.c @@ -82,6 +82,11 @@ static const struct flash_info gigadevice_nor_parts[] = { .size = SZ_16M, .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB, .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, + }, { + .id = SNOR_ID(0xc8, 0x67, 0x1c), + .name = "gd55lb02ge", + .size = SZ_256M, + .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6, }, }; diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c index 18d9a00aa22eb..9e8d2098bce22 100644 --- a/drivers/mtd/spi-nor/issi.c +++ b/drivers/mtd/spi-nor/issi.c @@ -126,6 +126,9 @@ static const struct flash_info issi_nor_parts[] = { .flags = SPI_NOR_QUAD_PP, .fixups = &is25lp256_fixups, .fixup_flags = SPI_NOR_4B_OPCODES, + }, { + .id = SNOR_ID(0x9d, 0x70, 0x21), + .name = "is25wp01gg", } }; diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c index ea6be95e75a52..7fbd7792d701d 100644 --- a/drivers/mtd/spi-nor/macronix.c +++ b/drivers/mtd/spi-nor/macronix.c @@ -143,6 +143,12 @@ static const struct flash_info macronix_nor_parts[] = { .size = SZ_64M, .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, .fixup_flags = SPI_NOR_4B_OPCODES, + }, { + .id = SNOR_ID(0xc2, 0x25, 0x3b), + .name = "mx66u1g45g", + .size = 
SZ_128M, + .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, + .fixup_flags = SPI_NOR_4B_OPCODES, }, { .id = SNOR_ID(0xc2, 0x25, 0x3c), .name = "mx66u2g45g", diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 99fa180dedb80..ad5371c2a3349 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -13,7 +13,7 @@ obj-$(CONFIG_NET_VENDOR_AGERE) += agere/ obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/ obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ -obj-$(CONFIG_ALTERA_TSE) += altera/ +obj-$(CONFIG_NET_ALTERA_ETH) += altera/ obj-$(CONFIG_NET_VENDOR_AMAZON) += amazon/ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ obj-$(CONFIG_NET_XGENE) += apm/ diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig index 4ef819a9a1ad8..58f721c53e23d 100644 --- a/drivers/net/ethernet/altera/Kconfig +++ b/drivers/net/ethernet/altera/Kconfig @@ -1,15 +1,49 @@ # SPDX-License-Identifier: GPL-2.0-only +config NET_ALTERA_ETH + tristate "Altera Ethernet drivers" + depends on HAS_DMA + help + This driver supports ethernet drivers built around Altera + Ethernet and DMA IP. + + If you have a network (Ethernet) IP belonging to this class, say Y. + +if NET_ALTERA_ETH + config ALTERA_TSE tristate "Altera Triple-Speed Ethernet MAC support" - depends on HAS_DMA - depends on HAS_IOMEM + depends on PTP_1588_CLOCK select PHYLIB select PHYLINK - select PCS_LYNX - select MDIO_REGMAP - select REGMAP_MMIO + select PCS_ALTERA_TSE + help + imply PTP_1588_CLOCK + ---help--- help This driver supports the Altera Triple-Speed (TSE) Ethernet MAC. To compile this driver as a module, choose M here. The module will be called alteratse. + +config INTEL_FPGA_QSE_LL + tristate "Intel FPGA Quad-Speed Low Latency Ethernet MAC support" + select PTP_1588_CLOCK + select PHYLINK + help + This driver supports the Intel FPGA Low Latency Quad-Speed (QSE) + Ethernet MAC. 
+ + To compile this driver as a module, choose M here. The module + will be called intel_fpga_qse_ll. + +config INTEL_FPGA_ETILE + tristate "Intel FPGA E-tile Ethernet MAC support" + select PTP_1588_CLOCK + select PHYLINK + help + This driver supports the Intel FPGA E-tile Ethernet MAC. + + To compile this driver as a module, choose M here. The module + will be called intel_fpga_etile. + +endif diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile index a52db80aee9f1..f82fe30fc49a1 100644 --- a/drivers/net/ethernet/altera/Makefile +++ b/drivers/net/ethernet/altera/Makefile @@ -3,6 +3,20 @@ # Makefile for the Altera device drivers. # -obj-$(CONFIG_ALTERA_TSE) += altera_tse.o -altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \ -altera_msgdma.o altera_sgdma.o altera_utils.o +obj-$(CONFIG_NET_ALTERA_ETH) += altera_eth.o +altera_eth-objs := altera_utils.o intel_fpga_tod.o altera_eth_dma.o \ + altera_sgdma.o altera_msgdma.o \ + altera_msgdma_prefetcher.o +ifeq ($(CONFIG_ALTERA_TSE),y) + obj-$(CONFIG_ALTERA_TSE) += altera_tse.o + altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o +endif +ifeq ($(CONFIG_INTEL_FPGA_QSE_LL),y) + obj-$(CONFIG_INTEL_FPGA_QSE_LL) += intel_fpga_qse_ll.o + intel_fpga_qse_ll-objs := intel_fpga_qse_ll_main.o intel_fpga_qse_ll_ethtool.o +endif +ifeq ($(CONFIG_INTEL_FPGA_ETILE),y) + obj-$(CONFIG_INTEL_FPGA_ETILE) += intel_fpga_etile.o + intel_fpga_etile-objs := intel_fpga_etile_main.o intel_fpga_etile_fec.o \ + intel_fpga_etile_ethtool.o +endif diff --git a/drivers/net/ethernet/altera/altera_eth_dma.c b/drivers/net/ethernet/altera/altera_eth_dma.c new file mode 100644 index 0000000000000..a4cfdf3abe1c8 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_eth_dma.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0 +/* DMA support for Intel FPGA Quad-Speed Ethernet MAC driver + * Copyright (C) 2019 Intel Corporation. 
All rights reserved + * + * Contributors: + * Dalon Westergreen + * Thomas Chou + * Ian Abbott + * Yuriy Kozlov + * Tobias Klauser + * Andriy Smolskyy + * Roman Bulgakov + * Dmytro Mytarchuk + * Matthew Gerlach + * Joyce Ooi + */ + +#include +#include +#include +#include + +#include "altera_eth_dma.h" +#include "altera_utils.h" + +/* Probe DMA + */ +int altera_eth_dma_probe(struct platform_device *pdev, + struct altera_dma_private *priv, + enum altera_dma_type type) +{ + int ret = -ENODEV; + struct resource *dma_res; + void __iomem *descmap; + + /* xSGDMA Rx Dispatcher address space */ + ret = request_and_map(pdev, "rx_csr", &dma_res, + &priv->rx_dma_csr); + if (ret) + goto err; + + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tDMA RX CSR at 0x%08lx\n", + (unsigned long)dma_res->start); + + /* mSGDMA Tx Dispatcher address space */ + ret = request_and_map(pdev, "tx_csr", &dma_res, + &priv->tx_dma_csr); + if (ret) + goto err; + + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tDMA TX CSR at 0x%08lx\n", + (unsigned long)dma_res->start); + + switch (type) { + case ALTERA_DTYPE_SGDMA: + /* Get the mapped address to the SGDMA descriptor memory */ + ret = request_and_map(pdev, "s1", &dma_res, &descmap); + if (ret) + break; + + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tDMA Desc Mem at 0x%08lx\n", + (unsigned long)dma_res->start); + + /* Start of that memory is for transmit descriptors */ + priv->tx_dma_desc = descmap; + + /* First half is for tx descriptors, other half for tx */ + priv->txdescmem = resource_size(dma_res) / 2; + + priv->txdescmem_busaddr = (dma_addr_t)dma_res->start; + + priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap + + priv->txdescmem)); + priv->rxdescmem = resource_size(dma_res) / 2; + priv->rxdescmem_busaddr = dma_res->start; + priv->rxdescmem_busaddr += priv->txdescmem; + + if (upper_32_bits(priv->rxdescmem_busaddr)) + ret = -EINVAL; + + if (upper_32_bits(priv->txdescmem_busaddr)) + ret = -EINVAL; + break; + case 
ALTERA_DTYPE_MSGDMA: + ret = request_and_map(pdev, "rx_resp", &dma_res, + &priv->rx_dma_resp); + if (ret) + break; + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tRX Resp Slave at 0x%08lx\n", + (unsigned long)dma_res->start); + + ret = request_and_map(pdev, "tx_desc", &dma_res, + &priv->tx_dma_desc); + if (ret) + break; + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tTX Desc Slave at 0x%08lx\n", + (unsigned long)dma_res->start); + + priv->txdescmem = resource_size(dma_res); + priv->txdescmem_busaddr = dma_res->start; + + ret = request_and_map(pdev, "rx_desc", &dma_res, + &priv->rx_dma_desc); + if (ret) + break; + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tRX Desc Slave at 0x%08lx\n", + (unsigned long)dma_res->start); + + priv->rxdescmem = resource_size(dma_res); + priv->rxdescmem_busaddr = dma_res->start; + break; + case ALTERA_DTYPE_MSGDMA_PTP: + ret = request_and_map(pdev, "rx_desc", &dma_res, + &priv->rx_dma_desc); + if (ret) + break; + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tRX Desc Slave at 0x%08lx\n", + (unsigned long)dma_res->start); + + priv->rxdescmem = resource_size(dma_res); + priv->rxdescmem_busaddr = dma_res->start; + + ret = request_and_map(pdev, "rx_resp", &dma_res, + &priv->rx_dma_resp); + if (ret) + break; + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tRX Resp Slave at 0x%08lx\n", + (unsigned long)dma_res->start); + + ret = request_and_map(pdev, "tx_desc", &dma_res, + &priv->tx_dma_desc); + if (ret) + break; + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tTX Desc Slave at 0x%08lx\n", + (unsigned long)dma_res->start); + + priv->txdescmem = resource_size(dma_res); + priv->txdescmem_busaddr = dma_res->start; + + ret = request_and_map(pdev, "tx_resp", &dma_res, + &priv->tx_dma_resp); + if (ret) + break; + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tTX Resp Slave at 0x%08lx\n", + (unsigned long)dma_res->start); + break; + case ALTERA_DTYPE_MSGDMA_PREF: + /* mSGDMA Rx Prefetcher address space 
*/ + ret = request_and_map(pdev, "rx_pref", &dma_res, + &priv->rx_pref_csr); + if (ret) + break; + + /* mSGDMA Tx Prefetcher address space */ + ret = request_and_map(pdev, "tx_pref", &dma_res, + &priv->tx_pref_csr); + if (ret) + break; + + /* get prefetcher rx poll frequency from device tree */ + if (of_property_read_u32(pdev->dev.of_node, + "rx-poll-freq", + &priv->rx_poll_freq)) { + dev_info(&pdev->dev, "Defaulting RX Poll Frequency to 128\n"); + priv->rx_poll_freq = 128; + } + + /* get prefetcher rx poll frequency from device tree */ + if (of_property_read_u32(pdev->dev.of_node, + "tx-poll-freq", + &priv->tx_poll_freq)) { + dev_info(&pdev->dev, "Defaulting TX Poll Frequency to 128\n"); + priv->tx_poll_freq = 128; + } + break; + default: + ret = -ENODEV; + break; + } +err: + return ret; +}; +EXPORT_SYMBOL_GPL(altera_eth_dma_probe); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/altera/altera_eth_dma.h b/drivers/net/ethernet/altera/altera_eth_dma.h new file mode 100644 index 0000000000000..a847b57ffeb4e --- /dev/null +++ b/drivers/net/ethernet/altera/altera_eth_dma.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* DMA support for Intel FPGA Quad-Speed Ethernet MAC driver + * Copyright (C) 2019 Intel Corporation. 
All rights reserved + * + * Contributors: + * Dalon Westergreen + * Thomas Chou + * Ian Abbott + * Yuriy Kozlov + * Tobias Klauser + * Andriy Smolskyy + * Roman Bulgakov + * Dmytro Mytarchuk + * Matthew Gerlach + * Joyce Ooi + */ + +#ifndef __ALTERA_ETH_DMA_H__ +#define __ALTERA_ETH_DMA_H__ + +#include +#include +#include +#include + +#define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR 10000 + +struct altera_dma_private { + struct net_device *dev; + struct device *device; + + /* mSGDMA Rx Dispatcher address space */ + void __iomem *rx_dma_csr; + void __iomem *rx_dma_desc; + void __iomem *rx_dma_resp; + + /* mSGDMA Tx Dispatcher address space */ + void __iomem *tx_dma_csr; + void __iomem *tx_dma_desc; + void __iomem *tx_dma_resp; + + /* mSGDMA Rx Prefecher address space */ + void __iomem *rx_pref_csr; + struct msgdma_pref_extended_desc *pref_rxdesc; + dma_addr_t pref_rxdescphys; + u32 pref_rx_prod; + + /* mSGDMA Tx Prefecher address space */ + void __iomem *tx_pref_csr; + struct msgdma_pref_extended_desc *pref_txdesc; + dma_addr_t pref_txdescphys; + u32 rx_poll_freq; + u32 tx_poll_freq; + + /* Rx buffers queue */ + struct altera_dma_buffer *rx_ring; + u32 rx_cons; + u32 rx_prod; + u32 rx_ring_size; + u32 rx_dma_buf_sz; + + /* Tx ring buffer */ + struct altera_dma_buffer *tx_ring; + u32 tx_prod; + u32 tx_cons; + u32 tx_ring_size; + + /* Descriptor memory info for managing SGDMA */ + u32 txdescmem; + u32 rxdescmem; + dma_addr_t rxdescmem_busaddr; + dma_addr_t txdescmem_busaddr; + u32 txctrlreg; + u32 rxctrlreg; + dma_addr_t rxdescphys; + dma_addr_t txdescphys; + + struct list_head txlisthd; + struct list_head rxlisthd; + + int hwts_tx_en; + int hwts_rx_en; + + /* ethtool msglvl option */ + u32 msg_enable; +}; + +/* Wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct altera_dma_buffer { + struct list_head lh; + struct sk_buff *skb; + dma_addr_t dma_addr; + u32 len; + int mapped_as_page; +}; + +enum altera_dma_type { + 
ALTERA_DTYPE_SGDMA = 1, + ALTERA_DTYPE_MSGDMA = 2, + ALTERA_DTYPE_MSGDMA_PTP = 3, + ALTERA_DTYPE_MSGDMA_PREF = 4, +}; + +struct altera_dma_resp { + u32 status; + u32 external_resp[4]; +}; + +/* standard DMA interface for SGDMA and MSGDMA */ +struct altera_dmaops { + enum altera_dma_type altera_dtype; + int dmamask; + void (*reset_dma)(struct altera_dma_private *priv); + void (*enable_txirq)(struct altera_dma_private *priv); + void (*enable_rxirq)(struct altera_dma_private *priv); + void (*disable_txirq)(struct altera_dma_private *priv); + void (*disable_rxirq)(struct altera_dma_private *priv); + void (*clear_txirq)(struct altera_dma_private *priv); + void (*clear_rxirq)(struct altera_dma_private *priv); + netdev_tx_t (*tx_buffer)(struct altera_dma_private *priv, + struct altera_dma_buffer *buffer); + u32 (*tx_completions)(struct altera_dma_private *priv); + void (*add_rx_desc)(struct altera_dma_private *priv, + struct altera_dma_buffer *buffer); + u32 (*get_rx_status)(struct altera_dma_private *priv); + int (*init_dma)(struct altera_dma_private *priv); + void (*uninit_dma)(struct altera_dma_private *priv); + void (*start_rxdma)(struct altera_dma_private *priv); + void (*start_txdma)(struct altera_dma_private *priv); +}; + +int altera_eth_dma_probe(struct platform_device *pdev, + struct altera_dma_private *priv, + enum altera_dma_type type); + +#endif /* __ALTERA_ETH_DMA_H__ */ diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c index ac1efd08267a6..399315dc43632 100644 --- a/drivers/net/ethernet/altera/altera_msgdma.c +++ b/drivers/net/ethernet/altera/altera_msgdma.c @@ -5,25 +5,25 @@ #include #include "altera_utils.h" -#include "altera_tse.h" +#include "altera_eth_dma.h" #include "altera_msgdmahw.h" #include "altera_msgdma.h" /* No initialization work to do for MSGDMA */ -int msgdma_initialize(struct altera_tse_private *priv) +int msgdma_initialize(struct altera_dma_private *priv) { return 0; } -void 
msgdma_uninitialize(struct altera_tse_private *priv) +void msgdma_uninitialize(struct altera_dma_private *priv) { } -void msgdma_start_rxdma(struct altera_tse_private *priv) +void msgdma_start_rxdma(struct altera_dma_private *priv) { } -void msgdma_reset(struct altera_tse_private *priv) +void msgdma_reset(struct altera_dma_private *priv) { int counter; @@ -71,42 +71,43 @@ void msgdma_reset(struct altera_tse_private *priv) csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status)); } -void msgdma_disable_rxirq(struct altera_tse_private *priv) +void msgdma_disable_rxirq(struct altera_dma_private *priv) { tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control), MSGDMA_CSR_CTL_GLOBAL_INTR); } -void msgdma_enable_rxirq(struct altera_tse_private *priv) +void msgdma_enable_rxirq(struct altera_dma_private *priv) { tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control), MSGDMA_CSR_CTL_GLOBAL_INTR); } -void msgdma_disable_txirq(struct altera_tse_private *priv) +void msgdma_disable_txirq(struct altera_dma_private *priv) { tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control), MSGDMA_CSR_CTL_GLOBAL_INTR); } -void msgdma_enable_txirq(struct altera_tse_private *priv) +void msgdma_enable_txirq(struct altera_dma_private *priv) { tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control), MSGDMA_CSR_CTL_GLOBAL_INTR); } -void msgdma_clear_rxirq(struct altera_tse_private *priv) +void msgdma_clear_rxirq(struct altera_dma_private *priv) { csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status)); } -void msgdma_clear_txirq(struct altera_tse_private *priv) +void msgdma_clear_txirq(struct altera_dma_private *priv) { csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status)); } /* return 0 to indicate transmit is pending */ -int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) +netdev_tx_t msgdma_tx_buffer(struct altera_dma_private *priv, + struct altera_dma_buffer *buffer) { csrwr32(lower_32_bits(buffer->dma_addr), 
priv->tx_dma_desc, msgdma_descroffs(read_addr_lo)); @@ -120,10 +121,10 @@ int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) msgdma_descroffs(stride)); csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc, msgdma_descroffs(control)); - return 0; + return NETDEV_TX_OK; } -u32 msgdma_tx_completions(struct altera_tse_private *priv) +u32 msgdma_tx_completions(struct altera_dma_private *priv) { u32 ready = 0; u32 inuse; @@ -149,8 +150,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv) /* Put buffer to the mSGDMA RX FIFO */ -void msgdma_add_rx_desc(struct altera_tse_private *priv, - struct tse_buffer *rxbuffer) +void msgdma_add_rx_desc(struct altera_dma_private *priv, + struct altera_dma_buffer *rxbuffer) { u32 len = priv->rx_dma_buf_sz; dma_addr_t dma_addr = rxbuffer->dma_addr; @@ -176,7 +177,7 @@ void msgdma_add_rx_desc(struct altera_tse_private *priv, /* status is returned on upper 16 bits, * length is returned in lower 16 bits */ -u32 msgdma_rx_status(struct altera_tse_private *priv) +u32 msgdma_rx_status(struct altera_dma_private *priv) { u32 rxstatus = 0; u32 pktlength; diff --git a/drivers/net/ethernet/altera/altera_msgdma.h b/drivers/net/ethernet/altera/altera_msgdma.h index 9813fbfff4d39..378738f86a09f 100644 --- a/drivers/net/ethernet/altera/altera_msgdma.h +++ b/drivers/net/ethernet/altera/altera_msgdma.h @@ -1,24 +1,37 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* Altera TSE SGDMA and MSGDMA Linux driver * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . */ #ifndef __ALTERA_MSGDMA_H__ #define __ALTERA_MSGDMA_H__ -void msgdma_reset(struct altera_tse_private *); -void msgdma_enable_txirq(struct altera_tse_private *); -void msgdma_enable_rxirq(struct altera_tse_private *); -void msgdma_disable_rxirq(struct altera_tse_private *); -void msgdma_disable_txirq(struct altera_tse_private *); -void msgdma_clear_rxirq(struct altera_tse_private *); -void msgdma_clear_txirq(struct altera_tse_private *); -u32 msgdma_tx_completions(struct altera_tse_private *); -void msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *); -int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *); -u32 msgdma_rx_status(struct altera_tse_private *); -int msgdma_initialize(struct altera_tse_private *); -void msgdma_uninitialize(struct altera_tse_private *); -void msgdma_start_rxdma(struct altera_tse_private *); +void msgdma_reset(struct altera_dma_private *priv); +void msgdma_enable_txirq(struct altera_dma_private *priv); +void msgdma_enable_rxirq(struct altera_dma_private *priv); +void msgdma_disable_rxirq(struct altera_dma_private *priv); +void msgdma_disable_txirq(struct altera_dma_private *priv); +void msgdma_clear_rxirq(struct altera_dma_private *priv); +void msgdma_clear_txirq(struct altera_dma_private *priv); +u32 msgdma_tx_completions(struct altera_dma_private *priv); +void msgdma_add_rx_desc(struct altera_dma_private *priv, + struct altera_dma_buffer *buffer); +netdev_tx_t msgdma_tx_buffer(struct altera_dma_private *priv, + struct altera_dma_buffer *buffer); +u32 msgdma_rx_status(struct altera_dma_private *priv); +int msgdma_initialize(struct altera_dma_private *priv); +void msgdma_uninitialize(struct altera_dma_private *priv); +void msgdma_start_rxdma(struct altera_dma_private *priv); #endif /* __ALTERA_MSGDMA_H__ */ diff --git 
a/drivers/net/ethernet/altera/altera_msgdma_prefetcher.c b/drivers/net/ethernet/altera/altera_msgdma_prefetcher.c new file mode 100644 index 0000000000000..b199992bc7ee9 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_msgdma_prefetcher.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0 +/* MSGDMA Prefetcher driver for Altera ethernet devices + * + * Copyright (C) 2020 Intel Corporation. All rights reserved. + * Author(s): + * Dalon Westergreen + */ + +#include +#include +#include +#include "altera_eth_dma.h" +#include "altera_msgdma.h" +#include "altera_msgdmahw.h" +#include "altera_msgdma_prefetcher.h" +#include "altera_msgdmahw_prefetcher.h" +#include "altera_utils.h" + +int msgdma_pref_initialize(struct altera_dma_private *priv) +{ + int i; + struct msgdma_pref_extended_desc *rx_descs; + struct msgdma_pref_extended_desc *tx_descs; + dma_addr_t rx_descsphys; + dma_addr_t tx_descsphys; + + priv->pref_rxdescphys = (dma_addr_t)0; + priv->pref_txdescphys = (dma_addr_t)0; + + /* we need to allocate more pref descriptors than ringsize to + * prevent all of the descriptors being owned by hw. To do this + * we just allocate twice ring_size descriptors. + * rx_ring_size = priv->rx_ring_size * 2 + * tx_ring_size = priv->tx_ring_size * 2 + */ + + /* The prefetcher requires the descriptors to be aligned to the + * descriptor read/write master's data width which worst case is + * 512 bits. Currently we DO NOT CHECK THIS and only support 32-bit + * prefetcher masters. 
+ */ + + /* allocate memory for rx descriptors */ + priv->pref_rxdesc = + dma_alloc_coherent(priv->device, + sizeof(struct msgdma_pref_extended_desc) + * priv->rx_ring_size * 2, + &priv->pref_rxdescphys, GFP_KERNEL); + + if (!priv->pref_rxdesc) + goto err_rx; + + /* allocate memory for tx descriptors */ + priv->pref_txdesc = + dma_alloc_coherent(priv->device, + sizeof(struct msgdma_pref_extended_desc) + * priv->tx_ring_size * 2, + &priv->pref_txdescphys, GFP_KERNEL); + + if (!priv->pref_txdesc) + goto err_tx; + + /* setup base descriptor ring for tx & rx */ + rx_descs = (struct msgdma_pref_extended_desc *)priv->pref_rxdesc; + tx_descs = (struct msgdma_pref_extended_desc *)priv->pref_txdesc; + tx_descsphys = priv->pref_txdescphys; + rx_descsphys = priv->pref_rxdescphys; + + /* setup RX descriptors */ + priv->pref_rx_prod = 0; + for (i = 0; i < priv->rx_ring_size * 2; i++) { + rx_descsphys = priv->pref_rxdescphys + + (((i + 1) % (priv->rx_ring_size * 2)) * + sizeof(struct msgdma_pref_extended_desc)); + rx_descs[i].next_desc_lo = lower_32_bits(rx_descsphys); + rx_descs[i].next_desc_hi = upper_32_bits(rx_descsphys); + rx_descs[i].stride = MSGDMA_DESC_RX_STRIDE; + /* burst set to 0 so it defaults to max configured */ + /* set seq number to desc number */ + rx_descs[i].burst_seq_num = i; + } + + /* setup TX descriptors */ + for (i = 0; i < priv->tx_ring_size * 2; i++) { + tx_descsphys = priv->pref_txdescphys + + (((i + 1) % (priv->tx_ring_size * 2)) * + sizeof(struct msgdma_pref_extended_desc)); + tx_descs[i].next_desc_lo = lower_32_bits(tx_descsphys); + tx_descs[i].next_desc_hi = upper_32_bits(tx_descsphys); + tx_descs[i].stride = MSGDMA_DESC_TX_STRIDE; + /* burst set to 0 so it defaults to max configured */ + /* set seq number to desc number */ + tx_descs[i].burst_seq_num = i; + } + + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "%s: RX Desc mem at 0x%llx\n", __func__, + priv->pref_rxdescphys); + + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "%s: TX Desc 
mem at 0x%llx\n", __func__, + priv->pref_txdescphys); + + return 0; + +err_tx: + dma_free_coherent(priv->device, + sizeof(struct msgdma_pref_extended_desc) + * priv->rx_ring_size * 2, + priv->pref_rxdesc, priv->pref_rxdescphys); +err_rx: + return -ENOMEM; +} + +void msgdma_pref_uninitialize(struct altera_dma_private *priv) +{ + if (priv->pref_rxdesc) + dma_free_coherent(priv->device, + sizeof(struct msgdma_pref_extended_desc) + * priv->rx_ring_size * 2, + priv->pref_rxdesc, priv->pref_rxdescphys); + + if (priv->pref_txdesc) + dma_free_coherent(priv->device, + sizeof(struct msgdma_pref_extended_desc) + * priv->tx_ring_size * 2, + priv->pref_txdesc, priv->pref_txdescphys); +} + +void msgdma_pref_enable_txirq(struct altera_dma_private *priv) +{ + tse_set_bit(priv->tx_pref_csr, msgdma_pref_csroffs(control), + MSGDMA_PREF_CTL_GLOBAL_INTR); +} + +void msgdma_pref_disable_txirq(struct altera_dma_private *priv) +{ + tse_clear_bit(priv->tx_pref_csr, msgdma_pref_csroffs(control), + MSGDMA_PREF_CTL_GLOBAL_INTR); +} + +void msgdma_pref_clear_txirq(struct altera_dma_private *priv) +{ + csrwr32(MSGDMA_PREF_STAT_IRQ, priv->tx_pref_csr, + msgdma_pref_csroffs(status)); +} + +void msgdma_pref_enable_rxirq(struct altera_dma_private *priv) +{ + tse_set_bit(priv->rx_pref_csr, msgdma_pref_csroffs(control), + MSGDMA_PREF_CTL_GLOBAL_INTR); +} + +void msgdma_pref_disable_rxirq(struct altera_dma_private *priv) +{ + tse_clear_bit(priv->rx_pref_csr, msgdma_pref_csroffs(control), + MSGDMA_PREF_CTL_GLOBAL_INTR); +} + +void msgdma_pref_clear_rxirq(struct altera_dma_private *priv) +{ + csrwr32(MSGDMA_PREF_STAT_IRQ, priv->rx_pref_csr, + msgdma_pref_csroffs(status)); +} + +static u64 timestamp_to_ns(struct msgdma_pref_extended_desc *desc) +{ + u64 ns = 0; + u64 second; + u32 tmp; + + tmp = desc->timestamp_96b[0] >> 16; + tmp |= (desc->timestamp_96b[1] << 16); + + second = desc->timestamp_96b[2]; + second <<= 16; + second |= ((desc->timestamp_96b[1] & 0xffff0000) >> 16); + + ns = second * 
NSEC_PER_SEC + tmp; + + return ns; +} + +/* Setup TX descriptor + * -> this should never be called when a descriptor isn't available + */ + +netdev_tx_t msgdma_pref_tx_buffer(struct altera_dma_private *priv, + struct altera_dma_buffer *buffer) +{ + u32 desc_entry = priv->tx_prod % (priv->tx_ring_size * 2); + struct msgdma_pref_extended_desc *tx_descs = priv->pref_txdesc; + + /* if for some reason the descriptor is still owned by hardware */ + if (unlikely(tx_descs[desc_entry].desc_control + & MSGDMA_PREF_DESC_CTL_OWNED_BY_HW)) { + if (!netif_queue_stopped(priv->dev)) + netif_stop_queue(priv->dev); + return NETDEV_TX_BUSY; + } + + /* write descriptor entries */ + tx_descs[desc_entry].len = buffer->len; + tx_descs[desc_entry].read_addr_lo = lower_32_bits(buffer->dma_addr); + tx_descs[desc_entry].read_addr_hi = upper_32_bits(buffer->dma_addr); + + /* + * Ensure that the high and low address bits of the descriptor are + * written prior to the go bit being set. + */ + dma_wmb(); + + /* set the control bits and set owned by hw */ + tx_descs[desc_entry].desc_control = (MSGDMA_DESC_CTL_TX_SINGLE + | MSGDMA_PREF_DESC_CTL_OWNED_BY_HW); + + if (netif_msg_tx_queued(priv)) + netdev_info(priv->dev, "%s: cons: %d prod: %d", + __func__, priv->tx_cons, priv->tx_prod); + + return NETDEV_TX_OK; +} + +u32 msgdma_pref_tx_completions(struct altera_dma_private *priv) +{ + u32 control; + u32 ready = 0; + u32 cons = priv->tx_cons; + u32 desc_ringsize = priv->tx_ring_size * 2; + u32 ringsize = priv->tx_ring_size; + u64 ns = 0; + struct msgdma_pref_extended_desc *cur; + struct altera_dma_buffer *tx_buff; + struct skb_shared_hwtstamps shhwtstamp; + int i; + + if (netif_msg_tx_done(priv)) + for (i = 0; i < desc_ringsize; i++) + netdev_info(priv->dev, "%s: desc: %d control 0x%x\n", + __func__, i, + priv->pref_txdesc[i].desc_control); + + cur = &priv->pref_txdesc[cons % desc_ringsize]; + control = cur->desc_control; + tx_buff = &priv->tx_ring[cons % ringsize]; + + while (!(control & 
MSGDMA_PREF_DESC_CTL_OWNED_BY_HW) && + (priv->tx_prod != (cons + ready)) && control) { + if (skb_shinfo(tx_buff->skb)->tx_flags & SKBTX_IN_PROGRESS) { + /* Timestamping is enabled, pass timestamp back */ + ns = timestamp_to_ns(cur); + memset(&shhwtstamp, 0, + sizeof(struct skb_shared_hwtstamps)); + shhwtstamp.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(tx_buff->skb, &shhwtstamp); + } + + if (netif_msg_tx_done(priv)) + netdev_info(priv->dev, "%s: cur: %d ts: %lld ns\n", + __func__, + ((cons + ready) % desc_ringsize), ns); + + /* clear data */ + cur->desc_control = 0; + cur->timestamp_96b[0] = 0; + cur->timestamp_96b[1] = 0; + cur->timestamp_96b[2] = 0; + + ready++; + cur = &priv->pref_txdesc[(cons + ready) % desc_ringsize]; + tx_buff = &priv->tx_ring[(cons + ready) % ringsize]; + control = cur->desc_control; + } + + return ready; +} + +void msgdma_pref_reset(struct altera_dma_private *priv) +{ + int counter; + + /* turn off polling */ + tse_clear_bit(priv->rx_pref_csr, msgdma_pref_csroffs(control), + MSGDMA_PREF_CTL_DESC_POLL_EN); + tse_clear_bit(priv->tx_pref_csr, msgdma_pref_csroffs(control), + MSGDMA_PREF_CTL_DESC_POLL_EN); + + /* Reset the RX Prefetcher */ + csrwr32(MSGDMA_PREF_STAT_IRQ, priv->rx_pref_csr, + msgdma_pref_csroffs(status)); + csrwr32(MSGDMA_PREF_CTL_RESET, priv->rx_pref_csr, + msgdma_pref_csroffs(control)); + + counter = 0; + while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { + if (tse_bit_is_clear(priv->rx_pref_csr, + msgdma_pref_csroffs(control), + MSGDMA_PREF_CTL_RESET)) + break; + udelay(1); + } + + if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) + netif_warn(priv, drv, priv->dev, + "TSE Rx Prefetcher reset bit never cleared!\n"); + + /* Reset the TX Prefetcher */ + csrwr32(MSGDMA_PREF_STAT_IRQ, priv->tx_pref_csr, + msgdma_pref_csroffs(status)); + csrwr32(MSGDMA_PREF_CTL_RESET, priv->tx_pref_csr, + msgdma_pref_csroffs(control)); + + counter = 0; + while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { + if 
(tse_bit_is_clear(priv->tx_pref_csr, + msgdma_pref_csroffs(control), + MSGDMA_PREF_CTL_RESET)) + break; + udelay(1); + } + + if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) + netif_warn(priv, drv, priv->dev, + "TSE Tx Prefetcher reset bit never cleared!\n"); + + /* clear all status bits */ + csrwr32(MSGDMA_PREF_STAT_IRQ, priv->tx_pref_csr, + msgdma_pref_csroffs(status)); + + /* Reset mSGDMA dispatchers*/ + msgdma_reset(priv); +} + +/* Setup the RX and TX prefetchers to poll the descriptor chain */ +void msgdma_pref_start_rxdma(struct altera_dma_private *priv) +{ + csrwr32(priv->rx_poll_freq, priv->rx_pref_csr, + msgdma_pref_csroffs(desc_poll_freq)); + csrwr32(lower_32_bits(priv->pref_rxdescphys), priv->rx_pref_csr, + msgdma_pref_csroffs(next_desc_lo)); + csrwr32(upper_32_bits(priv->pref_rxdescphys), priv->rx_pref_csr, + msgdma_pref_csroffs(next_desc_hi)); + tse_set_bit(priv->rx_pref_csr, msgdma_pref_csroffs(control), + MSGDMA_PREF_CTL_DESC_POLL_EN | MSGDMA_PREF_CTL_RUN); +} + +void msgdma_pref_start_txdma(struct altera_dma_private *priv) +{ + csrwr32(priv->tx_poll_freq, priv->tx_pref_csr, + msgdma_pref_csroffs(desc_poll_freq)); + csrwr32(lower_32_bits(priv->pref_txdescphys), priv->tx_pref_csr, + msgdma_pref_csroffs(next_desc_lo)); + csrwr32(upper_32_bits(priv->pref_txdescphys), priv->tx_pref_csr, + msgdma_pref_csroffs(next_desc_hi)); + tse_set_bit(priv->tx_pref_csr, msgdma_pref_csroffs(control), + MSGDMA_PREF_CTL_DESC_POLL_EN | MSGDMA_PREF_CTL_RUN); +} + +/* Add MSGDMA Prefetcher Descriptor to descriptor list + * -> This should never be called when a descriptor isn't available + */ +void msgdma_pref_add_rx_desc(struct altera_dma_private *priv, + struct altera_dma_buffer *rxbuffer) +{ + struct msgdma_pref_extended_desc *rx_descs = priv->pref_rxdesc; + u32 desc_entry = priv->pref_rx_prod % (priv->rx_ring_size * 2); + + /* write descriptor entries */ + rx_descs[desc_entry].len = priv->rx_dma_buf_sz; + rx_descs[desc_entry].write_addr_lo = 
lower_32_bits(rxbuffer->dma_addr); + rx_descs[desc_entry].write_addr_hi = upper_32_bits(rxbuffer->dma_addr); + + /* set the control bits and set owned by hw */ + rx_descs[desc_entry].desc_control = (MSGDMA_DESC_CTL_END_ON_EOP + | MSGDMA_DESC_CTL_END_ON_LEN + | MSGDMA_DESC_CTL_TR_COMP_IRQ + | MSGDMA_DESC_CTL_EARLY_IRQ + | MSGDMA_DESC_CTL_TR_ERR_IRQ + | MSGDMA_DESC_CTL_GO + | MSGDMA_PREF_DESC_CTL_OWNED_BY_HW); + + /* we need to keep a separate one for rx as RX_DESCRIPTORS are + * pre-configured at startup + */ + priv->pref_rx_prod++; + + if (netif_msg_rx_status(priv)) { + netdev_info(priv->dev, "%s: desc: %d buf: %d control 0x%x\n", + __func__, desc_entry, + priv->rx_prod % priv->rx_ring_size, + priv->pref_rxdesc[desc_entry].desc_control); + } +} + +u32 msgdma_pref_rx_status(struct altera_dma_private *priv) +{ + u32 rxstatus = 0; + u32 pktlength; + u32 pktstatus; + u64 ns = 0; + u32 entry = priv->rx_cons % priv->rx_ring_size; + u32 desc_entry = priv->rx_prod % (priv->rx_ring_size * 2); + struct msgdma_pref_extended_desc *rx_descs = priv->pref_rxdesc; + struct skb_shared_hwtstamps *shhwtstamp = NULL; + struct altera_dma_buffer *rx_buff = priv->rx_ring; + + /* if the current entry is not owned by hardware, process it */ + if (!(rx_descs[desc_entry].desc_control + & MSGDMA_PREF_DESC_CTL_OWNED_BY_HW) && + rx_descs[desc_entry].desc_control) { + pktlength = rx_descs[desc_entry].bytes_transferred; + pktstatus = rx_descs[desc_entry].desc_status; + rxstatus = pktstatus; + rxstatus = rxstatus << 16; + rxstatus |= (pktlength & 0xffff); + + /* get the timestamp */ + if (priv->hwts_rx_en) { + ns = timestamp_to_ns(&rx_descs[desc_entry]); + shhwtstamp = skb_hwtstamps(rx_buff[entry].skb); + memset(shhwtstamp, 0, + sizeof(struct skb_shared_hwtstamps)); + shhwtstamp->hwtstamp = ns_to_ktime(ns); + } + + /* clear data */ + rx_descs[desc_entry].desc_control = 0; + rx_descs[desc_entry].timestamp_96b[0] = 0; + rx_descs[desc_entry].timestamp_96b[1] = 0; + 
rx_descs[desc_entry].timestamp_96b[2] = 0; + + if (netif_msg_rx_status(priv)) + netdev_info(priv->dev, "%s: desc: %d buf: %d ts: %lld ns", + __func__, desc_entry, entry, ns); + } + return rxstatus; +} diff --git a/drivers/net/ethernet/altera/altera_msgdma_prefetcher.h b/drivers/net/ethernet/altera/altera_msgdma_prefetcher.h new file mode 100644 index 0000000000000..e7bbcac4e0182 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_msgdma_prefetcher.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* MSGDMA Prefetcher driver for Altera ethernet devices + * + * Copyright (C) 2020 Intel Corporation. + * Contributors: + * Dalon Westergreen + * Thomas Chou + * Ian Abbott + * Yuriy Kozlov + * Tobias Klauser + * Andriy Smolskyy + * Roman Bulgakov + * Dmytro Mytarchuk + * Matthew Gerlach + */ + +#ifndef __ALTERA_PREF_MSGDMA_H__ +#define __ALTERA_PREF_MSGDMA_H__ + +void msgdma_pref_reset(struct altera_dma_private *priv); +void msgdma_pref_enable_txirq(struct altera_dma_private *priv); +void msgdma_pref_enable_rxirq(struct altera_dma_private *priv); +void msgdma_pref_disable_rxirq(struct altera_dma_private *priv); +void msgdma_pref_disable_txirq(struct altera_dma_private *priv); +void msgdma_pref_clear_rxirq(struct altera_dma_private *priv); +void msgdma_pref_clear_txirq(struct altera_dma_private *priv); +u32 msgdma_pref_tx_completions(struct altera_dma_private *priv); +void msgdma_pref_add_rx_desc(struct altera_dma_private *priv, + struct altera_dma_buffer *buffer); +netdev_tx_t msgdma_pref_tx_buffer(struct altera_dma_private *priv, + struct altera_dma_buffer *buffer); +u32 msgdma_pref_rx_status(struct altera_dma_private *priv); +int msgdma_pref_initialize(struct altera_dma_private *priv); +void msgdma_pref_uninitialize(struct altera_dma_private *priv); +void msgdma_pref_start_rxdma(struct altera_dma_private *priv); +void msgdma_pref_start_txdma(struct altera_dma_private *priv); + +#endif /* __ALTERA_PREF_MSGDMA_H__ */ diff --git 
a/drivers/net/ethernet/altera/altera_msgdmahw_prefetcher.h b/drivers/net/ethernet/altera/altera_msgdmahw_prefetcher.h new file mode 100644 index 0000000000000..efda31e491cad --- /dev/null +++ b/drivers/net/ethernet/altera/altera_msgdmahw_prefetcher.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* MSGDMA Prefetcher driver for Altera ethernet devices + * + * Copyright (C) 2020 Intel Corporation. + * Contributors: + * Dalon Westergreen + * Thomas Chou + * Ian Abbott + * Yuriy Kozlov + * Tobias Klauser + * Andriy Smolskyy + * Roman Bulgakov + * Dmytro Mytarchuk + * Matthew Gerlach + */ + +#ifndef __ALTERA_MSGDMAHW_PREFETCHER_H__ +#define __ALTERA_MSGDMAHW_PREFETCHER_H__ + +/* mSGDMA prefetcher extended prefectcher descriptor format + */ +struct msgdma_pref_extended_desc { + /* data buffer source address low bits */ + u32 read_addr_lo; + /* data buffer destination address low bits */ + u32 write_addr_lo; + /* the number of bytes to transfer */ + u32 len; + /* next descriptor low address */ + u32 next_desc_lo; + /* number of bytes transferred */ + u32 bytes_transferred; + u32 desc_status; + u32 reserved_18; + /* bit 31:24 write burst */ + /* bit 23:16 read burst */ + /* bit 15:0 sequence number */ + u32 burst_seq_num; + /* bit 31:16 write stride */ + /* bit 15:0 read stride */ + u32 stride; + /* data buffer source address high bits */ + u32 read_addr_hi; + /* data buffer destination address high bits */ + u32 write_addr_hi; + /* next descriptor high address */ + u32 next_desc_hi; + /* prefetcher mod now writes these reserved bits*/ + /* Response bits [191:160] */ + u32 timestamp_96b[3]; + /* desc_control */ + u32 desc_control; +}; + +/* mSGDMA Prefetcher Descriptor Status bits */ +#define MSGDMA_PREF_DESC_STAT_STOPPED_ON_EARLY BIT(8) +#define MSGDMA_PREF_DESC_STAT_MASK 0xFF + +/* mSGDMA Prefetcher Descriptor Control bits */ +/* bit 31 and bits 29-0 are the same as the normal dispatcher ctl flags */ +#define MSGDMA_PREF_DESC_CTL_OWNED_BY_HW BIT(30) + +/* 
mSGDMA Prefetcher CSR */ +struct msgdma_prefetcher_csr { + u32 control; + u32 next_desc_lo; + u32 next_desc_hi; + u32 desc_poll_freq; + u32 status; +}; + +/* mSGDMA Prefetcher Control */ +#define MSGDMA_PREF_CTL_PARK BIT(4) +#define MSGDMA_PREF_CTL_GLOBAL_INTR BIT(3) +#define MSGDMA_PREF_CTL_RESET BIT(2) +#define MSGDMA_PREF_CTL_DESC_POLL_EN BIT(1) +#define MSGDMA_PREF_CTL_RUN BIT(0) + +#define MSGDMA_PREF_POLL_FREQ_MASK 0xFFFF + +/* mSGDMA Prefetcher Status */ +#define MSGDMA_PREF_STAT_IRQ BIT(0) + +#define msgdma_pref_csroffs(a) (offsetof(struct msgdma_prefetcher_csr, a)) +#define msgdma_pref_descroffs(a) (offsetof(struct msgdma_pref_extended_desc, a)) + +#endif /* __ALTERA_MSGDMAHW_PREFETCHER_H__*/ diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c index 7f247ccbe6bad..1ca5f7cbff0a0 100644 --- a/drivers/net/ethernet/altera/altera_sgdma.c +++ b/drivers/net/ethernet/altera/altera_sgdma.c @@ -4,8 +4,9 @@ */ #include +#include #include "altera_utils.h" -#include "altera_tse.h" +#include "altera_eth_dma.h" #include "altera_sgdmahw.h" #include "altera_sgdma.h" @@ -19,39 +20,39 @@ static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, int rfixed, int wfixed); -static int sgdma_async_write(struct altera_tse_private *priv, - struct sgdma_descrip __iomem *desc); +static int sgdma_async_write(struct altera_dma_private *priv, + struct sgdma_descrip __iomem *desc); -static int sgdma_async_read(struct altera_tse_private *priv); +static int sgdma_async_read(struct altera_dma_private *priv); static dma_addr_t -sgdma_txphysaddr(struct altera_tse_private *priv, +sgdma_txphysaddr(struct altera_dma_private *priv, struct sgdma_descrip __iomem *desc); static dma_addr_t -sgdma_rxphysaddr(struct altera_tse_private *priv, +sgdma_rxphysaddr(struct altera_dma_private *priv, struct sgdma_descrip __iomem *desc); -static int sgdma_txbusy(struct altera_tse_private *priv); +static int sgdma_txbusy(struct altera_dma_private *priv); 
-static int sgdma_rxbusy(struct altera_tse_private *priv); +static int sgdma_rxbusy(struct altera_dma_private *priv); static void -queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer); +queue_tx(struct altera_dma_private *priv, struct altera_dma_buffer *buffer); static void -queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer); +queue_rx(struct altera_dma_private *priv, struct altera_dma_buffer *buffer); -static struct tse_buffer * -dequeue_tx(struct altera_tse_private *priv); +static struct altera_dma_buffer * +dequeue_tx(struct altera_dma_private *priv); -static struct tse_buffer * -dequeue_rx(struct altera_tse_private *priv); +static struct altera_dma_buffer * +dequeue_rx(struct altera_dma_private *priv); -static struct tse_buffer * -queue_rx_peekhead(struct altera_tse_private *priv); +static struct altera_dma_buffer * +queue_rx_peekhead(struct altera_dma_private *priv); -int sgdma_initialize(struct altera_tse_private *priv) +int sgdma_initialize(struct altera_dma_private *priv) { priv->txctrlreg = SGDMA_CTRLREG_ILASTD | SGDMA_CTRLREG_INTEN; @@ -99,7 +100,7 @@ int sgdma_initialize(struct altera_tse_private *priv) return 0; } -void sgdma_uninitialize(struct altera_tse_private *priv) +void sgdma_uninitialize(struct altera_dma_private *priv) { if (priv->rxdescphys) dma_unmap_single(priv->device, priv->rxdescphys, @@ -113,7 +114,7 @@ void sgdma_uninitialize(struct altera_tse_private *priv) /* This function resets the SGDMA controller and clears the * descriptor memory used for transmits and receives. 
*/ -void sgdma_reset(struct altera_tse_private *priv) +void sgdma_reset(struct altera_dma_private *priv) { /* Initialize descriptor memory to 0 */ memset_io(priv->tx_dma_desc, 0, priv->txdescmem); @@ -131,40 +132,42 @@ void sgdma_reset(struct altera_tse_private *priv) * and disable */ -void sgdma_enable_rxirq(struct altera_tse_private *priv) +void sgdma_enable_rxirq(struct altera_dma_private *priv) { } -void sgdma_enable_txirq(struct altera_tse_private *priv) +void sgdma_enable_txirq(struct altera_dma_private *priv) { } -void sgdma_disable_rxirq(struct altera_tse_private *priv) +void sgdma_disable_rxirq(struct altera_dma_private *priv) { } -void sgdma_disable_txirq(struct altera_tse_private *priv) +void sgdma_disable_txirq(struct altera_dma_private *priv) { } -void sgdma_clear_rxirq(struct altera_tse_private *priv) +void sgdma_clear_rxirq(struct altera_dma_private *priv) { tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control), SGDMA_CTRLREG_CLRINT); } -void sgdma_clear_txirq(struct altera_tse_private *priv) +void sgdma_clear_txirq(struct altera_dma_private *priv) { tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control), SGDMA_CTRLREG_CLRINT); } -/* transmits buffer through SGDMA. Returns number of buffers - * transmitted, 0 if not possible. - * - * tx_lock is held by the caller +/* transmits buffer through SGDMA. + * original behavior returned the number of transmitted packets (always 1) & + * returned 0 on error. This differs from the msgdma. the calling function + * will now actually look at the code, so from now, 0 is good and return + * NETDEV_TX_BUSY when busy. 
*/ -int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) +netdev_tx_t sgdma_tx_buffer(struct altera_dma_private *priv, + struct altera_dma_buffer *buffer) { struct sgdma_descrip __iomem *descbase = (struct sgdma_descrip __iomem *)priv->tx_dma_desc; @@ -173,8 +176,11 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) struct sgdma_descrip __iomem *ndesc = &descbase[1]; /* wait 'til the tx sgdma is ready for the next transmit request */ - if (sgdma_txbusy(priv)) - return 0; + if (sgdma_txbusy(priv)) { + if (!netif_queue_stopped(priv->dev)) + netif_stop_queue(priv->dev); + return NETDEV_TX_BUSY; + } sgdma_setup_descrip(cdesc, /* current descriptor */ ndesc, /* next descriptor */ @@ -191,13 +197,13 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) /* enqueue the request to the pending transmit queue */ queue_tx(priv, buffer); - return 1; + return NETDEV_TX_OK; } /* tx_lock held to protect access to queued tx list */ -u32 sgdma_tx_completions(struct altera_tse_private *priv) +u32 sgdma_tx_completions(struct altera_dma_private *priv) { u32 ready = 0; @@ -211,13 +217,13 @@ u32 sgdma_tx_completions(struct altera_tse_private *priv) return ready; } -void sgdma_start_rxdma(struct altera_tse_private *priv) +void sgdma_start_rxdma(struct altera_dma_private *priv) { sgdma_async_read(priv); } -void sgdma_add_rx_desc(struct altera_tse_private *priv, - struct tse_buffer *rxbuffer) +void sgdma_add_rx_desc(struct altera_dma_private *priv, + struct altera_dma_buffer *rxbuffer) { queue_rx(priv, rxbuffer); } @@ -225,12 +231,12 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv, /* status is returned on upper 16 bits, * length is returned in lower 16 bits */ -u32 sgdma_rx_status(struct altera_tse_private *priv) +u32 sgdma_rx_status(struct altera_dma_private *priv) { struct sgdma_descrip __iomem *base = (struct sgdma_descrip __iomem *)priv->rx_dma_desc; struct sgdma_descrip __iomem *desc = 
NULL; - struct tse_buffer *rxbuffer = NULL; + struct altera_dma_buffer *rxbuffer = NULL; unsigned int rxstatus = 0; u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status)); @@ -333,14 +339,14 @@ static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, * If read status indicate not busy and a status, restart the async * DMA read. */ -static int sgdma_async_read(struct altera_tse_private *priv) +static int sgdma_async_read(struct altera_dma_private *priv) { struct sgdma_descrip __iomem *descbase = (struct sgdma_descrip __iomem *)priv->rx_dma_desc; struct sgdma_descrip __iomem *cdesc = &descbase[0]; struct sgdma_descrip __iomem *ndesc = &descbase[1]; - struct tse_buffer *rxbuffer = NULL; + struct altera_dma_buffer *rxbuffer = NULL; if (!sgdma_rxbusy(priv)) { rxbuffer = queue_rx_peekhead(priv); @@ -378,7 +384,7 @@ static int sgdma_async_read(struct altera_tse_private *priv) return 0; } -static int sgdma_async_write(struct altera_tse_private *priv, +static int sgdma_async_write(struct altera_dma_private *priv, struct sgdma_descrip __iomem *desc) { if (sgdma_txbusy(priv)) @@ -403,7 +409,7 @@ static int sgdma_async_write(struct altera_tse_private *priv, } static dma_addr_t -sgdma_txphysaddr(struct altera_tse_private *priv, +sgdma_txphysaddr(struct altera_dma_private *priv, struct sgdma_descrip __iomem *desc) { dma_addr_t paddr = priv->txdescmem_busaddr; @@ -412,7 +418,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv, } static dma_addr_t -sgdma_rxphysaddr(struct altera_tse_private *priv, +sgdma_rxphysaddr(struct altera_dma_private *priv, struct sgdma_descrip __iomem *desc) { dma_addr_t paddr = priv->rxdescmem_busaddr; @@ -437,70 +443,76 @@ sgdma_rxphysaddr(struct altera_tse_private *priv, } \ } while (0) -/* adds a tse_buffer to the tail of a tx buffer list. +/* adds a altera_dma_buffer to the tail of a tx buffer list. * assumes the caller is managing and holding a mutual exclusion * primitive to avoid simultaneous pushes/pops to the list. 
*/ static void -queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer) +queue_tx(struct altera_dma_private *priv, struct altera_dma_buffer *buffer) { list_add_tail(&buffer->lh, &priv->txlisthd); } -/* adds a tse_buffer to the tail of a rx buffer list +/* adds a altera_dma_buffer to the tail of a rx buffer list * assumes the caller is managing and holding a mutual exclusion * primitive to avoid simultaneous pushes/pops to the list. */ static void -queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer) +queue_rx(struct altera_dma_private *priv, struct altera_dma_buffer *buffer) { list_add_tail(&buffer->lh, &priv->rxlisthd); } -/* dequeues a tse_buffer from the transmit buffer list, otherwise +/* dequeues a altera_dma_buffer from the transmit buffer list, otherwise * returns NULL if empty. * assumes the caller is managing and holding a mutual exclusion * primitive to avoid simultaneous pushes/pops to the list. */ -static struct tse_buffer * -dequeue_tx(struct altera_tse_private *priv) +static struct altera_dma_buffer * +dequeue_tx(struct altera_dma_private *priv) { - struct tse_buffer *buffer = NULL; - list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh); + struct altera_dma_buffer *buffer = NULL; + + list_remove_head(&priv->txlisthd, buffer, struct altera_dma_buffer, lh); + return buffer; } -/* dequeues a tse_buffer from the receive buffer list, otherwise +/* dequeues a altera_dma_buffer from the receive buffer list, otherwise * returns NULL if empty * assumes the caller is managing and holding a mutual exclusion * primitive to avoid simultaneous pushes/pops to the list. 
*/ -static struct tse_buffer * -dequeue_rx(struct altera_tse_private *priv) +static struct altera_dma_buffer * +dequeue_rx(struct altera_dma_private *priv) { - struct tse_buffer *buffer = NULL; - list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh); + struct altera_dma_buffer *buffer = NULL; + + list_remove_head(&priv->rxlisthd, buffer, struct altera_dma_buffer, lh); + return buffer; } -/* dequeues a tse_buffer from the receive buffer list, otherwise +/* dequeues a altera_dma_buffer from the receive buffer list, otherwise * returns NULL if empty * assumes the caller is managing and holding a mutual exclusion * primitive to avoid simultaneous pushes/pops to the list while the * head is being examined. */ -static struct tse_buffer * -queue_rx_peekhead(struct altera_tse_private *priv) +static struct altera_dma_buffer * +queue_rx_peekhead(struct altera_dma_private *priv) { - struct tse_buffer *buffer = NULL; - list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh); + struct altera_dma_buffer *buffer = NULL; + + list_peek_head(&priv->rxlisthd, buffer, struct altera_dma_buffer, lh); + return buffer; } /* check and return rx sgdma status without polling */ -static int sgdma_rxbusy(struct altera_tse_private *priv) +static int sgdma_rxbusy(struct altera_dma_private *priv) { return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status)) & SGDMA_STSREG_BUSY; @@ -509,7 +521,7 @@ static int sgdma_rxbusy(struct altera_tse_private *priv) /* waits for the tx sgdma to finish it's current operation, returns 0 * when it transitions to nonbusy, returns 1 if the operation times out */ -static int sgdma_txbusy(struct altera_tse_private *priv) +static int sgdma_txbusy(struct altera_dma_private *priv) { int delay = 0; diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h index 08afe1c9994fe..0a24a98eaf3bd 100644 --- a/drivers/net/ethernet/altera/altera_sgdma.h +++ b/drivers/net/ethernet/altera/altera_sgdma.h @@ -1,25 +1,38 @@ -/* 
SPDX-License-Identifier: GPL-2.0-only */ /* Altera TSE SGDMA and MSGDMA Linux driver * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . */ #ifndef __ALTERA_SGDMA_H__ #define __ALTERA_SGDMA_H__ -void sgdma_reset(struct altera_tse_private *); -void sgdma_enable_txirq(struct altera_tse_private *); -void sgdma_enable_rxirq(struct altera_tse_private *); -void sgdma_disable_rxirq(struct altera_tse_private *); -void sgdma_disable_txirq(struct altera_tse_private *); -void sgdma_clear_rxirq(struct altera_tse_private *); -void sgdma_clear_txirq(struct altera_tse_private *); -int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *); -u32 sgdma_tx_completions(struct altera_tse_private *); -void sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *); -void sgdma_status(struct altera_tse_private *); -u32 sgdma_rx_status(struct altera_tse_private *); -int sgdma_initialize(struct altera_tse_private *); -void sgdma_uninitialize(struct altera_tse_private *); -void sgdma_start_rxdma(struct altera_tse_private *); +void sgdma_reset(struct altera_dma_private *priv); +void sgdma_enable_txirq(struct altera_dma_private *priv); +void sgdma_enable_rxirq(struct altera_dma_private *priv); +void sgdma_disable_rxirq(struct altera_dma_private *priv); +void sgdma_disable_txirq(struct altera_dma_private *priv); +void sgdma_clear_rxirq(struct altera_dma_private *priv); +void 
sgdma_clear_txirq(struct altera_dma_private *priv); +netdev_tx_t sgdma_tx_buffer(struct altera_dma_private *priv, + struct altera_dma_buffer *buffer); +u32 sgdma_tx_completions(struct altera_dma_private *priv); +void sgdma_add_rx_desc(struct altera_dma_private *priv, + struct altera_dma_buffer *buffer); +void sgdma_status(struct altera_dma_private *priv); +u32 sgdma_rx_status(struct altera_dma_private *priv); +int sgdma_initialize(struct altera_dma_private *priv); +void sgdma_uninitialize(struct altera_dma_private *priv); +void sgdma_start_rxdma(struct altera_dma_private *priv); #endif /* __ALTERA_SGDMA_H__ */ diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h index 82f2363a45cd0..aea6d7ea5f76d 100644 --- a/drivers/net/ethernet/altera/altera_tse.h +++ b/drivers/net/ethernet/altera/altera_tse.h @@ -29,6 +29,8 @@ #include #include +#include "intel_fpga_tod.h" + #define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR 10000 #define ALTERA_TSE_MAC_FIFO_WIDTH 4 /* TX/RX FIFO width in * bytes @@ -355,42 +357,8 @@ struct altera_tse_mac { #define ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 BIT(18) #define ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16 BIT(25) -/* Wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer - */ -struct tse_buffer { - struct list_head lh; - struct sk_buff *skb; - dma_addr_t dma_addr; - u32 len; - int mapped_as_page; -}; - struct altera_tse_private; -#define ALTERA_DTYPE_SGDMA 1 -#define ALTERA_DTYPE_MSGDMA 2 - -/* standard DMA interface for SGDMA and MSGDMA */ -struct altera_dmaops { - int altera_dtype; - int dmamask; - void (*reset_dma)(struct altera_tse_private *); - void (*enable_txirq)(struct altera_tse_private *); - void (*enable_rxirq)(struct altera_tse_private *); - void (*disable_txirq)(struct altera_tse_private *); - void (*disable_rxirq)(struct altera_tse_private *); - void (*clear_txirq)(struct altera_tse_private *); - void (*clear_rxirq)(struct altera_tse_private *); - int 
(*tx_buffer)(struct altera_tse_private *, struct tse_buffer *); - u32 (*tx_completions)(struct altera_tse_private *); - void (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *); - u32 (*get_rx_status)(struct altera_tse_private *); - int (*init_dma)(struct altera_tse_private *); - void (*uninit_dma)(struct altera_tse_private *); - void (*start_rxdma)(struct altera_tse_private *); -}; - /* This structure is private to each device. */ struct altera_tse_private { @@ -401,34 +369,19 @@ struct altera_tse_private { /* MAC address space */ struct altera_tse_mac __iomem *mac_dev; + /* Shared DMA structure */ + struct altera_dma_private dma_priv; + /* TSE Revision */ u32 revision; - /* mSGDMA Rx Dispatcher address space */ - void __iomem *rx_dma_csr; - void __iomem *rx_dma_desc; - void __iomem *rx_dma_resp; - - /* mSGDMA Tx Dispatcher address space */ - void __iomem *tx_dma_csr; - void __iomem *tx_dma_desc; + /* Shared PTP structure */ + struct intel_fpga_tod_private ptp_priv; + u32 ptp_enable; /* SGMII PCS address space */ void __iomem *pcs_base; - /* Rx buffers queue */ - struct tse_buffer *rx_ring; - u32 rx_cons; - u32 rx_prod; - u32 rx_ring_size; - u32 rx_dma_buf_sz; - - /* Tx ring buffer */ - struct tse_buffer *tx_ring; - u32 tx_prod; - u32 tx_cons; - u32 tx_ring_size; - /* Interrupts */ u32 tx_irq; u32 rx_irq; @@ -441,19 +394,6 @@ struct altera_tse_private { u32 hash_filter; u32 added_unicast; - /* Descriptor memory info for managing SGDMA */ - u32 txdescmem; - u32 rxdescmem; - dma_addr_t rxdescmem_busaddr; - dma_addr_t txdescmem_busaddr; - u32 txctrlreg; - u32 rxctrlreg; - dma_addr_t rxdescphys; - dma_addr_t txdescphys; - - struct list_head txlisthd; - struct list_head rxlisthd; - /* MAC command_config register protection */ spinlock_t mac_cfg_lock; /* Tx path protection */ @@ -483,49 +423,4 @@ struct altera_tse_private { */ void altera_tse_set_ethtool_ops(struct net_device *); -static inline -u32 csrrd32(void __iomem *mac, size_t offs) -{ - void __iomem 
*paddr = (void __iomem *)((uintptr_t)mac + offs); - return readl(paddr); -} - -static inline -u16 csrrd16(void __iomem *mac, size_t offs) -{ - void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); - return readw(paddr); -} - -static inline -u8 csrrd8(void __iomem *mac, size_t offs) -{ - void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); - return readb(paddr); -} - -static inline -void csrwr32(u32 val, void __iomem *mac, size_t offs) -{ - void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); - - writel(val, paddr); -} - -static inline -void csrwr16(u16 val, void __iomem *mac, size_t offs) -{ - void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); - - writew(val, paddr); -} - -static inline -void csrwr8(u8 val, void __iomem *mac, size_t offs) -{ - void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); - - writeb(val, paddr); -} - #endif /* __ALTERA_TSE_H__ */ diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c index 81313c85833eb..908d1a9105b43 100644 --- a/drivers/net/ethernet/altera/altera_tse_ethtool.c +++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c @@ -19,9 +19,12 @@ #include #include #include +#include #include +#include "altera_eth_dma.h" #include "altera_tse.h" +#include "altera_utils.h" #define TSE_STATS_LEN 31 #define TSE_NUM_REGS 128 @@ -221,20 +224,30 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs, buf[i] = csrrd32(priv->mac_dev, i * 4); } -static int tse_ethtool_set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd) +static int tse_get_ts_info(struct net_device *dev, + struct kernel_ethtool_ts_info *info) { struct altera_tse_private *priv = netdev_priv(dev); - return phylink_ethtool_ksettings_set(priv->phylink, cmd); -} + if (priv->ptp_enable) { + if (priv->ptp_priv.ptp_clock) + info->phc_index = + ptp_clock_index(priv->ptp_priv.ptp_clock); -static int 
tse_ethtool_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct altera_tse_private *priv = netdev_priv(dev); + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; - return phylink_ethtool_ksettings_get(priv->phylink, cmd); + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + return 0; + } else { + return ethtool_op_get_ts_info(dev, info); + } } static const struct ethtool_ops tse_ethtool_ops = { @@ -247,9 +260,9 @@ static const struct ethtool_ops tse_ethtool_ops = { .get_ethtool_stats = tse_fill_stats, .get_msglevel = tse_get_msglevel, .set_msglevel = tse_set_msglevel, - .get_link_ksettings = tse_ethtool_get_link_ksettings, - .set_link_ksettings = tse_ethtool_set_link_ksettings, - .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_ts_info = tse_get_ts_info, }; void altera_tse_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 3c112c18ae6ae..f4b9e18990243 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -18,32 +18,35 @@ */ #include +#include #include #include +#include #include #include #include -#include #include #include #include -#include +#include #include #include #include #include -#include +#include #include -#include -#include +#include #include #include #include #include "altera_utils.h" +#include "altera_eth_dma.h" #include "altera_tse.h" #include "altera_sgdma.h" #include "altera_msgdma.h" +#include "intel_fpga_tod.h" +#include "altera_msgdma_prefetcher.h" static atomic_t instance_count = ATOMIC_INIT(~0); /* Module parameters */ @@ -78,13 +81,14 @@ 
MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list"); /* Allow network stack to resume queuing packets after we've * finished transmitting at least 1/4 of the packets in the queue. */ -#define TSE_TX_THRESH(x) (x->tx_ring_size / 4) +#define TSE_TX_THRESH(x) ((x)->dma_priv.tx_ring_size / 4) #define TXQUEUESTOP_THRESHHOLD 2 static inline u32 tse_tx_avail(struct altera_tse_private *priv) { - return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1; + return priv->dma_priv.tx_cons + priv->dma_priv.tx_ring_size + - priv->dma_priv.tx_prod - 1; } /* MDIO specific functions @@ -193,7 +197,7 @@ static void altera_tse_mdio_destroy(struct net_device *dev) } static int tse_init_rx_buffer(struct altera_tse_private *priv, - struct tse_buffer *rxbuffer, int len) + struct altera_dma_buffer *rxbuffer, int len) { rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len); if (!rxbuffer->skb) @@ -214,7 +218,7 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv, } static void tse_free_rx_buffer(struct altera_tse_private *priv, - struct tse_buffer *rxbuffer) + struct altera_dma_buffer *rxbuffer) { dma_addr_t dma_addr = rxbuffer->dma_addr; struct sk_buff *skb = rxbuffer->skb; @@ -233,7 +237,7 @@ static void tse_free_rx_buffer(struct altera_tse_private *priv, /* Unmap and free Tx buffer resources */ static void tse_free_tx_buffer(struct altera_tse_private *priv, - struct tse_buffer *buffer) + struct altera_dma_buffer *buffer) { if (buffer->dma_addr) { if (buffer->mapped_as_page) @@ -252,44 +256,46 @@ static void tse_free_tx_buffer(struct altera_tse_private *priv, static int alloc_init_skbufs(struct altera_tse_private *priv) { - unsigned int rx_descs = priv->rx_ring_size; - unsigned int tx_descs = priv->tx_ring_size; + unsigned int rx_descs = priv->dma_priv.rx_ring_size; + unsigned int tx_descs = priv->dma_priv.tx_ring_size; int ret = -ENOMEM; int i; /* Create Rx ring buffer */ - priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer), - GFP_KERNEL); - 
if (!priv->rx_ring) + priv->dma_priv.rx_ring = kcalloc(rx_descs, + sizeof(struct altera_dma_private), + GFP_KERNEL); + if (!priv->dma_priv.rx_ring) goto err_rx_ring; /* Create Tx ring buffer */ - priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer), - GFP_KERNEL); - if (!priv->tx_ring) + priv->dma_priv.tx_ring = kcalloc(tx_descs, + sizeof(struct altera_dma_private), + GFP_KERNEL); + if (!priv->dma_priv.tx_ring) goto err_tx_ring; - priv->tx_cons = 0; - priv->tx_prod = 0; + priv->dma_priv.tx_cons = 0; + priv->dma_priv.tx_prod = 0; /* Init Rx ring */ for (i = 0; i < rx_descs; i++) { - ret = tse_init_rx_buffer(priv, &priv->rx_ring[i], - priv->rx_dma_buf_sz); + ret = tse_init_rx_buffer(priv, &priv->dma_priv.rx_ring[i], + priv->dma_priv.rx_dma_buf_sz); if (ret) goto err_init_rx_buffers; } - priv->rx_cons = 0; - priv->rx_prod = 0; + priv->dma_priv.rx_cons = 0; + priv->dma_priv.rx_prod = 0; return 0; err_init_rx_buffers: while (--i >= 0) - tse_free_rx_buffer(priv, &priv->rx_ring[i]); - kfree(priv->tx_ring); + tse_free_rx_buffer(priv, &priv->dma_priv.rx_ring[i]); + kfree(priv->dma_priv.tx_ring); err_tx_ring: - kfree(priv->rx_ring); + kfree(priv->dma_priv.rx_ring); err_rx_ring: return ret; } @@ -297,37 +303,39 @@ static int alloc_init_skbufs(struct altera_tse_private *priv) static void free_skbufs(struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); - unsigned int rx_descs = priv->rx_ring_size; - unsigned int tx_descs = priv->tx_ring_size; + unsigned int rx_descs = priv->dma_priv.rx_ring_size; + unsigned int tx_descs = priv->dma_priv.tx_ring_size; int i; /* Release the DMA TX/RX socket buffers */ for (i = 0; i < rx_descs; i++) - tse_free_rx_buffer(priv, &priv->rx_ring[i]); + tse_free_rx_buffer(priv, &priv->dma_priv.rx_ring[i]); for (i = 0; i < tx_descs; i++) - tse_free_tx_buffer(priv, &priv->tx_ring[i]); + tse_free_tx_buffer(priv, &priv->dma_priv.tx_ring[i]); - kfree(priv->tx_ring); + kfree(priv->dma_priv.tx_ring); } /* Reallocate the skb for 
the reception process */ static inline void tse_rx_refill(struct altera_tse_private *priv) { - unsigned int rxsize = priv->rx_ring_size; + unsigned int rxsize = priv->dma_priv.rx_ring_size; unsigned int entry; int ret; - for (; priv->rx_cons - priv->rx_prod > 0; - priv->rx_prod++) { - entry = priv->rx_prod % rxsize; - if (likely(priv->rx_ring[entry].skb == NULL)) { - ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry], - priv->rx_dma_buf_sz); + for (; priv->dma_priv.rx_cons - priv->dma_priv.rx_prod > 0; + priv->dma_priv.rx_prod++) { + entry = priv->dma_priv.rx_prod % rxsize; + if (likely(priv->dma_priv.rx_ring[entry].skb == NULL)) { + ret = tse_init_rx_buffer(priv, + &priv->dma_priv.rx_ring[entry], + priv->dma_priv.rx_dma_buf_sz); if (unlikely(ret != 0)) break; - priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]); + priv->dmaops->add_rx_desc(&priv->dma_priv, + &priv->dma_priv.rx_ring[entry]); } } } @@ -352,10 +360,11 @@ static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb) */ static int tse_rx(struct altera_tse_private *priv, int limit) { - unsigned int entry = priv->rx_cons % priv->rx_ring_size; - unsigned int next_entry; unsigned int count = 0; + unsigned int next_entry; struct sk_buff *skb; + unsigned int entry + = priv->dma_priv.rx_cons % priv->dma_priv.rx_ring_size; u32 rxstatus; u16 pktlength; u16 pktstatus; @@ -366,7 +375,7 @@ static int tse_rx(struct altera_tse_private *priv, int limit) * (reading the last byte of the response pops the value from the fifo.) 
*/ while ((count < limit) && - ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) { + ((rxstatus = priv->dmaops->get_rx_status(&priv->dma_priv)) != 0)) { pktstatus = rxstatus >> 16; pktlength = rxstatus & 0xffff; @@ -382,9 +391,9 @@ static int tse_rx(struct altera_tse_private *priv, int limit) pktlength -= 2; count++; - next_entry = (++priv->rx_cons) % priv->rx_ring_size; + next_entry = (++priv->dma_priv.rx_cons) % priv->dma_priv.rx_ring_size; - skb = priv->rx_ring[entry].skb; + skb = priv->dma_priv.rx_ring[entry].skb; if (unlikely(!skb)) { netdev_err(priv->dev, "%s: Inconsistent Rx descriptor chain\n", @@ -392,12 +401,14 @@ static int tse_rx(struct altera_tse_private *priv, int limit) priv->dev->stats.rx_dropped++; break; } - priv->rx_ring[entry].skb = NULL; + priv->dma_priv.rx_ring[entry].skb = NULL; skb_put(skb, pktlength); - dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr, - priv->rx_ring[entry].len, DMA_FROM_DEVICE); + dma_unmap_single(priv->device, + priv->dma_priv.rx_ring[entry].dma_addr, + priv->dma_priv.rx_ring[entry].len, + DMA_FROM_DEVICE); if (netif_msg_pktdata(priv)) { netdev_info(priv->dev, "frame received %d bytes\n", @@ -428,30 +439,31 @@ static int tse_rx(struct altera_tse_private *priv, int limit) */ static int tse_tx_complete(struct altera_tse_private *priv) { - unsigned int txsize = priv->tx_ring_size; - struct tse_buffer *tx_buff; + unsigned int txsize = priv->dma_priv.tx_ring_size; + struct altera_dma_buffer *tx_buff; unsigned int entry; int txcomplete = 0; u32 ready; spin_lock(&priv->tx_lock); - ready = priv->dmaops->tx_completions(priv); + ready = priv->dmaops->tx_completions(&priv->dma_priv); /* Free sent buffers */ - while (ready && (priv->tx_cons != priv->tx_prod)) { - entry = priv->tx_cons % txsize; - tx_buff = &priv->tx_ring[entry]; + while (ready && (priv->dma_priv.tx_cons != priv->dma_priv.tx_prod)) { + entry = priv->dma_priv.tx_cons % txsize; + tx_buff = &priv->dma_priv.tx_ring[entry]; if (netif_msg_tx_done(priv)) 
netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n", - __func__, priv->tx_prod, priv->tx_cons); + __func__, priv->dma_priv.tx_prod, + priv->dma_priv.tx_cons); if (likely(tx_buff->skb)) priv->dev->stats.tx_packets++; tse_free_tx_buffer(priv, tx_buff); - priv->tx_cons++; + priv->dma_priv.tx_cons++; txcomplete++; ready--; @@ -494,8 +506,8 @@ static int tse_poll(struct napi_struct *napi, int budget) rxcomplete, budget); spin_lock_irqsave(&priv->rxdma_irq_lock, flags); - priv->dmaops->enable_rxirq(priv); - priv->dmaops->enable_txirq(priv); + priv->dmaops->enable_rxirq(&priv->dma_priv); + priv->dmaops->enable_txirq(&priv->dma_priv); spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); } return rxcomplete; @@ -516,14 +528,14 @@ static irqreturn_t altera_isr(int irq, void *dev_id) spin_lock(&priv->rxdma_irq_lock); /* reset IRQs */ - priv->dmaops->clear_rxirq(priv); - priv->dmaops->clear_txirq(priv); + priv->dmaops->clear_rxirq(&priv->dma_priv); + priv->dmaops->clear_txirq(&priv->dma_priv); spin_unlock(&priv->rxdma_irq_lock); if (likely(napi_schedule_prep(&priv->napi))) { spin_lock(&priv->rxdma_irq_lock); - priv->dmaops->disable_rxirq(priv); - priv->dmaops->disable_txirq(priv); + priv->dmaops->disable_rxirq(&priv->dma_priv); + priv->dmaops->disable_txirq(&priv->dma_priv); spin_unlock(&priv->rxdma_irq_lock); __napi_schedule(&priv->napi); } @@ -542,10 +554,10 @@ static irqreturn_t altera_isr(int irq, void *dev_id) static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); + unsigned int txsize = priv->dma_priv.tx_ring_size; unsigned int nopaged_len = skb_headlen(skb); - unsigned int txsize = priv->tx_ring_size; + struct altera_dma_buffer *buffer = NULL; int nfrags = skb_shinfo(skb)->nr_frags; - struct tse_buffer *buffer = NULL; netdev_tx_t ret = NETDEV_TX_OK; dma_addr_t dma_addr; unsigned int entry; @@ -565,8 +577,8 @@ static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev) } /* Map 
the first skb fragment */ - entry = priv->tx_prod % txsize; - buffer = &priv->tx_ring[entry]; + entry = priv->dma_priv.tx_prod % txsize; + buffer = &priv->dma_priv.tx_ring[entry]; dma_addr = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); @@ -580,11 +592,17 @@ static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev) buffer->dma_addr = dma_addr; buffer->len = nopaged_len; - priv->dmaops->tx_buffer(priv, buffer); + ret = priv->dmaops->tx_buffer(&priv->dma_priv, buffer); + if (ret) + goto out; - skb_tx_timestamp(skb); + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->dma_priv.hwts_tx_en)) + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + else + skb_tx_timestamp(skb); - priv->tx_prod++; + priv->dma_priv.tx_prod++; dev->stats.tx_bytes += skb->len; if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) { @@ -872,6 +890,77 @@ static void tse_set_rx_mode(struct net_device *dev) spin_unlock(&priv->mac_cfg_lock); } +static struct phy_device *connect_local_phy(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct phy_device *phydev = NULL; + char phy_id_fmt[MII_BUS_ID_SIZE + 3]; + int addr = priv->phy_addr; + + if (priv->phy_addr != POLL_PHY) { + snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, + priv->mdio->id, priv->phy_addr); + + netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt); + + phydev = mdiobus_get_phy(priv->mdio, addr); + if (IS_ERR(phydev)) { + netdev_err(dev, "Could not attach to PHY\n"); + phydev = NULL; + } + } else { + phydev = phy_find_first(priv->mdio); + if (!phydev) { + netdev_err(dev, "No PHY found\n"); + return phydev; + } + } + return phydev; +} + +/* Initialize driver's PHY state, and attach to the PHY + */ +static int init_phy(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct phy_device *phydev = NULL; + int ret = 0; + + /* Avoid init phy in case of no phy present */ + if (!priv->phy_iface) + 
return 0; + + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + + ret = phylink_of_phy_connect(priv->phylink, priv->device->of_node, 0); + + if (ret) { + netdev_dbg(dev, "no phy-handle found\n"); + if (!priv->mdio) { + netdev_err(dev, "No phy-handle nor local mdio specified\n"); + return -ENODEV; + } + phydev = connect_local_phy(dev); + if (phydev) { + ret = phylink_connect_phy(priv->phylink, phydev); + if (ret) + return -ENODEV; + } + } + + if (!phydev) { + netdev_err(dev, "Could not find the PHY\n"); + return -ENODEV; + } + + netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n", + phydev->mdio.addr, phydev->phy_id, phydev->link); + + return 0; +} + /* Open and initialize the interface */ static int tse_open(struct net_device *dev) @@ -881,8 +970,12 @@ static int tse_open(struct net_device *dev) int ret = 0; int i; + /* set tx and rx ring size */ + priv->dma_priv.rx_ring_size = dma_rx_num; + priv->dma_priv.tx_ring_size = dma_tx_num; + /* Reset and configure TSE MAC and probe associated PHY */ - ret = priv->dmaops->init_dma(priv); + ret = priv->dmaops->init_dma(&priv->dma_priv); if (ret != 0) { netdev_err(dev, "Cannot initialize DMA\n"); goto phy_error; @@ -912,11 +1005,9 @@ static int tse_open(struct net_device *dev) goto alloc_skbuf_error; } - priv->dmaops->reset_dma(priv); + priv->dmaops->reset_dma(&priv->dma_priv); /* Create and initialize the TX/RX descriptors chains. 
*/ - priv->rx_ring_size = dma_rx_num; - priv->tx_ring_size = dma_tx_num; ret = alloc_init_skbufs(priv); if (ret) { netdev_err(dev, "DMA descriptors initialization failed\n"); @@ -944,26 +1035,33 @@ static int tse_open(struct net_device *dev) /* Enable DMA interrupts */ spin_lock_irqsave(&priv->rxdma_irq_lock, flags); - priv->dmaops->enable_rxirq(priv); - priv->dmaops->enable_txirq(priv); + priv->dmaops->enable_rxirq(&priv->dma_priv); + priv->dmaops->enable_txirq(&priv->dma_priv); /* Setup RX descriptor chain */ - for (i = 0; i < priv->rx_ring_size; i++) - priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]); + for (i = 0; i < priv->dma_priv.rx_ring_size; i++) + priv->dmaops->add_rx_desc(&priv->dma_priv, + &priv->dma_priv.rx_ring[i]); spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); - ret = phylink_of_phy_connect(priv->phylink, priv->device->of_node, 0); + ret = init_phy(dev); if (ret) { netdev_err(dev, "could not connect phylink (%d)\n", ret); goto tx_request_irq_error; } phylink_start(priv->phylink); + priv->dma_priv.hwts_tx_en = 0; + priv->dma_priv.hwts_rx_en = 0; + napi_enable(&priv->napi); netif_start_queue(dev); - priv->dmaops->start_rxdma(priv); + priv->dmaops->start_rxdma(&priv->dma_priv); + + if (priv->dmaops->start_txdma) + priv->dmaops->start_txdma(&priv->dma_priv); /* Start MAC Rx/Tx */ spin_lock(&priv->mac_cfg_lock); @@ -996,8 +1094,8 @@ static int tse_shutdown(struct net_device *dev) /* Disable DMA interrupts */ spin_lock_irqsave(&priv->rxdma_irq_lock, flags); - priv->dmaops->disable_rxirq(priv); - priv->dmaops->disable_txirq(priv); + priv->dmaops->disable_rxirq(&priv->dma_priv); + priv->dmaops->disable_txirq(&priv->dma_priv); spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); /* Free the IRQ lines */ @@ -1015,17 +1113,94 @@ static int tse_shutdown(struct net_device *dev) */ if (ret) netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret); - priv->dmaops->reset_dma(priv); + priv->dmaops->reset_dma(&priv->dma_priv); free_skbufs(dev); 
spin_unlock(&priv->tx_lock); spin_unlock(&priv->mac_cfg_lock); - priv->dmaops->uninit_dma(priv); + priv->dmaops->uninit_dma(&priv->dma_priv); return 0; } +/* ioctl to configure timestamping */ +static int tse_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct hwtstamp_config config; + + if (!netif_running(dev)) + return -EINVAL; + + if (!priv->ptp_enable) { + netdev_alert(priv->dev, "Timestamping not supported"); + return -EOPNOTSUPP; + } + + if (cmd == SIOCSHWTSTAMP) { + if (copy_from_user(&config, ifr->ifr_data, + sizeof(struct hwtstamp_config))) + return -EFAULT; + + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + priv->dma_priv.hwts_tx_en = 0; + break; + case HWTSTAMP_TX_ON: + priv->dma_priv.hwts_tx_en = 1; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + priv->dma_priv.hwts_rx_en = 0; + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + default: + priv->dma_priv.hwts_rx_en = 1; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + + if (copy_to_user(ifr->ifr_data, &config, + sizeof(struct hwtstamp_config))) + return -EFAULT; + else + return 0; + } + + if (cmd == SIOCGHWTSTAMP) { + config.flags = 0; + + if (priv->dma_priv.hwts_tx_en) + config.tx_type = HWTSTAMP_TX_ON; + else + config.tx_type = HWTSTAMP_TX_OFF; + + if (priv->dma_priv.hwts_rx_en) + config.rx_filter = HWTSTAMP_FILTER_ALL; + else + config.rx_filter = HWTSTAMP_FILTER_NONE; + + if (copy_to_user(ifr->ifr_data, &config, + sizeof(struct hwtstamp_config))) + return -EFAULT; + else + return 0; + } + + if (!dev->phydev) + return -EINVAL; + + return phy_mii_ioctl(dev->phydev, ifr, cmd); +} + static struct net_device_ops altera_tse_netdev_ops = { .ndo_open = tse_open, .ndo_stop = tse_shutdown, @@ -1034,6 +1209,7 @@ static struct net_device_ops altera_tse_netdev_ops = { .ndo_set_rx_mode = tse_set_rx_mode, .ndo_change_mtu = 
tse_change_mtu, .ndo_validate_addr = eth_validate_addr, + .ndo_eth_ioctl = tse_do_ioctl, }; static void alt_tse_mac_config(struct phylink_config *config, unsigned int mode, @@ -1098,49 +1274,15 @@ static const struct phylink_mac_ops alt_tse_phylink_ops = { .mac_select_pcs = alt_tse_select_pcs, }; -static int request_and_map(struct platform_device *pdev, const char *name, - struct resource **res, void __iomem **ptr) -{ - struct device *device = &pdev->dev; - struct resource *region; - - *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); - if (*res == NULL) { - dev_err(device, "resource %s not defined\n", name); - return -ENODEV; - } - - region = devm_request_mem_region(device, (*res)->start, - resource_size(*res), dev_name(device)); - if (region == NULL) { - dev_err(device, "unable to request %s\n", name); - return -EBUSY; - } - - *ptr = devm_ioremap(device, region->start, - resource_size(region)); - if (*ptr == NULL) { - dev_err(device, "ioremap of %s failed!", name); - return -ENOMEM; - } - - return 0; -} - /* Probe Altera TSE MAC device */ static int altera_tse_probe(struct platform_device *pdev) { - struct regmap_config pcs_regmap_cfg; struct altera_tse_private *priv; - struct mdio_regmap_config mrc; struct resource *control_port; - struct regmap *pcs_regmap; - struct resource *dma_res; struct resource *pcs_res; - struct mii_bus *pcs_bus; struct net_device *ndev; - void __iomem *descmap; + int pcs_reg_width = 2; int ret = -ENODEV; ndev = alloc_etherdev(sizeof(struct altera_tse_private)); @@ -1153,69 +1295,19 @@ static int altera_tse_probe(struct platform_device *pdev) priv = netdev_priv(ndev); priv->device = &pdev->dev; + priv->dma_priv.device = &pdev->dev; priv->dev = ndev; + priv->dma_priv.dev = ndev; priv->msg_enable = netif_msg_init(debug, default_msg_level); + priv->dma_priv.msg_enable = netif_msg_init(debug, default_msg_level); priv->dmaops = device_get_match_data(&pdev->dev); - if (priv->dmaops && - priv->dmaops->altera_dtype == 
ALTERA_DTYPE_SGDMA) { - /* Get the mapped address to the SGDMA descriptor memory */ - ret = request_and_map(pdev, "s1", &dma_res, &descmap); - if (ret) - goto err_free_netdev; - - /* Start of that memory is for transmit descriptors */ - priv->tx_dma_desc = descmap; - - /* First half is for tx descriptors, other half for tx */ - priv->txdescmem = resource_size(dma_res)/2; - - priv->txdescmem_busaddr = (dma_addr_t)dma_res->start; - - priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap + - priv->txdescmem)); - priv->rxdescmem = resource_size(dma_res)/2; - priv->rxdescmem_busaddr = dma_res->start; - priv->rxdescmem_busaddr += priv->txdescmem; - - if (upper_32_bits(priv->rxdescmem_busaddr)) { - dev_dbg(priv->device, - "SGDMA bus addresses greater than 32-bits\n"); - ret = -EINVAL; - goto err_free_netdev; - } - if (upper_32_bits(priv->txdescmem_busaddr)) { - dev_dbg(priv->device, - "SGDMA bus addresses greater than 32-bits\n"); - ret = -EINVAL; - goto err_free_netdev; - } - } else if (priv->dmaops && - priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) { - ret = request_and_map(pdev, "rx_resp", &dma_res, - &priv->rx_dma_resp); - if (ret) - goto err_free_netdev; - - ret = request_and_map(pdev, "tx_desc", &dma_res, - &priv->tx_dma_desc); - if (ret) - goto err_free_netdev; - - priv->txdescmem = resource_size(dma_res); - priv->txdescmem_busaddr = dma_res->start; - - ret = request_and_map(pdev, "rx_desc", &dma_res, - &priv->rx_dma_desc); - if (ret) - goto err_free_netdev; - - priv->rxdescmem = resource_size(dma_res); - priv->rxdescmem_busaddr = dma_res->start; - - } else { - ret = -ENODEV; + /* Map DMA */ + ret = altera_eth_dma_probe(pdev, &priv->dma_priv, + priv->dmaops->altera_dtype); + if (ret) { + dev_err(&pdev->dev, "cannot map DMA\n"); goto err_free_netdev; } @@ -1235,53 +1327,18 @@ static int altera_tse_probe(struct platform_device *pdev) if (ret) goto err_free_netdev; - /* xSGDMA Rx Dispatcher address space */ - ret = request_and_map(pdev, "rx_csr", &dma_res, - 
&priv->rx_dma_csr); - if (ret) - goto err_free_netdev; - - - /* xSGDMA Tx Dispatcher address space */ - ret = request_and_map(pdev, "tx_csr", &dma_res, - &priv->tx_dma_csr); - if (ret) - goto err_free_netdev; - - memset(&pcs_regmap_cfg, 0, sizeof(pcs_regmap_cfg)); - memset(&mrc, 0, sizeof(mrc)); /* SGMII PCS address space. The location can vary depending on how the * IP is integrated. We can have a resource dedicated to it at a specific * address space, but if it's not the case, we fallback to the mdiophy0 * from the MAC's address space */ - ret = request_and_map(pdev, "pcs", &pcs_res, &priv->pcs_base); + ret = request_and_map(pdev, "pcs", &pcs_res, + &priv->pcs_base); if (ret) { - /* If we can't find a dedicated resource for the PCS, fallback - * to the internal PCS, that has a different address stride - */ - priv->pcs_base = priv->mac_dev + tse_csroffs(mdio_phy0); - pcs_regmap_cfg.reg_bits = 32; - /* Values are MDIO-like values, on 16 bits */ - pcs_regmap_cfg.val_bits = 16; - pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(2); - } else { - pcs_regmap_cfg.reg_bits = 16; - pcs_regmap_cfg.val_bits = 16; - pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(1); - } - - /* Create a regmap for the PCS so that it can be used by the PCS driver */ - pcs_regmap = devm_regmap_init_mmio(&pdev->dev, priv->pcs_base, - &pcs_regmap_cfg); - if (IS_ERR(pcs_regmap)) { - ret = PTR_ERR(pcs_regmap); - goto err_free_netdev; + priv->pcs_base = (void __iomem *)((uintptr_t)(priv->mac_dev) + + tse_csroffs(mdio_phy0)); + pcs_reg_width = 4; } - mrc.regmap = pcs_regmap; - mrc.parent = &pdev->dev; - mrc.valid_addr = 0x0; - mrc.autoscan = false; /* Rx IRQ */ priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq"); @@ -1343,7 +1400,7 @@ static int altera_tse_probe(struct platform_device *pdev) /* The DMA buffer size already accounts for an alignment bias * to avoid unaligned access exceptions for the NIOS processor, */ - priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE; + priv->dma_priv.rx_dma_buf_sz = 
ALTERA_RXDMABUFFER_SIZE; /* get default MAC address from device tree */ ret = of_get_ethdev_address(pdev->dev.of_node, ndev); @@ -1405,17 +1462,10 @@ static int altera_tse_probe(struct platform_device *pdev) (unsigned long) control_port->start, priv->rx_irq, priv->tx_irq); - snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", ndev->name); - pcs_bus = devm_mdio_regmap_register(&pdev->dev, &mrc); - if (IS_ERR(pcs_bus)) { - ret = PTR_ERR(pcs_bus); - goto err_init_pcs; - } - - priv->pcs = lynx_pcs_create_mdiodev(pcs_bus, 0); + priv->pcs = alt_tse_pcs_create(ndev, priv->pcs_base, pcs_reg_width); if (IS_ERR(priv->pcs)) { ret = PTR_ERR(priv->pcs); - goto err_init_pcs; + goto err_init_phy; } priv->phylink_config.dev = &ndev->dev; @@ -1439,13 +1489,31 @@ static int altera_tse_probe(struct platform_device *pdev) if (IS_ERR(priv->phylink)) { dev_err(&pdev->dev, "failed to create phylink\n"); ret = PTR_ERR(priv->phylink); - goto err_init_phylink; + goto err_init_phy; + } + + priv->ptp_enable = of_property_read_bool(pdev->dev.of_node, + "altr,has-ptp"); + dev_info(&pdev->dev, "PTP Enable: %d\n", priv->ptp_enable); + + if (priv->ptp_enable) { + /* MAP PTP */ + ret = intel_fpga_tod_probe(pdev, &priv->ptp_priv); + if (ret) { + dev_err(&pdev->dev, "cannot map PTP\n"); + goto err_init_phy; + } + ret = intel_fpga_tod_register(&priv->ptp_priv, + priv->device); + if (ret) { + dev_err(&pdev->dev, "Failed to register PTP clock\n"); + ret = -ENXIO; + goto err_init_phy; + } } return 0; -err_init_phylink: - lynx_pcs_destroy(priv->pcs); -err_init_pcs: +err_init_phy: unregister_netdev(ndev); err_register_netdev: netif_napi_del(&priv->napi); @@ -1463,10 +1531,11 @@ static void altera_tse_remove(struct platform_device *pdev) struct altera_tse_private *priv = netdev_priv(ndev); platform_set_drvdata(pdev, NULL); + if (priv->ptp_enable) + intel_fpga_tod_unregister(&priv->ptp_priv); altera_tse_mdio_destroy(ndev); unregister_netdev(ndev); phylink_destroy(priv->phylink); - lynx_pcs_destroy(priv->pcs); 
free_netdev(ndev); } @@ -1488,6 +1557,7 @@ static const struct altera_dmaops altera_dtype_sgdma = { .init_dma = sgdma_initialize, .uninit_dma = sgdma_uninitialize, .start_rxdma = sgdma_start_rxdma, + .start_txdma = NULL, }; static const struct altera_dmaops altera_dtype_msgdma = { @@ -1507,9 +1577,32 @@ static const struct altera_dmaops altera_dtype_msgdma = { .init_dma = msgdma_initialize, .uninit_dma = msgdma_uninitialize, .start_rxdma = msgdma_start_rxdma, + .start_txdma = NULL, +}; + +static const struct altera_dmaops altera_dtype_prefetcher = { + .altera_dtype = ALTERA_DTYPE_MSGDMA_PREF, + .dmamask = 64, + .reset_dma = msgdma_pref_reset, + .enable_txirq = msgdma_pref_enable_txirq, + .enable_rxirq = msgdma_pref_enable_rxirq, + .disable_txirq = msgdma_pref_disable_txirq, + .disable_rxirq = msgdma_pref_disable_rxirq, + .clear_txirq = msgdma_pref_clear_txirq, + .clear_rxirq = msgdma_pref_clear_rxirq, + .tx_buffer = msgdma_pref_tx_buffer, + .tx_completions = msgdma_pref_tx_completions, + .add_rx_desc = msgdma_pref_add_rx_desc, + .get_rx_status = msgdma_pref_rx_status, + .init_dma = msgdma_pref_initialize, + .uninit_dma = msgdma_pref_uninitialize, + .start_rxdma = msgdma_pref_start_rxdma, + .start_txdma = msgdma_pref_start_txdma, }; static const struct of_device_id altera_tse_ids[] = { + { .compatible = "altr,tse-msgdma-2.0", + .data = &altera_dtype_prefetcher, }, { .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, }, { .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, }, { .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, }, diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c index e6a7fc9d8fb1c..f235a08f875ef 100644 --- a/drivers/net/ethernet/altera/altera_utils.c +++ b/drivers/net/ethernet/altera/altera_utils.c @@ -3,6 +3,7 @@ * Copyright (C) 2014 Altera Corporation. 
All rights reserved */ +#include "altera_eth_dma.h" #include "altera_tse.h" #include "altera_utils.h" @@ -31,3 +32,32 @@ int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask) u32 value = csrrd32(ioaddr, offs); return (value & bit_mask) ? 0 : 1; } + +int request_and_map(struct platform_device *pdev, const char *name, + struct resource **res, void __iomem **ptr) +{ + struct resource *region; + struct device *device = &pdev->dev; + + *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + if (!*res) { + dev_err(device, "resource %s not defined\n", name); + return -ENODEV; + } + + region = devm_request_mem_region(device, (*res)->start, + resource_size(*res), dev_name(device)); + if (!region) { + dev_err(device, "unable to request %s\n", name); + return -EBUSY; + } + + *ptr = devm_ioremap(device, region->start, + resource_size(region)); + if (!*ptr) { + dev_err(device, "ioremap of %s failed!", name); + return -ENOMEM; + } + + return 0; +} diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h index 3c2e32fb73893..cd011985f6e02 100644 --- a/drivers/net/ethernet/altera/altera_utils.h +++ b/drivers/net/ethernet/altera/altera_utils.h @@ -3,6 +3,10 @@ * Copyright (C) 2014 Altera Corporation. 
All rights reserved */ +#include +#include +#include + #ifndef __ALTERA_UTILS_H__ #define __ALTERA_UTILS_H__ @@ -13,5 +17,54 @@ void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask); void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask); int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask); int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask); +int request_and_map(struct platform_device *pdev, const char *name, + struct resource **res, void __iomem **ptr); + +static inline +u32 csrrd32(void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + return readl(paddr); +} + +static inline +u16 csrrd16(void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + return readw(paddr); +} + +static inline +u8 csrrd8(void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + return readb(paddr); +} + +static inline +void csrwr32(u32 val, void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + writel(val, paddr); +} + +static inline +void csrwr16(u16 val, void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + writew(val, paddr); +} + +static inline +void csrwr8(u8 val, void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + writeb(val, paddr); +} #endif /* __ALTERA_UTILS_H__*/ diff --git a/drivers/net/ethernet/altera/intel_fpga_etile.h b/drivers/net/ethernet/altera/intel_fpga_etile.h new file mode 100644 index 0000000000000..fe3f7bfeeb179 --- /dev/null +++ b/drivers/net/ethernet/altera/intel_fpga_etile.h @@ -0,0 +1,2715 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Intel FPGA E-tile Ethernet MAC driver + * Copyright (C) 2020-2022 Intel Corporation. All rights reserved. 
+ * + * Contributors: + * Roman Bulgakov + * Yu Ying Choo + * Joyce Ooi + * Arzu Ozdogan-tackin + * Alexis Rodriguez + * + * Original driver contributed by GlobalLogic. + */ + +#ifndef __INTEL_FPGA_ETILE_ETH_H__ +#define __INTEL_FPGA_ETILE_ETH_H__ + +#define INTEL_FPGA_ETILE_ETH_RESOURCE_NAME "intel_fpga_etile" + +#include +#include +#include +#include +#include +#include +#include +#include "intel_fpga_tod.h" + +/* DR link state gets updated after every switch*/ +extern int dr_link_state; + +#define INTEL_FPGA_ETILE_SW_RESET_WATCHDOG_CNTR 1000000 +#define INTEL_FPGA_ETILE_UI_VALUE_10G 0x0018D302 +#define INTEL_FPGA_ETILE_UI_VALUE_25G 0x0009EE01 +#define INTEL_FPGA_TX_PMA_DELAY_25G 105 +#define INTEL_FPGA_RX_PMA_DELAY_25G 89 +/* in miliseconds */ +#define INTEL_FPGA_PMA_OFFSET_207_TIMEOUT 300 + +/* Flow Control defines */ +#define FLOW_OFF 0 +#define FLOW_RX 1 +#define FLOW_TX 2 +#define FLOW_ON (FLOW_TX | FLOW_RX) + +/* Ethernet Reconfiguration Interface + * Auto Negotiation and Link Training + *Bit Definitions + */ +/* 0xB0: ANLT Sequencer Config */ +#define ETH_ANLT_SEQ_CONF_RESET_SEQ BIT(0) +#define ETH_ANLT_SEQ_CONF_DISABLE_AN_TIMER BIT(1) +#define ETH_ANLT_SEQ_CONF_DISABLE_LF_TIMER BIT(2) +#define ETH_ANLT_SEQ_CONF_SEQ_FORCE_MODE_NONE (0b000 << 4) +#define ETH_ANLT_SEQ_CONF_SEQ_FORCE_MODE_25G_R1 (0b001 << 4) +#define ETH_ANLT_SEQ_CONF_SEQ_FORCE_MODE_100G_R4 (0b011 << 4) +#define ETH_ANLT_SEQ_CONF_SEQ_FORCE_MODE_10G_R1 (0b101 << 4) +#define ETH_ANLT_SEQ_CONF_SEQ_FORCE_MODE_100G_P2 (0x7 << 4) +#define ETH_ANLT_SEQ_CONF_SEQ_FORCE_MODE_RS_FEC_ON BIT(7) + +#define ETH_ANLT_SEQ_CONF_LINK_FAILURE_RESP BIT(12) +#define ETH_ANLT_SEQ_CONF_LINK_FAIL_IF_HIBER BIT(13) +#define ETH_ANLT_SEQ_CONF_SKIP_LINK_ON_TO BIT(14) +#define ETH_ANLT_SEQ_CONF_ILPBK_LN_0 BIT(16) +#define ETH_ANLT_SEQ_CONF_ILPBK_LN_1 BIT(17) +#define ETH_ANLT_SEQ_CONF_ILPBK_LN_2 BIT(18) +#define ETH_ANLT_SEQ_CONF_ILPBK_LN_3 BIT(19) +#define ETH_ANLT_SEQ_CONF_RS_FEC_NEG_EN BIT(20) +#define 
ETH_ANLT_SEQ_CONF_RS_FEC_REQ BIT(21) +#define ETH_ANLT_SEQ_CONF_TX_POL_INV_LN_0 BIT(22) +#define ETH_ANLT_SEQ_CONF_TX_POL_INV_LN_1 BIT(23) +#define ETH_ANLT_SEQ_CONF_TX_POL_INV_LN_2 BIT(24) +#define ETH_ANLT_SEQ_CONF_TX_POL_INV_LN_3 BIT(25) +#define ETH_ANLT_SEQ_CONF_RX_POL_INV_LN_0 BIT(26) +#define ETH_ANLT_SEQ_CONF_RX_POL_INV_LN_1 BIT(27) +#define ETH_ANLT_SEQ_CONF_RX_POL_INV_LN_2 BIT(28) +#define ETH_ANLT_SEQ_CONF_RX_POL_INV_LN_3 BIT(29) +#define ETH_ANLT_SEQ_CONF_KR_PAUSE BIT(31) + +/* 0xB1: ANLT Sequencer Status */ +#define ETH_ANLT_SEQ_STAT_LINK_READY BIT(0) +#define ETH_ANLT_SEQ_STAT_AUTO_NEG_TIMEOUT BIT(1) +#define ETH_ANLT_SEQ_STAT_LINK_TR_TIMEOUT BIT(2) +#define ETH_ANLT_SEQ_STAT_RECONF_MODE_AN BIT(8) +#define ETH_ANLT_SEQ_STAT_RECONF_MODE_LT BIT(9) +#define ETH_ANLT_SEQ_STAT_RECONF_MODE_10G_DATA BIT(10) +#define ETH_ANLT_SEQ_STAT_RECONF_MODE_25G_DATA BIT(11) +#define ETH_ANLT_SEQ_STAT_RECONF_MODE_100G_R4_DATA BIT(13) +#define ETH_ANLT_SEQ_STAT_RECONF_MODE_100G_P2 BIT(15) +#define ETH_ANLT_SEQ_STAT_RS_FEC_SUPPORT BIT(18) +#define ETH_ANLT_SEQ_STAT_KR_PAUSED BIT(31) + +/* 0xC0: Auto Negotiation Config Register 1 */ +#define ETH_AUTO_NEG_CONF_1_EN BIT(0) +#define ETH_AUTO_NEG_CONF_1_EN_AN_BASE_PAGE BIT(1) +#define ETH_AUTO_NEG_CONF_1_EN_AN_NEXT_PAGE BIT(2) +#define ETH_AUTO_NEG_CONF_1_FORCE_LOCAL_DEV_REM_FAULT BIT(3) +#define ETH_AUTO_NEG_CONF_1_OVERRIDE_AN_PARAMS BIT(5) +#define ETH_AUTO_NEG_CONF_1_OVERRIDE_AN_MASTER_CH BIT(6) +#define ETH_AUTO_NEG_CONF_1_IGNORE_NONCE_FIELD BIT(7) +#define ETH_AUTO_NEG_CONF_1_EN_CONSORT_NEXT_PAGE_SENT BIT(8) +#define ETH_AUTO_NEG_CONF_1_EN_CONSORT_NEXT_PAGE_RECV BIT(9) +#define ETH_AUTO_NEG_CONF_1_EN_CONSORT_NEXT_PAGE_OVERRIDE BIT(10) +#define ETH_AUTO_NEG_CONF_1_IGNORE_CONSORT_NEXT_PAGE_CODE BIT(11) +#define ETH_AUTO_NEG_CONF_1_CONSORT_OUI (0xFFFF << 16) + +/* 0xC1: Auto Negotiation Config Register 2 */ +#define ETH_AUTO_NEG_CONF_2_RESET_AN BIT(0) +#define ETH_AUTO_NEG_CONF_2_AN_NEXT_PAGE BIT(8) +#define 
ETH_AUTO_NEG_CONF_2_CONST_OUI_UPPER (0xFF << 16) + +/* 0xC2: Auto Negotiation Status Register */ +#define ETH_AUTO_NEG_STATUS_AN_PAGE_RECEIVED BIT(1) +#define ETH_AUTO_NEG_STATUS_AN_COMPLETE BIT(2) +#define ETH_AUTO_NEG_STATUS_AN_ADV_REM_FAULT BIT(3) +#define ETH_AUTO_NEG_STATUS_PHY_AN_ABILITY BIT(5) +#define ETH_AUTO_NEG_STATUS_AN_STATUS BIT(7) +#define ETH_AUTO_NEG_STATUS_AN_LP_ABILITY BIT(7) +#define ETH_AUTO_NEG_STATUS_CONSORT_NEXT_PAGE_RECV BIT(10) +#define ETH_AUTO_NEG_STATUS_AN_FAILURE BIT(11) +#define ETH_AUTO_NEG_STATUS_IEEE_1000BASE_KX BIT(12) +#define ETH_AUTO_NEG_STATUS_IEEE_10GBASE_KX4 BIT(13) +#define ETH_AUTO_NEG_STATUS_IEEE_10GBASE_KR BIT(14) +#define ETH_AUTO_NEG_STATUS_IEEE_40GBASE_KR4 BIT(15) +#define ETH_AUTO_NEG_STATUS_IEEE_40GBASE_CR4 BIT(16) +#define ETH_AUTO_NEG_STATUS_IEEE_100GBASE_CR10 BIT(17) +#define ETH_AUTO_NEG_STATUS_IEEE_100GBASE_KP4 BIT(18) +#define ETH_AUTO_NEG_STATUS_IEEE_100GBASE_KR4 BIT(19) +#define ETH_AUTO_NEG_STATUS_IEEE_100GBASE_CR4 BIT(20) +#define ETH_AUTO_NEG_STATUS_IEEE_25GBASE_KRS_CRS BIT(21) +#define ETH_AUTO_NEG_STATUS_IEEE_25GBASE_KR_CR BIT(22) +#define ETH_AUTO_NEG_STATUS_IEEE_25GBASE_KR2_CR2 BIT(23) +#define ETH_AUTO_NEG_STATUS_CONST_25GBASE_KR1 BIT(24) +#define ETH_AUTO_NEG_STATUS_CONST_25GBASE_CR1 BIT(25) +#define ETH_AUTO_NEG_STATUS_CONST_50GBASE_KR2 BIT(26) +#define ETH_AUTO_NEG_STATUS_CONST_50GBASE_CR2 BIT(27) +#define ETH_AUTO_NEG_STATUS_RS_FEC_NEGOTIATED BIT(30) + +/* 0xC3: Auto Negotiation Config Register 3 */ +#define ETH_AUTO_NEG_CONF_3_USER_BASE_PAGE_LOW_SELECTOR 0x1F +#define ETH_AUTO_NEG_CONF_3_USER_BASE_PAGE_LOW_ECHOED_NONE (0x1F << 5) +#define ETH_AUTO_NEG_CONF_3_USER_BASE_PAGE_LOW_REMOTE_FAULT BIT(13) +#define ETH_AUTO_NEG_CONF_3_USER_BASE_PAGE_LOW_ACK BIT(14) +#define ETH_AUTO_NEG_CONF_3_USER_BASE_PAGE_LOW_NEXT_PG BIT(15) +#define ETH_AUTO_NEG_CONF_3_OVERRIDE_AN_TECH_10GBASE_KX4 BIT(17) +#define ETH_AUTO_NEG_CONF_3_OVERRIDE_AN_TECH_10GBASE_KR BIT(18) +#define 
ETH_AUTO_NEG_CONF_3_OVERRIDE_AN_TECH_100GBASE_KP4 BIT(22) +#define ETH_AUTO_NEG_CONF_3_OVERRIDE_AN_TECH_100GBASE_KR4 BIT(23) +#define ETH_AUTO_NEG_CONF_3_OVERRIDE_AN_FEC_10GBASE_RSFEC_CAP BIT(24) +#define ETH_AUTO_NEG_CONF_3_OVERRIDE_AN_FEC_10GBASE_RSFEC_REQ BIT(25) +#define ETH_AUTO_NEG_CONF_3_OVERRIDE_AN_FEC_25G_IEEE_RSFEC_REQ BIT(26) +#define ETH_AUTO_NEG_CONF_3_OVERRIDE_AN_FEC_25G_IEEE_BASE_RSFEC_REQ BIT(27) +#define ETH_AUTO_NEG_CONF_3_OVERRIDE_AN_PAUSE_AB BIT(28) +#define ETH_AUTO_NEG_CONF_3_OVERRIDE_AN_AYSSYM_DIR BIT(29) + +/* 0xC4: Auto Negotiation Config Register 4 */ +#define ETH_AUTO_NEG_CONF_4_TX_NONCE 0x1F +#define ETH_AUTO_NEG_CONF_4_TECH_AB 0x3FFFFFE0 +#define ETH_AUTO_NEG_CONF_4_FEC (0x3 << 30) + +/* 0xC5: Auto Negotiation Config Register 5 */ +#define ETH_AUTO_NEG_CONF_5_MSG_CODE 0x7FF +#define ETH_AUTO_NEG_CONF_5_TOGGLE BIT(11) +#define ETH_AUTO_NEG_CONF_5_ACK2 BIT(12) +#define ETH_AUTO_NEG_CONF_5_MP BIT(13) +#define ETH_AUTO_NEG_CONF_5_ACK BIT(14) +#define ETH_AUTO_NEG_CONF_5_NEXT_PAGE BIT(15) +#define ETH_AUTO_NEG_CONF_5_100GBASE_CR4 BIT(16) +#define ETH_AUTO_NEG_CONF_5_25GBASE_KRS_CRS BIT(17) +#define ETH_AUTO_NEG_CONF_5_25GBASE_KR_CR BIT(18) +#define ETH_AUTO_NEG_CONF_5_2_5GBASE_KX BIT(19) +#define ETH_AUTO_NEG_CONF_5_5GBASE_KR BIT(20) +#define ETH_AUTO_NEG_CONF_5_50GBASE_KR_CR BIT(21) +#define ETH_AUTO_NEG_CONF_5_100GBASE_KR2_CR2 BIT(22) +#define ETH_AUTO_NEG_CONF_5_200GBASE_KR4_CR4 BIT(23) + +/* 0xC6: Auto Negotiation Config Register 6 */ +#define ETH_AUTO_NEG_CONF_6_USER_NEXT_PAGE_HIGH 0xFFFFFFFF + +/* 0xC7: Auto Negotiation Status Register 1 */ +#define ETH_AUTO_NEG_STAT_1_LP_BASE_PG_LO_SEL 0x1F +#define ETH_AUTO_NEG_STAT_1_LP_BASE_PG_LO_ECHO (0x1F << 5) +#define ETH_AUTO_NEG_STAT_1_LP_BASE_PG_LO_PAUSE (0x7 << 10) +#define ETH_AUTO_NEG_STAT_1_LP_BASE_PG_LO_RF BIT(13) +#define ETH_AUTO_NEG_STAT_1_LP_BASE_PG_LO_ACK BIT(14) +#define ETH_AUTO_NEG_STAT_1_LP_BASE_PG_LO_NP BIT(15) + +/* 0xC8: Auto Negotiation Status Register 2 */ + +#define 
ETH_AUTO_NEG_STAT_2_LP_BASE_PG_HI_TX_NONCE 0x1F +#define ETH_AUTO_NEG_STAT_2_LP_BASE_PG_HI_TECH_AB 0x3FFFFFE0 +#define ETH_AUTO_NEG_STAT_2_LP_BASE_PG_HI_FEC (0x3 << 30) + +/* 0xC9: Auto Negotiation Status Register 3 */ + +#define ETH_AUTO_NEG_STAT_3_LP_NEXT_PG_LO_MSG 0x7FF +#define ETH_AUTO_NEG_STAT_3_LP_NEXT_PG_LO_TOGGLE BIT(11) +#define ETH_AUTO_NEG_STAT_3_LP_NEXT_PG_LO_ACK2 BIT(12) +#define ETH_AUTO_NEG_STAT_3_LP_NEXT_PG_LO_MP BIT(13) +#define ETH_AUTO_NEG_STAT_3_LP_NEXT_PG_LO_ACK BIT(14) +#define ETH_AUTO_NEG_STAT_3_LP_NEXT_PG_LO_NEXT_PAGE BIT(15) + +/* 0xCA: Auto Negotiation Status Register 4 */ + +#define ETH_AUTO_NEG_STAT_4_LP_NEXT_PG_HI 0xFFFFFFFF + +/* 0xCB: Auto Negotiation Status Register 5 */ +#define ETH_AUTO_NEG_STAT_5_1000BASE_KX BIT(0) +#define ETH_AUTO_NEG_STAT_5_10GBASE_KX4 BIT(1) +#define ETH_AUTO_NEG_STAT_5_10GBASE_KR BIT(2) +#define ETH_AUTO_NEG_STAT_5_40GBASE_KR4 BIT(3) +#define ETH_AUTO_NEG_STAT_5_40GBASE_CR4 BIT(4) +#define ETH_AUTO_NEG_STAT_5_100GBASE_CR10 BIT(5) +#define ETH_AUTO_NEG_STAT_5_100GBASE_KP4 BIT(6) +#define ETH_AUTO_NEG_STAT_5_100GBASE_KR4 BIT(7) +#define ETH_AUTO_NEG_STAT_5_100GBASE_CR4 BIT(8) +#define ETH_AUTO_NEG_STAT_5_25GBASE_KRS_CRS BIT(9) +#define ETH_AUTO_NEG_STAT_5_25GBASE_KR_CR BIT(10) +#define ETH_AUTO_NEG_STAT_5_2_5GBASE_KX BIT(11) +#define ETH_AUTO_NEG_STAT_5_5GBASE_KR BIT(12) +#define ETH_AUTO_NEG_STAT_5_50GBASE_KR_CR BIT(13) +#define ETH_AUTO_NEG_STAT_5_100GBASE_KR2_CR2 BIT(14) +#define ETH_AUTO_NEG_STAT_5_200GBASE_KR4_CR4 BIT(15) +#define ETH_AUTO_NEG_STAT_5_25G_RSFEC_REQ BIT(23) +#define ETH_AUTO_NEG_STAT_5_25GBASE_R_FEC_REQ BIT(24) +#define ETH_AUTO_NEG_STAT_5_FEC_AB BIT(25) +#define ETH_AUTO_NEG_STAT_5_FEC_REQ BIT(26) +#define ETH_AUTO_NEG_STAT_5_REMOTE_FAULT BIT(27) +#define ETH_AUTO_NEG_STAT_5_PAUSE BIT(28) +#define ETH_AUTO_NEG_STAT_5_ASM_DIR BIT(29) + +/* 0xCC: AN Channel Override */ +#define ETH_OR_AN_CH 0x3 + +/* 0xCD: Consortium Next Page Override*/ +#define ETH_OR_CONSORT_NEXT_PG_TECH_25GBASE_KR1_AB 
BIT(13) +#define ETH_OR_CONSORT_NEXT_PG_TECH_25GBASE_CR1_AB BIT(14) +#define ETH_OR_CONSORT_NEXT_PG_TECH_50GBASE_KR2_AB BIT(17) +#define ETH_OR_CONSORT_NEXT_PG_TECH_50GBASE_CR2_AB BIT(18) +#define ETH_OR_CONSORT_NEXT_PG_FEC_CTRL_F1_CL91_RSFEC_AB BIT(24) +#define ETH_OR_CONSORT_NEXT_PG_FEC_CTRL_F2_CL74_RSFEC_AB BIT(25) +#define ETH_OR_CONSORT_NEXT_PG_FEC_CTRL_F3_CL91_RSFEC_REQ BIT(26) +#define ETH_OR_CONSORT_NEXT_PG_FEC_CTRL_F4_CL74_RSFEC_REQ BIT(27) + +/* 0xCE: Consortium Next Page Link Partner Status */ +#define ETH_LP_CONSORT_NEXT_PG_TECH_25GBASE_KR1_AB BIT(13) +#define ETH_LP_CONSORT_NEXT_PG_TECH_25GBASE_CR1_AB BIT(14) +#define ETH_LP_CONSORT_NEXT_PG_TECH_50GBASE_KR2_AB BIT(17) +#define ETH_LP_CONSORT_NEXT_PG_TECH_50GBASE_CR2_AB BIT(18) +#define ETH_LP_CONSORT_NEXT_PG_FEC_F1_CL91_RSFEC_AB BIT(24) +#define ETH_LP_CONSORT_NEXT_PG_FEC_F2_CL74_RSFEC_AB BIT(25) +#define ETH_LP_CONSORT_NEXT_PG_FEC_F3_CL91_RSFEC_REQ BIT(26) +#define ETH_LP_CONSORT_NEXT_PG_FEC_F4_CL74_RSFEC_REQ BIT(27) + +/* 0xD0: Link training Config 1 */ +#define ETH_LNK_CONF_LNK_TRN_EN BIT(0) +#define ETH_LNK_CONF_MAX_WAIT_TMR_DIS BIT(1) +#define ETH_LNK_CONF_HIGH_EFFORT BIT(2) +#define ETH_LNK_CONF_SERDES_EN BIT(3) +#define ETH_LNK_CONF_LT_MAX_WAIT_TIME (0xFFF << 4) +#define ETH_LNK_CONF_RX_ADAPT_DIS BIT(16) +#define ETH_LNK_CONF_PER_RX_ADAPT_DIS BIT(17) +#define ETH_LNK_CONF_PRE_LT_DIS BIT(18) +#define ETH_LNK_CONF_POST_LT_DIS BIT(19) +#define ETH_LNK_CONF_LF_OR (0xF << 20) +#define ETH_LNK_CONF_HF_OR (0xF << 24) +#define ETH_LNK_CONF_BW_OR (0xF << 28) + +/* 0xD2: Link training Status 1 */ +#define ETH_LNK_TR_STAT_LN0 BIT(0) +#define ETH_LNK_TR_STAT_FRM_LN0 BIT(1) +#define ETH_LNK_TR_STAT_STARTUP_LN0 BIT(2) +#define ETH_LNK_TR_STAT_FAIL_LN0 BIT(3) +#define ETH_LNK_TR_STAT_LN1 BIT(8) +#define ETH_LNK_TR_STAT_FRM_LN1 BIT(9) +#define ETH_LNK_TR_STAT_STARTUP_LN1 BIT(10) +#define ETH_LNK_TR_STAT_FAIL_LN1 BIT(11) +#define ETH_LNK_TR_STAT_LN2 BIT(16) +#define ETH_LNK_TR_STAT_FRM_LN2 BIT(17) +#define 
ETH_LNK_TR_STAT_STARTUP_LN2 BIT(18) +#define ETH_LNK_TR_STAT_FAIL_LN2 BIT(19) +#define ETH_LNK_TR_STAT_LN3 BIT(24) +#define ETH_LNK_TR_STAT_FRM_LN3 BIT(25) +#define ETH_LNK_TR_STAT_STARTUP_LN3 BIT(26) +#define ETH_LNK_TR_STAT_FAIL_LN3 BIT(27) + +/* 0xD3: Link training Config Lane 0 */ +#define ETH_LNK_TR_CONF_PRSB_SELECT_LN0 0x7 +#define ETH_LNK_TR_CONF_PRSB_SEED_LN0 (0x7FF << 16) + +/* 0x:E0 Link training Config Lane 1 */ +#define ETH_LNK_TR_CONF_PRSB_SELECT_LN1 0x7 +#define ETH_LNK_TR_CONF_PRSB_SEED_LN1 (0x7FF << 16) + +/* 0xE4: Link training Config Lane 2 */ +#define ETH_LNK_TR_CONF_PRSB_SELECT_LN2 0x7 +#define ETH_LNK_TR_CONF_PRSB_SEED_LN2 (0x7FF << 16) + +/* 0xE8: Link training Config Lane 3 */ +#define ETH_LNK_TR_CONF_PRSB_SELECT_LN3 0x7 +#define ETH_LNK_TR_CONF_PRSB_SEED_LN3 (0x7FF << 16) + +/* 0x300: PHY ID */ +#define ETH_PHY_ID 0xffffffff + +/* 0x301: PHY scratch */ +#define ETH_PHY_SCRATCH 0xffffffff + +/* 0x30D: PHY link loopback */ +#define ETH_PHY_LOOPBACK_TX_PCS (0x7 << 3) +#define ETH_PHY_LOOPBACK_RX_PCS (0x7 << 15) +#define ETH_PHY_LOOPBACK_RX_MAC (0x7 << 18) +#define ETH_PHY_LOOPBACK_RX_PLD (0x7 << 21) + +/* 0x310: PHY configuration */ +/* **** RESET REGISTERS FOR THE MAC ****/ +#define ETH_PHY_CONF_IO_SYS_RESET BIT(0) +#define ETH_PHY_CONF_SOFT_TXP_RESET BIT(1) +#define ETH_PHY_CONF_SOFT_RXP_RESET BIT(2) +#define ETH_PHY_CONF_SET_REF_CLOCK BIT(4) +#define ETH_PHY_CONF_SET_DSATA_CLOCK BIT(5) + +/* 0x321: PHY CDR locked */ +#define ETH_PHY_CD_PLL_LOCKED 0xF + +/* 0x322: PHY tx datapath ready */ +#define ETH_PHY_TX_PCS_READY BIT(0) + +/* 0x323 PHY frame erors detected */ +#define ETH_PHY_FRM_ERROR 0xFFFFF + +/* 0x324 PHY clear frame error */ +#define ETH_PHY_CLR_FRM_ERROR BIT(0) + +/* 0x326: PHY RX PCS status */ +#define ETH_PHY_RX_PCS_ALIGNED BIT(0) +#define ETH_PHY_HI_BER BIT(1) + +/* 0x327: PHY pcs error injection */ +#define ETH_PHY_INJ_ERROR 0x000FFFFF + +/* 0x328: PHY alignment marker lock */ +#define ETH_PHY_AM_LOCK BIT(0) + +/* 0x329: PHY 
PCS deskew status */ +#define ETH_PHY_RX_PCS_DSKW_STAT BIT(0) +#define ETH_PHY_RX_PCS_DSKW_CHNG BIT(1) + +/* 0x32A: PHY BER count */ +#define ETH_PHY_BER_CNT 0xffffffff + +/* 0x32B: PHY transfer ready */ +#define ETH_PHY_AIB_RESETEHIP_TX_TRANSFER 0xF +#define ETH_PHY_AIB_RESET_PTP_TX_TRANSFER (0x3 << 4) +#define ETH_PHY_AIB_RESET_EHIP_RX_TRANSFER (0xF << 16) +#define ETH_PHY_AIB_RESET_PTP_RX_TRANSFER (0x3 << 20) + +/* 0x3C: PHY soft rc reset status */ +#define ETH_PHY_EHIP_RESET BIT(0) +#define ETH_PHY_EHIP_TX_RESET BIT(1) +#define ETH_PHY_EHIP_RX_RESET BIT(2) +#define ETH_PHY_EHIP_RSFEC_RESET BIT(3) +#define ETH_PHY_EHIP_RSFEC_TX_RESET BIT(4) +#define ETH_PHY_EHIP_RSFEC_RX_RESET BIT(5) + +/* 0x330: PHY virtual lane 0 */ +#define ETH_PHY_VLANE_0_vlan0 0x1F +#define ETH_PHY_VLANE_0_vlan1 (0x1F << 5) +#define ETH_PHY_VLANE_0_vlan2 (0x1F << 10) +#define ETH_PHY_VLANE_0_vlan3 (0x1F << 15) +#define ETH_PHY_VLANE_0_vlan4 (0x1F << 20) +#define ETH_PHY_VLANE_0_vlan5 (0x1F << 25) + +/* 0x331: PHY virtual lane 1 */ +#define ETH_PHY_VLANE_1_vlan6 0x1F +#define ETH_PHY_VLANE_1_vlan7 (0x1F << 5) +#define ETH_PHY_VLANE_1_vlan8 (0x1F << 10) +#define ETH_PHY_VLANE_1_vlan9 (0x1F << 15) +#define ETH_PHY_VLANE_1_vlan10 (0x1F << 20) +#define ETH_PHY_VLANE_1_vlan11 (0x1F << 25) + +/* 0x332: PHY virtual lane 2 */ +#define ETH_PHY_VLANE_2_vlan12 0x1F +#define ETH_PHY_VLANE_2_vlan13 (0x1F << 5) +#define ETH_PHY_VLANE_2_vlan14 (0x1F << 10) +#define ETH_PHY_VLANE_2_vlan15 (0x1F << 15) +#define ETH_PHY_VLANE_2_vlan16 (0x1F << 20) +#define ETH_PHY_VLANE_2_vlan17 (0x1F << 25) + +/* 0x333: PHY virtual lane 3 */ +#define ETH_PHY_VLANE_3_vlan18 0x1F +#define ETH_PHY_VLANE_3_vlan19 (0X1F << 5) + +/* 0x341: PHY recovered clock */ +#define ETH_PHY_KHZ_RX 0xffffffff + +/* 0x342: PHY TX clock */ +#define ETH_PHY_KHZ_TX 0xffffffff + +/* 0x350: PHY TX PLD Configuration */ +#define ETH_PHY_TX_PLD_EHIP_MODE 0x7 +#define ETH_PHY_TX_PLD_FIFO_FULL (0x1F << 8) +#define ETH_PHY_TX_PLD_DESKEW_CHAN_SEL (0x3F << 
16) +#define ETH_PHY_TX_PLD_TX_DESKEW_CLEAR BIT(22) +#define ETH_PHY_TX_PLD_SEL_50GX2 BIT(23) + +/* 0x351: PHY TX PLD status */ +#define ETH_PHY_TX_PLD_STAT_EVAL_DONE BIT(0) +#define ETH_PHY_TX_PLD_STAT_DESKW (0x7 << 1) +#define ETH_PHY_TX_PLD_STAT_MONITOR_ERR (0x3F << 8) +#define ETH_PHY_TX_PLD_STAT_DSK_ACT_CHAN (0x3F << 16) +#define ETH_PHY_TX_PLD_STAT_FIFO_UNDERFLOW BIT(22) +#define ETH_PHY_TX_PLD_STAT_FIFO_EMPTY BIT(23) +#define ETH_PHY_TX_PLD_STAT_FIFO_OVERFLOW BIT(24) + +/* 0x354 PHY dynamic deskew buffer */ +#define ETH_PHY_DYNAMIC_DESKEW_BUFF_ALMOST_FULL (0xF << 8) +#define ETH_PHY_DYNAMIC_DESKEW_BUFF_OVERFLOW_IND (0xF << 12) +#define ETH_PHY_DYNAMIC_DESKEW_BUFF_OVERFLOW BIT(16) + +/* 0x355 PHY RX PLD block */ +#define ETH_PHY_RX_PLD_RX_EHIP_MODE 0x7 +#define ETH_PHY_RX_PLD_USER_LANE_PTP BIT(3) +#define ETH_PHY_RX_PLD_SEL_50GX2 BIT(4) + +/* 0x360 PHY RX PCS */ +#define ETH_PHY_RX_PCS_AM_INTVL 0x3FFF +#define ETH_PHY_RX_PCS_RX_PCS_MAX_SKEW (0x3F << 14) +#define ETH_PHY_RX_PCS_USE_HIBER_MON BIT(20) + +/* 0x361 PHY BIP counter 0 */ +#define ETH_PHY_BIP_CNT_0 0x0000FFFF + +/* 0x362 PHY BIP counter 1 */ +#define ETH_PHY_BIP_CNT_1 0x0000FFFF + +/* 0x363 PHY BIP counter 2 */ +#define ETH_PHY_BIP_CNT_2 0x0000FFFF + +/* 0x364 PHY BIP counter 3 */ +#define ETH_PHY_BIP_CNT_3 0x0000FFFF + +/* 0x365 PHY BIP counter 4 */ +#define ETH_PHY_BIP_CNT_4 0x0000FFFF + +/* 0x366 PHY BIP counter 5 */ +#define ETH_PHY_BIP_CNT_5 0x0000FFFF + +/* 0x367 PHY BIP counter 6 */ +#define ETH_PHY_BIP_CNT_6 0x0000FFFF + +/* 0x368 PHY BIP counter 7 */ +#define ETH_PHY_BIP_CNT_7 0x0000FFFF + +/* 0x369 PHY BIP counter 8 */ +#define ETH_PHY_BIP_CNT_8 0x0000FFFF + +/* 0x36A PHY BIP counter 9 */ +#define ETH_PHY_BIP_CNT_9 0x0000FFFF + +/* 0x36B PHY BIP counter 10 */ +#define ETH_PHY_BIP_CNT_10 0x0000FFFF + +/* 0x36C PHY BIP counter 11 */ +#define ETH_PHY_BIP_CNT_11 0x0000FFFF + +/* 0x36D PHY BIP counter 12 */ +#define ETH_PHY_BIP_CNT_12 0x0000FFFF + +/* 0x36E PHY BIP counter 13 */ +#define 
ETH_PHY_BIP_CNT_13 0x0000FFFF + +/* 0x36F PHY BIP counter 14 */ +#define ETH_PHY_BIP_CNT_14 0x0000FFFF + +/* 0x370 PHY BIP counter 15 */ +#define ETH_PHY_BIP_CNT_15 0x0000FFFF + +/* 0x371 PHY BIP counter 16 */ +#define ETH_PHY_BIP_CNT_16 0x0000FFFF + +/* 0x372 PHY BIP counter 17 */ +#define ETH_PHY_BIP_CNT_17 0x0000FFFF + +/* 0x373 PHY BIP counter 18 */ +#define ETH_PHY_BIP_CNT_18 0x0000FFFF + +/* 0x374 PHY BIP counter 19 */ +#define ETH_PHY_BIP_CNT_19 0x0000FFFF + +/* 0x37A PHY hiber chekcs */ +#define ETH_PHY_HIBER_CHECKS 0x001FFFFF +/* 0x37B PHY hiber frame error */ +#define ETH_PHY_HIBER_FRM_ERR 0x7F + +/* 0x37C PHY error block count */ +#define ETH_PHY_ERR_BLOCK_COUNT 0xffffffff + +/* 0x37F PHY BIP deskew dept 0 */ +#define ETH_PHY_DES_DEPT0_DEPT_0 0x3F +#define ETH_PHY_DES_DEPT0_DEPT_1 (0x3F << 6) +#define ETH_PHY_DES_DEPT0_DEPT_2 (0x3F << 12) +#define ETH_PHY_DES_DEPT0_DEPT_3 (0x3F << 18) +#define ETH_PHY_DES_DEPT0_DEPT_4 (0x3F << 24) + +/* 0x380 PHY BIP deskew dept 1 */ +#define ETH_PHY_DES_DEPT1_DEPT_0 0x3F +#define ETH_PHY_DES_DEPT1_DEPT_1 (0x3F << 6) +#define ETH_PHY_DES_DEPT1_DEPT_2 (0x3F << 12) +#define ETH_PHY_DES_DEPT1_DEPT_3 (0x3F << 18) +#define ETH_PHY_DES_DEPT1_DEPT_4 (0x3F << 24) + +/* 0x381 PHY BIP deskew dept 2 */ +#define ETH_PHY_DES_DEPT2_DEPT_0 0x3F +#define ETH_PHY_DES_DEPT2_DEPT_1 (0x3F << 6) +#define ETH_PHY_DES_DEPT2_DEPT_2 (0x3F << 12) +#define ETH_PHY_DES_DEPT2_DEPT_3 (0x3F << 18) +#define ETH_PHY_DES_DEPT2_DEPT_4 (0x3F << 24) + +/* 0x382 PHY BIP deskew dept 3 */ +#define ETH_PHY_DES_DEPT3_DEPT_0 0x3F +#define ETH_PHY_DES_DEPT3_DEPT_1 (0x3F << 6) +#define ETH_PHY_DES_DEPT3_DEPT_2 (0x3F << 12) +#define ETH_PHY_DES_DEPT3_DEPT_3 (0x3F << 18) +#define ETH_PHY_DES_DEPT3_DEPT_4 (0x3F << 24) +#define ETH_PHY_DES_DEPT3 0xffff + +/* 0x383 PHY RX PCS test error count */ +#define ETH_PHY_RX_PCS_TEST_ERR_CNT 0xFFFFFFFF + +/* 0x400: TX MAC ID */ +#define ETH_TX_ETH_ID 0xffffffff + +/* 0x401: TX MAC scratch */ +#define ETH_TX_MAC_SCRATCH 0xffffffff 
+ +/* 0x405: TX MAC link fault configuiration */ +#define ETH_TX_MAC_LF_EN BIT(0) +#define ETH_TX_MAC_UNIDIR_EN BIT(1) +#define ETH_TX_MAC_DISABLE_RF BIT(2) +#define ETH_TX_MAC_FORCE_RF BIT(3) + +/* 0x406: TX MAC IPG col rem */ +#define ETH_TX_MAC_IPG_COL_REM 0x0000ffff + +/* 0x407: TX MAC frame size */ +#define ETH_TX_MAC_MAX_TX 0x0000ffff + +/* 0x40A: TX MAC configuration */ +#define ETH_TX_MAC_DISABLE_S_ADDR_EN BIT(3) +#define ETH_TX_MAC_DISABLE_TXVMAC BIT(2) +#define ETH_TX_MAC_DISABLE_TXVLAN BIT(1) + +/* 0x40B: TX MAC EHIP configuration */ +#define ETH_TX_MAC_EHIP_CONF_EN_PP BIT(0) +#define ETH_TX_MAC_EHIP_CONF_IPG (0x3 << 1) +#define ETH_TX_MAC_EHIP_CONF_AM_WIDTH_25G (0x4 << 3) +#define ETH_TX_MAC_EHIP_CONF_AM_WIDTH_10G (0x1 << 3) +#define ETH_TX_MAC_EHIP_CONF_FLOWREG_25G (0x3 << 6) +#define ETH_TX_MAC_EHIP_CONF_FLOWREG_8b_10G (0x1 << 8) +#define ETH_TX_MAC_EHIP_CONF_FLOWREG_10G (0x4 << 6) +#define ETH_TX_MAC_EHIP_CONF_CRC_EN BIT(9) +#define ETH_TX_MAC_EHIP_CONF_AM_PERIOD (0x1FFF << 15) +#define ETH_TX_MAC_EHIP_CONF_AM_25G_FEC (0x13FFF << 15) +#define ETH_TX_MAC_EHIP_CONF_AM_25G (0x13FFC << 15) + +/* 0x40C: TX MAC source address lower bytes */ +#define ETH_TX_MAC_LOW_BYTES 0xffffffff + +/* 0x40D: TX MAC source address higher bytes */ +#define ETH_TX_MAC_HIGH_BYTES 0x0000ffff + +/* 0x500 RX MAC ID confuguration */ +#define ETH_RX_MAC_ID 0xffffffff + +/* 0x501 RX MAC scratch confuguration */ +#define ETH_RX_MAC_SCRATCH 0xffffffff + +/* 0x506 RX MAC frame size */ +#define ETH_RX_MAC_FRAME_SIZE 0x0000ffff + +/* 0x507 RX MAC CRC forwarding */ +#define ETH_RX_MAC_CRC_FORWARD BIT(0) + +/* 0x508 RX MAC link status */ +#define ETH_RX_MAC_LOCAL_FAULT BIT(0) +#define ETH_RX_MAC_REMOTE_FAULT BIT(1) + +/* 0x50A RX MAC confuguration */ +#define ETH_RX_MAC_EN_PLEN BIT(0) +#define ETH_RX_MAC_RXVLAN_DISABLE BIT(1) +#define ETH_RX_MAC_EN_CHECK_SFD BIT(3) +#define ETH_RX_MAC_EN_STR_PREAMBLE BIT(4) +#define ETH_RX_MAC_ENFORCE_MAX_RX BIT(7) +#define ETH_RX_MAC_REMOVE_RX_PAD 
BIT(8) + +/* 0x50B RX MAC feature confuguration */ +#define ETH_RX_MAC_EN_PP BIT(0) +#define ETH_RX_MAC_RXCRC_PREAMBLE BIT(1) + +/* Ethernet Reconfiguration Interface + * Pause and Priority - Based Flow Control + * Bit Definitions + */ +/* 0x605 Enable TX Pause Ports */ +#define ETH_EN_PFC_PORT_FOR_PFC 0xFF +#define ETH_EN_PFC_PORT_FOR_PAUSE BIT(8) + +/* 0x606 TX Pause Request */ +#define ETH_TX_PAUSE_REQ_FOR_PFC 0xFF +#define ETH_TX_PAUSE_REQ_FOR_PAUSE BIT(8) + +/* 0x607 Enable Automatic TX Pause Retransmission */ +#define ETH_TX_EN_HOLDOFF_FOR_PFC 0xFF +#define ETH_TX_EN_HOLDOFF_FOR_PAUSE BIT(8) + +/* 0x608 Retransmit Holdoff Quanta Fields */ +#define ETH_TX_RETRANSMIT_HOLDOFF_QUANTA 0xFFFF + +/* 0x609 Retransmit Pause Quanta */ +#define ETH_RETRANSMIT_PAUSE_QUANTA 0xFFFF + +/* 0x60A Enable TX XOFF */ +#define ETH_TX_XOFF 0x3 + +/* 0x60B Enable Uniform Holdoff */ +#define ETH_EN_UNIFORM_HOLDOFF BIT(0) + +/* 0x60C Set Uniform Holdoff */ +#define ETH_SET_UNIFORM_HOLDOFF 0xFFFF + +/* 0x60D Lower 4 Bytes of the Dest Addr for Flow Control Fields */ +#define ETH_TX_DEST_ADDR_FLOW_CTRL_LO 0xFFFFFFFF + +/* 0x60E Higher 2 Bytes of the Dest Addr for Flow Control Fields */ +#define ETH_TX_DEST_ADDR_FLOW_CTRL_HI 0xFFFF + +/* 0x60F Lower 4 Bytes of the Src Addr for Flow Control Fields */ +#define ETH_TX_SRC_ADDR_FLOW_CTRL_LO 0xFFFFFFFF + +/* 0x610 Higher 2 Bytes of the Src Addr for Flow Control Fields */ +#define ETH_TX_SRC_ADDR_FLOW_CTRL_HI 0xFFFF + +/* 0x611 TX Flow Control Feature Configuration */ +#define ETH_TX_EN_STD_FLOW_CTRL BIT(0) +#define ETH_TX_EN_PRIORITY_FLOW_CTRL BIT(1) +#define MAC_PAUSEFRAME_QUANTA 0xFFFF + +/* 0x620 Pause Quanta 0 */ +#define ETH_PAUSE_QUANTA_0 0xFFFF + +/* 0x621 Pause Quanta 1 */ +#define ETH_PAUSE_QUANTA_1 0xFFFF + +/* 0x622 Pause Quanta 2 */ +#define ETH_PAUSE_QUANTA_2 0xFFFF + +/* 0x623 Pause Quanta 3 */ +#define ETH_PAUSE_QUANTA_3 0xFFFF + +/* 0x624 Pause Quanta 4 */ +#define ETH_PAUSE_QUANTA_4 0xFFFF + +/* 0x625 Pause Quanta 5 */ 
+#define ETH_PAUSE_QUANTA_5 0xFFFF + +/* 0x626 Pause Quanta 6 */ +#define ETH_PAUSE_QUANTA_6 0xFFFF + +/* 0x627 Pause Quanta 7 */ +#define ETH_PAUSE_QUANTA_7 0xFFFF + +/* 0x628 PFC Holdoff Quanta 0 */ +#define ETH_PFC_HOLDOFF_QUANTA_0 0xFFFF + +/* 0x629 PFC Holdoff Quanta 1 */ +#define ETH_PFC_HOLDOFF_QUANTA_1 0xFFFF + +/* 0x62A PFC Holdoff Quanta 2 */ +#define ETH_PFC_HOLDOFF_QUANTA_2 0xFFFF + +/* 0x62B PFC Holdoff Quanta 3 */ +#define ETH_PFC_HOLDOFF_QUANTA_3 0xFFFF + +/* 0x62C PFC Holdoff Quanta 4 */ +#define ETH_PFC_HOLDOFF_QUANTA_4 0xFFFF + +/* 0x62D PFC Holdoff Quanta 5 */ +#define ETH_PFC_HOLDOFF_QUANTA_5 0xFFFF + +/* 0x62E PFC Holdoff Quanta 6 */ +#define ETH_PFC_HOLDOFF_QUANTA_6 0xFFFF + +/* 0x62F PFC Holdoff Quanta 7 */ +#define ETH_PFC_HOLDOFF_QUANTA_7 0xFFFF + +/* 0x705 Enable RX Pause Frame Processing Fields */ +#define ETH_EN_RX_PAUSE BIT(0) + +/* 0x706 RX Forward Flow Control Frames */ +#define ETH_RX_PAUSE_FWD BIT(0) + +/* 0x707 Lower 4 bytes of Dest. Addr. for RX Pause Frames */ +#define ETH_RX_DEST_ADDR_FLOW_CTRL_LO 0xFFFFFFFF + +/* 0x708 Higher 2 bytes of the Dest Addr for RX Pause Frames */ +#define ETH_RX_DEST_ADDR_FLOW_CTRL_HI 0xFFFF + +/* 0x709 RX Flow Control Feature Configuration */ +#define ETH_RX_EN_STD_FLOW_CTRL BIT(0) +#define ETH_RX_EN_PRIORITY_FLOW_CTRL BIT(1) + +/* Ethernet Reconfiguration Interface + * TX Statistics Counter + * Bit Definitions + */ +/* 0x845: Configuration of TX Statistics Counters */ +#define ETH_TX_CNTR_CFG_RST_ALL BIT(0) +#define ETH_TX_CNTR_CFG_RST_PARITY_ERR BIT(1) +#define ETH_TX_CNTR_CFG_FREEZE_STATS BIT(2) + +/* 0x846: Status of TX Statistics Counters */ +#define ETH_TX_CNTR_STAT_PARITY_ERR BIT(0) +#define ETH_TX_CNTR_STAT_CSR BIT(1) + +/* Ethernet Reconfiguration Interface + * RX Statistics Counter + * Bit Definitions + */ +/* 0x945: Configuration of RX Statistics Counters */ +#define ETH_RX_CNTR_CFG_RST_ALL BIT(0) +#define ETH_RX_CNTR_CFG_RST_PARITY_ERR BIT(1) +#define ETH_RX_CNTR_CFG_FREEZE_STATS BIT(2) + 
+/* 0x946: Status of RX Statistics Counters */ +#define ETH_RX_CNTR_STAT_PARITY_ERR BIT(0) +#define ETH_RX_CNTR_STAT_CSR BIT(1) + +/* Ethernet Reconfiguration Interface + * TX 1588 PTP + * Bit Definitions + */ +/* 0xA05: TX 1588 PTP Clock Period */ +#define ETH_PTP_CLK_PERIOD_FRAC_NS 0xFFFF +#define ETH_PTP_CLK_PERIOD_NS (0xF << 16) + +/* 0xA0A: TX 1588 PTP Extra Latency */ +#define ETH_TX_PTP_EXTRA_LATENCY_FRAC_NS 0xFFFF +#define ETH_TX_PTP_EXTRA_LATENCY_NS (0x7FFF << 16) +#define ETH_TX_PTP_EXTRA_LATENCY_SIGN BIT(31) + +/* 0xA0D: TX 1588 PTP Debug */ +#define ETH_TX_PTP_DEBUG BIT(0) + +/* Ethernet Reconfiguration Interface + * RX 1588 PTP + * Bit Definitions + */ +/* 0xB06: RX 1588 PTP Extra Latency */ +#define ETH_RX_PTP_EXTRA_LATENCY_FRAC_NS 0xFFFF +#define ETH_RX_PTP_EXTRA_LATENCY_NS (0x7FFF << 16) +#define ETH_RX_PTP_EXTRA_LATENCY_SIGN BIT(31) + +/* Ethernet Reconfiguration Interface + * 10G/25G PTP PPM UI Adjustment + * Bit Definitions + */ +/* 0xB10: TX 1588 PTP UI */ +#define ETH_TX_UI_FRAC_NS 0xFFFFFF +#define ETH_TX_UI_NS (0xFF << 24) + +/* 0xB11: RX 1588 PTP UI */ +#define ETH_RX_UI_FRAC_NS 0xFFFFFF +#define ETH_RX_UI_NS (0xFF << 24) + +/* 0xB19: Time Value Control*/ +#define ETH_TAM_SNAPSHOT BIT(0) + +/* 0xB1A: TX TAM Lower */ +#define ETH_TX_TAM_LO_FRAC_NS 0xFFFF +#define ETH_TX_TAM_LO_NS (0xFFFF << 16) + +/* 0xB1B: TX TAM Upper */ +#define ETH_TX_TAM_HI_NS 0xFFFF + +/* 0xB1C: TX Count */ +#define ETH_TX_CNT 0xFFFF + +/* 0xB1D: RX TAM Lower */ +#define ETH_RX_TAM_LO_FRAC_NS 0xFFFF +#define ETH_RX_TAM_LO_NS (0xFFFF << 16) + +/* 0xB1E: RX TAM Upper */ +#define ETH_RX_TAM_HI_NS 0xFFFF + +/* 0xB1F: RX Count */ +#define ETH_TX_CNT 0xFFFF + +/* RSFEC Reconfiguration Interface + * TX & RX RSFEC + * Bit Definitions + */ +/* 0x010: RS-FEC TX Configuration */ +#define RSFEC_TX_SEL_LANE_0_EHIP_CORE_TX_DATA 0x0 +#define RSFEC_TX_SEL_LANE_0_EHIP_LANE_TX_DATA 0x1 +#define RSFEC_TX_SEL_LANE_0_EMIB_LANE_TX_DATA_DESKEW 0x2 +#define 
RSFEC_TX_SEL_LANE_0_EMIB_LANE_TX_DATA_NO_DESKEW 0x3 +#define RSFEC_TX_SEL_LANE_0_FEC_LANE_DISABLED 0x6 +#define RSFEC_TX_SEL_LANE_0_DEBUG 0x7 +#define RSFEC_TX_SEL_LANE_1_EHIP_CORE_TX_DATA (0x0 << 4) +#define RSFEC_TX_SEL_LANE_1_EHIP_LANE_TX_DATA (0x1 << 4) +#define RSFEC_TX_SEL_LANE_1_EMIB_LANE_TX_DATA_DESKEW (0x2 << 4) +#define RSFEC_TX_SEL_LANE_1_EMIB_LANE_TX_DATA_NO_DESKEW (0x3 << 4) +#define RSFEC_TX_SEL_LANE_1_FEC_LANE_DISABLED (0x6 << 4) +#define RSFEC_TX_SEL_LANE_1_DEBUG (0x7 << 4) +#define RSFEC_TX_SEL_LANE_2_EHIP_CORE_TX_DATA (0x0 << 8) +#define RSFEC_TX_SEL_LANE_2_EHIP_LANE_TX_DATA (0x1 << 8) +#define RSFEC_TX_SEL_LANE_2_EMIB_LANE_TX_DATA_DESKEW (0x2 << 8) +#define RSFEC_TX_SEL_LANE_2_EMIB_LANE_TX_DATA_NO_DESKEW (0x3 << 8) +#define RSFEC_TX_SEL_LANE_2_FEC_LANE_DISABLED (0x6 << 8) +#define RSFEC_TX_SEL_LANE_2_DEBUG (0x7 << 8) +#define RSFEC_TX_SEL_LANE_3_EHIP_CORE_TX_DATA (0x0 << 12) +#define RSFEC_TX_SEL_LANE_3_EHIP_LANE_TX_DATA (0x1 << 12) +#define RSFEC_TX_SEL_LANE_3_EMIB_LANE_TX_DATA_DESKEW (0x2 << 12) +#define RSFEC_TX_SEL_LANE_3_EMIB_LANE_TX_DATA_NO_DESKEW (0x3 << 12) +#define RSFEC_TX_SEL_LANE_3_FEC_LANE_DISABLED (0x6 << 12) +#define RSFEC_TX_SEL_LANE_3_DEBUG (0x7 << 12) +#define RSFEC_FEC_TX_BYPASS_LANE_0 BIT(28) +#define RSFEC_FEC_TX_BYPASS_LANE_1 BIT(29) +#define RSFEC_FEC_TX_BYPASS_LANE_2 BIT(30) +#define RSFEC_FEC_TX_BYPASS_LANE_3 BIT(31) + +/* 0x014: RS-FEC RX Configuration */ +#define RSFEC_RX_SEL_LANE_0_BYPASS_RSFEC_RX_PATH_NORMAL 0x0 +#define RSFEC_RX_SEL_LANE_0_SEL_OUTPUT_OF_RX 0x1 +#define RSFEC_RX_SEL_LANE_0_BYPASS_RSFEC_RX_PATH_EHIP_ELANE 0x2 +#define RSFEC_RX_SEL_LANE_0_DEBUG 0x3 +#define RSFEC_RX_SEL_LANE_1_BYPASS_RSFEC_RX_PATH_NORMAL (0x0 << 4) +#define RSFEC_RX_SEL_LANE_1_SEL_OUTPUT_OF_RX (0x1 << 4) +#define RSFEC_RX_SEL_LANE_1_BYPASS_RSFEC_RX_PATH_EHIP_ELANE (0x2 << 4) +#define RSFEC_RX_SEL_LANE_1_DEBUG (0x3 << 4) +#define RSFEC_RX_SEL_LANE_2_BYPASS_RSFEC_RX_PATH_NORMAL (0x0 << 8) +#define RSFEC_RX_SEL_LANE_2_SEL_OUTPUT_OF_RX (0x1 
<< 8) +#define RSFEC_RX_SEL_LANE_2_BYPASS_RSFEC_RX_PATH_EHIP_ELANE (0x2 << 8) +#define RSFEC_RX_SEL_LANE_2_DEBUG (0x3 << 8) +#define RSFEC_RX_SEL_LANE_3_BYPASS_RSFEC_RX_PATH_NORMAL (0x0 << 12) +#define RSFEC_RX_SEL_LANE_3_SEL_OUTPUT_OF_RX (0x1 << 12) +#define RSFEC_RX_SEL_LANE_3_BYPASS_RSFEC_RX_PATH_EHIP_ELANE (0x2 << 12) +#define RSFEC_RX_SEL_LANE_3_DEBUG (0x3 << 12) +#define RSFEC_FEC_RX_BYPASS_LANE_0 BIT(28) +#define RSFEC_FEC_RX_BYPASS_LANE_1 BIT(29) +#define RSFEC_FEC_RX_BYPASS_LANE_2 BIT(30) +#define RSFEC_FEC_RX_BYPASS_LANE_3 BIT(31) + +/* 0x020: TX Deskew Configuration */ +#define RSFEC_TX_DESKEW_CHAN_SEL_LANE_0 BIT(0) +#define RSFEC_TX_DESKEW_CHAN_SEL_LANE_1 BIT(1) +#define RSFEC_TX_DESKEW_CHAN_SEL_LANE_2 BIT(2) +#define RSFEC_TX_DESKEW_CHAN_SEL_LANE_3 BIT(3) +#define RSFEC_TX_DESKEW_CLEAR BIT(7) + +/* 0x030: RS-FEC Core Configuration */ +#define RSFEC_CORE_FRAC_NONE 0x0 +#define RSFEC_CORE_FRAC_FRAC4 0x3 + +/* 0x040: RS-FEC Lane 0 Configuration */ +#define RSFEC_LANE0_FIBER_CHAN_MODE BIT(0) +#define RSFEC_LANE0_EN_PN5280_SCRAMBLE BIT(1) +#define RSFEC_LANE0_BYPASS_ERROR_INDICATION BIT(2) +#define RSFEC_LANE0_RS_ENCODER_DECODER_MODE BIT(3) + +/* 0x044: RS-FEC Lane 1 Configuration */ +#define RSFEC_LANE1_FIBER_CHAN_MODE BIT(0) +#define RSFEC_LANE1_EN_PN5280_SCRAMBLE BIT(1) +#define RSFEC_LANE1_BYPASS_ERROR_INDICATION BIT(2) +#define RSFEC_LANE1_RS_ENCODER_DECODER_MODE BIT(3) + +/* 0x048: RS-FEC Lane 2 Configuration */ +#define RSFEC_LANE2_FIBER_CHAN_MODE BIT(0) +#define RSFEC_LANE2_EN_PN5280_SCRAMBLE BIT(1) +#define RSFEC_LANE2_BYPASS_ERROR_INDICATION BIT(2) +#define RSFEC_LANE2_RS_ENCODER_DECODER_MODE BIT(3) + +/* 0x04C: RS-FEC Lane 3 Configuration */ +#define RSFEC_LANE3_FIBER_CHAN_MODE BIT(0) +#define RSFEC_LANE3_EN_PN5280_SCRAMBLE BIT(1) +#define RSFEC_LANE3_BYPASS_ERROR_INDICATION BIT(2) +#define RSFEC_LANE3_RS_ENCODER_DECODER_MODE BIT(3) + +/* 0x104: TX Deskew Status */ +#define RSFEC_TX_DESKEW_COMPLETE BIT(0) +#define RSFEC_TX_DESKEW_STATUS (0x7 << 
1) +#define RSFEC_TX_DESKEW_MONITOR_ERR (0xF << 4) +#define RSFEC_TX_DESKEW_ACTIVE_CHANNELS (0xF << 8) + +/* 0x108: RSFEC Debug Configuration */ +#define RSFEC_DEBUG_SHADOW_REQ 0xF +#define RSFEC_DEBUG_SHADOW_CLR BIT(4) +#define RSFEC_DEBUG_TX_RST BIT(28) +#define RSFEC_DEBUG_RX_RST BIT(29) +#define RSFEC_DEBUG_MAIN_RST BIT(31) + +/* 0x120: RSFEC TX Status Lane 0 */ +#define RSFEC_TX_STATUS_LANE_0_INVALID_SYNC_HEADER BIT(0) +#define RSFEC_TX_STATUS_LANE_0_INVALID_BLOCK_TYPE BIT(1) +#define RSFEC_TX_STATUS_LANE_0_MARKER_RESYNC BIT(2) +#define RSFEC_TX_STATUS_LANE_0_PACING_VIOLATION BIT(3) + +/* 0x124: RSFEC TX Status Lane 1 */ +#define RSFEC_TX_STATUS_LANE_1_INVALID_SYNC_HEADER BIT(0) +#define RSFEC_TX_STATUS_LANE_1_INVALID_BLOCK_TYPE BIT(1) +#define RSFEC_TX_STATUS_LANE_1_MARKER_RESYNC BIT(2) +#define RSFEC_TX_STATUS_LANE_1_PACING_VIOLATION BIT(3) + +/* 0x128: RSFEC TX Status Lane 2 */ +#define RSFEC_TX_STATUS_LANE_2_INVALID_SYNC_HEADER BIT(0) +#define RSFEC_TX_STATUS_LANE_2_INVALID_BLOCK_TYPE BIT(1) +#define RSFEC_TX_STATUS_LANE_2_MARKER_RESYNC BIT(2) +#define RSFEC_TX_STATUS_LANE_2_PACING_VIOLATION BIT(3) + +/* 0x12C: RSFEC TX Status Lane 3 */ +#define RSFEC_TX_STATUS_LANE_3_INVALID_SYNC_HEADER BIT(0) +#define RSFEC_TX_STATUS_LANE_3_INVALID_BLOCK_TYPE BIT(1) +#define RSFEC_TX_STATUS_LANE_3_MARKER_RESYNC BIT(2) +#define RSFEC_TX_STATUS_LANE_3_PACING_VIOLATION BIT(3) + +/* 0x130 RSFEC TX Hold Status Lane 0 */ +#define RSFEC_TX_HOLD_STATUS_LANE_0_INVALID_SYNC_HEADER BIT(0) +#define RSFEC_TX_HOLD_STATUS_LANE_0_INVALID_BLOCK_TYPE BIT(1) +#define RSFEC_TX_HOLD_STATUS_LANE_0_MARKER_RESYNC BIT(2) +#define RSFEC_TX_HOLD_STATUS_LANE_0_PACING_VIOLATION BIT(3) + +/* 0x134 RSFEC TX Hold Status Lane 1 */ +#define RSFEC_TX_HOLD_STATUS_LANE_1_INVALID_SYNC_HEADER BIT(0) +#define RSFEC_TX_HOLD_STATUS_LANE_1_INVALID_BLOCK_TYPE BIT(1) +#define RSFEC_TX_HOLD_STATUS_LANE_1_MARKER_RESYNC BIT(2) +#define RSFEC_TX_HOLD_STATUS_LANE_1_PACING_VIOLATION BIT(3) + +/* 0x138 RSFEC TX Hold 
Status Lane 2 */ +#define RSFEC_TX_HOLD_STATUS_LANE_2_INVALID_SYNC_HEADER BIT(0) +#define RSFEC_TX_HOLD_STATUS_LANE_2_INVALID_BLOCK_TYPE BIT(1) +#define RSFEC_TX_HOLD_STATUS_LANE_2_MARKER_RESYNC BIT(2) +#define RSFEC_TX_HOLD_STATUS_LANE_2_PACING_VIOLATION BIT(3) + +/* 0x13C RSFEC TX Hold Status Lane 3 */ +#define RSFEC_TX_HOLD_STATUS_LANE_3_INVALID_SYNC_HEADER BIT(0) +#define RSFEC_TX_HOLD_STATUS_LANE_3_INVALID_BLOCK_TYPE BIT(1) +#define RSFEC_TX_HOLD_STATUS_LANE_3_MARKER_RESYNC BIT(2) +#define RSFEC_TX_HOLD_STATUS_LANE_3_PACING_VIOLATION BIT(3) + +/* 0x140 RSFEC TX Hold Status Interrupt Lane 0 */ +#define RSFEC_TX_HOLD_STATUS_INT_LANE_0_INVALID_SYNC_HEADER BIT(0) +#define RSFEC_TX_HOLD_STATUS_INT_LANE_0_INVALID_BLOCK_TYPE BIT(1) +#define RSFEC_TX_HOLD_STATUS_INT_LANE_0_MARKER_RESYNC BIT(2) +#define RSFEC_TX_HOLD_STATUS_INT_LANE_0_PACING_VIOLATION BIT(3) + +/* 0x144 RSFEC TX Hold Status Interrupt Lane 1 */ +#define RSFEC_TX_HOLD_STATUS_INT_LANE_1_INVALID_SYNC_HEADER BIT(0) +#define RSFEC_TX_HOLD_STATUS_INT_LANE_1_INVALID_BLOCK_TYPE BIT(1) +#define RSFEC_TX_HOLD_STATUS_INT_LANE_1_MARKER_RESYNC BIT(2) +#define RSFEC_TX_HOLD_STATUS_INT_LANE_1_PACING_VIOLATION BIT(3) + +/* 0x148 RSFEC TX Hold Status Interrupt Lane 2 */ +#define RSFEC_TX_HOLD_STATUS_INT_LANE_2_INVALID_SYNC_HEADER BIT(0) +#define RSFEC_TX_HOLD_STATUS_INT_LANE_2_INVALID_BLOCK_TYPE BIT(1) +#define RSFEC_TX_HOLD_STATUS_INT_LANE_2_MARKER_RESYNC BIT(2) +#define RSFEC_TX_HOLD_STATUS_INT_LANE_2_PACING_VIOLATION BIT(3) + +/* 0x14C RSFEC TX Hold Status Interrupt Lane 3 */ +#define RSFEC_TX_HOLD_STATUS_INT_LANE_3_INVALID_SYNC_HEADER BIT(0) +#define RSFEC_TX_HOLD_STATUS_INT_LANE_3_INVALID_BLOCK_TYPE BIT(1) +#define RSFEC_TX_HOLD_STATUS_INT_LANE_3_MARKER_RESYNC BIT(2) +#define RSFEC_TX_HOLD_STATUS_INT_LANE_3_PACING_VIOLATION BIT(3) + +/* 0x150 RSFEC RX Status Lane 0 */ +#define RSFEC_RX_STATUS_LANE_0_SIG_FAIL BIT(0) +#define RSFEC_RX_STATUS_LANE_0_NOT_LOCKED BIT(1) +#define RSFEC_RX_STATUS_LANE_0_FEC_3BAD BIT(2) 
+#define RSFEC_RX_STATUS_LANE_0_AM_5BAD BIT(3) +#define RSFEC_RX_STATUS_LANE_0_HI_SER BIT(4) +#define RSFEC_RX_STATUS_LANE_0_CORR_CW BIT(5) +#define RSFEC_RX_STATUS_LANE_0_UNCORR_CW BIT(6) + +/* 0x154 RSFEC RX Status Lane 1 */ +#define RSFEC_RX_STATUS_LANE_1_SIG_FAIL BIT(0) +#define RSFEC_RX_STATUS_LANE_1_NOT_LOCKED BIT(1) +#define RSFEC_RX_STATUS_LANE_1_FEC_3BAD BIT(2) +#define RSFEC_RX_STATUS_LANE_1_AM_5BAD BIT(3) +#define RSFEC_RX_STATUS_LANE_1_HI_SER BIT(4) +#define RSFEC_RX_STATUS_LANE_1_CORR_CW BIT(5) +#define RSFEC_RX_STATUS_LANE_1_UNCORR_CW BIT(6) + +/* 0x158 RSFEC RX Status Lane 2 */ +#define RSFEC_RX_STATUS_LANE_2_SIG_FAIL BIT(0) +#define RSFEC_RX_STATUS_LANE_2_NOT_LOCKED BIT(1) +#define RSFEC_RX_STATUS_LANE_2_FEC_3BAD BIT(2) +#define RSFEC_RX_STATUS_LANE_2_AM_5BAD BIT(3) +#define RSFEC_RX_STATUS_LANE_2_HI_SER BIT(4) +#define RSFEC_RX_STATUS_LANE_2_CORR_CW BIT(5) +#define RSFEC_RX_STATUS_LANE_2_UNCORR_CW BIT(6) + +/* 0x15C RSFEC RX Status Lane 3 */ +#define RSFEC_RX_STATUS_LANE_3_SIG_FAIL BIT(0) +#define RSFEC_RX_STATUS_LANE_3_NOT_LOCKED BIT(1) +#define RSFEC_RX_STATUS_LANE_3_FEC_3BAD BIT(2) +#define RSFEC_RX_STATUS_LANE_3_AM_5BAD BIT(3) +#define RSFEC_RX_STATUS_LANE_3_HI_SER BIT(4) +#define RSFEC_RX_STATUS_LANE_3_CORR_CW BIT(5) +#define RSFEC_RX_STATUS_LANE_3_UNCORR_CW BIT(6) + +/* 0x160 RSFEC RX Hold Status Lane 0 */ +#define RSFEC_RX_HOLD_STATUS_LANE_0_SIG_FAIL BIT(0) +#define RSFEC_RX_HOLD_STATUS_LANE_0_NOT_LOCKED BIT(1) +#define RSFEC_RX_HOLD_STATUS_LANE_0_FEC_3BAD BIT(2) +#define RSFEC_RX_HOLD_STATUS_LANE_0_AM_5BAD BIT(3) +#define RSFEC_RX_HOLD_STATUS_LANE_0_HI_SER BIT(4) +#define RSFEC_RX_HOLD_STATUS_LANE_0_CORR_CW BIT(5) +#define RSFEC_RX_HOLD_STATUS_LANE_0_UNCORR_CW BIT(6) + +/* 0x164 RSFEC RX Hold Status Lane 1 */ +#define RSFEC_RX_HOLD_STATUS_LANE_1_SIG_FAIL BIT(0) +#define RSFEC_RX_HOLD_STATUS_LANE_1_NOT_LOCKED BIT(1) +#define RSFEC_RX_HOLD_STATUS_LANE_1_FEC_3BAD BIT(2) +#define RSFEC_RX_HOLD_STATUS_LANE_1_AM_5BAD BIT(3) +#define 
RSFEC_RX_HOLD_STATUS_LANE_1_HI_SER BIT(4) +#define RSFEC_RX_HOLD_STATUS_LANE_1_CORR_CW BIT(5) +#define RSFEC_RX_HOLD_STATUS_LANE_1_UNCORR_CW BIT(6) + +/* 0x168 RSFEC RX Hold Status Lane 2 */ +#define RSFEC_RX_HOLD_STATUS_LANE_2_SIG_FAIL BIT(0) +#define RSFEC_RX_HOLD_STATUS_LANE_2_NOT_LOCKED BIT(1) +#define RSFEC_RX_HOLD_STATUS_LANE_2_FEC_3BAD BIT(2) +#define RSFEC_RX_HOLD_STATUS_LANE_2_AM_5BAD BIT(3) +#define RSFEC_RX_HOLD_STATUS_LANE_2_HI_SER BIT(4) +#define RSFEC_RX_HOLD_STATUS_LANE_2_CORR_CW BIT(5) +#define RSFEC_RX_HOLD_STATUS_LANE_2_UNCORR_CW BIT(6) + +/* 0x16C RSFEC RX Hold Status Lane 3 */ +#define RSFEC_RX_HOLD_STATUS_LANE_3_SIG_FAIL BIT(0) +#define RSFEC_RX_HOLD_STATUS_LANE_3_NOT_LOCKED BIT(1) +#define RSFEC_RX_HOLD_STATUS_LANE_3_FEC_3BAD BIT(2) +#define RSFEC_RX_HOLD_STATUS_LANE_3_AM_5BAD BIT(3) +#define RSFEC_RX_HOLD_STATUS_LANE_3_HI_SER BIT(4) +#define RSFEC_RX_HOLD_STATUS_LANE_3_CORR_CW BIT(5) +#define RSFEC_RX_HOLD_STATUS_LANE_3_UNCORR_CW BIT(6) + +/* 0x170 RSFEC RX Hold Interrupt Status Lane 0 */ +#define RSFEC_RX_HOLD_INT_STATUS_LANE_0_SIG_FAIL BIT(0) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_0_NOT_LOCKED BIT(1) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_0_FEC_3BAD BIT(2) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_0_AM_5BAD BIT(3) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_0_HI_SER BIT(4) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_0_CORR_CW BIT(5) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_0_UNCORR_CW BIT(6) + +/* 0x174 RSFEC RX Hold Interrupt Status Lane 1 */ +#define RSFEC_RX_HOLD_INT_STATUS_LANE_1_SIG_FAIL BIT(0) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_1_NOT_LOCKED BIT(1) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_1_FEC_3BAD BIT(2) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_1_AM_5BAD BIT(3) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_1_HI_SER BIT(4) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_1_CORR_CW BIT(5) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_1_UNCORR_CW BIT(6) + +/* 0x178 RSFEC RX Hold Interrupt Status Lane 2 */ +#define RSFEC_RX_HOLD_INT_STATUS_LANE_2_SIG_FAIL BIT(0) +#define 
RSFEC_RX_HOLD_INT_STATUS_LANE_2_NOT_LOCKED BIT(1) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_2_FEC_3BAD BIT(2) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_2_AM_5BAD BIT(3) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_2_HI_SER BIT(4) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_2_CORR_CW BIT(5) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_2_UNCORR_CW BIT(6) + +/* 0x17C RSFEC RX Hold Interrupt Status Lane 3 */ +#define RSFEC_RX_HOLD_INT_STATUS_LANE_3_SIG_FAIL BIT(0) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_3_NOT_LOCKED BIT(1) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_3_FEC_3BAD BIT(2) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_3_AM_5BAD BIT(3) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_3_HI_SER BIT(4) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_3_CORR_CW BIT(5) +#define RSFEC_RX_HOLD_INT_STATUS_LANE_3_UNCORR_CW BIT(6) + +/* 0x180: RSFEC RX Lanes Status */ +#define RSFEC_RX_LANES_STATUS_NOT_ALIGN BIT(0) +#define RSFEC_RX_LANES_STATUS_NOT_DESKEW BIT(1) + +/* 0x188: RSFEC RX Lanes Hold Status */ +#define RSFEC_RX_LANES_HOLD_STATUS_NOT_ALIGN BIT(0) +#define RSFEC_RX_LANES_HOLD_STATUS_NOT_DESKEW BIT(1) + +/* 0x18C: RSFEC RX Lanes Hold Interrupt Status */ +#define RSFEC_RX_LANES_HOLD_INT_STATUS_NOT_ALIGN BIT(0) +#define RSFEC_RX_LANES_HOLD_INT_STATUS_NOT_DESKEW BIT(1) + +/* 0x1A0: RSFEC RX Lane 0 Mapping */ +#define RSFEC_RX_LANE_0_MAPPING_FEC_LANE 0x3 + +/* 0x1A4: RSFEC RX Lane 1 Mapping */ +#define RSFEC_RX_LANE_1_MAPPING_FEC_LANE 0x3 + +/* 0x1A8: RSFEC RX Lane 2 Mapping */ +#define RSFEC_RX_LANE_2_MAPPING_FEC_LANE 0x3 + +/* 0x1AC: RSFEC RX Lane 3 Mapping */ +#define RSFEC_RX_LANE_3_MAPPING_FEC_LANE 0x3 + +/* 0x1B0: RSFEC RX FEC Lane 0 Skew */ +#define RSFEC_RX_FEC_LANE_0_SKEW_VAL 0x7F + +/* 0x1B4: RSFEC RX FEC Lane 1 Skew */ +#define RSFEC_RX_FEC_LANE_1_SKEW_VAL 0x7F + +/* 0x1B8: RSFEC RX FEC Lane 2 Skew */ +#define RSFEC_RX_FEC_LANE_2_SKEW_VAL 0x7F + +/* 0x1BC: RSFEC RX FEC Lane 3 Skew */ +#define RSFEC_RX_FEC_LANE_3_SKEW_VAL 0x7F + +/* 0x1C0: RSFEC RX Lane 0 Codeword Position */ +#define RSFEC_RX_LANE_0_CW_POS 
0x1FFF + +/* 0x1C4: RSFEC RX Lane 1 Codeword Position */ +#define RSFEC_RX_LANE_1_CW_POS 0x1FFF + +/* 0x1C8: RSFEC RX Lane 2 Codeword Position */ +#define RSFEC_RX_LANE_2_CW_POS 0x1FFF + +/* 0x1CC: RSFEC RX Lane 3 Codeword Position */ +#define RSFEC_RX_LANE_3_CW_POS 0x1FFF + +/* 0x1D0 RSFEC SRAM ECC Hold Status */ +#define RSFEC_SRAM_ECC_HOLD_STATUS_SBE 0xFF +#define RSFEC_SRAM_ECC_HOLD_STATUS_MBE (0xFF << 8) + +/* 0x1E0 RSFEC TX Lane 0 Error Injection Mode */ +#define RSFEC_TX_LANE_0_ERR_INJ_MODE_RATE 0xFF +#define RSFEC_TX_LANE_0_ERR_INJ_MODE_PAT (0xFF << 8) + +/* 0x1E4 RSFEC TX Lane 1 Error Injection Mode */ +#define RSFEC_TX_LANE_1_ERR_INJ_MODE_RATE 0xFF +#define RSFEC_TX_LANE_1_ERR_INJ_MODE_PAT (0xFF << 8) + +/* 0x1E8 RSFEC TX Lane 2 Error Injection Mode */ +#define RSFEC_TX_LANE_2_ERR_INJ_MODE_RATE 0xFF +#define RSFEC_TX_LANE_2_ERR_INJ_MODE_PAT (0xFF << 8) + +/* 0x1EC RSFEC TX Lane 3 Error Injection Mode */ +#define RSFEC_TX_LANE_3_ERR_INJ_MODE_RATE 0xFF +#define RSFEC_TX_LANE_3_ERR_INJ_MODE_PAT (0xFF << 8) + +/* 0x1F0 RSFEC TX Lane 0 Error Injection Status */ +#define RSFEC_TX_LANE_0_ERR_INJ_STATUS_INJ0S 0xFF +#define RSFEC_TX_LANE_0_ERR_INJ_STATUS_INJ1S (0xFF << 8) + +/* 0x1F4 RSFEC TX Lane 1 Error Injection Status */ +#define RSFEC_TX_LANE_1_ERR_INJ_STATUS_INJ0S 0xFF +#define RSFEC_TX_LANE_1_ERR_INJ_STATUS_INJ1S (0xFF << 8) + +/* 0x1F8 RSFEC TX Lane 2 Error Injection Status */ +#define RSFEC_TX_LANE_2_ERR_INJ_STATUS_INJ0S 0xFF +#define RSFEC_TX_LANE_2_ERR_INJ_STATUS_INJ1S (0xFF << 8) + +/* 0x1FC RSFEC TX Lane 3 Error Injection Status */ +#define RSFEC_TX_LANE_3_ERR_INJ_STATUS_INJ0S 0xFF +#define RSFEC_TX_LANE_3_ERR_INJ_STATUS_INJ1S (0xFF << 8) + +/* 0x200 RSFEC Lane 0 Corrected Code Words Count (Low) */ +#define RSFEC_LANE_0_CORR_CW_CNT_LO 0xFFFFFFFF + +/* 0x208 RSFEC Lane 1 Corrected Code Words Count (Low) */ +#define RSFEC_LANE_1_CORR_CW_CNT_LO 0xFFFFFFFF + +/* 0x210 RSFEC Lane 2 Corrected Code Words Count (Low) */ +#define RSFEC_LANE_2_CORR_CW_CNT_LO 
0xFFFFFFFF + +/* 0x218 RSFEC Lane 3 Corrected Code Words Count (Low) */ +#define RSFEC_LANE_3_CORR_CW_CNT_LO 0xFFFFFFFF + +/* 0x204 RSFEC Lane 0 Corrected Code Words Count (High) */ +#define RSFEC_LANE_0_CORR_CW_CNT_HI 0xFFFFFFFF + +/* 0x20C RSFEC Lane 1 Corrected Code Words Count (High) */ +#define RSFEC_LANE_1_CORR_CW_CNT_HI 0xFFFFFFFF + +/* 0x214 RSFEC Lane 2 Corrected Code Words Count (High) */ +#define RSFEC_LANE_2_CORR_CW_CNT_HI 0xFFFFFFFF + +/* 0x21C RSFEC Lane 3 Corrected Code Words Count (High) */ +#define RSFEC_LANE_3_CORR_CW_CNT_HI 0xFFFFFFFF + +/* 0x220 RSFEC Lane 0 Uncorrected Code Words Count (Low) */ +#define RSFEC_LANE_0_UNCORR_CW_CNT_LO 0xFFFFFFFF + +/* 0x228 RSFEC Lane 1 Uncorrected Code Words Count (Low) */ +#define RSFEC_LANE_1_UNCORR_CW_CNT_LO 0xFFFFFFFF + +/* 0x230 RSFEC Lane 2 Uncorrected Code Words Count (Low) */ +#define RSFEC_LANE_2_UNCORR_CW_CNT_LO 0xFFFFFFFF + +/* 0x238 RSFEC Lane 3 Uncorrected Code Words Count (Low) */ +#define RSFEC_LANE_3_UNCORR_CW_CNT_LO 0xFFFFFFFF + +/* 0x224 RSFEC Lane 0 Uncorrected Code Words Count (High) */ +#define RSFEC_LANE_0_UNCORR_CW_CNT_HI 0xFFFFFFFF + +/* 0x22C RSFEC Lane 1 Uncorrected Code Words Count (High) */ +#define RSFEC_LANE_1_UNCORR_CW_CNT_HI 0xFFFFFFFF + +/* 0x234 RSFEC Lane 2 Uncorrected Code Words Count (High) */ +#define RSFEC_LANE_2_UNCORR_CW_CNT_HI 0xFFFFFFFF + +/* 0x23C RSFEC Lane 3 Uncorrected Code Words Count (High) */ +#define RSFEC_LANE_3_UNCORR_CW_CNT_HI 0xFFFFFFFF + +/* 0x240: RSFEC Lane 0 Corrected Symbol Count (Low) */ +#define RSFEC_LANE_0_CORR_SYM_CNT_LO 0xFFFFFFFF + +/* 0x248: RSFEC Lane 1 Corrected Symbol Count (Low) */ +#define RSFEC_LANE_1_CORR_SYM_CNT_LO 0xFFFFFFFF + +/* 0x250: RSFEC Lane 2 Corrected Symbol Count (Low) */ +#define RSFEC_LANE_2_CORR_SYM_CNT_LO 0xFFFFFFFF + +/* 0x258: RSFEC Lane 3 Corrected Symbol Count (Low) */ +#define RSFEC_LANE_3_CORR_SYM_CNT_LO 0xFFFFFFFF + +/* 0x244: RSFEC Lane 0 Corrected Symbol Count (High) */ +#define RSFEC_LANE_0_CORR_SYM_CNT_HI 
0xFFFFFFFF + +/* 0x24C: RSFEC Lane 1 Corrected Symbol Count (High) */ +#define RSFEC_LANE_1_CORR_SYM_CNT_HI 0xFFFFFFFF + +/* 0x254: RSFEC Lane 2 Corrected Symbol Count (High) */ +#define RSFEC_LANE_2_CORR_SYM_CNT_HI 0xFFFFFFFF + +/* 0x25C: RSFEC Lane 3 Corrected Symbol Count (High) */ +#define RSFEC_LANE_3_CORR_SYM_CNT_HI 0xFFFFFFFF + +/* 0x260: RSFEC Lane 0 Corrected 0s Count (Low) */ +#define RSFEC_LANE_0_CORR_0S_CNT_LO 0xFFFFFFFF + +/* 0x268: RSFEC Lane 1 Corrected 0s Count (Low) */ +#define RSFEC_LANE_1_CORR_0S_CNT_LO 0xFFFFFFFF + +/* 0x270: RSFEC Lane 2 Corrected 0s Count (Low) */ +#define RSFEC_LANE_2_CORR_0S_CNT_LO 0xFFFFFFFF + +/* 0x278: RSFEC Lane 3 Corrected 0s Count (Low) */ +#define RSFEC_LANE_3_CORR_0S_CNT_LO 0xFFFFFFFF + +/* 0x264: RSFEC Lane 0 Corrected 0s Count (High) */ +#define RSFEC_LANE_0_CORR_0S_CNT_HI 0xFFFFFFFF + +/* 0x26C: RSFEC Lane 1 Corrected 0s Count (High) */ +#define RSFEC_LANE_1_CORR_0S_CNT_HI 0xFFFFFFFF + +/* 0x274: RSFEC Lane 2 Corrected 0s Count (High) */ +#define RSFEC_LANE_2_CORR_0S_CNT_HI 0xFFFFFFFF + +/* 0x27C: RSFEC Lane 3 Corrected 0s Count (High) */ +#define RSFEC_LANE_3_CORR_0S_CNT_HI 0xFFFFFFFF + +/* 0x280: RSFEC Lane 0 Corrected 1s Count (Low) - offsets for the 1s counters inferred from the HI = LO + 4, lane stride 8 layout of the other counters; TODO confirm against the register map */ +#define RSFEC_LANE_0_CORR_1S_CNT_LO 0xFFFFFFFF + +/* 0x288: RSFEC Lane 1 Corrected 1s Count (Low) */ +#define RSFEC_LANE_1_CORR_1S_CNT_LO 0xFFFFFFFF + +/* 0x290: RSFEC Lane 2 Corrected 1s Count (Low) */ +#define RSFEC_LANE_2_CORR_1S_CNT_LO 0xFFFFFFFF + +/* 0x298: RSFEC Lane 3 Corrected 1s Count (Low) */ +#define RSFEC_LANE_3_CORR_1S_CNT_LO 0xFFFFFFFF + +/* 0x284: RSFEC Lane 0 Corrected 1s Count (High) */ +#define RSFEC_LANE_0_CORR_1S_CNT_HI 0xFFFFFFFF + +/* 0x28C: RSFEC Lane 1 Corrected 1s Count (High) */ +#define RSFEC_LANE_1_CORR_1S_CNT_HI 0xFFFFFFFF + +/* 0x294: RSFEC Lane 2 Corrected 1s Count (High) */ +#define RSFEC_LANE_2_CORR_1S_CNT_HI 0xFFFFFFFF + +/* 0x29C: RSFEC Lane 3 Corrected 1s Count (High) */ +#define RSFEC_LANE_3_CORR_1S_CNT_HI 0xFFFFFFFF + +/* Transceiver 
Reconfiguration Interface + * PMA AVMM + * Bit Definitions + */ +/* 0x004 */ +#define XCVR_PMA_AVMM_004_TX_DATAPATH_CLK_EN BIT(0) +#define XCVR_PMA_AVMM_004_TRANSMIT_FULL_CLK_OUT_EN BIT(1) +#define XCVR_PMA_AVMM_004_TRANSMIT_DATA_INPUT_SEL (0x7 << 2) +#define XCVR_PMA_AVMM_004_TRANSMIT_FULL_CLK_OUT_SEL BIT(5) +#define XCVR_PMA_AVMM_004_TRANSMIT_CLK_DATAPATH_SEL BIT(6) +#define XCVR_PMA_AVMM_004_TRANSMIT_ADAPTATION_ORD_SEL BIT(7) + +/* 0x005 */ +#define XCVR_PMA_AVMM_005_TRANSMIT_MULTI_LANE_DATA_SEL 0x3 +#define XCVR_PMA_AVMM_005_TX_GBOX_CLK_EN BIT(2) +#define XCVR_PMA_AVMM_005_TX_DATAPATH_CLK_EN BIT(3) +#define XCVR_PMA_AVMM_005_TX_PCS_DIV2_CLK_INPUT_EN BIT(4) +#define XCVR_PMA_AVMM_005_TX_FEC_DIV2_CLK_INPUT_EN BIT(5) +#define XCVR_PMA_AVMM_005_TX_EHIP_DIV2_CLK_INPUT_EN BIT(6) +#define XCVR_PMA_AVMM_005_TX_DIRECT_CLK_INPUT_EN BIT(7) + +/* 0x006 */ +#define XCVR_PMA_AVMM_006_RX_DATAPATH_CLK_EN BIT(0) +#define XCVR_PMA_AVMM_006_RECV_FULL_CLK_OUT_EN BIT(1) +#define XCVR_PMA_AVMM_006_RECV_HALF_CLK_OUT_EN BIT(2) +#define XCVR_PMA_AVMM_006_RECV_DIV66_CLK_OUT_EN BIT(3) +#define XCVR_PMA_AVMM_006_RECV_ADAPTATION_ORD_SEL BIT(4) +#define XCVR_PMA_AVMM_006_RECV_ADAPTER_DATA_SEL (0x3 << 5) +#define XCVR_PMA_AVMM_006_RECV_REVERSE_BIT_ORDER_IN_GBOX BIT(7) + +/* 0x007 */ +#define XCVR_PMA_AVMM_007_RECV_REVERSE_64_66_SYNC_HEADER BIT(0) +#define XCVR_PMA_AVMM_007_RX_FIFO_READ_CLK_EN BIT(1) +#define XCVR_PMA_AVMM_007_RECV_GBOX_FIFO_WR_CLK_EN BIT(2) +#define XCVR_PMA_AVMM_007_RECV_DIRECT_DATA_MODE_MULTI_LANE (0x3 << 3) +#define XCVR_PMA_AVMM_007_SEL_RX_FIFO_RD_CLK (0x3 << 5) +#define XCVR_PMA_AVMM_007_RX_ADAPTER_CLK_EN BIT(7) + +/* 0x008 TX Gearbox*/ +#define XCVR_PMA_AVMM_008_REVERSE_DATA_BIT_TRANS_ORDER BIT(0) +#define XCVR_PMA_AVMM_008_REVERSE_64_66_SYNC_HEADER_BIT BIT(1) +#define XCVR_PMA_AVMM_008_DYNAMIC_BITSLIP_EN BIT(3) +#define XCVR_PMA_AVMM_008_64_66_SYNC_HEADER_LOC BIT(5) + +/* 0x009 TX Deskew */ +#define XCVR_PMA_AVMM_009_TX_DESKEW_MULTI_LANE_MODE 0x3 +#define 
XCVR_PMA_AVMM_009_TX_DESKEW_STATUS (0x3 << 2) +#define XCVR_PMA_AVMM_009_TX_DESKEW_ALIGN_STATUS BIT(4) +#define XCVR_PMA_AVMM_009_RX_FIFO_BIT_67_SEL BIT(5) + +/* 0x00A */ +#define XCVR_PMA_AVMM_00A_TRANSMIT_DESKEW_EN 0x7 +#define XCVR_PMA_AVMM_00A_DYNAMIC_RX_BITSLIP_EN BIT(5) + +/* 0x010 */ +#define XCVR_PMA_AVMM_010_XCVR_RX_FIFO_EMPTY_THRESHOLD 0x1F +#define XCVR_PMA_AVMM_010_XCVR_RX_FIFO_ALMOST_EMPTY_THRESHOLD (0x3 << 6) + +/* 0x011 */ +#define XCVR_PMA_AVMM_011_XCVR_RX_FIFO_ALMOST_EMPTY_THRESHOLD 0x7 +#define XCVR_PMA_AVMM_011_XCVR_RX_FIFO_FULL_THRESHOLD 0xF0 + +/* 0x012 */ +#define XCVR_PMA_AVMM_012_XCVR_RX_FIFO_FULL_THRESHOLD BIT(0) +#define XCVR_PMA_AVMM_012_XCVR_RX_FIFO_ALMOST_FULL_THRESHOLD (0x1F << 2) + +/* 0x013 */ +#define XCVR_PMA_AVMM_013_RX_FIFO_RD_WHEN_EMPTY BIT(6) +#define XCVR_PMA_AVMM_013_RX_FIFO_RD_WHEN_FULL BIT(7) + +/* 0x014 */ +#define XCVR_PMA_AVMM_014_XCVR_TX_FIFO_EMPTY_THRESHOLD 0x1F +#define XCVR_PMA_AVMM_014_XCVR_TX_FIFO_ALMOST_EMPTY_THRESHOLD (0x3 << 6) + +/* 0x015 */ +#define XCVR_PMA_AVMM_015_XCVR_TX_FIFO_ALMOST_EMPTY_THRESHOLD 0x7 +#define XCVR_PMA_AVMM_015_XCVR_TX_FIFO_FULL_THRESHOLD 0xF0 + +/* 0x016 */ +#define XCVR_PMA_AVMM_016_XCVR_TX_FIFO_FULL_THRESHOLD BIT(0) +#define XCVR_PMA_AVMM_016_XCVR_TX_FIFO_ALMOST_FULL_THRESHOLD (0x1F << 2) + +/* 0x017 */ +#define XCVR_PMA_AVMM_017_TX_FIFO_PHASE_COMP_MODE (0x3 << 4) +#define XCVR_PMA_AVMM_017_TX_FIFO_WR_WHEN_FULL BIT(6) +#define XCVR_PMA_AVMM_017_TX_FIFO_RD_WHEN_EMPTY BIT(7) + +/* 0x01C */ +#define XCVR_PMA_AVMM_01C_TRANSMIT_OUTPUT_VAL 0xFF +#define XCVR_PMA_AVMM_01D_TRANSMIT_OUTPUT_VAL 0xFF +#define XCVR_PMA_AVMM_01E_TRANSMIT_OUTPUT_VAL 0xFF +#define XCVR_PMA_AVMM_01F_TRANSMIT_OUTPUT_VAL 0xFF +#define XCVR_PMA_AVMM_020_TRANSMIT_OUTPUT_VAL 0xFF +#define XCVR_PMA_AVMM_021_TRANSMIT_OUTPUT_VAL 0xFF +#define XCVR_PMA_AVMM_022_TRANSMIT_OUTPUT_VAL 0xFF +#define XCVR_PMA_AVMM_023_TRANSMIT_OUTPUT_VAL 0xFF +#define XCVR_PMA_AVMM_024_TRANSMIT_OUTPUT_VAL 0x7 + +/* 0x034 */ +#define 
XCVR_PMA_AVMM_034_SERIALIZATION_FACTOR_RX_BIT_CNT 0x3 +#define XCVR_PMA_AVMM_034_RX_BIT_CNT_RESET_VAL 0xF0 + +/* 0x035 */ +#define XCVR_PMA_AVMM_035_RX_BIT_CNT_RESET_VAL 0xFF + +/* 0x036 */ +#define XCVR_PMA_AVMM_036_RX_BIT_CNT_RESET_VAL BIT(0) +#define XCVR_PMA_AVMM_036_RDWR_SELF_CLR BIT(3) +#define XCVR_PMA_AVMM_036_TRANSMIT_DIV66_CLK_OUT BIT(4) + +/* 0x037 */ +#define XCVR_PMA_AVMM_037_TRANSMIT_SCLK_EN BIT(0) +#define XCVR_PMA_AVMM_037_INC_TX_FIFO_LATENCY_SEL 0x30 +#define XCVR_PMA_AVMM_037_RECV_SCLK_EN BIT(4) +#define XCVR_PMA_AVMM_037_INC_RX_FIFO_LATENCY_SEL (0x3 << 5) +#define XCVR_PMA_AVMM_037_ASYNC_LATENCY_PULSE_SEL BIT(7) + +/* 0x038 */ +#define XCVR_PMA_AVMM_038_DCC_BYPASS_DISABLE BIT(0) +#define XCVR_PMA_AVMM_038_DCC_MASTER_EN BIT(1) +#define XCVR_PMA_AVMM_038_DCC_SEL_CONT_CAL BIT(2) + +/* 0x03C */ +#define XCVR_PMA_AVMM_03C_DCC_EN_FOR_FSM BIT(1) + +/* 0x080 */ +#define XCVR_PMA_AVMM_080_CORE_PMA_ATTR_CTRL 0xF +#define XCVR_PMA_AVMM_081_CORE_PMA_ATTR_CTRL 0xF +#define XCVR_PMA_AVMM_084_CORE_PMA_ATTR_DATA 0xF +#define XCVR_PMA_AVMM_085_CORE_PMA_ATTR_DATA 0xF +#define XCVR_PMA_AVMM_086_CORE_PMA_ATTR_DATA 0xF +#define XCVR_PMA_AVMM_087_CORE_PMA_ATTR_CODE 0xF +#define XCVR_PMA_AVMM_088_CORE_PMA_ATTR_CODE_RET_VAL_LO 0xF +#define XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI 0xF +#define XCVR_PMA_AVMM_088_PMA_INTERNAL_LOOPBACK BIT(3) +#define XCVR_PMA_AVMM_089_PMA_INTERNAL_LOOPBACK BIT(0) +#define XCVR_PMA_AVMM_088_PMA_RECEIVER_TUNING_CTRL GENMASK(3, 1) +#define XCVR_PMA_AVMM_089_PMA_RECEIVER_TUNING_CTRL BIT(0) +#define XCVR_PMA_AVMM_088_PMA_READ_RECEIVER_TUNING BIT(0) +#define XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS BIT(7) +#define XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR BIT(0) +#define XCVR_PMA_AVMM_08B_PMA_RELOAD_SUCCESS GENMASK(3, 2) +#define XCVR_PMA_AVMM_090_LD_084_087_PMA BIT(0) +#define XCVR_PMA_AVMM_091_LD_INIT_PMA_OR_LAST_SEL BIT(0) +#define XCVR_PMA_AVMM_095_PMA_CALIBRATE BIT(5) +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_A_0 0x0 +#define 
XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_A_1 0x1 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_A_2 0x2 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_A_3 0x3 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_A_4 0x4 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_A_5 0x5 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_A_6 0x6 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_A_7 0x7 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_A_8 0x8 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_REFCLK4_0 0x00 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_REFCLK4_1 0x10 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_REFCLK4_2 0x20 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_REFCLK4_3 0x30 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_REFCLK4_4 0x40 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_REFCLK4_5 0x50 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_REFCLK4_6 0x60 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_REFCLK4_7 0x70 +#define XCVR_PMA_AVMM_0EC_REF_CLK_SEL_IN_REFCLK4_8 0x80 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK0_0 0x0 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK0_1 0x1 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK0_2 0x2 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK0_3 0x3 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK0_4 0x4 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK0_5 0x5 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK0_6 0x6 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK0_7 0x7 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK0_8 0x8 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK1_0 0x00 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK1_1 0x10 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK1_2 0x20 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK1_3 0x30 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK1_4 0x40 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK1_5 0x50 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK1_6 0x60 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK1_7 0x70 +#define XCVR_PMA_AVMM_0EE_REF_CLK_SEL_IN_REFCLK1_8 0x80 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK2_0 0x0 +#define 
XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK2_1 0x1 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK2_2 0x2 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK2_3 0x3 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK2_4 0x4 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK2_5 0x5 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK2_6 0x6 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK2_7 0x7 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK2_8 0x8 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK3_0 0x00 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK3_1 0x10 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK3_2 0x20 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK3_3 0x30 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK3_4 0x40 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK3_5 0x50 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK3_6 0x60 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK3_7 0x70 +#define XCVR_PMA_AVMM_0EF_REF_CLK_SEL_IN_REFCLK3_8 0x80 + +/* PMA Analog Reset or PMA Operating Mode - SEE ETILE PHY USER GUIDE */ +#define XCVR_PMA_AVMM_200_FIELD 0xFF +#define XCVR_PMA_AVMM_201_FIELD 0xFF +#define XCVR_PMA_AVMM_202_FIELD 0xFF +#define XCVR_PMA_AVMM_203_FIELD 0xFF + +/* PMA Others */ +#define XCVR_PMA_AVMM_204_RET_PHYS_CHANNEL_NUMBER 0xFF +#define XCVR_PMA_AVMM_207_OP_ON_200_203_SUCCESS BIT(0) +#define XCVR_PMA_AVMM_207_LAST_OP_ON_200_203_SUCCESS BIT(7) + +/* Transceiver Reconfiguration Interface + * PMA Capability + * Bit Definitions + */ +/* 0x40000: IP Identifier 0 */ +#define XCVR_PMA_CAP_IP_ID_0 0xFF + +/* 0x40001: IP Identifier 1 */ +#define XCVR_PMA_CAP_IP_ID_1 0xFF + +/* 0x40002: IP Identifier 2 */ +#define XCVR_PMA_CAP_IP_ID_2 0xFF + +/* 0x40003: IP Identifier 3 */ +#define XCVR_PMA_CAP_IP_ID_3 0xFF + +/* 0x40004: Status Register Enabled */ +#define XCVR_PMA_CAP_STATUS_REG_EN BIT(0) + +/* 0x40005: Control Register Enabled */ +#define XCVR_PMA_CAP_CTRL_REG_EN BIT(0) + +/* 0x40010: Number of Channels */ +#define XCVR_PMA_CAP_NUMBER_OF_CHANNELS 0xFF + +/* 
0x40011: Channel Number */ +#define XCVR_PMA_CAP_CHANNEL_NUMBER 0xFF + +/* 0x40012: Transceiver Mode */ +#define XCVR_PMA_CAP_MODE 0x3 + +/* Transceiver Reconfiguration Interface + * PMA Control and Status + * Bit Definitions + */ +/* 0x40080: RX Locked to Data Status */ +#define XCVR_PMA_CTRL_STAT_RX_LOCKED_TO_DATA_STATUS BIT(0) + +/* 0x40081: TXRX Ready Status */ +#define XCVR_PMA_CTRL_STAT_TX_READY_STATUS BIT(0) +#define XCVR_PMA_CTRL_STAT_RX_READY_STATUS BIT(1) + +/* 0x40082: TXRX Transfer Status */ +#define XCVR_PMA_CTRL_STAT_TX_TRANSFER_STATUS BIT(0) +#define XCVR_PMA_CTRL_STAT_RX_TRANSFER_STATUS BIT(1) + +/* 0x400E2: TXRX Reset Configuration */ +#define XCVR_PMA_CTRL_STAT_RX_PMA_INT_RESET BIT(0) +#define XCVR_PMA_CTRL_STAT_RX_EMIB_RESET BIT(1) +#define XCVR_PMA_CTRL_STAT_TX_PMA_INT_RESET BIT(2) +#define XCVR_PMA_CTRL_STAT_TX_EMIB_RESET BIT(3) +#define XCVR_PMA_CTRL_STAT_RX_PMA_INT_RESET_OVERRIDE BIT(4) +#define XCVR_PMA_CTRL_STAT_RX_EMIB_RESET_OVERRIDE BIT(5) +#define XCVR_PMA_CTRL_STAT_TX_PMA_INT_RESET_OVERRIDE BIT(6) +#define XCVR_PMA_CTRL_STAT_TX_EMIB_RESET_OVERRIDE BIT(7) + +/* 0x40140: Configuration Profile Select */ +#define XCVR_PMA_CTRL_STAT_SET_CFG_PROFILE 0x7 +#define XCVR_PMA_CTRL_STAT_START_STREAM BIT(7) + +/* 0x40141: Busy Status */ +#define XCVR_PMA_CTRL_STAT_BUSY_STATUS BIT(0) + +/* 0x40143: PMA Configuration */ +#define XCVR_PMA_CTRL_STAT_PMA_CFG_SELECT 0x7 +#define XCVR_PMA_CTRL_STAT_REQ_PMA_CFG_LOAD BIT(7) + +/* 0x40144: PMA Configuration Loading Status */ +#define XCVR_PMA_CTRL_STAT_RCP_LOAD_FINISH BIT(0) +#define XCVR_PMA_CTRL_STAT_RCP_LOAD_TIMEOUT BIT(1) +#define XCVR_PMA_CTRL_STAT_RCP_LOAD_BUSY BIT(2) + +/*PMA Low Byte write Values*/ + +#define XCVR_TX_REF_PMA_CODE_RET_VAL_LO 0x5 +#define XCVR_RX_REF_PMA_CODE_RET_VAL_LO 0x6 +#define XCVR_RX_TX_Width_PMA_CODE_RET_VAL_LO 0x14 +#define XCVR_RX_Phase_PMA_CODE_RET_VAL_LO 0xE +#define XCVR_PMA_ENBL_CODE_RET_VAL_LO 0x1 +#define XCVR_INTR_LOOP_PMA_CODE_RET_VAL_LO 0x8 +#define 
XCVR_GNRL_CALB_PMA_CODE_RET_VAL_LO 0x2c +#define XCVR_INIT_ADAPT_7B_PMA_CODE_RET_VAL_LO 0xa +#define XCVR_INIT_ADAPT_7C_PMA_CODE_RET_VAL_LO 0x0 + +#define INTEL_FPGA_BYTE_ALIGN 8 +#define INTEL_FPGA_WORD_ALIGN 32 + +/* QSFP SFF 8636 REV2.9 Channel Status Interrupt Flags */ +#define QSFP_CHANNEL_1_RX_LOS BIT(0) +#define QSFP_CHANNEL_2_RX_LOS BIT(1) +#define QSFP_CHANNEL_3_RX_LOS BIT(2) +#define QSFP_CHANNEL_4_RX_LOS BIT(3) +#define QSFP_CHANNEL_1_TX_LOS BIT(4) +#define QSFP_CHANNEL_2_TX_LOS BIT(5) +#define QSFP_CHANNEL_3_TX_LOS BIT(6) +#define QSFP_CHANNEL_4_TX_LOS BIT(7) + +/* Ethernet Reconfiguration Interface Register Base Addresses + * Word Offset Register Type + * 0x0B0-0x0E8 Auto Negotiation and Link Training registers + * 0x300-0x3FF PHY registers + * 0x310-0x310 Reset Controller registers + * 0x400-0x4FF TX MAC registers + * 0x500-0x5FF RX MAC registers + * 0x600-0x7FF Pause and Priority- Based Flow Control registers + * 0x800-0x8FF TX Statistics Counter registers + * 0x900-0x9FF RX Statistics Counter registers + * 0xA00-0xAFF TX 1588 PTP registers + * 0xB00-0xBFF RX 1588 PTP registers + * struct Definitions + */ +/* 0x0B0-0x0E8 Auto Negotiation and Link Training registers + * Note: Belongs within intel_fpga_etile_eth struct + */ +struct intel_fpga_etile_eth_auto_neg_link { + u32 anlt_sequencer_config; //0x0B0 + u32 anlt_sequencer_status; //0x0B1 + u32 reserved_0b2[14]; //0x0B2-0xBF + u32 auto_neg_conf_1; //0x0C0 + u32 auto_neg_conf_2; //0x0C1 + u32 auto_neg_stat; //0x0C2 + u32 auto_neg_conf_3; //0x0C3 + u32 auto_neg_conf_4; //0x0C4 + u32 auto_neg_conf_5; //0x0C5 + u32 auto_neg_conf_6; //0x0C6 + u32 auto_neg_stat_1; //0x0C7 + u32 auto_neg_stat_2; //0x0C8 + u32 auto_neg_stat_3; //0x0C9 + u32 auto_neg_stat_4; //0x0CA + u32 auto_neg_stat_5; //0x0CB + u32 auto_neg_an_channel_override; //0x0CC + u32 auto_neg_const_next_page_override; //0x0CD + u32 auto_neg_const_next_page_lp_stat; //0x0CE + u32 reserved_0cf; //0x0CF + u32 link_train_conf_1; //0x0D0 + u32 
reserved_0d1; //0x0D1 + u32 link_train_stat_1; //0x0D2 + u32 link_train_conf_lane_0; //0x0D3 + u32 reserved_0d4[12]; //0x0D4-0x0DF + u32 link_train_conf_lane_1; //0x0E0 + u32 reserved_0e1[3]; //0x0E1-0x0E3 + u32 link_train_conf_lane_2; //0x0E4 + u32 reserved_0e5[3]; //0x0E5-0x0E7 + u32 link_train_conf_lane_3; //0x0E8 +}; + +/* 0x300-0x3FF PHY registers + * Note: Belongs within intel_fpga_etile_eth struct + */ +struct intel_fpga_etile_eth_phy { + u32 phy_rev_id; //0x300 + u32 phy_scratch; //0x301 + u32 reserved_302[11]; //0x302-0x30C + u32 phy_loopback; //0x30D + u32 phy_rx_pcs_align; //0x30E + u32 reserved_30f; //0x30F + u32 phy_config; //0x310 + u32 reserved_311[16]; //0x311-0x320 + u32 phy_cdr_pll_locked; //0x321 + u32 phy_tx_datapath_ready; //0x322 + u32 phy_frm_err_detect; //0x323 + u32 phy_clr_frm_err; //0x324 + u32 reserved_325; //0x325 + u32 phy_pcs_stat_anlt; //0x326 + u32 phy_pcs_err_inject; //0x327 + u32 phy_am_lock; //0x328 + u32 phy_dskew_chng; //0x329 + u32 phy_ber_cnt; //0x32A + u32 phy_aib_transfer_ready_stat; //0x32B + u32 phy_soft_rc_reset_stat; //0x32C + u32 reserved_32d[3]; //0x32D-0x32F + u32 phy_pcs_virtual_ln_0; //0x330 + u32 phy_pcs_virtual_ln_1; //0x331 + u32 phy_pcs_virtual_ln_2; //0x332 + u32 phy_pcs_virtual_ln_3; //0x333 + u32 reserved_334[13]; //0x334-0x340 + u32 phy_recovered_clk_freq; //0x341 + u32 phy_tx_clk_freq; //0x342 + u32 reserved_343[13]; //0x343-0x34F + u32 phy_tx_pld_conf; //0x350 + u32 phy_tx_pld_stat; //0x351 + u32 reserved_352[2]; //0x352-0x353 + u32 phy_dynamic_deskew_buf_stat; //0x354 + u32 phy_rx_pld_conf; //0x355 + u32 reserved_356[10]; //0x356-0x35F + u32 phy_rx_pcs_conf; //0x360 + u32 phy_bip_cnt_0; //0x361 + u32 phy_bip_cnt_1; //0x362 + u32 phy_bip_cnt_2; //0x363 + u32 phy_bip_cnt_3; //0x364 + u32 phy_bip_cnt_4; //0x365 + u32 phy_bip_cnt_5; //0x366 + u32 phy_bip_cnt_6; //0x367 + u32 phy_bip_cnt_7; //0x368 + u32 phy_bip_cnt_8; //0x369 + u32 phy_bip_cnt_9; //0x36A + u32 phy_bip_cnt_10; //0x36B + u32 phy_bip_cnt_11; 
//0x36C + u32 phy_bip_cnt_12; //0x36D + u32 phy_bip_cnt_13; //0x36E + u32 phy_bip_cnt_14; //0x36F + u32 phy_bip_cnt_15; //0x370 + u32 phy_bip_cnt_16; //0x371 + u32 phy_bip_cnt_17; //0x372 + u32 phy_bip_cnt_18; //0x373 + u32 phy_bip_cnt_19; //0x374 + u32 reserved_375[5]; //0x375-0x379 + u32 phy_timer_window_hiber_check; //0x37A + u32 phy_hiber_frm_err; //0x37B + u32 phy_err_block_cnt; //0x37C + u32 reserved_37d[2]; //0x37D-0x37E + u32 phy_deskew_dept_0; //0x37F + u32 phy_deskew_dept_1; //0x380 + u32 phy_deskew_dept_2; //0x381 + u32 phy_deskew_dept_3; //0x382 + u32 phy_rx_pcs_test_err_cnt; //0x383 + u32 reserved_384[124]; //0x384-0x3FF +}; + +/* 0x400-0x4FF TX MAC registers + * Note: Belongs within intel_fpga_etile_eth struct + */ + +struct intel_fpga_etile_eth_tx_mac_10_25G { + u32 tx_mac_rev_id; //0x400 + u32 tx_mac_scratch; //0x401 + u32 reserved_402[3]; //0x402-0x404 + u32 tx_mac_link_fault; //0x405 + u32 tx_mac_ipg_col_rem; //0x406 + u32 tx_mac_max_frm_size; //0x407 + u32 reserved_408[2]; //0x408-0x409 + u32 tx_mac_conf; //0x40A + u32 tx_mac_ehip_conf; //0x40B + u32 tx_mac_source_addr_lower_bytes; //0x40C + u32 tx_mac_source_addr_higher_bytes; //0x40D + u32 reserved_40e[242]; //0x40E-0x4FF +}; + +/* 0x500-0x5FF RX MAC registers + * Note: Belongs within intel_fpga_etile_eth struct + */ + +struct intel_fpga_etile_eth_rx_mac_10_25G { + u32 rx_mac_rev_id; //0x500 + u32 rx_mac_scratch; //0x501 + u32 reserved_502[4]; //0x502-0x505 + u32 rx_mac_max_frm_size; //0x506 + u32 rx_mac_frwd_rx_crc; //0x507 + u32 rx_max_link_fault; //0x508 + u32 reserved_509; //0x509 + u32 rx_mac_conf; //0x50A + u32 rx_mac_ehip_conf; //0x50B + u32 reserved_50C[244]; //0x50C-0x5FF +}; + +/* 0x600-0x7FF Pause and Priority - Based Flow Control Registers + * Note: Belongs within intel_fpga_etile_eth struct + */ +struct intel_fpga_etile_eth_pause_priority { + u32 txsfc_module_revision_id; // 0x600 + u32 txsfc_scratch_register; // 0x601 + u32 reserved_602[3]; // 0x602-0x604 + u32 
enable_tx_pause_ports; // 0x605 + u32 tx_pause_request; // 0x606 + u32 enable_automatic_tx_pause_retransmission; // 0x607 + u32 retransmit_holdoff_quanta; // 0x608 + u32 retransmit_pause_quanta; // 0x609 + u32 enable_tx_xoff; // 0x60A + u32 enable_uniform_holdoff; // 0x60B + u32 set_uniform_holdoff; // 0x60C + u32 flow_control_fields_lsb; // 0x60D + u32 flow_control_fields_msb; // 0x60E + u32 flow_control_frames_lsb; // 0x60F + u32 flow_control_frames_msb; // 0x610 + u32 tx_flow_control_feature_cfg; // 0x611 + u32 reserved_612[14]; // 0x612-0x61F + u32 pause_quanta_0; // 0x620 + u32 pause_quanta_1; // 0x621 + u32 pause_quanta_2; // 0x622 + u32 pause_quanta_3; // 0x623 + u32 pause_quanta_4; // 0x624 + u32 pause_quanta_5; // 0x625 + u32 pause_quanta_6; // 0x626 + u32 pause_quanta_7; // 0x627 + u32 pfc_holdoff_quanta_0; // 0x628 + u32 pfc_holdoff_quanta_1; // 0x629 + u32 pfc_holdoff_quanta_2; // 0x62A + u32 pfc_holdoff_quanta_3; // 0x62B + u32 pfc_holdoff_quanta_4; // 0x62C + u32 pfc_holdoff_quanta_5; // 0x62D + u32 pfc_holdoff_quanta_6; // 0x62E + u32 pfc_holdoff_quanta_7; // 0x62F + u32 reserved_630[208]; // 0x630-0x6FF + u32 rxsfc_module_revision_id; // 0x700 + u32 rxsfc_scratch_register; // 0x701 + u32 reserved_702[3]; // 0x702-0x704 + u32 enable_rx_pause_frame_processing_fields; // 0x705 + u32 forward_flow_control_frames; // 0x706 + u32 rx_pause_frames_lsb; // 0x707 + u32 rx_pause_frames_msb; // 0x708 + u32 rx_flow_control_feature_cfg; // 0x709 + u32 reserved_70A[246]; // 0x70A-0x7FF +}; + +/* 0x800-0x8FF TX Statistics Counter registers + * Note: Belongs within intel_fpga_etile_eth struct + */ +struct intel_fpga_etile_eth_tx_stats { + u32 tx_fragments_lsb; // 0x800 + u32 tx_fragments_msb; // 0x801 + u32 tx_jabbers_lsb; // 0x802 + u32 tx_jabbers_msb; // 0x803 + u32 tx_fcserr_lsb; // 0x804 + u32 tx_fcserr_msb; // 0x805 + u32 tx_crcerr_okpkt_lsb; // 0x806 + u32 tx_crcerr_okpkt_msb; // 0x807 + u32 tx_mcast_data_err_lsb; // 0x808 + u32 tx_mcast_data_err_msb; // 0x809 
+ u32 tx_bcast_data_err_lsb; // 0x80A + u32 tx_bcast_data_err_msb; // 0x80B + u32 tx_ucast_data_err_lsb; // 0x80C + u32 tx_ucast_data_err_msb; // 0x80D + u32 tx_mcast_ctrl_err_lsb; // 0x80E + u32 tx_mcast_ctrl_err_msb; // 0x80F + u32 tx_bcast_ctrl_err_lsb; // 0x810 + u32 tx_bcast_ctrl_err_msb; // 0x811 + u32 tx_ucast_ctrl_err_lsb; // 0x812 + u32 tx_ucast_ctrl_err_msb; // 0x813 + u32 tx_pause_err_lsb; // 0x814 + u32 tx_pause_err_msb; // 0x815 + u32 tx_64b_lsb; // 0x816 + u32 tx_64b_msb; // 0x817 + u32 tx_65to127b_lsb; // 0x818 + u32 tx_65to127b_msb; // 0x819 + u32 tx_128to255b_lsb; // 0x81A + u32 tx_128to255b_msb; // 0x81B + u32 tx_256to511b_lsb; // 0x81C + u32 tx_256to511b_msb; // 0x81D + u32 tx_512to1023b_lsb; // 0x81E + u32 tx_512to1023b_msb; // 0x81F + u32 tx_1024to1518b_lsb; // 0x820 + u32 tx_1024to1518b_msb; // 0x821 + u32 tx_1519tomaxb_lsb; // 0x822 + u32 tx_1519tomaxb_msb; // 0x823 + u32 tx_oversize_lsb; // 0x824 + u32 tx_oversize_msb; // 0x825 + u32 tx_mcast_data_ok_lsb; // 0x826 + u32 tx_mcast_data_ok_msb; // 0x827 + u32 tx_bcast_data_ok_lsb; // 0x828 + u32 tx_bcast_data_ok_msb; // 0x829 + u32 tx_ucast_data_ok_lsb; // 0x82A + u32 tx_ucast_data_ok_msb; // 0x82B + u32 tx_mcast_ctrl_ok_lsb; // 0x82C + u32 tx_mcast_ctrl_ok_msb; // 0x82D + u32 tx_bcast_ctrl_ok_lsb; // 0x82E + u32 tx_bcast_ctrl_ok_msb; // 0x82F + u32 tx_ucast_ctrl_ok_lsb; // 0x830 + u32 tx_ucast_ctrl_ok_msb; // 0x831 + u32 tx_pause_lsb; // 0x832 + u32 tx_pause_msb; // 0x833 + u32 tx_rnt_lsb; // 0x834 + u32 tx_rnt_msb; // 0x835 + u32 tx_st_lsb; // 0x836 + u32 tx_st_msb; // 0x837 + u32 tx_lenerr_lsb; // 0x838 + u32 tx_lenerr_msb; // 0x839 + u32 tx_pfc_err_lsb; // 0x83A + u32 tx_pfc_err_msb; // 0x83B + u32 tx_pfc_lsb; // 0x83C + u32 tx_pfc_msb; // 0x83D + u32 reserved_83E[2]; // 0x83E-0x83F + u32 tx_stat_revid; // 0x840 + u32 tx_stat_scratch; // 0x841 + u32 reserved_842[3]; // 0x842-0x844 + u32 tx_cntr_config; // 0x845 + u32 tx_cntr_status; // 0x846 + u32 reserved_847[25]; // 0x847-0x85F + u32 
tx_payload_octetsok_lsb; // 0x860 + u32 tx_payload_octetsok_msb; // 0x861 + u32 tx_frame_octetsok_lsb; // 0x862 + u32 tx_frame_octetsok_msb; // 0x863 + u32 tx_malformed_ctrl_lsb; // 0x864 + u32 tx_malformed_ctrl_msb; // 0x865 + u32 tx_dropped_ctrl_lsb; // 0x866 + u32 tx_dropped_ctrl_msb; // 0x867 + u32 tx_badlt_ctrl_lsb; // 0x868 + u32 tx_badlt_ctrl_msb; // 0x869 + u32 reserved_86A[150]; // 0x86A-0x8FF +}; + +/* 0x900-0x9FF RX Statistics Counter registers + * Note: Belongs within intel_fpga_etile_eth struct + */ +struct intel_fpga_etile_eth_rx_stats { + u32 rx_fragments_lsb; // 0x900 + u32 rx_fragments_msb; // 0x901 + u32 rx_jabbers_lsb; // 0x902 + u32 rx_jabbers_msb; // 0x903 + u32 rx_fcserr_lsb; // 0x904 + u32 rx_fcserr_msb; // 0x905 + u32 rx_crcerr_okpkt_lsb; // 0x906 + u32 rx_crcerr_okpkt_msb; // 0x907 + u32 rx_mcast_data_err_lsb; // 0x908 + u32 rx_mcast_data_err_msb; // 0x909 + u32 rx_bcast_data_err_lsb; // 0x90A + u32 rx_bcast_data_err_msb; // 0x90B + u32 rx_ucast_data_err_lsb; // 0x90C + u32 rx_ucast_data_err_msb; // 0x90D + u32 rx_mcast_ctrl_err_lsb; // 0x90E + u32 rx_mcast_ctrl_err_msb; // 0x90F + u32 rx_bcast_ctrl_err_lsb; // 0x910 + u32 rx_bcast_ctrl_err_msb; // 0x911 + u32 rx_ucast_ctrl_err_lsb; // 0x912 + u32 rx_ucast_ctrl_err_msb; // 0x913 + u32 rx_pause_err_lsb; // 0x914 + u32 rx_pause_err_msb; // 0x915 + u32 rx_64b_lsb; // 0x916 + u32 rx_64b_msb; // 0x917 + u32 rx_65to127b_lsb; // 0x918 + u32 rx_65to127b_msb; // 0x919 + u32 rx_128to255b_lsb; // 0x91A + u32 rx_128to255b_msb; // 0x91B + u32 rx_256to511b_lsb; // 0x91C + u32 rx_256to511b_msb; // 0x91D + u32 rx_512to1023b_lsb; // 0x91E + u32 rx_512to1023b_msb; // 0x91F + u32 rx_1024to1518b_lsb; // 0x920 + u32 rx_1024to1518b_msb; // 0x921 + u32 rx_1519tomaxb_lsb; // 0x922 + u32 rx_1519tomaxb_msb; // 0x923 + u32 rx_oversize_lsb; // 0x924 + u32 rx_oversize_msb; // 0x925 + u32 rx_mcast_data_ok_lsb; // 0x926 + u32 rx_mcast_data_ok_msb; // 0x927 + u32 rx_bcast_data_ok_lsb; // 0x928 + u32 rx_bcast_data_ok_msb; 
// 0x929 + u32 rx_ucast_data_ok_lsb; // 0x92A + u32 rx_ucast_data_ok_msb; // 0x92B + u32 rx_mcast_ctrl_ok_lsb; // 0x92C + u32 rx_mcast_ctrl_ok_msb; // 0x92D + u32 rx_bcast_ctrl_ok_lsb; // 0x92E + u32 rx_bcast_ctrl_ok_msb; // 0x92F + u32 rx_ucast_ctrl_ok_lsb; // 0x930 + u32 rx_ucast_ctrl_ok_msb; // 0x931 + u32 rx_pause_lsb; // 0x932 + u32 rx_pause_msb; // 0x933 + u32 rx_rnt_lsb; // 0x934 + u32 rx_rnt_msb; // 0x935 + u32 rx_st_lsb; // 0x936 + u32 rx_st_msb; // 0x937 + u32 rx_lenerr_lsb; // 0x938 + u32 rx_lenerr_msb; // 0x939 + u32 rx_pfc_err_lsb; // 0x93A + u32 rx_pfc_err_msb; // 0x93B + u32 rx_pfc_lsb; // 0x93C + u32 rx_pfc_msb; // 0x93D + u32 reserved_93E[2]; // 0x93E-0x93F + u32 rx_stat_revid; // 0x940 + u32 rx_stat_scratch; // 0x941 + u32 reserved_942[3]; // 0x942-0x944 + u32 rx_cntr_config; // 0x945 + u32 rx_cntr_status; // 0x946 + u32 reserved_947[25]; // 0x947-0x95F + u32 rx_payload_octetsok_lsb; // 0x960 + u32 rx_payload_octetsok_msb; // 0x961 + u32 rx_frame_octetsok_lsb; // 0x962 + u32 rx_frame_octetsok_msb; // 0x963 + u32 reserved_964[156]; // 0x964-0x9FF +}; + +/* 0xA00-0xAFF TX 1588 PTP registers + * 0xB00-0xBFF RX 1588 PTP registers + * Note: Belongs within intel_fpga_etile_eth struct + */ +struct intel_fpga_etile_eth_1588_ptp { + /* TX 1588 PTP Registers */ + u32 txptp_revid; // 0xA00 + u32 txptp_scratch; // 0xA01 + u32 reserved_a02[3]; // 0xA02-0xA04 + u32 tx_ptp_clk_period; // 0xA05 + u32 reserved_a06[4]; // 0xA06-0xA09 + u32 tx_ptp_extra_latency; // 0xA0A + u32 reserved_a0b[2]; // 0xA0B-0xA0C + u32 ptp_debug; // 0xA0D + u32 reserved_a0e[242]; // 0xA0E-0xAFF + /* RX 1588 PTP Registers */ + u32 rxptp_revid; // 0xB00 + u32 rxptp_scratch; // 0xB01 + u32 reserved_b02[4]; // 0xB02-0xB05 + u32 rx_ptp_extra_latency; // 0xB06 + u32 reserved_b07[9]; // 0xB07-0xB0F + /* 10G/25G PTP PPM UI Adjustment Registers */ + u32 tx_ui_reg; // 0xB10 + u32 rx_ui_reg; // 0xB11 + u32 reserved_b12[7]; // 0xB12-0xB18 + u32 tam_snapshot; // 0xB19 + u32 tx_tam_l; // 0xB1A + u32 
tx_tam_h; // 0xB1B + u32 tx_count; // 0xB1C + u32 rx_tam_l; // 0xB1D + u32 rx_tam_h; // 0xB1E + u32 rx_count; // 0xB1F + u32 reserved_b20[224]; // 0xB20-0xBFF +}; + +/* E-Tile RS-FEC Reconfiguration Interface Register Base Addresses + * Word Offset Register Type + * 0x000-0x2FF TX and RX RS-FEC + */ +struct intel_fpga_etile_rsfec { + u8 reserved_0[4]; //0x000-0x003 + u8 rsfec_top_clk_cfg_b0; //0x004 + u8 rsfec_top_clk_cfg_b8; //0x005 + u8 rsfec_top_clk_cfg_b16; //0x006 + u8 rsfec_top_clk_cfg_b24; //0x007 + u8 reserved_8[8]; //0x008-0x00F + u8 rsfec_top_tx_cfg_b0; //0x010 + u8 rsfec_top_tx_cfg_b8; //0x011 + u8 rsfec_top_tx_cfg_b16; //0x012 + u8 rsfec_top_tx_cfg_b24; //0x013 + u8 rsfec_top_rx_cfg_b0; //0x014 + u8 rsfec_top_rx_cfg_b8; //0x015 + u8 rsfec_top_rx_cfg_b16; //0x016 + u8 rsfec_top_rx_cfg_b24; //0x017 + u8 reserved_18[8]; //0x018-0x01F + u8 tx_aib_dsk_conf_b0; //0x020 + u8 tx_aib_dsk_conf_b8; //0x021 + u8 tx_aib_dsk_conf_b16; //0x022 + u8 tx_aib_dsk_conf_b24; //0x023 + u8 reserved_24[12]; //0x024-0x02F + u8 rsfec_core_cfg_b0; //0x030 + u8 rsfec_core_cfg_b8; //0x031 + u8 rsfec_core_cfg_b16; //0x032 + u8 rsfec_core_cfg_b24; //0x033 + u8 reserved_34[12]; //0x034-0x03F + u8 rsfec_lane_cfg_0_b0; //0x040 + u8 rsfec_lane_cfg_0_b8; //0x041 + u8 rsfec_lane_cfg_0_b16; //0x042 + u8 rsfec_lane_cfg_0_b24; //0x043 + u8 rsfec_lane_cfg_1_b0; //0x044 + u8 rsfec_lane_cfg_1_b8; //0x045 + u8 rsfec_lane_cfg_1_b16; //0x046 + u8 rsfec_lane_cfg_1_b24; //0x047 + u8 rsfec_lane_cfg_2_b0; //0x048 + u8 rsfec_lane_cfg_2_b8; //0x049 + u8 rsfec_lane_cfg_2_b16; //0x04A + u8 rsfec_lane_cfg_2_b24; //0x04B + u8 rsfec_lane_cfg_3_b0; //0x04C + u8 rsfec_lane_cfg_3_b8; //0x04D + u8 rsfec_lane_cfg_3_b16; //0x04E + u8 rsfec_lane_cfg_3_b24; //0x04F + u8 reserved_50[180]; //0x050 - 0x103 + u8 tx_aib_dsk_status_b0; //0x104 + u8 tx_aib_dsk_status_b8; //0x105 + u8 tx_aib_dsk_status_b16; //0x106 + u8 tx_aib_dsk_status_b24; //0x107 + u8 rsfec_debug_cfg_b0; //0x108 + u8 rsfec_debug_cfg_b8; //0x109 + u8 
rsfec_debug_cfg_b16; //0x10A + u8 rsfec_debug_cfg_b24; //0x10B + u8 reserved_10C[20]; //0x10C-0x11F + u8 rsfec_lane_tx_stat_0_b0; //0x120 + u8 rsfec_lane_tx_stat_0_b8; //0x121 + u8 rsfec_lane_tx_stat_0_b16; //0x122 + u8 rsfec_lane_tx_stat_0_b24; //0x123 + u8 rsfec_lane_tx_stat_1_b0; //0x124 + u8 rsfec_lane_tx_stat_1_b8; //0x125 + u8 rsfec_lane_tx_stat_1_b16; //0x126 + u8 rsfec_lane_tx_stat_1_b24; //0x127 + u8 rsfec_lane_tx_stat_2_b0; //0x128 + u8 rsfec_lane_tx_stat_2_b8; //0x129 + u8 rsfec_lane_tx_stat_2_b16; //0x12A + u8 rsfec_lane_tx_stat_2_b24; //0x12B + u8 rsfec_lane_tx_stat_3_b0; //0x12C + u8 rsfec_lane_tx_stat_3_b8; //0x12D + u8 rsfec_lane_tx_stat_3_b16; //0x12E + u8 rsfec_lane_tx_stat_3_b24; //0x12F + u8 rsfec_lane_tx_hold_0_b0; //0x130 + u8 rsfec_lane_tx_hold_0_b8; //0x131 + u8 rsfec_lane_tx_hold_0_b16; //0x132 + u8 rsfec_lane_tx_hold_0_b24; //0x133 + u8 rsfec_lane_tx_hold_1_b0; //0x134 + u8 rsfec_lane_tx_hold_1_b8; //0x135 + u8 rsfec_lane_tx_hold_1_b16; //0x136 + u8 rsfec_lane_tx_hold_1_b24; //0x137 + u8 rsfec_lane_tx_hold_2_b0; //0x138 + u8 rsfec_lane_tx_hold_2_b8; //0x139 + u8 rsfec_lane_tx_hold_2_b16; //0x13A + u8 rsfec_lane_tx_hold_2_b24; //0x13B + u8 rsfec_lane_tx_hold_3_b0; //0x13C + u8 rsfec_lane_tx_hold_3_b8; //0x13D + u8 rsfec_lane_tx_hold_3_b16; //0x13E + u8 rsfec_lane_tx_hold_3_b24; //0x13F + u8 rsfec_lane_tx_inten_0_b0; //0x140 + u8 rsfec_lane_tx_inten_0_b8; //0x141 + u8 rsfec_lane_tx_inten_0_b16; //0x142 + u8 rsfec_lane_tx_inten_0_b24; //0x143 + u8 rsfec_lane_tx_inten_1_b0; //0x144 + u8 rsfec_lane_tx_inten_1_b8; //0x145 + u8 rsfec_lane_tx_inten_1_b16; //0x146 + u8 rsfec_lane_tx_inten_1_b24; //0x147 + u8 rsfec_lane_tx_inten_2_b0; //0x148 + u8 rsfec_lane_tx_inten_2_b8; //0x149 + u8 rsfec_lane_tx_inten_2_b16; //0x14A + u8 rsfec_lane_tx_inten_2_b24; //0x14B + u8 rsfec_lane_tx_inten_3_b0; //0x14C + u8 rsfec_lane_tx_inten_3_b8; //0x14D + u8 rsfec_lane_tx_inten_3_b16; //0x14E + u8 rsfec_lane_tx_inten_3_b24; //0x14F + u8 rsfec_lane_rx_stat_0_b0; 
//0x150 + u8 rsfec_lane_rx_stat_0_b8; //0x151 + u8 rsfec_lane_rx_stat_0_b16; //0x152 + u8 rsfec_lane_rx_stat_0_b24; //0x153 + u8 rsfec_lane_rx_stat_1_b0; //0x154 + u8 rsfec_lane_rx_stat_1_b8; //0x155 + u8 rsfec_lane_rx_stat_1_b16; //0x156 + u8 rsfec_lane_rx_stat_1_b24; //0x157 + u8 rsfec_lane_rx_stat_2_b0; //0x158 + u8 rsfec_lane_rx_stat_2_b8; //0x159 + u8 rsfec_lane_rx_stat_2_b16; //0x15A + u8 rsfec_lane_rx_stat_2_b24; //0x15B + u8 rsfec_lane_rx_stat_3_b0; //0x15C + u8 rsfec_lane_rx_stat_3_b8; //0x15D + u8 rsfec_lane_rx_stat_3_b16; //0x15E + u8 rsfec_lane_rx_stat_3_b24; //0x15F + u8 rsfec_lane_rx_hold_0_b0; //0x160 + u8 rsfec_lane_rx_hold_0_b8; //0x161 + u8 rsfec_lane_rx_hold_0_b16; //0x162 + u8 rsfec_lane_rx_hold_0_b24; //0x163 + u8 rsfec_lane_rx_hold_1_b0; //0x164 + u8 rsfec_lane_rx_hold_1_b8; //0x165 + u8 rsfec_lane_rx_hold_1_b16; //0x166 + u8 rsfec_lane_rx_hold_1_b24; //0x167 + u8 rsfec_lane_rx_hold_2_b0; //0x168 + u8 rsfec_lane_rx_hold_2_b8; //0x169 + u8 rsfec_lane_rx_hold_2_b16; //0x16A + u8 rsfec_lane_rx_hold_2_b24; //0x16B + u8 rsfec_lane_rx_hold_3_b0; //0x16C + u8 rsfec_lane_rx_hold_3_b8; //0x16D + u8 rsfec_lane_rx_hold_3_b16; //0x16E + u8 rsfec_lane_rx_hold_3_b24; //0x16F + u8 rsfec_lane_rx_inten_0_b0; //0x170 + u8 rsfec_lane_rx_inten_0_b8; //0x171 + u8 rsfec_lane_rx_inten_0_b16; //0x172 + u8 rsfec_lane_rx_inten_0_b24; //0x173 + u8 rsfec_lane_rx_inten_1_b0; //0x174 + u8 rsfec_lane_rx_inten_1_b8; //0x175 + u8 rsfec_lane_rx_inten_1_b16; //0x176 + u8 rsfec_lane_rx_inten_1_b24; //0x177 + u8 rsfec_lane_rx_inten_2_b0; //0x178 + u8 rsfec_lane_rx_inten_2_b8; //0x179 + u8 rsfec_lane_rx_inten_2_b16; //0x17A + u8 rsfec_lane_rx_inten_2_b24; //0x17B + u8 rsfec_lane_rx_inten_3_b0; //0x17C + u8 rsfec_lane_rx_inten_3_b8; //0x17D + u8 rsfec_lane_rx_inten_3_b16; //0x17E + u8 rsfec_lane_rx_inten_3_b24; //0x17F + u8 rsfec_lanes_rx_stat_b0; //0x180 + u8 rsfec_lanes_rx_stat_b8; //0x181 + u8 rsfec_lanes_rx_stat_b16; //0x182 + u8 rsfec_lanes_rx_stat_b24; //0x183 + u8 
reserved_184[4]; //0x184-0x187 + u8 rsfec_lanes_rx_hold_b0; //0x188 + u8 rsfec_lanes_rx_hold_b8; //0x189 + u8 rsfec_lanes_rx_hold_b16; //0x18A + u8 rsfec_lanes_rx_hold_b24; //0x18B + u8 rsfec_lanes_rx_inten_b0; //0x18C + u8 rsfec_lanes_rx_inten_b8; //0x18D + u8 rsfec_lanes_rx_inten_b16; //0x18E + u8 rsfec_lanes_rx_inten_b24; //0x18F + u8 reserved_190[16]; //0x190-0x19F + u8 rsfec_ln_mapping_rx_0_b0; //0x1A0 + u8 rsfec_ln_mapping_rx_0_b8; //0x1A1 + u8 rsfec_ln_mapping_rx_0_b16; //0x1A2 + u8 rsfec_ln_mapping_rx_0_b24; //0x1A3 + u8 rsfec_ln_mapping_rx_1_b0; //0x1A4 + u8 rsfec_ln_mapping_rx_1_b8; //0x1A5 + u8 rsfec_ln_mapping_rx_1_b16; //0x1A6 + u8 rsfec_ln_mapping_rx_1_b24; //0x1A7 + u8 rsfec_ln_mapping_rx_2_b0; //0x1A8 + u8 rsfec_ln_mapping_rx_2_b8; //0x1A9 + u8 rsfec_ln_mapping_rx_2_b16; //0x1AA + u8 rsfec_ln_mapping_rx_2_b24; //0x1AB + u8 rsfec_ln_mapping_rx_3_b0; //0x1AC + u8 rsfec_ln_mapping_rx_3_b8; //0x1AD + u8 rsfec_ln_mapping_rx_3_b16; //0x1AE + u8 rsfec_ln_mapping_rx_3_b24; //0x1AF + u8 rsfec_ln_skew_rx_0_b0; //0x1B0 + u8 rsfec_ln_skew_rx_0_b8; //0x1B1 + u8 rsfec_ln_skew_rx_0_b16; //0x1B2 + u8 rsfec_ln_skew_rx_0_b24; //0x1B3 + u8 rsfec_ln_skew_rx_1_b0; //0x1B4 + u8 rsfec_ln_skew_rx_1_b8; //0x1B5 + u8 rsfec_ln_skew_rx_1_b16; //0x1B6 + u8 rsfec_ln_skew_rx_1_b24; //0x1B7 + u8 rsfec_ln_skew_rx_2_b0; //0x1B8 + u8 rsfec_ln_skew_rx_2_b8; //0x1B9 + u8 rsfec_ln_skew_rx_2_b16; //0x1BA + u8 rsfec_ln_skew_rx_2_b24; //0x1BB + u8 rsfec_ln_skew_rx_3_b0; //0x1BC + u8 rsfec_ln_skew_rx_3_b8; //0x1BD + u8 rsfec_ln_skew_rx_3_b16; //0x1BE + u8 rsfec_ln_skew_rx_3_b24; //0x1BF + u8 rsfec_cw_pos_rx_0_b0; //0x1C0 + u8 rsfec_cw_pos_rx_0_b8; //0x1C1 + u8 rsfec_cw_pos_rx_0_b16; //0x1C2 + u8 rsfec_cw_pos_rx_0_b24; //0x1C3 + u8 rsfec_cw_pos_rx_1_b0; //0x1C4 + u8 rsfec_cw_pos_rx_1_b8; //0x1C5 + u8 rsfec_cw_pos_rx_1_b16; //0x1C6 + u8 rsfec_cw_pos_rx_1_b24; //0x1C7 + u8 rsfec_cw_pos_rx_2_b0; //0x1C8 + u8 rsfec_cw_pos_rx_2_b8; //0x1C9 + u8 rsfec_cw_pos_rx_2_b16; //0x1CA + u8 
rsfec_cw_pos_rx_2_b24; //0x1CB + u8 rsfec_cw_pos_rx_3_b0; //0x1CC + u8 rsfec_cw_pos_rx_3_b8; //0x1CD + u8 rsfec_cw_pos_rx_3_b16; //0x1CE + u8 rsfec_cw_pos_rx_3_b24; //0x1CF + u8 rsfec_core_ecc_hold_b0; //0x1D0 + u8 rsfec_core_ecc_hold_b8; //0x1D1 + u8 rsfec_core_ecc_hold_b16; //0x1D2 + u8 rsfec_core_ecc_hold_b24; //0x1D3 + u8 reserved_1d4[12]; //0x1D4-0x1DF + u8 rsfec_err_inj_tx_0_b0; //0x1E0 + u8 rsfec_err_inj_tx_0_b8; //0x1E1 + u8 rsfec_err_inj_tx_0_b16; //0x1E2 + u8 rsfec_err_inj_tx_0_b24; //0x1E3 + u8 rsfec_err_inj_tx_1_b0; //0x1E4 + u8 rsfec_err_inj_tx_1_b8; //0x1E5 + u8 rsfec_err_inj_tx_1_b16; //0x1E6 + u8 rsfec_err_inj_tx_1_b24; //0x1E7 + u8 rsfec_err_inj_tx_2_b0; //0x1E8 + u8 rsfec_err_inj_tx_2_b8; //0x1E9 + u8 rsfec_err_inj_tx_2_b16; //0x1EA + u8 rsfec_err_inj_tx_2_b24; //0x1EB + u8 rsfec_err_inj_tx_3_b0; //0x1EC + u8 rsfec_err_inj_tx_3_b8; //0x1ED + u8 rsfec_err_inj_tx_3_b16; //0x1EE + u8 rsfec_err_inj_tx_3_b24; //0x1EF + u8 rsfec_err_val_tx_0_b0; //0x1F0 + u8 rsfec_err_val_tx_0_b8; //0x1F1 + u8 rsfec_err_val_tx_0_b16; //0x1F2 + u8 rsfec_err_val_tx_0_b24; //0x1F3 + u8 rsfec_err_val_tx_1_b0; //0x1F4 + u8 rsfec_err_val_tx_1_b8; //0x1F5 + u8 rsfec_err_val_tx_1_b16; //0x1F6 + u8 rsfec_err_val_tx_1_b24; //0x1F7 + u8 rsfec_err_val_tx_2_b0; //0x1F8 + u8 rsfec_err_val_tx_2_b8; //0x1F9 + u8 rsfec_err_val_tx_2_b16; //0x1FA + u8 rsfec_err_val_tx_2_b24; //0x1FB + u8 rsfec_err_val_tx_3_b0; //0x1FC + u8 rsfec_err_val_tx_3_b8; //0x1FD + u8 rsfec_err_val_tx_3_b16; //0x1FE + u8 rsfec_err_val_tx_3_b24; //0x1FF + u8 rsfec_corr_cw_cnt_0_lo_b0; //0x200 + u8 rsfec_corr_cw_cnt_0_lo_b8; //0x201 + u8 rsfec_corr_cw_cnt_0_lo_b16; //0x202 + u8 rsfec_corr_cw_cnt_0_lo_b24; //0x203 + u8 rsfec_corr_cw_cnt_0_hi_b0; //0x204 + u8 rsfec_corr_cw_cnt_0_hi_b8; //0x205 + u8 rsfec_corr_cw_cnt_0_hi_b16; //0x206 + u8 rsfec_corr_cw_cnt_0_hi_b24; //0x207 + u8 rsfec_corr_cw_cnt_1_lo_b0; //0x208 + u8 rsfec_corr_cw_cnt_1_lo_b8; //0x209 + u8 rsfec_corr_cw_cnt_1_lo_b16; //0x20A + u8 
rsfec_corr_cw_cnt_1_lo_b24; //0x20B + u8 rsfec_corr_cw_cnt_1_hi_b0; //0x20C + u8 rsfec_corr_cw_cnt_1_hi_b8; //0x20D + u8 rsfec_corr_cw_cnt_1_hi_b16; //0x20E + u8 rsfec_corr_cw_cnt_1_hi_b24; //0x20F + u8 rsfec_corr_cw_cnt_2_lo_b0; //0x210 + u8 rsfec_corr_cw_cnt_2_lo_b8; //0x211 + u8 rsfec_corr_cw_cnt_2_lo_b16; //0x212 + u8 rsfec_corr_cw_cnt_2_lo_b24; //0x213 + u8 rsfec_corr_cw_cnt_2_hi_b0; //0x214 + u8 rsfec_corr_cw_cnt_2_hi_b8; //0x215 + u8 rsfec_corr_cw_cnt_2_hi_b16; //0x216 + u8 rsfec_corr_cw_cnt_2_hi_b24; //0x217 + u8 rsfec_corr_cw_cnt_3_lo_b0; //0x218 + u8 rsfec_corr_cw_cnt_3_lo_b8; //0x219 + u8 rsfec_corr_cw_cnt_3_lo_b16; //0x21A + u8 rsfec_corr_cw_cnt_3_lo_b24; //0x21B + u8 rsfec_corr_cw_cnt_3_hi_b0; //0x21C + u8 rsfec_corr_cw_cnt_3_hi_b8; //0x21D + u8 rsfec_corr_cw_cnt_3_hi_b16; //0x21E + u8 rsfec_corr_cw_cnt_3_hi_b24; //0x21F + u8 rsfec_uncorr_cw_cnt_0_lo_b0; //0x220 + u8 rsfec_uncorr_cw_cnt_0_lo_b8; //0x221 + u8 rsfec_uncorr_cw_cnt_0_lo_b16; //0x222 + u8 rsfec_uncorr_cw_cnt_0_lo_b24; //0x223 + u8 rsfec_uncorr_cw_cnt_0_hi_b0; //0x224 + u8 rsfec_uncorr_cw_cnt_0_hi_b8; //0x225 + u8 rsfec_uncorr_cw_cnt_0_hi_b16; //0x226 + u8 rsfec_uncorr_cw_cnt_0_hi_b24; //0x227 + u8 rsfec_uncorr_cw_cnt_1_lo_b0; //0x228 + u8 rsfec_uncorr_cw_cnt_1_lo_b8; //0x229 + u8 rsfec_uncorr_cw_cnt_1_lo_b16; //0x22A + u8 rsfec_uncorr_cw_cnt_1_lo_b24; //0x22B + u8 rsfec_uncorr_cw_cnt_1_hi_b0; //0x22C + u8 rsfec_uncorr_cw_cnt_1_hi_b8; //0x22D + u8 rsfec_uncorr_cw_cnt_1_hi_b16; //0x22E + u8 rsfec_uncorr_cw_cnt_1_hi_b24; //0x22F + u8 rsfec_uncorr_cw_cnt_2_lo_b0; //0x230 + u8 rsfec_uncorr_cw_cnt_2_lo_b8; //0x231 + u8 rsfec_uncorr_cw_cnt_2_lo_b16; //0x232 + u8 rsfec_uncorr_cw_cnt_2_lo_b24; //0x233 + u8 rsfec_uncorr_cw_cnt_2_hi_b0; //0x234 + u8 rsfec_uncorr_cw_cnt_2_hi_b8; //0x235 + u8 rsfec_uncorr_cw_cnt_2_hi_b16; //0x236 + u8 rsfec_uncorr_cw_cnt_2_hi_b24; //0x237 + u8 rsfec_uncorr_cw_cnt_3_lo_b0; //0x238 + u8 rsfec_uncorr_cw_cnt_3_lo_b8; //0x239 + u8 rsfec_uncorr_cw_cnt_3_lo_b16; //0x23A + u8 
rsfec_uncorr_cw_cnt_3_lo_b24; //0x23B + u8 rsfec_uncorr_cw_cnt_3_hi_b0; //0x23C + u8 rsfec_uncorr_cw_cnt_3_hi_b8; //0x23D + u8 rsfec_uncorr_cw_cnt_3_hi_b16; //0x23E + u8 rsfec_uncorr_cw_cnt_3_hi_b24; //0x23F + u8 rsfec_corr_syms_cnt_0_lo_b0; //0x240 + u8 rsfec_corr_syms_cnt_0_lo_b8; //0x241 + u8 rsfec_corr_syms_cnt_0_lo_b16; //0x242 + u8 rsfec_corr_syms_cnt_0_lo_b24; //0x243 + u8 rsfec_corr_syms_cnt_0_hi_b0; //0x244 + u8 rsfec_corr_syms_cnt_0_hi_b8; //0x245 + u8 rsfec_corr_syms_cnt_0_hi_b16; //0x246 + u8 rsfec_corr_syms_cnt_0_hi_b24; //0x247 + u8 rsfec_corr_syms_cnt_1_lo_b0; //0x248 + u8 rsfec_corr_syms_cnt_1_lo_b8; //0x249 + u8 rsfec_corr_syms_cnt_1_lo_b16; //0x24A + u8 rsfec_corr_syms_cnt_1_lo_b24; //0x24B + u8 rsfec_corr_syms_cnt_1_hi_b0; //0x24C + u8 rsfec_corr_syms_cnt_1_hi_b8; //0x24D + u8 rsfec_corr_syms_cnt_1_hi_b16; //0x24E + u8 rsfec_corr_syms_cnt_1_hi_b24; //0x24F + u8 rsfec_corr_syms_cnt_2_lo_b0; //0x250 + u8 rsfec_corr_syms_cnt_2_lo_b8; //0x251 + u8 rsfec_corr_syms_cnt_2_lo_b16; //0x252 + u8 rsfec_corr_syms_cnt_2_lo_b24; //0x253 + u8 rsfec_corr_syms_cnt_2_hi_b0; //0x254 + u8 rsfec_corr_syms_cnt_2_hi_b8; //0x255 + u8 rsfec_corr_syms_cnt_2_hi_b16; //0x256 + u8 rsfec_corr_syms_cnt_2_hi_b24; //0x257 + u8 rsfec_corr_syms_cnt_3_lo_b0; //0x258 + u8 rsfec_corr_syms_cnt_3_lo_b8; //0x259 + u8 rsfec_corr_syms_cnt_3_lo_b16; //0x25A + u8 rsfec_corr_syms_cnt_3_lo_b24; //0x25B + u8 rsfec_corr_syms_cnt_3_hi_b0; //0x25C + u8 rsfec_corr_syms_cnt_3_hi_b8; //0x25D + u8 rsfec_corr_syms_cnt_3_hi_b16; //0x25E + u8 rsfec_corr_syms_cnt_3_hi_b24; //0x25F + u8 rsfec_corr_0s_cnt_0_lo_b0; //0x260 + u8 rsfec_corr_0s_cnt_0_lo_b8; //0x261 + u8 rsfec_corr_0s_cnt_0_lo_b16; //0x262 + u8 rsfec_corr_0s_cnt_0_lo_b24; //0x263 + u8 rsfec_corr_0s_cnt_0_hi_b0; //0x264 + u8 rsfec_corr_0s_cnt_0_hi_b8; //0x265 + u8 rsfec_corr_0s_cnt_0_hi_b16; //0x266 + u8 rsfec_corr_0s_cnt_0_hi_b24; //0x267 + u8 rsfec_corr_0s_cnt_1_lo_b0; //0x268 + u8 rsfec_corr_0s_cnt_1_lo_b8; //0x269 + u8 
rsfec_corr_0s_cnt_1_lo_b16; //0x26A + u8 rsfec_corr_0s_cnt_1_lo_b24; //0x26B + u8 rsfec_corr_0s_cnt_1_hi_b0; //0x26C + u8 rsfec_corr_0s_cnt_1_hi_b8; //0x26D + u8 rsfec_corr_0s_cnt_1_hi_b16; //0x26E + u8 rsfec_corr_0s_cnt_1_hi_b24; //0x26F + u8 rsfec_corr_0s_cnt_2_lo_b0; //0x270 + u8 rsfec_corr_0s_cnt_2_lo_b8; //0x271 + u8 rsfec_corr_0s_cnt_2_lo_b16; //0x272 + u8 rsfec_corr_0s_cnt_2_lo_b24; //0x273 + u8 rsfec_corr_0s_cnt_2_hi_b0; //0x274 + u8 rsfec_corr_0s_cnt_2_hi_b8; //0x275 + u8 rsfec_corr_0s_cnt_2_hi_b16; //0x276 + u8 rsfec_corr_0s_cnt_2_hi_b24; //0x277 + u8 rsfec_corr_0s_cnt_3_lo_b0; //0x278 + u8 rsfec_corr_0s_cnt_3_lo_b8; //0x279 + u8 rsfec_corr_0s_cnt_3_lo_b16; //0x27A + u8 rsfec_corr_0s_cnt_3_lo_b24; //0x27B + u8 rsfec_corr_0s_cnt_3_hi_b0; //0x27C + u8 rsfec_corr_0s_cnt_3_hi_b8; //0x27D + u8 rsfec_corr_0s_cnt_3_hi_b16; //0x27E + u8 rsfec_corr_0s_cnt_3_hi_b24; //0x27F + u8 rsfec_corr_1s_cnt_0_lo_b0; //0x280 + u8 rsfec_corr_1s_cnt_0_lo_b8; //0x281 + u8 rsfec_corr_1s_cnt_0_lo_b16; //0x282 + u8 rsfec_corr_1s_cnt_0_lo_b24; //0x283 + u8 rsfec_corr_1s_cnt_0_hi_b0; //0x284 + u8 rsfec_corr_1s_cnt_0_hi_b8; //0x285 + u8 rsfec_corr_1s_cnt_0_hi_b16; //0x286 + u8 rsfec_corr_1s_cnt_0_hi_b24; //0x287 + u8 rsfec_corr_1s_cnt_1_lo_b0; //0x288 + u8 rsfec_corr_1s_cnt_1_lo_b8; //0x289 + u8 rsfec_corr_1s_cnt_1_lo_b16; //0x28A + u8 rsfec_corr_1s_cnt_1_lo_b24; //0x28B + u8 rsfec_corr_1s_cnt_1_hi_b0; //0x28C + u8 rsfec_corr_1s_cnt_1_hi_b8; //0x28D + u8 rsfec_corr_1s_cnt_1_hi_b16; //0x28E + u8 rsfec_corr_1s_cnt_1_hi_b24; //0x28F + u8 rsfec_corr_1s_cnt_2_lo_b0; //0x290 + u8 rsfec_corr_1s_cnt_2_lo_b8; //0x291 + u8 rsfec_corr_1s_cnt_2_lo_b16; //0x292 + u8 rsfec_corr_1s_cnt_2_lo_b24; //0x293 + u8 rsfec_corr_1s_cnt_2_hi_b0; //0x294 + u8 rsfec_corr_1s_cnt_2_hi_b8; //0x295 + u8 rsfec_corr_1s_cnt_2_hi_b16; //0x296 + u8 rsfec_corr_1s_cnt_2_hi_b24; //0x297 + u8 rsfec_corr_1s_cnt_3_lo_b0; //0x298 + u8 rsfec_corr_1s_cnt_3_lo_b8; //0x299 + u8 rsfec_corr_1s_cnt_3_lo_b16; //0x29A + u8 
rsfec_corr_1s_cnt_3_lo_b24; //0x29B + u8 rsfec_corr_1s_cnt_3_hi_b0; //0x29C + u8 rsfec_corr_1s_cnt_3_hi_b8; //0x29D + u8 rsfec_corr_1s_cnt_3_hi_b16; //0x29E + u8 rsfec_corr_1s_cnt_3_hi_b24; //0x29F + u8 reserved_2A0[96]; //0x2A0-0x2FF +}; + +#define eth_rsfec_csroffs(a) \ + (offsetof(struct intel_fpga_etile_rsfec, a)) + +/* E-Tile Transceiver Reconfiguration Interface Register Base Addresses + * Word Offset Register Type + * 0x00000-0x00207 PMA AVMM + * 0x40000-0x40012 PMA Capability + * 0x40080-0x40144 PMA Control and Status + */ +/* 0x00000-0x00207 PMA AVMM + * Note: Names not used for registers since they are not named in E-Tile PHY User Guide + */ +struct intel_fpga_etile_xcvr_pma_avmm { + u8 reserved_0[4]; // 0x000-0x003 + u8 reg_004; // 0x004 + u8 reg_005; // 0x005 + u8 reg_006; // 0x006 + u8 reg_007; // 0x007 + u8 reg_008; // 0x008 + u8 reg_009; // 0x009 + u8 reg_00a; // 0x00A + u8 reserved_00b[5]; // 0x00B-0x00F + u8 reg_010; // 0x010 + u8 reg_011; // 0x011 + u8 reg_012; // 0x012 + u8 reg_013; // 0x013 + u8 reg_014; // 0x014 + u8 reg_015; // 0x015 + u8 reg_016; // 0x016 + u8 reg_017; // 0x017 + u8 reserved_018[4]; // 0x018-0x01B + u8 reg_01c; // 0x01C + u8 reg_01d; // 0x01D + u8 reg_01e; // 0x01E + u8 reg_01f; // 0x01F + u8 reg_020; // 0x020 + u8 reg_021; // 0x021 + u8 reg_022; // 0x022 + u8 reg_023; // 0x023 + u8 reg_024; // 0x024 + u8 reserved_025[3]; // 0x025-0x027 + u8 reg_028; // 0x028 + u8 reserved_029[11]; // 0x029-0x033 + u8 reg_034; // 0x034 + u8 reg_035; // 0x035 + u8 reg_036; // 0x036 + u8 reg_037; // 0x037 + u8 reg_038; // 0x038 + u8 reserved_039[3]; // 0x039-0x03B + u8 reg_03c; // 0x03C + u8 reserved_03d[67]; //0x03D-0x07F + u8 reg_080; // 0x080 + u8 reg_081; // 0x081 + u8 reserved_082[2]; //0x082-0x083 + u8 reg_084; // 0x084 + u8 reg_085; // 0x085 + u8 reg_086; // 0x086 + u8 reg_087; // 0x087 + u8 reg_088; // 0x088 + u8 reg_089; // 0x089 + u8 reg_08A; // 0x08A + u8 reg_08B; // 0x08B + u8 reserved_08C[4]; //0x08C-0x08F + u8 reg_090; // 0x090 + 
u8 reg_091; // 0x091 + u8 reserved_092[3]; //0x092-0x094 + u8 reg_095; // 0x095 + u8 reserved_096[86]; //0x096-0x0EB + u8 reg_0ec; // 0x0EC + u8 reserved_0ed; // 0x0ED + u8 reg_0ee; // 0x0EE + u8 reg_0ef; // 0x0EF + u8 reserved_0f0[272]; //0x0F0-0x1FF + u8 reg_200; // 0x200 + u8 reg_201; // 0x201 + u8 reg_202; // 0x202 + u8 reg_203; // 0x203 + u8 reg_204; // 0x204 + u8 reserved_205[2]; //0x205-0x206 + u8 reg_207; // 0x207 +} __packed; + +/* 0x40000-0x40012 PMA Capability */ +struct intel_fpga_etile_xcvr_pma_capability { + u8 ip_identifier_0; // 0x40000 + u8 ip_identifier_1; // 0x40001 + u8 ip_identifier_2; // 0x40002 + u8 ip_identifier_3; // 0x40003 + u8 status_register_en; // 0x40004 + u8 ctrl_reg_en; // 0x40005 + u8 reserved_40006[10]; // 0x40006-0x4000F + u8 number_of_channels; // 0x40010 + u8 channel_number; // 0x40011 + u8 duplex; // 0x40012 +}; + +/* 0x40080-0x40144 PMA Control and Status */ +struct intel_fpga_etile_xcvr_pma_ctrl_status { + u8 rx_locked_to_data_status; // 0x40080 + u8 txrx_ready_status; // 0x40081 + u8 txrx_transfer_ready_status; // 0x40082 + u8 reserved_40083[95]; // 0x40083-0x400E1 + u8 txrx_pmaemib_reset; // 0x400E2 + u8 reserved_400e3[93]; // 0x400E3-0x4013F + u8 cfg_prof_select_and_start_streaming; // 0x40140 + u8 busy_status_bit; // 0x40141 + u8 reserved_400142; // 0x40142 + u8 pma_cfg; // 0x40143 +}; + +struct intel_fpga_etile_xcvr { + struct intel_fpga_etile_xcvr_pma_avmm pma_avmm; // 0x00000-0x00207 + u8 reserved_208[261624]; // 0x00208-0x3FFFF + struct intel_fpga_etile_xcvr_pma_capability pma_capability; // 0x40000-0x40012 + struct intel_fpga_etile_xcvr_pma_ctrl_status pma_ctrl_status; +}; + +struct intel_fpga_etile_tod_pio { + u32 etile_tod_pio_config; +}; + +#define eth_tod_pio_offs(a) \ + (offsetof(struct intel_fpga_etile_tod_pio, a)) + +#define eth_pma_avmm_csroffs(a) \ + (offsetof(struct intel_fpga_etile_xcvr, pma_avmm.a)) +#define eth_pma_capability_csroffs(a) \ + (offsetof(struct intel_fpga_etile_xcvr, pma_capability.a)) 
/* Byte offset of a PMA control/status register inside the transceiver CSR map. */
#define eth_pma_ctrl_status_csroffs(a) \
	(offsetof(struct intel_fpga_etile_xcvr, pma_ctrl_status.a))

/* E-tile Ethernet IP CSR map. The byte range covered by each member is
 * noted alongside it; reserved members pad the gaps between register
 * groups so that offsetof() yields the hardware offsets.
 */
struct intel_fpga_etile_ethernet {
	u32 reserved_0[176];						/* 0x000-0x0AF */
	struct intel_fpga_etile_eth_auto_neg_link auto_neg_link;	/* 0x0B0-0x0E8 */
	u32 reserved_e9[535];						/* 0x0E9-0x2FF */
	struct intel_fpga_etile_eth_phy phy;				/* 0x300-0x3FF */
	struct intel_fpga_etile_eth_tx_mac_10_25G tx_mac;		/* 0x400-0x4FF */
	struct intel_fpga_etile_eth_rx_mac_10_25G rx_mac;		/* 0x500-0x5FF */
	struct intel_fpga_etile_eth_pause_priority pause_priority;	/* 0x600-0x7FF */
	struct intel_fpga_etile_eth_tx_stats tx_stats;			/* 0x800-0x8FF */
	struct intel_fpga_etile_eth_rx_stats rx_stats;			/* 0x900-0x9FF */
	struct intel_fpga_etile_eth_1588_ptp ptp;			/* 0xA00-0xBFF */
};

/* Byte offset of a register (or register-group member) within the
 * E-tile Ethernet CSR map above.
 */
#define eth_csroffs(a) (offsetof(struct intel_fpga_etile_ethernet, a))
#define eth_auto_neg_link_csroffs(a) \
	(offsetof(struct intel_fpga_etile_ethernet, auto_neg_link.a))
#define eth_phy_csroffs(a) \
	(offsetof(struct intel_fpga_etile_ethernet, phy.a))
#define eth_tx_mac_csroffs(a) \
	(offsetof(struct intel_fpga_etile_ethernet, tx_mac.a))
#define eth_rx_mac_csroffs(a) \
	(offsetof(struct intel_fpga_etile_ethernet, rx_mac.a))
#define eth_pause_and_priority_csroffs(a) \
	(offsetof(struct intel_fpga_etile_ethernet, pause_priority.a))
#define eth_tx_stats_csroffs(a) \
	(offsetof(struct intel_fpga_etile_ethernet, tx_stats.a))
#define eth_rx_stats_csroffs(a) \
	(offsetof(struct intel_fpga_etile_ethernet, rx_stats.a))
#define eth_ptp_csroffs(a) \
	(offsetof(struct intel_fpga_etile_ethernet, ptp.a))

/* RX FIFO Address Space
 */
struct intel_fpga_rx_fifo {
	u32 fill_level;			/* 0x00 */
	u32 reserved;			/* 0x04 */
	u32 almost_full_threshold;	/* 0x08 */
	u32 almost_empty_threshold;	/* 0x0C */
	u32 cut_through_threshold;	/* 0x10 */
	u32 drop_on_error;		/* 0x14 */
};

#define rx_fifo_csroffs(a) (offsetof(struct intel_fpga_rx_fifo, a))

/* Per-netdevice driver state for the E-tile Ethernet MAC. */
struct intel_fpga_etile_eth_private {
	struct net_device *dev;
	struct device *device;
	struct napi_struct napi;
	struct timer_list fec_timer;

	/* Phylink */
	struct phylink *phylink;
	struct phylink_config phylink_config;

	/* MAC address space */
	struct intel_fpga_etile_ethernet __iomem *mac_dev;
	/* Shared DMA structure */
	struct altera_dma_private dma_priv;

	/* Shared PTP structure */
	struct intel_fpga_tod_private ptp_priv;
	u32 ptp_enable;

	/* FIFO address space */
	struct intel_fpga_rx_fifo __iomem *rx_fifo;

	/* Etile xcvr address space */
	struct intel_fpga_etile_xcvr __iomem *xcvr;

	/* RS-FEC address space */
	struct intel_fpga_etile_rsfec __iomem *rsfec;

	/* Tod-pio address space */
	struct intel_fpga_etile_tod_pio __iomem *tod_pio;

	/* Interrupts */
	u32 tx_irq;
	u32 rx_irq;

	/* RX/TX MAC FIFO configs */
	u32 tx_fifo_depth;
	u32 rx_fifo_depth;
	u32 rx_fifo_almost_full;
	u32 rx_fifo_almost_empty;
	u32 max_mtu;

	/* Hash filter settings */
	u32 hash_filter;
	u32 added_unicast;

	/* MAC command_config register protection */
	spinlock_t mac_cfg_lock;

	/* Tx path protection */
	spinlock_t tx_lock;

	/* Rx DMA & interrupt control protection */
	spinlock_t rxdma_irq_lock;

	/* Rx DMA Buffer Size */
	u32 rxdma_buffer_size;

	/* MAC flow control */
	unsigned int flow_ctrl;
	unsigned int pause;

	/* PMA digital delay */
	u32 tx_pma_delay_ns;
	u32 rx_pma_delay_ns;
	u32 tx_pma_delay_fns;
	u32 rx_pma_delay_fns;

	/* External PHY delay */
	u32 tx_external_phy_delay_ns;
	u32 rx_external_phy_delay_ns;

	/* PHY */
	void __iomem *mac_extra_control;
	/* PHY's MDIO address, -1 for autodetection */
	int phy_addr;
	phy_interface_t phy_iface;
	struct mii_bus *mdio;
	u32 link_speed;
	/* Will hold previous link speed during DR switching */
	u32 prv_link_speed;
	u8 duplex;
	int oldlink;

	/* FEC */
	const char *fec_type;
	/* Will hold previous fec type during DR switching */
	const char *prv_fec_type;
	u32 rsfec_cw_pos_rx;

	/* ethtool msglvl option */
	u32 msg_enable;
	struct altera_dmaops *dmaops;

	/* previous qsfp channel info */
	int old_qsfp_channel_info;
	int qsfp_poll_delay_count;
	const char *phy_mode;
};

/* Function prototypes
 */

void intel_fpga_etile_set_ethtool_ops(struct net_device *dev);
int etile_dynamic_reconfiguration(struct intel_fpga_etile_eth_private *priv,
				  const struct ethtool_link_ksettings *cmd);
int fec_init(struct platform_device *pdev, struct intel_fpga_etile_eth_private *priv);
void ui_adjustments(struct timer_list *t);
int etile_check_counter_complete(void __iomem *ioaddr, size_t offs, u32 bit_mask,
				 bool set_bit, int align);

#ifdef CONFIG_INTEL_FPGA_ETILE_DEBUG_FS
int intel_fpga_etile_init_fs(struct net_device *dev);
void intel_fpga_etile_exit_fs(struct net_device *dev);
#else
/* Debugfs disabled: stubs keep callers unconditional. */
static inline int intel_fpga_etile_init_fs(struct net_device *dev)
{
	return 0;
}

static inline void intel_fpga_etile_exit_fs(struct net_device *dev) {}
#endif /* CONFIG_INTEL_FPGA_ETILE_DEBUG_FS */

#endif /* __INTEL_FPGA_ETILE_ETH_H__ */
diff --git a/drivers/net/ethernet/altera/intel_fpga_etile_ethtool.c b/drivers/net/ethernet/altera/intel_fpga_etile_ethtool.c
new file mode 100644
index 0000000000000..4550a766a47be
--- /dev/null
+++ b/drivers/net/ethernet/altera/intel_fpga_etile_ethtool.c
@@ -0,0 +1,1088 @@
// SPDX-License-Identifier: GPL-2.0
/* Ethtool support for Intel FPGA E-tile Ethernet MAC driver
 * Copyright (C) 2019-2022 Intel Corporation. All rights reserved
 *
 * Contributors:
 *   Roman Bulgakov
 *   Yu Ying Choo
 *   Dalon Westergreen
 *   Joyce Ooi
 *
 * Original driver contributed by GlobalLogic.
 */

/* NOTE(review): the #include target names were lost in extraction (the
 * directives were empty). Reconstructed from the symbols this file uses
 * (ethtool ops, netdev, phylink) — verify against the original patch.
 */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phylink.h>

#include "altera_eth_dma.h"
#include "intel_fpga_etile.h"
#include "altera_utils.h"

#define ETILE_STATS_LEN ARRAY_SIZE(stat_gstrings)
#define ETILE_NUM_REGS 294

int dr_link_state;

static const char stat_gstrings[][ETH_GSTRING_LEN] = {
	"tx_fragments",
	"tx_jabbers",
	"tx_fcs_errors",
	"tx_crc_errors",
	"tx_errored_multicast",
	"tx_errored_broadcast",
	"tx_errored_unicast",
	"tx_errored_multicast_ctrl_frames",
	"tx_errored_broadcast_ctrl_frames",
	"tx_errored_unicast_ctrl_frames",
	"tx_pause_errors",
	"tx_64byte_frames",
	"tx_65to127bytes_frames",
	"tx_128to255bytes_frames",
	"tx_256to511bytes_frames",
	"tx_512to1023bytes_frames",
	"tx_1024to1518bytes_frames",
	"tx_1519tomax_frames",
	"tx_oversize_frames",
	"tx_multicast_frames",
	"tx_broadcast_frames",
	"tx_unicast_frames",
	"tx_multicast_ctrl_frames",
	"tx_broadcast_ctrl_frames",
	"tx_unicast_ctrl_frames",
	"tx_pause_frames",
	"tx_runt_packets",
	"tx_frame_starts",
	"tx_length_errored_frames",
	"tx_prc_errored_frames",
	"tx_prc_frames",
	"tx_payload_bytes",
	"tx_bytes",
	"tx_errors",
	"tx_dropped",
	"tx_bad_length_type_frames",
	"rx_fragments",
	"rx_jabbers",
	"rx_fcs_errors",
	"rx_crc_errors",
	"rx_errored_multicast",
	"rx_errored_broadcast",
	"rx_errored_unicast",
	"rx_errored_multicast_ctrl_frames",
	"rx_errored_broadcast_ctrl_frames",
	"rx_errored_unicast_ctrl_frames",
	"rx_pause_errors",
	"rx_64byte_frames",
	"rx_65to127bytes_frames",
	"rx_128to255bytes_frames",
	"rx_256to511bytes_frames",
	"rx_512to1023bytes_frames",
	"rx_1024to1518bytes_frames",
	"rx_1519tomax_frames",
	"rx_oversize_frames",
	"rx_multicast_frames",
	"rx_broadcast_frames",
	"rx_unicast_frames",
	"rx_multicast_ctrl_frames",
	"rx_broadcast_ctrl_frames",
	"rx_unicast_ctrl_frames",
	"rx_pause_frames",
	"rx_runt_packets",
	"rx_frame_starts",
	"rx_length_errored_frames",
	"rx_prc_errored_frames",
"rx_prc_frames", + "rx_payload_bytes", + "rx_bytes" +}; + +static void etile_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, "intel_fpga_etile", ETH_GSTRING_LEN); + strscpy(info->version, "v1.0", ETH_GSTRING_LEN); + strscpy(info->bus_info, "platform", ETH_GSTRING_LEN); +} + +/* Fill in a buffer with the strings which correspond to the + * stats + */ +static void etile_gstrings(struct net_device *dev, u32 stringset, u8 *buf) +{ + memcpy(buf, stat_gstrings, ETILE_STATS_LEN * ETH_GSTRING_LEN); +} + +static void etile_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, + u64 *buf) +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + u32 lsb; + u32 msb; + + /* TX Fragments */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_fragments_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_fragments_msb)); + buf[0] = ((u64)msb << 32) | lsb; + + /* TX Jabbers */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_jabbers_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_jabbers_msb)); + buf[1] = ((u64)msb << 32) | lsb; + + /* TX FCS errors */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_fcserr_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_fcserr_msb)); + buf[2] = ((u64)msb << 32) | lsb; + + /* TX CRC errors */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_crcerr_okpkt_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_crcerr_okpkt_msb)); + buf[3] = ((u64)msb << 32) | lsb; + + /* TX errored multicast */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_data_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_data_err_msb)); + buf[4] = ((u64)msb << 32) | lsb; + + /* TX errored broadcast */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_data_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_data_err_msb)); + buf[5] = ((u64)msb << 32) | lsb; + + /* TX errored 
unicast */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_ucast_data_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_ucast_data_err_msb)); + buf[6] = ((u64)msb << 32) | lsb; + + /* TX errored multicast ctrl frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_ctrl_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_ctrl_err_msb)); + buf[7] = ((u64)msb << 32) | lsb; + + /* TX errored broadcast ctrl frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_ctrl_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_ctrl_err_msb)); + buf[8] = ((u64)msb << 32) | lsb; + + /* TX errored unicast ctrl frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_ucast_ctrl_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_ucast_ctrl_err_msb)); + buf[9] = ((u64)msb << 32) | lsb; + + /* TX pause errors */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pause_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pause_err_msb)); + buf[10] = ((u64)msb << 32) | lsb; + + /* TX 64-byte frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_64b_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_64b_msb)); + buf[11] = ((u64)msb << 32) | lsb; + + /* TX 65to127-byte frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_65to127b_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_65to127b_msb)); + buf[12] = ((u64)msb << 32) | lsb; + + /* TX 128to255-byte frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_128to255b_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_128to255b_msb)); + buf[13] = ((u64)msb << 32) | lsb; + + /* TX 256to511-byte frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_256to511b_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_256to511b_msb)); + buf[14] = ((u64)msb << 32) | lsb; + + /* TX 512to1023-byte frames */ + lsb = 
csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_512to1023b_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_512to1023b_msb)); + buf[15] = ((u64)msb << 32) | lsb; + + /* TX 1024to1518-byte frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_1024to1518b_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_1024to1518b_msb)); + buf[16] = ((u64)msb << 32) | lsb; + + /* TX 1519toMAX-byte frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_1519tomaxb_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_1519tomaxb_msb)); + buf[17] = ((u64)msb << 32) | lsb; + + /* TX oversize frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_oversize_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_oversize_msb)); + buf[18] = ((u64)msb << 32) | lsb; + + /* TX multicast frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_data_ok_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_data_ok_msb)); + buf[19] = ((u64)msb << 32) | lsb; + + /* TX broadcast frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_data_ok_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_data_ok_msb)); + buf[20] = ((u64)msb << 32) | lsb; + + /* TX unicast frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_ucast_data_ok_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_ucast_data_ok_msb)); + buf[21] = ((u64)msb << 32) | lsb; + + /* TX multicast ctrl frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_ctrl_ok_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_ctrl_ok_msb)); + buf[22] = ((u64)msb << 32) | lsb; + + /* TX broadcast ctrl frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_ctrl_ok_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_ctrl_ok_msb)); + buf[23] = ((u64)msb << 32) | lsb; + + /* TX unicast ctrl frames */ + lsb = csrrd32(priv->mac_dev, 
eth_tx_stats_csroffs(tx_ucast_ctrl_ok_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_ucast_ctrl_ok_msb)); + buf[24] = ((u64)msb << 32) | lsb; + + /* TX pause frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pause_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pause_msb)); + buf[25] = ((u64)msb << 32) | lsb; + + /* TX runt packets */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_rnt_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_rnt_msb)); + buf[26] = ((u64)msb << 32) | lsb; + + /* TX frame starts */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_st_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_st_msb)); + buf[27] = ((u64)msb << 32) | lsb; + + /* TX length-errored frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_lenerr_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_lenerr_msb)); + buf[28] = ((u64)msb << 32) | lsb; + + /* TX PRC-errored frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pfc_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pfc_err_msb)); + buf[29] = ((u64)msb << 32) | lsb; + + /* TX PFC frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pfc_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pfc_msb)); + buf[30] = ((u64)msb << 32) | lsb; + + /* TX payload bytes in frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_payload_octetsok_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_payload_octetsok_msb)); + buf[31] = ((u64)msb << 32) | lsb; + + /* TX bytes in frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_frame_octetsok_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_frame_octetsok_msb)); + buf[32] = ((u64)msb << 32) | lsb; + + /* TX malformed frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_malformed_ctrl_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_malformed_ctrl_msb)); + buf[33] = 
((u64)msb << 32) | lsb; + + /* TX dropped frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_dropped_ctrl_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_dropped_ctrl_msb)); + buf[34] = ((u64)msb << 32) | lsb; + + /* TX bad-length/type frames */ + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_badlt_ctrl_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_badlt_ctrl_msb)); + buf[35] = ((u64)msb << 32) | lsb; + + /* RX Fragments */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_fragments_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_fragments_msb)); + buf[36] = ((u64)msb << 32) | lsb; + + /* RX Jabbers */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_jabbers_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_jabbers_msb)); + buf[37] = ((u64)msb << 32) | lsb; + + /* RX FCS errors */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_fcserr_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_fcserr_msb)); + buf[38] = ((u64)msb << 32) | lsb; + + /* RX CRC errors */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_crcerr_okpkt_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_crcerr_okpkt_msb)); + buf[39] = ((u64)msb << 32) | lsb; + + /* RX errored multicast */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_mcast_data_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_mcast_data_err_msb)); + buf[40] = ((u64)msb << 32) | lsb; + + /* RX errored broadcast */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_data_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_data_err_msb)); + buf[41] = ((u64)msb << 32) | lsb; + + /* RX errored unicast */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_data_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_data_err_msb)); + buf[42] = ((u64)msb << 32) | lsb; + + /* RX errored multicast ctrl frames */ + lsb = csrrd32(priv->mac_dev, 
eth_rx_stats_csroffs(rx_mcast_ctrl_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_mcast_ctrl_err_msb)); + buf[43] = ((u64)msb << 32) | lsb; + + /* RX errored broadcast ctrl frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_ctrl_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_ctrl_err_msb)); + buf[44] = ((u64)msb << 32) | lsb; + + /* RX errored unicast ctrl frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_ctrl_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_ctrl_err_msb)); + buf[45] = ((u64)msb << 32) | lsb; + + /* RX pause errors */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_pause_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_pause_err_msb)); + buf[46] = ((u64)msb << 32) | lsb; + + /* RX 64-byte frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_64b_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_64b_msb)); + buf[47] = ((u64)msb << 32) | lsb; + + /* RX 65to127-byte frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_65to127b_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_65to127b_msb)); + buf[48] = ((u64)msb << 32) | lsb; + + /* RX 128to255-byte frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_128to255b_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_128to255b_msb)); + buf[49] = ((u64)msb << 32) | lsb; + + /* RX 256to511-byte frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_256to511b_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_256to511b_msb)); + buf[50] = ((u64)msb << 32) | lsb; + + /* RX 512to1023-byte frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_512to1023b_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_512to1023b_msb)); + buf[51] = ((u64)msb << 32) | lsb; + + /* RX 1024to1518-byte frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_1024to1518b_lsb)); + msb = 
csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_1024to1518b_msb)); + buf[52] = ((u64)msb << 32) | lsb; + + /* RX 1519toMAX-byte frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_1519tomaxb_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_1519tomaxb_msb)); + buf[53] = ((u64)msb << 32) | lsb; + + /* RX oversize frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_oversize_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_oversize_msb)); + buf[54] = ((u64)msb << 32) | lsb; + + /* RX multicast frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_mcast_data_ok_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_mcast_data_ok_msb)); + buf[55] = ((u64)msb << 32) | lsb; + + /* RX broadcast frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_data_ok_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_data_ok_msb)); + buf[56] = ((u64)msb << 32) | lsb; + + /* RX unicast frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_data_ok_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_data_ok_msb)); + buf[57] = ((u64)msb << 32) | lsb; + + /* RX multicast ctrl frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_mcast_ctrl_ok_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_mcast_ctrl_ok_msb)); + buf[58] = ((u64)msb << 32) | lsb; + + /* RX broadcast ctrl frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_ctrl_ok_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_ctrl_ok_msb)); + buf[59] = ((u64)msb << 32) | lsb; + + /* RX unicast ctrl frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_ctrl_ok_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_ctrl_ok_msb)); + buf[60] = ((u64)msb << 32) | lsb; + + /* RX pause frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_pause_lsb)); + msb = csrrd32(priv->mac_dev, 
eth_rx_stats_csroffs(rx_pause_msb)); + buf[61] = ((u64)msb << 32) | lsb; + + /* RX runt frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_rnt_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_rnt_msb)); + buf[62] = ((u64)msb << 32) | lsb; + + /* RX frame starts */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_st_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_st_msb)); + buf[63] = ((u64)msb << 32) | lsb; + + /* RX length-errored frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_lenerr_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_lenerr_msb)); + buf[64] = ((u64)msb << 32) | lsb; + + /* RX PRC-errored frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_pfc_err_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_pfc_err_msb)); + buf[65] = ((u64)msb << 32) | lsb; + + /* RX PRC frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_pfc_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_pfc_msb)); + buf[66] = ((u64)msb << 32) | lsb; + + /* RX payload bytes in frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_payload_octetsok_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_payload_octetsok_msb)); + buf[67] = ((u64)msb << 32) | lsb; + + /* RX bytes in frames */ + lsb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_frame_octetsok_lsb)); + msb = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_frame_octetsok_msb)); + buf[68] = ((u64)msb << 32) | lsb; +} + +static int etile_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ETILE_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static u32 etile_get_msglevel(struct net_device *dev) +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void etile_set_msglevel(struct net_device *dev, uint32_t data) +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + + 
priv->msg_enable = data; + priv->dma_priv.msg_enable = data; +} + +static int etile_reglen(struct net_device *dev) +{ + return ETILE_NUM_REGS * sizeof(u32); +} + +static void etile_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *regbuf) +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + u32 *buf = regbuf; + + /* Set version to a known value, so ethtool knows + * how to do any special formatting of this data. + * This version number will need to change if and + * when this register table is changed. + * + * version[31:0] = 1: Dump the 10GbE MAC IP Registers + * Upper bits are all 0 by default + * + * Upper 16-bits will indicate feature presence for + * Ethtool register decoding in future version. + */ + + regs->version = 1; + /* Auto Negotiation and Link Training Registers */ + buf[0] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(anlt_sequencer_config)); + buf[1] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(anlt_sequencer_status)); + buf[2] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(auto_neg_conf_1)); + buf[3] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(auto_neg_conf_2)); + buf[4] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(auto_neg_stat)); + buf[5] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(auto_neg_conf_3)); + buf[6] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(auto_neg_conf_4)); + buf[7] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(auto_neg_conf_5)); + buf[8] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(auto_neg_conf_6)); + buf[9] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(auto_neg_stat_1)); + buf[10] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(auto_neg_stat_2)); + buf[11] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(auto_neg_stat_3)); + buf[12] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(auto_neg_stat_4)); + buf[13] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(auto_neg_stat_5)); + buf[14] = csrrd32(priv->mac_dev, 
eth_auto_neg_link_csroffs(auto_neg_an_channel_override)); + buf[15] = csrrd32(priv->mac_dev, + eth_auto_neg_link_csroffs(auto_neg_const_next_page_override)); + buf[16] = csrrd32(priv->mac_dev, + eth_auto_neg_link_csroffs(auto_neg_const_next_page_lp_stat)); + buf[17] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(link_train_conf_1)); + buf[18] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(link_train_stat_1)); + buf[19] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(link_train_conf_lane_0)); + buf[20] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(link_train_conf_lane_1)); + buf[21] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(link_train_conf_lane_2)); + buf[22] = csrrd32(priv->mac_dev, eth_auto_neg_link_csroffs(link_train_conf_lane_3)); + + /* PHY registers */ + buf[23] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_rev_id)); + buf[24] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_scratch)); + buf[25] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_loopback)); + buf[26] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_config)); + buf[27] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_cdr_pll_locked)); + buf[28] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_tx_datapath_ready)); + buf[29] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_frm_err_detect)); + buf[30] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_clr_frm_err)); + buf[31] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_pcs_stat_anlt)); + buf[32] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_pcs_err_inject)); + buf[33] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_am_lock)); + buf[34] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_dskew_chng)); + buf[35] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_ber_cnt)); + buf[36] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_aib_transfer_ready_stat)); + buf[37] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_soft_rc_reset_stat)); + buf[38] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_pcs_virtual_ln_0)); + buf[39] = csrrd32(priv->mac_dev, 
eth_phy_csroffs(phy_pcs_virtual_ln_1)); + buf[40] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_pcs_virtual_ln_2)); + buf[41] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_pcs_virtual_ln_3)); + buf[42] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_recovered_clk_freq)); + buf[43] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_tx_clk_freq)); + buf[44] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_tx_pld_conf)); + buf[45] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_tx_pld_stat)); + buf[46] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_dynamic_deskew_buf_stat)); + buf[47] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_rx_pld_conf)); + buf[48] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_rx_pcs_conf)); + buf[49] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_0)); + buf[50] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_1)); + buf[51] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_2)); + buf[52] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_3)); + buf[53] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_4)); + buf[54] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_5)); + buf[55] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_6)); + buf[56] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_7)); + buf[57] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_8)); + buf[58] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_9)); + buf[59] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_10)); + buf[60] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_11)); + buf[61] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_12)); + buf[62] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_13)); + buf[63] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_14)); + buf[64] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_15)); + buf[65] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_16)); + buf[66] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_17)); + buf[67] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_18)); 
+ buf[68] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_bip_cnt_19)); + buf[69] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_timer_window_hiber_check)); + buf[70] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_hiber_frm_err)); + buf[71] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_err_block_cnt)); + buf[72] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_deskew_dept_0)); + buf[73] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_deskew_dept_1)); + buf[74] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_deskew_dept_2)); + buf[75] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_deskew_dept_3)); + buf[76] = csrrd32(priv->mac_dev, eth_phy_csroffs(phy_rx_pcs_test_err_cnt)); + + /* TX MAC Registers */ + buf[77] = csrrd32(priv->mac_dev, eth_tx_mac_csroffs(tx_mac_rev_id)); + buf[78] = csrrd32(priv->mac_dev, eth_tx_mac_csroffs(tx_mac_scratch)); + buf[79] = csrrd32(priv->mac_dev, eth_tx_mac_csroffs(tx_mac_link_fault)); + buf[80] = csrrd32(priv->mac_dev, eth_tx_mac_csroffs(tx_mac_ipg_col_rem)); + buf[81] = csrrd32(priv->mac_dev, eth_tx_mac_csroffs(tx_mac_max_frm_size)); + buf[82] = csrrd32(priv->mac_dev, eth_tx_mac_csroffs(tx_mac_conf)); + buf[83] = csrrd32(priv->mac_dev, eth_tx_mac_csroffs(tx_mac_ehip_conf)); + buf[84] = csrrd32(priv->mac_dev, eth_tx_mac_csroffs(tx_mac_source_addr_lower_bytes)); + buf[85] = csrrd32(priv->mac_dev, eth_tx_mac_csroffs(tx_mac_source_addr_higher_bytes)); + + /* RX MAC Registers */ + buf[86] = csrrd32(priv->mac_dev, eth_rx_mac_csroffs(rx_mac_rev_id)); + buf[87] = csrrd32(priv->mac_dev, eth_rx_mac_csroffs(rx_mac_scratch)); + buf[88] = csrrd32(priv->mac_dev, eth_rx_mac_csroffs(rx_mac_max_frm_size)); + buf[89] = csrrd32(priv->mac_dev, eth_rx_mac_csroffs(rx_mac_frwd_rx_crc)); + buf[90] = csrrd32(priv->mac_dev, eth_rx_mac_csroffs(rx_max_link_fault)); + buf[91] = csrrd32(priv->mac_dev, eth_rx_mac_csroffs(rx_mac_conf)); + buf[92] = csrrd32(priv->mac_dev, eth_rx_mac_csroffs(rx_mac_ehip_conf)); + + /* Pause and Priority Registers */ + buf[93] = csrrd32(priv->mac_dev, 
eth_pause_and_priority_csroffs(txsfc_module_revision_id)); + buf[94] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(txsfc_scratch_register)); + buf[95] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(enable_tx_pause_ports)); + buf[96] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(tx_pause_request)); + buf[97] = csrrd32(priv->mac_dev, + eth_pause_and_priority_csroffs(enable_automatic_tx_pause_retransmission)); + buf[98] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(retransmit_holdoff_quanta)); + buf[99] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(retransmit_pause_quanta)); + buf[100] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(enable_tx_xoff)); + buf[101] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(enable_uniform_holdoff)); + buf[102] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(set_uniform_holdoff)); + buf[103] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(flow_control_fields_lsb)); + buf[104] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(flow_control_fields_msb)); + buf[105] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(flow_control_frames_lsb)); + buf[106] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(flow_control_frames_msb)); + buf[107] = csrrd32(priv->mac_dev, + eth_pause_and_priority_csroffs(tx_flow_control_feature_cfg)); + buf[108] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pause_quanta_0)); + buf[109] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pause_quanta_1)); + buf[110] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pause_quanta_2)); + buf[111] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pause_quanta_3)); + buf[112] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pause_quanta_4)); + buf[113] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pause_quanta_5)); + buf[114] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pause_quanta_6)); + buf[115] = 
csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pause_quanta_7)); + buf[116] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pfc_holdoff_quanta_0)); + buf[117] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pfc_holdoff_quanta_1)); + buf[118] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pfc_holdoff_quanta_2)); + buf[119] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pfc_holdoff_quanta_3)); + buf[120] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pfc_holdoff_quanta_4)); + buf[121] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pfc_holdoff_quanta_5)); + buf[122] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pfc_holdoff_quanta_6)); + buf[123] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(pfc_holdoff_quanta_7)); + buf[124] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs + (rxsfc_module_revision_id)); + buf[125] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(rxsfc_scratch_register)); + buf[126] = csrrd32(priv->mac_dev, + eth_pause_and_priority_csroffs(enable_rx_pause_frame_processing_fields)); + buf[127] = csrrd32(priv->mac_dev, + eth_pause_and_priority_csroffs(forward_flow_control_frames)); + buf[128] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(rx_pause_frames_lsb)); + buf[129] = csrrd32(priv->mac_dev, eth_pause_and_priority_csroffs(rx_pause_frames_msb)); + buf[130] = csrrd32(priv->mac_dev, + eth_pause_and_priority_csroffs(rx_flow_control_feature_cfg)); + + /* TX Statistics Counter Registers */ + buf[131] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_fragments_lsb)); + buf[132] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_fragments_msb)); + buf[133] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_jabbers_lsb)); + buf[134] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_jabbers_msb)); + buf[135] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_fcserr_lsb)); + buf[136] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_fcserr_msb)); + buf[137] = 
csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_crcerr_okpkt_lsb)); + buf[138] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_crcerr_okpkt_msb)); + buf[139] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_data_err_lsb)); + buf[140] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_data_err_msb)); + buf[141] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_data_err_lsb)); + buf[142] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_data_err_msb)); + buf[143] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_ucast_data_err_lsb)); + buf[144] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_ucast_data_err_msb)); + buf[145] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_ctrl_err_lsb)); + buf[146] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_ctrl_err_msb)); + buf[147] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_ctrl_err_lsb)); + buf[148] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_ctrl_err_msb)); + buf[149] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_ucast_ctrl_err_lsb)); + buf[150] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_ucast_ctrl_err_msb)); + buf[151] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pause_err_lsb)); + buf[152] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pause_err_msb)); + buf[153] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_64b_lsb)); + buf[154] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_64b_msb)); + buf[155] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_65to127b_lsb)); + buf[156] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_65to127b_msb)); + buf[157] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_128to255b_lsb)); + buf[158] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_128to255b_msb)); + buf[159] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_256to511b_lsb)); + buf[160] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_256to511b_msb)); + buf[161] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_512to1023b_lsb)); + buf[162] = 
csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_512to1023b_msb)); + buf[163] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_1024to1518b_lsb)); + buf[164] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_1024to1518b_msb)); + buf[165] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_1519tomaxb_lsb)); + buf[166] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_1519tomaxb_msb)); + buf[167] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_oversize_lsb)); + buf[168] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_oversize_msb)); + buf[169] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_data_ok_lsb)); + buf[170] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_data_ok_msb)); + buf[171] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_data_ok_lsb)); + buf[172] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_data_ok_msb)); + buf[173] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_ucast_data_ok_lsb)); + buf[174] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_ucast_data_ok_msb)); + buf[175] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_ctrl_ok_lsb)); + buf[176] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_mcast_ctrl_ok_msb)); + buf[177] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_ctrl_ok_lsb)); + buf[178] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_bcast_ctrl_ok_msb)); + buf[179] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_ucast_ctrl_ok_lsb)); + buf[180] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_ucast_ctrl_ok_msb)); + buf[181] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pause_lsb)); + buf[182] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pause_msb)); + buf[183] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_rnt_lsb)); + buf[184] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_rnt_msb)); + buf[185] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_st_lsb)); + buf[186] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_st_msb)); + buf[187] = csrrd32(priv->mac_dev, 
eth_tx_stats_csroffs(tx_lenerr_lsb)); + buf[188] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_lenerr_msb)); + buf[189] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pfc_err_lsb)); + buf[190] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pfc_err_msb)); + buf[191] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pfc_lsb)); + buf[192] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_pfc_msb)); + buf[193] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_stat_revid)); + buf[194] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_stat_scratch)); + buf[195] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_cntr_config)); + buf[196] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_cntr_status)); + buf[197] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_payload_octetsok_lsb)); + buf[198] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_payload_octetsok_msb)); + buf[199] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_frame_octetsok_lsb)); + buf[200] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_frame_octetsok_msb)); + buf[201] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_malformed_ctrl_lsb)); + buf[202] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_malformed_ctrl_msb)); + buf[203] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_dropped_ctrl_lsb)); + buf[204] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_dropped_ctrl_msb)); + buf[205] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_badlt_ctrl_lsb)); + buf[206] = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_badlt_ctrl_msb)); + + /* RX Statistics Counter Registers */ + buf[207] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_fragments_lsb)); + buf[208] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_fragments_msb)); + buf[209] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_jabbers_lsb)); + buf[210] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_jabbers_msb)); + buf[211] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_fcserr_lsb)); + buf[212] = csrrd32(priv->mac_dev, 
eth_rx_stats_csroffs(rx_fcserr_msb)); + buf[213] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_crcerr_okpkt_lsb)); + buf[214] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_crcerr_okpkt_msb)); + buf[215] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_mcast_data_err_lsb)); + buf[216] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_mcast_data_err_msb)); + buf[217] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_data_err_lsb)); + buf[218] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_data_err_msb)); + buf[219] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_data_err_lsb)); + buf[220] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_data_err_msb)); + buf[221] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_mcast_ctrl_err_lsb)); + buf[222] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_mcast_ctrl_err_msb)); + buf[223] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_ctrl_err_lsb)); + buf[224] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_ctrl_err_msb)); + buf[225] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_ctrl_err_lsb)); + buf[226] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_ctrl_err_msb)); + buf[227] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_pause_err_lsb)); + buf[228] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_pause_err_msb)); + buf[229] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_64b_lsb)); + buf[230] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_64b_msb)); + buf[231] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_65to127b_lsb)); + buf[232] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_65to127b_msb)); + buf[233] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_128to255b_lsb)); + buf[234] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_128to255b_msb)); + buf[235] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_256to511b_lsb)); + buf[236] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_256to511b_msb)); + buf[237] = csrrd32(priv->mac_dev, 
eth_rx_stats_csroffs(rx_512to1023b_lsb)); + buf[238] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_512to1023b_msb)); + buf[239] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_1024to1518b_lsb)); + buf[240] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_1024to1518b_msb)); + buf[241] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_1519tomaxb_lsb)); + buf[242] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_1519tomaxb_msb)); + buf[243] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_oversize_lsb)); + buf[244] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_oversize_msb)); + buf[245] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_mcast_data_ok_lsb)); + buf[246] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_mcast_data_ok_msb)); + buf[247] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_data_ok_lsb)); + buf[248] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_data_ok_msb)); + buf[249] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_data_ok_lsb)); + buf[250] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_data_ok_msb)); + buf[251] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_mcast_ctrl_ok_lsb)); + buf[252] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_mcast_ctrl_ok_msb)); + buf[253] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_ctrl_ok_lsb)); + buf[254] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_bcast_ctrl_ok_msb)); + buf[255] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_ctrl_ok_lsb)); + buf[256] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_ucast_ctrl_ok_msb)); + buf[257] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_pause_lsb)); + buf[258] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_pause_msb)); + buf[259] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_rnt_lsb)); + buf[260] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_rnt_msb)); + buf[261] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_st_lsb)); + buf[262] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_st_msb)); + buf[263] = 
csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_lenerr_lsb)); + buf[264] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_lenerr_msb)); + buf[265] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_pfc_err_lsb)); + buf[266] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_pfc_err_msb)); + buf[267] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_pfc_lsb)); + buf[268] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_pfc_msb)); + buf[269] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_stat_revid)); + buf[270] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_stat_scratch)); + buf[271] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_cntr_config)); + buf[272] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_cntr_status)); + buf[273] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_payload_octetsok_lsb)); + buf[274] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_payload_octetsok_msb)); + buf[275] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_frame_octetsok_lsb)); + buf[276] = csrrd32(priv->mac_dev, eth_rx_stats_csroffs(rx_frame_octetsok_msb)); + + /* PTP Registers */ + buf[277] = csrrd32(priv->mac_dev, eth_ptp_csroffs(txptp_revid)); + buf[278] = csrrd32(priv->mac_dev, eth_ptp_csroffs(txptp_scratch)); + buf[279] = csrrd32(priv->mac_dev, eth_ptp_csroffs(tx_ptp_clk_period)); + buf[280] = csrrd32(priv->mac_dev, eth_ptp_csroffs(tx_ptp_extra_latency)); + buf[281] = csrrd32(priv->mac_dev, eth_ptp_csroffs(ptp_debug)); + buf[282] = csrrd32(priv->mac_dev, eth_ptp_csroffs(rxptp_revid)); + buf[283] = csrrd32(priv->mac_dev, eth_ptp_csroffs(rxptp_scratch)); + buf[284] = csrrd32(priv->mac_dev, eth_ptp_csroffs(rx_ptp_extra_latency)); + buf[285] = csrrd32(priv->mac_dev, eth_ptp_csroffs(tx_ui_reg)); + buf[286] = csrrd32(priv->mac_dev, eth_ptp_csroffs(rx_ui_reg)); + buf[287] = csrrd32(priv->mac_dev, eth_ptp_csroffs(tam_snapshot)); + buf[288] = csrrd32(priv->mac_dev, eth_ptp_csroffs(tx_tam_l)); + buf[289] = csrrd32(priv->mac_dev, eth_ptp_csroffs(tx_tam_h)); + buf[290] = 
csrrd32(priv->mac_dev, eth_ptp_csroffs(tx_count)); + buf[291] = csrrd32(priv->mac_dev, eth_ptp_csroffs(rx_tam_l)); + buf[292] = csrrd32(priv->mac_dev, eth_ptp_csroffs(rx_tam_h)); + buf[293] = csrrd32(priv->mac_dev, eth_ptp_csroffs(rx_count)); +} + +static void etile_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pauseparam) +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + + pauseparam->rx_pause = 0; + pauseparam->tx_pause = 0; + pauseparam->autoneg = 0; + + if (priv->flow_ctrl & FLOW_RX) + pauseparam->rx_pause = 1; + if (priv->flow_ctrl & FLOW_TX) + pauseparam->tx_pause = 1; +} + +static int etile_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pauseparam) +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + int new_pause = FLOW_OFF; + int ret = 0; + + spin_lock(&priv->mac_cfg_lock); + + if (pauseparam->autoneg != 0) { + ret = -EINVAL; + goto out; + } + + if (pauseparam->rx_pause) { + new_pause |= FLOW_RX; + tse_set_bit(priv->mac_dev, + eth_pause_and_priority_csroffs(rx_flow_control_feature_cfg), + ETH_RX_EN_STD_FLOW_CTRL); + } else { + tse_clear_bit(priv->mac_dev, + eth_pause_and_priority_csroffs(rx_flow_control_feature_cfg), + ETH_RX_EN_STD_FLOW_CTRL); + } + + if (pauseparam->tx_pause) { + new_pause |= FLOW_TX; + tse_set_bit(priv->mac_dev, + eth_pause_and_priority_csroffs(tx_flow_control_feature_cfg), + ETH_TX_EN_STD_FLOW_CTRL); + } else { + tse_clear_bit(priv->mac_dev, + eth_pause_and_priority_csroffs(tx_flow_control_feature_cfg), + ETH_TX_EN_STD_FLOW_CTRL); + } + + csrwr32(priv->pause, priv->mac_dev, + eth_pause_and_priority_csroffs(pause_quanta_0)); + priv->flow_ctrl = new_pause; +out: + spin_unlock(&priv->mac_cfg_lock); + return ret; +} + +static int etile_get_ts_info(struct net_device *dev, + struct kernel_ethtool_ts_info *info) +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE 
| + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (priv->ptp_priv.ptp_clock) + info->phc_index = ptp_clock_index(priv->ptp_priv.ptp_clock); + else + info->phc_index = -1; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON) | + (1 << HWTSTAMP_TX_ONESTEP_SYNC); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + return 0; +} + +/* Set link ksettings (phy address, speed) for ethtools */ +static int etile_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + int ret; + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + + if (!priv) + return -ENODEV; + + if (cmd->base.speed == SPEED_25000 || cmd->base.speed == SPEED_10000) { + priv->link_speed = cmd->base.speed; + if (priv->link_speed == SPEED_10000) + priv->fec_type = "no-fec"; + + spin_lock(&priv->mac_cfg_lock); + ret = etile_dynamic_reconfiguration(priv, cmd); + spin_unlock(&priv->mac_cfg_lock); + } else { + return -EPERM; + } + + if (ret) + netdev_dbg(dev, "Cannot configure dynamic reconfiguration(error: %d)\n", ret); + + ret = phylink_ethtool_ksettings_set(priv->phylink, cmd); + + return ret; +} + +/* Get link ksettings (phy address, speed) for ethtools */ +static int etile_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + int ret; + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + + dr_link_state = 1; + + if (!priv) + return -ENODEV; + ret = phylink_ethtool_ksettings_get(priv->phylink, cmd); + + return ret; +} + +static int etile_get_fec_param(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(netdev); + + dr_link_state = 1; + + if (!priv) + return -ENODEV; + + if (!(strcmp(priv->fec_type, "kr-fec"))) { + if (priv->link_speed == SPEED_25000) { + fecparam->fec |= ETHTOOL_FEC_RS; + fecparam->active_fec |= ETHTOOL_FEC_RS; + } + if (priv->link_speed == SPEED_10000) { + fecparam->fec |= ETHTOOL_FEC_RS; 
+ fecparam->active_fec |= ETHTOOL_FEC_RS; + } } else if (!(strcmp(priv->fec_type, "no-fec"))) { + if (priv->link_speed == SPEED_25000) { + fecparam->fec |= ETHTOOL_FEC_OFF; + fecparam->active_fec |= ETHTOOL_FEC_OFF; + } + if (priv->link_speed == SPEED_10000) { + fecparam->fec |= ETHTOOL_FEC_OFF; + fecparam->active_fec |= ETHTOOL_FEC_OFF; + } + } + return 0; +} + +static int etile_set_fec_param(struct net_device *netdev, + struct ethtool_fecparam *fecparam) + +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(netdev); + struct ethtool_link_ksettings *cmd; + + int ret; + + if (!priv) + return -ENODEV; + + switch (fecparam->fec) { + case ETHTOOL_FEC_RS: + if (priv->link_speed == SPEED_25000 && + !(strcmp(priv->fec_type, "no-fec"))) { + priv->fec_type = "kr-fec"; + fecparam->fec |= ETHTOOL_FEC_RS; + fecparam->active_fec |= ETHTOOL_FEC_RS; + spin_lock(&priv->mac_cfg_lock); + ret = etile_dynamic_reconfiguration(priv, cmd); + spin_unlock(&priv->mac_cfg_lock); + priv->prv_fec_type = priv->fec_type; + priv->prv_link_speed = priv->link_speed; + } else if (priv->link_speed == SPEED_10000 && + !(strcmp(priv->fec_type, "no-fec"))) { + return -EPERM; + + } else if (priv->link_speed == SPEED_25000 && + !(strcmp(priv->fec_type, "kr-fec"))) { + netdev_warn(priv->dev, "E-tile already with %d/%s\n", priv->link_speed, + priv->fec_type); + return 0; + } + break; + case ETHTOOL_FEC_OFF: + if (priv->link_speed == SPEED_25000 && + !(strcmp(priv->fec_type, "kr-fec"))) { + priv->fec_type = "no-fec"; + fecparam->fec |= ETHTOOL_FEC_OFF; + fecparam->active_fec |= ETHTOOL_FEC_OFF; + spin_lock(&priv->mac_cfg_lock); + ret = etile_dynamic_reconfiguration(priv, cmd); + spin_unlock(&priv->mac_cfg_lock); + priv->prv_fec_type = priv->fec_type; + priv->prv_link_speed = priv->link_speed; + } else if (priv->link_speed == SPEED_25000 && + !(strcmp(priv->fec_type, "no-fec"))) { + netdev_warn(priv->dev, "E-tile already with %d/%s\n", priv->link_speed, + priv->fec_type); + return 0; + } else if 
(priv->link_speed == SPEED_10000 && + !(strcmp(priv->fec_type, "no-fec"))) { + netdev_warn(priv->dev, "E-tile already with %d/%s\n", priv->link_speed, + priv->fec_type); + return 0; + } + break; + default: + break; + } + + return 0; +} + +static const struct ethtool_ops etile_ethtool_ops = { + .get_drvinfo = etile_get_drvinfo, + .get_regs_len = etile_reglen, + .get_regs = etile_get_regs, + .get_link = ethtool_op_get_link, + .get_link = ethtool_op_get_link, + .get_strings = etile_gstrings, + .get_sset_count = etile_sset_count, + .get_ethtool_stats = etile_fill_stats, + .get_msglevel = etile_get_msglevel, + .set_msglevel = etile_set_msglevel, + .get_pauseparam = etile_get_pauseparam, + .set_pauseparam = etile_set_pauseparam, + .get_ts_info = etile_get_ts_info, + .get_link_ksettings = etile_get_link_ksettings, + .set_link_ksettings = etile_set_link_ksettings, + .get_fecparam = etile_get_fec_param, + .set_fecparam = etile_set_fec_param, + +}; + +void intel_fpga_etile_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &etile_ethtool_ops; +} diff --git a/drivers/net/ethernet/altera/intel_fpga_etile_fec.c b/drivers/net/ethernet/altera/intel_fpga_etile_fec.c new file mode 100644 index 0000000000000..02e16903cd1d9 --- /dev/null +++ b/drivers/net/ethernet/altera/intel_fpga_etile_fec.c @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Intel FPGA E-tile Forward Error Correction (FEC) Linux driver + * Copyright (C) 2020-2022 Intel Corporation. All rights reserved. 
+ * + * Contributors: + * Joyce Ooi + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "altera_eth_dma.h" +#include "altera_utils.h" +#include "intel_fpga_etile.h" + +#define MAX_COUNT_OFFSET 64000 + +/* Init FEC */ +int fec_init(struct platform_device *pdev, struct intel_fpga_etile_eth_private *priv) +{ + int ret; + + /* get FEC type from device tree */ + ret = of_property_read_string(pdev->dev.of_node, "fec-type", + &priv->fec_type); + if (ret < 0) { + dev_err(&pdev->dev, "cannot obtain fec-type\n"); + return ret; + } + dev_info(&pdev->dev, "\tFEC type is %s\n", priv->fec_type); + + /* get FEC channel from device tree */ + if (of_property_read_u32(pdev->dev.of_node, "fec-cw-pos-rx", + &priv->rsfec_cw_pos_rx)) { + dev_err(&pdev->dev, "cannot obtain fec codeword bit position!\n"); + return -ENXIO; + } + dev_info(&pdev->dev, "\trsfec rx codeword bit position is 0x%x\n", + priv->rsfec_cw_pos_rx); + + return 0; +} + +/* Calculate Unit Interval Adjustments */ +void ui_adjustments(struct timer_list *t) +{ + struct intel_fpga_etile_eth_private *priv = from_timer(priv, t, fec_timer); + u32 tx_tam_l_initial, tx_tam_h_initial, tx_tam_count_initial; + u32 rx_tam_l_initial, rx_tam_h_initial, rx_tam_count_initial; + u32 tx_tam_l_nth, tx_tam_h_nth, tx_tam_count_nth; + u32 rx_tam_l_nth, rx_tam_h_nth, rx_tam_count_nth; + u64 tx_tam_initial, rx_tam_initial, tx_tam_nth, rx_tam_nth; + u32 tx_tam_interval = 0, rx_tam_interval = 0; + u32 tx_tam_count_est = 0, rx_tam_count_est = 0, ui_value, tx_tam_count, rx_tam_count; + u64 tx_tam_delta, rx_tam_delta; + u64 tx_ui = 0, rx_ui = 0; + u64 start_jiffies; + u32 ui_value_16bit_fns = 0; + + start_jiffies = get_jiffies_64(); + /* Set tam_snapshot to 1 to take the first snapshot of the Time of + * Alignment marker (TAM) + */ + tse_set_bit(priv->mac_dev, eth_ptp_csroffs(tam_snapshot), + ETH_TAM_SNAPSHOT); + + /* Read snapshotted initial TX TAM and counter 
values */ + tx_tam_l_initial = csrrd32(priv->mac_dev, eth_ptp_csroffs(tx_tam_l)); + tx_tam_h_initial = csrrd32(priv->mac_dev, eth_ptp_csroffs(tx_tam_h)); + tx_tam_initial = ((u64)tx_tam_h_initial << 32) | tx_tam_l_initial; + tx_tam_count_initial = csrrd32(priv->mac_dev, eth_ptp_csroffs(tx_count)); + + /* Read snapshotted initial RX TAM and counter values */ + rx_tam_l_initial = csrrd32(priv->mac_dev, eth_ptp_csroffs(rx_tam_l)); + rx_tam_h_initial = csrrd32(priv->mac_dev, eth_ptp_csroffs(rx_tam_h)); + rx_tam_initial = ((u64)rx_tam_h_initial << 32) | rx_tam_l_initial; + rx_tam_count_initial = csrrd32(priv->mac_dev, eth_ptp_csroffs(rx_count)); + + /* Clear snapshot */ + tse_clear_bit(priv->mac_dev, eth_ptp_csroffs(tam_snapshot), + ETH_TAM_SNAPSHOT); + + /* Wait for a few TAM interval */ + udelay(5300); + + /* Request snapshot of Nth TX TAM and RX TAM */ + tse_set_bit(priv->mac_dev, eth_ptp_csroffs(tam_snapshot), + ETH_TAM_SNAPSHOT); + + /* Read snapshotted of Nth TX TAM and counter values */ + tx_tam_l_nth = csrrd32(priv->mac_dev, eth_ptp_csroffs(tx_tam_l)); + tx_tam_h_nth = csrrd32(priv->mac_dev, eth_ptp_csroffs(tx_tam_h)); + tx_tam_nth = ((u64)tx_tam_h_nth << 32) | tx_tam_l_nth; + tx_tam_count_nth = csrrd32(priv->mac_dev, eth_ptp_csroffs(tx_count)); + + /* Read snapshotted of Nth RX TAM and counter values */ + rx_tam_l_nth = csrrd32(priv->mac_dev, eth_ptp_csroffs(rx_tam_l)); + rx_tam_h_nth = csrrd32(priv->mac_dev, eth_ptp_csroffs(rx_tam_h)); + rx_tam_nth = ((u64)rx_tam_h_nth << 32) | rx_tam_l_nth; + rx_tam_count_nth = csrrd32(priv->mac_dev, eth_ptp_csroffs(rx_count)); + + /* Clear snapshot */ + tse_clear_bit(priv->mac_dev, eth_ptp_csroffs(tam_snapshot), + ETH_TAM_SNAPSHOT); + if ((get_jiffies_64() - start_jiffies) > HZ) { + netdev_warn(priv->dev, + "%s:1st to Nth snapshot takes more than 1 second\n", + __func__); + goto ui_restart; + } + + /* Calculate new UI value */ + /* Reference Time (TAM) interval = AM interval * Unit interval of serial bit + * AM interval for 
No FEC for 10/25GbE: TX = 5406720, RX = 6336 + * AM interval for KR-FEC for 25GbE: TX = 5406720, RX = 5406720 + * Unit interval of serial bit = 0.0387878 nanoseconds + */ + if (!strcasecmp(priv->fec_type, "kr-fec")) { + tx_tam_interval = 5406720; + rx_tam_interval = 5406720; + } else if (!strcasecmp(priv->fec_type, "no-fec")) { + tx_tam_interval = 5406720; + rx_tam_interval = 6336; + } + + /* Calculate time elapsed */ + if (tx_tam_nth <= tx_tam_initial) + tx_tam_delta = (tx_tam_nth + (int_pow(10, 9) << 16)) - tx_tam_initial; + else + tx_tam_delta = tx_tam_nth - tx_tam_initial; + + if (rx_tam_nth <= rx_tam_initial) + rx_tam_delta = (rx_tam_nth + (int_pow(10, 9) << 16)) - rx_tam_initial; + else + rx_tam_delta = rx_tam_nth - rx_tam_initial; + + if (dr_link_state == 1) { + switch (priv->link_speed) { + case SPEED_10000: + ui_value = INTEL_FPGA_ETILE_UI_VALUE_10G; + ui_value_16bit_fns = ui_value >> 8; + break; + case SPEED_25000: + ui_value = INTEL_FPGA_ETILE_UI_VALUE_25G; + ui_value_16bit_fns = ui_value >> 8; + break; + default: + ui_value = 0; + } + } else { + switch (priv->phy_iface) { + case PHY_INTERFACE_MODE_10GKR: + case PHY_INTERFACE_MODE_10GBASER: + ui_value = INTEL_FPGA_ETILE_UI_VALUE_10G; + ui_value_16bit_fns = ui_value >> 8; + break; + case PHY_INTERFACE_MODE_25GBASER: + ui_value = INTEL_FPGA_ETILE_UI_VALUE_25G; + ui_value_16bit_fns = ui_value >> 8; + break; + default: + ui_value = 0; + } + } + + /* Calculate estimated count value */ + if (ui_value > 0) { + if (tx_tam_interval > 0) + tx_tam_count_est = tx_tam_delta / + (tx_tam_interval * ui_value_16bit_fns); + + if (rx_tam_interval > 0) + rx_tam_count_est = rx_tam_delta / + (rx_tam_interval * ui_value_16bit_fns); + } + + /* if estimated count value is more than 64000 (max count value with + * offset), discard the snapshot and repeat steps + */ + if (tx_tam_count_est > MAX_COUNT_OFFSET) { + netdev_warn(priv->dev, + "Est tx count exceeded: %u = %llu / (%u * 0x%x)", + tx_tam_count_est, tx_tam_delta, 
tx_tam_interval, + ui_value_16bit_fns); + netdev_warn(priv->dev, "tx_tam_nth: %llu, tx_tam_initial: %llu\n", + tx_tam_nth, tx_tam_initial); + goto ui_restart; + } + + if (rx_tam_count_est > MAX_COUNT_OFFSET) { + netdev_warn(priv->dev, + "Est rx count exceeded:%u = %llu / (%u * 0x%x)", + rx_tam_count_est, rx_tam_delta, rx_tam_interval, + ui_value_16bit_fns); + netdev_warn(priv->dev, "rx_tam_nth: %llu, rx_tam_initial: %llu\n", + rx_tam_nth, rx_tam_initial); + goto ui_restart; + } + + /* Calculate TAM count value */ + if (tx_tam_count_nth <= tx_tam_count_initial) + tx_tam_count = (tx_tam_count_nth + int_pow(2, 16)) - tx_tam_count_initial; + else + tx_tam_count = tx_tam_count_nth - tx_tam_count_initial; + + if (rx_tam_count_nth <= rx_tam_count_initial) + rx_tam_count = (rx_tam_count_nth + int_pow(2, 16)) - rx_tam_count_initial; + else + rx_tam_count = rx_tam_count_nth - rx_tam_count_initial; + + /* Calculate UI value */ + if (tx_tam_count > 0 && tx_tam_interval > 0) + tx_ui = (tx_tam_delta * int_pow(2, 8)) / (tx_tam_count * tx_tam_interval); + + if (rx_tam_count > 0 && rx_tam_interval > 0) + rx_ui = (rx_tam_delta * int_pow(2, 8)) / (rx_tam_count * rx_tam_interval); + + /* UI Adjustment for 25G kr-fec */ + if (priv->link_speed == SPEED_25000) { + if (tx_ui > 0x9EE42 || tx_ui < 0x9EDC0) { + netdev_warn(priv->dev, + "%s:TX UI value(0x%llx) for 25G is not within range", + __func__, tx_ui); + goto ui_restart; + } + if (rx_ui > 0x9EE42 || rx_ui < 0x9EDC0) { + netdev_warn(priv->dev, + "%s:RX UI value(0x%llx) for 25G is not within range", + __func__, rx_ui); + goto ui_restart; + } + + } else { + if (tx_ui > 0x18D3A4 || tx_ui < 0x18D25F) { + netdev_warn(priv->dev, + "%s:TX UI value (0x%llx) for 10G is not within range", + __func__, tx_ui); + goto ui_restart; + } + + if (rx_ui > 0x18D3A4 || rx_ui < 0x18D25F) { + netdev_warn(priv->dev, + "%s:RX UI value (0x%llx) for 10G is not within range", + __func__, rx_ui); + goto ui_restart; + } + } + csrwr32(tx_ui, priv->mac_dev, 
eth_ptp_csroffs(tx_ui_reg)); + csrwr32(rx_ui, priv->mac_dev, eth_ptp_csroffs(rx_ui_reg)); + +ui_restart: + mod_timer(&priv->fec_timer, jiffies + msecs_to_jiffies(1000)); +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/altera/intel_fpga_etile_main.c b/drivers/net/ethernet/altera/intel_fpga_etile_main.c new file mode 100644 index 0000000000000..3a752098a79df --- /dev/null +++ b/drivers/net/ethernet/altera/intel_fpga_etile_main.c @@ -0,0 +1,3282 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Intel FPGA E-tile Ethernet MAC driver + * Copyright (C) 2020-2022 Intel Corporation. All rights reserved + * + * Contributors: + * Roman Bulgakov + * Yu Ying Choov + * Dalon Westergreen + * Joyce Ooi + * Arzu Ozdogan-tackin + * + * Original driver contributed by GlobalLogic. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "altera_eth_dma.h" +#include "altera_msgdma.h" +#include "altera_msgdma_prefetcher.h" +#include "altera_sgdma.h" +#include "altera_utils.h" +#include "intel_fpga_tod.h" +#include "intel_fpga_etile.h" + +struct qsfp *qsfp_tmp; +const char *phy_mode_etile; + +/* Module parameters */ +static int debug = -1; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); + +bool pma_enable; + +static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN); + +#define RX_DESCRIPTORS 512 +static int dma_rx_num = RX_DESCRIPTORS; +module_param(dma_rx_num, int, 0644); +MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list"); + +#define TX_DESCRIPTORS 512 +static int dma_tx_num = TX_DESCRIPTORS; +module_param(dma_tx_num, int, 0644); +MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list"); + +static int 
flow_ctrl = FLOW_OFF; +module_param(flow_ctrl, int, 0644); +MODULE_PARM_DESC(flow_ctrl, "Flow control (0: off, 1: rx, 2: tx, 3: on)"); + +static int pause = MAC_PAUSEFRAME_QUANTA; +module_param(pause, int, 0644); +MODULE_PARM_DESC(pause, "Flow Control Pause Time"); + +/* Make sure DMA buffer size is larger than the max frame size + * plus some alignment offset and a VLAN header. If the max frame size is + * 1518, a VLAN header would be additional 4 bytes and additional + * headroom for alignment is 2 bytes, 2048 is just fine. + */ +#define INTEL_FPGA_RXDMABUFFER_SIZE 2048 +#define INTEL_FPGA_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) + +static const struct of_device_id intel_fpga_etile_ll_ids[]; + +/* Allow network stack to resume queueing packets after we've + * finished transmitting at least 1/4 of the packets in the queue. + */ +#define ETH_TX_THRESH(x) ((x)->dma_priv.tx_ring_size / 4) + +#define TXQUEUESTOP_THRESHOLD 2 + +static inline u32 etile_tx_avail(struct intel_fpga_etile_eth_private *priv) +{ + return priv->dma_priv.tx_cons + priv->dma_priv.tx_ring_size + - priv->dma_priv.tx_prod - 1; +} + +static int etile_init_rx_buffer(struct intel_fpga_etile_eth_private *priv, + struct altera_dma_buffer *rxbuffer, int len) +{ + rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len); + if (!rxbuffer->skb) + return -ENOMEM; + + rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data, + len, DMA_FROM_DEVICE); + + if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) { + netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); + dev_kfree_skb_any(rxbuffer->skb); + return -EINVAL; + } + rxbuffer->dma_addr &= (dma_addr_t)~3; + rxbuffer->len = len; + + return 0; +} + +static void etile_free_rx_buffer(struct intel_fpga_etile_eth_private *priv, + struct altera_dma_buffer *rxbuffer) +{ + struct sk_buff *skb = rxbuffer->skb; + dma_addr_t dma_addr = rxbuffer->dma_addr; + + if (skb) { + if (dma_addr) + dma_unmap_single(priv->device, dma_addr, + 
rxbuffer->len, + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + rxbuffer->skb = NULL; + rxbuffer->dma_addr = 0; + } +} + +/* Unmap and free Tx buffer resources + */ +static void etile_free_tx_buffer(struct intel_fpga_etile_eth_private *priv, + struct altera_dma_buffer *buffer) +{ + if (buffer->dma_addr) { + if (buffer->mapped_as_page) + dma_unmap_page(priv->device, buffer->dma_addr, + buffer->len, DMA_TO_DEVICE); + else + dma_unmap_single(priv->device, buffer->dma_addr, + buffer->len, DMA_TO_DEVICE); + buffer->dma_addr = 0; + } + if (buffer->skb) { + dev_kfree_skb_any(buffer->skb); + buffer->skb = NULL; + } +} + +static int etile_alloc_init_skbufs(struct intel_fpga_etile_eth_private *priv) +{ + unsigned int rx_descs = priv->dma_priv.rx_ring_size; + unsigned int tx_descs = priv->dma_priv.tx_ring_size; + int ret = -ENOMEM; + int i; + + /* Create Rx ring buffer */ + priv->dma_priv.rx_ring = kcalloc(rx_descs, + sizeof(struct altera_dma_private), + GFP_KERNEL); + if (!priv->dma_priv.rx_ring) + goto err_rx_ring; + + /* Create Tx ring buffer */ + priv->dma_priv.tx_ring = kcalloc(tx_descs, + sizeof(struct altera_dma_private), + GFP_KERNEL); + if (!priv->dma_priv.tx_ring) + goto err_tx_ring; + + priv->dma_priv.tx_cons = 0; + priv->dma_priv.tx_prod = 0; + + /* Init Rx FIFO */ + csrwr32(priv->rx_fifo_almost_full, priv->rx_fifo, + rx_fifo_csroffs(almost_full_threshold)); + csrwr32(priv->rx_fifo_almost_empty, priv->rx_fifo, + rx_fifo_csroffs(almost_empty_threshold)); + + /* Init Rx ring */ + for (i = 0; i < rx_descs; i++) { + ret = etile_init_rx_buffer(priv, &priv->dma_priv.rx_ring[i], + priv->dma_priv.rx_dma_buf_sz); + if (ret) + goto err_init_rx_buffers; + } + + priv->dma_priv.rx_cons = 0; + priv->dma_priv.rx_prod = 0; + + return 0; +err_init_rx_buffers: + while (--i >= 0) + etile_free_rx_buffer(priv, &priv->dma_priv.rx_ring[i]); + kfree(priv->dma_priv.tx_ring); +err_tx_ring: + kfree(priv->dma_priv.rx_ring); +err_rx_ring: + return ret; +} + +static void 
etile_free_skbufs(struct net_device *dev) +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + unsigned int rx_descs = priv->dma_priv.rx_ring_size; + unsigned int tx_descs = priv->dma_priv.tx_ring_size; + int i; + + /* Release the DMA TX/RX socket buffers */ + for (i = 0; i < rx_descs; i++) + etile_free_rx_buffer(priv, &priv->dma_priv.rx_ring[i]); + for (i = 0; i < tx_descs; i++) + etile_free_tx_buffer(priv, &priv->dma_priv.tx_ring[i]); +} + +/* Reallocate the skb for the reception process + */ +static inline void etile_rx_refill(struct intel_fpga_etile_eth_private *priv) +{ + unsigned int rxsize = priv->dma_priv.rx_ring_size; + unsigned int entry; + int ret; + + for (; priv->dma_priv.rx_cons - priv->dma_priv.rx_prod > 0; + priv->dma_priv.rx_prod++) { + entry = priv->dma_priv.rx_prod % rxsize; + if (likely(!priv->dma_priv.rx_ring[entry].skb)) { + ret = etile_init_rx_buffer(priv, + &priv->dma_priv.rx_ring[entry], + priv->dma_priv.rx_dma_buf_sz); + if (unlikely(ret != 0)) + break; + priv->dmaops->add_rx_desc(&priv->dma_priv, + &priv->dma_priv.rx_ring[entry]); + } + } +} + +/* Pull out the VLAN tag and fix up the packet + */ +static inline void etile_rx_vlan(struct net_device *dev, struct sk_buff *skb) +{ + struct ethhdr *eth_hdr; + u16 vid; + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + !__vlan_get_tag(skb, &vid)) { + eth_hdr = (struct ethhdr *)skb->data; + memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); + skb_pull(skb, VLAN_HLEN); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } +} + +/* Receive a packet: retrieve and pass over to upper levels + */ +static int etile_rx(struct intel_fpga_etile_eth_private *priv, int limit) +{ + unsigned int count = 0; + unsigned int next_entry; + struct sk_buff *skb; + unsigned int entry = + priv->dma_priv.rx_cons % priv->dma_priv.rx_ring_size; + u32 rxstatus; + u16 pktlength; + u16 pktstatus; + + while ((count < limit) && + ((rxstatus = priv->dmaops->get_rx_status(&priv->dma_priv)) + != 0)) 
{ + pktstatus = rxstatus >> 16; + pktlength = rxstatus & 0xffff; + + if ((pktstatus & 0xff) || pktlength == 0) + netdev_err(priv->dev, + "RCV pktstatus %08X pktlength %08X\n", + pktstatus, pktlength); + + /* DMA transfer from TSE starts with 2 additional bytes for + * IP payload alignment. Status returned by get_rx_status() + * contains DMA transfer length. Packet is 2 bytes shorter. + */ + /* pktlength -= 2;*/ + + count++; + next_entry = (++priv->dma_priv.rx_cons) + % priv->dma_priv.rx_ring_size; + + skb = priv->dma_priv.rx_ring[entry].skb; + if (unlikely(!skb)) { + netdev_err(priv->dev, + "%s: Inconsistent Rx descriptor chain\n", + __func__); + priv->dev->stats.rx_dropped++; + break; + } + priv->dma_priv.rx_ring[entry].skb = NULL; + skb_put(skb, pktlength); + + /* make cache consistent with receive packet buffer */ + dma_sync_single_for_cpu(priv->device, + priv->dma_priv.rx_ring[entry].dma_addr, + priv->dma_priv.rx_ring[entry].len, + DMA_FROM_DEVICE); + + dma_unmap_single(priv->device, + priv->dma_priv.rx_ring[entry].dma_addr, + priv->dma_priv.rx_ring[entry].len, + DMA_FROM_DEVICE); + + if (netif_msg_pktdata(priv)) { + netdev_info(priv->dev, "frame received %d bytes\n", + pktlength); + print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET, + 16, 1, skb->data, pktlength, true); + } + + etile_rx_vlan(priv->dev, skb); + skb->protocol = eth_type_trans(skb, priv->dev); + skb_checksum_none_assert(skb); + napi_gro_receive(&priv->napi, skb); + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += pktlength; + entry = next_entry; + etile_rx_refill(priv); + } + + return count; +} + +/* Reclaim resources after transmission completes + */ +static int etile_tx_complete(struct intel_fpga_etile_eth_private *priv) +{ + unsigned int txsize = priv->dma_priv.tx_ring_size; + u32 ready; + unsigned int entry; + struct altera_dma_buffer *tx_buff; + int txcomplete = 0; + + spin_lock(&priv->tx_lock); + ready = priv->dmaops->tx_completions(&priv->dma_priv); + + /* Free sent 
buffers */ + while (ready && (priv->dma_priv.tx_cons != priv->dma_priv.tx_prod)) { + entry = priv->dma_priv.tx_cons % txsize; + tx_buff = &priv->dma_priv.tx_ring[entry]; + + if (likely(tx_buff->skb)) + priv->dev->stats.tx_packets++; + + if (netif_msg_tx_done(priv)) + netdev_info(priv->dev, "%s: curr %d, dirty %d\n", + __func__, priv->dma_priv.tx_prod, + priv->dma_priv.tx_cons); + + etile_free_tx_buffer(priv, tx_buff); + priv->dma_priv.tx_cons++; + + txcomplete++; + ready--; + } + + if (unlikely(netif_queue_stopped(priv->dev) && + etile_tx_avail(priv) > ETH_TX_THRESH(priv))) { + netif_tx_lock(priv->dev); + if (netif_msg_tx_done(priv)) + netdev_info(priv->dev, "%s: restart transmit\n", + __func__); + netif_wake_queue(priv->dev); + netif_tx_unlock(priv->dev); + } + + spin_unlock(&priv->tx_lock); + + return txcomplete; +} + +/* NAPI polling function + */ +static int etile_poll(struct napi_struct *napi, int budget) +{ + struct intel_fpga_etile_eth_private *priv = + container_of(napi, struct intel_fpga_etile_eth_private, napi); + int rxcomplete = 0; + unsigned long flags; + + etile_tx_complete(priv); + + rxcomplete = etile_rx(priv, budget); + + if (rxcomplete < budget) { + napi_complete_done(napi, rxcomplete); + netdev_dbg(priv->dev, + "NAPI Complete, did %d packets with budget %d\n", + rxcomplete, budget); + spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + priv->dmaops->enable_rxirq(&priv->dma_priv); + priv->dmaops->enable_txirq(&priv->dma_priv); + spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + } + + return rxcomplete; +} + +/* DMA TX & RX FIFO interrupt routing + */ +static irqreturn_t intel_fpga_etile_isr(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct intel_fpga_etile_eth_private *priv; + + if (unlikely(!dev)) { + pr_err("%s: invalid dev pointer\n", __func__); + return IRQ_NONE; + } + priv = netdev_priv(dev); + + if (unlikely(netif_msg_intr(priv))) + netdev_info(dev, "Got TX/RX Interrupt"); + + spin_lock(&priv->rxdma_irq_lock); + /* 
reset IRQs */ + priv->dmaops->clear_rxirq(&priv->dma_priv); + priv->dmaops->clear_txirq(&priv->dma_priv); + spin_unlock(&priv->rxdma_irq_lock); + + if (likely(napi_schedule_prep(&priv->napi))) { + spin_lock(&priv->rxdma_irq_lock); + priv->dmaops->disable_rxirq(&priv->dma_priv); + priv->dmaops->disable_txirq(&priv->dma_priv); + spin_unlock(&priv->rxdma_irq_lock); + __napi_schedule(&priv->napi); + } + + return IRQ_HANDLED; +} + +/* Transmit a packet (called by the kernel). Dispatches + * either the SGDMA method for transmitting or the + * MSGDMA method, assumes no scatter/gather support, + * implying an assumption that there's only one + * physically contiguous fragment starting at + * skb->data, for length of skb_headlen(skb). + */ +static netdev_tx_t etile_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + unsigned int txsize = priv->dma_priv.tx_ring_size; + unsigned int entry; + struct altera_dma_buffer *buffer = NULL; + int nfrags = skb_shinfo(skb)->nr_frags; + unsigned int nopaged_len = skb_headlen(skb); + enum netdev_tx ret = NETDEV_TX_OK; + dma_addr_t dma_addr; + + spin_lock_bh(&priv->tx_lock); + + if (unlikely(etile_tx_avail(priv) < nfrags + 1)) { + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + /* This is a hard error, log it. 
*/ + netdev_err(priv->dev, + "%s: Tx list full when queue awake\n", + __func__); + } + ret = NETDEV_TX_BUSY; + goto out; + } + + if (unlikely(netif_msg_tx_queued(priv))) { + netdev_info(dev, "sending 0x%p, len=%d\n", skb, skb->len); + if (netif_msg_pktdata(priv)) + print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET, + 16, 1, skb->data, skb->len, true); + } + + /* Map the first skb fragment */ + entry = priv->dma_priv.tx_prod % txsize; + buffer = &priv->dma_priv.tx_ring[entry]; + + dma_addr = dma_map_single(priv->device, skb->data, nopaged_len, + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, dma_addr)) { + netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); + dev_kfree_skb_any(skb); + ret = -EINVAL; + goto out; + } + + buffer->skb = skb; + buffer->dma_addr = dma_addr; + buffer->len = nopaged_len; + + /* Push data out of the cache hierarchy into main memory */ + dma_sync_single_for_device(priv->device, buffer->dma_addr, + buffer->len, DMA_TO_DEVICE); + + priv->dmaops->tx_buffer(&priv->dma_priv, buffer); + + /* Provide a hardware time stamp if requested. + */ + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->dma_priv.hwts_tx_en)) + /* declare that device is doing timestamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + /* Provide a software time stamp if requested and hardware timestamping + * is not possible (SKBTX_IN_PROGRESS not set). 
+ */ + if (!priv->dma_priv.hwts_tx_en) + skb_tx_timestamp(skb); + + priv->dma_priv.tx_prod++; + dev->stats.tx_bytes += skb->len; + + if (unlikely(etile_tx_avail(priv) <= TXQUEUESTOP_THRESHOLD)) { + if (netif_msg_hw(priv)) + netdev_info(priv->dev, "%s: stop transmitted packets\n", + __func__); + netif_stop_queue(dev); + } + +out: + spin_unlock_bh(&priv->tx_lock); + + return ret; +} + +int etile_check_counter_complete(void __iomem *ioaddr, + size_t offs, u32 bit_mask, bool set_bit, + int align) +{ + int counter; + + counter = 0; + switch (align) { + case 8: /* byte aligned */ + while (counter++ < INTEL_FPGA_ETILE_SW_RESET_WATCHDOG_CNTR) { + if (set_bit) { + if (csrrd8(ioaddr, offs) & bit_mask) + break; + } else { + if ((csrrd8(ioaddr, offs) & bit_mask) == 0) + break; + } + udelay(1); + } + if (counter >= INTEL_FPGA_ETILE_SW_RESET_WATCHDOG_CNTR) { + if (set_bit) { + if ((csrrd8(ioaddr, offs) & bit_mask) == 0) + return -EINVAL; + } else { + if (csrrd8(ioaddr, offs) & bit_mask) + return -EINVAL; + } + } + break; + default: /* default is word aligned */ + while (counter++ < INTEL_FPGA_ETILE_SW_RESET_WATCHDOG_CNTR) { + if (set_bit) { + if (tse_bit_is_set(ioaddr, + offs, bit_mask)) + break; + } else { + if (tse_bit_is_clear(ioaddr, + offs, bit_mask)) + break; + } + udelay(1); + } + if (counter >= INTEL_FPGA_ETILE_SW_RESET_WATCHDOG_CNTR) { + if (set_bit) { + if (tse_bit_is_clear(ioaddr, + offs, bit_mask)) + return -EINVAL; + } else { + if (tse_bit_is_set(ioaddr, + offs, bit_mask)) + return -EINVAL; + } + } + break; + } + return 0; +} + +static void etile_set_mac(struct intel_fpga_etile_eth_private *priv, bool enable) +{ + if (enable) { + /* Enable Rx and Tx datapath */ + tse_clear_bit(priv->mac_dev, + eth_tx_mac_csroffs(tx_mac_conf), + ETH_TX_MAC_DISABLE_TXVMAC); + tse_clear_bit(priv->mac_dev, eth_rx_mac_csroffs(rx_mac_frwd_rx_crc), + ETH_RX_MAC_CRC_FORWARD); + } else { + /* Disable Rx and Tx datapath */ + tse_set_bit(priv->mac_dev, + eth_tx_mac_csroffs(tx_mac_conf), + 
ETH_TX_MAC_DISABLE_TXVMAC); + tse_clear_bit(priv->mac_dev, + eth_tx_mac_csroffs(tx_mac_conf), + ETH_TX_MAC_DISABLE_S_ADDR_EN); + netif_warn(priv, drv, priv->dev, "Stop done\n"); + } +} + +/* Change the MTU + */ +static int etile_change_mtu(struct net_device *dev, int new_mtu) +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + unsigned int max_mtu = priv->dev->max_mtu; + unsigned int min_mtu = priv->dev->min_mtu; + + if (netif_running(dev)) { + netdev_err(dev, "must be stopped to change its MTU\n"); + return -EBUSY; + } + + if (new_mtu < min_mtu || new_mtu > max_mtu) { + netdev_err(dev, "invalid MTU, max MTU is: %u\n", max_mtu); + return -EINVAL; + } + + dev->mtu = new_mtu; + netdev_update_features(dev); + + return 0; +} + +static void etile_update_mac_addr(struct intel_fpga_etile_eth_private *priv, + const unsigned char *addr) +{ + u32 msb; + u32 lsb; + + lsb = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5]; + msb = ((addr[0] << 8) | addr[1]) & 0xffff; + /* Set primary MAC address */ + csrwr32(lsb, priv->mac_dev, eth_tx_mac_csroffs(tx_mac_source_addr_lower_bytes)); + csrwr32(msb, priv->mac_dev, eth_tx_mac_csroffs(tx_mac_source_addr_higher_bytes)); + + tse_set_bit(priv->mac_dev, eth_tx_mac_csroffs(tx_mac_conf), + ETH_TX_MAC_DISABLE_S_ADDR_EN); +} + +static void etile_set_mac_flow_ctrl(struct intel_fpga_etile_eth_private *priv) +{ + u32 reg; + + if (priv->flow_ctrl & FLOW_RX) + tse_set_bit(priv->mac_dev, + eth_pause_and_priority_csroffs(rx_flow_control_feature_cfg), + ETH_RX_EN_STD_FLOW_CTRL); + else + tse_clear_bit(priv->mac_dev, + eth_pause_and_priority_csroffs(rx_flow_control_feature_cfg), + ETH_RX_EN_STD_FLOW_CTRL); + + reg = csrrd32(priv->mac_dev, + eth_pause_and_priority_csroffs(rx_flow_control_feature_cfg)); + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "E-tile rx_flow_ctrl: 0x%08x\n", reg); + + if (priv->flow_ctrl & FLOW_TX) { + tse_set_bit(priv->mac_dev, + eth_pause_and_priority_csroffs(tx_flow_control_feature_cfg), + 
ETH_TX_EN_PRIORITY_FLOW_CTRL); + } else { + tse_clear_bit(priv->mac_dev, + eth_pause_and_priority_csroffs(tx_flow_control_feature_cfg), + ETH_TX_EN_PRIORITY_FLOW_CTRL); + } + + reg = csrrd32(priv->mac_dev, + eth_pause_and_priority_csroffs(tx_flow_control_feature_cfg)); + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "E-tile tx_flow_ctrl: 0x%08x\n", reg); + + csrwr32(priv->pause, priv->mac_dev, + eth_pause_and_priority_csroffs(pause_quanta_0)); + + reg = csrrd32(priv->mac_dev, + eth_pause_and_priority_csroffs(pause_quanta_0)); + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "E-tile: pause_quanta0: 0x%08x\n", reg); + + if (dr_link_state == 1) + netdev_info(priv->dev, "E-tile with %d/%s\n", priv->link_speed, + priv->fec_type); +} + +static void etile_clear_mac_statistics(struct intel_fpga_etile_eth_private *priv) +{ + /* Clear all statistics counters for the receive and transmit path */ + tse_set_bit(priv->mac_dev, eth_tx_stats_csroffs(tx_cntr_config), + ETH_TX_CNTR_CFG_RST_ALL); + tse_clear_bit(priv->mac_dev, eth_tx_stats_csroffs(tx_cntr_config), + ETH_TX_CNTR_CFG_RST_ALL); + tse_set_bit(priv->mac_dev, eth_rx_stats_csroffs(rx_cntr_config), + ETH_RX_CNTR_CFG_RST_ALL); + tse_clear_bit(priv->mac_dev, eth_rx_stats_csroffs(rx_cntr_config), + ETH_RX_CNTR_CFG_RST_ALL); +} + +/* Set or clear the multicast filter for this adaptor + */ +static void etile_set_rx_mode(struct net_device *dev) +{ + /* Not Supported */ +} + +static int eth_etile_tx_rx_user_flow(struct intel_fpga_etile_eth_private *priv) +{ + u32 tx_pma_delay_ns = 0; + u32 tx_extra_latency = 0; + u32 rx_fec_cw_pos = 0; + u32 rx_spulse_offset = 0; + u32 rx_pma_delay_ns = 0; + u32 rx_extra_latency = 0; + u32 ui_value; + u8 rx_bitslip_cnt = 0; + u8 rx_fec_cw_pos_b0 = 0; + u8 rx_fec_cw_pos_b8 = 0; + + int ret; + const char *kr_fec = "kr-fec"; + + if (dr_link_state == 1) { + switch (priv->link_speed) { + case SPEED_10000: + ui_value = INTEL_FPGA_ETILE_UI_VALUE_10G; + break; + case SPEED_25000: + ui_value = 
INTEL_FPGA_ETILE_UI_VALUE_25G; + break; + default: + return -ENODEV; + } + } else { + switch (priv->phy_iface) { + case PHY_INTERFACE_MODE_10GKR: + case PHY_INTERFACE_MODE_10GBASER: + ui_value = INTEL_FPGA_ETILE_UI_VALUE_10G; + break; + case PHY_INTERFACE_MODE_25GBASER: + ui_value = INTEL_FPGA_ETILE_UI_VALUE_25G; + break; + default: + return -ENODEV; + } + } + + /* TX User Flow */ + /* Step 1 After power up or reset, wait until TX data path is up */ + if (etile_check_counter_complete(priv->mac_dev, + eth_phy_csroffs(phy_tx_datapath_ready), + ETH_PHY_TX_PCS_READY, true, + INTEL_FPGA_WORD_ALIGN)) { + netdev_err(priv->dev, "MAC Tx datapath not ready\n"); + return -EINVAL; + } + + /* Step 2 Calculate TX extra latency */ + /* Convert unit of TX PMA delay from UI to nanoseconds */ + tx_pma_delay_ns = INTEL_FPGA_TX_PMA_DELAY_25G * ui_value; + + /* Get Tx external PHY delay from vendor and add in device tree + * and total up all extra latency together + */ + tx_extra_latency = (tx_pma_delay_ns + priv->tx_external_phy_delay_ns) >> 8; + + /* Step 3 Write TX extra latency*/ + csrwr32(tx_extra_latency, priv->mac_dev, eth_ptp_csroffs(tx_ptp_extra_latency)); + // wait until TX PTP is ready -> o_sl_tx_ptp_ready = 1'b1 + // TX PTP is up + // Adjust TX UI + + /* RX User Flow */ + /* Step 1 After power up or reset, wait until RX data path is up */ + if (etile_check_counter_complete(priv->mac_dev, + eth_phy_csroffs(phy_pcs_stat_anlt), + ETH_PHY_RX_PCS_ALIGNED, true, + INTEL_FPGA_WORD_ALIGN)) { + netdev_err(priv->dev, "MAC Rx datapath not ready\n"); + return -EINVAL; + } + + /* Check for 25G FEC variants */ + if (priv->link_speed == SPEED_25000 && (!strcasecmp(kr_fec, priv->fec_type))) { + /* Step 2a Read RX FEC codeword position */ + switch (priv->rsfec_cw_pos_rx) { + case 0: + rx_fec_cw_pos_b0 = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_cw_pos_rx_0_b0)); + rx_fec_cw_pos_b8 = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_cw_pos_rx_0_b8)); + break; + case 1: + rx_fec_cw_pos_b0 = 
csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_cw_pos_rx_1_b0)); + rx_fec_cw_pos_b8 = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_cw_pos_rx_1_b8)); + break; + case 2: + rx_fec_cw_pos_b0 = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_cw_pos_rx_2_b0)); + rx_fec_cw_pos_b8 = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_cw_pos_rx_2_b8)); + break; + case 3: + default: + rx_fec_cw_pos_b0 = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_cw_pos_rx_3_b0)); + rx_fec_cw_pos_b8 = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_cw_pos_rx_3_b8)); + break; + } + + rx_fec_cw_pos = (rx_fec_cw_pos_b8 << 8) | rx_fec_cw_pos_b0; + + /* Step 3 Determine sync pulse (Alignment Marker) + * offsets with reference to async pulse + */ + rx_spulse_offset = (rx_fec_cw_pos * ui_value); + + netdev_info(priv->dev, "Rx FEC lane:%d codeword pos:%d ui value:0x%x\n", + priv->rsfec_cw_pos_rx, rx_fec_cw_pos, ui_value); + + /* Step 4 Calculate RX Extra latency and total up extra latency together */ + rx_pma_delay_ns = (INTEL_FPGA_RX_PMA_DELAY_25G * ui_value); + rx_extra_latency = ((rx_pma_delay_ns + priv->rx_external_phy_delay_ns - + rx_spulse_offset) >> 8) | 0x80000000; + } else { + /* Step 2b Read bitslip count from IP */ + rx_bitslip_cnt = csrrd8(priv->xcvr, eth_pma_avmm_csroffs(reg_028)); + + /* Step 3 Determine sync pulse (Alignment Marker) + * offsets with reference to async pulse + */ + if (rx_bitslip_cnt > 62) { + rx_spulse_offset = (rx_bitslip_cnt - 66) * ui_value; + if (rx_bitslip_cnt > 62 && rx_bitslip_cnt <= 66) { + netdev_warn(priv->dev, + "rx_blitslip_cnt value :%d is incorrect!\n", + rx_bitslip_cnt); + } + } else { + rx_spulse_offset = (rx_bitslip_cnt * ui_value); + } + + netdev_info(priv->dev, "Rx bitslip cnt:%d ui value:%x\n", + rx_bitslip_cnt, ui_value); + + /* Step 4 Calculate RX Extra latency and total up extra latency together */ + rx_pma_delay_ns = (INTEL_FPGA_RX_PMA_DELAY_25G * ui_value); + rx_extra_latency = ((rx_pma_delay_ns + rx_spulse_offset + + priv->rx_external_phy_delay_ns) 
>> 8) | 0x80000000; + } + + /* Step 5 Write RX extra Latency */ + csrwr32(rx_extra_latency, priv->mac_dev, eth_ptp_csroffs(rx_ptp_extra_latency)); + + netdev_info(priv->dev, "tx_extra_latency:0x%x , rx_extra_latency:0x%x\n", + tx_extra_latency, rx_extra_latency); + + /* Adjust UI value */ + timer_setup(&priv->fec_timer, ui_adjustments, 0); + ret = mod_timer(&priv->fec_timer, jiffies + msecs_to_jiffies(5000)); + if (ret) + netdev_err(priv->dev, "Timer failed to start UI adjustment\n"); + + return 0; +} + +static void etile_rsfec_reconfiguration(struct intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + u8 fec_lane_ena = 0; + u8 core_tx_in_sel = 0; + u8 core_rx_out_sel = 0; + + /* Step 4b - Reconfigure RSFEC Reconfiguration Registers + * rsfec_top_clk_cfg: Lane enable. One bit per lane + * rsfec_top_tx_cfg: Select rsfec tx for Lane # + * 3b'001 : Select EHIP Lane TX Data + * 3b'110 : FEC Lane Disabled - tie inputs to 0 + * Lane #3: Bit: [14:12] + * Lane #2: Bit: [10:8] + * rsfec_top_rx_cfg: Select rsfec rx for Lane # + * Offset Bit 25Gptpfec-10Gptp 10Gptp-25Gptpfec + * 0x4 11:8 4'b0xxx 4'b1xxx + * 0x10 [XX:XX] 3b'110 3b'001 + * + * 0x10 10:8 + */ + /** + *1 RSFEC Lane enable. One bit per lane i.e., bit0 = lane0. + * This design uses RSFEC lane 3. 
+ *2 RS-FEC TX Select for Lane 3 + *3 RS-FEC RX Output Select for Lane 3 + **/ + + /*25gptpfec-25gptpnofec and 25gptpfec – 10gptp*/ + + if ((priv->link_speed == SPEED_25000 || priv->link_speed == SPEED_10000) && + (!strcmp(priv->fec_type, "no-fec"))) { + switch (priv->rsfec_cw_pos_rx) { + case 0: + fec_lane_ena = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + /* 0xE = enable lane 0 , bit[0] = lane0 */ + fec_lane_ena &= 0xE; + csrwr8(fec_lane_ena, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + core_tx_in_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b0)); + core_tx_in_sel &= 0xF0; + /*make sure do not disturbed the RS-FEC TX Select For Lane1*/ + core_tx_in_sel |= 0x1; + /*setting bit 2:0 to b01 RS-FEC TX Select For Lane0*/ + csrwr8(core_tx_in_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b0)); + core_rx_out_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b0)); + core_rx_out_sel &= 0xF0; + core_rx_out_sel |= 0x1; + csrwr8(core_rx_out_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b0)); + break; + case 1: + fec_lane_ena = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + /*0xE = enable lane 1 , bit[1] = lane1*/ + fec_lane_ena &= 0xD; + csrwr8(fec_lane_ena, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + core_tx_in_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b0)); + core_tx_in_sel &= 0xF; + core_tx_in_sel |= (0x1 << 4); + csrwr8(core_tx_in_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b0)); + core_rx_out_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b0)); + core_rx_out_sel &= 0xF0; + core_rx_out_sel |= 0x1; + csrwr8(core_rx_out_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b0)); + break; + case 2: + fec_lane_ena = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + /*0xE = enable lane 1 , bit[2] = 0, lane2*/ + fec_lane_ena &= 0xB; + csrwr8(fec_lane_ena, priv->rsfec, + 
eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + core_tx_in_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b8)); + core_tx_in_sel &= 0xF0; + core_tx_in_sel |= 0x1; + csrwr8(core_tx_in_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b8)); + core_rx_out_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b8)); + core_rx_out_sel &= 0xF0; + core_rx_out_sel |= 0x1; + csrwr8(core_rx_out_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b8)); + break; + case 3: + fec_lane_ena = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + fec_lane_ena &= 0xF7; + fec_lane_ena |= 0x0; + csrwr8(fec_lane_ena, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + fec_lane_ena = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + core_tx_in_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b8)); + core_tx_in_sel &= 0x8F; + core_tx_in_sel |= 0x60; + csrwr8(core_tx_in_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b8)); + core_tx_in_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b8)); + core_rx_out_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b8)); + core_rx_out_sel &= 0xCF; + core_rx_out_sel |= 0x0; + csrwr8(core_rx_out_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b8)); + core_rx_out_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b8)); + break; + } + } + + if (priv->link_speed == SPEED_25000 && (!(strcmp(priv->fec_type, "kr-fec")))) { + csrwr8(0x0, priv->rsfec, eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + switch (priv->rsfec_cw_pos_rx) { + case 0: + fec_lane_ena = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + /*0xE = enable lane 0 , bit[0] = lane0*/ + fec_lane_ena &= 0xE; + csrwr8(fec_lane_ena, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + core_tx_in_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b0)); + core_tx_in_sel &= 0xF0; + /* make sure do not disturbed the RS-FEC TX Select For Lane 1 */ + 
core_tx_in_sel |= 0x1; + /*setting bit2:0 to b01 RS-FEC TX Select For Lane0*/ + csrwr8(core_tx_in_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b0)); + core_rx_out_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b0)); + core_rx_out_sel &= 0xF0; + core_rx_out_sel |= 0x1; + csrwr8(core_rx_out_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b0)); + break; + case 1: + fec_lane_ena = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + /*0xE = enable lane 1 , bit[1] = lane1*/ + fec_lane_ena &= 0xD; + csrwr8(fec_lane_ena, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + core_tx_in_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b0)); + core_tx_in_sel &= 0xF; + core_tx_in_sel |= (0x1 << 4); + csrwr8(core_tx_in_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b0)); + core_rx_out_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b0)); + core_rx_out_sel &= 0xF0; + core_rx_out_sel |= 0x1; + csrwr8(core_rx_out_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b0)); + break; + case 2: + fec_lane_ena = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + /*0xE = enable lane 1 , bit[2] = 0, lane2*/ + fec_lane_ena &= 0xB; + csrwr8(fec_lane_ena, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + core_tx_in_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b8)); + core_tx_in_sel &= 0xF0; + core_tx_in_sel |= 0x1; + csrwr8(core_tx_in_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b8)); + core_rx_out_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b8)); + core_rx_out_sel &= 0xF0; + core_rx_out_sel |= 0x1; + csrwr8(core_rx_out_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b8)); + break; + case 3: + fec_lane_ena = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + /*0xE = enable lane 3 , bit[3] = 0, lane4*/ + fec_lane_ena &= 0xF7; + fec_lane_ena |= 0x08; + csrwr8(fec_lane_ena, priv->rsfec, + 
eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + fec_lane_ena = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_clk_cfg_b8)); + core_tx_in_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b8)); + core_tx_in_sel &= 0x8F; + core_tx_in_sel |= 0x10; + csrwr8(core_tx_in_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b8)); + core_tx_in_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_tx_cfg_b8)); + core_rx_out_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b8)); + core_rx_out_sel &= 0xCF; + core_rx_out_sel |= 0x10; + csrwr8(core_rx_out_sel, priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b8)); + core_rx_out_sel = csrrd8(priv->rsfec, + eth_rsfec_csroffs(rsfec_top_rx_cfg_b8)); + break; + } + } +} + +static void etile_transceiver_reconfiguration(struct intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + u8 transmit_data_input = 0; + u8 TX_PCS_FEC_div2_clock_input_enable = 0; + u8 RX_FIFO_Read_clock = 0; + u8 Async_latency_pulse_select = 0; + u8 rx_bit_counter = 0; + u8 Dynamic_rx_bitslip_enable = 0; + + /* Step 4c - Reconfigure transceiver Reconfiguration Registers */ + /* offset Bit 25Gptpfec-10Gptp 10Gptp-25Gptpfec + * 0x4 4:2 b00 b01 + * 0x5 5 b0 b1 + * 4 b1 b0 + * 0x7 6:5 b10 b00 + * 0x37 7 b1 b0 + * 0x34 7:0 0x03 0x03 + * 0x35 7:0 0x8A 0x48 + * 0x36 3:0 0x1 0x1 + * 0xA 5 0x1 0xb0 + */ + + if ((priv->link_speed == SPEED_25000 || priv->link_speed == SPEED_10000) && + (!strcmp(priv->fec_type, "no-fec"))) { + transmit_data_input = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs(reg_004)); + transmit_data_input &= 0xE3; + transmit_data_input |= 0x0; + csrwr8(transmit_data_input, priv->xcvr, + eth_pma_avmm_csroffs(reg_004)); + transmit_data_input = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs(reg_004)); + TX_PCS_FEC_div2_clock_input_enable = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs + (reg_005)); + TX_PCS_FEC_div2_clock_input_enable &= 0xCF; + TX_PCS_FEC_div2_clock_input_enable |= 0x10; + 
csrwr8(TX_PCS_FEC_div2_clock_input_enable, priv->xcvr, + eth_pma_avmm_csroffs(reg_005)); + RX_FIFO_Read_clock = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs(reg_007)); + RX_FIFO_Read_clock &= 0x9F; + RX_FIFO_Read_clock |= 0x40; + csrwr8(RX_FIFO_Read_clock, priv->xcvr, + eth_pma_avmm_csroffs(reg_007)); + rx_bit_counter = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs(reg_034)); + rx_bit_counter &= 0x0F; + rx_bit_counter |= 0x3; + csrwr8(rx_bit_counter, priv->xcvr, + eth_pma_avmm_csroffs(reg_034)); + rx_bit_counter = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs(reg_035)); + rx_bit_counter &= 0x00; + rx_bit_counter |= 0x8A; + csrwr8(rx_bit_counter, priv->xcvr, + eth_pma_avmm_csroffs(reg_035)); + rx_bit_counter = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs(reg_036)); + rx_bit_counter &= 0xFE; + rx_bit_counter |= 0x1; + csrwr8(rx_bit_counter, priv->xcvr, + eth_pma_avmm_csroffs(reg_036)); + Async_latency_pulse_select = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs(reg_037)); + Async_latency_pulse_select &= 0x7F; + Async_latency_pulse_select |= 0x80; + csrwr8(Async_latency_pulse_select, priv->xcvr, + eth_pma_avmm_csroffs(reg_037)); + Dynamic_rx_bitslip_enable = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs(reg_00a)); + Dynamic_rx_bitslip_enable &= 0xDF; + Dynamic_rx_bitslip_enable |= 0x20; + csrwr8(Dynamic_rx_bitslip_enable, priv->xcvr, + eth_pma_avmm_csroffs(reg_00a)); + } + + if (priv->link_speed == SPEED_25000 && (!(strcmp(priv->fec_type, "kr-fec")))) { + transmit_data_input = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs(reg_004)); + transmit_data_input &= 0xE3; + transmit_data_input |= 0x4; + csrwr8(transmit_data_input, priv->xcvr, + eth_pma_avmm_csroffs(reg_004)); + TX_PCS_FEC_div2_clock_input_enable = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs + (reg_005)); + TX_PCS_FEC_div2_clock_input_enable &= 0xCF; + TX_PCS_FEC_div2_clock_input_enable |= (1 << 5); + csrwr8(TX_PCS_FEC_div2_clock_input_enable, priv->xcvr, + eth_pma_avmm_csroffs(reg_005)); + RX_FIFO_Read_clock = csrrd8(priv->xcvr, + 
eth_pma_avmm_csroffs(reg_007)); + RX_FIFO_Read_clock &= 0x9F; + RX_FIFO_Read_clock |= 0x0; + csrwr8(RX_FIFO_Read_clock, priv->xcvr, + eth_pma_avmm_csroffs(reg_007)); + Async_latency_pulse_select = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs(reg_037)); + Async_latency_pulse_select &= 0x7F; + Async_latency_pulse_select |= 0x0; + csrwr8(Async_latency_pulse_select, priv->xcvr, + eth_pma_avmm_csroffs(reg_037)); + rx_bit_counter = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs(reg_034)); + rx_bit_counter &= 0x0F; + rx_bit_counter |= 0x3; + csrwr8(rx_bit_counter, priv->xcvr, + eth_pma_avmm_csroffs(reg_034)); + rx_bit_counter = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs(reg_035)); + rx_bit_counter &= 0x00; + rx_bit_counter = 0x48; + csrwr8(rx_bit_counter, priv->xcvr, + eth_pma_avmm_csroffs(reg_035)); + rx_bit_counter = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs(reg_036)); + rx_bit_counter &= 0xFE; + rx_bit_counter |= 0x1; + csrwr8(rx_bit_counter, priv->xcvr, + eth_pma_avmm_csroffs(reg_036)); + Dynamic_rx_bitslip_enable = csrrd8(priv->xcvr, + eth_pma_avmm_csroffs(reg_00a)); + Dynamic_rx_bitslip_enable &= 0xDF; + Dynamic_rx_bitslip_enable |= 0; + csrwr8(Dynamic_rx_bitslip_enable, priv->xcvr, + eth_pma_avmm_csroffs(reg_00a)); + } +} + +static void etile_phy_ehip_reconfiguration(struct intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + u32 eth_tx_mac_ehip_reconfg = 0; + u32 eth_phy_rx_pcs_align = 0; + u32 phy_timer_window_hiber = 0; + u32 phy_hiber_frm_err_cnt = 0; + + /* Step 4a - Reconfigure Ethernet Reconfiguration Registers + * Offset Bit 25Gptpfec-10Gptp 10Gptp-25Gptpfec + * 0x40B 31:15 0x13FFF << 15 0x13FFC << 15 + * 0x40B 8:6 0x4 << 6 0x3 << 6 + * 0x40B 5:3 0x1 << 3 0x4 << 3 + * 0x30E 9 1 0 + * 0x30E 4 0 1 + * 0x30E 3 1 0 + * 0x37A 20:0 0x0C4E3 0xC4E33 + * 0x37B 6:0 0x10 0x61 + */ + + if (priv->link_speed == SPEED_25000 && (!(strcmp(priv->fec_type, "kr-fec")))) { + /* 0x40B */ + eth_tx_mac_ehip_reconfg = csrrd32(priv->mac_dev, eth_tx_mac_csroffs 
+ (tx_mac_ehip_conf)); + eth_tx_mac_ehip_reconfg = eth_tx_mac_ehip_reconfg & 0x7E07; + eth_tx_mac_ehip_reconfg = 0x9FFE00E0; + csrwr32(eth_tx_mac_ehip_reconfg, priv->mac_dev, eth_tx_mac_csroffs + (tx_mac_ehip_conf)); + if (!pma_enable) { + /* 0x37A */ + phy_timer_window_hiber = csrrd32(priv->mac_dev, + eth_phy_csroffs + (phy_timer_window_hiber_check)); + phy_timer_window_hiber = phy_timer_window_hiber & 0xFFE00000; + phy_timer_window_hiber |= 0xC4E33; + csrwr32(phy_timer_window_hiber, priv->mac_dev, + eth_phy_csroffs(phy_timer_window_hiber_check)); + /* 0x37B */ + phy_hiber_frm_err_cnt = csrrd32(priv->mac_dev, + eth_phy_csroffs(phy_hiber_frm_err)); + phy_hiber_frm_err_cnt &= 0xFFFFFF80; + phy_hiber_frm_err_cnt |= 0x61; + csrwr32(phy_hiber_frm_err_cnt, priv->mac_dev, + eth_phy_csroffs(phy_hiber_frm_err)); + } + /* 0x30E */ + eth_phy_rx_pcs_align = csrrd32(priv->mac_dev, + eth_phy_csroffs(phy_rx_pcs_align)); + eth_phy_rx_pcs_align &= 0xFFFFFDE7; + eth_phy_rx_pcs_align |= 0x10; + csrwr32(eth_phy_rx_pcs_align, priv->mac_dev, + eth_phy_csroffs(phy_rx_pcs_align)); + } + if (priv->link_speed == SPEED_25000 && (!(strcmp(priv->fec_type, "no-fec")))) { + /* 0x40B */ + eth_tx_mac_ehip_reconfg = csrrd32(priv->mac_dev, eth_tx_mac_csroffs + (tx_mac_ehip_conf)); + eth_tx_mac_ehip_reconfg = eth_tx_mac_ehip_reconfg & 0x7E07; + eth_tx_mac_ehip_reconfg |= 0x9FFF80C8; + csrwr32(eth_tx_mac_ehip_reconfg, priv->mac_dev, eth_tx_mac_csroffs + (tx_mac_ehip_conf)); + if (!pma_enable) { + /* 0x37A */ + phy_timer_window_hiber = csrrd32(priv->mac_dev, + eth_phy_csroffs + (phy_timer_window_hiber_check)); + phy_timer_window_hiber &= 0xFFE00000; + phy_timer_window_hiber |= 0xC4E33; + csrwr32(phy_timer_window_hiber, priv->mac_dev, + eth_phy_csroffs(phy_timer_window_hiber_check)); + /* 0x37B */ + phy_hiber_frm_err_cnt = csrrd32(priv->mac_dev, + eth_phy_csroffs + (phy_hiber_frm_err)); + phy_hiber_frm_err_cnt &= 0xFFFFFF80; + phy_hiber_frm_err_cnt |= 0x61; + csrwr32(phy_hiber_frm_err_cnt, 
priv->mac_dev, + eth_phy_csroffs(phy_hiber_frm_err)); + } + /* 0x30E */ + eth_phy_rx_pcs_align = csrrd32(priv->mac_dev, + eth_phy_csroffs(phy_rx_pcs_align)); + eth_phy_rx_pcs_align &= 0xFFFFFDE7; + eth_phy_rx_pcs_align |= 0x208; + csrwr32(eth_phy_rx_pcs_align, priv->mac_dev, eth_phy_csroffs + (phy_rx_pcs_align)); + } + if (priv->link_speed == SPEED_10000 && (!(strcmp(priv->fec_type, "no-fec")))) { + /* 0x40B */ + eth_tx_mac_ehip_reconfg = csrrd32(priv->mac_dev, eth_tx_mac_csroffs + (tx_mac_ehip_conf)); + eth_tx_mac_ehip_reconfg &= 0x00007E07; + eth_tx_mac_ehip_reconfg |= 0x9FFF8108; + csrwr32(eth_tx_mac_ehip_reconfg, priv->mac_dev, + eth_tx_mac_csroffs(tx_mac_ehip_conf)); + if (!pma_enable) { + /* 0x37A */ + phy_timer_window_hiber = csrrd32(priv->mac_dev, + eth_phy_csroffs + (phy_timer_window_hiber_check)); + phy_timer_window_hiber &= 0xFFE00000; + phy_timer_window_hiber |= 0x0C4E3; + csrwr32(phy_timer_window_hiber, priv->mac_dev, + eth_phy_csroffs(phy_timer_window_hiber_check)); + /* 0x37B */ + phy_hiber_frm_err_cnt = csrrd32(priv->mac_dev, + eth_phy_csroffs(phy_hiber_frm_err)); + phy_hiber_frm_err_cnt &= 0xFFFFFF80; + phy_hiber_frm_err_cnt |= 0x10; + csrwr32(phy_hiber_frm_err_cnt, priv->mac_dev, + eth_phy_csroffs(phy_hiber_frm_err)); + } + /* 0x30E */ + eth_phy_rx_pcs_align = csrrd32(priv->mac_dev, + eth_phy_csroffs(phy_rx_pcs_align)); + eth_phy_rx_pcs_align &= 0xFFFFFDE7; + eth_phy_rx_pcs_align |= 0x208; + csrwr32(eth_phy_rx_pcs_align, priv->mac_dev, + eth_phy_csroffs(phy_rx_pcs_align)); + } +} + +static int etile_TX_and_RX_digital_reset(struct intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + csrwr32(0x6, priv->mac_dev, eth_phy_csroffs(phy_config)); + return 0; +} + +static int etile_Disable_PMA(struct intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + /* Step 2 - Disable PMA + * 1. PMA AWMM Write, Offset = 0x84, value = 0x0 + * 2. PMA AWMM Write, Offset = 0x85, value = 0x0 + * 3. 
PMA AWMM Write, Offset = 0x86, value = 0x1 + * 4. PMA AWMM Write, Offset = 0x87, value = 0x0 + * 5. PMA AWMM Write, Offset = 0x90, value = 0x1 + * 6. PMA AWMM Read, Offset = 0x8A[7], expected value = 1 + * 7. PMA AWMM Read, Offset = 0x8B[0], value = 0 + * 8. PMA AWMM Read, Offset = 0x88, expected value = 0x1 + * (same as 0x86 in step #3) + * 9. PMA AWMM Read, Offset = 0x89, expected value = 0x0 + * (same as 0x87 in step #4) + * 10. PMA AWMM Write, Offset = 0x8A[7], value = 1 + */ + u8 ret; + + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA attribute sent failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA attribute not returned\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_PMA_AVMM_088_PMA_READ_RECEIVER_TUNING, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA low byte failed\n"); + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA high byte failed\n"); + ret = csrrd8(priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + ret |= 0x80; + csrwr8(ret, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + return 0; +} + +static int etile_PMA_analog_reset(struct intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + /* Step 
3 - Trigger PMA analog reset + * 1. PMA AVMM Write, Offset = 0x200, value = 0x0 + * 2. PMA AVMM Write, Offset = 0x201, value = 0x0 + * 3. PMA AVMM Write, Offset = 0x202, value = 0x0 + * 4. PMA AVMM Write, Ofset = 0x203, value = 0x81 + * 5. PMA AVMM Read, Offset = 0x207, expected value = 0x80 + * 6. PMA AVMM Read, Offset = 0x204, expected value = 0x0 (channel #) + */ + + u8 ret; + + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_200)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_201)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_202)); + csrwr8(0x81, priv->xcvr, eth_pma_avmm_csroffs(reg_203)); + + ret = csrrd8(priv->xcvr, eth_pma_avmm_csroffs(reg_207)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_207), + XCVR_PMA_AVMM_207_LAST_OP_ON_200_203_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) { + netdev_err(priv->dev, "Analog PMA reset failed, abort\n"); + return -EINVAL; + } + ret = csrrd8(priv->xcvr, eth_pma_avmm_csroffs(reg_207)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_204), + XCVR_PMA_AVMM_204_RET_PHYS_CHANNEL_NUMBER, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, "Cannot read channel number\n"); + + return 0; +} + +static int etile_Change_TX_reference_clock_ratio(struct intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + /* step 4d - Change TX reference clock ratio */ + /** + * 1. PMA AVMM Write, Offset = 0x84, value = 0xA5(25G)/42(10G) + * 2. PMA AVMM Write, Offset = 0x85, value = 0x0 + * 3. PMA AVMM Write, Offset = 0x86, value = 0x5 + * 4. PMA AVMM Write, Offset = 0x87, value = 0x0 + * 5. PMA AVMM Write, Offset = 0x90, value = 0x1 + * 6. PMA AVMM Read, Offset = 0x8A[7], expected value = 0x1 + * 7. PMA AVMM Read, Offset = 0x8B[0], expected value = 0x0 + * 8. PMA AVMM Read, Offset = 0x88, expected value = 0x5 + * (same as 0x86 in step #3) + * 9. PMA AVMM Read, Offset = 0x89, expected value = 0x0 + * (same as 0x87 in step #4) + * 10. 
PMA AVMM Write, Offset = 0x8A[7], expected value = 0x1 + */ + + u8 ret; + + if (priv->link_speed == SPEED_10000) + csrwr8(0x42, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + if (priv->link_speed == SPEED_25000) + csrwr8(0xA5, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0x5, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA attribute sent failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA attribute not returned\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_TX_REF_PMA_CODE_RET_VAL_LO, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA low byte failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA high byte failed\n"); + ret = csrrd8(priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + ret |= 0x80; + /* read modify */ + csrwr8(ret, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + return 0; +} + +static int etile_Change_RX_reference_clock_ratio(struct intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + /* step 4e - Change RX reference clock ratio */ + /** + * 1. PMA AVMM Write, Offset = 0x84, value = 0xA5(25G)/42(10G) + * 2. PMA AVMM Write, Offset = 0x85, value = 0x0 + * 3. PMA AVMM Write, Offset = 0x86, value = 0x6 + * 4. 
PMA AVMM Write, Offset = 0x87, value = 0x0 + * 5. PMA AVMM Write, Offset = 0x90, expected value = 0x1 + * 6. PMA AVMM Read, Offset = 0x8A[7], expected value = 0x1 + * 7. PMA AVMM Read, Offset = 0x8B[0], expected value = 0x0 + * 8. PMA AVMM Read, Offset = 0x88, expected value = 0x6 + * (same as 0x86 in step #3) + * 9. PMA AVMM Read, Offset = 0x89, expected value = 0x0 + * (same as 0x87 in step #4) + * 10. PMA AVMM Write, Offset = 0x8A[7], expected value = 0x1 + */ + + u8 ret; + + if (priv->link_speed == SPEED_10000) + csrwr8(0x42, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + if (priv->link_speed == SPEED_25000) + csrwr8(0xA5, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0x6, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA attribute sent failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA attribute not returned\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_RX_REF_PMA_CODE_RET_VAL_LO, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA low byte failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA high byte failed\n"); + ret = csrrd8(priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + ret |= 0x80; + /* read modify */ + csrwr8(ret, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + return 0; +} 
+ +static int etile_PMA_RX_TX_Width(struct intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + /* step 4f - PMA RX/TX Width */ + /** + * 1. PMA AVMM Write, Offset = 0x84, value = 0x55 + * 2. PMA AVMM Write, Offset = 0x85, value = 0x0 + * 3. PMA AVMM Write, Offset = 0x86, value = 0x14 + * 4. PMA AVMM Write, Offset = 0x87, value = 0x0 + * 5. PMA AVMM Write, Offset = 0x90, value = 0x1 + * 6. PMA AVMM Read, Offset = 0x8A[7], expected value = 0x1 + * 7. PMA AVMM Read, Offset = 0x8B[0], expected value = 0x0 + * 8. PMA AVMM Read, Offset = 0x88, expected value = 0x14 + * (same as 0x86 in step #3) + * 9. PMA AVMM Read, Offset = 0x89, expected value = 0x0 + * (same as 0x87 in step #4) + * 10. PMA AVMM Write, Offset = 0x8A[7], expected value = 0x1 + */ + + u8 ret; + + csrwr8(0x55, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0x14, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback:(reg_08A), PMA attribute sent failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback:(reg_08B), PMA attribute not returned\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_RX_TX_Width_PMA_CODE_RET_VAL_LO, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: eth_pma_avmm_csroffs(reg_088)PMA low byte failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)) + 
netdev_warn(priv->dev, + "Internal loopback:(reg_089) PMA high byte failed\n"); + ret = csrrd8(priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + ret |= 0x80; + /* read modify */ + csrwr8(ret, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + return 0; +} + +static int etile_RX_Phase_slip(struct intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + /* step 4g - RX Phase slip */ + /** + * 1. PMA AVMM Write, Offset = 0x84, value = 0x0 + * 2. PMA AVMM Write, Offset = 0x85, value = 0x9C + * 3. PMA AVMM Write, Offset = 0x86, value = 0xE + * 4. PMA AVMM Write, Offset = 0x87, value = 0x0 + * 5. PMA AVMM Write, Offset = 0x90, value = 0x1 + * 6. PMA AVMM Read, Offset = 0x8A[7], expected value = 0x1 + * 7. PMA AVMM Read, Offset = 0x8B[0], expected value = 0x0 + * 8. PMA AVMM Read, Offset = 0x88, expected value = 0xE + * (same as 0x86 in step #3) + * 9. PMA AVMM Read, Offset = 0x89, expected value = 0x0 + * (same as 0x87 in step #4) + * 10. PMA AVMM Write, Offset = 0x8A[7], expected value = 0x1 + */ + + u8 ret; + + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0x9C, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0xE, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback:(reg_08A), PMA attribute sent failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback:(reg_08B), PMA attribute not returned\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_RX_Phase_PMA_CODE_RET_VAL_LO, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + 
"Internal loopback:(reg_088), PMA low byte failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback:(reg_089), PMA high byte failed\n"); + + ret = csrrd8(priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + ret |= 0x80; + /* read modify */ + csrwr8(ret, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + return 0; +} + +static int etile_pma_enable(struct intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + /* step 5 - PMA Enable */ + /** + * 1. PMA AVMM Write, Offset = 0x84, value = 0x7 + * 2. PMA AVMM Write, Offset = 0x85, value = 0x0 + * 3. PMA AVMM Write, Offset = 0x86, value = 0x1 + * 4. PMA AVMM Write, Offset = 0x87, value = 0x0 + * 5. PMA AVMM Write, Offset = 0x90, value = 0x1 + * 6. PMA AVMM Read, Offset = 0x8A[7], expected value = 0x1 + * 7. PMA AVMM Read, Offset = 0x8B[0], expected value = 0x0 + * 8. PMA AVMM Read, Offset = 0x88, expected value = 0x1 + * (same as 0x86 in step #3) + * 9. PMA AVMM Read, Offset = 0x89, expected value = 0x0 + * (same as 0x87 in step #4) + * 10. 
PMA AVMM Write, Offset = 0x8A[7], expected value = 0x1 + */ + + u8 ret; + u32 eth_tx_mac_ehip_reconfg; + + csrwr8(0x7, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback: eth_pma_avmm_csroffs(reg_08A),PMA attribute sent failed\n"); + } + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, + false, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_08B), PMA attribute not returned\n"); + } + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_PMA_ENBL_CODE_RET_VAL_LO, + true, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_088) PMA low byte failed\n"); + } + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_089) PMA high byte failed\n"); + } + ret = csrrd8(priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + ret |= 0x80; + /* read modify */ + csrwr8(ret, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + eth_tx_mac_ehip_reconfg = csrrd32(priv->mac_dev, eth_tx_mac_csroffs(tx_mac_ehip_conf)); + + return 0; +} + +static int etile_Enable_Internal_Loopback(struct intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + /* step 6 - Enable Internal Loopback */ + /** + * 1. PMA AVMM Write, Offset = 0x84, value = 0x1 + * 2. PMA AVMM Write, Offset = 0x85, value = 0x1 + * 3. PMA AVMM Write, Offset = 0x86, value = 0x8 + * 4. 
PMA AVMM Write, Offset = 0x87, value = 0x0 + * 5. PMA AVMM Write, Offset = 0x90, value = 0x1 + * 6. PMA AVMM Read, Offset = 0x8A[7], expected value = 0x1 + * 7. PMA AVMM Read, Offset = 0x8B[0], expected value = 0x0 + * 8. PMA AVMM Read, Offset = 0x88, expected value = 0x8 + * (same as 0x86 in step #3) + * 9. PMA AVMM Read, Offset = 0x89, expected value = 0x0 + * (same as 0x87 in step #4) + * 10. PMA AVMM Write, Offset = 0x8A[7], expected value = 0x1 + */ + + u8 ret; + + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0x8, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) { + netdev_warn(priv->dev, + "Internal loopback: eth_pma_avmm_csroffs(reg_08A),PMA attribute sent failed\n"); + } + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, + false, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_08B), PMA attribute not returned\n"); + } + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_INTR_LOOP_PMA_CODE_RET_VAL_LO, + true, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_088) PMA low byte failed\n"); + } + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_089) PMA high byte failed\n"); + } + ret = csrrd8(priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + ret |= 0x80; + /* read modify */ + csrwr8(ret, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + return 0; +} + +static int etile_De_assert_TX_digital_reset(struct 
intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + /* Step 7 - De-assert TX digital reset + * EHIP CSR Write, Offset = 0x310, value = 0x4 + */ + csrwr32(0x4, priv->mac_dev, eth_phy_csroffs(phy_config)); + + return 0; +} + +static int etile_general_calibration(struct intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + u8 ret; + + csrwr8(0x18, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0x2c, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) { + netdev_warn(priv->dev, + "Internal loopback: eth_pma_avmm_csroffs(reg_08A),PMA attribute sent failed\n"); + } + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, + false, INTEL_FPGA_BYTE_ALIGN)) { + netdev_warn(priv->dev, + "Internal loopback:(reg_08B), PMA attribute not returned\n"); + } + + ret = csrrd8(priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + ret |= 0x80; + /* read modify */ + csrwr8(ret, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0x6c, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback: eth_pma_avmm_csroffs(reg_08A),PMA attribute sent failed\n"); + } + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + 
XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, + false, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_08B), PMA attribute not returned\n"); + } + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_GNRL_CALB_PMA_CODE_RET_VAL_LO, + true, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_088) PMA low byte failed\n"); + } + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_089) PMA high byte failed\n"); + } + ret = csrrd8(priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + ret |= 0x80; + /* read modify */ + csrwr8(ret, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + /* Step 7b - Initial Adaptation */ + /** 1. PMA AVMM Write, Offset = 0x84, value = 0x1 + * 2. PMA AVMM Write, Offset = 0x85, value = 0x0 + * 3. PMA AVMM Write, Offset = 0x86, value = 0xa + * 4. PMA AVMM Write, Offset = 0x87, value = 0x0 + * 5. PMA AVMM Write, Offset = 0x90, value = 0x1 + * 6. PMA AVMM Read, Offset = 0x8A[7], expected value = 0x1 + * 7. PMA AVMM Read, Offset = 0x8B[0], expected value = 0x0 + * 8. PMA AVMM Read, Offset = 0x88, expected value = 0xa + * (same as 0x86 in step #3) + * 9. PMA AVMM Read, Offset = 0x89, expected value = 0x0 + * (same as 0x87 in step #4) + * 10. 
PMA AVMM Write, Offset = 0x8A[7], expected value = 0x1 + */ + + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0xa, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback: eth_pma_avmm_csroffs(reg_08A),PMA attribute sent failed\n"); + } + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, + false, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_08B), PMA attribute not returned\n"); + } + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_INIT_ADAPT_7B_PMA_CODE_RET_VAL_LO, + true, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_088) PMA low byte failed\n"); + } + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_089) PMA high byte failed\n"); + } + ret = csrrd8(priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + ret |= 0x80; + /* read modify */ + csrwr8(ret, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + /* Step 7C - Initial Adaptation */ + /* 1. PMA AVMM Write, Offset = 0x84, value = 0x0 + * 2. PMA AVMM Write, Offset = 0x85, value = 0xB + * 3. PMA AVMM Write, Offset = 0x86, value = 0x26 + * 4. PMA AVMM Write, Offset = 0x87, value = 0x1 + * 5. PMA AVMM Write, Offset = 0x90, value = 0x1 + * 6. PMA AVMM Read, Offset = 0x8A[7], expected value = 0x1 + * 7. PMA AVMM Read, Offset = 0x8B[0], expected value = 0x0 + * 8. 
PMA AVMM Read, Offset = 0x88[0], expected value = 0x0 + * (same as 0x86 in step #3) + * 9. PMA AVMM Read, Offset = 0x89, expected value = 0x0 + * (same as 0x87 in step #4) + * 10. PMA AVMM Write, Offset = 0x8A[7], expected value = 0x1 + */ + + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0xb, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0x26, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_08A),PMA attribute sent failed\n"); + } + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, + false, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_08B), PMA attribute not returned\n"); + } + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_INIT_ADAPT_7C_PMA_CODE_RET_VAL_LO, + true, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_088) PMA low byte failed\n"); + } + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)){ + netdev_warn(priv->dev, + "Internal loopback:(reg_089) PMA high byte failed\n"); + } + ret = csrrd8(priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + ret |= 0x80; + /* read modify */ + csrwr8(ret, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + return 0; +} + +static int etile_Enable_mission_mode_and_disable_internal_serial_loopback +(struct intel_fpga_etile_eth_private *priv, const struct ethtool_link_ksettings *cmd) + +{ + /* Step 8 - Enable mission mode and disable internal serial loopback + * 1. PMA AVMM Write, Offset = 0x200, value = 0xE6 + * 2. 
PMA AVMM Write, Offset = 0x201, value = 0x01 + * 3. PMA AVMM Write, Offset = 0x202, value = 0x03 + * 4. PMA AVMM Write, Ofset = 0x203, value = 0x96 + * 5. PMA AVMM Read, Offset = 0x207, expected value = 0x80 + */ + + csrwr8(0xE6, priv->xcvr, eth_pma_avmm_csroffs(reg_200)); + csrwr8(0x01, priv->xcvr, eth_pma_avmm_csroffs(reg_201)); + csrwr8(0x03, priv->xcvr, eth_pma_avmm_csroffs(reg_202)); + csrwr8(0x96, priv->xcvr, eth_pma_avmm_csroffs(reg_203)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_207), + XCVR_PMA_AVMM_207_LAST_OP_ON_200_203_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) { + netdev_err(priv->dev, "Mission mode PMA reset failed, abort\n"); + return -EINVAL; + } + return 0; +} + +static int etile_De_assert_RX_digital_reset(struct intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + /* Step 9 - De-assert RX digital reset + * EHIP CSR Write, Offset = 0x310, value = 0x0 + */ + + csrwr32(0x0, priv->mac_dev, eth_phy_csroffs(phy_config)); + return 0; +} + +static int init_rst_mac(struct intel_fpga_etile_eth_private *priv) +{ + int ret; + + /* start the mac */ + etile_set_mac(priv, true); + etile_update_mac_addr(priv, priv->dev->dev_addr); + + /* Step 1 - Trigger TX and RX digital reset + * 1. EHIP CSR Write, Offset = 0x310, value = 0x6 + */ + + csrwr32(0x6, priv->mac_dev, eth_phy_csroffs(phy_config)); + + /* Step 2 - Trigger PMA analog reset + * 1. PMA AVMM Write, Offset = 0x200, value = 0x0 + * 2. PMA AVMM Write, Offset = 0x201, value = 0x0 + * 3. PMA AVMM Write, Offset = 0x202, value = 0x0 + * 4. PMA AVMM Write, Ofset = 0x203, value = 0x81 + * 5. PMA AVMM Read, Offset = 0x207, expected value = 0x80 + * 6. 
PMA AVMM Read, Offset = 0x204, expected value = 0x0 (channel #) + */ + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_200)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_201)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_202)); + csrwr8(0x81, priv->xcvr, eth_pma_avmm_csroffs(reg_203)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_207), + XCVR_PMA_AVMM_207_LAST_OP_ON_200_203_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) { + netdev_err(priv->dev, "Analog PMA reset failed, abort\n"); + return -EINVAL; + } + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_204), + XCVR_PMA_AVMM_204_RET_PHYS_CHANNEL_NUMBER, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, "Cannot read channel number\n"); + + /* Step 3 - Reload PMA settings + * 1. PMA AVMM Write, Offset = 0x91[0], value = 0x1 + * 2. PMA AVMM Read, Offset = 0x8B, expected values bit [2] and [3] = ‘11’ + */ + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_091)); + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_RELOAD_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, "Reload PMA settings failed\n"); + + /* Step 4 - De-assert TX digital reset + * EHIP CSR Write, Offset = 0x310, value = 0x4 + */ + csrwr32(0x4, priv->mac_dev, eth_phy_csroffs(phy_config)); + + /* Step 5 - Ignore*/ + + /* Step 6a - Enable Internal Loopback + * 1. PMA AVMM Write, Offset = 0x84, value = 0x1 + * 2. PMA AVMM Write, Offset = 0x85, value = 0x1 + * 3. PMA AVMM Write, Offset = 0x86, value = 0x8 + * 4. PMA AVMM Write, Offset = 0x87, value = 0x0 + * 5. PMA AVMM Write, Offset = 0x90, value = 0x1 + * 6. PMA AVMM Read, Offset = 0x8A[7], expected value = 1 + * 7. PMA AVMM Read, Offset = 0x8B[0], expected value = 0 + * 8. PMA AVMM Read, Offset = 0x88, expected value = 0x8(same as 0x86 in step #3) + * 9. PMA AVMM Read, Offset = 0x89, expected value = 0x0(same as 0x87 in step #4) + * 10. 
PMA AVMM Write, Offset = 0x8A[7], value = 1 + */ + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0x8, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA attribute sent failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA attribute not returned\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_PMA_AVMM_088_PMA_INTERNAL_LOOPBACK, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA low byte failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Internal loopback: PMA high byte failed\n"); + + csrwr8(0x80, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + /* Step 6b - Initial Adaptation + * 1. PMA AVMM Write, Offset = 0x84, value = 0x1 + * 2. PMA AVMM Write, Offset = 0x85, value = 0x0 + * 3. PMA AVMM Write, Offset = 0x86, value = 0xA + * 4. PMA AVMM Write, Offset = 0x87, value = 0x0 + * 5. PMA AVMM Write, Offset = 0x90, value = 0x1 + * 6. PMA AVMM Read, Offset = 0x8A[7], expected value = 1 + * 7. PMA AVMM Read, Offset = 0x8B[0], expected value = 0 + * 8. PMA AVMM Read, Offset = 0x88, expected value = 0xA(same as 0x86 in step #3) + * 9. PMA AVMM Read, Offset = 0x89, expected value = 0x0(same as 0x87 in step #4) + * 10. 
PMA AVMM Write, Offset = 0x8A[7], value = 1 + */ + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0xA, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation: PMA attribute sent failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, false, + INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation: PMA attribute not returned\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_PMA_AVMM_088_PMA_RECEIVER_TUNING_CTRL, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation: PMA low byte failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation: PMA high byte failed\n"); + + csrwr8(0x80, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + /* Step 6c - Verify Initial Adaptation Status + * 1. PMA AVMM Write, Offset = 0x84, value = 0x0 + * 2. PMA AVMM Write, Offset = 0x85, value = 0xB + * 3. PMA AVMM Write, Offset = 0x86, value = 0x26 + * 4. PMA AVMM Write, Offset = 0x87, value = 0x1 + * 5. PMA AVMM Write, Offset = 0x90, value = 0x1 + * 6. PMA AVMM Read, Offset = 0x8A[7], expected value = 1 + * 7. PMA AVMM Read, Offset = 0x8B[0], expected value = 0 + * 8. PMA AVMM Read, Offset = 0x88[0], expected value = 0 + * 9. PMA AVMM Read, Offset = 0x89, expected value = 0x0 + * 10. 
PMA AVMM Write, Offset = 0x8A[7], value = 1 + */ + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0xB, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0x26, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation Status: PMA sent failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, false, + INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation Status: PMA not returned\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_PMA_AVMM_088_PMA_READ_RECEIVER_TUNING, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation Status: PMA low byte failed"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation Status: PMA high byte failed\n"); + + csrwr8(0x80, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + /* Step 7 - Disable internal serial loopback + * 1. PMA AVMM Write, Offset = 0x84, value = 0x0 + * 2. PMA AVMM Write, Offset = 0x85, value = 0x1 + * 3. PMA AVMM Write, Offset = 0x86, value = 0x8 + * 4. PMA AVMM Write, Offset = 0x87, value = 0x0 + * 5. PMA AVMM Write, Offset = 0x90, value = 0x1 + * 6. PMA AVMM Read, Offset = 0x8A[7], expected value = 1 + * 7. PMA AVMM Read, Offset = 0x8B[0], expected value = 0 + * 8. PMA AVMM Read, Offset = 0x88, expected value = 0x8(same as 0x86 in step #3) + * 9. PMA AVMM Read, Offset = 0x89, expected value = 0x0(same as 0x87 in step #4) + * 10. 
PMA AVMM Write, Offset = 0x8A[7], value = 1 + */ + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0x8, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Disable loopback: PMA attribute sent failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, false, + INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Disable loopback: PMA attribute not returned\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_PMA_AVMM_088_PMA_INTERNAL_LOOPBACK, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Disable loopback: PMA low byte failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Disable loopback: PMA high byte failed\n"); + + csrwr8(0x80, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + /* Step 8 - Wait for valid data traffic on RX and then proceed to the next step. + */ + + /* Step 9 - Run initial adaptation. + * Verify that the initial adaptation status is complete using interrupt code 0x0126 + * and data 0x0B00 (skip this step if using internal serial loopback). + * Same as step 6b and 6c + * Step 9.6b - Initial Adaptation + * 1. PMA AVMM Write, Offset = 0x84, value = 0x1 + * 2. PMA AVMM Write, Offset = 0x85, value = 0x0 + * 3. PMA AVMM Write, Offset = 0x86, value = 0xA + * 4. PMA AVMM Write, Offset = 0x87, value = 0x0 + * 5. PMA AVMM Write, Offset = 0x90, value = 0x1 + * 6. PMA AVMM Read, Offset = 0x8A[7], expected value = 1 + * 7. 
PMA AVMM Read, Offset = 0x8B[0], expected value = 0 + * 8. PMA AVMM Read, Offset = 0x88, expected value = 0xA(same as 0x86 in step #3) + * 9. PMA AVMM Read, Offset = 0x89, expected value = 0x0(same as 0x87 in step #4) + * 10. PMA AVMM Write, Offset = 0x8A[7], value = 1 + */ + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0xA, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation repeat: PMA sent failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, false, + INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation repeat: PMA not returned\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_PMA_AVMM_088_PMA_RECEIVER_TUNING_CTRL, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation repeat: PMA low byte failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation repeat: PMA high byte failed\n"); + + csrwr8(0x80, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + /* Step 9.6c - Verify Initial Adaptation Status + * 1. PMA AVMM Write, Offset = 0x84, value = 0x0 + * 2. PMA AVMM Write, Offset = 0x85, value = 0xB + * 3. PMA AVMM Write, Offset = 0x86, value = 0x26 + * 4. PMA AVMM Write, Offset = 0x87, value = 0x1 + * 5. PMA AVMM Write, Offset = 0x90, value = 0x1 + * 6. PMA AVMM Read, Offset = 0x8A[7], expected value = 1 + * 7. PMA AVMM Read, Offset = 0x8B[0], expected value = 0 + * 8. 
PMA AVMM Read, Offset = 0x88[0], expected value = 0 + * 9. PMA AVMM Read, Offset = 0x89, expected value = 0x0 + * 10. PMA AVMM Write, Offset = 0x8A[7], value = 1 + */ + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0xB, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0x26, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation Status repeat: PMA sent failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, false, + INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation Status repeat: PMA not returned\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_PMA_AVMM_088_PMA_READ_RECEIVER_TUNING, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation Status repeat: PMA low byte failed"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Initial Adaptation Status repeat: PMA high byte failed\n"); + + csrwr8(0x80, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + /* Step 10 - Run continuous adaptation + * *During the continuous adaptation, the link partner must keep sending the data. + * If link goes down, the entire sequence must be repeated. + * 1. PMA AVMM Write, Offset = 0x84, value = 0x6 + * 2. PMA AVMM Write, Offset = 0x85, value = 0x0 + * 3. PMA AVMM Write, Offset = 0x86, value = 0xA + * 4. PMA AVMM Write, Offset = 0x87, value = 0x0 + * 5. PMA AVMM Write, Offset = 0x90, value = 0x1 + * 6. PMA AVMM Read, Offset = 0x8A[7], expected value = 1 + * 7. 
PMA AVMM Read, Offset = 0x8B[0], expected value = 0 + * 8. PMA AVMM Read, Offset = 0x88, expected value = 0xA (same as 0x86 in step #3) + * 9. PMA AVMM Read, Offset = 0x89, expected value = 0x0 (same as 0x87 in step #4) + * 10. PMA AVMM Write, Offset = 0x8A[7], value = 1 + */ + csrwr8(0x6, priv->xcvr, eth_pma_avmm_csroffs(reg_084)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_085)); + csrwr8(0xA, priv->xcvr, eth_pma_avmm_csroffs(reg_086)); + csrwr8(0x0, priv->xcvr, eth_pma_avmm_csroffs(reg_087)); + csrwr8(0x1, priv->xcvr, eth_pma_avmm_csroffs(reg_090)); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08A), + XCVR_PMA_AVMM_08A_PMA_ATTR_SENT_SUCCESS, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Continuous Adaption: PMA failed to send\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_08B), + XCVR_PMA_AVMM_08B_PMA_FINISH_ATTR, false, + INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Continuous Adaption: PMA not returned\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_088), + XCVR_PMA_AVMM_088_PMA_RECEIVER_TUNING_CTRL, + true, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Continuous Adaption: PMA low byte failed\n"); + + if (etile_check_counter_complete(priv->xcvr, eth_pma_avmm_csroffs(reg_089), + XCVR_PMA_AVMM_089_CORE_PMA_ATTR_CODE_RET_VAL_HI, + false, INTEL_FPGA_BYTE_ALIGN)) + netdev_warn(priv->dev, + "Continuous Adaption: PMA high byte failed\n"); + + csrwr8(0x80, priv->xcvr, eth_pma_avmm_csroffs(reg_08A)); + + /* Step 11 - De-assert RX digital reset + * EHIP CSR Write, Offset = 0x310, value = 0x0 + */ + csrwr32(0x0, priv->mac_dev, eth_phy_csroffs(phy_config)); + + /* Step 12 - Verify RX PCS Status + * EHIP CSR Read, Offset = 0x326, expected value = 0x1 + */ + if (etile_check_counter_complete(priv->mac_dev, + eth_phy_csroffs(phy_pcs_stat_anlt), + ETH_PHY_RX_PCS_ALIGNED, true, + INTEL_FPGA_WORD_ALIGN)) { + netdev_err(priv->dev, "RX PCS is not 
aligned\n"); + return -EINVAL; + } + + /* Step 13 - IP Ready */ + /* if the link goes down anytime, this whole process above needs to be repeated */ + ret = eth_etile_tx_rx_user_flow(priv); + if (ret < 0) { + netdev_err(priv->dev, "Tx & Rx user flow failed\n"); + return ret; + } + + etile_clear_mac_statistics(priv); + etile_set_mac_flow_ctrl(priv); + + return 0; +} + +/* Control hardware timestamping. + * This function configures the MAC to enable/disable both outgoing(TX) + * and incoming(RX) packets time stamping based on user input. + */ +static int etile_set_hwtstamp_config(struct net_device *dev, struct ifreq *ifr) +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + struct hwtstamp_config config; + int ret = 0; + + if (copy_from_user(&config, ifr->ifr_data, + sizeof(struct hwtstamp_config))) + return -EFAULT; + + netif_info(priv, drv, dev, + "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", + __func__, config.flags, config.tx_type, config.rx_filter); + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_ON: + priv->dma_priv.hwts_tx_en = 1; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + priv->dma_priv.hwts_rx_en = 0; + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + default: + priv->dma_priv.hwts_rx_en = 1; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + + if (copy_to_user(ifr->ifr_data, &config, + sizeof(struct hwtstamp_config))) + return -EFAULT; + + return ret; +} + +/* Entry point for the ioctl. 
+ */ +static int etile_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + int ret = 0; + + if (!netif_running(dev)) + return -EINVAL; + + switch (cmd) { + case SIOCSHWTSTAMP: + ret = etile_set_hwtstamp_config(dev, ifr); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +/* Reconfigure E-Tile settings for different data rates */ +int etile_dynamic_reconfiguration(struct intel_fpga_etile_eth_private *priv, + const struct ethtool_link_ksettings *cmd) +{ + int ret; + static u32 tod_read_value; + + del_timer_sync(&priv->fec_timer); + + /*step 1 TX_and_RX_digital_reset */ + etile_TX_and_RX_digital_reset(priv, cmd); + + /*Step 2 - Disable PMA */ + etile_Disable_PMA(priv, cmd); + + pma_enable = false; + + /* Step 3 - Trigger PMA analog reset */ + etile_PMA_analog_reset(priv, cmd); + + tod_read_value = csrrd32(priv->tod_pio, eth_tod_pio_offs(etile_tod_pio_config)); + if (priv->link_speed == SPEED_10000) { + tod_read_value = tod_read_value & 0xfffffffe; + tod_read_value = tod_read_value | 0x0; + csrwr32(tod_read_value, priv->tod_pio, eth_tod_pio_offs + (etile_tod_pio_config)); + } + if (priv->link_speed == SPEED_25000 && tod_read_value == 0) { + tod_read_value = tod_read_value & 0xfffffffe; + tod_read_value = tod_read_value | 0x1; + csrwr32(tod_read_value, priv->tod_pio, eth_tod_pio_offs + (etile_tod_pio_config)); + } + if (priv->link_speed == SPEED_25000) { + /* Step 4a - Reconfigure Ethernet Reconfiguration Registers*/ + etile_phy_ehip_reconfiguration(priv, cmd); + + /* step 4b - rsfec reconfiguration */ + etile_rsfec_reconfiguration(priv, cmd); + + /* step 4c - transceiver_reconfiguration */ + etile_transceiver_reconfiguration(priv, cmd); + } + /* step 4d - Change TX reference clock ratio */ + etile_Change_TX_reference_clock_ratio(priv, cmd); + + /* step 4e - Change RX reference clock ratio */ + etile_Change_RX_reference_clock_ratio(priv, cmd); + + if (priv->link_speed == SPEED_10000) { + /* Step 4a - Reconfigure Ethernet Reconfiguration 
Registers*/ + etile_phy_ehip_reconfiguration(priv, cmd); + + /* step 4b - rsfec reconfiguration */ + etile_rsfec_reconfiguration(priv, cmd); + + /* step 4c - transceiver_reconfiguration */ + etile_transceiver_reconfiguration(priv, cmd); + } + + /* step 4f - PMA RX/TX Width */ + etile_PMA_RX_TX_Width(priv, cmd); + + /* step 4g - RX Phase slip */ + etile_RX_Phase_slip(priv, cmd); + + /* step 5 - PMA Enable */ + etile_pma_enable(priv, cmd); + + /* [WA]:When we try to perform PMA Serdes enabled step ( 0x84 = 7 and 0x86=1 ) , + * the 0x40b and 0x30e registers somehow have been reset to its hw reset value + * due to that reconfiguration the MAC register + */ + + /* Step 4a - Reconfigure Ethernet Reconfiguration Registers*/ + pma_enable = true; + + etile_phy_ehip_reconfiguration(priv, cmd); + + /* step 6 - Enable Internal Loopback */ + etile_Enable_Internal_Loopback(priv, cmd); + + /* Step 7 - De-assert TX digital reset + * EHIP CSR Write, Offset = 0x310, value = 0x4 + */ + etile_De_assert_TX_digital_reset(priv, cmd); + + /* genarale calibration */ + etile_general_calibration(priv, cmd); + + /* Step 8 - Enable mission mode and disable internal serial loopback */ + etile_Enable_mission_mode_and_disable_internal_serial_loopback(priv, cmd); + + /* Step 9 - De-assert RX digital reset + * EHIP CSR Write, Offset = 0x310, value = 0x0 + */ + etile_De_assert_RX_digital_reset(priv, cmd); + + /* Step 10 - Verify IP Readiness RX + * EHIP CSR Read, Offset = 0x326, expected value = 0x1 + */ + + if (etile_check_counter_complete(priv->mac_dev, + eth_phy_csroffs(phy_pcs_stat_anlt), + ETH_PHY_RX_PCS_ALIGNED, true, + INTEL_FPGA_WORD_ALIGN)) { + netdev_err(priv->dev, "RX PCS is not aligned\n"); + return -EINVAL; + } + + /* Step 11 - IP Ready */ + /* if the link goes down anytime, this whole process above needs to be repeated */ + + ret = eth_etile_tx_rx_user_flow(priv); + if (ret < 0) { + netdev_err(priv->dev, "Tx & Rx user flow failed\n"); + return ret; + } + 
etile_clear_mac_statistics(priv); + etile_set_mac_flow_ctrl(priv); + return 0; +} + +/* Open and initialize the interface + */ +static int etile_open(struct net_device *dev) +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + int ret = 0; + int i; + unsigned long flags; + + /* Create and initialize the TX/RX descriptors chains. */ + priv->dma_priv.rx_ring_size = dma_rx_num; + priv->dma_priv.tx_ring_size = dma_tx_num; + /* Reset and configure E-tile MAC and probe associated PHY */ + ret = priv->dmaops->init_dma(&priv->dma_priv); + if (ret) { + netdev_err(dev, "Cannot initialize DMA\n"); + goto phy_error; + } + + if (netif_msg_ifup(priv)) + netdev_info(dev, "device MAC address %pM\n", + dev->dev_addr); + + spin_lock(&priv->mac_cfg_lock); + + /* E-tile reset */ + ret = init_rst_mac(priv); + spin_unlock(&priv->mac_cfg_lock); + if (ret) { + netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret); + goto alloc_skbuf_error; + } + + priv->dmaops->reset_dma(&priv->dma_priv); + + ret = etile_alloc_init_skbufs(priv); + if (ret) { + netdev_err(dev, "DMA descriptors initialization failed\n"); + goto alloc_skbuf_error; + } + + /* Register RX interrupt */ + ret = devm_request_irq(priv->device, priv->rx_irq, intel_fpga_etile_isr, + IRQF_SHARED, dev->name, dev); + if (ret) { + netdev_err(dev, "Unable to register RX interrupt %d\n", + priv->rx_irq); + goto init_error; + } + + /* Register TX interrupt */ + ret = devm_request_irq(priv->device, priv->tx_irq, intel_fpga_etile_isr, + IRQF_SHARED, dev->name, dev); + if (ret) { + netdev_err(dev, "Unable to register TX interrupt %d\n", + priv->tx_irq); + goto init_error; + } + + /* Enable DMA interrupts */ + spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + priv->dmaops->enable_rxirq(&priv->dma_priv); + priv->dmaops->enable_txirq(&priv->dma_priv); + + /* Setup RX descriptor chain */ + for (i = 0; i < priv->dma_priv.rx_ring_size; i++) + priv->dmaops->add_rx_desc(&priv->dma_priv, + &priv->dma_priv.rx_ring[i]); + + 
spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + + napi_enable(&priv->napi); + netif_start_queue(dev); + + netdev_warn(dev, "start rxdma\n"); + priv->dmaops->start_rxdma(&priv->dma_priv); + + if (priv->dmaops->start_txdma) + priv->dmaops->start_txdma(&priv->dma_priv); + + if (priv->phylink) { + /* Reset qsfp poll delay after PMA adaptation flow */ + priv->qsfp_poll_delay_count = 0; + phylink_start(priv->phylink); + } + + return 0; + +init_error: + etile_free_skbufs(dev); +alloc_skbuf_error: +phy_error: + return ret; +} + +/* Stop TSE MAC interface and put the device in an inactive state + */ +static int etile_shutdown(struct net_device *dev) +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + unsigned long flags; + + /* Stop the PHY */ + if (priv->phylink) + phylink_stop(priv->phylink); + + netif_stop_queue(dev); + napi_disable(&priv->napi); + + /* Disable DMA interrupts */ + spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + priv->dmaops->disable_rxirq(&priv->dma_priv); + priv->dmaops->disable_txirq(&priv->dma_priv); + spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + + /* Unregister RX interrupt */ + devm_free_irq(priv->device, priv->rx_irq, dev); + + /* Unregister TX interrupt */ + devm_free_irq(priv->device, priv->tx_irq, dev); + + /* disable and reset the MAC, empties fifo */ + spin_lock(&priv->mac_cfg_lock); + spin_lock(&priv->tx_lock); + + /* Trigger RX digital reset + * 1. 
EHIP CSR Write, Offset = 0x310, value = 0x4 + */ + csrwr32(0x4, priv->mac_dev, eth_phy_csroffs(phy_config)); + udelay(1); + + priv->dmaops->reset_dma(&priv->dma_priv); + etile_free_skbufs(dev); + + spin_unlock(&priv->tx_lock); + spin_unlock(&priv->mac_cfg_lock); + priv->dmaops->uninit_dma(&priv->dma_priv); + del_timer_sync(&priv->fec_timer); + + netdev_warn(dev, "shutdown completed\n"); + + return 0; +} + +static void etile_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *storage) +{ + struct intel_fpga_etile_eth_private *priv = netdev_priv(dev); + u32 lsb; + u32 msb; + + /* rx stats */ + lsb = csrrd32(priv->mac_dev, + eth_rx_stats_csroffs(rx_frame_octetsok_lsb)); + msb = csrrd32(priv->mac_dev, + eth_rx_stats_csroffs(rx_frame_octetsok_msb)); + storage->rx_bytes = ((u64)msb << 32) | lsb; + + lsb = csrrd32(priv->mac_dev, + eth_rx_stats_csroffs(rx_mcast_data_ok_lsb)); + msb = csrrd32(priv->mac_dev, + eth_rx_stats_csroffs(rx_mcast_data_ok_msb)); + storage->multicast = ((u64)msb << 32) | lsb; + + storage->collisions = 0; + + lsb = csrrd32(priv->mac_dev, + eth_rx_stats_csroffs(rx_lenerr_lsb)); + msb = csrrd32(priv->mac_dev, + eth_rx_stats_csroffs(rx_lenerr_msb)); + storage->rx_length_errors = ((u64)msb << 32) | lsb; + + storage->rx_over_errors = 0; + + lsb = csrrd32(priv->mac_dev, + eth_rx_stats_csroffs(rx_crcerr_okpkt_lsb)); + msb = csrrd32(priv->mac_dev, + eth_rx_stats_csroffs(rx_crcerr_okpkt_msb)); + storage->rx_crc_errors = ((u64)msb << 32) | lsb; + + storage->rx_fifo_errors = 0; + storage->rx_missed_errors = 0; + /* IP UG does not have total RX packets, + * total RX bad packets, total RX dropped packets + */ + storage->rx_packets = 0; + storage->rx_errors = 0; + storage->rx_dropped = 0; + /* also count the packets dropped by this network driver */ + storage->rx_dropped += dev->stats.rx_dropped; + + /* tx stats */ + lsb = csrrd32(priv->mac_dev, + eth_tx_stats_csroffs(tx_frame_octetsok_lsb)); + msb = csrrd32(priv->mac_dev, + 
eth_tx_stats_csroffs(tx_frame_octetsok_msb)); + storage->tx_bytes = ((u64)msb << 32) | lsb; + + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_malformed_ctrl_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_malformed_ctrl_msb)); + storage->tx_errors = ((u64)msb << 32) | lsb; + + lsb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_dropped_ctrl_lsb)); + msb = csrrd32(priv->mac_dev, eth_tx_stats_csroffs(tx_dropped_ctrl_msb)); + storage->tx_dropped = ((u64)msb << 32) | lsb; + + storage->tx_aborted_errors = 0; + storage->tx_fifo_errors = 0; + storage->tx_heartbeat_errors = 0; + storage->tx_window_errors = 0; + storage->rx_compressed = 0; + storage->tx_compressed = 0; + /* IP UG does not have total TX packets */ + storage->tx_packets = 0; +} + +static const struct net_device_ops intel_fpga_etile_netdev_ops = { + .ndo_open = etile_open, + .ndo_stop = etile_shutdown, + .ndo_start_xmit = etile_start_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_set_rx_mode = etile_set_rx_mode, + .ndo_change_mtu = etile_change_mtu, + .ndo_eth_ioctl = etile_do_ioctl, + .ndo_get_stats64 = etile_get_stats64 +}; + +static void intel_fpga_etile_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) +{ + /* Not Supported */ +} + +static void intel_fpga_etile_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct intel_fpga_etile_eth_private *priv = + netdev_priv(to_net_dev(config->dev)); + + phylink_mac_change(priv->phylink, false); +} + +static void intel_fpga_etile_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, + phy_interface_t interface, int speed, + int duplex, bool tx_pause, + bool rx_pause) +{ + struct intel_fpga_etile_eth_private *priv = + netdev_priv(to_net_dev(config->dev)); + + phylink_mac_change(priv->phylink, true); +} + +static const struct phylink_mac_ops intel_fpga_etile_phylink_ops = { + .mac_config = 
intel_fpga_etile_mac_config, + .mac_link_down = intel_fpga_etile_mac_link_down, + .mac_link_up = intel_fpga_etile_mac_link_up, +}; + +/* Probe Altera E-tile MAC device + */ +static int intel_fpga_etile_probe(struct platform_device *pdev) +{ + struct net_device *ndev; + int ret = -ENODEV; + struct resource *eth_reconfig; + struct resource *rx_fifo; + struct resource *xcvr; + struct resource *rsfec; + struct resource *tod_pio; + struct intel_fpga_etile_eth_private *priv; + struct device_node *np = pdev->dev.of_node; + const struct of_device_id *of_id = NULL; + u8 addr[ETH_ALEN]; + + ndev = alloc_etherdev(sizeof(struct intel_fpga_etile_eth_private)); + if (!ndev) { + dev_err(&pdev->dev, "Could not allocate network device\n"); + return -ENODEV; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + priv = netdev_priv(ndev); + priv->device = &pdev->dev; + priv->dma_priv.device = &pdev->dev; + priv->dev = ndev; + priv->dma_priv.dev = ndev; + priv->ptp_priv.dev = ndev; + priv->msg_enable = netif_msg_init(debug, default_msg_level); + priv->dma_priv.msg_enable = netif_msg_init(debug, default_msg_level); + priv->pause = pause; + priv->flow_ctrl = flow_ctrl; + priv->phylink_config.dev = &priv->dev->dev; + priv->phylink_config.type = PHYLINK_NETDEV; + + of_id = of_match_device(intel_fpga_etile_ll_ids, &pdev->dev); + if (of_id) + priv->dmaops = (struct altera_dmaops *)of_id->data; + /* PTP is only supported with a modified MSGDMA */ + priv->ptp_enable = of_property_read_bool(pdev->dev.of_node, + "altr,has-ptp"); + if (priv->ptp_enable && + priv->dmaops->altera_dtype != ALTERA_DTYPE_MSGDMA_PREF) { + dev_err(&pdev->dev, "PTP requires modified dma\n"); + ret = -ENODEV; + goto err_free_netdev; + } + /* MAC address space */ + ret = request_and_map(pdev, "eth_reconfig", ð_reconfig, + (void __iomem **)&priv->mac_dev); + if (ret) + goto err_free_netdev; + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tEth Reconfig at 0x%08lx\n", + (unsigned long)eth_reconfig->start); + + /* mSGDMA Tx IRQ */ 
+ priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq"); + if (priv->tx_irq == -ENXIO) { + dev_err(&pdev->dev, "cannot obtain Tx IRQ\n"); + ret = -ENXIO; + goto err_free_netdev; + } + /* mSGDMA Rx IRQ */ + priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq"); + if (priv->rx_irq == -ENXIO) { + dev_err(&pdev->dev, "cannot obtain Rx IRQ\n"); + ret = -ENXIO; + goto err_free_netdev; + } + /* Map DMA */ + ret = altera_eth_dma_probe(pdev, &priv->dma_priv, + priv->dmaops->altera_dtype); + if (ret) { + dev_err(&pdev->dev, "cannot map DMA\n"); + goto err_free_netdev; + } + + /* Rx Fifo */ + ret = request_and_map(pdev, "rx_fifo", &rx_fifo, + (void __iomem **)&priv->rx_fifo); + if (ret) + goto err_free_netdev; + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tRX FIFO at 0x%08lx\n", + (unsigned long)rx_fifo->start); + /* XCVR address space */ + ret = request_and_map(pdev, "xcvr", &xcvr, + (void __iomem **)&priv->xcvr); + if (ret) + goto err_free_netdev; + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tXCVR at 0x%08lx\n", + (unsigned long)xcvr->start); + + /*Read it from the device tree and then map*/ + of_property_read_u32(pdev->dev.of_node, "fec-cw-pos-rx", + &priv->rsfec_cw_pos_rx); + /* TOD-PIO address space */ + ret = request_and_map(pdev, "tod_pio", &tod_pio, + (void __iomem **)&priv->tod_pio); + if (ret) + goto err_free_netdev; + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tTOD-PIO at 0x%08lx\n", + (unsigned long)tod_pio->start); + /* RS-FEC address space */ + ret = request_and_map(pdev, "rsfec", &rsfec, + (void __iomem **)&priv->rsfec); + if (ret) + goto err_free_netdev; + + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tRS-FEC at 0x%08lx\n", + (unsigned long)rsfec->start); + /* we only support ptp with the msgdma */ + if (priv->ptp_enable) { + /* MAP PTP */ + ret = intel_fpga_tod_probe(pdev, &priv->ptp_priv); + if (ret) { + dev_err(&pdev->dev, "cannot map PTP\n"); + goto err_free_netdev; + } + } + + if (!dma_set_mask(priv->device, 
DMA_BIT_MASK(priv->dmaops->dmamask))) + dma_set_coherent_mask(priv->device, + DMA_BIT_MASK(priv->dmaops->dmamask)); + else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) + dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32)); + else + goto err_free_netdev; + + /* get FIFO depths from device tree */ + if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", + &priv->rx_fifo_depth)) { + dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n"); + ret = -ENXIO; + goto err_free_netdev; + } + + if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", + &priv->tx_fifo_depth)) { + dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); + ret = -ENXIO; + goto err_free_netdev; + } + + if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-almost-full", + &priv->rx_fifo_almost_full)) { + dev_err(&pdev->dev, "cannot obtain rx-fifo-almost-full\n"); + priv->rx_fifo_almost_full = 0x4000; + } + + if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-almost-empty", + &priv->rx_fifo_almost_empty)) { + dev_err(&pdev->dev, "cannot obtain rx-fifo-almost-empty\n"); + priv->rx_fifo_almost_empty = 0x3000; + } + + /* Set hash filter to not set for now until the + * multicast filter receive issue is debugged + */ + priv->hash_filter = 0; + + /* get supplemental address settings for this instance */ + priv->added_unicast = + of_property_read_bool(pdev->dev.of_node, + "altr,has-supplementary-unicast"); + + priv->dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN; + /* Max MTU is 1500, ETH_DATA_LEN */ + priv->dev->max_mtu = ETH_DATA_LEN; + + /* Get the max mtu from the device tree. Note that the + * "max-frame-size" parameter is actually max mtu. Definition + * in the ePAPR v1.1 spec and usage differ, so go with usage. 
+ */ + of_property_read_u32(pdev->dev.of_node, "max-frame-size", + &priv->dev->max_mtu); + + /* The DMA buffer size already accounts for an alignment bias + * to avoid unaligned access exceptions for the NIOS processor, + */ + priv->dma_priv.rx_dma_buf_sz = INTEL_FPGA_RXDMABUFFER_SIZE; + + /* Get MAC PMA digital delays from device tree */ + if (of_property_read_u32(pdev->dev.of_node, "altr,tx-pma-delay-ns", + &priv->tx_pma_delay_ns)) { + dev_warn(&pdev->dev, "cannot obtain Tx PMA delay ns\n"); + priv->tx_pma_delay_ns = 0; + } + + if (of_property_read_u32(pdev->dev.of_node, "altr,rx-pma-delay-ns", + &priv->rx_pma_delay_ns)) { + dev_warn(&pdev->dev, "cannot obtain Rx PMA delay\n"); + priv->rx_pma_delay_ns = 0; + } + + if (of_property_read_u32(pdev->dev.of_node, "altr,tx-pma-delay-fns", + &priv->tx_pma_delay_fns)) { + dev_warn(&pdev->dev, "cannot obtain Tx PMA delay fns\n"); + priv->tx_pma_delay_fns = 0; + } + + if (of_property_read_u32(pdev->dev.of_node, "altr,rx-pma-delay-fns", + &priv->rx_pma_delay_fns)) { + dev_warn(&pdev->dev, "cannot obtain Rx PMA delay\n"); + priv->rx_pma_delay_fns = 0; + } + + if (of_property_read_u32(pdev->dev.of_node, + "altr,tx-external-phy-delay-ns", + &priv->tx_external_phy_delay_ns)) { + dev_warn(&pdev->dev, "cannot obtain Tx phy delay ns\n"); + priv->tx_external_phy_delay_ns = 0; + } + + if (of_property_read_u32(pdev->dev.of_node, + "altr,rx-external-phy-delay-ns", + &priv->rx_external_phy_delay_ns)) { + dev_warn(&pdev->dev, "cannot obtain Rx phy delay ns\n"); + priv->rx_external_phy_delay_ns = 0; + } + + /* get default MAC address from device tree */ + ret = of_get_mac_address(pdev->dev.of_node, addr); + if (ret) + eth_hw_addr_random(ndev); + else + eth_hw_addr_set(ndev, addr); + + /* initialize netdev */ + ndev->mem_start = eth_reconfig->start; + ndev->mem_end = eth_reconfig->end; + ndev->netdev_ops = &intel_fpga_etile_netdev_ops; + intel_fpga_etile_set_ethtool_ops(ndev); + + /* Scatter/gather IO is not supported, + * so it is turned 
off + */ + ndev->hw_features &= ~NETIF_F_SG; + ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; + + /* VLAN offloading of tagging, stripping and filtering is not + * supported by hardware, but driver will accommodate the + * extra 4-byte VLAN tag for processing by upper layers + */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; + + /* setup NAPI interface */ + netif_napi_add(ndev, &priv->napi, etile_poll); + + spin_lock_init(&priv->mac_cfg_lock); + spin_lock_init(&priv->tx_lock); + spin_lock_init(&priv->rxdma_irq_lock); + spin_lock_init(&priv->ptp_priv.tod_lock); + + /* check if phy-mode is present */ + ret = of_get_phy_mode(np, &priv->phy_iface); + if (ret) { + dev_err(&pdev->dev, "incorrect phy-mode\n"); + goto err_free_netdev; + } + + ret = of_property_read_string(pdev->dev.of_node, "phy-mode", + &priv->phy_mode); + if (ret < 0) { + dev_err(&pdev->dev, "cannot obtain phy_mode\n"); + return ret; + } + dev_info(&pdev->dev, "\t phymode is %s\n", priv->phy_mode); + + phy_mode_etile = priv->phy_mode; + __set_bit(PHY_INTERFACE_MODE_10GBASER, + priv->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_25GBASER, + priv->phylink_config.supported_interfaces); + + /* create phylink */ + priv->phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode, + priv->phy_iface, &intel_fpga_etile_phylink_ops); + if (IS_ERR(priv->phylink)) { + dev_err(&pdev->dev, "failed to create phylink\n"); + ret = PTR_ERR(priv->phylink); + goto err_free_netdev; + } + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "failed to register E-tile ethernet device\n"); + goto err_register_netdev; + } + + platform_set_drvdata(pdev, ndev); + + if (priv->ptp_enable) { + ret = intel_fpga_tod_register(&priv->ptp_priv, priv->device); + if (ret) { + dev_err(&pdev->dev, "Unable to register PTP clock\n"); + ret = -ENXIO; + goto err_init_phy; + } + } + + ret = fec_init(pdev, priv); + if (ret < 0) { + dev_err(&pdev->dev, "Unable to init FEC\n"); + ret = -ENXIO; + goto 
err_init_phy; + } + + return 0; + +err_init_phy: + unregister_netdev(ndev); +err_register_netdev: + netif_napi_del(&priv->napi); +err_free_netdev: + free_netdev(ndev); + return ret; +} + +/* Remove Altera E-tile MAC device + */ +static void intel_fpga_etile_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct intel_fpga_etile_eth_private *priv = netdev_priv(ndev); + + if (priv->ptp_enable) + intel_fpga_tod_unregister(&priv->ptp_priv); + + platform_set_drvdata(pdev, NULL); + unregister_netdev(ndev); + free_netdev(ndev); +} + +static const struct altera_dmaops altera_dtype_prefetcher = { + .altera_dtype = ALTERA_DTYPE_MSGDMA_PREF, + .dmamask = 64, + .reset_dma = msgdma_pref_reset, + .enable_txirq = msgdma_pref_enable_txirq, + .enable_rxirq = msgdma_pref_enable_rxirq, + .disable_txirq = msgdma_pref_disable_txirq, + .disable_rxirq = msgdma_pref_disable_rxirq, + .clear_txirq = msgdma_pref_clear_txirq, + .clear_rxirq = msgdma_pref_clear_rxirq, + .tx_buffer = msgdma_pref_tx_buffer, + .tx_completions = msgdma_pref_tx_completions, + .add_rx_desc = msgdma_pref_add_rx_desc, + .get_rx_status = msgdma_pref_rx_status, + .init_dma = msgdma_pref_initialize, + .uninit_dma = msgdma_pref_uninitialize, + .start_rxdma = msgdma_pref_start_rxdma, + .start_txdma = msgdma_pref_start_txdma, +}; + +static const struct of_device_id intel_fpga_etile_ll_ids[] = { + { .compatible = "altr,etile-msgdma-2.0", + .data = &altera_dtype_prefetcher, }, + {}, +}; +MODULE_DEVICE_TABLE(of, intel_fpga_etile_ll_ids); + +static struct platform_driver intel_fpga_etile_driver = { + .probe = intel_fpga_etile_probe, + .remove = intel_fpga_etile_remove, + .suspend = NULL, + .resume = NULL, + .driver = { + .name = INTEL_FPGA_ETILE_ETH_RESOURCE_NAME, + .owner = THIS_MODULE, + .of_match_table = intel_fpga_etile_ll_ids, + }, +}; + +module_platform_driver(intel_fpga_etile_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Altera E-tile MAC driver"); 
+MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/altera/intel_fpga_qse_ll.h b/drivers/net/ethernet/altera/intel_fpga_qse_ll.h new file mode 100644 index 0000000000000..c5ae459fe7c5a --- /dev/null +++ b/drivers/net/ethernet/altera/intel_fpga_qse_ll.h @@ -0,0 +1,649 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Intel FPGA Quad-Speed Ethernet MAC driver + * Copyright (C) 2019 Intel Corporation. All rights reserved. + * + * Contributors: + * Roman Bulgakov + * Yu Ying Choo + * Joyce Ooi + * + * Original driver contributed by GlobalLogic. + */ + +#ifndef __INTEL_FPGA_QSE_LL_H__ +#define __INTEL_FPGA_QSE_LL_H__ + +#define INTEL_FPGA_QSE_LL_RESOURCE_NAME "intel_fpga_qse_ll" + +#include +#include +#include +#include +#include +#include +#include "intel_fpga_tod.h" + +#define INTEL_FPGA_QSE_SW_RESET_WATCHDOG_CNTR 10000 + +/* 10G MAC Reset Control registers bit definitions + */ +#define MAC_TX_RESET BIT(0) +#define MAC_RX_RESET BIT(8) + +/* 10G MAC TX Config and Control registers bit definitions + */ +#define MAC_TX_TRANSFER_CTL_TX_DISABLE BIT(0) + +/* 10G MAC TX Packet Transfer registers bit definitions + */ +#define MAC_TX_TRANSFER_STATUS_BUSY BIT(8) +#define MAC_TX_TRANSFER_STATUS_RST BIT(12) + +/* TX Pad Inserter register bit definitions + */ +#define MAC_TX_PADINS_CTL_ENA BIT(0) + +/* TX CRC Inserter register bit definitions + */ +#define MAC_TX_CRCINS_CTL_ALWAYS BIT(0) +#define MAC_TX_CRCINS_CTL_ENA BIT(1) + +#define MAX_TX_PREAMBLE_CTL_ENA BIT(0) + +/* TX Address Inserter register bit definitions + */ +#define MAC_TX_ADDRINS_CTL_ENA BIT(0) + +/* TX MAX Length register bit definitions + */ +#define MAC_TX_FRM_MAX_LENGTH 0xffff + +/* TX VLAN Detection register bit definitions + */ +#define MAC_TX_VLAN_DETECT_ENA BIT(0) + +/* TX IPG 10G Average len register bit definitions + * 0 -> 8 bytes + * 1 -> 12 bytes + */ +#define MAC_TX_IPG_10G_LEN BIT(0) + +/* TX IPG 1000/100/10 Average len register bit + * definitions + * Should be between 8 and 15 + */ +#define 
MAC_TX_IPG_LEN 0xffff + +/* TX Pausefreame control register bit definitions + */ +#define MAC_TX_PAUSEFRAME_CTL_DISABLE 0x0 +#define MAC_TX_PAUSEFRAME_CTL_XON_ENA 0x1 +#define MAC_TX_PAUSEFRAME_CTL_XOFF_ENA 0x2 + +#define MAC_TX_PAUSEFRAME_QUANTA 0xffff + +#define MAC_TX_PAUSEFRAME_HOLDOFF_QUANTA 0xffff + +/* TX Pauseframe enable register bit definitions + */ +#define MAC_TX_PAUSEFRAME_ENA BIT(0) +#define MAC_TX_PAUSEFRAME_REQ_CFG BIT(1) + +/* TX Priority base flow control register bit definitions + */ +#define MAC_TX_PFC_PRIO_ENA_0 BIT(0) +#define MAC_TX_PFC_PRIO_ENA_1 BIT(1) +#define MAC_TX_PFC_PRIO_ENA_2 BIT(2) +#define MAC_TX_PFC_PRIO_ENA_3 BIT(3) +#define MAC_TX_PFC_PRIO_ENA_4 BIT(4) +#define MAC_TX_PFC_PRIO_ENA_5 BIT(5) +#define MAC_TX_PFC_PRIO_ENA_6 BIT(6) +#define MAC_TX_PFC_PRIO_ENA_7 BIT(7) + +/* TX unidirectional control register bit definitions + */ +#define MAX_TX_UNDIR_ENA BIT(0) + +/* 10G MAC RX Config and Control registers bit definitions + */ +#define MAC_RX_TRANSFER_CTL_RX_DISABLE BIT(0) + +/* 10G MAC RX Packet Transfer registers bit definitions + */ +#define MAC_RX_TRANSFER_STATUS_BUSY BIT(8) +#define MAC_RX_TRANSFER_STATUS_RST BIT(12) + +/* RX Pad/CRC Remover register bit definitions + * 10 is reserved, only 00, 01, 11 allowed + */ +#define MAC_RX_PADCRC_CTL_CRC_REMOVE BIT(0) +#define MAC_RX_PADCRC_CTL_ALL_REMOVE BIT(1) + +/* RX CRC Checker register bit definitions + */ +#define MAC_RX_CRCCHECK_CTL_ENA BIT(1) + +/* RX Custom Preamble register bit definitions + */ +#define MAC_RX_CUST_PREAMBLE_FWD_EN BIT(0) + +/* RX Preamble register bit definitions + */ +#define MAC_RX_PREAMBLE_FWD_EN BIT(0) + +/* RX frame control register bit definitions + */ +#define MAC_RX_FRM_CTL_EN_ALLUCAST BIT(0) +#define MAC_RX_FRM_CTL_EN_ALLMCAST BIT(1) +#define MAC_RX_FRM_CTL_FWD_CONTROL BIT(3) +#define MAC_RX_FRM_CTL_FWD_PAUSE BIT(4) +#define MAC_RX_FRM_CTL_IGNORE_PAUSE BIT(5) +#define MAC_RX_FRM_CTL_EN_SUPP0 BIT(16) +#define MAC_RX_FRM_CTL_EN_SUPP1 BIT(17) +#define 
MAC_RX_FRM_CTL_EN_SUPP2 BIT(18) +#define MAC_RX_FRM_CTL_EN_SUPP3 BIT(19) + +/* RX MAX Length register bit definitions + */ +#define MAC_RX_FRM_MAX_LENGTH 0xffff + +/* RX VLAN Detection register bit definitions + */ +#define MAC_RX_VLAN_DETECT_ENA BIT(0) + +/* RX Priority base flow control register bit definitions + */ +#define MAC_RX_PFC_CTL_PRIO_ENA_0 BIT(0) +#define MAC_RX_PFC_CTL_PRIO_ENA_1 BIT(1) +#define MAC_RX_PFC_CTL_PRIO_ENA_2 BIT(2) +#define MAC_RX_PFC_CTL_PRIO_ENA_3 BIT(3) +#define MAC_RX_PFC_CTL_PRIO_ENA_4 BIT(4) +#define MAC_RX_PFC_CTL_PRIO_ENA_5 BIT(5) +#define MAC_RX_PFC_CTL_PRIO_ENA_6 BIT(6) +#define MAC_RX_PFC_CTL_PRIO_ENA_7 BIT(7) +#define MAC_RX_PFC_CTL_CTL_FRM_FWD_ENA BIT(16) + +/* Timestamp TX Assymetry register bit definitions + */ +#define MAC_TIMESTAMP_TX_ASSYMETRY_VAL 0x1ffff +#define MAC_TIMESTAMP_TX_ASSYMETRY_DIR BIT(17) + +/* MAC ECC Enable and Status register bit definitions + * Enables external signalling of det / corr of ECC + * errors + */ +#define MAC_ECC_ERR_DET_CORR_ENA BIT(0) +#define MAC_ECC_ERR_DET_UNCORR_ENA BIT(1) +#define MAC_ECC_STATUS_DET_CORR BIT(0) +#define MAC_ECC_STATUS_DET_UNCORR BIT(1) + +/* Statistics counters control register bit definitions + */ +#define MAC_STATS_CTL_CLEAR BIT(0) + +/* Flow Control defines */ +#define FLOW_OFF 0 +#define FLOW_RX 1 +#define FLOW_TX 2 +#define FLOW_ON (FLOW_TX | FLOW_RX) + +/* PHY Speed + */ +#define PHY_ETH_SPEED_1000 0x0 +#define PHY_ETH_SPEED_2500 0x1 +#define PHY_ETH_SPEED_10000 0x3 + +/* PHY Reconfiguration + */ +#define PHY_RECONFIG_BUSY BIT(0) +#define PHY_RECONFIG_START BIT(16) + +/* Timestamp Registers + */ +struct intel_fpga_qse_ll_timestamp_ctrl { + u32 period_10g; + u32 reserved1[1]; + u32 fns_adjustment_10g; + u32 reserved3[1]; + u32 ns_adjustment_10g; + u32 reserved_5[3]; + u32 period_mult_speed; + u32 reserved_9[1]; + u32 fns_adjustment_mult_speed; + u32 reserved_b[1]; + u32 ns_adjustment_mult_speed; + u32 reserved_d[3]; +}; + +/* Altera 10G Ethernet MAC RX/TX 
statistics counters within MAC register space. + * To read the statistic counters, read the LSB before reading the MSB. + */ +struct intel_fpga_qse_ll_mac_stats { + u32 clear; + u32 reserved_1[0x1]; + u32 frames_ok_lsb; + u32 frames_ok_msb; + u32 frames_err_lsb; + u32 frames_err_msb; + u32 frames_crc_err_lsb; + u32 frames_crc_err_msb; + u32 octets_ok_lsb; + u32 octets_ok_msb; + u32 pause_mac_ctrl_frames_lsb; + u32 pause_mac_ctrl_frames_msb; + u32 if_errors_lsb; + u32 if_errors_msb; + u32 unicast_frames_ok_lsb; + u32 unicast_frames_ok_msb; + u32 unicast_frames_err_lsb; + u32 unicast_frames_err_msb; + u32 multicast_frames_ok_lsb; + u32 multicast_frames_ok_msb; + u32 multicast_frames_err_lsb; + u32 multicast_frames_err_msb; + u32 broadcast_frames_ok_lsb; + u32 broadcast_frames_ok_msb; + u32 broadcast_frames_err_lsb; + u32 broadcast_frames_err_msb; + u32 eth_stats_oct_lsb; + u32 eth_stats_oct_msb; + u32 eth_stats_pkts_lsb; + u32 eth_stats_pkts_msb; + u32 eth_stats_undersize_pkts_lsb; + u32 eth_stats_undersize_pkts_msb; + u32 eth_stats_oversize_pkts_lsb; + u32 eth_stats_oversize_pkts_msb; + u32 eth_stats_pkts_64_oct_lsb; + u32 eth_stats_pkts_64_oct_msb; + u32 eth_stats_pkts_65to127_oct_lsb; + u32 eth_stats_pkts_65to127_oct_msb; + u32 eth_stats_pkts_128to255_oct_lsb; + u32 eth_stats_pkts_128to255_oct_msb; + u32 eth_stats_pkts_256to511_oct_lsb; + u32 eth_stats_pkts_256to511_oct_msb; + u32 eth_stats_pkts_512to1023_oct_lsb; + u32 eth_stats_pkts_512to1023_oct_msb; + u32 eth_stats_pkts_1024to1518_oct_lsb; + u32 eth_stats_pkts_1024to1518_oct_msb; + u32 eth_stats_pkts_1519tox_oct_lsb; + u32 eth_stats_pkts_1519tox_oct_msb; + u32 eth_stats_fragments_lsb; + u32 eth_stats_fragments_msb; + u32 eth_stats_jabbers_lsb; + u32 eth_stats_jabbers_msb; + u32 eth_stats_crc_err_lsb; + u32 eth_stats_crc_err_msb; + u32 unicast_mac_ctrl_frames_lsb; + u32 unicast_mac_ctrl_frames_msb; + u32 multicast_mac_ctrl_frames_lsb; + u32 multicast_mac_ctrl_frames_msb; + u32 broadcast_mac_ctrl_frames_lsb; + 
u32 broadcast_mac_ctrl_frames_msb; + u32 pfc_mac_ctrl_frames_lsb; + u32 pfc_mac_ctrl_frames_msb; +}; + +/* Altera 10G Low Latency Ethernet MAC register space. Note that some of these + * registers may or may not be present depending upon options chosen by the user + * when the core was configured and built. Please consult the Altera 10G MAC + * User Guide for details. + */ +struct intel_fpga_qse_ll_mac { + /* Reserved 0x0 to 0xf words */ + u32 reserved_0[16]; + /* 32-bit primary MAC address word 0 bits 0 to 31 of the primary + * MAC address + */ + u32 primary_mac_addr0; //0x10 + /* 32-bit primary MAC address word 1 bits 32 to 47 of the primary + * MAC address + */ + u32 primary_mac_addr1; //0x11 + /* Reserved 0x12 to 0x1E words */ + u32 reserved_12[13]; + /* TX and RX Datapath reset control. TX: bit 0 RX bit 8 */ + u32 mac_reset_control; //0x1f + /* Enable / Disable TX data path */ + u32 tx_packet_control; //0x20 + u32 reserved_21[1]; //0x21 + /* TX Datapath status, Idle: bit 8 Reset: bit 12 */ + u32 tx_transfer_status; //0x22 + u32 reserved_23[1]; //0x23 + /* Control pad insertion to ensure minimum packet size is met */ + u32 tx_pad_control; //0x24 + u32 reserved_25[1]; //0x25 + /* Enable CRC insertion into dataframe Note bit 1 should always be 1 */ + u32 tx_crc_control; //0x26 + u32 reserved_27[1]; //0x27 + /* Bypass control for preamble insertion into the data frame */ + u32 tx_preamble_control; //0x28 + u32 reserved_29[1]; //0x29 + /* Override source MAC address with address in primary_mac_address */ + u32 tx_src_addr_override; //0x2a + u32 reserved_2b[1]; //0x2b + /* MAX MTU */ + u32 tx_frame_maxlength; //0x2c + /*Disable VLAN Tag detection */ + u32 tx_vlan_detection; //0x2d + /* Set the average Inter Packet Gap for 10Gb 0: 8 bytes 1: 12 bytes */ + u32 tx_ipg_10g; //0x2e + /* IPG settings for 10/100/1000, set between 8-15 on bits[3:0] */ + u32 tx_ipg_10m_100m_1g; //0x2f + /* Reserved 0x30 to 0x3D words */ + u32 reserved_30[14]; + /* 36 bit counter for tx 
buffer underflow */ + u32 tx_underflow_counter0; //0x3e + u32 tx_underflow_counter1; //0x3f + /* Pauseframe trigger condition confifuration */ + u32 tx_pauseframe_control; //0x40 + u32 reserved_41[1]; //0x41 + /* Quana value used for XOFF generation */ + u32 tx_pauseframe_quanta; //0x42 + /* Gap between consecutive XOFF frames */ + u32 tx_pauseframe_holdoff_quanta; //0x43 + u32 tx_pauseframe_enable; //0x44 + u32 reserved_45[1]; //0x45 + u32 tx_pfc_priority_enable; //0x46 + u32 reserved_47[1]; //0x47 + /* Specifies pause quanta per queue */ + u32 pfc_pause_quanta_0; //0x48 + u32 pfc_pause_quanta_1; //0x49 + u32 pfc_pause_quanta_2; //0x4a + u32 pfc_pause_quanta_3; //0x4b + u32 pfc_pause_quanta_4; //0x4c + u32 pfc_pause_quanta_5; //0x4d + u32 pfc_pause_quanta_6; //0x4e + u32 pfc_pause_quanta_7; //0x4f + /* Reserved 0x50 to 0x57 words */ + u32 reserved_50[8]; + /* Specifies gap between XOFF pause frames per queue */ + u32 pfc_holdoff_quanta_0; //0x58 + u32 pfc_holdoff_quanta_1; //0x59 + u32 pfc_holdoff_quanta_2; //0x5a + u32 pfc_holdoff_quanta_3; //0x5b + u32 pfc_holdoff_quanta_4; //0x5c + u32 pfc_holdoff_quanta_5; //0x5d + u32 pfc_holdoff_quanta_6; //0x5e + u32 pfc_holdoff_quanta_7; //0x5f + /* Reserved 0x60 to 0x6f words */ + u32 reserved_60[16]; + /* unidirectional feature and fault control */ + u32 tx_unidir_control; //0x70 + /* Reserved 0x71 to 0x9f words */ + u32 reserved_71[47]; + /* Enable / Disable RX data path */ + u32 rx_transfer_control; //0xa0 + u32 reserved_a1[1]; //0xa1 + /* RX Datapath status, Idle: bit 8 Reset: bit 12 */ + u32 rx_transfer_status; //0xa2 + u32 reserved_a3[1]; //0xa3 + /* Padding and CRC removal on receive */ + u32 rx_padcrc_control; //0xa4 + u32 reserved_a5[1]; //0xa5 + /* Check CRC on receive */ + u32 rx_crccheck_control; //0xa6 + u32 reserved_a7[1]; //0xa7 + /* Enable forwarding of custom preable*/ + u32 rx_custom_preamble_forward; //0xa8 + u32 reserved_a9[1]; //0xa9 + /* Preamble passthru enable on receive */ + u32 
rx_preamble_control; //0xaa + u32 reserved_ab[1]; //0xab + /* Enable/Disable reception of UCAST MCAST and pause frames */ + u32 rx_frame_control; //0xac + u32 reserved_ad[1]; //0xad + /* Maximum allowable frame size */ + u32 rx_frame_maxlength; //0xae + /* Enable RX VLAN detection */ + u32 rx_vlan_detection; //0xaf + /* 4 supplementary RX MAC addresses */ + u32 rx_frame_spaddr0_0; //0xb0 + u32 rx_frame_spaddr0_1; //0xb1 + u32 rx_frame_spaddr1_0; //0xb2 + u32 rx_frame_spaddr1_1; //0xb3 + u32 rx_frame_spaddr2_0; //0xb4 + u32 rx_frame_spaddr2_1; //0xb5 + u32 rx_frame_spaddr3_0; //0xb6 + u32 rx_frame_spaddr3_1; //0xb7 + /* Reserved 0xB8 to 0xBF words */ + u32 reserved_b8[8]; + /* Enable priority based flow control */ + u32 rx_pfc_control; //0xc0 + /* Reserved 0xC1 to 0xFB words */ + u32 reserved_c1[59]; + /* RX packet overflow counter counts truncated pkts */ + u32 rx_pktovrflow_error0; //0xfc + u32 rx_pktovrflow_error1; //0xfd + /* RX packet overflow counter counts dropped pkts */ + u32 rx_pktovrflow_eth_stats_dropevents0; //0xfe + u32 rx_pktovrflow_eth_stats_dropevents1; //0xff + /* Timestamp Registers */ + /* TX 0x100 to 0x10f */ + struct intel_fpga_qse_ll_timestamp_ctrl tx_timestamp_ctrl; + /* Specifies value and direction of assymetric arithmetic operation */ + u32 tx_asymmetry; //0x110 + /* Reserved 0x111 to 0x11f words */ + u32 reserved_111[15]; + /* RX 0x120 to 0x12f */ + struct intel_fpga_qse_ll_timestamp_ctrl rx_timestamp_ctrl; + /* Reserved 0x130 to 0x13f words */ + u32 reserved_130[16]; + struct intel_fpga_qse_ll_mac_stats tx_stats; + /* Reserved 0x17e to 0x1bf words */ + u32 reserved_17e[66]; + struct intel_fpga_qse_ll_mac_stats rx_stats; + /* Reserved 0x1fe to 0x23f words */ + u32 reserved_1fe[66]; + /* ECC status indicates error and correction status */ + u32 ecc_status; //0x240 + /* ECC Enable */ + u32 ecc_enable; //0x241 +}; + +#define qse_csroffs(a) (offsetof(struct intel_fpga_qse_ll_mac, a)) +#define qse_rxstat_csroffs(a) \ + (offsetof(struct 
intel_fpga_qse_ll_mac, rx_stats.a)) +#define qse_txstat_csroffs(a) \ + (offsetof(struct intel_fpga_qse_ll_mac, tx_stats.a)) +#define qse_rx_ts_csroffs(a) \ + (offsetof(struct intel_fpga_qse_ll_mac, rx_timestamp_ctrl.a)) +#define qse_tx_ts_csroffs(a) \ + (offsetof(struct intel_fpga_qse_ll_mac, tx_timestamp_ctrl.a)) + +/* Altera 10GBASE-KR register space. + */ +struct arria10_10gbase_kr { // word addresses + u32 hssi_regs[4096]; // 0x0 to 0x3ff + u32 reserved_400[68]; // 0x400 to 0x443 + + /* PMA Regs */ + u32 pma_reset; // 0x444 + u32 reserved_445[28]; // 0x445 to 0x460 + u32 phy_serial_loopback; // 0x461 + u32 reserved[2]; // 0x462 to 0x463 + u32 pma_rx_set_locktodata; // 0x464 + u32 pma_rx_set_locktoref; // 0x465 + u32 pma_rx_is_lockedtodata; // 0x466 + u32 pma_rx_is_lockedtoref; // 0x467 + u32 reserved_468[24]; // 0x468 to 0x479 + + /* Enhanced PCS Regs */ + u32 pcs_indirect_addr; // 0x480 + u32 pcs_rclr_error_count; // 0x481 + u32 pcs_status; // 0x482 + u32 reserved_483[13]; // 0x483 to 0x48f + u32 pcs_control_1g; // 0x490 + u32 pcs_status_1g; // 0x491 + u32 pcs_dev_ability_1g; // 0x492 + u32 reserved_493[21]; // 0x493 to 0x4a7 + + /* 1G Data mode */ + u32 pma_electrical_setting; // 0x4a8 + u32 pma_status; // 0x4a9 + u32 reserved_4aa[6]; // 0x4aa to 0x4af + + /* KR Registers */ + u32 seq_control; // 0x4b0 + u32 seq_status; // 0x4b1 + u32 kr_fec_tx_error_insert; // 0x4b2 + u32 reserved_4b3[2]; // 0x4b3 to 0x4b4 + u32 reserved_40g[10]; // 0x4b5 to 0x4bf + u32 an_control; // 0x4c0 + u32 an_control_expand; // 0x4c1 + u32 an_status; // 0x4c2 + u32 an_base_page_regs0; // 0x4c3 + u32 an_base_page_regs1; // 0x4c4 + u32 an_base_page_regs2; // 0x4c5 + u32 an_base_page_regs3; // 0x4c6 + u32 an_base_page_regs4; // 0x4c7 + u32 an_base_page_regs5; // 0x4c8 + u32 an_base_page_regs6; // 0x4c9 + u32 an_base_page_regs7; // 0x4ca + u32 an_received_ability; // 0x4cb + u32 reserve_4cc[4]; // 0x4cc to 0x4cf + u32 link_train_control; // 0x4d0 + u32 link_train_control_ext; // 0x4d1 + 
u32 link_train_status; // 0x4d2 + u32 ber_time; // 0x4d3 + u32 ld_status; // 0x4d4 + u32 lt_setting; // 0x4d5 + u32 pma_settings; // 0x4d6 + u32 reserved_40g_ext[10]; // 0x4d7 to 0x4ff +}; + +#define arria10_10gbase_kr_csroffs(a) (offsetof(struct arria10_10gbase_kr, a)) + +/* PHY Reconfiguration Controller Address Space + */ +struct intel_fpga_phy_reconfig { + u32 logical_chan_num; //0x0 + u32 speed_reconfig; //0x4 + u32 reconfig_busy; //0x8 +}; + +#define phy_csroffs(a) (offsetof(struct intel_fpga_phy_reconfig, a)) + +/* RX FIFO Address Space + */ +struct intel_fpga_rx_fifo { + u32 fill_level; //0x00 + u32 reserved; //0x04 + u32 almost_full_threshold; //0x08 + u32 almost_empty_threshold; //0x0C + u32 cut_through_threshold; //0x10 + u32 drop_on_error; //0x14 +}; + +#define rx_fifo_csroffs(a) (offsetof(struct intel_fpga_rx_fifo, a)) + +/* Altera Stratix Multi-rate Ethernet PHY register space. + */ +struct stratix10_usxgmii_addr { + u32 usxgmii_ctrl; // USXGMII Reg Control 0x400 + u32 usxgmii_status; // USXGMII Reg Status 0x401 + u32 reserved_402[3]; // Reserved 0x402:0x404 + u32 usxgmii_partner_ability; // USXGMII Reg Partner Ability 0x405 + u32 reserved_406[5]; // Reserved 0x406:0x411 + u32 usxgmii_link_timer; // USXGMII link timer 0x412 + u32 reserved_413[78]; // Reserved 0x413:0x460 + u32 pma_rx_is_lockedtodata; // USXGMII Serial Loopback 0x461 +}; + +struct intel_fpga_qse_private { + struct net_device *dev; + struct device *device; + struct napi_struct napi; + + /* Phylink */ + struct phylink *phylink; + struct phylink_config phylink_config; + + /* MAC address space */ + struct intel_fpga_qse_ll_mac __iomem *mac_dev; + + /* Shared DMA structure */ + struct altera_dma_private dma_priv; + + /* Shared PTP structure */ + struct intel_fpga_tod_private ptp_priv; + u32 ptp_enable; + + /* FIFO address space */ + struct intel_fpga_rx_fifo __iomem *rx_fifo; + + /* PHY transceiver (XCVR) address space */ + void __iomem *xcvr_ctrl; + + /* PHY reconfiguration controller 
address space */ + struct intel_fpga_phy_reconfig __iomem *phy_reconfig_csr; + + /* Interrupts */ + u32 tx_irq; + u32 rx_irq; + + /* RX/TX MAC FIFO configs */ + u32 tx_fifo_depth; + u32 rx_fifo_depth; + u32 rx_fifo_almost_full; + u32 rx_fifo_almost_empty; + u32 max_mtu; + + /* Hash filter settings */ + u32 hash_filter; + u32 added_unicast; + + /* MAC command_config register protection */ + spinlock_t mac_cfg_lock; + + /* Tx path protection */ + spinlock_t tx_lock; + + /* Rx DMA & interrupt control protection */ + spinlock_t rxdma_irq_lock; + + /* MAC flow control */ + unsigned int flow_ctrl; + unsigned int pause; + + /* PMA digital delay */ + u32 tx_pma_delay_ns; + u32 rx_pma_delay_ns; + u32 tx_pma_delay_fns; + u32 rx_pma_delay_fns; + + /* PHY */ + void __iomem *mac_extra_control; + int phy_addr; /* PHY's MDIO address, -1 for autodetection */ + phy_interface_t phy_iface; + struct mii_bus *mdio; + int oldspeed; + int oldduplex; + int oldlink; + + /* ethtool msglvl option */ + u32 msg_enable; + struct altera_dmaops *dmaops; +}; + +/* XCVR 10GBASE-R registers bit definitions + */ +#define XCVR_10GBASE_R_CHANNEL 0 +#define XCVR_10GBASE_R_RX_DATA_READY BIT(7) + +/* Function prototypes + */ +void intel_fpga_qse_ll_set_ethtool_ops(struct net_device *dev); + +#ifdef CONFIG_INTEL_FPGA_QSE_DEBUG_FS +int intel_fpga_qse_ll_init_fs(struct net_device *dev); +void intel_fpga_qse_ll_exit_fs(struct net_device *dev); +#else +static inline int intel_fpga_qse_ll_init_fs(struct net_device *dev) +{ + return 0; +} + +static inline void intel_fpga_qse_ll_exit_fs(struct net_device *dev) {} +#endif /* CONFIG_INTEL_FPGA_QSE_DEBUG_FS */ + +#endif /* __INTEL_FPGA_QSE_LL_H__ */ diff --git a/drivers/net/ethernet/altera/intel_fpga_qse_ll_ethtool.c b/drivers/net/ethernet/altera/intel_fpga_qse_ll_ethtool.c new file mode 100644 index 0000000000000..2cfb40e6f524c --- /dev/null +++ b/drivers/net/ethernet/altera/intel_fpga_qse_ll_ethtool.c @@ -0,0 +1,788 @@ +// SPDX-License-Identifier: GPL-2.0 +/* 
Ethtool support for Intel FPGA Quad-Speed Ethernet MAC driver + * Copyright (C) 2019 Intel Corporation. All rights reserved + * + * Contributors: + * Roman Bulgakov + * Yu Ying Choo + * Dalon Westergreen + * Joyce Ooi + * + * Original driver contributed by GlobalLogic. + */ + +#include +#include +#include +#include +#include +#include + +#include "altera_eth_dma.h" +#include "intel_fpga_qse_ll.h" +#include "altera_utils.h" + +#define QSE_STATS_LEN ARRAY_SIZE(stat_gstrings) +#define QSE_NUM_REGS 196 + +static char const stat_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "rx_crc_errors", + "tx_bytes", + "rx_bytes", + "tx_pause", + "rx_pause", + "rx_errors", + "tx_errors", + "rx_unicast", + "rx_multicast", + "rx_broadcast", + "tx_unicast", + "tx_multicast", + "tx_broadcast", + "ether_drops", + "rx_total_bytes", + "rx_total_packets", + "rx_undersize", + "rx_oversize", + "rx_64_bytes", + "rx_65_127_bytes", + "rx_128_255_bytes", + "rx_256_511_bytes", + "rx_512_1023_bytes", + "rx_1024_1518_bytes", + "rx_gte_1519_bytes", + "rx_jabbers", + "rx_runts", +}; + +static void qse_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, "intel_fpga_qse_ll", ETH_GSTRING_LEN); + strscpy(info->version, "v1.0", ETH_GSTRING_LEN); + strscpy(info->bus_info, "platform", ETH_GSTRING_LEN); +} + +/* Fill in a buffer with the strings which correspond to the + * stats + */ +static void qse_gstrings(struct net_device *dev, u32 stringset, u8 *buf) +{ + memcpy(buf, stat_gstrings, QSE_STATS_LEN * ETH_GSTRING_LEN); +} + +static void qse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, + u64 *buf) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + u32 lsb; + u32 msb; + + /* aFramesTransmittedOK */ + lsb = csrrd32(priv->mac_dev, qse_txstat_csroffs(frames_ok_lsb)); + msb = csrrd32(priv->mac_dev, qse_txstat_csroffs(frames_ok_msb)); + buf[0] = ((u64)msb << 32) | lsb; + + /* aFramesReceivedOK */ + lsb = 
csrrd32(priv->mac_dev, qse_rxstat_csroffs(frames_ok_lsb)); + msb = csrrd32(priv->mac_dev, qse_rxstat_csroffs(frames_ok_msb)); + buf[1] = ((u64)msb << 32) | lsb; + + /* aFrameCheckSequenceErrors */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(frames_crc_err_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(frames_crc_err_msb)); + buf[2] = ((u64)msb << 32) | lsb; + + /* aOctetsTransmittedOK */ + lsb = csrrd32(priv->mac_dev, qse_txstat_csroffs(octets_ok_lsb)); + msb = csrrd32(priv->mac_dev, qse_txstat_csroffs(octets_ok_msb)); + buf[3] = ((u64)msb << 32) | lsb; + + /* aOctetsReceivedOK */ + lsb = csrrd32(priv->mac_dev, qse_rxstat_csroffs(octets_ok_lsb)); + msb = csrrd32(priv->mac_dev, qse_rxstat_csroffs(octets_ok_msb)); + buf[4] = ((u64)msb << 32) | lsb; + + /* aPAUSEMACCtrlFramesTransmitted */ + lsb = csrrd32(priv->mac_dev, + qse_txstat_csroffs(pause_mac_ctrl_frames_lsb)); + msb = csrrd32(priv->mac_dev, + qse_txstat_csroffs(pause_mac_ctrl_frames_msb)); + buf[5] = ((u64)msb << 32) | lsb; + + /* aPAUSEMACCtrlFramesReceived */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(pause_mac_ctrl_frames_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(pause_mac_ctrl_frames_msb)); + buf[6] = ((u64)msb << 32) | lsb; + + /* ifInErrors */ + lsb = csrrd32(priv->mac_dev, qse_rxstat_csroffs(if_errors_lsb)); + msb = csrrd32(priv->mac_dev, qse_rxstat_csroffs(if_errors_msb)); + buf[7] = ((u64)msb << 32) | lsb; + + /* ifOutErrors */ + lsb = csrrd32(priv->mac_dev, qse_txstat_csroffs(if_errors_lsb)); + msb = csrrd32(priv->mac_dev, qse_txstat_csroffs(if_errors_msb)); + buf[8] = ((u64)msb << 32) | lsb; + + /* ifInUcastPkts */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(unicast_frames_ok_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(unicast_frames_ok_msb)); + buf[9] = ((u64)msb << 32) | lsb; + + /* ifInMulticastPkts */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(multicast_frames_ok_lsb)); + msb = csrrd32(priv->mac_dev, + 
qse_rxstat_csroffs(multicast_frames_ok_msb)); + buf[10] = ((u64)msb << 32) | lsb; + + /* ifInBroadcastPkts */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(broadcast_frames_ok_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(broadcast_frames_ok_msb)); + buf[11] = ((u64)msb << 32) | lsb; + + /* ifOutUcastPkts */ + lsb = csrrd32(priv->mac_dev, + qse_txstat_csroffs(unicast_frames_ok_lsb)); + msb = csrrd32(priv->mac_dev, + qse_txstat_csroffs(unicast_frames_ok_msb)); + buf[12] = ((u64)msb << 32) | lsb; + + /* ifOutMulticastPkts */ + lsb = csrrd32(priv->mac_dev, + qse_txstat_csroffs(multicast_frames_ok_lsb)); + msb = csrrd32(priv->mac_dev, + qse_txstat_csroffs(multicast_frames_ok_msb)); + buf[13] = ((u64)msb << 32) | lsb; + + /* ifOutBroadcastPkts */ + lsb = csrrd32(priv->mac_dev, + qse_txstat_csroffs(broadcast_frames_ok_lsb)); + msb = csrrd32(priv->mac_dev, + qse_txstat_csroffs(broadcast_frames_ok_msb)); + buf[14] = ((u64)msb << 32) | lsb; + + /* etherStatsDropEvents */ + lsb = csrrd32(priv->mac_dev, + qse_csroffs(rx_pktovrflow_eth_stats_dropevents0)); + msb = csrrd32(priv->mac_dev, + qse_csroffs(rx_pktovrflow_eth_stats_dropevents1)); + buf[15] = ((u64)msb << 32) | lsb; + + /* etherStatsOctets */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_oct_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_oct_msb)); + buf[16] = ((u64)msb << 32) | lsb; + + /* etherStatsPkts */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_msb)); + buf[17] = ((u64)msb << 32) | lsb; + + /* etherStatsUndersizePkts */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_undersize_pkts_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_undersize_pkts_msb)); + buf[18] = ((u64)msb << 32) | lsb; + + /* etherStatsOversizePkts */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_oversize_pkts_lsb)); + msb = 
csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_oversize_pkts_msb)); + buf[19] = ((u64)msb << 32) | lsb; + + /* etherStatsPkts64Octets */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_64_oct_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_64_oct_msb)); + buf[20] = ((u64)msb << 32) | lsb; + + /* etherStatsPkts65to127Octets */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_65to127_oct_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_65to127_oct_msb)); + buf[21] = ((u64)msb << 32) | lsb; + + /* etherStatsPkts128to255Octets */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_128to255_oct_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_128to255_oct_msb)); + buf[22] = ((u64)msb << 32) | lsb; + + /* etherStatsPkts256to511Octets */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_256to511_oct_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_256to511_oct_msb)); + buf[23] = ((u64)msb << 32) | lsb; + + /* etherStatsPkts512to1023Octets */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_512to1023_oct_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_512to1023_oct_msb)); + buf[24] = ((u64)msb << 32) | lsb; + + /* etherStatsPkts1024to1518Octets */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_1024to1518_oct_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_1024to1518_oct_msb)); + buf[25] = ((u64)msb << 32) | lsb; + + /* This statistics counts the number of received good and errored + * frames between the length of 1519 and the maximum frame length + * configured in the rx_frame_maxlength register. 
+ */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_1519tox_oct_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_1519tox_oct_msb)); + buf[26] = ((u64)msb << 32) | lsb; + + /* etherStatsJabbers */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_jabbers_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_jabbers_msb)); + buf[27] = ((u64)msb << 32) | lsb; + + /* etherStatsFragments */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_fragments_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_fragments_msb)); + buf[28] = ((u64)msb << 32) | lsb; +} + +static int qse_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return QSE_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static u32 qse_get_msglevel(struct net_device *dev) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void qse_set_msglevel(struct net_device *dev, uint32_t data) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + + priv->msg_enable = data; +} + +static int qse_reglen(struct net_device *dev) +{ + return QSE_NUM_REGS * sizeof(u32); +} + +static void qse_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *regbuf) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + u32 *buf = regbuf; + + /* Set version to a known value, so ethtool knows + * how to do any special formatting of this data. + * This version number will need to change if and + * when this register table is changed. + * + * version[31:0] = 1: Dump the 10GbE MAC IP Registers + * Upper bits are all 0 by default + * + * Upper 16-bits will indicate feature presence for + * Ethtool register decoding in future version. 
+ */ + + regs->version = 1; + + buf[0] = csrrd32(priv->mac_dev, qse_csroffs(primary_mac_addr0)); + buf[1] = csrrd32(priv->mac_dev, qse_csroffs(primary_mac_addr1)); + buf[2] = csrrd32(priv->mac_dev, qse_csroffs(mac_reset_control)); + buf[3] = csrrd32(priv->mac_dev, qse_csroffs(tx_packet_control)); + buf[4] = csrrd32(priv->mac_dev, qse_csroffs(tx_transfer_status)); + + buf[5] = csrrd32(priv->mac_dev, qse_csroffs(tx_pad_control)); + buf[6] = csrrd32(priv->mac_dev, qse_csroffs(tx_crc_control)); + buf[7] = csrrd32(priv->mac_dev, qse_csroffs(tx_preamble_control)); + buf[8] = csrrd32(priv->mac_dev, qse_csroffs(tx_src_addr_override)); + buf[9] = csrrd32(priv->mac_dev, qse_csroffs(tx_frame_maxlength)); + + buf[10] = csrrd32(priv->mac_dev, qse_csroffs(tx_vlan_detection)); + buf[11] = csrrd32(priv->mac_dev, qse_csroffs(tx_ipg_10g)); + buf[12] = csrrd32(priv->mac_dev, qse_csroffs(tx_ipg_10m_100m_1g)); + buf[13] = csrrd32(priv->mac_dev, + qse_csroffs(tx_underflow_counter0)); + buf[14] = csrrd32(priv->mac_dev, + qse_csroffs(tx_underflow_counter1)); + + buf[15] = csrrd32(priv->mac_dev, qse_csroffs(tx_pauseframe_control)); + buf[16] = csrrd32(priv->mac_dev, qse_csroffs(tx_pauseframe_quanta)); + buf[17] = csrrd32(priv->mac_dev, + qse_csroffs(tx_pauseframe_holdoff_quanta)); + buf[18] = csrrd32(priv->mac_dev, qse_csroffs(tx_pauseframe_enable)); + buf[19] = csrrd32(priv->mac_dev, + qse_csroffs(tx_pfc_priority_enable)); + + buf[20] = csrrd32(priv->mac_dev, qse_csroffs(pfc_pause_quanta_0)); + buf[21] = csrrd32(priv->mac_dev, qse_csroffs(pfc_pause_quanta_1)); + buf[22] = csrrd32(priv->mac_dev, qse_csroffs(pfc_pause_quanta_2)); + buf[23] = csrrd32(priv->mac_dev, qse_csroffs(pfc_pause_quanta_3)); + buf[24] = csrrd32(priv->mac_dev, qse_csroffs(pfc_pause_quanta_4)); + + buf[25] = csrrd32(priv->mac_dev, qse_csroffs(pfc_pause_quanta_5)); + buf[26] = csrrd32(priv->mac_dev, qse_csroffs(pfc_pause_quanta_6)); + buf[27] = csrrd32(priv->mac_dev, qse_csroffs(pfc_pause_quanta_7)); + buf[28] = 
csrrd32(priv->mac_dev, qse_csroffs(pfc_holdoff_quanta_0)); + buf[29] = csrrd32(priv->mac_dev, qse_csroffs(pfc_holdoff_quanta_1)); + + buf[30] = csrrd32(priv->mac_dev, qse_csroffs(pfc_holdoff_quanta_2)); + buf[31] = csrrd32(priv->mac_dev, qse_csroffs(pfc_holdoff_quanta_3)); + buf[32] = csrrd32(priv->mac_dev, qse_csroffs(pfc_holdoff_quanta_4)); + buf[33] = csrrd32(priv->mac_dev, qse_csroffs(pfc_holdoff_quanta_5)); + buf[34] = csrrd32(priv->mac_dev, qse_csroffs(pfc_holdoff_quanta_6)); + + buf[35] = csrrd32(priv->mac_dev, qse_csroffs(pfc_holdoff_quanta_7)); + buf[36] = csrrd32(priv->mac_dev, qse_csroffs(tx_unidir_control)); + buf[37] = csrrd32(priv->mac_dev, qse_csroffs(rx_transfer_control)); + buf[38] = csrrd32(priv->mac_dev, qse_csroffs(rx_transfer_status)); + buf[39] = csrrd32(priv->mac_dev, qse_csroffs(rx_padcrc_control)); + + buf[40] = csrrd32(priv->mac_dev, qse_csroffs(rx_crccheck_control)); + buf[41] = csrrd32(priv->mac_dev, + qse_csroffs(rx_custom_preamble_forward)); + buf[42] = csrrd32(priv->mac_dev, qse_csroffs(rx_preamble_control)); + buf[43] = csrrd32(priv->mac_dev, qse_csroffs(rx_frame_control)); + buf[44] = csrrd32(priv->mac_dev, qse_csroffs(rx_frame_maxlength)); + + buf[45] = csrrd32(priv->mac_dev, qse_csroffs(rx_vlan_detection)); + buf[46] = csrrd32(priv->mac_dev, qse_csroffs(rx_frame_spaddr0_0)); + buf[47] = csrrd32(priv->mac_dev, qse_csroffs(rx_frame_spaddr0_1)); + buf[48] = csrrd32(priv->mac_dev, qse_csroffs(rx_frame_spaddr1_0)); + buf[49] = csrrd32(priv->mac_dev, qse_csroffs(rx_frame_spaddr1_1)); + + buf[50] = csrrd32(priv->mac_dev, qse_csroffs(rx_frame_spaddr2_0)); + buf[51] = csrrd32(priv->mac_dev, qse_csroffs(rx_frame_spaddr2_1)); + buf[52] = csrrd32(priv->mac_dev, qse_csroffs(rx_frame_spaddr3_0)); + buf[53] = csrrd32(priv->mac_dev, qse_csroffs(rx_frame_spaddr3_1)); + + buf[54] = csrrd32(priv->mac_dev, qse_csroffs(rx_pfc_control)); + buf[55] = csrrd32(priv->mac_dev, qse_csroffs(rx_pktovrflow_error0)); + buf[56] = csrrd32(priv->mac_dev, 
qse_csroffs(rx_pktovrflow_error1)); + buf[57] = csrrd32(priv->mac_dev, + qse_csroffs(rx_pktovrflow_eth_stats_dropevents0)); + buf[58] = csrrd32(priv->mac_dev, + qse_csroffs(rx_pktovrflow_eth_stats_dropevents1)); + + /* TX Timestamp */ + buf[59] = csrrd32(priv->mac_dev, qse_tx_ts_csroffs(period_10g)); + buf[60] = csrrd32(priv->mac_dev, + qse_tx_ts_csroffs(fns_adjustment_10g)); + buf[61] = csrrd32(priv->mac_dev, qse_tx_ts_csroffs(ns_adjustment_10g)); + buf[62] = csrrd32(priv->mac_dev, qse_tx_ts_csroffs(period_mult_speed)); + buf[63] = csrrd32(priv->mac_dev, + qse_tx_ts_csroffs(fns_adjustment_mult_speed)); + buf[64] = csrrd32(priv->mac_dev, + qse_tx_ts_csroffs(ns_adjustment_mult_speed)); + buf[65] = csrrd32(priv->mac_dev, qse_csroffs(tx_asymmetry)); + + /* RX Timestamp */ + buf[66] = csrrd32(priv->mac_dev, qse_rx_ts_csroffs(period_10g)); + buf[67] = csrrd32(priv->mac_dev, + qse_rx_ts_csroffs(fns_adjustment_10g)); + buf[68] = csrrd32(priv->mac_dev, qse_rx_ts_csroffs(ns_adjustment_10g)); + buf[69] = csrrd32(priv->mac_dev, qse_rx_ts_csroffs(period_mult_speed)); + buf[70] = csrrd32(priv->mac_dev, + qse_rx_ts_csroffs(fns_adjustment_mult_speed)); + buf[71] = csrrd32(priv->mac_dev, + qse_rx_ts_csroffs(ns_adjustment_mult_speed)); + + /* TX Stats */ + buf[72] = csrrd32(priv->mac_dev, qse_txstat_csroffs(clear)); + buf[73] = csrrd32(priv->mac_dev, qse_txstat_csroffs(frames_ok_lsb)); + buf[74] = csrrd32(priv->mac_dev, qse_txstat_csroffs(frames_ok_msb)); + buf[75] = csrrd32(priv->mac_dev, qse_txstat_csroffs(frames_err_lsb)); + buf[76] = csrrd32(priv->mac_dev, qse_txstat_csroffs(frames_err_msb)); + buf[77] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(frames_crc_err_lsb)); + buf[78] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(frames_crc_err_msb)); + buf[79] = csrrd32(priv->mac_dev, qse_txstat_csroffs(octets_ok_lsb)); + buf[80] = csrrd32(priv->mac_dev, qse_txstat_csroffs(octets_ok_msb)); + buf[81] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(pause_mac_ctrl_frames_lsb)); + buf[82] 
= csrrd32(priv->mac_dev, + qse_txstat_csroffs(pause_mac_ctrl_frames_msb)); + buf[83] = csrrd32(priv->mac_dev, qse_txstat_csroffs(if_errors_lsb)); + buf[84] = csrrd32(priv->mac_dev, qse_txstat_csroffs(if_errors_msb)); + buf[85] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(unicast_frames_ok_lsb)); + buf[86] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(unicast_frames_ok_msb)); + buf[87] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(unicast_frames_err_lsb)); + buf[88] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(unicast_frames_err_msb)); + buf[89] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(multicast_frames_ok_lsb)); + buf[90] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(multicast_frames_ok_msb)); + buf[91] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(multicast_frames_err_lsb)); + buf[92] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(multicast_frames_err_msb)); + buf[93] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(broadcast_frames_ok_lsb)); + buf[94] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(broadcast_frames_ok_msb)); + buf[95] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(broadcast_frames_err_lsb)); + buf[96] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(broadcast_frames_err_msb)); + buf[97] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(eth_stats_oct_lsb)); + buf[98] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(eth_stats_oct_msb)); + buf[99] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(eth_stats_pkts_lsb)); + buf[100] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(eth_stats_pkts_msb)); + buf[101] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(eth_stats_undersize_pkts_lsb)); + buf[102] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(eth_stats_undersize_pkts_msb)); + buf[103] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(eth_stats_oversize_pkts_lsb)); + buf[104] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(eth_stats_oversize_pkts_msb)); + buf[105] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(eth_stats_pkts_64_oct_lsb)); + buf[106] = csrrd32(priv->mac_dev, + 
qse_txstat_csroffs(eth_stats_pkts_64_oct_msb));
+	buf[107] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_pkts_65to127_oct_lsb));
+	buf[108] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_pkts_65to127_oct_msb));
+	buf[109] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_pkts_128to255_oct_lsb));
+	/* was buf[100]: typo that clobbered the eth_stats_pkts_lsb slot and
+	 * left index 110 uninitialized in the register dump
+	 */
+	buf[110] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_pkts_128to255_oct_msb));
+	buf[111] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_pkts_256to511_oct_lsb));
+	buf[112] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_pkts_256to511_oct_msb));
+	buf[113] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_pkts_512to1023_oct_lsb));
+	buf[114] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_pkts_512to1023_oct_msb));
+	buf[115] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_pkts_1024to1518_oct_lsb));
+	buf[116] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_pkts_1024to1518_oct_msb));
+	buf[117] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_pkts_1519tox_oct_lsb));
+	buf[118] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_pkts_1519tox_oct_msb));
+	buf[119] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_fragments_lsb));
+	buf[120] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_fragments_msb));
+	buf[121] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_jabbers_lsb));
+	buf[122] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_jabbers_msb));
+	buf[123] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_crc_err_lsb));
+	buf[124] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(eth_stats_crc_err_msb));
+	buf[125] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(unicast_mac_ctrl_frames_lsb));
+	buf[126] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(unicast_mac_ctrl_frames_msb));
+	buf[127] = csrrd32(priv->mac_dev,
+			   qse_txstat_csroffs(multicast_mac_ctrl_frames_lsb));
+	buf[128] = csrrd32(priv->mac_dev,
+
qse_txstat_csroffs(multicast_mac_ctrl_frames_msb)); + buf[129] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(broadcast_mac_ctrl_frames_lsb)); + buf[130] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(broadcast_mac_ctrl_frames_msb)); + buf[131] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(pfc_mac_ctrl_frames_lsb)); + buf[132] = csrrd32(priv->mac_dev, + qse_txstat_csroffs(pfc_mac_ctrl_frames_msb)); + + /* RX Stats */ + buf[133] = csrrd32(priv->mac_dev, qse_rxstat_csroffs(clear)); + buf[134] = csrrd32(priv->mac_dev, qse_rxstat_csroffs(frames_ok_lsb)); + buf[135] = csrrd32(priv->mac_dev, qse_rxstat_csroffs(frames_ok_msb)); + buf[136] = csrrd32(priv->mac_dev, qse_rxstat_csroffs(frames_err_lsb)); + buf[137] = csrrd32(priv->mac_dev, qse_rxstat_csroffs(frames_err_msb)); + buf[138] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(frames_crc_err_lsb)); + buf[139] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(frames_crc_err_msb)); + buf[140] = csrrd32(priv->mac_dev, qse_rxstat_csroffs(octets_ok_lsb)); + buf[141] = csrrd32(priv->mac_dev, qse_rxstat_csroffs(octets_ok_msb)); + buf[142] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(pause_mac_ctrl_frames_lsb)); + buf[143] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(pause_mac_ctrl_frames_msb)); + buf[144] = csrrd32(priv->mac_dev, qse_rxstat_csroffs(if_errors_lsb)); + buf[145] = csrrd32(priv->mac_dev, qse_rxstat_csroffs(if_errors_msb)); + buf[146] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(unicast_frames_ok_lsb)); + buf[147] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(unicast_frames_ok_msb)); + buf[148] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(unicast_frames_err_lsb)); + buf[149] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(unicast_frames_err_msb)); + buf[150] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(multicast_frames_ok_lsb)); + buf[151] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(multicast_frames_ok_msb)); + buf[152] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(multicast_frames_err_lsb)); + buf[153] = 
csrrd32(priv->mac_dev, + qse_rxstat_csroffs(multicast_frames_err_msb)); + buf[154] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(broadcast_frames_ok_lsb)); + buf[155] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(broadcast_frames_ok_msb)); + buf[156] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(broadcast_frames_err_lsb)); + buf[157] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(broadcast_frames_err_msb)); + buf[158] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_oct_lsb)); + buf[159] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_oct_msb)); + buf[160] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_lsb)); + buf[161] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_msb)); + buf[162] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_undersize_pkts_lsb)); + buf[163] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_undersize_pkts_msb)); + buf[164] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_oversize_pkts_lsb)); + buf[165] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_oversize_pkts_msb)); + buf[166] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_64_oct_lsb)); + buf[167] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_64_oct_msb)); + buf[168] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_65to127_oct_lsb)); + buf[169] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_65to127_oct_msb)); + buf[170] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_128to255_oct_lsb)); + buf[171] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_128to255_oct_msb)); + buf[172] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_256to511_oct_lsb)); + buf[173] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_256to511_oct_msb)); + buf[174] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_512to1023_oct_lsb)); + buf[175] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_512to1023_oct_msb)); + buf[176] = 
csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_1024to1518_oct_lsb)); + buf[177] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_1024to1518_oct_msb)); + buf[178] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_1519tox_oct_lsb)); + buf[179] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_1519tox_oct_msb)); + buf[180] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_fragments_lsb)); + buf[181] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_fragments_msb)); + buf[182] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_jabbers_lsb)); + buf[183] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_jabbers_msb)); + buf[184] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_crc_err_lsb)); + buf[185] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_crc_err_msb)); + buf[186] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(unicast_mac_ctrl_frames_lsb)); + buf[187] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(unicast_mac_ctrl_frames_msb)); + buf[188] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(multicast_mac_ctrl_frames_lsb)); + buf[189] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(multicast_mac_ctrl_frames_msb)); + buf[190] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(broadcast_mac_ctrl_frames_lsb)); + buf[191] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(broadcast_mac_ctrl_frames_msb)); + buf[192] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(pfc_mac_ctrl_frames_lsb)); + buf[193] = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(pfc_mac_ctrl_frames_msb)); + + buf[194] = csrrd32(priv->mac_dev, qse_csroffs(ecc_status)); + buf[195] = csrrd32(priv->mac_dev, qse_csroffs(ecc_enable)); +} + +static void qse_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pauseparam) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + + pauseparam->rx_pause = 0; + pauseparam->tx_pause = 0; + pauseparam->autoneg = 0; + + if (priv->flow_ctrl & FLOW_RX) + pauseparam->rx_pause = 1; + if 
(priv->flow_ctrl & FLOW_TX) + pauseparam->tx_pause = 1; +} + +static int qse_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pauseparam) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + int new_pause = FLOW_OFF; + int ret = 0; + + spin_lock(&priv->mac_cfg_lock); + + if (pauseparam->autoneg != 0) { + ret = -EINVAL; + goto out; + } + + if (pauseparam->rx_pause) { + new_pause |= FLOW_RX; + tse_clear_bit(priv->mac_dev, qse_csroffs(rx_frame_control), + MAC_RX_FRM_CTL_IGNORE_PAUSE); + } else { + tse_set_bit(priv->mac_dev, qse_csroffs(rx_frame_control), + MAC_RX_FRM_CTL_IGNORE_PAUSE); + } + + if (pauseparam->tx_pause) { + new_pause |= FLOW_TX; + tse_set_bit(priv->mac_dev, + qse_csroffs(tx_pauseframe_enable), + MAC_TX_PAUSEFRAME_ENA); + } else { + tse_clear_bit(priv->mac_dev, + qse_csroffs(tx_pauseframe_enable), + MAC_TX_PAUSEFRAME_ENA); + } + + csrwr32(priv->pause, priv->mac_dev, + qse_csroffs(tx_pauseframe_quanta)); + priv->flow_ctrl = new_pause; +out: + spin_unlock(&priv->mac_cfg_lock); + return ret; +} + +static int qse_get_ts_info(struct net_device *dev, + struct kernel_ethtool_ts_info *info) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (priv->ptp_priv.ptp_clock) + info->phc_index = ptp_clock_index(priv->ptp_priv.ptp_clock); + else + info->phc_index = -1; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON) | + (1 << HWTSTAMP_TX_ONESTEP_SYNC); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + return 0; +} + +/* Set link ksettings (phy address, speed) for ethtools */ +static int qse_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + + if (!priv) + return -ENODEV; + + return phylink_ethtool_ksettings_set(priv->phylink, cmd); +} + +/* Get 
link ksettings (phy address, speed) for ethtools */ +static int qse_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + + if (!priv) + return -ENODEV; + + return phylink_ethtool_ksettings_get(priv->phylink, cmd); +} + +static const struct ethtool_ops qse_ll_ethtool_ops = { + .get_drvinfo = qse_get_drvinfo, + .get_regs_len = qse_reglen, + .get_regs = qse_get_regs, + .get_link = ethtool_op_get_link, + .get_link = ethtool_op_get_link, + .get_strings = qse_gstrings, + .get_sset_count = qse_sset_count, + .get_ethtool_stats = qse_fill_stats, + .get_msglevel = qse_get_msglevel, + .set_msglevel = qse_set_msglevel, + .get_pauseparam = qse_get_pauseparam, + .set_pauseparam = qse_set_pauseparam, + .get_ts_info = qse_get_ts_info, + .get_link_ksettings = qse_get_link_ksettings, + .set_link_ksettings = qse_set_link_ksettings, +}; + +void intel_fpga_qse_ll_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &qse_ll_ethtool_ops; +} diff --git a/drivers/net/ethernet/altera/intel_fpga_qse_ll_main.c b/drivers/net/ethernet/altera/intel_fpga_qse_ll_main.c new file mode 100644 index 0000000000000..ed51582a90a73 --- /dev/null +++ b/drivers/net/ethernet/altera/intel_fpga_qse_ll_main.c @@ -0,0 +1,1635 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Intel FPGA Quad-Speed Ethernet MAC driver + * Copyright (C) 2019 Intel Corporation. All rights reserved + * + * Contributors: + * Roman Bulgakov + * Yu Ying Choov + * Dalon Westergreen + * Joyce Ooi + * + * Original driver contributed by GlobalLogic. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "altera_eth_dma.h" +#include "altera_msgdma.h" +#include "altera_msgdma_prefetcher.h" +#include "intel_fpga_qse_ll.h" +#include "altera_sgdma.h" +#include "altera_utils.h" +#include "intel_fpga_tod.h" + +/* Module parameters */ +static int debug = -1; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); + +static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN); + +#define RX_DESCRIPTORS 512 +static int dma_rx_num = RX_DESCRIPTORS; +module_param(dma_rx_num, int, 0644); +MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list"); + +#define TX_DESCRIPTORS 512 +static int dma_tx_num = TX_DESCRIPTORS; +module_param(dma_tx_num, int, 0644); +MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list"); + +static int flow_ctrl = FLOW_OFF; +module_param(flow_ctrl, int, 0644); +MODULE_PARM_DESC(flow_ctrl, "Flow control (0: off, 1: rx, 2: tx, 3: on)"); + +static int pause = MAC_TX_PAUSEFRAME_QUANTA; +module_param(pause, int, 0644); +MODULE_PARM_DESC(pause, "Flow Control Pause Time"); + +/* Make sure DMA buffer size is larger than the max frame size + * plus some alignment offset and a VLAN header. If the max frame size is + * 1518, a VLAN header would be additional 4 bytes and additional + * headroom for alignment is 2 bytes, 2048 is just fine. + */ +#define INTEL_FPGA_RXDMABUFFER_SIZE 2048 + +#define INTEL_FPGA_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) + +static const struct of_device_id intel_fpga_qse_ll_ids[]; + +/* Allow network stack to resume queueing packets after we've + * finished transmitting at least 1/4 of the packets in the queue. 
+ */ +#define ETH_TX_THRESH(x) ((x)->dma_priv.tx_ring_size / 4) + +#define TXQUEUESTOP_THRESHOLD 2 + +static inline u32 qse_tx_avail(struct intel_fpga_qse_private *priv) +{ + return priv->dma_priv.tx_cons + priv->dma_priv.tx_ring_size + - priv->dma_priv.tx_prod - 1; +} + +static int qse_init_rx_buffer(struct intel_fpga_qse_private *priv, + struct altera_dma_buffer *rxbuffer, int len) +{ + rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len); + if (!rxbuffer->skb) + return -ENOMEM; + + rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data, + len, DMA_FROM_DEVICE); + + if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) { + netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); + dev_kfree_skb_any(rxbuffer->skb); + return -EINVAL; + } + rxbuffer->dma_addr &= (dma_addr_t)~3; + rxbuffer->len = len; + + return 0; +} + +static void qse_free_rx_buffer(struct intel_fpga_qse_private *priv, + struct altera_dma_buffer *rxbuffer) +{ + struct sk_buff *skb = rxbuffer->skb; + dma_addr_t dma_addr = rxbuffer->dma_addr; + + if (skb) { + if (dma_addr) + dma_unmap_single(priv->device, dma_addr, + rxbuffer->len, + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + rxbuffer->skb = NULL; + rxbuffer->dma_addr = 0; + } +} + +/* Unmap and free Tx buffer resources + */ +static void qse_free_tx_buffer(struct intel_fpga_qse_private *priv, + struct altera_dma_buffer *buffer) +{ + if (buffer->dma_addr) { + if (buffer->mapped_as_page) + dma_unmap_page(priv->device, buffer->dma_addr, + buffer->len, DMA_TO_DEVICE); + else + dma_unmap_single(priv->device, buffer->dma_addr, + buffer->len, DMA_TO_DEVICE); + buffer->dma_addr = 0; + } + if (buffer->skb) { + dev_kfree_skb_any(buffer->skb); + buffer->skb = NULL; + } +} + +static int alloc_init_skbufs(struct intel_fpga_qse_private *priv) +{ + unsigned int rx_descs = priv->dma_priv.rx_ring_size; + unsigned int tx_descs = priv->dma_priv.tx_ring_size; + int ret = -ENOMEM; + int i; + + /* Create Rx ring buffer */ + 
priv->dma_priv.rx_ring = kcalloc(rx_descs, + sizeof(struct altera_dma_buffer), + GFP_KERNEL); + if (!priv->dma_priv.rx_ring) + goto err_rx_ring; + + /* Create Tx ring buffer */ + priv->dma_priv.tx_ring = kcalloc(tx_descs, + sizeof(struct altera_dma_buffer), + GFP_KERNEL); + if (!priv->dma_priv.tx_ring) + goto err_tx_ring; + + priv->dma_priv.tx_cons = 0; + priv->dma_priv.tx_prod = 0; + + /* Init Rx FIFO */ + csrwr32(priv->rx_fifo_almost_full, priv->rx_fifo, + rx_fifo_csroffs(almost_full_threshold)); + csrwr32(priv->rx_fifo_almost_empty, priv->rx_fifo, + rx_fifo_csroffs(almost_empty_threshold)); + + /* Init Rx ring */ + for (i = 0; i < rx_descs; i++) { + ret = qse_init_rx_buffer(priv, &priv->dma_priv.rx_ring[i], + priv->dma_priv.rx_dma_buf_sz); + if (ret) + goto err_init_rx_buffers; + } + + priv->dma_priv.rx_cons = 0; + priv->dma_priv.rx_prod = 0; + + return 0; +err_init_rx_buffers: + while (--i >= 0) + qse_free_rx_buffer(priv, &priv->dma_priv.rx_ring[i]); + kfree(priv->dma_priv.tx_ring); +err_tx_ring: + kfree(priv->dma_priv.rx_ring); +err_rx_ring: + return ret; +} + +static void free_skbufs(struct net_device *dev) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + unsigned int rx_descs = priv->dma_priv.rx_ring_size; + unsigned int tx_descs = priv->dma_priv.tx_ring_size; + int i; + + /* Release the DMA TX/RX socket buffers */ + for (i = 0; i < rx_descs; i++) + qse_free_rx_buffer(priv, &priv->dma_priv.rx_ring[i]); + for (i = 0; i < tx_descs; i++) + qse_free_tx_buffer(priv, &priv->dma_priv.tx_ring[i]); +} + +/* Reallocate the skb for the reception process + */ +static inline void qse_rx_refill(struct intel_fpga_qse_private *priv) +{ + unsigned int rxsize = priv->dma_priv.rx_ring_size; + unsigned int entry; + int ret; + + for (; priv->dma_priv.rx_cons - priv->dma_priv.rx_prod > 0; + priv->dma_priv.rx_prod++) { + entry = priv->dma_priv.rx_prod % rxsize; + if (likely(!priv->dma_priv.rx_ring[entry].skb)) { + ret = qse_init_rx_buffer(priv, + 
&priv->dma_priv.rx_ring[entry], + priv->dma_priv.rx_dma_buf_sz); + if (unlikely(ret != 0)) + break; + priv->dmaops->add_rx_desc(&priv->dma_priv, + &priv->dma_priv.rx_ring[entry]); + } + } +} + +/* Pull out the VLAN tag and fix up the packet + */ +static inline void qse_rx_vlan(struct net_device *dev, struct sk_buff *skb) +{ + struct ethhdr *eth_hdr; + u16 vid; + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + !__vlan_get_tag(skb, &vid)) { + eth_hdr = (struct ethhdr *)skb->data; + memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); + skb_pull(skb, VLAN_HLEN); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } +} + +/* Receive a packet: retrieve and pass over to upper levels + */ +static int qse_rx(struct intel_fpga_qse_private *priv, int limit) +{ + unsigned int count = 0; + unsigned int next_entry; + struct sk_buff *skb; + unsigned int entry = + priv->dma_priv.rx_cons % priv->dma_priv.rx_ring_size; + u32 rxstatus; + u16 pktlength; + u16 pktstatus; + + while ((count < limit) && + ((rxstatus = priv->dmaops->get_rx_status(&priv->dma_priv)) + != 0)) { + pktstatus = rxstatus >> 16; + pktlength = rxstatus & 0xffff; + + if ((pktstatus & 0xff) || pktlength == 0) + netdev_err(priv->dev, + "RCV pktstatus %08X pktlength %08X\n", + pktstatus, pktlength); + + /* DMA transfer from TSE starts with 2 additional bytes for + * IP payload alignment. Status returned by get_rx_status() + * contains DMA transfer length. Packet is 2 bytes shorter. 
+ */ + pktlength -= 2; + + count++; + next_entry = (++priv->dma_priv.rx_cons) + % priv->dma_priv.rx_ring_size; + + skb = priv->dma_priv.rx_ring[entry].skb; + if (unlikely(!skb)) { + netdev_err(priv->dev, + "%s: Inconsistent Rx descriptor chain\n", + __func__); + priv->dev->stats.rx_dropped++; + break; + } + priv->dma_priv.rx_ring[entry].skb = NULL; + skb_put(skb, pktlength); + + /* make cache consistent with receive packet buffer */ + dma_sync_single_for_cpu(priv->device, + priv->dma_priv.rx_ring[entry].dma_addr, + priv->dma_priv.rx_ring[entry].len, + DMA_FROM_DEVICE); + + dma_unmap_single(priv->device, + priv->dma_priv.rx_ring[entry].dma_addr, + priv->dma_priv.rx_ring[entry].len, + DMA_FROM_DEVICE); + + if (netif_msg_pktdata(priv)) { + netdev_info(priv->dev, "frame received %d bytes\n", + pktlength); + print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET, + 16, 1, skb->data, pktlength, true); + } + + qse_rx_vlan(priv->dev, skb); + skb->protocol = eth_type_trans(skb, priv->dev); + skb_checksum_none_assert(skb); + napi_gro_receive(&priv->napi, skb); + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += pktlength; + entry = next_entry; + qse_rx_refill(priv); + } + + return count; +} + +/* Reclaim resources after transmission completes + */ +static int qse_tx_complete(struct intel_fpga_qse_private *priv) +{ + unsigned int txsize = priv->dma_priv.tx_ring_size; + u32 ready; + unsigned int entry; + struct altera_dma_buffer *tx_buff; + int txcomplete = 0; + + spin_lock(&priv->tx_lock); + ready = priv->dmaops->tx_completions(&priv->dma_priv); + + /* Free sent buffers */ + while (ready && (priv->dma_priv.tx_cons != priv->dma_priv.tx_prod)) { + entry = priv->dma_priv.tx_cons % txsize; + tx_buff = &priv->dma_priv.tx_ring[entry]; + + if (likely(tx_buff->skb)) + priv->dev->stats.tx_packets++; + + if (netif_msg_tx_done(priv)) + netdev_info(priv->dev, "%s: curr %d, dirty %d\n", + __func__, priv->dma_priv.tx_prod, + priv->dma_priv.tx_cons); + + qse_free_tx_buffer(priv, 
tx_buff); + priv->dma_priv.tx_cons++; + + txcomplete++; + ready--; + } + + if (unlikely(netif_queue_stopped(priv->dev) && + qse_tx_avail(priv) > ETH_TX_THRESH(priv))) { + netif_tx_lock(priv->dev); + if (netif_msg_tx_done(priv)) + netdev_info(priv->dev, "%s: restart transmit\n", + __func__); + netif_wake_queue(priv->dev); + netif_tx_unlock(priv->dev); + } + + spin_unlock(&priv->tx_lock); + return txcomplete; +} + +/* NAPI polling function + */ +static int qse_poll(struct napi_struct *napi, int budget) +{ + struct intel_fpga_qse_private *priv = + container_of(napi, struct intel_fpga_qse_private, napi); + int rxcomplete = 0; + unsigned long flags; + + qse_tx_complete(priv); + + rxcomplete = qse_rx(priv, budget); + + if (rxcomplete < budget) { + napi_complete_done(napi, rxcomplete); + netdev_dbg(priv->dev, + "NAPI Complete, did %d packets with budget %d\n", + rxcomplete, budget); + spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + priv->dmaops->enable_rxirq(&priv->dma_priv); + priv->dmaops->enable_txirq(&priv->dma_priv); + spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + } + + return rxcomplete; +} + +/* DMA TX & RX FIFO interrupt routing + */ +static irqreturn_t intel_fpga_isr(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct intel_fpga_qse_private *priv; + + if (unlikely(!dev)) { + pr_err("%s: invalid dev pointer\n", __func__); + return IRQ_NONE; + } + priv = netdev_priv(dev); + + if (unlikely(netif_msg_intr(priv))) + netdev_info(dev, "Got TX/RX Interrupt"); + + spin_lock(&priv->rxdma_irq_lock); + /* reset IRQs */ + priv->dmaops->clear_rxirq(&priv->dma_priv); + priv->dmaops->clear_txirq(&priv->dma_priv); + spin_unlock(&priv->rxdma_irq_lock); + + if (likely(napi_schedule_prep(&priv->napi))) { + spin_lock(&priv->rxdma_irq_lock); + priv->dmaops->disable_rxirq(&priv->dma_priv); + priv->dmaops->disable_txirq(&priv->dma_priv); + spin_unlock(&priv->rxdma_irq_lock); + __napi_schedule(&priv->napi); + } + + return IRQ_HANDLED; +} + +/* Transmit a 
packet (called by the kernel). Dispatches + * either the SGDMA method for transmitting or the + * MSGDMA method, assumes no scatter/gather support, + * implying an assumption that there's only one + * physically contiguous fragment starting at + * skb->data, for length of skb_headlen(skb). + */ +static int qse_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + unsigned int txsize = priv->dma_priv.tx_ring_size; + unsigned int entry; + struct altera_dma_buffer *buffer = NULL; + int nfrags = skb_shinfo(skb)->nr_frags; + unsigned int nopaged_len = skb_headlen(skb); + enum netdev_tx ret = NETDEV_TX_OK; + dma_addr_t dma_addr; + + spin_lock_bh(&priv->tx_lock); + + if (unlikely(qse_tx_avail(priv) < nfrags + 1)) { + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + /* This is a hard error, log it. */ + netdev_err(priv->dev, + "%s: Tx list full when queue awake\n", + __func__); + } + ret = NETDEV_TX_BUSY; + goto out; + } + + if (unlikely(netif_msg_tx_queued(priv))) { + netdev_info(dev, "sending 0x%p, len=%d\n", skb, skb->len); + if (netif_msg_pktdata(priv)) + print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET, + 16, 1, skb->data, skb->len, true); + } + + /* Map the first skb fragment */ + entry = priv->dma_priv.tx_prod % txsize; + buffer = &priv->dma_priv.tx_ring[entry]; + + dma_addr = dma_map_single(priv->device, skb->data, nopaged_len, + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, dma_addr)) { + netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); + dev_kfree_skb_any(skb); + ret = -EINVAL; + goto out; + } + + buffer->skb = skb; + buffer->dma_addr = dma_addr; + buffer->len = nopaged_len; + + /* Push data out of the cache hierarchy into main memory */ + dma_sync_single_for_device(priv->device, buffer->dma_addr, + buffer->len, DMA_TO_DEVICE); + + priv->dmaops->tx_buffer(&priv->dma_priv, buffer); + + /* Provide a hardware time stamp if requested. 
+ */ + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->dma_priv.hwts_tx_en)) + /* declare that device is doing timestamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + /* Provide a software time stamp if requested and hardware timestamping + * is not possible (SKBTX_IN_PROGRESS not set). + */ + if (!priv->dma_priv.hwts_tx_en) + skb_tx_timestamp(skb); + + priv->dma_priv.tx_prod++; + dev->stats.tx_bytes += skb->len; + + if (unlikely(qse_tx_avail(priv) <= TXQUEUESTOP_THRESHOLD)) { + if (netif_msg_hw(priv)) + netdev_info(priv->dev, "%s: stop transmitted packets\n", + __func__); + netif_stop_queue(dev); + } + +out: + spin_unlock_bh(&priv->tx_lock); + + return ret; +} + +static int check_counter_complete(struct intel_fpga_qse_private *priv, + size_t offs, u32 bit_mask, bool set_bit) +{ + int counter; + + counter = 0; + while (counter++ < INTEL_FPGA_QSE_SW_RESET_WATCHDOG_CNTR) { + if (set_bit) { + if (tse_bit_is_set(priv->mac_dev, + offs, bit_mask)) + break; + } else { + if (tse_bit_is_clear(priv->mac_dev, + offs, bit_mask)) + break; + } + udelay(1); + } + + if (counter >= INTEL_FPGA_QSE_SW_RESET_WATCHDOG_CNTR) { + if (set_bit) { + if (tse_bit_is_clear(priv->mac_dev, + offs, bit_mask)) + return -EINVAL; + } else { + if (tse_bit_is_set(priv->mac_dev, + offs, bit_mask)) + return -EINVAL; + } + } + return 0; +} + +static void qse_set_mac(struct intel_fpga_qse_private *priv, bool enable) +{ + if (enable) { + /* Enable Rx and Tx datapath */ + tse_clear_bit(priv->mac_dev, + qse_csroffs(rx_transfer_control), + MAC_RX_TRANSFER_CTL_RX_DISABLE); + tse_clear_bit(priv->mac_dev, qse_csroffs(tx_packet_control), + MAC_TX_TRANSFER_CTL_TX_DISABLE); + } else { + /* Disable Rx and Tx datapath */ + tse_set_bit(priv->mac_dev, qse_csroffs(rx_transfer_control), + MAC_RX_TRANSFER_CTL_RX_DISABLE); + tse_set_bit(priv->mac_dev, qse_csroffs(tx_packet_control), + MAC_TX_TRANSFER_CTL_TX_DISABLE); + + /* Wait for in transit packets to complete */ + if 
(check_counter_complete(priv, + qse_csroffs(tx_transfer_status), + MAC_TX_TRANSFER_STATUS_BUSY, false)) + netif_warn(priv, drv, priv->dev, + "Cannot stop MAC Tx datapath\n"); + if (check_counter_complete(priv, + qse_csroffs(rx_transfer_status), + MAC_RX_TRANSFER_STATUS_BUSY, false)) + netif_warn(priv, drv, priv->dev, + "Cannot stop MAC Rx datapath\n"); + + netif_warn(priv, drv, priv->dev, "Stop done\n"); + } +} + +static int reset_mac(struct intel_fpga_qse_private *priv) +{ + /* stop the mac */ + qse_set_mac(priv, false); + + /* reset MAC */ + tse_set_bit(priv->mac_dev, qse_csroffs(mac_reset_control), + MAC_TX_RESET); + tse_set_bit(priv->mac_dev, qse_csroffs(mac_reset_control), + MAC_RX_RESET); + + /* Wait until TX / RX in reset */ + if (check_counter_complete(priv, + qse_csroffs(tx_transfer_status), + MAC_TX_TRANSFER_STATUS_RST, true)) { + netif_warn(priv, drv, priv->dev, + "Failed to reset MAC Tx datapath\n"); + return -EINVAL; + } + if (check_counter_complete(priv, + qse_csroffs(rx_transfer_status), + MAC_RX_TRANSFER_STATUS_RST, true)) { + netif_warn(priv, drv, priv->dev, + "Failed to reset MAC Rx datapath\n"); + return -EINVAL; + } + + /* Pull MAC out of reset */ + tse_clear_bit(priv->mac_dev, qse_csroffs(mac_reset_control), + MAC_TX_RESET); + tse_clear_bit(priv->mac_dev, qse_csroffs(mac_reset_control), + MAC_RX_RESET); + + /* Wait until MAC out of reset */ + if (check_counter_complete(priv, + qse_csroffs(tx_transfer_status), + MAC_TX_TRANSFER_STATUS_RST, false)) { + netif_warn(priv, drv, priv->dev, + "Failed to clear reset for MAC Tx datapath\n"); + return -EINVAL; + } + if (check_counter_complete(priv, qse_csroffs(rx_transfer_status), + MAC_RX_TRANSFER_STATUS_RST, false)) { + netif_warn(priv, drv, priv->dev, + "Failed to clear reset for MAC Rx datapath\n"); + return -EINVAL; + } + + return 0; +} + +/* Change the MTU + */ +static int qse_change_mtu(struct net_device *dev, int new_mtu) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + unsigned int 
max_mtu = priv->dev->max_mtu; + unsigned int min_mtu = priv->dev->min_mtu; + + if (netif_running(dev)) { + netdev_err(dev, "must be stopped to change its MTU\n"); + return -EBUSY; + } + + if ((new_mtu < min_mtu) || (new_mtu > max_mtu)) { + netdev_err(dev, "invalid MTU, max MTU is: %u\n", max_mtu); + return -EINVAL; + } + + dev->mtu = new_mtu; + netdev_update_features(dev); + + return 0; +} + +static void qse_update_mac_addr(struct intel_fpga_qse_private *priv, const unsigned char *addr) +{ + u32 msb; + u32 lsb; + + lsb = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5]; + msb = ((addr[0] << 8) | addr[1]) & 0xffff; + + /* Set primary MAC address */ + csrwr32(lsb, priv->mac_dev, qse_csroffs(primary_mac_addr0)); + csrwr32(msb, priv->mac_dev, qse_csroffs(primary_mac_addr1)); +} + +static void qse_set_mac_flow_ctrl(struct intel_fpga_qse_private *priv) +{ + u32 reg; + + if (priv->flow_ctrl & FLOW_RX) + tse_clear_bit(priv->mac_dev, qse_csroffs(rx_frame_control), + MAC_RX_FRM_CTL_IGNORE_PAUSE); + else + tse_set_bit(priv->mac_dev, qse_csroffs(rx_frame_control), + MAC_RX_FRM_CTL_IGNORE_PAUSE); + + reg = csrrd32(priv->mac_dev, qse_csroffs(rx_frame_control)); + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "QSE rx_frame_control: 0x%08x\n", + reg); + + if (priv->flow_ctrl & FLOW_TX) + tse_set_bit(priv->mac_dev, qse_csroffs(tx_pauseframe_enable), + MAC_TX_PAUSEFRAME_ENA); + else + tse_clear_bit(priv->mac_dev, + qse_csroffs(tx_pauseframe_enable), + MAC_TX_PAUSEFRAME_ENA); + + reg = csrrd32(priv->mac_dev, qse_csroffs(tx_pauseframe_enable)); + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "QSE tx_pauseframe_enable: 0x%08x\n", + reg); + + csrwr32(priv->pause, priv->mac_dev, + qse_csroffs(tx_pauseframe_quanta)); + + reg = csrrd32(priv->mac_dev, qse_csroffs(tx_pauseframe_quanta)); + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "QSE tx_pauseframe_quanta: 0x%08x\n", + reg); +} + +static void qse_init_mac_rx_datapath(struct intel_fpga_qse_private *priv) +{ + 
u32 reg; + u32 frm_length; + + /* Set the largest packet size that could be received */ + frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN; + csrwr32(frm_length, priv->mac_dev, qse_csroffs(rx_frame_maxlength)); + + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "max rx packet size %d\n", + frm_length); + + /* Only CRC removal on receive */ + reg = csrrd32(priv->mac_dev, qse_csroffs(rx_padcrc_control)); + reg &= ~(MAC_RX_PADCRC_CTL_CRC_REMOVE | MAC_RX_PADCRC_CTL_ALL_REMOVE); + csrwr32(reg, priv->mac_dev, qse_csroffs(rx_padcrc_control)); + + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "rx_padrc_control: 0x%08x\n", + reg); + + /* Check CRC error on RX data path */ + tse_set_bit(priv->mac_dev, qse_csroffs(rx_crccheck_control), + MAC_RX_CRCCHECK_CTL_ENA); + + reg = csrrd32(priv->mac_dev, qse_csroffs(rx_crccheck_control)); + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "rx_crccheck_control: 0x%08x\n", + reg); + + /* Set Rx frame control options */ + reg = csrrd32(priv->mac_dev, qse_csroffs(rx_frame_control)); + reg &= ~(MAC_RX_FRM_CTL_EN_ALLUCAST | MAC_RX_FRM_CTL_EN_ALLMCAST | + MAC_RX_FRM_CTL_FWD_PAUSE) | MAC_RX_FRM_CTL_FWD_CONTROL; + + if (priv->added_unicast) + reg |= (MAC_RX_FRM_CTL_EN_SUPP0 | MAC_RX_FRM_CTL_EN_SUPP1 | + MAC_RX_FRM_CTL_EN_SUPP2 | MAC_RX_FRM_CTL_EN_SUPP3); + else + reg &= ~(MAC_RX_FRM_CTL_EN_SUPP0 | MAC_RX_FRM_CTL_EN_SUPP1 | + MAC_RX_FRM_CTL_EN_SUPP2 | MAC_RX_FRM_CTL_EN_SUPP3); + + csrwr32(reg, priv->mac_dev, qse_csroffs(rx_frame_control)); + + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "rx_frame_control: 0x%08x\n", + reg); + + /* Set Rx PMA delay registers */ + /* need to add for multi speed */ + if (priv->ptp_enable) { + csrwr32(priv->rx_pma_delay_ns, priv->mac_dev, + qse_rx_ts_csroffs(ns_adjustment_10g)); + csrwr32(priv->rx_pma_delay_fns, priv->mac_dev, + qse_rx_ts_csroffs(fns_adjustment_10g)); + } +} + +static void qse_init_mac_tx_datapath(struct intel_fpga_qse_private *priv) +{ + u32 frm_length; + u32 reg; + + /* Set 
the maximum allowable frame length for the statistic counter. + * The value of this register does not affect the allowable frame size + * that can be sent through the TX path. + */ + frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN; + csrwr32(frm_length, priv->mac_dev, qse_csroffs(tx_frame_maxlength)); + + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "tx max frame: 0x%08x\n", + frm_length); + + /* Insert padding bytes into transmit frames until the frame length + * reaches 60 bytes. To achieve the minimum 64 bytes, ensure that the + * CRC field is inserted. + */ + tse_set_bit(priv->mac_dev, qse_csroffs(tx_pad_control), + MAC_TX_PADINS_CTL_ENA); + tse_set_bit(priv->mac_dev, qse_csroffs(tx_crc_control), + MAC_TX_CRCINS_CTL_ALWAYS | MAC_TX_CRCINS_CTL_ENA); + + reg = csrrd32(priv->mac_dev, qse_csroffs(tx_pad_control)); + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "tx_pad_control: 0x%08x\n", + reg); + reg = csrrd32(priv->mac_dev, qse_csroffs(tx_crc_control)); + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "tx_crc_control: 0x%08x\n", + reg); + + /* Disable address insertion on the transmit datapath, retain the + * source address received from the stack. 
+ */ + tse_set_bit(priv->mac_dev, qse_csroffs(tx_src_addr_override), + MAC_TX_ADDRINS_CTL_ENA); + + reg = csrrd32(priv->mac_dev, qse_csroffs(tx_src_addr_override)); + if (netif_msg_ifup(priv)) + netdev_info(priv->dev, "tx_src_addr_override: 0x%08x\n", + reg); + + /* Set Tx PMA delay registers */ + /* need to add for mult speed */ + if (priv->ptp_enable) { + csrwr32(priv->tx_pma_delay_ns, priv->mac_dev, + qse_tx_ts_csroffs(ns_adjustment_10g)); + csrwr32(priv->tx_pma_delay_fns, priv->mac_dev, + qse_tx_ts_csroffs(fns_adjustment_10g)); + } +} + +static void qse_clear_mac_statistics(struct intel_fpga_qse_private *priv) +{ + /* Clear all statistics counters for the receive and transmit path */ + tse_set_bit(priv->mac_dev, qse_rxstat_csroffs(clear), + MAC_STATS_CTL_CLEAR); + tse_set_bit(priv->mac_dev, qse_txstat_csroffs(clear), + MAC_STATS_CTL_CLEAR); + + /* Wait for Rx and Tx statistics counters are cleared */ + if (check_counter_complete(priv, qse_rxstat_csroffs(clear), + MAC_STATS_CTL_CLEAR, false)) + netif_warn(priv, drv, priv->dev, + "Cannot clear MAC Rx statistics counters\n"); + if (check_counter_complete(priv, qse_txstat_csroffs(clear), + MAC_STATS_CTL_CLEAR, false)) + netif_warn(priv, drv, priv->dev, + "Cannot clear MAC Tx statistics counters\n"); +} + +/* Change MAC core filtering options + */ +static void qse_set_mac_filter(struct net_device *dev) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + unsigned int flags = dev->flags; + u32 reg = csrrd32(priv->mac_dev, qse_csroffs(rx_frame_control)); + + if (flags & IFF_PROMISC) { + reg |= (MAC_RX_FRM_CTL_EN_ALLUCAST | + MAC_RX_FRM_CTL_EN_ALLMCAST); + } else if ((flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) { + reg |= MAC_RX_FRM_CTL_EN_ALLMCAST; + reg &= ~MAC_RX_FRM_CTL_EN_ALLUCAST; + } else if (!netdev_uc_empty(dev)) { + reg |= MAC_RX_FRM_CTL_EN_ALLUCAST; + reg &= ~MAC_RX_FRM_CTL_EN_ALLMCAST; + } else { + reg &= ~(MAC_RX_FRM_CTL_EN_ALLUCAST | + MAC_RX_FRM_CTL_EN_ALLMCAST); + } + + csrwr32(reg, 
priv->mac_dev, qse_csroffs(rx_frame_control)); +} + +/* Set or clear the multicast filter for this adaptor + */ +static void qse_set_rx_mode(struct net_device *dev) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + + spin_lock(&priv->mac_cfg_lock); + qse_set_mac_filter(dev); + spin_unlock(&priv->mac_cfg_lock); +} + +/* Initialize MAC core registers + */ +static int init_mac(struct net_device *dev) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + + qse_update_mac_addr(priv, priv->dev->dev_addr); + qse_init_mac_rx_datapath(priv); + qse_init_mac_tx_datapath(priv); + qse_clear_mac_statistics(priv); + qse_set_mac_flow_ctrl(priv); + + return 0; +} + +/* Control hardware timestamping. + * This function configures the MAC to enable/disable both outgoing(TX) + * and incoming(RX) packets time stamping based on user input. + */ +static int qse_set_hwtstamp_config(struct net_device *dev, struct ifreq *ifr) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + struct hwtstamp_config config; + int ret = 0; + + if (copy_from_user(&config, ifr->ifr_data, + sizeof(struct hwtstamp_config))) + return -EFAULT; + + netif_info(priv, drv, dev, + "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", + __func__, config.flags, config.tx_type, config.rx_filter); + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + priv->dma_priv.hwts_tx_en = 0; + break; + case HWTSTAMP_TX_ON: + priv->dma_priv.hwts_tx_en = 1; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + priv->dma_priv.hwts_rx_en = 0; + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + default: + priv->dma_priv.hwts_rx_en = 1; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + + if (copy_to_user(ifr->ifr_data, &config, + sizeof(struct hwtstamp_config))) + return -EFAULT; + + return ret; +} + +/* Entry point for the ioctl. 
+ */ +static int qse_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + int ret = 0; + + if (!netif_running(dev)) + return -EINVAL; + + switch (cmd) { + case SIOCSHWTSTAMP: + ret = qse_set_hwtstamp_config(dev, ifr); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +/* Open and initialize the interface + */ +static int qse_open(struct net_device *dev) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + int ret = 0; + int i; + unsigned long flags; + + /* Create and initialize the TX/RX descriptors chains. */ + priv->dma_priv.rx_ring_size = dma_rx_num; + priv->dma_priv.tx_ring_size = dma_tx_num; + /* Reset and configure QSE MAC and probe associated PHY */ + ret = priv->dmaops->init_dma(&priv->dma_priv); + if (ret) { + netdev_err(dev, "Cannot initialize DMA\n"); + goto phy_error; + } + + if (netif_msg_ifup(priv)) + netdev_info(dev, "device MAC address %pM\n", + dev->dev_addr); + + spin_lock(&priv->mac_cfg_lock); + + ret = reset_mac(priv); + if (ret) + netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret); + + ret = init_mac(dev); + spin_unlock(&priv->mac_cfg_lock); + if (ret) { + netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret); + goto alloc_skbuf_error; + } + + priv->dmaops->reset_dma(&priv->dma_priv); + + ret = alloc_init_skbufs(priv); + if (ret) { + netdev_err(dev, "DMA descriptors initialization failed\n"); + goto alloc_skbuf_error; + } + + /* Register RX interrupt */ + ret = devm_request_irq(priv->device, priv->rx_irq, intel_fpga_isr, + IRQF_SHARED, dev->name, dev); + if (ret) { + netdev_err(dev, "Unable to register RX interrupt %d\n", + priv->rx_irq); + goto init_error; + } + + /* Register TX interrupt */ + ret = devm_request_irq(priv->device, priv->tx_irq, intel_fpga_isr, + IRQF_SHARED, dev->name, dev); + if (ret) { + netdev_err(dev, "Unable to register TX interrupt %d\n", + priv->tx_irq); + goto init_error; + } + + /* Enable DMA interrupts */ + spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + 
priv->dmaops->enable_rxirq(&priv->dma_priv); + priv->dmaops->enable_txirq(&priv->dma_priv); + + /* Setup RX descriptor chain */ + for (i = 0; i < priv->dma_priv.rx_ring_size; i++) + priv->dmaops->add_rx_desc(&priv->dma_priv, + &priv->dma_priv.rx_ring[i]); + + spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + + napi_enable(&priv->napi); + netif_start_queue(dev); + + netdev_warn(dev, "start rxdma\n"); + priv->dmaops->start_rxdma(&priv->dma_priv); + + if (priv->dmaops->start_txdma) + priv->dmaops->start_txdma(&priv->dma_priv); + + /* Start MAC Rx/Tx */ + spin_lock(&priv->mac_cfg_lock); + qse_set_mac(priv, true); + spin_unlock(&priv->mac_cfg_lock); + + if (priv->phylink) + phylink_start(priv->phylink); + + return 0; + +init_error: + free_skbufs(dev); +alloc_skbuf_error: +phy_error: + return ret; +} + +/* Stop TSE MAC interface and put the device in an inactive state + */ +static int qse_shutdown(struct net_device *dev) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + int ret; + unsigned long flags; + + /* Stop the PHY */ + if (priv->phylink) + phylink_stop(priv->phylink); + + netif_stop_queue(dev); + napi_disable(&priv->napi); + + /* Disable DMA interrupts */ + spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + priv->dmaops->disable_rxirq(&priv->dma_priv); + priv->dmaops->disable_txirq(&priv->dma_priv); + spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + + /* disable and reset the MAC, empties fifo */ + spin_lock(&priv->mac_cfg_lock); + spin_lock(&priv->tx_lock); + + priv->dmaops->reset_dma(&priv->dma_priv); + ret = reset_mac(priv); + /* Note that reset_mac will fail if the clocks are gated by the PHY + * due to the PHY being put into isolation or power down mode. + * This is not an error if reset fails due to no clock. 
+ */ + if (ret) + netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret); + free_skbufs(dev); + + spin_unlock(&priv->tx_lock); + spin_unlock(&priv->mac_cfg_lock); + priv->dmaops->uninit_dma(&priv->dma_priv); + + return 0; +} + +static void qse_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *storage) +{ + struct intel_fpga_qse_private *priv = netdev_priv(dev); + u32 lsb; + u32 msb; + + /* rx stats */ + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_pkts_msb)); + storage->rx_packets = ((u64)msb << 32) | lsb; + + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_oct_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_oct_msb)); + storage->rx_bytes = ((u64)msb << 32) | lsb; + + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(if_errors_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(if_errors_msb)); + storage->rx_errors = ((u64)msb << 32) | lsb; + + lsb = csrrd32(priv->mac_dev, + qse_csroffs(rx_pktovrflow_eth_stats_dropevents0)); + msb = csrrd32(priv->mac_dev, + qse_csroffs(rx_pktovrflow_eth_stats_dropevents1)); + storage->rx_dropped = ((u64)msb << 32) | lsb; + + /* also count the packets dropped by this network driver */ + storage->rx_dropped += dev->stats.rx_dropped; + + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(multicast_frames_ok_lsb)); + + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(multicast_frames_ok_msb)); + + storage->multicast = ((u64)msb << 32) | lsb; + storage->collisions = 0; + + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_undersize_pkts_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_undersize_pkts_msb)); + + storage->rx_length_errors = ((u64)msb << 32) | lsb; + + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_oversize_pkts_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(eth_stats_oversize_pkts_msb)); + + storage->rx_length_errors += 
(((u64)msb << 32) | lsb); + storage->rx_over_errors = 0; + + lsb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(frames_crc_err_lsb)); + msb = csrrd32(priv->mac_dev, + qse_rxstat_csroffs(frames_crc_err_msb)); + storage->rx_crc_errors = ((u64)msb << 32) | lsb; + + lsb = csrrd32(priv->mac_dev, qse_rxstat_csroffs(frames_err_lsb)); + msb = csrrd32(priv->mac_dev, qse_rxstat_csroffs(frames_err_msb)); + storage->rx_frame_errors = ((u64)msb << 32) | lsb; + + storage->rx_fifo_errors = 0; + storage->rx_missed_errors = 0; + + /* tx stats */ + lsb = csrrd32(priv->mac_dev, + qse_txstat_csroffs(eth_stats_pkts_lsb)); + msb = csrrd32(priv->mac_dev, + qse_txstat_csroffs(eth_stats_pkts_msb)); + storage->tx_packets = ((u64)msb << 32) | lsb; + + lsb = csrrd32(priv->mac_dev, + qse_txstat_csroffs(eth_stats_oct_lsb)); + msb = csrrd32(priv->mac_dev, + qse_txstat_csroffs(eth_stats_oct_msb)); + storage->tx_bytes = ((u64)msb << 32) | lsb; + + lsb = csrrd32(priv->mac_dev, qse_txstat_csroffs(if_errors_lsb)); + msb = csrrd32(priv->mac_dev, qse_txstat_csroffs(if_errors_msb)); + storage->tx_errors = ((u64)msb << 32) | lsb; + + storage->tx_dropped = 0; + storage->tx_aborted_errors = 0; + storage->tx_fifo_errors = 0; + storage->tx_heartbeat_errors = 0; + storage->tx_window_errors = 0; + storage->rx_compressed = 0; + storage->tx_compressed = 0; +} + +static struct net_device_ops intel_fpga_qse_netdev_ops = { + .ndo_open = qse_open, + .ndo_stop = qse_shutdown, + .ndo_start_xmit = qse_start_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_set_rx_mode = qse_set_rx_mode, + .ndo_change_mtu = qse_change_mtu, + .ndo_eth_ioctl = qse_do_ioctl, + .ndo_get_stats64 = qse_get_stats64 +}; + +static void intel_fpga_qse_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct intel_fpga_qse_private *priv = + netdev_priv(to_net_dev(config->dev)); + u32 speed_reconfig; + + speed_reconfig = csrrd32(priv->phy_reconfig_csr, + phy_csroffs(speed_reconfig)); + + 
if (tse_bit_is_clear(priv->phy_reconfig_csr, + phy_csroffs(reconfig_busy), PHY_RECONFIG_BUSY)) { + csrwr32(0, priv->phy_reconfig_csr, + phy_csroffs(logical_chan_num)); + switch (state->interface) { + case PHY_INTERFACE_MODE_10GBASER: + speed_reconfig |= (PHY_ETH_SPEED_10000 | + PHY_RECONFIG_START); + break; + default: + if (netif_msg_link(priv)) + netdev_warn(priv->dev, "Speed (%d) is not 10000!\n", + state->speed); + speed_reconfig &= ~PHY_ETH_SPEED_10000; + return; + } + csrwr32(speed_reconfig, priv->phy_reconfig_csr, + phy_csroffs(speed_reconfig)); + } else { + netdev_warn(priv->dev, "fail to reconfigure PHY\n"); + return; + } +} + +static void intel_fpga_qse_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct intel_fpga_qse_private *priv = + netdev_priv(to_net_dev(config->dev)); + + phylink_mac_change(priv->phylink, false); +} + +static void intel_fpga_qse_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, + phy_interface_t interface, int speed, + int duplex, bool tx_pause, + bool rx_pause) +{ + struct intel_fpga_qse_private *priv = + netdev_priv(to_net_dev(config->dev)); + + phylink_mac_change(priv->phylink, true); +} + +static const struct phylink_mac_ops intel_fpga_qse_phylink_ops = { + .mac_config = intel_fpga_qse_mac_config, + .mac_link_down = intel_fpga_qse_mac_link_down, + .mac_link_up = intel_fpga_qse_mac_link_up, +}; + +/* Probe Altera QSE MAC device + */ +static int intel_fpga_qse_ll_probe(struct platform_device *pdev) +{ + struct net_device *ndev; + int ret = -ENODEV; + struct resource *control_port; + struct resource *rx_fifo; + struct resource *xcvr_ctrl; + struct resource *phy_reconfig_csr; + struct intel_fpga_qse_private *priv; + struct device_node *np = pdev->dev.of_node; + const struct of_device_id *of_id = NULL; + u8 addr[ETH_ALEN]; + + ndev = alloc_etherdev(sizeof(struct intel_fpga_qse_private)); + if (!ndev) { + dev_err(&pdev->dev, "Could not 
allocate network device\n"); + return -ENODEV; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + priv = netdev_priv(ndev); + priv->device = &pdev->dev; + priv->dma_priv.device = &pdev->dev; + priv->dev = ndev; + priv->dma_priv.dev = ndev; + priv->ptp_priv.dev = ndev; + priv->msg_enable = netif_msg_init(debug, default_msg_level); + priv->dma_priv.msg_enable = netif_msg_init(debug, default_msg_level); + priv->pause = pause; + priv->flow_ctrl = flow_ctrl; + priv->phylink_config.dev = &priv->dev->dev; + priv->phylink_config.type = PHYLINK_NETDEV; + + of_id = of_match_device(intel_fpga_qse_ll_ids, &pdev->dev); + if (of_id) + priv->dmaops = (struct altera_dmaops *)of_id->data; + + /* PTP is only supported with a modified MSGDMA */ + priv->ptp_enable = of_property_read_bool(pdev->dev.of_node, + "altr,has-ptp"); + + if (priv->ptp_enable && + (priv->dmaops->altera_dtype != ALTERA_DTYPE_MSGDMA_PREF)) { + dev_err(&pdev->dev, "PTP requires modified dma\n"); + ret = -ENODEV; + goto err_free_netdev; + } + + /* MAC address space */ + ret = request_and_map(pdev, "control_port", &control_port, + (void __iomem **)&priv->mac_dev); + if (ret) + goto err_free_netdev; + + /* mSGDMA Tx IRQ */ + priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq"); + if (priv->tx_irq == -ENXIO) { + dev_err(&pdev->dev, "cannot obtain Tx IRQ\n"); + ret = -ENXIO; + goto err_free_netdev; + } + + /* mSGDMA Rx IRQ */ + priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq"); + if (priv->rx_irq == -ENXIO) { + dev_err(&pdev->dev, "cannot obtain Rx IRQ\n"); + ret = -ENXIO; + goto err_free_netdev; + } + + /* Map DMA */ + ret = altera_eth_dma_probe(pdev, &priv->dma_priv, + priv->dmaops->altera_dtype); + if (ret) { + dev_err(&pdev->dev, "cannot map DMA\n"); + goto err_free_netdev; + } + + /* Rx Fifo */ + ret = request_and_map(pdev, "rx_fifo", &rx_fifo, + (void __iomem **)&priv->rx_fifo); + if (ret) + goto err_free_netdev; + + /* XCVR address space */ + ret = request_and_map(pdev, "xcvr_ctrl", &xcvr_ctrl, + (void __iomem 
**)&priv->xcvr_ctrl); + if (ret) + goto err_free_netdev; + + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "\tXVCR Ctrl at 0x%08lx\n", + (unsigned long)xcvr_ctrl->start); + + /* PHY Reconfiguration address space */ + ret = request_and_map(pdev, "phy_reconfig_csr", &phy_reconfig_csr, + (void __iomem **)&priv->phy_reconfig_csr); + if (ret) + goto err_free_netdev; + + /* we only support ptp with the msgdma */ + if (priv->ptp_enable) { + /* MAP PTP */ + ret = intel_fpga_tod_probe(pdev, &priv->ptp_priv); + if (ret) { + dev_err(&pdev->dev, "cannot map PTP\n"); + goto err_free_netdev; + } + } + + if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) + dma_set_coherent_mask(priv->device, + DMA_BIT_MASK(priv->dmaops->dmamask)); + else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) + dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32)); + else + goto err_free_netdev; + + /* get FIFO depths from device tree */ + if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", + &priv->rx_fifo_depth)) { + dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n"); + ret = -ENXIO; + goto err_free_netdev; + } + + if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", + &priv->tx_fifo_depth)) { + dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); + ret = -ENXIO; + goto err_free_netdev; + } + + if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-almost-full", + &priv->rx_fifo_almost_full)) { + dev_err(&pdev->dev, "cannot obtain rx-fifo-almost-full\n"); + priv->rx_fifo_almost_full = 0x10000; + } + + if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-almost-empty", + &priv->rx_fifo_almost_empty)) { + dev_err(&pdev->dev, "cannot obtain rx-fifo-almost-empty\n"); + priv->rx_fifo_almost_empty = 0x8000; + } + + /* Set hash filter to not set for now until the + * multicast filter receive issue is debugged + */ + priv->hash_filter = 0; + + /* get supplemental address settings for this instance */ + priv->added_unicast = + of_property_read_bool(pdev->dev.of_node, 
+ "altr,has-supplementary-unicast"); + + priv->dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN; + /* Max MTU is 1500, ETH_DATA_LEN */ + priv->dev->max_mtu = ETH_DATA_LEN; + + /* Get the max mtu from the device tree. Note that the + * "max-frame-size" parameter is actually max mtu. Definition + * in the ePAPR v1.1 spec and usage differ, so go with usage. + */ + of_property_read_u32(pdev->dev.of_node, "max-frame-size", + &priv->dev->max_mtu); + + /* The DMA buffer size already accounts for an alignment bias + * to avoid unaligned access exceptions for the NIOS processor, + */ + priv->dma_priv.rx_dma_buf_sz = INTEL_FPGA_RXDMABUFFER_SIZE; + + /* Get MAC PMA digital delays from device tree */ + if (of_property_read_u32(pdev->dev.of_node, "altr,tx-pma-delay-ns", + &priv->tx_pma_delay_ns)) { + dev_warn(&pdev->dev, "cannot obtain Tx PMA delay ns\n"); + priv->tx_pma_delay_ns = 0; + } + + if (of_property_read_u32(pdev->dev.of_node, "altr,rx-pma-delay-ns", + &priv->rx_pma_delay_ns)) { + dev_warn(&pdev->dev, "cannot obtain Rx PMA delay\n"); + priv->rx_pma_delay_ns = 0; + } + + if (of_property_read_u32(pdev->dev.of_node, "altr,tx-pma-delay-fns", + &priv->tx_pma_delay_fns)) { + dev_warn(&pdev->dev, "cannot obtain Tx PMA delay fns\n"); + priv->tx_pma_delay_fns = 0; + } + + if (of_property_read_u32(pdev->dev.of_node, "altr,rx-pma-delay-fns", + &priv->rx_pma_delay_fns)) { + dev_warn(&pdev->dev, "cannot obtain Rx PMA delay\n"); + priv->rx_pma_delay_fns = 0; + } + + /* get default MAC address from device tree */ + ret = of_get_mac_address(pdev->dev.of_node, addr); + if (ret) + eth_hw_addr_random(ndev); + + /* initialize netdev */ + ndev->mem_start = control_port->start; + ndev->mem_end = control_port->end; + ndev->netdev_ops = &intel_fpga_qse_netdev_ops; + intel_fpga_qse_ll_set_ethtool_ops(ndev); + + /* Scatter/gather IO is not supported, + * so it is turned off + */ + ndev->hw_features &= ~NETIF_F_SG; + ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; + + /* VLAN offloading of tagging, 
stripping and filtering is not + * supported by hardware, but driver will accommodate the + * extra 4-byte VLAN tag for processing by upper layers + */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; + + /* setup NAPI interface */ + netif_napi_add(ndev, &priv->napi, qse_poll); + + spin_lock_init(&priv->mac_cfg_lock); + spin_lock_init(&priv->tx_lock); + spin_lock_init(&priv->rxdma_irq_lock); + spin_lock_init(&priv->ptp_priv.tod_lock); + + /* check if phy-mode is present */ + ret = of_get_phy_mode(np, &priv->phy_iface); + if (ret) { + dev_err(&pdev->dev, "incorrect phy-mode\n"); + goto err_free_netdev; + } + + /* create phylink */ + priv->phylink = phylink_create(&priv->phylink_config, pdev->dev.fwnode, + priv->phy_iface, + &intel_fpga_qse_phylink_ops); + if (IS_ERR(priv->phylink)) { + dev_err(&pdev->dev, "failed to create phylink\n"); + ret = PTR_ERR(priv->phylink); + goto err_free_netdev; + } + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "failed to register QSE net device\n"); + goto err_register_netdev; + } + + platform_set_drvdata(pdev, ndev); + + if (priv->ptp_enable) { + ret = intel_fpga_tod_register(&priv->ptp_priv, priv->device); + if (ret) { + dev_err(&pdev->dev, "Unable to register PTP clock\n"); + ret = -ENXIO; + goto err_init_phy; + } + } + + return 0; + +err_init_phy: + unregister_netdev(ndev); +err_register_netdev: + netif_napi_del(&priv->napi); +err_free_netdev: + free_netdev(ndev); + return ret; +} + +/* Remove Altera QSE MAC device + */ +static void intel_fpga_qse_ll_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct intel_fpga_qse_private *priv = netdev_priv(ndev); + + if (priv->phylink) + phylink_disconnect_phy(priv->phylink); + + if (priv->ptp_enable) + intel_fpga_tod_unregister(&priv->ptp_priv); + + if (priv->phylink) + phylink_destroy(priv->phylink); + + platform_set_drvdata(pdev, NULL); + unregister_netdev(ndev); + free_netdev(ndev); +} + +static const struct altera_dmaops 
altera_dtype_prefetcher = { + .altera_dtype = ALTERA_DTYPE_MSGDMA_PREF, + .dmamask = 32, + .reset_dma = msgdma_pref_reset, + .enable_txirq = msgdma_pref_enable_txirq, + .enable_rxirq = msgdma_pref_enable_rxirq, + .disable_txirq = msgdma_pref_disable_txirq, + .disable_rxirq = msgdma_pref_disable_rxirq, + .clear_txirq = msgdma_pref_clear_txirq, + .clear_rxirq = msgdma_pref_clear_rxirq, + .tx_buffer = msgdma_pref_tx_buffer, + .tx_completions = msgdma_pref_tx_completions, + .add_rx_desc = msgdma_pref_add_rx_desc, + .get_rx_status = msgdma_pref_rx_status, + .init_dma = msgdma_pref_initialize, + .uninit_dma = msgdma_pref_uninitialize, + .start_rxdma = msgdma_pref_start_rxdma, + .start_txdma = msgdma_pref_start_txdma, +}; + +static const struct of_device_id intel_fpga_qse_ll_ids[] = { + { .compatible = "altr,qse-msgdma-2.0", + .data = &altera_dtype_prefetcher, }, + {}, +}; +MODULE_DEVICE_TABLE(of, intel_fpga_qse_ll_ids); + +static struct platform_driver intel_fpga_qse_ll_driver = { + .probe = intel_fpga_qse_ll_probe, + .remove = intel_fpga_qse_ll_remove, + .suspend = NULL, + .resume = NULL, + .driver = { + .name = INTEL_FPGA_QSE_LL_RESOURCE_NAME, + .owner = THIS_MODULE, + .of_match_table = intel_fpga_qse_ll_ids, + }, +}; + +module_platform_driver(intel_fpga_qse_ll_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Altera Quad Speed Ethernet Low Latency MAC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/altera/intel_fpga_tod.c b/drivers/net/ethernet/altera/intel_fpga_tod.c new file mode 100644 index 0000000000000..60bca41a473e4 --- /dev/null +++ b/drivers/net/ethernet/altera/intel_fpga_tod.c @@ -0,0 +1,349 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Intel FPGA ToD PTP Hardware Clock (PHC) Linux driver + * Copyright (C) 2015-2016 Altera Corporation. All rights reserved. + * Copyright (C) 2017-2020 Intel Corporation. All rights reserved. 
+ * + * Author(s): + * Dalon Westergreen + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "altera_utils.h" +#include "intel_fpga_tod.h" + +#define NOMINAL_PPB 1000000000ULL +#define TOD_PERIOD_MAX 0xfffff +#define TOD_PERIOD_MIN 0 +#define TOD_DRIFT_ADJUST_FNS_MAX 0xffff +#define TOD_DRIFT_ADJUST_RATE_MAX 0xffff +#define TOD_ADJUST_COUNT_MAX 0xfffff +#define TOD_ADJUST_MS_MAX (((((TOD_PERIOD_MAX) >> 16) + 1) * \ + ((TOD_ADJUST_COUNT_MAX) + 1)) / \ + 1000000UL) + +/* A fine ToD HW clock offset adjustment. + * To perform the fine offset adjustment the AdjustPeriod register is used + * to replace the Period register for AdjustCount clock cycles in hardware. + */ +static int fine_adjust_tod_clock(struct intel_fpga_tod_private *priv, + u32 adjust_period, u32 adjust_count) +{ + int limit; + + csrwr32(adjust_period, priv->tod_ctrl, tod_csroffs(adjust_period)); + csrwr32(adjust_count, priv->tod_ctrl, tod_csroffs(adjust_count)); + + /* Wait for present offset adjustment update to complete */ + limit = TOD_ADJUST_MS_MAX; + while (limit--) { + if (!csrrd32(priv->tod_ctrl, tod_csroffs(adjust_count))) + break; + mdelay(1); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +/* A coarse ToD HW clock offset adjustment. + * The coarse time adjustment performs by adding or subtracting the delta value + * from the current ToD HW clock time. 
+ */ +static int coarse_adjust_tod_clock(struct intel_fpga_tod_private *priv, + s64 delta) +{ + u32 seconds_msb, seconds_lsb, nanosec; + u64 seconds, now; + + if (delta == 0) + goto out; + + /* Get current time */ + nanosec = csrrd32(priv->tod_ctrl, tod_csroffs(nanosec)); + seconds_lsb = csrrd32(priv->tod_ctrl, tod_csroffs(seconds_lsb)); + seconds_msb = csrrd32(priv->tod_ctrl, tod_csroffs(seconds_msb)); + + /* Calculate new time */ + seconds = (((u64)(seconds_msb & 0x0000ffff)) << 32) | seconds_lsb; + now = seconds * NSEC_PER_SEC + nanosec + delta; + + seconds = div_u64_rem(now, NSEC_PER_SEC, &nanosec); + seconds_msb = upper_32_bits(seconds) & 0x0000ffff; + seconds_lsb = lower_32_bits(seconds); + + /* Set corrected time */ + csrwr32(seconds_msb, priv->tod_ctrl, tod_csroffs(seconds_msb)); + csrwr32(seconds_lsb, priv->tod_ctrl, tod_csroffs(seconds_lsb)); + csrwr32(nanosec, priv->tod_ctrl, tod_csroffs(nanosec)); + +out: + return 0; +} + +static int intel_fpga_tod_adjust_fine(struct ptp_clock_info *ptp, + long scaled_ppm) +{ + struct intel_fpga_tod_private *priv = + container_of(ptp, struct intel_fpga_tod_private, ptp_clock_ops); + u32 tod_period, tod_rem, tod_drift_adjust_fns, tod_drift_adjust_rate; + unsigned long flags; + unsigned long rate; + int ret = 0; + s64 ppb; + u64 new_ppb; + + rate = clk_get_rate(priv->tod_clk); + if (!rate) { + ret = -ERANGE; + goto out; + } + + /* From scaled_ppm_to_ppb */ + ppb = 1 + scaled_ppm; + ppb *= 125; + ppb >>= 13; + + new_ppb = (s32)ppb + NOMINAL_PPB; + + tod_period = div_u64_rem(new_ppb << 16, rate, &tod_rem); + if (tod_period > TOD_PERIOD_MAX) { + ret = -ERANGE; + goto out; + } + + /* The drift of ToD adjusted periodically by adding a drift_adjust_fns + * correction value every drift_adjust_rate count of clock cycles. 
+ */ + tod_drift_adjust_fns = tod_rem / gcd(tod_rem, rate); + tod_drift_adjust_rate = rate / gcd(tod_rem, rate); + + while ((tod_drift_adjust_fns > TOD_DRIFT_ADJUST_FNS_MAX) | + (tod_drift_adjust_rate > TOD_DRIFT_ADJUST_RATE_MAX)) { + tod_drift_adjust_fns = tod_drift_adjust_fns >> 1; + tod_drift_adjust_rate = tod_drift_adjust_rate >> 1; + } + + if (tod_drift_adjust_fns == 0) + tod_drift_adjust_rate = 0; + + spin_lock_irqsave(&priv->tod_lock, flags); + csrwr32(tod_period, priv->tod_ctrl, tod_csroffs(period)); + csrwr32(0, priv->tod_ctrl, tod_csroffs(adjust_period)); + csrwr32(0, priv->tod_ctrl, tod_csroffs(adjust_count)); + csrwr32(tod_drift_adjust_fns, priv->tod_ctrl, + tod_csroffs(drift_adjust)); + csrwr32(tod_drift_adjust_rate, priv->tod_ctrl, + tod_csroffs(drift_adjust_rate)); + spin_unlock_irqrestore(&priv->tod_lock, flags); + +out: + return ret; +} + +static int intel_fpga_tod_adjust_time(struct ptp_clock_info *ptp, s64 delta) +{ + struct intel_fpga_tod_private *priv = + container_of(ptp, struct intel_fpga_tod_private, ptp_clock_ops); + unsigned long flags; + u32 period, diff, rem, rem_period, adj_period; + u64 count; + int neg_adj = 0, ret = 0; + + if (delta < 0) { + neg_adj = 1; + delta = -delta; + } + + spin_lock_irqsave(&priv->tod_lock, flags); + + /* Get the maximum possible value of the Period register offset + * adjustment in nanoseconds scale. This depends on the current + * Period register setting and the maximum and minimum possible + * values of the Period register. 
+ */ + period = csrrd32(priv->tod_ctrl, tod_csroffs(period)); + + if (neg_adj) + diff = (period - TOD_PERIOD_MIN) >> 16; + else + diff = (TOD_PERIOD_MAX - period) >> 16; + + /* Find the number of cycles required for the + * time adjustment + */ + count = div_u64_rem(delta, diff, &rem); + + if (neg_adj) { + adj_period = period - (diff << 16); + rem_period = period - (rem << 16); + } else { + adj_period = period + (diff << 16); + rem_period = period + (rem << 16); + } + + /* If count is larger than the maximum count, + * just set the time. + */ + if (count > TOD_ADJUST_COUNT_MAX) { + /* Perform the coarse time offset adjustment */ + ret = coarse_adjust_tod_clock(priv, delta); + } else { + /* Adjust the period for count cycles to adjust + * the time. + */ + if (count) + ret = fine_adjust_tod_clock(priv, adj_period, count); + + /* If there is a remainder, adjust the period for an + * additional cycle + */ + if (rem) + ret = fine_adjust_tod_clock(priv, rem_period, 1); + } + + spin_unlock_irqrestore(&priv->tod_lock, flags); + + return ret; +} + +static int intel_fpga_tod_get_time(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct intel_fpga_tod_private *priv = + container_of(ptp, struct intel_fpga_tod_private, ptp_clock_ops); + u32 seconds_msb, seconds_lsb, nanosec; + unsigned long flags; + u64 seconds; + + spin_lock_irqsave(&priv->tod_lock, flags); + nanosec = csrrd32(priv->tod_ctrl, tod_csroffs(nanosec)); + seconds_lsb = csrrd32(priv->tod_ctrl, tod_csroffs(seconds_lsb)); + seconds_msb = csrrd32(priv->tod_ctrl, tod_csroffs(seconds_msb)); + spin_unlock_irqrestore(&priv->tod_lock, flags); + + seconds = (((u64)(seconds_msb & 0x0000ffff)) << 32) | seconds_lsb; + + ts->tv_nsec = nanosec; + ts->tv_sec = (__kernel_old_time_t)seconds; + + return 0; +} + +static int intel_fpga_tod_set_time(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct intel_fpga_tod_private *priv = + container_of(ptp, struct intel_fpga_tod_private, ptp_clock_ops); + u32 
seconds_msb = upper_32_bits(ts->tv_sec) & 0x0000ffff; + u32 seconds_lsb = lower_32_bits(ts->tv_sec); + u32 nanosec = lower_32_bits(ts->tv_nsec); + unsigned long flags; + + spin_lock_irqsave(&priv->tod_lock, flags); + csrwr32(seconds_msb, priv->tod_ctrl, tod_csroffs(seconds_msb)); + csrwr32(seconds_lsb, priv->tod_ctrl, tod_csroffs(seconds_lsb)); + csrwr32(nanosec, priv->tod_ctrl, tod_csroffs(nanosec)); + spin_unlock_irqrestore(&priv->tod_lock, flags); + + return 0; +} + +static int intel_fpga_tod_enable_feature(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, + int on) +{ + return -EOPNOTSUPP; +} + +static struct ptp_clock_info intel_fpga_tod_clock_ops = { + .owner = THIS_MODULE, + .name = "intel_fpga_tod", + .max_adj = 500000000, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .pps = 0, + .adjfine = intel_fpga_tod_adjust_fine, + .adjtime = intel_fpga_tod_adjust_time, + .gettime64 = intel_fpga_tod_get_time, + .settime64 = intel_fpga_tod_set_time, + .enable = intel_fpga_tod_enable_feature, +}; + +/* Register the PTP clock driver to kernel */ +int intel_fpga_tod_register(struct intel_fpga_tod_private *priv, + struct device *device) +{ + int ret = 0; + struct timespec64 ts = { 0, 0 }; + + priv->ptp_clock_ops = intel_fpga_tod_clock_ops; + + priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, device); + if (IS_ERR(priv->ptp_clock)) { + priv->ptp_clock = NULL; + return -ENODEV; + } + + if (priv->tod_clk) + ret = clk_prepare_enable(priv->tod_clk); + + /* Initialize the hardware clock to zero */ + intel_fpga_tod_set_time(&priv->ptp_clock_ops, &ts); + + return ret; +} + +/* Remove/unregister the ptp clock driver from the kernel */ +void intel_fpga_tod_unregister(struct intel_fpga_tod_private *priv) +{ + if (priv->ptp_clock) { + ptp_clock_unregister(priv->ptp_clock); + priv->ptp_clock = NULL; + } + + if (priv->tod_clk) + clk_disable_unprepare(priv->tod_clk); +} + +/* Common PTP probe function */ +int intel_fpga_tod_probe(struct platform_device 
*pdev, + struct intel_fpga_tod_private *priv) +{ + struct resource *ptp_res; + int ret = -ENODEV; + + priv->dev = (struct net_device *)platform_get_drvdata(pdev); + + /* Time-of-Day (ToD) Clock address space */ + ret = request_and_map(pdev, "tod_ctrl", &ptp_res, + (void __iomem **)&priv->tod_ctrl); + if (ret) + goto err; + + dev_info(&pdev->dev, "\tTOD Ctrl at 0x%08lx\n", + (unsigned long)ptp_res->start); + + /* Time-of-Day (ToD) Clock period clock */ + priv->tod_clk = devm_clk_get(&pdev->dev, "tod_clk"); + if (IS_ERR(priv->tod_clk)) { + dev_err(&pdev->dev, "cannot obtain ToD period clock\n"); + ret = -ENXIO; + goto err; + } + + spin_lock_init(&priv->tod_lock); +err: + return ret; +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/altera/intel_fpga_tod.h b/drivers/net/ethernet/altera/intel_fpga_tod.h new file mode 100644 index 0000000000000..064b97c2bf380 --- /dev/null +++ b/drivers/net/ethernet/altera/intel_fpga_tod.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Altera PTP Hardware Clock (PHC) Linux driver + * Copyright (C) 2015-2016 Altera Corporation. All rights reserved. + * Copyright (C) 2017-2020 Intel Corporation. All rights reserved. + * + * Author(s): + * Dalon Westergreen + */ + +#ifndef __INTEL_FPGA_TOD_H__ +#define __INTEL_FPGA_TOD_H__ + +#include +#include +#include +#include +#include + +/* Altera Time-of-Day (ToD) clock register space. 
*/ +struct intel_fpga_tod { + u32 seconds_msb; + u32 seconds_lsb; + u32 nanosec; + u32 reserved1[0x1]; + u32 period; + u32 adjust_period; + u32 adjust_count; + u32 drift_adjust; + u32 drift_adjust_rate; +}; + +#define tod_csroffs(a) (offsetof(struct intel_fpga_tod, a)) + +struct intel_fpga_tod_private { + struct net_device *dev; + + struct ptp_clock_info ptp_clock_ops; + struct ptp_clock *ptp_clock; + + /* Time-of-Day (ToD) Clock address space */ + struct intel_fpga_tod __iomem *tod_ctrl; + struct clk *tod_clk; + + /* ToD clock registers protection */ + spinlock_t tod_lock; +}; + +int intel_fpga_tod_init(struct intel_fpga_tod_private *priv); +void intel_fpga_tod_uinit(struct intel_fpga_tod_private *priv); +int intel_fpga_tod_register(struct intel_fpga_tod_private *priv, + struct device *device); +void intel_fpga_tod_unregister(struct intel_fpga_tod_private *priv); +int intel_fpga_tod_probe(struct platform_device *pdev, + struct intel_fpga_tod_private *priv); + +#endif /* __INTEL_FPGA_TOD_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index c2f0e91f6bf83..39e44ba555e64 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -36,7 +36,7 @@ obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o obj-$(CONFIG_DWMAC_TEGRA) += dwmac-tegra.o obj-$(CONFIG_DWMAC_VISCONTI) += dwmac-visconti.o stmmac-platform-objs:= stmmac_platform.o -dwmac-altr-socfpga-objs := dwmac-socfpga.o +dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c new file mode 100644 index 0000000000000..00f6d347eaf75 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright Altera Corporation (C) 2016. 
All rights reserved. + * + * Author: Tien Hock Loh + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "stmmac.h" +#include "stmmac_platform.h" +#include "altr_tse_pcs.h" + +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII BIT(1) +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII BIT(2) +#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK GENMASK(1, 0) + +#define TSE_PCS_CONTROL_AN_EN_MASK BIT(12) +#define TSE_PCS_CONTROL_REG 0x00 +#define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9) +#define TSE_PCS_CTRL_AUTONEG_SGMII 0x1140 +#define TSE_PCS_IF_MODE_REG 0x28 +#define TSE_PCS_LINK_TIMER_0_REG 0x24 +#define TSE_PCS_LINK_TIMER_1_REG 0x26 +#define TSE_PCS_SIZE 0x40 +#define TSE_PCS_STATUS_AN_COMPLETED_MASK BIT(5) +#define TSE_PCS_STATUS_LINK_MASK 0x0004 +#define TSE_PCS_STATUS_REG 0x02 +#define TSE_PCS_SGMII_SPEED_1000 BIT(3) +#define TSE_PCS_SGMII_SPEED_100 BIT(2) +#define TSE_PCS_SGMII_SPEED_10 0x0 +#define TSE_PCS_SW_RST_MASK 0x8000 +#define TSE_PCS_PARTNER_ABILITY_REG 0x0A +#define TSE_PCS_PARTNER_DUPLEX_FULL 0x1000 +#define TSE_PCS_PARTNER_DUPLEX_HALF 0x0000 +#define TSE_PCS_PARTNER_DUPLEX_MASK 0x1000 +#define TSE_PCS_PARTNER_SPEED_MASK GENMASK(11, 10) +#define TSE_PCS_PARTNER_SPEED_1000 BIT(11) +#define TSE_PCS_PARTNER_SPEED_100 BIT(10) +#define TSE_PCS_PARTNER_SPEED_10 0x0000 +#define TSE_PCS_PARTNER_SPEED_1000 BIT(11) +#define TSE_PCS_PARTNER_SPEED_100 BIT(10) +#define TSE_PCS_PARTNER_SPEED_10 0x0000 +#define TSE_PCS_SGMII_SPEED_MASK GENMASK(3, 2) +#define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40 +#define TSE_PCS_SGMII_LINK_TIMER_1 0x0003 +#define TSE_PCS_SW_RESET_TIMEOUT 100 +#define TSE_PCS_USE_SGMII_AN_MASK BIT(1) +#define TSE_PCS_USE_SGMII_ENA BIT(0) +#define TSE_PCS_IF_USE_SGMII 0x03 + +#define AUTONEGO_LINK_TIMER 20 + +static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs) +{ + int counter = 0; + u16 val; + + val = readw(base + 
TSE_PCS_CONTROL_REG); + val |= TSE_PCS_SW_RST_MASK; + writew(val, base + TSE_PCS_CONTROL_REG); + + while (counter < TSE_PCS_SW_RESET_TIMEOUT) { + val = readw(base + TSE_PCS_CONTROL_REG); + val &= TSE_PCS_SW_RST_MASK; + if (val == 0) + break; + counter++; + udelay(1); + } + if (counter >= TSE_PCS_SW_RESET_TIMEOUT) { + dev_err(pcs->dev, "PCS could not get out of sw reset\n"); + return -ETIMEDOUT; + } + + return 0; +} + +int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs) +{ + int ret = 0; + + writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG); + + writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG); + + writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG); + writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG); + + ret = tse_pcs_reset(base, pcs); + if (ret == 0) + writew(SGMII_ADAPTER_ENABLE, + pcs->sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + + return ret; +} + +static void pcs_link_timer_callback(struct tse_pcs *pcs) +{ + u16 val = 0; + void __iomem *tse_pcs_base = pcs->tse_pcs_base; + void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; + + val = readw(tse_pcs_base + TSE_PCS_STATUS_REG); + val &= TSE_PCS_STATUS_LINK_MASK; + + if (val != 0) { + dev_dbg(pcs->dev, "Adapter: Link is established\n"); + writew(SGMII_ADAPTER_ENABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + } else { + mod_timer(&pcs->aneg_link_timer, jiffies + + msecs_to_jiffies(AUTONEGO_LINK_TIMER)); + } +} + +static void auto_nego_timer_callback(struct tse_pcs *pcs) +{ + u16 val = 0; + u16 speed = 0; + u16 duplex = 0; + void __iomem *tse_pcs_base = pcs->tse_pcs_base; + void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; + + val = readw(tse_pcs_base + TSE_PCS_STATUS_REG); + val &= TSE_PCS_STATUS_AN_COMPLETED_MASK; + + if (val != 0) { + dev_dbg(pcs->dev, "Adapter: Auto Negotiation is completed\n"); + val = readw(tse_pcs_base + TSE_PCS_PARTNER_ABILITY_REG); + speed = val & TSE_PCS_PARTNER_SPEED_MASK; + duplex = val & 
TSE_PCS_PARTNER_DUPLEX_MASK; + + if (speed == TSE_PCS_PARTNER_SPEED_10 && + duplex == TSE_PCS_PARTNER_DUPLEX_FULL) + dev_dbg(pcs->dev, + "Adapter: Link Partner is Up - 10/Full\n"); + else if (speed == TSE_PCS_PARTNER_SPEED_100 && + duplex == TSE_PCS_PARTNER_DUPLEX_FULL) + dev_dbg(pcs->dev, + "Adapter: Link Partner is Up - 100/Full\n"); + else if (speed == TSE_PCS_PARTNER_SPEED_1000 && + duplex == TSE_PCS_PARTNER_DUPLEX_FULL) + dev_dbg(pcs->dev, + "Adapter: Link Partner is Up - 1000/Full\n"); + else if (speed == TSE_PCS_PARTNER_SPEED_10 && + duplex == TSE_PCS_PARTNER_DUPLEX_HALF) + dev_err(pcs->dev, + "Adapter does not support Half Duplex\n"); + else if (speed == TSE_PCS_PARTNER_SPEED_100 && + duplex == TSE_PCS_PARTNER_DUPLEX_HALF) + dev_err(pcs->dev, + "Adapter does not support Half Duplex\n"); + else if (speed == TSE_PCS_PARTNER_SPEED_1000 && + duplex == TSE_PCS_PARTNER_DUPLEX_HALF) + dev_err(pcs->dev, + "Adapter does not support Half Duplex\n"); + else + dev_err(pcs->dev, + "Adapter: Invalid Partner Speed and Duplex\n"); + + if (duplex == TSE_PCS_PARTNER_DUPLEX_FULL && + (speed == TSE_PCS_PARTNER_SPEED_10 || + speed == TSE_PCS_PARTNER_SPEED_100 || + speed == TSE_PCS_PARTNER_SPEED_1000)) + writew(SGMII_ADAPTER_ENABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + } else { + val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG); + val |= TSE_PCS_CONTROL_RESTART_AN_MASK; + writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG); + + tse_pcs_reset(tse_pcs_base, pcs); + mod_timer(&pcs->aneg_link_timer, jiffies + + msecs_to_jiffies(AUTONEGO_LINK_TIMER)); + } +} + +static void aneg_link_timer_callback(struct timer_list *t) +{ + struct tse_pcs *pcs = from_timer(pcs, t, aneg_link_timer); + + if (pcs->autoneg == AUTONEG_ENABLE) + auto_nego_timer_callback(pcs); + else if (pcs->autoneg == AUTONEG_DISABLE) + pcs_link_timer_callback(pcs); +} + +void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev, + unsigned int speed) +{ + void __iomem *tse_pcs_base = 
pcs->tse_pcs_base; + u32 val; + + pcs->autoneg = phy_dev->autoneg; + + if (phy_dev->autoneg == AUTONEG_ENABLE) { + val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG); + val |= TSE_PCS_CONTROL_AN_EN_MASK; + writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG); + + val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG); + val |= TSE_PCS_USE_SGMII_AN_MASK; + writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG); + + val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG); + val |= TSE_PCS_CONTROL_RESTART_AN_MASK; + + tse_pcs_reset(tse_pcs_base, pcs); + + timer_setup(&pcs->aneg_link_timer, aneg_link_timer_callback, + 0); + mod_timer(&pcs->aneg_link_timer, jiffies + + msecs_to_jiffies(AUTONEGO_LINK_TIMER)); + } else if (phy_dev->autoneg == AUTONEG_DISABLE) { + val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG); + val &= ~TSE_PCS_CONTROL_AN_EN_MASK; + writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG); + + val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG); + val &= ~TSE_PCS_USE_SGMII_AN_MASK; + writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG); + + val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG); + val &= ~TSE_PCS_SGMII_SPEED_MASK; + + switch (speed) { + case 1000: + val |= TSE_PCS_SGMII_SPEED_1000; + break; + case 100: + val |= TSE_PCS_SGMII_SPEED_100; + break; + case 10: + val |= TSE_PCS_SGMII_SPEED_10; + break; + default: + return; + } + writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG); + + tse_pcs_reset(tse_pcs_base, pcs); + + timer_setup(&pcs->aneg_link_timer, aneg_link_timer_callback, + 0); + mod_timer(&pcs->aneg_link_timer, jiffies + + msecs_to_jiffies(AUTONEGO_LINK_TIMER)); + } +} diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h new file mode 100644 index 0000000000000..694ac25ef426b --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright Altera Corporation (C) 2016. All rights reserved. 
+ * + * Author: Tien Hock Loh + */ + +#ifndef __TSE_PCS_H__ +#define __TSE_PCS_H__ + +#include +#include + +#define SGMII_ADAPTER_CTRL_REG 0x00 +#define SGMII_ADAPTER_ENABLE 0x0000 +#define SGMII_ADAPTER_DISABLE 0x0001 + +struct tse_pcs { + struct device *dev; + void __iomem *tse_pcs_base; + void __iomem *sgmii_adapter_base; + struct timer_list aneg_link_timer; + int autoneg; +}; + +int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs); +void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev, + unsigned int speed); + +#endif /* __TSE_PCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 684489156dcee..3530375abb161 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -228,6 +228,7 @@ struct stmmac_extra_stats { unsigned long mtl_est_btrlm; unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES]; unsigned long mtl_est_txq_hlbf[MTL_MAX_TX_QUEUES]; + unsigned long mtl_est_txq_hlbs[MTL_MAX_TX_QUEUES]; /* per queue statistics */ struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES]; struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES]; @@ -609,6 +610,7 @@ struct mac_device_info { u32 vlan_filter[32]; bool vlan_fail_q_en; u8 vlan_fail_q; + unsigned int double_vlan; bool hw_vlan_en; }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index e897b49aa9e05..f7dbf9ed0da57 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -4,6 +4,8 @@ * Adopted from dwmac-sti.c */ +#include +#include #include #include #include @@ -15,8 +17,12 @@ #include #include +#include "dwxgmac2.h" #include "stmmac.h" #include "stmmac_platform.h" +#include "stmmac_ptp.h" + +#include "altr_tse_pcs.h" #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 @@ -41,6 +47,17 @@ 
#define SGMII_ADAPTER_ENABLE 0x0000 #define SGMII_ADAPTER_DISABLE 0x0001 +#define SMTG_HUB_ADDR 0x15 +#define SMTG_REG_0xC 0xC +#define SMTG_REG_0xD 0xD +#define SMTG_REG_0xE 0xE +#define SMTG_REG_0xF 0xF +#define SMTG_REG_0x10 0x10 +#define SMTG_REG_0x11 0x11 +#define SMTG_REG_0x12 0x12 +#define SMTG_REG_0x13 0x13 +#define SMTG_TIME_SHIFT 16 + struct socfpga_dwmac; struct socfpga_dwmac_ops { int (*set_phy_mode)(struct socfpga_dwmac *dwmac_priv); @@ -59,13 +76,14 @@ struct socfpga_dwmac { bool f2h_ptp_ref_clk; const struct socfpga_dwmac_ops *ops; struct mdio_device *pcs_mdiodev; + struct tse_pcs pcs; }; static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) { struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv; void __iomem *splitter_base = dwmac->splitter_base; - void __iomem *sgmii_adapter_base = dwmac->sgmii_adapter_base; + void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base; struct device *dev = dwmac->dev; struct net_device *ndev = dev_get_drvdata(dev); struct phy_device *phy_dev = ndev->phydev; @@ -95,9 +113,132 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG); } - if (phy_dev && sgmii_adapter_base) + if (phy_dev && sgmii_adapter_base) { writew(SGMII_ADAPTER_ENABLE, sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed); + } +} + +static void get_smtgtime(struct mii_bus *mii, int smtg_addr, + u64 *smtg_time_ctr0, u64 *smtg_time_ctr1) +{ + u64 ns; + + ns = mdiobus_read(mii, smtg_addr, SMTG_REG_0xF); + ns <<= SMTG_TIME_SHIFT; + ns |= mdiobus_read(mii, smtg_addr, SMTG_REG_0xE); + ns <<= SMTG_TIME_SHIFT; + ns |= mdiobus_read(mii, smtg_addr, SMTG_REG_0xD); + ns <<= SMTG_TIME_SHIFT; + ns |= mdiobus_read(mii, smtg_addr, SMTG_REG_0xC); + + *smtg_time_ctr0 = ns; + + ns = mdiobus_read(mii, smtg_addr, SMTG_REG_0x13); + ns <<= SMTG_TIME_SHIFT; + ns |= mdiobus_read(mii, smtg_addr, 
SMTG_REG_0x12); + ns <<= SMTG_TIME_SHIFT; + ns |= mdiobus_read(mii, smtg_addr, SMTG_REG_0x11); + ns <<= SMTG_TIME_SHIFT; + ns |= mdiobus_read(mii, smtg_addr, SMTG_REG_0x10); + + *smtg_time_ctr1 = ns; +} + +static int smtg_crosststamp(ktime_t *device, struct system_counterval_t *system, + void *ctx) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)ctx; + void __iomem *ptpaddr = priv->ptpaddr; + void __iomem *ioaddr = priv->hw->pcsr; + unsigned long flags; + u64 smtg_time_ctr0 = 0; + u64 smtg_time_ctr1 = 0; + u64 ptp_time = 0; + u32 num_snapshot; + u32 gpio_value; + u32 acr_value; + int ret; + u32 v; + int i; + enum clocksource_ids cs_id; + + /* Both internal crosstimestamping and external triggered event + * timestamping cannot be run concurrently. + */ + if (priv->plat->ext_snapshot_en) + return -EBUSY; + + mutex_lock(&priv->aux_ts_lock); + /* Enable Internal snapshot trigger */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value &= ~PTP_ACR_MASK; + switch (priv->plat->int_snapshot_num) { + case AUX_SNAPSHOT0: + acr_value |= PTP_ACR_ATSEN0; + break; + case AUX_SNAPSHOT1: + acr_value |= PTP_ACR_ATSEN1; + break; + case AUX_SNAPSHOT2: + acr_value |= PTP_ACR_ATSEN2; + break; + case AUX_SNAPSHOT3: + acr_value |= PTP_ACR_ATSEN3; + break; + default: + mutex_unlock(&priv->aux_ts_lock); + return -EINVAL; + } + writel(acr_value, ptpaddr + PTP_ACR); + + /* Clear FIFO */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value |= PTP_ACR_ATSFC; + writel(acr_value, ptpaddr + PTP_ACR); + /* Release the mutex */ + mutex_unlock(&priv->aux_ts_lock); + + /* Trigger Internal snapshot signal + * Create a rising edge by just toggle the GPO0 to low + * and back to high. 
+ */ + gpio_value = readl(ioaddr + XGMAC_GPIO_STATUS); + gpio_value &= ~XGMAC_GPIO_GPO0; + writel(gpio_value, ioaddr + XGMAC_GPIO_STATUS); + gpio_value |= XGMAC_GPIO_GPO0; + writel(gpio_value, ioaddr + XGMAC_GPIO_STATUS); + + /* Poll for time sync operation done */ + ret = readl_poll_timeout(priv->ioaddr + XGMAC_INT_STATUS, v, + (v & XGMAC_INT_TSIS), 100, 10000); + + if (ret == -ETIMEDOUT) { + pr_err("%s: Wait for time sync operation timeout\n", __func__); + return ret; + } + + num_snapshot = (readl(ioaddr + XGMAC_TIMESTAMP_STATUS) & + XGMAC_TIMESTAMP_ATSNS_MASK) >> + XGMAC_TIMESTAMP_ATSNS_SHIFT; + + /* Repeat until the timestamps are from the FIFO last segment */ + for (i = 0; i < num_snapshot; i++) { + read_lock_irqsave(&priv->ptp_lock, flags); + stmmac_get_ptptime(priv, ptpaddr, &ptp_time); + *device = ns_to_ktime(ptp_time); + read_unlock_irqrestore(&priv->ptp_lock, flags); + } + + get_smtgtime(priv->mii, SMTG_HUB_ADDR, &smtg_time_ctr0, &smtg_time_ctr1); + ret = get_ptp_clocksource_id(&cs_id); + if (!ret) { + system->cs_id = cs_id; + system->cycles = smtg_time_ctr0; + } + + return ret; } static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev) @@ -169,6 +310,9 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * devm_ioremap_resource(dev, &res_splitter); if (IS_ERR(dwmac->splitter_base)) { + dev_err(dev, + "%s: ERROR: failed mapping emac splitter\n", + __func__); ret = PTR_ERR(dwmac->splitter_base); goto err_node_put; } @@ -187,11 +331,13 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * goto err_node_put; } - dwmac->sgmii_adapter_base = + dwmac->pcs.sgmii_adapter_base = devm_ioremap_resource(dev, &res_sgmii_adapter); - if (IS_ERR(dwmac->sgmii_adapter_base)) { - ret = PTR_ERR(dwmac->sgmii_adapter_base); + if (IS_ERR(dwmac->pcs.sgmii_adapter_base)) { + dev_err(dev, "%s: failed to mapping adapter\n", + __func__); + ret = PTR_ERR(dwmac->pcs.sgmii_adapter_base); goto 
err_node_put; } } @@ -209,12 +355,16 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * goto err_node_put; } - dwmac->tse_pcs_base = + dwmac->pcs.tse_pcs_base = devm_ioremap_resource(dev, &res_tse_pcs); - if (IS_ERR(dwmac->tse_pcs_base)) { - ret = PTR_ERR(dwmac->tse_pcs_base); + if (IS_ERR(dwmac->pcs.tse_pcs_base)) { + dev_err(dev, + "%s: ERROR: failed ioremap tse control port\n", + __func__); + ret = PTR_ERR(dwmac->pcs.tse_pcs_base); goto err_node_put; + } } } @@ -239,13 +389,6 @@ static int socfpga_get_plat_phymode(struct socfpga_dwmac *dwmac) return priv->plat->mac_interface; } -static void socfpga_sgmii_config(struct socfpga_dwmac *dwmac, bool enable) -{ - u16 val = enable ? SGMII_ADAPTER_ENABLE : SGMII_ADAPTER_DISABLE; - - writew(val, dwmac->sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); -} - static int socfpga_set_phy_mode_common(int phymode, u32 *val) { switch (phymode) { @@ -321,8 +464,12 @@ static int socfpga_gen5_set_phy_mode(struct socfpga_dwmac *dwmac) */ reset_control_deassert(dwmac->stmmac_ocp_rst); reset_control_deassert(dwmac->stmmac_rst); - if (phymode == PHY_INTERFACE_MODE_SGMII) - socfpga_sgmii_config(dwmac, true); + if (phymode == PHY_INTERFACE_MODE_SGMII) { + if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) { + dev_err(dwmac->dev, "Unable to initialize TSE PCS"); + return -EINVAL; + } + } return 0; } @@ -374,8 +521,12 @@ static int socfpga_gen10_set_phy_mode(struct socfpga_dwmac *dwmac) */ reset_control_deassert(dwmac->stmmac_ocp_rst); reset_control_deassert(dwmac->stmmac_rst); - if (phymode == PHY_INTERFACE_MODE_SGMII) - socfpga_sgmii_config(dwmac, true); + if (phymode == PHY_INTERFACE_MODE_SGMII) { + if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) { + dev_err(dwmac->dev, "Unable to initialize TSE PCS"); + return -EINVAL; + } + } return 0; } @@ -488,6 +639,13 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) plat_dat->riwt_off = 1; + /* Cross Timestamp support for SMTG Hub 
*/ + if (of_property_read_bool(pdev->dev.of_node, "altr,smtg-hub")) { + dev_info(dev, "SMTG Hub Cross Timestamp supported\n"); + plat_dat->int_snapshot_num = AUX_SNAPSHOT0; + plat_dat->crosststamp = smtg_crosststamp; + } + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 6a2c7d22df1eb..6183759cab576 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -64,7 +64,12 @@ #define XGMAC_VLAN_ESVL BIT(18) #define XGMAC_VLAN_ETV BIT(16) #define XGMAC_VLAN_VID GENMASK(15, 0) +#define XGMAC_VLAN_OFS GENMASK(6, 2) +#define XGMAC_VLAN_OFS_SHIFT 2 +#define XGMAC_VLAN_CT BIT(1) +#define XGMAC_VLAN_OB BIT(0) #define XGMAC_VLAN_HASH_TABLE 0x00000058 +#define XGMAC_VLAN_VLHT GENMASK(15,0) #define XGMAC_VLAN_INCL 0x00000060 #define XGMAC_VLAN_VLTI BIT(20) #define XGMAC_VLAN_CSVL BIT(19) @@ -74,28 +79,32 @@ #define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2) #define XGMAC_RXQEN_SHIFT(x) ((x) * 2) #define XGMAC_RXQ_CTRL1 0x000000a4 -#define XGMAC_AVCPQ GENMASK(31, 28) -#define XGMAC_AVCPQ_SHIFT 28 -#define XGMAC_PTPQ GENMASK(27, 24) -#define XGMAC_PTPQ_SHIFT 24 -#define XGMAC_TACPQE BIT(23) -#define XGMAC_DCBCPQ GENMASK(19, 16) -#define XGMAC_DCBCPQ_SHIFT 16 -#define XGMAC_MCBCQEN BIT(15) -#define XGMAC_MCBCQ GENMASK(11, 8) -#define XGMAC_MCBCQ_SHIFT 8 +#define XGMAC_RXQCTRL_AVCPQ_MASK GENMASK(31, 28) +#define XGMAC_RXQCTRL_AVCPQ_SHIFT 28 +#define XGMAC_RXQCTRL_PTPQ_MASK GENMASK(27, 24) +#define XGMAC_RXQCTRL_PTPQ_SHIFT 24 +#define XGMAC_RXQCTRL_TACPQE BIT(23) +#define XGMAC_RXQCTRL_TACPQE_SHIFT 23 +#define XGMAC_RXQCTRL_DCBCPQ_MASK GENMASK(19, 16) +#define XGMAC_RXQCTRL_DCBCPQ_SHIFT 16 +#define XGMAC_RXQCTRL_MCBCQEN BIT(15) +#define XGMAC_RXQCTRL_MCBCQEN_SHIFT 15 +#define XGMAC_RXQCTRL_MCBCQ_MASK GENMASK(11, 8) +#define XGMAC_RXQCTRL_MCBCQ_SHIFT 8 #define XGMAC_RQ GENMASK(7, 4) 
#define XGMAC_RQ_SHIFT 4 -#define XGMAC_UPQ GENMASK(3, 0) -#define XGMAC_UPQ_SHIFT 0 +#define XGMAC_RXQCTRL_UPQ_MASK GENMASK(3, 0) +#define XGMAC_RXQCTRL_UPQ_SHIFT 0 #define XGMAC_RXQ_CTRL2 0x000000a8 #define XGMAC_RXQ_CTRL3 0x000000ac #define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8) #define XGMAC_PSRQ_SHIFT(x) ((x) * 8) #define XGMAC_INT_STATUS 0x000000b0 +#define XGMAC_INT_TSIS BIT(12) #define XGMAC_LPIIS BIT(5) #define XGMAC_PMTIS BIT(4) #define XGMAC_INT_EN 0x000000b4 +#define XGMAC_FPIE BIT(15) #define XGMAC_TSIE BIT(12) #define XGMAC_LPIIE BIT(5) #define XGMAC_PMTIE BIT(4) @@ -193,7 +202,15 @@ #define XGMAC_MDIO_ADDR 0x00000200 #define XGMAC_MDIO_DATA 0x00000204 #define XGMAC_MDIO_C22P 0x00000220 +#define XGMAC_GPIO_STATUS 0x0000027c +#define XGMAC_GPIO_GPO0 BIT(16) #define XGMAC_FPE_CTRL_STS 0x00000280 +#define XGMAC_TRSP BIT(19) +#define XGMAC_TVER BIT(18) +#define XGMAC_RRSP BIT(17) +#define XGMAC_RVER BIT(16) +#define XGMAC_SRSP BIT(2) +#define XGMAC_SVER BIT(1) #define XGMAC_EFPE BIT(0) #define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8) #define XGMAC_ADDR_MAX 32 @@ -242,6 +259,8 @@ #define XGMAC_OB BIT(0) #define XGMAC_RSS_DATA 0x00000c8c #define XGMAC_TIMESTAMP_STATUS 0x00000d20 +#define XGMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25) +#define XGMAC_TIMESTAMP_ATSNS_SHIFT 25 #define XGMAC_TXTSC BIT(15) #define XGMAC_TXTIMESTAMP_NSEC 0x00000d30 #define XGMAC_TXTSSTSLO GENMASK(30, 0) @@ -284,6 +303,11 @@ #define XGMAC_TC_PRTY_MAP1 0x00001044 #define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8) #define XGMAC_PSTC_SHIFT(x) ((x) * 8) +#define XGMAC_MTL_FPE_CTRL_STS 0x00001090 +#define XGMAC_PEC GENMASK(15, 8) +#define XGMAC_PEC_SHIFT 8 +#define XGMAC_AFSZ GENMASK(1, 0) +#define XGMAC_AFSZ_SHIFT 0 #define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0 #define XGMAC_RXPI BIT(31) #define XGMAC_NPE GENMASK(23, 16) @@ -348,6 +372,9 @@ /* DMA Registers */ #define XGMAC_DMA_MODE 0x00003000 #define XGMAC_SWR BIT(0) +#define DMA_MODE_INTM_MASK GENMASK(13, 12) +#define 
DMA_MODE_INTM_SHIFT 12 +#define DMA_MODE_INTM_MODE1 0x1 #define XGMAC_DMA_SYSBUS_MODE 0x00003004 #define XGMAC_WR_OSR_LMT GENMASK(29, 24) #define XGMAC_WR_OSR_LMT_SHIFT 24 @@ -455,6 +482,7 @@ #define XGMAC_TDES2_VTIR GENMASK(15, 14) #define XGMAC_TDES2_VTIR_SHIFT 14 #define XGMAC_TDES2_B1L GENMASK(13, 0) +#define XGMAC_TDES2_VLAN_TAG_MASK GENMASK(15, 14) #define XGMAC_TDES3_OWN BIT(31) #define XGMAC_TDES3_CTXT BIT(30) #define XGMAC_TDES3_FD BIT(29) @@ -477,6 +505,8 @@ #define XGMAC_TDES3_VLTV BIT(16) #define XGMAC_TDES3_VT GENMASK(15, 0) #define XGMAC_TDES3_FL GENMASK(14, 0) +#define XGMAC_RDES2_L4FM BIT(28) +#define XGMAC_RDES2_L3FM BIT(27) #define XGMAC_RDES2_HL GENMASK(9, 0) #define XGMAC_RDES3_OWN BIT(31) #define XGMAC_RDES3_CTXT BIT(30) @@ -486,6 +516,8 @@ #define XGMAC_RDES3_RSV BIT(26) #define XGMAC_RDES3_L34T GENMASK(23, 20) #define XGMAC_RDES3_L34T_SHIFT 20 +#define XGMAC_RDES3_ET_LT GENMASK(19, 16) +#define XGMAC_RDES3_ET_LT_SHIFT 16 #define XGMAC_L34T_IP4TCP 0x1 #define XGMAC_L34T_IP4UDP 0x2 #define XGMAC_L34T_IP6TCP 0x9 @@ -495,4 +527,46 @@ #define XGMAC_RDES3_TSD BIT(6) #define XGMAC_RDES3_TSA BIT(4) +/* RDES0 (write back format) */ +#define XGMAC_RDES0_VLAN_TAG_MASK GENMASK(15,0) + +/* MAC VLAN Tag Control and VLAN Tag Data */ +#define XGMAC_VLAN_TAG_CTRL 0x00000050 +#define XGMAC_VLAN_TAG_CTRL_OB BIT(0) +#define XGMAC_VLAN_TAG_CTRL_CT BIT(1) +#define XGMAC_VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2) +#define XGMAC_VLAN_TAG_CTRL_OFS_SHIFT 2 +#define XGMAC_VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21) +#define XGMAC_VLAN_TAG_CTRL_EVLS_SHIFT 21 +#define XGMAC_VLAN_TAG_CTRL_EVLRXS BIT(24) + +#define XGMAC_VLAN_TAG_STRIP_NONE (0x0 << XGMAC_VLAN_TAG_CTRL_EVLS_SHIFT) +#define XGMAC_VLAN_TAG_STRIP_PASS (0x1 << XGMAC_VLAN_TAG_CTRL_EVLS_SHIFT) +#define XGMAC_VLAN_TAG_STRIP_FAIL (0x2 << XGMAC_VLAN_TAG_CTRL_EVLS_SHIFT) +#define XGMAC_VLAN_TAG_STRIP_ALL (0x3 << XGMAC_VLAN_TAG_CTRL_EVLS_SHIFT) + +#define XGMAC_VLAN_TAG_DATA 0x00000054 +#define XGMAC_VLAN_TAG_DATA_ERIVLT 
BIT(20) +#define XGMAC_VLAN_TAG_DATA_ERSVLM BIT(19) +#define XGMAC_VLAN_TAG_DATA_DOVLTC BIT(18) +#define XGMAC_VLAN_TAG_DATA_ETV BIT(17) +#define XGMAC_VLAN_TAG_DATA_VEN BIT(16) +#define XGMAC_VLAN_TAG_DATA_VID GENMASK(15, 0) + +/* Error Type or L2 Type(ET/LT) Field Number */ +#define XGMAC_ET_LT_VLAN_STAG 8 +#define XGMAC_ET_LT_VLAN_CTAG 9 +#define XGMAC_ET_LT_DVLAN_CTAG_CTAG 10 +#define XGMAC_ET_LT_DVLAN_STAG_STAG 11 +#define XGMAC_ET_LT_DVLAN_CTAG_STAG 12 +#define XGMAC_ET_LT_DVLAN_STAG_CTAG 13 + +/* EXT VLAN Filtering HW FEAT */ +#define XGMAC_HWFEAT_NO_EXT 0 +#define XGMAC_HWFEAT_EXT_VLAN_4 1 +#define XGMAC_HWFEAT_EXT_VLAN_8 2 +#define XGMAC_HWFEAT_EXT_VLAN_16 3 +#define XGMAC_HWFEAT_EXT_VLAN_24 4 +#define XGMAC_HWFEAT_EXT_VLAN_32 5 + #endif /* __STMMAC_DWXGMAC2_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index f519d43738b08..f2f456354d96b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "stmmac.h" #include "stmmac_ptp.h" #include "dwxlgmac2.h" @@ -16,7 +17,7 @@ static void dwxgmac2_core_init(struct mac_device_info *hw, struct net_device *dev) { void __iomem *ioaddr = hw->pcsr; - u32 tx, rx; + u32 tx, rx, value; tx = readl(ioaddr + XGMAC_TX_CONFIG); rx = readl(ioaddr + XGMAC_RX_CONFIG); @@ -44,7 +45,11 @@ static void dwxgmac2_core_init(struct mac_device_info *hw, writel(tx, ioaddr + XGMAC_TX_CONFIG); writel(rx, ioaddr + XGMAC_RX_CONFIG); - writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN); + + value = XGMAC_INT_DEFAULT_EN; + if ((XGMAC_HWFEAT_FPESEL & readl(ioaddr + XGMAC_HW_FEATURE3)) >> 26) + value |= XGMAC_FPIE; + writel(value, ioaddr + XGMAC_INT_EN); } static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable) @@ -157,26 +162,33 @@ static void dwxgmac2_rx_queue_routing(struct mac_device_info *hw, void __iomem *ioaddr = hw->pcsr; u32 
value; - static const struct stmmac_rx_routing dwxgmac2_route_possibilities[] = { - { XGMAC_AVCPQ, XGMAC_AVCPQ_SHIFT }, - { XGMAC_PTPQ, XGMAC_PTPQ_SHIFT }, - { XGMAC_DCBCPQ, XGMAC_DCBCPQ_SHIFT }, - { XGMAC_UPQ, XGMAC_UPQ_SHIFT }, - { XGMAC_MCBCQ, XGMAC_MCBCQ_SHIFT }, + static const struct stmmac_rx_routing route_possibilities[] = { + { XGMAC_RXQCTRL_AVCPQ_MASK, XGMAC_RXQCTRL_AVCPQ_SHIFT }, + { XGMAC_RXQCTRL_PTPQ_MASK, XGMAC_RXQCTRL_PTPQ_SHIFT }, + { XGMAC_RXQCTRL_DCBCPQ_MASK, XGMAC_RXQCTRL_DCBCPQ_SHIFT }, + { XGMAC_RXQCTRL_UPQ_MASK, XGMAC_RXQCTRL_UPQ_SHIFT }, + { XGMAC_RXQCTRL_MCBCQ_MASK, XGMAC_RXQCTRL_MCBCQ_SHIFT }, }; + /* routing packet type not supported */ + if (packet < PACKET_AVCPQ || packet > PACKET_MCBCQ) + return; + value = readl(ioaddr + XGMAC_RXQ_CTRL1); /* routing configuration */ - value &= ~dwxgmac2_route_possibilities[packet - 1].reg_mask; - value |= (queue << dwxgmac2_route_possibilities[packet - 1].reg_shift) & - dwxgmac2_route_possibilities[packet - 1].reg_mask; + value &= ~route_possibilities[packet - 1].reg_mask; + value |= (queue << route_possibilities[packet - 1].reg_shift) & + route_possibilities[packet - 1].reg_mask; /* some packets require extra ops */ - if (packet == PACKET_AVCPQ) - value |= FIELD_PREP(XGMAC_TACPQE, 1); - else if (packet == PACKET_MCBCQ) - value |= FIELD_PREP(XGMAC_MCBCQEN, 1); + if (packet == PACKET_AVCPQ) { + value &= ~XGMAC_RXQCTRL_TACPQE; + value |= 0x1 << XGMAC_RXQCTRL_TACPQE_SHIFT; + } else if (packet == PACKET_MCBCQ) { + value &= ~XGMAC_RXQCTRL_MCBCQEN; + value |= 0x1 << XGMAC_RXQCTRL_MCBCQEN_SHIFT; + } writel(value, ioaddr + XGMAC_RXQ_CTRL1); } @@ -471,6 +483,177 @@ static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw) writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL); } +static void dwxgmac2_write_single_vlan(struct net_device *dev, u16 vid) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + u32 val; + + val = readl(ioaddr + XGMAC_VLAN_TAG); + val &= ~XGMAC_VLAN_VID; + val |= 
XGMAC_VLAN_ETV | vid; + + writel(val, ioaddr + XGMAC_VLAN_TAG); +} + +static int dwxgmac2_write_vlan_filter(struct net_device *dev, + struct mac_device_info *hw, + u8 index, u32 data) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + int i, timeout = 10; + u32 val; + + if (index >= hw->num_vlan) + return -EINVAL; + + if (hw->double_vlan) { + data |= XGMAC_VLAN_TAG_DATA_DOVLTC; + data |= XGMAC_VLAN_TAG_DATA_ERIVLT; + data &= ~XGMAC_VLAN_TAG_DATA_ERSVLM; + } else { + data &= ~XGMAC_VLAN_TAG_DATA_DOVLTC; + data &= ~XGMAC_VLAN_TAG_DATA_ERIVLT; + } + + writel(data, ioaddr + XGMAC_VLAN_TAG_DATA); + + val = readl(ioaddr + XGMAC_VLAN_TAG); + val &= ~(XGMAC_VLAN_TAG_CTRL_OFS_MASK | + XGMAC_VLAN_CT | + XGMAC_VLAN_OB); + + val |= (index << XGMAC_VLAN_OFS_SHIFT | XGMAC_VLAN_OB); + + writel(val, ioaddr + XGMAC_VLAN_TAG); + + for (i = 0; i < timeout; i++) { + val = readl(ioaddr + XGMAC_VLAN_TAG); + if(!(val & XGMAC_VLAN_OB)) + return 0; + udelay(1); + + } + + netdev_err(dev, "Timeout accesing MAC_VLAN_TAG_FILTER\n"); + + return -EBUSY; +} + +static int dwxgmac2_add_hw_vlan_rx_fltr(struct net_device *dev, + struct mac_device_info *hw, + __be16 proto, u16 vid) +{ + int index = -1; + u32 val = 0; + int i, ret; + + if (vid > 4095) + return -EINVAL; + + /* Single Rx VLAN Filter */ + if (hw->num_vlan == 1) { + /* For single VLAN filter, VID 0 means VLAN promiscuous */ + if (vid == 0) { + netdev_warn(dev, "Adding VLAN ID 0 is not supported\n"); + return -EPERM; + } + + if (hw->vlan_filter[0] & XGMAC_VLAN_VID) { + netdev_err(dev, "Only single VLAN ID supported\n"); + return -EPERM; + } + + hw->vlan_filter[0] = vid; + dwxgmac2_write_single_vlan(dev, vid); + + return 0; + } + + /* Extended Rx VLAN Filter Enable */ + val |= XGMAC_VLAN_TAG_DATA_ETV | XGMAC_VLAN_TAG_DATA_VEN | vid; + + for (i = 0; i < hw->num_vlan; i++) { + if (hw->vlan_filter[i] == val) + return 0; + + else if(!(hw->vlan_filter[i] & XGMAC_VLAN_TAG_DATA_VEN)) + index = i; + } + + if (index == -1){ + 
netdev_err(dev, "MAC_VLAN_TAG_FILTER full (size: %0u)\n", + hw->num_vlan); + return -EPERM; + } + + ret = dwxgmac2_write_vlan_filter(dev, hw, index, val); + + if (!ret) + hw->vlan_filter[index] = val; + + return ret; +} + +static int dwxgmac2_del_hw_vlan_rx_fltr(struct net_device *dev, + struct mac_device_info *hw, + __be16 proto, u16 vid) +{ + int i, ret = 0; + + /* Single Rx VLAN Filter */ + if (hw->num_vlan == 1) { + if ((hw->vlan_filter[0] & XGMAC_VLAN_VID) == vid) { + hw->vlan_filter[0] = 0; + dwxgmac2_write_single_vlan(dev, 0); + } + return 0; + } + + /* Extended Rx VLAN Filter Enable */ + for (i = 0; i < hw->num_vlan; i++) { + if((hw->vlan_filter[i] & XGMAC_VLAN_TAG_DATA_VID) == vid) { + ret = dwxgmac2_write_vlan_filter(dev, hw, i, 0); + + if (!ret) + hw->vlan_filter[i] = 0; + else + return ret; + } + } + return ret; +} + +static void dwxgmac2_restore_hw_vlan_rx_fltr(struct net_device *dev, + struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + u32 hash; + u32 val; + int i; + + /* Single Rx VLAN Filter */ + if (hw->num_vlan == 1) { + dwxgmac2_write_single_vlan(dev, hw->vlan_filter[0]); + return; + } + + /* Extended Rx VLAN Filter Enable */ + for (i = 0; i < hw->num_vlan; i++) { + if (hw->vlan_filter[i] & XGMAC_VLAN_TAG_DATA_VEN) { + val = hw->vlan_filter[i]; + dwxgmac2_write_vlan_filter(dev, hw, i, val); + } + } + + hash = readl(ioaddr + XGMAC_VLAN_HASH_TABLE); + if (hash & XGMAC_VLAN_VLHT) { + value = readl(ioaddr + XGMAC_VLAN_TAG); + value |= XGMAC_VLAN_VTHM; + writel(value, ioaddr + XGMAC_VLAN_TAG); + } +} + static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits, int mcbitslog2) { @@ -549,6 +732,12 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw, } } + /* VLAN Filtering */ + if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) + value &= ~XGMAC_FILTER_VTFE; + else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + value |= XGMAC_FILTER_VTFE; + writel(value, ioaddr + XGMAC_PACKET_FILTER); } @@ 
-1300,6 +1489,36 @@ static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val) writel(value, ioaddr + XGMAC_TX_CONFIG); } +static void dwxgmac2_rx_hw_vlan(struct mac_device_info *hw, + struct dma_desc *rx_desc, struct sk_buff *skb) +{ + if (hw->desc->get_rx_vlan_valid(rx_desc)) { + u16 vid = (u16)hw->desc->get_rx_vlan_tci(rx_desc); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } +} + +static void dwxgmac2_set_hw_vlan_mode(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 val; + + val = readl(ioaddr + XGMAC_VLAN_TAG); + val &= ~XGMAC_VLAN_TAG_CTRL_EVLS_MASK; + + if (hw->hw_vlan_en) + /* Always strip VLAN on Receive */ + val |= XGMAC_VLAN_TAG_STRIP_ALL; + else + /* Do not strip VLAN on Receive */ + val |= XGMAC_VLAN_TAG_STRIP_NONE; + + /* Enable outer VLAN Tag in Rx DMA descriptro */ + val |= XGMAC_VLAN_TAG_CTRL_EVLRXS; + + writel(val, ioaddr + XGMAC_VLAN_TAG); +} + static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type) { void __iomem *ioaddr = hw->pcsr; @@ -1452,36 +1671,40 @@ static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no, value &= ~XGMAC_L4PEN0; } - value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0); - value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0); if (sa) { value |= XGMAC_L4SPM0; if (inv) value |= XGMAC_L4SPIM0; + else + value &= ~XGMAC_L4SPIM0; } else { value |= XGMAC_L4DPM0; if (inv) value |= XGMAC_L4DPIM0; + else + value &= ~XGMAC_L4DPIM0; } ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value); if (ret) return ret; - if (sa) { - value = match & XGMAC_L4SP0; + ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L4_ADDR, &value); + if (ret) + return ret; - ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value); - if (ret) - return ret; + if (sa) { + value &= ~(XGMAC_L4SP0); + value |= match & XGMAC_L4SP0; } else { - value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0; - - ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value); - if (ret) - return ret; + value 
&= ~(XGMAC_L4DP0); + value |= (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0; } + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value); + if (ret) + return ret; + if (!en) return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0); @@ -1504,32 +1727,149 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en, writel(value, ioaddr + XGMAC_RX_CONFIG); } -static void dwxgmac3_fpe_configure(void __iomem *ioaddr, - struct stmmac_fpe_cfg *cfg, +static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, u32 num_txq, u32 num_rxq, bool tx_enable, bool pmac_enable) { u32 value; - if (!tx_enable) { - value = readl(ioaddr + XGMAC_FPE_CTRL_STS); + if (tx_enable) { + cfg->fpe_csr = XGMAC_EFPE; + value = readl(ioaddr + XGMAC_RXQ_CTRL1); + value &= ~XGMAC_RQ; + value |= (num_rxq - 1) << XGMAC_RQ_SHIFT; + writel(value, ioaddr + XGMAC_RXQ_CTRL1); + } else { + cfg->fpe_csr = 0; + } + writel(cfg->fpe_csr, ioaddr + XGMAC_FPE_CTRL_STS); - value &= ~XGMAC_EFPE; + value = readl(ioaddr + XGMAC_INT_EN); - writel(value, ioaddr + XGMAC_FPE_CTRL_STS); - return; + if (!(value & XGMAC_FPIE)) { + /* Dummy read to clear any pending masked interrupts */ + readl(ioaddr + XGMAC_FPE_CTRL_STS); } +} - value = readl(ioaddr + XGMAC_RXQ_CTRL1); - value &= ~XGMAC_RQ; - value |= (num_rxq - 1) << XGMAC_RQ_SHIFT; - writel(value, ioaddr + XGMAC_RXQ_CTRL1); +static int dwxgmac3_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev) +{ + u32 value; + int status; + + status = FPE_EVENT_UNKNOWN; + /* Reads from the MAC_FPE_CTRL_STS register should only be performed + * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read" + */ value = readl(ioaddr + XGMAC_FPE_CTRL_STS); - value |= XGMAC_EFPE; + + if (value & XGMAC_TRSP) { + status |= FPE_EVENT_TRSP; + netdev_dbg(dev, "FPE: Respond mPacket is transmitted\n"); + } + + if (value & XGMAC_TVER) { + status |= FPE_EVENT_TVER; + netdev_dbg(dev, "FPE: Verify mPacket is transmitted\n"); + } + + 
if (value & XGMAC_RRSP) { + status |= FPE_EVENT_RRSP; + netdev_dbg(dev, "FPE: Respond mPacket is received\n"); + } + + if (value & XGMAC_RVER) { + status |= FPE_EVENT_RVER; + netdev_dbg(dev, "FPE: Verify mPacket is received\n"); + } + + return status; +} + +static void dwxgmac3_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + enum stmmac_mpacket_type type) +{ + u32 value = cfg->fpe_csr; + + if (type == MPACKET_VERIFY) + value |= XGMAC_SVER; + else if (type == MPACKET_RESPONSE) + value |= XGMAC_SRSP; + writel(value, ioaddr + XGMAC_FPE_CTRL_STS); } +static int dwxgmac3_fpe_get_add_frag_size(const void __iomem *ioaddr) +{ + return FIELD_GET(XGMAC_AFSZ, readl(ioaddr + XGMAC_MTL_FPE_CTRL_STS)); +} + +static void dwxgmac3_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size) +{ + u32 value; + + value = readl(ioaddr + XGMAC_MTL_FPE_CTRL_STS); + writel(u32_replace_bits(value, add_frag_size, XGMAC_AFSZ), + ioaddr + XGMAC_MTL_FPE_CTRL_STS); +} + +#define ALG_ERR_MSG "TX algorithm SP is not suitable for one-to-many mapping" +#define WEIGHT_ERR_MSG "TXQ weight %u differs across other TXQs in TC: [%u]" + +static int dwxgmac3_fpe_map_preemption_class(struct net_device *ndev, + struct netlink_ext_ack *extack, + u32 pclass) +{ + u32 val, offset, count, queue_weight, preemptible_txqs = 0; + struct stmmac_priv *priv = netdev_priv(ndev); + u32 num_tc = ndev->num_tc; + + if (!pclass) + goto update_mapping; + + /* DWMAC CORE4+ can not program TC:TXQ mapping to hardware. + * + * Synopsys Databook: + * "The number of Tx DMA channels is equal to the number of Tx queues, + * and is direct one-to-one mapping." 
+ */ + for (u32 tc = 0; tc < num_tc; tc++) { + count = ndev->tc_to_txq[tc].count; + offset = ndev->tc_to_txq[tc].offset; + + if (pclass & BIT(tc)) + preemptible_txqs |= GENMASK(offset + count - 1, offset); + + /* This is 1:1 mapping, go to next TC */ + if (count == 1) + continue; + + if (priv->plat->tx_sched_algorithm == MTL_TX_ALGORITHM_SP) { + NL_SET_ERR_MSG_MOD(extack, ALG_ERR_MSG); + return -EINVAL; + } + + queue_weight = priv->plat->tx_queues_cfg[offset].weight; + + for (u32 i = 1; i < count; i++) { + if (priv->plat->tx_queues_cfg[offset + i].weight != + queue_weight) { + NL_SET_ERR_MSG_FMT_MOD(extack, WEIGHT_ERR_MSG, + queue_weight, tc); + return -EINVAL; + } + } + } + +update_mapping: + val = readl(priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS); + writel(u32_replace_bits(val, preemptible_txqs, XGMAC_PEC), + priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS); + + return 0; +} + const struct stmmac_ops dwxgmac210_ops = { .core_init = dwxgmac2_core_init, .set_mac = dwxgmac2_set_mac, @@ -1569,8 +1909,18 @@ const struct stmmac_ops dwxgmac210_ops = { .enable_vlan = dwxgmac2_enable_vlan, .config_l3_filter = dwxgmac2_config_l3_filter, .config_l4_filter = dwxgmac2_config_l4_filter, + .add_hw_vlan_rx_fltr = dwxgmac2_add_hw_vlan_rx_fltr, + .del_hw_vlan_rx_fltr = dwxgmac2_del_hw_vlan_rx_fltr, + .restore_hw_vlan_rx_fltr = dwxgmac2_restore_hw_vlan_rx_fltr, .set_arp_offload = dwxgmac2_set_arp_offload, .fpe_configure = dwxgmac3_fpe_configure, + .fpe_send_mpacket = dwxgmac3_fpe_send_mpacket, + .fpe_irq_status = dwxgmac3_fpe_irq_status, + .fpe_get_add_frag_size = dwxgmac3_fpe_get_add_frag_size, + .fpe_set_add_frag_size = dwxgmac3_fpe_set_add_frag_size, + .fpe_map_preemption_class = dwxgmac3_fpe_map_preemption_class, + .rx_hw_vlan = dwxgmac2_rx_hw_vlan, + .set_hw_vlan_mode = dwxgmac2_set_hw_vlan_mode, }; static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode, @@ -1628,8 +1978,48 @@ const struct stmmac_ops dwxlgmac2_ops = { .config_l4_filter = dwxgmac2_config_l4_filter, 
.set_arp_offload = dwxgmac2_set_arp_offload, .fpe_configure = dwxgmac3_fpe_configure, + .rx_hw_vlan = dwxgmac2_rx_hw_vlan, + .set_hw_vlan_mode = dwxgmac2_set_hw_vlan_mode, }; +static u32 dwxgmac2_get_num_vlan(void __iomem *ioaddr) +{ + u32 val, num_vlan; + + val = readl(ioaddr + XGMAC_HW_FEATURE3); + switch (val & XGMAC_HWFEAT_NRVF) { + case 0: + num_vlan = 1; + break; + case 1: + num_vlan = 4; + break; + case 2: + num_vlan = 8; + break; + case 3: + num_vlan = 16; + break; + case 4: + num_vlan = 24; + break; + case 5: + num_vlan = 32; + break; + default: + num_vlan = 1; + } + + return num_vlan; +} + +static u32 dwxgmac2_is_double_vlan(void __iomem *ioaddr) +{ + u32 val; + val = readl(ioaddr + XGMAC_HW_FEATURE3); + return (val & XGMAC_HWFEAT_DVLAN); +} + int dwxgmac2_setup(struct stmmac_priv *priv) { struct mac_device_info *mac = priv->hw; @@ -1646,7 +2036,8 @@ int dwxgmac2_setup(struct stmmac_priv *priv) mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | - MAC_1000FD | MAC_2500FD | MAC_5000FD | + MAC_10 | MAC_100 | MAC_1000FD | + MAC_2500FD | MAC_5000FD | MAC_10000FD; mac->link.duplex = 0; mac->link.speed10 = XGMAC_CONFIG_SS_10_MII; @@ -1666,7 +2057,8 @@ int dwxgmac2_setup(struct stmmac_priv *priv) mac->mii.reg_mask = GENMASK(15, 0); mac->mii.clk_csr_shift = 19; mac->mii.clk_csr_mask = GENMASK(21, 19); - + mac->num_vlan = dwxgmac2_get_num_vlan(priv->ioaddr); + mac->double_vlan = dwxgmac2_is_double_vlan(priv->ioaddr); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c index 389aad7b5c1ee..4f0c7147d2133 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c @@ -26,6 +26,7 @@ static int dwxgmac2_get_rx_status(struct stmmac_extra_stats *x, struct dma_desc *p) { unsigned int rdes3 = le32_to_cpu(p->des3); + unsigned int rdes2 = le32_to_cpu(p->des2); if 
(unlikely(rdes3 & XGMAC_RDES3_OWN)) return dma_own; @@ -36,6 +37,11 @@ static int dwxgmac2_get_rx_status(struct stmmac_extra_stats *x, if (unlikely((rdes3 & XGMAC_RDES3_ES) && (rdes3 & XGMAC_RDES3_LD))) return discard_frame; + if (rdes2 & XGMAC_RDES2_L3FM) + x->l3_filter_match++; + if (rdes2 & XGMAC_RDES2_L4FM) + x->l4_filter_match++; + return good_frame; } @@ -69,6 +75,19 @@ static int dwxgmac2_get_tx_ls(struct dma_desc *p) return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0; } +static u16 dwxgmac2_wrback_get_rx_vlan_tci(struct dma_desc *p) +{ + return (le32_to_cpu(p->des0) & XGMAC_RDES0_VLAN_TAG_MASK); +} + +static inline bool dwxgmac2_wrback_get_rx_vlan_valid(struct dma_desc *p) +{ + return((((le32_to_cpu(p->des3) & XGMAC_RDES3_ET_LT) >> + XGMAC_RDES3_ET_LT_SHIFT) >= XGMAC_ET_LT_VLAN_STAG) && + (((le32_to_cpu(p->des3) & XGMAC_RDES3_ET_LT) >> + XGMAC_RDES3_ET_LT_SHIFT) <= XGMAC_ET_LT_DVLAN_STAG_CTAG)); +} + static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe) { return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL); @@ -351,6 +370,8 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = { .set_tx_owner = dwxgmac2_set_tx_owner, .set_rx_owner = dwxgmac2_set_rx_owner, .get_tx_ls = dwxgmac2_get_tx_ls, + .get_rx_vlan_tci = dwxgmac2_wrback_get_rx_vlan_tci, + .get_rx_vlan_valid = dwxgmac2_wrback_get_rx_vlan_valid, .get_rx_frame_len = dwxgmac2_get_rx_frame_len, .enable_tx_timestamp = dwxgmac2_enable_tx_timestamp, .get_tx_timestamp_status = dwxgmac2_get_tx_timestamp_status, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 7840bc403788e..4fe536bfbe8e3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -31,6 +31,13 @@ static void dwxgmac2_dma_init(void __iomem *ioaddr, value |= XGMAC_EAME; writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE); + + if (dma_cfg->multi_irq_en) { + value = readl(ioaddr + XGMAC_DMA_MODE); + value &= 
~DMA_MODE_INTM_MASK; + value |= (DMA_MODE_INTM_MODE1 << DMA_MODE_INTM_SHIFT); + writel(value, ioaddr + XGMAC_DMA_MODE); + } } static void dwxgmac2_dma_init_chan(struct stmmac_priv *priv, @@ -203,10 +210,6 @@ static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr, } writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel)); - - /* Enable MTL RX overflow */ - value = readl(ioaddr + XGMAC_MTL_QINTEN(channel)); - writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel)); } static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr, @@ -364,19 +367,17 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, } /* TX/RX NORMAL interrupts */ - if (likely(intr_status & XGMAC_NIS)) { - if (likely(intr_status & XGMAC_RI)) { - u64_stats_update_begin(&stats->syncp); - u64_stats_inc(&stats->rx_normal_irq_n[chan]); - u64_stats_update_end(&stats->syncp); - ret |= handle_rx; - } - if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { - u64_stats_update_begin(&stats->syncp); - u64_stats_inc(&stats->tx_normal_irq_n[chan]); - u64_stats_update_end(&stats->syncp); - ret |= handle_tx; - } + if (likely(intr_status & XGMAC_RI)) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); + ret |= handle_rx; + } + if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); + ret |= handle_tx; } /* Clear interrupts */ diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 88cce28b2f980..e9620de266bf3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -252,7 +252,7 @@ static const struct stmmac_hwif_entry { .mac = &dwxgmac210_ops, .hwtimestamp = &stmmac_ptp, .mode = NULL, - .tc = &dwxgmac_tc_ops, + .tc = &dwmac510_tc_ops, .mmc = &dwxgmac_mmc_ops, 
.est = &dwmac510_est_ops, .setup = dwxgmac2_setup, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index ea135203ff2e6..b590bec846247 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -40,6 +40,7 @@ struct stmmac_resources { enum stmmac_txbuf_type { STMMAC_TXBUF_T_SKB, + STMMAC_TXBUF_T_DMA, STMMAC_TXBUF_T_XDP_TX, STMMAC_TXBUF_T_XDP_NDO, STMMAC_TXBUF_T_XSK_TX, @@ -47,6 +48,8 @@ enum stmmac_txbuf_type { struct stmmac_tx_info { dma_addr_t buf; + struct page *page; + struct sk_buff *skb; bool map_as_page; unsigned len; bool last_segment; @@ -64,6 +67,7 @@ struct stmmac_tx_queue { int tbs; struct hrtimer txtimer; u32 queue_index; + struct page_pool *page_pool; struct stmmac_priv *priv_data; struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; struct dma_edesc *dma_entx; @@ -152,7 +156,7 @@ enum stmmac_mpacket_type { MPACKET_RESPONSE = 1, }; -#define STMMAC_FPE_MM_MAX_VERIFY_RETRIES 3 +#define STMMAC_FPE_MM_MAX_VERIFY_RETRIES 20 #define STMMAC_FPE_MM_MAX_VERIFY_TIME_MS 128 struct stmmac_fpe_cfg { @@ -257,6 +261,8 @@ struct stmmac_est { u32 ter; u32 gcl_unaligned[EST_GCL]; u32 gcl[EST_GCL]; + u32 ti_ns[EST_GCL]; + u32 gates[EST_GCL]; u32 gcl_size; u32 max_sdu[MTL_MAX_TX_QUEUES]; }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c index c9693f77e1f61..d77df3b5b023c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c @@ -58,7 +58,7 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg, EST_GMAC5_PTOV_SHIFT; } if (cfg->enable) - ctrl |= EST_EEST | EST_SSWL; + ctrl |= EST_EEST | EST_SSWL | EST_DFBS; else ctrl &= ~EST_EEST; @@ -104,6 +104,11 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev, x->mtl_est_hlbs++; + for (i = 0; i < txqcnt; i++) { + if (value & BIT(i)) + 
x->mtl_est_txq_hlbs[i]++; + } + /* Clear Interrupt */ writel(value, est_addr + EST_SCH_ERR); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h index 7a858c566e7e5..d71544278e1e7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h @@ -16,6 +16,7 @@ #define EST_XGMAC_PTOV_MUL 9 #define EST_SSWL BIT(1) #define EST_EEST BIT(0) +#define EST_DFBS BIT(5) #define EST_STATUS 0x00000008 #define EST_GMAC5_BTRL GENMASK(11, 8) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 2a37592a62810..e3d2a3c7344f7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -1278,7 +1278,7 @@ static int stmmac_get_mm(struct net_device *ndev, state->max_verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS; state->verify_enabled = priv->fpe_cfg.verify_enabled; - state->pmac_enabled = priv->fpe_cfg.pmac_enabled; + state->pmac_enabled = true; state->verify_time = priv->fpe_cfg.verify_time; state->tx_enabled = priv->fpe_cfg.tx_enabled; state->verify_status = priv->fpe_cfg.status; @@ -1311,6 +1311,13 @@ static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg, u32 frag_size; int err; + if (!priv->dma_cap.fpesel) + return -EOPNOTSUPP; + + /* DWMAC always have preemptible MAC enabled */ + if (!cfg->pmac_enabled) + return -EINVAL; + err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size, &frag_size, extack); if (err) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 918d7f2e8ba99..db7ddc6b564a9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -126,11 +126,10 @@ module_param(chain_mode, int, 0444); MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); static 
irqreturn_t stmmac_interrupt(int irq, void *dev_id); -/* For MSI interrupts handling */ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id); static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id); -static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); -static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); +static irqreturn_t stmmac_intr_tx(int irq, void *data); +static irqreturn_t stmmac_intr_rx(int irq, void *data); static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue); static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue); static void stmmac_reset_queues_param(struct stmmac_priv *priv); @@ -721,7 +720,8 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; snap_type_sel = PTP_TCR_SNAPTYPSEL_1; - if (priv->synopsys_id < DWMAC_CORE_4_10) + if (priv->synopsys_id < DWMAC_CORE_4_10 && + !priv->plat->has_xgmac) ts_event_en = PTP_TCR_TSEVNTENA; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; @@ -975,13 +975,7 @@ static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) spin_lock_irqsave(&fpe_cfg->lock, flags); - if (is_up && fpe_cfg->pmac_enabled) { - /* VERIFY process requires pmac enabled when NIC comes up */ - stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg, - priv->plat->tx_queues_to_use, - priv->plat->rx_queues_to_use, - false, true); - + if (is_up && fpe_cfg->tx_enabled) { /* New link => maybe new partner => new verification process */ stmmac_fpe_apply(priv); } else { @@ -990,6 +984,8 @@ static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) priv->plat->tx_queues_to_use, priv->plat->rx_queues_to_use, false, false); + if (fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_DISABLED) + fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL; } spin_unlock_irqrestore(&fpe_cfg->lock, flags); @@ -1523,7 +1519,10 @@ static void 
stmmac_free_tx_buffer(struct stmmac_priv *priv, if (tx_q->tx_skbuff_dma[i].buf && tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { - if (tx_q->tx_skbuff_dma[i].map_as_page) + if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_DMA) + page_pool_recycle_direct(tx_q->page_pool, + tx_q->tx_skbuff_dma[i].page); + else if (tx_q->tx_skbuff_dma[i].map_as_page) dma_unmap_page(priv->device, tx_q->tx_skbuff_dma[i].buf, tx_q->tx_skbuff_dma[i].len, @@ -1546,7 +1545,8 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, tx_q->xsk_frames_done++; if (tx_q->tx_skbuff[i] && - tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { + (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB || + tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_DMA)) { dev_kfree_skb_any(tx_q->tx_skbuff[i]); tx_q->tx_skbuff[i] = NULL; } @@ -2000,6 +2000,8 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, kfree(tx_q->tx_skbuff_dma); kfree(tx_q->tx_skbuff); + if (tx_q->page_pool) + page_pool_destroy(tx_q->page_pool); } static void free_dma_tx_desc_resources(struct stmmac_priv *priv, @@ -2134,12 +2136,32 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) { struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; + struct page_pool_params pp_params = { 0 }; + unsigned int num_pages; size_t size; void *addr; + int ret; tx_q->queue_index = queue; tx_q->priv_data = priv; + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp_params.pool_size = dma_conf->dma_tx_size; + num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); + pp_params.order = ilog2(num_pages); + pp_params.nid = dev_to_node(priv->device); + pp_params.dev = priv->device; + pp_params.dma_dir = DMA_BIDIRECTIONAL; + pp_params.offset = 0; + pp_params.max_len = num_pages * PAGE_SIZE; + + tx_q->page_pool = page_pool_create(&pp_params); + if (IS_ERR(tx_q->page_pool)) { + ret = PTR_ERR(tx_q->page_pool); + tx_q->page_pool = NULL; + return ret; + } + 
tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, sizeof(*tx_q->tx_skbuff_dma), GFP_KERNEL); @@ -2480,9 +2502,10 @@ static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = { static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) { + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; + bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported; struct xsk_buff_pool *pool = tx_q->xsk_pool; unsigned int entry = tx_q->cur_tx; struct dma_desc *tx_desc = NULL; @@ -2568,9 +2591,17 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) } stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, - true, priv->mode, true, true, + csum, priv->mode, true, true, xdp_desc.len); + if (tx_q->tbs & STMMAC_TBS_EN && xdp_desc.txtime > 0) { + struct dma_edesc *edesc = &tx_q->dma_entx[entry]; + struct timespec64 ts = + ns_to_timespec64(xdp_desc.txtime); + + stmmac_set_desc_tbs(priv, edesc, ts.tv_sec, ts.tv_nsec); + } + stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); xsk_tx_metadata_to_compl(meta, @@ -2647,7 +2678,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { xdpf = tx_q->xdpf[entry]; skb = NULL; - } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { + } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB || + tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_DMA) { xdpf = NULL; skb = tx_q->tx_skbuff[entry]; } else { @@ -2701,7 +2733,10 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, if (likely(tx_q->tx_skbuff_dma[entry].buf && tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { - if 
(tx_q->tx_skbuff_dma[entry].map_as_page) + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_DMA) + page_pool_recycle_direct(tx_q->page_pool, + tx_q->tx_skbuff_dma[entry].page); + else if (tx_q->tx_skbuff_dma[entry].map_as_page) dma_unmap_page(priv->device, tx_q->tx_skbuff_dma[entry].buf, tx_q->tx_skbuff_dma[entry].len, @@ -2736,7 +2771,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) tx_q->xsk_frames_done++; - if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB || + tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_DMA) { if (likely(skb)) { pkts_compl++; bytes_compl += skb->len; @@ -3095,12 +3131,14 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) * Try to cancel any timer if napi is scheduled, timer will be armed * again in the next scheduled napi. */ - if (unlikely(!napi_is_scheduled(napi))) - hrtimer_start(&tx_q->txtimer, - STMMAC_COAL_TIMER(tx_coal_timer), - HRTIMER_MODE_REL); - else + if (unlikely(!napi_is_scheduled(napi))) { + if (unlikely(!(hrtimer_active(&tx_q->txtimer)))) + hrtimer_start(&tx_q->txtimer, + STMMAC_COAL_TIMER(tx_coal_timer), + HRTIMER_MODE_REL); + } else { hrtimer_try_to_cancel(&tx_q->txtimer); + } } /** @@ -3594,7 +3632,7 @@ static void stmmac_free_irq(struct net_device *dev, } } -static int stmmac_request_irq_multi_msi(struct net_device *dev) +static int stmmac_request_irq_multi(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); enum request_irq_err irq_err; @@ -3717,12 +3755,17 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) int_name = priv->int_name_rx_irq[i]; sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); ret = request_irq(priv->rx_irq[i], - stmmac_msi_intr_rx, + stmmac_intr_rx, 0, int_name, &priv->dma_conf.rx_queue[i]); if (unlikely(ret < 0)) { - netdev_err(priv->dev, - "%s: alloc rx-%d 
MSI %d (error: %d)\n", - __func__, i, priv->rx_irq[i], ret); + if (priv->plat->multi_msi_en) + netdev_err(priv->dev, + "%s: alloc rx-%d MSI %d (error: %d)\n", + __func__, i, priv->rx_irq[i], ret); + if (priv->plat->dma_cfg->multi_irq_en) + netdev_err(priv->dev, + "%s: alloc rx-%d %d (error: %d)\n", + __func__, i, priv->rx_irq[i], ret); irq_err = REQ_IRQ_ERR_RX; irq_idx = i; goto irq_error; @@ -3742,12 +3785,17 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) int_name = priv->int_name_tx_irq[i]; sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); ret = request_irq(priv->tx_irq[i], - stmmac_msi_intr_tx, + stmmac_intr_tx, 0, int_name, &priv->dma_conf.tx_queue[i]); if (unlikely(ret < 0)) { - netdev_err(priv->dev, - "%s: alloc tx-%d MSI %d (error: %d)\n", - __func__, i, priv->tx_irq[i], ret); + if (priv->plat->multi_msi_en) + netdev_err(priv->dev, + "%s: alloc tx-%d MSI %d (error: %d)\n", + __func__, i, priv->tx_irq[i], ret); + if (priv->plat->dma_cfg->multi_irq_en) + netdev_err(priv->dev, + "%s: alloc tx-%d %d (error: %d)\n", + __func__, i, priv->tx_irq[i], ret); irq_err = REQ_IRQ_ERR_TX; irq_idx = i; goto irq_error; @@ -3837,8 +3885,8 @@ static int stmmac_request_irq(struct net_device *dev) int ret; /* Request the IRQ lines */ - if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) - ret = stmmac_request_irq_multi_msi(dev); + if (priv->plat->multi_msi_en || priv->plat->dma_cfg->multi_irq_en) + ret = stmmac_request_irq_multi(dev); else ret = stmmac_request_irq_single(dev); @@ -3887,12 +3935,14 @@ stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu) dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; /* Earlier check for TBS */ - for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { - struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; - int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; + if (priv->dma_cap.tbssel) { + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { + struct stmmac_tx_queue *tx_q = 
&dma_conf->tx_queue[chan]; + int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; - /* Setup per-TXQ tbs flag before TX descriptor alloc */ - tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; + /* Setup per-TXQ tbs flag before TX descriptor alloc */ + tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; + } } ret = alloc_dma_desc_resources(priv, dma_conf); @@ -4504,6 +4554,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); unsigned int nopaged_len = skb_headlen(skb); int i, csum_insertion = 0, is_jumbo = 0; + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); u32 queue = skb_get_queue_mapping(skb); int nfrags = skb_shinfo(skb)->nr_frags; int gso = skb_shinfo(skb)->gso_type; @@ -4511,9 +4562,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) struct dma_edesc *tbs_desc = NULL; struct dma_desc *desc, *first; struct stmmac_tx_queue *tx_q; + struct page *tx_buf_page; bool has_vlan, set_ic; int entry, first_tx; dma_addr_t des; + u32 sdu_len; tx_q = &priv->dma_conf.tx_queue[queue]; txq_stats = &priv->xstats.txq_stats[queue]; @@ -4530,13 +4583,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) return stmmac_tso_xmit(skb, dev); } - if (priv->est && priv->est->enable && - priv->est->max_sdu[queue] && - skb->len > priv->est->max_sdu[queue]){ - priv->xstats.max_sdu_txq_drop[queue]++; - goto max_sdu_err; - } - if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, @@ -4552,6 +4598,23 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) /* Check if VLAN can be inserted by HW */ has_vlan = stmmac_vlan_insert(priv, skb, tx_q); + sdu_len = skb->len; + if (has_vlan) { + /* Add VLAN tag length to sdu length in case of txvlan offload */ + if (priv->dev->features & NETIF_F_HW_VLAN_CTAG_TX) + sdu_len += VLAN_HLEN; + if (skb->vlan_proto 
== htons(ETH_P_8021AD) && + priv->dev->features & NETIF_F_HW_VLAN_STAG_TX) + sdu_len += VLAN_HLEN; + } + + if (priv->est && priv->est->enable && + priv->est->max_sdu[queue] && + sdu_len > priv->est->max_sdu[queue]) { + priv->xstats.max_sdu_txq_drop[queue]++; + goto max_sdu_err; + } + entry = tx_q->cur_tx; first_entry = entry; WARN_ON(tx_q->tx_skbuff[first_entry]); @@ -4707,14 +4770,30 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (likely(!is_jumbo)) { bool last_segment = (nfrags == 0); - des = dma_map_single(priv->device, skb->data, - nopaged_len, DMA_TO_DEVICE); - if (dma_mapping_error(priv->device, des)) - goto dma_map_err; - - tx_q->tx_skbuff_dma[first_entry].buf = des; - tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; - tx_q->tx_skbuff_dma[first_entry].map_as_page = false; + if (!(last_segment && priv->plat->tx_buf_quirk)) { + des = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, des)) + goto dma_map_err; + tx_q->tx_skbuff_dma[first_entry].buf = des; + tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; + tx_q->tx_skbuff_dma[first_entry].map_as_page = false; + } else { + tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_DMA; + tx_q->tx_skbuff_dma[first_entry].map_as_page = false; + tx_q->tx_skbuff_dma[first_entry].skb = skb; + tx_q->tx_skbuff_dma[first_entry].page = + page_pool_alloc_pages(tx_q->page_pool, gfp); + tx_buf_page = tx_q->tx_skbuff_dma[first_entry].page; + des = page_pool_get_dma_addr(tx_buf_page); + tx_q->tx_skbuff_dma[first_entry].buf = des; + dma_sync_single_for_cpu(priv->device, des, + nopaged_len, DMA_BIDIRECTIONAL); + skb_copy_from_linear_data(skb, page_address(tx_buf_page), + nopaged_len); + dma_sync_single_for_device(priv->device, des, nopaged_len, + DMA_BIDIRECTIONAL); + } stmmac_set_desc_addr(priv, first, des); @@ -4903,6 +4982,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, { 
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported; unsigned int entry = tx_q->cur_tx; struct dma_desc *tx_desc; dma_addr_t dma_addr; @@ -4954,7 +5034,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, stmmac_set_desc_addr(priv, tx_desc, dma_addr); stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, - true, priv->mode, true, true, + csum, priv->mode, true, true, xdpf->len); tx_q->tx_count_frames++; @@ -5977,7 +6057,7 @@ static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) /* This is interrupt context, just spin_lock() */ spin_lock(&fpe_cfg->lock); - if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN) + if (status == FPE_EVENT_UNKNOWN) goto unlock_out; /* LP has sent verify mPacket */ @@ -6115,7 +6195,7 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) +static irqreturn_t stmmac_intr_tx(int irq, void *data) { struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; struct stmmac_dma_conf *dma_conf; @@ -6142,7 +6222,7 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) +static irqreturn_t stmmac_intr_rx(int irq, void *data) { struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; struct stmmac_dma_conf *dma_conf; @@ -7688,7 +7768,7 @@ int stmmac_dvr_probe(struct device *device, #ifdef STMMAC_VLAN_TAG_USED /* Both mac100 and gmac support receive VLAN tag detection */ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; - if (priv->plat->has_gmac4) { + if (priv->plat->has_gmac4 || priv->plat->has_xgmac) { ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; priv->hw->hw_vlan_en = true; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index aaf008bdbbcd4..8137a5b86f6f9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -278,6 +278,9 @@ static int stmmac_mtl_setup(struct platform_device *pdev, plat->tx_queues_cfg[queue].coe_unsupported = of_property_read_bool(q_node, "snps,coe-unsupported"); + /* Enable TBS for selected queues */ + plat->tx_queues_cfg[queue].tbs_en = + of_property_read_bool(q_node, "snps,tbs-enable"); queue++; } @@ -582,6 +585,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); + dma_cfg->multi_irq_en = of_property_read_bool(np, "snps,multi-irq-en"); + plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); if (plat->force_thresh_dma_mode && plat->force_sf_dma_mode) { plat->force_sf_dma_mode = 0; @@ -591,6 +596,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed); + plat->tx_buf_quirk = of_property_read_bool(np, "snps,pagepool-tx-buf-quirk"); + plat->axi = stmmac_axi_setup(pdev); rc = stmmac_mtl_setup(pdev, plat); @@ -701,6 +708,8 @@ EXPORT_SYMBOL_GPL(devm_stmmac_probe_config_dt); int stmmac_get_platform_resources(struct platform_device *pdev, struct stmmac_resources *stmmac_res) { + char irq_name[11]; + int i; memset(stmmac_res, 0, sizeof(*stmmac_res)); /* Get IRQ information early to have an ability to ask for deferred @@ -710,6 +719,22 @@ int stmmac_get_platform_resources(struct platform_device *pdev, if (stmmac_res->irq < 0) return stmmac_res->irq; + /* For RX Channel */ + for (i = 0; i < MTL_MAX_RX_QUEUES; i++) { + sprintf(irq_name, "%s%d", "macirq_rx", i); + stmmac_res->rx_irq[i] = platform_get_irq_byname_optional(pdev, irq_name); + if (stmmac_res->rx_irq[i] < 0) + break; + } + 
+ /* For TX Channel */ + for (i = 0; i < MTL_MAX_TX_QUEUES; i++) { + sprintf(irq_name, "%s%d", "macirq_tx", i); + stmmac_res->tx_irq[i] = platform_get_irq_byname_optional(pdev, irq_name); + if (stmmac_res->tx_irq[i] < 0) + break; + } + /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq * The external wake up irq can be passed through the platform code * named as "eth_wake_irq" diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 75ad2da1a37f1..b1efbbbfa5d33 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -11,6 +11,8 @@ #include "dwmac5.h" #include "stmmac.h" +#define MAX_IDLE_SLOPE_CREDIT 0x1FFFFF + static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry) { memset(entry, 0, sizeof(*entry)); @@ -332,13 +334,14 @@ static int tc_init(struct stmmac_priv *priv) static int tc_setup_cbs(struct stmmac_priv *priv, struct tc_cbs_qopt_offload *qopt) { + u64 value, scaling = 0, cycle_time_ns = 0, open_time = 0, tti_ns = 0; u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 gate = 0x1 << qopt->queue; s64 port_transmit_rate_kbps; u32 queue = qopt->queue; + u32 ptr, idle_slope; u32 mode_to_use; - u64 value; - u32 ptr; - int ret; + int ret, row; /* Queue 0 is not AVB capable */ if (queue <= 0 || queue >= tx_queues_count) @@ -402,6 +405,52 @@ static int tc_setup_cbs(struct stmmac_priv *priv, value = qopt->locredit * 1024ll * 8; priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0); + /* If EST is not enable, no need to recalibrate idle slope */ + if (!priv->est) + goto config_cbs; + if (!priv->est->enable) + goto config_cbs; + + /* Check the GCL cycle time. If 0, no need to recalibrate idle slope */ + cycle_time_ns = (priv->est->ctr[1] * NSEC_PER_SEC) + + priv->est->ctr[0]; + if (!cycle_time_ns) + goto config_cbs; + + /* Calculate the total open time for the queue. 
GCL which exceeds the + * cycle time will be truncated. So, time interval that exceeds the + * cycle time will not be included. The gates without any setting of + * open/close within the cycle time are considered as open. The queue + * that having open time of 0, no need idle slope recalibration. + */ + for (row = 0; row < priv->est->gcl_size; row++) { + tti_ns += priv->est->ti_ns[row]; + if (priv->est->gates[row] & gate) + open_time += priv->est->ti_ns[row]; + if (tti_ns > cycle_time_ns) { + if (priv->est->gates[row] & gate) + open_time -= tti_ns - cycle_time_ns; + break; + } + } + if (tti_ns < cycle_time_ns) + open_time += cycle_time_ns - tti_ns; + if (!open_time) + goto config_cbs; + + /* Calculate the scaling factor to be used to recalculate new idle + * slope. + */ + scaling = cycle_time_ns; + do_div(scaling, open_time); + idle_slope = priv->plat->tx_queues_cfg[queue].idle_slope; + idle_slope *= scaling; + if (idle_slope > MAX_IDLE_SLOPE_CREDIT) + idle_slope = MAX_IDLE_SLOPE_CREDIT; + + priv->plat->tx_queues_cfg[queue].idle_slope = idle_slope; + +config_cbs: ret = stmmac_config_cbs(priv, priv->hw, priv->plat->tx_queues_cfg[queue].send_slope, priv->plat->tx_queues_cfg[queue].idle_slope, @@ -446,6 +495,7 @@ static int tc_parse_flow_actions(struct stmmac_priv *priv, } #define ETHER_TYPE_FULL_MASK cpu_to_be16(~0) +#define IP_PROTO_FULL_MASK 0xFF static int tc_add_basic_flow(struct stmmac_priv *priv, struct flow_cls_offload *cls, @@ -461,6 +511,24 @@ static int tc_add_basic_flow(struct stmmac_priv *priv, flow_rule_match_basic(rule, &match); + /* Both network proto and transport proto not present in the key */ + if (!match.mask || !(match.mask->n_proto || match.mask->ip_proto)) + return -EOPNOTSUPP; + + /* If the proto is present in the key and is not full mask */ + if ((match.mask->n_proto && match.mask->n_proto != ETHER_TYPE_FULL_MASK) || + (match.mask->ip_proto && match.mask->ip_proto != IP_PROTO_FULL_MASK)) + return -EOPNOTSUPP; + + /* Network proto is present 
in the key and is not IPv4 */ + if (match.mask->n_proto && match.key->n_proto != cpu_to_be16(ETH_P_IP)) + return -EOPNOTSUPP; + + /* Transport proto is present in the key and is not TCP or UDP */ + if (match.mask->ip_proto && !(match.key->ip_proto == IPPROTO_TCP || + match.key->ip_proto == IPPROTO_UDP)) + return -EOPNOTSUPP; + entry->ip_proto = match.key->ip_proto; return 0; } @@ -598,6 +666,12 @@ static int tc_add_flow(struct stmmac_priv *priv, ret = tc_flow_parsers[i].fn(priv, cls, entry); if (!ret) entry->in_use = true; + else if (ret == -EOPNOTSUPP) + /* The basic flow parser will return EOPNOTSUPP, if a + * requested offload not fully supported by the hw. And + * in that case fail early. + */ + break; } if (!entry->in_use) @@ -627,6 +701,7 @@ static int tc_del_flow(struct stmmac_priv *priv, entry->in_use = false; entry->cookie = 0; entry->is_l4 = false; + entry->action = 0; return ret; } @@ -981,7 +1056,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv, if (qopt->cmd == TAPRIO_CMD_DESTROY) goto disable; - if (qopt->num_entries >= dep) + if (qopt->num_entries > dep) return -EINVAL; if (!qopt->cycle_time) return -ERANGE; @@ -1012,7 +1087,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv, s64 delta_ns = qopt->entries[i].interval; u32 gates = qopt->entries[i].gate_mask; - if (delta_ns > GENMASK(wid, 0)) + if (delta_ns >= BIT(wid)) return -ERANGE; if (gates > GENMASK(31 - wid, 0)) return -ERANGE; @@ -1031,6 +1106,8 @@ static int tc_taprio_configure(struct stmmac_priv *priv, } priv->est->gcl[i] = delta_ns | (gates << wid); + priv->est->ti_ns[i] = delta_ns; + priv->est->gates[i] = gates; } mutex_lock(&priv->est_lock); @@ -1080,6 +1157,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv, for (i = 0; i < priv->plat->tx_queues_to_use; i++) { priv->xstats.max_sdu_txq_drop[i] = 0; priv->xstats.mtl_est_txq_hlbf[i] = 0; + priv->xstats.mtl_est_txq_hlbs[i] = 0; } mutex_unlock(&priv->est_lock); } @@ -1097,7 +1175,8 @@ static void 
tc_taprio_stats(struct stmmac_priv *priv, for (i = 0; i < priv->plat->tx_queues_to_use; i++) window_drops += priv->xstats.max_sdu_txq_drop[i] + - priv->xstats.mtl_est_txq_hlbf[i]; + priv->xstats.mtl_est_txq_hlbf[i] + + priv->xstats.mtl_est_txq_hlbs[i]; qopt->stats.window_drops = window_drops; /* Transmission overrun doesn't happen for stmmac, hence always 0 */ @@ -1111,7 +1190,8 @@ static void tc_taprio_queue_stats(struct stmmac_priv *priv, int queue = qopt->queue_stats.queue; q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] + - priv->xstats.mtl_est_txq_hlbf[queue]; + priv->xstats.mtl_est_txq_hlbf[queue] + + priv->xstats.mtl_est_txq_hlbs[queue]; /* Transmission overrun doesn't happen for stmmac, hence always 0 */ q_stats->stats.tx_overruns = 0; diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig index f6aa437473de8..5994dd34dfad9 100644 --- a/drivers/net/pcs/Kconfig +++ b/drivers/net/pcs/Kconfig @@ -33,4 +33,10 @@ config PCS_RZN1_MIIC on RZ/N1 SoCs. This PCS converts MII to RMII/RGMII or can be set in pass-through mode for MII. +config PCS_ALTERA_TSE + tristate + help + This module provides helper functions for the Altera Triple Speed + Ethernet SGMII PCS, that can be found on the Intel Socfpga family. 
+ endmenu diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile index 4f7920618b900..0759d8a50466e 100644 --- a/drivers/net/pcs/Makefile +++ b/drivers/net/pcs/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o obj-$(CONFIG_PCS_MTK_LYNXI) += pcs-mtk-lynxi.o obj-$(CONFIG_PCS_RZN1_MIIC) += pcs-rzn1-miic.o +obj-$(CONFIG_PCS_ALTERA_TSE) += pcs-altera-tse.o diff --git a/drivers/net/pcs/pcs-altera-tse.c b/drivers/net/pcs/pcs-altera-tse.c new file mode 100644 index 0000000000000..97a7cabff962a --- /dev/null +++ b/drivers/net/pcs/pcs-altera-tse.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Bootlin + * + * Maxime Chevallier + */ + +#include +#include +#include +#include + +/* SGMII PCS register addresses + */ +#define SGMII_PCS_SCRATCH 0x10 +#define SGMII_PCS_REV 0x11 +#define SGMII_PCS_LINK_TIMER_0 0x12 +#define SGMII_PCS_LINK_TIMER_REG(x) (0x12 + (x)) +#define SGMII_PCS_LINK_TIMER_1 0x13 +#define SGMII_PCS_IF_MODE 0x14 +#define PCS_IF_MODE_SGMII_ENA BIT(0) +#define PCS_IF_MODE_USE_SGMII_AN BIT(1) +#define PCS_IF_MODE_SGMI_SPEED_MASK GENMASK(3, 2) +#define PCS_IF_MODE_SGMI_SPEED_10 (0 << 2) +#define PCS_IF_MODE_SGMI_SPEED_100 (1 << 2) +#define PCS_IF_MODE_SGMI_SPEED_1000 (2 << 2) +#define PCS_IF_MODE_SGMI_HALF_DUPLEX BIT(4) +#define PCS_IF_MODE_SGMI_PHY_AN BIT(5) +#define SGMII_PCS_DIS_READ_TO 0x15 +#define SGMII_PCS_READ_TO 0x16 +#define SGMII_PCS_SW_RESET_TIMEOUT 100 /* usecs */ + +struct altera_tse_pcs { + struct phylink_pcs pcs; + void __iomem *base; + int reg_width; +}; + +static struct altera_tse_pcs *phylink_pcs_to_tse_pcs(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct altera_tse_pcs, pcs); +} + +static u16 tse_pcs_read(struct altera_tse_pcs *tse_pcs, int regnum) +{ + if (tse_pcs->reg_width == 4) + return readl(tse_pcs->base + regnum * 4); + else + return readw(tse_pcs->base + regnum * 2); +} + +static void tse_pcs_write(struct altera_tse_pcs *tse_pcs, int 
regnum, + u16 value) +{ + if (tse_pcs->reg_width == 4) + writel(value, tse_pcs->base + regnum * 4); + else + writew(value, tse_pcs->base + regnum * 2); +} + +static int tse_pcs_reset(struct altera_tse_pcs *tse_pcs) +{ + int i = 0; + u16 bmcr; + + /* Reset PCS block */ + bmcr = tse_pcs_read(tse_pcs, MII_BMCR); + bmcr |= BMCR_RESET; + tse_pcs_write(tse_pcs, MII_BMCR, bmcr); + + for (i = 0; i < SGMII_PCS_SW_RESET_TIMEOUT; i++) { + if (!(tse_pcs_read(tse_pcs, MII_BMCR) & BMCR_RESET)) + return 0; + udelay(1); + } + + return -ETIMEDOUT; +} + +static int alt_tse_pcs_validate(struct phylink_pcs *pcs, + unsigned long *supported, + const struct phylink_link_state *state) +{ + if (state->interface == PHY_INTERFACE_MODE_SGMII || + state->interface == PHY_INTERFACE_MODE_1000BASEX) + return 1; + + return -EINVAL; +} + +static int alt_tse_pcs_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct altera_tse_pcs *tse_pcs = phylink_pcs_to_tse_pcs(pcs); + u32 ctrl, if_mode; + + ctrl = tse_pcs_read(tse_pcs, MII_BMCR); + if_mode = tse_pcs_read(tse_pcs, SGMII_PCS_IF_MODE); + + /* Set link timer to 1.6ms, as per the MegaCore Function User Guide */ + tse_pcs_write(tse_pcs, SGMII_PCS_LINK_TIMER_0, 0x0D40); + tse_pcs_write(tse_pcs, SGMII_PCS_LINK_TIMER_1, 0x03); + + if (interface == PHY_INTERFACE_MODE_SGMII) { + if_mode |= PCS_IF_MODE_USE_SGMII_AN | PCS_IF_MODE_SGMII_ENA; + } else if (interface == PHY_INTERFACE_MODE_1000BASEX) { + if_mode &= ~(PCS_IF_MODE_USE_SGMII_AN | PCS_IF_MODE_SGMII_ENA); + if_mode |= PCS_IF_MODE_SGMI_SPEED_1000; + } + + ctrl |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE); + + tse_pcs_write(tse_pcs, MII_BMCR, ctrl); + tse_pcs_write(tse_pcs, SGMII_PCS_IF_MODE, if_mode); + + return tse_pcs_reset(tse_pcs); +} + +static void alt_tse_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct altera_tse_pcs *tse_pcs = 
phylink_pcs_to_tse_pcs(pcs); + u16 bmsr, lpa; + + bmsr = tse_pcs_read(tse_pcs, MII_BMSR); + lpa = tse_pcs_read(tse_pcs, MII_LPA); + + phylink_mii_c22_pcs_decode_state(state, bmsr, lpa); +} + +static void alt_tse_pcs_an_restart(struct phylink_pcs *pcs) +{ + struct altera_tse_pcs *tse_pcs = phylink_pcs_to_tse_pcs(pcs); + u16 bmcr; + + bmcr = tse_pcs_read(tse_pcs, MII_BMCR); + bmcr |= BMCR_ANRESTART; + tse_pcs_write(tse_pcs, MII_BMCR, bmcr); + + /* This PCS seems to require a soft reset to re-sync the AN logic */ + tse_pcs_reset(tse_pcs); +} + +static const struct phylink_pcs_ops alt_tse_pcs_ops = { + .pcs_validate = alt_tse_pcs_validate, + .pcs_get_state = alt_tse_pcs_get_state, + .pcs_config = alt_tse_pcs_config, + .pcs_an_restart = alt_tse_pcs_an_restart, +}; + +struct phylink_pcs *alt_tse_pcs_create(struct net_device *ndev, + void __iomem *pcs_base, int reg_width) +{ + struct altera_tse_pcs *tse_pcs; + + if (reg_width != 4 && reg_width != 2) + return ERR_PTR(-EINVAL); + + tse_pcs = devm_kzalloc(&ndev->dev, sizeof(*tse_pcs), GFP_KERNEL); + if (!tse_pcs) + return ERR_PTR(-ENOMEM); + + tse_pcs->pcs.ops = &alt_tse_pcs_ops; + tse_pcs->base = pcs_base; + tse_pcs->reg_width = reg_width; + + return &tse_pcs->pcs; +} +EXPORT_SYMBOL_GPL(alt_tse_pcs_create); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Altera TSE PCS driver"); +MODULE_AUTHOR("Maxime Chevallier "); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 01b235b3bb7e8..fa44429aedb33 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -77,6 +77,14 @@ config SFP depends on HWMON || HWMON=n select MDIO_I2C +config QSFP + tristate "QSFP cage support" + depends on I2C && PHYLINK + depends on HWMON || HWMON=n + select MDIO_I2C + help + Currently supports SFF-8636 Rev2.9 QSFP connectors or cables. 
+ comment "MII PHY device drivers" config AIR_EN8811H_PHY diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 90f886844381d..530c6e60ade87 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -33,6 +33,10 @@ obj-$(CONFIG_SFP) += sfp.o sfp-obj-$(CONFIG_SFP) += sfp-bus.o obj-y += $(sfp-obj-y) $(sfp-obj-m) +obj-$(CONFIG_QSFP) += qsfp.o +qsfp-obj-$(CONFIG_QSFP) += qsfp_bus.o +obj-y += $(qsfp-obj-y) $(qsfp-obj-m) + obj-$(CONFIG_ADIN_PHY) += adin.o obj-$(CONFIG_ADIN1100_PHY) += adin1100.o obj-$(CONFIG_AIR_EN8811H_PHY) += air_en8811h.o diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 9964bf3dea2fb..477ce5e406aa9 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1848,6 +1848,43 @@ static int marvell_resume(struct phy_device *phydev) return err; } +/* m88e1510_resume + * + * 88e1510 PHY has an errata that causes the phy downshift counter + * not cleared on a link powering down/up. This can cause the link + * to downshift to the next supported speed intermittently. + * Disabling and reenabling the downshift feature will clear the + * downshift counter and this helps PHY to retry the gigabit link + * upto programmed retry count times before downshifting. Currently + * observed only for the copper link. 
+ */ +static int m88e1510_resume(struct phy_device *phydev) +{ + int err; + u8 cnt = 0; + + err = marvell_resume(phydev); + if (err < 0) + return err; + + /* read downshift counter value */ + err = m88e1011_get_downshift(phydev, &cnt); + if (err < 0) + return err; + + if (cnt) { + /* downshift disabled */ + err = m88e1011_set_downshift(phydev, 0); + if (err < 0) + return err; + + /* downshift enabled, with previous counter value */ + err = m88e1011_set_downshift(phydev, cnt); + } + + return err; +} + static int marvell_aneg_done(struct phy_device *phydev) { int retval = phy_read(phydev, MII_M1011_PHY_STATUS); @@ -3887,7 +3924,7 @@ static struct phy_driver marvell_drivers[] = { .handle_interrupt = marvell_handle_interrupt, .get_wol = m88e1318_get_wol, .set_wol = m88e1318_set_wol, - .resume = marvell_resume, + .resume = m88e1510_resume, .suspend = marvell_suspend, .read_page = marvell_read_page, .write_page = marvell_write_page, diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 3e9957b6aa148..ada4a2a1cfbd5 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "sfp.h" #include "swphy.h" @@ -82,10 +83,15 @@ struct phylink { bool using_mac_select_pcs; struct sfp_bus *sfp_bus; + struct qsfp_bus *qsfp_bus; bool sfp_may_have_phy; + bool qsfp_may_have_phy; DECLARE_PHY_INTERFACE_MASK(sfp_interfaces); + DECLARE_PHY_INTERFACE_MASK(qsfp_interfaces); __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support); + __ETHTOOL_DECLARE_LINK_MODE_MASK(qsfp_support); u8 sfp_port; + u8 qsfp_port; }; #define phylink_printk(level, pl, fmt, ...) 
\ @@ -1611,6 +1617,7 @@ static void phylink_fixed_poll(struct timer_list *t) } static const struct sfp_upstream_ops sfp_phylink_ops; +static const struct qsfp_upstream_ops qsfp_phylink_ops; static int phylink_register_sfp(struct phylink *pl, const struct fwnode_handle *fwnode) @@ -1635,6 +1642,29 @@ static int phylink_register_sfp(struct phylink *pl, return ret; } +static int phylink_register_qsfp(struct phylink *pl, + const struct fwnode_handle *fwnode) +{ + struct qsfp_bus *bus; + int ret; + + if (!fwnode) + return 0; + + bus = qsfp_bus_find_fwnode(fwnode); + if (IS_ERR(bus)) { + phylink_err(pl, "unable to attach QSFP bus: %pe\n", bus); + return PTR_ERR(bus); + } + + pl->qsfp_bus = bus; + + ret = qsfp_bus_add_upstream(bus, pl, &qsfp_phylink_ops); + qsfp_bus_put(bus); + + return ret; +} + /** * phylink_set_fixed_link() - set the fixed link * @pl: a pointer to a &struct phylink returned from phylink_create() @@ -1769,6 +1799,7 @@ struct phylink *phylink_create(struct phylink_config *config, pl->cur_link_an_mode = pl->cfg_link_an_mode; ret = phylink_register_sfp(pl, fwnode); + ret = phylink_register_qsfp(pl, fwnode); if (ret < 0) { kfree(pl); return ERR_PTR(ret); @@ -1790,6 +1821,7 @@ EXPORT_SYMBOL_GPL(phylink_create); void phylink_destroy(struct phylink *pl) { sfp_bus_del_upstream(pl->sfp_bus); + qsfp_bus_del_upstream(pl->qsfp_bus); if (pl->link_gpio) gpiod_put(pl->link_gpio); @@ -2001,7 +2033,7 @@ static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy, if (WARN_ON(pl->cfg_link_an_mode == MLO_AN_FIXED || (pl->cfg_link_an_mode == MLO_AN_INBAND && - phy_interface_mode_is_8023z(interface) && !pl->sfp_bus))) + phy_interface_mode_is_8023z(interface) && !(pl->qsfp_bus || pl->sfp_bus)))) return -EINVAL; if (pl->phydev) @@ -2271,6 +2303,8 @@ void phylink_start(struct phylink *pl) phy_start(pl->phydev); if (pl->sfp_bus) sfp_upstream_start(pl->sfp_bus); + if (pl->qsfp_bus) + qsfp_upstream_start(pl->qsfp_bus); } EXPORT_SYMBOL_GPL(phylink_start); @@ -2292,6 
+2326,8 @@ void phylink_stop(struct phylink *pl) if (pl->sfp_bus) sfp_upstream_stop(pl->sfp_bus); + if (pl->qsfp_bus) + qsfp_upstream_stop(pl->qsfp_bus); if (pl->phydev) phy_stop(pl->phydev); del_timer_sync(&pl->link_poll); @@ -2528,7 +2564,16 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, ASSERT_RTNL(); if (pl->phydev) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(req_mode) = { 0, }; struct ethtool_link_ksettings phy_kset = *kset; + unsigned long req_caps; + + req_caps = phylink_cap_from_speed_duplex(kset->base.speed, + kset->base.duplex); + phylink_caps_to_linkmodes(req_mode, req_caps); + linkmode_and(req_mode, req_mode, pl->supported); + if (linkmode_empty(req_mode)) + return -EINVAL; linkmode_and(phy_kset.link_modes.advertising, phy_kset.link_modes.advertising, @@ -3131,6 +3176,8 @@ int phylink_speed_down(struct phylink *pl, bool sync) if (!pl->sfp_bus && pl->phydev) ret = phy_speed_down(pl->phydev, sync); + if (!pl->qsfp_bus && pl->phydev) + ret = phy_speed_down(pl->phydev, sync); return ret; } @@ -3154,6 +3201,8 @@ int phylink_speed_up(struct phylink *pl) if (!pl->sfp_bus && pl->phydev) ret = phy_speed_up(pl->phydev); + if (!pl->qsfp_bus && pl->phydev) + ret = phy_speed_up(pl->phydev); return ret; } @@ -3485,6 +3534,338 @@ static const struct sfp_upstream_ops sfp_phylink_ops = { .disconnect_phy = phylink_sfp_disconnect_phy, }; +/* Helpers for qsfp operation assigned to qsfp upstream ops structure + *in registration of qsfp bus in phy-link creation + */ +static void phylink_qsfp_attach(void *upstream, struct qsfp_bus *bus) +{ + struct phylink *pl = upstream; + + pl->netdev->qsfp_bus = bus; +} + +static void phylink_qsfp_detach(void *upstream, struct qsfp_bus *bus) +{ + struct phylink *pl = upstream; + + pl->netdev->qsfp_bus = NULL; +} + +static const phy_interface_t phylink_qsfp_interface_preference[] = { + PHY_INTERFACE_MODE_25GBASER, + PHY_INTERFACE_MODE_USXGMII, + PHY_INTERFACE_MODE_10GBASER, + PHY_INTERFACE_MODE_5GBASER, + PHY_INTERFACE_MODE_2500BASEX, + 
PHY_INTERFACE_MODE_SGMII, + PHY_INTERFACE_MODE_1000BASEX, + PHY_INTERFACE_MODE_100BASEX, +}; + +static DECLARE_PHY_INTERFACE_MASK(phylink_qsfp_interfaces); + +static phy_interface_t phylink_choose_qsfp_interface(struct phylink *pl, + const unsigned long *intf) +{ + phy_interface_t interface; + size_t i; + + interface = PHY_INTERFACE_MODE_NA; + for (i = 0; i < ARRAY_SIZE(phylink_qsfp_interface_preference); i++) + if (test_bit(phylink_qsfp_interface_preference[i], intf)) { + interface = phylink_qsfp_interface_preference[i]; + break; + } + + return interface; +} + +static void phylink_qsfp_set_config(struct phylink *pl, u8 mode, + unsigned long *supported, + struct phylink_link_state *state) +{ + bool changed = false; + + phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n", + phylink_an_mode_str(mode), phy_modes(state->interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, supported); + + if (!linkmode_equal(pl->supported, supported)) { + linkmode_copy(pl->supported, supported); + changed = true; + } + + if (!linkmode_equal(pl->link_config.advertising, state->advertising)) { + linkmode_copy(pl->link_config.advertising, state->advertising); + changed = true; + } + + if (pl->cur_link_an_mode != mode || + pl->link_config.interface != state->interface) { + pl->cur_link_an_mode = mode; + pl->link_config.interface = state->interface; + + changed = true; + + phylink_info(pl, "switched to %s/%s link mode\n", + phylink_an_mode_str(mode), + phy_modes(state->interface)); + } + + if (changed && !test_bit(PHYLINK_DISABLE_STOPPED, + &pl->phylink_disable_state)) + phylink_mac_initial_config(pl, false); +} + +static int phylink_qsfp_config_phy(struct phylink *pl, u8 mode, + struct phy_device *phy) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(support1); + __ETHTOOL_DECLARE_LINK_MODE_MASK(support); + struct phylink_link_state config; + phy_interface_t iface; + int ret; + + linkmode_copy(support, phy->supported); + + memset(&config, 0, sizeof(config)); + linkmode_copy(config.advertising, 
phy->advertising); + config.interface = PHY_INTERFACE_MODE_NA; + config.speed = SPEED_UNKNOWN; + config.duplex = DUPLEX_UNKNOWN; + config.pause = MLO_PAUSE_AN; + + /* Ignore errors if we're expecting a PHY to attach later */ + ret = phylink_validate(pl, support, &config); + if (ret) { + phylink_err(pl, "validation with support %*pb failed: %pe\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, support, + ERR_PTR(ret)); + return ret; + } + + iface = qsfp_select_interface(pl->qsfp_bus, config.advertising); + if (iface == PHY_INTERFACE_MODE_NA) { + phylink_err(pl, + "selection of interface failed, advertisement %*pb\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising); + return -EINVAL; + } + + config.interface = iface; + linkmode_copy(support1, support); + ret = phylink_validate(pl, support1, &config); + if (ret) { + phylink_err(pl, + "validation of %s/%s with support %*pb failed: %pe\n", + phylink_an_mode_str(mode), + phy_modes(config.interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, support, + ERR_PTR(ret)); + return ret; + } + + pl->link_port = pl->qsfp_port; + + phylink_qsfp_set_config(pl, mode, support, &config); + + return 0; +} + +static int phylink_qsfp_config_optical(struct phylink *pl) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(support); + DECLARE_PHY_INTERFACE_MASK(interfaces); + struct phylink_link_state config; + phy_interface_t interface; + int ret; + + phylink_dbg(pl, "optical QSFP: interfaces=[mac=%*pbl, qsfp=%*pbl]\n", + (int)PHY_INTERFACE_MODE_MAX, + pl->config->supported_interfaces, + (int)PHY_INTERFACE_MODE_MAX, + pl->qsfp_interfaces); + + /* Find the union of the supported interfaces by the PCS/MAC and + * the SFP module. 
+ */ + phy_interface_and(interfaces, pl->config->supported_interfaces, + pl->qsfp_interfaces); + if (phy_interface_empty(interfaces)) { + phylink_err(pl, "unsupported QSFP module: no common interface modes\n"); + return -EINVAL; + } + + memset(&config, 0, sizeof(config)); + linkmode_copy(support, pl->qsfp_support); + linkmode_copy(config.advertising, pl->qsfp_support); + config.speed = SPEED_UNKNOWN; + config.duplex = DUPLEX_UNKNOWN; + config.pause = MLO_PAUSE_AN; + + /* For all the interfaces that are supported, reduce the qsfp_support + * mask to only those link modes that can be supported. + */ + ret = phylink_validate_mask(pl, NULL, pl->qsfp_support, &config, + interfaces); + if (ret) { + phylink_err(pl, "unsupported QSFP module: validation with support %*pb failed\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, support); + return ret; + } + + interface = phylink_choose_qsfp_interface(pl, interfaces); + if (interface == PHY_INTERFACE_MODE_NA) { + phylink_err(pl, "failed to select QSFP interface\n"); + return -EINVAL; + } + + phylink_dbg(pl, "optical QSFP: chosen %s interface\n", + phy_modes(interface)); + + config.interface = interface; + + /* Ignore errors if we're expecting a PHY to attach later */ + ret = phylink_validate(pl, support, &config); + if (ret) { + phylink_err(pl, "validation with support %*pb failed: %pe\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, support, + ERR_PTR(ret)); + return ret; + } + + pl->link_port = pl->qsfp_port; + + phylink_qsfp_set_config(pl, MLO_AN_INBAND, pl->qsfp_support, &config); + + return 0; +} + +static int phylink_qsfp_module_insert(void *upstream, + const struct qsfp_eeprom_id *id) +{ + struct phylink *pl = upstream; + + ASSERT_RTNL(); + + linkmode_zero(pl->qsfp_support); + phy_interface_zero(pl->qsfp_interfaces); + qsfp_parse_support(pl->qsfp_bus, id, pl->qsfp_support, pl->qsfp_interfaces); + pl->qsfp_port = qsfp_parse_port(pl->qsfp_bus, id, pl->qsfp_support); + + /* If this module may have a PHY connecting later, defer until later */ + 
pl->qsfp_may_have_phy = qsfp_may_have_phy(pl->qsfp_bus, id); + if (pl->qsfp_may_have_phy) + return 0; + + return phylink_qsfp_config_optical(pl); +} + +static int phylink_qsfp_module_start(void *upstream) +{ + struct phylink *pl = upstream; + + /* If this QSFP module has a PHY, start the PHY now. */ + if (pl->phydev) { + phy_start(pl->phydev); + return 0; + } + + /* If the module may have a PHY but we didn't detect one we + * need to configure the MAC here. + */ + if (!pl->qsfp_may_have_phy) + return 0; + + return phylink_qsfp_config_optical(pl); +} + +static void phylink_qsfp_module_stop(void *upstream) +{ + struct phylink *pl = upstream; + + /* If this SFP module has a PHY, stop it. */ + if (pl->phydev) + phy_stop(pl->phydev); +} + +static void phylink_qsfp_link_down(void *upstream) +{ + struct phylink *pl = upstream; + + ASSERT_RTNL(); + + phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK); +} + +static void phylink_qsfp_link_up(void *upstream) +{ + struct phylink *pl = upstream; + + ASSERT_RTNL(); + + phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_LINK); +} + +static int phylink_qsfp_connect_phy(void *upstream, struct phy_device *phy) +{ + struct phylink *pl = upstream; + phy_interface_t interface; + u8 mode; + int ret; + + /* + * This is the new way of dealing with flow control for PHYs, + * as described by Timur Tabi in commit 529ed1275263 ("net: phy: + * phy drivers should not set SUPPORTED_[Asym_]Pause") except + * using our validate call to the MAC, we rely upon the MAC + * clearing the bits from both supported and advertising fields. 
+ */ + phy_support_asym_pause(phy); + + if (phylink_phy_no_inband(phy)) + mode = MLO_AN_PHY; + else + mode = MLO_AN_INBAND; + + /* Set the PHY's host supported interfaces */ + phy_interface_and(phy->host_interfaces, phylink_qsfp_interfaces, + pl->config->supported_interfaces); + + /* Do the initial configuration */ + ret = phylink_qsfp_config_phy(pl, mode, phy); + if (ret < 0) + return ret; + + interface = pl->link_config.interface; + ret = phylink_attach_phy(pl, phy, interface); + if (ret < 0) + return ret; + + ret = phylink_bringup_phy(pl, phy, interface); + if (ret) + phy_detach(phy); + + return ret; +} + +static void phylink_qsfp_disconnect_phy(void *upstream) +{ + phylink_disconnect_phy(upstream); +} + +static const struct qsfp_upstream_ops qsfp_phylink_ops = { + .attach = phylink_qsfp_attach, + .detach = phylink_qsfp_detach, + .module_insert = phylink_qsfp_module_insert, + .module_start = phylink_qsfp_module_start, + .module_stop = phylink_qsfp_module_stop, + .link_up = phylink_qsfp_link_up, + .link_down = phylink_qsfp_link_down, + .connect_phy = phylink_qsfp_connect_phy, + .disconnect_phy = phylink_qsfp_disconnect_phy, +}; + /* Helpers for MAC drivers */ static struct { @@ -3878,6 +4259,10 @@ static int __init phylink_init(void) __set_bit(phylink_sfp_interface_preference[i], phylink_sfp_interfaces); + for (int i = 0; i < ARRAY_SIZE(phylink_qsfp_interface_preference); ++i) + __set_bit(phylink_qsfp_interface_preference[i], + phylink_qsfp_interfaces); + return 0; } diff --git a/drivers/net/phy/qsfp.c b/drivers/net/phy/qsfp.c new file mode 100644 index 0000000000000..a4c618307ecc9 --- /dev/null +++ b/drivers/net/phy/qsfp.c @@ -0,0 +1,1860 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "swphy.h" + +static void qsfp_sm_event(struct qsfp *qsfp, unsigned int event); +static 
void get_module_revision(struct qsfp *qsfp); + +#define QSFP_TX_CHANNEL_4 0x4 +#define QSFP_TX_CHANNEL_3 0x3 +#define QSFP_TX_CHANNEL_2 0x2 +#define QSFP_TX_CHANNEL_1 0x1 +#define QSFP_RX_CHANNEL_4 0x4 +#define QSFP_RX_CHANNEL_3 0x3 +#define QSFP_RX_CHANNEL_2 0x2 +#define QSFP_RX_CHANNEL_1 0x1 + +enum { + GPIO_MODULE_PRESENT, + GPIO_MODULE_INTERRUPT, + GPIO_MODULE_INIT_MODE, + GPIO_MODULE_RESET, + GPIO_MODULE_SELECT, + GPIO_MAX, + + QSFP_F_PRESENT = BIT(GPIO_MODULE_PRESENT), + QSFP_INTERRUPT = BIT(GPIO_MODULE_INTERRUPT), + QSFP_INIT = BIT(GPIO_MODULE_INIT_MODE), + QSFP_RESET = BIT(GPIO_MODULE_RESET), + QSFP_SELECT = BIT(GPIO_MODULE_SELECT), + + QSFP_E_INSERT = 0, + QSFP_E_REMOVE, + QSFP_E_DEV_ATTACH, + QSFP_E_DEV_DETACH, + QSFP_E_DEV_DOWN, + QSFP_E_DEV_UP, + QSFP_E_TX_FAULT, + QSFP_E_TX_CLEAR, + QSFP_E_TX_LOS, + QSFP_E_RX_LOS, + QSFP_E_TIMEOUT, + + QSFP_MOD_EMPTY = 0, + QSFP_MOD_ERROR, + QSFP_MOD_PROBE, + QSFP_MOD_WAITDEV, + QSFP_MOD_HPOWER, + QSFP_MOD_WAITPWR, + QSFP_MOD_PRESENT, + + QSFP_DEV_DETACHED = 0, + QSFP_DEV_DOWN, + QSFP_DEV_UP, + + QSFP_S_DOWN = 0, + QSFP_S_FAIL, + QSFP_S_WAIT, + QSFP_S_INIT, + QSFP_S_INIT_PHY, + QSFP_S_INIT_TX_FAULT, + QSFP_S_WAIT_LOS, + QSFP_S_LINK_UP, + QSFP_S_TX_FAULT, + QSFP_S_REINIT, + QSFP_S_TX_DISABLE, +}; + +static const char * const mod_state_strings[] = { + [QSFP_MOD_EMPTY] = "empty", + [QSFP_MOD_ERROR] = "error", + [QSFP_MOD_PROBE] = "probe", + [QSFP_MOD_WAITDEV] = "waitdev", + [QSFP_MOD_HPOWER] = "hpower", + [QSFP_MOD_WAITPWR] = "waitpwr", + [QSFP_MOD_PRESENT] = "present", +}; + +static const char *mod_state_to_str(unsigned short mod_state) +{ + if (mod_state >= ARRAY_SIZE(mod_state_strings)) + return "Unknown module state"; + return mod_state_strings[mod_state]; +} + +static const char * const dev_state_strings[] = { + [QSFP_DEV_DETACHED] = "detached", + [QSFP_DEV_DOWN] = "down", + [QSFP_DEV_UP] = "up", +}; + +static const char *dev_state_to_str(unsigned short dev_state) +{ + if (dev_state >= ARRAY_SIZE(dev_state_strings)) 
+ return "Unknown device state"; + return dev_state_strings[dev_state]; +} + +static const char * const event_strings[] = { + [QSFP_E_INSERT] = "insert", + [QSFP_E_REMOVE] = "remove", + [QSFP_E_DEV_ATTACH] = "dev_attach", + [QSFP_E_DEV_DETACH] = "dev_detach", + [QSFP_E_DEV_DOWN] = "dev_down", + [QSFP_E_DEV_UP] = "dev_up", + [QSFP_E_TX_FAULT] = "tx_fault", + [QSFP_E_TX_CLEAR] = "tx_clear", + [QSFP_E_TX_LOS] = "tx_los", + [QSFP_E_RX_LOS] = "rx_los", + [QSFP_E_TIMEOUT] = "timeout", +}; + +static const char *event_to_str(unsigned short event) +{ + if (event >= ARRAY_SIZE(event_strings)) + return "Unknown event"; + return event_strings[event]; +} + +static const char * const sm_state_strings[] = { + [QSFP_S_DOWN] = "down", + [QSFP_S_FAIL] = "fail", + [QSFP_S_WAIT] = "wait", + [QSFP_S_INIT] = "init", + [QSFP_S_INIT_PHY] = "init_phy", + [QSFP_S_INIT_TX_FAULT] = "init_tx_fault", + [QSFP_S_WAIT_LOS] = "wait_los", + [QSFP_S_LINK_UP] = "link_up", + [QSFP_S_TX_FAULT] = "tx_fault", + [QSFP_S_REINIT] = "reinit", + [QSFP_S_TX_DISABLE] = "tx_disable", +}; + +static const char *sm_state_to_str(unsigned short sm_state) +{ + if (sm_state >= ARRAY_SIZE(sm_state_strings)) + return "Unknown state"; + return sm_state_strings[sm_state]; +} + +static const char *gpio_of_names[] = { + "qsfpdd_modprsn", + "qsfpdd_intn", + "qsfpdd_initmode", + "qsfpdd_resetn", + "qsfpdd_modseln", +}; + +static const enum gpiod_flags gpio_flags[] = { + GPIOD_IN, + GPIOD_IN, + GPIOD_ASIS, + GPIOD_ASIS, + GPIOD_ASIS, +}; + +/* t_start_up (SFF-8431) or t_init (SFF-8472) is the time required for a + * non-cooled module to initialise its laser safety circuitry. We wait + * an initial T_WAIT period before we check the tx fault to give any PHY + * on board (for a copper SFP) time to initialise. 
+ */ +#define T_WAIT msecs_to_jiffies(50) +#define T_WAIT_ROLLBALL msecs_to_jiffies(25000) +#define T_START_UP msecs_to_jiffies(300) +#define T_START_UP_BAD_GPON msecs_to_jiffies(60000) + +/* t_reset is the time required to assert the TX_DISABLE signal to reset + * an indicated TX_FAULT. + */ +#define T_RESET_US 10 +#define T_FAULT_RECOVER msecs_to_jiffies(1000) + +/* N_FAULT_INIT is the number of recovery attempts at module initialisation + * time. If the TX_FAULT signal is not deasserted after this number of + * attempts at clearing it, we decide that the module is faulty. + * N_FAULT is the same but after the module has initialised. + */ +#define N_FAULT_INIT 5 +#define N_FAULT 5 + +/* T_PHY_RETRY is the time interval between attempts to probe the PHY. + * R_PHY_RETRY is the number of attempts. + */ +#define T_PHY_RETRY msecs_to_jiffies(50) +#define R_PHY_RETRY 12 + +/* SFP module presence detection is poor: the three MOD DEF signals are + * the same length on the PCB, which means it's possible for MOD DEF 0 to + * connect before the I2C bus on MOD DEF 1/2. + * + * The SFF-8472 specifies t_serial ("Time from power on until module is + * ready for data transmission over the two wire serial bus.") as 300ms. + */ +#define T_SERIAL msecs_to_jiffies(300) +#define T_HPOWER_LEVEL msecs_to_jiffies(300) +#define T_PROBE_RETRY_INIT msecs_to_jiffies(100) +#define R_PROBE_RETRY_INIT 10 +#define T_PROBE_RETRY_SLOW msecs_to_jiffies(5000) +#define R_PROBE_RETRY_SLOW 12 + +/* SFP modules appear to always have their PHY configured for bus address + * 0x56 (which with mdio-i2c, translates to a PHY address of 22). + * RollBall SFPs access phy via SFP Enhanced Digital Diagnostic Interface + * via address 0x51 (mdio-i2c will use RollBall protocol on this address). 
+ */ +#define QSFP_PHY_ADDR 22 +#define QSFP_PHY_ADDR_ROLLBALL 17 + +struct sff_data { + unsigned int gpios; + bool (*module_supported)(const struct qsfp_eeprom_id *id); +}; + +struct qsfp { + struct device *dev; + struct i2c_adapter *i2c; + struct mii_bus *i2c_mii; + struct qsfp_bus *qsfp_bus; + enum mdio_i2c_proto mdio_protocol; + struct phy_device *mod_phy; + const struct sff_data *type; + size_t i2c_block_size; + u32 max_power_mW; + unsigned int module_revision; + unsigned int module_present; + int channel_number; + unsigned char vendor_name[16]; + unsigned char sn_number[16]; + unsigned char part_number[16]; + + unsigned int (*get_state)(struct qsfp *); + void (*set_state)(struct qsfp *, unsigned int); + int (*read)(struct qsfp *, bool, u8, void *, size_t); + int (*write)(struct qsfp *, bool, u8, void *, size_t); + + struct gpio_desc *gpio[GPIO_MAX]; + int gpio_irq[GPIO_MAX]; + + bool need_poll; + + struct mutex st_mutex; /* Protects state */ + unsigned int state_hw_mask; + unsigned int state_soft_mask; + unsigned int state; + struct delayed_work poll; + struct delayed_work timeout; + struct mutex sm_mutex; /* Protects state machine */ + unsigned char sm_mod_state; + unsigned char sm_mod_tries_init; + unsigned char sm_mod_tries; + unsigned char sm_dev_state; + unsigned short sm_state; + unsigned char sm_fault_retries; + unsigned char sm_phy_retries; + + struct qsfp_eeprom_id id; + unsigned int module_power_mW; + unsigned int module_t_start_up; + unsigned int module_t_wait; + bool tx_fault_ignore; + + const struct qsfp_quirk *quirk; +}; + +static bool qsfp_module_supported(const struct qsfp_eeprom_id *id) +{ + if (id->base.etile_qsfp_identifier == SFF8024_ID_QSFP || + id->base.etile_qsfp_identifier == SFF8024_ID_QSFP_PLUS || + id->base.etile_qsfp_identifier == SFF8024_ID_QSFP_28) + return true; + + /* QSFP GPON module Ubiquiti U-Fiber Instant has in its EEPROM stored + * phys id SFF instead of QSFP. 
Therefore mark this module explicitly + * as supported based on vendor name and pn match. + */ + if (id->base.etile_qsfp_identifier == SFF8024_ID_QSFP_DD_INF_8628 && + id->base.etile_qsfp_ext_identifier == QSFP_EXT_IDENTIFIER && + !memcmp(id->base.etile_qsfp_vendor_name, "UBNT ", 16) && + !memcmp(id->base.etile_qsfp_vendor_pn, "UF-INSTANT ", 16)) + return true; + + return false; +} + +static const struct sff_data qsfp_data = { + .gpios = QSFP_F_PRESENT | QSFP_INTERRUPT | QSFP_INIT | QSFP_RESET | + QSFP_SELECT, + .module_supported = qsfp_module_supported, +}; + +static const struct of_device_id qsfp_of_match[] = { + { .compatible = "sff,qsfp", .data = &qsfp_data, }, + { }, +}; +MODULE_DEVICE_TABLE(of, qsfp_of_match); + +static void qsfp_fixup_long_startup(struct qsfp *qsfp) +{ + qsfp->module_t_start_up = T_START_UP_BAD_GPON; +} + +static void qsfp_fixup_ignore_tx_fault(struct qsfp *qsfp) +{ + qsfp->tx_fault_ignore = true; +} + +static void qsfp_fixup_halny_gsfp(struct qsfp *qsfp) +{ + /* Ignore the TX_FAULT and LOS signals on this module. + * these are possibly used for other purposes on this + * module, e.g. a serial port. + */ + //qsfp->state_hw_mask &= ~(SFP_F_TX_FAULT | SFP_F_LOS); +} + +static void qsfp_fixup_rollball(struct qsfp *qsfp) +{ + qsfp->mdio_protocol = MDIO_I2C_ROLLBALL; + qsfp->module_t_wait = T_WAIT_ROLLBALL; +} + +static void qsfp_fixup_rollball_cc(struct qsfp *qsfp) +{ + qsfp_fixup_rollball(qsfp); + + /* Some RollBall SFPs may have wrong (zero) extended compliance code + * burned in EEPROM. For PHY probing we need the correct one. 
+ */ + //qsfp->id.base.extended_cc = SFF8024_ECC_10GBASE_T_SFI; +} + +static void qsfp_quirk_2500basex(const struct qsfp_eeprom_id *id, + unsigned long *modes, + unsigned long *interfaces) +{ + linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, modes); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces); +} + +static void qsfp_quirk_ubnt_uf_instant(const struct qsfp_eeprom_id *id, + unsigned long *modes, + unsigned long *interfaces) +{ + /* Ubiquiti U-Fiber Instant module claims that support all transceiver + * types including 10G Ethernet which is not truth. So clear all claimed + * modes and set only one mode which module supports: 1000baseX_Full. + */ + linkmode_zero(modes); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, modes); +} + +#define QSFP_QUIRK(_v, _p, _m, _f) \ + { .vendor = _v, .part = _p, .modes = _m, .fixup = _f, } +#define QSFP_QUIRK_M(_v, _p, _m) QSFP_QUIRK(_v, _p, _m, NULL) +#define QSFP_QUIRK_F(_v, _p, _f) QSFP_QUIRK(_v, _p, NULL, _f) + +static const struct qsfp_quirk qsfp_quirks[] = { + // Alcatel Lucent G-010S-P can operate at 2500base-X, but incorrectly + // report 2500MBd NRZ in their EEPROM + QSFP_QUIRK_M("ALCATELLUCENT", "G010SP", qsfp_quirk_2500basex), + + // Alcatel Lucent G-010S-A can operate at 2500base-X, but report 3.2GBd + // NRZ in their EEPROM + QSFP_QUIRK("ALCATELLUCENT", "3FE46541AA", qsfp_quirk_2500basex, + qsfp_fixup_long_startup), + + QSFP_QUIRK_F("HALNy", "HL-GSFP", qsfp_fixup_halny_gsfp), + + // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd NRZ in + // their EEPROM + QSFP_QUIRK("HUAWEI", "MA5671A", qsfp_quirk_2500basex, + qsfp_fixup_ignore_tx_fault), + + // Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report + // 2500MBd NRZ in their EEPROM + QSFP_QUIRK_M("Lantech", "8330-262D-E", qsfp_quirk_2500basex), + + QSFP_QUIRK_M("UBNT", "UF-INSTANT", qsfp_quirk_ubnt_uf_instant), + + QSFP_QUIRK_F("OEM", "SFP-10G-T", qsfp_fixup_rollball_cc), + QSFP_QUIRK_F("OEM", "RTSFP-10", 
qsfp_fixup_rollball_cc), + QSFP_QUIRK_F("OEM", "RTSFP-10G", qsfp_fixup_rollball_cc), + QSFP_QUIRK_F("Turris", "RTSFP-10", qsfp_fixup_rollball), + QSFP_QUIRK_F("Turris", "RTSFP-10G", qsfp_fixup_rollball), +}; + +static size_t qsfp_strlen(const char *str, size_t maxlen) +{ + size_t size, i; + + /* Trailing characters should be filled with space chars, but + * some manufacturers can't read SFF-8472 and use NUL. + */ + for (i = 0, size = 0; i < maxlen; i++) + if (str[i] != ' ' && str[i] != '\0') + size = i + 1; + + return size; +} + +static bool qsfp_match(const char *qs, const char *str, size_t len) +{ + if (!qs) + return true; + if (strlen(qs) != len) + return false; + return !strncmp(qs, str, len); +} + +static const struct qsfp_quirk *qsfp_lookup_quirk(const struct qsfp_eeprom_id *id) +{ + const struct qsfp_quirk *q; + unsigned int i; + size_t vs, ps; + + vs = qsfp_strlen(id->base.etile_qsfp_vendor_name, + ARRAY_SIZE(id->base.etile_qsfp_vendor_name)); + ps = qsfp_strlen(id->base.etile_qsfp_vendor_pn, ARRAY_SIZE(id->base.etile_qsfp_vendor_pn)); + + for (i = 0, q = qsfp_quirks; i < ARRAY_SIZE(qsfp_quirks); i++, q++) + if (qsfp_match(q->vendor, id->base.etile_qsfp_vendor_name, vs) && + qsfp_match(q->part, id->base.etile_qsfp_vendor_pn, ps)) + return q; + + return NULL; +} + +static unsigned long poll_jiffies; + +static unsigned int qsfp_gpio_get_state(struct qsfp *qsfp) +{ + unsigned int i, state, v; + + for (i = state = 0; i < GPIO_MAX; i++) { + if (gpio_flags[i] != GPIOD_IN || !qsfp->gpio[i]) + continue; + + v = gpiod_get_value_cansleep(qsfp->gpio[i]); + if (v) + state |= BIT(i); + } + + return state; +} + +static unsigned int sff_gpio_get_state(struct qsfp *qsfp) +{ + return qsfp_gpio_get_state(qsfp) | QSFP_F_PRESENT; +} + +static void qsfp_gpio_set_state(struct qsfp *qsfp, unsigned int state) +{ + if (state & QSFP_F_PRESENT) { + /* If the module is present, drive the signals */ + gpiod_direction_output(qsfp->gpio[GPIO_MODULE_SELECT], + state & QSFP_SELECT); + } 
else { + /* Otherwise, let them float to the pull-ups */ + gpiod_direction_input(qsfp->gpio[GPIO_MODULE_PRESENT]); + } +} + +static int qsfp_i2c_read(struct qsfp *qsfp, bool a2, u8 dev_addr, void *buf, + size_t len) +{ + struct i2c_msg msgs[2]; + u8 bus_addr = a2 ? 0x51 : 0x50; + size_t block_size = qsfp->i2c_block_size; + size_t this_len; + int ret; + + msgs[0].addr = bus_addr; + msgs[0].flags = 0; + msgs[0].len = 1; + msgs[0].buf = &dev_addr; + msgs[1].addr = bus_addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = len; + msgs[1].buf = buf; + + while (len) { + this_len = len; + if (this_len > block_size) + this_len = block_size; + + msgs[1].len = this_len; + + ret = i2c_transfer(qsfp->i2c, msgs, ARRAY_SIZE(msgs)); + if (ret < 0) + return ret; + + if (ret != ARRAY_SIZE(msgs)) + break; + + msgs[1].buf += this_len; + dev_addr += this_len; + len -= this_len; + } + + return msgs[1].buf - (u8 *)buf; +} + +static int qsfp_i2c_write(struct qsfp *qsfp, bool a2, u8 dev_addr, void *buf, + size_t len) +{ + struct i2c_msg msgs[1]; + u8 bus_addr = a2 ? 0x51 : 0x50; + int ret; + + msgs[0].addr = bus_addr; + msgs[0].flags = 0; + msgs[0].len = 1 + len; + msgs[0].buf = kmalloc(1 + len, GFP_KERNEL); + if (!msgs[0].buf) + return -ENOMEM; + + msgs[0].buf[0] = dev_addr; + memcpy(&msgs[0].buf[1], buf, len); + + ret = i2c_transfer(qsfp->i2c, msgs, ARRAY_SIZE(msgs)); + + kfree(msgs[0].buf); + + if (ret < 0) + return ret; + + return ret == ARRAY_SIZE(msgs) ? 
len : 0; +} + +static int qsfp_i2c_configure(struct qsfp *qsfp, struct i2c_adapter *i2c) +{ + if (!i2c_check_functionality(i2c, I2C_FUNC_I2C)) + return -EINVAL; + + qsfp->i2c = i2c; + qsfp->read = qsfp_i2c_read; + qsfp->write = qsfp_i2c_write; + + return 0; +} + +static int qsfp_i2c_mdiobus_create(struct qsfp *qsfp) +{ + struct mii_bus *i2c_mii; + int ret; + + i2c_mii = mdio_i2c_alloc(qsfp->dev, qsfp->i2c, qsfp->mdio_protocol); + if (IS_ERR(i2c_mii)) + return PTR_ERR(i2c_mii); + + i2c_mii->name = "QSFP I2C Bus"; + i2c_mii->phy_mask = ~0; + + ret = mdiobus_register(i2c_mii); + if (ret < 0) { + mdiobus_free(i2c_mii); + return ret; + } + + qsfp->i2c_mii = i2c_mii; + + return 0; +} + +static void qsfp_i2c_mdiobus_destroy(struct qsfp *qsfp) +{ + mdiobus_unregister(qsfp->i2c_mii); + qsfp->i2c_mii = NULL; +} + +/* Interface */ +static int qsfp_read(struct qsfp *qsfp, bool a2, u8 addr, void *buf, size_t len) +{ + return qsfp->read(qsfp, a2, addr, buf, len); +} + +static int qsfp_write(struct qsfp *qsfp, bool a2, u8 addr, void *buf, size_t len) +{ + return qsfp->write(qsfp, a2, addr, buf, len); +} + +static void qsfp_soft_stop_poll(struct qsfp *qsfp) +{ + qsfp->state_soft_mask = 0; +} + +static unsigned int qsfp_get_state(struct qsfp *qsfp) +{ + unsigned int state = qsfp->get_state(qsfp); + return state; +} + +static void qsfp_set_state(struct qsfp *qsfp, unsigned int state) +{ + qsfp->set_state(qsfp, state); +} + +static unsigned int qsfp_check(void *buf, size_t len) +{ + u8 *p, check; + + for (p = buf, check = 0; len; p++, len--) + check += *p; + + return check; +} + +/* Helpers */ +static void qsfp_module_tx_disable(struct qsfp *qsfp) +{ + dev_dbg(qsfp->dev, "tx disable %u -> %u\n", + qsfp->id.base.etile_qsfp_options_3 & QSFP_OPTIONS_TX_DISABLE ? 
+ 1 : + 0, + 1); + qsfp->id.base.etile_qsfp_options_3 |= QSFP_OPTIONS_TX_DISABLE; + qsfp_set_state(qsfp, qsfp->state); +} + +static void qsfp_module_tx_fault_reset(struct qsfp *qsfp) +{ + unsigned int state = qsfp->id.base.etile_qsfp_options_3; + int ret; + + ret = qsfp_read(qsfp, false, QSFP_OPTIONS, &state, sizeof(state)); + + if (state & QSFP_OPTIONS_TX_DISABLE) + return; + + qsfp_set_state(qsfp, state | QSFP_OPTIONS_TX_DISABLE); + + udelay(T_RESET_US); + + qsfp_set_state(qsfp, state); +} + +/* QSFP state machine */ +static void qsfp_sm_set_timer(struct qsfp *qsfp, unsigned int timeout) +{ + if (timeout) + mod_delayed_work(system_power_efficient_wq, &qsfp->timeout, + timeout); + else + cancel_delayed_work(&qsfp->timeout); +} + +static void qsfp_sm_next(struct qsfp *qsfp, unsigned int state, + unsigned int timeout) +{ + qsfp->sm_state = state; + qsfp_sm_set_timer(qsfp, timeout); +} + +static void qsfp_sm_mod_next(struct qsfp *qsfp, unsigned int state, + unsigned int timeout) +{ + qsfp->sm_mod_state = state; + qsfp_sm_set_timer(qsfp, timeout); +} + +static void qsfp_sm_phy_detach(struct qsfp *qsfp) +{ + qsfp_remove_phy(qsfp->qsfp_bus); + phy_device_remove(qsfp->mod_phy); + phy_device_free(qsfp->mod_phy); + qsfp->mod_phy = NULL; +} + +static int qsfp_sm_probe_phy(struct qsfp *qsfp, int addr, bool is_c45) +{ + struct phy_device *phy; + int err; + + phy = get_phy_device(qsfp->i2c_mii, addr, is_c45); + if (phy == ERR_PTR(-ENODEV)) + return PTR_ERR(phy); + if (IS_ERR(phy)) { + dev_err(qsfp->dev, "mdiobus scan returned %pe\n", phy); + return PTR_ERR(phy); + } + + err = phy_device_register(phy); + if (err) { + phy_device_free(phy); + dev_err(qsfp->dev, "phy_device_register failed: %pe\n", + ERR_PTR(err)); + return err; + } + + err = qsfp_add_phy(qsfp->qsfp_bus, phy); + if (err) { + phy_device_remove(phy); + phy_device_free(phy); + dev_err(qsfp->dev, "qsfp_add_phy failed: %pe\n", ERR_PTR(err)); + return err; + } + + qsfp->mod_phy = phy; + + return 0; +} + +static void 
qsfp_sm_link_up(struct qsfp *qsfp) +{ + qsfp_link_up(qsfp->qsfp_bus); + qsfp_sm_next(qsfp, QSFP_S_LINK_UP, 0); +} + +static void qsfp_sm_link_down(struct qsfp *qsfp) +{ + qsfp_link_down(qsfp->qsfp_bus); +} + +static void qsfp_sm_link_check_los(struct qsfp *qsfp) +{ + int ret; + u8 buf[16] = {0}; + + ret = qsfp_read(qsfp, false, QSFP_RX_TX_LOSS, buf, 1); + + if ((buf[0] & 0x80) || (buf[0] & 0x08)) + qsfp_sm_next(qsfp, QSFP_S_WAIT_LOS, 0); + else + qsfp_sm_link_up(qsfp); +} + +static bool qsfp_los_event_active(struct qsfp *qsfp, unsigned int event) +{ + int ret; + u8 buf[16] = {0}; + + ret = qsfp_read(qsfp, false, QSFP_RX_TX_LOSS, buf, 1); + + if ((buf[0] & 0x80) || (buf[0] & 0x08)) + return 1; + + return 0; +} + +static bool qsfp_los_event_inactive(struct qsfp *qsfp, unsigned int event) +{ + int ret; + u8 buf[16] = {0}; + + ret = qsfp_read(qsfp, false, QSFP_RX_TX_LOSS, buf, 1); + + if (!(buf[0] & (1 << 3))) + return 1; + + return 0; +} + +static void qsfp_sm_fault(struct qsfp *qsfp, unsigned int next_state, bool warn) +{ + if (qsfp->sm_fault_retries && !--qsfp->sm_fault_retries) { + dev_err(qsfp->dev, + "module persistently indicates fault, disabling\n"); + qsfp_sm_next(qsfp, QSFP_S_TX_DISABLE, 0); + } else { + if (warn) + dev_err(qsfp->dev, "module transmit fault indicated\n"); + + qsfp_sm_next(qsfp, next_state, T_FAULT_RECOVER); + } +} + +static int qsfp_sm_add_mdio_bus(struct qsfp *qsfp) +{ + if (qsfp->mdio_protocol != MDIO_I2C_NONE) + return qsfp_i2c_mdiobus_create(qsfp); + + return 0; +} + +/* Probe a SFP for a PHY device if the module supports copper - the PHY + * normally sits at I2C bus address 0x56, and may either be a clause 22 + * or clause 45 PHY. + * + * Clause 22 copper SFP modules normally operate in Cisco SGMII mode with + * negotiation enabled, but some may be in 1000base-X - which is for the + * PHY driver to determine. + * + * Clause 45 copper SFP+ modules (10G) appear to switch their interface + * mode according to the negotiated line speed. 
+ */ +static int qsfp_sm_probe_for_phy(struct qsfp *qsfp) +{ + int err = 0; + + switch (qsfp->mdio_protocol) { + case MDIO_I2C_NONE: + break; + + case MDIO_I2C_MARVELL_C22: + err = qsfp_sm_probe_phy(qsfp, QSFP_PHY_ADDR, false); + break; + + case MDIO_I2C_C45: + err = qsfp_sm_probe_phy(qsfp, QSFP_PHY_ADDR, true); + break; + + case MDIO_I2C_ROLLBALL: + err = qsfp_sm_probe_phy(qsfp, QSFP_PHY_ADDR_ROLLBALL, true); + break; + } + + return err; +} + +static int qsfp_module_parse_power(struct qsfp *qsfp) +{ + u32 power_mW = 1000; + + if (power_mW > qsfp->max_power_mW) { + /* Module power specification exceeds the allowed maximum. */ + if (qsfp->id.base.etile_qsfp_spec_compliance_1[0] == + SFF8636_QSFP_DD_ECC_100GBASE_CR4 && + !(qsfp->id.base.etile_qsfp_diag_monitor & + QSFP_DIAGMON_DDM)) { + dev_err(qsfp->dev, + "Host does not support %u.%uW modules\n", + power_mW / 1000, (power_mW / 100) % 10); + return -EINVAL; + } + } + + /* If the module requires a higher power mode, but also requires + * an address change sequence, warn the user that the module may + * not be functional. + */ + if (qsfp->id.base.etile_qsfp_diag_monitor & QSFP_DIAGMON_ADDRMODE && + power_mW > 1000) { + dev_warn(qsfp->dev, + "Address Change Sequence not supported but module requires %u.%uW, module may not be functional\n", + power_mW / 1000, (power_mW / 100) % 10); + return 0; + } + + qsfp->module_power_mW = power_mW; + + return 0; +} + +static int qsfp_sm_mod_hpower(struct qsfp *qsfp, bool enable) +{ + u8 val; + int err; + + qsfp_set_state(qsfp, qsfp->state & QSFP_INIT); + + err = qsfp_read(qsfp, false, QSFP_EXT_STATUS, &val, sizeof(val)); + if (err != sizeof(val)) { + dev_err(qsfp->dev, "Failed to read EEPROM: %pe\n", ERR_PTR(err)); + return -EAGAIN; + } + + /* DM7052 reports as a high power module, responds to reads (with + * all bytes 0xff) at 0x51 but does not accept writes. In any case, + * if the bit is already set, we're already in high power mode. 
+ */ + if (!!(val & BIT(0)) == enable) + return 0; + + if (enable) + val |= BIT(0); + else + val &= ~BIT(0); + + err = qsfp_write(qsfp, false, QSFP_EXT_STATUS, &val, sizeof(val)); + if (err != sizeof(val)) { + dev_err(qsfp->dev, "Failed to write EEPROM: %pe\n", + ERR_PTR(err)); + return -EAGAIN; + } + + if (enable) + dev_info(qsfp->dev, "Module switched to %u.%uW power level\n", + qsfp->module_power_mW / 1000, + (qsfp->module_power_mW / 100) % 10); + + return 0; +} + +/* GPON modules based on Realtek RTL8672 and RTL9601C chips (e.g. V-SOL + * V2801F, CarlitoxxPro CPGOS03-0490, Ubiquiti U-Fiber Instant, ...) do + * not support multibyte reads from the EEPROM. Each multi-byte read + * operation returns just one byte of EEPROM followed by zeros. There is + * no way to identify which modules are using Realtek RTL8672 and RTL9601C + * chips. Moreover every OEM of V-SOL V2801F module puts its own vendor + * name and vendor id into EEPROM, so there is even no way to detect if + * module is V-SOL V2801F. Therefore check for those zeros in the read + * data and then based on check switch to reading EEPROM to one byte + * at a time. 
+ */
+static bool qsfp_id_needs_byte_io(struct qsfp *qsfp, void *buf, size_t len)
+{
+	size_t i, block_size = qsfp->i2c_block_size;
+
+	/* Already using byte IO */
+	if (block_size == 1)
+		return false;
+
+	/* The broken emulations return one real byte followed by zeros in
+	 * each block; if any block's tail is non-zero the EEPROM is fine.
+	 */
+	for (i = 1; i < len; i += block_size) {
+		if (memchr_inv(buf + i, '\0', min(block_size - 1, len - i)))
+			return false;
+	}
+	return true;
+}
+
+/* Cache the module's vendor name, part number, serial number and status
+ * (revision) byte from the EEPROM into the qsfp state.  Reads are
+ * best-effort: a failed read leaves the corresponding field untouched
+ * (empty, since struct qsfp is zero-allocated) instead of copying stale
+ * buffer contents as the previous implementation did.
+ */
+static void get_module_revision(struct qsfp *qsfp)
+{
+	int ret;
+	u8 buf[16] = {0};
+	char buf_16[16] = {'\0'};
+
+	ret = qsfp_read(qsfp, false, QSFP_VENDOR_NAME, buf_16, 16);
+	if (ret == (int)sizeof(buf_16)) {
+		buf_16[15] = '\0';
+		strcpy(qsfp->vendor_name, buf_16);
+	}
+
+	ret = qsfp_read(qsfp, false, QSFP_VENDOR_PN, buf_16, 16);
+	if (ret == (int)sizeof(buf_16)) {
+		buf_16[15] = '\0';
+		strcpy(qsfp->part_number, buf_16);
+	}
+
+	ret = qsfp_read(qsfp, false, QSFP_VENDOR_SN, buf_16, 16);
+	if (ret == (int)sizeof(buf_16)) {
+		buf_16[15] = '\0';
+		strcpy(qsfp->sn_number, buf_16);
+	}
+
+	ret = qsfp_read(qsfp, false, QSFP_STATUS, buf, 1);
+	if (ret == 1)
+		qsfp->module_revision = buf[0];
+}
+
+/* Select lower page 0 of the module EEPROM so the subsequent base ID
+ * reads come from a known page.  Returns 0 on success or a negative
+ * errno; the previous implementation ignored both I2C results.
+ */
+static int qsfp_select_eeprom_page(struct qsfp *qsfp)
+{
+	u8 page = 0;
+	int err;
+
+	err = qsfp_write(qsfp, false, QSFP_PAGE_SELECT_BYTE, &page, 1);
+	if (err != 1)
+		return err < 0 ? err : -EIO;
+
+	/* Read the page select byte back; this also gives slow modules
+	 * time to act on the page switch.
+	 */
+	err = qsfp_read(qsfp, false, QSFP_PAGE_SELECT_BYTE, &page, 1);
+	if (err != 1)
+		return err < 0 ? err : -EIO;
+
+	return 0;
+}
+
+static int qsfp_cotsworks_fixup_check(struct qsfp *qsfp, struct qsfp_eeprom_id *id)
+{
+	u8 page = 2;
+	u8 check;
+	int err;
+
+	/* Select upper page 2 before patching the ID fields.  Fix: the
+	 * original passed the literal (u8 *)0x2 as the data pointer, which
+	 * made qsfp_write source its byte from absolute address 2 instead
+	 * of writing the page number 2.
+	 */
+	err = qsfp_write(qsfp, false, QSFP_PAGE_SELECT_BYTE, &page, 1);
+
+	if (id->base.etile_qsfp_identifier != SFF8024_ID_QSFP_DD_INF_8628 ||
+	    id->base.etile_qsfp_ext_identifier != QSFP_EXT_IDENTIFIER ||
+	    id->base.etile_qsfp_connector_type !=
+		    SFF8024_QSFP_DD_CONNECTOR_LC) {
+		dev_warn(qsfp->dev,
+			 "Rewriting fiber module EEPROM with corrected values\n");
+		id->base.etile_qsfp_identifier = SFF8024_ID_QSFP_DD_INF_8628;
+		id->base.etile_qsfp_ext_identifier = QSFP_EXT_IDENTIFIER;
+		id->base.etile_qsfp_connector_type =
+			SFF8024_QSFP_DD_CONNECTOR_LC;
+		err = qsfp_write(qsfp, false, QSFP_PHYS_ID, &id->base, 3);
+		if (err != 3) {
+			dev_err(qsfp->dev,
+				"Failed to rewrite module EEPROM: %d\n", err);
+			return err;
+		}
+
+		/* Cotsworks modules have been found to require a delay between write operations. */
+		mdelay(50);
+
+		/* Update base structure checksum */
+		check = qsfp_check(&id->base, sizeof(id->base) - 1);
+		err = qsfp_write(qsfp, false, QSFP_CC_BASE, &check, 1);
+		if (err != 1) {
+			dev_err(qsfp->dev,
+				"Failed to update base structure checksum in fiber module EEPROM: %d\n",
+				err);
+			return err;
+		}
+	}
+	return 0;
+}
+
+/* Read and validate the inserted module's EEPROM base ID.  Returns 0 on
+ * success, -EAGAIN on transient I2C failure (caller retries via the
+ * module state machine), or a hard negative errno for unsupported
+ * modules.  @report controls whether read failures are logged (only the
+ * final retry reports, to avoid log spam while a slow module wakes up).
+ */
+static int qsfp_sm_mod_probe(struct qsfp *qsfp, bool report)
+{
+	/* SFP module inserted - read I2C data */
+	struct qsfp_eeprom_id id;
+	bool cotsworks_sfbg;
+	bool cotsworks;
+	int ret;
+
+	/* Some QSFP modules and also some Linux I2C drivers do not like reads
+	 * longer than 16 bytes, so read the EEPROM in chunks of 16 bytes at
+	 * a time.
+	 */
+	qsfp->i2c_block_size = 16;
+
+	/* NOTE(review): the return value of the page select is ignored;
+	 * a failed page select would make the base-ID read below come
+	 * from an arbitrary page — confirm whether it should be checked.
+	 */
+	qsfp_select_eeprom_page(qsfp);
+
+	ret = qsfp_read(qsfp, false, 0x0, &id.base, sizeof(id.base));
+	if (ret < 0) {
+		if (report)
+			dev_err(qsfp->dev, "failed to read EEPROM: %pe\n",
+				ERR_PTR(ret));
+		return -EAGAIN;
+	}
+
+	if (ret != sizeof(id.base)) {
+		dev_err(qsfp->dev, "EEPROM short read: %pe\n", ERR_PTR(ret));
+		return -EAGAIN;
+	}
+
+	/* Some SFP modules (e.g. Nokia 3FE46541AA) lock up if read from
+	 * address 0x51 is just one byte at a time. Also SFF-8472 requires
+	 * that EEPROM supports atomic 16bit read operation for diagnostic
+	 * fields, so do not switch to one byte reading at a time unless it
+	 * is really required and we have no other option.
+ */ + if (qsfp_id_needs_byte_io(qsfp, &id.base, sizeof(id.base))) { + dev_info(qsfp->dev, + "Detected broken RTL8672/RTL9601C emulated EEPROM\n"); + dev_info(qsfp->dev, + "Switching to reading EEPROM to one byte at a time\n"); + qsfp->i2c_block_size = 1; + + ret = qsfp_read(qsfp, false, 0, &id.base, sizeof(id.base)); + if (ret < 0) { + if (report) + dev_err(qsfp->dev, + "failed to read EEPROM: %pe\n", + ERR_PTR(ret)); + return -EAGAIN; + } + + if (ret != sizeof(id.base)) { + dev_err(qsfp->dev, "EEPROM short read: %pe\n", + ERR_PTR(ret)); + return -EAGAIN; + } + } + + /* Cotsworks do not seem to update the checksums when they + * do the final programming with the final module part number, + * serial number and date code. + */ + cotsworks = !memcmp(id.base.etile_qsfp_vendor_name, "COTSWORKS ", 16); + cotsworks_sfbg = !memcmp(id.base.etile_qsfp_vendor_pn, "SFBG", 4); + + /* Cotsworks SFF module EEPROM do not always have valid phys_id, + * phys_ext_id, and connector bytes. Rewrite SFF EEPROM bytes if + * Cotsworks PN matches and bytes are not correct. 
+ */
+	if (cotsworks && cotsworks_sfbg) {
+		ret = qsfp_cotsworks_fixup_check(qsfp, &id);
+		if (ret < 0)
+			return ret;
+	}
+
+	qsfp->id = id;
+
+	dev_info(qsfp->dev, "module %.*s %.*s rev %.*x sn %.*s dc %.*s\n",
+		 (int)sizeof(id.base.etile_qsfp_vendor_name),
+		 id.base.etile_qsfp_vendor_name,
+		 (int)sizeof(id.base.etile_qsfp_vendor_pn),
+		 id.base.etile_qsfp_vendor_pn,
+		 (int)sizeof(id.base.etile_qsfp_revision),
+		 id.base.etile_qsfp_revision,
+		 (int)sizeof(id.base.etile_qsfp_vendor_serial_number),
+		 id.base.etile_qsfp_vendor_serial_number,
+		 (int)sizeof(id.base.etile_qsfp_vendor_date_code),
+		 id.base.etile_qsfp_vendor_date_code);
+
+	/* Check whether we support this module */
+	if (!qsfp->type->module_supported(&id)) {
+		dev_err(qsfp->dev,
+			"module is not supported - phys id 0x%02x 0x%02x\n",
+			qsfp->id.base.etile_qsfp_identifier_1,
+			qsfp->id.base.etile_qsfp_ext_identifier);
+		return -EINVAL;
+	}
+
+	/* Parse the module power requirement */
+	ret = qsfp_module_parse_power(qsfp);
+
+	if (ret < 0)
+		return ret;
+
+	/* Fixed start-up/wait times; quirk fixups may adjust behaviour */
+	qsfp->module_t_start_up = T_START_UP;
+	qsfp->module_t_wait = T_WAIT;
+
+	qsfp->quirk = qsfp_lookup_quirk(&id);
+	if (qsfp->quirk && qsfp->quirk->fixup)
+		qsfp->quirk->fixup(qsfp);
+
+	return 0;
+}
+
+/* Tear down state for a removed module: detach it from the upstream bus
+ * (if it got that far), and clear the cached ID and power level.
+ */
+static void qsfp_sm_mod_remove(struct qsfp *qsfp)
+{
+	if (qsfp->sm_mod_state > QSFP_MOD_WAITDEV)
+		qsfp_module_remove(qsfp->qsfp_bus);
+
+	memset(&qsfp->id, 0, sizeof(qsfp->id));
+	qsfp->module_power_mW = 0;
+
+	dev_info(qsfp->dev, "module removed\n");
+}
+
+/* This state machine tracks the upstream's state */
+static void qsfp_sm_device(struct qsfp *qsfp, unsigned int event)
+{
+	int ret;
+	u8 buf[16] = {0};
+
+	/* NOTE(review): the RX/TX LOS read result is discarded; the read
+	 * appears to act only as a register-clearing / bus-keepalive side
+	 * effect — confirm whether it is actually required here.
+	 */
+	ret = qsfp_read(qsfp, false, QSFP_RX_TX_LOSS, buf, 1);
+
+	switch (qsfp->sm_dev_state) {
+	default:
+		if (event == QSFP_E_DEV_ATTACH)
+			qsfp->sm_dev_state = QSFP_DEV_DOWN;
+		break;
+
+	case QSFP_DEV_DOWN:
+		if (event == QSFP_E_DEV_DETACH)
+			qsfp->sm_dev_state = QSFP_DEV_DETACHED;
+		else if (event == QSFP_E_DEV_UP)
+			qsfp->sm_dev_state = QSFP_DEV_UP;
+		break;
+
+	case QSFP_DEV_UP:
+		if (event == QSFP_E_DEV_DETACH)
+			qsfp->sm_dev_state = QSFP_DEV_DETACHED;
+		else if (event == QSFP_E_DEV_DOWN)
+			qsfp->sm_dev_state = QSFP_DEV_DOWN;
+		break;
+	}
+}
+
+/* This state machine tracks the insert/remove state of the module, probes
+ * the on-board EEPROM, and sets up the power level.
+ */
+static void qsfp_sm_module(struct qsfp *qsfp, unsigned int event)
+{
+	int err;
+	int ret;
+	u8 buf[16] = {0};
+
+	/* NOTE(review): discarded-result read, same pattern as in
+	 * qsfp_sm_device() above — confirm it is needed.
+	 */
+	ret = qsfp_read(qsfp, false, QSFP_RX_TX_LOSS, buf, 1);
+
+	/* Modules have been found to require a delay between read and
+	 * write operations, so keep the I2C bus idle for a short time
+	 * before acting on the event.
+	 */
+
+	mdelay(50);
+
+	/* Handle remove event globally, it resets this state machine */
+	if (event == QSFP_E_REMOVE) {
+		if (qsfp->sm_mod_state > QSFP_MOD_PROBE)
+			qsfp_sm_mod_remove(qsfp);
+		qsfp_sm_mod_next(qsfp, QSFP_MOD_EMPTY, 0);
+		return;
+	}
+
+	/* Handle device detach globally */
+	if (qsfp->sm_dev_state < QSFP_DEV_DOWN &&
+	    qsfp->sm_mod_state > QSFP_MOD_WAITDEV) {
+		if (qsfp->module_power_mW > 1000 &&
+		    qsfp->sm_mod_state > QSFP_MOD_HPOWER)
+			qsfp_sm_mod_hpower(qsfp, false);
+		qsfp_sm_mod_next(qsfp, QSFP_MOD_WAITDEV, 0);
+		return;
+	}
+
+	switch (qsfp->sm_mod_state) {
+	default:
+		if (event == QSFP_E_INSERT) {
+			qsfp_sm_mod_next(qsfp, QSFP_MOD_PROBE, T_SERIAL);
+			qsfp->sm_mod_tries_init = R_PROBE_RETRY_INIT;
+			qsfp->sm_mod_tries = R_PROBE_RETRY_SLOW;
+		}
+		break;
+
+	case QSFP_MOD_PROBE:
+		/* Wait for T_PROBE_INIT to time out */
+		if (event != QSFP_E_TIMEOUT)
+			break;
+
+		/* Only the last slow retry reports read errors (report
+		 * argument), so a slow-to-wake module doesn't spam the log.
+		 */
+		err = qsfp_sm_mod_probe(qsfp, qsfp->sm_mod_tries == 1);
+		if (err == -EAGAIN) {
+			if (qsfp->sm_mod_tries_init &&
+			    --qsfp->sm_mod_tries_init) {
+				qsfp_sm_set_timer(qsfp, T_PROBE_RETRY_INIT);
+				break;
+			} else if (qsfp->sm_mod_tries && --qsfp->sm_mod_tries) {
+				if (qsfp->sm_mod_tries == R_PROBE_RETRY_SLOW - 1)
+					dev_warn(qsfp->dev,
+						 "please wait, module slow to respond\n");
+				qsfp_sm_set_timer(qsfp, T_PROBE_RETRY_SLOW);
+				break;
+			}
+		}
+ if (err < 0) { + qsfp_sm_mod_next(qsfp, QSFP_MOD_ERROR, 0); + break; + } + + qsfp_sm_mod_next(qsfp, QSFP_MOD_WAITDEV, 0); + fallthrough; + case QSFP_MOD_WAITDEV: + /* Ensure that the device is attached before proceeding */ + if (qsfp->sm_dev_state < QSFP_DEV_DOWN) + break; + + /* Report the module insertion to the upstream device */ + err = qsfp_module_insert(qsfp->qsfp_bus, &qsfp->id, + qsfp->quirk); + if (err < 0) { + qsfp_sm_mod_next(qsfp, QSFP_MOD_ERROR, 0); + break; + } + + /* If this is a power level 1 module, we are done */ + if (qsfp->module_power_mW <= 1000) + goto insert; + + qsfp_sm_mod_next(qsfp, QSFP_MOD_HPOWER, 0); + fallthrough; + case QSFP_MOD_HPOWER: + /* Enable high power mode */ + err = qsfp_sm_mod_hpower(qsfp, true); + if (err < 0) { + if (err != -EAGAIN) { + qsfp_module_remove(qsfp->qsfp_bus); + qsfp_sm_mod_next(qsfp, QSFP_MOD_ERROR, 0); + } else { + qsfp_sm_set_timer(qsfp, T_PROBE_RETRY_INIT); + } + break; + } + + qsfp_sm_mod_next(qsfp, QSFP_MOD_WAITPWR, T_HPOWER_LEVEL); + break; + + case QSFP_MOD_WAITPWR: + /* Wait for T_HPOWER_LEVEL to time out */ + if (event != QSFP_E_TIMEOUT) + break; + + insert: + qsfp_sm_mod_next(qsfp, QSFP_MOD_PRESENT, 0); + break; + + case QSFP_MOD_PRESENT: + case QSFP_MOD_ERROR: + break; + } +} + +static void qsfp_sm_main(struct qsfp *qsfp, unsigned int event) +{ + unsigned long timeout; + int ret; + + /* Some events are global */ + if (qsfp->sm_state != QSFP_S_DOWN && + (qsfp->sm_mod_state != QSFP_MOD_PRESENT || + qsfp->sm_dev_state != QSFP_DEV_UP)) { + if (qsfp->sm_state == QSFP_S_LINK_UP && + qsfp->sm_dev_state == QSFP_DEV_UP) + qsfp_sm_link_down(qsfp); + if (qsfp->sm_state > QSFP_S_INIT) + qsfp_module_stop(qsfp->qsfp_bus); + if (qsfp->mod_phy) + qsfp_sm_phy_detach(qsfp); + if (qsfp->i2c_mii) + qsfp_i2c_mdiobus_destroy(qsfp); + qsfp_module_tx_disable(qsfp); + qsfp_soft_stop_poll(qsfp); + qsfp_sm_next(qsfp, QSFP_S_DOWN, 0); + return; + } + + /* The main state machine */ + switch (qsfp->sm_state) { + case 
QSFP_S_DOWN: + if (qsfp->sm_mod_state != QSFP_MOD_PRESENT || + qsfp->sm_dev_state != QSFP_DEV_UP) + break; + + /* Initialise the fault clearance retries */ + qsfp->sm_fault_retries = N_FAULT_INIT; + + /* We need to check the TX_FAULT state, which is not defined + * while TX_DISABLE is asserted. The earliest we want to do + * anything (such as probe for a PHY) is 50ms (or more on + * specific modules). + */ + qsfp_sm_next(qsfp, QSFP_S_WAIT, qsfp->module_t_wait); + break; + + case QSFP_S_WAIT: + if (event != QSFP_E_TIMEOUT) + break; + + if (qsfp->id.base.etile_qsfp_options_3 & + QSFP_OPTIONS_TX_FAULT) { + /* Wait up to t_init (SFF-8472) or t_start_up (SFF-8431) + * from the TX_DISABLE deassertion for the module to + * initialise, which is indicated by TX_FAULT + * deasserting. + */ + timeout = qsfp->module_t_start_up; + if (timeout > qsfp->module_t_wait) + timeout -= qsfp->module_t_wait; + else + timeout = 1; + + qsfp_sm_next(qsfp, QSFP_S_INIT, timeout); + } else { + /* TX_FAULT is not asserted, assume the module has + * finished initialising. + */ + goto init_done; + } + break; + + case QSFP_S_INIT: + if (event == QSFP_E_TIMEOUT && + qsfp->id.base.etile_qsfp_options_3 & QSFP_OPTIONS_TX_FAULT) { + /* TX_FAULT is still asserted after t_init + * or t_start_up, so assume there is a fault. + */ + qsfp_sm_fault(qsfp, QSFP_S_INIT_TX_FAULT, + qsfp->sm_fault_retries == N_FAULT_INIT); + } else if (event == QSFP_E_TIMEOUT || event == QSFP_E_TX_CLEAR) { + init_done: + /* Create mdiobus and start trying for PHY */ + ret = qsfp_sm_add_mdio_bus(qsfp); + if (ret < 0) { + qsfp_sm_next(qsfp, QSFP_S_FAIL, 0); + break; + } + qsfp->sm_phy_retries = R_PHY_RETRY; + goto phy_probe; + } + break; + + case QSFP_S_INIT_PHY: + if (event != QSFP_E_TIMEOUT) + break; + phy_probe: + /* TX_FAULT deasserted or we timed out with TX_FAULT + * clear. Probe for the PHY and check the LOS state. 
+ */ + ret = qsfp_sm_probe_for_phy(qsfp); + if (ret == -ENODEV) { + if (--qsfp->sm_phy_retries) { + qsfp_sm_next(qsfp, QSFP_S_INIT_PHY, T_PHY_RETRY); + break; + } else { + dev_info(qsfp->dev, "no PHY detected\n"); + } + } else if (ret) { + qsfp_sm_next(qsfp, QSFP_S_FAIL, 0); + break; + } + + if (qsfp_module_start(qsfp->qsfp_bus)) { + qsfp_sm_next(qsfp, QSFP_S_FAIL, 0); + break; + } + qsfp_sm_link_check_los(qsfp); + + /* Reset the fault retry count */ + qsfp->sm_fault_retries = N_FAULT; + break; + + case QSFP_S_INIT_TX_FAULT: + if (event == QSFP_E_TIMEOUT) { + qsfp_module_tx_fault_reset(qsfp); + qsfp_sm_next(qsfp, QSFP_S_INIT, qsfp->module_t_start_up); + } + break; + + case QSFP_S_WAIT_LOS: + if (event == QSFP_E_TX_FAULT) + qsfp_sm_fault(qsfp, QSFP_S_TX_FAULT, true); + else if (qsfp_los_event_inactive(qsfp, event)) + qsfp_sm_link_up(qsfp); + break; + + case QSFP_S_LINK_UP: + if (event == QSFP_E_TX_FAULT) { + qsfp_sm_link_down(qsfp); + qsfp_sm_fault(qsfp, QSFP_S_TX_FAULT, true); + } else if (qsfp_los_event_active(qsfp, event)) { + qsfp_sm_link_down(qsfp); + qsfp_sm_next(qsfp, QSFP_S_WAIT_LOS, 0); + } + break; + + case QSFP_S_TX_FAULT: + if (event == QSFP_E_TIMEOUT) { + qsfp_module_tx_fault_reset(qsfp); + qsfp_sm_next(qsfp, QSFP_S_REINIT, qsfp->module_t_start_up); + } + break; + + case QSFP_S_REINIT: + if (event == QSFP_E_TIMEOUT && qsfp->state & QSFP_S_TX_FAULT) { + qsfp_sm_fault(qsfp, QSFP_S_TX_FAULT, false); + } else if (event == QSFP_E_TIMEOUT || event == QSFP_E_TX_CLEAR) { + dev_info(qsfp->dev, "module transmit fault recovered\n"); + qsfp_sm_link_check_los(qsfp); + } + break; + + case QSFP_S_TX_DISABLE: + break; + } +} + +static void qsfp_sm_event(struct qsfp *qsfp, unsigned int event) +{ + mutex_lock(&qsfp->sm_mutex); + + dev_dbg(qsfp->dev, "SM: enter %s:%s:%s event %s\n", + mod_state_to_str(qsfp->sm_mod_state), + dev_state_to_str(qsfp->sm_dev_state), + sm_state_to_str(qsfp->sm_state), + event_to_str(event)); + + qsfp_sm_device(qsfp, event); + 
qsfp_sm_module(qsfp, event); + qsfp_sm_main(qsfp, event); + + dev_dbg(qsfp->dev, "SM: exit %s:%s:%s\n", + mod_state_to_str(qsfp->sm_mod_state), + dev_state_to_str(qsfp->sm_dev_state), + sm_state_to_str(qsfp->sm_state)); + + mutex_unlock(&qsfp->sm_mutex); +} + +static void qsfp_attach(struct qsfp *qsfp) +{ + qsfp_sm_event(qsfp, QSFP_E_DEV_ATTACH); +} + +static void qsfp_detach(struct qsfp *qsfp) +{ + qsfp_sm_event(qsfp, QSFP_E_DEV_DETACH); +} + +static void qsfp_start(struct qsfp *qsfp) +{ + qsfp_sm_event(qsfp, QSFP_E_DEV_UP); +} + +static void qsfp_stop(struct qsfp *qsfp) +{ + qsfp_sm_event(qsfp, QSFP_E_DEV_DOWN); +} + +static int qsfp_module_info(struct qsfp *qsfp, struct ethtool_modinfo *modinfo) +{ + /* locking... and check module is present */ + + if (qsfp->id.base.etile_qsfp_spec_compliance_1[0] && + !(qsfp->id.base.etile_qsfp_diag_monitor & QSFP_DIAGMON_ADDRMODE)) { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } + + return 0; +} + +static int qsfp_module_eeprom(struct qsfp *qsfp, struct ethtool_eeprom *ee, + u8 *data) +{ + unsigned int first, last, len; + int ret; + + if (ee->len == 0) + return -EINVAL; + + first = ee->offset; + last = ee->offset + ee->len; + if (first < ETH_MODULE_SFF_8079_LEN) { + len = min_t(unsigned int, last, ETH_MODULE_SFF_8079_LEN); + len -= first; + + ret = qsfp_read(qsfp, false, first, data, len); + if (ret < 0) + return ret; + + first += len; + data += len; + } + if (first < ETH_MODULE_SFF_8472_LEN && last > ETH_MODULE_SFF_8079_LEN) { + len = min_t(unsigned int, last, ETH_MODULE_SFF_8472_LEN); + len -= first; + first -= ETH_MODULE_SFF_8079_LEN; + + ret = qsfp_read(qsfp, true, first, data, len); + if (ret < 0) + return ret; + } + + return 0; +} + +static const struct qsfp_socket_ops qsfp_module_ops = { + .attach = qsfp_attach, + .detach = qsfp_detach, + .start = qsfp_start, + .stop = 
qsfp_stop, + .module_info = qsfp_module_info, + .module_eeprom = qsfp_module_eeprom, +}; + +static void qsfp_timeout(struct work_struct *work) +{ + struct qsfp *qsfp = container_of(work, struct qsfp, timeout.work); + + rtnl_lock(); + qsfp_sm_event(qsfp, QSFP_E_TIMEOUT); + rtnl_unlock(); +} + +static void qsfp_check_state(struct qsfp *qsfp) +{ + unsigned int state, changed; + int ret; + u8 buf[16] = {0}; + static unsigned int prv_buf, prv_buf_rx; + bool flag = false; + + mutex_lock(&qsfp->st_mutex); + state = qsfp_get_state(qsfp); + changed = state ^ qsfp->state; + changed &= QSFP_F_PRESENT; + qsfp->state = state; + + rtnl_lock(); + if (changed & QSFP_F_PRESENT) { + qsfp_sm_event(qsfp, state & QSFP_F_PRESENT ? QSFP_E_INSERT : + QSFP_E_REMOVE); + } + + ret = qsfp_read(qsfp, false, QSFP_OPTIONS, buf, 1); + if (buf[0] & QSFP_OPTIONS_TX_LOSS_SIGNAL) { + ret = qsfp_read(qsfp, false, QSFP_RX_TX_LOSS, buf, 1); + if (prv_buf != buf[0]) { + prv_buf = buf[0]; + flag = true; + } + if (flag) { + qsfp_sm_event(qsfp, state & QSFP_OPTIONS_TX_LOSS_SIGNAL ? + QSFP_E_TX_LOS : + QSFP_E_RX_LOS); + } + } else { + ret = qsfp_read(qsfp, false, QSFP_RX_TX_LOSS, buf, 1); + buf[0] = buf[0] & 0xf; + if (prv_buf_rx != buf[0]) { + prv_buf_rx = buf[0]; + flag = true; + } + if (flag) + qsfp_sm_event(qsfp, state & QSFP_OPTIONS_TX_LOSS_SIGNAL ? 
+ QSFP_E_TX_LOS : + QSFP_E_RX_LOS); + } + rtnl_unlock(); + mutex_unlock(&qsfp->st_mutex); +} + +static irqreturn_t qsfp_irq(int irq, void *data) +{ + struct qsfp *qsfp = data; + + qsfp_check_state(qsfp); + + return IRQ_HANDLED; +} + +static void qsfp_poll(struct work_struct *work) +{ + struct qsfp *qsfp = container_of(work, struct qsfp, poll.work); + + qsfp_check_state(qsfp); + + if (qsfp->state_soft_mask & (QSFP_OPTIONS_TX_LOSS_SIGNAL | QSFP_OPTIONS_TX_FAULT) || + qsfp->need_poll) + mod_delayed_work(system_wq, &qsfp->poll, poll_jiffies); +} + +static struct qsfp *qsfp_alloc(struct device *dev) +{ + struct qsfp *qsfp; + + qsfp = kzalloc(sizeof(*qsfp), GFP_KERNEL); + if (!qsfp) + return ERR_PTR(-ENOMEM); + + qsfp->dev = dev; + + mutex_init(&qsfp->sm_mutex); + mutex_init(&qsfp->st_mutex); + INIT_DELAYED_WORK(&qsfp->poll, qsfp_poll); + INIT_DELAYED_WORK(&qsfp->timeout, qsfp_timeout); + + return qsfp; +} + +static void qsfp_cleanup(void *data) +{ + struct qsfp *qsfp = data; + + cancel_delayed_work_sync(&qsfp->poll); + cancel_delayed_work_sync(&qsfp->timeout); + if (qsfp->i2c_mii) { + mdiobus_unregister(qsfp->i2c_mii); + mdiobus_free(qsfp->i2c_mii); + } + if (qsfp->i2c) + i2c_put_adapter(qsfp->i2c); + kfree(qsfp); +} + +static int qsfp_probe(struct platform_device *pdev) +{ + const struct sff_data *sff; + struct i2c_adapter *i2c; + char *qsfp_irq_name; + struct qsfp *qsfp; + int err, i; + + qsfp = qsfp_alloc(&pdev->dev); + if (IS_ERR(qsfp)) + return PTR_ERR(qsfp); + + platform_set_drvdata(pdev, qsfp); + + err = devm_add_action(qsfp->dev, qsfp_cleanup, qsfp); + if (err < 0) + return err; + + sff = qsfp->type = &qsfp_data; + + if (pdev->dev.of_node) { + struct device_node *node = pdev->dev.of_node; + const struct of_device_id *id; + struct device_node *np; + + id = of_match_node(qsfp_of_match, node); + if (WARN_ON(!id)) + return -EINVAL; + + sff = qsfp->type = id->data; + + np = of_parse_phandle(node, "i2c-bus", 0); + if (!np) { + dev_err(qsfp->dev, "missing 'i2c-bus' 
property\n"); + return -ENODEV; + } + + i2c = of_find_i2c_adapter_by_node(np); + of_node_put(np); + } else if (has_acpi_companion(&pdev->dev)) { + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); + struct fwnode_handle *fw = acpi_fwnode_handle(adev); + struct fwnode_reference_args args; + struct acpi_handle *acpi_handle; + int ret; + + ret = acpi_node_get_property_reference(fw, "i2c-bus", 0, &args); + if (ret || !is_acpi_device_node(args.fwnode)) { + dev_err(&pdev->dev, "missing 'i2c-bus' property\n"); + return -ENODEV; + } + + acpi_handle = ACPI_HANDLE_FWNODE(args.fwnode); + i2c = i2c_acpi_find_adapter_by_handle(acpi_handle); + } else { + return -EINVAL; + } + + if (!i2c) + return -EPROBE_DEFER; + + err = qsfp_i2c_configure(qsfp, i2c); + if (err < 0) { + i2c_put_adapter(i2c); + return err; + } + + for (i = 0; i < GPIO_MAX; i++) + if (sff->gpios & BIT(i)) { + qsfp->gpio[i] = devm_gpiod_get_optional(qsfp->dev, + gpio_of_names[i], gpio_flags[i]); + if (IS_ERR(qsfp->gpio[i])) + return PTR_ERR(qsfp->gpio[i]); + } + + qsfp->state_hw_mask = QSFP_F_PRESENT; + + qsfp->get_state = qsfp_gpio_get_state; + qsfp->set_state = qsfp_gpio_set_state; + + /* Modules that have no detect signal are always present */ + if (!(qsfp->gpio[GPIO_MODULE_PRESENT])) + qsfp->get_state = sff_gpio_get_state; + + device_property_read_u32(&pdev->dev, "maximum-power-milliwatt", + &qsfp->max_power_mW); + if (!qsfp->max_power_mW) + qsfp->max_power_mW = 1000; + + dev_info(qsfp->dev, "Host maximum power %u.%uW\n", + qsfp->max_power_mW / 1000, (qsfp->max_power_mW / 100) % 10); + + /* Get the initial state, and always signal TX disable, + * since the network interface will not be up. 
+ */ + qsfp->state = qsfp_get_state(qsfp) | QSFP_OPTIONS_TX_DISABLE; + + if (qsfp->state & QSFP_F_PRESENT) { + qsfp->state |= QSFP_SELECT; + qsfp->state = qsfp_get_state(qsfp) | QSFP_OPTIONS_TX_DISABLE; + qsfp_set_state(qsfp, qsfp->state); + rtnl_lock(); + qsfp_sm_event(qsfp, QSFP_E_INSERT); + rtnl_unlock(); + } else { + dev_err(&pdev->dev, "qsfp is not present\n"); + } + + qsfp->gpio_irq[GPIO_MODULE_INTERRUPT] = + gpiod_to_irq(qsfp->gpio[GPIO_MODULE_INTERRUPT]); + if (qsfp->gpio_irq[GPIO_MODULE_INTERRUPT] < 0) { + qsfp->gpio_irq[GPIO_MODULE_INTERRUPT] = 0; + qsfp->need_poll = true; + } + + qsfp_irq_name = devm_kasprintf(qsfp->dev, GFP_KERNEL, "%s-%s", + dev_name(qsfp->dev), + gpio_of_names[GPIO_MODULE_INTERRUPT]); + + if (!qsfp_irq_name) + return -ENOMEM; + + err = devm_request_threaded_irq(qsfp->dev, qsfp->gpio_irq[GPIO_MODULE_INTERRUPT], + NULL, qsfp_irq, + IRQF_ONESHOT | + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + qsfp_irq_name, qsfp); + + if (err) { + qsfp->gpio_irq[GPIO_MODULE_INTERRUPT] = 0; + qsfp->need_poll = true; + } + + if (qsfp->need_poll) + mod_delayed_work(system_wq, &qsfp->poll, poll_jiffies); + + /* We could have an issue in cases no Tx disable pin is available or + * wired as modules using a laser as their light source will continue to + * be active when the fiber is removed. This could be a safety issue and + * we should at least warn the user about that. 
+ */ + if (qsfp->id.base.etile_qsfp_options_3 & QSFP_OPTIONS_TX_DISABLE) + dev_warn(qsfp->dev, "No tx_disable pin: qsfp modules will always be emitting.\n"); + + qsfp->qsfp_bus = + qsfp_register_socket(qsfp->dev, qsfp, &qsfp_module_ops); + if (!qsfp->qsfp_bus) + return -ENOMEM; + + get_module_revision(qsfp); + + return 0; +} + +static void qsfp_remove(struct platform_device *pdev) +{ + struct qsfp *qsfp = platform_get_drvdata(pdev); + + qsfp_unregister_socket(qsfp->qsfp_bus); + + rtnl_lock(); + qsfp_sm_event(qsfp, QSFP_E_REMOVE); + rtnl_unlock(); +} + +static void qsfp_shutdown(struct platform_device *pdev) +{ + struct qsfp *qsfp = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < GPIO_MAX; i++) { + if (!qsfp->gpio_irq[i]) + continue; + + devm_free_irq(qsfp->dev, qsfp->gpio_irq[i], qsfp); + } + + cancel_delayed_work_sync(&qsfp->poll); + cancel_delayed_work_sync(&qsfp->timeout); +} + +static struct platform_driver qsfp_driver = { + .probe = qsfp_probe, + .remove_new = qsfp_remove, + .shutdown = qsfp_shutdown, + .driver = { + .name = "qsfp", + .of_match_table = qsfp_of_match, + }, +}; + +static int qsfp_init(void) +{ + poll_jiffies = msecs_to_jiffies(100); + + return platform_driver_register(&qsfp_driver); +} +module_init(qsfp_init); + +static void qsfp_exit(void) +{ + platform_driver_unregister(&qsfp_driver); +} +module_exit(qsfp_exit); + +MODULE_ALIAS("platform:qsfp"); +MODULE_AUTHOR("Mun Yew Tham"); +MODULE_LICENSE("GPL v2"); \ No newline at end of file diff --git a/drivers/net/phy/qsfp_bus.c b/drivers/net/phy/qsfp_bus.c new file mode 100644 index 0000000000000..ac91098907b60 --- /dev/null +++ b/drivers/net/phy/qsfp_bus.c @@ -0,0 +1,778 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/** + * struct qsfp_bus - internal representation of a qsfp bus + */ +struct qsfp_bus { + /* private: */ + struct kref kref; + struct list_head node; + struct fwnode_handle 
*fwnode; + + const struct qsfp_socket_ops *socket_ops; + struct device *qsfp_dev; + struct qsfp *qsfp; + const struct qsfp_quirk *qsfp_quirk; + + const struct qsfp_upstream_ops *upstream_ops; + void *upstream; + struct phy_device *phydev; + + bool registered; + bool started; +}; + +/** + * qsfp_parse_port() - Parse the EEPROM base ID, setting the port type + * @bus: a pointer to the &struct sfp_bus structure for the sfp module + * @id: a pointer to the module's &struct sfp_eeprom_id + * @support: optional pointer to an array of unsigned long for the + * ethtool support mask + * + * Parse the EEPROM identification given in @id, and return one of + * %PORT_TP, %PORT_FIBRE or %PORT_OTHER. If @support is non-%NULL, + * also set the ethtool %ETHTOOL_LINK_MODE_xxx_BIT corresponding with + * the connector type. + * + * If the port type is not known, returns %PORT_OTHER. + */ +int qsfp_parse_port(struct qsfp_bus *bus, const struct qsfp_eeprom_id *id, + unsigned long *support) +{ + int port; + + /* port is the physical connector, set this from the connector field. 
*/ + switch (id->base.etile_qsfp_connector_type) { + case SFF8024_QSFP_DD_CONNECTOR_SC: + case SFF8024_QSFP_DD_CONNECTOR_FIBERJACK: + case SFF8024_QSFP_DD_CONNECTOR_LC: + case SFF8024_QSFP_DD_CONNECTOR_MT_RJ: + case SFF8024_QSFP_DD_CONNECTOR_MU: + case SFF8024_QSFP_DD_CONNECTOR_OPTICAL_PIGTAIL: + case SFF8024_QSFP_DD_CONNECTOR_MPO_1X12: + case SFF8024_QSFP_DD_CONNECTOR_MPO_2X16: + port = PORT_FIBRE; + break; + + case SFF8024_QSFP_DD_CONNECTOR_RJ45: + port = PORT_TP; + break; + + case SFF8024_QSFP_DD_CONNECTOR_COPPER_PIGTAIL: + port = PORT_DA; + break; + + case SFF8024_QSFP_DD_CONNECTOR_UNSPEC: + port = PORT_TP; + break; + + case SFF8024_QSFP_DD_CONNECTOR_SG: /* guess */ + case SFF8024_QSFP_DD_CONNECTOR_HSSDC_II: + case SFF8024_QSFP_DD_CONNECTOR_NOSEPARATE: + case SFF8024_QSFP_DD_CONNECTOR_MXC_2X16: + /*supporting connector type with extended + *spec for both electrical and optical interface + */ + if (id->base.etile_qsfp_ext_spec_compliance & + SFF8024_QSFP_ECC_100G_25GAUI_C2M_AOC_LOW_BER) { + port = PORT_AUI; + break; + } + port = PORT_OTHER; + break; + default: + dev_warn(bus->qsfp_dev, "QSFP: unknown connector id 0x%02x\n", + id->base.etile_qsfp_connector_type); + port = PORT_OTHER; + break; + } + + if (support) { + switch (port) { + case PORT_FIBRE: + phylink_set(support, FIBRE); + break; + + case PORT_TP: + phylink_set(support, TP); + break; + + /*added support to AUI(Attachment Unit Interface) port*/ + case PORT_AUI: + phylink_set(support, AUI); + break; + } + } + + return port; +} +EXPORT_SYMBOL_GPL(qsfp_parse_port); + +/** + * qsfp_may_have_phy() - indicate whether the module may have a PHY + * @bus: a pointer to the &struct qsfp_bus structure for the qsfp module + * @id: a pointer to the module's &struct qsfp_eeprom_id + * + * Parse the EEPROM identification given in @id, and return whether + * this module may have a PHY. 
+ */ +bool qsfp_may_have_phy(struct qsfp_bus *bus, const struct qsfp_eeprom_id *id) +{ + if (id->base.etile_qsfp_identifier != SFF8024_ID_QSFP_DD_INF_8628) { + switch (id->base.etile_qsfp_spec_compliance_1[0]) { + case SFF8636_QSFP_ECC_40G_ACTIVE_CABLE: + case SFF8636_QSFP_ECC_40GBASE_LR4: + case SFF8636_QSFP_ECC_40GBASE_SR4: + case SFF8636_QSFP_ECC_40GBASE_CR4: + case SFF8636_QSFP_ECC_10GBASE_SR: + case SFF8636_QSFP_ECC_10GBASE_LR: + case SFF8636_QSFP_ECC_10GBASE_LRM: + case SFF8636_QSFP_ECC_EXTENDED: + return true; + default: + break; + } + } + + return false; +} +EXPORT_SYMBOL_GPL(qsfp_may_have_phy); + +/** + * qsfp_parse_support() - Parse the eeprom id for supported link modes + * @bus: a pointer to the &struct qsfp_bus structure for the qsfp module + * @id: a pointer to the module's &struct qsfp_eeprom_id + * @support: pointer to an array of unsigned long for the ethtool support mask + * @interfaces: pointer to an array of unsigned long for phy interface modes + * mask + * + * Parse the EEPROM identification information and derive the supported + * ethtool link modes for the module. + */ +void qsfp_parse_support(struct qsfp_bus *bus, const struct qsfp_eeprom_id *id, + unsigned long *support, unsigned long *interfaces) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, }; + + /* Set ethtool support from the compliance fields. 
*/ + if (id->base.etile_qsfp_spec_compliance_1[0] & SFF8636_QSFP_ECC_10GBASE_SR) { + phylink_set(modes, 10000baseSR_Full); + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + } + + if (id->base.etile_qsfp_spec_compliance_1[0] & SFF8636_QSFP_ECC_10GBASE_LR) { + phylink_set(modes, 10000baseSR_Full); + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + } + + if (id->base.etile_qsfp_spec_compliance_1[0] & SFF8636_QSFP_ECC_10GBASE_LRM) { + phylink_set(modes, 10000baseLRM_Full); + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + } + + if ((id->base.etile_qsfp_spec_compliance_1[3] & SFF8024_QSFP_SCC_1000BASE_SX) || + (id->base.etile_qsfp_spec_compliance_1[3] & SFF8024_QSFP_SCC_1000BASE_LX) || + (id->base.etile_qsfp_spec_compliance_1[3] & SFF8024_QSFP_SCC_1000BASE_CX)) { + phylink_set(modes, 1000baseX_Full); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces); + } + + if (id->base.etile_qsfp_spec_compliance_1[3] & SFF8024_QSFP_SCC_1000BASE_T) { + phylink_set(modes, 1000baseT_Half); + phylink_set(modes, 1000baseT_Full); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, interfaces); + } + + switch (id->base.etile_qsfp_ext_spec_compliance) { + case SFF8024_QSFP_ECC_UNSPEC: + phylink_set(modes, 25000baseKR_Full); + __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces); + break; + case SFF8024_QSFP_ECC_100GBASE_SR4_25GBASE_SR: + phylink_set(modes, 100000baseSR4_Full); + phylink_set(modes, 25000baseSR_Full); + __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces); + break; + case SFF8024_QSFP_ECC_100GBASE_LR4_25GBASE_LR: + case SFF8024_QSFP_ECC_100GBASE_ER4_25GBASE_ER: + phylink_set(modes, 100000baseLR4_ER4_Full); + break; + case SFF8024_QSFP_ECC_100GBASE_CR4: + phylink_set(modes, 100000baseCR4_Full); + fallthrough; + case SFF8024_QSFP_ECC_25GBASE_CR_S: + case SFF8024_QSFP_ECC_25GBASE_CR_N: + phylink_set(modes, 25000baseCR_Full); + __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces); + break; + case 
SFF8024_QSFP_ECC_10GBASE_T_SFI: + case SFF8024_QSFP_ECC_10GBASE_T_SR: + phylink_set(modes, 10000baseT_Full); + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + break; + case SFF8024_QSFP_ECC_5GBASE_T: + phylink_set(modes, 5000baseT_Full); + __set_bit(PHY_INTERFACE_MODE_5GBASER, interfaces); + break; + case SFF8024_QSFP_ECC_2_5GBASE_T: + phylink_set(modes, 2500baseT_Full); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces); + break; + case SFF8024_QSFP_ECC_100G_25GAUI_C2M_AOC_LOW_BER: + phylink_set(modes, 100000baseKR4_Full); + phylink_set(modes, 25000baseKR_Full); + __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces); + break; + case SFF8024_QSFP_ECC_100GBASE_SR10: + phylink_set(modes, 100000baseSR4_Full); + break; + case SFF8024_QSFP_ECC_100G_25GAUI_C2M_AOC: + phylink_set(modes, 100000baseSR4_Full); + phylink_set(modes, 25000baseSR_Full); + __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces); + break; + case SFF8024_QSFP_ECC_100G_CWDM4: + phylink_set(modes, 100000baseCR4_Full); + break; + case SFF8024_QSFP_ECC_100G_PSM4: + phylink_set(modes, 100000baseCR4_Full); + break; + case SFF8024_QSFP_ECC_10M: + phylink_set(modes, 10baseT_Full); + break; + case SFF8024_QSFP_ECC_40GBASE_ER: + phylink_set(modes, 40000baseLR4_Full); + break; + case SFF8024_QSFP_ECC_10GBASE_SR: + phylink_set(modes, 10000baseSR_Full); + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + break; + case SFF8024_QSFP_ECC_100G_CLR4: + phylink_set(modes, 100000baseLR4_ER4_Full); + break; + case SFF8024_QSFP_ECC_100G_ACC_25G_ACC: + phylink_set(modes, 100000baseCR4_Full); + phylink_set(modes, 25000baseCR_Full); + __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces); + break; + default: + dev_warn(bus->qsfp_dev, + "Unknown/unsupported extended compliance code: 0x%02x\n", + id->base.etile_qsfp_ext_spec_compliance); + break; + } + + /* For fibre channel QSFP, derive possible BaseX modes */ + if ((id->base.etile_qsfp_spec_compliance_1[7] & SFF8024_QSFP_SCC_FC_SPEED_100) || + 
(id->base.etile_qsfp_spec_compliance_1[7] & SFF8024_QSFP_SCC_FC_SPEED_200) || + (id->base.etile_qsfp_spec_compliance_1[7] & SFF8024_QSFP_SCC_FC_SPEED_400)) { + phylink_set(modes, 2500baseX_Full); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces); + } + + /* If we haven't discovered any modes that this module supports, try + * the bitrate to determine supported modes. Some BiDi modules (eg, + * 1310nm/1550nm) are not 1000BASE-BX compliant due to the differing + * wavelengths, so do not set any transceiver bits. + * + * Do the same for modules supporting 2500BASE-X. Note that some + * modules use 2500Mbaud rather than 3100 or 3200Mbaud for + * 2500BASE-X, so we allow some slack here. + */ + //if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) + //if (bus->qsfp_quirk && bus->qsfp_quirk->modes) + // bus->qsfp_quirk->modes(id, modes, interfaces); + + linkmode_or(support, support, modes); + //bitmap_or(support, support, modes, __ETHTOOL_LINK_MODE_MASK_NBITS); + + phylink_set(support, Autoneg); + phylink_set(support, Pause); + phylink_set(support, Asym_Pause); +} +EXPORT_SYMBOL_GPL(qsfp_parse_support); + +/** + * qsfp_select_interface() - Select appropriate phy_interface_t mode + * @bus: a pointer to the &struct qsfp_bus structure for the qsfp module + * @link_modes: ethtool link modes mask + * + * Derive the phy_interface_t mode for the QSFP module from the link + * modes mask. 
+ */ +phy_interface_t qsfp_select_interface(struct qsfp_bus *bus, + unsigned long *link_modes) +{ + if (phylink_test(link_modes, 25000baseCR_Full) || + phylink_test(link_modes, 25000baseKR_Full) || + phylink_test(link_modes, 25000baseSR_Full)) + return PHY_INTERFACE_MODE_25GBASER; + + if (phylink_test(link_modes, 10000baseCR_Full) || + phylink_test(link_modes, 10000baseSR_Full) || + phylink_test(link_modes, 10000baseLR_Full) || + phylink_test(link_modes, 10000baseLRM_Full) || + phylink_test(link_modes, 10000baseER_Full) || + phylink_test(link_modes, 10000baseT_Full)) + return PHY_INTERFACE_MODE_10GBASER; + + if (phylink_test(link_modes, 5000baseT_Full)) + return PHY_INTERFACE_MODE_5GBASER; + + if (phylink_test(link_modes, 2500baseX_Full)) + return PHY_INTERFACE_MODE_2500BASEX; + + if (phylink_test(link_modes, 1000baseT_Half) || + phylink_test(link_modes, 1000baseT_Full)) + return PHY_INTERFACE_MODE_SGMII; + + if (phylink_test(link_modes, 1000baseX_Full)) + return PHY_INTERFACE_MODE_1000BASEX; + + if (phylink_test(link_modes, 100baseFX_Full)) + return PHY_INTERFACE_MODE_100BASEX; + + return PHY_INTERFACE_MODE_NA; +} +EXPORT_SYMBOL_GPL(qsfp_select_interface); + +static LIST_HEAD(qsfp_buses); +static DEFINE_MUTEX(qsfp_mutex); + +static const struct qsfp_upstream_ops *qsfp_get_upstream_ops(struct qsfp_bus *bus) +{ + return bus->registered ? 
bus->upstream_ops : NULL; +} + +static struct qsfp_bus *qsfp_bus_get(struct fwnode_handle *fwnode) +{ + struct qsfp_bus *qsfp, *new, *found = NULL; + + new = kzalloc(sizeof(*new), GFP_KERNEL); + + mutex_lock(&qsfp_mutex); + + list_for_each_entry(qsfp, &qsfp_buses, node) { + if (qsfp->fwnode == fwnode) { + kref_get(&qsfp->kref); + found = qsfp; + break; + } + } + + if (!found && new) { + kref_init(&new->kref); + new->fwnode = fwnode; + list_add(&new->node, &qsfp_buses); + found = new; + new = NULL; + } + + mutex_unlock(&qsfp_mutex); + + kfree(new); + + return found; +} + +static void qsfp_bus_release(struct kref *kref) +{ + struct qsfp_bus *bus = container_of(kref, struct qsfp_bus, kref); + + list_del(&bus->node); + mutex_unlock(&qsfp_mutex); + kfree(bus); +} + +/** + * qsfp_bus_put() - put a reference on the &struct qsfp_bus + * @bus: the &struct qsfp_bus found via qsfp_bus_find_fwnode() + * + * Put a reference on the &struct qsfp_bus and free the underlying structure + * if this was the last reference. 
+ */ +void qsfp_bus_put(struct qsfp_bus *bus) +{ + if (bus) + kref_put_mutex(&bus->kref, qsfp_bus_release, &qsfp_mutex); +} +EXPORT_SYMBOL_GPL(qsfp_bus_put); + +static int qsfp_register_bus(struct qsfp_bus *bus) +{ + const struct qsfp_upstream_ops *ops = bus->upstream_ops; + int ret; + + if (ops) { + if (ops->link_down) + ops->link_down(bus->upstream); + if (ops->connect_phy && bus->phydev) { + ret = ops->connect_phy(bus->upstream, bus->phydev); + if (ret) + return ret; + } + } + bus->registered = true; + bus->socket_ops->attach(bus->qsfp); + if (bus->started) + bus->socket_ops->start(bus->qsfp); + if (ops) + bus->upstream_ops->attach(bus->upstream, bus); + return 0; +} + +static void qsfp_unregister_bus(struct qsfp_bus *bus) +{ + const struct qsfp_upstream_ops *ops = bus->upstream_ops; + + if (bus->registered) { + bus->upstream_ops->detach(bus->upstream, bus); + if (bus->started) + bus->socket_ops->stop(bus->qsfp); + bus->socket_ops->detach(bus->qsfp); + if (bus->phydev && ops && ops->disconnect_phy) + ops->disconnect_phy(bus->upstream); + } + bus->registered = false; +} + +/** + * qsfp_get_module_info() - Get the ethtool_modinfo for a QSFP module + * @bus: a pointer to the &struct qsfp_bus structure for the qsfp module + * @modinfo: a &struct ethtool_modinfo + * + * Fill in the type and eeprom_len parameters in @modinfo for a module on + * the sfp bus specified by @bus. + * + * Returns 0 on success or a negative errno number. + */ +int qsfp_get_module_info(struct qsfp_bus *bus, struct ethtool_modinfo *modinfo) +{ + return bus->socket_ops->module_info(bus->qsfp, modinfo); +} +EXPORT_SYMBOL_GPL(qsfp_get_module_info); + +/** + * qsfp_get_module_eeprom() - Read the QSFP module EEPROM + * @bus: a pointer to the &struct qsfp_bus structure for the qsfp module + * @ee: a &struct ethtool_eeprom + * @data: buffer to contain the EEPROM data (must be at least @ee->len bytes) + * + * Read the EEPROM as specified by the supplied @ee. 
See the documentation + * for &struct ethtool_eeprom for the region to be read. + * + * Returns 0 on success or a negative errno number. + */ +int qsfp_get_module_eeprom(struct qsfp_bus *bus, struct ethtool_eeprom *ee, + u8 *data) +{ + return bus->socket_ops->module_eeprom(bus->qsfp, ee, data); +} +EXPORT_SYMBOL_GPL(qsfp_get_module_eeprom); + +/** + * qsfp_upstream_start() - Inform the QSFP that the network device is up + * @bus: a pointer to the &struct qsfp_bus structure for the qsfp module + * + * Inform the QSFP socket that the network device is now up, so that the + * module can be enabled by allowing TX_DISABLE to be deasserted. This + * should be called from the network device driver's &struct net_device_ops + * ndo_open() method. + */ +void qsfp_upstream_start(struct qsfp_bus *bus) +{ + if (bus->registered) + bus->socket_ops->start(bus->qsfp); + bus->started = true; +} +EXPORT_SYMBOL_GPL(qsfp_upstream_start); + +/** + * qsfp_upstream_stop() - Inform the QSFP that the network device is down + * @bus: a pointer to the &struct qsfp_bus structure for the qsfp module + * + * Inform the QSFP socket that the network device is now up, so that the + * module can be disabled by asserting TX_DISABLE, disabling the laser + * in optical modules. This should be called from the network device + * driver's &struct net_device_ops ndo_stop() method. + */ +void qsfp_upstream_stop(struct qsfp_bus *bus) +{ + if (bus->registered) + bus->socket_ops->stop(bus->qsfp); + bus->started = false; +} +EXPORT_SYMBOL_GPL(qsfp_upstream_stop); + +static void qsfp_upstream_clear(struct qsfp_bus *bus) +{ + bus->upstream_ops = NULL; + bus->upstream = NULL; +} + +/** + * qsfp_bus_find_fwnode() - parse and locate the QSFP bus from fwnode + * @fwnode: firmware node for the parent device (MAC or PHY) + * + * Parse the parent device's firmware node for a QSFP bus, and locate + * the qsfp_bus structure, incrementing its reference count. This must + * be put via qsfp_bus_put() when done. 
+ * + * Returns: + * - on success, a pointer to the sfp_bus structure, + * - %NULL if no QSFP is specified, + * - on failure, an error pointer value: + * + * - corresponding to the errors detailed for + * fwnode_property_get_reference_args(). + * - %-ENOMEM if we failed to allocate the bus. + * - an error from the upstream's connect_phy() method. + */ +struct qsfp_bus *qsfp_bus_find_fwnode(const struct fwnode_handle *fwnode) +{ + struct fwnode_reference_args ref; + struct qsfp_bus *bus; + int ret; + + ret = fwnode_property_get_reference_args(fwnode, "qsfp", NULL, + 0, 0, &ref); + if (ret == -ENOENT) + return NULL; + else if (ret < 0) + return ERR_PTR(ret); + + if (!fwnode_device_is_available(ref.fwnode)) { + fwnode_handle_put(ref.fwnode); + return NULL; + } + + bus = qsfp_bus_get(ref.fwnode); + fwnode_handle_put(ref.fwnode); + if (!bus) + return ERR_PTR(-ENOMEM); + + return bus; +} +EXPORT_SYMBOL_GPL(qsfp_bus_find_fwnode); + +/** + * qsfp_bus_add_upstream() - parse and register the neighbouring device + * @bus: the &struct qsfp_bus found via qsfp_bus_find_fwnode() + * @upstream: the upstream private data + * @ops: the upstream's &struct qsfp_upstream_ops + * + * Add upstream driver for the QSFP bus, and if the bus is complete, register + * the QSFP bus using qsfp_register_upstream(). This takes a reference on the + * bus, so it is safe to put the bus after this call. + * + * Returns: + * - on success, a pointer to the qsfp_bus structure, + * - %NULL if no QSFP is specified, + * - on failure, an error pointer value: + * + * - corresponding to the errors detailed for + * fwnode_property_get_reference_args(). + * - %-ENOMEM if we failed to allocate the bus. + * - an error from the upstream's connect_phy() method. 
+ */ +int qsfp_bus_add_upstream(struct qsfp_bus *bus, void *upstream, + const struct qsfp_upstream_ops *ops) +{ + int ret; + + /* If no bus, return success */ + if (!bus) + return 0; + + rtnl_lock(); + kref_get(&bus->kref); + bus->upstream_ops = ops; + bus->upstream = upstream; + + if (bus->qsfp) { + ret = qsfp_register_bus(bus); + if (ret) + qsfp_upstream_clear(bus); + } else { + ret = 0; + } + rtnl_unlock(); + + if (ret) + qsfp_bus_put(bus); + + return ret; +} +EXPORT_SYMBOL_GPL(qsfp_bus_add_upstream); + +/** + * qsfp_bus_del_upstream() - Delete a qsfp bus + * @bus: a pointer to the &struct qsfp_bus structure for the qsfp module + * + * Delete a previously registered upstream connection for the QSFP + * module. @bus should have been added by sfp_bus_add_upstream(). + */ +void qsfp_bus_del_upstream(struct qsfp_bus *bus) +{ + if (bus) { + rtnl_lock(); + if (bus->qsfp) + qsfp_unregister_bus(bus); + qsfp_upstream_clear(bus); + rtnl_unlock(); + + qsfp_bus_put(bus); + } +} +EXPORT_SYMBOL_GPL(qsfp_bus_del_upstream); + +/* Socket driver entry points */ +int qsfp_add_phy(struct qsfp_bus *bus, struct phy_device *phydev) +{ + const struct qsfp_upstream_ops *ops = qsfp_get_upstream_ops(bus); + int ret = 0; + + if (ops && ops->connect_phy) + ret = ops->connect_phy(bus->upstream, phydev); + + if (ret == 0) + bus->phydev = phydev; + + return ret; +} +EXPORT_SYMBOL_GPL(qsfp_add_phy); + +void qsfp_remove_phy(struct qsfp_bus *bus) +{ + const struct qsfp_upstream_ops *ops = qsfp_get_upstream_ops(bus); + + if (ops && ops->disconnect_phy) + ops->disconnect_phy(bus->upstream); + bus->phydev = NULL; +} +EXPORT_SYMBOL_GPL(qsfp_remove_phy); + +void qsfp_link_up(struct qsfp_bus *bus) +{ + const struct qsfp_upstream_ops *ops = qsfp_get_upstream_ops(bus); + + if (ops && ops->link_up) + ops->link_up(bus->upstream); +} +EXPORT_SYMBOL_GPL(qsfp_link_up); + +void qsfp_link_down(struct qsfp_bus *bus) +{ + const struct qsfp_upstream_ops *ops = qsfp_get_upstream_ops(bus); + + if (ops && 
ops->link_down) + ops->link_down(bus->upstream); +} +EXPORT_SYMBOL_GPL(qsfp_link_down); + +int qsfp_module_insert(struct qsfp_bus *bus, const struct qsfp_eeprom_id *id, + const struct qsfp_quirk *quirk) +{ + const struct qsfp_upstream_ops *ops = qsfp_get_upstream_ops(bus); + int ret = 0; + + bus->qsfp_quirk = quirk; + + if (ops && ops->module_insert) + ret = ops->module_insert(bus->upstream, id); + + return ret; +} +EXPORT_SYMBOL_GPL(qsfp_module_insert); + +void qsfp_module_remove(struct qsfp_bus *bus) +{ + const struct qsfp_upstream_ops *ops = qsfp_get_upstream_ops(bus); + + if (ops && ops->module_remove) + ops->module_remove(bus->upstream); + + bus->qsfp_quirk = NULL; +} +EXPORT_SYMBOL_GPL(qsfp_module_remove); + +int qsfp_module_start(struct qsfp_bus *bus) +{ + const struct qsfp_upstream_ops *ops = qsfp_get_upstream_ops(bus); + int ret = 0; + + if (ops && ops->module_start) + ret = ops->module_start(bus->upstream); + + return ret; +} +EXPORT_SYMBOL_GPL(qsfp_module_start); + +void qsfp_module_stop(struct qsfp_bus *bus) +{ + const struct qsfp_upstream_ops *ops = qsfp_get_upstream_ops(bus); + + if (ops && ops->module_stop) + ops->module_stop(bus->upstream); +} +EXPORT_SYMBOL_GPL(qsfp_module_stop); + +static void qsfp_socket_clear(struct qsfp_bus *bus) +{ + bus->qsfp_dev = NULL; + bus->qsfp = NULL; + bus->socket_ops = NULL; +} + +struct qsfp_bus *qsfp_register_socket(struct device *dev, struct qsfp *qsfp, + const struct qsfp_socket_ops *ops) +{ + struct qsfp_bus *bus = qsfp_bus_get(dev->fwnode); + int ret = 0; + + if (bus) { + rtnl_lock(); + bus->qsfp_dev = dev; + bus->qsfp = qsfp; + bus->socket_ops = ops; + + if (bus->upstream_ops) { + ret = qsfp_register_bus(bus); + if (ret) + qsfp_socket_clear(bus); + } + rtnl_unlock(); + } + + if (ret) { + qsfp_bus_put(bus); + bus = NULL; + } + + return bus; +} +EXPORT_SYMBOL_GPL(qsfp_register_socket); + +void qsfp_unregister_socket(struct qsfp_bus *bus) +{ + rtnl_lock(); + if (bus->upstream_ops) + qsfp_unregister_bus(bus); + 
qsfp_socket_clear(bus); + rtnl_unlock(); + + qsfp_bus_put(bus); +} +EXPORT_SYMBOL_GPL(qsfp_unregister_socket); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index e1329d4974fd6..228daf004c410 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1442,9 +1442,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) nvme_poll_irqdisable(nvmeq); if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) { - dev_warn(dev->ctrl.device, + /*dev_warn(dev->ctrl.device, "I/O tag %d (%04x) QID %d timeout, completion polled\n", - req->tag, nvme_cid(req), nvmeq->qid); + req->tag, nvme_cid(req), nvmeq->qid);*/ return BLK_EH_DONE; } diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 0e2d608c3e207..dfb0180113f04 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -117,6 +117,14 @@ config OF_OVERLAY_KUNIT_TEST If unsure, say N here, but this option is safe to enable. +config OF_CONFIGFS + bool "Device Tree Overlay ConfigFS interface" + select CONFIGFS_FS + select OF_FLATTREE + depends on OF_OVERLAY + help + Enable a simple user-space driven DT overlay interface. 
+ config OF_NUMA bool diff --git a/drivers/of/Makefile b/drivers/of/Makefile index 379a0afcbdc0b..f294b36207f80 100644 --- a/drivers/of/Makefile +++ b/drivers/of/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-y = base.o cpu.o device.o module.o platform.o property.o obj-$(CONFIG_OF_KOBJ) += kobj.o +obj-$(CONFIG_OF_CONFIGFS) += configfs.o obj-$(CONFIG_OF_DYNAMIC) += dynamic.o obj-$(CONFIG_OF_FLATTREE) += fdt.o empty_root.dtb.o obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o diff --git a/drivers/of/configfs.c b/drivers/of/configfs.c new file mode 100644 index 0000000000000..b8048047ad4ed --- /dev/null +++ b/drivers/of/configfs.c @@ -0,0 +1,284 @@ +/* + * Configfs entries for device-tree + * + * Copyright (C) 2013 - Pantelis Antoniou + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "of_private.h" + +struct cfs_overlay_item { + struct config_item item; + + char path[PATH_MAX]; + + const struct firmware *fw; + struct device_node *overlay; + int ov_id; + + void *dtbo; + int dtbo_size; +}; + +static int create_overlay(struct cfs_overlay_item *overlay, const void *blob, + size_t size) +{ + int err; + + err = of_overlay_fdt_apply(blob, size, &overlay->ov_id, NULL); + if (err < 0) + pr_err("%s: Failed to create overlay (err=%d)\n", __func__, + err); + + return err; +} + +static inline struct cfs_overlay_item *to_cfs_overlay_item( + struct config_item *item) +{ + return item ? 
container_of(item, struct cfs_overlay_item, item) : NULL; +} + +static ssize_t cfs_overlay_item_path_show(struct config_item *item, char *page) +{ + return sprintf(page, "%s\n", to_cfs_overlay_item(item)->path); +} + +static ssize_t cfs_overlay_item_path_store(struct config_item *item, + const char *page, size_t count) +{ + struct cfs_overlay_item *overlay = to_cfs_overlay_item(item); + const char *p = page; + char *s; + int err; + + /* if it's set do not allow changes */ + if (overlay->path[0] != '\0' || overlay->dtbo_size > 0) + return -EPERM; + + /* copy to path buffer (and make sure it's always zero terminated */ + count = snprintf(overlay->path, sizeof(overlay->path) - 1, "%s", p); + overlay->path[sizeof(overlay->path) - 1] = '\0'; + + /* strip trailing newlines */ + s = overlay->path + strlen(overlay->path); + while (s > overlay->path && *--s == '\n') + *s = '\0'; + + pr_debug("%s: path is '%s'\n", __func__, overlay->path); + + err = request_firmware(&overlay->fw, overlay->path, NULL); + if (err != 0) + goto out_err; + + err = create_overlay(overlay, overlay->fw->data, overlay->fw->size); + if (err < 0) + goto out_err; + + return count; + +out_err: + + release_firmware(overlay->fw); + overlay->fw = NULL; + + overlay->path[0] = '\0'; + return err; +} + +static ssize_t cfs_overlay_item_status_show(struct config_item *item, + char *page) +{ + return sprintf(page, "%s\n", to_cfs_overlay_item(item)->ov_id >= 0 ? 
+ "applied" : "unapplied"); +} + +CONFIGFS_ATTR(cfs_overlay_item_, path); +CONFIGFS_ATTR_RO(cfs_overlay_item_, status); + +static struct configfs_attribute *cfs_overlay_attrs[] = { + &cfs_overlay_item_attr_path, + &cfs_overlay_item_attr_status, + NULL, +}; + +static ssize_t cfs_overlay_item_dtbo_read(struct config_item *item, void *buf, + size_t max_count) +{ + struct cfs_overlay_item *overlay = to_cfs_overlay_item(item); + + pr_debug("%s: buf=%p max_count=%zu\n", __func__, + buf, max_count); + + if (overlay->dtbo == NULL) + return 0; + + /* copy if buffer provided */ + if (buf != NULL) { + /* the buffer must be large enough */ + if (overlay->dtbo_size > max_count) + return -ENOSPC; + + memcpy(buf, overlay->dtbo, overlay->dtbo_size); + } + + return overlay->dtbo_size; +} + +static ssize_t cfs_overlay_item_dtbo_write(struct config_item *item, + const void *buf, size_t count) +{ + struct cfs_overlay_item *overlay = to_cfs_overlay_item(item); + int err; + + /* if it's set do not allow changes */ + if (overlay->path[0] != '\0' || overlay->dtbo_size > 0) + return -EPERM; + + /* copy the contents */ + overlay->dtbo = kmemdup(buf, count, GFP_KERNEL); + if (overlay->dtbo == NULL) + return -ENOMEM; + + overlay->dtbo_size = count; + + err = create_overlay(overlay, overlay->dtbo, overlay->dtbo_size); + if (err < 0) + goto out_err; + + return count; + +out_err: + kfree(overlay->dtbo); + overlay->dtbo = NULL; + overlay->dtbo_size = 0; + + return err; +} + +CONFIGFS_BIN_ATTR(cfs_overlay_item_, dtbo, NULL, SZ_1M); + +static struct configfs_bin_attribute *cfs_overlay_bin_attrs[] = { + &cfs_overlay_item_attr_dtbo, + NULL, +}; + +static void cfs_overlay_release(struct config_item *item) +{ + struct cfs_overlay_item *overlay = to_cfs_overlay_item(item); + + if (overlay->ov_id >= 0) + of_overlay_remove(&overlay->ov_id); + if (overlay->fw) + release_firmware(overlay->fw); + /* kfree with NULL is safe */ + kfree(overlay->dtbo); + kfree(overlay); +} + +static struct 
configfs_item_operations cfs_overlay_item_ops = { + .release = cfs_overlay_release, +}; + +static struct config_item_type cfs_overlay_type = { + .ct_item_ops = &cfs_overlay_item_ops, + .ct_attrs = cfs_overlay_attrs, + .ct_bin_attrs = cfs_overlay_bin_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_item *cfs_overlay_group_make_item( + struct config_group *group, const char *name) +{ + struct cfs_overlay_item *overlay; + + overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); + if (!overlay) + return ERR_PTR(-ENOMEM); + overlay->ov_id = -1; + + config_item_init_type_name(&overlay->item, name, &cfs_overlay_type); + return &overlay->item; +} + +static void cfs_overlay_group_drop_item(struct config_group *group, + struct config_item *item) +{ + struct cfs_overlay_item *overlay = to_cfs_overlay_item(item); + + config_item_put(&overlay->item); +} + +static struct configfs_group_operations overlays_ops = { + .make_item = cfs_overlay_group_make_item, + .drop_item = cfs_overlay_group_drop_item, +}; + +static struct config_item_type overlays_type = { + .ct_group_ops = &overlays_ops, + .ct_owner = THIS_MODULE, +}; + +static struct configfs_group_operations of_cfs_ops = { + /* empty - we don't allow anything to be created */ +}; + +static struct config_item_type of_cfs_type = { + .ct_group_ops = &of_cfs_ops, + .ct_owner = THIS_MODULE, +}; + +static struct config_group of_cfs_overlay_group; + +static struct configfs_subsystem of_cfs_subsys = { + .su_group = { + .cg_item = { + .ci_namebuf = "device-tree", + .ci_type = &of_cfs_type, + }, + }, + .su_mutex = __MUTEX_INITIALIZER(of_cfs_subsys.su_mutex), +}; + +static int __init of_cfs_init(void) +{ + int ret; + + pr_info("%s\n", __func__); + + config_group_init(&of_cfs_subsys.su_group); + config_group_init_type_name(&of_cfs_overlay_group, "overlays", + &overlays_type); + configfs_add_default_group(&of_cfs_overlay_group, + &of_cfs_subsys.su_group); + + ret = configfs_register_subsystem(&of_cfs_subsys); + if (ret != 0) { + 
pr_err("%s: failed to register subsys\n", __func__); + goto out; + } + pr_info("%s: OK\n", __func__); +out: + return ret; +} +late_initcall(of_cfs_init); diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index 650b2dd81c482..07649e3695c83 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -59,7 +59,7 @@ (((cfg) << 24) | \ TLP_PAYLOAD_SIZE) #define TLP_CFG_DW1(pcie, tag, be) \ - (((PCI_DEVID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) + (((PCI_DEVID((pcie)->root_bus_nr, RP_DEVFN)) << 16) | ((tag) << 8) | (be)) #define TLP_CFG_DW2(bus, devfn, offset) \ (((bus) << 24) | ((devfn) << 16) | (offset)) #define TLP_COMP_STATUS(s) (((s) >> 13) & 7) @@ -77,9 +77,20 @@ #define S10_TLP_FMTTYPE_CFGWR0 0x45 #define S10_TLP_FMTTYPE_CFGWR1 0x44 +#define AGLX_RP_CFG_ADDR(pcie, reg) \ + (((pcie)->hip_base) + (reg)) +#define AGLX_RP_SECONDARY(pcie) \ + readb(AGLX_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS)) + +#define AGLX_BDF_REG 0x00002004 +#define AGLX_ROOT_PORT_IRQ_STATUS 0x14c +#define AGLX_ROOT_PORT_IRQ_ENABLE 0x150 +#define CFG_AER BIT(4) + enum altera_pcie_version { ALTERA_PCIE_V1 = 0, ALTERA_PCIE_V2, + ALTERA_PCIE_V3, }; struct altera_pcie { @@ -102,6 +113,11 @@ struct altera_pcie_ops { int size, u32 *value); int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno, int where, int size, u32 value); + int (*ep_read_cfg)(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int where, int size, u32 *value); + int (*ep_write_cfg)(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int where, int size, u32 value); + void (*rp_isr)(struct irq_desc *desc); }; struct altera_pcie_data { @@ -112,6 +128,9 @@ struct altera_pcie_data { u32 cfgrd1; u32 cfgwr0; u32 cfgwr1; + u32 port_conf_offset; + u32 port_irq_status_offset; + u32 port_irq_enable_offset; }; struct tlp_rp_regpair_t { @@ -131,6 +150,28 @@ static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg) return 
readl_relaxed(pcie->cra_base + reg); } +static inline void cra_writew(struct altera_pcie *pcie, const u32 value, + const u32 reg) +{ + writew_relaxed(value, pcie->cra_base + reg); +} + +static inline u32 cra_readw(struct altera_pcie *pcie, const u32 reg) +{ + return readw_relaxed(pcie->cra_base + reg); +} + +static inline void cra_writeb(struct altera_pcie *pcie, const u32 value, + const u32 reg) +{ + writeb_relaxed(value, pcie->cra_base + reg); +} + +static inline u32 cra_readb(struct altera_pcie *pcie, const u32 reg) +{ + return readb_relaxed(pcie->cra_base + reg); +} + static bool altera_pcie_link_up(struct altera_pcie *pcie) { return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); @@ -145,6 +186,15 @@ static bool s10_altera_pcie_link_up(struct altera_pcie *pcie) return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA); } +static bool aglx_altera_pcie_link_up(struct altera_pcie *pcie) +{ + void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, + pcie->pcie_data->cap_offset + + PCI_EXP_LNKSTA); + + return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA); +} + /* * Altera PCIe port uses BAR0 of RC's configuration space as the translation * from PCI bus to native BUS. Entire DDR region is mapped into PCIe space @@ -157,8 +207,7 @@ static bool s10_altera_pcie_link_up(struct altera_pcie *pcie) static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn, int offset) { - if (pci_is_root_bus(bus) && (devfn == 0) && - (offset == PCI_BASE_ADDRESS_0)) + if (pci_is_root_bus(bus) && devfn == 0 && offset == PCI_BASE_ADDRESS_0) return true; return false; @@ -372,7 +421,7 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, * Monitor changes to PCI_PRIMARY_BUS register on root port * and update local copy of root bus number accordingly. 
*/ - if ((bus == pcie->root_bus_nr) && (where == PCI_PRIMARY_BUS)) + if (bus == pcie->root_bus_nr && where == PCI_PRIMARY_BUS) pcie->root_bus_nr = (u8)(value); return PCIBIOS_SUCCESSFUL; @@ -425,6 +474,103 @@ static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno, return PCIBIOS_SUCCESSFUL; } +static int aglx_rp_read_cfg(struct altera_pcie *pcie, int where, + int size, u32 *value) +{ + void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where); + + switch (size) { + case 1: + *value = readb(addr); + break; + case 2: + *value = readw(addr); + break; + default: + *value = readl(addr); + break; + } + + /* interrupt pin not programmed in hardware, set to INTA */ + if (where == PCI_INTERRUPT_PIN && size == 1 && !(*value)) + *value = 0x01; + else if (where == PCI_INTERRUPT_LINE && !(*value & 0xff00)) + *value |= 0x0100; + + return PCIBIOS_SUCCESSFUL; +} + +static int aglx_rp_write_cfg(struct altera_pcie *pcie, u8 busno, + int where, int size, u32 value) +{ + void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where); + + switch (size) { + case 1: + writeb(value, addr); + break; + case 2: + writew(value, addr); + break; + default: + writel(value, addr); + break; + } + + /* + * Monitor changes to PCI_PRIMARY_BUS register on root port + * and update local copy of root bus number accordingly. 
+ */ + if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS) + pcie->root_bus_nr = value & 0xff; + + return PCIBIOS_SUCCESSFUL; +} + +static int aglx_ep_write_cfg(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int where, int size, u32 value) +{ + cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG); + if (busno > AGLX_RP_SECONDARY(pcie)) + where |= (1 << 12); /* type 1 */ + + switch (size) { + case 1: + cra_writeb(pcie, value, where); + break; + case 2: + cra_writew(pcie, value, where); + break; + default: + cra_writel(pcie, value, where); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int aglx_ep_read_cfg(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int where, int size, u32 *value) +{ + cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG); + if (busno > AGLX_RP_SECONDARY(pcie)) + where |= (1 << 12); /* type 1 */ + + switch (size) { + case 1: + *value = cra_readb(pcie, where); + break; + case 2: + *value = cra_readw(pcie, where); + break; + default: + *value = cra_readl(pcie, where); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno, unsigned int devfn, int where, int size, u32 *value) @@ -437,6 +583,10 @@ static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno, return pcie->pcie_data->ops->rp_read_cfg(pcie, where, size, value); + if (pcie->pcie_data->ops->ep_read_cfg) + return pcie->pcie_data->ops->ep_read_cfg(pcie, busno, devfn, + where, size, value); + switch (size) { case 1: byte_en = 1 << (where & 3); @@ -481,6 +631,10 @@ static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno, return pcie->pcie_data->ops->rp_write_cfg(pcie, busno, where, size, value); + if (pcie->pcie_data->ops->ep_write_cfg) + return pcie->pcie_data->ops->ep_write_cfg(pcie, busno, devfn, + where, size, value); + switch (size) { case 1: data32 = (value & 0xff) << shift; @@ -576,7 +730,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie) 
dev_err(dev, "link retrain timeout\n"); break; } - udelay(100); + usleep_range(50, 150); } /* Wait for link is up */ @@ -589,7 +743,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie) dev_err(dev, "link up timeout\n"); break; } - udelay(100); + usleep_range(50, 150); } } @@ -659,7 +813,30 @@ static void altera_pcie_isr(struct irq_desc *desc) dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", bit); } } + chained_irq_exit(chip, desc); +} + +static void aglx_isr(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct altera_pcie *pcie; + struct device *dev; + u32 status; + int ret; + + chained_irq_enter(chip, desc); + pcie = irq_desc_get_handler_data(desc); + dev = &pcie->pdev->dev; + status = readl(pcie->hip_base + pcie->pcie_data->port_conf_offset + + pcie->pcie_data->port_irq_status_offset); + if (status & CFG_AER) { + ret = generic_handle_domain_irq(pcie->irq_domain, 0); + if (ret) + dev_err_ratelimited(dev, "unexpected IRQ,\n"); + } + writel(CFG_AER, (pcie->hip_base + pcie->pcie_data->port_conf_offset + + pcie->pcie_data->port_irq_status_offset)); chained_irq_exit(chip, desc); } @@ -670,7 +847,7 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie) /* Setup INTx */ pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX, - &intx_domain_ops, pcie); + &intx_domain_ops, pcie); if (!pcie->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENOMEM; @@ -694,9 +871,9 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie) if (IS_ERR(pcie->cra_base)) return PTR_ERR(pcie->cra_base); - if (pcie->pcie_data->version == ALTERA_PCIE_V2) { - pcie->hip_base = - devm_platform_ioremap_resource_byname(pdev, "Hip"); + if (pcie->pcie_data->version == ALTERA_PCIE_V2 || + pcie->pcie_data->version == ALTERA_PCIE_V3) { + pcie->hip_base = devm_platform_ioremap_resource_byname(pdev, "Hip"); if (IS_ERR(pcie->hip_base)) return PTR_ERR(pcie->hip_base); } @@ -706,7 +883,7 @@ static int 
altera_pcie_parse_dt(struct altera_pcie *pcie) if (pcie->irq < 0) return pcie->irq; - irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie); + irq_set_chained_handler_and_data(pcie->irq, pcie->pcie_data->ops->rp_isr, pcie); return 0; } @@ -719,6 +896,7 @@ static const struct altera_pcie_ops altera_pcie_ops_1_0 = { .tlp_read_pkt = tlp_read_packet, .tlp_write_pkt = tlp_write_packet, .get_link_status = altera_pcie_link_up, + .rp_isr = altera_pcie_isr, }; static const struct altera_pcie_ops altera_pcie_ops_2_0 = { @@ -727,6 +905,16 @@ static const struct altera_pcie_ops altera_pcie_ops_2_0 = { .get_link_status = s10_altera_pcie_link_up, .rp_read_cfg = s10_rp_read_cfg, .rp_write_cfg = s10_rp_write_cfg, + .rp_isr = altera_pcie_isr, +}; + +static const struct altera_pcie_ops altera_pcie_ops_3_0 = { + .rp_read_cfg = aglx_rp_read_cfg, + .rp_write_cfg = aglx_rp_write_cfg, + .get_link_status = aglx_altera_pcie_link_up, + .ep_read_cfg = aglx_ep_read_cfg, + .ep_write_cfg = aglx_ep_write_cfg, + .rp_isr = aglx_isr, }; static const struct altera_pcie_data altera_pcie_1_0_data = { @@ -749,11 +937,44 @@ static const struct altera_pcie_data altera_pcie_2_0_data = { .cfgwr1 = S10_TLP_FMTTYPE_CFGWR1, }; +static const struct altera_pcie_data altera_pcie_3_0_f_tile_data = { + .ops = &altera_pcie_ops_3_0, + .version = ALTERA_PCIE_V3, + .cap_offset = 0x70, + .port_conf_offset = 0x14000, + .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS, + .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE, +}; + +static const struct altera_pcie_data altera_pcie_3_0_p_tile_data = { + .ops = &altera_pcie_ops_3_0, + .version = ALTERA_PCIE_V3, + .cap_offset = 0x70, + .port_conf_offset = 0x104000, + .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS, + .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE, +}; + +static const struct altera_pcie_data altera_pcie_3_0_r_tile_data = { + .ops = &altera_pcie_ops_3_0, + .version = ALTERA_PCIE_V3, + .cap_offset = 0x70, + .port_conf_offset = 0x1300, + 
.port_irq_status_offset = 0x0, + .port_irq_enable_offset = 0x4, +}; + static const struct of_device_id altera_pcie_of_match[] = { {.compatible = "altr,pcie-root-port-1.0", .data = &altera_pcie_1_0_data }, {.compatible = "altr,pcie-root-port-2.0", .data = &altera_pcie_2_0_data }, + {.compatible = "altr,pcie-root-port-3.0-f-tile", + .data = &altera_pcie_3_0_f_tile_data }, + {.compatible = "altr,pcie-root-port-3.0-p-tile", + .data = &altera_pcie_3_0_p_tile_data }, + {.compatible = "altr,pcie-root-port-3.0-r-tile", + .data = &altera_pcie_3_0_r_tile_data }, {}, }; @@ -791,11 +1012,18 @@ static int altera_pcie_probe(struct platform_device *pdev) return ret; } - /* clear all interrupts */ - cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS); - /* enable all interrupts */ - cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); - altera_pcie_host_init(pcie); + if (pcie->pcie_data->version == ALTERA_PCIE_V1 || + pcie->pcie_data->version == ALTERA_PCIE_V2) { + /* clear all interrupts */ + cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS); + /* enable all interrupts */ + cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); + altera_pcie_host_init(pcie); + } else if (pcie->pcie_data->version == ALTERA_PCIE_V3) { + writel(CFG_AER, + pcie->hip_base + pcie->pcie_data->port_conf_offset + + pcie->pcie_data->port_irq_enable_offset); + } bridge->sysdata = pcie; bridge->busnr = pcie->root_bus_nr; diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 73b1edd0531b4..e20105db0198f 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -44,6 +44,7 @@ static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX); #define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5) #define CQSPI_RD_NO_IRQ BIT(6) #define CQSPI_DISABLE_STIG_MODE BIT(7) +#define CQSPI_DISABLE_RUNTIME_PM BIT(8) /* Capabilities */ #define CQSPI_SUPPORTS_OCTAL BIT(0) @@ -105,13 +106,16 @@ struct cqspi_st { bool is_jh7110; /* Flag for StarFive JH7110 SoC */ bool disable_stig_mode; + 
bool runtime_pm; + refcount_t refcount; + refcount_t inflight_ops; const struct cqspi_driver_platdata *ddata; }; struct cqspi_driver_platdata { u32 hwcaps_mask; - u8 quirks; + u16 quirks; int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata, u_char *rxbuf, loff_t from_addr, size_t n_rx); u32 (*get_dma_status)(struct cqspi_st *cqspi); @@ -730,6 +734,9 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata, u8 *rxbuf_end = rxbuf + n_rx; int ret = 0; + if (!refcount_read(&cqspi->refcount)) + return -ENODEV; + writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES); @@ -1047,6 +1054,9 @@ static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata, unsigned int write_bytes; int ret; + if (!refcount_read(&cqspi->refcount)) + return -ENODEV; + writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR); writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES); @@ -1437,12 +1447,26 @@ static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller); struct device *dev = &cqspi->pdev->dev; + if (refcount_read(&cqspi->inflight_ops) == 0) + return -ENODEV; + ret = pm_runtime_resume_and_get(dev); if (ret) { dev_err(&mem->spi->dev, "resume failed with %d\n", ret); return ret; } + if (!refcount_read(&cqspi->refcount)) + return -EBUSY; + + refcount_inc(&cqspi->inflight_ops); + + if (!refcount_read(&cqspi->refcount)) { + if (refcount_read(&cqspi->inflight_ops)) + refcount_dec(&cqspi->inflight_ops); + return -EBUSY; + } + ret = cqspi_mem_process(mem, op); pm_runtime_mark_last_busy(dev); @@ -1451,6 +1475,9 @@ static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) if (ret) dev_err(&mem->spi->dev, "operation failed with %d\n", ret); + if (refcount_read(&cqspi->inflight_ops) > 1) + refcount_dec(&cqspi->inflight_ops); + return ret; } @@ -1662,7 +1689,7 @@ static const struct 
spi_controller_mem_caps cqspi_mem_caps = { static int cqspi_setup_flash(struct cqspi_st *cqspi) { - unsigned int max_cs = cqspi->num_chipselect - 1; + unsigned int max_cs = 0; struct platform_device *pdev = cqspi->pdev; struct device *dev = &pdev->dev; struct cqspi_flash_pdata *f_pdata; @@ -1680,7 +1707,7 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi) if (cs >= cqspi->num_chipselect) { dev_err(dev, "Chip select %d out of range.\n", cs); return -EINVAL; - } else if (cs < max_cs) { + } else if (cs > max_cs) { max_cs = cs; } @@ -1894,6 +1921,9 @@ static int cqspi_probe(struct platform_device *pdev) } } + refcount_set(&cqspi->refcount, 1); + refcount_set(&cqspi->inflight_ops, 1); + ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, pdev->name, cqspi); if (ret) { @@ -1923,10 +1953,17 @@ static int cqspi_probe(struct platform_device *pdev) goto probe_setup_failed; } - ret = devm_pm_runtime_enable(dev); - if (ret) { - if (cqspi->rx_chan) - dma_release_channel(cqspi->rx_chan); + pm_runtime_enable(dev); + + if (ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)) { + pm_runtime_disable(dev); + cqspi->runtime_pm = false; + } else { + cqspi->runtime_pm = true; + } + + if (cqspi->rx_chan) { + dma_release_channel(cqspi->rx_chan); goto probe_setup_failed; } @@ -1946,6 +1983,8 @@ static int cqspi_probe(struct platform_device *pdev) return 0; probe_setup_failed: cqspi_controller_enable(cqspi, 0); + pm_runtime_disable(dev); + cqspi->runtime_pm = false; probe_reset_failed: if (cqspi->is_jh7110) cqspi_jh7110_disable_clk(pdev, cqspi); @@ -1958,19 +1997,26 @@ static void cqspi_remove(struct platform_device *pdev) { struct cqspi_st *cqspi = platform_get_drvdata(pdev); + refcount_set(&cqspi->refcount, 0); + + if (!refcount_dec_and_test(&cqspi->inflight_ops)) + cqspi_wait_idle(cqspi); + spi_unregister_controller(cqspi->host); cqspi_controller_enable(cqspi, 0); if (cqspi->rx_chan) dma_release_channel(cqspi->rx_chan); - clk_disable_unprepare(cqspi->clk); + if 
(pm_runtime_get_sync(&pdev->dev) >= 0) + clk_disable(cqspi->clk); if (cqspi->is_jh7110) cqspi_jh7110_disable_clk(pdev, cqspi); pm_runtime_put_sync(&pdev->dev); - pm_runtime_disable(&pdev->dev); + if (cqspi->runtime_pm) + pm_runtime_disable(&pdev->dev); } static int cqspi_runtime_suspend(struct device *dev) @@ -2049,7 +2095,8 @@ static const struct cqspi_driver_platdata socfpga_qspi = { .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION | CQSPI_SLOW_SRAM - | CQSPI_DISABLE_STIG_MODE, + | CQSPI_DISABLE_STIG_MODE + | CQSPI_DISABLE_RUNTIME_PM, }; static const struct cqspi_driver_platdata versal_ospi = { diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig index a45d423ad10f0..11eedac81f7dc 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig @@ -191,6 +191,12 @@ config LDISC_AUTOLOAD source "drivers/tty/serial/Kconfig" +config NEWHAVEN_LCD + tristate "NEWHAVEN LCD" + depends on I2C + help + Add support for a TTY device on a Newhaven I2C LCD device. + config SERIAL_NONSTANDARD bool "Non-standard serial port support" depends on HAS_IOMEM diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile index 07aca5184a55d..6cb78604efe8e 100644 --- a/drivers/tty/Makefile +++ b/drivers/tty/Makefile @@ -27,5 +27,6 @@ obj-$(CONFIG_GOLDFISH_TTY) += goldfish.o obj-$(CONFIG_MIPS_EJTAG_FDC_TTY) += mips_ejtag_fdc.o obj-$(CONFIG_VCC) += vcc.o obj-$(CONFIG_RPMSG_TTY) += rpmsg_tty.o +obj-$(CONFIG_NEWHAVEN_LCD) += newhaven_lcd.o obj-y += ipwireless/ diff --git a/drivers/tty/newhaven_lcd.c b/drivers/tty/newhaven_lcd.c new file mode 100644 index 0000000000000..f5a5470c03b33 --- /dev/null +++ b/drivers/tty/newhaven_lcd.c @@ -0,0 +1,633 @@ +/* + * TTY on a LCD connected to I2C + * Supports Newhaven NHD-0216K3Z-NSW-BBW + * + * Copyright (C) 2013 Altera Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#define DRV_NAME "lcd-comm" +#define DEV_NAME "ttyLCD" + +#include +#include +#include +#include +#include +#include + +#define LCD_COMMAND 0xfe +#define LCD_DISPLAY_ON 0x41 +#define LCD_DISPLAY_OFF 0x42 +#define LCD_SET_CURSOR 0x45 +#define LCD_BACKSPACE 0x4e +#define LCD_CLEAR_SCREEN 0x51 +#define LCD_BRIGHTNESS 0x53 +#define LCD_CUSTOM_CHAR 0x54 +#define LCD_BYTES_PER_FONT 8 +#define LCD_BYTES_PER_FONT_CMD (LCD_BYTES_PER_FONT + 3) + +#define LCD_BRIGHTNESS_MIN 1 +#define LCD_BRIGHTNESS_MAX 8 + +#define ASCII_BS 0x08 +#define ASCII_LF 0x0a +#define ASCII_CR 0x0d +#define ASCII_ESC 0x1b +#define ASCII_SPACE 0x20 +#define ASCII_BACKSLASH 0x5c +#define ASCII_TILDE 0x7e + +/* The NewHaven display has 8 custom characters that are user-loadable init + its cg ram. */ +#define CUSTOM_BACKSLASH 0x00 +#define CUSTOM_TILDE 0x01 + +struct custom_font { + const char font_cmd[LCD_BYTES_PER_FONT_CMD]; +}; + +/* Array of commands to send to set up custom fonts. 
*/ +static struct custom_font custom_fonts[] = { + { { LCD_COMMAND, LCD_CUSTOM_CHAR, CUSTOM_BACKSLASH, 0x00, 0x10, 0x08, 0x04, 0x02, 0x01, 0x00, 0x00, }, }, + { { LCD_COMMAND, LCD_CUSTOM_CHAR, CUSTOM_TILDE, 0x00, 0x00, 0x00, 0x08, 0x15, 0x02, 0x00, 0x00, }, }, +}; + +struct lcd { + struct device *dev; + struct i2c_client *client; + struct tty_driver *lcd_tty_driver; + struct tty_port port; + unsigned int width; + unsigned int height; + unsigned int brightness; + char *buffer; + unsigned int top_line; + unsigned int cursor_line; + unsigned int cursor_col; +}; + +#define MAX_LCDS 1 +static struct lcd lcd_data_static[MAX_LCDS]; + +static int lcd_cmd_no_params(struct lcd *lcd_data, u8 cmd) +{ + int count; + u8 buf[2] = {LCD_COMMAND, cmd}; + + count = i2c_master_send(lcd_data->client, buf, sizeof(buf)); + if (count != sizeof(buf)) { + pr_err("%s: i2c_master_send returns %d\n", __func__, count); + return -1; + } + msleep(1); + return 0; +} + +static int lcd_cmd_one_param(struct lcd *lcd_data, u8 cmd, u8 param) +{ + int count; + u8 buf[3] = {LCD_COMMAND, cmd, param}; + + count = i2c_master_send(lcd_data->client, buf, sizeof(buf)); + if (count != sizeof(buf)) { + pr_err("%s: i2c_master_send returns %d\n", __func__, count); + return -1; + } + msleep(1); + return 0; +} + +static int lcd_cmd_backlight_brightness(struct lcd *lcd_data, u8 brightness) +{ + return lcd_cmd_one_param(lcd_data, LCD_BRIGHTNESS, brightness); +} + +static int lcd_cmd_display_on(struct lcd *lcd_data) +{ + return lcd_cmd_no_params(lcd_data, LCD_DISPLAY_ON); +} + +static int lcd_cmd_display_off(struct lcd *lcd_data) +{ + return lcd_cmd_no_params(lcd_data, LCD_DISPLAY_OFF); +} + +static int lcd_cmd_clear_screen(struct lcd *lcd_data) +{ + return lcd_cmd_no_params(lcd_data, LCD_CLEAR_SCREEN); +} + +static int lcd_cmd_backspace(struct lcd *lcd_data) +{ + return lcd_cmd_no_params(lcd_data, LCD_BACKSPACE); +} + +/* Note that this has to happen early on or the LCD module will not + process the command */ +static 
int lcd_load_custom_fonts(struct lcd *lcd_data) +{ + int count, i; + + for (i = 0; i < ARRAY_SIZE(custom_fonts); i++) { + count = i2c_master_send(lcd_data->client, + (const char *)&custom_fonts[i].font_cmd, + LCD_BYTES_PER_FONT_CMD); + if (count != LCD_BYTES_PER_FONT_CMD) { + pr_err("%s: i2c_master_send returns %d\n", __func__, count); + return -1; + } + } + return 0; +} + +static char lcd_translate_printable_char(char val) +{ + if (val == ASCII_BACKSLASH) + return CUSTOM_BACKSLASH; + else if (val == ASCII_TILDE) + return CUSTOM_TILDE; + + return val; +} + +/* From NHD-0216K3Z-NSW-BBY Display Module datasheet. */ +#define LCD_CURSOR_LINE_MULTIPLIER 0x40 + +static int lcd_cmd_set_cursor(struct lcd *lcd_data, u8 line, u8 col) +{ + u8 cursor; + + BUG_ON((line >= lcd_data->height) || (col >= lcd_data->width)); + + cursor = col + (LCD_CURSOR_LINE_MULTIPLIER * line); + return lcd_cmd_one_param(lcd_data, LCD_SET_CURSOR, cursor); +} + +/* + * Map a line on the lcd display to a line on the buffer. + * Note that the top line on the display (line 0) may not be line 0 on the + * buffer due to scrolling. 
+ */ +static unsigned int lcd_line_to_buf_line(struct lcd *lcd_data, + unsigned int line) +{ + unsigned int buf_line; + + buf_line = line + lcd_data->top_line; + + if (buf_line >= lcd_data->height) + buf_line -= lcd_data->height; + + return buf_line; +} + +/* Returns a pointer to the line, column position in the lcd buffer */ +static char *lcd_buf_pointer(struct lcd *lcd_data, unsigned int line, + unsigned int col) +{ + unsigned int buf_line; + char *buf; + + if ((lcd_data->cursor_line >= lcd_data->height) || + (lcd_data->cursor_col >= lcd_data->width)) + return lcd_data->buffer; + + buf_line = lcd_line_to_buf_line(lcd_data, line); + + buf = lcd_data->buffer + (buf_line * lcd_data->width) + col; + + return buf; +} + +static void lcd_clear_buffer_line(struct lcd *lcd_data, int line) +{ + char *buf; + + BUG_ON(line >= lcd_data->height); + + buf = lcd_buf_pointer(lcd_data, line, 0); + memset(buf, ASCII_SPACE, lcd_data->width); +} + +static void lcd_clear_buffer(struct lcd *lcd_data) +{ + memset(lcd_data->buffer, ASCII_SPACE, + lcd_data->width * lcd_data->height); + lcd_data->cursor_line = 0; + lcd_data->cursor_col = 0; + lcd_data->top_line = 0; +} + +static void lcd_reprint_one_line(struct lcd *lcd_data, u8 line) +{ + char *buf = lcd_buf_pointer(lcd_data, line, 0); + + lcd_cmd_set_cursor(lcd_data, line, 0); + i2c_master_send(lcd_data->client, buf, lcd_data->width); +} + +static void lcd_print_top_n_lines(struct lcd *lcd_data, u8 lines) +{ + unsigned int disp_line = 0; + + while (disp_line < lines) + lcd_reprint_one_line(lcd_data, disp_line++); +} + +static void lcd_add_char_at_cursor(struct lcd *lcd_data, char val) +{ + char *buf; + + buf = lcd_buf_pointer(lcd_data, lcd_data->cursor_line, + lcd_data->cursor_col); + + *buf = val; + + if (lcd_data->cursor_col < (lcd_data->width - 1)) + lcd_data->cursor_col++; +} + +static void lcd_crlf(struct lcd *lcd_data) +{ + if (lcd_data->cursor_line < (lcd_data->height - 1)) { + /* Next line is blank, carriage return to beginning 
of line. */ + lcd_data->cursor_line++; + if (lcd_data->cursor_line >= lcd_data->height) + lcd_data->cursor_line = 0; + + } else { + /* Display is full. Scroll up one line. */ + lcd_data->top_line++; + if (lcd_data->top_line >= lcd_data->height) + lcd_data->top_line = 0; + + lcd_cmd_clear_screen(lcd_data); + lcd_clear_buffer_line(lcd_data, lcd_data->cursor_line); + lcd_print_top_n_lines(lcd_data, lcd_data->height); + } + + lcd_cmd_set_cursor(lcd_data, lcd_data->height - 1, 0); + lcd_data->cursor_col = 0; +} + +static void lcd_backspace(struct lcd *lcd_data) +{ + if (lcd_data->cursor_col > 0) { + lcd_cmd_backspace(lcd_data); + lcd_data->cursor_col--; + } +} + +static int lcd_write(struct tty_struct *tty, const unsigned char *buf, + unsigned int count) +{ + struct lcd *lcd_data = tty->driver_data; + int buf_i = 0, left; + char val; + +#ifdef DEBUG + char *dbgbuf = kzalloc(count + 1, GFP_KERNEL); + strncpy(dbgbuf, buf, count); + pr_debug("\n%s: count=%d buf[0]=%02x --->%s<---\n", __func__, count, + buf[0], dbgbuf); +#endif /* DEBUG */ + + if (count == 0) { +#ifdef DEBUG + kfree(dbgbuf); +#endif /* DEBUG */ + return 0; + } + + while (buf_i < count) { + left = count - buf_i; + + /* process displayable chars */ + if ((0x20 <= buf[buf_i]) && (buf[buf_i] <= 0x7f)) { + while ((buf_i < count) && + ((0x20 <= buf[buf_i]) && (buf[buf_i] <= 0x7f))) { + val = lcd_translate_printable_char(buf[buf_i]); + lcd_add_char_at_cursor(lcd_data, val); + buf_i++; + } + + /* flush the line out to the display when we get to eol */ + lcd_reprint_one_line(lcd_data, lcd_data->cursor_line); + + /* + * ECMA-48 CSI sequences (from console_codes man page) + * + * ESC [ 2 J : erase whole display. + * ESC [ 2 K : erase whole line. 
+ */ + } else if (buf[buf_i] == ASCII_ESC) { + if ((left >= 4) && + (buf[buf_i + 1] == '[') && + (buf[buf_i + 2] == '2') && + (buf[buf_i + 3] == 'J')) { + pr_debug("ESC [2J = clear screan\n"); + lcd_clear_buffer(lcd_data); + lcd_cmd_clear_screen(lcd_data); + buf_i += 4; + + } else if ((left >= 4) && + (buf[buf_i + 1] == '[') && + (buf[buf_i + 2] == '2') && + (buf[buf_i + 3] == 'K')) { + pr_debug("ESC [2K = clear line\n"); + lcd_clear_buffer_line(lcd_data, lcd_data->cursor_line); + lcd_reprint_one_line(lcd_data, lcd_data->cursor_line); + lcd_cmd_set_cursor(lcd_data, lcd_data->cursor_line, 0); + lcd_data->cursor_col = 0; + buf_i += 4; + + } else { + pr_debug("Unsupported escape sequence\n"); + buf_i++; + } + + } else if ((left >= 2) && + (buf[buf_i] == ASCII_CR) && (buf[buf_i + 1] == ASCII_LF)) { + pr_debug("ASCII_CR/LF\n"); + lcd_crlf(lcd_data); + buf_i += 2; + + } else if ((left >= 1) && (buf[buf_i] == ASCII_CR)) { + pr_debug("ASCII_CR\n"); + lcd_crlf(lcd_data); + buf_i++; + + } else if ((left >= 1) && (buf[buf_i] == ASCII_LF)) { + pr_debug("ASCII_LF\n"); + lcd_crlf(lcd_data); + buf_i++; + + } else if ((left >= 1) && (buf[buf_i] == ASCII_BS)) { + pr_debug("ASCII_BS\n"); + lcd_backspace(lcd_data); + buf_i++; + + } else { + pr_debug("%s - Unsupported command 0x%02x\n", __func__, buf[buf_i]); + buf_i++; + } + } + +#ifdef DEBUG + kfree(dbgbuf); +#endif /* DEBUG */ + return count; +} + +static ssize_t brightness_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct lcd *lcd_data = dev_get_drvdata(dev); + + return scnprintf(buf, 2, "%d\n", lcd_data->brightness); +} + +static ssize_t brightness_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct lcd *lcd_data = dev_get_drvdata(dev); + int ret, brightness; + + ret = sscanf(buf, "%d", &brightness); + if (ret != 1) + return -EINVAL; + + if ((brightness < LCD_BRIGHTNESS_MIN) || + (brightness > LCD_BRIGHTNESS_MAX)) { + dev_err(lcd_data->dev, "out of 
range (%d to %d)\n", + LCD_BRIGHTNESS_MIN, LCD_BRIGHTNESS_MAX); + return -EINVAL; + } + + lcd_data->brightness = brightness; + lcd_cmd_backlight_brightness(lcd_data, brightness); + + return count; +} +static DEVICE_ATTR(brightness, S_IRUGO | S_IWUSR, brightness_show, brightness_store); + +static struct attribute *lcd_attrs[] = { + &dev_attr_brightness.attr, + NULL, +}; + +static struct attribute_group lcd_attr_group = { + .attrs = lcd_attrs, +}; + +static int lcd_install(struct tty_driver *driver, struct tty_struct *tty) +{ + struct lcd *lcd_data; + + lcd_data = &lcd_data_static[tty->index]; + if (lcd_data == NULL) + return -ENODEV; + + tty->driver_data = lcd_data; + + return tty_port_install(&lcd_data->port, driver, tty); +} + +static int lcd_open(struct tty_struct *tty, struct file *filp) +{ + struct lcd *lcd_data = tty->driver_data; + unsigned long flags; + + tty->driver_data = lcd_data; + spin_lock_irqsave(&lcd_data->port.lock, flags); + lcd_data->port.count++; + spin_unlock_irqrestore(&lcd_data->port.lock, flags); + tty_port_tty_set(&lcd_data->port, tty); + + return 0; +} + +static void lcd_close(struct tty_struct *tty, struct file *filp) +{ + struct lcd *lcd_data = tty->driver_data; + unsigned long flags; + bool last; + + spin_lock_irqsave(&lcd_data->port.lock, flags); + --lcd_data->port.count; + last = (lcd_data->port.count == 0); + spin_unlock_irqrestore(&lcd_data->port.lock, flags); + if (last) + tty_port_tty_set(&lcd_data->port, NULL); +} + +static unsigned int lcd_write_room(struct tty_struct *tty) +{ + struct lcd *lcd_data = tty->driver_data; + + return lcd_data->height * lcd_data->width; +} + +static const struct tty_operations lcd_ops = { + .install = lcd_install, + .open = lcd_open, + .close = lcd_close, + .write = lcd_write, + .write_room = lcd_write_room, +}; + +static int lcd_probe(struct i2c_client *client) +{ + struct device_node *np = client->dev.of_node; + struct lcd *lcd_data; + struct tty_driver *lcd_tty_driver; + unsigned int width = 0, 
height = 0, i, brightness = 0; + char *buffer; + int ret = -ENOMEM; + + of_property_read_u32(np, "height", &height); + of_property_read_u32(np, "width", &width); + if ((width == 0) || (height == 0)) { + dev_err(&client->dev, + "Need to specify lcd width/height in device tree\n"); + ret = -EINVAL; + goto err_devtree; + } + + of_property_read_u32(np, "brightness", &brightness); + if ((brightness < LCD_BRIGHTNESS_MIN) || + (brightness > LCD_BRIGHTNESS_MAX)) { + dev_info(&client->dev, + "lcd brighness not set or out of range, defaulting to maximum\n"); + brightness = LCD_BRIGHTNESS_MAX; + } + + for (i = 0 ; i < MAX_LCDS ; i++) + if (lcd_data_static[i].client == NULL) + break; + if (i >= MAX_LCDS) { + ret = -ENODEV; + dev_warn(&client->dev, + "More than %d I2C LCD displays found. Giving up.\n", + MAX_LCDS); + goto err_devtree; + } + lcd_data = &lcd_data_static[i]; + + buffer = kzalloc(height * width, GFP_KERNEL); + if (!buffer) + goto err_devtree; + + i2c_set_clientdata(client, lcd_data); + + lcd_data->client = client; + lcd_data->dev = &client->dev; + lcd_data->height = height; + lcd_data->width = width; + lcd_data->buffer = buffer; + lcd_data->brightness = brightness; + + dev_set_drvdata(&client->dev, lcd_data); + tty_port_init(&lcd_data->port); + lcd_tty_driver = tty_alloc_driver(MAX_LCDS, 0); + if (!lcd_tty_driver) + goto err_driver; + + lcd_tty_driver->driver_name = DRV_NAME; + lcd_tty_driver->name = DEV_NAME; + lcd_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + lcd_tty_driver->subtype = SERIAL_TYPE_NORMAL; + lcd_tty_driver->init_termios = tty_std_termios; + tty_set_operations(lcd_tty_driver, &lcd_ops); + + ret = tty_register_driver(lcd_tty_driver); + if (ret) + goto err_register; + + lcd_data->lcd_tty_driver = lcd_tty_driver; + + lcd_clear_buffer(lcd_data); + lcd_load_custom_fonts(lcd_data); + lcd_cmd_display_on(lcd_data); + lcd_cmd_backlight_brightness(lcd_data, brightness); + lcd_cmd_clear_screen(lcd_data); + + ret = sysfs_create_group(&lcd_data->dev->kobj, 
&lcd_attr_group); + if (ret) { + dev_err(lcd_data->dev, "Can't create sysfs attrs for lcd\n"); + return ret; + } + + dev_info(&client->dev, "LCD driver initialized\n"); + + return 0; + +err_register: + tty_driver_kref_put(lcd_data->lcd_tty_driver); +err_driver: + kfree(buffer); +err_devtree: + return ret; +} + +static void __exit lcd_remove(struct i2c_client *client) +{ + struct lcd *lcd_data = i2c_get_clientdata(client); + + lcd_cmd_display_off(lcd_data); + + sysfs_remove_group(&lcd_data->dev->kobj, &lcd_attr_group); + tty_unregister_driver(lcd_data->lcd_tty_driver); + tty_driver_kref_put(lcd_data->lcd_tty_driver); + kfree(lcd_data->buffer); +} + +static const struct of_device_id lcd_of_match[] = { + { .compatible = "newhaven,nhd-0216k3z-nsw-bbw", }, + {}, +}; + +static const struct i2c_device_id lcd_id[] = { + { DRV_NAME, 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, lcd_id); + +static struct i2c_driver lcd_i2c_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = lcd_of_match, + }, + .probe = lcd_probe, + .remove = lcd_remove, + .id_table = lcd_id, +}; + +static int __init lcd_init(void) +{ + return i2c_add_driver(&lcd_i2c_driver); +} +subsys_initcall(lcd_init); + +static void __exit lcd_exit(void) +{ + i2c_del_driver(&lcd_i2c_driver); +} +module_exit(lcd_exit); + +MODULE_DESCRIPTION("LCD 2x16"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 28e4beeabf8f3..2da0a96bb4562 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1134,7 +1134,7 @@ config SERIAL_ALTERA_UART_MAXPORTS config SERIAL_ALTERA_UART_BAUDRATE int "Default baudrate for Altera UART ports" depends on SERIAL_ALTERA_UART - default 115200 + default 921600 help This setting lets you define what the default baudrate is for the Altera UART ports. 
The usual default varies from board to board, diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index bd4c788f03bc1..6f3e0c501a5f7 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -1780,6 +1780,7 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg, struct dwc2_hsotg_ep *ep; int ret; bool halted; + u32 otgctl; u32 recip; u32 wValue; u32 wIndex; @@ -1809,6 +1810,39 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg, hsotg->test_mode = wIndex >> 8; break; + case USB_DEVICE_B_HNP_ENABLE: + if (!hsotg->params.otg_caps.hnp_support) { + dev_info(hsotg->dev, "HNP requested but not supported\n"); + return -ENOENT; + } else { + otgctl = dwc2_readl(hsotg, GOTGCTL); + if (set){ + hsotg->gadget.b_hnp_enable = 1; + otgctl |= GOTGCTL_DEVHNPEN; + } + else{ + hsotg->gadget.b_hnp_enable = 0; + otgctl &= ~GOTGCTL_DEVHNPEN; + } + + dwc2_writel(hsotg, otgctl, GOTGCTL); + dev_info(hsotg->dev, "HNP enabled\n"); + break; + } + case USB_DEVICE_A_HNP_SUPPORT: + if (set) + hsotg->gadget.a_hnp_support = 1; + else + hsotg->gadget.a_hnp_support = 0; + dev_info(hsotg->dev,"a_hnp_support processing\n"); + break; + case USB_DEVICE_A_ALT_HNP_SUPPORT: + if (set) + hsotg->gadget.a_alt_hnp_support = 1; + else + hsotg->gadget.a_alt_hnp_support = 0; + dev_info(hsotg->dev,"a_alt_hnp_support processing\n"); + break; default: return -ENOENT; } diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 7820d6815bedd..c80b8611605bc 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1809,6 +1809,9 @@ static void dwc3_get_properties(struct dwc3 *dwc) dwc->dis_split_quirk = device_property_read_bool(dev, "snps,dis-split-quirk"); + dwc->dma_set_40_bit_mask_quirk = device_property_read_bool(dev, + "snps,dma_set_40_bit_mask_quirk"); + dwc->lpm_nyet_threshold = lpm_nyet_threshold; dwc->tx_de_emphasis = tx_de_emphasis; @@ -2213,7 +2216,11 @@ static int dwc3_probe(struct platform_device *pdev) if 
(!dwc->sysdev_is_parent && DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) { - ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64)); + if(dwc->dma_set_40_bit_mask_quirk) + ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(40)); + else + ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64)); + if (ret) goto err_disable_clks; } diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index f288d88cd1051..cfce5cff810e9 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1157,6 +1157,7 @@ struct dwc3_scratchpad_array { * @suspended: set to track suspend event due to U3/L2. * @susphy_state: state of DWC3_GUSB2PHYCFG_SUSPHY + DWC3_GUSB3PIPECTL_SUSPHY * before PM suspend. + * @dma_set_40_bit_mask_quirk: set if we want to set dma bit mask to 40 bits. * @imod_interval: set the interrupt moderation interval in 250ns * increments or 0 to disable. * @max_cfg_eps: current max number of IN eps used across all USB configs. @@ -1390,6 +1391,7 @@ struct dwc3 { unsigned wakeup_configured:1; unsigned suspended:1; unsigned susphy_state:1; + unsigned dma_set_40_bit_mask_quirk:1; u16 imod_interval; diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index be7be00ecb349..a90618f1ae676 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -174,6 +174,7 @@ static const struct of_device_id of_dwc3_simple_match[] = { { .compatible = "hisilicon,hi3670-dwc3" }, { .compatible = "hisilicon,hi3798mv200-dwc3" }, { .compatible = "intel,keembay-dwc3" }, + { .compatible = "intel,agilex5-dwc3" }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, of_dwc3_simple_match); diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index e6660472501e4..4b66ed6395cf1 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -265,6 +265,9 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s if 
(device_property_read_bool(tmpdev, "xhci-skip-phy-init-quirk")) xhci->quirks |= XHCI_SKIP_PHY_INIT; + if (device_property_read_bool(tmpdev, "dma_set_40_bit_mask_quirk")) + ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(40)); + device_property_read_u32(tmpdev, "imod-interval-ns", &xhci->imod_interval); } diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index de035071fedb1..6ed328d473d89 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -61,6 +61,31 @@ config FB_MACMODES tristate depends on FB +comment "Frame buffer hardware drivers" + depends on FB + +config FB_ALTERA_VIP_FB2 + tristate "Altera VIP Frame Buffer II framebuffer support" + depends on FB + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + help + This driver supports the Altera Video and Image Processing(VIP) + Frame Buffer II. This core driver only supports Arria 10 HW and newer + families of FPGA + +config FB_ALTERA_VIP_FB2_PLAT + tristate "Altera VIP Frame Buffer II framebuffer support OF Device" + depends on FB && OF + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + help + This driver supports the Altera Video and Image Processing(VIP) + Frame Buffer II. 
This driver only supports Arria 10 HW + and newer families of FPGA on the OF Device + config FB_GRVGA tristate "Aeroflex Gaisler framebuffer support" depends on FB && SPARC diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile index b3d12f977c06b..b5d94f1b44b80 100644 --- a/drivers/video/fbdev/Makefile +++ b/drivers/video/fbdev/Makefile @@ -12,6 +12,9 @@ obj-$(CONFIG_FB_SBUS) += sbuslib.o obj-$(CONFIG_FB_WMT_GE_ROPS) += wmt_ge_rops.o # Hardware specific drivers go first +obj-$(CONFIG_FB_ALTERA_VIP_FB2) += altvipfb2.o +obj-$(CONFIG_FB_ALTERA_VIP_FB2_PLAT) += altvipfb2_drv.o +altvipfb2_drv-objs := altvipfb2-plat.o altvipfb2.o obj-$(CONFIG_FB_AMIGA) += amifb.o c2p_planar.o obj-$(CONFIG_FB_ARC) += arcfb.o obj-$(CONFIG_FB_CLPS711X) += clps711x-fb.o diff --git a/drivers/video/fbdev/altvipfb2-plat.c b/drivers/video/fbdev/altvipfb2-plat.c new file mode 100644 index 0000000000000..251a7928a0c8b --- /dev/null +++ b/drivers/video/fbdev/altvipfb2-plat.c @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2017 Intel Corporation + * + * Intel Video and Image Processing(VIP) Frame Buffer II driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "altvipfb2.h" +#include +#include +#include +#include +#include + +static int altvipfb2_of_setup(struct altvipfb2_priv *fbdev, + struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + int ret; + int mem_word_width; + u32 bits_per_color; + + ret = of_property_read_u32(np, "altr,max-width", &fbdev->info.var.xres); + if (ret) { + dev_err(&pdev->dev, + "Missing required parameter 'altr,max-width'"); + return ret; + } + fbdev->info.var.xres_virtual = fbdev->info.var.xres, + + ret = of_property_read_u32(np, "altr,max-height", + &fbdev->info.var.yres); + if (ret) { + dev_err(&pdev->dev, + "Missing required parameter 'altr,max-height'"); + return ret; + } + fbdev->info.var.yres_virtual = fbdev->info.var.yres; + + ret = of_property_read_u32(np, "altr,bits-per-symbol", &bits_per_color); + if (ret) { + dev_err(&pdev->dev, + "Missing required parameter 'altr,bits-per-symbol'"); + return ret; + } + if (bits_per_color != 8) { + dev_err(&pdev->dev, + "bits-per-color is set to %i. Currently only 8 is supported.", + bits_per_color); + return -ENODEV; + } + fbdev->info.var.bits_per_pixel = bits_per_color * BYTES_PER_PIXEL; + + ret = of_property_read_u32(np, "altr,mem-port-width", &mem_word_width); + if (ret) { + dev_err(&pdev->dev, + "Missing required parameter 'altr,mem-port-width '"); + return ret; + } + if (!(mem_word_width >= 32 && mem_word_width % 32 == 0)) { + dev_err(&pdev->dev, + "mem-word-width is set to %i. 
must be >= 32 and multiple of 32.", + mem_word_width); + return -ENODEV; + } + + fbdev->info.fix.line_length = (fbdev->info.var.xres * + (fbdev->info.var.bits_per_pixel >> 3)); + fbdev->info.fix.smem_len = + fbdev->info.fix.line_length * fbdev->info.var.yres; + + return 0; +} + +static int altvipfb2_plat_probe(struct platform_device *pdev) +{ + int retval; + + struct device *dev = &pdev->dev; + struct resource *reg_res; + struct altvipfb2_priv *fbdev; + + fbdev = devm_kzalloc(dev, sizeof(*fbdev), GFP_KERNEL); + if (!fbdev) + return -ENOMEM; + + reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!reg_res) + return -ENODEV; + + fbdev->base = devm_ioremap_resource(dev, reg_res); + if (IS_ERR(fbdev->base)) { + dev_err(dev, "devm_ioremap_resource failed\n"); + retval = PTR_ERR(fbdev->base); + return -ENOMEM; + } + + altvipfb2_of_setup(fbdev, pdev); + + platform_set_drvdata(pdev, fbdev); + + return altvipfb2_probe(dev, fbdev->base); +} + +static int altvipfb2_plat_remove(struct platform_device *pdev) +{ + return altvipfb2_remove(&pdev->dev); +} + +static const struct of_device_id altvipfb2_match[] = { + { .compatible = "altr,vip-frame-buffer-2.0" }, + {}, +}; +MODULE_DEVICE_TABLE(of, altvipfb2_match); + +static struct platform_driver altvipfb2_driver = { + .probe = altvipfb2_plat_probe, + .remove = altvipfb2_plat_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = altvipfb2_match, + }, +}; + +module_platform_driver(altvipfb2_driver); diff --git a/drivers/video/fbdev/altvipfb2.c b/drivers/video/fbdev/altvipfb2.c new file mode 100644 index 0000000000000..ef95a88ce2eaa --- /dev/null +++ b/drivers/video/fbdev/altvipfb2.c @@ -0,0 +1,188 @@ +/* + * Copyright (C) 2017 Intel Corporation. + * + * Intel Video and Image Processing(VIP) Frame Buffer II driver. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + * This is based on a driver made by Thomas Chou and + * Walter Goossens This driver supports the Intel VIP + * Frame Buffer II component. A large portion of this file was derived from + * altvipfb2.c which was created by Chris Rauer . + * More info on the hardware can be found in the Intel Video and Image + * Processing Suite User Guide at this address + * http://www.altera.com/literature/ug/ug_vip.pdf. + * + */ + +#include "altvipfb2.h" +#include +#include +#include +#include +#include +#include + +static int altvipfb2_setcolreg(unsigned int regno, unsigned int red, + unsigned int green, unsigned int blue, + unsigned int transp, struct fb_info *info) +{ + /* + * Set a single color register. The values supplied have a 32 bit + * magnitude. + * Return != 0 for invalid regno. 
+ */ + + if (regno > 255) + return 1; + + red >>= 8; + green >>= 8; + blue >>= 8; + + if (regno < 255) { + ((u32 *)info->pseudo_palette)[regno] = + ((red & 255) << 16) | ((green & 255) << 8) | (blue & 255); + } + + return 0; +} + +static struct fb_ops altvipfb2_ops = { + .owner = THIS_MODULE, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_setcolreg = altvipfb2_setcolreg, +}; + +static void altvipfb2_start_hw(void __iomem *base, struct fb_info *info) +{ + /* + * The frameinfo variable has to correspond to the size of the VIP Suite + * Frame Reader register 7 which will determine the maximum size used + * in this frameinfo + */ + u32 frameinfo = + readl(base + ALTVIPFB2_FRAME_READER) & 0x00ffffff; + + writel(frameinfo, base + ALTVIPFB2_FRAME_INFO); + + writel(info->fix.smem_start, base + ALTVIPFB2_FRAME_START); + /* Finally set the control register to 1 to start streaming */ + writel(1, base + ALTVIPFB2_CONTROL); +} + +static void altvipfb2_disable_hw(void __iomem *base) +{ + /* set the control register to 0 to stop streaming */ + writel(0, base + ALTVIPFB2_CONTROL); +} + +static void altvipfb2_setup_fb_info(struct altvipfb2_priv *fbpriv) +{ + struct fb_info *info = &fbpriv->info; + + strncpy(info->fix.id, DRIVER_NAME, sizeof(info->fix.id)); + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.visual = FB_VISUAL_TRUECOLOR; + info->fix.accel = FB_ACCEL_NONE; + + info->fbops = &altvipfb2_ops; + info->var.activate = FB_ACTIVATE_NOW; + info->var.height = -1; + info->var.width = -1; + info->var.vmode = FB_VMODE_NONINTERLACED; + + info->var.pixclock = 6734; + info->var.left_margin = 148; + info->var.right_margin = 88; + info->var.upper_margin = 36; + info->var.lower_margin = 4; + info->var.hsync_len = 44; + info->var.vsync_len = 5; + + /* settings for 32bit pixels */ + info->var.red.offset = 16; + info->var.red.length = 8; + info->var.red.msb_right = 0; + info->var.green.offset = 8; + info->var.green.length = 8; + 
info->var.green.msb_right = 0; + info->var.blue.offset = 0; + info->var.blue.length = 8; + info->var.blue.msb_right = 0; + info->pseudo_palette = fbpriv->pseudo_palette; + + info->flags = FBINFO_FLAG_DEFAULT; +} + +int altvipfb2_probe(struct device *dev, void __iomem *base) +{ + int retval; + void *fbmem_virt; + struct altvipfb2_priv *fbpriv = dev_get_drvdata(dev); + + fbmem_virt = dma_alloc_coherent(NULL, + fbpriv->info.fix.smem_len, + (void *)&fbpriv->info.fix.smem_start, + GFP_KERNEL); + if (!fbmem_virt) { + dev_err(dev, + "altvipfb2: unable to allocate %d Bytes fb memory\n", + fbpriv->info.fix.smem_len); + return -ENOMEM; + } + + fbpriv->info.screen_base = (char *)fbmem_virt; + + retval = fb_alloc_cmap(&fbpriv->info.cmap, PALETTE_SIZE, 0); + if (retval < 0) + goto err_dma_free; + + altvipfb2_setup_fb_info(fbpriv); + + altvipfb2_start_hw(base, &fbpriv->info); + + dev_info(dev, "fb%d: %s frame buffer device at 0x%x+0x%x\n", + fbpriv->info.node, fbpriv->info.fix.id, + (unsigned int)fbpriv->info.fix.smem_start, + fbpriv->info.fix.smem_len); + + return register_framebuffer(&fbpriv->info); + +err_dma_free: + fb_dealloc_cmap(&fbpriv->info.cmap); + dma_free_coherent(NULL, fbpriv->info.fix.smem_len, fbmem_virt, + fbpriv->info.fix.smem_start); + return retval; +} +EXPORT_SYMBOL_GPL(altvipfb2_probe); + +int altvipfb2_remove(struct device *dev) +{ + struct altvipfb2_priv *fbpriv = dev_get_drvdata(dev); + + altvipfb2_disable_hw(fbpriv->base); + dma_free_coherent(NULL, fbpriv->info.fix.smem_len, + (void *)&fbpriv->info.screen_base, + fbpriv->info.fix.smem_start); + + unregister_framebuffer(&fbpriv->info); + return 0; +} +EXPORT_SYMBOL_GPL(altvipfb2_remove); + +MODULE_AUTHOR("Ong Hean Loong "); +MODULE_DESCRIPTION("Altera VIP Frame Buffer II driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/video/fbdev/altvipfb2.h b/drivers/video/fbdev/altvipfb2.h new file mode 100644 index 0000000000000..ac6145304c9c1 --- /dev/null +++ b/drivers/video/fbdev/altvipfb2.h @@ -0,0 +1,48 
@@ +/* + * Copyright (C) 2017 Intel Corporation. + * + * Intel Video and Image Processing(VIP) Frame Buffer II driver. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef _ALTVIPFB2_H +#define _ALTVIPFB2_H +#include +#include + +#define DRIVER_NAME "altvipfb2" +#define PALETTE_SIZE 256 +#define BYTES_PER_PIXEL 4 + +/* control registers */ +#define ALTVIPFB2_CONTROL 0 +#define ALTVIPFB2_STATUS 0x4 +#define ALTVIPFB2_INTERRUPT 0x8 +#define ALTVIPFB2_FRAME_COUNTER 0xC +#define ALTVIPFB2_FRAME_DROP 0x10 +#define ALTVIPFB2_FRAME_INFO 0x14 +#define ALTVIPFB2_FRAME_START 0x18 +#define ALTVIPFB2_FRAME_READER 0x1C + +int altvipfb2_probe(struct device *dev, void __iomem *base); +int altvipfb2_remove(struct device *dev); + +struct altvipfb2_priv { + struct fb_info info; + void __iomem *base; + int irq_base; + u32 pseudo_palette[PALETTE_SIZE]; +}; + +#endif /* _ALTVIPFB2_H */ diff --git a/include/dt-bindings/clock/agilex-clock.h b/include/dt-bindings/clock/agilex-clock.h index 06feca07e08e1..f751aad4dafc8 100644 --- a/include/dt-bindings/clock/agilex-clock.h +++ b/include/dt-bindings/clock/agilex-clock.h @@ -44,29 +44,28 @@ /* Gate clocks */ #define AGILEX_MPU_CLK 30 -#define AGILEX_MPU_L2RAM_CLK 31 -#define AGILEX_MPU_PERIPH_CLK 32 -#define AGILEX_L4_MAIN_CLK 33 -#define AGILEX_L4_MP_CLK 34 -#define AGILEX_L4_SP_CLK 35 -#define AGILEX_CS_AT_CLK 36 -#define AGILEX_CS_TRACE_CLK 37 -#define AGILEX_CS_PDBG_CLK 38 -#define AGILEX_CS_TIMER_CLK 39 
-#define AGILEX_S2F_USER0_CLK 40 -#define AGILEX_EMAC0_CLK 41 -#define AGILEX_EMAC1_CLK 43 -#define AGILEX_EMAC2_CLK 44 -#define AGILEX_EMAC_PTP_CLK 45 -#define AGILEX_GPIO_DB_CLK 46 -#define AGILEX_NAND_CLK 47 -#define AGILEX_PSI_REF_CLK 48 -#define AGILEX_S2F_USER1_CLK 49 -#define AGILEX_SDMMC_CLK 50 -#define AGILEX_SPI_M_CLK 51 -#define AGILEX_USB_CLK 52 -#define AGILEX_NAND_X_CLK 53 -#define AGILEX_NAND_ECC_CLK 54 -#define AGILEX_NUM_CLKS 55 +#define AGILEX_MPU_PERIPH_CLK 31 +#define AGILEX_L4_MAIN_CLK 32 +#define AGILEX_L4_MP_CLK 33 +#define AGILEX_L4_SP_CLK 34 +#define AGILEX_CS_AT_CLK 35 +#define AGILEX_CS_TRACE_CLK 36 +#define AGILEX_CS_PDBG_CLK 37 +#define AGILEX_CS_TIMER_CLK 38 +#define AGILEX_S2F_USER0_CLK 39 +#define AGILEX_EMAC0_CLK 40 +#define AGILEX_EMAC1_CLK 41 +#define AGILEX_EMAC2_CLK 42 +#define AGILEX_EMAC_PTP_CLK 43 +#define AGILEX_GPIO_DB_CLK 44 +#define AGILEX_NAND_CLK 45 +#define AGILEX_PSI_REF_CLK 46 +#define AGILEX_S2F_USER1_CLK 47 +#define AGILEX_SDMMC_CLK 48 +#define AGILEX_SPI_M_CLK 49 +#define AGILEX_USB_CLK 50 +#define AGILEX_NAND_X_CLK 51 +#define AGILEX_NAND_ECC_CLK 52 +#define AGILEX_NUM_CLKS 53 #endif /* __AGILEX_CLOCK_H */ diff --git a/include/dt-bindings/clock/agilex5-clock.h b/include/dt-bindings/clock/agilex5-clock.h new file mode 100755 index 0000000000000..8ddb96407eacd --- /dev/null +++ b/include/dt-bindings/clock/agilex5-clock.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2022, Intel Corporation + */ + +#ifndef __AGILEX5_CLOCK_H +#define __AGILEX5_CLOCK_H + +/* fixed rate clocks */ +#define AGILEX5_OSC1 0 +#define AGILEX5_CB_INTOSC_HS_DIV2_CLK 1 +#define AGILEX5_CB_INTOSC_LS_CLK 2 +#define AGILEX5_F2S_FREE_CLK 3 + +/* PLL clocks */ +#define AGILEX5_MAIN_PLL_CLK 4 +#define AGILEX5_MAIN_PLL_C0_CLK 5 +#define AGILEX5_MAIN_PLL_C1_CLK 6 +#define AGILEX5_MAIN_PLL_C2_CLK 7 +#define AGILEX5_MAIN_PLL_C3_CLK 8 +#define AGILEX5_PERIPH_PLL_CLK 9 +#define AGILEX5_PERIPH_PLL_C0_CLK 10 +#define 
AGILEX5_PERIPH_PLL_C1_CLK 11 +#define AGILEX5_PERIPH_PLL_C2_CLK 12 +#define AGILEX5_PERIPH_PLL_C3_CLK 13 +#define AGILEX5_CORE0_FREE_CLK 14 +#define AGILEX5_CORE1_FREE_CLK 15 +#define AGILEX5_CORE2_FREE_CLK 16 +#define AGILEX5_CORE3_FREE_CLK 17 +#define AGILEX5_DSU_FREE_CLK 18 +#define AGILEX5_BOOT_CLK 19 + +/* fixed factor clocks */ +#define AGILEX5_L3_MAIN_FREE_CLK 20 +#define AGILEX5_NOC_FREE_CLK 21 +#define AGILEX5_S2F_USR0_CLK 22 +#define AGILEX5_NOC_CLK 23 +#define AGILEX5_EMAC_A_FREE_CLK 24 +#define AGILEX5_EMAC_B_FREE_CLK 25 +#define AGILEX5_EMAC_PTP_FREE_CLK 26 +#define AGILEX5_GPIO_DB_FREE_CLK 27 +#define AGILEX5_S2F_USER0_FREE_CLK 28 +#define AGILEX5_S2F_USER1_FREE_CLK 29 +#define AGILEX5_PSI_REF_FREE_CLK 30 +#define AGILEX5_USB31_FREE_CLK 31 + +/* Gate clocks */ +#define AGILEX5_CORE0_CLK 32 +#define AGILEX5_CORE1_CLK 33 +#define AGILEX5_CORE2_CLK 34 +#define AGILEX5_CORE3_CLK 35 +#define AGILEX5_MPU_CLK 36 +#define AGILEX5_MPU_PERIPH_CLK 37 +#define AGILEX5_MPU_CCU_CLK 38 +#define AGILEX5_L4_MAIN_CLK 39 +#define AGILEX5_L4_MP_CLK 40 +#define AGILEX5_L4_SYS_FREE_CLK 41 +#define AGILEX5_L4_SP_CLK 42 +#define AGILEX5_CS_AT_CLK 43 +#define AGILEX5_CS_TRACE_CLK 44 +#define AGILEX5_CS_PDBG_CLK 45 +#define AGILEX5_EMAC1_CLK 47 +#define AGILEX5_EMAC2_CLK 48 +#define AGILEX5_EMAC_PTP_CLK 49 +#define AGILEX5_GPIO_DB_CLK 50 +#define AGILEX5_S2F_USER0_CLK 51 +#define AGILEX5_S2F_USER1_CLK 52 +#define AGILEX5_PSI_REF_CLK 53 +#define AGILEX5_USB31_SUSPEND_CLK 54 +#define AGILEX5_EMAC0_CLK 46 +#define AGILEX5_USB31_BUS_CLK_EARLY 55 +#define AGILEX5_USB2OTG_HCLK 56 +#define AGILEX5_SPIM_0_CLK 57 +#define AGILEX5_SPIM_1_CLK 58 +#define AGILEX5_SPIS_0_CLK 59 +#define AGILEX5_SPIS_1_CLK 60 +#define AGILEX5_DMA_CORE_CLK 61 +#define AGILEX5_DMA_HS_CLK 62 +#define AGILEX5_I3C_0_CORE_CLK 63 +#define AGILEX5_I3C_1_CORE_CLK 64 +#define AGILEX5_I2C_0_PCLK 65 +#define AGILEX5_I2C_1_PCLK 66 +#define AGILEX5_I2C_EMAC0_PCLK 67 +#define AGILEX5_I2C_EMAC1_PCLK 68 +#define 
AGILEX5_I2C_EMAC2_PCLK 69 +#define AGILEX5_UART_0_PCLK 70 +#define AGILEX5_UART_1_PCLK 71 +#define AGILEX5_SPTIMER_0_PCLK 72 +#define AGILEX5_SPTIMER_1_PCLK 73 +#define AGILEX5_DFI_CLK 74 +#define AGILEX5_NAND_NF_CLK 75 +#define AGILEX5_NAND_BCH_CLK 76 +#define AGILEX5_SDMMC_SDPHY_REG_CLK 77 +#define AGILEX5_SDMCLK 78 +#define AGILEX5_SOFTPHY_REG_PCLK 79 +#define AGILEX5_SOFTPHY_PHY_CLK 80 +#define AGILEX5_SOFTPHY_CTRL_CLK 81 +#define AGILEX5_NUM_CLKS 82 + +#endif /* __AGILEX5_CLOCK_H */ diff --git a/include/dt-bindings/reset/altr,rst-mgr-agilex5.h b/include/dt-bindings/reset/altr,rst-mgr-agilex5.h new file mode 100644 index 0000000000000..6fe3c0073c063 --- /dev/null +++ b/include/dt-bindings/reset/altr,rst-mgr-agilex5.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023 Intel Corporation. All rights reserved + * + */ + +#ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_AGILEX5_H +#define _DT_BINDINGS_RESET_ALTR_RST_MGR_AGILEX5_H + +/* PER0MODRST */ +#define EMAC0_RESET 32 +#define EMAC1_RESET 33 +#define EMAC2_RESET 34 +#define USB0_RESET 35 +#define USB1_RESET 36 +#define NAND_RESET 37 +#define SOFT_PHY_RESET 38 +#define SDMMC_RESET 39 +#define EMAC0_OCP_RESET 40 +#define EMAC1_OCP_RESET 41 +#define EMAC2_OCP_RESET 42 +#define USB0_OCP_RESET 43 +#define USB1_OCP_RESET 44 +#define NAND_OCP_RESET 45 +/* 46 is empty */ +#define SDMMC_OCP_RESET 47 +#define DMA_RESET 48 +#define SPIM0_RESET 49 +#define SPIM1_RESET 50 +#define SPIS0_RESET 51 +#define SPIS1_RESET 52 +#define DMA_OCP_RESET 53 +#define EMAC_PTP_RESET 54 +/* 55 is empty*/ +#define DMAIF0_RESET 56 +#define DMAIF1_RESET 57 +#define DMAIF2_RESET 58 +#define DMAIF3_RESET 59 +#define DMAIF4_RESET 60 +#define DMAIF5_RESET 61 +#define DMAIF6_RESET 62 +#define DMAIF7_RESET 63 + +/* PER1MODRST */ +#define WATCHDOG0_RESET 64 +#define WATCHDOG1_RESET 65 +#define WATCHDOG2_RESET 66 +#define WATCHDOG3_RESET 67 +#define L4SYSTIMER0_RESET 68 +#define L4SYSTIMER1_RESET 69 +#define 
SPTIMER0_RESET 70 +#define SPTIMER1_RESET 71 +#define I2C0_RESET 72 +#define I2C1_RESET 73 +#define I2C2_RESET 74 +#define I2C3_RESET 75 +#define I2C4_RESET 76 +#define I3C0_RESET 77 +#define I3C1_RESET 78 +/* 79 is empty */ +#define UART0_RESET 80 +#define UART1_RESET 81 +/* 82-87 is empty */ +#define GPIO0_RESET 88 +#define GPIO1_RESET 89 +#define WATCHDOG4_RESET 90 + +/* BRGMODRST */ +#define SOC2FPGA_RESET 96 +#define LWHPS2FPGA_RESET 97 +#define FPGA2SOC_RESET 98 +#define F2SSDRAM0_RESET 99 +/* 100-101 is empty */ +#define MPFE_RESET 102 + +/* DBGMODRST */ +#define DBG_RESET 128 + +#endif diff --git a/include/linux/altera_hwmutex.h b/include/linux/altera_hwmutex.h new file mode 100644 index 0000000000000..166502b379f6e --- /dev/null +++ b/include/linux/altera_hwmutex.h @@ -0,0 +1,41 @@ +/* + * Copyright Altera Corporation (C) 2013. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ +#ifndef _ALTERA_MUTEX_H +#define _ALTERA_MUTEX_H + +#include +#include + +struct altera_mutex { + struct list_head list; + struct platform_device *pdev; + struct mutex lock; + void __iomem *regs; + bool requested; +}; + +extern struct altera_mutex *altera_mutex_request(struct device_node *mutex_np); +extern int altera_mutex_free(struct altera_mutex *mutex); + +extern int altera_mutex_lock(struct altera_mutex *mutex, u16 owner, u16 value); + +extern int altera_mutex_trylock(struct altera_mutex *mutex, u16 owner, + u16 value); +extern int altera_mutex_unlock(struct altera_mutex *mutex, u16 owner); +extern int altera_mutex_owned(struct altera_mutex *mutex, u16 owner); +extern int altera_mutex_is_locked(struct altera_mutex *mutex); + +#endif /* _ALTERA_MUTEX_H */ diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index c846436b64593..98bfc607b0813 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -230,6 +230,7 @@ extern u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles); extern void clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec); +int get_ptp_clocksource_id(enum clocksource_ids *cs_id); /* * Don't call __clocksource_register_scale directly, use diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h index ee80ca4bb0d0c..b2d3fea4051d7 100644 --- a/include/linux/firmware/intel/stratix10-smc.h +++ b/include/linux/firmware/intel/stratix10-smc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2017-2018, Intel Corporation + * Copyright (C) 2017-2024, Intel Corporation */ #ifndef __STRATIX10_SMC_H @@ -47,6 +47,10 @@ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \ ARM_SMCCC_OWNER_SIP, (func_num)) +#define INTEL_SIP_SMC_ASYNC_VAL(func_name) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_SIP, (func_name)) + /** * Return values in 
INTEL_SIP_SMC_* call * @@ -62,18 +66,27 @@ * INTEL_SIP_SMC_STATUS_REJECTED: * Secure monitor software reject the service client's request. * + * INTEL_SIP_SMC_STATUS_NO_RESPONSE: + * Secure monitor software doesn't recieve any response for the + * service client's request yet. + * * INTEL_SIP_SMC_STATUS_ERROR: * There is error during the process of service request. * * INTEL_SIP_SMC_RSU_ERROR: * There is error during the process of remote status update request. + * + * INTEL_SIP_SMC_STATUS_NOT_SUPPORTED: + * Secure monitor software doesn't support the request */ #define INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF #define INTEL_SIP_SMC_STATUS_OK 0x0 #define INTEL_SIP_SMC_STATUS_BUSY 0x1 #define INTEL_SIP_SMC_STATUS_REJECTED 0x2 +#define INTEL_SIP_SMC_STATUS_NO_RESPONSE 0x3 #define INTEL_SIP_SMC_STATUS_ERROR 0x4 #define INTEL_SIP_SMC_RSU_ERROR 0x7 +#define INTEL_SIP_SMC_STATUS_NOT_SUPPORTED 0x8 /** * Request INTEL_SIP_SMC_FPGA_CONFIG_START @@ -424,6 +437,29 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_RSU_DCMF_STATUS \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_DCMF_STATUS) +/** + * Request INTEL_SIP_SMC_RSU_GET_DEVICE_INFO + * + * Sync call used by service driver at EL1 to query QSPI device info from FW + * + * Call register usage: + * a0 INTEL_SIP_SMC_RSU_GET_DEVICE_INFO + * a1-7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + * a1 erasesize0 | size0 + * a2 erasesize1 | size1 + * a3 erasesize2 | size2 + * a4 erasesize3 | size3 + * Or + * + * a0 INTEL_SIP_SMC_RSU_ERROR + */ +#define INTEL_SIP_SMC_FUNCID_RSU_GET_DEVICE_INFO 22 +#define INTEL_SIP_SMC_RSU_GET_DEVICE_INFO \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_GET_DEVICE_INFO) + /** * Request INTEL_SIP_SMC_SERVICE_COMPLETED * Sync call to check if the secure world have completed service request @@ -434,7 +470,8 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) * a1: this register is 
optional. If used, it is the physical address for * secure firmware to put output data * a2: this register is optional. If used, it is the size of output data - * a3-a7: not used + * a3: this register is optional. Set to 0x00004F4E for asynchronous mode + * a4-a7: not used * * Return status: * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR, @@ -509,6 +546,25 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_SVC_VERSION \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_SVC_FUNCID_VERSION) +/** + * Request INTEL_SIP_SMC_ATF_BUILD_VER + * + * Sync call used to query the ATF Build Version + * + * Call register usage: + * a0 INTEL_SIP_SMC_ATF_BUILD_VER + * a1-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK + * a1 Major + * a2 Minor + * a3 Patch + */ +#define INTEL_SIP_SMC_ATF_BUILD_VERSION 155 +#define INTEL_SIP_SMC_ATF_BUILD_VER \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_ATF_BUILD_VERSION) + /** * SMC call protocol for FPGA Crypto Service (FCS) * FUNCID starts from 90 @@ -582,7 +638,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) /** * Request INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE - * Sync call to send a signed certificate + * Async call to send a signed certificate * * Call register usage: * a0 INTEL_SIP_SMC_FCS_SEND_CERTIFICATE @@ -591,7 +647,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) * a3-a7 not used * * Return status: - * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FCS_REJECTED + * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REJECTED * a1-a3 not used */ #define INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE 93 @@ -604,20 +660,2499 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) * * Call register usage: * a0 INTEL_SIP_SMC_FCS_GET_PROVISION_DATA - * a1 the physical address for firmware to write structure of fuse and - * key hashes + * a1-a7 not used + * + * Return status: + * a0 
INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR or + * INTEL_SIP_SMC_STATUS_REJECTED + * a1-a3 not used + * + */ +#define INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA 94 +#define INTEL_SIP_SMC_FCS_GET_PROVISION_DATA \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA) + +/** + * Request INTEL_SIP_SMC_FCS_COUNTER_SET_PREAUTHORIZED + * Sync call to update counter value w/o signed certificate + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_COUNTER_SET_PREAUTHORIZED + * a1 counter type + * a2 counter value + * a3 test bit + * a3-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR + * a1-a4 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_COUNTER_SET_PREAUTHORIZED 95 +#define INTEL_SIP_SMC_FCS_COUNTER_SET_PREAUTHORIZED \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_COUNTER_SET_PREAUTHORIZED) + +/** + * Request INTEL_SIP_SMC_FCS_PSGSIGMA_TEARDOWN + * Sync call to tear down all previous black key provision sessions and to + * delete keys assicated with those sessions + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_PSGSIGMA_TEARDOWN + * a1 the session ID + * a2-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR or + * INTEL_SIP_SMC_STATUS_REJECTED + * a1 mailbox error if a0 is INTEL_SIP_SMC_STATUS_ERROR, + * not used if a0 is INTEL_SIP_SMC_STATUS_OK or + * INTEL_SIP_SMC_STATUS_REJECTED + * a2-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_PSGSIGMA_TEARDOWN 100 +#define INTEL_SIP_SMC_FCS_PSGSIGMA_TEARDOWN \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_PSGSIGMA_TEARDOWN) + +/** + * Request INTEL_SIP_SMC_FCS_CHIP_ID + * Sync call to get the device ID + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_CHIP_ID + * a1-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR or + * INTEL_SIP_SMC_STATUS_REJECTED + * a1 mailbox error if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 retrieved chipID 
value low 32 bits + * a3 retrieved chipID value high 32 bits + */ +#define INTEL_SIP_SMC_FUNCID_FCS_CHIP_ID 101 +#define INTEL_SIP_SMC_FCS_CHIP_ID \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_CHIP_ID) + +/** + * Request INTEL_SIP_SMC_FCS_ATTESTATION_SUBKEY + * Sync call to the device attestation subkey + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ATTESTATION_SUBKEY + * a1 physical address of subkey command data + * a2 subkey command data size + * a3 physical address of to be filled subkey response data + * a4 subkey response data size + * a5-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox error if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of the filled subkey response data + * a3 size of the filled subkey response dat + */ +#define INTEL_SIP_SMC_FUNCID_FCS_ATTESTATION_SUBKEY 102 +#define INTEL_SIP_SMC_FCS_ATTESTATION_SUBKEY \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_ATTESTATION_SUBKEY) + +/** + * Request INTEL_SIP_SMC_FCS_ATTESTATION_MEASUREMENTS + * Async call to get device attestation measurements + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ATTESTATION_MEASUREMENTS + * a1 physical address of measurement command data + * a2 measurement command data size + * a3 physical address of to be filled measurement response data + * a4 measurement response data size + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox error if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of the filled subkey measurement data + * a3 size of the filled subkey measurement data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_ATTESTATION_MEASUREMENTS 103 +#define INTEL_SIP_SMC_FCS_ATTESTATION_MEASUREMENTS \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_ATTESTATION_MEASUREMENTS) + +/** + * Request INTEL_SIP_SMC_FCS_GET_ATTESTATION_CERTIFICATE + * Sync call to get device attestation certificate + * + * Call register 
usage: + * a0 INTEL_SIP_SMC_FCS_GET_ATTESTATION_CERTIFICATE + * a1 the type of certificate request + * a2 the physical address which holds certificate response data + * a3 the size of the certificate response data + * a4-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox error if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of the requested certificate + * a3 sized of the requested certificate + */ +#define INTEL_SIP_SMC_FUNCID_FCS_GET_ATTESTATION_CERTIFICATE 104 +#define INTEL_SIP_SMC_FCS_GET_ATTESTATION_CERTIFICATE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_ATTESTATION_CERTIFICATE) + +/** + * Request INTEL_SIP_SMC_FCS_CREATE_CERTIFICATE_ON_RELOAD + * Sync call to specify what certificate is to be generated + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_CREATE_CERTIFICATE_ON_RELOAD + * a1 the type of certificat request + * a2-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox error if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_CREATE_CERTIFICATE_ON_RELOAD 105 +#define INTEL_SIP_SMC_FCS_CREATE_CERTIFICATE_ON_RELOAD \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_CREATE_CERTIFICATE_ON_RELOAD) + +/** + * Request INTEL_SIP_SMC_FCS_GET_ROM_PATCH_SHA384 + * + * Sync call used to dump the SHA384 hash of the rom patch + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_GET_ROM_PATCH_SHA384 + * a1 the physical address for firmware to write generated SHA384 data * a2-a7 not used * * Return status: * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FCS_ERROR or * INTEL_SIP_SMC_FCS_REJECTED * a1 mailbox error - * a2 physical address for the structure of fuse and key hashes - * a3 the size of structure + * a2 the physical address of the SHA384 checksum + * a3 size of the SHA384 checksum + */ 
+#define INTEL_SIP_SMC_FUNCID_FCS_GET_ROM_PATCH_SHA384 64 +#define INTEL_SIP_SMC_FCS_GET_ROM_PATCH_SHA384 \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_ROM_PATCH_SHA384) + +/** + * Request INTEL_SIP_SMC_FCS_OPEN_CRYPTO_SERVICE_SESSION + * Sync call to open and establish a crypto service session with firmware * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_OPEN_CRYPTO_SERVICE_SESSION + * a1-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox error if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 session ID + * a3 not used */ -#define INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA 94 -#define INTEL_SIP_SMC_FCS_GET_PROVISION_DATA \ - INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA) +#define INTEL_SIP_SMC_FUNCID_FCS_OPEN_CRYPTO_SERVICE_SESSION 110 +#define INTEL_SIP_SMC_FCS_OPEN_CRYPTO_SERVICE_SESSION \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_OPEN_CRYPTO_SERVICE_SESSION) + +/** + * Request INTEL_SIP_SMC_FCS_CLOSE_CRYPTO_SERVICE_SESSION + * Sync call to close a service session + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_CLOSE_CRYPTO_SERVICE_SESSION + * a1 session ID + * a2-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox error if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_CLOSE_CRYPTO_SERVICE_SESSION 111 +#define INTEL_SIP_SMC_FCS_CLOSE_CRYPTO_SERVICE_SESSION \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_CLOSE_CRYPTO_SERVICE_SESSION) + +/** + * Request INTEL_SIP_SMC_FCS_IMPORT_CRYPTO_SERVICE_KEY + * Async call to import crypto service key to the device + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_IMPORT_CRYPTO_SERVICE_KEY + * a1 physical address of the service key object with header + * a3 size of the service key object + * a4-a7 not used + * + * 
Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR or + * INTEL_SIP_SMC_STATUS_REJECTED + * a1-3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_IMPORT_CRYPTO_SERVICE_KEY 112 +#define INTEL_SIP_SMC_FCS_IMPORT_CRYPTO_SERVICE_KEY \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_IMPORT_CRYPTO_SERVICE_KEY) + +/** + * Request INTEL_SIP_SMC_FCS_EXPORT_CRYPTO_SERVICE_KEY + * Sync call to export crypto service key from the device + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_EXPORT_CRYPTO_SERVICE_KEY + * a1 session ID + * a2 key UID + * a3 physical address of the exported service key object + * a4 size of the exported service key object, max is (88 words + 3 header words) + * a5-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox and status errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * 31:24 -- reserved + * 23:16 -- import/export/removal status error + * 15:11 -- reserved + * 10:0 -- mailbox error + * a2 physical address of the exported service key object + * a3 size of the exported service key object + */ +#define INTEL_SIP_SMC_FUNCID_FCS_EXPORT_CRYPTO_SERVICE_KEY 113 +#define INTEL_SIP_SMC_FCS_EXPORT_CRYPTO_SERVICE_KEY \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_EXPORT_CRYPTO_SERVICE_KEY) + +/** + * Request INTEL_SIP_SMC_FCS_REMOVE_CRYPTO_SERVICE_KEY + * Sync call to remove the crypto service kers from the device + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_REMOVE_CRYPTO_SERVICE_KEY + * a1 session ID + * a2 key UID + * a3-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox and status errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * 31:24 -- reserved + * 23:16 -- import/export/removal status error + * 15:11 -- reserved + * 10:0 -- mailbox error + * a2-a3 not used + */ +#define 
INTEL_SIP_SMC_FUNCID_FCS_REMOVE_CRYPTO_SERVICE_KEY 114 +#define INTEL_SIP_SMC_FCS_REMOVE_CRYPTO_SERVICE_KEY \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_REMOVE_CRYPTO_SERVICE_KEY) + +/** + * Request INTEL_SIP_SMC_FCS_GET_CRYPTO_SERVICE_KEY_INFO + * Sync call to query the crypto service keys on the device + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_GET_CRYPTO_SERVICE_KEY_INFO + * a1 session ID + * a2 key UID + * a3 physical address of the reponse data + * a4 max size of the response data (36 words with header) + * a3-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox and status errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * 31:24 -- reserved + * 23:16 -- import/export/removal status error + * 15:11 -- reserved + * 10:0 -- mailbox error + * a2 physical address of the reponse data + * a3 size of the response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_GET_CRYPTO_SERVICE_KEY_INFO 115 +#define INTEL_SIP_SMC_FCS_GET_CRYPTO_SERVICE_KEY_INFO \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_CRYPTO_SERVICE_KEY_INFO) + +/** + * Request INTEL_SIP_SMC_FCS_AES_CRYPTO_INIT + * Sync call to initialize AES crypto operation + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_AES_CRYPTO_INIT + * a1 session ID + * a2 context ID + * a3 key UID + * a4 physical address of AES crypto parameter data (include block mode, + * encrypt/decrypt, IV fields + * a5 size of of AES crypto parameter data + * a6-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_AES_CRYPTO_INIT 116 +#define INTEL_SIP_SMC_FCS_AES_CRYPTO_INIT \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_AES_CRYPTO_INIT) + +/** + * Request INTEL_SIP_SMC_FCS_AES_CRYPTO_UPDATE + * Sync call to 
decrypt/encrypt a data block + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_AES_CRYPTO_UPDATE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destination + * a6 size of destination + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_AES_CRYPTO_UPDATE 117 +#define INTEL_SIP_SMC_FCS_AES_CRYPTO_UPDATE \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_AES_CRYPTO_UPDATE) + +/** + * Request INTEL_SIP_SMC_FCS_AES_CRYPTO_FINALIZE + * Sync call to decrypt/encrypt a data block + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_AES_CRYPTO_FINALIZE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destination + * a6 size of destination + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_AES_CRYPTO_FINALIZE 118 +#define INTEL_SIP_SMC_FCS_AES_CRYPTO_FINALIZE \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_AES_CRYPTO_FINALIZE) + +/** + * Request INTEL_SIP_SMC_FCS_GET_DIGEST_INIT + * Sync call to request the SHA-2 hash digest on a blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_GET_DIGEST_INIT + * a1 session ID + * a2 context ID + * a3 key UID + * a4 size of crypto parameter data + * a5 the crypto parameter + * 3:0 SHA operation mode + * 7:4 digest size + * 63:8 not used + * a6-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_GET_DIGEST_INIT 119 +#define INTEL_SIP_SMC_FCS_GET_DIGEST_INIT \ + 
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_DIGEST_INIT) + +/** + * Request INTEL_SIP_SMC_FCS_GET_DIGEST_UPDATE + * Sync call to request the SHA-2 hash digest on a blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_GET_DIGEST_UPDATE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destination + * a6 size of destination + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_GET_DIGEST_UPDATE 120 +#define INTEL_SIP_SMC_FCS_GET_DIGEST_UPDATE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_DIGEST_UPDATE) + +/** + * Request INTEL_SIP_SMC_FCS_GET_DIGEST_FINALIZE + * Sync call to request the SHA-2 hash digest on a blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_GET_DIGEST_FINALIZE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destation + * a6 size of destation + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_GET_DIGEST_FINALIZE 121 +#define INTEL_SIP_SMC_FCS_GET_DIGEST_FINALIZE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_DIGEST_FINALIZE) + +/** + * Request INTEL_SIP_SMC_FCS_MAC_VERIFY_INIT + * Sync call to check the integrity and authenticity of a blob by comparing + * the calculated MAC with tagged MAC + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_MAC_VERIFY_INIT + * a1 session ID + * a2 context ID + * a3 key UID + * a4 size of crypto parameter data + * a5 
crypto parameter data + * 3:0 not used + * 7:4 digist size + * 63:8 not used + * a6-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_MAC_VERIFY_INIT 122 +#define INTEL_SIP_SMC_FCS_MAC_VERIFY_INIT \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_MAC_VERIFY_INIT) + +/** + * Request INTEL_SIP_SMC_FCS_MAC_VERIFY_UPDATE + * Sync call to check the integrity and authenticity of a blob by comparing + * the calculated MAC with tagged MAC + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_MAC_VERIFY_UPDATE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destination + * a6 size of destination + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_MAC_VERIFY_UPDATE 123 +#define INTEL_SIP_SMC_FCS_MAC_VERIFY_UPDATE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_MAC_VERIFY_UPDATE) + +/** + * Request INTEL_SIP_SMC_FCS_MAC_VERIFY_FINALIZE + * Sync call to check the integrity and authenticity of a blob by comparing + * the calculated MAC with tagged MAC + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_MAC_VERIFY_FINALIZE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destation + * a6 size of destation + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address 
of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_MAC_VERIFY_FINALIZE 124 +#define INTEL_SIP_SMC_FCS_MAC_VERIFY_FINALIZE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_MAC_VERIFY_FINALIZE) +/** + * Request INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNING_INIT + * Sync call to send digital signature signing request on a data blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNING_INIT + * a1 session ID + * a2 context ID + * a3 key UID + * a4 size of crypto parameter data + * a5 crypto parameter data + * 3:0 ECC algorithm + * 63:4 not used + * a6-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_ECDSA_HASH_SIGNING_INIT 125 +#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNING_INIT \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_ECDSA_HASH_SIGNING_INIT) + +/** + * Request INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNING_FINALIZE + * Sync call to send digital signature signing request on a data blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNING_FINALIZE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destination + * a6 size of destination + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_ECDSA_HASH_SIGNING_FINALIZE 127 +#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNING_FINALIZE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECDSA_HASH_SIGNING_FINALIZE) + +/** + * Request INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_INIT + * Sync call to digital signature signing request on a data 
blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_INIT + * a1 session ID + * a2 context ID + * a3 key UID + * a4 size of crypto parameter data + * a5 crypto parameter data + * 3:0 ECC algorithm + * 63:4 not used + * a6-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_ECDSA_SHA2_DATA_SIGNING_INIT 128 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_INIT \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECDSA_SHA2_DATA_SIGNING_INIT) + +/** + * Request INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_UPDATE + * Sync call to digital signature signing request on a data blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_UPDATE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destination + * a6 size of destination + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_ECDSA_SHA2_DATA_SIGNING_UPDATE 129 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_UPDATE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECDSA_SHA2_DATA_SIGNING_UPDATE) + +/** + * Request INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_FINALIZE + * Sync call to digital signature signing request on a data blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_FINALIZE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destation + * a6 size of destation + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, 
INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_ECDSA_SHA2_DATA_SIGNING_FINALIZE 130 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_FINALIZE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECDSA_SHA2_DATA_SIGNING_FINALIZE) + +/** + * Request INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNATURE_VERIFY_INIT + * Sync call to sends digital signature verify request with precalculated hash + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNATURE_VERIFY_INIT + * a1 session ID + * a2 context ID + * a3 key UID + * a4 size of crypto parameter data + * a5 crypto parameter data + * 3:0 ECC algorithm + * 63:4 not used + * a6-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_ECDSA_HASH_SIGNATURE_VERIFY_INIT 131 +#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNATURE_VERIFY_INIT \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECDSA_HASH_SIGNATURE_VERIFY_INIT) + +/** + * Request INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNATURE_VERIFY_FINALIZE + * Sync call to sends digital signature verify request with precalculated hash + * + * a0 INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNATURE_VERIFY_FINALIZE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destation + * a6 size of destation + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_ECDSA_HASH_SIGNATURE_VERIFY_FINALIZE 133 +#define 
INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGNATURE_VERIFY_FINALIZE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_ECDSA_HASH_SIGNATURE_VERIFY_FINALIZE) + +/** + * Request INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_INIT + * Sync call to send digital signature verify request + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_INIT + * a1 session ID + * a2 context ID + * a3 key UID + * a4 size of crypto parameter data + * a5 crypto parameter data + * 3:0 ECC algorithm + * 63:4 not used + * a6-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_INIT 134 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_INIT \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_INIT) + +/** + * Request INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_UPDATE + * Sync call to send digital signature verify request + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_UPDATE + * a1 session ID + * a2 context ID + * a3 physical address of source (contain user data) + * a4 size of source + * a5 physical address of destination + * a6 size of destination + * a7 size of user data + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_UPDATE 135 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_UPDATE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_UPDATE) + +/** + * Request INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_FINALIZE 
+ * Sync call to send digital signature verify request + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_FINALIZE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destation + * a6 size of destation + * a7 size of user data + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_FINALIZE 136 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_FINALIZE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_FINALIZE) + +/** + * Request INTEL_SIP_SMC_FCS_ECDSA_GET_PUBLIC_KEY_INIT + * Sync call to send the request to get the public key + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDSA_GET_PUBLIC_KEY_INIT + * a1 session ID + * a2 context ID + * a3 key UID + * a4 size of crypto parameter data + * a5 crypto parameter data + * 3:0 EE algorithm + * 63:4 not used + * a6-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_ECDSA_GET_PUBLIC_KEY_INIT 137 +#define INTEL_SIP_SMC_FCS_ECDSA_GET_PUBLIC_KEY_INIT \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_ECDSA_GET_PUBLIC_KEY_INIT) + +/** + * Request INTEL_SIP_SMC_FCS_ECDSA_GET_PUBLIC_KEY_FINALIZE + * Sync call to send the request to get the public key + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDSA_GET_PUBLIC_KEY_FINALIZE + * a1 session ID + * a2 context ID + * a3 physical address of response data + * a4 size of response data + * a5-a7 not used + * + * Return status: + * a0 
INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_FCS_ECDSA_GET_PUBLIC_KEY_FINALIZE 139 +#define INTEL_SIP_SMC_FCS_ECDSA_GET_PUBLIC_KEY_FINALIZE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_FCS_ECDSA_GET_PUBLIC_KEY_FINALIZE) + +/** + * Request INTEL_SIP_SMC_FCS_ECDH_INIT + * Sync call to send the request on generating a share secret on + * Diffie-Hellman key exchange + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDH_INIT + * a1 session ID + * a2 context ID + * a3 key UID + * a4 size of crypto parameter data + * a5 crypto parameter data + * 3:0 ECC algorithm + * 63:4 not used + * a6-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_ECDH_INIT 140 +#define INTEL_SIP_SMC_FCS_ECDH_INIT \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_ECDH_INIT) + +/** + * Request INTEL_SIP_SMC_FCS_ECDH_FINALIZE + * Sync call to send the request on generating a share secret on + * Diffie-Hellman key exchange + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDH_FINALIZE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destation + * a6 size of destation + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_ECDH_FINALIZE 142 +#define INTEL_SIP_SMC_FCS_ECDH_FINALIZE \ + 
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_ECDH_FINALIZE) + +/** + * Request INTEL_SIP_SMC_FCS_RANDOM_NUMBER_EXT + * + * Async call used to query the random number generated by the firmware, + * the maxim random number is 4080 bytes. + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_RANDOM_NUMBER_EXT + * a1 session ID + * a2 context ID + * a3 size of the requested random data + * a4-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_RANDOM_NUMBER_EXT 143 +#define INTEL_SIP_SMC_FCS_RANDOM_NUMBER_EXT \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_RANDOM_NUMBER_EXT) + +/** + * Request INTEL_SIP_SMC_FCS_CRYPTION_EXT + * Sync call for data encryption or data decryption. + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_CRYPTION_EXT + * a1 session ID + * a2 context ID + * a3 cryption operating mode (1 for encryption and 0 for decryption) + * a4 physical address which stores to be encrypted or decrypted data + * a5 size of input data + * a6 physical address which will hold the encrypted or decrypted output data + * a7 size of output data + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of (output) decrypted or encrypted data + * a3 size of (output) decrypted or encrypted data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_CRYPTION_EXT 144 +#define INTEL_SIP_SMC_FCS_CRYPTION_EXT \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_CRYPTION_EXT) + +/** + * Request INTEL_SIP_SMC_FCS_GET_DIGEST_SMMU_UPDATE + * Sync call to request the SHA-2 hash digest on a blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_GET_DIGEST_SMMU_UPDATE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of 
destination + * a6 size of destination + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_GET_DIGEST_SMMU_UPDATE 145 +#define INTEL_SIP_SMC_FCS_GET_DIGEST_SMMU_UPDATE \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_DIGEST_SMMU_UPDATE) + +/** + * Request INTEL_SIP_SMC_FCS_GET_DIGEST_SMMU_FINALIZE + * Sync call to request the SHA-2 hash digest on a blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_GET_DIGEST_SMMU_FINALIZE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destation + * a6 size of destation + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_GET_DIGEST_SMMU_FINALIZE 146 +#define INTEL_SIP_SMC_FCS_GET_DIGEST_SMMU_FINALIZE \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_DIGEST_SMMU_FINALIZE) + +/** + * Request INTEL_SIP_SMC_FCS_MAC_VERIFY_SMMU_UPDATE + * Sync call to check the integrity and authenticity of a blob by comparing + * the calculated MAC with tagged MAC + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_MAC_VERIFY_SMMU_UPDATE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destination + * a6 size of destination + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response 
data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_MAC_VERIFY_SMMU_UPDATE 147 +#define INTEL_SIP_SMC_FCS_MAC_VERIFY_SMMU_UPDATE \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_MAC_VERIFY_SMMU_UPDATE) + +/** + * Request INTEL_SIP_SMC_FCS_MAC_VERIFY_SMMU_FINALIZE + * Sync call to check the integrity and authenticity of a blob by comparing + * the calculated MAC with tagged MAC + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_MAC_VERIFY_SMMU_FINALIZE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destation + * a6 size of destation + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_MAC_VERIFY_SMMU_FINALIZE 148 +#define INTEL_SIP_SMC_FCS_MAC_VERIFY_SMMU_FINALIZE \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_MAC_VERIFY_SMMU_FINALIZE) + +/** + * Request INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_SMMU_UPDATE + * Sync call to digital signature signing request on a data blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_SMMU_UPDATE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destination + * a6 size of destination + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_ECDSA_SHA2_DATA_SIGNING_SMMU_UPDATE 149 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_SMMU_UPDATE \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECDSA_SHA2_DATA_SIGNING_SMMU_UPDATE) + +/** + * Request 
INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_SMMU_FINALIZE + * Sync call to digital signature signing request on a data blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_SMMU_FINALIZE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destation + * a6 size of destation + * a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_ECDSA_SHA2_DATA_SIGNING_SMMU_FINALIZE 150 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNING_SMMU_FINALIZE \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECDSA_SHA2_DATA_SIGNING_SMMU_FINALIZE) + +/** + * Request INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_SMMU_UPDATE + * Sync call to send digital signature verify request + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_SMMU_UPDATE + * a1 session ID + * a2 context ID + * a3 physical address of source (contain user data) + * a4 size of source + * a5 physical address of destination + * a6 size of destination + * a7 size of user data + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_SMMU_UPDATE 151 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_SMMU_UPDATE \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_SMMU_UPDATE) + +/** + * Request INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_SMMU_FINALIZE + * Sync call to send digital signature verify request + * + * Call register usage: + * a0 
INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_SMMU_FINALIZE + * a1 session ID + * a2 context ID + * a3 physical address of source + * a4 size of source + * a5 physical address of destation + * a6 size of destation + * a7 size of user data + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 mailbox errors if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2 physical address of response data + * a3 size of response data + */ +#define INTEL_SIP_SMC_FUNCID_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_SMMU_FINALIZE 152 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_SMMU_FINALIZE \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_ECDSA_SHA2_DATA_SIGNATURE_VERIFY_SMMU_FINALIZE) + +/** + * Request INTEL_SIP_SMC_SVC_VERSION + * + * Sync call used to query the SIP SMC API Version + * + * Call register usage: + * a0 INTEL_SIP_SMC_SVC_VERSION + * a1-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK + * a1 Major + * a2 Minor + */ +#define INTEL_SIP_SMC_SVC_FUNCID_VERSION 512 +#define INTEL_SIP_SMC_SVC_VERSION \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_SVC_FUNCID_VERSION) +/** + * Request INTEL_SIP_SMC_HWMON_READTEMP + * Sync call to request temperature + * + * Call register usage: + * a0 Temperature Channel + * a1-a7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + * a1 Temperature Value + * a2-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_HWMON_READTEMP 32 +#define INTEL_SIP_SMC_HWMON_READTEMP \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_HWMON_READTEMP) + +/** + * Request INTEL_SIP_SMC_SEU_ERR_STATUS + * Sync call to get previous Double Bit ECC error information. 
+ * + * Call register usage: + * a0 INTEL_SIP_SMC_SEU_ERR_STATUS + * a1-7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1 error count of response data + * a2 sector address of response data + * a3 error information + */ +#define INTEL_SIP_SMC_FUNCID_SEU_ERR_STATUS 153 +#define INTEL_SIP_SMC_SEU_ERR_STATUS \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_SEU_ERR_STATUS) + +/** + * Request INTEL_SIP_SMC_SAFE_INJECT_SEU_ERR + * Sync call to inject SEU Error. + * + * Call register usage: + * a0 INTEL_SIP_SMC_FUNCID_SAFE_INJECT_SEU_ERR + * a1 Number of words + * a2-7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1-a3 Not used + */ +#define INTEL_SIP_SMC_FUNCID_SAFE_INJECT_SEU_ERR 154 +#define INTEL_SIP_SMC_SAFE_INJECT_SEU_ERR \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_SAFE_INJECT_SEU_ERR) + +/** + * Request INTEL_SIP_SMC_IO96B_INJECT_ECC_ERR + * Sync call to inject IO96B ECC Error. 
+ * + * Call register usage: + * a0 INTEL_SIP_SMC_FUNCID_IO96B_INJECT_ECC_ERR + * a1 IO96B error syndrome + * a2 I096B mailbox command + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_NOT_SUPPORTED or + * INTEL_SIP_SMC_STATUS_ERROR + * a1-a3 Not used + */ +#define INTEL_SIP_SMC_FUNCID_IO96B_INJECT_ECC_ERR 156 +#define INTEL_SIP_SMC_IO96B_INJECT_ECC_ERR \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_IO96B_INJECT_ECC_ERR) + +/** + * Request INTEL_SIP_SMC_HWMON_READVOLT + * Sync call to request voltage + * + * Call register usage: + * a0 Voltage Channel + * a1-a7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + * a1 Voltage Value + * a2-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_HWMON_READVOLT 33 +#define INTEL_SIP_SMC_HWMON_READVOLT \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_HWMON_READVOLT) + +/** + * Request INTEL_SIP_SMC_SDM_REMAPPER_CONFIG +* + * Sync call to configure SDM remapper + * + * Call register usage: + * a0: INTEL_SIP_SMC_SDM_REMAPPER_CONFIG. + * a1: Remapper bypass configuration + * a2-7: not used. + * + * Return status: + * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or + * INTEL_SIP_SMC_STATUS_ERROR. + * a1-3: not used. 
+ */ +#define INTEL_SIP_SMC_FUNCID_SDM_REMAPPER_CONFIG 513 +#define INTEL_SIP_SMC_SDM_REMAPPER_CONFIG \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_SDM_REMAPPER_CONFIG) + +/** + * Request INTEL_SIP_SMC_ASYNC_HWMON_READTEMP + * Async call to request temperature + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FUNCID_HWMON_READTEMP + * a1 transaction job id + * a2 Temperature Channel + * a3-a17 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNCID_HWMON_READTEMP (0xE8) +#define INTEL_SIP_SMC_ASYNC_HWMON_READTEMP \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNCID_HWMON_READTEMP) + +/** + * Request INTEL_SIP_SMC_ASYNC_HWMON_READVOLT + * Async call to request voltage + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_HWMON_READVOLT + * a1 transaction job id + * a2 Voltage Channel + * a3-a17 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNCID_HWMON_READVOLT (0xE9) +#define INTEL_SIP_SMC_ASYNC_HWMON_READVOLT \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNCID_HWMON_READVOLT) + +/** + * Request INTEL_SIP_SMC_ASYNC_POLL + * Async call used by service driver at EL1 to query mailbox response from SDM. 
+ * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_POLL + * a1 transaction job id + * a2-17 will be used to return the response data + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + * a1-17 will contain the response values from mailbox for the previous send transaction + * Or + * a0 INTEL_SIP_SMC_STATUS_NO_RESPONSE + * a1-17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_POLL (0xC8) +#define INTEL_SIP_SMC_ASYNC_POLL \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_POLL) + +/** + * Request INTEL_SIP_SMC_ASYNC_POLL_ON_IRQ + * Async call used by service driver at EL1 to read response from SDM mailbox and + * to retrieve the transaction id's of the read response's. + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_POLL_ON_IRQ + * a1 transaction job id + * a2-7 will be used to return the response data + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + * a1-a4 will contain bitmap of available responses's transaction id as set bit position. + * a5-17 not used + * Or + * a0 INTEL_SIP_SMC_STATUS_NO_RESPONSE + * a1-17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_IRQ_POLL (0xC9) +#define INTEL_SIP_SMC_ASYNC_POLL_ON_IRQ \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_IRQ_POLL) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_OPEN_CS_SESSION + * Async call to open and establish a crypto service session with firmware + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_OPEN_CRYPTO_SERVICE_SESSION + * a1 transaction job id + * a2-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_OPEN_CS_SESSION (0x13A) +#define INTEL_SIP_SMC_ASYNC_FCS_OPEN_CS_SESSION \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_OPEN_CS_SESSION) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_CLOSE_CS_SESSION + * Async call to close a service session + * + * Call register usage: + * a0 
INTEL_SIP_SMC_ASYNC_FCS_CLOSE_CS_SESSION + * a1 transaction job id + * a2 session ID + * a3-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_CLOSE_CS_SESSION (0x13B) +#define INTEL_SIP_SMC_ASYNC_FCS_CLOSE_CS_SESSION \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_CLOSE_CS_SESSION) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_IMPORT_CS_KEY + * Async call to import crypto service key to the device + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_IMPORT_CS_KEY + * a1 transaction job id + * a2 physical address of the service key object with header + * a3 size of the service key object + * a4-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_IMPORT_CS_KEY (0x13C) +#define INTEL_SIP_SMC_ASYNC_FCS_IMPORT_CS_KEY \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_IMPORT_CS_KEY) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_EXPORT_CS_KEY + * Async call to export crypto service key from the device + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_EXPORT_CS_KEY + * a1 transaction job id + * a2 session ID + * a3 key UID + * a4 physical address of the exported service key object + * a5 size of the exported service key object + * a6-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_EXPORT_CS_KEY (0x13D) +#define INTEL_SIP_SMC_ASYNC_FCS_EXPORT_CS_KEY \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_EXPORT_CS_KEY) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_REMOVE_CS_KEY + * Async call to remove the crypto service keys from the device + * + * Call register usage: + * a0 
INTEL_SIP_SMC_ASYNC_FCS_REMOVE_CS_KEY + * a1 transaction job id + * a2 session ID + * a3 key UID + * a4-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_REMOVE_CS_KEY (0x13E) +#define INTEL_SIP_SMC_ASYNC_FCS_REMOVE_CS_KEY \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_REMOVE_CS_KEY) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_GET_CS_KEY_INFO + * Sync call to query the crypto service keys on the device + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_GET_CS_KEY_INFO + * a1 transaction job id + * a2 session ID + * a3 key UID + * a4 physical address of the response data + * a5 max size of the response data (36 words with header) + * a6-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_GET_CS_KEY_INFO (0x13F) +#define INTEL_SIP_SMC_ASYNC_FCS_GET_CS_KEY_INFO \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_GET_CS_KEY_INFO) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_RANDOM_NUMBER_EXT + * + * Async call used to query the random number generated by the firmware, + * the maxim random number is 4080 bytes. 
+ * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_RANDOM_NUMBER_EXT + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 buffer pointer for random data + * a5 size of the requested random data + * a6-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_RANDOM_NUMBER_EXT (0x12D) +#define INTEL_SIP_SMC_ASYNC_FCS_RANDOM_NUMBER_EXT \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_RANDOM_NUMBER_EXT) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_GET_PROVISION_DATA + * Async call to dump all the fuses and key hashes + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_GET_PROVISION_DATA + * a1 transaction job id + * a2 buffer pointer to store the provision data + * a3 size of the buffer + * a4-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + * + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_GET_PROVISION_DATA (0x132) +#define INTEL_SIP_SMC_ASYNC_FCS_GET_PROVISION_DATA \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_GET_PROVISION_DATA) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_SEND_CERTIFICATE + * Async call to send a signed certificate + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_SEND_CERTIFICATE + * a1 transaction job id + * a2 the physical address of CERTIFICATE block + * a3 size of data block + * a4-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_SEND_CERTIFICATE (0x131) +#define INTEL_SIP_SMC_ASYNC_FCS_SEND_CERTIFICATE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_SEND_CERTIFICATE) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_CNTR_SET_PREAUTH + * Async call to update counter value w/o signed 
certificate + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_CNTR_SET_PREAUTH + * a1 transaction job id + * a2 counter type + * a3 counter value + * a4 test bit + * a5-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_CNTR_SET_PREAUTH (0x133) +#define INTEL_SIP_SMC_ASYNC_FCS_CNTR_SET_PREAUTH \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_CNTR_SET_PREAUTH) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_HKDF_REQUEST + * Sync call to send the request on performing HKDF extract or expand with + * input key and data. + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_HKDF_REQUEST + * a1 transaction job id in lower half,smmu addr offset in upper half + * a2 session ID + * a3 step type + * a4 MAC mode + * a5 physical address of source + * a6 key_id + * a7 output key object length + * a8-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FCS_FUNC_ID_HKDF_REQUEST (0x166) +#define INTEL_SIP_SMC_ASYNC_FCS_HKDF_REQUEST \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FCS_FUNC_ID_HKDF_REQUEST) +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_CREATE_CRYPTO_SERVICE_KEY + * Async call to create crypto service key to the device + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_CREATE_CRYPTO_SERVICE_KEY + * a1 transaction job id + * a2 physical address of the service key object with header + * a3 size of the service key object + * a4-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_CREATE_CRYPTO_SERVICE_KEY (0x167) +#define INTEL_SIP_SMC_ASYNC_FCS_CREATE_CRYPTO_SERVICE_KEY \ + 
INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_CREATE_CRYPTO_SERVICE_KEY) + +/** + * Request INTEL_SIP_SMC_ASYNC_GET_IDCODE + * Async call to retrieve the JTAG IDCODE + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_GET_IDCODE + * a1 transaction job id + * a2-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_GET_IDCODE (0xD3) +#define INTEL_SIP_SMC_ASYNC_GET_IDCODE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_GET_IDCODE) + +/** + * Request INTEL_SIP_SMC_ASYNC_GET_DEVICE_IDENTITY + * Async call to retrieve device identity Hashed Message + * Authentication Code + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_GET_DEVICE_IDENTITY + * a1 transaction job id + * a2 physical address of response buffer + * a3 size of response buffer + * a4-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_GET_DEVICE_IDENTITY (0xD2) +#define INTEL_SIP_SMC_ASYNC_GET_DEVICE_IDENTITY \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_GET_DEVICE_IDENTITY) + +/** + * Request INTEL_SIP_SMC_ASYNC_QSPI_OPEN + * Async call to open QSPI device through SDM for client + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FUNC_ID_QSPI_OPEN + * a1 transaction job id + * a2-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_QSPI_OPEN (0xCC) +#define INTEL_SIP_SMC_ASYNC_QSPI_OPEN \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_QSPI_OPEN) + +/** + * Request INTEL_SIP_SMC_ASYNC_QSPI_CLOSE + * Async call to close QSPI device through SDM for client. 
+ * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_QSPI_CLOSE + * a1 transaction job id + * a2-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_QSPI_CLOSE (0xCD) +#define INTEL_SIP_SMC_ASYNC_QSPI_CLOSE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_QSPI_CLOSE) + +/** + * Request INTEL_SIP_SMC_ASYNC_QSPI_SET_CS + * Async call to set the QSPI chip select through SDM for client. + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_QSPI_SET_CS + * a1 transaction job id + * a2 QSPI CS number + * a3 Combined address mode + * a4 External decoder mode + * a5-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_QSPI_SET_CS (0xCE) +#define INTEL_SIP_SMC_ASYNC_QSPI_SET_CS \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_QSPI_SET_CS) + +/** + * Request INTEL_SIP_SMC_ASYNC_QSPI_ERASE + * Async call to erase QSPI device through SDM for client. + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_QSPI_ERASE + * a1 transaction job id + * a2 erase address + * a3 erase size + * a4-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_QSPI_ERASE (0xCF) +#define INTEL_SIP_SMC_ASYNC_QSPI_ERASE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_QSPI_ERASE) + +/** + * Request INTEL_SIP_SMC_ASYNC_QSPI_WRITE + * Async call to write to QSPI device through SDM for client. + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_QSPI_WRITE + * a1 transaction job id + * a2 physical address of the payload buffer, payload buffer includes the write + * address, size and data to be written. 
+ * a3 payload size + * a4-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_QSPI_WRITE (0xD0) +#define INTEL_SIP_SMC_ASYNC_QSPI_WRITE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_QSPI_WRITE) + +/** + * Request INTEL_SIP_SMC_ASYNC_QSPI_READ + * Async call to read from QSPI device through SDM for client. + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_QSPI_READ + * a1 transaction job id + * a2 read address + * a3 physical address of the response buffer + * a4 read size + * a5-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_QSPI_READ (0xD1) +#define INTEL_SIP_SMC_ASYNC_QSPI_READ \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_QSPI_READ) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_MCTP + * Async call to send MCTP message + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_MCTP + * a1 transaction job id + * a2 payload address + * a3 payload size + * a4 response address + * a5 response size + * a6-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_MCTP (0x165) +#define INTEL_SIP_SMC_ASYNC_FCS_MCTP \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_MCTP) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_GET_DIGEST_INIT + * Async call to initialize the SHA-2 hash digest on a blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_GET_DIGEST_INIT + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 key UID + * a5 size of crypto parameter data + * a6 the crypto parameter + * 3:0 SHA opeation mode + * 7:4 digist size + * 63:8 not used + * a7-a17 not used + * + * Return status: + * a0 
INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_GET_DIGEST_INIT (0x143) +#define INTEL_SIP_SMC_ASYNC_FCS_GET_DIGEST_INIT \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_GET_DIGEST_INIT) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_GET_DIGEST_UPDATE + * Async call to update the SHA-2 hash digest on a blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_GET_DIGEST_UPDATE + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 physical address of source + * a5 size of source + * a6 physical address of destination + * a7 size of destination + * a8 smmu remapped address of source + * a9-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_GET_DIGEST_UPDATE (0x144) +#define INTEL_SIP_SMC_ASYNC_FCS_GET_DIGEST_UPDATE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_GET_DIGEST_UPDATE) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_GET_DIGEST_FINALIZE + * Async call to finalize the SHA-2 hash digest on a blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_GET_DIGEST_FINALIZE + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 physical address of source + * a5 size of source + * a6 physical address of destination + * a7 size of destination + * a8 smmu remapped address of source + * a9-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_GET_DIGEST_FINALIZE (0x145) +#define INTEL_SIP_SMC_ASYNC_FCS_GET_DIGEST_FINALIZE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_GET_DIGEST_FINALIZE) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_MAC_VERIFY_INIT + * Async call to initialize the MAC verification on a blob + * + 
* Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_MAC_VERIFY_INIT + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 key UID + * a5 size of crypto parameter data + * a6 crypto parameter + * 3:0 MAC opeation mode + * 7:4 MAC size + * 63:8 not used + * a7-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_MAC_VERIFY_INIT (0x148) +#define INTEL_SIP_SMC_ASYNC_FCS_MAC_VERIFY_INIT \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_MAC_VERIFY_INIT) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_MAC_VERIFY_UPDATE + * Async call to update the MAC verification on a blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_MAC_VERIFY_UPDATE + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 physical address of source + * a5 size of source + * a6 physical address of destination + * a7 size of destination + * a8 size of user data + * a9 smmu remapped address of source + * a10-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_MAC_VERIFY_UPDATE (0x149) +#define INTEL_SIP_SMC_ASYNC_FCS_MAC_VERIFY_UPDATE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_MAC_VERIFY_UPDATE) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_MAC_VERIFY_FINALIZE + * Async call to finalize the MAC verification on a blob + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_MAC_VERIFY_FINALIZE + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 physical address of source + * a5 size of source + * a6 physical address of destination + * a7 size of destination + * a8 size of user data + * a9 smmu remapped address of source + * a10-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or 
INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_MAC_VERIFY_FINALIZE (0x14A) +#define INTEL_SIP_SMC_ASYNC_FCS_MAC_VERIFY_FINALIZE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_MAC_VERIFY_FINALIZE) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_AES_CRYPT_INIT + * Async call to initialize AES crypto operation + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_AES_CRYPT_INIT + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 key UID + * a5 physical address of AES crypto parameter data (include block mode, + * encrypt/decrypt, IV fields) + * a6 size of AES crypto parameter data + * a7-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_AES_CRYPT_INIT (0x140) +#define INTEL_SIP_SMC_ASYNC_FCS_AES_CRYPT_INIT \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_AES_CRYPT_INIT) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_AES_CRYPT_UPDATE + * Async call to update AES crypto operation + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_AES_CRYPT_UPDATE + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 physical address of source + * a5 size of source + * a6 physical address of destination + * a7 size of destination + * a8 size of additional authenticated data + * a9 smmu remapped address of source + * a10 smmu remapped address of destination + * a11-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_AES_CRYPT_UPDATE (0x141) +#define INTEL_SIP_SMC_ASYNC_FCS_AES_CRYPT_UPDATE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_AES_CRYPT_UPDATE) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_AES_CRYPT_FINALIZE + * Async call to finalize AES crypto operation + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_AES_CRYPT_FINALIZE + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 physical 
address of source + * a5 size of source + * a6 physical address of destination + * a7 size of destination + * a8 size of additionbal authenticated data + * a9 smmu remapped address of source + * a10 smmu remapped address of destination + * a11-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_AES_CRYPT_FINALIZE (0x142) +#define INTEL_SIP_SMC_ASYNC_FCS_AES_CRYPT_FINALIZE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_AES_CRYPT_FINALIZE) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_CHIP_ID + * Async call to retrieve the chip ID + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_CHIP_ID + * a1 transaction job id + * a2-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_CHIP_ID (0x135) +#define INTEL_SIP_SMC_ASYNC_FCS_CHIP_ID \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_CHIP_ID) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_GET_ATTESTATION_CERT + * Async call to get the attestation certificate + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_GET_ATTESTATION_CERT + * a1 transaction job id + * a2 type of certificate request + * a3 physical address of response buffer + * a4 size of response buffer + * a5-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_GET_ATTESTATION_CERT (0x138) +#define INTEL_SIP_SMC_ASYNC_FCS_GET_ATTESTATION_CERT \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_GET_ATTESTATION_CERT) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_CREATE_CERT_ON_RELOAD + * Async call to create certificate on reload + * + * Call register usage: + * a0 
INTEL_SIP_SMC_ASYNC_FCS_CREATE_CERT_ON_RELOAD + * a1 transaction job id + * a2 type of certificat request + * a3-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_CREATE_CERT_ON_RELOAD (0x139) +#define INTEL_SIP_SMC_ASYNC_FCS_CREATE_CERT_ON_RELOAD \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_CREATE_CERT_ON_RELOAD) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_CRYPTION_EXT + * Async call to perform encryption/decryption + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_CRYPTION_EXT + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 cryption operating mode (1 for encryption and 0 for decryption) + * a5 physical address of source + * a6 size of source + * a7 physical address of destination + * a8 size of destination + * a9 sdos ownership + * a10 smmu remapped address of source + * a11 smmu remapped address of destination + * a12-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_CRYPTION_EXT (0x12F) +#define INTEL_SIP_SMC_ASYNC_FCS_CRYPTION_EXT \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_CRYPTION_EXT) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_ECDSA_GET_PUBKEY_INIT + * Async call to send the request to get the public key + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_ECDSA_GET_PUBKEY_INIT + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 key UID + * a5 size of crypto parameter data + * a6 crypto parameter data + * 3:0 EE algorithm + * 63:4 not used + * a6-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_GET_PUBKEY_INIT (0x160) +#define 
INTEL_SIP_SMC_ASYNC_FCS_ECDSA_GET_PUBKEY_INIT \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_GET_PUBKEY_INIT) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_ECDSA_GET_PUBKEY_FINALIZE + * Async call to send the request to get the public key + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_ECDSA_GET_PUBKEY_FINALIZE + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 physical address of response data + * a5 size of response data + * a6-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_GET_PUBKEY_FINALIZE (0x161) +#define INTEL_SIP_SMC_ASYNC_FCS_ECDSA_GET_PUBKEY_FINALIZE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_GET_PUBKEY_FINALIZE) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_ECDH_REQUEST_INIT + * Async call to send the request on generating a share secret on + * Diffie-Hellman key exchange + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_ECDH_REQUEST_INIT + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 key UID + * a5 size of crypto parameter data + * a6 crypto parameter data + * 3:0 EE algorithm + * 63:4 not used + * a7-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDH_REQUEST_INIT (0x162) +#define INTEL_SIP_SMC_ASYNC_FCS_ECDH_REQUEST_INIT \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDH_REQUEST_INIT) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_ECDH_REQUEST_FINALIZE + * Async call to send the request on generating a share secret on + * Diffie-Hellman key exchange + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_ECDH_REQUEST_FINALIZE + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 physical address of source + * a5 size of 
source + * a6 physical address of destination + * a7 size of destination + * a8-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDH_REQUEST_FINALIZE (0x163) +#define INTEL_SIP_SMC_ASYNC_FCS_ECDH_REQUEST_FINALIZE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDH_REQUEST_FINALIZE) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIG_VERIFY_INIT + * Async call to send digital signature verify request with precalculated hash + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIG_VERIFY_INIT + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 key UID + * a5 size of crypto parameter data + * a6 crypto parameter data + * 3:0 EE algorithm + * 63:4 not used + * a7-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_HASH_SIG_VERIFY_INIT (0x154) +#define INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIG_VERIFY_INIT \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_HASH_SIG_VERIFY_INIT) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIG_VERIFY_FINALIZE + * Async call to send final stage of digital signature verify request + * with precalculated hash + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIG_VERIFY_FINALIZE + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 physical address of source + * a5 size of source + * a6 physical address of destination + * a7 size of destination + * a8-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_HASH_SIG_VERIFY_FINALIZE (0x155) +#define 
INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIG_VERIFY_FINALIZE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_HASH_SIG_VERIFY_FINALIZE) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_INIT + * Async call to send digital signature verify request + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_INIT + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 key UID + * a5 size of crypto parameter data + * a6 crypto parameter data + * 3:0 EE algorithm + * 63:4 not used + * a7-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_INIT (0x156) +#define INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_INIT \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_INIT) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_UPDATE + * Async call to send digital signature verify request + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_UPDATE + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 physical address of source (contain user data) + * a5 size of source + * a6 physical address of destination (contain signature) + * a7 size of destination + * a8 size of user data + * a9 smmu remapped address of source + * a10-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_UPDATE (0x157) +#define INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_UPDATE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_UPDATE) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_FINALIZE + * Async call to send final stage of digital signature verify 
request + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_FINALIZE + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 physical address of source (contain user data) + * a5 size of source + * a6 physical address of destination (contain signature) + * a7 size of destination + * a8 size of user data + * a9 smmu remapped address of source + * a10-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_FINLZE (0x158) +#define INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_FINALIZE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_FINLZE) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIGN_INIT + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIGN_INIT + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 key UID + * a5 size of crypto parameter data + * a6 crypto parameter data + * 3:0 ECC algorithm + * 63:4 not used + * a7-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_HASH_SIGN_INIT (0x14D) +#define INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIGN_INIT \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_HASH_SIGN_INIT) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIGN_FINALIZE + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIGN_FINALIZE + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 physical address of source + * a5 size of source + * a6 physical address of destination + * a7 size of destination + * + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_HASH_SIGN_FINALIZE (0x14E) +#define INTEL_SIP_SMC_ASYNC_FCS_ECDSA_HASH_SIGN_FINALIZE \ + 
INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_HASH_SIGN_FINALIZE) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIGN_INIT + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIGN_INIT + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 key UID + * a5 size of crypto parameter data + * a6 crypto parameter data + * 3:0 ECC algorithm + * 63:4 not used + * a7-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ + #define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_SHA2_DATA_SIGN_INIT (0x14F) +#define INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIGN_INIT \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_SHA2_DATA_SIGN_INIT) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIGN_UPDATE + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIGN_UPDATE + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 physical address of source + * a5 size of source + * a6 physical address of destination + * a7 size of destination + * a8 smmu remapped address of source + * a9-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_SHA2_DATA_SIGN_UPDATE (0x150) +#define INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIGN_UPDATE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_SHA2_DATA_SIGN_UPDATE) + +/** + * Request INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIGN_FINALIZE + * + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIGN_FINALIZE + * a1 transaction job id + * a2 session ID + * a3 context ID + * a4 physical address of source + * a5 size of source + * a6 physical address of destination + * a7 size of destination + * a8 smmu remapped address of source + * a9-a17 not used + * + * Return 
status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_SHA2_DATA_SIGN_FINALIZE (0x151) +#define INTEL_SIP_SMC_ASYNC_FCS_ECDSA_SHA2_DATA_SIGN_FINALIZE \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_FCS_ECDSA_SHA2_DATA_SIGN_FINALIZE) + +/** + * Request INTEL_SIP_SMC_ASYNC_RSU_GET_SPT + * Async call to get RSU SPT from SDM. + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_RSU_GET_SPT + * a1 transaction job id + * a2-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_SPT (0xEA) +#define INTEL_SIP_SMC_ASYNC_RSU_GET_SPT \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_SPT) + +/** + * Request INTEL_SIP_SMC_ASYNC_MBOX_SEND + * Async call to send a generic mailbox command to the SDM. + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_MBOX_SEND + * a1 transaction job id + * a2 mailbox command + * a3 physical address of command buffer + * a4 size of message buffer + * a5 physical address of response buffer + * a6 size of response buffer + * a7-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_MBOX_SEND (0xEE) +#define INTEL_SIP_SMC_ASYNC_MBOX_SEND \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_MBOX_SEND) + +/** + * Request INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS + * Async call to get RSU error status from SDM. 
+ * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS + * a1 transaction job id + * a2-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_ERROR_STATUS (0xEB) +#define INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_ERROR_STATUS) + +/** + * Request INTEL_SIP_SMC_ASYNC_RSU_NOTIFY + * Async call to send NOTIFY value to SDM. + * Call register usage: + * a0 INTEL_SIP_SMC_ASYNC_RSU_NOTIFY + * a1 transaction job id + * a2 notify value + * a3-a17 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK ,INTEL_SIP_SMC_STATUS_REJECTED + * or INTEL_SIP_SMC_STATUS_BUSY + * a1-a17 not used + */ +#define INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_NOTIFY (0xEC) +#define INTEL_SIP_SMC_ASYNC_RSU_NOTIFY \ + INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_NOTIFY) #endif diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h index 60ed82112680e..b303500a97db2 100644 --- a/include/linux/firmware/intel/stratix10-svc-client.h +++ b/include/linux/firmware/intel/stratix10-svc-client.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2017-2018, Intel Corporation + * Copyright (C) 2017-2024, Intel Corporation */ #ifndef __STRATIX10_SVC_CLIENT_H @@ -11,12 +11,14 @@ * * fpga: for FPGA configuration * rsu: for remote status update + * hwmon: for hardware monitoring (voltage and temperature) */ #define SVC_CLIENT_FPGA "fpga" #define SVC_CLIENT_RSU "rsu" #define SVC_CLIENT_FCS "fcs" +#define SVC_CLIENT_HWMON "hwmon" -/* +/** * Status of the sent command, in bit number * * SVC_STATUS_OK: @@ -51,25 +53,31 @@ #define SVC_STATUS_ERROR 5 #define SVC_STATUS_NO_SUPPORT 6 #define SVC_STATUS_INVALID_PARAM 7 - -/* +#define SVC_STATUS_NO_RESPONSE 8 +/** * Flag bit for COMMAND_RECONFIG * * 
COMMAND_RECONFIG_FLAG_PARTIAL: * Set to FPGA configuration type (full or partial). + * + * COMMAND_AUTHENTICATE_BITSTREAM: + * Set for bitstream authentication, which makes sure a signed bitstream + * has valid signatures before committing it to device. */ #define COMMAND_RECONFIG_FLAG_PARTIAL 0 +#define COMMAND_AUTHENTICATE_BITSTREAM 1 /* * Timeout settings for service clients: * timeout value used in Stratix10 FPGA manager driver. * timeout value used in RSU driver */ -#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 300 -#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 720 -#define SVC_RSU_REQUEST_TIMEOUT_MS 300 +#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 5000 +#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 5000 +#define SVC_RSU_REQUEST_TIMEOUT_MS 2000 #define SVC_FCS_REQUEST_TIMEOUT_MS 2000 #define SVC_COMPLETED_TIMEOUT_MS 30000 +#define SVC_HWMON_REQUEST_TIMEOUT_MS 2000 struct stratix10_svc_chan; @@ -124,6 +132,12 @@ struct stratix10_svc_chan; * @COMMAND_RSU_DCMF_STATUS: query firmware for the DCMF status * return status is SVC_STATUS_OK or SVC_STATUS_ERROR * + * @COMMAND_RSU_GET_DEVICE_INFO: query firmware for QSPI device info + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_RSU_GET_SPT_TABLE: query firmware for SPT table + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * * @COMMAND_FCS_REQUEST_SERVICE: request validation of image from firmware, * return status is SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM * @@ -140,7 +154,141 @@ struct stratix10_svc_chan; * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR * * @COMMAND_FCS_RANDOM_NUMBER_GEN: generate a random number, return status - * is SVC_STATUS_OK, SVC_STATUS_ERROR + * is SVC_STATUS_OK, or SVC_STATUS_ERROR + * + * @COMMAND_FCS_COUNTER_SET_PREAUTHORIZED: update the counter value for + * the selected counter without the signed certificate, return status is + * SVC_STATUS_OK, or SVC_STATUS_ERROR + * + * @COMMAND_FCS_PSGSIGMA_TEARDOWN: tear down all previous black key + * provision sessions and delete 
keys associated with those sessions, + * return status is SVC_STATUS_SUBMITTED or SVC_STATUS_ERROR + * + * @COMMAND_FCS_GET_CHIP_ID: get the device's chip ID, return status is + * SVC_STATUS_SUBMITTED or SVC_STATUS_ERROR + * + * @COMMAND_FCS_ATTESTATION_SUBKEY: get device's attestation subkey, + * return status is SVC_STATUS_SUBMITTED or SVC_STATUS_ERROR + * + * @COMMAND_FCS_ATTESTATION_MEASUREMENTS: to get device's attestation + * measurements, return status is SVC_STATUS_SUBMITTED or SVC_STATUS_ERROR + * + * @COMMAND_POLL_SERVICE_STATUS: poll if the service request is complete, + * return status is SVC_STATUS_OK, SVC_STATUS_ERROR or SVC_STATUS_BUSY + * + * @COMMAND_FCS_ATTESTATION_CERTIFICATE: get FPGA attestation certificate, + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_ATTESTATION_CERTIFICATE_RELOAD: reload FPGA attestation + * certificate, return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_GET_ROM_PATCH_SHA384: read the ROM patch SHA384 value, + * return status is SVC_STATUS_OK, or SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_OPEN_SESSION: open the crypto service session(s), + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_CLOSE_SESSION: close the crypto service session(s), + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_IMPORT_KEY: import the crypto service key object, + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_EXPORT_KEY: export the crypto service key object, + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_REMOVE_KEY: remove the crypto service key object + * from the device, return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_GET_KEY_INFO: get the crypto service key object + * info, return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_CREATE_KEY: create the crypto service key object, + * return status is 
SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_AES_CRYPT: sends request to encrypt or decrypt a + * data block, return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_GET_DIGEST (INIT and FINALIZE): request the SHA-2 + * hash digest on a data block, return status is SVC_STATUS_OK or + * SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_MAC_VERIFY (INIT and FINALIZE): check the integrity + * and authenticity of a blob, return status is SVC_STATUS_OK or + * SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING (INIT and FINALIZE): send + * digital signature signing request on a data blob, return status is + * SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING (INIT and FINALIZE): send + * SHA2 digital signature signing request on a data blob, return status is + * SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY (INIT and FINALIZE): send + * digital signature verify request with precalculated hash, return status is + * SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY (INIT and FINALIZE): send digital + * signature verify request, return status is SVC_STATUS_OK or + * SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY (INIT and FINALIZE): send the + * request to get the public key, return status is SVC_STATUS_OK or + * SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_ECDH_REQUEST (INIT and FINALIZE): send the request + * on generating a share secret on Diffie-Hellman key exchange, return + * status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_CRYPTO_HKDF_REQUEST: performs HKDF extract or expand with + * input key and data, the output key will be placed in key vault, return + * status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_RANDOM_NUMBER_GEN_EXT: extend random number generation, + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_SDOS_DATA_EXT: extend SDOS data 
encryption & decryption, + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_SMC_SVC_VERSION: Non-mailbox SMC SVC API Version, + * return status is SVC_STATUS_OK + * + * @COMMAND_FCS_CRYPTO_GET_DEVICE_IDENTITY: send the request to get the + * device identity, return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_MCTP_SEND: send the MCTP message, return status is + * SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_MBOX_SEND_CMD: send generic mailbox command, return status is + * SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_GET_IDCODE: get the device's IDCODE, return status is + * SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_QSPI_OPEN: open the QSPI proxy, return status is + * SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_QSPI_CLOSE: close the QSPI proxy, return status is + * SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_QSPI_SET_CS: set the QSPI proxy chip select, return status is + * SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_QSPI_READ: read from the QSPI proxy, return status is + * SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_QSPI_WRITE: write to the QSPI proxy, return status is + * SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_QSPI_ERASE: erase the QSPI proxy, return status is + * SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_SMC_ATF_BUILD_VER: Non-mailbox SMC ATF Build Version, + * return status is SVC_STATUS_OK + * + * @ + * */ enum stratix10_svc_command_code { /* for FPGA */ @@ -157,7 +305,8 @@ enum stratix10_svc_command_code { COMMAND_RSU_MAX_RETRY, COMMAND_RSU_DCMF_VERSION, COMMAND_RSU_DCMF_STATUS, - COMMAND_FIRMWARE_VERSION, + COMMAND_RSU_GET_DEVICE_INFO, + COMMAND_RSU_GET_SPT_TABLE, /* for FCS */ COMMAND_FCS_REQUEST_SERVICE = 20, COMMAND_FCS_SEND_CERTIFICATE, @@ -165,12 +314,84 @@ enum stratix10_svc_command_code { COMMAND_FCS_DATA_ENCRYPTION, COMMAND_FCS_DATA_DECRYPTION, COMMAND_FCS_RANDOM_NUMBER_GEN, + COMMAND_FCS_COUNTER_SET_PREAUTHORIZED, + 
COMMAND_FCS_GET_ROM_PATCH_SHA384, + /* for Attestation */ + COMMAND_FCS_PSGSIGMA_TEARDOWN = 30, + COMMAND_FCS_GET_CHIP_ID, + COMMAND_FCS_ATTESTATION_SUBKEY, + COMMAND_FCS_ATTESTATION_MEASUREMENTS, + COMMAND_FCS_ATTESTATION_CERTIFICATE, + COMMAND_FCS_ATTESTATION_CERTIFICATE_RELOAD, /* for general status poll */ COMMAND_POLL_SERVICE_STATUS = 40, + COMMAND_FIRMWARE_VERSION, + COMMAND_POLL_SERVICE_STATUS_ASYNC, + /* for crypto service */ + COMMAND_FCS_CRYPTO_OPEN_SESSION = 50, + COMMAND_FCS_CRYPTO_CLOSE_SESSION, + COMMAND_FCS_CRYPTO_IMPORT_KEY, + COMMAND_FCS_CRYPTO_EXPORT_KEY, + COMMAND_FCS_CRYPTO_REMOVE_KEY, + COMMAND_FCS_CRYPTO_GET_KEY_INFO, + COMMAND_FCS_CRYPTO_CREATE_KEY, + COMMAND_FCS_CRYPTO_AES_CRYPT_INIT, + COMMAND_FCS_CRYPTO_AES_CRYPT_UPDATE, + COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE, + COMMAND_FCS_CRYPTO_GET_DIGEST_INIT, + COMMAND_FCS_CRYPTO_GET_DIGEST_UPDATE, + COMMAND_FCS_CRYPTO_GET_DIGEST_FINALIZE, + COMMAND_FCS_CRYPTO_MAC_VERIFY_INIT, + COMMAND_FCS_CRYPTO_MAC_VERIFY_UPDATE, + COMMAND_FCS_CRYPTO_MAC_VERIFY_FINALIZE, + COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_INIT, + COMMAND_FCS_CRYPTO_ECDSA_HASH_SIGNING_FINALIZE, + COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_INIT, + COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE, + COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE, + COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_INIT, + COMMAND_FCS_CRYPTO_ECDSA_HASH_VERIFY_FINALIZE, + COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_INIT, + COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_UPDATE, + COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_FINALIZE, + COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_INIT, + COMMAND_FCS_CRYPTO_ECDSA_GET_PUBLIC_KEY_FINALIZE, + COMMAND_FCS_CRYPTO_ECDH_REQUEST_INIT, + COMMAND_FCS_CRYPTO_ECDH_REQUEST_FINALIZE, + COMMAND_FCS_CRYPTO_HKDF_REQUEST, + COMMAND_FCS_RANDOM_NUMBER_GEN_EXT, + COMMAND_FCS_SDOS_DATA_EXT, + COMMAND_WRITE_TO_SECURE_REG, + COMMAND_READ_SECURE_REG, + COMMAND_FCS_CRYPTO_AES_CRYPT_UPDATE_SMMU, + COMMAND_FCS_CRYPTO_AES_CRYPT_FINALIZE_SMMU, + 
COMMAND_FCS_CRYPTO_GET_DIGEST_UPDATE_SMMU, + COMMAND_FCS_CRYPTO_GET_DIGEST_FINALIZE_SMMU, + COMMAND_FCS_CRYPTO_MAC_VERIFY_UPDATE_SMMU, + COMMAND_FCS_CRYPTO_MAC_VERIFY_FINALIZE_SMMU, + COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE_SMMU, + COMMAND_FCS_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE_SMMU, + COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_UPDATE_SMMU, + COMMAND_FCS_CRYPTO_ECDSA_SHA2_VERIFY_FINALIZE_SMMU, + COMMAND_FCS_CRYPTO_GET_DEVICE_IDENTITY, + COMMAND_FCS_MCTP_SEND, /* for generic mailbox send command */ COMMAND_MBOX_SEND_CMD = 100, /* Non-mailbox SMC Call */ COMMAND_SMC_SVC_VERSION = 200, + /* for HWMON */ + COMMAND_HWMON_READTEMP, + COMMAND_HWMON_READVOLT, + /*for Device identity*/ + COMMAND_GET_IDCODE, + /*for QSPI proxy commands via SDM*/ + COMMAND_QSPI_OPEN, + COMMAND_QSPI_CLOSE, + COMMAND_QSPI_SET_CS, + COMMAND_QSPI_READ, + COMMAND_QSPI_WRITE, + COMMAND_QSPI_ERASE, + COMMAND_SMC_ATF_BUILD_VER }; /** @@ -188,7 +409,7 @@ struct stratix10_svc_client_msg { void *payload_output; size_t payload_length_output; enum stratix10_svc_command_code command; - u64 arg[3]; + u64 arg[6]; }; /** @@ -205,12 +426,14 @@ struct stratix10_svc_command_config_type { * @kaddr1: address of 1st completed data block * @kaddr2: address of 2nd completed data block * @kaddr3: address of 3rd completed data block + * @kaddr4: address of 4th completed data block */ struct stratix10_svc_cb_data { u32 status; void *kaddr1; void *kaddr2; void *kaddr3; + void *kaddr4; }; /** @@ -284,5 +507,93 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg); * request process. */ void stratix10_svc_done(struct stratix10_svc_chan *chan); + +/** + * @typedef async_callback_t + * @brief A type definition for an asynchronous callback function. + * + * This type defines a function pointer for an asynchronous callback. + * The callback function takes a single argument, which is a pointer to + * user-defined data. + * + * @param cb_arg A pointer to user-defined data passed to the callback function. 
+ */ +typedef void (*async_callback_t)(void *cb_arg); + +/** + * stratix10_svc_add_async_client - Add an asynchronous client to a Stratix 10 + * service channel. + * @chan: Pointer to the Stratix 10 service channel structure. + * @use_unique_clientid: Boolean flag indicating whether to use a unique client ID. + * + * This function registers an asynchronous client with the specified Stratix 10 + * service channel. If the use_unique_clientid flag is set to true, a unique client + * ID will be assigned to the client. + * + * Return: 0 on success, or a negative error code on failure: + * -EINVAL if the channel is NULL or the async controller is not initialized. + * -EALREADY if the async channel is already allocated. + * -ENOMEM if memory allocation fails. + * Other negative values if ID allocation fails + */ +int stratix10_svc_add_async_client(struct stratix10_svc_chan *chan, bool use_unique_clientid); + +/** + * stratix10_svc_remove_async_client - Remove an asynchronous client from the Stratix 10 + * service channel. + * @chan: Pointer to the Stratix 10 service channel structure. + * + * This function removes an asynchronous client from the specified Stratix 10 service channel. + * It is typically used to clean up and release resources associated with the client. + * + * Return: 0 on success, -EINVAL if the channel or asynchronous channel is invalid. + */ +int stratix10_svc_remove_async_client(struct stratix10_svc_chan *chan); + +/** + * stratix10_svc_async_send - Send an asynchronous message to the SDM mailbox + * in EL3 secure firmware. + * @chan: Pointer to the service channel structure. + * @msg: Pointer to the message to be sent. + * @handler: Pointer to the handler object used by caller to track the transaction. + * @cb: Callback function to be called upon completion. + * @cb_arg: Argument to be passed to the callback function. + * + * This function sends a message asynchronously to the SDM mailbox in EL3 secure firmware. 
+ * and registers a callback function to be invoked when the operation completes. + * + * Return: 0 on success,and negative error codes on failure. + */ +int stratix10_svc_async_send(struct stratix10_svc_chan *chan, void *msg, void **handler, + async_callback_t cb, void *cb_arg); + +/** + * stratix10_svc_async_poll - Polls the status of an asynchronous service request. + * @chan: Pointer to the service channel structure. + * @tx_handle: Handle to the transaction being polled. + * @data: Pointer to the callback data structure to be filled with the result. + * + * This function checks the status of an asynchronous service request + * and fills the provided callback data structure with the result. + * + * Return: 0 on success, -EINVAL if any input parameter is invalid or if the + * async controller is not initialized, -EAGAIN if the transaction is + * still in progress, or other negative error codes on failure. + */ +int stratix10_svc_async_poll(struct stratix10_svc_chan *chan, void *tx_handle, + struct stratix10_svc_cb_data *data); + +/** + * stratix10_svc_async_done - Complete an asynchronous transaction + * @chan: Pointer to the service channel structure + * @tx_handle: Pointer to the transaction handle + * + * This function completes an asynchronous transaction by removing the + * transaction from the hash table and deallocating the associated resources. + * + * Return: 0 on success, -EINVAL on invalid input or errors. 
+ */ +int stratix10_svc_async_done(struct stratix10_svc_chan *chan, void *tx_handle); + #endif diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index 0d4fe068f3d8a..d7ff2de060e7b 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -71,12 +71,15 @@ enum fpga_mgr_states { * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first * * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed + * + * %FPGA_MGR_BITSTREAM_AUTHENTICATE: do FPGA bitstream authentication only */ #define FPGA_MGR_PARTIAL_RECONFIG BIT(0) #define FPGA_MGR_EXTERNAL_CONFIG BIT(1) #define FPGA_MGR_ENCRYPTED_BITSTREAM BIT(2) #define FPGA_MGR_BITSTREAM_LSB_FIRST BIT(3) #define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4) +#define FPGA_MGR_BITSTREAM_AUTHENTICATE BIT(5) /** * struct fpga_image_info - information specific to an FPGA image @@ -213,6 +216,9 @@ struct fpga_manager { const struct fpga_manager_ops *mops; struct module *mops_owner; void *priv; +#if IS_ENABLED(CONFIG_FPGA_MGR_DEBUG_FS) + void *debugfs; +#endif }; #define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h index 6e5328c6c6afd..e2fb64af59aaf 100644 --- a/include/linux/i3c/master.h +++ b/include/linux/i3c/master.h @@ -254,6 +254,7 @@ struct i3c_device { #define I3C_BUS_I2C_FM_PLUS_SCL_RATE 1000000 #define I3C_BUS_I2C_FM_SCL_RATE 400000 #define I3C_BUS_TLOW_OD_MIN_NS 200 +#define I3C_BUS_THIGH_INIT_OD_MIN_NS 200 /** * enum i3c_bus_mode - I3C bus mode diff --git a/include/linux/mfd/altera-a10sr.h b/include/linux/mfd/altera-a10sr.h index d616da4b3c4c2..c64376be5bcec 100644 --- a/include/linux/mfd/altera-a10sr.h +++ b/include/linux/mfd/altera-a10sr.h @@ -49,13 +49,71 @@ #define ALTR_A10SR_IN_VALID_RANGE_HI 15 #define ALTR_A10SR_PWR_GOOD1_REG 0x08 /* Power Good1 Read */ +/* Power Good #1 Register Bit Definitions */ +#define ALTR_A10SR_PG1_OP_FLAG_SHIFT 7 /* Power On Complete */ +#define 
ALTR_A10SR_PG1_1V8_SHIFT 6 /* 1.8V Power Good */ +#define ALTR_A10SR_PG1_2V5_SHIFT 5 /* 2.5V Power Good */ +#define ALTR_A10SR_PG1_3V3_SHIFT 4 /* 3.3V Power Good */ +#define ALTR_A10SR_PG1_5V0_SHIFT 3 /* 5.0V Power Good */ +#define ALTR_A10SR_PG1_0V9_SHIFT 2 /* 0.9V Power Good */ +#define ALTR_A10SR_PG1_0V95_SHIFT 1 /* 0.95V Power Good */ +#define ALTR_A10SR_PG1_1V0_SHIFT 0 /* 1.0V Power Good */ + #define ALTR_A10SR_PWR_GOOD2_REG 0x0A /* Power Good2 Read */ +/* Power Good #2 Register Bit Definitions */ +#define ALTR_A10SR_PG2_HPS_SHIFT 7 /* HPS Power Good */ +#define ALTR_A10SR_PG2_HL_HPS_SHIFT 6 /* HILOHPS_VDD Power Good */ +#define ALTR_A10SR_PG2_HL_VDD_SHIFT 5 /* HILO VDD Power Good */ +#define ALTR_A10SR_PG2_HL_VDDQ_SHIFT 4 /* HILO VDDQ Power Good */ +#define ALTR_A10SR_PG2_FMCAVADJ_SHIFT 3 /* FMCA VADJ Power Good */ +#define ALTR_A10SR_PG2_FMCBVADJ_SHIFT 2 /* FMCB VADJ Power Good */ +#define ALTR_A10SR_PG2_FAC2MP_SHIFT 1 /* FAC2MP Power Good */ +#define ALTR_A10SR_PG2_FBC2MP_SHIFT 0 /* FBC2MP Power Good */ + #define ALTR_A10SR_PWR_GOOD3_REG 0x0C /* Power Good3 Read */ +/* Power Good #3 Register Bit Definitions */ +#define ALTR_A10SR_PG3_FAM2C_SHIFT 7 /* FAM2C Power Good */ +#define ALTR_A10SR_PG3_10V_FAIL_SHIFT 6 /* 10V Fail n */ +#define ALTR_A10SR_PG3_BF_PR_SHIFT 5 /* BF Present n */ +#define ALTR_A10SR_PG3_FILE_PR_SHIFT 4 /* File Present n */ +#define ALTR_A10SR_PG3_FMCA_PR_SHIFT 3 /* FMCA Present n */ +#define ALTR_A10SR_PG3_FMCB_PR_SHIFT 2 /* FMCB Present n */ +#define ALTR_A10SR_PG3_PCIE_PR_SHIFT 1 /* PCIE Present n */ +#define ALTR_A10SR_PG3_PCIE_WAKE_SHIFT 0 /* PCIe Wake N */ + #define ALTR_A10SR_FMCAB_REG 0x0E /* FMCA/B & PCIe Pwr Enable */ +/* FMCA/B & PCIe Power Bit Definitions */ +#define ALTR_A10SR_PCIE_EN_SHIFT 7 /* PCIe Pwr Enable */ +#define ALTR_A10SR_PCIE_AUXEN_SHIFT 6 /* PCIe Aux Pwr Enable */ +#define ALTR_A10SR_FMCA_EN_SHIFT 5 /* FMCA Pwr Enable */ +#define ALTR_A10SR_FMCA_AUXEN_SHIFT 4 /* FMCA Aux Pwr Enable */ +#define 
ALTR_A10SR_FMCB_EN_SHIFT 3 /* FMCB Pwr Enable */ +#define ALTR_A10SR_FMCB_AUXEN_SHIFT 2 /* FMCB Aux Pwr Enable */ + #define ALTR_A10SR_HPS_RST_REG 0x10 /* HPS Reset */ +#define ALTR_A10SR_HPS_UARTA_RSTN_SHIFT 7 /* UARTA Reset n */ +#define ALTR_A10SR_HPS_WARM_RSTN_SHIFT 6 /* WARM Reset n */ +#define ALTR_A10SR_HPS_WARM_RST1N_SHIFT 5 /* WARM Reset1 n */ +#define ALTR_A10SR_HPS_COLD_RSTN_SHIFT 4 /* COLD Reset n */ +#define ALTR_A10SR_HPS_NPOR_SHIFT 3 /* N Power On Reset */ +#define ALTR_A10SR_HPS_NRST_SHIFT 2 /* N Reset */ +#define ALTR_A10SR_HPS_ENET_RSTN_SHIFT 1 /* Ethernet Reset n */ +#define ALTR_A10SR_HPS_ENET_INTN_SHIFT 0 /* Ethernet IRQ n */ + #define ALTR_A10SR_USB_QSPI_REG 0x12 /* USB, BQSPI, FILE Reset */ +#define ALTR_A10SR_USB_RST_SHIFT 7 /* USB Reset */ +#define ALTR_A10SR_BQSPI_RST_N_SHIFT 6 /* BQSPI Reset n */ +#define ALTR_A10SR_FILE_RST_N_SHIFT 5 /* FILE Reset n */ +#define ALTR_A10SR_PCIE_PERST_N_SHIFT 4 /* PCIe PE Reset n */ + #define ALTR_A10SR_SFPA_REG 0x14 /* SFPA Control Reg */ #define ALTR_A10SR_SFPB_REG 0x16 /* SFPB Control Reg */ +/* SFPA Bit Definitions */ +#define ALTR_A10SR_SFP_TXDIS_SHIFT 7 /* SFPA TX Disable */ +#define ALTR_A10SR_SFP_RATESEL10 0x60 /* SFPA_Rate Select [1:0] */ +#define ALTR_A10SR_SFP_LOS_SHIFT 4 /* SFPA LOS */ +#define ALTR_A10SR_SFP_FAULT_SHIFT 3 /* SFPA Fault */ + #define ALTR_A10SR_I2C_M_REG 0x18 /* I2C Master Select */ #define ALTR_A10SR_WARM_RST_REG 0x1A /* HPS Warm Reset */ #define ALTR_A10SR_WR_KEY_REG 0x1C /* HPS Warm Reset Key */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 35b886385f329..82d61b2fd1550 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -65,6 +65,7 @@ struct macsec_ops; struct netdev_name_node; struct sd_flow_limit; struct sfp_bus; +struct qsfp_bus; /* 802.11 specific */ struct wireless_dev; /* 802.15.4 specific */ @@ -2356,6 +2357,7 @@ struct net_device { struct phy_link_topology *link_topo; struct phy_device *phydev; struct sfp_bus *sfp_bus; 
+ struct qsfp_bus *qsfp_bus; struct lock_class_key *qdisc_tx_busylock; bool proto_down; bool threaded; diff --git a/include/linux/pcs-altera-tse.h b/include/linux/pcs-altera-tse.h new file mode 100644 index 0000000000000..92ab9f08e8359 --- /dev/null +++ b/include/linux/pcs-altera-tse.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2022 Bootlin + * + * Maxime Chevallier + */ + +#ifndef __LINUX_PCS_ALTERA_TSE_H +#define __LINUX_PCS_ALTERA_TSE_H + +struct phylink_pcs; +struct net_device; + +struct phylink_pcs *alt_tse_pcs_create(struct net_device *ndev, + void __iomem *pcs_base, int reg_width); + +#endif /* __LINUX_PCS_ALTERA_TSE_H */ diff --git a/include/linux/qsfp.h b/include/linux/qsfp.h new file mode 100644 index 0000000000000..945519f14c12c --- /dev/null +++ b/include/linux/qsfp.h @@ -0,0 +1,552 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Intel QSFP_MODULE_DRIVER + * Copyright (C) 2020-2021 Intel Corporation. All rights reserved. + * + * Contributors: + * Deepak Nagaraju + * Original driver contributed by Intel. 
+ */ +#ifndef LINUX_QSFP_H +#define LINUX_QSFP_H + +#include +#include + +struct qsfp; + +struct qsfp_eeprom_base { + u8 etile_qsfp_identifier; //0x00 00 + u8 etile_qsfp_revision; //0x01 01 + u8 etile_qsfp_status; //0x02 02 + u8 etile_qsfp_interrupt_flags[19]; //0x03 03 + u8 etile_qsfp_device_monitors[12]; //0x16 22 + u8 etile_qsfp_channel_monitors[48]; //0x22 34 + u8 RESERVED_0[4]; //0x52 82 + u8 etile_qsfp_control[13]; //0x56 86 + u8 RESERVED_1; //0x63 99 + u8 etile_qsfp_device_channel_masks[5]; //0x64 100 + u8 etile_qsfp_vendor_specific[2]; //0x69 105 + u8 RESERVED_2; //0x6b 107 + u8 etile_device_properties_1[3]; //0x6c 108 + u8 etile_pci_express[2]; //0x6f 111 + u8 etile_device_properties_2[2]; //0x71 113 + u8 RESERVED_3[4]; //0x74 115 + u8 etile_qsfp_password_change[4]; //0x77 119 + u8 etile_qsfp_password_entry_area[4]; //0x7b 123 + u8 etile_qsfp_page_select_byte; //0x7f 127 + u8 etile_qsfp_identifier_1; //0x80 128 + u8 etile_qsfp_ext_identifier; //0x81 129 + u8 etile_qsfp_connector_type; //0x82 130 + u8 etile_qsfp_spec_compliance_1[8]; //0x83 131 + u8 etile_qsfp_encoding; //0x8b 139 + u8 etile_qsfp_br_nom; //0x8c 140 + u8 etile_qsfp_ext_compliance; //0x8d 141 + u8 etile_qsfp_link_lenghth_1; //0x8e 142 + u8 etile_qsfp_link_lenghth_2; //0x8f 143 + u8 etile_qsfp_link_lenghth_3; //0x90 144 + u8 etile_qsfp_link_lenghth_4; //0x91 145 + u8 etile_qsfp_link_lenghth_5; //0x92 146 + u8 etile_qsfp_device_technology; //0x93 147 + char etile_qsfp_vendor_name[16]; //0x94 148 + u8 etile_qsfp_extended_module; //0xa4 164 + char etile_qsfp_vendor_oui[3]; //0xa5 165 + char etile_qsfp_vendor_pn[16]; //0xa8 168 + char etile_qsfp_vendor_rev[2]; //0xb8 184 + u8 etile_qsfp_wavelength_copper[2]; //0xba 186 + u8 etile_qsfp_wavelength_tolerance[2]; //0xbc 188 + u8 etile_qsfp_max_case_temp[1]; //0xbe 190 + u8 etile_qsfp_cc_base; //0xbf 191 + u8 etile_qsfp_ext_spec_compliance; //0xc0 192 + u8 etile_qsfp_options_1; //0xc1 193 + u8 etile_qsfp_options_2; //0xc2 194 + u8 etile_qsfp_options_3; 
//0xc3 195 + char etile_qsfp_vendor_serial_number[16]; //0xc4 196 + char etile_qsfp_vendor_date_code[8]; //0xd4 212 + u8 etile_qsfp_diag_monitor; //0xdc 220 + u8 etile_qsfp_enhanced_options; //0xdd 221 + u8 etile_qsfp_br_nom_1; //0xde 222 + u8 etile_qsfp_cc_ext; //0xdf 223 + u8 etile_qsfp_venodor_specific_id[32]; //0xe0 224 + +} __packed; + + /** + * struct qsfp_eeprom_id - raw qsfp module identification information + * @base: base qsfp module identification structure + * @ext: extended qsfp module identification structure + * + * See the SFF-8472 specification and related documents for the definition + * of these structure members. This can be obtained from + * https://www.snia.org/technology-communities/sff/specifications + */ +struct qsfp_eeprom_id { + struct qsfp_eeprom_base base; + +} __packed; + +enum { + SFF8024_QSFP_DD_ENCODING_UNSPEC = 0x00, + SFF8024_QSFP_DD_ENCODING_8B10B = 0x01, + SFF8024_QSFP_DD_ENCODING_4B5B = 0x02, + SFF8024_QSFP_DD_ENCODING_NRZ = 0x03, + SFF8024_QSFP_DD_ENCODING_8436_SONET = 0x04, + SFF8024_QSFP_DD_ENCODING_8436_64B66B = 0x05, + SFF8024_QSFP_DD_ENCODING_8436_MANCHESTER = 0x06, + SFF8024_QSFP_DD_ENCODING_256B257B = 0x07, + SFF8024_QSFP_DD_ENCODING_PAM4 = 0x08, + SFF8024_ID_QSFP = 0x0c, + SFF8024_ID_QSFP_PLUS = 0x0d, + SFF8024_ID_QSFP_28 = 0x11, + SFF8024_ID_QSFP_DD_INF_8628 = 0x18, + SFF8024_ID_QSFP_8X = 0x19, + + SFF8024_QSFP_DD_CONNECTOR_UNSPEC = 0x00, + SFF8024_QSFP_DD_CONNECTOR_SC = 0x01, + SFF8024_QSFP_DD_CONNECTOR_FIBRE_CHANNEL_STYLE1 = 0x02, + SFF8024_QSFP_DD_CONNECTOR_FIBRE_CHANNEL_STYLE2 = 0x03, + SFF8024_QSFP_DD_CONNECTOR_BNC_TNC = 0x04, + SFF8024_QSFP_DD_CONNECTOR_FIBRE_CHANNEL_COAX_HEADERS = 0x05, + SFF8024_QSFP_DD_CONNECTOR_FIBERJACK = 0x06, + SFF8024_QSFP_DD_CONNECTOR_LC = 0x07, + SFF8024_QSFP_DD_CONNECTOR_MT_RJ = 0x08, + SFF8024_QSFP_DD_CONNECTOR_MU = 0x09, + SFF8024_QSFP_DD_CONNECTOR_SG = 0x0a, + SFF8024_QSFP_DD_CONNECTOR_OPTICAL_PIGTAIL = 0x0b, + SFF8024_QSFP_DD_CONNECTOR_MPO_1X12 = 0x0c, + 
SFF8024_QSFP_DD_CONNECTOR_MPO_2X16 = 0x0d, + SFF8024_QSFP_DD_CONNECTOR_HSSDC_II = 0x20, + SFF8024_QSFP_DD_CONNECTOR_COPPER_PIGTAIL = 0x21, + SFF8024_QSFP_DD_CONNECTOR_RJ45 = 0x22, + SFF8024_QSFP_DD_CONNECTOR_NOSEPARATE = 0x23, + SFF8024_QSFP_DD_CONNECTOR_MXC_2X16 = 0x24, + SFF8024_QSFP_DD_CONNECTOR_CS_OPTICAL_CONNECTOR = 0x25, + SFF8024_QSFP_DD_CONNECTOR_SN_OPICAL_CONNECTOR = 0x26, + SFF8024_QSFP_DD_CONNECTOR_MPO_2X12 = 0x27, + SFF8024_QSFP_DD_CONNECTOR_MXC_1X16 = 0x28, + + SFF8636_QSFP_DD_ECC_100GBASE_CR4 = 1, + SFF8636_QSFP_DD_ECC_CAUI4 = 0, + + SFF8636_QSFP_DD_ECC_LAUI2_C2M = 3, + SFF8636_QSFP_DD_ECC_50GAUI2_C2M = 2, + SFF8636_QSFP_DD_ECC_50GAUI1_C2M = 1, + SFF8636_QSFP_DD_ECC_CDAUI8_C2M = 0, + + /* SFF8636 Rev2.11 Specification Compliance Code */ + /* Byte 131 */ + SFF8636_QSFP_ECC_EXTENDED = BIT(7), + SFF8636_QSFP_ECC_10GBASE_LRM = BIT(6), + SFF8636_QSFP_ECC_10GBASE_LR = BIT(5), + SFF8636_QSFP_ECC_10GBASE_SR = BIT(4), + SFF8636_QSFP_ECC_40GBASE_CR4 = BIT(3), + SFF8636_QSFP_ECC_40GBASE_SR4 = BIT(2), + SFF8636_QSFP_ECC_40GBASE_LR4 = BIT(1), + SFF8636_QSFP_ECC_40G_ACTIVE_CABLE = BIT(0), + + SFF8024_QSFP_ECC_UNSPEC = 0x00, + SFF8024_QSFP_ECC_100G_25GAUI_C2M_AOC = 0x01, + SFF8024_QSFP_ECC_100GBASE_SR4_25GBASE_SR = 0x02, + SFF8024_QSFP_ECC_100GBASE_LR4_25GBASE_LR = 0x03, + SFF8024_QSFP_ECC_100GBASE_ER4_25GBASE_ER = 0x04, + SFF8024_QSFP_ECC_100GBASE_SR10 = 0x05, + SFF8024_QSFP_ECC_100G_CWDM4 = 0x06, + SFF8024_QSFP_ECC_100G_PSM4 = 0x07, + SFF8024_QSFP_ECC_100G_ACC_25G_AUI = 0x08, + SFF8024_QSFP_ECC_100GBASE_CR4 = 0x0b, + SFF8024_QSFP_ECC_25GBASE_CR_S = 0x0c, + SFF8024_QSFP_ECC_25GBASE_CR_N = 0x0d, + SFF8024_QSFP_ECC_10M = 0x0e, + SFF8024_QSFP_ECC_40GBASE_ER = 0x10, + SFF8024_QSFP_ECC_10GBASE_SR = 0x11, + SFF8024_QSFP_ECC_40G_PSM4 = 0x12, + SFF8024_QSFP_ECC_10GBASE_T_SFI = 0x16, + SFF8024_QSFP_ECC_100G_CLR4 = 0x17, + SFF8024_QSFP_ECC_100G_AOC_25G_AOC = 0x18, + SFF8024_QSFP_ECC_100G_ACC_25G_ACC = 0x19, + SFF8024_QSFP_ECC_10GBASE_T_SR = 0x1c, + SFF8024_QSFP_ECC_5GBASE_T 
= 0x1d, + SFF8024_QSFP_ECC_2_5GBASE_T = 0x1e, + SFF8024_QSFP_ECC_100G_25GAUI_C2M_AOC_LOW_BER = 0x18, + + SFF8024_QSFP_10g_base_unsp = 0x8, + SFF8024_QSFP_10g_base_lrm = 0x7, + SFF8024_QSFP_10g_base_lr = 0x6, + SFF8024_QSFP_10g_base_sr = 0x5, + SFF8024_QSFP_40g_base_cr4 = 0x4, + SFF8024_QSFP_40g_base_sr4 = 0x3, + SFF8024_QSFP_40g_base_lr4 = 0x2, + SFF8024_QSFP_40g_active_cable = 0x1, + + SFF8024_QSFP_sonet_oc48_long_reach = 2, + SFF8024_QSFP_sonet_oc48_intermediate_reach = 1, + SFF8024_QSFP_sonet_oc48_short_reach = 0, + + SFF8024_QSFP_SAS_24 = 7, + SFF8024_QSFP_SAS_12 = 6, + SFF8024_QSFP_SAS_6 = 5, + SFF8024_QSFP_SAS_3 = 4, + + SFF8024_QSFP_SCC_1000BASE_T = BIT(3), + SFF8024_QSFP_SCC_1000BASE_CX = BIT(2), + SFF8024_QSFP_SCC_1000BASE_LX = BIT(1), + SFF8024_QSFP_SCC_1000BASE_SX = BIT(0), + + SFF8024_QSFP_fc_ll_v = 7, + SFF8024_QSFP_fc_ll_s = 6, + SFF8024_QSFP_fc_ll_i = 5, + SFF8024_QSFP_fc_ll_l = 4, + SFF8024_QSFP_fc_ll_m = 3, + SFF8024_QSFP_fc_tech_reserved = 2, + SFF8024_QSFP_fc_tech_lc = 1, + SFF8024_QSFP_fc_tech_electrical_inter_enclosure = 0, + + SFF8024_QSFP_fc_tech_electrical_intra_enclosure = 7, + SFF8024_QSFP_fc_tech_sn = 6, + SFF8024_QSFP_fc_tech_sl = 5, + SFF8024_QSFP_fc_tech_ll = 4, + + SFF8024_QSFP_fc_media_tw = 7, + SFF8024_QSFP_fc_media_tp = 6, + SFF8024_QSFP_fc_media_mi = 5, + SFF8024_QSFP_fc_media_tv = 4, + SFF8024_QSFP_fc_media_m6 = 3, + SFF8024_QSFP_fc_media_m5 = 2, + SFF8024_QSFP_unallocated_9_1 = 1, + SFF8024_QSFP_fc_media_sm = 0, + + SFF8024_QSFP_SCC_FC_SPEED_1200 = BIT(7), + SFF8024_QSFP_SCC_FC_SPEED_800 = BIT(6), + SFF8024_QSFP_SCC_FC_SPEED_1600 = BIT(5), + SFF8024_QSFP_SCC_FC_SPEED_400 = BIT(4), + SFF8024_QSFP_SCC_FC_SPEED_3200 = BIT(3), + SFF8024_QSFP_SCC_FC_SPEED_200 = BIT(2), + SFF8024_QSFP_SCC_FC_EXTENDED = BIT(1), + SFF8024_QSFP_SCC_FC_SPEED_100 = BIT(0), +}; + +struct qsfp_quirk { + const char *vendor; + const char *part; + void (*modes)(const struct qsfp_eeprom_id *id, unsigned long *modes, + unsigned long *interfaces); + void 
(*fixup)(struct qsfp *qsfp); +}; + +/* qsfp EEPROM registers */ +enum { + QSFP_PHYS_ID = 0x00, + QSFP_STATUS = 0x01, + QSFP_STATUS_1 = 0x02, + QSFP_RX_TX_LOSS = 0x03, + QSFP_TX_FAULT = 0x04, + QSFP_DEVICE_MONITORS = 0x16, + QSFP_CHANNEL_MONITORS = 0x22, + QSFP_CONTROL = 0x56, + QSFP_DEVICE_CHANNEL_MASKS = 0x64, + QSFP_VENDOR_SPECIFIC = 0x69, + QSFP_DEVICE_PROPERTIES = 0x6C, + QSFP_PCI_EXPRESS = 0x6F, + QSFP_DEVICE_PROPERTIES_1 = 0x71, + QSFP_PASSWORD_CHANGE_AREA = 0x77, + QSFP_PASSWORD_ENTRY_AREA = 0x7B, + QSFP_PAGE_SELECT_BYTE = 0x7F, + QSFP_IDENTIFIER = 0x80, + QSFP_EXT_IDENTIFIER = 0x81, + QSFP_CONNECTOR = 0x82, + QSFP_COMPLIANCE = 0x83, + QSFP_ENCODING = 0x8b, + QSFP_BR_NOMINAL = 0x8C, + QSFP_EXT_RATE_SELECT = 0x8d, + QSFP_LINK_LEN_SMF = 0x8e, + QSFP_LINK_LEN_OM3_50 = 0x8f, + QSFP_LINK_LEN_OM2_50 = 0x90, + QSFP_LINK_LEN_OM2_62_5 = 0x91, + QSFP_LINK_LEN_COPPER_1M = 0x92, + QSFP_DEVICE_TECHNOLOGY = 0x93, + QSFP_VENDOR_NAME = 0x94, + QSFP_VENDOR_OUI = 0xa5, + QSFP_VENDOR_PN = 0xa8, + QSFP_VENDOR_REV = 0xb8, + QSFP_MAX_CASE_TEMP = 0xbe, + QSFP_CC_BASE = 0xbf, + QSFP_OPTIONS = 0xC3, + QSFP_VENDOR_SN = 0xC4, + QSFP_DATECODE = 0xD4, + QSFP_DIAGMON = 0xDc, + QSFP_ENHOPTS = 0xDD, + QSFP_CC_EXT = 0xDf, + + QSFP_TX4_LOS_CHANNEL_4 = BIT(7), + QSFP_TX3_LOS_CHANNEL_3 = BIT(6), + QSFP_TX2_LOS_CHANNEL_2 = BIT(5), + QSFP_TX1_LOS_CHANNEL_1 = BIT(4), + QSFP_RX4_LOS_CHANNEL_4 = BIT(3), + QSFP_RX3_LOS_CHANNEL_3 = BIT(2), + QSFP_RX2_LOS_CHANNEL_2 = BIT(1), + QSFP_RX1_LOS_CHANNEL_1 = BIT(0), + + QSFP_OPTIONS_TX_INPUT_ADAPTIVE = BIT(20), + QSFP_OPTIONS_TX_INPUT_EQUALIZATION = BIT(19), + QSFP_OPTIONS_TX_INPUT_EQUALIZATION_FIXED = BIT(18), + QSFP_OPTIONS_RX_OUTPUT_EMPHASIS = BIT(17), + QSFP_OPTIONS_RX_OUTPUT_APLITUDE = BIT(16), + QSFP_OPTIONS_TX_CDR_CONTROL = BIT(15), + QSFP_OPTIONS_RX_CDR_CONTROL = BIT(14), + QSFP_OPTIONS_TX_CDR_LOSS = BIT(13), + QSFP_OPTIONS_RX_CDR_LOSS = BIT(12), + QSFP_OPTIONS_RX_SQUELCH_DISABLE = BIT(11), + QSFP_OPTIONS_RX_OUTPUT_DISBALE = BIT(10), + 
QSFP_OPTIONS_TX_SQUELCH_DISABLE = BIT(9), + QSFP_OPTIONS_TX_OUTPUT_DISBALE = BIT(8), + QSFP_OPTIONS_MEMORY_PAGE_2 = BIT(7), + QSFP_OPTIONS_MEMORY_PAGE_1 = BIT(6), + QSFP_OPTIONS_RATE_SELECT = BIT(5), + QSFP_OPTIONS_TX_DISABLE = BIT(4), + QSFP_OPTIONS_TX_FAULT = BIT(3), + QSFP_OPTIONS_TX_SQUELCH_IMPLEMENTED = BIT(2), + QSFP_OPTIONS_TX_LOSS_SIGNAL = BIT(1), + QSFP_OPTIONS_PAGES = BIT(0), + + QSFP_DIAGMON_DDM = BIT(6), + QSFP_DIAGMON_INT_CAL = BIT(5), + QSFP_DIAGMON_EXT_CAL = BIT(4), + QSFP_DIAGMON_RXPWR_AVG = BIT(3), + QSFP_DIAGMON_ADDRMODE = BIT(2), + + QSFP_DIAGMON_RX_OPTICAL_POWER_MONITOR = BIT(4), + QSFP_DIAGMON_RX_OPTICAL_POWER_MEASUREMENT_TYPE = BIT(3), + QSFP_DIAGMON_TX_OPTICAL_POWER_MONITOR = BIT(2), + QSFP_DIAGMON_TX_BIAS_MONITOR_IMPLEMENTED = BIT(1), + QSFP_DIAGMON_RESERVED = BIT(0), + + QSFP_ENHOPTS_USER_DEFINED = BIT(7), + QSFP_ENHOPTS_VENDOR_SPECIFIC = BIT(6), + QSFP_ENHOPTS_INTERNAL_3_VOLTS = BIT(5), + QSFP_ENHOPTS_POWER_CHANGE_COMPLETE = BIT(4), + QSFP_ENHOPTS_RX_RATE_SELECT = BIT(3), + QSFP_ENHOPTS_APPLICATION_SELECTED = BIT(2), + + QSFP_SFF8472_COMPLIANCE_NONE = 0x00, + QSFP_SFF8472_COMPLIANCE_REV9_3 = 0x01, + QSFP_SFF8472_COMPLIANCE_REV9_5 = 0x02, + QSFP_SFF8472_COMPLIANCE_REV10_2 = 0x03, + QSFP_SFF8472_COMPLIANCE_REV10_4 = 0x04, + QSFP_SFF8472_COMPLIANCE_REV11_0 = 0x05, + QSFP_SFF8472_COMPLIANCE_REV11_3 = 0x06, + QSFP_SFF8472_COMPLIANCE_REV11_4 = 0x07, + QSFP_SFF8472_COMPLIANCE_REV12_0 = 0x08, + + QSFP_EXT_STATUS = 0x76, +}; + +struct fwnode_handle; +struct ethtool_eeprom; +struct ethtool_modinfo; +struct qsfp_bus; + + /** + * struct qsfp_upstream_ops - upstream operations structure + * @attach: called when the qsfp socket driver is bound to the upstream + * (mandatory). + * @detach: called when the qsfp socket driver is unbound from the upstream + * (mandatory). + * @module_insert: called after a module has been detected to determine + * whether the module is supported for the upstream device. 
+ * @module_remove: called after the module has been removed. + * @module_start: called after the PHY probe step + * @module_stop: called before the PHY is removed + * @link_down: called when the link is non-operational for whatever + * reason. + * @link_up: called when the link is operational. + * @connect_phy: called when an I2C accessible PHY has been detected + * on the module. + * @disconnect_phy: called when a module with an I2C accessible PHY has + * been removed. + */ +struct qsfp_upstream_ops { + void (*attach)(void *priv, struct qsfp_bus *bus); + void (*detach)(void *priv, struct qsfp_bus *bus); + int (*module_insert)(void *priv, const struct qsfp_eeprom_id *id); + void (*module_remove)(void *priv); + int (*module_start)(void *priv); + void (*module_stop)(void *priv); + void (*link_down)(void *priv); + void (*link_up)(void *priv); + int (*connect_phy)(void *priv, struct phy_device *phydev); + void (*disconnect_phy)(void *priv); +}; + +struct qsfp_socket_ops { + void (*attach)(struct qsfp *qsfp); + void (*detach)(struct qsfp *qsfp); + void (*start)(struct qsfp *qsfp); + void (*stop)(struct qsfp *qsfp); + int (*module_info)(struct qsfp *qsfp, struct ethtool_modinfo *modinfo); + int (*module_eeprom)(struct qsfp *qsfp, struct ethtool_eeprom *ee, + u8 *data); +}; + +#if IS_ENABLED(CONFIG_QSFP_MULTI_CHANNEL) +unsigned int qsfp_gpio_get_state(struct qsfp *qsfp); +void qsfp_gpio_set_state(struct qsfp *qsfp, unsigned int state); +unsigned int sff_gpio_get_state(struct qsfp *qsfp); +unsigned int qsfp_get_state(struct qsfp *qsfp); +void qsfp_set_state(struct qsfp *qsfp, unsigned int state); + +int qsfp_multi_module_revision(struct qsfp *old); +int get_cable_attach(struct qsfp *old); +void get_cable_info(char *vendor_name); +int get_tx_rx_loss(struct qsfp *old); +int get_tx_fault(struct qsfp *old); +int ext_status(struct qsfp *old); +int eeprom_id_base(struct qsfp *old); +int soft_poll_start(struct qsfp *old); +int get_qsfp_options(struct qsfp *old); +int 
soft_mask(struct qsfp *old); +struct phy_device *gsfp_mod_phy(struct qsfp *old); +int gsfp_probe_phy(struct qsfp *old); +int get_module_revision(struct qsfp *qsfp); + +int qsfp_parse_port(struct qsfp_bus *bus, const struct qsfp_eeprom_id *id, + unsigned long *support); +bool qsfp_may_have_phy(struct qsfp_bus *bus, const struct qsfp_eeprom_id *id); +void qsfp_parse_support(struct qsfp_bus *bus, const struct qsfp_eeprom_id *id, + unsigned long *support, unsigned long *interfaces); +phy_interface_t qsfp_select_interface(struct qsfp_bus *bus, + unsigned long *link_modes); + +int qsfp_get_module_info(struct qsfp_bus *bus, struct ethtool_modinfo *modinfo); +int qsfp_get_module_eeprom(struct qsfp_bus *bus, struct ethtool_eeprom *ee, + u8 *data); +void qsfp_upstream_start(struct qsfp_bus *bus); +void qsfp_upstream_stop(struct qsfp_bus *bus); +void qsfp_bus_put(struct qsfp_bus *bus); +struct qsfp_bus *qsfp_bus_find_fwnode(const struct fwnode_handle *fwnode); +int qsfp_bus_add_upstream(struct qsfp_bus *bus, void *upstream, + const struct qsfp_upstream_ops *ops); +void qsfp_bus_del_upstream(struct qsfp_bus *bus); + +int qsfp_add_phy(struct qsfp_bus *bus, struct phy_device *phydev); +void qsfp_remove_phy(struct qsfp_bus *bus); +void qsfp_link_up(struct qsfp_bus *bus); +void qsfp_link_down(struct qsfp_bus *bus); +int qsfp_module_insert(struct qsfp_bus *bus, const struct qsfp_eeprom_id *id, + const struct qsfp_quirk *quirk); +void qsfp_module_remove(struct qsfp_bus *bus); +int qsfp_module_start(struct qsfp_bus *bus); +void qsfp_module_stop(struct qsfp_bus *bus); +int qsfp_link_configure(struct qsfp_bus *bus, const struct qsfp_eeprom_id *id); +struct qsfp_bus *qsfp_register_socket(struct device *dev, struct qsfp *qsfp, + const struct qsfp_socket_ops *ops); +void qsfp_unregister_socket(struct qsfp_bus *bus); + +int get_cable_attach(struct qsfp *old); +int get_channel_info(struct qsfp *old); + +#elif IS_ENABLED(CONFIG_QSFP) +int qsfp_parse_port(struct qsfp_bus *bus, const struct 
qsfp_eeprom_id *id, + unsigned long *support); +bool qsfp_may_have_phy(struct qsfp_bus *bus, const struct qsfp_eeprom_id *id); +void qsfp_parse_support(struct qsfp_bus *bus, const struct qsfp_eeprom_id *id, + unsigned long *support, unsigned long *interfaces); +phy_interface_t qsfp_select_interface(struct qsfp_bus *bus, + unsigned long *link_modes); + +int qsfp_get_module_info(struct qsfp_bus *bus, struct ethtool_modinfo *modinfo); +int qsfp_get_module_eeprom(struct qsfp_bus *bus, struct ethtool_eeprom *ee, + u8 *data); +void qsfp_upstream_start(struct qsfp_bus *bus); +void qsfp_upstream_stop(struct qsfp_bus *bus); +void qsfp_bus_put(struct qsfp_bus *bus); +struct qsfp_bus *qsfp_bus_find_fwnode(const struct fwnode_handle *fwnode); +int qsfp_bus_add_upstream(struct qsfp_bus *bus, void *upstream, + const struct qsfp_upstream_ops *ops); +void qsfp_bus_del_upstream(struct qsfp_bus *bus); + +int qsfp_add_phy(struct qsfp_bus *bus, struct phy_device *phydev); +void qsfp_remove_phy(struct qsfp_bus *bus); +void qsfp_link_up(struct qsfp_bus *bus); +void qsfp_link_down(struct qsfp_bus *bus); +int qsfp_module_insert(struct qsfp_bus *bus, const struct qsfp_eeprom_id *id, + const struct qsfp_quirk *quirk); +void qsfp_module_remove(struct qsfp_bus *bus); +int qsfp_module_start(struct qsfp_bus *bus); +void qsfp_module_stop(struct qsfp_bus *bus); +int qsfp_link_configure(struct qsfp_bus *bus, const struct qsfp_eeprom_id *id); +struct qsfp_bus *qsfp_register_socket(struct device *dev, struct qsfp *qsfp, + const struct qsfp_socket_ops *ops); +void qsfp_unregister_socket(struct qsfp_bus *bus); + +int get_cable_attach(struct qsfp *old); +int get_channel_info(struct qsfp *old); + +#else + +static inline int qsfp_parse_port(struct qsfp_bus *bus, + const struct qsfp_eeprom_id *id, + unsigned long *support) +{ + return PORT_OTHER; +} + +static inline bool qsfp_may_have_phy(struct qsfp_bus *bus, + const struct qsfp_eeprom_id *id) +{ + return false; +} + +static inline void 
qsfp_parse_support(struct qsfp_bus *bus, + const struct qsfp_eeprom_id *id, + unsigned long *support, + unsigned long *interfaces) +{ +} + +static inline phy_interface_t qsfp_select_interface(struct qsfp_bus *bus, + unsigned long *link_modes) +{ + return PHY_INTERFACE_MODE_NA; +} + +static inline int qsfp_get_module_info(struct qsfp_bus *bus, + struct ethtool_modinfo *modinfo) +{ + return -EOPNOTSUPP; +} + +static inline int qsfp_get_module_eeprom(struct qsfp_bus *bus, + struct ethtool_eeprom *ee, u8 *data) +{ + return -EOPNOTSUPP; +} + +static inline void qsfp_upstream_start(struct qsfp_bus *bus) +{ +} + +static inline void qsfp_upstream_stop(struct qsfp_bus *bus) +{ +} + +static inline void qsfp_bus_put(struct qsfp_bus *bus) +{ +} + +static inline struct qsfp_bus * +qsfp_bus_find_fwnode(const struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline int qsfp_bus_add_upstream(struct qsfp_bus *bus, void *upstream, + const struct qsfp_upstream_ops *ops) +{ + return 0; +} + +static inline void qsfp_bus_del_upstream(struct qsfp_bus *bus) +{ +} + +#endif +#endif /* LINUX_QSFP_H */ diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index d79ff252cfdc1..7973229862bb8 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -99,6 +99,7 @@ struct stmmac_dma_cfg { bool aal; bool eame; bool multi_msi_en; + bool multi_irq_en; bool dche; bool atds; }; @@ -227,6 +228,7 @@ struct plat_stmmacenet_data { u32 tx_queues_to_use; u8 rx_sched_algorithm; u8 tx_sched_algorithm; + bool tx_buf_quirk; struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES]; struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES]; void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode); @@ -266,6 +268,11 @@ struct plat_stmmacenet_data { unsigned int eee_usecs_rate; struct pci_dev *pdev; int int_snapshot_num; + int ext_snapshot_num; + bool int_snapshot_en; + bool ext_snapshot_en; + bool multi_msi_en; + bool multi_irq_en; int msi_mac_vec; int msi_wol_vec; int 
msi_lpi_vec; diff --git a/include/misc/socfpga_fcs_hal.h b/include/misc/socfpga_fcs_hal.h new file mode 100644 index 0000000000000..70a5b36d06d89 --- /dev/null +++ b/include/misc/socfpga_fcs_hal.h @@ -0,0 +1,1060 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later OR MIT */ +/* + * Copyright (C) 2024 Altera + */ + +/** + * + * @file socfpga_fcs_hal.h + * @brief contains API interfaces description to be called by upper layer. + */ +#ifndef SPCFPGA_FCS_HAL_H +#define SPCFPGA_FCS_HAL_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "socfpga_fcs_types.h" + +#define MAX_SESSION 1 +#define CRYPTO_EXPORTED_KEY_OBJECT_MAX_SZ 364 +#define CRYPTO_KEY_INFO_MAX_SZ 144 +#define CRYPTO_CREATE_KEY_STATUS_MAX_SZ 1 +#define CRYPTO_PROVISION_DATA_MAX_SZ 1024 +#define FCS_KDK_MAX_SZ 384 +#define FCS_DIGEST_STAGE_INIT 0 +#define FCS_DIGEST_STAGE_UPDATE 1 +#define FCS_DIGEST_STAGE_FINAL 2 + +#define SDOS_HEADER_SZ 40 +#define SDOS_HMAC_SZ 48 +#define SDOS_MAGIC_WORD 0xACBDBDED +#define SDOS_HEADER_PADDING 0x01020304 +#define SDOS_PLAINDATA_MIN_SZ 32 +#define SDOS_PLAINDATA_MAX_SZ 32672 +#define SDOS_DECRYPTED_MIN_SZ (SDOS_PLAINDATA_MIN_SZ + SDOS_HEADER_SZ) +#define SDOS_DECRYPTED_MAX_SZ (SDOS_PLAINDATA_MAX_SZ + SDOS_HEADER_SZ) +#define SDOS_ENCRYPTED_MIN_SZ (SDOS_PLAINDATA_MIN_SZ + SDOS_HEADER_SZ + SDOS_HMAC_SZ) +#define SDOS_ENCRYPTED_MAX_SZ (SDOS_PLAINDATA_MAX_SZ + SDOS_HEADER_SZ + SDOS_HMAC_SZ) + +#pragma pack(push, 1) +struct fcs_cmd_context { + /* Error status variable address */ + FCS_HAL_INT *error_code_addr; + union { + struct { + /* Session id */ + FCS_HAL_CHAR *suuid; + FCS_HAL_UINT *suuid_len; + } open_session; + + struct { + /* Session id */ + FCS_HAL_UUID suuid; + } close_session; + + struct { + /* Session id */ + FCS_HAL_UUID suuid; + FCS_HAL_CHAR *key; + FCS_HAL_UINT key_len; + FCS_HAL_CHAR *status; + FCS_HAL_UINT *status_len; + } import_key; + + struct { + /* Session id */ + FCS_HAL_UUID suuid; + FCS_HAL_U32 key_id; + FCS_HAL_CHAR *key; + 
FCS_HAL_UINT *key_len; + } export_key; + + struct { + /* Session id */ + FCS_HAL_UUID suuid; + FCS_HAL_U32 key_id; + } remove_key; + + struct { + /* Session id */ + FCS_HAL_UUID suuid; + /* random number size */ + FCS_HAL_U32 key_id; + FCS_HAL_CHAR *info; + FCS_HAL_UINT *info_len; + } key_info; + + struct { + /* Session id */ + FCS_HAL_UUID suuid; + FCS_HAL_CHAR *key; + FCS_HAL_UINT key_len; + FCS_HAL_CHAR *status; + FCS_HAL_UINT *status_len; + } create_key; + + struct { + /* Session id */ + FCS_HAL_UUID suuid; + FCS_HAL_U32 key_id; + FCS_HAL_U32 step_type; + FCS_HAL_U32 mac_mode; + FCS_HAL_CHAR *ikm; + FCS_HAL_U32 ikm_len; + FCS_HAL_CHAR *info; + FCS_HAL_U32 info_len; + FCS_HAL_CHAR *output_key_obj; + FCS_HAL_U32 output_key_obj_len; + FCS_HAL_U32 *hkdf_resp; + } hkdf_req; + + struct { + FCS_HAL_CHAR *data; + FCS_HAL_U32 *data_len; + } prov_data; + + struct { + FCS_HAL_U32 cache; + FCS_HAL_CHAR *ccert; + FCS_HAL_U32 ccert_len; + FCS_HAL_CHAR *status; + FCS_HAL_UINT *status_len; + } ctr_set; + + struct { + FCS_HAL_U32 ctr_type; + FCS_HAL_U32 ctr_val; + FCS_HAL_INT test; + } ctr_set_preauth; + + struct { + /* Session id */ + FCS_HAL_UUID suuid; + /* context id */ + FCS_HAL_U32 context_id; + FCS_HAL_CHAR *rng; + FCS_HAL_U32 rng_len; + } rng; + + struct { + /* Session id */ + FCS_HAL_UUID suuid; + /* context id */ + FCS_HAL_U32 context_id; + FCS_HAL_U32 key_id; + FCS_HAL_U32 sha_op_mode; + FCS_HAL_U32 sha_digest_sz; + FCS_HAL_CHAR *src; + FCS_HAL_U32 src_len; + FCS_HAL_CHAR *digest; + FCS_HAL_U32 *digest_len; + FCS_HAL_UINT stage; + } dgst; + + struct { + /* Session id */ + FCS_HAL_UUID suuid; + /* context id */ + FCS_HAL_U32 context_id; + FCS_HAL_U32 key_id; + FCS_HAL_U32 sha_op_mode; + FCS_HAL_U32 sha_digest_sz; + FCS_HAL_CHAR *src; + FCS_HAL_U32 src_size; + FCS_HAL_CHAR *dst; + FCS_HAL_U32 *dst_size; + FCS_HAL_U32 user_data_size; + } mac_verify; + + struct { + FCS_HAL_UUID suuid; + FCS_HAL_UINT cid; /* Context ID */ + FCS_HAL_UINT kid; /* Key ID */ + FCS_HAL_U8 
mode; /* ECB/CBS/CTR */ + FCS_HAL_U8 crypt; /* Encrypt/Decrypt */ + FCS_HAL_U32 aad_len; /* AAD Length */ + FCS_HAL_U16 tag_len; /* Tag length */ + FCS_HAL_U8 iv_source; /* IV source External/Internal */ + FCS_HAL_CHAR *iv; /* IV */ + FCS_HAL_CHAR *aad; /* AAD */ + FCS_HAL_CHAR *tag; /* Tag */ + FCS_HAL_CHAR *input; /* Input data */ + FCS_HAL_UINT ip_len; /* Input Length */ + FCS_HAL_CHAR *output; /* Output data */ + FCS_HAL_UINT *op_len; /* Output Length */ + FCS_HAL_UINT input_pad; /* Source data padding (only GCM mode) */ + } aes; + + struct { + FCS_HAL_UUID suuid; + FCS_HAL_U32 kid; + FCS_HAL_U32 cid; + FCS_HAL_U32 ecc_curve; + FCS_HAL_CHAR *pubkey; + FCS_HAL_U32 pubkey_len; + FCS_HAL_CHAR *sh_secret; + FCS_HAL_U32 *sh_secret_len; + } ecdh_req; + + struct { + FCS_HAL_U32 *chip_id_lo; + FCS_HAL_U32 *chip_id_hi; + } chip_id; + + struct { + FCS_HAL_INT cert_request; + FCS_HAL_CHAR *cert; + FCS_HAL_INT *cert_size; + } attestation_cert; + + struct { + FCS_HAL_INT cert_request; + } attestation_cert_reload; +#ifdef CONFIG_ALTERA_SOCFPGA_FCS_DEBUG + struct { + FCS_HAL_U32 mbox_cmd; + FCS_HAL_VOID *cmd_data; + FCS_HAL_U32 cmd_data_sz; + FCS_HAL_VOID *resp_data; + FCS_HAL_U32 *resp_data_sz; + } mbox; +#endif + struct { + FCS_HAL_CHAR *mctp_req; + FCS_HAL_U32 mctp_req_len; + FCS_HAL_CHAR *mctp_resp; + FCS_HAL_U32 *mctp_resp_len; + } mctp; + + struct { + FCS_HAL_U32 *jtag_idcode; + } jtag_id; + + struct { + FCS_HAL_CHAR *identity; + FCS_HAL_U32 *identity_len; + } device_identity; + + struct { + FCS_HAL_U32 chipsel; + } qspi_cs; + + struct { + FCS_HAL_U32 qspi_addr; + FCS_HAL_U32 qspi_len; + FCS_HAL_CHAR *qspi_data; + FCS_HAL_U32 *qspi_data_len; + } qspi_read, qspi_write; + + struct { + FCS_HAL_U32 qspi_addr; + FCS_HAL_U32 len; + } qspi_erase; + + struct { + FCS_HAL_VOID *qspi_info; + FCS_HAL_U32 qspi_info_len; + } qspi_dev_info; + + struct { + /* Session id */ + FCS_HAL_UUID suuid; + /* context id */ + FCS_HAL_U32 context_id; + FCS_HAL_U32 op_mode; + FCS_HAL_CHAR *src; + 
FCS_HAL_U32 src_size; + FCS_HAL_CHAR *dst; + FCS_HAL_U32 *dst_size; + FCS_HAL_U16 id; + FCS_HAL_U64 own; + FCS_HAL_INT pad; + } sdos; + + struct { + FCS_HAL_UUID suuid; + FCS_HAL_U32 context_id; + FCS_HAL_U32 key_id; + FCS_HAL_U32 ecc_curve; + FCS_HAL_CHAR *pubkey; + FCS_HAL_U32 *pubkey_len; + } ecdsa_pub_key; + + struct { + FCS_HAL_UUID suuid; + FCS_HAL_U32 context_id; + FCS_HAL_U32 key_id; + FCS_HAL_U32 ecc_curve; + FCS_HAL_CHAR *src; + FCS_HAL_U32 src_len; + FCS_HAL_CHAR *dst; + FCS_HAL_U32 *dst_len; + } ecdsa_hash_sign; + + struct { + FCS_HAL_UUID suuid; + FCS_HAL_U32 context_id; + FCS_HAL_U32 key_id; + FCS_HAL_U32 ecc_curve; + FCS_HAL_CHAR *src; + FCS_HAL_U32 src_len; + FCS_HAL_CHAR *signature; + FCS_HAL_U32 signature_len; + FCS_HAL_CHAR *pubkey; + FCS_HAL_U32 pubkey_len; + FCS_HAL_CHAR *dst; + FCS_HAL_U32 *dst_len; + } ecdsa_hash_verify; + + struct { + FCS_HAL_UUID suuid; + FCS_HAL_U32 context_id; + FCS_HAL_U32 key_id; + FCS_HAL_U32 ecc_curve; + FCS_HAL_CHAR *src; + FCS_HAL_U32 src_len; + FCS_HAL_CHAR *dst; + FCS_HAL_U32 *dst_len; + } ecdsa_sha2_data_sign; + + struct { + FCS_HAL_UUID suuid; + FCS_HAL_U32 context_id; + FCS_HAL_U32 key_id; + FCS_HAL_U32 ecc_curve; + FCS_HAL_CHAR *signature; + FCS_HAL_U32 signature_len; + FCS_HAL_CHAR *pubkey; + FCS_HAL_U32 pubkey_len; + FCS_HAL_U32 user_data_sz; + FCS_HAL_CHAR *src; + FCS_HAL_U32 src_len; + FCS_HAL_CHAR *dst; + FCS_HAL_U32 *dst_len; + } ecdsa_sha2_data_verify; + + /* This command sends the certificate to the device requesting validation + * of an HPS image + */ + struct { + FCS_HAL_CHAR *vab_cert; + FCS_HAL_U32 vab_cert_len; + FCS_HAL_U32 test; + FCS_HAL_U32 *resp; + } hps_img_validate; + }; +}; + +#pragma pack(pop) + +/** + * @brief data struct of message which stands for the communication + * format with ATF when talk with OS dependent layer API + */ +struct socfpga_fcs_priv { + /** Communication channel */ + FCS_HAL_CHAN *chan; + /** plat data */ + struct socfpga_fcs_service_ops *plat_data; + /* command 
context */ + struct fcs_cmd_context k_ctx; + /** cli structure */ + FCS_SVC_CLIENT client; + /** Completion status */ + FCS_HAL_COMPLETION completion; + /** Mutex lock */ + FCS_HAL_MUTEX lock; + /** status */ + FCS_HAL_INT status; + /** response */ + FCS_HAL_U32 resp; + /** Size */ + FCS_HAL_U32 resp_size; + /** chip ID */ + FCS_HAL_U32 chip_id_lo; + FCS_HAL_U32 chip_id_hi; + /* Session ID */ + FCS_HAL_U32 session_id; + /** UUID */ + FCS_HAL_UUID uuid_id; + /** Client ID */ + FCS_HAL_U32 client_id; + /** Hardware RNG */ + FCS_HAL_VOID *hwrng; + /** device to issue command */ + FCS_HAL_DEV *dev; + /** ATF version */ + FCS_HAL_U32 atf_version[3]; + /** Reserved */ + FCS_HAL_VOID *preserved; +}; + +enum fcs_command_code { + FCS_DEV_COMMAND_NONE = 0, + FCS_DEV_CERTIFICATE, + FCS_DEV_HPS_IMG_VALIDATE_REQUEST, + FCS_DEV_HPS_IMG_VALIDATE_POLL_SERVICE, + FCS_DEV_COUNTER_SET, + FCS_DEV_COUNTER_SET_POLL_SERVICE, + FCS_DEV_COUNTER_SET_PREAUTHORIZED, + FCS_DEV_GET_PROVISION_DATA, + FCS_DEV_GET_PROVISION_DATA_POLL_SERVICE, + FCS_DEV_DATA_ENCRYPTION, + FCS_DEV_DATA_DECRYPTION, + FCS_DEV_PSGSIGMA_TEARDOWN, + FCS_DEV_CHIP_ID, + FCS_DEV_ATTESTATION_SUBKEY, + FCS_DEV_ATTESTATION_MEASUREMENT, + FCS_DEV_ATTESTATION_GET_CERTIFICATE, + FCS_DEV_ATTESTATION_CERTIFICATE_RELOAD, + FCS_DEV_GET_ROM_PATCH_SHA384, + FCS_DEV_CRYPTO_OPEN_SESSION, + FCS_DEV_CRYPTO_CLOSE_SESSION, + FCS_DEV_CRYPTO_IMPORT_KEY, + FCS_DEV_IMPORT_KEY_POLL_SERVICE, + FCS_DEV_CRYPTO_EXPORT_KEY, + FCS_DEV_CRYPTO_REMOVE_KEY, + FCS_DEV_CRYPTO_GET_KEY_INFO, + FCS_DEV_CRYPTO_CREATE_KEY, + FCS_DEV_CRYPTO_CREATE_KEY_POLL_SERVICE, + FCS_DEV_CRYPTO_AES_CRYPT, + FCS_DEV_CRYPTO_GET_DIGEST_INIT, + FCS_DEV_CRYPTO_GET_DIGEST_UPDATE, + FCS_DEV_CRYPTO_GET_DIGEST_FINAL, + FCS_DEV_CRYPTO_MAC_VERIFY_INIT, + FCS_DEV_CRYPTO_MAC_VERIFY_UPDATE, + FCS_DEV_CRYPTO_MAC_VERIFY_FINAL, + FCS_DEV_CRYPTO_AES_CRYPT_INIT, + FCS_DEV_CRYPTO_AES_CRYPT_UPDATE, + FCS_DEV_CRYPTO_AES_CRYPT_FINAL, + FCS_DEV_CRYPTO_AES_CRYPT_POLL_SERVICE, + 
FCS_DEV_CRYPTO_GET_DIGEST, + FCS_DEV_CRYPTO_MAC_VERIFY, + FCS_DEV_CRYPTO_ECDSA_HASH_SIGNING, + FCS_DEV_CRYPTO_ECDSA_HASH_SIGNING_INIT, + FCS_DEV_CRYPTO_ECDSA_HASH_SIGNING_FINALIZE, + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING, + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_INIT, + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_UPDATE, + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_FINALIZE, + FCS_DEV_CRYPTO_ECDSA_HASH_VERIFY, + FCS_DEV_CRYPTO_ECDSA_HASH_VERIFY_INIT, + FCS_DEV_CRYPTO_ECDSA_HASH_VERIFY_FINALIZE, + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY, + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_INIT, + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_UPDATE, + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_FINALIZE, + FCS_DEV_CRYPTO_ECDSA_GET_PUBLIC_KEY_INIT, + FCS_DEV_CRYPTO_ECDSA_GET_PUBLIC_KEY_FINALIZE, + FCS_DEV_CRYPTO_ECDSA_GET_PUBLIC_KEY, + FCS_DEV_CRYPTO_ECDH_REQUEST_INIT, + FCS_DEV_CRYPTO_ECDH_REQUEST_FINALIZE, + FCS_DEV_CRYPTO_HKDF_REQUEST, + FCS_DEV_RANDOM_NUMBER_GEN, + FCS_DEV_RNG_ASYNC_POLL_SERVICE, + FCS_DEV_SDOS_DATA_EXT, + FCS_DEV_CRYPTO_AES_CRYPT_SMMU, + FCS_DEV_CRYPTO_GET_DIGEST_SMMU, + FCS_DEV_CRYPTO_MAC_VERIFY_SMMU, + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_SMMU, + FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_SMMU, + FCS_DEV_CHECK_SMMU_ENABLED, + FCS_DEV_MCTP_REQUEST, + FCS_DEV_GET_IDCODE, + FCS_DEV_GET_DEVICE_IDENTITY, + FCS_DEV_QSPI_OPEN, + FCS_DEV_QSPI_CLOSE, + FCS_DEV_QSPI_CS, + FCS_DEV_QSPI_READ, + FCS_DEV_QSPI_WRITE, + FCS_DEV_QSPI_ERASE, + FCS_DEV_ATF_VERSION, + +#ifdef CONFIG_ALTERA_SOCFPGA_FCS_DEBUG + FCS_DEV_MBOX_SEND, +#endif +}; + +/** + * @brief Gets the FCS command context. + * + * This function gets the FCS command context. + * + * @return Returns a pointer to the FCS command context. + */ +struct fcs_cmd_context *hal_get_fcs_cmd_ctx(void); + +/** + * @brief Destroys the FCS command context. + * + * This function creates the FCS command context. + * + * @return Returns a pointer to the FCS command context. 
+ */ +FCS_HAL_VOID hal_destroy_fcs_cmd_ctx(struct fcs_cmd_context *const k_ctx); + +/** + * @brief Releases the FCS command context. + * + * This function releases the FCS command context. + * + * @param k_ctx A pointer to the command context structure. + */ +FCS_HAL_VOID hal_release_fcs_cmd_ctx(struct fcs_cmd_context *const k_ctx); + +/** + * @brief Initializes the FCS HAL. + * + * This function initializes the FCS HAL and performs any necessary setup. + * + * @return Returns an FCS_HAL_INT value indicating the status of the initialization. + */ +FCS_HAL_INT hal_fcs_init(struct device *dev); + +/** + * @brief Cleans up the FCS HAL. + * + * This function cleans up the FCS HAL and performs any necessary cleanup. + * + * @return Returns an FCS_HAL_INT value indicating the status of the cleanup. + */ + +FCS_HAL_VOID hal_fcs_cleanup(void); + +/** + * @brief Requests SDM to open a session. + * + * This function is used to request the SDM (System Device Manager) to open a session. + * + * @param ctx A pointer to the command context structure. + * @return Returns 0 if the session is opened successfully, otherwise returns an error code. + */ +FCS_HAL_INT hal_session_open(struct fcs_cmd_context *const ctx); + +/** + * @brief Requests to SDM for closing a given opened session. + * + * This function is used to request the SDM (System Device Manager) to close a + * previously opened session. + * + * @param ctx A pointer to the command context structure. + * @return Returns 0 if the session is closed successfully, otherwise returns an error code. + */ +FCS_HAL_INT hal_session_close(struct fcs_cmd_context *const ctx); + +/** + * @brief Requests to get CHIP ID from SDM. + * + * This function is used to request the SDM (System Device Manager) to get the + * CHIP ID. + * + * @param ctx A pointer to the command context structure. + * @return Returns 0 if the CHIP ID is retrieved successfully, otherwise returns an error code. 
+ */ +FCS_HAL_INT hal_get_chip_id(struct fcs_cmd_context *const ctx); +/** + * @brief Requests to generate a random number from SDM. + * + * @param ctx A pointer to the command context structure. + * @return 0 if the random number is generated successfully, otherwise an error code. + */ +FCS_HAL_INT hal_random_number(struct fcs_cmd_context *const ctx); + +/** + * @brief Stores the context information. + * + * @param ctx A pointer to the command context structure. + * @return 0 if the context is stored successfully, otherwise an error code. + */ +FCS_HAL_INT hal_store_context(struct fcs_cmd_context *const ctx); + +/** + * Imports a key into the device. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the key import operation. + */ +FCS_HAL_INT hal_import_key(struct fcs_cmd_context *const ctx); + +/** + * @brief Retrieves the version of the ATF (Arm Trusted Firmware). + * + * This function fetches the current version of the ATF and stores it in the + * provided version pointer. + * + * @param[out] version Pointer to a variable where the ATF version will be stored. + * + * @return void + */ +FCS_HAL_VOID hal_get_atf_version(FCS_HAL_U32 *version); + +/** + * Exports a key from the device. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the key export operation. + */ +FCS_HAL_INT hal_export_key(struct fcs_cmd_context *const ctx); + +/** + * Removes an imported key. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the key removal operation. + */ +FCS_HAL_INT hal_remove_key(struct fcs_cmd_context *const ctx); + +/** + * Gets the key information of imported key from the device. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the key information retrieval operation. 
+ */ +FCS_HAL_INT hal_get_key_info(struct fcs_cmd_context *const ctx); + +/** + * Creates a key in the device. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the key creation operation. + */ +FCS_HAL_INT hal_create_key(struct fcs_cmd_context *const ctx); + +/** + * Requests the SDM to perform HKDF operation. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the HKDF operation. + */ +FCS_HAL_INT hal_hkdf_request(struct fcs_cmd_context *const ctx); + +/** + * Requests the SDM to get the provision data. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the provision data retrieval operation. + */ +FCS_HAL_INT hal_get_provision_data(struct fcs_cmd_context *const ctx); + +/** + * Sets the counter value. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the counter value setting operation. + */ +FCS_HAL_INT hal_counter_set(struct fcs_cmd_context *const ctx); + +/** + * Sets the preauthorized counter value. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the operation. + */ +FCS_HAL_INT hal_counter_set_preauth(struct fcs_cmd_context *const ctx); + +/** + * hal_digest_free_resource - Frees resources associated with the digest operation. + * @k_ctx: Pointer to the FCS command context structure. + * + * This function releases any resources that were allocated for the digest operation + * in the given FCS command context. + * + * @param k_ctx Pointer to the command context structure. + * + * Return: + * None + */ +FCS_HAL_VOID hal_digest_free_resource(struct fcs_cmd_context *const k_ctx); + +/** + * @brief Verifies the MAC for the given command context. 
+ * + * This function verifies the MAC (Message Authentication Code) based on the + * provided command context. + * + * @param k_ctx Pointer to the command context structure. + * @return FCS_HAL_INT Result of the MAC verification. + */ +FCS_HAL_INT hal_mac_verify(struct fcs_cmd_context *const k_ctx); + +/** + * Requests the SDM to perform AES encryption/decryption operation. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the AES encryption/decryption operation. + */ +FCS_HAL_INT hal_aes_crypt(struct fcs_cmd_context *const ctx); + +/** + * Requests the SDM to perform AES encryption/decryption operation. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the AES encryption/decryption operation. + */ +FCS_HAL_INT hal_ecdh_req(struct fcs_cmd_context *const ctx); + +/** + * Gets the chip ID. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the chip ID retrieval operation. + */ +FCS_HAL_INT hal_get_chip_id(struct fcs_cmd_context *const k_ctx); + +/** + * Retrieves the attestation certificate. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the attestation + * certificate retrieval operation. + */ +FCS_HAL_INT hal_attestation_get_certificate(struct fcs_cmd_context *const ctx); + +/** + * Reloads the attestation certificate. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the attestation + * certificate reload operation. + */ +FCS_HAL_INT hal_attestation_certificate_reload(struct fcs_cmd_context *const ctx); + +/** + * Requests the SDM to perform an MCTP operation. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the MCTP operation. 
+ */ +FCS_HAL_INT hal_mctp_request(struct fcs_cmd_context *const k_ctx); + +/** + * requests access to the qspi interface + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure to get access to qspi interface. + */ +FCS_HAL_INT hal_qspi_open(struct fcs_cmd_context *const ctx); + +/** + * requests to close the qspi interface + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure to close the qspi interface. + */ +FCS_HAL_INT hal_qspi_close(struct fcs_cmd_context *const ctx); + +/** + * requests to select the qspi device + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure to select the qspi device. + */ +FCS_HAL_INT hal_qspi_cs(struct fcs_cmd_context *const ctx); + +/** + * requests to read from the qspi device + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure to read from the qspi device. + */ +FCS_HAL_INT hal_qspi_read(struct fcs_cmd_context *const ctx); + +/** + * requests to write to the qspi device + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure to write to the qspi device. + */ +FCS_HAL_INT hal_qspi_write(struct fcs_cmd_context *const ctx); + +/** + * requests to erase the qspi device + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure to erase the qspi device. + */ +FCS_HAL_INT hal_qspi_erase(struct fcs_cmd_context *const ctx); + +/** + * requests to get the jtag idcode + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure to get the jtag idcode. 
+ */ +FCS_HAL_INT hal_jtag_idcode(struct fcs_cmd_context *const ctx); + +/** + * requests to get the device identity + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure to get the device identity. + */ +FCS_HAL_INT hal_get_device_identity(struct fcs_cmd_context *const ctx); + +/** + * @brief Data Encrypts/Decrypt using SDOS (Secure Data Object Storage). + * + * This function encrypts/decrypts data based on the provided command context using SDOS. + * + * @param k_ctx Pointer to the command context structure. + * @return FCS_HAL_INT Result of the encryption operation. + */ +FCS_HAL_INT hal_sdos_crypt(struct fcs_cmd_context *const k_ctx); + +/** + * @brief Retrieves the ECDSA public key. + * + * This function retrieves the ECDSA public key from the given command context. + * + * @param ctx Pointer to the command context structure. + * @return FCS_HAL_INT Status code indicating the result of the operation. + */ +FCS_HAL_INT hal_ecdsa_get_pubkey(struct fcs_cmd_context *const ctx); + +/** + * @brief Signs the hash using ECDSA. + * + * This function signs the hash using ECDSA based on the provided command context. + * + * @param ctx Pointer to the command context structure. + * @return FCS_HAL_INT Result of the hash signing operation. + */ +FCS_HAL_INT hal_ecdsa_hash_sign(struct fcs_cmd_context *const ctx); + +/** + * @brief Verifies the hash using ECDSA. + * + * This function verifies the hash using ECDSA based on the provided command context. + * + * @param ctx Pointer to the command context structure. + * @return FCS_HAL_INT Result of the hash verification operation. + */ +FCS_HAL_INT hal_ecdsa_hash_verify(struct fcs_cmd_context *const ctx); + +/** + * @brief Signs the data using ECDSA. + * + * This function signs the data using ECDSA based on the provided command context. + * + * @param ctx Pointer to the command context structure. + * @return FCS_HAL_INT Result of the data signing operation. 
+ */ +FCS_HAL_INT hal_ecdsa_sha2_data_sign(struct fcs_cmd_context *const ctx); + +/** + * @brief Verifies the data using ECDSA. + * + * This function verifies the data using ECDSA based on the provided command context. + * + * @param ctx Pointer to the command context structure. + * @return FCS_HAL_INT Result of the data verification operation. + */ +FCS_HAL_INT hal_ecdsa_sha2_data_verify(struct fcs_cmd_context *const ctx); + +/** + * Requests the SDM to validate an HPS image. + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the HPS image validation operation. + */ + +FCS_HAL_INT hal_hps_img_validate(struct fcs_cmd_context *const ctx); + +/** + * Check if the fcs hal is loaded in kernel. + * + * @return true, if the module is loaded successfully + * false, on error. + */ +FCS_HAL_BOOL hal_fcs_is_ready(void); + +#ifdef CONFIG_ALTERA_SOCFPGA_FCS_DEBUG +/** + * @brief Sends a generic mailbox command + * + * @param k_ctx Pointer to the command context structure. + * @return FCS_HAL_INT Result of the mailbox operation. + */ +FCS_HAL_INT hal_generic_mbox(struct fcs_cmd_context *k_ctx); +#endif + +/** + * @brief API sends the init command for the AES encryption/decryption request + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of AES + * encryption/decryption init stage. + */ +FCS_HAL_INT hal_aes_streaming_init(struct fcs_cmd_context *const k_ctx); + +/** + * @brief API sends data update for AES encryption/decryption request + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the AES + * encryption/decryption update stage. + */ +FCS_HAL_INT hal_aes_streaming_update(struct fcs_cmd_context *const k_ctx); + +/** + * @brief API sends AES encryption/decryption final request + * + * @param ctx A pointer to the command context structure. 
+ * @return An integer indicating the success or failure of the AES + * encryption/decryption final stage + */ +FCS_HAL_INT hal_aes_streaming_final(struct fcs_cmd_context *const k_ctx); + +/** + * @brief API sends the init command for the ECDSA sha2 data signing request + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the ECDSA + * data signing operation. + */ +FCS_HAL_INT +hal_ecdsa_data_sign_streaming_init(struct fcs_cmd_context *const k_ctx); + +/** + * @brief API sends data update for ECDSA sha2 data signing request + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the ECDSA + * data signing operation. + */ +FCS_HAL_INT +hal_ecdsa_data_sign_streaming_update(struct fcs_cmd_context *const k_ctx); + +/** + * @brief API sends ECDSA sha2 data signing final request + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the ECDSA + * data signing operation. + */ +FCS_HAL_INT +hal_ecdsa_data_sign_streaming_final(struct fcs_cmd_context *const k_ctx); + +/** + * @brief API sends the init command for the ECDSA sha2 data verification request + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of ECDSA + * data verification operation: init stage. + */ +FCS_HAL_INT +hal_ecdsa_data_verify_streaming_init(struct fcs_cmd_context *const k_ctx); + +/** + * @brief API sends data update for ECDSA sha2 data verification request + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the ECDSA + * data verification operation: init stage. 
+ */ +FCS_HAL_INT +hal_ecdsa_data_verify_streaming_init(struct fcs_cmd_context *const k_ctx); + +/** + * @brief API sends data update for ECDSA sha2 data verification request + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the ECDSA + * data verification operation: update stage. + */ +FCS_HAL_INT +hal_ecdsa_data_verify_streaming_update(struct fcs_cmd_context *const k_ctx); + +/** + * @brief API sends ECDSA sha2 data verification final request + * + * @param ctx A pointer to the command context structure. + * @return An integer indicating the success or failure of the ECDSA + * data verification operation: final stage. + */ +FCS_HAL_INT +hal_ecdsa_data_verify_streaming_final(struct fcs_cmd_context *const k_ctx); + +/** + * @brief Initializes the digest generation operation: Init stage. + * + * This function initializes the digest operation for the given command context. + * + * @param k_ctx Pointer to the command context structure. + * @return FCS_HAL_INT Result of the initialization. + */ +FCS_HAL_INT hal_digest_streaming_init(struct fcs_cmd_context *const k_ctx); + +/** + * @brief Updates the digest operation with new data. + * + * This function updates the digest operation with the provided data for the + * given command context. + * + * @param k_ctx Pointer to the command context structure. + * @return FCS_HAL_INT Result of the update. + */ +FCS_HAL_INT hal_digest_streaming_update(struct fcs_cmd_context *const k_ctx); + +/** + * @brief Finalizes the digest operation. + * + * This function finalizes the digest operation and retrieves the final digest + * for the given command context. + * + * @param k_ctx Pointer to the command context structure. + * @return FCS_HAL_INT Result of the finalization. + */ +FCS_HAL_INT hal_digest_streaming_final(struct fcs_cmd_context *const k_ctx); + +/** + * @brief Computes the digest for the given command context. 
+ * + * This function calculates the digest based on the provided command context. + * + * @param k_ctx Pointer to the command context structure. + * @return FCS_HAL_INT Result of the digest computation. + */ +FCS_HAL_INT hal_get_digest(struct fcs_cmd_context *const k_ctx); + +/** + * @brief Initializes the MAC verification operation: Init stage. + * + * This function initializes the MAC verification operation for the given + * command context. + * + * @param k_ctx Pointer to the command context structure. + * @return FCS_HAL_INT Result of the initialization. + */ +FCS_HAL_INT hal_mac_verify_streaming_init(struct fcs_cmd_context *const k_ctx); + +/** + * @brief Updates the MAC verification operation: Update stage. + * + * This function updates the MAC verification operation with the provided data + * for the given command context. + * + * @param k_ctx Pointer to the command context structure. + * @return FCS_HAL_INT Result of the update. + */ +FCS_HAL_INT +hal_mac_verify_streaming_update(struct fcs_cmd_context *const k_ctx); + +/** + * @brief Finalizes the MAC verification operation: Final stage. + * + * This function finalizes the MAC verification operation and retrieves the + * final result for the given command context. + * + * @param k_ctx Pointer to the command context structure. + * @return FCS_HAL_INT Result of the finalization. 
+ */ +FCS_HAL_INT hal_mac_verify_streaming_final(struct fcs_cmd_context *const k_ctx); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* SPCFPGA_FCS_HAL_H */ diff --git a/include/misc/socfpga_fcs_types.h b/include/misc/socfpga_fcs_types.h new file mode 100644 index 0000000000000..48ad6998e7f2d --- /dev/null +++ b/include/misc/socfpga_fcs_types.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later OR MIT */ +/* + * Copyright (C) 2025 Altera + */ + +#ifndef SOCFPGA_FCS_TYPES_H +#define SOCFPGA_FCS_TYPES_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include +#include +#include + +#define LOG_ERR(fmt, ...) pr_err(fmt, ##__VA_ARGS__) +#define LOG_DBG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__) +#define LOG_INF(fmt, ...) pr_info(fmt, ##__VA_ARGS__) +#define LOG_WRN(fmt, ...) pr_warn(fmt, ##__VA_ARGS__) + +#define FCS_REQUEST_TIMEOUT (msecs_to_jiffies(SVC_FCS_REQUEST_TIMEOUT_MS)) +#define FCS_COMPLETED_TIMEOUT (msecs_to_jiffies(SVC_COMPLETED_TIMEOUT_MS)) + +#define FCS_DMA_FROM_DEVICE DMA_FROM_DEVICE +#define FCS_DMA_TO_DEVICE DMA_TO_DEVICE + +#define FCS_AES_BLOCK_MODE_ECB 0 +#define FCS_AES_BLOCK_MODE_CBC 1 +#define FCS_AES_BLOCK_MODE_CTR 2 +#define FCS_AES_BLOCK_MODE_GCM 3 +#define FCS_AES_BLOCK_MODE_GHASH 4 +#define FCS_MAX_AES_CRYPT_MODE 5 +#define FCS_AES_GCM_TAG_SIZE 3 +#define FCS_AES_IV_SOURCE_EXTERNAL 0 +#define FCS_AES_IV_SOURCE_INTERNAL 1 +#define FCS_AES_ENCRYPT 0 +#define FCS_AES_DECRYPT 1 + +#define FCS_ECC_CURVE_NIST_P256 1 +#define FCS_ECC_CURVE_NIST_P384 2 +#define FCS_ECC_CURVE_BRAINPOOL_P256 3 +#define FCS_ECC_CURVE_BRAINPOOL_P384 4 + +#define FCS_ECC_CURVE_MASK 0xF + +#define FCS_ECDH_P256_PUBKEY_LEN 64 +#define FCS_ECDH_P384_PUBKEY_LEN 96 +#define FCS_ECDH_BP256_PUBKEY_LEN 64 +#define FCS_ECDH_BP384_PUBKEY_LEN 96 +#define FCS_ECDH_P256_SECRET_LEN 32 +#define FCS_ECDH_P384_SECRET_LEN 48 +#define FCS_ECDH_BP256_SECRET_LEN 32 +#define FCS_ECDH_BP384_SECRET_LEN 48 + +/** unsigned 64 bit*/ +typedef u64 
FCS_HAL_U64; +/** unsigned 32 bit*/ +typedef u32 FCS_HAL_U32; +/** unsigned 16 bit*/ +typedef u16 FCS_HAL_U16; +/** unsigned 8 bit*/ +typedef u8 FCS_HAL_U8; + +/** signed 64 bit*/ +typedef s64 FCS_HAL_S64; +/** signed 32 bit*/ +typedef s32 FCS_HAL_S32; +/** unsigned 16 bit*/ +typedef s16 FCS_HAL_S16; +/** unsigned 8 bit*/ +typedef s8 FCS_HAL_S8; + +/** void type*/ +typedef void FCS_HAL_VOID; +/** character data type*/ +typedef char FCS_HAL_CHAR; +/** boolean data type*/ +typedef bool FCS_HAL_BOOL; + +/** integer data type*/ +typedef int FCS_HAL_INT; +/** integer data type*/ +typedef unsigned int FCS_HAL_UINT; +/** data type to denote offset */ +typedef off_t FCS_HAL_OFFSET; +/** data type to denote size*/ +typedef size_t FCS_HAL_SIZE; + +/** integer data type uuid for session ids*/ +typedef uuid_t FCS_HAL_UUID; + +/** Unsigned long */ +typedef unsigned long FCS_HAL_ULONG; + +// TODO: which data type +typedef int FCS_HAL_ERROR; + +typedef struct completion FCS_HAL_COMPLETION; +typedef struct mutex FCS_HAL_MUTEX; + +typedef struct device FCS_HAL_DEV; + +typedef struct stratix10_svc_client_msg FCS_SVC_CLIENT_MSG; +typedef struct stratix10_svc_client FCS_SVC_CLIENT; +typedef struct stratix10_svc_cb_data FCS_SVC_CB_DATA; +typedef struct stratix10_svc_chan FCS_HAL_CHAN; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* SOCFPGA_FCS_TYPES_H */ diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h index 42ec5ddaab8dc..b007f3e75850e 100644 --- a/include/uapi/linux/if_xdp.h +++ b/include/uapi/linux/if_xdp.h @@ -156,6 +156,7 @@ struct xdp_desc { __u64 addr; __u32 len; __u32 options; + __u64 txtime; }; /* UMEM descriptor is __u64 */ diff --git a/include/uapi/linux/intel_fcs-ioctl.h b/include/uapi/linux/intel_fcs-ioctl.h new file mode 100644 index 0000000000000..84e1f277a9424 --- /dev/null +++ b/include/uapi/linux/intel_fcs-ioctl.h @@ -0,0 +1,711 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2020, Intel 
Corporation + */ + +#ifndef __INTEL_FCS_IOCTL_H +#define __INTEL_FCS_IOCTL_H + +#include + +/* the value may need be changed when upstream */ +#define INTEL_FCS_IOCTL 0xC0 + +/* define macro to be used to fix the size of struct intel_fcs_dev_ioctl */ +#define INTEL_FCS_IOCTL_MAX_SZ 256U +/* the header include the 8 bytes stucture padding and 4 bytes status */ +#define INTEL_FCS_IOCTL_HEADER_SZ 16U +#define INTEL_FCS_IOCTL_PLACEHOLDER_SZ (INTEL_FCS_IOCTL_MAX_SZ - \ + INTEL_FCS_IOCTL_HEADER_SZ) / 4 + +/** + * enum fcs_vab_img_type - enumeration of image types + * @INTEL_FCS_IMAGE_HPS: Image to validate is HPS image + * @INTEL_FCS_IMAGE_BITSTREAM: Image to validate is bitstream + */ +enum fcs_vab_img_type { + INTEL_FCS_IMAGE_HPS = 0, + INTEL_FCS_IMAGE_BITSTREAM = 1 +}; + +/** + * enum fcs_certificate_test - enumeration of certificate test + * @INTEL_FCS_NO_TEST: Write to eFuses + * @INTEL_FCS_TEST: Write to cache, do not write eFuses + */ +enum fcs_certificate_test { + INTEL_FCS_NO_TEST = 0, + INTEL_FCS_TEST = 1 +}; + +/** + * struct fcs_mbox_send_cmd - send generic mailbox command + * @mbox_cmd: mailbox command code + * @urgent: 0 for CASUAL, 1 for URGENT + * @cmd_data: virtual address of mailbox command data + * @cmd_data_sz: size of mailbox command data in bytes + * @rsp_data: virtual address to store response data + * @rsp_data_sz: maximun size to store response data in bytes + */ +struct fcs_mbox_send_cmd { + uint32_t mbox_cmd; + uint8_t urgent; + void *cmd_data; + uint16_t cmd_data_sz; + void *rsp_data; + uint16_t rsp_data_sz; +}; + +/** + * struct fcs_placeholder - placeholder of ioctl stuct + * @data: placeholder of iotcl struct + */ +struct fcs_placeholder { + uint32_t data[INTEL_FCS_IOCTL_PLACEHOLDER_SZ]; +}; + +/** + * struct intel_fcs_cert_test_word - certificate test word + * @test_word: if set, do not write fuses, write to cache only. 
+ */ +struct intel_fcs_cert_test_word { + uint32_t test_word; +}; + +/** + * struct fcs_validation_request - validate HPS or bitstream image + * @so_type: the type of signed object, 0 for HPS and 1 for bitstream + * @src: the source of signed object, + * for HPS, this is the virtual address of the signed source + * for Bitstream, this is path of the signed source, the default + * path is /lib/firmware + * @size: the size of the signed object + */ +struct fcs_validation_request { + enum fcs_vab_img_type so_type; + void *src; + uint32_t size; +}; + +/** + * struct fcs_key_manage_request - Request key management from SDM + * @addr: the virtual address of the signed object, + * @size: the size of the signed object + */ +struct fcs_key_manage_request { + void *addr; + uint32_t size; +}; + +/** + * struct fcs_certificate_request - Certificate request to SDM + * @test: test bit (1 if want to write to cache instead of fuses) + * @addr: the virtual address of the signed object, + * @size: the size of the signed object + * @c_status: returned certificate status + */ +struct fcs_certificate_request { + struct intel_fcs_cert_test_word test; + void *addr; + uint32_t size; + uint32_t c_status; +}; + +/** + * struct fcs_single_certificate_request - Single certificate to SDM + * @test: test bit (1 if want to write to cache instead of fuses) + * @counter_type: select the counter type with valid value from 1 to 5 + * @counter_value: counter value + */ +struct fcs_single_certificate_request { + struct intel_fcs_cert_test_word test; + uint8_t counter_type; + uint32_t counter_value; +}; + +/** + * struct fcs_data_encryption - aes data encryption command layout + * @src: the virtual address of the input data + * @src_size: the size of the unencrypted source + * @dst: the virtual address of the output data + * @dst_size: the size of the encrypted result + */ +struct fcs_data_encryption { + void *src; + uint32_t src_size; + void *dst; + uint32_t dst_size; +}; + +/** + * struct 
fcs_data_decryption - aes data decryption command layout + * @src: the virtual address of the input data + * @src_size: the size of the encrypted source + * @dst: the virtual address of the output data + * @dst_size: the size of the decrypted result + */ +struct fcs_data_decryption { + void *src; + uint32_t src_size; + void *dst; + uint32_t dst_size; +}; + +/** + * struct fcs_random_number_gen + * @rndm: 8 words of random data. + */ +struct fcs_random_number_gen { + uint32_t rndm[8]; +}; + +/** + * struct fcs_psgsigma_teardown + * @teardown + * @sid: the session ID + */ +struct fcs_psgsigma_teardown { + bool teardown; + uint32_t sid; +}; + +/** + * struct fcs_attestation_chipid + * @chip_id_low: device chip ID lower 32 + * @chip_id_high: device chip ID high 32 + */ +struct fcs_attestation_chipid { + uint32_t chip_id_low; + uint32_t chip_id_high; +}; + +/** + * struct intel_fcs_attestation_resv_word - attestation reserve word + * @resv_word: a reserve word required by firmware + */ +struct intel_fcs_attestation_resv_word { + uint32_t resv_word; +}; + +/** + * struct fcs_attestation_subkey + * @resv: reserve word + * @cmd_data: command data + * @cmd_data_sz: command data size + * @rsp_data: response data + * @rsp_data_sz: response data size + */ +struct fcs_attestation_subkey { + struct intel_fcs_attestation_resv_word resv; + char *cmd_data; + uint32_t cmd_data_sz; + char *rsp_data; + uint32_t rsp_data_sz; +}; + +/** + * struct fcs_attestation_measuerments + * @resv: reserve word + * @cmd_data: command data + * @cmd_data_sz: command data size + * @rsp_data: response data + * @rsp_data_sz: response data size + */ +struct fcs_attestation_measuerments { + struct intel_fcs_attestation_resv_word resv; + char *cmd_data; + uint32_t cmd_data_sz; + char *rsp_data; + uint32_t rsp_data_sz; +}; + +/** + * struct fcs_attestation_certificate + * @c_request: certificate request + * @rsp_data: response data of the request certificate + * @rsp_data_sz: size of response data of the 
request certificate + */ +struct fcs_attestation_certificate { + int c_request; + char *rsp_data; + uint32_t rsp_data_sz; +}; + +/** + * fcs_attestation_certificate_reload + * @c_request: certificate request + */ +struct fcs_attestation_certificate_reload { + int c_request; +}; + +/** + * struct fcs_rom_patch_sha384 + * @checksum: 12 words of checksum calculated from rom patch area + */ +struct fcs_rom_patch_sha384 { + uint32_t checksum[12]; +}; + +/** + * struct fcs_crypto_service_session + * @sid: the crypto service session ID + */ +struct fcs_crypto_service_session { + uint32_t sid; +}; + +struct fcs_crypto_key_header { + uint32_t sid; + uint32_t res1; + uint32_t res2; +}; + +struct fcs_crypto_key_import { + struct fcs_crypto_key_header hd; + char *obj_data; + uint32_t obj_data_sz; +}; + +struct fcs_crypto_key_object { + uint32_t sid; + uint32_t kid; + char *obj_data; + uint32_t obj_data_sz; +}; + +/** + * struct fcs_acs_crypt_parameter + * @bmode: block mode + * @aes_mode: encrypt or decrypt + * 0 encrypt + * 1 decrypt + * @resv: reserved + * @iv: 128-bit IV field + */ +struct fcs_acs_crypt_parameter { + char bmode; + char aes_mode; + char resv[10]; + char iv_field[16]; +}; + +/** + * struct fcs_aes_crypt + * @sid: session ID + * @cid: context ID + * @kuid: key UID + * @src: source + * @src_size: size of source + * @dst: destination + * @dst_size: size of destination + * @cpara: crypto parameter + */ +struct fcs_aes_crypt { + uint32_t sid; + uint32_t cid; + uint32_t kuid; + void *src; + uint32_t src_size; + void *dst; + uint32_t dst_size; + int cpara_size; + struct fcs_acs_crypt_parameter cpara; + bool init; + uint32_t buffer_offset; +}; + +/** + * struct fcs_sha2_mac_data + * @sid: session ID + * @cid: context ID + * @kuid: key UID + * @src: source + * @src_size: size of source + * @dst: destination + * @dst_size: size of destination + * @sha_op_mode: SHA operating mode + * @sha_digest_sz: SHA digest size + */ +struct fcs_sha2_mac_data { + uint32_t sid; + 
uint32_t cid; + uint32_t kuid; + void *src; + uint32_t src_size; + void *dst; + uint32_t dst_size; + int sha_op_mode; + int sha_digest_sz; + uint32_t userdata_sz; + bool init; +}; + +/** + * struct fcs_ecdsa_data + * @sid: session ID + * @cid: context ID + * @kuid: key UID + * @src: source + * @src_size: size of source + * @dst: destination + * @dst_size: size of destination + * @ecc_algorithm: ECC algorithm + */ +struct fcs_ecdsa_data { + uint32_t sid; + uint32_t cid; + uint32_t kuid; + void *src; + uint32_t src_size; + void *dst; + uint32_t dst_size; + int ecc_algorithm; + bool init; +}; + +/** + * struct fcs_ecdsa_sha2_data + * @sid: session ID + * @cid: context ID + * @kuid: key UID + * @src: pointer of source + * @src_size: size of source + * @dst: pointer of destination + * @dst_size: size of destination + * @ecc_algorithm: ECC algorithm + * @userdata_sz: size of user data + */ +struct fcs_ecdsa_sha2_data { + uint32_t sid; + uint32_t cid; + uint32_t kuid; + void *src; + uint32_t src_size; + void *dst; + uint32_t dst_size; + int ecc_algorithm; + uint32_t userdata_sz; + bool init; +}; + +/** + * struct fcs_random_number_gen_ext + * @sid: session ID + * @cid: context ID + * @rng_data: random data + * @rng_sz: size of random data + */ +struct fcs_random_number_gen_ext { + uint32_t sid; + uint32_t cid; + void *rng_data; + uint32_t rng_sz; +}; + +/** + * struct fcs_sdos_data_ext - SDOS encryption/decryption + * @sid: session ID + * @cid: context ID + * @op_mode: SDOS operation mode + * 1 encryption + * 0 decryption + * @oid1: owner ID word 1, valid for date decryption only + * @oid2: owner ID word 2, valid for date decryption only + * @src: the virtual address of the input data + * @src_size: the size of the input data + * @dst: the virtual address of the output data + * dst_size: the size of the output data + */ +struct fcs_sdos_data_ext { + uint32_t sid; + uint32_t cid; + int op_mode; + void *src; + uint32_t src_size; + void *dst; + uint32_t dst_size; +}; + +/** 
+ * struct intel_fcs_dev_ioctl: common structure passed to Linux + * kernel driver for all commands. + * @status: Used for the return code. + * -1 -- operation is not started + * 0 -- operation is successfully completed + * non-zero -- operation failed + * @s_request: Validation of a bitstream. + * @c_request: Certificate request. + * hps_vab: validation of an HPS image + * counter set: burn fuses for new counter values + * @gp_data: view the eFuse provisioning state. + * @d_encryption: AES encryption (SDOS) + * @d_decryption: AES decryption (SDOS) + * @rn_gen: random number generator result + * @sdos_data_ext: SDOS ext data + */ +struct intel_fcs_dev_ioctl { + /* used for return status code */ + int status; + + /* command parameters */ + union { + struct fcs_mbox_send_cmd mbox_send_cmd; + struct fcs_placeholder placeholder; + struct fcs_validation_request s_request; + struct fcs_certificate_request c_request; + struct fcs_single_certificate_request i_request; + struct fcs_key_manage_request gp_data; + struct fcs_data_encryption d_encryption; + struct fcs_data_decryption d_decryption; + struct fcs_random_number_gen rn_gen; + struct fcs_psgsigma_teardown tdown; + struct fcs_attestation_chipid c_id; + struct fcs_attestation_subkey subkey; + struct fcs_attestation_measuerments measurement; + struct fcs_attestation_certificate certificate; + struct fcs_attestation_certificate_reload c_reload; + struct fcs_rom_patch_sha384 sha384; + struct fcs_crypto_service_session s_session; + struct fcs_crypto_key_import k_import; + struct fcs_crypto_key_object k_object; + struct fcs_aes_crypt a_crypt; + struct fcs_sha2_mac_data s_mac_data; + struct fcs_ecdsa_data ecdsa_data; + struct fcs_ecdsa_sha2_data ecdsa_sha2_data; + struct fcs_random_number_gen_ext rn_gen_ext; + struct fcs_sdos_data_ext data_sdos_ext; + } com_paras; + + int mbox_status; +}; + +/** + * intel_fcs_command_code - support fpga crypto service commands + * + * Values are subject to change as a result of upstreaming. 
+ * + * @INTEL_FCS_DEV_VERSION_CMD: + * + * @INTEL_FCS_DEV_MBOX_SEND_CMD: + * + * @INTEL_FCS_DEV_CERTIFICATE_CMD: + * + * @INTEL_FCS_DEV_VALIDATE_REQUEST_CMD: + * + * @INTEL_FCS_DEV_COUNTER_SET_CMD: + * + * @INTEL_FCS_DEV_COUNTER_SET_PREAUTHORIZED_CMD: + * + * @INTEL_FCS_DEV_GET_PROVISION_DATA_CMD: + * + * @INTEL_FCS_DEV_DATA_ENCRYPTION_CMD: + * + * @INTEL_FCS_DEV_DATA_DECRYPTION_CMD: + * + * @INTEL_FCS_DEV_RANDOM_NUMBER_GEN_CMD: + * + * @INTEL_FCS_DEV_GET_ROM_PATCH_SHA384_CMD: + */ +enum intel_fcs_command_code { + INTEL_FCS_DEV_COMMAND_NONE = 0, + INTEL_FCS_DEV_VERSION_CMD = 1, + INTEL_FCS_DEV_MBOX_SEND_CMD, + INTEL_FCS_DEV_CERTIFICATE_CMD = 0xB, + INTEL_FCS_DEV_VALIDATE_REQUEST_CMD = 0x78, + INTEL_FCS_DEV_COUNTER_SET_CMD, + INTEL_FCS_DEV_COUNTER_SET_PREAUTHORIZED_CMD, + INTEL_FCS_DEV_GET_PROVISION_DATA_CMD, + INTEL_FCS_DEV_DATA_ENCRYPTION_CMD = 0x7E, + INTEL_FCS_DEV_DATA_DECRYPTION_CMD, + INTEL_FCS_DEV_RANDOM_NUMBER_GEN_CMD, + INTEL_FCS_DEV_PSGSIGMA_TEARDOWN_CMD = 0x88, + INTEL_FCS_DEV_CHIP_ID_CMD, + INTEL_FCS_DEV_ATTESTATION_SUBKEY_CMD, + INTEL_FCS_DEV_ATTESTATION_MEASUREMENT_CMD, + INTEL_FCS_DEV_ATTESTATION_GET_CERTIFICATE_CMD, + INTEL_FCS_DEV_ATTESTATION_CERTIFICATE_RELOAD_CMD, + INTEL_FCS_DEV_GET_ROM_PATCH_SHA384_CMD, + INTEL_FCS_DEV_CRYPTO_OPEN_SESSION_CMD = 0xA0, + INTEL_FCS_DEV_CRYPTO_CLOSE_SESSION_CMD, + INTEL_FCS_DEV_CRYPTO_IMPORT_KEY_CMD, + INTEL_FCS_DEV_CRYPTO_EXPORT_KEY_CMD, + INTEL_FCS_DEV_CRYPTO_REMOVE_KEY_CMD, + INTEL_FCS_DEV_CRYPTO_GET_KEY_INFO_CMD, + INTEL_FCS_DEV_CRYPTO_AES_CRYPT_CMD, + INTEL_FCS_DEV_CRYPTO_GET_DIGEST_CMD, + INTEL_FCS_DEV_CRYPTO_MAC_VERIFY_CMD, + INTEL_FCS_DEV_CRYPTO_ECDSA_HASH_SIGNING_CMD, + INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_CMD, + INTEL_FCS_DEV_CRYPTO_ECDSA_HASH_VERIFY_CMD, + INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_CMD, + INTEL_FCS_DEV_CRYPTO_ECDSA_GET_PUBLIC_KEY_CMD, + INTEL_FCS_DEV_CRYPTO_ECDH_REQUEST_CMD, + INTEL_FCS_DEV_RANDOM_NUMBER_GEN_EXT_CMD, + INTEL_FCS_DEV_SDOS_DATA_EXT_CMD, + 
INTEL_FCS_DEV_CRYPTO_AES_CRYPT_SMMU_CMD, + INTEL_FCS_DEV_CRYPTO_GET_DIGEST_SMMU_CMD, + INTEL_FCS_DEV_CRYPTO_MAC_VERIFY_SMMU_CMD, + INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_SMMU_CMD, + INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_SMMU_CMD, + INTEL_FCS_DEV_CHECK_SMMU_ENABLED_CMD, +}; + +#define INTEL_FCS_DEV_VERSION_REQUEST \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_VERSION_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_MBOX_SEND \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_MBOX_SEND_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_VALIDATION_REQUEST \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_VALIDATE_REQUEST_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_SEND_CERTIFICATE \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CERTIFICATE_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_COUNTER_SET_PREAUTHORIZED \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_COUNTER_SET_PREAUTHORIZED_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_GET_PROVISION_DATA \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_GET_PROVISION_DATA_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_DATA_ENCRYPTION \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_DATA_ENCRYPTION_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_DATA_DECRYPTION \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_DATA_DECRYPTION_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_RANDOM_NUMBER_GEN \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_RANDOM_NUMBER_GEN_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_PSGSIGMA_TEARDOWN \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_PSGSIGMA_TEARDOWN_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CHIP_ID \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CHIP_ID_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_ATTESTATION_SUBKEY \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_ATTESTATION_SUBKEY_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_ATTESTATION_MEASUREMENT \ + _IOWR(INTEL_FCS_IOCTL, \ + 
INTEL_FCS_DEV_ATTESTATION_MEASUREMENT_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_ATTESTATION_GET_CERTIFICATE \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_ATTESTATION_GET_CERTIFICATE_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_ATTESTATION_CERTIFICATE_RELOAD \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_ATTESTATION_CERTIFICATE_RELOAD_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_GET_ROM_PATCH_SHA384 \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_GET_ROM_PATCH_SHA384_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_OPEN_SESSION \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_OPEN_SESSION_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_CLOSE_SESSION \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_CLOSE_SESSION_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_IMPORT_KEY \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_IMPORT_KEY_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_EXPORT_KEY \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_EXPORT_KEY_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_REMOVE_KEY \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_REMOVE_KEY_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_GET_KEY_INFO \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_GET_KEY_INFO_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_AES_CRYPT \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_AES_CRYPT_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_GET_DIGEST \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_GET_DIGEST_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_MAC_VERIFY \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_MAC_VERIFY_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_ECDSA_HASH_SIGNING \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_ECDSA_HASH_SIGNING_CMD, struct intel_fcs_dev_ioctl) + +#define 
INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_ECDSA_HASH_VERIFY \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_ECDSA_HASH_VERIFY_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_ECDSA_GET_PUBLIC_KEY \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_ECDSA_GET_PUBLIC_KEY_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_ECDH_REQUEST \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_ECDH_REQUEST_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_RANDOM_NUMBER_GEN_EXT \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_RANDOM_NUMBER_GEN_EXT_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_SDOS_DATA_EXT \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_SDOS_DATA_EXT_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CHECK_SMMU_ENABLED \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CHECK_SMMU_ENABLED_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_AES_CRYPT_SMMU \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_AES_CRYPT_SMMU_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_GET_DIGEST_SMMU \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_GET_DIGEST_SMMU_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_MAC_VERIFY_SMMU \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_MAC_VERIFY_SMMU_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_SMMU \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_SIGNING_SMMU_CMD, struct intel_fcs_dev_ioctl) + +#define INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_SMMU \ + _IOWR(INTEL_FCS_IOCTL, \ + INTEL_FCS_DEV_CRYPTO_ECDSA_SHA2_DATA_VERIFY_SMMU_CMD, struct intel_fcs_dev_ioctl) + +#endif +