diff --git a/.github/workflows/build-kernel-arm64.yml b/.github/workflows/build-kernel-arm64.yml
new file mode 100644
index 0000000000000..5c722bf29b282
--- /dev/null
+++ b/.github/workflows/build-kernel-arm64.yml
@@ -0,0 +1,29 @@
+name: build kernel arm64
+on:
+  push:
+  pull_request:
+  workflow_dispatch:
+
+env:
+  KBUILD_BUILD_USER: deepin-kernel-sig
+  KBUILD_BUILD_HOST: deepin-kernel-builder
+  email: support@deepin.org
+
+permissions:
+  pull-requests: read
+
+jobs:
+  build-kernel:
+    runs-on: [self-hosted, linux, ARM64]
+    steps:
+      - uses: actions/checkout@v3
+      - name: "Install Deps"
+        run: |
+          git config --global user.email $email
+          git config --global user.name $KBUILD_BUILD_USER
+
+      - name: "Compile kernel"
+        run: |
+          # .config
+          make deepin_arm64_desktop_defconfig
+          make -j$(nproc)
diff --git a/.github/workflows/build-kernel.yml b/.github/workflows/build-kernel.yml
new file mode 100644
index 0000000000000..69d0aabbed15d
--- /dev/null
+++ b/.github/workflows/build-kernel.yml
@@ -0,0 +1,30 @@
+name: build kernel
+on:
+  push:
+  pull_request:
+  workflow_dispatch:
+
+env:
+  KBUILD_BUILD_USER: deepin-kernel-sig
+  KBUILD_BUILD_HOST: deepin-kernel-builder
+  email: support@deepin.org
+
+permissions:
+  pull-requests: read
+
+jobs:
+  build-kernel:
+    runs-on: [self-hosted, linux, x64]
+    steps:
+      - uses: actions/checkout@v3
+      - name: "Install Deps"
+        run: |
+          git config --global user.email $email
+          git config --global user.name $KBUILD_BUILD_USER
+
+      - name: "Compile kernel"
+        run: |
+          # .config
+          make deepin_x86_desktop_defconfig
+          make -j$(nproc)
+
diff --git a/.github/workflows/check-patches.yml b/.github/workflows/check-patches.yml
index 4549b77fb72c1..b9f2d7b2d7a03 100644
--- a/.github/workflows/check-patches.yml
+++ b/.github/workflows/check-patches.yml
@@ -26,7 +26,6 @@ jobs:
           repo = "${{ github.event.repository.name }}"
           pull_number = "${{ github.event.number }}"
           commit_id = "${{ github.event.pull_request.head.sha }}"
-          access_token = os.environ.get("GITHUB_TOKEN")
 
           # Fetch the checkpatch script
           check_url = 'https://raw.githubusercontent.com/deepin-community/kernel/linux-6.6.y/scripts/checkpatch.pl'
@@ -50,7 +49,10 @@
           # Fetch the list of changed files
           url = f'https://api.github.com/repos/{owner}/{repo}/pulls/{pull_number}/files'
           print(url)
-          headers = {'Authorization': f'Bearer {access_token}'}
+          headers = {
+              "Accept": "application/vnd.github+json",
+              "Authorization":"Bearer " + os.environ.get("GITHUB_TOKEN")
+          }
           response = requests.get(url, headers=headers)
           files = response.json()
@@ -97,14 +99,15 @@
             line_number = match.group().split(' ')[-1]
             line_number = line_number.replace(':', '')
             body = r.split('\n')[0]
-            comment_json = {
-                "body": body,
-                "commit_id": commit_id,
-                "path": path,
-                "line": int(line_number),
-                "side": "RIGHT"
-            }
-
-            comment_url = f"https://api.github.com/repos/{owner}/{repo}/pulls/{pull_number}/comments"
-            response = requests.post(comment_url, json=comment_json, headers=headers)
-            print(response.json())
+            if "It's generally not useful to have the filename in the file" not in body:
+                comment_json = {
+                    "body": body,
+                    "commit_id": commit_id,
+                    "path": path,
+                    "line": int(line_number),
+                    "side": "RIGHT"
+                }
+
+                comment_url = f"https://api.github.com/repos/{owner}/{repo}/pulls/{pull_number}/comments"
+                response = requests.post(comment_url, json=comment_json, headers=headers)
+                print(response.json())
diff --git a/.github/workflows/package-kernel-amd64-daily.yml b/.github/workflows/package-kernel-amd64-daily.yml
new file mode 100644
index 0000000000000..5902521bfe633
--- /dev/null
+++ b/.github/workflows/package-kernel-amd64-daily.yml
@@ -0,0 +1,34 @@
+name: package kernel amd64 daily
+on:
+  schedule:
+    - cron: "0 2 * * *"
+
+env:
+  KBUILD_BUILD_USER: deepin-kernel-sig
+  KBUILD_BUILD_HOST: deepin-kernel-builder
+  email: support@deepin.org
+
+permissions:
+  pull-requests: read
+
+jobs:
+  build-kernel:
+    runs-on: [self-hosted, linux, x64]
+    steps:
+      - uses: actions/checkout@v3
+      - name: "Install Deps"
+        run: |
+          git config --global user.email $email
+          git config --global user.name $KBUILD_BUILD_USER
+
+      - name: "Compile kernel"
+        run: |
+          # .config
+          make deepin_x86_desktop_defconfig
+          make bindeb-pkg -j$(nproc)
+
+      - name: 'Upload Kernel Artifact'
+        uses: actions/upload-artifact@v3
+        with:
+          name: kernel-amd64-deb
+          path: "../*.deb"
diff --git a/Documentation/devicetree/bindings/dma/phytium-ddma.yaml b/Documentation/devicetree/bindings/dma/phytium-ddma.yaml
new file mode 100644
index 0000000000000..098dbd867a199
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/phytium-ddma.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+title: Phytium DDMA Controller bindings
+
+description: |
+  The Phytium DDMA is a general-purpose direct memory access
+  controller capable of supporting 8 independent DMA channels.
+  Each channel can have up to 32 requests. DMA clients connected
+  to the Phytium DDMA controller must use the format described
+  in the dma.txt file, using a two-cell specifier for each
+  channel: a phandle to the DMA controller plus the following
+  two integer cells:
+    1. The channel id
+    2. The request line number
+
+maintainers:
+  - Huang Jie
+
+allOf:
+  - $ref: "dma-controller.yaml#"
+
+properties:
+  "#dma-cells":
+    const: 2
+
+  compatible:
+    const: phytium,ddma
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  dma-channels:
+    minItems: 1
+    maxItems: 8
+    description: indicates the number of channels that are used
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - dma-channels
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    ddma0: ddma@28003000 {
+      compatible = "phytium,ddma";
+      reg = <0x0 0x28003000 0x0 0x1000>;
+      interrupts = ;
+      #dma-cells = <2>;
+      dma-channels = <8>;
+    };
+...
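For illustration only (not part of this patch): a minimal kernel-side sketch of the client end of the two-cell specifier described above, assuming a hypothetical client node with `dmas = <&ddma0 0 11>; dma-names = "tx";` (channel 0, request line 11). The device and names are made up; only the generic dmaengine consumer API is used.

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical DDMA client: channel id and request line come from the
 * client node's dmas/dma-names properties, not from this code. */
static int example_request_ddma_tx(struct device *dev)
{
	struct dma_chan *chan;

	/* Resolves the "tx" entry through the two-cell specifier. */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... dmaengine_slave_config() and descriptor submission go here ... */

	dma_release_channel(chan);
	return 0;
}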
diff --git a/Documentation/devicetree/bindings/gpio/phytium,gpio.yaml b/Documentation/devicetree/bindings/gpio/phytium,gpio.yaml
new file mode 100644
index 0000000000000..4a2d586d18803
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/phytium,gpio.yaml
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/phytium,gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Phytium GPIO controller
+
+description: |
+  Phytium GPIO controllers have one or two configurable ports, each of which
+  is intended to be represented as a child node with the generic
+  GPIO-controller properties as described in this bindings file.
+
+maintainers:
+  - Chen Baozi
+
+properties:
+  $nodename:
+    pattern: "^gpio@[0-9a-f]+$"
+
+  compatible:
+    const: phytium,gpio
+
+  reg:
+    maxItems: 1
+
+  gpio-controller: true
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 0
+
+  '#gpio-cells':
+    const: 2
+
+  interrupts:
+    description: |
+      The interrupts to the parent controller raised when GPIOs generate
+      the interrupts. If the controller provides one combined interrupt
+      for all GPIOs, specify a single interrupt. If the controller provides
+      one interrupt for each GPIO, provide a list of interrupts that
+      correspond to each of the GPIO pins.
+    minItems: 1
+    maxItems: 32
+
+  interrupt-controller: true
+
+  '#interrupt-cells':
+    const: 2
+
+patternProperties:
+  "^gpio-(port|controller)@[0-9a-f]+$":
+    type: object
+    properties:
+      compatible:
+        const: phytium,gpio-port
+
+      reg:
+        maxItems: 1
+
+      nr-gpios:
+        $ref: /schemas/types.yaml#/definitions/uint32
+        description: The number of GPIO pins exported by the port.
+        default: 32
+        minimum: 1
+        maximum: 32
+
+    required:
+      - compatible
+      - reg
+      - gpio-controller
+      - '#gpio-cells'
+
+    dependencies:
+      interrupt-controller: [ interrupts ]
+
+    additionalProperties: false
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - "#address-cells"
+  - "#size-cells"
+
+examples:
+  - |
+    gpio: gpio@28004000 {
+      compatible = "phytium,gpio";
+      reg = <0x0 0x28004000 0x0 0x1000>;
+      gpio-controller;
+      #gpio-cells = <2>;
+      #address-cells = <1>;
+      #size-cells = <0>;
+      interrupts = ;
+      interrupt-controller;
+      #interrupt-cells = <2>;
+
+      porta: gpio-port@0 {
+        compatible = "phytium,gpio-port";
+        reg = <0>;
+        nr-gpios = <8>;
+      };
+
+      portb: gpio-port@1 {
+        compatible = "phytium,gpio-port";
+        reg = <1>;
+        nr-gpios = <8>;
+      };
+    };
+...
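For illustration only (not part of this patch): a minimal consumer sketch for a pin on one of the ports above, assuming a hypothetical client node with `example-gpios = <&porta 3 0>;`. The "example" con_id and the pin offset are made up; gpiod_to_irq() is usable because the controller is also an interrupt-controller.

#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Hypothetical consumer: looks up "example-gpios" on the client device. */
static int example_use_phytium_gpio(struct device *dev)
{
	struct gpio_desc *gpiod;
	int irq;

	gpiod = devm_gpiod_get(dev, "example", GPIOD_IN);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	/* Map the pin to a Linux IRQ through the interrupt-controller role. */
	irq = gpiod_to_irq(gpiod);
	if (irq < 0)
		return irq;

	return gpiod_get_value(gpiod);
}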
diff --git a/Documentation/devicetree/bindings/hwmon/phytium,tacho.yaml b/Documentation/devicetree/bindings/hwmon/phytium,tacho.yaml
new file mode 100644
index 0000000000000..d2443e023a6bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/phytium,tacho.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/phytium,tacho.yaml
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Phytium Fan Tacho and capture counter controller
+
+maintainers:
+  - Chen Baozi
+
+description: |
+  The controller supports one input signal. It can either measure the
+  speed of a fan or count the edges of the input signal; the function
+  is selected through the devicetree. The edge mode and anti-jitter
+  level can also be set in the devicetree.
+
+properties:
+  compatible:
+    const: phytium,tacho
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  '#address-cells':
+    const: 1
+
+  '#size-cells':
+    const: 1
+
+  tacho:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      set the controller to work as a fan tachometer, which is the
+      default option.
+
+  capture:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      set the controller to work as a capture counter.
+
+  up:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      set the input edge mode to ascending, which is the default option.
+
+  down:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      set the input edge mode to descending.
+
+  double:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      set the input edge mode to double-edge.
+
+  debounce-level:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    enum: [0, 1, 2, 3]
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - '#address-cells'
+  - '#size-cells'
+
+examples:
+  - |
+    tacho: tacho@28054000 {
+      #address-cells = <1>;
+      #size-cells = <1>;
+      reg = <0x0 0x28054000 0x0 0x1000>;
+      compatible = "phytium,tacho";
+      clocks = <&sysclk>;
+      tacho;
+      up;
+      debounce-level = <2>;
+    };
diff --git a/Documentation/devicetree/bindings/mailbox/phytium,mbox.yaml b/Documentation/devicetree/bindings/mailbox/phytium,mbox.yaml
new file mode 100644
index 0000000000000..d875b08b55320
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/phytium,mbox.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mailbox/phytium,mbox.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Phytium Mailbox Driver
+
+maintainers:
+  - Chen Baozi
+
+description: |
+  The Phytium mailbox controller has a channel/link to communicate
+  with the remote end. A link raises an interrupt for any received data.
+  However, there is no specified way of knowing whether the sent data has
+  been read by the remote. This driver assumes the sender polls the STAT
+  register and the remote clears it after having read the data.
+
+properties:
+  compatible:
+    const: phytium,mbox
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  '#mbox-cells':
+    const: 1
+    description: the index of the channel needed
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - '#mbox-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    mbox: mailbox@2a000000 {
+      compatible = "phytium,mbox";
+      reg = <0x0 0x2a000000 0x0 0x1000>;
+      #mbox-cells = <1>;
+      interrupts = <0 48 4>;
+    };
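For illustration only (not part of this patch): a minimal client-side sketch of the one-cell #mbox-cells specifier above, assuming a hypothetical client node with `mboxes = <&mbox 0>;`. The payload value and timeout are made up; tx_block makes the framework wait for the TX-done indication that, per the description above, the driver derives from polling the STAT register.

#include <linux/err.h>
#include <linux/mailbox_client.h>

/* Hypothetical mailbox client: channel index and message are illustrative. */
static int example_mbox_send(struct device *dev)
{
	struct mbox_client cl = {
		.dev = dev,
		.tx_block = true,	/* block until the remote clears STAT */
		.tx_tout = 500,		/* ms */
	};
	struct mbox_chan *chan;
	u32 msg = 0x1234;
	int ret;

	/* Resolves channel 0 of the client's mboxes property. */
	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, &msg);
	mbox_free_channel(chan);
	return ret < 0 ? ret : 0;
}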
diff --git a/Documentation/devicetree/bindings/mmc/phytium,mci.yaml b/Documentation/devicetree/bindings/mmc/phytium,mci.yaml
new file mode 100644
index 0000000000000..a0748c70af37b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/phytium,mci.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/phytium,mci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Phytium Multimedia Card Interface controller
+
+description: |
+  The high-speed MMC host controller on Phytium SoCs provides an interface
+  for MMC, SD and SDIO types of memory cards.
+
+maintainers:
+  - Chen Baozi
+
+allOf:
+  - $ref: "mmc-controller.yaml"
+
+properties:
+  compatible:
+    const: phytium,mci
+
+  reg:
+    maxItems: 1
+    description: mmc controller base registers.
+
+  interrupts:
+    maxItems: 1
+    description: mmc controller interrupt.
+
+  clocks:
+    maxItems: 1
+    description: phandles to input clocks.
+
+  clock-names:
+    items:
+      - const: phytium_mci_clk
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+examples:
+  - |
+    mmc0: mmc@28000000 {
+      compatible = "phytium,mci";
+      reg = <0x0 0x28000000 0x0 0x1000>;
+      interrupts = ;
+      clocks = <&sysclk_1200mhz>;
+      clock-names = "phytium_mci_clk";
+      status = "disabled";
+    };
+
+    &mmc0 {
+      bus-width = <4>;
+      max-frequency = <50000000>;
+      cap-sdio-irq;
+      cap-sd-highspeed;
+      sd-uhs-sdr12;
+      sd-uhs-sdr25;
+      sd-uhs-sdr50;
+      no-mmc;
+      status = "ok";
+    };
diff --git a/Documentation/devicetree/bindings/mmc/phytium,sdci.yaml b/Documentation/devicetree/bindings/mmc/phytium,sdci.yaml
new file mode 100644
index 0000000000000..9c56e5fe01870
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/phytium,sdci.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/phytium,sdci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Phytium SDCI Controller Binding
+
+maintainers:
+  - Chen Baozi
+
+allOf:
+  - $ref: mmc-controller.yaml#
+
+properties:
+  compatible:
+    enum:
+      - phytium,sdci
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    minItems: 3
+    maxItems: 3
+
+  clocks:
+    minItems: 1
+    items:
+      - description: core clock
+
+  clock-names:
+    minItems: 1
+    items:
+      - const: phytium_sdc_clk
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    sdci: sdci@28207c00 {
+      compatible = "phytium,sdci";
+      reg = <0x0 0x28207c00 0x0 0x100>;
+      interrupts = ,
+                   ,
+                   ;
+      clocks = <&sysclk_600mhz>;
+      clock-names = "phytium_sdc_clk";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/mtd/phytium,nfc.yaml b/Documentation/devicetree/bindings/mtd/phytium,nfc.yaml
new file mode 100644
index 0000000000000..4d3bac96d5846
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/phytium,nfc.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/phytium,nfc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Phytium NAND Flash controller
+
+maintainers:
+  - Chen Baozi
+
+properties:
+  compatible:
+    const: phytium,nfc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  nand-ecc-strength:
+    const: 8
+
+  nand-ecc-step-size:
+    const: 512
+
+allOf:
+  - $ref: "nand-controller.yaml#"
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+examples:
+  - |
+    nand0: nand@28002000 {
+      compatible = "phytium,nfc";
+      reg = <0x0 0x28002000 0x0 0x1000>;
+      interrupts = ;
+    };
should be "can_clk" + const: can_clk + + tx-fifo-depth: + $ref: "/schemas/types.yaml#/definitions/uint32" + description: Indicates the length of TX FIFO + + rx-fifo-depth: + $ref: "/schemas/types.yaml#/definitions/uint32" + description: Indicates the length of RX FIFO + + extend_brp: + description: | + Indicates to apply the extend BRP parameter of bit timming for + early version of CAN controller + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - tx-fifo-depth + - rx-fifo-depth + +allOf: + - $ref: can-controller.yaml# + +examples: + - | + + can0: can@2800a000 { + compatible = "phytium,canfd"; + reg = <0x0 0x2800a000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "can_clk"; + tx-fifo-depth = <64>; + rx-fifo-depth = <64>; + }; diff --git a/Documentation/devicetree/bindings/rng/phytium,rng.yaml b/Documentation/devicetree/bindings/rng/phytium,rng.yaml new file mode 100644 index 0000000000000..e32fc39ea86c0 --- /dev/null +++ b/Documentation/devicetree/bindings/rng/phytium,rng.yaml @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rng/phytium,rng.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Phytium Random Number Generator + +maintainers: + - Chen Baozi + +properties: + compatible: + const: phytium,rng + + reg: + maxItems: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + rng@0x31a06000 { + compatible = "phytium,rng"; + reg = <0x0 0x31a06000 0x0 0x1000>; + }; diff --git a/MAINTAINERS b/MAINTAINERS index 4d40d0cce6321..1c4e69e014e32 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -20835,7 +20835,9 @@ ARM/PHYTIUM SOC SUPPORT M: Wang Yinfeng S: Maintained W: https://gerrit.b.cpu.ac/c/linux +F: Documentation/devicetree/bindings/gpio/phytium,gpio.yaml F: arch/arm64/boot/dts/phytium/* +F: drivers/gpio/gpio-phytium* QAT DRIVER M: Giovanni Cabiddu diff --git a/arch/arm64/configs/deepin_arm64_desktop_defconfig b/arch/arm64/configs/deepin_arm64_desktop_defconfig index 2daab038ad148..2e9538807e285 100644 --- a/arch/arm64/configs/deepin_arm64_desktop_defconfig +++ b/arch/arm64/configs/deepin_arm64_desktop_defconfig @@ -1,5 +1,5 @@ CONFIG_COMPILE_TEST=y -CONFIG_BUILD_SALT="arm64-desktop-hwe" +CONFIG_BUILD_SALT="-arm64-desktop-hwe" CONFIG_KERNEL_ZSTD=y CONFIG_DEFAULT_HOSTNAME="" CONFIG_SYSVIPC=y @@ -21,7 +21,6 @@ CONFIG_UCLAMP_TASK=y CONFIG_MEMCG=y CONFIG_BLK_CGROUP=y CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y diff --git a/arch/loongarch/configs/deepin_loongarch_desktop_defconfig b/arch/loongarch/configs/deepin_loongarch_desktop_defconfig index 84434dd9da007..1928e203b576b 100644 --- a/arch/loongarch/configs/deepin_loongarch_desktop_defconfig +++ b/arch/loongarch/configs/deepin_loongarch_desktop_defconfig @@ -1,5 +1,5 @@ CONFIG_COMPILE_TEST=y -CONFIG_BUILD_SALT="-loongarch-desktop-hwe" +CONFIG_BUILD_SALT="-loong64-desktop-hwe" CONFIG_KERNEL_ZSTD=y CONFIG_DEFAULT_HOSTNAME="" CONFIG_SYSVIPC=y @@ -60,6 +60,10 @@ CONFIG_CPU_HAS_LSX=y CONFIG_CPU_HAS_LASX=y CONFIG_CPU_HAS_LBT=y CONFIG_RANDOMIZE_BASE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_LOONGSON3_ACPI_CPUFREQ=y CONFIG_HIBERNATION=y CONFIG_PM_AUTOSLEEP=y CONFIG_PM_WAKELOCKS=y @@ -70,10 +74,6 @@ CONFIG_ACPI_TAD=m CONFIG_ACPI_DOCK=y CONFIG_ACPI_IPMI=m CONFIG_ACPI_PCI_SLOT=y -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y 
-CONFIG_LOONGSON3_ACPI_CPUFREQ=y CONFIG_ACPI_HOTPLUG_MEMORY=y CONFIG_ACPI_HED=y CONFIG_ACPI_CUSTOM_METHOD=y @@ -3628,7 +3628,6 @@ CONFIG_FB_ASILIANT=y CONFIG_FB_IMSTT=y CONFIG_FB_UVESA=m CONFIG_FB_EFI=y -CONFIG_FB_LS2K500=m CONFIG_FB_OPENCORES=m CONFIG_FB_S1D13XXX=m CONFIG_FB_RIVA=m @@ -3672,6 +3671,7 @@ CONFIG_FB_METRONOME=m CONFIG_FB_MB862XX=m CONFIG_FB_SSD1307=m CONFIG_FB_SM712=m +CONFIG_FB_LS2K500=m CONFIG_FIRMWARE_EDID=y CONFIG_LCD_CLASS_DEVICE=m CONFIG_LCD_L4F00242T03=m @@ -5626,7 +5626,6 @@ CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT3_FS=y CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_REISERFS_PROC_INFO=y CONFIG_REISERFS_FS_XATTR=y @@ -5979,7 +5978,6 @@ CONFIG_FONT_6x8=y CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO_DWARF5=y -CONFIG_DEBUG_INFO_BTF=y CONFIG_GDB_SCRIPTS=y CONFIG_STRIP_ASM_SYMS=y CONFIG_READABLE_ASM=y @@ -6019,6 +6017,6 @@ CONFIG_TRACE_EVENT_INJECT=y CONFIG_RV=y CONFIG_RV_MON_WIP=y CONFIG_RV_MON_WWNR=y -CONFIG_IO_STRICT_DEVMEM=y +# CONFIG_STRICT_DEVMEM is not set CONFIG_FUNCTION_ERROR_INJECTION=y # CONFIG_RUNTIME_TESTING_MENU is not set diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index ab1b723d13582..1ddcbd4950ab1 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -69,8 +69,10 @@ static int __init efimap_populate_hugepages( if (pud_none(*pud)) { void *p = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); - if (!p) + if (!p) { + pr_err("can not alloc efimap huge pages!\n"); return -1; + } pmd_init(p); pud_populate(&init_mm, pud, p); } @@ -88,7 +90,8 @@ static void __init efi_map_pgt(void) { unsigned long node; unsigned long start, end; - unsigned long start_pfn, end_pfn; + efi_memory_desc_t *md; + u32 mem_type; pgd_efi = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); if (!pgd_efi) { @@ -105,13 +108,33 @@ static void __init efi_map_pgt(void) /* MMIO Registers, Uncached */ efimap_populate_hugepages(SZ_256M | (node << 44), SZ_512M | (node << 44), PAGE_KERNEL_SUC); + } - get_pfn_range_for_nid(node, &start_pfn, &end_pfn); - start = ALIGN_DOWN(start_pfn << PAGE_SHIFT, PMD_SIZE); - end = ALIGN(end_pfn << PAGE_SHIFT, PMD_SIZE); - - /* System memory, Cached */ - efimap_populate_hugepages(node ? start : SZ_512M, end, PAGE_KERNEL); + /* Parse memory information */ + for_each_efi_memory_desc(md) { + mem_type = md->type; + start = ALIGN_DOWN(md->phys_addr, PMD_SIZE); + end = ALIGN(start + (md->num_pages << EFI_PAGE_SHIFT), PMD_SIZE); + node = start >> 44; + + switch (mem_type) { + case EFI_LOADER_CODE: + case EFI_LOADER_DATA: + case EFI_BOOT_SERVICES_CODE: + case EFI_BOOT_SERVICES_DATA: + case EFI_PAL_CODE: + case EFI_UNUSABLE_MEMORY: + case EFI_ACPI_RECLAIM_MEMORY: + case EFI_RESERVED_TYPE: + case EFI_RUNTIME_SERVICES_CODE: + case EFI_RUNTIME_SERVICES_DATA: + efimap_populate_hugepages(node ? start : SZ_512M, end, PAGE_KERNEL); + break; + case EFI_MEMORY_MAPPED_IO: + case EFI_MEMORY_MAPPED_IO_PORT_SPACE: + efimap_populate_hugepages(node ? 
start : SZ_512M, end, PAGE_KERNEL_SUC); + break; + } } } diff --git a/arch/loongarch/kernel/legacy_boot.c b/arch/loongarch/kernel/legacy_boot.c index cc5ca07bb08f1..a23f47e4815cc 100644 --- a/arch/loongarch/kernel/legacy_boot.c +++ b/arch/loongarch/kernel/legacy_boot.c @@ -256,7 +256,7 @@ early_param("rd_size", rd_size_early); #endif -static __init void fw_init_cmdline(unsigned long argc, unsigned long cmdp) +__init void fw_init_cmdline(unsigned long argc, unsigned long cmdp) { int i; char **_fw_argv; diff --git a/arch/loongarch/kernel/legacy_boot.h b/arch/loongarch/kernel/legacy_boot.h index f69011a2f6bb5..0d0b428dd7f26 100644 --- a/arch/loongarch/kernel/legacy_boot.h +++ b/arch/loongarch/kernel/legacy_boot.h @@ -4,6 +4,7 @@ #include #include #include +#include #define ADDRESS_TYPE_SYSRAM 1 #define ADDRESS_TYPE_RESERVED 2 @@ -89,4 +90,6 @@ extern int __init pch_msi_parse_madt(union acpi_subtable_headers *header, const unsigned long end); extern struct irq_domain *get_pchpic_irq_domain(void); + +extern __init void fw_init_cmdline(unsigned long argc, unsigned long cmdp); #endif diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c index b5e2312a2fca5..121c3a8080e00 100644 --- a/arch/loongarch/kernel/relocate.c +++ b/arch/loongarch/kernel/relocate.c @@ -16,6 +16,7 @@ #include #include #include +#include "legacy_boot.h" #define RELOCATED(x) ((void *)((long)x + reloc_offset)) #define RELOCATED_KASLR(x) ((void *)((long)x + random_offset)) @@ -219,7 +220,10 @@ unsigned long __init relocate_kernel(void) void *location_new = _text; /* Default to original kernel start */ char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */ - strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE); + if (fw_arg0 < 2) + strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE); + else + fw_init_cmdline(fw_arg0, TO_CACHE(fw_arg1)); /* OLD BPI parameters */ #ifdef CONFIG_RANDOMIZE_BASE location_new = determine_relocation_address(); diff --git a/arch/x86/configs/deepin_x86_desktop_defconfig b/arch/x86/configs/deepin_x86_desktop_defconfig index b1a81edae3609..92155ac20cbf5 100644 --- a/arch/x86/configs/deepin_x86_desktop_defconfig +++ b/arch/x86/configs/deepin_x86_desktop_defconfig @@ -5197,6 +5197,7 @@ CONFIG_CRYPTO_DEV_ASPEED=m CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH=y CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO=y CONFIG_CRYPTO_DEV_ASPEED_ACRY=y +CONFIG_CRYPTO_DEV_TSSE=m CONFIG_PKCS8_PRIVATE_KEY_PARSER=m CONFIG_PKCS7_TEST_KEY=m CONFIG_SIGNED_PE_FILE_VERIFICATION=y diff --git a/arch/x86/events/zhaoxin/Makefile b/arch/x86/events/zhaoxin/Makefile index 642c1174d6626..767d6212bac16 100644 --- a/arch/x86/events/zhaoxin/Makefile +++ b/arch/x86/events/zhaoxin/Makefile @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 obj-y += core.o +obj-y += uncore.o diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c index 4bdfcf0912001..b6117c713a922 100644 --- a/arch/x86/events/zhaoxin/core.c +++ b/arch/x86/events/zhaoxin/core.c @@ -20,15 +20,14 @@ #include "../perf_event.h" /* - * Zhaoxin PerfMon, used on zxc and later. + * Zhaoxin PerfMon, used on Lujiazui and later. 
*/ static u64 zx_pmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = { - - [PERF_COUNT_HW_CPU_CYCLES] = 0x0082, - [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515, - [PERF_COUNT_HW_CACHE_MISSES] = 0x051a, - [PERF_COUNT_HW_BUS_CYCLES] = 0x0083, + [PERF_COUNT_HW_CPU_CYCLES] = 0x0082, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_BUS_CYCLES] = 0x0083, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0028, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x0029, }; static struct event_constraint zxc_event_constraints[] __read_mostly = { @@ -37,7 +36,7 @@ static struct event_constraint zxc_event_constraints[] __read_mostly = { EVENT_CONSTRAINT_END }; -static struct event_constraint zxd_event_constraints[] __read_mostly = { +static struct event_constraint wudaokou_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* retired instructions */ FIXED_EVENT_CONSTRAINT(0x0082, 1), /* unhalted core clock cycles */ @@ -45,7 +44,7 @@ static struct event_constraint zxd_event_constraints[] __read_mostly = { EVENT_CONSTRAINT_END }; -static __initconst const u64 zxd_hw_cache_event_ids +static __initconst const u64 wudaokou_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { @@ -149,7 +148,7 @@ static __initconst const u64 zxd_hw_cache_event_ids }, }; -static __initconst const u64 zxe_hw_cache_event_ids +static __initconst const u64 lujiazui_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { @@ -260,7 +259,9 @@ static void zhaoxin_pmu_disable_all(void) static void zhaoxin_pmu_enable_all(int added) { - wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask); } static inline u64 zhaoxin_pmu_get_status(void) @@ -287,12 +288,31 @@ static inline void zxc_pmu_ack_status(u64 ack) zhaoxin_pmu_disable_all(); } -static void zhaoxin_pmu_disable_fixed(struct hw_perf_event *hwc) +static inline void zhaoxin_set_masks(struct perf_event *event, int idx) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + if (event->attr.exclude_host) + __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); + if (event->attr.exclude_guest) + __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); +} + +static inline void zhaoxin_clear_masks(struct perf_event *event, int idx) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); + __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); +} + +static void zhaoxin_pmu_disable_fixed(struct perf_event *event) { - int idx = hwc->idx - INTEL_PMC_IDX_FIXED; + struct hw_perf_event *hwc = &event->hw; u64 ctrl_val, mask; + int idx = hwc->idx; - mask = 0xfULL << (idx * 4); + mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4); rdmsrq(hwc->config_base, ctrl_val); ctrl_val &= ~mask; @@ -302,19 +322,23 @@ static void zhaoxin_pmu_disable_fixed(struct hw_perf_event *hwc) static void zhaoxin_pmu_disable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + zhaoxin_clear_masks(event, idx); if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { - zhaoxin_pmu_disable_fixed(hwc); + zhaoxin_pmu_disable_fixed(event); return; } x86_pmu_disable_event(event); } -static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc) +static void 
zhaoxin_pmu_enable_fixed(struct perf_event *event) { - int idx = hwc->idx - INTEL_PMC_IDX_FIXED; - u64 ctrl_val, bits, mask; + struct hw_perf_event *hwc = &event->hw; + u64 ctrl_val, mask, bits = 0; + int idx = hwc->idx; /* * Enable IRQ generation (0x8), @@ -327,6 +351,7 @@ static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc) if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) bits |= 0x1; + idx -= INTEL_PMC_IDX_FIXED; bits <<= (idx * 4); mask = 0xfULL << (idx * 4); @@ -339,9 +364,12 @@ static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc) static void zhaoxin_pmu_enable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + zhaoxin_set_masks(event, idx); if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { - zhaoxin_pmu_enable_fixed(hwc); + zhaoxin_pmu_enable_fixed(event); return; } @@ -456,6 +484,19 @@ static ssize_t zhaoxin_event_sysfs_show(char *page, u64 config) return x86_event_sysfs_show(page, config, event); } +static struct perf_guest_switch_msr *zhaoxin_guest_get_msrs(int *nr, void *data) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; + + arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; + arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask; + arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask; + *nr = 1; + + return arr; +} + static const struct x86_pmu zhaoxin_pmu __initconst = { .name = "zhaoxin", .handle_irq = zhaoxin_pmu_handle_irq, @@ -471,13 +512,15 @@ static const struct x86_pmu zhaoxin_pmu __initconst = { .max_events = ARRAY_SIZE(zx_pmon_event_map), .apic = 1, /* - * For zxd/zxe, read/write operation for PMCx MSR is 48 bits. + * For wudaokou/lujiazui, read/write operation for PMCx MSR is 48 bits. 
*/ .max_period = (1ULL << 47) - 1, .get_event_constraints = zhaoxin_get_event_constraints, .format_attrs = zx_arch_formats_attr, .events_sysfs_show = zhaoxin_event_sysfs_show, + + .guest_get_msrs = zhaoxin_guest_get_msrs, }; static const struct { int id; char *name; } zx_arch_events_map[] __initconst = { @@ -559,6 +602,8 @@ __init int zhaoxin_pmu_init(void) zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0; zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0; zx_pmon_event_map[PERF_COUNT_HW_BUS_CYCLES] = 0; + zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0; + zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0; pr_cont("ZXC events, "); break; @@ -574,26 +619,50 @@ __init int zhaoxin_pmu_init(void) switch (boot_cpu_data.x86_model) { case 0x1b: - memcpy(hw_cache_event_ids, zxd_hw_cache_event_ids, + memcpy(hw_cache_event_ids, wudaokou_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - x86_pmu.event_constraints = zxd_event_constraints; + x86_pmu.event_constraints = wudaokou_event_constraints; + + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0x051a; zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0700; zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0709; - pr_cont("ZXD events, "); + pr_cont("Wudaokou events, "); break; case 0x3b: - memcpy(hw_cache_event_ids, zxe_hw_cache_event_ids, + memcpy(hw_cache_event_ids, lujiazui_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - x86_pmu.event_constraints = zxd_event_constraints; + x86_pmu.event_constraints = wudaokou_event_constraints; - zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0028; - zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0029; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0x051a; + + pr_cont("Lujiazui events, "); + break; + case 0x5b: + case 0x6b: + zx_pmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = + X86_CONFIG(.event = 0x02, .umask = 0x01, .inv = 0x01, + .cmask = 0x01); + + memcpy(hw_cache_event_ids, lujiazui_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + + x86_pmu.event_constraints = wudaokou_event_constraints; + + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x051a; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0; + + if (boot_cpu_data.x86_model == 0x5b) + pr_cont("Yongfeng events, "); + + if (boot_cpu_data.x86_model == 0x6b) + pr_cont("Shijidadao events, "); - pr_cont("ZXE events, "); break; default: return -ENODEV; @@ -616,4 +685,3 @@ __init int zhaoxin_pmu_init(void) return 0; } - diff --git a/arch/x86/events/zhaoxin/uncore.c b/arch/x86/events/zhaoxin/uncore.c new file mode 100644 index 0000000000000..50b0591b4eae1 --- /dev/null +++ b/arch/x86/events/zhaoxin/uncore.c @@ -0,0 +1,2944 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include "uncore.h" + +static struct zhaoxin_uncore_type *empty_uncore[] = { NULL, }; +static struct zhaoxin_uncore_type **uncore_msr_uncores = empty_uncore; +static struct zhaoxin_uncore_type **uncore_pci_uncores = empty_uncore; +static struct zhaoxin_uncore_type **uncore_mmio_uncores = empty_uncore; + +static bool pcidrv_registered; +static struct pci_driver *uncore_pci_driver; + +/* mask of cpus that collect uncore events */ +static cpumask_t uncore_cpu_mask; +static cpumask_t uncore_cpu_subnode_mask; +static cpumask_t uncore_cpu_cluster_mask; + +/* constraint for the fixed counter */ +static struct event_constraint uncore_constraint_fixed = + EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL); + 
+static int max_packages, max_subnodes, max_clusters; +static int clusters_per_subnode; +static int subnodes_per_die; +static int dies_per_socket; + +#define KH40000_MAX_SUBNODE_NUMBER 8 +static int kh40000_pcibus_limit[KH40000_MAX_SUBNODE_NUMBER]; + +/* get CPU topology register */ +#define BJ_GLOBAL_STATUS_MSR 0x1610 +#define BJ_HDW_CONFIG_MSR 0X1628 + +/* KX5000/KX6000 event control */ +#define KX5000_UNC_CTL_EV_SEL_MASK 0x000000ff +#define KX5000_UNC_CTL_UMASK_MASK 0x0000ff00 +#define KX5000_UNC_CTL_EDGE_DET (1 << 18) +#define KX5000_UNC_CTL_EN (1 << 22) +#define KX5000_UNC_CTL_INVERT (1 << 23) +#define KX5000_UNC_CTL_CMASK_MASK 0x7000000 +#define KX5000_UNC_FIXED_CTR_CTL_EN (1 << 0) + +#define KX5000_UNC_RAW_EVENT_MASK (KX5000_UNC_CTL_EV_SEL_MASK | \ + KX5000_UNC_CTL_UMASK_MASK | \ + KX5000_UNC_CTL_EDGE_DET | \ + KX5000_UNC_CTL_INVERT | \ + KX5000_UNC_CTL_CMASK_MASK) + +/* KX5000/KX6000 uncore global register */ +#define KX5000_UNC_PERF_GLOBAL_CTL 0x391 +#define KX5000_UNC_FIXED_CTR 0x394 +#define KX5000_UNC_FIXED_CTR_CTRL 0x395 + +/* KX5000/KX6000 uncore global control */ +#define KX5000_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 4) - 1) +#define KX5000_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) + +/* KX5000/KX6000 uncore register */ +#define KX5000_UNC_PERFEVTSEL0 0x3c0 +#define KX5000_UNC_UNCORE_PMC0 0x3b0 + +/* KH40000 event control */ +#define KH40000_PMON_CTL_EV_SEL_MASK 0x000000ff +#define KH40000_PMON_CTL_UMASK_MASK 0x0000ff00 +#define KH40000_PMON_CTL_RST (1 << 17) +#define KH40000_PMON_CTL_EDGE_DET (1 << 18) +#define KH40000_PMON_CTL_EN (1 << 22) +#define KH40000_PMON_CTL_INVERT (1 << 23) +#define KH40000_PMON_CTL_THRESH_MASK 0xff000000 +#define KH40000_PMON_RAW_EVENT_MASK (KH40000_PMON_CTL_EV_SEL_MASK | \ + KH40000_PMON_CTL_UMASK_MASK | \ + KH40000_PMON_CTL_EDGE_DET | \ + KH40000_PMON_CTL_INVERT | \ + KH40000_PMON_CTL_THRESH_MASK) + +/* KH40000 LLC register*/ +#define KH40000_LLC_MSR_PMON_CTL0 0x1660 +#define KH40000_LLC_MSR_PMON_CTR0 0x165c +#define KH40000_LLC_MSR_PMON_BLK_CTL 0x1665 + +/* KH40000 HIF register*/ +#define KH40000_HIF_MSR_PMON_CTL0 0x1656 +#define KH40000_HIF_MSR_PMON_CTR0 0x1651 +#define KH40000_HIF_MSR_PMON_FIXED_CTL 0x1655 +#define KH40000_HIF_MSR_PMON_FIXED_CTR 0x1650 +#define KH40000_HIF_MSR_PMON_BLK_CTL 0x165b + +/* KH40000 ZZI(ZPI+ZOI+INI) register*/ +#define KH40000_ZZI_MSR_PMON_CTL0 0x166A +#define KH40000_ZZI_MSR_PMON_CTR0 0x1666 +#define KH40000_ZZI_MSR_PMON_BLK_CTL 0x166f + +/* KH40000 MC register*/ +#define KH40000_MC0_CHy_PMON_FIXED_CTL 0xf40 +#define KH40000_MC0_CHy_PMON_FIXED_CTR 0xf20 +#define KH40000_MC0_CHy_PMON_CTR0 0xf00 +#define KH40000_MC0_CHy_PMON_CTL0 0xf28 +#define KH40000_MC0_CHy_PMON_BLK_CTL 0xf44 + +#define KH40000_MC1_CHy_PMON_FIXED_CTL 0xf90 +#define KH40000_MC1_CHy_PMON_FIXED_CTR 0xf70 +#define KH40000_MC1_CHy_PMON_CTR0 0xf50 +#define KH40000_MC1_CHy_PMON_CTL0 0xf78 +#define KH40000_MC1_CHy_PMON_BLK_CTL 0xf94 + +/* KH40000 PCI register*/ +#define KH40000_PCI_PMON_CTR0 0xf00 +#define KH40000_PCI_PMON_CTL0 0xf28 +#define KH40000_PCI_PMON_BLK_CTL 0xf44 + +/* KH40000 ZPI_DLL register*/ +#define KH40000_ZPI_DLL_PMON_FIXED_CTL 0xf40 +#define KH40000_ZPI_DLL_PMON_FIXED_CTR 0xf20 +#define KH40000_ZPI_DLL_PMON_CTR0 0xf00 +#define KH40000_ZPI_DLL_PMON_CTL0 0xf28 +#define KH40000_ZPI_DLL_PMON_BLK_CTL 0xf44 + +/* KH40000 ZDI_DLL register*/ +#define KH40000_ZDI_DLL_PMON_FIXED_CTL 0xf40 +#define KH40000_ZDI_DLL_PMON_FIXED_CTR 0xf20 +#define KH40000_ZDI_DLL_PMON_CTR0 0xf00 +#define KH40000_ZDI_DLL_PMON_CTL0 0xf28 +#define KH40000_ZDI_DLL_PMON_BLK_CTL 0xf44 
+ +/* KH40000 PXPTRF register*/ +#define KH40000_PXPTRF_PMON_CTR0 0xf00 +#define KH40000_PXPTRF_PMON_CTL0 0xf28 +#define KH40000_PXPTRF_PMON_BLK_CTL 0xf44 + +/* KH40000 Box level control */ +#define KH40000_PMON_BOX_CTL_RST_CTRL (1 << 0) +#define KH40000_PMON_BOX_CTL_RST_CTRS (1 << 1) +#define KH40000_PMON_BOX_CTL_FRZ (1 << 8) +#define KH40000_PMON_PCI_BOX_PMON_EN (1 << 31) + +#define KH40000_PMON_BOX_CTL_INT (KH40000_PMON_BOX_CTL_RST_CTRL | \ + KH40000_PMON_BOX_CTL_RST_CTRS) + +#define KH40000_PMON_PCI_BOX_CTL_INT (KH40000_PMON_BOX_CTL_RST_CTRL | \ + KH40000_PMON_BOX_CTL_RST_CTRS | \ + KH40000_PMON_PCI_BOX_PMON_EN) + +/* KX7000 event control */ +#define KX7000_PMON_CTL_EV_SEL_MASK 0x000000ff +#define KX7000_PMON_CTL_UMASK_MASK 0x0000ff00 +#define KX7000_PMON_CTL_RST (1 << 17) +#define KX7000_PMON_CTL_EDGE_DET (1 << 18) +#define KX7000_PMON_CTL_LOGIC_OP0 (1 << 19) +#define KX7000_PMON_CTL_LOGIC_OP1 (1 << 21) +#define KX7000_PMON_CTL_EN (1 << 22) +#define KX7000_PMON_CTL_INVERT (1 << 23) +#define KX7000_PMON_CTL_THRESH_MASK 0xff000000 +#define KX7000_PMON_RAW_EVENT_MASK (KX7000_PMON_CTL_EV_SEL_MASK | \ + KX7000_PMON_CTL_UMASK_MASK | \ + KX7000_PMON_CTL_EDGE_DET | \ + KX7000_PMON_CTL_LOGIC_OP0 | \ + KX7000_PMON_CTL_LOGIC_OP1 | \ + KX7000_PMON_CTL_INVERT | \ + KX7000_PMON_CTL_THRESH_MASK) + +/* KX7000 LLC register*/ +#define KX7000_LLC_MSR_PMON_CTL0 0x1979 +#define KX7000_LLC_MSR_PMON_CTR0 0x1975 +#define KX7000_LLC_MSR_PMON_BLK_CTL 0x197e + +/* KX7000 MESH register*/ +#define KX7000_MESH_MSR_PMON_CTL0 0x1983 +#define KX7000_MESH_MSR_PMON_CTR0 0x197f +#define KX7000_MESH_MSR_PMON_BLK_CTL 0x1987 + +/* KX7000 HOMESTOP register*/ +#define KX7000_HOMESTOP_MSR_PMON_CTL0 0x196a +#define KX7000_HOMESTOP_MSR_PMON_CTR0 0x1966 +#define KX7000_HOMESTOP_MSR_PMON_BLK_CTL 0x196e +#define KX7000_HOMESTOP_MSR_PMON_FIXED_CTR 0x1970 +#define KX7000_HOMESTOP_MSR_PMON_FIXED_CTL 0x1971 + +/* KX7000 CCDie ZDI_PL register*/ +#define KX7000_CCD_ZDI_PL_MSR_PMON_CTL0 0x1960 +#define KX7000_CCD_ZDI_PL_MSR_PMON_CTR0 0x195c +#define KX7000_CCD_ZDI_PL_MSR_PMON_BLK_CTL 0x1964 + +/* KX7000 cIODie ZDI_PL register*/ +#define KX7000_IOD_ZDI_PL_MSR_PMON_CTL0 0x1894 +#define KX7000_IOD_ZDI_PL_MSR_PMON_CTR0 0x1890 +#define KX7000_IOD_ZDI_PL_MSR_PMON_BLK_CTL 0x1898 +#define KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR 0x189A +#define KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL 0x189B + +/* KX7000 MC register*/ +#define KX7000_MC_A0_CHy_PMON_FIXED_CTL 0xe30 +#define KX7000_MC_A0_CHy_PMON_FIXED_CTR 0xe08 +#define KX7000_MC_A0_CHy_PMON_CTR0 0xe00 +#define KX7000_MC_A0_CHy_PMON_CTL0 0xe20 +#define KX7000_MC_A0_CHy_PMON_BLK_CTL 0xe34 + +#define KX7000_MC_A1_CHy_PMON_FIXED_CTL 0xe70 +#define KX7000_MC_A1_CHy_PMON_FIXED_CTR 0xe48 +#define KX7000_MC_A1_CHy_PMON_CTR0 0xe40 +#define KX7000_MC_A1_CHy_PMON_CTL0 0xe60 +#define KX7000_MC_A1_CHy_PMON_BLK_CTL 0xe74 + +#define KX7000_MC_B0_CHy_PMON_FIXED_CTL 0xeb0 +#define KX7000_MC_B0_CHy_PMON_FIXED_CTR 0xe88 +#define KX7000_MC_B0_CHy_PMON_CTR0 0xe80 +#define KX7000_MC_B0_CHy_PMON_CTL0 0xea0 +#define KX7000_MC_B0_CHy_PMON_BLK_CTL 0xeb4 + +#define KX7000_MC_B1_CHy_PMON_FIXED_CTL 0xef0 +#define KX7000_MC_B1_CHy_PMON_FIXED_CTR 0xec8 +#define KX7000_MC_B1_CHy_PMON_CTR0 0xec0 +#define KX7000_MC_B1_CHy_PMON_CTL0 0xee0 +#define KX7000_MC_B1_CHy_PMON_BLK_CTL 0xef4 + +#define KX7000_ZDI_DL_MMIO_PMON_CTR0 0xf00 +#define KX7000_ZDI_DL_MMIO_PMON_CTL0 0xf28 +#define KX7000_ZDI_DL_MMIO_PMON_BLK_CTL 0xf44 +#define KX7000_IOD_ZDI_DL_MMIO_BASE_OFFSET 0x168 +#define KX7000_CCD_ZDI_DL_MMIO_BASE_OFFSET 0x170 +#define 
KX7000_ZDI_DL_MMIO_BASE_MASK 0x3fff +#define KX7000_ZDI_DL_MMIO_BASE_MASK 0x3fff +#define KX7000_ZDI_DL_MMIO_MEM0_MASK 0xfffff000 +#define KX7000_ZDI_DL_MMIO_SIZE 0x1000 + +DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); +DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); +DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); +DEFINE_UNCORE_FORMAT_ATTR(logic_op0, logic_op0, "config:19"); +DEFINE_UNCORE_FORMAT_ATTR(logic_op1, logic_op1, "config:21"); +DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); +DEFINE_UNCORE_FORMAT_ATTR(cmask3, cmask, "config:24-26"); +DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); + +static void get_hdw_config_msr(void *config) +{ + u64 *data = (u64 *)config; + + rdmsrl(BJ_HDW_CONFIG_MSR, *data); +} + +static void get_global_status_msr(void *status) +{ + u64 *data = (u64 *)status; + + rdmsrl(BJ_GLOBAL_STATUS_MSR, *data); +} + +/*topology number : get max packages/subnode/clusters number*/ +static void get_topology_number(void) +{ + int clusters; + int subnodes; + int dies; + int packages; + u64 data; + + rdmsrl(BJ_GLOBAL_STATUS_MSR, data); + + /* check packages number */ + packages = data & 0x1; + if (packages) + max_packages = 2; + else + max_packages = 1; + + /* only Yongfeng needs die/subnode/cluster info */ + if (boot_cpu_data.x86_model != ZHAOXIN_FAM7_KH40000) + return; + + /* check dies_per_socket */ + dies = (data >> 12) & 0x1; + if (dies) + dies_per_socket = 2; + else + dies_per_socket = 1; + + /* check subnodes_per_die */ + subnodes = (data >> 32) & 0x3; + if (subnodes == 0x3) + subnodes_per_die = 2; + else + subnodes_per_die = 1; + + /* check clusters_per_subnode */ + clusters = (data >> 6) & 0x3; + if (clusters == 0x3) + clusters_per_subnode = 2; + else + clusters_per_subnode = 1; + + max_subnodes = max_packages * dies_per_socket * subnodes_per_die; + max_clusters = clusters_per_subnode * max_subnodes; +} + +static int get_pcibus_limit(void) +{ + struct pci_dev *dev; + u32 val; + int i = 0; + + dev = pci_get_device(0x1D17, 0x31B1, NULL); + if (dev == NULL) + return -ENODEV; + + pci_read_config_dword(dev, 0x94, &val); + kh40000_pcibus_limit[i++] = (val & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 8 & 0x1f) << 3 | 0x7; + if (dies_per_socket == 2) { + kh40000_pcibus_limit[i++] = (val >> 16 & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 24 & 0x1f) << 3 | 0x7; + } + + if (max_packages == 2) { + pci_read_config_dword(dev, 0x9c, &val); + kh40000_pcibus_limit[i++] = (val & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 8 & 0x1f) << 3 | 0x7; + if (dies_per_socket == 2) { + kh40000_pcibus_limit[i++] = (val >> 16 & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 24 & 0x1f) << 3 | 0x7; + } + } + + return 0; +} + +static int uncore_pcibus_to_subnodeid(struct pci_bus *bus) +{ + int i; + + for (i = 0; i < KH40000_MAX_SUBNODE_NUMBER; i++) { + if (bus->number < kh40000_pcibus_limit[i]) + break; + } + + return i; +} + +DEFINE_PER_CPU(int, zx_package_id); +DEFINE_PER_CPU(int, zx_subnode_id); +DEFINE_PER_CPU(int, zx_cluster_id); + +static void get_topology_info(void) +{ + int cpu; + int cluster_id; + int socket_id; + int die_id; + int subnode_id; + + int die_info; + int subnode_info; + int cluster_info; + + u64 config; + + for_each_present_cpu(cpu) { + smp_call_function_single(cpu, get_global_status_msr, &config, 1); + socket_id = (int)((config >> 3) & 0x1); + per_cpu(zx_package_id, cpu) = socket_id; + + /* only kh40000 needs cluster and subnode info */ + if (boot_cpu_data.x86_model != ZHAOXIN_FAM7_KH40000) + 
continue; + + smp_call_function_single(cpu, get_hdw_config_msr, &config, 1); + + die_info = (int)((config >> 21) & 0x3); + die_id = socket_id * dies_per_socket + die_info; + + subnode_info = (int)((config >> 20) & 0x1); + subnode_id = die_id * subnodes_per_die + subnode_info; + per_cpu(zx_subnode_id, cpu) = subnode_id; + + cluster_info = (int)((config >> 18) & 0x3); + cluster_id = subnode_id * clusters_per_subnode + cluster_info; + per_cpu(zx_cluster_id, cpu) = cluster_id; + } +} + +static int zx_topology_cluster_id(int cpu) +{ + return per_cpu(zx_cluster_id, cpu); +} + +static int zx_topology_subnode_id(int cpu) +{ + return per_cpu(zx_subnode_id, cpu); +} + +static int zx_topology_package_id(int cpu) +{ + return per_cpu(zx_package_id, cpu); +} + +DEFINE_PER_CPU(cpumask_t, zx_cluster_core_bits); +DEFINE_PER_CPU(cpumask_t, zx_subnode_core_bits); + +static void zx_gen_core_map(void) +{ + int i, nr, cpu; + int cluster_id, subnode_id; + + for_each_present_cpu(cpu) { + cluster_id = zx_topology_cluster_id(cpu); + + for (i = 0; i < 4; i++) { + nr = (cluster_id << 2) + i; + cpumask_set_cpu(nr, &per_cpu(zx_cluster_core_bits, cpu)); + } + } + + for_each_present_cpu(cpu) { + subnode_id = zx_topology_subnode_id(cpu); + + for (i = 0; i < 8; i++) { + nr = (subnode_id << 3) + i; + cpumask_set_cpu(nr, &per_cpu(zx_subnode_core_bits, cpu)); + } + } +} + +static struct cpumask *topology_cluster_core_cpumask(int cpu) +{ + return &per_cpu(zx_cluster_core_bits, cpu); +} + +static struct cpumask *topology_subnode_core_cpumask(int cpu) +{ + return &per_cpu(zx_subnode_core_bits, cpu); +} + +static void uncore_free_pcibus_map(void) +{ + +} + +static int kh40000_pci2node_map_init(void) +{ + return 0; +} + +ssize_t zx_uncore_event_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct uncore_event_desc *event = + container_of(attr, struct uncore_event_desc, attr); + return sprintf(buf, "%s", event->config); +} + +static struct zhaoxin_uncore_box *uncore_pmu_to_box(struct zhaoxin_uncore_pmu *pmu, int cpu) +{ + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(pmu->type->name, "llc")) + return pmu->boxes[zx_topology_cluster_id(cpu)]; + else + return pmu->boxes[zx_topology_subnode_id(cpu)]; + } else { + return pmu->boxes[zx_topology_package_id(cpu)]; + } +} + +static u64 uncore_msr_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + u64 count; + + WARN_ON_ONCE(box->cpu != smp_processor_id()); + rdmsrl(event->hw.event_base, count); + return count; +} + +static void uncore_assign_hw_event(struct zhaoxin_uncore_box *box, struct perf_event *event, + int idx) +{ + struct hw_perf_event *hwc = &event->hw; + + hwc->idx = idx; + hwc->last_tag = ++box->tags[idx]; + + if (uncore_pmc_fixed(hwc->idx)) { + hwc->event_base = uncore_fixed_ctr(box); + hwc->config_base = uncore_fixed_ctl(box); + return; + } + + hwc->config_base = uncore_event_ctl(box, hwc->idx); + hwc->event_base = uncore_perf_ctr(box, hwc->idx); +} + +void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + u64 prev_count, new_count, delta; + int shift; + + if (uncore_pmc_fixed(event->hw.idx)) + shift = 64 - uncore_fixed_ctr_bits(box); + else + shift = 64 - uncore_perf_ctr_bits(box); + + /* the hrtimer might modify the previous event value */ +again: + prev_count = local64_read(&event->hw.prev_count); + new_count = uncore_read_counter(box, event); + if (local64_xchg(&event->hw.prev_count, new_count) != prev_count) + goto again; + + delta = (new_count << shift) - 
(prev_count << shift); + delta >>= shift; + + local64_add(delta, &event->count); +} + +/*KX5000/KX6000 uncore ops start*/ +static void kx5000_uncore_msr_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + wrmsrq(event->hw.config_base, 0); +} + +static void kx5000_uncore_msr_disable_box(struct zhaoxin_uncore_box *box) +{ + wrmsrq(KX5000_UNC_PERF_GLOBAL_CTL, 0); +} + +static void kx5000_uncore_msr_enable_box(struct zhaoxin_uncore_box *box) +{ + wrmsrq(KX5000_UNC_PERF_GLOBAL_CTL, + KX5000_UNC_GLOBAL_CTL_EN_PC_ALL | KX5000_UNC_GLOBAL_CTL_EN_FC); +} + +static void kx5000_uncore_msr_enable_event(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (hwc->idx < UNCORE_PMC_IDX_FIXED) + wrmsrq(hwc->config_base, hwc->config | KX5000_UNC_CTL_EN); + else + wrmsrq(hwc->config_base, KX5000_UNC_FIXED_CTR_CTL_EN); +} + +static struct attribute *kx5000_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_cmask3.attr, + NULL, +}; + +static struct attribute_group kx5000_uncore_format_group = { + .name = "format", + .attrs = kx5000_uncore_formats_attr, +}; + +static struct uncore_event_desc kx5000_uncore_events[] = { + { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops kx5000_uncore_msr_ops = { + .disable_box = kx5000_uncore_msr_disable_box, + .enable_box = kx5000_uncore_msr_enable_box, + .disable_event = kx5000_uncore_msr_disable_event, + .enable_event = kx5000_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static struct zhaoxin_uncore_type kx5000_uncore_box = { + .name = "", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX5000_UNC_PERFEVTSEL0, + .perf_ctr = KX5000_UNC_UNCORE_PMC0, + .fixed_ctr = KX5000_UNC_FIXED_CTR, + .fixed_ctl = KX5000_UNC_FIXED_CTR_CTRL, + .event_mask = KX5000_UNC_RAW_EVENT_MASK, + .event_descs = kx5000_uncore_events, + .ops = &kx5000_uncore_msr_ops, + .format_group = &kx5000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *kx5000_msr_uncores[] = { + &kx5000_uncore_box, + NULL, +}; +/*KX5000/KX6000 uncore ops end*/ + +/*KH40000 msr ops start*/ +static void kh40000_uncore_msr_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + wrmsrq(hwc->config_base, hwc->config); +} + +static void kh40000_uncore_msr_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + wrmsrq(hwc->config_base, hwc->config | KH40000_PMON_CTL_EN); +} + +static void kh40000_uncore_msr_disable_box(struct zhaoxin_uncore_box *box) +{ + u64 config; + unsigned int msr; + + msr = uncore_msr_box_ctl(box); + if (msr) { + rdmsrl(msr, config); + config |= KH40000_PMON_BOX_CTL_FRZ; + wrmsrq(msr, config); + } +} + +static void kh40000_uncore_msr_enable_box(struct zhaoxin_uncore_box *box) +{ + u64 config; + unsigned int msr; + + msr = uncore_msr_box_ctl(box); + if (msr) { + rdmsrl(msr, config); + config &= ~KH40000_PMON_BOX_CTL_FRZ; + wrmsrq(msr, config); + } +} + +static void kh40000_uncore_msr_init_box(struct zhaoxin_uncore_box *box) +{ + unsigned int msr = uncore_msr_box_ctl(box); + + if (msr) { + wrmsrq(msr, KH40000_PMON_BOX_CTL_INT); + wrmsrq(msr, 0); + } +} + +static struct attribute *kh40000_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + 
&format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL, +}; + +static struct attribute_group kh40000_uncore_format_group = { + .name = "format", + .attrs = kh40000_uncore_formats_attr, +}; + +static struct uncore_event_desc kh40000_uncore_llc_box_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_hif_box_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_zzi_box_events[] = { + { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops kh40000_uncore_msr_ops = { + .init_box = kh40000_uncore_msr_init_box, + .disable_box = kh40000_uncore_msr_disable_box, + .enable_box = kh40000_uncore_msr_enable_box, + .disable_event = kh40000_uncore_msr_disable_event, + .enable_event = kh40000_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static struct zhaoxin_uncore_type kh40000_uncore_llc_box = { + .name = "llc", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = KH40000_LLC_MSR_PMON_CTL0, + .perf_ctr = KH40000_LLC_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_LLC_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_llc_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kh40000_uncore_hif_box = { + .name = "hif", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KH40000_HIF_MSR_PMON_CTL0, + .perf_ctr = KH40000_HIF_MSR_PMON_CTR0, + .fixed_ctr = KH40000_HIF_MSR_PMON_FIXED_CTR, + .fixed_ctl = KH40000_HIF_MSR_PMON_FIXED_CTL, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_HIF_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_hif_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kh40000_uncore_zzi_box = { + .name = "zzi", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = KH40000_ZZI_MSR_PMON_CTL0, + .perf_ctr = KH40000_ZZI_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZZI_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_zzi_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *kh40000_msr_uncores[] = { + &kh40000_uncore_llc_box, + &kh40000_uncore_hif_box, + &kh40000_uncore_zzi_box, + NULL, +}; +/*KH40000 msr ops end*/ + +/*KH40000 pci ops start*/ +static void kh40000_uncore_pci_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, hwc->config_base, hwc->config); +} + +static void kh40000_uncore_pci_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, hwc->config_base, hwc->config | KH40000_PMON_CTL_EN); +} + +static void kh40000_uncore_pci_disable_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + u32 config = 0; + + if (!pci_read_config_dword(pdev, box_ctl, &config)) { + config |= KH40000_PMON_BOX_CTL_FRZ; + pci_write_config_dword(pdev, box_ctl, config); + } +} + +static void kh40000_uncore_pci_enable_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = 
uncore_pci_box_ctl(box); + u32 config = 0; + + if (!pci_read_config_dword(pdev, box_ctl, &config)) { + config &= ~KH40000_PMON_BOX_CTL_FRZ; + pci_write_config_dword(pdev, box_ctl, config); + } +} + +static u64 kh40000_uncore_pci_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count + 1); + pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count); + + return count; +} + +static void kh40000_uncore_pci_init_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + + pci_write_config_dword(pdev, box_ctl, KH40000_PMON_PCI_BOX_CTL_INT); +} + +static struct uncore_event_desc kh40000_uncore_imc_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_pci_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_zpi_dll_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_zdi_dll_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_pxptrf_events[] = { + { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops kh40000_uncore_pci_ops = { + .init_box = kh40000_uncore_pci_init_box, + .disable_box = kh40000_uncore_pci_disable_box, + .enable_box = kh40000_uncore_pci_enable_box, + .disable_event = kh40000_uncore_pci_disable_event, + .enable_event = kh40000_uncore_pci_enable_event, + .read_counter = kh40000_uncore_pci_read_counter +}; + +static struct zhaoxin_uncore_type kh40000_uncore_mc0 = { + .name = "mc0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KH40000_MC0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KH40000_MC0_CHy_PMON_FIXED_CTL, + .event_descs = kh40000_uncore_imc_events, + .perf_ctr = KH40000_MC0_CHy_PMON_CTR0, + .event_ctl = KH40000_MC0_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_MC0_CHy_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_mc1 = { + .name = "mc1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KH40000_MC1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KH40000_MC1_CHy_PMON_FIXED_CTL, + .event_descs = kh40000_uncore_imc_events, + .perf_ctr = KH40000_MC1_CHy_PMON_CTR0, + .event_ctl = KH40000_MC1_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_MC1_CHy_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_pci = { + .name = "pci", + .num_counters = 4, + .num_boxes = 10, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pci_events, + .perf_ctr = KH40000_PCI_PMON_CTR0, + .event_ctl = KH40000_PCI_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PCI_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_zpi_dll = { + .name = "zpi_dll", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_zpi_dll_events, + .perf_ctr = KH40000_ZPI_DLL_PMON_CTR0, + .event_ctl = KH40000_ZPI_DLL_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = 
KH40000_ZPI_DLL_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_zdi_dll = { + .name = "zdi_dll", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_zdi_dll_events, + .perf_ctr = KH40000_ZDI_DLL_PMON_CTR0, + .event_ctl = KH40000_ZDI_DLL_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZDI_DLL_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_pxptrf = { + .name = "pxptrf", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pxptrf_events, + .perf_ctr = KH40000_PXPTRF_PMON_CTR0, + .event_ctl = KH40000_PXPTRF_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PXPTRF_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +enum { + KH40000_PCI_UNCORE_MC0, + KH40000_PCI_UNCORE_MC1, + KH40000_PCI_UNCORE_PCI, + KH40000_PCI_UNCORE_ZPI_DLL, + KH40000_PCI_UNCORE_ZDI_DLL, + KH40000_PCI_UNCORE_PXPTRF, +}; + +static struct zhaoxin_uncore_type *kh40000_pci_uncores[] = { + [KH40000_PCI_UNCORE_MC0] = &kh40000_uncore_mc0, + [KH40000_PCI_UNCORE_MC1] = &kh40000_uncore_mc1, + [KH40000_PCI_UNCORE_PCI] = &kh40000_uncore_pci, + [KH40000_PCI_UNCORE_ZPI_DLL] = &kh40000_uncore_zpi_dll, + [KH40000_PCI_UNCORE_ZDI_DLL] = &kh40000_uncore_zdi_dll, + [KH40000_PCI_UNCORE_PXPTRF] = &kh40000_uncore_pxptrf, + NULL, +}; + +static const struct pci_device_id kh40000_uncore_pci_ids[] = { + { /* MC Channe0/1 */ + PCI_DEVICE(0x1D17, 0x31b2), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_MC0, 0), + }, + + { /* PCIE D2F0 */ + PCI_DEVICE(0x1D17, 0x0717), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 0), + }, + + { /* PCIE D2F1 */ + PCI_DEVICE(0x1D17, 0x0718), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 1), + }, + + { /* PCIE D3F0 */ + PCI_DEVICE(0x1D17, 0x0719), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 2), + }, + + { /* PCIE D3F1 */ + PCI_DEVICE(0x1D17, 0x071A), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 3), + }, + + { /* PCIE D3F2 */ + PCI_DEVICE(0x1D17, 0x071B), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 4), + }, + + { /* PCIE D4F0 */ + PCI_DEVICE(0x1D17, 0x071C), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 5), + }, + + { /* PCIE D4F1 */ + PCI_DEVICE(0x1D17, 0x071D), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 6), + }, + + { /* PCIE D5F0 */ + PCI_DEVICE(0x1D17, 0x071E), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 7), + }, + + { /* PCIE D5F1 */ + PCI_DEVICE(0x1D17, 0x0731), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 8), + }, + + { /* PCIE D5F2 */ + PCI_DEVICE(0x1D17, 0x0732), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 9), + }, + + { /* ZPI_DLL */ + PCI_DEVICE(0x1D17, 0x91c1), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_ZPI_DLL, 0), + }, + + { /* ZDI_DLL */ + PCI_DEVICE(0x1D17, 0x3b03), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_ZDI_DLL, 0), + }, + + { /* PXPTRF */ + PCI_DEVICE(0x1D17, 0x31B4), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PXPTRF, 0), + }, + + { /* end: all zeroes */ } +}; + +static struct pci_driver kh40000_uncore_pci_driver = { + .name = "kh40000_uncore", + .id_table = kh40000_uncore_pci_ids, +}; +/*KH40000 pci ops 
end*/ + +/*KX7000 msr ops start*/ +static unsigned int kx7000_uncore_msr_offsets[] = { + 0x0, 0x13, 0x27, 0x3b, 0x4f, 0x63, 0x77, 0x8b +}; + +static struct attribute *kx7000_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_logic_op0.attr, + &format_attr_logic_op1.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL, +}; + +static struct attribute_group kx7000_uncore_format_group = { + .name = "format", + .attrs = kx7000_uncore_formats_attr, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_mesh_box = { + .name = "mesh", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = KX7000_MESH_MSR_PMON_CTL0, + .perf_ctr = KX7000_MESH_MSR_PMON_CTR0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MESH_MSR_PMON_BLK_CTL, + .msr_offsets = kx7000_uncore_msr_offsets, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_llc_box = { + .name = "llc", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = KX7000_LLC_MSR_PMON_CTL0, + .perf_ctr = KX7000_LLC_MSR_PMON_CTR0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_LLC_MSR_PMON_BLK_CTL, + .msr_offsets = kx7000_uncore_msr_offsets, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_hif_box = { + .name = "hif", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KH40000_HIF_MSR_PMON_CTL0, + .perf_ctr = KH40000_HIF_MSR_PMON_CTR0, + .fixed_ctr = KH40000_HIF_MSR_PMON_FIXED_CTR, + .fixed_ctl = KH40000_HIF_MSR_PMON_FIXED_CTL, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_HIF_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_homestop = { + .name = "homestop", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX7000_HOMESTOP_MSR_PMON_CTL0, + .perf_ctr = KX7000_HOMESTOP_MSR_PMON_CTR0, + .fixed_ctr = KX7000_HOMESTOP_MSR_PMON_FIXED_CTR, + .fixed_ctl = KX7000_HOMESTOP_MSR_PMON_FIXED_CTL, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_HOMESTOP_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_ccd_zdi_pl = { + .name = "ccd_zdi_pl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX7000_CCD_ZDI_PL_MSR_PMON_CTL0, + .perf_ctr = KX7000_CCD_ZDI_PL_MSR_PMON_CTR0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_CCD_ZDI_PL_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_iod_zdi_pl = { + .name = "iod_zdi_pl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX7000_IOD_ZDI_PL_MSR_PMON_CTL0, + .perf_ctr = KX7000_IOD_ZDI_PL_MSR_PMON_CTR0, + .fixed_ctr = KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR, + .fixed_ctl = KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_IOD_ZDI_PL_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *kx7000_msr_uncores[] = { + 
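+ /* + * NULL-terminated list walked by uncore_types_init()/uncore_types_exit(). + * The multi-box llc and mesh types register as uncore_llc_0..7 and + * uncore_mesh_0..7, the single-box types as e.g. uncore_hif (see + * uncore_pmu_register()); counters are then driven through the usual + * perf interface, e.g. "perf stat -e uncore_hif/event=0xNN/" with an + * event code taken from the hardware documentation. + */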
&kx7000_uncore_llc_box, + &kx7000_uncore_mesh_box, + &kx7000_uncore_hif_box, + &kx7000_uncore_homestop, + &kx7000_uncore_ccd_zdi_pl, + &kx7000_uncore_iod_zdi_pl, + NULL, +}; +/*KX7000 msr ops end*/ + +/*KX7000 pci ops start*/ +static unsigned int kx7000_mc_ctr_lh_offsets[] = { + 0xc, 0xe, 0x10, 0x12, 0x14 +}; + +static u64 kx7000_uncore_pci_mc_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_word(pdev, hwc->event_base, (u16 *)&count + 3); + pci_read_config_dword(pdev, hwc->event_base + kx7000_mc_ctr_lh_offsets[hwc->idx], + (u32 *)&count); + + return count; +} + +static struct zhaoxin_uncore_ops kx7000_uncore_pci_mc_ops = { + .init_box = kh40000_uncore_pci_init_box, + .disable_box = kh40000_uncore_pci_disable_box, + .enable_box = kh40000_uncore_pci_enable_box, + .disable_event = kh40000_uncore_pci_disable_event, + .enable_event = kh40000_uncore_pci_enable_event, + .read_counter = kx7000_uncore_pci_mc_read_counter +}; + +static struct zhaoxin_uncore_type kx7000_uncore_mc_a0 = { + .name = "mc_a0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_A0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX7000_MC_A0_CHy_PMON_FIXED_CTL, + .perf_ctr = KX7000_MC_A0_CHy_PMON_CTR0, + .event_ctl = KX7000_MC_A0_CHy_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MC_A0_CHy_PMON_BLK_CTL, + .ops = &kx7000_uncore_pci_mc_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_mc_a1 = { + .name = "mc_a1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_A1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX7000_MC_A1_CHy_PMON_FIXED_CTL, + .perf_ctr = KX7000_MC_A1_CHy_PMON_CTR0, + .event_ctl = KX7000_MC_A1_CHy_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MC_A1_CHy_PMON_BLK_CTL, + .ops = &kx7000_uncore_pci_mc_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_mc_b0 = { + .name = "mc_b0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_B0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX7000_MC_B0_CHy_PMON_FIXED_CTL, + .perf_ctr = KX7000_MC_B0_CHy_PMON_CTR0, + .event_ctl = KX7000_MC_B0_CHy_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MC_B0_CHy_PMON_BLK_CTL, + .ops = &kx7000_uncore_pci_mc_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_mc_b1 = { + .name = "mc_b1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_B1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX7000_MC_B1_CHy_PMON_FIXED_CTL, + .perf_ctr = KX7000_MC_B1_CHy_PMON_CTR0, + .event_ctl = KX7000_MC_B1_CHy_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MC_B1_CHy_PMON_BLK_CTL, + .ops = &kx7000_uncore_pci_mc_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_pci = { + .name = "pci", + .num_counters = 4, + .num_boxes = 17, + .perf_ctr_bits = 48, + .perf_ctr = KH40000_PCI_PMON_CTR0, + .event_ctl = KH40000_PCI_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PCI_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static 
struct zhaoxin_uncore_type kx7000_uncore_pxptrf = { + .name = "pxptrf", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pxptrf_events, + .perf_ctr = KH40000_PXPTRF_PMON_CTR0, + .event_ctl = KH40000_PXPTRF_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PXPTRF_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kx7000_uncore_format_group, +}; + +enum { + KX7000_PCI_UNCORE_MC_A0, + KX7000_PCI_UNCORE_MC_A1, + KX7000_PCI_UNCORE_MC_B0, + KX7000_PCI_UNCORE_MC_B1, + KX7000_PCI_UNCORE_PCI, + KX7000_PCI_UNCORE_PXPTRF, +}; + +static struct zhaoxin_uncore_type *kx7000_pci_uncores[] = { + [KX7000_PCI_UNCORE_MC_A0] = &kx7000_uncore_mc_a0, + [KX7000_PCI_UNCORE_MC_A1] = &kx7000_uncore_mc_a1, + [KX7000_PCI_UNCORE_MC_B0] = &kx7000_uncore_mc_b0, + [KX7000_PCI_UNCORE_MC_B1] = &kx7000_uncore_mc_b1, + [KX7000_PCI_UNCORE_PCI] = &kx7000_uncore_pci, + [KX7000_PCI_UNCORE_PXPTRF] = &kx7000_uncore_pxptrf, + NULL, +}; + +static const struct pci_device_id kx7000_uncore_pci_ids[] = { + { /* MC Channe A0/A1/B0/B1 */ + PCI_DEVICE(0x1D17, 0x31B2), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_MC_A0, 0), + }, + + { /* PCIE D2F0 */ + PCI_DEVICE(0x1D17, 0x0717), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 0), + }, + + { /* PCIE D2F1 */ + PCI_DEVICE(0x1D17, 0x0718), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 1), + }, + + { /* PCIE D2F2 */ + PCI_DEVICE(0x1D17, 0x0733), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 2), + }, + + { /* PCIE D2F3 */ + PCI_DEVICE(0x1D17, 0x0734), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 3), + }, + + { /* PCIE D3F0 */ + PCI_DEVICE(0x1D17, 0x0719), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 4), + }, + + { /* PCIE D3F1 */ + PCI_DEVICE(0x1D17, 0x0735), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 5), + }, + + { /* PCIE D3F2 */ + PCI_DEVICE(0x1D17, 0x0739), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 6), + }, + + { /* PCIE D3F3 */ + PCI_DEVICE(0x1D17, 0x073A), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 7), + }, + + { /* PCIE D4F0 */ + PCI_DEVICE(0x1D17, 0x071B), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 8), + }, + + { /* PCIE D4F1 */ + PCI_DEVICE(0x1D17, 0x071C), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 9), + }, + + { /* PCIE D4F2 */ + PCI_DEVICE(0x1D17, 0x0736), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 10), + }, + + { /* PCIE D4F3 */ + PCI_DEVICE(0x1D17, 0x0737), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 11), + }, + + { /* PCIE D4F4 */ + PCI_DEVICE(0x1D17, 0x0738), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 12), + }, + + { /* PCIE D5F0 */ + PCI_DEVICE(0x1D17, 0x071D), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 13), + }, + + { /* PCIE D5F1 */ + PCI_DEVICE(0x1D17, 0x071E), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 14), + }, + + { /* PCIE D5F2 */ + PCI_DEVICE(0x1D17, 0x0732), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 15), + }, + + { /* PCIE D5F3 */ + PCI_DEVICE(0x1D17, 0x073B), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 16), + }, + + { /* PXPTRF */ + PCI_DEVICE(0x1D17, 0x31B4), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PXPTRF, 0), + }, + + { /* end: all zeroes */ } +}; + +static struct pci_driver kx7000_uncore_pci_driver = { + .name = "kx7000_uncore", + .id_table = kx7000_uncore_pci_ids, +}; +/*KX7000 pci ops 
end*/ + +/* KX7000 mmio ops start */ +static void kx7000_uncore_mmio_init_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = NULL; + unsigned int box_ctl = uncore_mmio_box_ctl(box); + resource_size_t addr; + u32 pci_dword; + int mmio_base_offset; + + pdev = pci_get_device(0x1d17, 0x31b1, pdev); + if (!pdev) + return; + + /* match on the type name: pmu->name carries an "uncore_" prefix */ + if (!strcmp(box->pmu->type->name, "iod_zdi_dl")) + mmio_base_offset = KX7000_IOD_ZDI_DL_MMIO_BASE_OFFSET; + else + mmio_base_offset = KX7000_CCD_ZDI_DL_MMIO_BASE_OFFSET; + + pci_read_config_dword(pdev, mmio_base_offset, &pci_dword); + addr = (u64)(pci_dword & KX7000_ZDI_DL_MMIO_BASE_MASK) << 32; + + pci_read_config_dword(pdev, mmio_base_offset + 4, &pci_dword); + addr |= pci_dword & KX7000_ZDI_DL_MMIO_MEM0_MASK; + + /* drop the reference taken by pci_get_device() */ + pci_dev_put(pdev); + + box->io_addr = ioremap(addr, KX7000_ZDI_DL_MMIO_SIZE); + if (!box->io_addr) + return; + + writel(KH40000_PMON_PCI_BOX_CTL_INT, box->io_addr + box_ctl); +} + +static void kx7000_uncore_mmio_disable_box(struct zhaoxin_uncore_box *box) +{ + u32 config; + unsigned int box_ctl = uncore_mmio_box_ctl(box); + + if (!box->io_addr) + return; + + config = readl(box->io_addr + box_ctl); + config |= KH40000_PMON_BOX_CTL_FRZ; + writel(config, box->io_addr + box_ctl); +} + +static void kx7000_uncore_mmio_enable_box(struct zhaoxin_uncore_box *box) +{ + u32 config; + unsigned int box_ctl = uncore_mmio_box_ctl(box); + + if (!box->io_addr) + return; + + config = readl(box->io_addr + box_ctl); + config &= ~KH40000_PMON_BOX_CTL_FRZ; + writel(config, box->io_addr + box_ctl); +} + +static void kx7000_uncore_mmio_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!box->io_addr) + return; + + writel(hwc->config | KH40000_PMON_CTL_EN, box->io_addr + hwc->config_base); +} + +static void kx7000_uncore_mmio_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!box->io_addr) + return; + + writel(hwc->config, box->io_addr + hwc->config_base); +} + +static void uncore_mmio_exit_box(struct zhaoxin_uncore_box *box) +{ + if (box->io_addr) + iounmap(box->io_addr); +} + +static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + u64 count = 0; + u64 count_low = 0; + u64 count_high = 0; + + if (!box->io_addr) + return 0; + + /* 48-bit counter: bits 47:32 at event_base, bits 31:0 at event_base + 4 */ + count_high = readl(box->io_addr + event->hw.event_base) & 0xffff; + count_low = readl(box->io_addr + event->hw.event_base + 4); + count = (count_high << 32) + count_low; + + return count; +} + +static struct zhaoxin_uncore_ops kx7000_uncore_mmio_ops = { + .init_box = kx7000_uncore_mmio_init_box, + .exit_box = uncore_mmio_exit_box, + .disable_box = kx7000_uncore_mmio_disable_box, + .enable_box = kx7000_uncore_mmio_enable_box, + .disable_event = kx7000_uncore_mmio_disable_event, + .enable_event = kx7000_uncore_mmio_enable_event, + .read_counter = uncore_mmio_read_counter, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_iod_zdi_dl = { + .name = "iod_zdi_dl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .perf_ctr = KX7000_ZDI_DL_MMIO_PMON_CTR0, + .event_ctl = KX7000_ZDI_DL_MMIO_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_ZDI_DL_MMIO_PMON_BLK_CTL, + .ops = &kx7000_uncore_mmio_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_ccd_zdi_dl = { + .name = "ccd_zdi_dl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + 
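+ /* + * Same ZDI_DL register block as iod_zdi_dl above; which base address + * the block is mapped at is chosen by type name in + * kx7000_uncore_mmio_init_box(). + */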
.perf_ctr = KX7000_ZDI_DL_MMIO_PMON_CTR0, + .event_ctl = KX7000_ZDI_DL_MMIO_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_ZDI_DL_MMIO_PMON_BLK_CTL, + .ops = &kx7000_uncore_mmio_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *kx7000_mmio_uncores[] = { + &kx7000_uncore_iod_zdi_dl, + &kx7000_uncore_ccd_zdi_dl, + NULL, +}; + +/*KX7000 mmio ops end*/ +static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer) +{ + struct zhaoxin_uncore_box *box; + struct perf_event *event; + unsigned long flags; + int bit; + + box = container_of(hrtimer, struct zhaoxin_uncore_box, hrtimer); + if (!box->n_active || box->cpu != smp_processor_id()) + return HRTIMER_NORESTART; + /* + * disable local interrupt to prevent uncore_pmu_event_start/stop + * to interrupt the update process + */ + local_irq_save(flags); + + /* + * handle boxes with an active event list as opposed to active + * counters + */ + list_for_each_entry(event, &box->active_list, active_entry) { + uncore_perf_event_update(box, event); + } + + for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX) + uncore_perf_event_update(box, box->events[bit]); + + local_irq_restore(flags); + + hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration)); + return HRTIMER_RESTART; +} + +static void uncore_pmu_start_hrtimer(struct zhaoxin_uncore_box *box) +{ + hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration), HRTIMER_MODE_REL_PINNED); +} + +static void uncore_pmu_cancel_hrtimer(struct zhaoxin_uncore_box *box) +{ + hrtimer_cancel(&box->hrtimer); +} + +static void uncore_pmu_init_hrtimer(struct zhaoxin_uncore_box *box) +{ + hrtimer_setup(&box->hrtimer, uncore_pmu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); +} + +static struct zhaoxin_uncore_box *uncore_alloc_box(struct zhaoxin_uncore_type *type, int node) +{ + int i, size, numshared = type->num_shared_regs; + struct zhaoxin_uncore_box *box; + + size = sizeof(*box) + numshared * sizeof(struct zhaoxin_uncore_extra_reg); + + box = kzalloc_node(size, GFP_KERNEL, node); + if (!box) + return NULL; + + for (i = 0; i < numshared; i++) + raw_spin_lock_init(&box->shared_regs[i].lock); + + uncore_pmu_init_hrtimer(box); + box->cpu = -1; + box->package_id = -1; + box->cluster_id = -1; + box->subnode_id = -1; + + /* set default hrtimer timeout */ + box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL; + + INIT_LIST_HEAD(&box->active_list); + + return box; +} + +static bool is_box_event(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + return &box->pmu->pmu == event->pmu; +} + +static int uncore_collect_events(struct zhaoxin_uncore_box *box, struct perf_event *leader, + bool dogrp) +{ + struct perf_event *event; + int n, max_count; + + max_count = box->pmu->type->num_counters; + if (box->pmu->type->fixed_ctl) + max_count++; + + if (box->n_events >= max_count) + return -EINVAL; + + n = box->n_events; + + if (is_box_event(box, leader)) { + box->event_list[n] = leader; + n++; + } + + if (!dogrp) + return n; + + for_each_sibling_event(event, leader) { + if (!is_box_event(box, event) || + event->state <= PERF_EVENT_STATE_OFF) + continue; + + if (n >= max_count) + return -EINVAL; + + box->event_list[n] = event; + n++; + } + return n; +} + +static struct event_constraint *uncore_get_event_constraint(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct zhaoxin_uncore_type *type = box->pmu->type; + struct event_constraint *c; + + if (type->ops->get_constraint) { + c = type->ops->get_constraint(box, 
event); + if (c) + return c; + } + + if (event->attr.config == UNCORE_FIXED_EVENT) + return &uncore_constraint_fixed; + + if (type->constraints) { + for_each_event_constraint(c, type->constraints) { + if ((event->hw.config & c->cmask) == c->code) + return c; + } + } + + return &type->unconstrainted; +} + +static void uncore_put_event_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + if (box->pmu->type->ops->put_constraint) + box->pmu->type->ops->put_constraint(box, event); +} + +static int uncore_assign_events(struct zhaoxin_uncore_box *box, int assign[], int n) +{ + unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; + struct event_constraint *c; + int i, wmin, wmax, ret = 0; + struct hw_perf_event *hwc; + + bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX); + + for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) { + c = uncore_get_event_constraint(box, box->event_list[i]); + box->event_constraint[i] = c; + wmin = min(wmin, c->weight); + wmax = max(wmax, c->weight); + } + + /* fastpath, try to reuse previous register */ + for (i = 0; i < n; i++) { + hwc = &box->event_list[i]->hw; + c = box->event_constraint[i]; + + /* never assigned */ + if (hwc->idx == -1) + break; + + /* constraint still honored */ + if (!test_bit(hwc->idx, c->idxmsk)) + break; + + /* not already used */ + if (test_bit(hwc->idx, used_mask)) + break; + + __set_bit(hwc->idx, used_mask); + if (assign) + assign[i] = hwc->idx; + } + /* slow path */ + if (i != n) + ret = perf_assign_events(box->event_constraint, n, + wmin, wmax, n, assign); + + if (!assign || ret) { + for (i = 0; i < n; i++) + uncore_put_event_constraint(box, box->event_list[i]); + } + return ret ? -EINVAL : 0; +} + +static void uncore_pmu_event_start(struct perf_event *event, int flags) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + int idx = event->hw.idx; + + + if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX)) + return; + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + event->hw.state = 0; + box->events[idx] = event; + box->n_active++; + __set_bit(idx, box->active_mask); + + local64_set(&event->hw.prev_count, uncore_read_counter(box, event)); + uncore_enable_event(box, event); + + if (box->n_active == 1) + uncore_pmu_start_hrtimer(box); +} + +static void uncore_pmu_event_stop(struct perf_event *event, int flags) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + struct hw_perf_event *hwc = &event->hw; + + if (__test_and_clear_bit(hwc->idx, box->active_mask)) { + uncore_disable_event(box, event); + box->n_active--; + box->events[hwc->idx] = NULL; + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + + if (box->n_active == 0) + uncore_pmu_cancel_hrtimer(box); + } + + if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + /* + * Drain the remaining delta count out of a event + * that we are disabling: + */ + uncore_perf_event_update(box, event); + hwc->state |= PERF_HES_UPTODATE; + } +} + +static int uncore_pmu_event_add(struct perf_event *event, int flags) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + struct hw_perf_event *hwc = &event->hw; + int assign[UNCORE_PMC_IDX_MAX]; + int i, n, ret; + + if (!box) + return -ENODEV; + + ret = n = uncore_collect_events(box, event, false); + if (ret < 0) + return ret; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (!(flags & PERF_EF_START)) + hwc->state |= PERF_HES_ARCH; + + ret = uncore_assign_events(box, assign, n); + if (ret) + return 
ret; + + /* save events moving to new counters */ + for (i = 0; i < box->n_events; i++) { + event = box->event_list[i]; + hwc = &event->hw; + + if (hwc->idx == assign[i] && + hwc->last_tag == box->tags[assign[i]]) + continue; + /* + * Ensure we don't accidentally enable a stopped + * counter simply because we rescheduled. + */ + if (hwc->state & PERF_HES_STOPPED) + hwc->state |= PERF_HES_ARCH; + + uncore_pmu_event_stop(event, PERF_EF_UPDATE); + } + + /* reprogram moved events into new counters */ + for (i = 0; i < n; i++) { + event = box->event_list[i]; + hwc = &event->hw; + + if (hwc->idx != assign[i] || + hwc->last_tag != box->tags[assign[i]]) + uncore_assign_hw_event(box, event, assign[i]); + else if (i < box->n_events) + continue; + + if (hwc->state & PERF_HES_ARCH) + continue; + + uncore_pmu_event_start(event, 0); + } + box->n_events = n; + + return 0; +} + +static void uncore_pmu_event_del(struct perf_event *event, int flags) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + int i; + + uncore_pmu_event_stop(event, PERF_EF_UPDATE); + + for (i = 0; i < box->n_events; i++) { + if (event == box->event_list[i]) { + uncore_put_event_constraint(box, event); + + for (++i; i < box->n_events; i++) + box->event_list[i - 1] = box->event_list[i]; + + --box->n_events; + break; + } + } + + event->hw.idx = -1; + event->hw.last_tag = ~0ULL; +} + +static void uncore_pmu_event_read(struct perf_event *event) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + + uncore_perf_event_update(box, event); +} + +static int uncore_validate_group(struct zhaoxin_uncore_pmu *pmu, struct perf_event *event) +{ + struct perf_event *leader = event->group_leader; + struct zhaoxin_uncore_box *fake_box; + int ret = -EINVAL, n; + + fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE); + if (!fake_box) + return -ENOMEM; + + fake_box->pmu = pmu; + /* + * the event is not yet connected with its + * siblings therefore we must first collect + * existing siblings, then add the new event + * before we can simulate the scheduling + */ + n = uncore_collect_events(fake_box, leader, true); + if (n < 0) + goto out; + + fake_box->n_events = n; + n = uncore_collect_events(fake_box, event, false); + if (n < 0) + goto out; + + fake_box->n_events = n; + + ret = uncore_assign_events(fake_box, NULL, n); +out: + kfree(fake_box); + return ret; +} + +static int uncore_pmu_event_init(struct perf_event *event) +{ + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + struct hw_perf_event *hwc = &event->hw; + int ret; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + pmu = uncore_event_to_pmu(event); + /* no device found for this pmu */ + if (pmu->func_id < 0) + return -ENOENT; + + /* Sampling not supported yet */ + if (hwc->sample_period) + return -EINVAL; + + /* + * Place all uncore events for a particular physical package + * onto a single cpu + */ + if (event->cpu < 0) + return -EINVAL; + box = uncore_pmu_to_box(pmu, event->cpu); + if (!box || box->cpu < 0) + return -EINVAL; + event->cpu = box->cpu; + event->pmu_private = box; + + //event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; + + event->hw.idx = -1; + event->hw.last_tag = ~0ULL; + event->hw.extra_reg.idx = EXTRA_REG_NONE; + event->hw.branch_reg.idx = EXTRA_REG_NONE; + + if (event->attr.config == UNCORE_FIXED_EVENT) { + /* no fixed counter */ + if (!pmu->type->fixed_ctl) + return -EINVAL; + /* + * if there is only one fixed counter, only the first pmu + * can access the fixed counter + */ + if (pmu->type->single_fixed && 
pmu->pmu_idx > 0) + return -EINVAL; + + /* fixed counters have event field hardcoded to zero */ + hwc->config = 0ULL; + } else { + hwc->config = event->attr.config & + (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32)); + if (pmu->type->ops->hw_config) { + ret = pmu->type->ops->hw_config(box, event); + if (ret) + return ret; + } + } + + if (event->group_leader != event) + ret = uncore_validate_group(pmu, event); + else + ret = 0; + + return ret; +} + +static void uncore_pmu_enable(struct pmu *pmu) +{ + struct zhaoxin_uncore_pmu *uncore_pmu; + struct zhaoxin_uncore_box *box; + + uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); + if (!uncore_pmu) + return; + + box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); + if (!box) + return; + + if (uncore_pmu->type->ops->enable_box) + uncore_pmu->type->ops->enable_box(box); +} + +static void uncore_pmu_disable(struct pmu *pmu) +{ + struct zhaoxin_uncore_pmu *uncore_pmu; + struct zhaoxin_uncore_box *box; + + uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); + if (!uncore_pmu) + return; + + box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); + if (!box) + return; + + if (uncore_pmu->type->ops->disable_box) + uncore_pmu->type->ops->disable_box(box); +} + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + cpumask_t *active_mask; + struct pmu *pmu; + struct zhaoxin_uncore_pmu *uncore_pmu; + + pmu = dev_get_drvdata(dev); + uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(uncore_pmu->type->name, "llc")) + active_mask = &uncore_cpu_cluster_mask; + else + active_mask = &uncore_cpu_subnode_mask; + } else { + active_mask = &uncore_cpu_mask; + } + return cpumap_print_to_pagebuf(true, buf, active_mask); +} +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *uncore_pmu_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group uncore_pmu_attr_group = { + .attrs = uncore_pmu_attrs, +}; + +static int uncore_pmu_register(struct zhaoxin_uncore_pmu *pmu) +{ + int ret; + + if (!pmu->type->pmu) { + pmu->pmu = (struct pmu) { + .attr_groups = pmu->type->attr_groups, + .task_ctx_nr = perf_invalid_context, + .pmu_enable = uncore_pmu_enable, + .pmu_disable = uncore_pmu_disable, + .event_init = uncore_pmu_event_init, + .add = uncore_pmu_event_add, + .del = uncore_pmu_event_del, + .start = uncore_pmu_event_start, + .stop = uncore_pmu_event_stop, + .read = uncore_pmu_event_read, + .module = THIS_MODULE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + } else { + pmu->pmu = *pmu->type->pmu; + pmu->pmu.attr_groups = pmu->type->attr_groups; + } + + if (pmu->type->num_boxes == 1) { + if (strlen(pmu->type->name) > 0) + sprintf(pmu->name, "uncore_%s", pmu->type->name); + else + sprintf(pmu->name, "uncore"); + } else { + sprintf(pmu->name, "uncore_%s_%d", pmu->type->name, pmu->pmu_idx); + } + + ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); + if (!ret) + pmu->registered = true; + return ret; +} + +static void uncore_pmu_unregister(struct zhaoxin_uncore_pmu *pmu) +{ + if (!pmu->registered) + return; + perf_pmu_unregister(&pmu->pmu); + pmu->registered = false; +} + +static void uncore_free_boxes(struct zhaoxin_uncore_pmu *pmu) +{ + int i, max; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(pmu->type->name, "llc")) + max = max_clusters; + else + max = max_subnodes; + } else { + max = max_packages; + } + + for (i = 0; i < max; i++) + 
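+ /* boxes[] has one slot per package, cluster or subnode id */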
kfree(pmu->boxes[i]); + kfree(pmu->boxes); +} + +static void uncore_type_exit(struct zhaoxin_uncore_type *type) +{ + struct zhaoxin_uncore_pmu *pmu = type->pmus; + int i; + + if (pmu) { + for (i = 0; i < type->num_boxes; i++, pmu++) { + uncore_pmu_unregister(pmu); + uncore_free_boxes(pmu); + } + kfree(type->pmus); + type->pmus = NULL; + } + kfree(type->events_group); + type->events_group = NULL; +} + +static void uncore_types_exit(struct zhaoxin_uncore_type **types) +{ + for (; *types; types++) + uncore_type_exit(*types); +} + +static int __init uncore_type_init(struct zhaoxin_uncore_type *type, bool setid) +{ + struct zhaoxin_uncore_pmu *pmus; + size_t size; + int i, j; + + pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL); + if (!pmus) + return -ENOMEM; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(type->name, "llc")) + size = max_clusters * sizeof(struct zhaoxin_uncore_box *); + else + size = max_subnodes * sizeof(struct zhaoxin_uncore_box *); + } else { + size = max_packages * sizeof(struct zhaoxin_uncore_box *); + } + + for (i = 0; i < type->num_boxes; i++) { + pmus[i].func_id = setid ? i : -1; + pmus[i].pmu_idx = i; + pmus[i].type = type; + pmus[i].boxes = kzalloc(size, GFP_KERNEL); + if (!pmus[i].boxes) + goto err; + } + + type->pmus = pmus; + type->unconstrainted = (struct event_constraint) + __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, 0, + type->num_counters, 0, 0); + + if (type->event_descs) { + struct { + struct attribute_group group; + struct attribute *attrs[]; + } *attr_group; + for (i = 0; type->event_descs[i].attr.attr.name; i++) + ; + + attr_group = kzalloc(struct_size(attr_group, attrs, i + 1), GFP_KERNEL); + if (!attr_group) + goto err; + + attr_group->group.name = "events"; + attr_group->group.attrs = attr_group->attrs; + + for (j = 0; j < i; j++) + attr_group->attrs[j] = &type->event_descs[j].attr.attr; + + type->events_group = &attr_group->group; + } + + type->pmu_group = &uncore_pmu_attr_group; + + return 0; + +err: + for (i = 0; i < type->num_boxes; i++) + kfree(pmus[i].boxes); + kfree(pmus); + + return -ENOMEM; +} + +static int __init uncore_types_init(struct zhaoxin_uncore_type **types, bool setid) +{ + int ret; + + for (; *types; types++) { + ret = uncore_type_init(*types, setid); + if (ret) + return ret; + } + return 0; +} + +/* + * add a pci uncore device + */ +static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + struct zhaoxin_uncore_box **boxes; + char mc_dev[10]; + int loop = 1; + int i, j = 0; + int subnode_id = 0; + int ret = 0; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) + subnode_id = uncore_pcibus_to_subnodeid(pdev->bus); + + type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + strscpy(mc_dev, "mc0", sizeof("mc0")); + if (!strcmp(type->name, mc_dev)) + loop = 2; + } else if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX7000) { + strscpy(mc_dev, "mc_a0", sizeof("mc_a0")); + if (!strcmp(type->name, mc_dev)) + loop = 4; + } + + boxes = kcalloc(loop, sizeof(struct zhaoxin_uncore_box *), GFP_KERNEL); + if (!boxes) + return -ENOMEM; + + for (i = 0; i < loop; i++) { + type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data) + j]; + + if (!type) + continue; + /* + * for performance monitoring unit with multiple boxes, + * each box has a different function id. 
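+ * Several counter blocks can also share one PCI function: on KH40000 + * the mc0 device carries the mc0 and mc1 types (loop = 2), and on + * KX7000 the mc_a0 device carries mc_a0/mc_a1/mc_b0/mc_b1 (loop = 4), + * so a single probe call sets up all of them.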
+ */ + pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)]; + + if (WARN_ON_ONCE(pmu->boxes[subnode_id] != NULL)) + return -EINVAL; + + box = uncore_alloc_box(type, NUMA_NO_NODE); + if (!box) + return -ENOMEM; + + if (pmu->func_id < 0) + pmu->func_id = pdev->devfn; + else + WARN_ON_ONCE(pmu->func_id != pdev->devfn); + + atomic_inc(&box->refcnt); + box->subnode_id = subnode_id; + box->pci_dev = pdev; + box->pmu = pmu; + uncore_box_init(box); + boxes[i] = box; + + pci_set_drvdata(pdev, boxes); + pmu->boxes[subnode_id] = box; + if (atomic_inc_return(&pmu->activeboxes) > 1) { + if (!strcmp(type->name, mc_dev)) + goto next_loop; + else + return 0; + } + /* First active box registers the pmu */ + ret = uncore_pmu_register(pmu); + if (ret) { + pci_set_drvdata(pdev, NULL); + pmu->boxes[subnode_id] = NULL; + uncore_box_exit(box); + kfree(box); + } +next_loop: + j++; + } + + return ret; +} + +static void uncore_pci_remove(struct pci_dev *pdev) +{ + struct zhaoxin_uncore_box **boxes; + struct zhaoxin_uncore_box *box; + struct zhaoxin_uncore_pmu *pmu; + int subnode_id = 0; + int i = 0; + int loop = 1; + + boxes = pci_get_drvdata(pdev); + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(boxes[0]->pmu->type->name, "mc0")) + loop = 2; + else + loop = 1; + } else if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX7000) { + if (!strcmp(boxes[0]->pmu->type->name, "mc_a0")) + loop = 4; + else + loop = 1; + } + + for (i = 0; i < loop; i++) { + box = boxes[i]; + pmu = box->pmu; + if (WARN_ON_ONCE(subnode_id != box->subnode_id)) + return; + + pci_set_drvdata(pdev, NULL); + pmu->boxes[subnode_id] = NULL; + if (atomic_dec_return(&pmu->activeboxes) == 0) + uncore_pmu_unregister(pmu); + + uncore_box_exit(box); + kfree(box); + } + + kfree(boxes); +} + +static int __init uncore_pci_init(void) +{ + int ret; + + ret = uncore_types_init(uncore_pci_uncores, false); + if (ret) + goto errtype; + + uncore_pci_driver->probe = uncore_pci_probe; + uncore_pci_driver->remove = uncore_pci_remove; + + ret = pci_register_driver(uncore_pci_driver); + if (ret) + goto errtype; + + pcidrv_registered = true; + return 0; + +errtype: + uncore_types_exit(uncore_pci_uncores); + uncore_free_pcibus_map(); + uncore_pci_uncores = empty_uncore; + return ret; +} + +static void uncore_pci_exit(void) +{ + if (pcidrv_registered) { + pcidrv_registered = false; + pci_unregister_driver(uncore_pci_driver); + uncore_types_exit(uncore_pci_uncores); + uncore_free_pcibus_map(); + } +} + +static void uncore_change_type_ctx(struct zhaoxin_uncore_type *type, int old_cpu, int new_cpu) +{ + struct zhaoxin_uncore_pmu *pmu = type->pmus; + struct zhaoxin_uncore_box *box; + int i, package_id, cluster_id = 0, subnode_id = 0; + + package_id = zx_topology_package_id(old_cpu < 0 ? new_cpu : old_cpu); + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + cluster_id = zx_topology_cluster_id(old_cpu < 0 ? new_cpu : old_cpu); + subnode_id = zx_topology_subnode_id(old_cpu < 0 ? 
new_cpu : old_cpu); + } + + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(type->name, "llc")) { + box = pmu->boxes[cluster_id]; + if (!box) + continue; + } else { + box = pmu->boxes[subnode_id]; + if (!box) + continue; + } + } else { + box = pmu->boxes[package_id]; + if (!box) + continue; + } + + if (old_cpu < 0) { + WARN_ON_ONCE(box->cpu != -1); + box->cpu = new_cpu; + continue; + } + WARN_ON_ONCE(box->cpu != old_cpu); + box->cpu = -1; + if (new_cpu < 0) + continue; + + uncore_pmu_cancel_hrtimer(box); + perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu); + box->cpu = new_cpu; + } +} + +static void uncore_change_context(struct zhaoxin_uncore_type **uncores, int old_cpu, int new_cpu) +{ + for (; *uncores; uncores++) + uncore_change_type_ctx(*uncores, old_cpu, new_cpu); +} + +static void uncore_box_unref(struct zhaoxin_uncore_type **types, int id) +{ + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + int i; + + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + box = pmu->boxes[id]; + if (box && atomic_dec_return(&box->refcnt) == 0) + uncore_box_exit(box); + } + } +} + +struct zhaoxin_uncore_type *uncore_msr_cluster_uncores[] = { + &kh40000_uncore_llc_box, + NULL, +}; + +struct zhaoxin_uncore_type *uncore_msr_subnode_uncores[] = { + &kh40000_uncore_hif_box, + &kh40000_uncore_zzi_box, + NULL, +}; + +struct zhaoxin_uncore_type *uncore_pci_subnode_uncores[] = { + &kh40000_uncore_mc0, + &kh40000_uncore_mc1, + &kh40000_uncore_pci, + &kh40000_uncore_zpi_dll, + &kh40000_uncore_zdi_dll, + &kh40000_uncore_pxptrf, + NULL, +}; + +static void kx5000_event_cpu_offline(int cpu) +{ + int package, target; + + /* Check if exiting cpu is used for collecting uncore events */ + + if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) + goto unref_cpu_mask; + + /* Find a new cpu to collect uncore events */ + target = cpumask_any_but(topology_core_cpumask(cpu), cpu); + + /* Migrate uncore events to the new target */ + if (target < nr_cpu_ids) + cpumask_set_cpu(target, &uncore_cpu_mask); + else + target = -1; + + uncore_change_context(uncore_msr_uncores, cpu, target); + uncore_change_context(uncore_mmio_uncores, cpu, target); + uncore_change_context(uncore_pci_uncores, cpu, target); + +unref_cpu_mask: + /*clear the references*/ + package = zx_topology_package_id(cpu); + uncore_box_unref(uncore_msr_uncores, package); + uncore_box_unref(uncore_mmio_uncores, package); +} + +static void kh40000_event_cpu_offline(int cpu) +{ + int cluster_target, subnode_target; + int cluster_id, subnode_id; + + cluster_id = zx_topology_cluster_id(cpu); + subnode_id = zx_topology_subnode_id(cpu); + + /* Check if exiting cpu is used for collecting uncore events */ + + if (cpumask_test_and_clear_cpu(cpu, &uncore_cpu_cluster_mask)) { + cluster_target = cpumask_any_but(topology_cluster_core_cpumask(cpu), cpu); + if (cluster_target < nr_cpu_ids) + cpumask_set_cpu(cluster_target, &uncore_cpu_cluster_mask); + else + cluster_target = -1; + uncore_change_context(uncore_msr_cluster_uncores, cpu, cluster_target); + } else { + uncore_box_unref(uncore_msr_cluster_uncores, cluster_id); + } + + if (cpumask_test_and_clear_cpu(cpu, &uncore_cpu_subnode_mask)) { + subnode_target = cpumask_any_but(topology_subnode_core_cpumask(cpu), cpu); + if (subnode_target < nr_cpu_ids) + cpumask_set_cpu(subnode_target, &uncore_cpu_subnode_mask); + else + subnode_target = -1; + 
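+ /* + * Hand the subnode-scope MSR and PCI uncores to the new owner, or + * park them (subnode_target == -1) when the subnode has no CPU left. + */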
uncore_change_context(uncore_msr_subnode_uncores, cpu, subnode_target); + uncore_change_context(uncore_pci_subnode_uncores, cpu, subnode_target); + } else { + uncore_box_unref(uncore_msr_subnode_uncores, subnode_id); + } + +} + +static int uncore_event_cpu_offline(unsigned int cpu) +{ + unsigned int x86_model; + + x86_model = boot_cpu_data.x86_model; + + if (x86_model == ZHAOXIN_FAM7_KH40000) + kh40000_event_cpu_offline(cpu); + else + kx5000_event_cpu_offline(cpu); + + return 0; +} + +static int kx5000_allocate_boxes(struct zhaoxin_uncore_type **types, unsigned int id, + unsigned int cpu) +{ + struct zhaoxin_uncore_box *box, *tmp; + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + LIST_HEAD(allocated); + int i; + + /* Try to allocate all required boxes */ + for (; *types; types++) { + type = *types; + pmu = type->pmus; + + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (pmu->boxes[id]) + continue; + box = uncore_alloc_box(type, cpu_to_node(cpu)); + if (!box) + goto cleanup; + box->pmu = pmu; + box->package_id = id; + list_add(&box->active_list, &allocated); + } + } + + /* Install them in the pmus */ + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + box->pmu->boxes[id] = box; + } + return 0; + +cleanup: + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + kfree(box); + } + return -ENOMEM; +} + +static int kh40000_allocate_boxes(struct zhaoxin_uncore_type **types, unsigned int id, + unsigned int cpu) +{ + struct zhaoxin_uncore_box *box, *tmp; + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + LIST_HEAD(allocated); + int i; + + /* Try to allocate all required boxes */ + for (; *types; types++) { + type = *types; + pmu = type->pmus; + + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (pmu->boxes[id]) + continue; + box = uncore_alloc_box(type, cpu_to_node(cpu)); + if (!box) + goto cleanup; + box->pmu = pmu; + if (!strcmp(type->name, "llc")) + box->cluster_id = id; + else + box->subnode_id = id; + list_add(&box->active_list, &allocated); + } + } + /* Install them in the pmus */ + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + box->pmu->boxes[id] = box; + } + return 0; + +cleanup: + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + kfree(box); + } + return -ENOMEM; +} + +static int uncore_box_ref(struct zhaoxin_uncore_type **types, int id, unsigned int cpu) +{ + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + int i, ret = 0; + + int x86_model; + + x86_model = boot_cpu_data.x86_model; + + if (x86_model == ZHAOXIN_FAM7_KH40000) + ret = kh40000_allocate_boxes(types, id, cpu); + else + ret = kx5000_allocate_boxes(types, id, cpu); + + if (ret) + return ret; + + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + box = pmu->boxes[id]; + if (box && atomic_inc_return(&box->refcnt) == 1) + uncore_box_init(box); + } + } + return 0; +} + +static int kx5000_event_cpu_online(unsigned int cpu) +{ + int package, target, msr_ret, mmio_ret; + + package = zx_topology_package_id(cpu); + msr_ret = uncore_box_ref(uncore_msr_uncores, package, cpu); + mmio_ret = uncore_box_ref(uncore_mmio_uncores, package, cpu); + if (msr_ret && mmio_ret) + return -ENOMEM; + + /* + * Check if there is an online cpu in the package + * which collects uncore events 
already. + */ + target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu)); + if (target < nr_cpu_ids) + return 0; + + cpumask_set_cpu(cpu, &uncore_cpu_mask); + + if (!msr_ret) + uncore_change_context(uncore_msr_uncores, -1, cpu); + if (!mmio_ret) + uncore_change_context(uncore_mmio_uncores, -1, cpu); + uncore_change_context(uncore_pci_uncores, -1, cpu); + + return 0; +} + +static int kh40000_event_cpu_online(unsigned int cpu) +{ + int cluster_target, subnode_target; + int cluster_id, subnode_id; + int cluster_ret, subnode_ret; + + cluster_id = zx_topology_cluster_id(cpu); + subnode_id = zx_topology_subnode_id(cpu); + + cluster_ret = uncore_box_ref(uncore_msr_cluster_uncores, cluster_id, cpu); + subnode_ret = uncore_box_ref(uncore_msr_subnode_uncores, subnode_id, cpu); + + if (cluster_ret && subnode_ret) + return -ENOMEM; + + /* + * Check if there is an online cpu in the cluster or subnode + * which collects uncore events already. + */ + cluster_target = + cpumask_any_and(&uncore_cpu_cluster_mask, topology_cluster_core_cpumask(cpu)); + subnode_target = + cpumask_any_and(&uncore_cpu_subnode_mask, topology_subnode_core_cpumask(cpu)); + + if (cluster_target < nr_cpu_ids && subnode_target < nr_cpu_ids) + return 0; + + if (!cluster_ret && cluster_target >= nr_cpu_ids) { + cpumask_set_cpu(cpu, &uncore_cpu_cluster_mask); + uncore_change_context(uncore_msr_cluster_uncores, -1, cpu); + } + + if (!subnode_ret && subnode_target >= nr_cpu_ids) { + cpumask_set_cpu(cpu, &uncore_cpu_subnode_mask); + uncore_change_context(uncore_msr_subnode_uncores, -1, cpu); + uncore_change_context(uncore_pci_subnode_uncores, -1, cpu); + } + + return 0; +} + +static int uncore_event_cpu_online(unsigned int cpu) +{ + int x86_model; + int kx5000_ret = 0, kh40000_ret = 0; + + x86_model = boot_cpu_data.x86_model; + + if (x86_model == ZHAOXIN_FAM7_KH40000) + kh40000_ret = kh40000_event_cpu_online(cpu); + else + kx5000_ret = kx5000_event_cpu_online(cpu); + + if (kx5000_ret || kh40000_ret) + return -ENOMEM; + + return 0; +} + +static int __init type_pmu_register(struct zhaoxin_uncore_type *type) +{ + int i, ret; + + for (i = 0; i < type->num_boxes; i++) { + ret = uncore_pmu_register(&type->pmus[i]); + if (ret) + return ret; + } + return 0; +} + +static int __init uncore_msr_pmus_register(void) +{ + struct zhaoxin_uncore_type **types = uncore_msr_uncores; + int ret; + + for (; *types; types++) { + ret = type_pmu_register(*types); + if (ret) + return ret; + } + return 0; +} + +static int __init uncore_cpu_init(void) +{ + int ret; + + ret = uncore_types_init(uncore_msr_uncores, true); + if (ret) + goto err; + + ret = uncore_msr_pmus_register(); + if (ret) + goto err; + return 0; +err: + uncore_types_exit(uncore_msr_uncores); + uncore_msr_uncores = empty_uncore; + return ret; +} + +static int __init uncore_mmio_init(void) +{ + struct zhaoxin_uncore_type **types = uncore_mmio_uncores; + int ret; + + ret = uncore_types_init(types, true); + if (ret) + goto err; + + for (; *types; types++) { + ret = type_pmu_register(*types); + if (ret) + goto err; + } + return 0; +err: + uncore_types_exit(uncore_mmio_uncores); + uncore_mmio_uncores = empty_uncore; + return ret; +} + +struct zhaoxin_uncore_init_fun { + void (*cpu_init)(void); + int (*pci_init)(void); + void (*mmio_init)(void); +}; + +static void kx5000_uncore_cpu_init(void) +{ + uncore_msr_uncores = kx5000_msr_uncores; +} + +static const struct zhaoxin_uncore_init_fun kx5000_uncore_init __initconst = { + .cpu_init = kx5000_uncore_cpu_init, +}; + +static void 
kh40000_uncore_cpu_init(void) +{ + uncore_msr_uncores = kh40000_msr_uncores; +} + +static int kh40000_uncore_pci_init(void) +{ + int ret = kh40000_pci2node_map_init(); /* build the pci_bus to node mapping */ + + if (ret) + return ret; + uncore_pci_uncores = kh40000_pci_uncores; + uncore_pci_driver = &kh40000_uncore_pci_driver; + return 0; +} + +static const struct zhaoxin_uncore_init_fun kh40000_uncore_init __initconst = { + .cpu_init = kh40000_uncore_cpu_init, + .pci_init = kh40000_uncore_pci_init, +}; + +static void kx7000_uncore_cpu_init(void) +{ + u64 val; + int cpu; + + uncore_msr_uncores = kx7000_msr_uncores; + + /* clear bit 16 of MSR 0x1877 so that HIF can work normally */ + for_each_present_cpu(cpu) { + rdmsrq_on_cpu(cpu, 0x1877, &val); + val &= ~BIT_ULL(16); + wrmsrq_on_cpu(cpu, 0x1877, val); + } +} + +static int kx7000_uncore_pci_init(void) +{ + uncore_pci_uncores = kx7000_pci_uncores; + uncore_pci_driver = &kx7000_uncore_pci_driver; + + return 0; +} + +static void kx7000_uncore_mmio_init(void) +{ + uncore_mmio_uncores = kx7000_mmio_uncores; +} + +static const struct zhaoxin_uncore_init_fun kx7000_uncore_init __initconst = { + .cpu_init = kx7000_uncore_cpu_init, + .pci_init = kx7000_uncore_pci_init, + .mmio_init = kx7000_uncore_mmio_init, +}; + +static const struct x86_cpu_id zhaoxin_uncore_match[] __initconst = { + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX5000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX6000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KH40000, &kh40000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX7000, &kx7000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX5000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX6000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KH40000, &kh40000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX7000, &kx7000_uncore_init), + {}, +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_uncore_match); + +static int __init zhaoxin_uncore_init(void) +{ + const struct x86_cpu_id *id; + const struct zhaoxin_uncore_init_fun *uncore_init; + int pret = 0, cret = 0, mret = 0, ret; + + id = x86_match_cpu(zhaoxin_uncore_match); + if (!id) + return -ENODEV; + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return -ENODEV; + + pr_info("Zhaoxin uncore PMU: initializing\n"); + + get_topology_number(); + get_topology_info(); + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + zx_gen_core_map(); + get_pcibus_limit(); + } + + uncore_init = (const struct zhaoxin_uncore_init_fun *)id->driver_data; + + if (uncore_init->pci_init) { + pret = uncore_init->pci_init(); + if (!pret) + pret = uncore_pci_init(); + } + + if (uncore_init->cpu_init) { + uncore_init->cpu_init(); + cret = uncore_cpu_init(); + } + + if (uncore_init->mmio_init) { + uncore_init->mmio_init(); + mret = uncore_mmio_init(); + } + + if (cret && pret && mret) + return -ENODEV; + + ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, + "perf/x86/zhaoxin/uncore:online", + uncore_event_cpu_online, + uncore_event_cpu_offline); + if (ret) + goto err; + pr_info("Zhaoxin uncore PMU: init succeeded\n"); + + return 0; + +err: + uncore_types_exit(uncore_msr_uncores); + uncore_types_exit(uncore_mmio_uncores); + uncore_pci_exit(); + pr_err("Zhaoxin uncore PMU: init failed\n"); + + return ret; +} +module_init(zhaoxin_uncore_init); + +static void __exit zhaoxin_uncore_exit(void) +{ + 
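+ /* + * Reverse of zhaoxin_uncore_init(): remove the hotplug callbacks first + * so no CPU can touch the boxes while the PMU types and the PCI driver + * are torn down. + */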
cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE); + uncore_types_exit(uncore_msr_uncores); + uncore_types_exit(uncore_mmio_uncores); + uncore_pci_exit(); +} +module_exit(zhaoxin_uncore_exit); diff --git a/arch/x86/events/zhaoxin/uncore.h b/arch/x86/events/zhaoxin/uncore.h new file mode 100644 index 0000000000000..d17c7d26944b9 --- /dev/null +++ b/arch/x86/events/zhaoxin/uncore.h @@ -0,0 +1,354 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#include +#include +#include +#include + +#include +#include "../perf_event.h" + +#define ZHAOXIN_FAM7_KX5000 0x1b +#define ZHAOXIN_FAM7_KX6000 0x3b +#define ZHAOXIN_FAM7_KH40000 0x5b +#define ZHAOXIN_FAM7_KX7000 0x6b + +#define UNCORE_PMU_NAME_LEN 32 +#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC) + +#define UNCORE_FIXED_EVENT 0xff +#define UNCORE_PMC_IDX_MAX_GENERIC 4 +#define UNCORE_PMC_IDX_MAX_FIXED 1 +#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC + +#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1) + +#define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx) +#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) +#define UNCORE_PCI_DEV_IDX(data) (data & 0xff) + +struct zhaoxin_uncore_ops; +struct zhaoxin_uncore_pmu; +struct zhaoxin_uncore_box; +struct uncore_event_desc; + +struct zhaoxin_uncore_type { + const char *name; + int num_counters; + int num_boxes; + int perf_ctr_bits; + int fixed_ctr_bits; + unsigned int perf_ctr; + unsigned int event_ctl; + unsigned int event_mask; + unsigned int event_mask_ext; + unsigned int fixed_ctr; + unsigned int fixed_ctl; + unsigned int box_ctl; + union { + unsigned int msr_offset; + unsigned int mmio_offset; + }; + unsigned int num_shared_regs:8; + unsigned int single_fixed:1; + unsigned int pair_ctr_ctl:1; + unsigned int *msr_offsets; + struct event_constraint unconstrainted; + struct event_constraint *constraints; + struct zhaoxin_uncore_pmu *pmus; + struct zhaoxin_uncore_ops *ops; + struct uncore_event_desc *event_descs; + const struct attribute_group *attr_groups[4]; + struct pmu *pmu; /* for custom pmu ops */ +}; + +#define pmu_group attr_groups[0] +#define format_group attr_groups[1] +#define events_group attr_groups[2] + +struct zhaoxin_uncore_ops { + void (*init_box)(struct zhaoxin_uncore_box *box); + void (*exit_box)(struct zhaoxin_uncore_box *box); + void (*disable_box)(struct zhaoxin_uncore_box *box); + void (*enable_box)(struct zhaoxin_uncore_box *box); + void (*disable_event)(struct zhaoxin_uncore_box *box, struct perf_event *event); + void (*enable_event)(struct zhaoxin_uncore_box *box, struct perf_event *event); + u64 (*read_counter)(struct zhaoxin_uncore_box *box, struct perf_event *event); + int (*hw_config)(struct zhaoxin_uncore_box *box, struct perf_event *event); + struct event_constraint *(*get_constraint)(struct zhaoxin_uncore_box *box, + struct perf_event *event); + void (*put_constraint)(struct zhaoxin_uncore_box *box, struct perf_event *event); +}; + +struct zhaoxin_uncore_pmu { + struct pmu pmu; + char name[UNCORE_PMU_NAME_LEN]; + int pmu_idx; + int func_id; + bool registered; + atomic_t activeboxes; + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_box **boxes; +}; + +struct zhaoxin_uncore_extra_reg { + raw_spinlock_t lock; + u64 config, config1, config2; + atomic_t ref; +}; + +struct zhaoxin_uncore_box { + int pci_phys_id; + int package_id; /*Package ID */ + int cluster_id; + int subnode_id; + int n_active; /* number of active events */ + int n_events; + int cpu; /* cpu to collect events */ + unsigned long flags; + atomic_t refcnt; + struct 
perf_event *events[UNCORE_PMC_IDX_MAX]; + struct perf_event *event_list[UNCORE_PMC_IDX_MAX]; + struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX]; + unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; + u64 tags[UNCORE_PMC_IDX_MAX]; + struct pci_dev *pci_dev; + struct zhaoxin_uncore_pmu *pmu; + u64 hrtimer_duration; /* hrtimer timeout for this box */ + struct hrtimer hrtimer; + struct list_head list; + struct list_head active_list; + void __iomem *io_addr; + struct zhaoxin_uncore_extra_reg shared_regs[]; +}; + +#define UNCORE_BOX_FLAG_INITIATED 0 + +struct uncore_event_desc { + struct device_attribute attr; + const char *config; +}; + +struct hw_info { + u64 config_info; + u64 active_state; +}; + +ssize_t zx_uncore_event_show(struct device *dev, struct device_attribute *attr, char *buf); + +#define ZHAOXIN_UNCORE_EVENT_DESC(_name, _config) \ +{ \ + .attr = __ATTR(_name, 0444, zx_uncore_event_show, NULL), \ + .config = _config, \ +} + +#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ +static ssize_t __uncore_##_var##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ +static struct device_attribute format_attr_##_var = \ + __ATTR(_name, 0444, __uncore_##_var##_show, NULL) + +static inline bool uncore_pmc_fixed(int idx) +{ + return idx == UNCORE_PMC_IDX_FIXED; +} + +static inline unsigned int uncore_mmio_box_ctl(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->box_ctl + box->pmu->type->mmio_offset * box->pmu->pmu_idx; +} + +static inline unsigned int uncore_pci_box_ctl(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->box_ctl; +} + +static inline unsigned int uncore_pci_fixed_ctl(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctl; +} + +static inline unsigned int uncore_pci_fixed_ctr(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctr; +} + +static inline unsigned int uncore_pci_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ + return idx * 4 + box->pmu->type->event_ctl; +} + +static inline unsigned int uncore_pci_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ + if (!strncmp(box->pmu->type->name, "mc_", 3)) + return idx * 2 + box->pmu->type->perf_ctr; + else + return idx * 8 + box->pmu->type->perf_ctr; +} + +static inline unsigned int uncore_msr_box_offset(struct zhaoxin_uncore_box *box) +{ + struct zhaoxin_uncore_pmu *pmu = box->pmu; + + return pmu->type->msr_offsets ? + pmu->type->msr_offsets[pmu->pmu_idx] : + pmu->type->msr_offset * pmu->pmu_idx; +} + +static inline unsigned int uncore_msr_box_ctl(struct zhaoxin_uncore_box *box) +{ + if (!box->pmu->type->box_ctl) + return 0; + return box->pmu->type->box_ctl + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_fixed_ctl(struct zhaoxin_uncore_box *box) +{ + if (!box->pmu->type->fixed_ctl) + return 0; + return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_fixed_ctr(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ + return box->pmu->type->event_ctl + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ + return box->pmu->type->perf_ctr + + (box->pmu->type->pair_ctr_ctl ? 
2 * idx : idx) + + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_fixed_ctl(struct zhaoxin_uncore_box *box) +{ + if (box->pci_dev) + return uncore_pci_fixed_ctl(box); + else + return uncore_msr_fixed_ctl(box); +} + +static inline unsigned int uncore_fixed_ctr(struct zhaoxin_uncore_box *box) +{ + if (box->pci_dev) + return uncore_pci_fixed_ctr(box); + else + return uncore_msr_fixed_ctr(box); +} + +static inline unsigned int uncore_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ + if (box->pci_dev || box->io_addr) + return uncore_pci_event_ctl(box, idx); + else + return uncore_msr_event_ctl(box, idx); +} + +static inline unsigned int uncore_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ + if (box->pci_dev || box->io_addr) + return uncore_pci_perf_ctr(box, idx); + else + return uncore_msr_perf_ctr(box, idx); +} + +static inline int uncore_perf_ctr_bits(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->perf_ctr_bits; +} + +static inline int uncore_fixed_ctr_bits(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctr_bits; +} + +static inline int uncore_num_counters(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->num_counters; +} + +static inline void uncore_disable_box(struct zhaoxin_uncore_box *box) +{ + if (box->pmu->type->ops->disable_box) + box->pmu->type->ops->disable_box(box); +} + +static inline void uncore_enable_box(struct zhaoxin_uncore_box *box) +{ + if (box->pmu->type->ops->enable_box) + box->pmu->type->ops->enable_box(box); +} + +static inline void uncore_disable_event(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + box->pmu->type->ops->disable_event(box, event); +} + +static inline void uncore_enable_event(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + box->pmu->type->ops->enable_event(box, event); +} + +static inline u64 uncore_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + return box->pmu->type->ops->read_counter(box, event); +} + +static inline void uncore_box_init(struct zhaoxin_uncore_box *box) +{ + if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { + if (box->pmu->type->ops->init_box) + box->pmu->type->ops->init_box(box); + } +} + +static inline void uncore_box_exit(struct zhaoxin_uncore_box *box) +{ + if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { + if (box->pmu->type->ops->exit_box) + box->pmu->type->ops->exit_box(box); + } +} + +static inline bool uncore_box_is_fake(struct zhaoxin_uncore_box *box) +{ + return (box->package_id < 0); +} + +static inline struct zhaoxin_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) +{ + return container_of(event->pmu, struct zhaoxin_uncore_pmu, pmu); +} + +static inline struct zhaoxin_uncore_box *uncore_event_to_box(struct perf_event *event) +{ + return event->pmu_private; +} + +static struct zhaoxin_uncore_box *uncore_pmu_to_box(struct zhaoxin_uncore_pmu *pmu, int cpu); +static u64 uncore_msr_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event); +static void uncore_mmio_exit_box(struct zhaoxin_uncore_box *box); +static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event); +static void uncore_pmu_start_hrtimer(struct zhaoxin_uncore_box *box); +static void uncore_pmu_cancel_hrtimer(struct zhaoxin_uncore_box *box); +static void uncore_pmu_event_start(struct perf_event *event, int flags); +static void uncore_pmu_event_stop(struct perf_event *event, int flags); +static int uncore_pmu_event_add(struct perf_event *event, int 
flags); +static void uncore_pmu_event_del(struct perf_event *event, int flags); +static void uncore_pmu_event_read(struct perf_event *event); +static void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event *event); +struct event_constraint *uncore_get_constraint(struct zhaoxin_uncore_box *box, + struct perf_event *event); +void uncore_put_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event); +u64 uncore_shared_reg_config(struct zhaoxin_uncore_box *box, int idx); diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index a03aa6f999d15..bf280255266e9 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -111,7 +111,8 @@ static inline bool arch_has_acpi_pdc(void) { struct cpuinfo_x86 *c = &cpu_data(0); return (c->x86_vendor == X86_VENDOR_INTEL || - c->x86_vendor == X86_VENDOR_CENTAUR); + c->x86_vendor == X86_VENDOR_CENTAUR || + c->x86_vendor == X86_VENDOR_ZHAOXIN); } static inline void arch_acpi_set_proc_cap_bits(u32 *cap) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 4091a776e37aa..8e7ed0e5246b1 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -138,6 +138,8 @@ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ #define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ #define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ +#define X86_FEATURE_CCS ( 5*32+ 4) /* "sm3 sm4" present */ +#define X86_FEATURE_CCS_EN ( 5*32+ 5) /* "sm3_en sm4_en" enabled */ #define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ #define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ #define X86_FEATURE_ACE2 ( 5*32+ 8) /* "ace2" Advanced Cryptography Engine v2 */ @@ -146,6 +148,23 @@ #define X86_FEATURE_PHE_EN ( 5*32+11) /* "phe_en" PHE enabled */ #define X86_FEATURE_PMM ( 5*32+12) /* "pmm" PadLock Montgomery Multiplier */ #define X86_FEATURE_PMM_EN ( 5*32+13) /* "pmm_en" PMM enabled */ +#define X86_FEATURE_ZX_FMA ( 5*32+15) /* FMA supported */ +#define X86_FEATURE_PARALLAX ( 5*32+16) /* Adaptive P-state control present */ +#define X86_FEATURE_PARALLAX_EN ( 5*32+17) /* Adaptive P-state control enabled */ +#define X86_FEATURE_OVERSTRESS ( 5*32+18) /* Overstress for auto overclock present */ +#define X86_FEATURE_OVERSTRESS_EN ( 5*32+19) /* Overstress for auto overclock enabled */ +#define X86_FEATURE_TM3 ( 5*32+20) /* Thermal Monitor 3 present */ +#define X86_FEATURE_TM3_EN ( 5*32+21) /* Thermal Monitor 3 enabled */ +#define X86_FEATURE_RNG2 ( 5*32+22) /* 2nd generation of RNG present */ +#define X86_FEATURE_RNG2_EN ( 5*32+23) /* 2nd generation of RNG enabled */ +#define X86_FEATURE_SEM ( 5*32+24) /* SME feature present */ +#define X86_FEATURE_PHE2 ( 5*32+25) /* SHA384 and SHA 512 present */ +#define X86_FEATURE_PHE2_EN ( 5*32+26) /* SHA384 and SHA 512 enabled */ +#define X86_FEATURE_XMODX ( 5*32+27) /* "rsa" XMODEXP and MONTMUL2 are present */ +#define X86_FEATURE_XMODX_EN ( 5*32+28) /* "rsa_en" XMODEXP and MONTMUL2 are enabled */ +#define X86_FEATURE_VEX ( 5*32+29) /* VEX instructions are present */ +#define X86_FEATURE_VEX_EN ( 5*32+30) /* VEX instructions are enabled */ +#define X86_FEATURE_STK ( 5*32+31) /* STK are present */ /* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */ #define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* "lahf_lm" LAHF/SAHF in long mode */ diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index 
0281703da5e26..97b19ce2ba52e 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -233,7 +233,9 @@ static int __init ffh_cstate_init(void) if (c->x86_vendor != X86_VENDOR_INTEL && c->x86_vendor != X86_VENDOR_AMD && - c->x86_vendor != X86_VENDOR_HYGON) + c->x86_vendor != X86_VENDOR_HYGON && + c->x86_vendor != X86_VENDOR_CENTAUR && + c->x86_vendor != X86_VENDOR_ZHAOXIN) return -1; cpu_cstate_entry = alloc_percpu(struct cstate_entry); diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 460e90a1a0b17..c26582d1b0840 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -520,6 +520,7 @@ bool mce_usable_address(struct mce *m) return amd_mce_usable_address(m); case X86_VENDOR_INTEL: + case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: return intel_mce_usable_address(m); @@ -537,6 +538,7 @@ bool mce_is_memory_error(struct mce *m) return amd_mce_is_memory_error(m); case X86_VENDOR_INTEL: + case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: /* * Intel SDM Volume 3B - 15.9.2 Compound Error Codes @@ -1310,7 +1312,8 @@ static noinstr bool mce_check_crashing_cpu(void) mcgstatus = native_rdmsrq(MSR_IA32_MCG_STATUS); - if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { if (mcgstatus & MCG_STATUS_LMCES) return false; } @@ -1590,6 +1593,7 @@ noinstr void do_machine_check(struct pt_regs *regs) * on Intel, Zhaoxin only. */ if (m->cpuvendor == X86_VENDOR_INTEL || + m->cpuvendor == X86_VENDOR_CENTAUR || m->cpuvendor == X86_VENDOR_ZHAOXIN) lmce = m->mcgstatus & MCG_STATUS_LMCES; @@ -1955,6 +1959,7 @@ static void zhaoxin_apply_global_quirks(struct cpuinfo_x86 *c) if (c->x86 > 6 || (c->x86_model == 0x19 || c->x86_model == 0x1f)) { if (mca_cfg.monarch_timeout < 0) mca_cfg.monarch_timeout = USEC_PER_SEC; + mca_cfg.bios_cmci_threshold = 1; } } @@ -1979,21 +1984,6 @@ static bool __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) return false; } -static void mce_centaur_feature_init(struct cpuinfo_x86 *c) -{ - struct mca_config *cfg = &mca_cfg; - - /* - * All newer Centaur CPUs support MCE broadcasting. Enable - * synchronization with a one second timeout. 
- */ - if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) || - c->x86 > 6) { - if (cfg->monarch_timeout < 0) - cfg->monarch_timeout = USEC_PER_SEC; - } -} - static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c) { struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); @@ -2035,9 +2025,6 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) break; case X86_VENDOR_CENTAUR: - mce_centaur_feature_init(c); - break; - case X86_VENDOR_ZHAOXIN: mce_zhaoxin_feature_init(c); break; @@ -2054,6 +2041,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) mce_intel_feature_clear(c); break; + case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: mce_zhaoxin_feature_clear(c); break; @@ -2223,6 +2211,7 @@ void mca_bsp_init(struct cpuinfo_x86 *c) case X86_VENDOR_INTEL: intel_apply_global_quirks(c); break; + case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: zhaoxin_apply_global_quirks(c); break; @@ -2404,6 +2393,7 @@ static void vendor_disable_error_reporting(void) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || boot_cpu_data.x86_vendor == X86_VENDOR_HYGON || boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) return; diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index 4655223ba5606..7884f7274f906 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -88,6 +88,7 @@ static bool cmci_supported(int *banks) * makes sure none of the backdoors are entered otherwise. */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && + boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR && boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) return false; diff --git a/arch/x86/kernel/cpu/topology_common.c b/arch/x86/kernel/cpu/topology_common.c index b5a5e14114699..e2fbe72e31f52 100644 --- a/arch/x86/kernel/cpu/topology_common.c +++ b/arch/x86/kernel/cpu/topology_common.c @@ -158,8 +158,12 @@ static void parse_topology(struct topo_scan *tscan, bool early) cpu_parse_topology_amd(tscan); break; case X86_VENDOR_CENTAUR: + if (!IS_ENABLED(CONFIG_CPU_SUP_CENTAUR) || !cpu_parse_topology_ext(tscan)) + parse_legacy(tscan); + break; case X86_VENDOR_ZHAOXIN: - parse_legacy(tscan); + if (!IS_ENABLED(CONFIG_CPU_SUP_ZHAOXIN) || !cpu_parse_topology_ext(tscan)) + parse_legacy(tscan); break; case X86_VENDOR_INTEL: if (!IS_ENABLED(CONFIG_CPU_SUP_INTEL) || !cpu_parse_topology_ext(tscan)) diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 120a2b7067fc7..505900a80da88 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -568,6 +568,16 @@ config SATA_VITESSE If unsure, say N. +config SATA_ZHAOXIN + tristate "Zhaoxin SATA support" + depends on PCI + default y + select SATA_HOST + help + This option enables support for Zhaoxin Serial ATA. + + If unsure, say N. 
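Editorial aside: every x86 hunk above applies the same vendor-gating idiom, adding X86_VENDOR_CENTAUR alongside X86_VENDOR_ZHAOXIN so Centaur parts take the Intel-derived MCE/ACPI/cstate paths. A minimal illustrative sketch of the idiom follows; it is not code from the patch, and the function name is hypothetical.

#include <linux/types.h>
#include <asm/processor.h>

/* Sketch only: route Centaur and Zhaoxin parts through one code path,
 * mirroring the switch/case additions in mce/core.c above. */
static bool demo_uses_intel_style_mce(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		return true;
	default:
		return false;
	}
}

The payoff of this pattern is that the Zhaoxin-specific quirk and feature-init code needs no duplication; a new vendor is one extra case label per dispatch site.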
+ comment "PATA SFF controllers with BMDMA" config PATA_ALI diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 20e6645ab7371..4b846692e3656 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_SATA_SIL) += sata_sil.o obj-$(CONFIG_SATA_SIS) += sata_sis.o obj-$(CONFIG_SATA_SVW) += sata_svw.o obj-$(CONFIG_SATA_ULI) += sata_uli.o +obj-$(CONFIG_SATA_ZHAOXIN) += sata_zhaoxin.o obj-$(CONFIG_SATA_VIA) += sata_via.o obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o diff --git a/drivers/ata/sata_zhaoxin.c b/drivers/ata/sata_zhaoxin.c new file mode 100644 index 0000000000000..bd2c0cc2a6cea --- /dev/null +++ b/drivers/ata/sata_zhaoxin.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * sata_zhaoxin.c - ZhaoXin Serial ATA controllers + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "sata_zx" +#define DRV_VERSION "2.6.1" + +enum board_ids_enum { + zx100s, +}; + +enum { + SATA_CHAN_ENAB = 0x40, /* SATA channel enable */ + SATA_INT_GATE = 0x41, /* SATA interrupt gating */ + SATA_NATIVE_MODE = 0x42, /* Native mode enable */ + PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */ + PATA_PIO_TIMING = 0xAB, /* PATA timing register */ + + PORT0 = (1 << 1), + PORT1 = (1 << 0), + ALL_PORTS = PORT0 | PORT1, + + NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4), + + SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */ +}; + +static int zx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +static int zx_scr_read(struct ata_link *link, unsigned int scr, u32 *val); +static int zx_scr_write(struct ata_link *link, unsigned int scr, u32 val); +static int zx_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); + +static void zx_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); + +static const struct pci_device_id zx_pci_tbl[] = { + { PCI_VDEVICE(ZHAOXIN, 0x9002), zx100s }, + { PCI_VDEVICE(ZHAOXIN, 0x9003), zx100s }, + { } /* terminate list */ +}; + +static struct pci_driver zx_pci_driver = { + .name = DRV_NAME, + .id_table = zx_pci_tbl, + .probe = zx_init_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif + .remove = ata_pci_remove_one, +}; + +static struct scsi_host_template zx_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; + +static struct ata_port_operations zx_base_ops = { + .inherits = &ata_bmdma_port_ops, + .sff_tf_load = zx_tf_load, +}; + +static struct ata_port_operations zx_ops = { + .inherits = &zx_base_ops, + .reset.hardreset = zx_hardreset, + .scr_read = zx_scr_read, + .scr_write = zx_scr_write, +}; + +static struct ata_port_info zx100s_port_info = { + .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &zx_ops, +}; + +static int zx_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline) +{ + int rc; + + rc = sata_std_hardreset(link, class, deadline); + if (!rc || rc == -EAGAIN) { + struct ata_port *ap = link->ap; + int pmp = link->pmp; + int tmprc; + + if (pmp) { + ap->ops->sff_dev_select(ap, pmp); + tmprc = ata_sff_wait_ready(&ap->link, deadline); + } else { + tmprc = ata_sff_wait_ready(link, deadline); + } + if (tmprc) + ata_link_err(link, "COMRESET failed for wait (errno=%d)\n", rc); + else + ata_link_err(link, "wait for bsy success\n"); + + ata_link_err(link, "COMRESET success (errno=%d) ap=%d link %d\n", rc, + link->ap->port_no, 
link->pmp); + } else { + ata_link_err(link, "COMRESET failed (errno=%d) ap=%d link %d\n", rc, + link->ap->port_no, link->pmp); + } + return rc; +} + +static int zx_scr_read(struct ata_link *link, unsigned int scr, u32 *val) +{ + static const u8 ipm_tbl[] = { 1, 2, 6, 0 }; + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + int slot = 2 * link->ap->port_no + link->pmp; + u32 v = 0; + u8 raw; + + switch (scr) { + case SCR_STATUS: + pci_read_config_byte(pdev, 0xA0 + slot, &raw); + + /* read the DET field, bits 0 and 1 of the config byte */ + v |= raw & 0x03; + + /* read the SPD field, bits 4 and 5 of the config byte */ + v |= raw & 0x30; + + /* read the IPM field, bits 2 and 3 of the config byte */ + v |= (ipm_tbl[(raw >> 2) & 0x3]) << 8; + break; + + case SCR_ERROR: + /* only devices 0x9002 and 0x9003 are expected; both use 0xA8 as base */ + WARN_ON(pdev->device != 0x9002 && pdev->device != 0x9003); + pci_write_config_byte(pdev, 0x42, slot); + pci_read_config_dword(pdev, 0xA8, &v); + break; + + case SCR_CONTROL: + pci_read_config_byte(pdev, 0xA4 + slot, &raw); + + /* read the DET field, bits 0 and 1 */ + v |= ((raw & 0x02) << 1) | (raw & 0x01); + + /* read the IPM field, bits 2 and 3 */ + v |= ((raw >> 2) & 0x03) << 8; + + break; + + default: + return -EINVAL; + } + + *val = v; + + return 0; +} + +static int zx_scr_write(struct ata_link *link, unsigned int scr, u32 val) +{ + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + int slot = 2 * link->ap->port_no + link->pmp; + u32 v = 0; + + WARN_ON(pdev == NULL); + + switch (scr) { + case SCR_ERROR: + /* devices 0x9002 and 0x9003 use 0xA8 as base */ + WARN_ON(pdev->device != 0x9002 && pdev->device != 0x9003); + pci_write_config_byte(pdev, 0x42, slot); + pci_write_config_dword(pdev, 0xA8, val); + return 0; + + case SCR_CONTROL: + /* set the DET field */ + v |= ((val & 0x4) >> 1) | (val & 0x1); + + /* set the IPM field */ + v |= ((val >> 8) & 0x3) << 2; + + pci_write_config_byte(pdev, 0xA4 + slot, v); + + return 0; + + default: + return -EINVAL; + } +} + +/* + * zx_tf_load - send taskfile registers to host controller + * @ap: Port to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller. + * + * This works around an internal bug of zx chipsets, which will + * reset the device register after changing the IEN bit on the ctl + * register.
+ */ +static void zx_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) +{ + struct ata_taskfile ttf; + + if (tf->ctl != ap->last_ctl) { + ttf = *tf; + ttf.flags |= ATA_TFLAG_DEVICE; + tf = &ttf; + } + ata_sff_tf_load(ap, tf); +} + +static const unsigned int zx_bar_sizes[] = { + 8, 4, 8, 4, 16, 256 +}; + +static const unsigned int zx100s_bar_sizes0[] = { + 8, 4, 8, 4, 16, 0 +}; + +static const unsigned int zx100s_bar_sizes1[] = { + 8, 4, 0, 0, 16, 0 +}; + +static int zx_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) +{ + const struct ata_port_info *ppi0[] = { + &zx100s_port_info, NULL + }; + const struct ata_port_info *ppi1[] = { + &zx100s_port_info, &ata_dummy_port_info + }; + struct ata_host *host; + int i, rc; + + if (pdev->device == 0x9002) + rc = ata_pci_bmdma_prepare_host(pdev, ppi0, &host); + else if (pdev->device == 0x9003) + rc = ata_pci_bmdma_prepare_host(pdev, ppi1, &host); + else + rc = -EINVAL; + + if (rc) + return rc; + + *r_host = host; + + /* 9002 hosts four sata ports as M/S of the two channels */ + /* 9003 hosts two sata ports as M/S of the one channel */ + for (i = 0; i < host->n_ports; i++) + ata_slave_link_init(host->ports[i]); + + return 0; +} + +static void zx_configure(struct pci_dev *pdev, int board_id) +{ + u8 tmp8; + + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8); + dev_info(&pdev->dev, "routed to hard irq line %d\n", + (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f); + + /* make sure SATA channels are enabled */ + pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8); + if ((tmp8 & ALL_PORTS) != ALL_PORTS) { + dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n", (int)tmp8); + tmp8 |= ALL_PORTS; + pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8); + } + + /* make sure interrupts for each channel sent to us */ + pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8); + if ((tmp8 & ALL_PORTS) != ALL_PORTS) { + dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n", (int) tmp8); + tmp8 |= ALL_PORTS; + pci_write_config_byte(pdev, SATA_INT_GATE, tmp8); + } + + /* make sure native mode is enabled */ + pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8); + if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) { + dev_dbg(&pdev->dev, "enabling SATA channel native mode (0x%x)\n", (int) tmp8); + tmp8 |= NATIVE_MODE_ALL; + pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); + } +} + +static int zx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + unsigned int i; + int rc; + struct ata_host *host = NULL; + int board_id = (int) ent->driver_data; + const unsigned int *bar_sizes; + int legacy_mode = 0; + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + if (pdev->device == 0x9002 || pdev->device == 0x9003) { + if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { + u8 tmp8, mask; + + /* TODO: What if one channel is in native mode ... 
*/ + pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); + mask = (1 << 2) | (1 << 0); + if ((tmp8 & mask) != mask) + legacy_mode = 1; + } + if (legacy_mode) + return -EINVAL; + } + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + if (board_id == zx100s && pdev->device == 0x9002) + bar_sizes = &zx100s_bar_sizes0[0]; + else if (board_id == zx100s && pdev->device == 0x9003) + bar_sizes = &zx100s_bar_sizes1[0]; + else + bar_sizes = &zx_bar_sizes[0]; + + for (i = 0; i < ARRAY_SIZE(zx_bar_sizes); i++) { + if ((pci_resource_start(pdev, i) == 0) || + (pci_resource_len(pdev, i) < bar_sizes[i])) { + if (bar_sizes[i] == 0) + continue; + + dev_err(&pdev->dev, "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n", i, + (unsigned long long)pci_resource_start(pdev, i), + (unsigned long long)pci_resource_len(pdev, i)); + + return -ENODEV; + } + } + + switch (board_id) { + case zx100s: + rc = zx_prepare_host(pdev, &host); + break; + default: + rc = -EINVAL; + } + if (rc) + return rc; + + zx_configure(pdev, board_id); + + pci_set_master(pdev); + + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &zx_sht); +} + +module_pci_driver(zx_pci_driver); + +MODULE_AUTHOR("Yanchen:YanchenSun@zhaoxin.com"); +MODULE_DESCRIPTION("SCSI low-level driver for ZX SATA controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, zx_pci_tbl); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 492a2a61a65be..43101f39d3967 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -600,6 +600,16 @@ config HW_RANDOM_JH7110 To compile this driver as a module, choose M here. The module will be called jh7110-trng. +config HW_RANDOM_PHYTIUM + tristate "Phytium Random Number Generator support" + depends on ARCH_PHYTIUM || COMPILE_TEST + help + This driver provides kernel-side support for the Random Number + Generator hardware found on Phytium SoCs. + + To compile this driver as a module, choose M here: the + module will be called phytium-rng. + config HW_RANDOM_ROCKCHIP tristate "Rockchip True Random Number Generator" depends on HW_RANDOM && (ARCH_ROCKCHIP || COMPILE_TEST) diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index b9132b3f5d210..0fa9f0a271d46 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -52,3 +52,4 @@ obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o obj-$(CONFIG_HW_RANDOM_ROCKCHIP) += rockchip-rng.o obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o +obj-$(CONFIG_HW_RANDOM_PHYTIUM) += phytium-rng.o diff --git a/drivers/char/hw_random/phytium-rng.c b/drivers/char/hw_random/phytium-rng.c new file mode 100644 index 0000000000000..ef4c137c17ccc --- /dev/null +++ b/drivers/char/hw_random/phytium-rng.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SoC RNG Driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TRNG_CR 0x00 +#define TRNG_CR_RNGEN BIT(0) +#define TRNG_CR_ROSEN_MASK GENMASK(7, 4) +#define TRNG_CR_DIEN BIT(16) +#define TRNG_CR_ERIEN BIT(17) +#define TRNG_CR_IRQEN BIT(24) +#define TRNG_MSEL 0x04 +#define TRNG_MSEL_MSEL BIT(0) +#define TRNG_SR 0x08 +#define TRNG_SR_HTF BIT(0) +#define TRNG_SR_DRDY BIT(1) +#define TRNG_SR_ERERR BIT(3) +#define TRNG_DR 0x0C +#define TRNG_RESEED 0x40 +#define TRNG_RESEED_RSED BIT(0) + +#define DELAY 10 +#define TIMEOUT 100 + +static int msel; +module_param(msel, int, 0444); +MODULE_PARM_DESC(msel, "Phytium RNG mode selection: 0 - TRNG. 1 - PRNG."); + +struct phytium_rng { + struct hwrng rng; + void __iomem *base; +}; + +static int phytium_rng_init(struct hwrng *rng) +{ + struct phytium_rng *priv = container_of(rng, struct phytium_rng, rng); + u32 reg; + + /* Mode Selection */ + reg = msel ? TRNG_MSEL_MSEL : 0; + writel(reg, priv->base + TRNG_MSEL); + + /* If PRNG mode is on, do a reseed operation */ + if (msel) + writel(TRNG_RESEED_RSED, priv->base + TRNG_RESEED); + + /* Clear status */ + writel(0x7, priv->base + TRNG_SR); + + /* Enable TRNG */ + reg = readl(priv->base + TRNG_CR) | TRNG_CR_ROSEN_MASK | TRNG_CR_RNGEN; + writel(reg, priv->base + TRNG_CR); + + return 0; +} + +static void phytium_rng_cleanup(struct hwrng *rng) +{ + struct phytium_rng *priv = container_of(rng, struct phytium_rng, rng); + + writel(0x7, priv->base + TRNG_SR); +} + +static int phytium_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct phytium_rng *priv = container_of(rng, struct phytium_rng, rng); + u32 reg; + int ret = 0; + + /* The TRNG can generate at most 8 32-bit random words at a time */ + max = max > 8 ? 8 : max; + + reg = readl(priv->base + TRNG_SR); + if (!(reg & TRNG_SR_DRDY) && wait) { + ret = readl_poll_timeout(priv->base + TRNG_SR, reg, + reg & TRNG_SR_DRDY, DELAY, TIMEOUT); + if (ret) { + dev_err((struct device *)priv->rng.priv, + "%s: timeout %x!\n", __func__, reg); + return -EIO; + } + } + + /* copy out whole 32-bit words while at least one word remains */ + while (max >= 4) { + *(u32 *)buf = readl(priv->base + TRNG_DR); + + ret += sizeof(u32); + buf += sizeof(u32); + max -= sizeof(u32); + } + + /* Clear DRDY by writing 1 */ + writel(reg | TRNG_SR_DRDY, priv->base + TRNG_SR); + + return ret; +} + +static int phytium_rng_probe(struct platform_device *pdev) +{ + struct phytium_rng *priv; + struct resource *mem; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->rng.name = pdev->name; + priv->rng.init = phytium_rng_init; + priv->rng.cleanup = phytium_rng_cleanup; + priv->rng.read = phytium_rng_read; + priv->rng.priv = (unsigned long)&pdev->dev; + + return devm_hwrng_register(&pdev->dev, &priv->rng); +} + +static const struct of_device_id phytium_rng_dt_ids[] = { + { .compatible = "phytium,rng" }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_rng_dt_ids); + +static struct platform_driver phytium_rng_driver = { + .probe = phytium_rng_probe, + .driver = { + .name = "phytium-rng", + .of_match_table = of_match_ptr(phytium_rng_dt_ids), + } +}; +module_platform_driver(phytium_rng_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium random number generator driver"); +MODULE_AUTHOR("Chen Baozi "); diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index a6688d54984c4..1cb8bd55ef13d 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -52,6 +52,48 @@ config CRYPTO_DEV_PADLOCK_SHA If unsure say M. The compiled module will be called padlock-sha. +config CRYPTO_DEV_ZHAOXIN + tristate "Support for Zhaoxin ACE" + depends on X86 && !UML + default m + help + Some Zhaoxin processors come with an integrated crypto engine + (so-called Zhaoxin ACE, Advanced Cryptography Engine) + that provides instructions for very fast cryptographic + operations with supported algorithms. + + The instructions are used only when the CPU supports them. + Otherwise software encryption is used. + +config CRYPTO_DEV_ZHAOXIN_AES + tristate "Zhaoxin ACE driver for AES algorithm" + depends on CRYPTO_DEV_ZHAOXIN + default CRYPTO_DEV_ZHAOXIN + select CRYPTO_BLKCIPHER + select CRYPTO_AES + help + Use Zhaoxin ACE for AES algorithm. + + Available in Zhaoxin CPUs. + + If unsure say M. The compiled module will be + called zhaoxin-aes. + +config CRYPTO_DEV_ZHAOXIN_SHA + tristate "Zhaoxin ACE driver for SHA1 and SHA256 algorithms" + depends on CRYPTO_DEV_ZHAOXIN + default CRYPTO_DEV_ZHAOXIN + select CRYPTO_HASH + select CRYPTO_SHA1 + select CRYPTO_SHA256 + help + Use Zhaoxin ACE for SHA1/SHA256 algorithms. + + Available in Zhaoxin processors. + + If unsure say M. The compiled module will be + called zhaoxin-sha.
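Editorial aside: the phytium-rng read path above leans on the readl_poll_timeout() helper from linux/iopoll.h, polling TRNG_SR until DRDY is set, bounded by the DELAY/TIMEOUT microsecond constants. A self-contained sketch of that polling idiom follows; DEMO_SR, DEMO_DRDY, and demo_wait_ready are illustrative stand-ins, not driver symbols.

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEMO_SR   0x08    /* status register offset (illustrative) */
#define DEMO_DRDY BIT(1)  /* data-ready flag (illustrative) */

/* Returns 0 once DRDY is observed, -ETIMEDOUT after 100us of 10us polls. */
static int demo_wait_ready(void __iomem *base)
{
	u32 reg;

	return readl_poll_timeout(base + DEMO_SR, reg,
				  reg & DEMO_DRDY, 10, 100);
}

The helper re-reads the register into reg on every iteration, so on success the caller also has the final status value in hand, which the driver reuses when clearing DRDY.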
+ config CRYPTO_DEV_GEODE tristate "Support for the Geode LX AES engine" depends on X86_32 && PCI @@ -877,5 +919,6 @@ source "drivers/crypto/aspeed/Kconfig" source "drivers/crypto/starfive/Kconfig" source "drivers/crypto/inside-secure/eip93/Kconfig" source "drivers/crypto/ti/Kconfig" +source "drivers/crypto/montage/Kconfig" endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 322ae8854e3ea..2131a0c369fd5 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -26,6 +26,8 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o +obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_AES) += zhaoxin-aes.o +obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_SHA) += zhaoxin-sha.o obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o @@ -50,3 +52,4 @@ obj-y += intel/ obj-y += starfive/ obj-y += cavium/ obj-y += ti/ +obj-y += montage/ diff --git a/drivers/crypto/montage/Kconfig b/drivers/crypto/montage/Kconfig new file mode 100644 index 0000000000000..e8e4b287a7927 --- /dev/null +++ b/drivers/crypto/montage/Kconfig @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +source "drivers/crypto/montage/tsse/Kconfig" diff --git a/drivers/crypto/montage/Makefile b/drivers/crypto/montage/Makefile new file mode 100644 index 0000000000000..a50415fe10c70 --- /dev/null +++ b/drivers/crypto/montage/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_CRYPTO_DEV_TSSE) += tsse/ diff --git a/drivers/crypto/montage/tsse/Kconfig b/drivers/crypto/montage/tsse/Kconfig new file mode 100644 index 0000000000000..5854f8e4525cf --- /dev/null +++ b/drivers/crypto/montage/tsse/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +config CRYPTO_DEV_TSSE + tristate "Support for Montage(R) TSSE" + depends on X86 && PCI + select FW_LOADER + help + Support for Montage(R) TSSE for accelerating crypto workloads. + + To compile this as a module, choose M here. \ No newline at end of file diff --git a/drivers/crypto/montage/tsse/Makefile b/drivers/crypto/montage/tsse/Makefile new file mode 100644 index 0000000000000..d67ffde3a5b04 --- /dev/null +++ b/drivers/crypto/montage/tsse/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# This file is part of tsse driver for Linux +# +# Copyright © 2023 Montage Technology. All rights reserved. + +obj-$(CONFIG_CRYPTO_DEV_TSSE) += tsse.o + +tsse-objs := tsse_dev_mgr.o \ + tsse_ipc.o \ + tsse_fw_service.o \ + tsse_service.o \ + tsse_irq.o \ + tsse_dev_drv.o \ + tsse_vuart.o diff --git a/drivers/crypto/montage/tsse/tsse_dev.h b/drivers/crypto/montage/tsse/tsse_dev.h new file mode 100644 index 0000000000000..c16d2ae7c4140 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved.
+ */ + +#ifndef __TSSE_DEV_H__ +#define __TSSE_DEV_H__ +#include +#include +#include +#include +#include +#include +#include +#include "tsse_ipc.h" + +#define TSSE_PCI_MAX_BARS 4 +#define TSSE_FW_VERSION_LEN 32 + +struct tsse_bar { + void __iomem *virt_addr; + resource_size_t addr; + resource_size_t size; +}; +struct tsse_dev_pci { + struct pci_dev *pci_dev; + struct tsse_bar bars[TSSE_PCI_MAX_BARS]; + u8 revid; +}; +enum tsse_dev_status_bit { + TSSE_DEV_STATUS_STARTING = 0, + TSSE_DEV_STATUS_STARTED = 1 + +}; +struct tsse_qpairs_bank { + struct tsse_dev *tsse_dev; + void __iomem *reg_base; + + u32 num_qparis; + u32 irq_vec; +}; +struct tsse_dev { + struct module *owner; + struct dentry *debugfs_dir; + unsigned long status; + struct list_head list; + struct tsse_dev_pci tsse_pci_dev; + struct tsse_qpairs_bank qpairs_bank; + atomic_t ref_count; + bool is_vf; + int id; + u32 num_irqs; + u32 num_vfs; + struct uart_port *port; + struct tsse_ipc *ipc; + void *adi; + void *mbx_hw; + const struct firmware *fw; + char fw_version[TSSE_FW_VERSION_LEN]; + bool fw_version_exist; +}; +#define TSSEDEV_TO_DEV(tssedev) (&((tssedev)->tsse_pci_dev.pci_dev->dev)) +#define TSSE_DEV_BARS(tssedev) ((tssedev)->tsse_pci_dev.bars) + +#include "tsse_log.h" + +struct list_head *tsse_devmgr_get_head(void); + +int tsse_dev_get(struct tsse_dev *tsse_dev); +void tsse_dev_put(struct tsse_dev *tsse_dev); +int tsse_devmgr_add_dev(struct tsse_dev *tsse_dev); +void tsse_devmgr_rm_dev(struct tsse_dev *tdev); +int tsse_prepare_restart_dev(struct tsse_dev *tdev); +int tsse_start_dev(struct tsse_dev *tdev); + +static inline struct tsse_dev *pci_to_tsse_dev(struct pci_dev *pci_dev) +{ + return (struct tsse_dev *)pci_get_drvdata(pci_dev); +} + +static inline int tsse_get_cur_node(void) +{ + int cpu, node; + + cpu = get_cpu(); + node = topology_physical_package_id(cpu); + put_cpu(); + + return node; +} + +static inline int tsse_dev_started(struct tsse_dev *tdev) +{ + return test_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); +} +static inline int tsse_dev_in_use(struct tsse_dev *tdev) +{ + return atomic_read(&tdev->ref_count) != 0; +} +#endif diff --git a/drivers/crypto/montage/tsse/tsse_dev_drv.c b/drivers/crypto/montage/tsse/tsse_dev_drv.c new file mode 100644 index 0000000000000..81cbf74ec91ff --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev_drv.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "tsse_dev_drv.h" +#include "tsse_vuart.h" +#include "tsse_ipc.h" +#include "tsse_fw_service.h" + +#define CLUSTER_SLOT_CONFIG_OFFSET 0x5780000 +#define QPAIR_SETTING_OFFSET 0x50000 +#define BAR_START 2 +#define BAR_END 4 + +static DEFINE_IDA(tsse_ida); + +static inline void tsse_qpair_enable_pf(struct tsse_dev *tdev, bool enable) +{ + writel(enable ? 
1 : 0, + TSSE_DEV_BARS(tdev)[2].virt_addr + + CLUSTER_SLOT_CONFIG_OFFSET + QPAIR_SETTING_OFFSET); +} +static int tsse_sriov_disable(struct tsse_dev *tdev) +{ + pci_disable_sriov(tdev->tsse_pci_dev.pci_dev); + tsse_qpair_enable_pf(tdev, true); + + return 0; +} + +static int tsse_sriov_configure(struct pci_dev *pdev, int num_vfs_param) +{ + int totalvfs = pci_sriov_get_totalvfs(pdev); + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + int ret = 0; + + if ((!tdev) || (num_vfs_param < 0) || (totalvfs <= 0)) { + dev_err(&pdev->dev, + "%s %d: failed to config sriov, tdev=%p totalvfs=%d num_vfs_param=%d\n", + __func__, __LINE__, tdev, totalvfs, num_vfs_param); + return -EBADE; + } + + if (num_vfs_param > totalvfs) + num_vfs_param = totalvfs; + + dev_info(&pdev->dev, "%s %d: %d vfs available in total, enabling %d vfs\n", + __func__, __LINE__, totalvfs, num_vfs_param); + + if ((num_vfs_param > TSSE_PF_MAX_IRQ_NUM) || + (num_vfs_param > TSSE_PF_MAX_QPAIR_NUM)) { + tsse_dev_err( + tdev, + "vfs number is greater than pf's \"max_irq_num=%d or max_qpairs_num=%d\"\n", + TSSE_PF_MAX_IRQ_NUM, TSSE_PF_MAX_QPAIR_NUM); + return -EBADE; + } + + if (!tsse_dev_started(tdev)) { + dev_err(&pdev->dev, "%s %d: device is not started\n", __func__, + __LINE__); + return -EBADE; + } + + if (tsse_dev_in_use(tdev)) { + dev_err(&pdev->dev, "%s %d: device is busy\n", __func__, + __LINE__); + return -EBUSY; + } + + tsse_sriov_disable(tdev); + + tsse_prepare_restart_dev(tdev); + + tdev->num_vfs = num_vfs_param; + + if (tdev->num_vfs > 0) { + tdev->num_irqs = TSSE_SRIOV_PF_MAX_IRQ_NUM; + tdev->qpairs_bank.num_qparis = TSSE_SRIOV_PF_MAX_QPAIR_NUM; + } else { + tdev->num_irqs = TSSE_PF_MAX_IRQ_NUM; + tdev->qpairs_bank.num_qparis = TSSE_PF_MAX_QPAIR_NUM; + } + + tsse_dev_info( + tdev, + "num_irqs:%u num_qparis:%u qpairs' start irq vector index:%u qpairs' reg base:0x%lx\n", + tdev->num_irqs, tdev->qpairs_bank.num_qparis, + tdev->qpairs_bank.irq_vec, (ulong)tdev->qpairs_bank.reg_base); + + ret = tsse_start_dev(tdev); + if (ret) { + dev_err(&pdev->dev, "%s %d: failed to start the device\n", + __func__, __LINE__); + return ret; + } + + if (num_vfs_param > 0) { + tsse_qpair_enable_pf(tdev, false); + pci_enable_sriov(pdev, num_vfs_param); + } + + return num_vfs_param; +} + +/** + * tsse_image_load_store() - Called when the user writes a string to + * /sys/bus/pci/devices/.../tsse_image_load. + * The driver always loads /lib/firmware/tsse_firmware.bin. + * @dev: device + * @attr: device attribute + * @buf: string that user writes + * @count: string length that user writes + * Return: the number of bytes consumed from the buffer; here it is simply the count argument.
+*/ +static ssize_t tsse_image_load_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = NULL; + struct tsse_dev *tdev = NULL; + + pdev = container_of(dev, struct pci_dev, dev); + if (pdev) + tdev = pci_to_tsse_dev(pdev); + if (buf && count && tdev) { + tsse_dev_info(tdev, "receive command to load firmware %s\n", TSSE_FIRMWARE); + if (!tsse_fw_load(pdev, TSSE_FIRMWARE, &tdev->fw)) { + if (!get_firmware_version(tdev->fw, tdev->fw_version)) + tdev->fw_version_exist = true; + if (tsse_fw_manual_load_ipc(pdev)) + dev_err(&pdev->dev, "%s %d: firmware update failed\n", + __func__, __LINE__); + } + } + return count; +} + +DEVICE_ATTR_WO(tsse_image_load); + +static int device_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int status = 0; + int bar; + u32 tmp_val; + struct tsse_dev *tdev; + + if (!pdev->is_physfn) { + dev_err(&pdev->dev, "%s %d: this is not Physical fn\n", + __func__, __LINE__); + return -EPERM; + } + + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { + dev_err(&pdev->dev, + "%s %d: invalid numa configuration for tsse\n", + __func__, __LINE__); + return -EINVAL; + } + + tdev = kzalloc_node(sizeof(*tdev), GFP_KERNEL, dev_to_node(&pdev->dev)); + + if (!tdev) + return -ENOMEM; + + status = pcim_enable_device(pdev); + + if (status) { + dev_err(&pdev->dev, "pcim_enable_device failed\n"); + goto out_err; + } + + pci_set_master(pdev); + + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(48))) { + if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, + "failed to set tsse dma address width\n"); + status = -EFAULT; + goto out_err; + } else { + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + } + + } else { + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(48)); + } + + dma_set_max_seg_size(&pdev->dev, UINT_MAX); + + status = pcim_iomap_regions(pdev, BIT(0) | BIT(2), TSSE_DEV_NAME); + if (status) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + goto out_err; + } + + for (bar = BAR_START; bar < BAR_END;) { + TSSE_DEV_BARS(tdev)[bar].addr = pci_resource_start(pdev, bar); + TSSE_DEV_BARS(tdev)[bar].size = pci_resource_len(pdev, bar); + TSSE_DEV_BARS(tdev) + [bar].virt_addr = pcim_iomap_table(pdev)[bar]; + + dev_info(&pdev->dev, + "bar[%d]: addr=0x%llx, size=0x%llx, virt_addr=0x%lx\n", + bar, TSSE_DEV_BARS(tdev)[bar].addr, + TSSE_DEV_BARS(tdev)[bar].size, + (ulong)TSSE_DEV_BARS(tdev)[bar].virt_addr); + + bar += 2; + } + + tdev->owner = THIS_MODULE; + tdev->is_vf = false; + tdev->tsse_pci_dev.pci_dev = pdev; + tdev->id = ida_alloc(&tsse_ida, GFP_KERNEL); + if (tdev->id < 0) { + dev_err(&pdev->dev, "Unable to get id\n"); + status = tdev->id; + goto out_err; + } + + pci_set_drvdata(pdev, tdev); + + tdev->num_irqs = TSSE_PF_MAX_IRQ_NUM; + tdev->qpairs_bank.num_qparis = TSSE_PF_MAX_QPAIR_NUM; + tdev->qpairs_bank.irq_vec = TSSE_PF_QPAIR_START_IRQ_VECTOR; + tdev->qpairs_bank.reg_base = + TSSE_DEV_BARS(tdev)[2].virt_addr + TSSE_PF_QPAIR_REG_BASE; + + tsse_qpair_enable_pf(tdev, true); + + tsse_dev_info( + tdev, + "num_irqs:%u num_qparis:%u qpairs' start irq vector index:%u qpairs' reg base:0x%lx\n", + tdev->num_irqs, tdev->qpairs_bank.num_qparis, + tdev->qpairs_bank.irq_vec, (ulong)tdev->qpairs_bank.reg_base); + + if (tsse_devmgr_add_dev(tdev)) { + dev_err(&pdev->dev, + "%s %d: tsse_devmgr failed to add new device\n", + __func__, __LINE__); + status = -EFAULT; + goto out_err_ida_free; + } + + if (vuart_init_port(pdev)) { + dev_err(&pdev->dev, + "%s %d: vuart_init_port failed to 
init vuart.\n", + __func__, __LINE__); + status = -EFAULT; + goto out_err_port_init; + } + + tdev->fw_version_exist = false; + /* A firmware load failure here does not abort driver init */ + if (!tsse_fw_load(pdev, TSSE_FIRMWARE, &tdev->fw)) { + if (!get_firmware_version(tdev->fw, tdev->fw_version)) + tdev->fw_version_exist = true; + } + + if (tsse_ipc_init(pdev)) { + dev_err(&pdev->dev, + "%s %d: tsse_ipc_init failed.\n", __func__, + __LINE__); + status = -EFAULT; + goto out_err_ipc; + } + + if (sysfs_create_file(&pdev->dev.kobj, &dev_attr_tsse_image_load.attr)) { + dev_err(&pdev->dev, + "%s %d: sysfs_create_file failed for tsse image load.\n", + __func__, __LINE__); + status = -EFAULT; + goto out_err_image_load; + } + + tsse_dev_info(tdev, "successful\n"); + + pci_read_config_dword(pdev, 0x720, &tmp_val); + tsse_dev_dbg(tdev, "the value of FILTER_MASK_2_REG is 0x%x\n", tmp_val); + + return 0; +out_err_image_load: + tsse_ipc_deinit(tdev); +out_err_ipc: + vuart_uninit_port(pdev); +out_err_port_init: + tsse_devmgr_rm_dev(tdev); +out_err_ida_free: + ida_free(&tsse_ida, tdev->id); +out_err: + kfree(tdev); + return status; +} + +static void device_remove(struct pci_dev *pdev) +{ + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + + pr_info("%s %d: pci_dev 0x%lx tsse_dev 0x%lx\n", __func__, __LINE__, + (ulong)pdev, (ulong)tdev); + + tsse_sriov_disable(tdev); + if (tdev->fw) { + release_firmware(tdev->fw); + tdev->fw = NULL; + } + sysfs_remove_file(&pdev->dev.kobj, &dev_attr_tsse_image_load.attr); + tsse_ipc_deinit(tdev); + vuart_uninit_port(pdev); + tsse_devmgr_rm_dev(tdev); + ida_free(&tsse_ida, tdev->id); + kfree(tdev); + dev_info(&pdev->dev, "%s %d: successful\n", __func__, __LINE__); +} + +static const struct pci_device_id pci_ids[] = { + { + PCI_DEVICE(0x1b00, 0xc011), + }, + { + PCI_DEVICE(0x1b00, 0xd011), + }, + { 0 } +}; + +static struct pci_driver pci_driver = { + .name = TSSE_DEV_NAME, + .id_table = pci_ids, + .probe = device_probe, + .remove = device_remove, + .sriov_configure = tsse_sriov_configure, +}; + +MODULE_DEVICE_TABLE(pci, pci_ids); + +static int __init tsse_init(void) +{ + int status; + + status = vuart_register(); + if (status) { + pr_err("vuart_register failed[%d].\n", status); + return status; + } + + status = pci_register_driver(&pci_driver); + if (status) { + vuart_unregister(); + return status; + } + + pr_info(KBUILD_MODNAME ": loaded.\n"); + + return 0; +} + +static void __exit tsse_exit(void) +{ + pci_unregister_driver(&pci_driver); + vuart_unregister(); + + pr_info(KBUILD_MODNAME ": unloaded.\n"); +} + +module_init(tsse_init); +module_exit(tsse_exit); + +MODULE_AUTHOR("montage-tech.com"); +MODULE_DESCRIPTION("TSSE device driver"); +MODULE_VERSION("1.0.0"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(TSSE_FIRMWARE); diff --git a/drivers/crypto/montage/tsse/tsse_dev_drv.h b/drivers/crypto/montage/tsse/tsse_dev_drv.h new file mode 100644 index 0000000000000..6a05572a38494 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev_drv.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved.
+ */ + +#ifndef __TSSE_DEV_DRV_H__ +#define __TSSE_DEV_DRV_H__ +#define TSSE_DEV_NAME "tsse" + +// TODO: need to support full qpairs +#define TSSE_PF_MAX_QPAIR_NUM 16 + +#define TSSE_PF_MAX_IRQ_NUM 96 +#define TSSE_PF_QPAIR_START_IRQ_VECTOR 32 + +#define TSSE_SRIOV_PF_MAX_QPAIR_NUM 0 +#define TSSE_SRIOV_PF_MAX_IRQ_NUM 16 + +#define TSSE_PF_QPAIR_REG_BASE 0x5700000 + +#include "tsse_dev.h" + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_dev_mgr.c b/drivers/crypto/montage/tsse/tsse_dev_mgr.c new file mode 100644 index 0000000000000..39553eb968323 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev_mgr.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "tsse_dev.h" +#include "tsse_irq.h" +static DEFINE_MUTEX(tsse_dev_table_lock); +static LIST_HEAD(tsse_dev_table); + +static DEFINE_MUTEX(algs_lock); + +static inline void tsse_list_del(struct list_head *entry) +{ + WRITE_ONCE(entry->next->prev, entry->prev); + WRITE_ONCE(entry->prev->next, entry->next); +} +static inline void tsse_list_add(struct list_head *new, struct list_head *prev, + struct list_head *next) +{ + WRITE_ONCE(new->next, next); + WRITE_ONCE(new->prev, prev); + mb(); /* Make sure new node updates first */ + WRITE_ONCE(next->prev, new); + WRITE_ONCE(prev->next, new); +} + +static int tsse_dev_pf_get(struct tsse_dev *vf_tsse_dev) +{ + int ret = 0; + struct tsse_dev *pf_tsse_dev = NULL; + struct pci_dev *pf_pci_dev = NULL; + + pf_pci_dev = vf_tsse_dev->tsse_pci_dev.pci_dev->physfn; + + if (!pf_pci_dev) + return 0; + + pf_tsse_dev = pci_to_tsse_dev(pf_pci_dev); + if (pf_tsse_dev) { + if (atomic_add_return(1, &pf_tsse_dev->ref_count) == 1) { + if (!try_module_get(pf_tsse_dev->owner)) { + /* drop the reference taken above on failure */ + atomic_sub(1, &pf_tsse_dev->ref_count); + ret = -EFAULT; + } + } + } + return ret; +} + +static void tsse_dev_pf_put(struct tsse_dev *vf_tsse_dev) +{ + struct tsse_dev *pf_tsse_dev = NULL; + struct pci_dev *pf_pci_dev = NULL; + + pf_pci_dev = vf_tsse_dev->tsse_pci_dev.pci_dev->physfn; + + if (!pf_pci_dev) + return; + + pf_tsse_dev = pci_to_tsse_dev(pf_pci_dev); + if (pf_tsse_dev) { + if (atomic_sub_return(1, &pf_tsse_dev->ref_count) == 0) + module_put(pf_tsse_dev->owner); + } +} + +int tsse_dev_get(struct tsse_dev *tdev) +{ + int ref_count = atomic_add_return(1, &tdev->ref_count); + + if (!tsse_dev_started(tdev)) { + atomic_sub(1, &tdev->ref_count); + return -EAGAIN; + } + + if (ref_count == 1) { + if (!try_module_get(tdev->owner)) { + /* drop the reference taken above on failure */ + atomic_sub(1, &tdev->ref_count); + return -EFAULT; + } + if (tdev->is_vf) + return tsse_dev_pf_get(tdev); + } + return 0; +} +void tsse_dev_put(struct tsse_dev *tdev) +{ + if (atomic_sub_return(1, &tdev->ref_count) == 0) { + module_put(tdev->owner); + if (tdev->is_vf) + tsse_dev_pf_put(tdev); + } +} + +static int tsse_stop_dev(struct tsse_dev *tdev, bool busy_exit) +{ + int times, max_retry = 150; + + clear_bit(TSSE_DEV_STATUS_STARTING, &tdev->status); + clear_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + + for (times = 0; times < max_retry; times++) { + if (!tsse_dev_in_use(tdev)) + break; + msleep(100); + } + + if (times >= max_retry) { + tsse_dev_err(tdev, "Failed to stop busy device\n"); + if (busy_exit) + return -EBUSY; + } + if (tdev->qpairs_bank.num_qparis != 0) { + mutex_lock(&tsse_dev_table_lock); + tsse_list_del(&tdev->list); + mutex_unlock(&tsse_dev_table_lock); + tsse_dev_info(tdev, "removed from active dev table list\n"); + } + + tsse_dev_info(tdev, "device
stopped\n"); + + return 0; +} + +int tsse_start_dev(struct tsse_dev *tdev) +{ + struct tsse_dev *tmp_dev; + struct list_head *prev_node = &tsse_dev_table; + int ret = 0; + + if (tdev->qpairs_bank.num_qparis == 0) { + set_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + tsse_dev_info(tdev, "device started\n"); + return 0; + } + + set_bit(TSSE_DEV_STATUS_STARTING, &tdev->status); + + mutex_lock(&tsse_dev_table_lock); + + list_for_each_entry(tmp_dev, &tsse_dev_table, list) { + if (tmp_dev == tdev) { + ret = -EEXIST; + tsse_dev_err(tdev, + "The device cannot be added repeatedly\n"); + goto clear_status; + } + } + + set_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + tsse_list_add(&tdev->list, prev_node, prev_node->next); + + tsse_dev_info(tdev, "device started\n"); + mutex_unlock(&tsse_dev_table_lock); + + return 0; +clear_status: + mutex_unlock(&tsse_dev_table_lock); + clear_bit(TSSE_DEV_STATUS_STARTING, &tdev->status); + clear_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + return ret; +} + +int tsse_prepare_restart_dev(struct tsse_dev *tdev) +{ + return tsse_stop_dev(tdev, false); +} + +void tsse_devmgr_rm_dev(struct tsse_dev *tdev) +{ + tsse_stop_dev(tdev, false); + tsse_dev_free_irq_vectors(tdev); + msleep(300); +} + +int tsse_devmgr_add_dev(struct tsse_dev *tdev) +{ + int ret; + + ret = tsse_dev_alloc_irq_vectors(tdev); + if (ret == 0) { + atomic_set(&tdev->ref_count, 0); + tdev->status = 0; + ret = tsse_start_dev(tdev); + + if (ret != 0) + tsse_dev_free_irq_vectors(tdev); + } + return ret; +} + +struct list_head *tsse_devmgr_get_head(void) +{ + return &tsse_dev_table; +} diff --git a/drivers/crypto/montage/tsse/tsse_fw_service.c b/drivers/crypto/montage/tsse/tsse_fw_service.c new file mode 100644 index 0000000000000..aeada6ec7fa0c --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_fw_service.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tsse_dev.h" +#include "tsse_service.h" + +#define SEARCH_PATTERN "MT_CFG_BUILD_VERSION_DETAIL" +#define SPACE_CH ' ' + +static int fw_send_msg(struct tsse_ipc *tsseipc, struct ipc_msg *msg) +{ + u8 *h2d; + u32 int_reg; + + mutex_lock(&tsseipc->list_lock); + + int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); + if ((int_reg & IPC_REGISTER_INT_SET) != 0) { + mutex_unlock(&tsseipc->list_lock); + return -EFAULT; + } + if (msg->header.i_len < sizeof(struct ipc_header) + + sizeof(struct msg_info) + sizeof(struct fw_load)) { + dev_err(tsseipc->dev, "msg format error\n"); + mutex_unlock(&tsseipc->list_lock); + return -EFAULT; + } + h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET); + memcpy_toio(h2d, msg, sizeof(struct ipc_header)); + memcpy_toio(h2d + sizeof(struct ipc_header), (u8 *)msg->i_data, + msg->header.i_len - sizeof(struct ipc_header)); + writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); + + dev_info(tsseipc->dev, "notify device to get firmware\n"); + mutex_unlock(&tsseipc->list_lock); + return 0; +} + +/** + * get_firmware_version() - Get version information from firmware + * @fw: firmware pointer + * @fw_version_out: firmware version string output + * Return: 0 on success, error code otherwise + */ +int get_firmware_version(const struct firmware *fw, char *fw_version_out) +{ + const char *pattern = SEARCH_PATTERN; + const uint8_t *fw_buffer = fw->data; + uint32_t pattern_i = 0, buffer_i = 0; + uint32_t pattern_len = strlen(pattern); // does not include the trailing '\0' + uint32_t version_start = 0; + uint32_t version_len = 0; + + while (buffer_i < fw->size) { + if (pattern[pattern_i] == (char) fw_buffer[buffer_i]) { + buffer_i++; + pattern_i++; + } + if (pattern_i == pattern_len) { + break; // pattern found + } else if ((buffer_i < fw->size) && + (pattern[pattern_i] != (char) fw_buffer[buffer_i])) { + // mismatch after a partial match + if (pattern_i != 0) { + // the pattern has no repeated prefix, so on a mismatch + // the next comparison restarts at the beginning of the pattern + pattern_i = 0; + } else { + buffer_i++; + } + } + } + if (pattern_i == pattern_len) { + buffer_i++; + version_start = buffer_i; + while (buffer_i < fw->size) { + if (fw_buffer[buffer_i] == SPACE_CH) { + version_len = buffer_i - version_start; + if (version_len >= TSSE_FW_VERSION_LEN - 1) + version_len = TSSE_FW_VERSION_LEN - 2; + strscpy(fw_version_out, fw_buffer + version_start, version_len + 1); + return 0; + } + buffer_i++; + } + } + return -EINVAL; +} + +/** + * fw_service() - Firmware service that handles an IPC message from the main CPU. + * It writes the firmware (at init or on a manual load) to the PCIe BAR and sends a message back.
+ * @tsseipc_t: pointer to a structure used for IPC + * @msg_t: pointer to IPC message + */ +void fw_service(void *tsseipc_t, void *msg_t) +{ + void __iomem *fw; + uint32_t size; + uint32_t task_offset; + struct fw_load *fw_task; + struct tsse_dev *tdev; + struct tsse_ipc *tsseipc = (struct tsse_ipc *)tsseipc_t; + struct ipc_msg *msg = (struct ipc_msg *)msg_t; + + task_offset = sizeof(struct msg_info); + fw_task = (struct fw_load *)((uint8_t *)msg->i_data + task_offset); + tdev = pci_to_tsse_dev(tsseipc->pdev); + + if (!tdev || !tdev->fw) { + fw_task->result = 1; + fw_task->size = 0; + dev_err(tsseipc->dev, "firmware loading failed\n"); + if (fw_send_msg(tsseipc, msg)) + dev_err(tsseipc->dev, "notify device failed\n"); + return; + } + + fw_task->result = 0; + fw_task->size = tdev->fw->size; + size = tdev->fw->size; + fw = tsseipc->virt_addr + fw_task->offset + FW_BASE; + + memcpy_toio((u8 *)fw, tdev->fw->data, size); + dev_info(tsseipc->dev, "firmware loading done\n"); + if (fw_send_msg(tsseipc, msg)) + dev_err(tsseipc->dev, "notify device failed\n"); + + if (tdev->fw_version_exist) + dev_info(tsseipc->dev, "firmware version: %s\n", tdev->fw_version); + + if (tdev->fw) { + release_firmware(tdev->fw); + tdev->fw = NULL; + memset(tdev->fw_version, 0, TSSE_FW_VERSION_LEN); + tdev->fw_version_exist = false; + } +} + +/** + * tsse_fw_load() - Load firmware from /lib/firmware + * @pdev: pci device + * @name: firmware file name + * @fw: pointer to firmware pointer + * Return: 0 on success, error code otherwise + */ +int tsse_fw_load(struct pci_dev *pdev, const char *name, const struct firmware **fw) +{ + int result; + + result = request_firmware(fw, name, &pdev->dev); + if (result) + dev_err(&pdev->dev, "%s failed for %s\n", __func__, name); + return result; +} diff --git a/drivers/crypto/montage/tsse/tsse_fw_service.h b/drivers/crypto/montage/tsse/tsse_fw_service.h new file mode 100644 index 0000000000000..706ea6d297696 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_fw_service.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_FW_SERVICE_H__ +#define __TSSE_FW_SERVICE_H__ + +#include + +#define FW_BASE 0x7000000 +#define TSSE_FIRMWARE "tsse_firmware.bin" + +void fw_service(void *tsseipc_t, void *msg_t); +int tsse_fw_load(struct pci_dev *pdev, const char *name, const struct firmware **fw); +int get_firmware_version(const struct firmware *fw, char *fw_version_out); +#endif diff --git a/drivers/crypto/montage/tsse/tsse_ipc.c b/drivers/crypto/montage/tsse/tsse_ipc.c new file mode 100644 index 0000000000000..c03d5209551b3 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_ipc.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include +#include + +#include "tsse_ipc.h" +#include "tsse_dev.h" +#include "tsse_service.h" + +/** + * get_msginf() - Create ipc_msg and read message from BAR. + * Return the pointer to ipc_msg; the caller is responsible for freeing it.
+ * @d2h: device2host memory pointer + * Return: new ipc_msg pointer, which points to message read from device + */ +static struct ipc_msg *get_msginf(void __iomem *d2h) +{ + uint32_t u_len = 0; + struct ipc_msg *msg = NULL; + + uint8_t *device_msg_data = NULL; + struct ipc_header *ipc_info = (struct ipc_header *)d2h; + + // The memory layout in d2h must at least contain: + // ipc_header, msg_info and fw_load (message body) + if (ipc_info->i_len < sizeof(struct ipc_header) + + sizeof(struct msg_info) + sizeof(struct fw_load)) { + pr_err("%s(): msg format error\n", __func__); + return NULL; + } + u_len = ipc_info->i_len - sizeof(struct ipc_header); + msg = kzalloc(sizeof(struct ipc_msg) + u_len, GFP_ATOMIC); + if (!msg) { + pr_err("%s(): ipc_msg kzalloc failed\n", __func__); + return NULL; + } + + msg->header.inst_id = ipc_info->inst_id; + msg->header.tgid = ipc_info->tgid; + msg->header.i_len = ipc_info->i_len; + + device_msg_data = (uint8_t *)(d2h + sizeof(struct ipc_header)); + memcpy_fromio((uint8_t *)msg->i_data, device_msg_data, u_len); + + return msg; +} + +static irqreturn_t tsse_ipc_d2h_irqhandler(int irq, void *dev_id) +{ + struct tsse_ipc *tsseipc = (struct tsse_ipc *)dev_id; + + writel(0x0, tsseipc->virt_addr + MAIN2HOST_INTR_SET_OFFSET); + tasklet_hi_schedule(&tsseipc->ipc_handle); + dev_dbg(tsseipc->dev, "irq%d\n", irq); + return IRQ_HANDLED; +} + +bool check_send_enbit(struct tsse_ipc *tsseipc) +{ + u32 int_reg; + + int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); + return (int_reg & IPC_REGISTER_INT_SET) == 0; +} + +void notify_device(struct tsse_ipc *tsseipc) +{ + writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); +} + +/** + * ipc_hw_init() - Enable the main2host interrupt and clear any stale interrupt + * set values in host2main and main2host.
+ * @hw_ipc: pointer to a structure used for IPC
+*/
+static void ipc_hw_init(struct tsse_ipc *hw_ipc)
+{
+	writel(0x1, hw_ipc->virt_addr + MAIN2HOST_INTR_ENABLE_OFFSET);
+	writel(0x0, hw_ipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+	writel(0x0, hw_ipc->virt_addr + MAIN2HOST_INTR_SET_OFFSET);
+}
+
+static int ipc_init_msg(struct tsse_ipc *tsseipc)
+{
+	u8 *h2d;
+	u32 int_reg;
+	u32 cmd_len;
+	u32 i_len;
+	struct ipc_msg *msg;
+	struct msg_info *info_msg;
+
+	cmd_len = sizeof(uint32_t);
+	i_len = sizeof(struct ipc_header) + sizeof(struct msg_info) + cmd_len;
+	msg = (struct ipc_msg *)(kzalloc(i_len, GFP_ATOMIC));
+
+	if (!msg) {
+		pr_info("%s(): msg kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	msg->header.i_len = i_len;
+	info_msg = (struct msg_info *)msg->i_data;
+	info_msg->msg_class = IPC_MESSAGE_BASIC;
+	*(uint32_t *)((uint8_t *)msg->i_data + sizeof(struct msg_info)) = IPC_BASIC_CMD_HOST_INIT;
+
+	mutex_lock(&tsseipc->list_lock);
+	int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+	if ((int_reg & IPC_REGISTER_INT_SET) != 0) {
+		mutex_unlock(&tsseipc->list_lock);
+		kfree(msg);
+		return -EFAULT;
+	}
+	h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET);
+
+	memcpy_toio(h2d, msg, sizeof(struct ipc_header));
+	memcpy_toio(h2d + sizeof(struct ipc_header), (u8 *)msg->i_data,
+		sizeof(struct msg_info) + sizeof(uint32_t));
+
+	writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+	mutex_unlock(&tsseipc->list_lock);
+	kfree(msg);
+
+	return 0;
+}
+
+static void tsse_ipc_bh_handler(unsigned long data)
+{
+	struct tsse_ipc *tsseipc = (struct tsse_ipc *)data;
+
+	void __iomem *d2h_payload = tsseipc->virt_addr + MAIN2HOST_IPC_OFFSET;
+	struct ipc_msg *msg = get_msginf(d2h_payload);
+
+	if (!msg) {
+		dev_err(tsseipc->dev, "get_msginf is NULL\n");
+		return;
+	}
+	if (service_rout(tsseipc, msg))
+		dev_err(tsseipc->dev, "illegal message class\n");
+	kfree(msg);
+}
+
+int tsse_ipc_init(struct pci_dev *pdev)
+{
+	struct tsse_dev *tdev = pci_to_tsse_dev(pdev);
+	struct tsse_ipc *ipc;
+	int rc;
+
+	ipc = devm_kzalloc(&pdev->dev, sizeof(*ipc), GFP_KERNEL);
+	if (ipc == NULL)
+		return -ENOMEM;
+	tdev->ipc = ipc;
+	ipc->pdev = pdev;
+	ipc->dev = &pdev->dev;
+	ipc->virt_addr = TSSE_DEV_BARS(tdev)[2].virt_addr;
+
+	mutex_init(&ipc->list_lock);
+	tasklet_init(&(ipc->ipc_handle), tsse_ipc_bh_handler,
+		(ulong)(ipc));
+
+	rc = request_threaded_irq(pci_irq_vector(pdev, 0), NULL,
+		tsse_ipc_d2h_irqhandler, IRQF_SHARED,
+		"pf-ipc", ipc);
+	if (rc) {
+		dev_err(&pdev->dev, "request_threaded_irq failed\n");
+		return rc;
+	}
+	ipc_hw_init(ipc);
+	rc = ipc_init_msg(ipc);
+	if (rc) {
+		dev_err(&pdev->dev, "ipc_init_msg failed\n");
+		tsse_ipc_deinit(tdev);
+	}
+	return rc;
+}
+
+void tsse_ipc_deinit(void *tdev_t)
+{
+	struct tsse_ipc *tsseipc;
+	struct pci_dev *pdev;
+	struct tsse_dev *tdev;
+
+	tdev = tdev_t;
+	tsseipc = tdev->ipc;
+	if (tsseipc) {
+		pdev = tsseipc->pdev;
+		free_irq(pci_irq_vector(pdev, 0), tdev->ipc);
+		tdev->ipc = NULL;
+	}
+}
+
+int tsse_fw_manual_load_ipc(struct pci_dev *pdev)
+{
+	struct tsse_dev *tdev = pci_to_tsse_dev(pdev);
+	struct tsse_ipc *ipc = tdev->ipc;
+	int rc = -EFAULT;
+
+	if (ipc) {
+		ipc_hw_init(ipc);
+		rc = ipc_init_msg(ipc);
+		if (rc)
+			dev_err(&pdev->dev, "ipc_init_msg failed\n");
+	}
+	return rc;
+}
diff --git a/drivers/crypto/montage/tsse/tsse_ipc.h b/drivers/crypto/montage/tsse/tsse_ipc.h
new file mode 100644
index 0000000000000..82f8df71c9837
--- /dev/null
+++ b/drivers/crypto/montage/tsse/tsse_ipc.h
@@ -0,0 +1,103 @@
+/*
SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TM_HOST_IPC_H__ +#define __TM_HOST_IPC_H__ + +#include +#include +#include + +#define TSSE_PASID_SVA + +#define HOST2MAIN_INTR_SET_OFFSET 0x2000 +#define HOST2MAIN_INTR_ENABLE_OFFSET 0x2004 +#define HOST2MAIN_ACK_INTR_CLR_OFFSET 0x2008 +#define HOST2MAIN_ACK_INTR_ENABLE_OFFSET 0x200c +#define HOST2MAIN_VLD_INTR_STATUS_OFFSET 0x2010 +#define HOST2MAIN_ACK_INTR_STATUS_OFFSET 0x2014 +#define MSIX_MASK_EN_REG_OFFSET 0x2020 +#define INTR_MASK_BIT_OFFSET 0x2024 +#define INTR_PENDING_BIT_OFFSET 0x2028 +#define HOST2MAIN_IPC_OFFSET 0x2400 + +#define MAIN2HOST_INTR_SET_OFFSET 0x3000 +#define MAIN2HOST_INTR_ENABLE_OFFSET 0x3004 +#define MAIN2HOST_ACK_INTR_CLR_OFFSET 0x3008 +#define MAIN2HOST_ACK_INTR_ENABLE_OFFSET 0x300c +#define MAIN2HOST_VEN_MSI_FUNC_NUM_OFFSET 0x3010 +#define MAIN2HOST_VEN_MSI_VFUNC_ACTIVE_OFFSET 0x3014 +#define MAIN2HOST_IPC_OFFSET 0x3400 + +#define IPC_REGISTER_INT_SET BIT(0) +#define IPC_REGISTER_INT_MASK BIT(1) + +enum IPC_BASIC_CMD { + IPC_BASIC_CMD_HOST_INIT = 0x1, + IPC_BASIC_CMD_PING = 0x2 +}; + +enum IPC_BOOT_CMD { + IPC_BOOT_CMD_GET_FIRMWARE = 0x1 +}; + +enum IPC_MESSAGE_CLASS { + IPC_MESSAGE_BASIC = 1, + IPC_MESSAGE_BOOT, + IPC_MESSAGE_CLASS_NUM, +}; + +struct ipc_header { + uint32_t inst_id; + pid_t tgid; + uint32_t i_len; + uint32_t pasid : 20; + uint32_t reserved_1 : 4; + uint32_t pasid_en : 8; + + uint32_t reserved[2]; +}; + +struct ipc_msg { + struct ipc_header header; + uint32_t i_data[]; +}; + +struct fw_load { + uint32_t command; + uint32_t result; + uint8_t name[32]; + uint32_t offset; + uint32_t size; +}; + +struct msg_info { + uint32_t host_id; + uint32_t msg_class; + uint32_t flags; + uint32_t reserved[3]; +}; + +struct ipc_layout { + struct ipc_header header; + struct msg_info info; +}; + +struct tsse_ipc { + struct device *dev; + struct pci_dev *pdev; + void __iomem *virt_addr; + struct mutex list_lock; + struct tasklet_struct ipc_handle; +}; + +int tsse_ipc_init(struct pci_dev *pdev); +void tsse_ipc_deinit(void *tdev); +int tsse_fw_manual_load_ipc(struct pci_dev *pdev); +bool check_send_enbit(struct tsse_ipc *tsseipc); +void notify_device(struct tsse_ipc *tsseipc); +#endif diff --git a/drivers/crypto/montage/tsse/tsse_irq.c b/drivers/crypto/montage/tsse/tsse_irq.c new file mode 100644 index 0000000000000..8cb94fea3da4e --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_irq.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
+ */ + +#include +#include +#include "tsse_dev.h" +#include "tsse_irq.h" + +#undef TSSE_IRQ_DBG + +int tsse_dev_alloc_irq_vectors(struct tsse_dev *tdev) +{ + int request_num = tdev->num_irqs; + int irq_num = pci_alloc_irq_vectors(tdev->tsse_pci_dev.pci_dev, + request_num, request_num, + PCI_IRQ_MSIX); + + if (irq_num < 0) { + dev_err(TSSEDEV_TO_DEV(tdev), + "%s %d :failed to alloc MSIX interrupt vectors\n", + __func__, __LINE__); + return irq_num; + } + + return 0; +} diff --git a/drivers/crypto/montage/tsse/tsse_irq.h b/drivers/crypto/montage/tsse/tsse_irq.h new file mode 100644 index 0000000000000..09bed4e6d58a6 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_irq.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_IRQ_H__ +#define __TSSE_IRQ_H__ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "tsse_dev.h" + +static inline void tsse_dev_free_irq_vectors(struct tsse_dev *tdev) +{ + pci_free_irq_vectors(tdev->tsse_pci_dev.pci_dev); +} + +int tsse_dev_alloc_irq_vectors(struct tsse_dev *tdev); + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_log.h b/drivers/crypto/montage/tsse/tsse_log.h new file mode 100644 index 0000000000000..153cbe16374e7 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_log.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_LOG_H__ +#define __TSSE_LOG_H__ + +#define tsse_dev_err(tssedev, fmt, ...) \ + dev_err(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) +#define tsse_dev_warn(tssedev, fmt, ...) \ + dev_warn(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) +#define tsse_dev_info(tssedev, fmt, ...) \ + dev_info(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) +#define tsse_dev_dbg(tssedev, fmt, ...) \ + dev_dbg(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_service.c b/drivers/crypto/montage/tsse/tsse_service.c new file mode 100644 index 0000000000000..e4be85535b776 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_service.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ +#include +#include "tsse_service.h" + +int service_rout(struct tsse_ipc *tsseipc, struct ipc_msg *msg) +{ + struct msg_info *info; + uint32_t msg_class; + int ret = 0; + + info = (struct msg_info *)msg->i_data; + msg_class = info->msg_class; + switch (msg_class) { + case IPC_MESSAGE_BOOT: + fw_service(tsseipc, msg); + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} diff --git a/drivers/crypto/montage/tsse/tsse_service.h b/drivers/crypto/montage/tsse/tsse_service.h new file mode 100644 index 0000000000000..d5fd87ee7dce4 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_service.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
+ */ + +#ifndef __TSSE_SERVICE_H__ +#define __TSSE_SERVICE_H__ + +#include "tsse_ipc.h" +#include "tsse_fw_service.h" + +int service_rout(struct tsse_ipc *tsseipc, struct ipc_msg *msg); + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_vuart.c b/drivers/crypto/montage/tsse/tsse_vuart.c new file mode 100644 index 0000000000000..7036444f95401 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_vuart.c @@ -0,0 +1,600 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tsse_dev.h" +#include "tsse_vuart_regs.h" +#include "tsse_vuart.h" + +#ifdef DEBUG +#define VUART_PRINT(fmt, ...) pr_info(fmt, ##__VA_ARGS__) +#else +#define VUART_PRINT(fmt, ...) +#endif + +#define TSSE_VUART_BAUD (38400) +#define TSSE_VUART_MAX_RX_COUNT (256) +#define BOTH_EMPTY (VUART_FSR_TXFIFOE | VUART_FSR_RXFIFO) +struct tsse_vuart { + struct uart_port port; + unsigned int tx_threshold; + unsigned int rx_threshold; + unsigned int tx_loadsz; + unsigned char shutdown; + unsigned char confige_done; +}; + +#define SERIAL_LSR_NAME "tsse_vuart" + +static struct uart_driver g_vuart_reg = { + .owner = THIS_MODULE, + .driver_name = SERIAL_LSR_NAME, + .dev_name = "ttyTSSE", + .nr = TSSE_VUART_MAX_DEV, +}; + +static unsigned int g_trigger_level[4] = { 0, 31, 63, 111 }; +static unsigned long g_line[TSSE_VUART_BITMAP_SIZE]; + +static unsigned int vuart_serial_in(struct uart_port *port, int offset) +{ + unsigned int ret = le32_to_cpu(readl(port->membase + offset)); +#ifdef DEBUG + pr_debug("%s offset 0x%x, v 0x%x\n", __func__, offset, ret); +#endif + return ret; +} + +static void vuart_serial_out(struct uart_port *port, int offset, int value) +{ +#ifdef DEBUG + pr_debug("%s offset 0x%x, v 0x%x\n", __func__, offset, value); +#endif + value = cpu_to_le32(value); + writel(value, port->membase + offset); +} + +static void vuart_wait_for_xmitr(struct uart_port *port) +{ + unsigned int status, tmout = 10000; + + for (;;) { + status = vuart_serial_in(port, VUART_FSR); + if (FIELD_GET(VUART_FSR_TXFIFOE, status)) + break; + if (--tmout == 0) { + pr_err("%s:timeout(10ms), TX is not empty.\n", + __func__); + break; + } + udelay(1); + touch_nmi_watchdog(); + } +} + +static unsigned int vuart_tx_empty(struct uart_port *port) +{ + unsigned long flags; + unsigned int lsr; + + spin_lock_irqsave(&port->lock, flags); + lsr = vuart_serial_in(port, VUART_FSR); + spin_unlock_irqrestore(&port->lock, flags); + + return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? 
TIOCSER_TEMT : 0; +} + +static void vuart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +static unsigned int vuart_get_mctrl(struct uart_port *port) +{ + return 0; +} + +static void vuart_stop_tx(struct uart_port *port) +{ + unsigned int ier; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (!vuart->confige_done) + return; + + ier = vuart_serial_in(port, VUART_IER); + ier &= ~VUART_IER_HETXEI; + vuart_serial_out(port, VUART_IER, ier); +} + +static void vuart_tx_chars(struct uart_port *port) +{ + struct tty_port *tport = &port->state->port; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + int count; + + if (port->x_char) { + pr_err("x_char %d\n", port->x_char); + return; + } + + if (uart_tx_stopped(port) || kfifo_is_empty(&tport->xmit_fifo)) { + vuart_stop_tx(port); + return; + } + + count = vuart->tx_loadsz; + do { + unsigned char c; + + if (!uart_fifo_get(port, &c)) + break; + + vuart_serial_out(port, UART_TX, c); + port->icount.tx++; + if (kfifo_is_empty(&tport->xmit_fifo)) + break; + } while (--count > 0); + + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static void vuart_start_tx(struct uart_port *port) +{ + unsigned int ier, fsr; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (!vuart->confige_done) + return; + + if (uart_tx_stopped(port)) { + vuart_stop_tx(port); + return; + } + + fsr = vuart_serial_in(port, VUART_FSR); + VUART_PRINT("==>Existing Data number in TX FIFO %ld\n", + FIELD_GET(VUART_FSR_TFIFODN, fsr)); + VUART_PRINT("==>Existing Data number in RX FIFO %ld\n", + FIELD_GET(VUART_FSR_RFIFODN, fsr)); + if (fsr & VUART_FSR_TXFIFOE) + vuart_tx_chars(port); + ier = vuart_serial_in(port, VUART_IER); + ier |= VUART_IER_HETXEI | VUART_IER_HETXUI; + vuart_serial_out(port, VUART_IER, ier); +} + +static void vuart_throttle(struct uart_port *port) +{ +} + +static void vuart_unthrottle(struct uart_port *port) +{ +} + +static void vuart_stop_rx(struct uart_port *port) +{ + unsigned int ier; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (!vuart->confige_done) + return; + + ier = vuart_serial_in(port, VUART_IER); + ier &= ~(VUART_IER_HERXTOI | VUART_IER_HETXDRI | VUART_IER_HERXOI); + vuart_serial_out(port, VUART_IER, ier); +} + +static void vuart_enable_ms(struct uart_port *port) +{ +} + +static void vuart_break_ctl(struct uart_port *port, int ctl) +{ +} + +static irqreturn_t vuart_interrupt(int irq, void *port) +{ + int handled = 0; + struct uart_port *p = (struct uart_port *)port; + + if (p->handle_irq(p)) + handled = 1; + + return IRQ_RETVAL(handled); +} + +static void vuart_check_config_done(struct uart_port *port) +{ + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (vuart_serial_in(port, VUART_CFG) == 1) + vuart->confige_done = 1; +} + +static int vuart_startup(struct uart_port *port) +{ + unsigned int ret, hcr, ier, fcr = 0; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (port->flags & UPF_SHARE_IRQ) + port->irqflags |= IRQF_SHARED; + ret = request_irq(port->irq, vuart_interrupt, port->irqflags, + "tsse_uart", port); + if (ret) + return ret; + + hcr = vuart_serial_in(port, VUART_HCR); + vuart->rx_threshold = FIELD_GET(VUART_HCR_RFIFOT, hcr); + vuart->tx_threshold = FIELD_GET(VUART_HCR_TFIFOT, hcr); + fcr |= FIELD_PREP(VUART_FCR_RFIFOT, vuart->rx_threshold); + fcr |= FIELD_PREP(VUART_FCR_TFIFOT, vuart->tx_threshold); + fcr |= FIELD_PREP(VUART_FCR_TFIFORST, 1); + fcr |= FIELD_PREP(VUART_FCR_RFIFORST, 1); + vuart_serial_out(port, VUART_FCR, fcr); + + 
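/* translate the 2-bit FIFO threshold encodings into character counts */
+	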
vuart->rx_threshold = g_trigger_level[vuart->rx_threshold]; + vuart->tx_threshold = g_trigger_level[vuart->tx_threshold]; + + vuart_check_config_done(port); + ier = vuart_serial_in(port, VUART_IER); + ier |= VUART_IER_CCFGDI | VUART_IER_HETXDRI | VUART_IER_HERXTOI; + vuart_serial_out(port, VUART_IER, ier); + + vuart_serial_out(port, VUART_SCR, FIELD_PREP(VUART_SCR_SCR, 1)); + + vuart->shutdown = 0; + + return 0; +} + +static void vuart_shutdown(struct uart_port *port) +{ + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + vuart->shutdown = 1; + vuart_stop_rx(port); + vuart_stop_tx(port); + free_irq(port->irq, port); + vuart_serial_out(port, VUART_SCR, 0); +} + +static void vuart_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud; + unsigned long flags; + + if ((termios->c_cflag & CSIZE) != CS8) + pr_err("Warning:termios is not CS8.\n"); + + baud = uart_get_baud_rate(port, termios, old, 0, TSSE_VUART_BAUD); + + spin_lock_irqsave(&port->lock, flags); + uart_update_timeout(port, termios->c_cflag, baud); + + port->read_status_mask = + VUART_FSR_TXFIFOE | VUART_FSR_TXOE | VUART_FSR_RXDR; + if (termios->c_iflag & INPCK) + port->read_status_mask |= VUART_FSR_RXUE; + + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= VUART_FSR_RXUE; + if (termios->c_iflag & (IGNBRK | IGNPAR)) + port->ignore_status_mask |= VUART_FSR_TXFIFOE; + + if ((termios->c_cflag & CREAD) == 0) { + port->ignore_status_mask |= VUART_FSR_RXDR; + pr_err("Warning:termios is not set CREAD.\n"); + } + + spin_unlock_irqrestore(&port->lock, flags); + + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); +} + +static void vuart_set_ldisc(struct uart_port *port, struct ktermios *ktermios) +{ +} + +static void vuart_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ +} + +static void vuart_release_port(struct uart_port *port) +{ +} + +static int vuart_request_port(struct uart_port *port) +{ + return 0; +} + +static void vuart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_16550A; +} + +static int vuart_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + if (port->type != PORT_16550A) + return -EINVAL; + return 0; +} + +#ifdef CONFIG_CONSOLE_POLL +static void vuart_poll_put_char(struct uart_port *port, unsigned char c) +{ + unsigned int ier_save; + + ier_save = vuart_serial_in(port, VUART_IER); + vuart_wait_for_xmitr(port); + vuart_serial_out(port, VUART_TX, c); + + vuart_wait_for_xmitr(port); + vuart_serial_out(port, VUART_IER, ier_save); +} + +static int vuart_poll_get_char(struct uart_port *port) +{ + int status; + + status = vuart_serial_in(port, VUART_FSR); + if (!FIELD_GET(VUART_FSR_RXDR, status)) + return NO_POLL_CHAR; + + return vuart_serial_in(port, VUART_RX); +} + +#endif + +static const char *vuart_type(struct uart_port *port) +{ + return "tsse_vuart"; +} + +static const struct uart_ops vuart_ops = { + .tx_empty = vuart_tx_empty, + .set_mctrl = vuart_set_mctrl, + .get_mctrl = vuart_get_mctrl, + .stop_tx = vuart_stop_tx, + .start_tx = vuart_start_tx, + .throttle = vuart_throttle, + .unthrottle = vuart_unthrottle, + .stop_rx = vuart_stop_rx, + .enable_ms = vuart_enable_ms, + .break_ctl = vuart_break_ctl, + .startup = vuart_startup, + .shutdown = vuart_shutdown, + .set_termios = vuart_set_termios, + .set_ldisc = vuart_set_ldisc, + .pm = vuart_pm, + .type = vuart_type, + .release_port = 
vuart_release_port,
+	.request_port = vuart_request_port,
+	.config_port = vuart_config_port,
+	.verify_port = vuart_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char = vuart_poll_get_char,
+	.poll_put_char = vuart_poll_put_char,
+#endif
+};
+
+static unsigned int vuart_rx_chars(struct uart_port *port, unsigned int lsr)
+{
+	int max_count = TSSE_VUART_MAX_RX_COUNT;
+	unsigned char ch;
+	struct tty_port *tport = &port->state->port;
+
+	do {
+		if (lsr & VUART_FSR_RXDR)
+			ch = vuart_serial_in(port, VUART_RX);
+		else
+			ch = 0;
+		port->icount.rx++;
+		if (lsr & VUART_FSR_RXUE) {
+			port->icount.overrun++;
+			pr_err("incoming byte underflow, record and clear the interrupt.\n");
+			vuart_serial_out(port, VUART_IIR, VUART_IIR_RXUE);
+		}
+
+		if (!uart_prepare_sysrq_char(port, ch)) {
+			if (tty_insert_flip_char(tport, ch, TTY_NORMAL) == 0)
+				++port->icount.buf_overrun;
+		}
+
+		if (--max_count == 0)
+			break;
+		lsr = vuart_serial_in(port, VUART_FSR);
+	} while (lsr & VUART_FSR_RXDR);
+
+	tty_flip_buffer_push(&port->state->port);
+	return lsr;
+}
+
+static int vuart_deal_irq(struct uart_port *port, unsigned int iir)
+{
+	unsigned char status;
+	unsigned int ier;
+	struct tsse_vuart *vuart = (struct tsse_vuart *)port;
+
+	if (iir & VUART_IIR_CPUCD)
+		vuart->confige_done = 1;
+
+	status = vuart_serial_in(port, VUART_FSR);
+	if (port->read_status_mask & VUART_FSR_RXDR)
+		vuart_rx_chars(port, status);
+	else
+		pr_err("VUART_FSR_RXDR not set in read_status_mask, ignore rx.\n");
+
+	ier = vuart_serial_in(port, VUART_IER);
+	if (!(status & VUART_FSR_TXOE) && (status & VUART_FSR_TXFIFOE) &&
+	    (ier & VUART_IER_HETXEI))
+		vuart_tx_chars(port);
+
+	return 1;
+}
+
+#ifdef DEBUG
+static void vuart_debug_iir(unsigned int iir)
+{
+	VUART_PRINT("%s called iir %u.\n", __func__, iir);
+	if (iir & VUART_IIR_TXEI)
+		pr_err("TX FIFO empty interrupt.\n");
+
+	if (iir & VUART_IIR_RXTOI)
+		pr_err("Host RX FIFO character timeout interrupt.\n");
+
+	if (iir & VUART_IIR_RXDAI)
+		pr_err("Host RX FIFO data available interrupt.\n");
+
+	if (iir & VUART_IIR_RXUE)
+		pr_err("HOST RX FIFO Underflow error.\n");
+
+	if (iir & VUART_IIR_TXOE)
+		pr_err("HOST TX FIFO Overrun error.\n");
+
+	if (iir & VUART_IIR_CPUCD)
+		pr_err("CPU has finished configuration for virtual UART.\n");
+
+	if (iir & VUART_IIR_TXFI)
+		pr_err("Host TX FIFO full interrupt.\n");
+}
+#endif
+
+static int vuart_handle_irq(struct uart_port *port)
+{
+	unsigned int iir;
+	unsigned long flags;
+	int ret;
+
+	iir = vuart_serial_in(port, VUART_IIR);
+	vuart_serial_out(port, VUART_IIR, iir);
+#ifdef DEBUG
+	vuart_debug_iir(iir);
+#endif
+	spin_lock_irqsave(&port->lock, flags);
+	ret = vuart_deal_irq(port, iir);
+
+	uart_unlock_and_check_sysrq_irqrestore(port, flags);
+
+	return ret;
+}
+
+static int vuart_get_line(void)
+{
+	int bit = 0;
+
+	bit = find_first_zero_bit(&g_line[0], TSSE_VUART_MAX_DEV);
+	if (bit >= TSSE_VUART_MAX_DEV)
+		return -ENOSPC;
+	set_bit(bit, &g_line[0]);
+	return bit;
+}
+
+static void vuart_free_line(int line)
+{
+	clear_bit(line, &g_line[0]);
+}
+
+int vuart_init_port(struct pci_dev *pdev)
+{
+	struct tsse_dev *tdev = pci_to_tsse_dev(pdev);
+	struct tsse_vuart *vuart = NULL;
+	struct uart_port *p = NULL;
+	int ret = 0;
+	int line = vuart_get_line();
+
+	if (line == -ENOSPC) {
+		dev_err(&pdev->dev, "too many devices, max is 64.\n");
+		return -ENOMEM;
+	}
+
+	vuart = kzalloc_node(sizeof(struct tsse_vuart), GFP_KERNEL,
+		dev_to_node(&pdev->dev));
+	if (!vuart) {
+		ret = -ENOMEM;
+		goto zalloc_fail;
+	}
+	vuart->shutdown = 1;
+	p = &(vuart->port);
+	p->mapbase = 0;
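+	/* no fixed MMIO resource: the port is reached through the PCI BAR mapping */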
+	p->mapsize = 0;
+	p->membase = TSSE_DEV_BARS(tdev)[2].virt_addr + RLS_VUART_OFFSET;
+	p->irq = pci_irq_vector(pdev, RLS_VUART_IRQ_NUM);
+	p->handle_irq = vuart_handle_irq;
+	spin_lock_init(&p->lock);
+	p->line = line;
+	p->type = PORT_16550A;
+	p->uartclk = TSSE_VUART_BAUD * 16;
+	p->iotype = UPIO_MEM;
+	p->ops = &vuart_ops;
+	p->fifosize = 128;
+	vuart->tx_loadsz = 128;
+	p->flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE | UPF_FIXED_PORT |
+		UPF_SHARE_IRQ;
+	p->dev = &pdev->dev;
+	p->private_data = tdev;
+
+	tdev->port = (struct uart_port *)vuart;
+	ret = uart_add_one_port(&g_vuart_reg, p);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "add port failed [%d]\n", ret);
+		goto add_port_fail;
+	}
+	return 0;
+add_port_fail:
+	kfree(vuart);
+zalloc_fail:
+	vuart_free_line(line);
+
+	return ret;
+}
+
+void vuart_uninit_port(struct pci_dev *pdev)
+{
+	struct tsse_dev *tdev = pci_to_tsse_dev(pdev);
+	struct tsse_vuart *vuart = (struct tsse_vuart *)(tdev->port);
+
+	if (tdev->port) {
+		if (!vuart->shutdown)
+			free_irq(tdev->port->irq, tdev->port);
+		vuart_free_line(tdev->port->line);
+		uart_remove_one_port(&g_vuart_reg, tdev->port);
+		kfree(vuart);
+	}
+}
+
+int vuart_register(void)
+{
+	return uart_register_driver(&g_vuart_reg);
+}
+
+void vuart_unregister(void)
+{
+	uart_unregister_driver(&g_vuart_reg);
+}
diff --git a/drivers/crypto/montage/tsse/tsse_vuart.h b/drivers/crypto/montage/tsse/tsse_vuart.h
new file mode 100644
index 0000000000000..1ed43368751af
--- /dev/null
+++ b/drivers/crypto/montage/tsse/tsse_vuart.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This file is part of tsse driver for Linux
+ *
+ * Copyright © 2023 Montage Technology. All rights reserved.
+ */
+
+#ifndef __TSSE_VUART_H__
+#define __TSSE_VUART_H__
+
+#include
+
+#define RLS_VUART_OFFSET (0x680000)
+#define RLS_VUART_IRQ_NUM (10)
+#define TSSE_VUART_MAX_DEV (64)
+#define TSSE_VUART_BITMAP_SIZE (ALIGN(TSSE_VUART_MAX_DEV, 64) / 64)
+
+int vuart_register(void);
+void vuart_unregister(void);
+int vuart_init_port(struct pci_dev *pdev);
+void vuart_uninit_port(struct pci_dev *pdev);
+
+#endif
diff --git a/drivers/crypto/montage/tsse/tsse_vuart_regs.h b/drivers/crypto/montage/tsse/tsse_vuart_regs.h
new file mode 100644
index 0000000000000..26fa62f5014a5
--- /dev/null
+++ b/drivers/crypto/montage/tsse/tsse_vuart_regs.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This file is part of tsse driver for Linux
+ *
+ * Copyright © 2023 Montage Technology. All rights reserved.
+ */ + +#ifndef __TSSE_VUART_REGS_H__ +#define __TSSE_VUART_REGS_H__ + +#include +#include + +#define VUART_ID 0x0 +#define VUART_ID_MASK GENMASK(31, 0) + +#define VUART_HCR 0x10 +#define VUART_HCR_RFIFOT GENMASK(3, 2) +#define VUART_HCR_TFIFOT GENMASK(5, 4) + +#define INTRID_NONE BIT(0) +#define INTRID_CPU_LSR (BIT(2) | BIT(1)) +#define INTRID_TRIGGER_LEVEL BIT(2) +#define INTRID_RX_TIMEOUT (BIT(2) | BIT(3)) +#define INTRID_TX_EMPTY BIT(1) + +#define VUART_IIR 0x28 +#define VUART_IIR_TXEI GENMASK(0, 0) +#define VUART_IIR_RXTOI GENMASK(1, 1) +#define VUART_IIR_RXDAI GENMASK(2, 2) +#define VUART_IIR_CPUCD GENMASK(3, 3) +#define VUART_IIR_TXFI GENMASK(4, 4) +#define VUART_IIR_RXUE GENMASK(5, 5) +#define VUART_IIR_TXOE GENMASK(6, 6) + +#define VUART_FCR 0x30 +#define VUART_FCR_TFIFORST GENMASK(0, 0) +#define VUART_FCR_RFIFORST GENMASK(1, 1) +#define VUART_FCR_RFIFOT GENMASK(3, 2) +#define VUART_FCR_TFIFOT GENMASK(5, 4) + +#define VUART_FSR 0x34 +#define VUART_FSR_TXDR GENMASK(0, 0) +#define VUART_FSR_RXDR GENMASK(1, 1) +#define VUART_FSR_RXFIFO GENMASK(2, 2) +#define VUART_FSR_TXFIFOE GENMASK(3, 3) +#define VUART_FSR_RXFIFOF GENMASK(4, 4) +#define VUART_FSR_TXFIFOF GENMASK(5, 5) +#define VUART_FSR_TFIFODN GENMASK(13, 6) +#define VUART_FSR_RFIFODN GENMASK(21, 14) +#define VUART_FSR_TXOE GENMASK(23, 23) +#define VUART_FSR_RXUE GENMASK(24, 24) + +#define VUART_SCR 0x3c +#define VUART_SCR_SCR GENMASK(7, 0) + +#define VUART_TX 0x40 +#define VUART_RX 0x40 + +#define VUART_IER 0x48 +#define VUART_IER_HETXEI GENMASK(0, 0) +#define VUART_IER_HERXTOI GENMASK(1, 1) +#define VUART_IER_HETXDRI GENMASK(2, 2) +#define VUART_IER_CCFGDI GENMASK(3, 3) +#define VUART_IER_HETXFI GENMASK(4, 4) +#define VUART_IER_HETXUI GENMASK(5, 5) +#define VUART_IER_HERXOI GENMASK(6, 6) + +#define VUART_CFG 0x4c +#define VUART_CFG_CCFGD GENMASK(0, 0) + +#endif diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 1be549a07a219..f0c3127941ae2 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -475,7 +475,7 @@ static struct skcipher_alg cbc_aes_alg = { }; static const struct x86_cpu_id padlock_cpu_id[] = { - X86_MATCH_FEATURE(X86_FEATURE_XCRYPT, NULL), + { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_XCRYPT }, {} }; MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id); diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index 329f60ad422e6..28366bfcb231c 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -317,7 +317,7 @@ static struct shash_alg sha256_alg_nano = { }; static const struct x86_cpu_id padlock_sha_ids[] = { - X86_MATCH_FEATURE(X86_FEATURE_PHE, NULL), + { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_PHE }, {} }; MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids); diff --git a/drivers/crypto/zhaoxin-aes.c b/drivers/crypto/zhaoxin-aes.c new file mode 100644 index 0000000000000..2bb8e4eb30487 --- /dev/null +++ b/drivers/crypto/zhaoxin-aes.c @@ -0,0 +1,523 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Support for ACE hardware crypto engine. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_VERSION "1.0.0" + +/* + * Number of data blocks actually fetched for each xcrypt insn. + * Processors with prefetch errata will fetch extra blocks. 
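+ * The MAX_*_FETCH_BLOCKS limits below bound that worst case so the bounce
+ * buffers in ecb_crypt_copy()/cbc_crypt_copy() can be sized safely.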
+ */ +static unsigned int ecb_fetch_blocks = 2; +#define MAX_ECB_FETCH_BLOCKS (8) +#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE) + +static unsigned int cbc_fetch_blocks = 1; +#define MAX_CBC_FETCH_BLOCKS (4) +#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE) + +/* Control word. */ +struct cword { + unsigned int __packed + rounds:4, + algo:3, + keygen:1, + interm:1, + encdec:1, + ksize:2; +} __aligned(PADLOCK_ALIGNMENT); + +/* + * Whenever making any changes to the following structure *make sure* you keep E, d_data and cword + * aligned on 16 Bytes boundaries and the Hardware can access 16 * 16 bytes of E and d_data (only + * the first 15 * 16 bytes matter but the HW reads more). + */ +struct aes_ctx { + u32 E[AES_MAX_KEYLENGTH_U32] __aligned(PADLOCK_ALIGNMENT); + u32 d_data[AES_MAX_KEYLENGTH_U32] __aligned(PADLOCK_ALIGNMENT); + struct { + struct cword encrypt; + struct cword decrypt; + } cword; + u32 *D; +}; + +static DEFINE_PER_CPU(struct cword *, paes_last_cword); + +/* Tells whether the ACE is capable to generate the extended key for a given key_len. */ +static inline int aes_hw_extkey_available(uint8_t key_len) +{ + /* + * TODO: We should check the actual CPU model/stepping as it's possible that the + * capability will be added in the next CPU revisions. + */ + if (key_len == 16) + return 1; + return 0; +} + +static inline struct aes_ctx *aes_ctx_common(void *ctx) +{ + unsigned long addr = (unsigned long)ctx; + unsigned long align = PADLOCK_ALIGNMENT; + + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + return (struct aes_ctx *)ALIGN(addr, align); +} + +static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) +{ + return aes_ctx_common(crypto_tfm_ctx(tfm)); +} + +static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm) +{ + return aes_ctx_common(crypto_skcipher_ctx(tfm)); +} + +static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + const __le32 *key = (const __le32 *)in_key; + struct crypto_aes_ctx gen_aes; + int cpu; + + if (key_len % 8) + return -EINVAL; + + /* + * If the hardware is capable of generating the extended key itself we must supply the + * plain key for both encryption and decryption. + */ + ctx->D = ctx->E; + + ctx->E[0] = le32_to_cpu(key[0]); + ctx->E[1] = le32_to_cpu(key[1]); + ctx->E[2] = le32_to_cpu(key[2]); + ctx->E[3] = le32_to_cpu(key[3]); + + /* Prepare control words. */ + memset(&ctx->cword, 0, sizeof(ctx->cword)); + + ctx->cword.decrypt.encdec = 1; + ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4; + ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds; + ctx->cword.encrypt.ksize = (key_len - 16) / 8; + ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize; + + /* Don't generate extended keys if the hardware can do it. 
*/ + if (aes_hw_extkey_available(key_len)) + goto ok; + + ctx->D = ctx->d_data; + ctx->cword.encrypt.keygen = 1; + ctx->cword.decrypt.keygen = 1; + + if (aes_expandkey(&gen_aes, in_key, key_len)) + return -EINVAL; + + memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); + memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); + +ok: + for_each_online_cpu(cpu) + if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) || + &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu)) + per_cpu(paes_last_cword, cpu) = NULL; + + return 0; +} + +static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len); +} + +/* ====== Encryption/decryption routines ====== */ + +/* These are the real call to PadLock. */ +static inline void padlock_reset_key(struct cword *cword) +{ + int cpu = raw_smp_processor_id(); + + if (cword != per_cpu(paes_last_cword, cpu)) +#ifndef CONFIG_X86_64 + asm volatile ("pushfl; popfl"); +#else + asm volatile ("pushfq; popfq"); +#endif +} + +static inline void padlock_store_cword(struct cword *cword) +{ + per_cpu(paes_last_cword, raw_smp_processor_id()) = cword; +} + +/* + * While the padlock instructions don't use FP/SSE registers, they generate a spurious DNA fault + * when CR0.TS is '1'. Fortunately, the kernel doesn't use CR0.TS. + */ +static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key, + struct cword *control_word, int count) +{ + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(count)); +} + +static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key, u8 *iv, + struct cword *control_word, int count) +{ + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count)); + return iv; +} + +static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword, int count) +{ + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. + */ + u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + memcpy(tmp, in, count * AES_BLOCK_SIZE); + rep_xcrypt_ecb(tmp, out, key, cword, count); +} + +static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key, u8 *iv, struct cword *cword, int count) +{ + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. + */ + u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + memcpy(tmp, in, count * AES_BLOCK_SIZE); + return rep_xcrypt_cbc(tmp, out, key, iv, cword, count); +} + +static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key, struct cword *cword, int count) +{ + /* + * Padlock in ECB mode fetches at least ecb_fetch_bytes of data. + * We could avoid some copying here but it's probably not worth it. + */ + if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) { + ecb_crypt_copy(in, out, key, cword, count); + return; + } + + rep_xcrypt_ecb(in, out, key, cword, count); +} + +static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key, u8 *iv, struct cword *cword, + int count) +{ + /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. 
*/ + if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE)) + return cbc_crypt_copy(in, out, key, iv, cword, count); + + return rep_xcrypt_cbc(in, out, key, iv, cword, count); +} + +static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, void *control_word, + u32 count) +{ + u32 initial = count & (ecb_fetch_blocks - 1); + + if (count < ecb_fetch_blocks) { + ecb_crypt(input, output, key, control_word, count); + return; + } + + count -= initial; + + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(initial)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(count)); +} + +static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, u8 *iv, + void *control_word, u32 count) +{ + u32 initial = count & (cbc_fetch_blocks - 1); + + if (count < cbc_fetch_blocks) + return cbc_crypt(input, output, key, iv, control_word, count); + + count -= initial; + + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (initial)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count)); + return iv; +} + +static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + + padlock_reset_key(&ctx->cword.encrypt); + ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1); + padlock_store_cword(&ctx->cword.encrypt); +} + +static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + + padlock_reset_key(&ctx->cword.encrypt); + ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1); + padlock_store_cword(&ctx->cword.encrypt); +} + +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-padlock", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = aes_set_key, + .cia_encrypt = padlock_aes_encrypt, + .cia_decrypt = padlock_aes_decrypt, + } + } +}; + +static int ecb_aes_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->E, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static int ecb_aes_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.decrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + padlock_xcrypt_ecb(walk.src.virt.addr, 
walk.dst.virt.addr, + ctx->D, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static struct skcipher_alg ecb_aes_alg = { + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-padlock", + .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct aes_ctx), + .base.cra_alignmask = PADLOCK_ALIGNMENT - 1, + .base.cra_module = THIS_MODULE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_set_key_skcipher, + .encrypt = ecb_aes_encrypt, + .decrypt = ecb_aes_decrypt, +}; + +static int cbc_aes_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, + walk.dst.virt.addr, ctx->E, + walk.iv, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + memcpy(walk.iv, iv, AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.decrypt); + + return err; +} + +static int cbc_aes_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, walk.iv, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static struct skcipher_alg cbc_aes_alg = { + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "cbc-aes-padlock", + .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct aes_ctx), + .base.cra_alignmask = PADLOCK_ALIGNMENT - 1, + .base.cra_module = THIS_MODULE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = aes_set_key_skcipher, + .encrypt = cbc_aes_encrypt, + .decrypt = cbc_aes_decrypt, +}; + +static const struct x86_cpu_id zhaoxin_cpu_id[] = { + { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XCRYPT }, + { X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XCRYPT }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_cpu_id); + +static int __init padlock_init(void) +{ + int ret; + + if (!x86_match_cpu(zhaoxin_cpu_id)) + return -ENODEV; + + if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) { + pr_notice("ACE detected, but not enabled. 
Hmm, strange...\n");
+		return -ENODEV;
+	}
+
+	ret = crypto_register_alg(&aes_alg);
+	if (ret)
+		goto aes_err;
+
+	ret = crypto_register_skcipher(&ecb_aes_alg);
+	if (ret)
+		goto ecb_aes_err;
+
+	ret = crypto_register_skcipher(&cbc_aes_alg);
+	if (ret)
+		goto cbc_aes_err;
+
+	pr_notice("Using ACE for AES algorithm.\n");
+
+out:
+	return ret;
+
+cbc_aes_err:
+	crypto_unregister_skcipher(&ecb_aes_alg);
+ecb_aes_err:
+	crypto_unregister_alg(&aes_alg);
+aes_err:
+	pr_err("ACE AES initialization failed.\n");
+	goto out;
+}
+
+static void __exit padlock_fini(void)
+{
+	crypto_unregister_skcipher(&cbc_aes_alg);
+	crypto_unregister_skcipher(&ecb_aes_alg);
+	crypto_unregister_alg(&aes_alg);
+}
+
+module_init(padlock_init);
+module_exit(padlock_fini);
+
+MODULE_DESCRIPTION("ACE AES algorithm support");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig");
+MODULE_VERSION(DRIVER_VERSION);
+
+MODULE_ALIAS_CRYPTO("aes");
diff --git a/drivers/crypto/zhaoxin-sha.c b/drivers/crypto/zhaoxin-sha.c
new file mode 100644
index 0000000000000..5fa117a1a50e6
--- /dev/null
+++ b/drivers/crypto/zhaoxin-sha.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Support for ACE hardware crypto engine.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define DRIVER_VERSION "1.0.0"
+
+static inline void padlock_output_block(uint32_t *src, uint32_t *dst, size_t count)
+{
+	while (count--)
+		*dst++ = swab32(*src++);
+}
+
+/*
+ * Add two shash_alg instances for the hardware-implemented multi-part hash
+ * supported by the Zhaoxin processor.
+ */
+static int padlock_sha1_init_zhaoxin(struct shash_desc *desc)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+
+	*sctx = (struct sha1_state){
+		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+	};
+
+	return 0;
+}
+
+static int padlock_sha1_update_zhaoxin(struct shash_desc *desc, const u8 *data, unsigned int len)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial, done;
+	const u8 *src;
+
+	/* The PHE requires the output buffer to be 128 bytes long and 16-byte aligned */
+	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __aligned(STACK_ALIGN);
+	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+	partial = sctx->count & 0x3f;
+	sctx->count += len;
+	done = 0;
+	src = data;
+	memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);
+
+	if ((partial + len) >= SHA1_BLOCK_SIZE) {
+		/* Append the bytes in state's buffer to a block to handle */
+		if (partial) {
+			done = -partial;
+			memcpy(sctx->buffer + partial, data, done + SHA1_BLOCK_SIZE);
+			src = sctx->buffer;
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
+				: "+S"(src), "+D"(dst)
+				: "a"((long)-1), "c"((unsigned long)1));
+			done += SHA1_BLOCK_SIZE;
+			src = data + done;
+		}
+
+		/* Process the left bytes from the input data */
+		if (len - done >= SHA1_BLOCK_SIZE) {
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
+				: "+S"(src), "+D"(dst)
+				: "a"((long)-1), "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
+			done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
+			src = data + done;
+		}
+		partial = 0;
+	}
+	memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
+	memcpy(sctx->buffer + partial, src, len - done);
+
+	return 0;
+}
+
+static int padlock_sha1_final_zhaoxin(struct shash_desc *desc, u8 *out)
+{
+	struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
+	unsigned int partial, padlen;
+	__be64 bits;
+	static const u8 padding[64] = { 0x80, };
+
+	bits = cpu_to_be64(state->count << 3);
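+	/* state->count is a byte count; SHA-1's length field is in bits, hence << 3 */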
+
+	/* Pad out to 56 mod 64 */
+	partial = state->count & 0x3f;
+	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
+	padlock_sha1_update_zhaoxin(desc, padding, padlen);
+
+	/* Append length field bytes */
+	padlock_sha1_update_zhaoxin(desc, (const u8 *)&bits, sizeof(bits));
+
+	/* Swap to output */
+	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);
+
+	return 0;
+}
+
+static int padlock_sha256_init_zhaoxin(struct shash_desc *desc)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	*sctx = (struct sha256_state) {
+		.state = {
+			SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+			SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+		},
+	};
+
+	return 0;
+}
+
+static int padlock_sha256_update_zhaoxin(struct shash_desc *desc, const u8 *data, unsigned int len)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial, done;
+	const u8 *src;
+
+	/* The PHE requires the output buffer to be 128 bytes long and 16-byte aligned */
+	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __aligned(STACK_ALIGN);
+	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+	partial = sctx->count & 0x3f;
+	sctx->count += len;
+	done = 0;
+	src = data;
+	memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);
+
+	if ((partial + len) >= SHA256_BLOCK_SIZE) {
+
+		/* Append the bytes in state's buffer to a block to handle */
+		if (partial) {
+			done = -partial;
+			memcpy(sctx->buf + partial, data, done + SHA256_BLOCK_SIZE);
+			src = sctx->buf;
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
+				: "+S"(src), "+D"(dst)
+				: "a"((long)-1), "c"((unsigned long)1));
+			done += SHA256_BLOCK_SIZE;
+			src = data + done;
+		}
+
+		/* Process the left bytes from input data */
+		if (len - done >= SHA256_BLOCK_SIZE) {
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
+				: "+S"(src), "+D"(dst)
+				: "a"((long)-1), "c"((unsigned long)((len - done) / 64)));
+			done += ((len - done) - (len - done) % 64);
+			src = data + done;
+		}
+		partial = 0;
+	}
+	memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
+	memcpy(sctx->buf + partial, src, len - done);
+
+	return 0;
+}
+
+static int padlock_sha256_final_zhaoxin(struct shash_desc *desc, u8 *out)
+{
+	struct sha256_state *state = (struct sha256_state *)shash_desc_ctx(desc);
+	unsigned int partial, padlen;
+	__be64 bits;
+	static const u8 padding[64] = { 0x80, };
+
+	bits = cpu_to_be64(state->count << 3);
+
+	/* Pad out to 56 mod 64 */
+	partial = state->count & 0x3f;
+	padlen = (partial < 56) ?
(56 - partial) : ((64+56) - partial); + padlock_sha256_update_zhaoxin(desc, padding, padlen); + + /* Append length field bytes */ + padlock_sha256_update_zhaoxin(desc, (const u8 *)&bits, sizeof(bits)); + + /* Swap to output */ + padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8); + + return 0; +} + +static int padlock_sha_export_zhaoxin(struct shash_desc *desc, void *out) +{ + int statesize = crypto_shash_statesize(desc->tfm); + void *sctx = shash_desc_ctx(desc); + + memcpy(out, sctx, statesize); + return 0; +} + +static int padlock_sha_import_zhaoxin(struct shash_desc *desc, const void *in) +{ + int statesize = crypto_shash_statesize(desc->tfm); + void *sctx = shash_desc_ctx(desc); + + memcpy(sctx, in, statesize); + return 0; +} + +static struct shash_alg sha1_alg_zhaoxin = { + .digestsize = SHA1_DIGEST_SIZE, + .init = padlock_sha1_init_zhaoxin, + .update = padlock_sha1_update_zhaoxin, + .final = padlock_sha1_final_zhaoxin, + .export = padlock_sha_export_zhaoxin, + .import = padlock_sha_import_zhaoxin, + .descsize = sizeof(struct sha1_state), + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-padlock-zhaoxin", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static struct shash_alg sha256_alg_zhaoxin = { + .digestsize = SHA256_DIGEST_SIZE, + .init = padlock_sha256_init_zhaoxin, + .update = padlock_sha256_update_zhaoxin, + .final = padlock_sha256_final_zhaoxin, + .export = padlock_sha_export_zhaoxin, + .import = padlock_sha_import_zhaoxin, + .descsize = sizeof(struct sha256_state), + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-padlock-zhaoxin", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static const struct x86_cpu_id zhaoxin_sha_ids[] = { + { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_PHE }, + { X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_PHE }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_sha_ids); + +static int __init padlock_init(void) +{ + int rc = -ENODEV; + struct shash_alg *sha1; + struct shash_alg *sha256; + + if (!x86_match_cpu(zhaoxin_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN)) + return -ENODEV; + + sha1 = &sha1_alg_zhaoxin; + sha256 = &sha256_alg_zhaoxin; + + rc = crypto_register_shash(sha1); + if (rc) + goto out; + + rc = crypto_register_shash(sha256); + if (rc) + goto out_unreg1; + + pr_notice("Using ACE for SHA1/SHA256 algorithms.\n"); + + return 0; + +out_unreg1: + crypto_unregister_shash(sha1); + +out: + pr_err("ACE SHA1/SHA256 initialization failed.\n"); + return rc; +} + +static void __exit padlock_fini(void) +{ + crypto_unregister_shash(&sha1_alg_zhaoxin); + crypto_unregister_shash(&sha256_alg_zhaoxin); +} + +module_init(padlock_init); +module_exit(padlock_fini); + +MODULE_DESCRIPTION("ACE SHA1/SHA256 algorithms support."); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Ludvig"); +MODULE_VERSION(DRIVER_VERSION); + +MODULE_ALIAS_CRYPTO("sha1-all"); +MODULE_ALIAS_CRYPTO("sha256-all"); +MODULE_ALIAS_CRYPTO("sha1-padlock"); +MODULE_ALIAS_CRYPTO("sha256-padlock"); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index b8a74b1798ba1..d0c03400b15ff 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -749,6 +749,14 @@ config XILINX_ZYNQMP_DPDMA driver provides the dmaengine required by the DisplayPort subsystem display driver. 
+config PHYTIUM_DDMA + bool "Phytium PE220x DDMA support" + depends on (ARCH_PHYTIUM || COMPILE_TEST) + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Enable support for Phytium PE220x DDMA controller. + # driver files source "drivers/dma/amd/Kconfig" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index a54d7688392b1..d8850be540125 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -92,3 +92,4 @@ obj-y += qcom/ obj-y += stm32/ obj-y += ti/ obj-y += xilinx/ +obj-y += phytium/ diff --git a/drivers/dma/phytium/Makefile b/drivers/dma/phytium/Makefile new file mode 100644 index 0000000000000..71ba9b9fcd406 --- /dev/null +++ b/drivers/dma/phytium/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_PHYTIUM_DDMA) += phytium-ddmac.o diff --git a/drivers/dma/phytium/phytium-ddmac.c b/drivers/dma/phytium/phytium-ddmac.c new file mode 100644 index 0000000000000..f6ff6df92b8d9 --- /dev/null +++ b/drivers/dma/phytium/phytium-ddmac.c @@ -0,0 +1,958 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium Device DDMA Controller driver. + * + * Copyright (c) 2019-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium-ddmac.h" + + +static inline struct phytium_ddma_device *to_ddma_device + (struct dma_chan *chan) +{ + return container_of(chan->device, + struct phytium_ddma_device, dma_dev); +} + +static inline struct phytium_ddma_chan *to_ddma_chan + (struct dma_chan *chan) +{ + return container_of(chan, + struct phytium_ddma_chan, vchan.chan); +} + +static inline struct phytium_ddma_desc *to_ddma_desc + (struct virt_dma_desc *vd) +{ + return container_of(vd, + struct phytium_ddma_desc, vdesc); +} + +static inline struct device *chan_to_dev + (struct phytium_ddma_chan *chan) +{ + return chan->vchan.chan.device->dev; +} + +static inline struct phytium_ddma_device *chan_to_ddma + (struct phytium_ddma_chan *chan) +{ + return to_ddma_device(&chan->vchan.chan); +} + +static inline void phytium_ddma_iowrite32 + (const struct phytium_ddma_device *ddma, + const u32 reg, const u32 val) +{ + iowrite32(val, ddma->base + reg); +} + +static inline u32 phytium_ddma_ioread32 + (const struct phytium_ddma_device *ddma, + const u32 reg) +{ + return ioread32(ddma->base + reg); +} + +static inline void phytium_chan_iowrite32 + (const struct phytium_ddma_chan *chan, + const u32 reg, const u32 val) +{ + iowrite32(val, chan->base + reg); +} + +static inline u32 phytium_chan_ioread32 + (const struct phytium_ddma_chan *chan, + const u32 reg) +{ + return ioread32(chan->base + reg); +} + +static void phytium_ddma_disable + (const struct phytium_ddma_device *ddma) +{ + dev_dbg(ddma->dev, "ddma disable\n"); + phytium_ddma_iowrite32(ddma, DMA_CTL, !DMA_CTL_EN); +} + +static void phytium_ddma_enable + (const struct phytium_ddma_device *ddma) +{ + dev_dbg(ddma->dev, "ddma enable\n"); + phytium_ddma_iowrite32(ddma, DMA_CTL, DMA_CTL_EN); +} + +static void phytium_ddma_reset + (const struct phytium_ddma_device *ddma) +{ + u32 val = 0; + + dev_dbg(ddma->dev, "dma reset\n"); + val = phytium_ddma_ioread32(ddma, DMA_CTL); + val |= DMA_CTL_SRST; + phytium_ddma_iowrite32(ddma, DMA_CTL, val); + + udelay(10); + val &= ~DMA_CTL_SRST; + phytium_ddma_iowrite32(ddma, DMA_CTL, val); +} + +static void phytium_ddma_irq_disable + (const struct phytium_ddma_device *ddma) +{ + u32 val = 0; + + dev_dbg(ddma->dev, "ddma 
irq disable\n"); + val = phytium_ddma_ioread32(ddma, DMA_MASK_INT); + val |= DMA_INT_EN; + phytium_ddma_iowrite32(ddma, DMA_MASK_INT, val); +} + +static void phytium_ddma_irq_enable + (const struct phytium_ddma_device *ddma) +{ + u32 val = 0; + + dev_dbg(ddma->dev, "ddma irq enable\n"); + val = phytium_ddma_ioread32(ddma, DMA_MASK_INT); + val &= ~DMA_INT_EN; + phytium_ddma_iowrite32(ddma, DMA_MASK_INT, val); +} + +static u32 phytium_ddma_irq_read + (const struct phytium_ddma_device *ddma) +{ + u32 val = 0; + + val = phytium_ddma_ioread32(ddma, DMA_STAT); + + return val; +} + +static void phytium_chan_irq_disable + (struct phytium_ddma_chan *chan) +{ + u32 val = 0; + + dev_dbg(chan_to_dev(chan), "channel %d irq disable\n", chan->id); + val = phytium_ddma_ioread32(chan_to_ddma(chan), DMA_MASK_INT); + val |= DMA_INT_CHAL_EN(chan->id); + phytium_ddma_iowrite32(chan_to_ddma(chan), DMA_MASK_INT, val); +} + +static void phytium_chan_irq_enable(struct phytium_ddma_chan *chan) +{ + u32 val = 0; + + dev_dbg(chan_to_dev(chan), "channel %d irq enable\n", chan->id); + val = phytium_ddma_ioread32(chan_to_ddma(chan), DMA_MASK_INT); + val &= ~DMA_INT_CHAL_EN(chan->id); + phytium_ddma_iowrite32(chan_to_ddma(chan), DMA_MASK_INT, val); +} + +static void phytium_chan_irq_clear(struct phytium_ddma_chan *chan) +{ + u32 val = 0; + + dev_dbg(chan_to_dev(chan), "channel %d irq clear\n", chan->id); + val = DMA_STAT_CHAL(chan->id); + phytium_ddma_iowrite32(chan_to_ddma(chan), DMA_STAT, val); +} + +static int phytium_chan_disable(struct phytium_ddma_chan *chan) +{ + u32 val = 0; + int ret = 0; + u32 value1; + + dev_dbg(chan_to_dev(chan), "channel %d disable\n", chan->id); + val = phytium_chan_ioread32(chan, DMA_CHALX_CTL); + value1 = val |= DMA_CHAL_EN; + if (value1) { + val &= ~DMA_CHAL_EN; + phytium_chan_iowrite32(chan, DMA_CHALX_CTL, val); + + ret = readl_relaxed_poll_timeout_atomic + (chan->base + DMA_CHALX_CTL, val, + !(val & DMA_CHAL_EN), 0, 100000); + } + return ret; +} + +static void phytium_chan_enable(struct phytium_ddma_chan *chan) +{ + u32 val = 0; + + dev_dbg(chan_to_dev(chan), "channel %d enable\n", chan->id); + val = phytium_chan_ioread32(chan, DMA_CHALX_CTL); + val |= DMA_CHAL_EN; + phytium_chan_iowrite32(chan, DMA_CHALX_CTL, val); +} + +static bool phytium_chan_is_running(const struct phytium_ddma_chan *chan) +{ + u32 val; + + val = phytium_chan_ioread32(chan, DMA_CHALX_CTL); + + if (val & DMA_CHAL_EN) + return true; + else + return false; +} + +static void phytium_chan_reset(struct phytium_ddma_chan *chan) +{ + u32 val = 0; + + dev_dbg(chan_to_dev(chan), "channel %d reset\n", chan->id); + val = phytium_chan_ioread32(chan, DMA_CHALX_CTL); + val |= DMA_CHAL_SRST; + phytium_chan_iowrite32(chan, DMA_CHALX_CTL, val); + + udelay(10); + val &= ~DMA_CHAL_SRST; + phytium_chan_iowrite32(chan, DMA_CHALX_CTL, val); +} + +static void phytium_ddma_vdesc_free(struct virt_dma_desc *vd) +{ + kfree(to_ddma_desc(vd)); +} + +static int phytium_chan_pause(struct dma_chan *chan) +{ + struct phytium_ddma_chan *pchan = to_ddma_chan(chan); + int ret = 0; + + ret = phytium_chan_disable(pchan); + pchan->busy = false; + pchan->is_pasued = true; + + return ret; +} + +static int phytium_chan_resume(struct dma_chan *chan) +{ + struct phytium_ddma_chan *pchan = to_ddma_chan(chan); + + phytium_chan_enable(pchan); + pchan->is_pasued = false; + + return 0; +} + +static void phytium_chan_start_xfer(struct phytium_ddma_chan *chan) +{ + struct virt_dma_desc *vdesc = NULL; + struct phytium_ddma_sg_req *sg_req = NULL; + char *tmp = NULL; 
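+	/* tmp will hold the CPU-side view (phys_to_virt) of the scatter entry */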
+ int i = 0; + unsigned long flags = 0; + + /* chan first xfer settings */ + if (!chan->desc) { + vdesc = vchan_next_desc(&chan->vchan); + if (!vdesc) + return; + + list_del(&vdesc->node); + chan->desc = to_ddma_desc(vdesc); + chan->next_sg = 0; + chan->current_sg = NULL; + dev_dbg(chan_to_dev(chan), "xfer start\n"); + } + + if (chan->next_sg == chan->desc->num_sgs) + chan->next_sg = 0; + + sg_req = &chan->desc->sg_req[chan->next_sg]; + chan->current_sg = sg_req; + /* fill to 4 bytes */ + switch (sg_req->direction) { + case DMA_MEM_TO_DEV: + tmp = phys_to_virt(sg_req->mem_addr_l); + memset(chan->buf, 0, sg_req->len * 4); + for (i = 0; i < sg_req->len; i++) + chan->buf[i * 4] = tmp[i]; + break; + + case DMA_DEV_TO_MEM: + memset(chan->buf, 0, sg_req->len * 4); + break; + + default: + break; + } + + /* start transfer */ + phytium_chan_iowrite32(chan, DMA_CHALX_DDR_LWADDR, + chan->paddr & 0xFFFFFFFF); + phytium_chan_iowrite32(chan, DMA_CHALX_DDR_UPADDR, + (chan->paddr >> 32) & 0xFFFFFFFF); + phytium_chan_iowrite32(chan, DMA_CHALX_DEV_ADDR, sg_req->dev_addr); + phytium_chan_iowrite32(chan, DMA_CHALX_TS, sg_req->len * 4); + + spin_lock_irqsave(&chan_to_ddma(chan)->lock, flags); + phytium_chan_irq_enable(chan); + spin_unlock_irqrestore(&chan_to_ddma(chan)->lock, flags); + phytium_chan_enable(chan); + + chan->next_sg++; + chan->busy = true; +} + +static void phytium_chan_xfer_done(struct phytium_ddma_chan *chan) +{ + struct phytium_ddma_sg_req *sg_req = chan->current_sg; + char *tmp = NULL; + int i = 0; + + if (chan->desc) { + if (sg_req->direction == DMA_DEV_TO_MEM) { + tmp = phys_to_virt(sg_req->mem_addr_l); + for (i = 0; i < sg_req->len; i++) + tmp[i] = chan->buf[i * 4]; + } + + chan->busy = false; + if (chan->next_sg == chan->desc->num_sgs) { + dev_dbg(chan_to_dev(chan), "xfer complete\n"); + vchan_cookie_complete(&chan->desc->vdesc); + chan->desc = NULL; + chan->current_sg = NULL; + } + phytium_chan_disable(chan); + phytium_chan_irq_clear(chan); + phytium_chan_start_xfer(chan); + } +} + +static void phytium_dma_hw_init(struct phytium_ddma_device *ddma) +{ + u32 i = 0; + int ret = 0; + + phytium_ddma_disable(ddma); + phytium_ddma_reset(ddma); + phytium_ddma_irq_enable(ddma); + phytium_ddma_enable(ddma); + + for (i = 0; i < ddma->dma_channels; i++) { + phytium_chan_irq_disable(&ddma->chan[i]); + ret = phytium_chan_disable(&ddma->chan[i]); + if (ret) + dev_err(ddma->dev, "can't disable channel %d\n", i); + + } +} + +static size_t phytium_ddma_desc_residue(struct phytium_ddma_chan *chan) +{ + u32 trans_cnt = 0; + u32 residue = 0; + int i = 0; + + trans_cnt = phytium_chan_ioread32(chan, DMA_CHALX_TRANS_CNT); + residue = chan->current_sg->len - trans_cnt; + + for (i = chan->next_sg; i < chan->desc->num_sgs; i++) + residue += chan->desc->sg_req[i].len; + + return residue; +} + +static enum dma_status phytium_ddma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct phytium_ddma_chan *pchan = to_ddma_chan(chan); + struct virt_dma_desc *vd = NULL; + enum dma_status ret = 0; + unsigned long flags = 0; + size_t residue = 0; + + ret = dma_cookie_status(chan, cookie, txstate); + if ((ret == DMA_COMPLETE) || !txstate) + return ret; + + spin_lock_irqsave(&pchan->vchan.lock, flags); + vd = vchan_find_desc(&pchan->vchan, cookie); + if (pchan->desc && cookie == pchan->desc->vdesc.tx.cookie) + residue = phytium_ddma_desc_residue(pchan); + + dma_set_residue(txstate, residue); + spin_unlock_irqrestore(&pchan->vchan.lock, flags); + + if (pchan->is_pasued && ret == 
DMA_IN_PROGRESS) + ret = DMA_PAUSED; + + return ret; +} + +static void phytium_ddma_issue_pending(struct dma_chan *chan) +{ + struct phytium_ddma_chan *pchan = to_ddma_chan(chan); + unsigned long flags = 0; + + spin_lock_irqsave(&pchan->vchan.lock, flags); + + if (vchan_issue_pending(&pchan->vchan) && !pchan->desc && !pchan->busy) + phytium_chan_start_xfer(pchan); + + spin_unlock_irqrestore(&pchan->vchan.lock, flags); +} + +static int phytium_ddma_terminate_all(struct dma_chan *chan) +{ + struct phytium_ddma_chan *pchan = to_ddma_chan(chan); + unsigned long flags = 0; + LIST_HEAD(head); + + spin_lock_irqsave(&pchan->vchan.lock, flags); + if (pchan->desc) { + vchan_terminate_vdesc(&pchan->desc->vdesc); + if (pchan->busy) { + u32 tmp_ctl, timeout; + + phytium_chan_disable(pchan); + /* save some registers, reset will clear it */ + timeout = phytium_chan_ioread32(pchan, + DMA_CHALX_TIMEOUT_CNT); + tmp_ctl = phytium_chan_ioread32(pchan, + DMA_CHALX_CTL); + spin_lock(&chan_to_ddma(pchan)->lock); + phytium_chan_irq_disable(pchan); + spin_unlock(&chan_to_ddma(pchan)->lock); + /* need reset when terminate */ + phytium_chan_reset(pchan); + phytium_chan_irq_clear(pchan); + /* recover it */ + phytium_chan_iowrite32(pchan, + DMA_CHALX_CTL, tmp_ctl); + phytium_chan_iowrite32(pchan, + DMA_CHALX_TIMEOUT_CNT, timeout); + pchan->busy = false; + } + pchan->desc = NULL; + } + + vchan_get_all_descriptors(&pchan->vchan, &head); + spin_unlock_irqrestore(&pchan->vchan.lock, flags); + vchan_dma_desc_free_list(&pchan->vchan, &head); + + return 0; +} + +static int phytium_ddma_alloc_chan_resources(struct dma_chan *chan) +{ + struct phytium_ddma_device *ddma = to_ddma_device(chan); + struct phytium_ddma_chan *pchan = to_ddma_chan(chan); + u32 bind_status = 0; + int ret = 0; + unsigned long flags = 0; + + bind_status = phytium_ddma_ioread32(ddma, DMA_CHAL_BIND); + + if ((pchan->is_used) || (bind_status & BIT(pchan->id))) { + dev_err(ddma->dev, "channel %d already used\n", pchan->id); + ret = -EBUSY; + goto out; + } + + /* prepare channel */ + ret = phytium_chan_disable(pchan); + if (ret) { + dev_err(ddma->dev, "can't disable channel %d\n", pchan->id); + goto out; + } + phytium_chan_reset(pchan); + phytium_chan_irq_clear(pchan); + + /* channel bind */ + spin_lock_irqsave(&chan_to_ddma(pchan)->lock, flags); + bind_status |= BIT(pchan->id); + phytium_ddma_iowrite32(ddma, DMA_CHAL_BIND, bind_status); + pchan->is_used = true; + spin_unlock_irqrestore(&chan_to_ddma(pchan)->lock, flags); + + /* alloc dma memory */ + pchan->buf = dma_alloc_coherent(ddma->dev, 4 * PAGE_SIZE, + &pchan->paddr, GFP_KERNEL); + if (!pchan->buf) { + ret = -EBUSY; + dev_err(ddma->dev, "failed to alloc dma memory\n"); + } + + dev_info(ddma->dev, "alloc channel %d\n", pchan->id); + +out: + return ret; +} + +static void phytium_ddma_free_chan_resources(struct dma_chan *chan) +{ + struct phytium_ddma_device *ddma = to_ddma_device(chan); + struct phytium_ddma_chan *pchan = to_ddma_chan(chan); + u32 bind_status = 0; + unsigned long flags = 0; + + if (!pchan->is_used) + return; + + dev_dbg(ddma->dev, "free channel %d\n", pchan->id); + spin_lock_irqsave(&chan_to_ddma(pchan)->lock, flags); + bind_status = phytium_ddma_ioread32(ddma, DMA_CHAL_BIND); + bind_status &= ~BIT(pchan->id); + phytium_ddma_iowrite32(ddma, DMA_CHAL_BIND, bind_status); + spin_unlock_irqrestore(&chan_to_ddma(pchan)->lock, flags); + + phytium_chan_disable(pchan); + + spin_lock_irqsave(&chan_to_ddma(pchan)->lock, flags); + phytium_chan_irq_disable(pchan); + 
spin_unlock_irqrestore(&chan_to_ddma(pchan)->lock, flags); + + vchan_free_chan_resources(to_virt_chan(chan)); + pchan->is_used = false; + + if (pchan->buf) + dma_free_coherent(ddma->dev, 4 * PAGE_SIZE, + pchan->buf, pchan->paddr); +} + +static int phytium_ddma_slave_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct phytium_ddma_chan *pchan = to_ddma_chan(chan); + u32 chal_cfg = 0; + u32 req_mode = 0; + const u32 timeout = 0xffff; + unsigned long flag = 0; + + /* Check if chan will be configured for slave transfers */ + if (!is_slave_direction(config->direction)) + return -EINVAL; + + memcpy(&pchan->dma_config, config, sizeof(*config)); + + /* set channel config reg */ + spin_lock_irqsave(&chan_to_ddma(pchan)->lock, flag); + if (pchan->id > 3) { + chal_cfg = phytium_ddma_ioread32(chan_to_ddma(pchan), + DMA_CHAL_CFG_H); + chal_cfg &= ~(0xFF << ((pchan->id - 4) * 8)); + chal_cfg |= DMA_CHAL_SEL((pchan->id - 4), pchan->request_line); + chal_cfg |= DMA_CHAL_SEL_EN(pchan->id - 4); + phytium_ddma_iowrite32(chan_to_ddma(pchan), + DMA_CHAL_CFG_H, chal_cfg); + } else { + chal_cfg = phytium_ddma_ioread32(chan_to_ddma(pchan), + DMA_CHAL_CFG_L); + chal_cfg &= ~(0xFF << (pchan->id * 8)); + chal_cfg |= DMA_CHAL_SEL((pchan->id), pchan->request_line); + chal_cfg |= DMA_CHAL_SEL_EN(pchan->id); + phytium_ddma_iowrite32(chan_to_ddma(pchan), + DMA_CHAL_CFG_L, chal_cfg); + } + spin_unlock_irqrestore(&chan_to_ddma(pchan)->lock, flag); + + /* set channel mode */ + req_mode = + (config->direction == DMA_DEV_TO_MEM) ? DMA_RX_REQ : DMA_TX_REQ; + phytium_chan_iowrite32(pchan, DMA_CHALX_CTL, req_mode << 2); + + /* set channel timeout */ + phytium_chan_iowrite32(pchan, DMA_CHALX_TIMEOUT_CNT, + timeout | DMA_CHAL_TIMEOUT_EN); + + return 0; +} + +static struct dma_async_tx_descriptor *phytium_ddma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, + u32 sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct phytium_ddma_device *ddma = to_ddma_device(chan); + struct phytium_ddma_chan *pchan = to_ddma_chan(chan); + struct dma_slave_config *sconfig = &pchan->dma_config; + struct phytium_ddma_desc *desc = NULL; + struct scatterlist *sg = NULL; + int i = 0; + char *tmp; + + if (unlikely(!is_slave_direction(direction))) { + dev_err(ddma->dev, "invalid dma direction\n"); + return NULL; + } + + if (unlikely(sg_len < 1)) { + dev_err(ddma->dev, "invalid segment length: %d\n", sg_len); + return NULL; + } + + desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT); + if (!desc) + return NULL; + + /* set sg list */ + for_each_sg(sgl, sg, sg_len, i) { + tmp = phys_to_virt(sg_dma_address(sg)); + desc->sg_req[i].direction = direction; + + switch (direction) { + case DMA_MEM_TO_DEV: + desc->sg_req[i].len = sg_dma_len(sg); + desc->sg_req[i].mem_addr_l = sg_dma_address(sg) & 0xFFFFFFFF; + desc->sg_req[i].mem_addr_h = + (sg_dma_address(sg) >> 32) & 0xFFFFFFFF; + desc->sg_req[i].dev_addr = sconfig->dst_addr & 0xFFFFFFFF; + break; + + case DMA_DEV_TO_MEM: + desc->sg_req[i].len = sg_dma_len(sg); + desc->sg_req[i].mem_addr_l = sg_dma_address(sg) & 0xFFFFFFFF; + desc->sg_req[i].mem_addr_h = + (sg_dma_address(sg) >> 32) & 0xFFFFFFFF; + desc->sg_req[i].dev_addr = sconfig->src_addr & 0xFFFFFFFF; + break; + + default: + return NULL; + } + } + + desc->num_sgs = sg_len; + + return vchan_tx_prep(&pchan->vchan, &desc->vdesc, flags); +} + +static irqreturn_t phytium_dma_interrupt(int irq, void *dev_id) +{ + struct phytium_ddma_device *ddma = dev_id; + struct 
phytium_ddma_chan *chan;
+	u32 irq_status = 0;
+	u32 i = 0;
+
+	phytium_ddma_irq_disable(ddma);
+
+	irq_status = phytium_ddma_irq_read(ddma);
+
+	/* Poll, clear and process every channel interrupt status */
+	for (i = 0; i < ddma->dma_channels; i++) {
+		if (!(irq_status & BIT(i * 4)))
+			continue;
+
+		chan = &ddma->chan[i];
+		phytium_chan_xfer_done(chan);
+	}
+
+	phytium_ddma_irq_enable(ddma);
+
+	return IRQ_HANDLED;
+}
+
+static struct dma_chan *phytium_ddma_of_xlate(
+	struct of_phandle_args *dma_spec,
+	struct of_dma *ofdma)
+{
+	struct phytium_ddma_device *ddma = ofdma->of_dma_data;
+	struct device *dev = ddma->dev;
+	struct phytium_ddma_chan *chan = NULL;
+	struct dma_chan *c = NULL;
+	u32 channel_id = 0;
+
+	channel_id = dma_spec->args[0];
+
+	/* valid channel ids run from 0 to dma_channels - 1 */
+	if (channel_id >= ddma->dma_channels) {
+		dev_err(dev, "bad channel %d\n", channel_id);
+		return NULL;
+	}
+
+	chan = &ddma->chan[channel_id];
+	chan->request_line = dma_spec->args[1];
+	c = dma_get_slave_channel(&chan->vchan.chan);
+	if (!c) {
+		dev_err(dev, "no more channels available\n");
+		return NULL;
+	}
+
+	return c;
+}
+
+static int phytium_ddma_probe(struct platform_device *pdev)
+{
+	struct phytium_ddma_device *ddma;
+	struct dma_device *dma_dev;
+	struct resource *mem;
+	u32 i = 0;
+	int ret = 0;
+	u32 nr_channels = 0;
+
+	ddma = devm_kzalloc(&pdev->dev, sizeof(*ddma), GFP_KERNEL);
+	if (!ddma) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	dma_dev = &ddma->dma_dev;
+	ddma->dev = &pdev->dev;
+
+	spin_lock_init(&ddma->lock);
+
+	ddma->irq = platform_get_irq(pdev, 0);
+	if (ddma->irq < 0) {
+		dev_err(&pdev->dev, "no irq resource\n");
+		ret = ddma->irq;
+		goto out;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ddma->base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(ddma->base)) {
+		dev_err(&pdev->dev, "failed to map resource address\n");
+		ret = PTR_ERR(ddma->base);
+		goto out;
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node, "dma-channels",
+				   &nr_channels);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't get the number of dma channels: %d\n",
+			ret);
+		goto out;
+	}
+
+	if (nr_channels > DDMA_MAX_NR_PCHANNELS) {
+		dev_warn(&pdev->dev, "over the max number of channels\n");
+		nr_channels = DDMA_MAX_NR_PCHANNELS;
+	}
+
+	/* use the (possibly clamped) channel count from the device tree */
+	ddma->dma_channels = nr_channels;
+
+	ret = devm_request_irq(&pdev->dev, ddma->irq, phytium_dma_interrupt,
+			       IRQF_SHARED, dev_name(&pdev->dev), ddma);
+	if (ret) {
+		dev_err(&pdev->dev, "could not request irq %d\n", ddma->irq);
+		goto out;
+	}
+
+	/* Set capabilities */
+	dma_cap_set(DMA_SLAVE, ddma->dma_dev.cap_mask);
+
+	/* DMA capabilities */
+	dma_dev->dev = ddma->dev;
+	dma_dev->chancnt = ddma->dma_channels;
+	dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+	/* function callbacks */
+	dma_dev->device_tx_status = phytium_ddma_tx_status;
+	dma_dev->device_issue_pending = phytium_ddma_issue_pending;
+	dma_dev->device_terminate_all = phytium_ddma_terminate_all;
+	dma_dev->device_alloc_chan_resources = phytium_ddma_alloc_chan_resources;
+	dma_dev->device_free_chan_resources = phytium_ddma_free_chan_resources;
+	dma_dev->device_config = phytium_ddma_slave_config;
+	dma_dev->device_prep_slave_sg = phytium_ddma_prep_slave_sg;
+	dma_dev->device_pause = phytium_chan_pause;
+	dma_dev->device_resume = phytium_chan_resume;
+
+	/* init dma 
physical channels */ + INIT_LIST_HEAD(&dma_dev->channels); + ddma->chan = devm_kcalloc(ddma->dev, ddma->dma_channels, + sizeof(*ddma->chan), GFP_KERNEL); + if (!ddma->chan) { + ret = -ENOMEM; + goto out; + } + for (i = 0; i < ddma->dma_channels; i++) { + ddma->chan[i].id = i; + ddma->chan[i].buf = NULL; + ddma->chan[i].base = ddma->base + DMA_REG_LEN + i * CHAN_REG_LEN; + ddma->chan[i].vchan.desc_free = phytium_ddma_vdesc_free; + ddma->chan[i].desc = NULL; + ddma->chan[i].current_sg = NULL; + vchan_init(&ddma->chan[i].vchan, dma_dev); + } + + phytium_dma_hw_init(ddma); + + ret = dma_async_device_register(dma_dev); + if (ret) + goto out; + + ret = of_dma_controller_register(pdev->dev.of_node, + phytium_ddma_of_xlate, ddma); + if (ret < 0) { + dev_err(&pdev->dev, "phytium ddma of register failed %d\n", + ret); + goto err_unregister; + } + + platform_set_drvdata(pdev, ddma); + dev_info(ddma->dev, "phytium DDMA Controller registered\n"); + + return 0; + +err_unregister: + dma_async_device_unregister(dma_dev); + +out: + return ret; +} + +static void phytium_ddma_chan_remove(struct phytium_ddma_chan *chan) +{ + phytium_chan_irq_disable(chan); + phytium_chan_disable(chan); + + if (chan->buf) + dma_free_coherent(chan_to_dev(chan), 4 * PAGE_SIZE, chan->buf, + chan->paddr); + + tasklet_kill(&chan->vchan.task); + list_del(&chan->vchan.chan.device_node); +} + +static void phytium_ddma_remove(struct platform_device *pdev) +{ + struct phytium_ddma_device *ddma = platform_get_drvdata(pdev); + struct phytium_ddma_chan *chan = NULL; + int i = 0; + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&ddma->dma_dev); + + for (i = 0; i < ddma->dma_channels; i++) { + chan = &ddma->chan[i]; + phytium_ddma_chan_remove(chan); + } + + phytium_ddma_irq_disable(ddma); + phytium_ddma_disable(ddma); +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_ddma_suspend(struct device *dev) +{ + struct phytium_ddma_device *ddma = dev_get_drvdata(dev); + int i = 0; + + for (i = 0; i < ddma->dma_channels; i++) { + if (phytium_chan_is_running(&ddma->chan[i])) { + dev_warn(dev, "suspend is prevented by channel %d\n", i); + return -EBUSY; + } + } + + ddma->dma_reg.dma_chal_cfg0 = + phytium_ddma_ioread32(ddma, DMA_CHAL_CFG_L); + ddma->dma_reg.dma_chal_bind = + phytium_ddma_ioread32(ddma, DMA_CHAL_BIND); + ddma->dma_reg.dma_chal_cfg1 = + phytium_ddma_ioread32(ddma, DMA_CHAL_CFG_H); + + for (i = 0; i < ddma->dma_channels; i++) { + struct phytium_ddma_chan *chan = &ddma->chan[i]; + + if (!chan->is_used) + continue; + ddma->dma_chal_reg[i].dma_chalx_ctl = + phytium_chan_ioread32(chan, DMA_CHALX_CTL); + ddma->dma_chal_reg[i].dma_chalx_timeout_cnt = + phytium_chan_ioread32(chan, DMA_CHALX_TIMEOUT_CNT); + } + + phytium_ddma_irq_disable(ddma); + phytium_ddma_disable(ddma); + pm_runtime_force_suspend(dev); + + return 0; +} + +static int phytium_ddma_resume(struct device *dev) +{ + struct phytium_ddma_device *ddma = dev_get_drvdata(dev); + u32 i = 0; + int ret = 0; + + phytium_dma_hw_init(ddma); + phytium_ddma_iowrite32(ddma, DMA_CHAL_CFG_L, + ddma->dma_reg.dma_chal_cfg0); + phytium_ddma_iowrite32(ddma, DMA_CHAL_BIND, + ddma->dma_reg.dma_chal_bind); + phytium_ddma_iowrite32(ddma, DMA_CHAL_CFG_H, + ddma->dma_reg.dma_chal_cfg1); + + for (i = 0; i < ddma->dma_channels; i++) { + struct phytium_ddma_chan *chan = &ddma->chan[i]; + + if (!chan->is_used) + continue; + phytium_chan_iowrite32(chan, DMA_CHALX_CTL, + ddma->dma_chal_reg[i].dma_chalx_ctl); + phytium_chan_iowrite32(chan, DMA_CHALX_TIMEOUT_CNT, + 
ddma->dma_chal_reg[i].dma_chalx_timeout_cnt);
+	}
+
+	ret = pm_runtime_force_resume(dev);
+
+	return ret;
+}
+#endif
+
+static const struct dev_pm_ops phytium_ddma_pm_ops = {
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(phytium_ddma_suspend,
+				     phytium_ddma_resume)
+};
+
+static const struct of_device_id phytium_dma_of_id_table[] = {
+	{ .compatible = "phytium,ddma" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, phytium_dma_of_id_table);
+
+static struct platform_driver phytium_driver = {
+	.probe = phytium_ddma_probe,
+	.remove = phytium_ddma_remove,
+	.driver = {
+		.name = "phytium-ddma",
+		.of_match_table = of_match_ptr(phytium_dma_of_id_table),
+		.pm = &phytium_ddma_pm_ops,
+	},
+};
+
+module_platform_driver(phytium_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Phytium DDMA Controller platform driver");
+MODULE_AUTHOR("HuangJie ");
diff --git a/drivers/dma/phytium/phytium-ddmac.h b/drivers/dma/phytium/phytium-ddmac.h
new file mode 100644
index 0000000000000..d88fda6b0844d
--- /dev/null
+++ b/drivers/dma/phytium/phytium-ddmac.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL */
+/*
+ * Phytium Device DMA Controller driver.
+ *
+ * Copyright (c) 2019-2023, Phytium Technology Co., Ltd
+ *
+ */
+
+#ifndef _PHYTIUM_DDMAC_H
+#define _PHYTIUM_DDMAC_H
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include "../virt-dma.h"
+
+/* the number of physical channels */
+#define DDMA_MAX_NR_PCHANNELS	8
+
+#define DMAC_MAX_MASTERS	1
+#define DMAC_MAX_BLK_SIZE	PAGE_SIZE
+
+#define CHAN_REG_LEN		0x40
+#define DMA_REG_LEN		0x40
+
+#define DMA_CTL			0x00
+#define DMA_CHAL_CFG_L		0x04
+#define DMA_CHAL_CFG_H		0x28
+#define DMA_STAT		0x08
+#define DMA_MASK_INT		0x0C
+#define DMA_CHAL_BIND		0x20
+#define DMA_GCAP		0x24
+
+#define DMA_CHALX_DDR_UPADDR	0x00
+#define DMA_CHALX_DDR_LWADDR	0x04
+#define DMA_CHALX_DEV_ADDR	0x08
+#define DMA_CHALX_TS		0x0C
+#define DMA_CHALX_CRT_UPADDR	0x10
+#define DMA_CHALX_CRT_LWADDR	0x14
+#define DMA_CHALX_CTL		0x18
+#define DMA_CHALX_STS		0x1C
+#define DMA_CHALX_TIMEOUT_CNT	0x20
+#define DMA_CHALX_TRANS_CNT	0x24
+
+#define DMA_CTL_EN		BIT(0)
+#define DMA_CTL_SRST		BIT(1)
+
+#define DMA_CHAL_SEL(id, x)	(min_t(unsigned int, x, 0x7F) << ((id) * 8))
+#define DMA_CHAL_SEL_EN(id)	BIT((id) * 8 + 7)
+
+#define DMA_STAT_CHAL(id)	BIT((id) * 4)
+
+#define DMA_INT_EN		BIT(31)
+#define DMA_INT_CHAL_EN(id)	BIT(id)
+
+#define DMA_CHAL_EN		BIT(0)
+#define DMA_CHAL_SRST		BIT(1)
+#define DMA_CHAL_MODE		BIT(2)
+
+#define DMA_RX_REQ		1
+#define DMA_TX_REQ		0
+
+#define DMA_CHAL_TIMEOUT_EN	BIT(31)
+#define DMA_CHAL_TIMEOUT_CNT(x)	min_t(unsigned int, x, 0xFFFFF)
+
+#define DMA_TIMEOUT		10
+
+/**
+ * struct phytium_ddma_sg_req - scatter-gather list data info
+ * @len: number of bytes to transfer
+ * @mem_addr_l: bus address, low 32 bits
+ * @mem_addr_h: bus address, high 32 bits
+ * @dev_addr: DMA consumer data register address
+ * @direction: dma transfer direction
+ */
+struct phytium_ddma_sg_req {
+	u32 len;
+	u32 mem_addr_l;
+	u32 mem_addr_h;
+	u32 dev_addr;
+	enum dma_transfer_direction direction;
+};
+
+/**
+ * struct phytium_ddma_desc -
+ * the struct holding info describing a ddma request descriptor
+ * @vdesc: ddma request descriptor
+ * @num_sgs: the size of the scatter-gather list
+ * @sg_req: the saved scatter-gather list entries
+ */
+struct phytium_ddma_desc {
+	struct virt_dma_desc vdesc;
+	u32 num_sgs;
+	struct phytium_ddma_sg_req sg_req[];
+};
+
+/**
+ * struct phytium_ddma_chan -
+ * the struct holding info describing a dma channel
+ * @vchan: virtual dma channel
+ * @base: the mapped register I/O of the dma physical channel
+ * 
@id: the id of the ddma physical channel
+ * @request_line: the request line of the ddma channel
+ * @desc: the transfer request descriptor
+ * @dma_config: config parameters for the dma channel
+ * @busy: the channel busy flag, set while the channel is transferring
+ * @is_used: the channel bind flag, set once the channel is bound
+ * @is_pasued: the channel pause flag, set while the channel is paused
+ * @next_sg: the index of the next scatter-gather entry
+ * @current_sg: the scatter-gather entry currently being transferred
+ * @paddr: DMA address of the bounce buffer used to align data between
+ *	the dma provider and consumer
+ * @buf: CPU address of the bounce buffer
+ */
+struct phytium_ddma_chan {
+	struct virt_dma_chan vchan;
+	void __iomem *base;
+	u32 id;
+	u32 request_line;
+	struct phytium_ddma_desc *desc;
+	struct dma_slave_config dma_config;
+	bool busy;
+	bool is_used;
+	bool is_pasued;
+	u32 next_sg;
+	struct phytium_ddma_sg_req *current_sg;
+	dma_addr_t paddr;
+	char *buf;
+};
+
+struct global_reg {
+	u32 dma_chal_cfg0;
+	u32 dma_chal_bind;
+	u32 dma_chal_cfg1;
+};
+
+struct channel_reg {
+	u32 dma_chalx_ctl;
+	u32 dma_chalx_timeout_cnt;
+};
+
+/**
+ * struct phytium_ddma_device -
+ * the struct holding info describing the DDMA device
+ * @dma_dev: an instance of struct dma_device
+ * @dev: the device of this DDMA controller
+ * @irq: the irq that the DDMA uses
+ * @base: the mapped register I/O base of this DDMA
+ * @core_clk: DDMA clock
+ * @dma_channels: the number of DDMA physical channels
+ * @chan: the physical channels of the DDMA
+ * @lock: spinlock taken when setting global registers
+ * @dma_reg: global register values to restore after resume
+ * @dma_chal_reg: per-channel register values to restore after resume
+ */
+struct phytium_ddma_device {
+	struct dma_device dma_dev;
+	struct device *dev;
+	int irq;
+	void __iomem *base;
+	struct clk *core_clk;
+	u32 dma_channels;
+	struct phytium_ddma_chan *chan;
+	spinlock_t lock;
+	struct global_reg dma_reg;
+	struct channel_reg dma_chal_reg[DDMA_MAX_NR_PCHANNELS];
+};
+
+#endif /* _PHYTIUM_DDMAC_H */
diff --git a/drivers/firmware/arm_scmi/transports/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c
index ae0f67e6cc45f..cc522e61d81f1 100644
--- a/drivers/firmware/arm_scmi/transports/mailbox.c
+++ b/drivers/firmware/arm_scmi/transports/mailbox.c
@@ -187,6 +187,7 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
 	struct scmi_mailbox *smbox;
 	int ret, a2p_rx_chan, p2a_chan, p2a_rx_chan;
 	struct mbox_client *cl;
+	struct of_phandle_args args;
 
 	ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan, &p2a_rx_chan);
 	if (ret)
@@ -241,6 +242,16 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
 		}
 	}
 
+	ret = of_parse_phandle_with_args(cdev->of_node, "mboxes",
+					 "#mbox-cells", 1, &args);
+	if (ret) {
+		dev_err(cdev, "failed to get SCMI %s mailbox\n", desc);
+		return ret;
+	}
+
+	if (of_device_is_compatible(args.np, "phytium,mbox"))
+		cinfo->no_completion_irq = true;
+
 	cinfo->transport_info = smbox;
 	smbox->cinfo = cinfo;
 	mutex_init(&smbox->chan_lock);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index e053524c5e35f..d5a01d6ed496c 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -124,6 +124,10 @@ config GPIO_SWNODE_UNDEFINED
 
 # put drivers in the right section, in alphabetical order
 
+# This symbol is selected by both MMIO and PCI expanders
+config GPIO_PHYTIUM_CORE
+	tristate
+
 # This symbol is selected by both I2C and SPI expanders
 config GPIO_MAX730X
 	tristate
@@ -562,6 +566,18 @@ config GPIO_OMAP
 	help
 	  Say yes here to enable GPIO support for TI OMAP SoCs.
 
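Before the GPIO additions continue, here is a consumer-side sketch of the DDMA interface introduced above. It is illustrative only: it uses nothing but the generic dmaengine client API, and the "rx" channel name, the fifo_addr parameter, and the error handling are hypothetical placeholders rather than anything defined by this patch. The buffer is assumed to be DMA-mapped already, and the slave config uses the only bus width this controller advertises (4 bytes).

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_start_rx_dma(struct device *dev, dma_addr_t buf,
				size_t len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,	/* checked by device_config */
		.src_addr = fifo_addr,		/* device data register */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;
	struct dma_chan *chan;

	/* Resolved through phytium_ddma_of_xlate(): the two specifier
	 * cells give the channel id and the request line. */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	if (dmaengine_slave_config(chan, &cfg))
		goto err;

	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		goto err;

	dmaengine_submit(desc);
	/* Kicks phytium_ddma_issue_pending() and starts the transfer. */
	dma_async_issue_pending(chan);
	return 0;

err:
	dma_release_channel(chan);
	return -EIO;
}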
+config GPIO_PHYTIUM_PLAT + tristate "Phytium GPIO Platform support" + default y if ARCH_PHYTIUM + depends on ARM64 + select GPIO_PHYTIUM_CORE + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + select GPIOLIB_IRQCHIP + help + Say yes here to support the on-chip GPIO controller for the + Phytium SoC family. + config GPIO_PL061 tristate "PrimeCell PL061 GPIO support" depends on ARM_AMBA || COMPILE_TEST @@ -1856,6 +1872,19 @@ config GPIO_PCIE_IDIO_24 Input filter control is not supported by this driver, and the input filters are deactivated by this driver. +config GPIO_PHYTIUM_PCI + tristate "Phytium GPIO PCI support" + select GPIO_PHYTIUM_CORE + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + select GPIOLIB_IRQCHIP + help + Say Y here to support Phytium PCI GPIO controller on Px210 chipset. + An interrupt is generated when any of the inputs change state + (low to high or high to low). + + This driver can be used for Phytium Px210. + config GPIO_RDC321X tristate "RDC R-321x GPIO support" select MFD_CORE diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index ec296fa14bfdb..53e8537750d1f 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -141,6 +141,9 @@ obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o obj-$(CONFIG_GPIO_PCH) += gpio-pch.o obj-$(CONFIG_GPIO_PCIE_IDIO_24) += gpio-pcie-idio-24.o obj-$(CONFIG_GPIO_PCI_IDIO_16) += gpio-pci-idio-16.o +obj-$(CONFIG_GPIO_PHYTIUM_CORE) += gpio-phytium-core.o +obj-$(CONFIG_GPIO_PHYTIUM_PCI) += gpio-phytium-pci.o +obj-$(CONFIG_GPIO_PHYTIUM_PLAT) += gpio-phytium-platform.o obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o obj-$(CONFIG_GPIO_PMIC_EIC_SPRD) += gpio-pmic-eic-sprd.o diff --git a/drivers/gpio/gpio-phytium-core.c b/drivers/gpio/gpio-phytium-core.c new file mode 100644 index 0000000000000..a221f8c880805 --- /dev/null +++ b/drivers/gpio/gpio-phytium-core.c @@ -0,0 +1,379 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019-2023, Phytium Technology Co., Ltd. 
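+ *
+ * Core library for the Phytium GPIO blocks: the accessors and irq_chip
+ * callbacks defined here are exported rather than registered directly,
+ * and are shared by the MMIO platform and PCI front ends added below.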
+ */ + +#include +#include +#include +#include +#include +#include + +#include "gpio-phytium-core.h" + +static int get_pin_location(struct phytium_gpio *gpio, unsigned int offset, + struct pin_loc *pl) +{ + int ret; + + if (offset < gpio->ngpio[0]) { + pl->port = 0; + pl->offset = offset; + ret = 0; + } else if (offset < (gpio->ngpio[0] + gpio->ngpio[1])) { + pl->port = 1; + pl->offset = offset - gpio->ngpio[0]; + ret = 0; + } else { + ret = -EINVAL; + } + + return ret; +} + +static void phytium_gpio_toggle_trigger(struct phytium_gpio *gpio, + unsigned int offset) +{ + struct gpio_chip *gc; + u32 pol; + int val; + + /* Only port A can provide interrupt source */ + if (offset >= gpio->ngpio[0]) + return; + + gc = &gpio->gc; + + pol = readl(gpio->regs + GPIO_INT_POLARITY); + /* Just read the current value right out of the data register */ + val = gc->get(gc, offset); + if (val) + pol &= ~BIT(offset); + else + pol |= BIT(offset); + + writel(pol, gpio->regs + GPIO_INT_POLARITY); +} + +int phytium_gpio_get(struct gpio_chip *gc, unsigned int offset) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + void __iomem *dat; + + if (get_pin_location(gpio, offset, &loc)) + return -EINVAL; + + dat = gpio->regs + GPIO_EXT_PORTA + (loc.port * GPIO_PORT_STRIDE); + + return !!(readl(dat) & BIT(loc.offset)); +} +EXPORT_SYMBOL_GPL(phytium_gpio_get); + +int phytium_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + void __iomem *dr; + unsigned long flags; + u32 mask; + + if (get_pin_location(gpio, offset, &loc)) + return 0; + dr = gpio->regs + GPIO_SWPORTA_DR + (loc.port * GPIO_PORT_STRIDE); + + raw_spin_lock_irqsave(&gpio->lock, flags); + + if (value) + mask = readl(dr) | BIT(loc.offset); + else + mask = readl(dr) & ~BIT(loc.offset); + + writel(mask, dr); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + return 0; +} +EXPORT_SYMBOL_GPL(phytium_gpio_set); + +int phytium_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + unsigned long flags; + void __iomem *ddr; + + if (get_pin_location(gpio, offset, &loc)) + return -EINVAL; + ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE); + + raw_spin_lock_irqsave(&gpio->lock, flags); + + writel(readl(ddr) & ~(BIT(loc.offset)), ddr); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_gpio_direction_input); + +int phytium_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, + int value) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + unsigned long flags; + void __iomem *ddr; + + if (get_pin_location(gpio, offset, &loc)) + return -EINVAL; + ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE); + + raw_spin_lock_irqsave(&gpio->lock, flags); + + writel(readl(ddr) | BIT(loc.offset), ddr); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + phytium_gpio_set(gc, offset, value); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_gpio_direction_output); + +void phytium_gpio_irq_ack(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + u32 val = BIT(irqd_to_hwirq(d)); + + raw_spin_lock(&gpio->lock); + + writel(val, gpio->regs + GPIO_PORTA_EOI); + + raw_spin_unlock(&gpio->lock); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_ack); + +void phytium_gpio_irq_mask(struct irq_data *d) +{ 
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + u32 val; + + /* Only port A can provide interrupt source */ + if (irqd_to_hwirq(d) >= gpio->ngpio[0]) + return; + + raw_spin_lock(&gpio->lock); + + val = readl(gpio->regs + GPIO_INTMASK); + val |= BIT(irqd_to_hwirq(d)); + writel(val, gpio->regs + GPIO_INTMASK); + + raw_spin_unlock(&gpio->lock); + + gpiochip_disable_irq(gc, irqd_to_hwirq(d)); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_mask); + +void phytium_gpio_irq_unmask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + u32 val; + + /* Only port A can provide interrupt source */ + if (irqd_to_hwirq(d) >= gpio->ngpio[0]) + return; + + gpiochip_enable_irq(gc, irqd_to_hwirq(d)); + + raw_spin_lock(&gpio->lock); + + val = readl(gpio->regs + GPIO_INTMASK); + val &= ~BIT(irqd_to_hwirq(d)); + writel(val, gpio->regs + GPIO_INTMASK); + + raw_spin_unlock(&gpio->lock); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_unmask); + +int phytium_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + int hwirq = irqd_to_hwirq(d); + unsigned long flags, lvl, pol; + + if (hwirq < 0 || hwirq >= gpio->ngpio[0]) + return -EINVAL; + + if ((flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) && + (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))) { + dev_err(gc->parent, + "trying to configure line %d for both level and edge detection, choose one!\n", + hwirq); + return -EINVAL; + } + + raw_spin_lock_irqsave(&gpio->lock, flags); + + lvl = readl(gpio->regs + GPIO_INTTYPE_LEVEL); + pol = readl(gpio->regs + GPIO_INT_POLARITY); + + switch (flow_type) { + case IRQ_TYPE_EDGE_BOTH: + lvl |= BIT(hwirq); + phytium_gpio_toggle_trigger(gpio, hwirq); + irq_set_handler_locked(d, handle_edge_irq); + dev_dbg(gc->parent, "line %d: IRQ on both edges\n", hwirq); + break; + case IRQ_TYPE_EDGE_RISING: + lvl |= BIT(hwirq); + pol |= BIT(hwirq); + irq_set_handler_locked(d, handle_edge_irq); + dev_dbg(gc->parent, "line %d: IRQ on RISING edge\n", hwirq); + break; + case IRQ_TYPE_EDGE_FALLING: + lvl |= BIT(hwirq); + pol &= ~BIT(hwirq); + irq_set_handler_locked(d, handle_edge_irq); + dev_dbg(gc->parent, "line %d: IRQ on FALLING edge\n", hwirq); + break; + case IRQ_TYPE_LEVEL_HIGH: + lvl &= ~BIT(hwirq); + pol |= BIT(hwirq); + irq_set_handler_locked(d, handle_level_irq); + dev_dbg(gc->parent, "line %d: IRQ on HIGH level\n", hwirq); + break; + case IRQ_TYPE_LEVEL_LOW: + lvl &= ~BIT(hwirq); + pol &= ~BIT(hwirq); + irq_set_handler_locked(d, handle_level_irq); + dev_dbg(gc->parent, "line %d: IRQ on LOW level\n", hwirq); + break; + } + + writel(lvl, gpio->regs + GPIO_INTTYPE_LEVEL); + if (flow_type != IRQ_TYPE_EDGE_BOTH) + writel(pol, gpio->regs + GPIO_INT_POLARITY); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_set_type); + +void phytium_gpio_irq_enable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + unsigned long flags; + u32 val; + + /* Only port A can provide interrupt source */ + if (irqd_to_hwirq(d) >= gpio->ngpio[0]) + return; + + gpiochip_enable_irq(gc, irqd_to_hwirq(d)); + + raw_spin_lock_irqsave(&gpio->lock, flags); + + val = readl(gpio->regs + GPIO_INTEN); + val |= BIT(irqd_to_hwirq(d)); + writel(val, gpio->regs + GPIO_INTEN); + + 
raw_spin_unlock_irqrestore(&gpio->lock, flags);
+}
+EXPORT_SYMBOL_GPL(phytium_gpio_irq_enable);
+
+void phytium_gpio_irq_disable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct phytium_gpio *gpio = gpiochip_get_data(gc);
+	unsigned long flags;
+	u32 val;
+
+	/* Only port A can provide interrupt source */
+	if (irqd_to_hwirq(d) >= gpio->ngpio[0])
+		return;
+
+	raw_spin_lock_irqsave(&gpio->lock, flags);
+
+	val = readl(gpio->regs + GPIO_INTEN);
+	val &= ~BIT(irqd_to_hwirq(d));
+	writel(val, gpio->regs + GPIO_INTEN);
+
+	raw_spin_unlock_irqrestore(&gpio->lock, flags);
+
+	/* Balance the gpiochip_enable_irq() call made in the enable path. */
+	gpiochip_disable_irq(gc, irqd_to_hwirq(d));
+}
+EXPORT_SYMBOL_GPL(phytium_gpio_irq_disable);
+
+void phytium_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+
+	seq_puts(p, dev_name(gc->parent));
+}
+EXPORT_SYMBOL_GPL(phytium_gpio_irq_print_chip);
+
+void phytium_gpio_irq_handler(struct irq_desc *desc)
+{
+	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+	struct phytium_gpio *gpio = gpiochip_get_data(gc);
+	struct irq_chip *irqchip = irq_desc_get_chip(desc);
+	unsigned long pending;
+	int offset;
+
+	chained_irq_enter(irqchip, desc);
+
+	pending = readl(gpio->regs + GPIO_INTSTATUS);
+	if (pending) {
+		for_each_set_bit(offset, &pending, gpio->ngpio[0]) {
+			int gpio_irq = irq_find_mapping(gc->irq.domain,
+							offset);
+			generic_handle_irq(gpio_irq);
+
+			if ((irq_get_trigger_type(gpio_irq) &
+			     IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
+				phytium_gpio_toggle_trigger(gpio, offset);
+		}
+	}
+
+	chained_irq_exit(irqchip, desc);
+}
+EXPORT_SYMBOL_GPL(phytium_gpio_irq_handler);
+
+int phytium_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+	struct phytium_gpio *gpio = gpiochip_get_data(gc);
+	struct pin_loc loc;
+	void __iomem *ddr;
+
+	if (get_pin_location(gpio, offset, &loc))
+		return -EINVAL;
+	ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE);
+
+	return !(readl(ddr) & BIT(loc.offset));
+}
+EXPORT_SYMBOL_GPL(phytium_gpio_get_direction);
+
+int phytium_gpio_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force)
+{
+	int hwirq = irqd_to_hwirq(d);
+	struct gpio_chip *chip_data = irq_data_get_irq_chip_data(d);
+	struct irq_chip *chip = irq_get_chip(chip_data->irq.parents[hwirq]);
+	struct irq_data *data = irq_get_irq_data(chip_data->irq.parents[hwirq]);
+
+	if (chip && chip->irq_set_affinity)
+		return chip->irq_set_affinity(data, mask_val, force);
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(phytium_gpio_irq_set_affinity);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Phytium GPIO Controller core");
diff --git a/drivers/gpio/gpio-phytium-core.h b/drivers/gpio/gpio-phytium-core.h
new file mode 100644
index 0000000000000..53f4307974f11
--- /dev/null
+++ b/drivers/gpio/gpio-phytium-core.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
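+ *
+ * Pin indexing example: with ngpio = {8, 8}, global offset 10 resolves
+ * to port B, bit 2 (10 - ngpio[0]); port B registers sit
+ * GPIO_PORT_STRIDE (0x0c) bytes above their port A counterparts.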
+ */ + +#ifndef _GPIO_PHYTIUM_H +#define _GPIO_PHYTIUM_H + +#include +#include + +#include "gpiolib.h" + +#define GPIO_SWPORTA_DR 0x00 /* WR Port A Output Data Register */ +#define GPIO_SWPORTA_DDR 0x04 /* WR Port A Data Direction Register */ +#define GPIO_EXT_PORTA 0x08 /* RO Port A Input Data Register */ +#define GPIO_SWPORTB_DR 0x0c /* WR Port B Output Data Register */ +#define GPIO_SWPORTB_DDR 0x10 /* WR Port B Data Direction Register */ +#define GPIO_EXT_PORTB 0x14 /* RO Port B Input Data Register */ + +#define GPIO_INTEN 0x18 /* WR Port A Interrput Enable Register */ +#define GPIO_INTMASK 0x1c /* WR Port A Interrupt Mask Register */ +#define GPIO_INTTYPE_LEVEL 0x20 /* WR Port A Interrupt Level Register */ +#define GPIO_INT_POLARITY 0x24 /* WR Port A Interrupt Polarity Register */ +#define GPIO_INTSTATUS 0x28 /* RO Port A Interrupt Status Register */ +#define GPIO_RAW_INTSTATUS 0x2c /* RO Port A Raw Interrupt Status Register */ +#define GPIO_LS_SYNC 0x30 /* WR Level-sensitive Synchronization Enable Register */ +#define GPIO_DEBOUNCE 0x34 /* WR Debounce Enable Register */ +#define GPIO_PORTA_EOI 0x38 /* WO Port A Clear Interrupt Register */ + +#define MAX_NPORTS 2 +#define NGPIO_DEFAULT 8 +#define NGPIO_MAX 32 +#define GPIO_PORT_STRIDE (GPIO_EXT_PORTB - GPIO_EXT_PORTA) + +struct pin_loc { + unsigned int port; + unsigned int offset; +}; + +#ifdef CONFIG_PM_SLEEP +struct phytium_gpio_ctx { + u32 swporta_dr; + u32 swporta_ddr; + u32 ext_porta; + u32 swportb_dr; + u32 swportb_ddr; + u32 ext_portb; + u32 inten; + u32 intmask; + u32 inttype_level; + u32 int_polarity; + u32 intstatus; + u32 raw_intstatus; + u32 ls_sync; + u32 debounce; +}; +#endif + +struct phytium_gpio { + raw_spinlock_t lock; + void __iomem *regs; + struct gpio_chip gc; + unsigned int ngpio[2]; + int irq[32]; +#ifdef CONFIG_PM_SLEEP + struct phytium_gpio_ctx ctx; +#endif +}; + +int phytium_gpio_get(struct gpio_chip *gc, unsigned int offset); +int phytium_gpio_set(struct gpio_chip *gc, unsigned int offset, int value); + +int phytium_gpio_get_direction(struct gpio_chip *gc, unsigned int offset); +int phytium_gpio_direction_input(struct gpio_chip *gc, unsigned int offset); +int phytium_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value); + +void phytium_gpio_irq_ack(struct irq_data *d); +void phytium_gpio_irq_mask(struct irq_data *d); +void phytium_gpio_irq_unmask(struct irq_data *d); +int phytium_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type); +void phytium_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p); +void phytium_gpio_irq_enable(struct irq_data *d); +void phytium_gpio_irq_disable(struct irq_data *d); +void phytium_gpio_irq_handler(struct irq_desc *desc); +int phytium_gpio_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force); +#endif diff --git a/drivers/gpio/gpio-phytium-pci.c b/drivers/gpio/gpio-phytium-pci.c new file mode 100644 index 0000000000000..b5e2092729c92 --- /dev/null +++ b/drivers/gpio/gpio-phytium-pci.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "gpio-phytium-core.h" + +static const struct irq_chip phytium_gpio_irq_chip = { + .irq_ack = phytium_gpio_irq_ack, + .irq_mask = phytium_gpio_irq_mask, + .irq_unmask = phytium_gpio_irq_unmask, + .irq_set_type = phytium_gpio_irq_set_type, + .irq_print_chip = phytium_gpio_irq_print_chip, + .irq_enable = phytium_gpio_irq_enable, + .irq_disable = phytium_gpio_irq_disable, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + +static int phytium_gpio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct phytium_gpio *gpio; + struct gpio_irq_chip *girq; + int err; + + gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) + return -ENOMEM; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device: err %d\n", err); + goto out; + } + + err = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); + if (err) { + dev_err(dev, "Failed to iomap PCI device: err %d\n", err); + goto out; + } + + gpio->regs = pcim_iomap_table(pdev)[0]; + if (!gpio->regs) { + dev_err(dev, "Cannot map PCI resource\n"); + err = -ENOMEM; + goto out; + } + + err = pci_enable_msi(pdev); + if (err < 0) + goto out; + + pci_set_master(pdev); + + gpio->irq[0] = pdev->irq; + if (gpio->irq[0] < 0) + dev_warn(dev, "no irq is found.\n"); + + /* There is only one group of Pins at the moment. */ + gpio->ngpio[0] = NGPIO_MAX; + + /* irq_chip support */ + raw_spin_lock_init(&gpio->lock); + + gpio->gc.base = -1; + gpio->gc.get_direction = phytium_gpio_get_direction; + gpio->gc.direction_input = phytium_gpio_direction_input; + gpio->gc.direction_output = phytium_gpio_direction_output; + gpio->gc.get = phytium_gpio_get; + gpio->gc.set = phytium_gpio_set; + gpio->gc.ngpio = gpio->ngpio[0] + gpio->ngpio[1]; + gpio->gc.label = dev_name(dev); + gpio->gc.parent = dev; + gpio->gc.owner = THIS_MODULE; + + girq = &gpio->gc.irq; + girq->handler = handle_bad_irq; + girq->default_type = IRQ_TYPE_NONE; + + girq->num_parents = 1; + girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents, + sizeof(*girq->parents), GFP_KERNEL); + if (!girq->parents) + return -ENOMEM; + girq->parents[0] = gpio->irq[0]; + girq->parent_handler = phytium_gpio_irq_handler; + + gpio_irq_chip_set_chip(girq, &phytium_gpio_irq_chip); + + err = devm_gpiochip_add_data(dev, &gpio->gc, gpio); + if (err) + goto out; + + dev_info(dev, "Phytium PCI GPIO controller @%pa registered\n", + &gpio->regs); + + pci_set_drvdata(pdev, gpio); + +out: + return err; +} + +static const struct pci_device_id phytium_gpio_pci_ids[] = { + { PCI_DEVICE(0x1DB7, 0xDC31) }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, phytium_gpio_pci_ids); + +#ifdef CONFIG_PM_SLEEP +static int phytium_gpio_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_gpio *gpio = pci_get_drvdata(pdev); + unsigned long flags; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + gpio->ctx.swporta_dr = readl(gpio->regs + GPIO_SWPORTA_DR); + gpio->ctx.swporta_ddr = readl(gpio->regs + GPIO_SWPORTA_DDR); + gpio->ctx.ext_porta = readl(gpio->regs + GPIO_EXT_PORTA); + gpio->ctx.swportb_dr = readl(gpio->regs + GPIO_SWPORTB_DR); + gpio->ctx.swportb_ddr = readl(gpio->regs + GPIO_SWPORTB_DDR); + gpio->ctx.ext_portb = readl(gpio->regs + GPIO_EXT_PORTB); + + gpio->ctx.inten = readl(gpio->regs + GPIO_INTEN); + gpio->ctx.intmask = readl(gpio->regs + GPIO_INTMASK); + gpio->ctx.inttype_level = readl(gpio->regs + 
GPIO_INTTYPE_LEVEL); + gpio->ctx.int_polarity = readl(gpio->regs + GPIO_INT_POLARITY); + gpio->ctx.debounce = readl(gpio->regs + GPIO_DEBOUNCE); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} + +static int phytium_gpio_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_gpio *gpio = pci_get_drvdata(pdev); + unsigned long flags; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + writel(gpio->ctx.swporta_dr, gpio->regs + GPIO_SWPORTA_DR); + writel(gpio->ctx.swporta_ddr, gpio->regs + GPIO_SWPORTA_DDR); + writel(gpio->ctx.ext_porta, gpio->regs + GPIO_EXT_PORTA); + writel(gpio->ctx.swportb_dr, gpio->regs + GPIO_SWPORTB_DR); + writel(gpio->ctx.swportb_ddr, gpio->regs + GPIO_SWPORTB_DDR); + writel(gpio->ctx.ext_portb, gpio->regs + GPIO_EXT_PORTB); + + writel(gpio->ctx.inten, gpio->regs + GPIO_INTEN); + writel(gpio->ctx.intmask, gpio->regs + GPIO_INTMASK); + writel(gpio->ctx.inttype_level, gpio->regs + GPIO_INTTYPE_LEVEL); + writel(gpio->ctx.int_polarity, gpio->regs + GPIO_INT_POLARITY); + writel(gpio->ctx.debounce, gpio->regs + GPIO_DEBOUNCE); + + writel(0xffffffff, gpio->regs + GPIO_PORTA_EOI); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_gpio_pci_pm_ops, + phytium_gpio_pci_suspend, + phytium_gpio_pci_resume); + +static struct pci_driver phytium_gpio_pci_driver = { + .name = "gpio-phytium-pci", + .id_table = phytium_gpio_pci_ids, + .probe = phytium_gpio_pci_probe, + .driver = { + .pm = &phytium_gpio_pci_pm_ops, + }, +}; + +module_pci_driver(phytium_gpio_pci_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Phytium GPIO PCI Driver"); diff --git a/drivers/gpio/gpio-phytium-platform.c b/drivers/gpio/gpio-phytium-platform.c new file mode 100644 index 0000000000000..b95f0315cf981 --- /dev/null +++ b/drivers/gpio/gpio-phytium-platform.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Support functions for Phytium GPIO + * + * Copyright (c) 2019-2023, Phytium Technology Co., Ltd. + * + * Derived from drivers/gpio/gpio-pl061.c + * Copyright (C) 2008, 2009 Provigent Ltd. 
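+ *
+ * Each port is described by a child node carrying "reg" and "ngpios"
+ * (or the legacy "nr-gpios"), e.g. (illustrative):
+ *	port@0 { reg = <0>; ngpios = <8>; };
+ *	port@1 { reg = <1>; ngpios = <8>; };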
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gpio-phytium-core.h" + +static const struct of_device_id phytium_gpio_of_match[] = { + { .compatible = "phytium,gpio", }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_gpio_of_match); + +static const struct acpi_device_id phytium_gpio_acpi_match[] = { + { "PHYT0001", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, phytium_gpio_acpi_match); + +static const struct irq_chip phytium_gpio_irq_chip = { + .irq_ack = phytium_gpio_irq_ack, + .irq_mask = phytium_gpio_irq_mask, + .irq_unmask = phytium_gpio_irq_unmask, + .irq_set_type = phytium_gpio_irq_set_type, + .irq_print_chip = phytium_gpio_irq_print_chip, + .irq_enable = phytium_gpio_irq_enable, + .irq_disable = phytium_gpio_irq_disable, + .irq_set_affinity = phytium_gpio_irq_set_affinity, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + +static int phytium_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct phytium_gpio *gpio; + struct gpio_irq_chip *girq; + struct fwnode_handle *fwnode; + int err, irq_count; + + gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gpio->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->regs)) + return PTR_ERR(gpio->regs); + + if (!device_get_child_node_count(dev)) + return -ENODEV; + + device_for_each_child_node(dev, fwnode) { + int idx; + + if (fwnode_property_read_u32(fwnode, "reg", &idx) || + idx >= MAX_NPORTS) { + dev_err(dev, "missing/invalid port index\n"); + fwnode_handle_put(fwnode); + return -EINVAL; + } + + if (fwnode_property_read_u32(fwnode, "ngpios", + &gpio->ngpio[idx]) + && fwnode_property_read_u32(fwnode, "nr-gpios", + &gpio->ngpio[idx])) { + dev_info(dev, + "failed to get number of gpios for Port%c\n", + idx ? 
'B' : 'A'); + gpio->ngpio[idx] = NGPIO_DEFAULT; + } + } + + /* irq_chip support */ + raw_spin_lock_init(&gpio->lock); + + gpio->gc.base = -1; + gpio->gc.get_direction = phytium_gpio_get_direction; + gpio->gc.direction_input = phytium_gpio_direction_input; + gpio->gc.direction_output = phytium_gpio_direction_output; + gpio->gc.get = phytium_gpio_get; + gpio->gc.set = phytium_gpio_set; + gpio->gc.ngpio = gpio->ngpio[0] + gpio->ngpio[1]; + gpio->gc.label = dev_name(dev); + gpio->gc.parent = dev; + gpio->gc.owner = THIS_MODULE; + + girq = &gpio->gc.irq; + girq->handler = handle_bad_irq; + girq->default_type = IRQ_TYPE_NONE; + + for (irq_count = 0; irq_count < platform_irq_count(pdev); irq_count++) { + gpio->irq[irq_count] = -ENXIO; + gpio->irq[irq_count] = platform_get_irq(pdev, irq_count); + if (gpio->irq[irq_count] < 0) { + dev_warn(dev, "no irq is found.\n"); + break; + } + }; + + girq->num_parents = irq_count; + girq->parents = gpio->irq; + girq->parent_handler = phytium_gpio_irq_handler; + + gpio_irq_chip_set_chip(girq, &phytium_gpio_irq_chip); + + err = devm_gpiochip_add_data(dev, &gpio->gc, gpio); + if (err) + return err; + + platform_set_drvdata(pdev, gpio); + dev_info(dev, "Phytium GPIO controller @%pa registered\n", + &res->start); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_gpio_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct phytium_gpio *gpio = platform_get_drvdata(pdev); + unsigned long flags; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + gpio->ctx.swporta_dr = readl(gpio->regs + GPIO_SWPORTA_DR); + gpio->ctx.swporta_ddr = readl(gpio->regs + GPIO_SWPORTA_DDR); + gpio->ctx.ext_porta = readl(gpio->regs + GPIO_EXT_PORTA); + gpio->ctx.swportb_dr = readl(gpio->regs + GPIO_SWPORTB_DR); + gpio->ctx.swportb_ddr = readl(gpio->regs + GPIO_SWPORTB_DDR); + gpio->ctx.ext_portb = readl(gpio->regs + GPIO_EXT_PORTB); + + gpio->ctx.inten = readl(gpio->regs + GPIO_INTEN); + gpio->ctx.intmask = readl(gpio->regs + GPIO_INTMASK); + gpio->ctx.inttype_level = readl(gpio->regs + GPIO_INTTYPE_LEVEL); + gpio->ctx.int_polarity = readl(gpio->regs + GPIO_INT_POLARITY); + gpio->ctx.debounce = readl(gpio->regs + GPIO_DEBOUNCE); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} + +static int phytium_gpio_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct phytium_gpio *gpio = platform_get_drvdata(pdev); + unsigned long flags; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + writel(gpio->ctx.swporta_dr, gpio->regs + GPIO_SWPORTA_DR); + writel(gpio->ctx.swporta_ddr, gpio->regs + GPIO_SWPORTA_DDR); + writel(gpio->ctx.ext_porta, gpio->regs + GPIO_EXT_PORTA); + writel(gpio->ctx.swportb_dr, gpio->regs + GPIO_SWPORTB_DR); + writel(gpio->ctx.swportb_ddr, gpio->regs + GPIO_SWPORTB_DDR); + writel(gpio->ctx.ext_portb, gpio->regs + GPIO_EXT_PORTB); + + writel(gpio->ctx.inten, gpio->regs + GPIO_INTEN); + writel(gpio->ctx.intmask, gpio->regs + GPIO_INTMASK); + writel(gpio->ctx.inttype_level, gpio->regs + GPIO_INTTYPE_LEVEL); + writel(gpio->ctx.int_polarity, gpio->regs + GPIO_INT_POLARITY); + writel(gpio->ctx.debounce, gpio->regs + GPIO_DEBOUNCE); + + writel(0xffffffff, gpio->regs + GPIO_PORTA_EOI); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_gpio_pm_ops, phytium_gpio_suspend, + phytium_gpio_resume); + +static struct platform_driver phytium_gpio_driver = { + .driver = { + .name = "gpio-phytium-platform", + .pm = 
&phytium_gpio_pm_ops, + .of_match_table = of_match_ptr(phytium_gpio_of_match), + .acpi_match_table = ACPI_PTR(phytium_gpio_acpi_match), + }, + .probe = phytium_gpio_probe, +}; + +module_platform_driver(phytium_gpio_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium GPIO driver"); diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 51a3e0fc2f56b..e4f0286047360 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -8093,6 +8093,9 @@ int cik_irq_process(struct radeon_device *rdev) if (queue_thermal) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; +#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index bc4ab71613a55..a0b3e6268e6be 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -4919,6 +4919,9 @@ int evergreen_irq_process(struct radeon_device *rdev) if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; +#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 8b62f7faa5b99..df075e025801c 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -4328,6 +4328,9 @@ int r600_irq_process(struct radeon_device *rdev) if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; +#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 26197aceb001c..479d12ed82dee 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6423,6 +6423,9 @@ int si_irq_process(struct radeon_device *rdev) if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; +#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 2760feb9f83b5..3d119fe952fa1 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -443,6 +443,15 @@ config SENSORS_ASPEED_G6 This driver can also be built as a module. If so, the module will be called aspeed_g6_pwm_tach. +config SENSORS_PHYTIUM + tristate "Phytium Fan tach and capture counter driver" + help + This driver provides support for Phytium Fan Tacho and capture + counter controllers. + + This driver can also be built as a module. If so, the module + will be called tacho-phytium. + config SENSORS_ATXP1 tristate "Attansic ATXP1 VID controller" depends on I2C @@ -2451,6 +2460,15 @@ config SENSORS_VIA_CPUTEMP sensor inside your CPU. Supported are all known variants of the VIA C7 and Nano. +config SENSORS_ZHAOXIN_CPUTEMP + tristate "Zhaoxin CPU temperature sensor" + depends on X86 + select HWMON_VID + help + If you say yes here you get support for the temperature + sensor inside your CPU. Supported are all known variants of + the Zhaoxin processors. 
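As a quick sanity check of the tachometer driver added next: it exposes the raw attributes defined via SENSOR_DEVICE_ATTR() ("fan_input" in tacho mode, "capture_input" in capture mode) through the hwmon class. A minimal userspace reader follows; "hwmon0" is a placeholder, since the instance number depends on probe order, so real code should scan /sys/class/hwmon/ and match the device name.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/fan_input", "r");
	int rpm;

	if (!f) {
		perror("fan_input");
		return 1;
	}
	if (fscanf(f, "%d", &rpm) != 1) {
		fclose(f);
		return 1;
	}
	printf("fan speed: %d rpm\n", rpm);
	fclose(f);
	return 0;
}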
+ config SENSORS_VIA686A tristate "VIA686A" depends on PCI && HAS_IOPORT diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 73b2abdcc6dd9..21413e840c5b5 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -169,6 +169,7 @@ obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o obj-$(CONFIG_SENSORS_MC33XS2410) += mc33xs2410_hwmon.o obj-$(CONFIG_SENSORS_MC34VR500) += mc34vr500.o obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o +obj-$(CONFIG_SENSORS_PHYTIUM) += tacho-phytium.o obj-$(CONFIG_SENSORS_TC654) += tc654.o obj-$(CONFIG_SENSORS_TPS23861) += tps23861.o obj-$(CONFIG_SENSORS_MLXREG_FAN) += mlxreg-fan.o @@ -235,6 +236,7 @@ obj-$(CONFIG_SENSORS_TMP464) += tmp464.o obj-$(CONFIG_SENSORS_TMP513) += tmp513.o obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o +obj-$(CONFIG_SENSORS_ZHAOXIN_CPUTEMP) += zhaoxin-cputemp.o obj-$(CONFIG_SENSORS_VIA686A) += via686a.o obj-$(CONFIG_SENSORS_VT1211) += vt1211.o obj-$(CONFIG_SENSORS_VT8231) += vt8231.o diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index 0b4bdcd33c7b8..523a4da6f3ab1 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -1336,4 +1336,3 @@ module_exit(hwmon_exit); MODULE_AUTHOR("Mark M. Hoffman "); MODULE_DESCRIPTION("hardware monitoring sysfs/class support"); MODULE_LICENSE("GPL"); - diff --git a/drivers/hwmon/tacho-phytium.c b/drivers/hwmon/tacho-phytium.c new file mode 100644 index 0000000000000..aa6b508b2050e --- /dev/null +++ b/drivers/hwmon/tacho-phytium.c @@ -0,0 +1,416 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Hwmon driver for Phytium tachometer. + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define TIMER_CTRL_REG 0x00 +#define TIMER_CTRL_MODE_SHIFT 0//0:1 +#define TIMER_CTRL_RESET_SHIFT BIT(2) +#define TIMER_CTRL_FORCE_SHIFT BIT(3) +#define TIMER_CTRL_CAPTURE_EN_SHIFT BIT(4) +#define TIMER_CTRL_CAPTURE_CNT_SHIFT 5//5:11 +#define TIMER_CTRL_ANTI_JITTER_SHIFT 18//18:19 +#define TIMER_CTRL_TACHO_MODE_SHIFT 20//20:21 +#define TIMER_CTRL_TIMER_CNT_MODE_SHIFT BIT(22) +#define TIMER_CTRL_BIT_SET_SHIFT 24 +#define TIMER_CTRL_CNT_EN_SHIFT BIT(25) +#define TIMER_CTRL_CNT_CLR_SHIFT BIT(26) +#define TIMER_CTRL_TIMER_MODE_SHIFT BIT(27) +#define TIMER_CTRL_PULSE_NUM_SHIFT 28//28:30 +#define TIMER_CTRL_TACHO_EN_SHIFT BIT(31) +#define TIMER_TACHO_RES_REG 0x04 +#define TIMER_TACHO_RES_VALID_SHIFT BIT(31) +#define TIMER_TACHO_RES_MASK GENMASK(30, 0) +#define TIMER_CMP_VALUE_UP_REG 0x08 +#define TIMER_CMP_VALUE_LOW_REG 0x1C +#define TIMER_CNT_VALUE_UP_REG 0x20 +#define TIMER_CNT_VALUE_LOW_REG 0x24 +#define TIMER_INT_MASK_REG 0x28 +#define TIMER_INT_STAT_REG 0x2C +#define TIMER_INT_CAPTURE_SHIFT BIT(5) +#define TIMER_INT_CYC_COMP_SHIFT BIT(4) +#define TIMER_INT_ONE_COMP_SHIFT BIT(3) +#define TIMER_INT_ROLLOVER_SHIFT BIT(2) +#define TIMER_INT_TACHO_UNDER_SHIFT BIT(1) +#define TIMER_INT_TACHO_OVER_SHIFT BIT(0) +#define TIMER_TACHO_OVER_REG 0x30 +#define TIMER_TACHO_UNDER_REG 0x34 +#define TIMER_START_VALUE_REG 0x38 + +#define TIMER_INT_CLR_MASK GENMASK(5, 0) + +enum tacho_modes { + tacho_mode = 1, + capture_mode, +}; + +enum edge_modes { + rising_edge, + falling_edge, + double_edge, +}; + +struct phytium_tacho { + struct device *dev; + struct device *hwmon; + void __iomem *base; + struct clk *clk; + u32 freq; + int irq; + u8 work_mode; + u8 edge_mode; + u32 debounce; +}; + +static u16 
capture_count; + +static void phytium_tacho_init(struct phytium_tacho *tacho) +{ + u32 val; + + if (tacho->work_mode == tacho_mode) { + val = (TIMER_CTRL_TACHO_EN_SHIFT | + TIMER_CTRL_CNT_EN_SHIFT | + (tacho->edge_mode << TIMER_CTRL_TACHO_MODE_SHIFT) | + (tacho->debounce << TIMER_CTRL_ANTI_JITTER_SHIFT) | + (tacho->work_mode << TIMER_CTRL_MODE_SHIFT)); + writel_relaxed(val, tacho->base + TIMER_CTRL_REG); + /* 0x2faf07f = 50,000,000 - 1 */ + writel_relaxed(0x2faf07f, tacho->base + TIMER_CMP_VALUE_LOW_REG); + } else { + val = (TIMER_CTRL_TACHO_EN_SHIFT | + TIMER_CTRL_CNT_EN_SHIFT | + (tacho->edge_mode << TIMER_CTRL_TACHO_MODE_SHIFT) | + (tacho->debounce << TIMER_CTRL_ANTI_JITTER_SHIFT) | + TIMER_CTRL_CAPTURE_EN_SHIFT | + (0x7f << TIMER_CTRL_CAPTURE_CNT_SHIFT) | + (tacho->work_mode << TIMER_CTRL_MODE_SHIFT)); + writel_relaxed(val, tacho->base + TIMER_CTRL_REG); + writel_relaxed(0x20, tacho->base + TIMER_INT_MASK_REG); + } +} + +static int phytium_get_fan_tach_rpm(struct phytium_tacho *priv) +{ + u64 raw_data, tach_div, clk_source; + u8 mode, both; + unsigned long timeout; + unsigned long loopcounter; + + timeout = jiffies + msecs_to_jiffies(500); + + for (loopcounter = 0;; loopcounter++) { + raw_data = readl_relaxed(priv->base + TIMER_TACHO_RES_REG); + + if (raw_data & TIMER_TACHO_RES_VALID_SHIFT) + break; + + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + + if (loopcounter > 3000) + msleep(20); + else { + udelay(100); + cond_resched(); + } + } + + raw_data = raw_data & TIMER_TACHO_RES_MASK; + clk_source = priv->freq; + mode = priv->edge_mode; + both = (mode == double_edge) ? 1 : 0; + tach_div = 1 << both; + + if (raw_data == 0) + return 0; + + /* RPM = raw_data * 60 * clk / 50,000,000, halved when both edges are counted */ + return (clk_source * 60 * raw_data) / 0x2faf080 / tach_div; +} + +static ssize_t show_rpm(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int rpm; + struct phytium_tacho *priv = dev_get_drvdata(dev); + + rpm = phytium_get_fan_tach_rpm(priv); + if (rpm < 0) + return rpm; + + return sprintf(buf, "%d\n", rpm); +} + +static SENSOR_DEVICE_ATTR(fan_input, 0444, + show_rpm, NULL, 0); + +static struct attribute *tacho_dev_attrs[] = { + &sensor_dev_attr_fan_input.dev_attr.attr, + NULL +}; + +static umode_t tacho_dev_is_visible(struct kobject *kobj, + struct attribute *a, int index) +{ + return a->mode; +} + +static const struct attribute_group tacho_group = { + .attrs = tacho_dev_attrs, + .is_visible = tacho_dev_is_visible, +}; + +static const struct attribute_group *tacho_groups[] = { + &tacho_group, + NULL +}; + +static irqreturn_t capture_irq_handler(int irq, void *dev_id) +{ + struct phytium_tacho *priv = dev_id; + u32 status = readl_relaxed(priv->base + TIMER_INT_STAT_REG); + + if (status & TIMER_INT_CAPTURE_SHIFT) { + capture_count++; + + if (capture_count == 0) + dev_err(priv->dev, "Capture counter has overflowed\n"); + + writel_relaxed(status, priv->base + TIMER_INT_STAT_REG); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +static ssize_t show_capture(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int cnt; + struct phytium_tacho *priv = dev_get_drvdata(dev); + + cnt = capture_count * 0x7f + readl_relaxed(priv->base + TIMER_CNT_VALUE_LOW_REG); + + return sprintf(buf, "%d\n", cnt); +} + +static SENSOR_DEVICE_ATTR(capture_input, 0444, + show_capture, NULL, 0); + +static struct attribute *capture_dev_attrs[] = { + &sensor_dev_attr_capture_input.dev_attr.attr, + NULL +}; + +static umode_t capture_dev_is_visible(struct kobject *kobj, + struct attribute *a, int index) +{ + return a->mode; +} + +static const struct attribute_group
capture_group = { + .attrs = capture_dev_attrs, + .is_visible = capture_dev_is_visible, +}; + +static const struct attribute_group *capture_groups[] = { + &capture_group, + NULL +}; + +static int phytium_tacho_get_work_mode(struct phytium_tacho *tacho) +{ + struct device_node *nc = tacho->dev->of_node; + struct fwnode_handle *fwn = tacho->dev->fwnode; + + if (of_property_read_bool(nc, "tacho")) + return tacho_mode; + else if (has_acpi_companion(tacho->dev) && fwnode_property_read_bool( + fwn, "tacho")) + return tacho_mode; + if (of_property_read_bool(nc, "capture")) + return capture_mode; + else if (has_acpi_companion(tacho->dev) && fwnode_property_read_bool( + fwn, "capture")) + return capture_mode; + return tacho_mode; +} + +static int phytium_tacho_get_edge_mode(struct phytium_tacho *tacho) +{ + struct device_node *nc = tacho->dev->of_node; + struct fwnode_handle *fwn = tacho->dev->fwnode; + + if (of_property_read_bool(nc, "up")) + return rising_edge; + else if (has_acpi_companion(tacho->dev) && fwnode_property_read_bool( + fwn, "up")) + return rising_edge; + if (of_property_read_bool(nc, "down")) + return falling_edge; + else if (has_acpi_companion(tacho->dev) && fwnode_property_read_bool( + fwn, "down")) + return falling_edge; + if (of_property_read_bool(nc, "double")) + return double_edge; + else if (has_acpi_companion(tacho->dev) && fwnode_property_read_bool( + fwn, "double")) + return double_edge; + return rising_edge; +} + +static int phytium_tacho_get_debounce(struct phytium_tacho *tacho) +{ + u32 value; + struct device_node *nc = tacho->dev->of_node; + struct fwnode_handle *fwn = tacho->dev->fwnode; + + if (!of_property_read_u32(nc, "debounce-level", &value)) + return value; + if (has_acpi_companion(tacho->dev)) { + if (!fwnode_property_read_u32(fwn, "debounce-level", &value)) + return value; + } + return 0; +} + +static void phytium_tacho_get_of_data(struct phytium_tacho *tacho) +{ + tacho->work_mode = phytium_tacho_get_work_mode(tacho); + tacho->edge_mode = phytium_tacho_get_edge_mode(tacho); + tacho->debounce = phytium_tacho_get_debounce(tacho); +} + +static int phytium_tacho_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct phytium_tacho *tacho; + int ret; + + tacho = devm_kzalloc(dev, sizeof(*tacho), GFP_KERNEL); + if (!tacho) + return -ENOMEM; + + tacho->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENOENT; + + tacho->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(tacho->base)) { + dev_err(&pdev->dev, "region map failed\n"); + return PTR_ERR(tacho->base); + } + + if (!has_acpi_companion(tacho->dev)) { + tacho->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(tacho->clk)) + return PTR_ERR(tacho->clk); + ret = clk_prepare_enable(tacho->clk); + if (ret) + return ret; + tacho->freq = clk_get_rate(tacho->clk); + } else { + u32 fre; + + ret = clk_prepare_enable(tacho->clk); + if (ret) + return ret; + fwnode_property_read_u32(tacho->dev->fwnode, "clock-frequency", &fre); + tacho->freq = fre; + } + + tacho->irq = platform_get_irq(pdev, 0); + if (tacho->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + return tacho->irq; + } + + ret = devm_request_irq(dev, tacho->irq, capture_irq_handler, + 0, "phytium_tacho", tacho); + if (ret) { + dev_err(&pdev->dev, "Cannot request IRQ\n"); + return ret; + } + + phytium_tacho_get_of_data(tacho); + + phytium_tacho_init(tacho); + + if (tacho->work_mode == tacho_mode) + tacho->hwmon = 
devm_hwmon_device_register_with_groups(dev, + "phytium_tacho", + tacho, tacho_groups); + else + tacho->hwmon = devm_hwmon_device_register_with_groups(dev, + "phytium_capture", + tacho, capture_groups); + + platform_set_drvdata(pdev, tacho); + + return PTR_ERR_OR_ZERO(tacho->hwmon); +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_tacho_suspend(struct device *dev) +{ + return 0; +} + +static int phytium_tacho_resume(struct device *dev) +{ + struct phytium_tacho *tacho = dev_get_drvdata(dev); + + phytium_tacho_init(tacho); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_tacho_pm, phytium_tacho_suspend, phytium_tacho_resume); + +static const struct of_device_id tacho_of_match[] = { + { .compatible = "phytium,tacho", }, + {}, +}; +MODULE_DEVICE_TABLE(of, tacho_of_match); + +static const struct acpi_device_id tacho_acpi_match[] = { + { "PHYT0033", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, tacho_acpi_match); + +static struct platform_driver phytium_tacho_driver = { + .probe = phytium_tacho_probe, + .driver = { + .name = "phytium_tacho", + .pm = &phytium_tacho_pm, + .of_match_table = of_match_ptr(tacho_of_match), + .acpi_match_table = ACPI_PTR(tacho_acpi_match), + }, +}; + +module_platform_driver(phytium_tacho_driver); + +MODULE_AUTHOR("Zhang Yiqun "); +MODULE_DESCRIPTION("Phytium tachometer driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c index 823bff2871e1e..8f89b892214e4 100644 --- a/drivers/hwmon/via-cputemp.c +++ b/drivers/hwmon/via-cputemp.c @@ -272,7 +272,6 @@ static const struct x86_cpu_id __initconst cputemp_ids[] = { X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_A, NULL), X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_D, NULL), X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_NANO, NULL), - X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, X86_MODEL_ANY, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, cputemp_ids); diff --git a/drivers/hwmon/zhaoxin-cputemp.c b/drivers/hwmon/zhaoxin-cputemp.c new file mode 100644 index 0000000000000..50dfdee9fd2a1 --- /dev/null +++ b/drivers/hwmon/zhaoxin-cputemp.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * zhaoxin-cputemp.c - Driver for Zhaoxin CPU core temperature monitoring + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRVNAME "zhaoxin_cputemp" + +enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME, SHOW_CRIT, SHOW_MAX }; + +/* Functions declaration */ + +struct zhaoxin_cputemp_data { + struct device *hwmon_dev; + const char *name; + u32 id; + u32 msr_temp; + u32 msr_crit; + u32 msr_max; + u32 msr_index; +}; + +/* Sysfs stuff */ + +static ssize_t name_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + int ret; + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + + if (attr->index == SHOW_NAME) + ret = sprintf(buf, "%s\n", data->name); + else /* show label */ + ret = sprintf(buf, "Core %d\n", data->id); + return ret; +} + +static int temperature_access(struct zhaoxin_cputemp_data *data, u32 *l, u32 *h); +static ssize_t temp_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_temp, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, 
"%lu\n", ((unsigned long)eax & 0xffffff) * 1000); +} + +static ssize_t crit_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_crit, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xff) * 1000); +} + +static ssize_t max_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_max, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xff) * 1000); +} + +static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, SHOW_TEMP); +static SENSOR_DEVICE_ATTR_RO(temp1_label, name, SHOW_LABEL); +static SENSOR_DEVICE_ATTR_RO(name, name, SHOW_NAME); +static SENSOR_DEVICE_ATTR_RO(temp1_crit, crit, SHOW_CRIT); +static SENSOR_DEVICE_ATTR_RO(temp1_max, max, SHOW_MAX); + +static struct attribute *zhaoxin_cputemp_attributes[] = { + &sensor_dev_attr_name.dev_attr.attr, + &sensor_dev_attr_temp1_label.dev_attr.attr, + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + NULL +}; + +static const struct attribute_group zhaoxin_cputemp_group = { + .attrs = zhaoxin_cputemp_attributes, +}; + +static int temperature_access(struct zhaoxin_cputemp_data *data, u32 *l, u32 *h) +{ + int err; + *l = 0x19; + + if (data->msr_index) { + err = wrmsr_safe_on_cpu(data->id, data->msr_index, *l, *h); + if (err) { + pr_info("Unable to write TEMPERATURE INDEX MSR\n"); + return err; + } + } + + *l = 0; + err = rdmsr_safe_on_cpu(data->id, data->msr_temp, l, h); + if (err) { + pr_info("Unable to read TEMPERATURE DATA MSR\n"); + return err; + } + return 0; +} + +static int zhaoxin_cputemp_probe(struct platform_device *pdev) +{ + struct zhaoxin_cputemp_data *data; + int err; + u32 eax, edx; + struct cpuinfo_x86 *c = &cpu_data(pdev->id); + + data = devm_kzalloc(&pdev->dev, sizeof(struct zhaoxin_cputemp_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->id = pdev->id; + data->name = "zhaoxin_cputemp"; + if (c->x86_model == 0x6b) { + data->msr_index = 0x174c; + data->msr_temp = 0x174d; + data->msr_crit = 0x175b; + data->msr_max = 0x175a; + } else { + data->msr_index = 0; + data->msr_temp = 0x1423; + data->msr_crit = 0x1416; + data->msr_max = 0x1415; + } + + /* test if we can access the TEMPERATURE MSR */ + err = temperature_access(data, &eax, &edx); + if (err) { + dev_err(&pdev->dev, "Unable to access TEMPERATURE MSR, giving up\n"); + return err; + } + + platform_set_drvdata(pdev, data); + + err = sysfs_create_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + if (err) + return err; + + data->hwmon_dev = hwmon_device_register_for_thermal(&pdev->dev, data->name, data); + if (IS_ERR(data->hwmon_dev)) { + err = PTR_ERR(data->hwmon_dev); + dev_err(&pdev->dev, "Class registration failed (%d)\n", err); + goto exit_remove; + } + + return 0; + +exit_remove: + sysfs_remove_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + return err; +} + +static void zhaoxin_cputemp_remove(struct platform_device *pdev) +{ + struct zhaoxin_cputemp_data *data = platform_get_drvdata(pdev); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); +} + +static struct platform_driver zhaoxin_cputemp_driver = { + .driver = { + .name = DRVNAME, + }, 
+ .probe = zhaoxin_cputemp_probe, + .remove = zhaoxin_cputemp_remove, +}; + +struct pdev_entry { + struct list_head list; + struct platform_device *pdev; + unsigned int cpu; +}; + +static LIST_HEAD(pdev_list); +static DEFINE_MUTEX(pdev_list_mutex); + +static int zhaoxin_cputemp_online(unsigned int cpu) +{ + int err; + struct platform_device *pdev; + struct pdev_entry *pdev_entry; + + pdev = platform_device_alloc(DRVNAME, cpu); + if (!pdev) { + err = -ENOMEM; + pr_err("Device allocation failed\n"); + goto exit; + } + + pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL); + if (!pdev_entry) { + err = -ENOMEM; + goto exit_device_put; + } + + err = platform_device_add(pdev); + if (err) { + pr_err("Device addition failed (%d)\n", err); + goto exit_device_free; + } + + pdev_entry->pdev = pdev; + pdev_entry->cpu = cpu; + mutex_lock(&pdev_list_mutex); + list_add_tail(&pdev_entry->list, &pdev_list); + mutex_unlock(&pdev_list_mutex); + + return 0; + +exit_device_free: + kfree(pdev_entry); +exit_device_put: + platform_device_put(pdev); +exit: + return err; +} + +static int zhaoxin_cputemp_down_prep(unsigned int cpu) +{ + struct pdev_entry *p; + + mutex_lock(&pdev_list_mutex); + list_for_each_entry(p, &pdev_list, list) { + if (p->cpu == cpu) { + platform_device_unregister(p->pdev); + list_del(&p->list); + mutex_unlock(&pdev_list_mutex); + kfree(p); + return 0; + } + } + mutex_unlock(&pdev_list_mutex); + return 0; +} + +static const struct x86_cpu_id __initconst cputemp_ids[] = { + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x3b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x3b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x5b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x5b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x6b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x6b, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, cputemp_ids); + +static enum cpuhp_state zhaoxin_temp_online; + +static int __init zhaoxin_cputemp_init(void) +{ + int err; + + if (!x86_match_cpu(cputemp_ids)) + return -ENODEV; + + err = platform_driver_register(&zhaoxin_cputemp_driver); + if (err) + goto exit; + + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/zhaoxin:online", + zhaoxin_cputemp_online, zhaoxin_cputemp_down_prep); + if (err < 0) + goto exit_driver_unreg; + + zhaoxin_temp_online = err; + +#ifndef CONFIG_HOTPLUG_CPU + if (list_empty(&pdev_list)) { + err = -ENODEV; + goto exit_hp_unreg; + } +#endif + return 0; + +#ifndef CONFIG_HOTPLUG_CPU +exit_hp_unreg: + cpuhp_remove_state_nocalls(zhaoxin_temp_online); +#endif +exit_driver_unreg: + platform_driver_unregister(&zhaoxin_cputemp_driver); +exit: + return err; +} + +static void __exit zhaoxin_cputemp_exit(void) +{ + cpuhp_remove_state(zhaoxin_temp_online); + platform_driver_unregister(&zhaoxin_cputemp_driver); +} + +MODULE_DESCRIPTION("Zhaoxin CPU temperature monitor"); +MODULE_LICENSE("GPL"); + +module_init(zhaoxin_cputemp_init) +module_exit(zhaoxin_cputemp_exit) + +MODULE_IMPORT_NS("HWMON_THERMAL"); diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index 29f16f220384f..6c0a06504911e 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -72,6 +72,15 @@ config IMX_MBOX help Mailbox implementation for i.MX Messaging Unit (MU). +config PHYTIUM_MBOX + tristate "Phytium SoC Mailbox Support" + depends on ARCH_PHYTIUM || COMPILE_TEST + help + Mailbox driver implementation for the Phytium platform. It is used + to send message between application processors and on-chip management + firmware. 
Say Y here if you want to build this mailbox controller + driver. + config PLATFORM_MHU tristate "Platform MHU Mailbox" depends on OF diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index 81820a4f55285..5eaa5b6e51c4e 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -82,3 +82,5 @@ obj-$(CONFIG_CIX_MBOX) += cix-mailbox.o obj-$(CONFIG_BCM74110_MAILBOX) += bcm74110-mailbox.o obj-$(CONFIG_RISCV_SBI_MPXY_MBOX) += riscv-sbi-mpxy-mbox.o + +obj-$(CONFIG_PHYTIUM_MBOX) += phytium_mailbox.o diff --git a/drivers/mailbox/phytium_mailbox.c b/drivers/mailbox/phytium_mailbox.c new file mode 100644 index 0000000000000..67b80793e9b78 --- /dev/null +++ b/drivers/mailbox/phytium_mailbox.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SoC mailbox driver + * + * Copyright (c) 2020-2023, Phytium Technology Co., Ltd. + * + * Derived from drivers/mailbox/arm_mhu.c + * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd. + * Copyright (C) 2015 Linaro Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define INTR_STAT 0x0 +#define INTR_SET 0x8 +#define INTR_CLR 0x10 + +#define TX_REG 0x100 + +#define NR_CHANS 1 + +struct phytium_mbox_link { + unsigned int irq; + void __iomem *tx_reg; + void __iomem *rx_reg; +}; + +struct phytium_mbox { + void __iomem *base; + struct phytium_mbox_link mlink; + struct mbox_chan chan; + struct mbox_controller mbox; +}; + +static irqreturn_t phytium_mbox_rx_irq(int irq, void *ch) +{ + struct mbox_chan *chan = ch; + struct phytium_mbox_link *mlink = chan->con_priv; + u32 val; + + val = readl_relaxed(mlink->rx_reg + INTR_STAT); + if (!val) + return IRQ_NONE; + + mbox_chan_received_data(chan, (void *)&val); + + writel_relaxed(val, mlink->rx_reg + INTR_CLR); + + return IRQ_HANDLED; +} + +static int phytium_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct phytium_mbox_link *mlink = chan->con_priv; + u32 *arg = data; + + writel_relaxed(*arg, mlink->tx_reg + INTR_SET); + + return 0; +} + +static int phytium_mbox_startup(struct mbox_chan *chan) +{ + struct phytium_mbox_link *mlink = chan->con_priv; + u32 val; + int ret; + + val = readl_relaxed(mlink->tx_reg + INTR_STAT); + writel_relaxed(val, mlink->tx_reg + INTR_CLR); + + ret = request_irq(mlink->irq, phytium_mbox_rx_irq, + IRQF_SHARED, "phytium_mbox_link", chan); + if (ret) { + dev_err(chan->mbox->dev, + "Unable to acquire IRQ %d\n", mlink->irq); + } + + return ret; +} + +static void phytium_mbox_shutdown(struct mbox_chan *chan) +{ + struct phytium_mbox_link *mlink = chan->con_priv; + + free_irq(mlink->irq, chan); +} + +static bool phytium_mbox_last_tx_done(struct mbox_chan *chan) +{ + struct phytium_mbox_link *mlink = chan->con_priv; + u32 val = readl_relaxed(mlink->tx_reg + INTR_STAT); + + return (val == (u32)(1U << 31)); +} + +static const struct mbox_chan_ops phytium_mbox_ops = { + .send_data = phytium_mbox_send_data, + .startup = phytium_mbox_startup, + .shutdown = phytium_mbox_shutdown, + .last_tx_done = phytium_mbox_last_tx_done, +}; + +static const struct acpi_device_id phytium_mbox_acpi_match[] = { + { "PHYT0009", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, phytium_mbox_acpi_match); + +static const struct of_device_id phytium_mbox_of_match[] = { + { .compatible = "phytium,mbox", }, + { }, +}; +MODULE_DEVICE_TABLE(of, phytium_mbox_of_match); + +static int phytium_mbox_probe(struct platform_device *pdev) +{ + struct phytium_mbox *mbox; + struct resource *res; + int err, irq; + + /* Allocate memory for device 
*/ + mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mbox->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mbox->base)) { + dev_err(&pdev->dev, "ioremap base failed\n"); + return PTR_ERR(mbox->base); + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "cannot obtain irq\n"); + return irq; + } + + mbox->chan.con_priv = &mbox->mlink; + mbox->mlink.irq = irq; + mbox->mlink.rx_reg = mbox->base; + mbox->mlink.tx_reg = mbox->mlink.rx_reg + TX_REG; + + mbox->mbox.dev = &pdev->dev; + mbox->mbox.chans = &mbox->chan; + mbox->mbox.num_chans = NR_CHANS; + mbox->mbox.ops = &phytium_mbox_ops; + mbox->mbox.txdone_irq = false; + mbox->mbox.txdone_poll = true; + mbox->mbox.txpoll_period = 1; + + platform_set_drvdata(pdev, mbox); + + err = mbox_controller_register(&mbox->mbox); + if (err) { + dev_err(&pdev->dev, "Failed to register mailboxes %d\n", err); + goto fail; + } + + dev_info(&pdev->dev, "Phytium SoC Mailbox registered\n"); +fail: + return err; +} + +static void phytium_mbox_remove(struct platform_device *pdev) +{ + struct phytium_mbox *mbox = platform_get_drvdata(pdev); + + mbox_controller_unregister(&mbox->mbox); +} + +static struct platform_driver phytium_mbox_driver = { + .probe = phytium_mbox_probe, + .remove = phytium_mbox_remove, + .driver = { + .name = "phytium-mbox", + .of_match_table = of_match_ptr(phytium_mbox_of_match), + .acpi_match_table = ACPI_PTR(phytium_mbox_acpi_match), + }, +}; + +module_platform_driver(phytium_mbox_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium SoC Mailbox Driver"); +MODULE_AUTHOR("Chen Baozi "); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 10d0ef58ef493..55ba6ea634b43 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -1141,3 +1141,38 @@ config MMC_LITEX module will be called litex_mmc. If unsure, say N. + +config MMC_PHYTIUM_SDCI + tristate "Phytium SD Host Controller support" + depends on ARM64 + help + This selects support for the Phytium SD Host Controller on Phytium + Px210 chipset. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_PHYTIUM_MCI_PCI + tristate "Phytium PCI MultiMedia Card Interface support" + depends on ARCH_PHYTIUM + default y if ARCH_PHYTIUM + help + This selects support for the PCI MultiMedia Card Interface on Phytium + Px210 chipset. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_PHYTIUM_MCI_PLTFM + tristate "Phytium MultiMedia Card Interface support" + depends on ARCH_PHYTIUM && OF + default y if ARCH_PHYTIUM + help + This selects support for the MultiMedia Card Interface on Phytium + Px210 chipset. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. 
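(Aside, not part of the patch: the three MCI Kconfig entries above build separate bus front ends around one shared core — the Makefile hunk that follows links phytium-mci.o into both the PCI and the platform object, and both probe paths end in phytium_mci_common_probe(). A minimal sketch of that pattern under stated assumptions; every identifier here (demo_host, demo_common_probe, demo_plat_probe) is hypothetical, not the driver's API.)

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>

struct demo_host {
	void __iomem *base;	/* filled in by the bus front end */
	int irq;
	unsigned long clk_rate;
};

/* bus-agnostic core, built once and linked into both front ends */
static int demo_common_probe(struct demo_host *host)
{
	/* program the controller through host->base, request host->irq, ... */
	return 0;
}

/* thin platform front end: only discovers bus resources */
static int demo_plat_probe(struct platform_device *pdev)
{
	struct demo_host *host;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(host->base))
		return PTR_ERR(host->base);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0)
		return host->irq;

	/* a PCI front end would fill the same fields from BARs/MSI
	 * and then call the identical core entry point */
	return demo_common_probe(host);
}

(The point of the split: resource discovery differs per bus, while command handling, DMA and interrupts live once in the shared core, which is how phytium-mci-pci.c and phytium-mci-plat.c below both funnel into phytium_mci_common_probe().)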
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 5057fea8afb69..9ce08fefc3ed0 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -73,6 +73,9 @@ obj-$(CONFIG_MMC_TOSHIBA_PCI) += toshsd.o obj-$(CONFIG_MMC_BCM2835) += bcm2835.o obj-$(CONFIG_MMC_OWL) += owl-mmc.o obj-$(CONFIG_MMC_LOONGSON2) += loongson2-mmc.o +obj-$(CONFIG_MMC_PHYTIUM_SDCI) += phytium-sdci.o +obj-$(CONFIG_MMC_PHYTIUM_MCI_PCI) += phytium-mci-pci.o phytium-mci.o +obj-$(CONFIG_MMC_PHYTIUM_MCI_PLTFM) += phytium-mci-plat.o phytium-mci.o obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o diff --git a/drivers/mmc/host/phytium-mci-pci.c b/drivers/mmc/host/phytium-mci-pci.c new file mode 100644 index 0000000000000..ce1f76c976886 --- /dev/null +++ b/drivers/mmc/host/phytium-mci-pci.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Phytium Multimedia Card Interface PCI driver + * + * Copyright (C) 2020-2023, Phytium Technology Co., Ltd. + * + */ + +#include +#include +#include +#include +#include +#include +#include "phytium-mci.h" + +static u32 sd_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY | + MMC_CAP_CMD23 | MMC_CAP_4_BIT_DATA; +static u32 sd_caps2 = MMC_CAP2_NO_MMC; + +static u32 emmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA | MMC_CAP_WAIT_WHILE_BUSY | + MMC_CAP_CMD23 | MMC_CAP_HW_RESET | MMC_CAP_MMC_HIGHSPEED | + MMC_CAP_NONREMOVABLE; +static u32 emmc_caps2 = MMC_CAP2_NO_SDIO | MMC_CAP2_NO_SD; + +#define PCI_BAR_NO 0 + +#if defined CONFIG_PM && defined CONFIG_PM_SLEEP +static const struct dev_pm_ops phytium_mci_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_mci_suspend, + phytium_mci_resume) + SET_RUNTIME_PM_OPS(phytium_mci_runtime_suspend, + phytium_mci_runtime_resume, NULL) +}; +#else +#define phytium_mci_dev_pm_ops NULL +#endif + +static int +phytium_mci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct phytium_mci_host *host; + struct mmc_host *mmc; + int ret; + + ret = pcim_enable_device(pdev); + + if (ret) + return ret; + pci_set_master(pdev); + + mmc = mmc_alloc_host(sizeof(struct phytium_mci_host), &pdev->dev); + + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + + pci_enable_msi(pdev); + + host->irq = pdev->irq; + host->irq_flags = IRQF_SHARED; + host->dev = &pdev->dev; + ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_NO, pci_name(pdev)); + + if (ret) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + goto host_free; + } + + host->base = pcim_iomap_table(pdev)[PCI_BAR_NO]; + host->is_use_dma = 1; + host->is_device_x100 = 1; + + if (pdev->devfn == 2) { + host->caps = emmc_caps; + host->caps2 = emmc_caps2; + } else { + host->caps = sd_caps; + host->caps2 = sd_caps2; + mmc->f_max = 25000000; /* stable frequency */ + } + + host->mmc = mmc; + host->clk_rate = MCI_CLK; + + dev_info(&pdev->dev, "%s %d: [bar %d] addr: 0x%llx size: 0x%llx km: 0x%llx devfn:%d\n", + __func__, __LINE__, PCI_BAR_NO, pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0), (uint64_t)host->base, pdev->devfn); + + dev_dbg(&pdev->dev, "%s %d:irq:0x%x\n", __func__, __LINE__, host->irq); + + ret = phytium_mci_common_probe(host); + + if (ret == MCI_REALEASE_MEM) { + ret = -ENOMEM; + goto release_mem; + } else if (ret) { + goto release; + } + pci_set_drvdata(pdev, mmc); + dev_info(&pdev->dev, "%s %d: probe phytium mci successful.\n", __func__, __LINE__); + return 0; + +release: + phytium_mci_deinit_hw(host); +release_mem: + + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + 
MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } +host_free: + mmc_free_host(mmc); + pci_disable_device(pdev); + return ret; +} + +static void phytium_mci_pci_remove(struct pci_dev *pdev) +{ + struct phytium_mci_host *host; + struct mmc_host *mmc; + + mmc = pci_get_drvdata(pdev); + if (!mmc) { + dev_info(&pdev->dev, "%s %d: mmc is null.\n", __func__, __LINE__); + return; + } + host = mmc_priv(mmc); + if (!host) { + dev_info(&pdev->dev, "%s %d: host is null.\n", __func__, __LINE__); + mmc_remove_host(mmc); + mmc_free_host(mmc); + return; + } + + timer_delete(&host->hotplug_timer); + + mmc_remove_host(host->mmc); + + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } + phytium_mci_deinit_hw(host); + mmc_free_host(mmc); + pci_set_drvdata(pdev, NULL); +} + +static const struct pci_device_id phytium_mci_pci_tbl[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_PHYTIUM, 0xdc28), + .class = 0x5, + .class_mask = 0, + }, + {} +}; +MODULE_DEVICE_TABLE(pci, phytium_mci_pci_tbl); + +static struct pci_driver phytium_mci_pci_driver = { + .name = "phytium-mci-pci", + .id_table = phytium_mci_pci_tbl, + .probe = phytium_mci_pci_probe, + .remove = phytium_mci_pci_remove, + .driver = { + .pm = &phytium_mci_dev_pm_ops, + } +}; +module_pci_driver(phytium_mci_pci_driver); + +MODULE_DESCRIPTION("Phytium Multimedia Card Interface PCI driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cheng Quan "); diff --git a/drivers/mmc/host/phytium-mci-plat.c b/drivers/mmc/host/phytium-mci-plat.c new file mode 100644 index 0000000000000..1ba76453528a1 --- /dev/null +++ b/drivers/mmc/host/phytium-mci-plat.c @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Phytium Multimedia Card Interface PCI driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "phytium-mci.h" + +static u32 mci_caps = MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY; + +#if defined CONFIG_PM && defined CONFIG_PM_SLEEP + +static const struct dev_pm_ops phytium_mci_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_mci_suspend, + phytium_mci_resume) + SET_RUNTIME_PM_OPS(phytium_mci_runtime_suspend, + phytium_mci_runtime_resume, NULL) +}; +#else +#define phytium_mci_dev_pm_ops NULL +#endif + +static int phytium_mci_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct phytium_mci_host *host; + struct resource *res; + const struct acpi_device_id *match; + struct device *dev = &pdev->dev; + int ret; + + mmc = mmc_alloc_host(sizeof(struct phytium_mci_host), &pdev->dev); + if (!mmc) + return -ENOMEM; + host = mmc_priv(mmc); + ret = mmc_of_parse(mmc); + if (ret) + goto host_free; + + if (device_property_read_bool(dev, "use-hold")) + host->use_hold = 1; + + if (device_property_read_bool(dev, "clk-set")) + host->clk_set = 1; + + if (host->clk_set) { + host->clk_smpl_drv_25m = -1; + host->clk_smpl_drv_50m = -1; + host->clk_smpl_drv_66m = -1; + host->clk_smpl_drv_100m = -1; + device_property_read_u32(dev, "clk-smpl-drv-25m", &host->clk_smpl_drv_25m); + device_property_read_u32(dev, "clk-smpl-drv-50m", &host->clk_smpl_drv_50m); + device_property_read_u32(dev, "clk-smpl-drv-66m", &host->clk_smpl_drv_66m); + device_property_read_u32(dev, "clk-smpl-drv-100m", &host->clk_smpl_drv_100m); + } + dev_info(dev, "mci clk set %d %d 0x%x 0x%x 0x%x 0x%x\n", + host->use_hold, host->clk_set, host->clk_smpl_drv_25m, + host->clk_smpl_drv_50m, host->clk_smpl_drv_66m, host->clk_smpl_drv_100m); + + if (dev->of_node) { + host->src_clk = devm_clk_get(&pdev->dev, "phytium_mci_clk"); + if (IS_ERR(host->src_clk)) { + ret = PTR_ERR(host->src_clk); + goto host_free; + } + + host->clk_rate = clk_get_rate(host->src_clk); + } else if (has_acpi_companion(dev)) { + match = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!match) { + dev_err(dev, "Error ACPI match data is missing\n"); + return -ENODEV; + } + host->clk_rate = 1200000000; + } + + host->is_use_dma = 1; + host->is_device_x100 = 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto host_free; + } + + host->irq = platform_get_irq(pdev, 0); + + if (host->irq < 0) { + ret = -EINVAL; + goto host_free; + } + host->irq_flags = IRQF_SHARED; + dev_dbg(&pdev->dev, "%s %d:irq:%d\n", __func__, __LINE__, host->irq); + host->dev = &pdev->dev; + host->caps = mci_caps; + host->mmc = mmc; + ret = phytium_mci_common_probe(host); + if (ret == MCI_REALEASE_MEM) { + ret = -ENOMEM; + goto release_mem; + } else if (ret) { + goto release; + } + platform_set_drvdata(pdev, mmc); + dev_info(&pdev->dev, "%s %d: probe phytium mci successful.\n", __func__, __LINE__); + return 0; + +release: + phytium_mci_deinit_hw(host); +release_mem: + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } +host_free: + mmc_free_host(mmc); + return ret; +} + +static void phytium_mci_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct phytium_mci_host *host; + + mmc = platform_get_drvdata(pdev); + if (!mmc) { + dev_info(&pdev->dev, "%s %d: mmc is null.\n", __func__, __LINE__); + return; + } + host = mmc_priv(mmc); + if (!host) { + 
dev_info(&pdev->dev, "%s %d: host is null.\n", __func__, __LINE__); + mmc_remove_host(mmc); + mmc_free_host(mmc); + return; + } + timer_delete(&host->hotplug_timer); + mmc_remove_host(host->mmc); + + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } + phytium_mci_deinit_hw(host); + mmc_free_host(mmc); + platform_set_drvdata(pdev, NULL); +} + +static const struct of_device_id phytium_mci_of_ids[] = { + { .compatible = "phytium,mci", }, + {} +}; + +MODULE_DEVICE_TABLE(of, phytium_mci_of_ids); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_mci_acpi_ids[] = { + { .id = "PHYT0017" }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, phytium_mci_acpi_ids); +#else +#define phytium_mci_acpi_ids NULL +#endif + +static struct platform_driver phytium_mci_driver = { + .probe = phytium_mci_probe, + .remove = phytium_mci_remove, + .driver = { + .name = "phytium-mci-platform", + .of_match_table = phytium_mci_of_ids, + .acpi_match_table = phytium_mci_acpi_ids, + .pm = &phytium_mci_dev_pm_ops, + }, +}; + +module_platform_driver(phytium_mci_driver); + +MODULE_DESCRIPTION("Phytium Multimedia Card Interface driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cheng Quan "); diff --git a/drivers/mmc/host/phytium-mci.c b/drivers/mmc/host/phytium-mci.c new file mode 100644 index 0000000000000..9b435de7e8ddd --- /dev/null +++ b/drivers/mmc/host/phytium-mci.c @@ -0,0 +1,1530 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Phytium Multimedia Card Interface + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium-mci.h" + +static const u32 cmd_ints_mask = MCI_INT_MASK_RE | MCI_INT_MASK_CMD | MCI_INT_MASK_RCRC | + MCI_INT_MASK_RTO | MCI_INT_MASK_HTO | MCI_RAW_INTS_HLE; + +static const u32 data_ints_mask = MCI_INT_MASK_DTO | MCI_INT_MASK_DCRC | MCI_INT_MASK_DRTO | + MCI_INT_MASK_SBE_BCI; +static const u32 cmd_err_ints_mask = MCI_INT_MASK_RTO | MCI_INT_MASK_RCRC | MCI_INT_MASK_RE | + MCI_INT_MASK_DCRC | MCI_INT_MASK_DRTO | + MCI_MASKED_INTS_SBE_BCI; + +static const u32 dmac_ints_mask = MCI_DMAC_INT_ENA_FBE | MCI_DMAC_INT_ENA_DU | + MCI_DMAC_INT_ENA_NIS | MCI_DMAC_INT_ENA_AIS; +static const u32 dmac_err_ints_mask = MCI_DMAC_INT_ENA_FBE | MCI_DMAC_INT_ENA_DU | + MCI_DMAC_INT_ENA_AIS; + +static void phytium_mci_cmd_next(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd); +static void phytium_mci_adma_reset(struct phytium_mci_host *host); +static void phytium_mci_send_cmd(struct phytium_mci_host *host, u32 cmd, u32 arg); +static bool phytium_mci_data_xfer_done(struct phytium_mci_host *host, u32 events, + struct mmc_request *mrq, struct mmc_data *data); +static void phytium_mci_init_adma_table(struct phytium_mci_host *host, + struct phytium_mci_dma *dma); +static void phytium_mci_init_hw(struct phytium_mci_host *host); +static int phytium_mci_get_cd(struct mmc_host *mmc); +static int phytium_mci_err_irq(struct phytium_mci_host *host, u32 dmac_events, u32 events); + +static void sdr_set_bits(void __iomem *reg, u32 bs) +{ + u32 val = readl(reg); + + val |= bs; + writel(val, reg); +} + +static void sdr_clr_bits(void __iomem *reg, u32 bs) +{ 
+ u32 val = readl(reg); + + val &= ~bs; + writel(val, reg); +} + +static void phytium_mci_reset_hw(struct phytium_mci_host *host) +{ + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET); + + while (readl(host->base + MCI_CNTRL) & (MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET)) + cpu_relax(); + phytium_mci_send_cmd(host, MCI_CMD_UPD_CLK, 0); +} + +static void phytium_mci_update_external_clk(struct phytium_mci_host *host, u32 uhs_reg_value) +{ + writel(0, host->base + MCI_UHS_REG_EXT); + writel(uhs_reg_value, host->base + MCI_UHS_REG_EXT); + while (!(readl(host->base + MCI_CCLK_RDY) & 0x1)) + cpu_relax(); + +} + +static void phytium_mci_prepare_data(struct phytium_mci_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (!(data->host_cookie & MCI_PREPARE_FLAG)) { + data->host_cookie |= MCI_PREPARE_FLAG; + data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, + mmc_get_dma_dir(data)); + } +} + +static void phytium_mci_unprepare_data(struct phytium_mci_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (data->host_cookie & MCI_ASYNC_FLAG) + return; + + if (data->host_cookie & MCI_PREPARE_FLAG) { + dma_unmap_sg(host->dev, data->sg, data->sg_len, mmc_get_dma_dir(data)); + data->host_cookie &= ~MCI_PREPARE_FLAG; + } +} + +static void phytium_mci_send_cmd(struct phytium_mci_host *host, u32 cmd, u32 arg) +{ + int rc; + u32 data; + + writel(arg, host->base + MCI_CMDARG); + wmb(); /* drain writebuffer */ + + rc = readl_relaxed_poll_timeout(host->base + MCI_STATUS, + data, + !(data & MCI_STATUS_CARD_BUSY), + 0, 5000 * 1000); + if (rc == -ETIMEDOUT) + pr_debug("%s %d, timeout mci_status: 0x%08x\n", __func__, __LINE__, data); + + writel(MCI_CMD_START | cmd, host->base + MCI_CMD); + + rc = readl_relaxed_poll_timeout(host->base + MCI_CMD, + data, + !(data & MCI_CMD_START), + 0, 5000 * 1000); + if (rc == -ETIMEDOUT) + pr_debug("%s %d, timeout mci_cmd: 0x%08x\n", __func__, __LINE__, data); +} + +static void phytium_mci_update_cmd11(struct phytium_mci_host *host, u32 cmd) +{ + writel(MCI_CMD_START | cmd, host->base + MCI_CMD); + + while (readl(host->base + MCI_CMD) & MCI_CMD_START) + cpu_relax(); +} + +static void phytium_mci_set_clk(struct phytium_mci_host *host, struct mmc_ios *ios) +{ + u32 div = 0xff, drv = 0, sample = 0; + unsigned long clk_rate; + u32 mci_cmd_bits = MCI_CMD_UPD_CLK; + u32 cmd_reg; + u32 cur_cmd_index; + u32 first_uhs_div, tmp_ext_reg; + + cmd_reg = readl(host->base + MCI_CMD); + cur_cmd_index = cmd_reg & 0x3F; + + if (cur_cmd_index == SD_SWITCH_VOLTAGE) + mci_cmd_bits |= MCI_CMD_VOLT_SWITCH; + if (ios->clock) { + if (host->current_ios_clk == ios->clock) + return; + + dev_dbg(host->dev, "will change clock, host->clk_rate: %ld, ios->clock: %d\n", + host->clk_rate, ios->clock); + + if (ios->clock >= 25000000) + tmp_ext_reg = 0x202; + else if (ios->clock == 400000) + tmp_ext_reg = 0x502; + else + tmp_ext_reg = 0x302; + + phytium_mci_update_external_clk(host, tmp_ext_reg); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + + if (cur_cmd_index == SD_SWITCH_VOLTAGE) + phytium_mci_update_cmd11(host, mci_cmd_bits | cmd_reg); + else + phytium_mci_send_cmd(host, mci_cmd_bits, 0); + + clk_rate = host->clk_rate; + first_uhs_div = 1 + ((tmp_ext_reg >> 8)&0xFF); + div = clk_rate / (2 * first_uhs_div * ios->clock); + + if (host->clk_smpl_drv_25m >= 0 + && ios->clock == 25000000 && host->clk_set) { + writel((host->clk_smpl_drv_25m << 8) | (div & 0xff), + host->base + MCI_CLKDIV); + } 
else if (host->clk_smpl_drv_50m >= 0 + && ios->clock == 50000000 && host->clk_set){ + writel((host->clk_smpl_drv_50m << 8) | (div & 0xff), + host->base + MCI_CLKDIV); + } else if (host->clk_smpl_drv_66m >= 0 + && ios->clock == 66000000 && host->clk_set){ + writel((host->clk_smpl_drv_66m << 8) | (div & 0xff), + host->base + MCI_CLKDIV); + } else if (host->clk_smpl_drv_100m >= 0 + && ios->clock == 100000000 && host->clk_set){ + writel((host->clk_smpl_drv_100m << 8) | (div & 0xff), + host->base + MCI_CLKDIV); + } else { + if (div > 2) { + sample = div / 2 + 1; + drv = sample - 1; + writel((sample << 16) | (drv << 8) | (div & 0xff), + host->base + MCI_CLKDIV); + } else if (div == 2) { + drv = 0; + sample = 1; + writel((drv << 8) | (sample << 16) | (div & 0xff), + host->base + MCI_CLKDIV); + } + } + + dev_dbg(host->dev, "UHS_REG_EXT ext: %x, CLKDIV: %x\n", + readl(host->base + MCI_UHS_REG_EXT), readl(host->base + MCI_CLKDIV)); + + sdr_set_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + + if (cur_cmd_index == SD_SWITCH_VOLTAGE) + phytium_mci_update_cmd11(host, mci_cmd_bits | cmd_reg); + else + phytium_mci_send_cmd(host, mci_cmd_bits, 0); + + host->current_ios_clk = ios->clock; + + dev_dbg(host->dev, "host->clk_rate: %ld, ios->clock: %d\n", + host->clk_rate, ios->clock); + } else { + host->current_ios_clk = 0; + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + + if (cur_cmd_index == SD_SWITCH_VOLTAGE) + phytium_mci_update_cmd11(host, mci_cmd_bits | cmd_reg); + else + phytium_mci_send_cmd(host, mci_cmd_bits, 0); + + sdr_clr_bits(host->base + MCI_UHS_REG_EXT, MCI_EXT_CLK_ENABLE); + dev_dbg(host->dev, "host->clk_rate: %ld, ios->clock: %d\n", + host->clk_rate, ios->clock); + } +} + +static inline u32 +phytium_mci_cmd_find_resp(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 resp; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: + case MMC_RSP_R1B: + resp = 0x5; + break; + + case MMC_RSP_R2: + resp = 0x7; + break; + + case MMC_RSP_R3: + resp = 0x1; + break; + + case MMC_RSP_NONE: + default: + resp = 0x0; + break; + } + + return resp; +} + +static inline +u32 phytium_mci_cmd_prepare_raw_cmd(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 opcode = cmd->opcode; + u32 resp = phytium_mci_cmd_find_resp(host, mrq, cmd); + u32 rawcmd = ((opcode & 0x3f) | ((resp & 0x7) << 6)); + + if (opcode == MMC_GO_INACTIVE_STATE || + (opcode == SD_IO_RW_DIRECT && ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) + rawcmd |= (0x1 << 14); + else if (opcode == SD_SWITCH_VOLTAGE) + rawcmd |= (0x1 << 28); + + if (test_and_clear_bit(MCI_CARD_NEED_INIT, &host->flags)) + rawcmd |= (0x1 << 15); + + if (cmd->data) { + struct mmc_data *data = cmd->data; + + rawcmd |= (0x1 << 9); + + if (data->flags & MMC_DATA_WRITE) + rawcmd |= (0x1 << 10); + } + if (host->use_hold) + rawcmd |= (0x1 << 29); + + return (rawcmd | (0x1 << 31)); +} + +static inline void +phytium_mci_adma_write_desc(struct phytium_mci_host *host, + struct phytium_adma2_64_desc *desc, + dma_addr_t addr, u32 len, u32 attribute) +{ + desc->attribute = attribute; + desc->len = len; + desc->addr_lo = lower_32_bits(addr); + desc->addr_hi = upper_32_bits(addr); + dev_dbg(host->dev, "%s %d:addr_lo:0x%x ddr_hi:0x%x\n", __func__, + __LINE__, desc->addr_lo, desc->addr_hi); + + if ((attribute == 0x80000004) || (attribute == 0x8000000c)) { + desc->desc_lo = 0; + desc->desc_hi = 0; + } +} + +static void +phytium_mci_data_sg_write_2_admc_table(struct phytium_mci_host 
*host, struct mmc_data *data) +{ + struct phytium_adma2_64_desc *desc; + u32 dma_len, i; + dma_addr_t dma_address; + struct scatterlist *sg; + + phytium_mci_init_adma_table(host, &host->dma); + + desc = host->dma.adma_table; + for_each_sg(data->sg, sg, data->sg_count, i) { + dma_address = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + + if (i == 0) { + if (sg_is_last(sg) || (data->sg_count == 1 && dma_len == SD_BLOCK_SIZE)) + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x8000000c); + else + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x8000001a); + } else if (sg_is_last(sg)) { + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x80000004); + } else { + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x80000012); + } + + desc++; + } +} + +static void +phytium_mci_data_sg_write_2_fifo(struct phytium_mci_host *host, struct mmc_data *data) +{ + struct scatterlist *sg; + u32 dma_len, i, j; + u32 *virt_addr; + + if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { + writel(0x1 << 10, host->base + MCI_CMD); + for_each_sg(data->sg, sg, data->sg_count, i) { + dma_len = sg_dma_len(sg); + virt_addr = sg_virt(sg); + for (j = 0; j < (dma_len / 4); j++) { + writel(*virt_addr, host->base + MCI_DATA); + virt_addr++; + } + } + } +} + +static void phytium_mci_restart_clk(struct phytium_mci_host *host) +{ + u32 clk_div, uhs; + + while (readl(host->base + MCI_CMD) & MCI_CMD_START) + cpu_relax(); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + clk_div = readl(host->base + MCI_CLKDIV); + uhs = readl(host->base + MCI_UHS_REG_EXT); + writel(0, host->base + MCI_UHS_REG_EXT); + writel(uhs, host->base + MCI_UHS_REG_EXT); + while (!(readl(host->base + MCI_CCLK_RDY) & 0x1)) + cpu_relax(); + + writel(clk_div, host->base + MCI_CLKDIV); + sdr_set_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + writel(MCI_CMD_START | MCI_CMD_UPD_CLK, host->base + MCI_CMD); + while (readl(host->base + MCI_CMD) & MCI_CMD_START) + cpu_relax(); +} + +static int +phytium_mci_start_multiple_write(struct phytium_mci_host *host, + struct mmc_request *mrq, u32 cnts, u32 offset) +{ + u32 rawcmd, cmd_status; + struct mmc_command *cmd = mrq->cmd; + u32 *rsp = cmd->resp; + unsigned long deadline_time; + + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) + return -ESHUTDOWN; + + while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) + cpu_relax(); + + writel(0xffffe, host->base + MCI_RAW_INTS); + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + writel(mrq->data->blksz, host->base + MCI_BLKSIZ); + writel(cnts * mrq->data->blksz, host->base + MCI_BYTCNT); + writel(cmd->arg + offset, host->base + MCI_CMDARG); + writel(rawcmd, host->base + MCI_CMD); + deadline_time = jiffies + msecs_to_jiffies(200); + + cmd_status = readl(host->base + MCI_RAW_INTS); + while (!(cmd_status & MCI_MASKED_INTS_CMD)) { + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && + readl(host->base + MCI_CARD_DETECT)) + return -ESHUTDOWN; + + cmd_status = readl(host->base + MCI_RAW_INTS); + if (cmd_err_ints_mask & cmd_status) + return -ESHUTDOWN; + + if (cmd_status & MCI_MASKED_INTS_CMD) + break; + + if (time_after(jiffies, deadline_time)) + return -ESHUTDOWN; + } + + if (cmd_status & MCI_MASKED_INTS_CMD) { + if (cmd->flags & MMC_RSP_136) { + rsp[3] = readl(host->base + MCI_RESP0); + rsp[2] = readl(host->base + MCI_RESP1); + rsp[1] = readl(host->base + MCI_RESP2); + rsp[0] = readl(host->base + MCI_RESP3); + } else { + rsp[0] =
readl(host->base + MCI_RESP0); + } + } + deadline_time = jiffies + msecs_to_jiffies(1000); + while (!(cmd_status & MCI_MASKED_INTS_DTO)) { + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && + readl(host->base + MCI_CARD_DETECT)) + return -ESHUTDOWN; + cmd_status = readl(host->base + MCI_RAW_INTS); + if (cmd_err_ints_mask & cmd_status) + return -ESHUTDOWN; + if (cmd_status & MCI_MASKED_INTS_DTO) + return 0; + if (time_after(jiffies, deadline_time)) + return -ESHUTDOWN; + } + return 0; +} + +static int +phytium_mci_start_sbc_stop_cmd(struct phytium_mci_host *host, struct mmc_request *mrq, + struct mmc_command *cmd, u32 arg) +{ + u32 rawcmd, cmd_status; + u32 *rsp = cmd->resp; + unsigned long deadline_time; + + writel(0xffffe, host->base + MCI_RAW_INTS); + + while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) + cpu_relax(); + + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + writel(arg, host->base + MCI_CMDARG); + writel(rawcmd, host->base + MCI_CMD); + + deadline_time = jiffies + msecs_to_jiffies(200); + cmd_status = readl(host->base + MCI_RAW_INTS); + while (!(cmd_status & MCI_MASKED_INTS_CMD)) { + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && + readl(host->base + MCI_CARD_DETECT)) + return -ENOMEDIUM; + + cmd_status = readl(host->base + MCI_RAW_INTS); + if (cmd_err_ints_mask & cmd_status) + return -ETIMEDOUT; + + if (cmd_status & MCI_MASKED_INTS_CMD) + break; + + if (time_after(jiffies, deadline_time)) + return -ETIMEDOUT; + } + + if (cmd_status & MCI_MASKED_INTS_CMD) { + if (cmd->flags & MMC_RSP_136) { + rsp[3] = readl(host->base + MCI_RESP0); + rsp[2] = readl(host->base + MCI_RESP1); + rsp[1] = readl(host->base + MCI_RESP2); + rsp[0] = readl(host->base + MCI_RESP3); + } else { + rsp[0] = readl(host->base + MCI_RESP0); + } + } + + if (cmd_err_ints_mask & cmd_status) + return -ETIMEDOUT; + + return 0; +} + +static void +phytium_mci_start_write_multiple_non_dma(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + u32 write_cnts, last_cnts; + u32 i, j, k, send_cnt_one_sg, block_offset; + int ret = 0, dma_len; + struct scatterlist *sg; + u32 *virt_addr = NULL; + + write_cnts = data->blocks / 4; + (data->blocks % 4) ? 
write_cnts++ : write_cnts; + last_cnts = data->blocks % 4; + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) { + ret = -ENOMEDIUM; + goto write_err; + } + + dev_dbg(host->dev, "%s: cmd:%d, block counts:%d\n", + __func__, mrq->cmd->opcode, data->blocks); + + sdr_clr_bits(host->base + MCI_CNTRL, MCI_CNTRL_USE_INTERNAL_DMAC); + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET); + while (readl(host->base + MCI_CNTRL) & MCI_CNTRL_FIFO_RESET) + cpu_relax(); + sdr_clr_bits(host->base + MCI_BUS_MODE, MCI_BUS_MODE_DE); + + if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { + block_offset = 0; + for_each_sg(data->sg, sg, data->sg_count, i) { + /* Each SG data transfer starts */ + dma_len = sg_dma_len(sg); + send_cnt_one_sg = (dma_len / MCI_MAX_FIFO_CNT) + 1; + virt_addr = sg_virt(sg); + for (k = 0; k < send_cnt_one_sg; k++) { + if (dma_len && dma_len >= MCI_MAX_FIFO_CNT) { + /* first write the SBC command */ + ret = phytium_mci_start_sbc_stop_cmd(host, mrq, + mrq->sbc, 4); + if (ret) + goto write_err; + writel(0x1 << 10, host->base + MCI_CMD); + for (j = 0; j < (MCI_MAX_FIFO_CNT / 4); j++) { + writel(*virt_addr, host->base + MCI_DATA); + virt_addr++; + } + + /* then write CMD25 here */ + ret = phytium_mci_start_multiple_write(host, mrq, 4, + block_offset); + if (ret) + goto write_err; + block_offset += 4; + dma_len -= MCI_MAX_FIFO_CNT; + } else if (dma_len > 0) { + /* first write the SBC command */ + last_cnts = dma_len / 512; + ret = phytium_mci_start_sbc_stop_cmd(host, mrq, mrq->sbc, + last_cnts); + if (ret) + goto write_err; + writel(0x1 << 10, host->base + MCI_CMD); + for (j = 0; j < (dma_len / 4); j++) { + writel(*virt_addr, host->base + MCI_DATA); + virt_addr++; + } + /* then write CMD25 here */ + ret = phytium_mci_start_multiple_write(host, mrq, last_cnts, + block_offset); + if (ret) + goto write_err; + block_offset += last_cnts; + dma_len = 0; + } else { + dev_dbg(host->dev, "%s: sg %d end\n", __func__, i); + break; + } + } + } + } + +write_err: + host->data = NULL; + host->cmd = NULL; + host->mrq = NULL; + writel(0xffffe, host->base + MCI_RAW_INTS); + if (ret) { + data->bytes_xfered = 0; + if (ret == -ESHUTDOWN) { + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET); + while (readl(host->base + MCI_CNTRL) & MCI_CNTRL_FIFO_RESET) + cpu_relax(); + + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_CONTROLLER_RESET); + while (readl(host->base + MCI_STATUS) & MCI_STATUS_CARD_BUSY) + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_CONTROLLER_RESET); + phytium_mci_restart_clk(host); + phytium_mci_start_sbc_stop_cmd(host, mrq, mrq->stop, mrq->stop->arg); + } + data->error = -ETIMEDOUT; + mrq->cmd->error = -ETIMEDOUT; + mmc_request_done(host->mmc, mrq); + return; + } + data->bytes_xfered = data->blocks * data->blksz; + mmc_request_done(host->mmc, mrq); +} + +static void +phytium_mci_start_data(struct phytium_mci_host *host, struct mmc_request *mrq, + struct mmc_command *cmd, struct mmc_data *data) +{ + bool read; + u32 rawcmd; + unsigned long flags; + + WARN_ON(host->cmd); + host->cmd = cmd; + cmd->error = 0; + WARN_ON(host->data); + host->data = data; + read = data->flags & MMC_DATA_READ; + + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) { + phytium_mci_err_irq(host, 0, MCI_INT_MASK_RTO); + return; + } + /* clear interrupts */ + writel(0xffffe, host->base + MCI_RAW_INTS); + + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET); + + while (readl(host->base + MCI_CNTRL) & (MCI_CNTRL_FIFO_RESET |
MCI_CNTRL_DMA_RESET)) + cpu_relax(); + + if (host->adtc_type == COMMOM_ADTC) + sdr_clr_bits(host->base + MCI_CNTRL, MCI_CNTRL_USE_INTERNAL_DMAC); + else + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_USE_INTERNAL_DMAC); + wmb(); /* drain writebuffer */ + sdr_clr_bits(host->base + MCI_CNTRL, MCI_CNTRL_INT_ENABLE); + + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + if (host->is_use_dma && host->adtc_type == BLOCK_RW_ADTC) + phytium_mci_data_sg_write_2_admc_table(host, data); + else + phytium_mci_data_sg_write_2_fifo(host, data); + + spin_lock_irqsave(&host->lock, flags); + sdr_set_bits(host->base + MCI_INT_MASK, cmd_ints_mask | data_ints_mask); + if (host->is_use_dma && host->adtc_type == BLOCK_RW_ADTC) { + sdr_set_bits(host->base + MCI_DMAC_INT_ENA, dmac_ints_mask); + /* Enable the IDMAC */ + sdr_set_bits(host->base + MCI_BUS_MODE, MCI_BUS_MODE_DE); + writel((u32)host->dma.adma_addr, host->base + MCI_DESC_LIST_ADDRL); + writel((u32)(host->dma.adma_addr >> 32), host->base + MCI_DESC_LIST_ADDRH); + } + writel(mrq->data->blksz, host->base + MCI_BLKSIZ); + writel(mrq->data->blocks * mrq->data->blksz, host->base + MCI_BYTCNT); + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_INT_ENABLE); + writel(cmd->arg, host->base + MCI_CMDARG); + wmb(); /* drain writebuffer */ + writel(rawcmd, host->base + MCI_CMD); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void phytium_mci_track_cmd_data(struct phytium_mci_host *host, + struct mmc_command *cmd, + struct mmc_data *data) +{ + if (host->error) + dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", + __func__, cmd->opcode, cmd->arg, host->error); +} + +static void phytium_mci_request_done(struct phytium_mci_host *host, struct mmc_request *mrq) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + if (host->cmd) + host->cmd = NULL; + spin_unlock_irqrestore(&host->lock, flags); + phytium_mci_track_cmd_data(host, mrq->cmd, mrq->data); + + if (mrq->data) + phytium_mci_unprepare_data(host, mrq); + + mmc_request_done(host->mmc, mrq); +} + +static bool phytium_mci_cmd_done(struct phytium_mci_host *host, int events, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + bool done = false; + unsigned long flags; + u32 *rsp = cmd->resp; + + if (!(events & (MCI_RAW_INTS_RCRC | MCI_RAW_INTS_RE | MCI_RAW_INTS_CMD | + MCI_RAW_INTS_RTO | MCI_INT_MASK_HTO))) { + dev_err(host->dev, "No interrupt generation:h%x\n", events); + return done; + } + + spin_lock_irqsave(&host->lock, flags); + done = !host->cmd; + host->cmd = NULL; + if (done) { + spin_unlock_irqrestore(&host->lock, flags); + return true; + } + sdr_clr_bits(host->base + MCI_INT_MASK, cmd_ints_mask); + spin_unlock_irqrestore(&host->lock, flags); + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + rsp[3] = readl(host->base + MCI_RESP0); + rsp[2] = readl(host->base + MCI_RESP1); + rsp[1] = readl(host->base + MCI_RESP2); + rsp[0] = readl(host->base + MCI_RESP3); + } else { + rsp[0] = readl(host->base + MCI_RESP0); + } + + if (cmd->opcode == SD_SEND_RELATIVE_ADDR) + host->current_rca = rsp[0] & 0xFFFF0000; + } + if (!(events & (MCI_RAW_INTS_CMD | MCI_INT_MASK_HTO))) { + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && (events & MCI_RAW_INTS_RTO) + && readl(host->base + MCI_CARD_DETECT)) { + cmd->error = -ENOMEDIUM; + rsp[0] = 0; + } else if (events & MCI_RAW_INTS_RTO || + (cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)) { + cmd->error = -ETIMEDOUT; + } else if (events & 
MCI_RAW_INTS_RCRC) { + cmd->error = -EILSEQ; + } else { + cmd->error = -ETIMEDOUT; + } + } + phytium_mci_cmd_next(host, mrq, cmd); + return true; +} + +static void phytium_mci_start_command(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 rawcmd; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + WARN_ON(host->cmd); + host->cmd = cmd; + cmd->error = 0; + writel(0xffffe, host->base + MCI_RAW_INTS); + + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + spin_unlock_irqrestore(&host->lock, flags); + + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, cmd); + return; + } + + spin_lock_irqsave(&host->lock, flags); + sdr_set_bits(host->base + MCI_INT_MASK, cmd_ints_mask); + writel(cmd->arg, host->base + MCI_CMDARG); + writel(rawcmd, host->base + MCI_CMD); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void +phytium_mci_cmd_next(struct phytium_mci_host *host, struct mmc_request *mrq, + struct mmc_command *cmd) +{ + if ((cmd->error && !(cmd->opcode == MMC_SEND_TUNING_BLOCK || + cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)) || + (mrq->sbc && mrq->sbc->error)) { + phytium_mci_request_done(host, mrq); + } else if (cmd == mrq->sbc) { + if ((mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_BLOCK)) { + dev_dbg(host->dev, "%s %d:sbc done and next cmd :%d length:%d\n", + __func__, __LINE__, mrq->cmd->opcode, mrq->data->sg->length); + phytium_mci_prepare_data(host, mrq); + if (host->is_use_dma) + host->adtc_type = BLOCK_RW_ADTC; + else + host->adtc_type = COMMOM_ADTC; + phytium_mci_start_data(host, mrq, mrq->cmd, mrq->data); + } else { + dev_err(host->dev, "%s %d:ERROR: cmd %d followers the SBC\n", + __func__, __LINE__, cmd->opcode); + } + } else if (!cmd->data) { + phytium_mci_request_done(host, mrq); + } +} + +static void phytium_mci_ops_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 data; + int rc; + + host->error = 0; + WARN_ON(host->mrq); + host->mrq = mrq; + + rc = readl_relaxed_poll_timeout(host->base + MCI_STATUS, + data, + !(data & MCI_STATUS_CARD_BUSY), + 0, 2000 * 1000); + if (rc == -ETIMEDOUT) + pr_debug("%s %d, timeout mci_status: 0x%08x\n", __func__, __LINE__, data); + + dev_dbg(host->dev, "%s %d: cmd:%d arg:0x%x\n", __func__, __LINE__, + mrq->cmd->opcode, mrq->cmd->arg); + + if (host->is_device_x100 && mrq->sbc && mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) { + phytium_mci_start_write_multiple_non_dma(mmc, mrq); + return; + } + + if (mrq->sbc) { + phytium_mci_start_command(host, mrq, mrq->sbc); + return; + } + if (mrq->data) { + phytium_mci_prepare_data(host, mrq); + + if ((mrq->data->sg->length >= 512) && host->is_use_dma && + ((mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_BLOCK) || + (mrq->cmd->opcode == SD_IO_RW_EXTENDED))) + + host->adtc_type = BLOCK_RW_ADTC; + else + host->adtc_type = COMMOM_ADTC; + + phytium_mci_start_data(host, mrq, mrq->cmd, mrq->data); + return; + } + phytium_mci_start_command(host, mrq, mrq->cmd); +} + +static void phytium_mci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + struct mmc_data *data = 
+static void phytium_mci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!data) + return; + + phytium_mci_prepare_data(host, mrq); + data->host_cookie |= MCI_ASYNC_FLAG; +} + +static void phytium_mci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, + int err) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!data) + return; + + if (data->host_cookie & MCI_ASYNC_FLAG) { + data->host_cookie &= ~MCI_ASYNC_FLAG; + phytium_mci_unprepare_data(host, mrq); + } +} + +static void phytium_mci_data_read_without_dma(struct phytium_mci_host *host, + struct mmc_data *data) +{ + u32 length, i, j, data_val, dma_len, tmp = 0; + u32 *virt_addr; + unsigned long flags; + struct scatterlist *sg; + + length = data->blocks * data->blksz; + + if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { + spin_lock_irqsave(&host->lock, flags); + if (data->host_cookie & MCI_ASYNC_FLAG) { + tmp = MCI_ASYNC_FLAG; + phytium_mci_post_req(host->mmc, data->mrq, 0); + } else { + phytium_mci_unprepare_data(host, data->mrq); + } + + /* PIO fallback: drain the controller FIFO into each scatterlist segment */ + for_each_sg(data->sg, sg, data->sg_count, i) { + dma_len = sg_dma_len(sg); + virt_addr = sg_virt(sg); + + for (j = 0; j < (dma_len / 4); j++) { + data_val = readl(host->base + MCI_DATA); + memcpy(virt_addr, &data_val, 4); + ++virt_addr; + } + } + + if (tmp & MCI_ASYNC_FLAG) + phytium_mci_pre_req(host->mmc, data->mrq); + else + phytium_mci_prepare_data(host, data->mrq); + + spin_unlock_irqrestore(&host->lock, flags); + } + data->bytes_xfered = length; +} + +static void phytium_mci_data_xfer_next(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_data *data) +{ + if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && + (data->error || !mrq->sbc)) { + while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) + cpu_relax(); + phytium_mci_start_command(host, mrq, mrq->stop); + } else { + phytium_mci_request_done(host, mrq); + } +} + +static bool phytium_mci_data_xfer_done(struct phytium_mci_host *host, u32 events, + struct mmc_request *mrq, struct mmc_data *data) +{ + unsigned long flags; + bool done; + + unsigned int check_data = events & (MCI_RAW_INTS_DTO | MCI_RAW_INTS_RCRC | + MCI_RAW_INTS_DCRC | MCI_RAW_INTS_RE | + MCI_RAW_INTS_DRTO | MCI_RAW_INTS_EBE | + MCI_DMAC_STATUS_AIS | MCI_DMAC_STATUS_DU | + MCI_RAW_INTS_SBE_BCI | MCI_INT_MASK_RTO); + + spin_lock_irqsave(&host->lock, flags); + done = !host->data; + + if (check_data || host->data) + host->data = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (done) + return true; + if (check_data) { + spin_lock_irqsave(&host->lock, flags); + sdr_clr_bits(host->base + MCI_DMAC_INT_ENA, dmac_ints_mask); + sdr_clr_bits(host->base + MCI_INT_MASK, data_ints_mask); + /* Stop the IDMAC running */ + sdr_clr_bits(host->base + MCI_BUS_MODE, MCI_BUS_MODE_DE); + dev_dbg(host->dev, "DMA stop\n"); + spin_unlock_irqrestore(&host->lock, flags); + + if (events & MCI_RAW_INTS_DTO) { + if (!host->is_use_dma || + (host->is_use_dma && host->adtc_type == COMMOM_ADTC && + (mrq->cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC)) + phytium_mci_data_read_without_dma(host, data); + else + data->bytes_xfered = data->blocks * data->blksz; + } else { + data->bytes_xfered = 0; + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) + && readl(host->base + MCI_CARD_DETECT) + && (events & cmd_err_ints_mask)) { + data->error = -ENOMEDIUM; + data->mrq->cmd->error = -ENOMEDIUM; + } else if (events & (MCI_RAW_INTS_DCRC | MCI_RAW_INTS_EBE | + MCI_RAW_INTS_SBE_BCI)) { + data->error = -EILSEQ; + host->cmd = NULL; + } else { + data->error = -ETIMEDOUT; + host->cmd = NULL; + } + } + +
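/* success or error, hand the request to the common completion path */ +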
phytium_mci_data_xfer_next(host, mrq, data); + done = true; + } + return done; +} + +static int phytium_mci_card_busy(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 status; + + status = readl(host->base + MCI_STATUS); + + return !!(status & MCI_STATUS_CARD_BUSY); +} + +static void __phytium_mci_enable_sdio_irq(struct phytium_mci_host *host, int enable) +{ + if (enable) + sdr_set_bits(host->base + MCI_INT_MASK, MCI_INT_MASK_SDIO); + else + sdr_clr_bits(host->base + MCI_INT_MASK, MCI_INT_MASK_SDIO); +} + +static void phytium_mci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + + __phytium_mci_enable_sdio_irq(host, enable); +} + +static void hotplug_timer_func(struct timer_list *t) +{ + struct phytium_mci_host *host; + u32 status; + + host = timer_container_of(host, t, hotplug_timer); + if (!host) + return; + + status = readl(host->base + MCI_CARD_DETECT); + + if (status & 0x1) { + if (host->mmc->card) { + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, msecs_to_jiffies(100)); + } + } else { + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, msecs_to_jiffies(200)); + } +} + +static int phytium_mci_err_irq(struct phytium_mci_host *host, u32 dmac_events, u32 events) +{ + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + + if (cmd && (cmd == mrq->sbc)) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, mrq->sbc); + } else if (cmd && (cmd == mrq->stop)) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, mrq->stop); + } else if (data) { + data->error = -ETIMEDOUT; + if ((data->flags & MMC_DATA_READ) == MMC_DATA_READ || + (data->flags & MMC_DATA_WRITE) == MMC_DATA_WRITE) + phytium_mci_data_xfer_done(host, events | dmac_events, mrq, data); + } else if (cmd) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, mrq->cmd); + } + + return 0; +} + +static irqreturn_t phytium_mci_irq(int irq, void *dev_id) +{ + struct phytium_mci_host *host = (struct phytium_mci_host *) dev_id; + unsigned long flags; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + u32 events, event_mask, dmac_events, dmac_evt_mask; + + if (!host) + return IRQ_NONE; + writel(0, host->base + 0xfd0); + + spin_lock_irqsave(&host->lock, flags); + events = readl(host->base + MCI_RAW_INTS); + dmac_events = readl(host->base + MCI_DMAC_STATUS); + event_mask = readl(host->base + MCI_INT_MASK); + dmac_evt_mask = readl(host->base + MCI_DMAC_INT_ENA); + if ((!events) && (!(dmac_events&0x1fff))) { + spin_unlock_irqrestore(&host->lock, flags); + return IRQ_NONE; + } + dev_dbg(host->dev, "%s:events:%x,mask:0x%x,dmac_events:%x,dmac_mask:0x%x,cmd:%d\n", + __func__, events, event_mask, dmac_events, dmac_evt_mask, + host->mrq ? 
host->mrq->cmd->opcode : 255); + + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + + if ((events & event_mask) & MCI_RAW_INTS_SDIO) + __phytium_mci_enable_sdio_irq(host, 0); + + writel((events & event_mask), host->base + MCI_RAW_INTS); + writel(dmac_events, host->base + MCI_DMAC_STATUS); + spin_unlock_irqrestore(&host->lock, flags); + + if ((events & event_mask) & MCI_RAW_INTS_SDIO) + sdio_signal_irq(host->mmc); + + if (((events & event_mask) == 0) && ((dmac_evt_mask & dmac_events) == 0)) + goto irq_out; + + if (((events & event_mask) & MCI_RAW_INTS_CD) && + !(host->mmc->caps & MMC_CAP_NONREMOVABLE)) { + mod_timer(&host->hotplug_timer, jiffies + usecs_to_jiffies(20000)); + dev_dbg(host->dev, "sd status changed here ! status:[%d] [%s %d]", + readl(host->base + MCI_CARD_DETECT), __func__, __LINE__); + + if ((events & event_mask) == MCI_RAW_INTS_CD) + goto irq_out; + } + + if (!mrq) { + if (events & MCI_RAW_INTS_HLE) + dev_dbg(host->dev, + "%s: MRQ=NULL and HW write locked, events=%08x,event_mask=%08x\n", + __func__, events, event_mask); + else + dev_dbg(host->dev, "%s: MRQ=NULL events:%08X evt_mask=%08X,sd_status:%d\n", + __func__, events, event_mask, readl(host->base + MCI_CARD_DETECT)); + goto irq_out; + } + + if ((dmac_events & dmac_err_ints_mask) || (events & cmd_err_ints_mask)) { + dev_dbg(host->dev, "ERR:events:%x,mask:0x%x,dmac_evts:%x,dmac_mask:0x%x,cmd:%d\n", + events, event_mask, dmac_events, dmac_evt_mask, mrq->cmd->opcode); + phytium_mci_err_irq(host, dmac_events & dmac_err_ints_mask, + events & cmd_err_ints_mask); + goto irq_out; + } + + if (cmd && (events & MCI_MASKED_INTS_DTO) && (events & MCI_MASKED_INTS_CMD)) { + phytium_mci_cmd_done(host, events, mrq, cmd); + phytium_mci_data_xfer_done(host, (events & data_ints_mask) | + (dmac_events & dmac_ints_mask), mrq, data); + } else if (cmd && (events & MCI_MASKED_INTS_CMD || + ((events & MCI_INT_MASK_HTO) && (cmd->opcode == SD_SWITCH_VOLTAGE)))) { + phytium_mci_cmd_done(host, events, mrq, cmd); + } else if (events & MCI_MASKED_INTS_DTO) { + phytium_mci_data_xfer_done(host, (events & data_ints_mask) | + (dmac_events & dmac_ints_mask), mrq, data); + } + +irq_out: + return IRQ_HANDLED; +} + +static void phytium_mci_init_hw(struct phytium_mci_host *host) +{ + u32 val; + int uhs_reg_value = 0x502; + + writel(MCI_SET_FIFOTH(0x2, 0x7, 0x100), host->base + MCI_FIFOTH); + writel(0x800001, host->base + MCI_CARD_THRCTL); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + phytium_mci_update_external_clk(host, uhs_reg_value); + + sdr_set_bits(host->base + MCI_PWREN, MCI_PWREN_ENABLE); + sdr_set_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + sdr_set_bits(host->base + MCI_UHS_REG_EXT, MCI_EXT_CLK_ENABLE); + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + + phytium_mci_reset_hw(host); + + if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + sdr_set_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); + else + sdr_clr_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); + + writel(0, host->base + MCI_INT_MASK); + val = readl(host->base + MCI_RAW_INTS); + writel(val, host->base + MCI_RAW_INTS); + writel(0, host->base + MCI_DMAC_INT_ENA); + val = readl(host->base + MCI_DMAC_STATUS); + writel(val, host->base + MCI_DMAC_STATUS); + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE)) + writel(MCI_INT_MASK_CD, host->base + MCI_INT_MASK); + + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_INT_ENABLE | + MCI_CNTRL_USE_INTERNAL_DMAC); + + writel(0xFFFFFFFF, host->base + MCI_TMOUT); + dev_info(host->dev, "init 
hardware done!"); + +} + +void phytium_mci_deinit_hw(struct phytium_mci_host *host) +{ + u32 val; + + sdr_clr_bits(host->base + MCI_PWREN, MCI_PWREN_ENABLE); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + sdr_clr_bits(host->base + MCI_UHS_REG_EXT, MCI_EXT_CLK_ENABLE); + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + writel(0, host->base + MCI_INT_MASK); + val = readl(host->base + MCI_RAW_INTS); + writel(val, host->base + MCI_RAW_INTS); + writel(0, host->base + MCI_DMAC_INT_ENA); + val = readl(host->base + MCI_DMAC_STATUS); + writel(val, host->base + MCI_DMAC_STATUS); + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE)) + writel(MCI_INT_MASK_CD, host->base + MCI_INT_MASK); +} +EXPORT_SYMBOL_GPL(phytium_mci_deinit_hw); + +static void phytium_mci_adma_reset(struct phytium_mci_host *host) +{ + u32 bmod = readl(host->base + MCI_BUS_MODE); + + bmod |= MCI_BUS_MODE_SWR; + writel(bmod, host->base + MCI_BUS_MODE); +} + +static void phytium_mci_init_adma_table(struct phytium_mci_host *host, + struct phytium_mci_dma *dma) +{ + struct phytium_adma2_64_desc *adma_table = dma->adma_table; + dma_addr_t dma_addr; + int i; + + memset(adma_table, 0, sizeof(struct phytium_adma2_64_desc) * MAX_BD_NUM); + + for (i = 0; i < (MAX_BD_NUM - 1); i++) { + dma_addr = dma->adma_addr + sizeof(*adma_table) * (i + 1); + adma_table[i].desc_lo = lower_32_bits(dma_addr); + adma_table[i].desc_hi = upper_32_bits(dma_addr); + adma_table[i].attribute = 0; + adma_table[i].NON1 = 0; + adma_table[i].len = 0; + adma_table[i].NON2 = 0; + } + + phytium_mci_adma_reset(host); +} + +static void phytium_mci_set_buswidth(struct phytium_mci_host *host, u32 width) +{ + u32 val; + + switch (width) { + case MMC_BUS_WIDTH_1: + val = MCI_BUS_1BITS; + break; + + case MMC_BUS_WIDTH_4: + val = MCI_BUS_4BITS; + break; + + case MMC_BUS_WIDTH_8: + val = MCI_BUS_8BITS; + break; + default: + val = MCI_BUS_4BITS; + break; + } + writel(val, host->base + MCI_CTYPE); + dev_dbg(host->dev, "Bus Width = %d, set value:0x%x\n", width, val); +} + +static void phytium_mci_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + + if (ios->timing == MMC_TIMING_MMC_DDR52 || ios->timing == MMC_TIMING_UHS_DDR50) + sdr_set_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_DDR); + else + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_DDR); + + phytium_mci_set_buswidth(host, ios->bus_width); + + switch (ios->power_mode) { + case MMC_POWER_UP: + set_bit(MCI_CARD_NEED_INIT, &host->flags); + writel(MCI_POWER_ON, host->base + MCI_PWREN); + break; + + case MMC_POWER_ON: + break; + + case MMC_POWER_OFF: + writel(MCI_POWER_OFF, host->base + MCI_PWREN); + break; + + default: + break; + } + phytium_mci_set_clk(host, ios); +} + +static void phytium_mci_ack_sdio_irq(struct mmc_host *mmc) +{ + unsigned long flags; + struct phytium_mci_host *host = mmc_priv(mmc); + + spin_lock_irqsave(&host->lock, flags); + __phytium_mci_enable_sdio_irq(host, 1); + spin_unlock_irqrestore(&host->lock, flags); +} + +static int phytium_mci_get_cd(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 status; + + if (mmc->caps & MMC_CAP_NONREMOVABLE) + return 1; + + status = readl(host->base + MCI_CARD_DETECT); + + if ((status & 0x1) == 0x1) + return 0; + + return 1; +} + +static int phytium_mci_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + unsigned int is_voltage_180 = 0; + + is_voltage_180 = readl(host->base + MCI_UHS_REG); + if 
((mmc->caps & MMC_CAP_NONREMOVABLE) && (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_180)) + return -EINVAL; + + if ((ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) && (is_voltage_180 & 0x1)) + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + else if ((ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) && (!(is_voltage_180 & 0x1))) + sdr_set_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_120) + return -EINVAL; + return 0; +} + +static void phytium_mci_hw_reset(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 reset_flag; + + if (host->is_use_dma) { + reset_flag = MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET; + phytium_mci_adma_reset(host); + sdr_set_bits(host->base + MCI_CNTRL, reset_flag); + } else { + reset_flag = MCI_CNTRL_FIFO_RESET; + sdr_set_bits(host->base + MCI_CNTRL, reset_flag); + } + + while (readl(host->base + MCI_CNTRL) & reset_flag) + cpu_relax(); + + sdr_clr_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); + udelay(5); + sdr_set_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); + usleep_range(200, 300); +} + +#ifdef CONFIG_PM_SLEEP +int phytium_mci_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_deinit_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_suspend); + +int phytium_mci_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_init_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_resume); + +#endif + +#ifdef CONFIG_PM +int phytium_mci_runtime_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_deinit_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_runtime_suspend); + +int phytium_mci_runtime_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_init_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_runtime_resume); + +#endif + +static struct mmc_host_ops phytium_mci_ops = { + .post_req = phytium_mci_post_req, + .pre_req = phytium_mci_pre_req, + .request = phytium_mci_ops_request, + .set_ios = phytium_mci_ops_set_ios, + .get_cd = phytium_mci_get_cd, + .enable_sdio_irq = phytium_mci_enable_sdio_irq, + .ack_sdio_irq = phytium_mci_ack_sdio_irq, + .card_busy = phytium_mci_card_busy, + .start_signal_voltage_switch = phytium_mci_ops_switch_volt, + .card_hw_reset = phytium_mci_hw_reset, +}; + +int phytium_mci_common_probe(struct phytium_mci_host *host) +{ + struct mmc_host *mmc = host->mmc; + struct device *dev = host->dev; + int ret; + + dma_set_mask(dev, DMA_BIT_MASK(64)); + dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); + + timer_setup(&host->hotplug_timer, hotplug_timer_func, 0); + + mmc->f_min = MCI_F_MIN; + if (!mmc->f_max) + mmc->f_max = MCI_F_MAX; + + mmc->ops = &phytium_mci_ops; + mmc->ocr_avail_sdio = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->ocr_avail_mmc = MMC_VDD_165_195; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195; + mmc->caps |= host->caps; + + if (mmc->caps & MMC_CAP_SDIO_IRQ) { + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + dev_dbg(host->dev, "%s %d: MMC_CAP_SDIO_IRQ\n", __func__, __LINE__); + } + mmc->caps2 |= host->caps2; + if (host->is_use_dma) { + /* MMC core transfer sizes tunable parameters */ + mmc->max_segs = 
MAX_BD_NUM; + mmc->max_seg_size = 4 * 1024; + mmc->max_blk_size = 512; + mmc->max_req_size = 512 * 1024; + mmc->max_blk_count = mmc->max_req_size / 512; + host->dma.adma_table = dma_alloc_coherent(host->dev, + MAX_BD_NUM * + sizeof(struct phytium_adma2_64_desc), + &host->dma.adma_addr, GFP_KERNEL); + if (!host->dma.adma_table) + return MCI_REALEASE_MEM; + + host->dma.desc_sz = ADMA2_64_DESC_SZ; + phytium_mci_init_adma_table(host, &host->dma); + } else { + mmc->max_segs = MAX_BD_NUM; + mmc->max_seg_size = 4 * 1024; + mmc->max_blk_size = 512; + mmc->max_req_size = 4 * 512; + mmc->max_blk_count = mmc->max_req_size / 512; + } + + spin_lock_init(&host->lock); + + phytium_mci_init_hw(host); + ret = devm_request_irq(host->dev, host->irq, phytium_mci_irq, + host->irq_flags, "phytium-mci", host); + + if (ret) + return ret; + + ret = mmc_add_host(mmc); + + if (ret) { + dev_err(host->dev, "%s %d: mmc_add_host failed!\n", __func__, __LINE__); + return ret; + } + return 0; +} +EXPORT_SYMBOL(phytium_mci_common_probe); + +MODULE_DESCRIPTION("Phytium Multimedia Card Interface driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cheng Quan "); diff --git a/drivers/mmc/host/phytium-mci.h b/drivers/mmc/host/phytium-mci.h new file mode 100644 index 0000000000000..8006fb4f6340e --- /dev/null +++ b/drivers/mmc/host/phytium-mci.h @@ -0,0 +1,355 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Driver for Phytium Multimedia Card Interface + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_MCI_H +#define __PHYTIUM_MCI_H + +#include +#include +#include +#include +#include +#include +#include + +/*------------------------------------------------------*/ +/* Common Definition */ +/*------------------------------------------------------*/ +#define MAX_BD_NUM 128 +#define SD_BLOCK_SIZE 512 + +#define MCI_BUS_1BITS 0x0 +#define MCI_BUS_4BITS 0x1 +#define MCI_BUS_8BITS (0x1 << 16) + +#define MCI_SD_DRV_VALUE 0 +#define MCI_SD_SAMP_VALUE_MAX 0 +#define MCI_SD_SAMP_VALUE_MIN 0 + +#define MCI_TIMEOUT_CMD_VALUE 0xFFFFFFFF +#define MCI_POWER_ON 1 +#define MCI_POWER_OFF 0 + +#define MCI_PREPARE_FLAG (0x1 << 0) +#define MCI_ASYNC_FLAG (0x1 << 1) +#define MCI_MMAP_FLAG (0x1 << 2) + +#define MCI_CMD_TIMEOUT (HZ/10 * 50) /* 100ms x 50 */ +#define MCI_DATA_TIMEOUT (HZ * 10) /* 1s x 10 */ + +#define MCI_CMD_TYPE_ADTC 0x2 + +#define MCI_F_MIN 400000 +#define MCI_F_MAX 50000000 + +#define MCI_CLK 1200000000 +#define MCI_REALEASE_MEM 0x1 +#define MCI_MAX_FIFO_CNT 0x800 + +/* FIFOTH register defines */ +#define MCI_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \ + ((r) & 0xFFF) << 16 | ((t) & 0xFFF)) +/* Card read threshold */ +#define MCI_SET_THLD(v, x) (((v) & 0xFFF) << 16 | (x)) +#define MCI_CARD_WR_THR_EN BIT(2) +#define MCI_CARD_RD_THR_EN BIT(0) + +/*----------------------------------------------------------------------*/ +/* Register Offset */ +/*----------------------------------------------------------------------*/ +#define MCI_CNTRL 0x00 /* the controller config reg */ +#define MCI_PWREN 0x04 /* the power enable reg */ +#define MCI_CLKDIV 0x08 /* the clock divider reg */ +#define MCI_CLKENA 0x10 /* the clock enable reg */ +#define MCI_TMOUT 0x14 /* the timeout reg */ +#define MCI_CTYPE 0x18 /* the card type reg */ +#define MCI_BLKSIZ 0x1C /* the block size reg */ +#define MCI_BYTCNT 0x20 /* the byte count reg */ +#define MCI_INT_MASK 0x24 /* the interrupt mask reg */ +#define MCI_CMDARG 0x28 /* the command argument reg */ +#define MCI_CMD 0x2C /* the command reg */ +#define MCI_RESP0 0x30 /* the
response reg0 */ +#define MCI_RESP1 0x34 /* the response reg1 */ +#define MCI_RESP2 0x38 /* the response reg2 */ +#define MCI_RESP3 0X3C /* the response reg3 */ +#define MCI_MASKED_INTS 0x40 /* the masked interrupt status reg */ +#define MCI_RAW_INTS 0x44 /* the raw interrupt status reg */ +#define MCI_STATUS 0x48 /* the status reg */ +#define MCI_FIFOTH 0x4C /* the FIFO threshold watermark reg */ +#define MCI_CARD_DETECT 0x50 /* the card detect reg */ +#define MCI_CARD_WRTPRT 0x54 /* the card write protect reg */ +#define MCI_CCLK_RDY 0x58 /* first div is ready? 1:ready,0:not ready*/ +#define MCI_TRAN_CARD_CNT 0x5C /* the transferred CIU card byte count reg */ +#define MCI_TRAN_FIFO_CNT 0x60 /* the transferred host to FIFO byte count reg */ +#define MCI_DEBNCE 0x64 /* the debounce count reg */ +#define MCI_UID 0x68 /* the user ID reg */ +#define MCI_VID 0x6C /* the controller version ID reg */ +#define MCI_HWCONF 0x70 /* the hardware configuration reg */ +#define MCI_UHS_REG 0x74 /* the UHS-I reg */ +#define MCI_CARD_RESET 0x78 /* the card reset reg */ +#define MCI_BUS_MODE 0x80 /* the bus mode reg */ +#define MCI_DESC_LIST_ADDRL 0x88 /* the descriptor list low base address reg */ +#define MCI_DESC_LIST_ADDRH 0x8C /* the descriptor list high base address reg */ +#define MCI_DMAC_STATUS 0x90 /* the internal DMAC status reg */ +#define MCI_DMAC_INT_ENA 0x94 /* the internal DMAC interrupt enable reg */ +#define MCI_CUR_DESC_ADDRL 0x98 /* the current host descriptor low address reg */ +#define MCI_CUR_DESC_ADDRH 0x9C /* the current host descriptor high address reg */ +#define MCI_CUR_BUF_ADDRL 0xA0 /* the current buffer low address reg */ +#define MCI_CUR_BUF_ADDRH 0xA4 /* the current buffer high address reg */ +#define MCI_CARD_THRCTL 0x100 /* the card threshold control reg */ +#define MCI_UHS_REG_EXT 0x108 /* the UHS register extension */ +#define MCI_EMMC_DDR_REG 0x10C /* the EMMC DDR reg */ +#define MCI_ENABLE_SHIFT 0x110 /* the enable phase shift reg */ +#define MCI_DATA 0x200 /* the data FIFO access */ + +/* Command register defines */ +#define MCI_CMD_START BIT(31) +#define MCI_CMD_USE_HOLD_REG BIT(29) +#define MCI_CMD_VOLT_SWITCH BIT(28) +#define MCI_CMD_CCS_EXP BIT(23) +#define MCI_CMD_CEATA_RD BIT(22) +#define MCI_CMD_UPD_CLK BIT(21) +#define MCI_CMD_INIT BIT(15) +#define MCI_CMD_STOP BIT(14) +#define MCI_CMD_PRV_DAT_WAIT BIT(13) +#define MCI_CMD_SEND_STOP BIT(12) +#define MCI_CMD_STRM_MODE BIT(11) +#define MCI_CMD_DAT_WR BIT(10) +#define MCI_CMD_DAT_EXP BIT(9) +#define MCI_CMD_RESP_CRC BIT(8) +#define MCI_CMD_RESP_LONG BIT(7) +#define MCI_CMD_RESP_EXP BIT(6) +#define MCI_CMD_INDX(n) ((n) & 0x1F) + +/*------------------------------------------------------*/ +/* Register Mask */ +/*------------------------------------------------------*/ +/* MCI_CNTRL mask */ +#define MCI_CNTRL_CONTROLLER_RESET (0x1 << 0) /* RW */ +#define MCI_CNTRL_FIFO_RESET (0x1 << 1) /* RW */ +#define MCI_CNTRL_DMA_RESET (0x1 << 2) /* RW */ +#define MCI_CNTRL_RES (0x1 << 3) /* */ +#define MCI_CNTRL_INT_ENABLE (0x1 << 4) /* RW */ +#define MCI_CNTRL_DMA_ENABLE (0x1 << 5) /* RW */ +#define MCI_CNTRL_READ_WAIT (0x1 << 6) /* RW */ +#define MCI_CNTRL_SEND_IRQ_RESPONSE (0x1 << 7) /* RW */ +#define MCI_CNTRL_ABORT_READ_DATA (0x1 << 8) /* RW */ +#define MCI_CNTRL_ENDIAN (0x1 << 11) /* RW */ +//#define MCI_CNTRL_CARD_VOLTAGE_A (0xF << 16) /* RW */ +//#define MCI_CNTRL_CARD_VOLTAGE_B (0xF << 20) /* RW */ +#define MCI_CNTRL_ENABLE_OD_PULLUP (0x1 << 24) /* RW */ +#define MCI_CNTRL_USE_INTERNAL_DMAC (0x1 << 25) /* RW */ + +/* 
MCI_PWREN mask */ +#define MCI_PWREN_ENABLE (0x1 << 0) /* RW */ + +/* MCI_CLKENA mask */ +#define MCI_CLKENA_CCLK_ENABLE (0x1 << 0) /* RW */ +#define MCI_CLKENA_CCLK_LOW_POWER (0x1 << 16) /* RW */ +#define MCI_EXT_CLK_ENABLE (0x1 << 1) + +/* MCI_INT_MASK mask */ +#define MCI_INT_MASK_CD (0x1 << 0) /* RW */ +#define MCI_INT_MASK_RE (0x1 << 1) /* RW */ +#define MCI_INT_MASK_CMD (0x1 << 2) /* RW */ +#define MCI_INT_MASK_DTO (0x1 << 3) /* RW */ +#define MCI_INT_MASK_TXDR (0x1 << 4) /* RW */ +#define MCI_INT_MASK_RXDR (0x1 << 5) /* RW */ +#define MCI_INT_MASK_RCRC (0x1 << 6) /* RW */ +#define MCI_INT_MASK_DCRC (0x1 << 7) /* RW */ +#define MCI_INT_MASK_RTO (0x1 << 8) /* RW */ +#define MCI_INT_MASK_DRTO (0x1 << 9) /* RW */ +#define MCI_INT_MASK_HTO (0x1 << 10) /* RW */ +#define MCI_INT_MASK_FRUN (0x1 << 11) /* RW */ +#define MCI_INT_MASK_HLE (0x1 << 12) /* RW */ +#define MCI_INT_MASK_SBE_BCI (0x1 << 13) /* RW */ +#define MCI_INT_MASK_ACD (0x1 << 14) /* RW */ +#define MCI_INT_MASK_EBE (0x1 << 15) /* RW */ +#define MCI_INT_MASK_SDIO (0x1 << 16) /* RW */ + +/* MCI_MASKED_INTS mask */ +#define MCI_MASKED_INTS_CD (0x1 << 0) /* RO */ +#define MCI_MASKED_INTS_RE (0x1 << 1) /* RO */ +#define MCI_MASKED_INTS_CMD (0x1 << 2) /* RO */ +#define MCI_MASKED_INTS_DTO (0x1 << 3) /* RO */ +#define MCI_MASKED_INTS_TXDR (0x1 << 4) /* RO */ +#define MCI_MASKED_INTS_RXDR (0x1 << 5) /* RO */ +#define MCI_MASKED_INTS_RCRC (0x1 << 6) /* RO */ +#define MCI_MASKED_INTS_DCRC (0x1 << 7) /* RO */ +#define MCI_MASKED_INTS_RTO (0x1 << 8) /* RO */ +#define MCI_MASKED_INTS_DRTO (0x1 << 9) /* RO */ +#define MCI_MASKED_INTS_HTO (0x1 << 10) /* RO */ +#define MCI_MASKED_INTS_FRUN (0x1 << 11) /* RO */ +#define MCI_MASKED_INTS_HLE (0x1 << 12) /* RO */ +#define MCI_MASKED_INTS_SBE_BCI (0x1 << 13) /* RO */ +#define MCI_MASKED_INTS_ACD (0x1 << 14) /* RO */ +#define MCI_MASKED_INTS_EBE (0x1 << 15) /* RO */ +#define MCI_MASKED_INTS_SDIO (0x1 << 16) /* RO */ + +/* MCI_RAW_INTS mask */ +#define MCI_RAW_INTS_CD (0x1 << 0) /* W1C */ +#define MCI_RAW_INTS_RE (0x1 << 1) /* W1C */ +#define MCI_RAW_INTS_CMD (0x1 << 2) /* W1C */ +#define MCI_RAW_INTS_DTO (0x1 << 3) /* W1C */ +#define MCI_RAW_INTS_TXDR (0x1 << 4) /* W1C */ +#define MCI_RAW_INTS_RXDR (0x1 << 5) /* W1C */ +#define MCI_RAW_INTS_RCRC (0x1 << 6) /* W1C */ +#define MCI_RAW_INTS_DCRC (0x1 << 7) /* W1C */ +#define MCI_RAW_INTS_RTO (0x1 << 8) /* W1C */ +#define MCI_RAW_INTS_DRTO (0x1 << 9) /* W1C */ +#define MCI_RAW_INTS_HTO (0x1 << 10) /* W1C */ +#define MCI_RAW_INTS_FRUN (0x1 << 11) /* W1C */ +#define MCI_RAW_INTS_HLE (0x1 << 12) /* W1C */ +#define MCI_RAW_INTS_SBE_BCI (0x1 << 13) /* W1C */ +#define MCI_RAW_INTS_ACD (0x1 << 14) /* W1C */ +#define MCI_RAW_INTS_EBE (0x1 << 15) /* W1C */ +#define MCI_RAW_INTS_SDIO (0x1 << 16) /* W1C */ + +/* MCI_STATUS mask */ +#define MCI_STATUS_FIFO_RX (0x1 << 0) /* RO */ +#define MCI_STATUS_FIFO_TX (0x1 << 1) /* RO */ +#define MCI_STATUS_FIFO_EMPTY (0x1 << 2) /* RO */ +#define MCI_STATUS_FIFO_FULL (0x1 << 3) /* RO */ +#define MCI_STATUS_CARD_STATUS (0x1 << 8) /* RO */ +#define MCI_STATUS_CARD_BUSY (0x1 << 9) /* RO */ +#define MCI_STATUS_DATA_BUSY (0x1 << 10) /* RO */ +#define MCI_STATUS_DMA_ACK (0x1 << 31) /* RO */ +#define MCI_STATUS_DMA_REQ (0x1ULL << 32) /* RO */ + +/* MCI_UHS_REG mask */ +#define MCI_UHS_REG_VOLT (0x1 << 0) /* RW */ +#define MCI_UHS_REG_DDR (0x1 << 16) /* RW */ + +/* MCI_CARD_RESET mask */ +#define MCI_CARD_RESET_ENABLE (0x1 << 0) /* RW */ + +/* MCI_BUS_MODE mask */ +#define MCI_BUS_MODE_SWR (0x1 << 0) /* RW */ +#define MCI_BUS_MODE_FB
(0x1 << 1) /* RW */ +#define MCI_BUS_MODE_DE (0x1 << 7) /* RW */ + +/* MCI_DMAC_STATUS mask */ +#define MCI_DMAC_STATUS_TI (0x1 << 0) /* RW */ +#define MCI_DMAC_STATUS_RI (0x1 << 1) /* RW */ +#define MCI_DMAC_STATUS_FBE (0x1 << 2) /* RW */ +#define MCI_DMAC_STATUS_DU (0x1 << 4) /* RW */ +#define MCI_DMAC_STATUS_NIS (0x1 << 8) /* RW */ +#define MCI_DMAC_STATUS_AIS (0x1 << 9) /* RW */ + +/* MCI_DMAC_INT_ENA mask */ +#define MCI_DMAC_INT_ENA_TI (0x1 << 0) /* RW */ +#define MCI_DMAC_INT_ENA_RI (0x1 << 1) /* RW */ +#define MCI_DMAC_INT_ENA_FBE (0x1 << 2) /* RW */ +#define MCI_DMAC_INT_ENA_DU (0x1 << 4) /* RW */ +#define MCI_DMAC_INT_ENA_CES (0x1 << 5) /* RW */ +#define MCI_DMAC_INT_ENA_NIS (0x1 << 8) /* RW */ +#define MCI_DMAC_INT_ENA_AIS (0x1 << 9) /* RW */ + +/* MCI_CARD_THRCTL mask */ +#define MCI_CARD_THRCTL_CARDRD (0x1 << 0) /* RW */ +#define MCI_CARD_THRCTL_BUSY_CLR (0x1 << 1) /* RW */ +#define MCI_CARD_THRCTL_CARDWR (0x1 << 2) /* RW */ + +/* MCI_UHS_REG_EXT mask */ +#define MCI_UHS_REG_EXT_MMC_VOLT (0x1 << 0) /* RW */ +#define MCI_UHS_REG_EXT_CLK_ENA (0x1 << 1) /* RW */ + +/* MCI_EMMC_DDR_REG mask */ +#define MCI_EMMC_DDR_CYCLE (0x1 << 0) /* RW */ + +/*--------------------------------------*/ +/* Structure Type */ +/*--------------------------------------*/ +/* Maximum segments assuming a 512KiB maximum request */ +/* size and a minimum 4KiB page size. */ +#define MCI_MAX_SEGS 128 +/* ADMA2 64-bit DMA descriptor size */ +#define ADMA2_64_DESC_SZ 32 + +/* Each descriptor can transfer up to 4KB of data in chained mode */ +/* ADMA2 64-bit descriptor. */ +struct phytium_adma2_64_desc { + u32 attribute; +#define IDMAC_DES0_DIC BIT(1) +#define IDMAC_DES0_LD BIT(2) +#define IDMAC_DES0_FD BIT(3) +#define IDMAC_DES0_CH BIT(4) +#define IDMAC_DES0_ER BIT(5) +#define IDMAC_DES0_CES BIT(30) +#define IDMAC_DES0_OWN BIT(31) + u32 NON1; + u32 len; + u32 NON2; + u32 addr_lo; /* Lower 32-bits of Buffer Address Pointer 1 */ + u32 addr_hi; /* Upper 32-bits of Buffer Address Pointer 1 */ + u32 desc_lo; /* Lower 32-bits of Next Descriptor Address */ + u32 desc_hi; /* Upper 32-bits of Next Descriptor Address */ +} __packed __aligned(4); + +struct phytium_mci_dma { + struct scatterlist *sg; /* I/O scatter list */ + /* ADMA descriptor table, pointer to adma_table array */ + struct phytium_adma2_64_desc *adma_table; + /* Mapped ADMA descr.
table, the physical address of adma_table array */ + dma_addr_t adma_addr; + unsigned int desc_sz; /* ADMA descriptor size */ +}; + +enum adtc_t { + COMMOM_ADTC = 0, + BLOCK_RW_ADTC = 1 +}; + +struct phytium_mci_host { + struct device *dev; + struct mmc_host *mmc; + u32 caps; + u32 caps2; + spinlock_t lock; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + int error; + void __iomem *base; /* host base address */ + void *adma_table1; + dma_addr_t adma_addr1; + struct phytium_mci_dma dma_rx; /* dma channel */ + struct phytium_mci_dma dma_tx; /* dma channel */ + struct phytium_mci_dma dma; /* dma channel */ + u64 dma_mask; + bool vqmmc_enabled; + u32 *sg_virt_addr; + enum adtc_t adtc_type; /* 0: common adtc cmd; 1: block r/w adtc cmd */ + struct timer_list hotplug_timer; + struct delayed_work req_timeout; + int irq; /* host interrupt */ + u32 current_rca; /* the current RCA value */ + u32 current_ios_clk; + u32 is_use_dma; + u32 is_device_x100; + struct clk *src_clk; /* phytium_mci source clock */ + unsigned long clk_rate; + unsigned long clk_div; + unsigned long irq_flags; + unsigned long flags; +#define MCI_CARD_NEED_INIT 1 + bool use_hold; /* use hold reg */ + bool clk_set; /* clock set function enable */ + s32 clk_smpl_drv_25m; /* 25M clk smpl & drv */ + s32 clk_smpl_drv_50m; /* 50M clk smpl & drv */ + s32 clk_smpl_drv_66m; /* 66M clk smpl & drv */ + s32 clk_smpl_drv_100m; /* 100M clk smpl & drv */ +}; + +int phytium_mci_common_probe(struct phytium_mci_host *host); +void phytium_mci_deinit_hw(struct phytium_mci_host *host); +int phytium_mci_runtime_suspend(struct device *dev); +int phytium_mci_runtime_resume(struct device *dev); +int phytium_mci_resume(struct device *dev); +int phytium_mci_suspend(struct device *dev); + +#endif /* __PHYTIUM_MCI_H */ diff --git a/drivers/mmc/host/phytium-sdci.c b/drivers/mmc/host/phytium-sdci.c new file mode 100644 index 0000000000000..617aeccc582cc --- /dev/null +++ b/drivers/mmc/host/phytium-sdci.c @@ -0,0 +1,1438 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SDCI driver + * + * Copyright (C) 2019-2023, Phytium Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "phytium-sdci.h" + +static const u32 cmd_ints_mask = SDCI_SDCI_NORMAL_ISER_ECC_EN | SDCI_SDCI_NORMAL_ISER_EEI_EN; +static const u32 data_ints_mask = SDCI_BD_ISER_ETRS_EN; +static const u32 err_ints_mask = SDCI_ERROR_ISER_ECTE_EN | SDCI_ERROR_ISR_CCRCE_EN | + SDCI_ERROR_ISR_CIR_EN | SDCI_ERROR_ISR_CNR_EN; + +static void hotplug_timer_func(struct timer_list *t); +static bool phytium_sdci_private_send_cmd(struct phytium_sdci_host *host, + u32 cmd, u32 resp_type, u32 arg); +static bool phytium_sdci_cmd_done(struct phytium_sdci_host *host, int events, + struct mmc_request *mrq, + struct mmc_command *cmd); +static bool phytium_sdci_data_xfer_done(struct phytium_sdci_host *host, + u32 events, struct mmc_request *mrq, + struct mmc_data *data); +static void phytium_sdci_cmd_next(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd); + +static int phytium_sdci_cmd13_process(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_data *data, + u32 wait_timeout_ms, + u32 send_once_time_ms); + +static int phytium_sd_error(struct phytium_sdci_host *host) +{ + int temp; + + temp = readl(host->base + SDCI_NORMAL_ISR); + dev_err(host->dev, "[%s %d]SDCI_NORMAL_ISR:%x\n", __func__, __LINE__, temp); + temp = readl(host->base + SDCI_BD_ISR); + temp = readl(host->base + SDCI_ERROR_ISR); + dev_err(host->dev, "[%s %d]SDCI_ERROR_ISR:%x\n", __func__, __LINE__, temp); + temp = readl(host->base + SDCI_BD_ISR); + dev_err(host->dev, "[%s %d]SDCI_BD_ISR:%x\n", __func__, __LINE__, temp); + temp = readl(host->base + SDCI_RESP0); + dev_err(host->dev, "[%s %d]SDCI_RESP0:%x\n", __func__, __LINE__, temp); + + return 0; +} + +static void sdr_set_bits(void __iomem *reg, u32 bs) +{ + u32 val; + + val = readl(reg); + val |= bs; + + writel(val, reg); +} + +static void sdr_clr_bits(void __iomem *reg, u32 bs) +{ + u32 val; + + val = readl(reg); + val &= ~bs; + + writel(val, reg); +} + +static void phytium_sdci_reset_hw(struct phytium_sdci_host *host) +{ + sdr_set_bits(host->base + SDCI_SOFTWARE, + SDCI_SOFTWARE_SRST); + sdr_clr_bits(host->base + SDCI_SOFTWARE, + SDCI_SOFTWARE_SRST); + while (!(readl(host->base + SDCI_STATUS) & SDCI_STATUS_IDIE)) + cpu_relax(); +} + +static void phytium_sdci_prepare_data(struct phytium_sdci_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + bool read; + + read = (data->flags & MMC_DATA_READ) != 0; + data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, + read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); +} + +static void phytium_sdci_unprepare_data(struct phytium_sdci_host *host, + struct mmc_request *mrq) +{ + bool read; + struct mmc_data *data = mrq->data; + + read = (data->flags & MMC_DATA_READ) != 0; + dma_unmap_sg(host->dev, data->sg, data->sg_len, + read ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE); +} + +static void phytium_sdci_set_clk(struct phytium_sdci_host *host, + struct mmc_ios *ios) +{ + unsigned long clk_rate; + u32 div = 0xffffffff, div_reg; + + if (ios->clock) { + clk_rate = host->clk_rate; + div = ((clk_rate / (2 * ios->clock)) - 1); + div_reg = readl(host->base + SDCI_CLOCK_D); + if (div_reg == div) + return; + writel(div, host->base + SDCI_CLOCK_D); + writel(0, host->base + SDCI_SD_DRV); + writel(5, host->base + SDCI_SD_SAMP); + + sdr_set_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); + sdr_clr_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); + while (!(readl(host->base + SDCI_STATUS) & SDCI_STATUS_IDIE)) + cpu_relax(); + dev_dbg(host->dev, "host->clk_rate: %ld, ios->clock: %d\n", + host->clk_rate, ios->clock); + } +} + + +static inline u32 phytium_sdci_cmd_find_resp(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 resp; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: + resp = 0x2; + break; + case MMC_RSP_R1B: + resp = 0x2; + break; + case MMC_RSP_R2: + resp = 0x1; + break; + case MMC_RSP_R3: + resp = 0x3; + break; + case MMC_RSP_NONE: + default: + resp = 0x0; + break; + } + + return resp; +} + +static inline u32 phytium_sdci_cmd_prepare_raw_cmd(struct phytium_sdci_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + /* + * rawcmd : + * trty << 14 | opcode << 8 | cmdw << 6 | cice << 4 | crce << 3 | resp + */ + u32 resp, rawcmd; + u32 opcode = cmd->opcode; + + resp = phytium_sdci_cmd_find_resp(host, mrq, cmd); + rawcmd = ((opcode << 8) | resp); + + if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) + rawcmd = (rawcmd | (SDCI_CMD_TYPE_ADTC << 14)); + + return rawcmd; +} + +static void +phytium_sdci_unexpected_error_handler(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_data *data, + int err_type) +{ + unsigned long flags; + int error; + + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + host->cmd = NULL; + host->data = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (err_type & ERR_CARD_ABSENT) { + host->mmc->detect_change = 1; + dev_dbg(host->dev, "SD is absent when send cmd:%d\n", mrq->cmd->opcode); + } + + switch (err_type) { + case ERR_CARD_ABSENT: + error = -ENOMEDIUM; + break; + case ERR_TIMEOUT: + error = -ETIMEDOUT; + break; + case ERR_CMD_RESPONED: + error = -EIO; + break; + default: + error = -ETIMEDOUT; + break; + } + + if (data) { + data->error = error; + phytium_sdci_unprepare_data(host, mrq); + + if ((data->flags & MMC_DATA_READ) == MMC_DATA_READ || + (data->flags & MMC_DATA_WRITE) == MMC_DATA_WRITE) + phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_TRS_R, mrq, data); + } else { + mrq->cmd->error = error; + } + + mmc_request_done(host->mmc, mrq); +} + +static bool phytium_sdci_start_data(struct phytium_sdci_host *host, struct mmc_request *mrq, + struct mmc_command *cmd, struct mmc_data *data) +{ + bool read, res; + u32 sg_dma_addrh, sg_dma_addrl; + u32 sd_block_addrh, sd_block_addrl; + u32 temp, timeout, sd_status; + u32 block_cnt = 0; + u32 sd_block_addr = cmd->arg; + u32 private_cmd, resp_type, arg; + u32 j, dma_len; + unsigned long deadline_time; + dma_addr_t dma_address; + struct scatterlist *sg; + int ret; + + WARN_ON(host->cmd); + host->cmd = cmd; + + WARN_ON(host->data); + host->data = data; + read = data->flags & MMC_DATA_READ; + + for_each_sg(data->sg, sg, data->sg_count, j) { + writel(0, host->base + SDCI_COMMAND); + + dma_address = sg_dma_address(sg); + sg_dma_addrh = (u32) (dma_address >> 
32); + sg_dma_addrl = (u32) dma_address; + + dma_len = sg_dma_len(sg); + block_cnt = (dma_len / SD_BLOCK_SIZE); + + sd_block_addrh = 0; + sd_block_addrl = sd_block_addr; + + sdr_set_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_BDRST); + sdr_clr_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_BDRST); + writel(block_cnt, host->base + SDCI_BLK_CNT); + + if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) { + writel(sg_dma_addrl, host->base + SDCI_BD_RX); + writel(sg_dma_addrh, host->base + SDCI_BD_RX); + writel(sd_block_addrl, host->base + SDCI_BD_RX); + writel(sd_block_addrh, host->base + SDCI_BD_RX); + timeout = 100 * block_cnt; + } else { + timeout = 250 * block_cnt; + ret = phytium_sdci_cmd13_process(host, mrq, data, timeout, 1); + if (ret != SDCI_CMD13_OK) + return false; + + writel(sg_dma_addrl, host->base + SDCI_BD_TX); + writel(sg_dma_addrh, host->base + SDCI_BD_TX); + writel(sd_block_addrl, host->base + SDCI_BD_TX); + writel(sd_block_addrh, host->base + SDCI_BD_TX); + } + + deadline_time = jiffies + msecs_to_jiffies(timeout); + + temp = readl(host->base + SDCI_BD_ISR); + if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) { + while ((temp & SDCI_BD_ISR_TRS_R) != SDCI_BD_ISR_TRS_R) { + sd_status = readl(host->base + SDCI_STATUS); + if (sd_status & SDCI_STATUS_CDSL) { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_CARD_ABSENT); + if (temp & SDCI_BD_ISR_DAIS) + writel(1, host->base + SDCI_BD_ISR); + return false; + } + + temp = readl(host->base + SDCI_BD_ISR); + if (time_after(jiffies, deadline_time)) { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_TIMEOUT); + dev_err(host->dev, + "Read Data timeout: jiffies:0x%lx, dt_jiffies:0x%lx, BD_isr_reg:0x%x, cmd:%d, REG_D0:0x%x\n", + jiffies, jiffies - deadline_time, temp, + cmd->opcode, readl(host->base + SDCI_STATUS)); + + return false; + } + } + } else { + while ((temp & SDCI_BD_ISR_TRS_W) != SDCI_BD_ISR_TRS_W) { + sd_status = readl(host->base + SDCI_STATUS); + if (sd_status & SDCI_STATUS_CDSL) { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_CARD_ABSENT); + dev_err(host->dev, "[%s][%d]: Card absent! cmd(%d)\n", + __func__, __LINE__, mrq->cmd->opcode); + return false; + } + + temp = readl(host->base + SDCI_BD_ISR); + if (time_after(jiffies, deadline_time)) { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_TIMEOUT); + dev_err(host->dev, + "Write Data timeout: jiffies:0x%lx, dt_jiffies:0x%lx, BD_isr_reg:0x%x\n", + jiffies, jiffies - deadline_time, temp); + return false; + } + } + } + writel(1, host->base + SDCI_BD_ISR); + writel(1, host->base + SDCI_NORMAL_ISR); + sd_block_addr = sd_block_addr + block_cnt; + + if (j < (data->sg_count - 1) && 1 < block_cnt) { + private_cmd = MMC_STOP_TRANSMISSION; + resp_type = 0x2; + arg = 0; + res = phytium_sdci_private_send_cmd(host, private_cmd, + resp_type, arg); + if (!res) { + sd_status = readl(host->base + SDCI_STATUS); +
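/* the STOP command failed: check whether the card was pulled before treating it as a response error */ +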
if (sd_status & SDCI_STATUS_CDSL) { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_CARD_ABSENT); + writel(1, host->base + SDCI_BD_ISR); + dev_err(host->dev, + "[%s][%d]: Card absent! private_cmd(%d)\n", + __func__, __LINE__, private_cmd); + } else { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_CMD_RESPONED); + dev_err(host->dev, + "[%s][%d] cmd(%d) response error\n", + __func__, __LINE__, mrq->cmd->opcode); + phytium_sd_error(host); + } + writel(1, host->base + SDCI_NORMAL_ISR); + return false; + } + writel(1, host->base + SDCI_NORMAL_ISR); + } + } + + host->is_multi_rw_only_one_blkcnt = false; + + if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK && block_cnt == 1) || + (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK && block_cnt == 1)) + host->is_multi_rw_only_one_blkcnt = true; + + phytium_sdci_cmd_done(host, SDCI_NORMAL_ISR_CC, mrq, cmd); + if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) + phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_TRS_R, + mrq, data); + else + phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_TRS_W, + mrq, data); + + return true; +} + +static int phytium_sdci_auto_cmd_done(struct phytium_sdci_host *host, + int events, struct mmc_command *cmd) +{ + u32 *rsp = cmd->resp; + + rsp[0] = readl(host->base + SDCI_RESP0); + + if (events & SDCI_NORMAL_ISR_CC) + cmd->error = 0; + else { + phytium_sdci_reset_hw(host); + dev_err(host->dev, + "%s: AUTO_CMD%d arg=%08X; rsp %08X; cmd_error=%d\n", + __func__, cmd->opcode, cmd->arg, rsp[0], cmd->error); + } + + return cmd->error; +} + +static void phytium_sdci_track_cmd_data(struct phytium_sdci_host *host, + struct mmc_command *cmd, + struct mmc_data *data) +{ + if (host->error) + dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", + __func__, cmd->opcode, cmd->arg, host->error); +} + +static void phytium_sdci_request_done(struct phytium_sdci_host *host, + struct mmc_request *mrq) +{ + unsigned long flags; + + dev_dbg(host->dev, + "%s_%d:mrq->cmd->opcode:%d, mrq->cmd->arg:0x%x resp 0x%x 0x%x 0x%x 0x%x\n", + __func__, __LINE__, mrq->cmd->opcode, mrq->cmd->arg, + mrq->cmd->resp[0], mrq->cmd->resp[1], mrq->cmd->resp[2], + mrq->cmd->resp[3]); + + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + phytium_sdci_track_cmd_data(host, mrq->cmd, mrq->data); + if (mrq->data && host->adtc_type != COMMOM_ADTC) + phytium_sdci_unprepare_data(host, mrq); + mmc_request_done(host->mmc, mrq); +} + +static bool +phytium_sdci_auto_command_done(struct phytium_sdci_host *host, int events, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + u32 *rsp = cmd->resp; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->cmd = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + sdr_clr_bits(host->base + SDCI_NORMAL_ISER, cmd_ints_mask); + + rsp[0] = 0x900; /* fake R1 response: tran state, READY_FOR_DATA */ + phytium_sdci_request_done(host, mrq); + return true; +} + +/* returns true if command is fully handled; returns false otherwise */ +static bool phytium_sdci_cmd_done(struct phytium_sdci_host *host, int events, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + bool done = false; + bool sbc_error; + unsigned long flags; + u32 *rsp = cmd->resp; + + if (mrq->sbc && cmd == mrq->cmd && + (events & SDCI_NORMAL_ISR_CC)) + phytium_sdci_auto_cmd_done(host, events, mrq->sbc); + + sbc_error = mrq->sbc && mrq->sbc->error; + + if (!sbc_error && !(events & (SDCI_NORMAL_ISR_CC | + SDCI_NORMAL_ISR_CR | + SDCI_NORMAL_ISR_TIMEOUT))) + return done; + + spin_lock_irqsave(&host->lock, flags); + done = !host->cmd; + host->cmd = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (done) + return true; + + sdr_clr_bits(host->base + SDCI_NORMAL_ISER, cmd_ints_mask); + + if (cmd->flags &
MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + rsp[0] = readl(host->base + SDCI_RESP0); + rsp[1] = readl(host->base + SDCI_RESP1); + rsp[2] = readl(host->base + SDCI_RESP2); + rsp[3] = readl(host->base + SDCI_RESP3); + } else { + rsp[0] = readl(host->base + SDCI_RESP0); + } + + if (cmd->opcode == SD_SEND_RELATIVE_ADDR) + host->current_rca = rsp[0] & 0xFFFF0000; + } + + if (!sbc_error && + !(events & SDCI_NORMAL_ISR_CC) && + (events & SDCI_NORMAL_ISR_TIMEOUT)) + cmd->error = -ETIMEDOUT; + + if (cmd->error) + dev_dbg(host->dev, + "%s: cmd=%d arg=%08X; rsp %08X; cmd_error=%d\n", + __func__, cmd->opcode, cmd->arg, rsp[0], + cmd->error); + + phytium_sdci_cmd_next(host, mrq, cmd); + + return true; +} + +static bool set_databus_width(struct phytium_sdci_host *host) +{ + bool res; + u32 cmd, resp_type, arg; + + cmd = SD_APP_SET_BUS_WIDTH; + resp_type = 0x2; + arg = 0x2; + res = phytium_sdci_private_send_cmd(host, cmd, resp_type, arg); + if (!res) + return false; + + cmd = MMC_APP_CMD; + resp_type = 0x2; + arg = host->current_rca; + res = phytium_sdci_private_send_cmd(host, cmd, resp_type, arg); + if (!res) + return false; + + return true; +} + + +static void phytium_sdci_start_command(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 rawcmd; + struct mmc_data *data = mrq->data; + dma_addr_t dma_adtc_buf; + u32 dma_bufh, dma_bufl; + u32 block_cnt = 0; + + WARN_ON(host->cmd); + host->cmd = cmd; + + cmd->error = 0; + rawcmd = phytium_sdci_cmd_prepare_raw_cmd(host, mrq, cmd); + if (cmd->opcode == MMC_STOP_TRANSMISSION || + cmd->opcode == MMC_SEND_STATUS) + writel(1, host->base + SDCI_ERROR_ISR); + sdr_set_bits(host->base + SDCI_NORMAL_ISER, cmd_ints_mask); + writel(rawcmd, host->base + SDCI_COMMAND); + + if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) { + WARN_ON(host->data); + host->data = data; + + dma_adtc_buf = host->dma_rx.bd_addr; + dma_bufh = (u32) (dma_adtc_buf >> 32); + dma_bufl = (u32) dma_adtc_buf; + block_cnt = mrq->data->blocks; + sdr_set_bits(host->base + SDCI_BD_ISER, data_ints_mask); + writel(block_cnt, host->base + SDCI_BLK_CNT); + + if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) { + writel(dma_bufl, host->base + SDCI_BD_RX); + writel(dma_bufh, host->base + SDCI_BD_RX); + writel(cmd->arg, host->base + SDCI_BD_RX); + writel(0, host->base + SDCI_BD_RX); + } else { + writel(dma_bufl, host->base + SDCI_BD_TX); + writel(dma_bufh, host->base + SDCI_BD_TX); + writel(cmd->arg, host->base + SDCI_BD_TX); + writel(0, host->base + SDCI_BD_TX); + } + } else { + writel(cmd->arg, host->base + SDCI_ARGUMENT); + } +} + +static void phytium_sdci_cmd_next(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + if (cmd->error || (mrq->sbc && mrq->sbc->error)) + phytium_sdci_request_done(host, mrq); + else if (cmd == mrq->sbc) + phytium_sdci_start_command(host, mrq, mrq->cmd); + else if (!cmd->data) + phytium_sdci_request_done(host, mrq); +} + +static int phytium_sdci_cmd13_process(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_data *data, + u32 wait_timeout_ms, + u32 send_once_time_ms) +{ + u32 private_cmd, resp_type, arg, temp, sd_status; + unsigned long deadline_time; + bool res; + + deadline_time = jiffies + msecs_to_jiffies(wait_timeout_ms); + + do { + private_cmd = MMC_SEND_STATUS; + resp_type = 0x2; + arg = host->current_rca; + + res = phytium_sdci_private_send_cmd(host, private_cmd, resp_type, arg); + if (!res) { + sd_status = readl(host->base + SDCI_STATUS); + if (sd_status 
& SDCI_STATUS_CDSL) { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_CARD_ABSENT); + dev_err(host->dev, + "[%s][%d] Card absent! private_cmd(%d)\n", + __func__, __LINE__, private_cmd); + } else { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_CMD_RESPONED); + + dev_err(host->dev, + "[%s][%d] private_cmd(%d) response error\n", + __func__, __LINE__, private_cmd); + phytium_sd_error(host); + } + writel(1, host->base + SDCI_BD_ISR); + return SDCI_CMD13_FAILED; + } + + temp = readl(host->base + SDCI_RESP0); + + if (time_after(jiffies, deadline_time)) { + if (mrq->cmd->opcode == MMC_SEND_STATUS) + return SDCI_CMD13_OK; + + dev_err(host->dev, + "SD card is not in transfer mode, timeout:%d, rsp[0]:%x\n", + wait_timeout_ms, temp); + + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_TIMEOUT); + phytium_sd_error(host); + return SDCI_CMD13_FAILED; + } + + writel(1, host->base + SDCI_NORMAL_ISR); + + if (CARD_TRAN_STATE != (temp & CARD_CURRENT_STATE) && send_once_time_ms) + mdelay(send_once_time_ms); + + } while (CARD_TRAN_STATE != (temp & CARD_CURRENT_STATE)); + + return SDCI_CMD13_OK; +} + +static void phytium_sdci_ops_request(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + struct phytium_sdci_host *host = mmc_priv(mmc); + unsigned long flags; + bool res; + u32 status_sd; + int res_cmd13; + + host->error = 0; + WARN_ON(host->mrq); + host->mrq = mrq; + + dev_dbg(host->dev, "%s: mrq->cmd->opcode:%d, mrq->cmd->arg:0x%x\n", + __func__, mrq->cmd->opcode, mrq->cmd->arg); + + if (mrq->cmd->opcode == MMC_SEND_STATUS && + (mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_ADTC) { + u32 status = readl(host->base + SDCI_STATUS); + + if (status & SDCI_STATUS_CDSL) { + phytium_sdci_unexpected_error_handler(host, mrq, NULL, + ERR_CARD_ABSENT); + return; + } + + res_cmd13 = phytium_sdci_cmd13_process(host, mrq, NULL, 400, 5); + if (res_cmd13 == SDCI_CMD13_FAILED) + return; + } else if (mrq->cmd->opcode == MMC_STOP_TRANSMISSION) { + status_sd = readl(host->base + SDCI_STATUS); + if (status_sd & SDCI_STATUS_CDSL) { + phytium_sdci_unexpected_error_handler(host, mrq, NULL, + ERR_CARD_ABSENT); + return; + } + } + + if (mrq->data) { + if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK || + mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK || + mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || + mrq->cmd->opcode == MMC_WRITE_BLOCK) { + host->adtc_type = BLOCK_RW_ADTC; + phytium_sdci_prepare_data(host, mrq); + phytium_sdci_start_data(host, mrq, + mrq->cmd, mrq->data); + return; + } + host->adtc_type = COMMOM_ADTC; + } + + if (mrq->cmd->opcode == SD_IO_RW_DIRECT || + mrq->cmd->opcode == SD_IO_SEND_OP_COND) { + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + host->cmd = NULL; + spin_unlock_irqrestore(&host->lock, flags); + mrq->cmd->error = -EINVAL; + mmc_request_done(host->mmc, mrq); + + return; + } + + if (mrq->cmd->opcode == SD_APP_SEND_SCR) { + res = set_databus_width(host); + if (!res) { + phytium_sdci_unexpected_error_handler(host, mrq, NULL, ERR_CMD_RESPONED); + return; + } + } + + /* if SBC is required, we have HW option and SW option.
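+ * (SBC is the SET_BLOCK_COUNT command, CMD23, handed down by the core as mrq->sbc.)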
+ * if HW option is enabled, and SBC does not have "special" flags, + * use HW option, otherwise use SW option + */ + if (mrq->sbc && + (!mmc_card_mmc(mmc->card) || (mrq->sbc->arg & 0xFFFF0000))) + phytium_sdci_start_command(host, mrq, mrq->sbc); + else + phytium_sdci_start_command(host, mrq, mrq->cmd); +} + +static void phytium_sdci_data_xfer_next(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_data *data) +{ + if (mmc_op_multi(mrq->cmd->opcode) && + mrq->stop && !mrq->stop->error && + !mrq->sbc && host->is_multi_rw_only_one_blkcnt) { + host->is_multi_rw_only_one_blkcnt = false; + phytium_sdci_auto_command_done(host, SDCI_NORMAL_ISR_CC, mrq, mrq->stop); + } else if (mmc_op_multi(mrq->cmd->opcode) && + mrq->stop && !mrq->stop->error && + !mrq->sbc) + phytium_sdci_start_command(host, mrq, mrq->stop); + else + phytium_sdci_request_done(host, mrq); +} + +static inline void get_data_buffer(struct mmc_data *data, + u32 *bytes, u32 **pointer) +{ + struct scatterlist *sg; + + sg = &data->sg[0]; + *bytes = sg->length; + *pointer = sg_virt(sg); +} + +static bool phytium_sdci_data_xfer_done(struct phytium_sdci_host *host, + u32 events, struct mmc_request *mrq, + struct mmc_data *data) +{ + struct mmc_command *stop = data->stop; + unsigned long flags; + bool done; + unsigned int check_data; + u32 sg_length, i; + u32 *sg_virt_addr; + + check_data = events & (SDCI_BD_ISR_TRS_R | SDCI_BD_ISR_TRS_W | SDCI_BD_ISR_EDTE); + + spin_lock_irqsave(&host->lock, flags); + done = !host->data; + if (check_data) + host->data = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (done) + return true; + + if (check_data || (stop && stop->error)) { + sdr_clr_bits(host->base + SDCI_BD_ISER, data_ints_mask); + dev_dbg(host->dev, "DMA stop\n"); + + if (((events & SDCI_BD_ISR_TRS_R) || + (events & SDCI_BD_ISR_TRS_W)) && + (!stop || !stop->error)) { + if ((mrq->cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC && + (host->adtc_type == COMMOM_ADTC)) { + get_data_buffer(data, &sg_length, + &host->sg_virt_addr); + sg_virt_addr = host->sg_virt_addr; + + for (i = 0; i < (sg_length/4); i++) { + *sg_virt_addr = host->dma_rx.buf[i]; + sg_virt_addr++; + } + } + data->bytes_xfered = data->blocks * data->blksz; + } else { + dev_dbg(host->dev, "interrupt events: %x\n", events); + phytium_sdci_reset_hw(host); + data->bytes_xfered = 0; + dev_dbg(host->dev, "%s: cmd=%d; blocks=%d", + __func__, mrq->cmd->opcode, data->blocks); + dev_dbg(host->dev, "data_error=%d xfer_size=%d\n", + (int)data->error, data->bytes_xfered); + } + + phytium_sdci_data_xfer_next(host, mrq, data); + done = true; + } + + return done; +} + + +static int phytium_sdci_card_busy(struct mmc_host *mmc) +{ + struct phytium_sdci_host *host = mmc_priv(mmc); + u32 status; + + /* check if any pin between dat[0:3] is low */ + status = readl(host->base + SDCI_STATUS); + if (((status >> 20) & 0xf) != 0xf) + return 1; + + return 0; +} + +static void phytium_sdci_request_timeout(struct work_struct *work) +{ + struct phytium_sdci_host *host; + + host = container_of(work, struct phytium_sdci_host, req_timeout.work); + dev_err(host->dev, "%s: aborting cmd/data/mrq\n", __func__); + if (host->mrq) { + dev_err(host->dev, "%s: aborting mrq=%p cmd=%d\n", __func__, + host->mrq, host->mrq->cmd->opcode); + if (host->cmd) { + dev_err(host->dev, "%s: aborting cmd=%d\n", + __func__, host->cmd->opcode); + phytium_sdci_cmd_done(host, SDCI_NORMAL_ISR_TIMEOUT, + host->mrq, host->cmd); + } else if (host->data) { + dev_err(host->dev, "%s: abort data: cmd%d; %d blocks\n", + 
__func__, host->mrq->cmd->opcode, + host->data->blocks); + phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_EDTE, + host->mrq, host->data); + } + } +} + +static void hotplug_timer_func(struct timer_list *t) +{ + struct phytium_sdci_host *host; + u32 status; + + host = timer_container_of(host, t, hotplug_timer); + if (!host) + return; + status = readl(host->base + SDCI_STATUS); + + if (status & SDCI_STATUS_CDSL) { /* card absent */ + if (host->mmc->card) { + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, + msecs_to_jiffies(100)); + } + } else { /* card insert */ + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, msecs_to_jiffies(200)); + } +} + +static irqreturn_t phytium_sdci_irq(int irq, void *dev_id) +{ + struct phytium_sdci_host *host = (struct phytium_sdci_host *) dev_id; + unsigned long flags; + struct mmc_request *mrq; + struct mmc_command *cmd; + u32 events; + + if (!host) + return IRQ_NONE; + + spin_lock_irqsave(&host->lock, flags); + events = readl(host->base + SDCI_NORMAL_ISR); + /* clear interrupts */ + writel(1, host->base + SDCI_NORMAL_ISR); + + mrq = host->mrq; + cmd = host->cmd; + spin_unlock_irqrestore(&host->lock, flags); + + if (events & (SDCI_NORMAL_ISR_CR | SDCI_NORMAL_ISR_CI)) { + mod_timer(&host->hotplug_timer, + jiffies + usecs_to_jiffies(30000)); + goto irq_out; + } + + if (!(events & cmd_ints_mask)) + goto irq_out; + + if (!mrq) { + dev_err(host->dev, "%s: MRQ=NULL; events=%08X\n", + __func__, events); + WARN_ON(1); + goto irq_out; + } + + dev_dbg(host->dev, "%s: events=%08X\n", __func__, events); + + if (cmd) + phytium_sdci_cmd_done(host, events, mrq, cmd); + +irq_out: + return IRQ_HANDLED; +} + +static irqreturn_t phytium_sdci_dma_irq(int irq, void *dev_id) +{ + struct phytium_sdci_host *host = (struct phytium_sdci_host *) dev_id; + unsigned long flags; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + u32 events; + + spin_lock_irqsave(&host->lock, flags); + events = readl(host->base + SDCI_BD_ISR); + writel(1, host->base + SDCI_BD_ISR); + + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + spin_unlock_irqrestore(&host->lock, flags); + + if (!(events & data_ints_mask)) + goto dma_irq_out; + + if (!mrq) { + dev_err(host->dev, + "%s: MRQ=NULL; events=%08X\n", + __func__, events); + goto dma_irq_out; + } + + dev_dbg(host->dev, "%s: events=%08X\n", __func__, events); + + if (data) + phytium_sdci_data_xfer_done(host, events, mrq, data); + +dma_irq_out: + return IRQ_HANDLED; +} + +static irqreturn_t phytium_sdci_err_irq(int irq, void *dev_id) +{ + struct phytium_sdci_host *host = (struct phytium_sdci_host *) dev_id; + unsigned long flags; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + u32 events; + + if (!host) + return IRQ_NONE; + + spin_lock_irqsave(&host->lock, flags); + events = readl(host->base + SDCI_ERROR_ISR); + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + spin_unlock_irqrestore(&host->lock, flags); + + if (!(events & err_ints_mask)) + goto err_irq_out; + + if (!mrq) { + sdr_clr_bits(host->base + SDCI_NORMAL_ISER, SDCI_NORMAL_ISR_EI); + writel(1, host->base + SDCI_ERROR_ISR); + dev_err(host->dev, "%s: MRQ=NULL; events=%08X\n", __func__, events); + goto err_irq_out; + } + sdr_clr_bits(host->base + SDCI_NORMAL_ISER, SDCI_NORMAL_ISR_EI); + if (data) { +
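/* error interrupt with an active data transfer: fail the data phase and complete the request */ +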
events, data->flags); + data->error = -ETIMEDOUT; + if ((data->flags & MMC_DATA_READ) == MMC_DATA_READ || + (data->flags & MMC_DATA_WRITE) == MMC_DATA_WRITE) + phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_EDTE | SDCI_BD_ISR_TRS_R, + mrq, data); + mrq->cmd->error = -ETIMEDOUT; + mmc_request_done(host->mmc, mrq); + } else if (cmd) { + phytium_sdci_cmd_done(host, SDCI_NORMAL_ISR_TIMEOUT, mrq, cmd); + } + + writel(1, host->base + SDCI_NORMAL_ISR); + writel(1, host->base + SDCI_ERROR_ISR); +err_irq_out: + return IRQ_HANDLED; +} + +static void phytium_sdci_init_hw(struct phytium_sdci_host *host) +{ + u32 val; + + /* Reset */ + phytium_sdci_reset_hw(host); + + val = SDCI_SEN_CREFR_VAL | SDCI_SEN_DEBNCE_VAL; + writel(val, host->base + SDCI_SD_SEN); + + /* Disable and clear all interrupts */ + writel(0, host->base + SDCI_NORMAL_ISER); + writel(0, host->base + SDCI_ERROR_ISER); + writel(0, host->base + SDCI_BD_ISER); + + writel(1, host->base + SDCI_NORMAL_ISR); + writel(1, host->base + SDCI_ERROR_ISR); + writel(1, host->base + SDCI_BD_ISR); + + sdr_set_bits(host->base + SDCI_NORMAL_ISER, + SDCI_SDCI_NORMAL_ISER_ECI|SDCI_SDCI_NORMAL_ISER_ECR); + /* Configure default cmd timeout to 0.1(s)s = val/25M */ + val = SDCI_F_MAX / 10; + writel(val, host->base + SDCI_TIMEOUT_CMD); + writel(SDCI_TIMEOUT_DATA_VALUE, host->base + SDCI_TIMEOUT_DATA); + + val = 0x0F00; + writel(val, host->base + SDCI_CONTROLLER); + + dev_dbg(host->dev, "init hardware done!"); +} + +static void phytium_sdci_deinit_hw(struct phytium_sdci_host *host) +{ + /* Disable and clear all interrupts */ + writel(0, host->base + SDCI_NORMAL_ISER); + writel(0, host->base + SDCI_ERROR_ISER); + writel(0, host->base + SDCI_BD_ISER); + + writel(0, host->base + SDCI_NORMAL_ISR); + writel(0, host->base + SDCI_ERROR_ISR); + writel(0, host->base + SDCI_BD_ISR); +} + +static void phytium_sdci_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct phytium_sdci_host *host = mmc_priv(mmc); + + if (ios->bus_width == MMC_BUS_WIDTH_4) + mmc->caps = mmc->caps & (~MMC_CAP_4_BIT_DATA); + + /* Suspend/Resume will do power off/on */ + switch (ios->power_mode) { + case MMC_POWER_UP: + writel(SDCI_POWER_ON, host->base + SDCI_POWER); + break; + case MMC_POWER_ON: + phytium_sdci_set_clk(host, ios); + break; + case MMC_POWER_OFF: + writel(SDCI_POWER_OFF, host->base + SDCI_POWER); + break; + default: + break; + } +} + +static int phytium_sdci_get_cd(struct mmc_host *mmc) +{ + struct phytium_sdci_host *host = mmc_priv(mmc); + u32 status = readl(host->base + SDCI_STATUS); + + if (((status >> 19) & 0x1) == 0x1) + return 0; + + return 1; +} + +static void phytium_sdci_hw_reset(struct mmc_host *mmc) +{ + struct phytium_sdci_host *host = mmc_priv(mmc); + + sdr_set_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); + sdr_clr_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); + while (!(readl(host->base + SDCI_STATUS) & SDCI_STATUS_IDIE)) + cpu_relax(); +} + +static struct mmc_host_ops phytium_sdci_ops = { + .request = phytium_sdci_ops_request, + .set_ios = phytium_sdci_ops_set_ios, + .get_cd = phytium_sdci_get_cd, + .card_busy = phytium_sdci_card_busy, + .card_hw_reset = phytium_sdci_hw_reset, +}; + +static bool phytium_sdci_private_send_cmd(struct phytium_sdci_host *host, + u32 cmd, u32 resp_type, u32 arg) +{ + u32 temp, sd_cmd, sd_arg, sd_status; + unsigned long deadline_time; + + writel(1, host->base + SDCI_NORMAL_ISR); + writel(1, host->base + SDCI_ERROR_ISR); + + sd_cmd = (cmd << 8) | resp_type; + sd_arg = arg; + writel(sd_cmd, host->base + SDCI_COMMAND); 
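/*
 * Editorial note: this private command path bypasses the mmc core
 * entirely. The command index is shifted into the upper byte of
 * SDCI_COMMAND with the response type in the low byte, and completion
 * is detected by polling SDCI_NORMAL_ISR_CC below (100 ms deadline,
 * stretched to 1 s for MMC_STOP_TRANSMISSION). A roughly equivalent
 * formulation with the iopoll helper (a hypothetical rework, not what
 * this patch does, and it would lose the early exit on card removal)
 * would be:
 *
 *	u32 isr;
 *
 *	if (readl_poll_timeout(host->base + SDCI_NORMAL_ISR, isr,
 *			       isr & SDCI_NORMAL_ISR_CC, 10, 100 * 1000))
 *		return false;
 */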
+ writel(sd_arg, host->base + SDCI_ARGUMENT); + + if (cmd == MMC_STOP_TRANSMISSION) + deadline_time = jiffies + msecs_to_jiffies(1000); + else + deadline_time = jiffies + msecs_to_jiffies(100); + + temp = readl(host->base + SDCI_NORMAL_ISR); + while ((temp & SDCI_NORMAL_ISR_CC) != SDCI_NORMAL_ISR_CC) { + sd_status = readl(host->base + SDCI_STATUS); + if (sd_status & SDCI_STATUS_CDSL) + return false; + + temp = readl(host->base + SDCI_NORMAL_ISR); + if (time_after(jiffies, deadline_time)) + return false; + + if (cmd == MMC_STOP_TRANSMISSION) + mdelay(1); + } + + return true; +} + +static int phytium_sdci_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct phytium_sdci_host *host; + struct resource *res; + int ret; + const struct acpi_device_id *match; + struct device *dev = &pdev->dev; + + /* Allocate MMC host for this device */ + mmc = mmc_alloc_host(sizeof(struct phytium_sdci_host), &pdev->dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + ret = mmc_of_parse(mmc); + if (ret) + goto host_free; + + if (dev->of_node) { + host->src_clk = devm_clk_get(&pdev->dev, "phytium_sdc_clk"); + if (IS_ERR(host->src_clk)) { + ret = PTR_ERR(host->src_clk); + goto host_free; + } + + host->clk_rate = clk_get_rate(host->src_clk); + if (device_property_read_bool(dev, "no-dma-coherent")) + dev->dma_coherent = false; + } else if (has_acpi_companion(dev)) { + match = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!match) { + dev_err(dev, "Error ACPI match data is missing\n"); + return -ENODEV; + } + + acpi_dma_configure(dev, DEV_DMA_NON_COHERENT); + + host->clk_rate = 600000000; + } else { + dev_err(&pdev->dev, "No DT found\n"); + return -EINVAL; + } + + dma_set_mask(dev, DMA_BIT_MASK(40)); + dma_set_coherent_mask(dev, DMA_BIT_MASK(40)); + + timer_setup(&host->hotplug_timer, hotplug_timer_func, 0); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto host_free; + } + + host->irq = platform_get_irq(pdev, 1); + if (host->irq < 0) { + ret = -EINVAL; + goto host_free; + } + + host->irq_err = platform_get_irq(pdev, 2); + if (host->irq_err < 0) { + ret = -EINVAL; + goto host_free; + } + + host->irq_bd = platform_get_irq(pdev, 0); + if (host->irq_bd < 0) { + ret = -EINVAL; + goto host_free; + } + + host->dev = &pdev->dev; + host->mmc = mmc; + + if ((4 * SDCI_F_MAX) > host->clk_rate) + host->clk_div = 1; + else + host->clk_div = ((host->clk_rate / (2 * SDCI_F_MAX)) - 1); + + /* Set host parameters to mmc */ + mmc->f_min = SDCI_F_MIN; + mmc->f_max = (host->clk_rate / ((host->clk_div + 1) * 2)); + mmc->ops = &phytium_sdci_ops; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + + mmc->caps |= host->caps; + /* MMC core transfer sizes tunable parameters */ + mmc->max_segs = MAX_BD_NUM; + mmc->max_seg_size = 512 * 1024; + mmc->max_blk_size = 512; + mmc->max_req_size = 512 * 1024; + mmc->max_blk_count = mmc->max_req_size / 512; + + host->dma_rx.buf = dma_alloc_coherent(&pdev->dev, + MAX_BD_NUM, + &host->dma_rx.bd_addr, + GFP_KERNEL); + if (!host->dma_rx.buf) { + ret = -ENOMEM; + goto release_mem; + } + + host->cmd_timeout = msecs_to_jiffies(100); + host->data_timeout = msecs_to_jiffies(250); + + INIT_DELAYED_WORK(&host->req_timeout, phytium_sdci_request_timeout); + spin_lock_init(&host->lock); + + platform_set_drvdata(pdev, mmc); + phytium_sdci_init_hw(host); + + ret = devm_request_irq(&pdev->dev, host->irq, phytium_sdci_irq, + IRQF_SHARED, pdev->name, 
host); + if (ret) + goto release; + + ret = devm_request_irq(&pdev->dev, host->irq_err, phytium_sdci_err_irq, + IRQF_SHARED, pdev->name, host); + if (ret) + goto release; + + ret = devm_request_irq(&pdev->dev, host->irq_bd, phytium_sdci_dma_irq, + IRQF_SHARED, pdev->name, host); + if (ret) + goto release; + + ret = mmc_add_host(mmc); + if (ret) + goto release; + + return 0; + +release: + platform_set_drvdata(pdev, NULL); + phytium_sdci_deinit_hw(host); +release_mem: + if (host->dma_rx.buf) + dma_free_coherent(&pdev->dev, MAX_BD_NUM, + host->dma_rx.buf, + host->dma_rx.bd_addr); +host_free: + mmc_free_host(mmc); + + return ret; +} + +static void phytium_sdci_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct phytium_sdci_host *host; + + mmc = platform_get_drvdata(pdev); + host = mmc_priv(mmc); + + cancel_delayed_work_sync(&host->req_timeout); + platform_set_drvdata(pdev, NULL); + mmc_remove_host(host->mmc); + phytium_sdci_deinit_hw(host); + + if (host->dma_rx.buf) + dma_free_coherent(&pdev->dev, MAX_BD_NUM, + host->dma_rx.buf, host->dma_rx.bd_addr); + + mmc_free_host(host->mmc); +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_sdci_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_sdci_host *host = mmc_priv(mmc); + + phytium_sdci_deinit_hw(host); + return 0; +} + +static int phytium_sdci_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_sdci_host *host = mmc_priv(mmc); + + phytium_sdci_init_hw(host); + mmc->caps = mmc->caps | MMC_CAP_4_BIT_DATA; + + return 0; +} +#endif + +#ifdef CONFIG_PM +static int phytium_sdci_runtime_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_sdci_host *host = mmc_priv(mmc); + + phytium_sdci_deinit_hw(host); + + return 0; +} + +static int phytium_sdci_runtime_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_sdci_host *host = mmc_priv(mmc); + + phytium_sdci_init_hw(host); + + return 0; +} + +static const struct dev_pm_ops phytium_sdci_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_sdci_suspend, + phytium_sdci_resume) + SET_RUNTIME_PM_OPS(phytium_sdci_runtime_suspend, + phytium_sdci_runtime_resume, NULL) +}; +#else +#define phytium_sdci_dev_pm_ops NULL +#endif + +static const struct of_device_id phytium_sdci_of_ids[] = { + { .compatible = "phytium,sdci", }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_sdci_of_ids); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_sdci_acpi_ids[] = { + { .id = "PHYT0005" }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, phytium_sdci_acpi_ids); +#else +#define phytium_sdci_acpi_ids NULL +#endif + +static struct platform_driver phytium_sdci_driver = { + .probe = phytium_sdci_probe, + .remove = phytium_sdci_remove, + .driver = { + .name = "sdci-phytium", + .of_match_table = phytium_sdci_of_ids, + .acpi_match_table = phytium_sdci_acpi_ids, + .pm = &phytium_sdci_dev_pm_ops, + }, +}; + +module_platform_driver(phytium_sdci_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cheng Quan "); +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium SD Card Interface driver"); diff --git a/drivers/mmc/host/phytium-sdci.h b/drivers/mmc/host/phytium-sdci.h new file mode 100644 index 0000000000000..a50cf42ef98a2 --- /dev/null +++ b/drivers/mmc/host/phytium-sdci.h @@ -0,0 +1,200 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium SDCI dirver + * + * Copyright (C) 2019-2023, Phytium Technology Co., Ltd. 
+ */
+
+/*---------------------------------------------------------------------------*/
+/* Common Definition */
+/*---------------------------------------------------------------------------*/
+#define MAX_BD_NUM 0x1000
+#define SD_BLOCK_SIZE 512
+
+/*---------------------------------------------------------------------------*/
+/* Register Offset */
+/*---------------------------------------------------------------------------*/
+#define SDCI_CONTROLLER 0x00 /* controller config reg */
+#define SDCI_ARGUMENT 0x04 /* argument reg */
+#define SDCI_COMMAND 0x08 /* command reg */
+#define SDCI_CLOCK_D 0x0C /* clock divide reg */
+#define SDCI_SOFTWARE 0x10 /* controller reset reg */
+#define SDCI_POWER 0x14 /* power control reg */
+#define SDCI_TIMEOUT_CMD 0x18 /* cmd timeout config reg */
+#define SDCI_TIMEOUT_DATA 0x1C /* data timeout reg */
+#define SDCI_NORMAL_ISER 0x20 /* normal ISR config reg */
+#define SDCI_ERROR_ISER 0x24 /* error ISR config reg */
+#define SDCI_BD_ISER 0x28 /* BD ISR config reg */
+#define SDCI_CAPA 0x2C /* capability reg */
+#define SDCI_SD_DRV 0x30 /* SD card driving phase position reg */
+#define SDCI_SD_SAMP 0x34 /* SD card sampling phase position reg */
+#define SDCI_SD_SEN 0x38 /* SD card detection reg */
+#define SDCI_HDS_AXI 0x3C /* AXI boundary config reg */
+#define SDCI_BD_RX 0x40 /* BD rx addr reg */
+#define SDCI_BD_TX 0x60 /* BD tx addr reg */
+#define SDCI_BLK_CNT 0x80 /* r/w block num reg */
+#define SDCI_NORMAL_ISR 0xC0 /* normal ISR status reg */
+#define SDCI_ERROR_ISR 0xC4 /* error ISR status reg */
+#define SDCI_BD_ISR 0xC8 /* BD ISR status reg */
+#define SDCI_BD_STATUS 0xCC /* BD descriptor status reg */
+#define SDCI_STATUS 0xD0 /* status reg */
+#define SDCI_BLOCK 0xD4 /* block len reg */
+#define SDCI_RESP0 0xE0 /* response reg0 */
+#define SDCI_RESP1 0xE4 /* response reg1 */
+#define SDCI_RESP2 0xE8 /* response reg2 */
+#define SDCI_RESP3 0xEC /* response reg3 */
+
+/*---------------------------------------------------------------------------*/
+/* Register Mask */
+/*---------------------------------------------------------------------------*/
+/* SDCI_CONTROLLER mask */
+#define SDCI_CONTROLLER_ECRCWR (0x1 << 0) /* RW */
+#define SDCI_CONTROLLER_ECRCRD (0x1 << 1) /* RW */
+#define SDCI_CONTROLLER_RESEDE (0x1 << 2) /* RW */
+#define SDCI_CONTROLLER_PERMDR (0x3 << 8) /* RW */
+#define SDCI_CONTROLLER_PERMDX (0x3 << 10) /* RW */
+
+/* SDCI_SOFTWARE mask */
+#define SDCI_SOFTWARE_SRST (0x1 << 0) /* RW */
+#define SDCI_SOFTWARE_SCRST (0x1 << 1) /* RW */
+#define SDCI_SOFTWARE_BDRST (0x1 << 2) /* RW */
+#define SDCI_SOFTWARE_CFCLF (0x1 << 3) /* RW */
+#define SDCI_SOFTWARE_SDRST (0x1 << 4) /* RW */
+
+/* SDCI_NORMAL_ISER mask */
+#define SDCI_SDCI_NORMAL_ISER_ECC_EN (0x1 << 0) /* RW */
+#define SDCI_SDCI_NORMAL_ISER_ECR (0x1 << 1) /* RW */
+#define SDCI_SDCI_NORMAL_ISER_ECI (0x1 << 2) /* RW */
+#define SDCI_SDCI_NORMAL_ISER_EEI_EN (0x1 << 15) /* RW */
+
+/* SDCI_NORMAL_ISR mask */
+#define SDCI_NORMAL_ISR_CC (0x1 << 0) /* R */
+#define SDCI_NORMAL_ISR_CR (0x1 << 1) /* R */
+#define SDCI_NORMAL_ISR_CI (0x1 << 2) /* R */
+#define SDCI_NORMAL_ISR_TIMEOUT (0x1 << 3) /* R */
+#define SDCI_NORMAL_ISR_EI (0x1 << 15) /* R */
+
+/* SDCI_ERROR_ISER mask */
+#define SDCI_ERROR_ISER_ECTE_EN (0x1 << 0) /* RW */
+#define SDCI_ERROR_ISR_CCRCE_EN (0x1 << 1) /* RW */
+#define SDCI_ERROR_ISR_CIR_EN (0x1 << 3) /* RW */
+#define SDCI_ERROR_ISR_CNR_EN (0x1 << 4) /* RW */
+/* SDCI_ERROR_ISR mask */
+#define SDCI_ERROR_ISR_CTE (0x1 << 0) /* R */
+#define
SDCI_ERROR_ISR_CCRCE (0x1 << 1) /* R */ +#define SDCI_ERROR_ISR_CIR (0x1 << 3) /* R */ +#define SDCI_ERROR_ISR_CNR (0x1 << 4) /* R */ + +/* SDCI_BD_ISER mask */ +#define SDCI_BD_ISER_ETRS_EN (0x1 << 8) /* RW */ +#define SDCI_BD_ISER_DATFRAX_EN (0x1 << 7) /* RW */ + +/* SDCI_BD_ISR mask */ +#define SDCI_BD_ISR_TRS_W (0x1 << 0) /* R */ +#define SDCI_BD_ISR_TRS_R (0x1 << 8) /* R */ +#define SDCI_BD_ISR_EDTE (0x1 << 3) /* R */ +#define SDCI_BD_ISR_DAIS (0x1 << 15) /* R */ +#define SDCI_BD_ISR_DATFRAX (0x1 << 7) /* R */ + +/* SDCI_HDS_AXI mask */ +#define SDCI_HDS_AXI_AWDOMAIN (0x1 << 0) /* RW */ +#define SDCI_HDS_AXI_ARDOMAIN (0x1 << 12) /* RW */ +#define SDCI_HDS_AXI_AWCACHE (0x6 << 24) /* RW */ +#define SDCI_HDS_AXI_ARCACHE (0xB << 28) /* RW */ + +/* SDCI_STATUS mask */ +#define SDCI_STATUS_CMD_BUSY (0x0 << 0) /* R */ +#define SDCI_STATUS_CMD_READY (0x1 << 0) /* R */ +#define SDCI_STATUS_IDIE (0x1 << 12) /* R */ +#define SDCI_CARD_BUSY_IN_PRG (0x1 << 20) /* R D0 BUSY:0,IDLE:1 */ + +/* SDCI_STATUS */ +#define SDCI_STATUS_CDSL (0x1 << 19) /* R */ + +/*---------------------------------------------------------------------------*/ +/* Register Value */ +/*---------------------------------------------------------------------------*/ +#define SDCI_SD_DRV_VALUE 0 +#define SDCI_SD_SAMP_VALUE_MAX 50 +#define SDCI_SD_SAMP_VALUE_MIN 0 + +#define SDCI_TIMEOUT_CMD_VALUE 0xFFFFFFFF +#define SDCI_TIMEOUT_DATA_VALUE 0xFFFFFFFF +#define SDCI_POWER_ON 1 +#define SDCI_POWER_OFF 0 + +#define SDCI_CMD_TIMEOUT 10 +#define SDCI_DAT_TIMEOUT 5000 + +#define SDCI_CMD_TYPE_ADTC 0x2 + +#define SDCI_F_MIN 400000 +#define SDCI_F_MAX 25000000 + +#define SDCI_SEN_CREFR_VAL (0x1 << 1) +#define SDCI_SEN_DEBNCE_VAL (0xB << 8) + +#define CARD_CURRENT_STATE (0xF << 9) +#define CARD_PRG_STATE (0x7 << 9) +#define CARD_TRAN_STATE (0x4 << 9) + +#define SDCI_CMD13_OK 1 +#define SDCI_CMD13_FAILED 0 + +#define ERR_TIMEOUT (0x1 << 0) +#define ERR_CARD_ABSENT (0x1 << 1) +#define ERR_CMD_RESPONED (0x1 << 2) + +/*---------------------------------------------------------------------------*/ +/* Structure Type */ +/*---------------------------------------------------------------------------*/ +struct phytium_sdci_dma { + struct scatterlist *sg; + u32 *buf; + dma_addr_t bd_addr; + size_t bytes; +}; + +enum adtc_type { + COMMOM_ADTC = 0, + BLOCK_RW_ADTC = 1 +}; + +struct phytium_sdci_host { + struct device *dev; + struct mmc_host *mmc; + u32 caps; + spinlock_t lock; + + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + int error; + + void __iomem *base; + + struct phytium_sdci_dma dma_rx; + struct phytium_sdci_dma dma_tx; + + u32 *sg_virt_addr; + enum adtc_type adtc_type; + + struct timer_list hotplug_timer; + + struct delayed_work req_timeout; + u32 cmd_timeout; + u32 data_timeout; + + int irq; + int irq_err; + int irq_bd; + + struct clk *src_clk; + unsigned long clk_rate; + unsigned long clk_div; + unsigned long real_rate; + + u32 current_rca; + bool is_multi_rw_only_one_blkcnt; +}; + diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 7408f34f0c68a..88eb74478daad 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -124,6 +124,25 @@ config MTD_NAND_ORION No board specific support is done by this driver, each board must advertise a platform_device for the driver to attach. 
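As a reading aid for the register interface defined in phytium-sdci.h above: the driver's card-detect and busy checks decode SDCI_STATUS bit 19 (CDSL) and bits 23:20 (the DAT[3:0] lines). A minimal standalone sketch under those assumptions follows; the helper names are hypothetical and not part of the patch.

/* Sketch only; assumes <linux/io.h> and the SDCI_* defines above. */
static inline bool sdci_card_present(void __iomem *base)
{
	/* SDCI_STATUS_CDSL reads 1 while the slot is empty. */
	return !(readl(base + SDCI_STATUS) & SDCI_STATUS_CDSL);
}

static inline bool sdci_card_busy_d0(void __iomem *base)
{
	/* DAT[3:0] are mirrored in bits 23:20; any line held low means busy. */
	return ((readl(base + SDCI_STATUS) >> 20) & 0xf) != 0xf;
}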
+config MTD_NAND_PHYTIUM + tristate + +config MTD_NAND_PHYTIUM_PCI + tristate "Support Phytium NAND controller as a PCI device" + select MTD_NAND_PHYTIUM + depends on PCI + help + Enable the driver for NAND flash controller of Phytium Px210 chipset, + using the Phytium NAND controller core. + +config MTD_NAND_PHYTIUM_PLAT + tristate "Support Phytium NAND controller as a platform device" + select MTD_NAND_PHYTIUM + depends on ARCH_PHYTIUM + help + Enable the driver for NAND flash controller of Phytium CPU chipset, + using the Phytium NAND controller core. + config MTD_NAND_MARVELL tristate "Marvell EBU NAND controller" depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \ diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index 619760138d322..bc50e9821c5c3 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -59,6 +59,9 @@ obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o obj-$(CONFIG_MTD_NAND_NUVOTON_MA35) += nuvoton-ma35d1-nand-controller.o obj-$(CONFIG_MTD_NAND_LOONGSON) += loongson-nand-controller.o +obj-$(CONFIG_MTD_NAND_PHYTIUM) += phytium_nand.o +obj-$(CONFIG_MTD_NAND_PHYTIUM_PCI) += phytium_nand_pci.o +obj-$(CONFIG_MTD_NAND_PHYTIUM_PLAT) += phytium_nand_plat.o nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_onfi.o diff --git a/drivers/mtd/nand/raw/phytium_nand.c b/drivers/mtd/nand/raw/phytium_nand.c new file mode 100644 index 0000000000000..e7cab4fe765e5 --- /dev/null +++ b/drivers/mtd/nand/raw/phytium_nand.c @@ -0,0 +1,2083 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Core driver for Phytium NAND flash controller + * + * Copyright (C) 2020-2023, Phytium Technology Co., Ltd. 
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "phytium_nand.h"
+
+u16 timing_asy_mode0[TIMING_ASY_NUM] = { /* x100 pass, sample: 1 */
+ 0x03, 0x03, 0x28, 0x28, 0x03, 0x03, 0x06, 0x06, 0x28, 0x70, 0x30, 0x50};
+u16 timing_asy_mode1[TIMING_ASY_NUM] = { /* x100 pass, sample: 1 */
+ 0x03, 0x03, 0x14, 0x14, 0x03, 0x03, 0x06, 0x06, 0x14, 0x70, 0x30, 0x28};
+u16 timing_asy_mode2[TIMING_ASY_NUM] = { /* x100 pass, sample: 7/8 (unlic) */
+ 0x03, 0x03, 0x0D, 0x0D, 0x03, 0x03, 0x06, 0x06, 0x0D, 0x70, 0x20, 0x1A};
+u16 timing_asy_mode3[TIMING_ASY_NUM] = { /* x100 pass, sample: 4-7 */
+ 0x03, 0x03, 0x0A, 0x0A, 0x03, 0x03, 0x06, 0x06, 0x0A, 0x70, 0x20, 0x14};
+u16 timing_asy_mode4[TIMING_ASY_NUM] = { /* x100 1.8v pass */
+ 0x03, 0x03, 0x08, 0x08, 0x03, 0x03, 0x06, 0x06, 0x08, 0x70, 0x15, 0x10};
+u16 timing_asy_mode5[TIMING_ASY_NUM] = { /* x100 1.8v pass */
+ 0x03, 0x03, 0x07, 0x07, 0x03, 0x03, 0x06, 0x06, 0x07, 0x20, 0x15, 0x0E};
+u16 timing_syn_mode0[TIMING_SYN_NUM] = { /* x100 1.8v pass */
+ 0x20, 0x41, 0x05, 0x20, 0x10, 0x19, 0x62, 0x40, 0x38, 0x20, 0x00, 0x09,
+ 0x50, 0x20};
+u16 timing_syn_mode1[TIMING_SYN_NUM] = { /* x100 1.8v pass */
+ 0x18, 0x32, 0x06, 0x18, 0x0C, 0x10, 0x76, 0x40, 0x2A, 0x1E, 0x00, 0x12,
+ 0x24, 0x18};
+u16 timing_syn_mode2[TIMING_SYN_NUM] = { /* x100 1.8v pass */
+ 0x10, 0x0A, 0x04, 0x10, 0x08, 0x0A, 0x6E, 0x50, 0x1D, 0x10, 0x00, 0x0C,
+ 0x18, 0x10};
+u16 timing_syn_mode3[TIMING_SYN_NUM] = { /* x100 1.8v pass */
+ 0x0C, 0x1A, 0x02, 0x0C, 0x06, 0x08, 0x78, 0x7C, 0x15, 0x0C, 0x00, 0x08,
+ 0x12, 0x0C};
+u16 timing_syn_mode4[TIMING_SYN_NUM] = { /* x100 1.8v failed */
+ 0x08, 0x17, 0x05, 0x08, 0x04, 0x01, 0x73, 0x40, 0x0C, 0x08, 0x00, 0x06,
+ 0x0C, 0x10};
+u16 timing_tog_ddr_mode0[TIMING_TOG_NUM] = { /* 600M clk */
+ 0x14, 0x0a, 0x08, 0x08, 0xc8, 0xc8, 0x08, 0x08, 0x20, 0x0a, 0x14, 0x08};
+
+static u32 nfc_ecc_errover;
+static u32 nfc_ecc_err;
+static u32 nfc_irq_st;
+static u32 nfc_irq_en;
+static u32 nfc_irq_complete;
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_done);
+
+/*
+ * Internal helper to conditionally apply a delay (most of the time taken
+ * from an instruction's delay_ns field).
+ */ +static void cond_delay(unsigned int ns) +{ + if (!ns) + return; + + if (ns < 10000) + ndelay(ns); + else + udelay(DIV_ROUND_UP(ns, 1000)); +} + +static inline struct phytium_nfc *to_phytium_nfc(struct nand_controller *ctrl) +{ + return container_of(ctrl, struct phytium_nfc, controller); +} + +static inline struct phytium_nand_chip *to_phytium_nand(struct nand_chip *chip) +{ + return container_of(chip, struct phytium_nand_chip, chip); +} + +static u32 phytium_read(struct phytium_nfc *nfc, u32 reg) +{ + return readl_relaxed(nfc->regs + reg); +} + +static void phytium_write(struct phytium_nfc *nfc, u32 reg, u32 value) +{ + return writel_relaxed(value, nfc->regs + reg); +} + +static inline int phytium_wait_busy(struct phytium_nfc *nfc) +{ + u32 status; + + if (nfc_ecc_errover) { + nfc_ecc_errover = 0; + return 0; + } + + return readl_relaxed_poll_timeout(nfc->regs + NDSR, status, + !(status & NDSR_BUSY), 10, 10000); +} + +static void phytium_nfc_disable_int(struct phytium_nfc *nfc, u32 int_mask) +{ + u32 reg; + + reg = phytium_read(nfc, NDIR_MASK); + phytium_write(nfc, NDIR_MASK, reg | int_mask); +} + +static void phytium_nfc_enable_int(struct phytium_nfc *nfc, u32 int_mask) +{ + u32 reg; + + reg = phytium_read(nfc, NDIR_MASK); + phytium_write(nfc, NDIR_MASK, reg & (~int_mask)); +} + +static void phytium_nfc_clear_int(struct phytium_nfc *nfc, u32 int_mask) +{ + phytium_write(nfc, NDIR_MASK, int_mask); +} + +static int phytium_nfc_cmd_correct(struct phytium_nfc_op *nfc_op) +{ + if (!nfc_op) + return -EINVAL; + + if (nfc_op->cmd_len == 0x01) { + nfc_op->cmd[1] = nfc_op->cmd[0]; + nfc_op->cmd[0] = 0; + } + + return 0; +} + +static int phytium_nfc_addr_correct(struct phytium_nfc_op *nfc_op) +{ + u32 len; + int i, j; + + if (!nfc_op) + return -EINVAL; + + len = nfc_op->addr_len > PHYTIUM_NFC_ADDR_MAX_LEN ? 
+ PHYTIUM_NFC_ADDR_MAX_LEN : nfc_op->addr_len; + + if (len == PHYTIUM_NFC_ADDR_MAX_LEN) + return 0; + + for (i = len-1, j = PHYTIUM_NFC_ADDR_MAX_LEN - 1; i >= 0; i--, j--) { + nfc_op->addr[j] = nfc_op->addr[i]; + nfc_op->addr[i] = 0; + } + + return 0; +} + +static void phytium_nfc_parse_instructions(struct nand_chip *chip, + const struct nand_subop *subop, + struct phytium_nfc_op *nfc_op) +{ + struct nand_op_instr *instr = NULL; + bool first_cmd = true; + u32 op_id; + int i; + + /* Reset the input structure as most of its fields will be OR'ed */ + memset(nfc_op, 0, sizeof(struct phytium_nfc_op)); + + for (op_id = 0; op_id < subop->ninstrs; op_id++) { + unsigned int offset, naddrs; + const u8 *addrs; + int len; + + instr = (struct nand_op_instr *)&subop->instrs[op_id]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + if (first_cmd) { + nfc_op->cmd[0] = instr->ctx.cmd.opcode; + } else { + nfc_op->cmd[1] = instr->ctx.cmd.opcode; + nfc_op->cmd_ctrl.nfc_ctrl.dbc = 1; + } + + nfc_op->cle_ale_delay_ns = instr->delay_ns; + first_cmd = false; + nfc_op->cmd_len++; + + break; + + case NAND_OP_ADDR_INSTR: + offset = nand_subop_get_addr_start_off(subop, op_id); + naddrs = nand_subop_get_num_addr_cyc(subop, op_id); + addrs = &instr->ctx.addr.addrs[offset]; + + nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = naddrs; + + for (i = 0; i < min_t(u32, PHYTIUM_NFC_ADDR_MAX_LEN, naddrs); i++) + nfc_op->addr[i] = addrs[i]; + + nfc_op->cle_ale_delay_ns = instr->delay_ns; + + nfc_op->addr_len = naddrs; + break; + + case NAND_OP_DATA_IN_INSTR: + nfc_op->data_instr = instr; + nfc_op->data_instr_idx = op_id; + nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; + len = nand_subop_get_data_len(subop, op_id); + nfc_op->page_cnt = len; + nfc_op->data_delay_ns = instr->delay_ns; + + break; + + case NAND_OP_DATA_OUT_INSTR: + nfc_op->data_instr = instr; + nfc_op->data_instr_idx = op_id; + nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; + len = nand_subop_get_data_len(subop, op_id); + nfc_op->page_cnt = len; + nfc_op->data_delay_ns = instr->delay_ns; + break; + + case NAND_OP_WAITRDY_INSTR: + nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms; + nfc_op->rdy_delay_ns = instr->delay_ns; + break; + } + } +} + +static int phytium_nfc_prepare_cmd(struct nand_chip *chip, + struct phytium_nfc_op *nfc_op, + enum dma_data_direction direction) +{ + struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip); + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + int i; + + phytium_nfc_cmd_correct(nfc_op); + phytium_nfc_addr_correct(nfc_op); + + nfc_op->cmd_ctrl.nfc_ctrl.csel = 0x0F ^ (0x01 << phytium_nand->selected_die); + + for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) + nfc_op->mem_addr_first[i] = (nfc->dma_phy_addr >> (8 * i)) & 0xFF; + + return 0; +} + +static int phytium_nfc_send_cmd(struct nand_chip *chip, + struct phytium_nfc_op *nfc_op) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + u32 value = 0; + + memset((u8 *)nfc->dsp_addr, 0, PAGE_SIZE); + memcpy((u8 *)nfc->dsp_addr, (u8 *)nfc_op, PHYTIUM_NFC_DSP_SIZE); + + if (phytium_wait_busy(nfc) != 0) { + dev_err(nfc->dev, "NFC was always busy\n"); + dev_err(nfc->dev, "NFC state: %x\n", phytium_read(nfc, NDSR)); + dev_err(nfc->dev, "NFC debug: %x\n", phytium_read(nfc, ND_DEBUG)); + return 0; + } + + spin_lock(&nfc->spinlock); + value = nfc->dsp_phy_addr & 0xFFFFFFFF; + phytium_write(nfc, NDAR0, value); + + /* Don't modify NDAR1_DMA_RLEN & NDAR1_DMA_WLEN */ + value = phytium_read(nfc, NDAR1); + value |= NDAR1_H8((nfc->dsp_phy_addr >> 32) & 0xFF); + phytium_write(nfc, NDAR1, value); 
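/*
 * Editorial note: the descriptor kick-off sequence here is: copy the
 * struct phytium_nfc_op into the coherent DSP page, publish its
 * physical address via NDAR0 (low 32 bits) and the NDAR1_H8 field
 * (bits 39:32), unmask the completion interrupt, and finally set
 * NDAR1_DMA_EN so the controller starts fetching the descriptor.
 */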
+ + phytium_nfc_enable_int(nfc, NDIR_CMD_FINISH_MASK); + + value |= NDAR1_DMA_EN; + phytium_write(nfc, NDAR1, value); + spin_unlock(&nfc->spinlock); + + return 0; +} + +static int phytium_nfc_prepare_cmd2(struct nand_chip *chip, + struct phytium_nfc_op *nfc_op, + enum dma_data_direction direction, + u32 cmd_num) +{ + struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip); + int i; + + for (i = 0; i < cmd_num; i++) { + phytium_nfc_cmd_correct(nfc_op); + phytium_nfc_addr_correct(nfc_op); + nfc_op->cmd_ctrl.nfc_ctrl.csel = 0x0F ^ (0x01 << phytium_nand->selected_die); + nfc_op++; + } + + return 0; +} + +static int phytium_nfc_send_cmd2(struct nand_chip *chip, + struct phytium_nfc_op *nfc_op, + u32 cmd_num) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + u32 value = 0; + int i; + + memset((u8 *)nfc->dsp_addr, 0, PAGE_SIZE); + + for (i = 0; i < cmd_num; i++) { + memcpy((u8 *)nfc->dsp_addr + i*PHYTIUM_NFC_DSP_SIZE, + (u8 *)nfc_op, PHYTIUM_NFC_DSP_SIZE); + nfc_op++; + } + + if (phytium_wait_busy(nfc) != 0) { + dev_err(nfc->dev, "NFC was always busy\n"); + dev_err(nfc->dev, "NFC state: %x\n", phytium_read(nfc, NDSR)); + dev_err(nfc->dev, "NFC debug: %x\n", phytium_read(nfc, ND_DEBUG)); + return 0; + } + + spin_lock(&nfc->spinlock); + value = nfc->dsp_phy_addr & 0xFFFFFFFF; + phytium_write(nfc, NDAR0, value); + + /* Don't modify NDAR1_DMA_RLEN & NDAR1_DMA_WLEN */ + value = phytium_read(nfc, NDAR1); + value |= NDAR1_H8((nfc->dsp_phy_addr >> 32) & 0xFF); + phytium_write(nfc, NDAR1, value); + + phytium_nfc_enable_int(nfc, NDIR_DMA_FINISH_MASK | NDIR_ECC_ERR_MASK); + + value |= NDAR1_DMA_EN; + phytium_write(nfc, NDAR1, value); + spin_unlock(&nfc->spinlock); + + return 0; +} + +static int phytium_nfc_wait_op(struct nand_chip *chip, + u32 timeout_ms) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + int ret; + + /* Timeout is expressed in ms */ + if (!timeout_ms) + timeout_ms = IRQ_TIMEOUT; + else if (timeout_ms > 1000) + timeout_ms = 1000; + else if (timeout_ms < 100) + timeout_ms = 100; + + ret = wait_event_interruptible_timeout(wait_done, nfc_irq_complete, + msecs_to_jiffies(timeout_ms)); + nfc_irq_complete = false; + if (!ret) { + dev_err(nfc->dev, "Timeout waiting for RB signal\n"); + dev_err(nfc->dev, "NFC state: %x\n", phytium_read(nfc, NDSR)); + dev_err(nfc->dev, "NFC irq state: %x, irq en:%x\n", + phytium_read(nfc, NDIR), phytium_read(nfc, NDIR_MASK)); + dev_err(nfc->dev, "NFC debug: %x\n", phytium_read(nfc, ND_DEBUG)); + + phytium_nfc_clear_int(nfc, NDIR_ALL_INT(nfc->caps->int_mask_bits)); + return -ETIMEDOUT; + } + + return 0; +} + +static int phytium_nfc_xfer_data_pio(struct phytium_nfc *nfc, + const struct nand_subop *subop, + struct phytium_nfc_op *nfc_op) +{ + const struct nand_op_instr *instr = nfc_op->data_instr; + unsigned int op_id = nfc_op->data_instr_idx; + unsigned int len = nand_subop_get_data_len(subop, op_id); + unsigned int offset = nand_subop_get_data_start_off(subop, op_id); + bool reading = (instr->type == NAND_OP_DATA_IN_INSTR); + + if (reading) { + u8 *in = instr->ctx.data.buf.in + offset; + + memcpy(in, nfc->dma_buf, len); + + nfc->dma_offset = 0; + } else { + const u8 *out = instr->ctx.data.buf.out + offset; + + memcpy(nfc->dma_buf, out, len); + } + + return 0; +} + +static int memcpy_to_reg16(struct phytium_nfc *nfc, u32 reg, u16 *buf, size_t len) +{ + int i; + u32 val = 0; + + if (!nfc || !buf || (len >= 16)) + return -EINVAL; + + for (i = 0; i < len; i++) { + val = (val << 16) + buf[i]; + if (i % 2) { + phytium_write(nfc, reg, 
val); + val = 0; + reg += 4; + } + } + + return 0; +} + +static int phytium_nfc_default_data_interface(struct phytium_nfc *nfc) +{ + int value; + + value = phytium_read(nfc, NDCR0); + value &= (~NDCR0_IN_MODE(3)); + value |= NDCR0_IN_MODE(nfc->inter_mode); + phytium_write(nfc, NDCR0, value); + + value = phytium_read(nfc, NDCR1); + value &= (~NDCR1_SAMPL_PHASE(0xFFFF)); + + switch (nfc->inter_mode) { + case ASYN_SDR: + if (nfc->timing_mode == ASY_MODE4) { + memcpy_to_reg16(nfc, NDTR0, timing_asy_mode4, TIMING_ASY_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(4)); + } else if (nfc->timing_mode == ASY_MODE3) { + memcpy_to_reg16(nfc, NDTR0, timing_asy_mode3, TIMING_ASY_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(5)); + } else if (nfc->timing_mode == ASY_MODE2) { + memcpy_to_reg16(nfc, NDTR0, timing_asy_mode2, TIMING_ASY_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(3)); + } else if (nfc->timing_mode == ASY_MODE1) { + memcpy_to_reg16(nfc, NDTR0, timing_asy_mode1, TIMING_ASY_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(2)); + } else { + memcpy_to_reg16(nfc, NDTR0, timing_asy_mode0, TIMING_ASY_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(1)); + } + phytium_write(nfc, ND_INTERVAL_TIME, 0x01); + break; + case ONFI_DDR: + if (nfc->timing_mode == SYN_MODE4) { + memcpy_to_reg16(nfc, NDTR6, timing_syn_mode4, TIMING_SYN_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(0x0D)); + phytium_write(nfc, ND_INTERVAL_TIME, 0x30); + } else if (nfc->timing_mode == SYN_MODE3) { + memcpy_to_reg16(nfc, NDTR6, timing_syn_mode3, TIMING_SYN_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(0x05)); + phytium_write(nfc, ND_INTERVAL_TIME, 0x18); + } else if (nfc->timing_mode == SYN_MODE2) { + memcpy_to_reg16(nfc, NDTR6, timing_syn_mode2, TIMING_SYN_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(0x08)); + phytium_write(nfc, ND_INTERVAL_TIME, 0x20); + } else if (nfc->timing_mode == SYN_MODE1) { + memcpy_to_reg16(nfc, NDTR6, timing_syn_mode1, TIMING_SYN_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(0x12)); + phytium_write(nfc, ND_INTERVAL_TIME, 0x40); + } else { + memcpy_to_reg16(nfc, NDTR6, timing_syn_mode0, TIMING_SYN_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(0x12)); + phytium_write(nfc, ND_INTERVAL_TIME, 0x40); + } + break; + case TOG_ASYN_DDR: + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(8)); + phytium_write(nfc, ND_INTERVAL_TIME, 0xC8); + memcpy_to_reg16(nfc, NDTR13, timing_tog_ddr_mode0, TIMING_TOG_NUM); + break; + } + + return 0; +} + +static int phytium_nfc_naked_waitrdy_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + int ret = 0; + + phytium_nfc_parse_instructions(chip, subop, &nfc_op); + + dev_info(nfc->dev, "Phytium nand command 0x%02x 0x%02x.\n", + nfc_op.cmd[0], nfc_op.cmd[1]); + + switch (nfc_op.cmd[0]) { + case NAND_CMD_PARAM: + memset(nfc->dma_buf, 0, PAGE_SIZE); + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ_PARAM; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + if (nfc->inter_pro == NAND_ONFI) + nfc_op.page_cnt = 3 * sizeof(struct nand_onfi_params); + else if (nfc->inter_pro == NAND_JEDEC) + nfc_op.page_cnt = 3 * sizeof(struct nand_jedec_params); + if (nfc_op.page_cnt) + nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + nfc->dma_offset = 0; + break; + case NAND_CMD_SET_FEATURES: + direction = 
DMA_TO_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_SET_FTR; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + if (nfc->inter_mode != ASYN_SDR) { + dev_err(nfc->dev, "Not support SET_FEATURES command!\n"); + return 0; + } + break; + case NAND_CMD_GET_FEATURES: + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_GET_FTR; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + break; + case NAND_CMD_READ0: + if (nfc_op.cmd[1] == NAND_CMD_READSTART) { /* large page */ + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + } else if (nfc_op.cmd[1] == NAND_CMD_SEQIN) { /* program page begin */ + nfc_op.cmd[0] = NAND_CMD_SEQIN; + nfc_op.cmd[1] = NAND_CMD_PAGEPROG; + direction = DMA_TO_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + } else { /* small page */ + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + } + break; + case NAND_CMD_RNDOUT: /* change read column */ + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_READ_COL; + nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + break; + case NAND_CMD_READSTART: /* large page */ + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + break; + case NAND_CMD_RNDOUTSTART: /* change read column */ + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_READ_COL; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + nfc->dma_offset = nfc_op.addr[1]; + nfc->dma_offset = (nfc->dma_offset << 8) + nfc_op.addr[0]; + break; + case NAND_CMD_SEQIN: /* program begin */ + if (nfc_op.cmd[0] == NAND_CMD_READ0) { + nfc_op.cmd[0] = NAND_CMD_SEQIN; + nfc_op.cmd[1] = NAND_CMD_PAGEPROG; + } + direction = DMA_TO_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + break; + case NAND_CMD_RNDIN: /* change write column */ + direction = DMA_TO_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_WR_COL; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + break; + case NAND_CMD_PAGEPROG: /* program end */ + nfc_op.cmd[0] = NAND_CMD_RNDIN; + nfc_op.cmd[1] = NAND_CMD_PAGEPROG; + direction = DMA_TO_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + break; + default: + dev_err(nfc->dev, "Not support cmd %d.\n", nfc_op.cmd[1]); + ret = -EINVAL; + goto out; + } + + if ((nfc_op.data_instr) && (direction == DMA_TO_DEVICE)) + phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op); + + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + nfc_op.rdy_timeout_ms = nfc_op.rdy_timeout_ms; + ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); + if (ret) + goto out; + + cond_delay(nfc_op.rdy_delay_ns); + + if ((nfc_op.data_instr) && (direction == DMA_FROM_DEVICE)) + phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op); + +out: + return ret; +} + +static int phytium_nfc_read_id_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + u16 read_len = 0; + int ret; + u8 *buf = nfc->dma_buf; + + memset(nfc->dma_buf, 0, PAGE_SIZE); + direction = DMA_FROM_DEVICE; + + phytium_nfc_parse_instructions(chip, subop, &nfc_op); + read_len = 
nfc_op.page_cnt; + nfc_op.page_cnt = (read_len & 0x03) ? ((read_len & 0xFFFC) + 4) : read_len; + + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ_ID; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 0; + + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); + if (ret) + return ret; + + cond_delay(nfc_op.rdy_delay_ns); + + if (!strncmp(nfc->dma_buf, "ONFI", 4)) { + nfc->inter_pro = NAND_ONFI; + } else if (!strncmp(nfc->dma_buf, "JEDEC", 5)) { + nfc->inter_pro = NAND_JEDEC; + if (buf[5] == 1) + nfc->inter_mode = ASYN_SDR; + else if (buf[5] == 2) + nfc->inter_mode = TOG_ASYN_DDR; + else if (buf[5] == 4) + nfc->inter_mode = ASYN_SDR; + } else { + nfc->inter_pro = NAND_OTHER; + } + + dev_info(nfc->dev, "Nand protocol: %d, interface mode: %d\n", + nfc->inter_pro, nfc->inter_mode); + + phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op); + + return 0; +} + +static int phytium_nfc_read_status_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + u16 read_len = 0; + u32 timeout, count = 0; + int ret = 0; + + direction = DMA_FROM_DEVICE; + + phytium_nfc_parse_instructions(chip, subop, &nfc_op); + read_len = nfc_op.page_cnt; + nfc_op.page_cnt = (read_len & 0x03) ? ((read_len & 0xFFFC) + 4) : read_len; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ_STATUS; + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + +read_status_retry: + count++; + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + timeout = nfc_op.rdy_timeout_ms ? nfc_op.rdy_timeout_ms : 10; + ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); + if (ret) + goto out; + + phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op); + + if (0xE0 != *(u8 *)(nfc->dma_buf)) { + dev_info(nfc->dev, "Retry to read status (%x)\n", *(u8 *)(nfc->dma_buf)); + + if (count < 10) + goto read_status_retry; + } + +out: + return ret; +} + +static int phytium_nfc_reset_cmd_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + + phytium_nfc_parse_instructions(chip, subop, &nfc_op); + + direction = DMA_NONE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_RESET; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + + return phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); +} + +static int phytium_nfc_erase_cmd_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + + phytium_nfc_parse_instructions(chip, subop, &nfc_op); + direction = DMA_NONE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_ERASE; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + + return phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); +} + +static int phytium_nfc_data_in_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nfc_op nfc_op; + struct nand_op_instr *instr; + unsigned int op_id; + unsigned int len; + unsigned int offset; + u8 *in = NULL; + + phytium_nfc_parse_instructions(chip, subop, &nfc_op); + if 
(nfc_op.data_instr->type != NAND_OP_DATA_IN_INSTR) { + dev_err(nfc->dev, "Phytium nfc instrs parser failed!\n"); + return -EINVAL; + } + + instr = nfc_op.data_instr; + op_id = nfc_op.data_instr_idx; + len = nand_subop_get_data_len(subop, op_id); + offset = nand_subop_get_data_start_off(subop, op_id); + in = instr->ctx.data.buf.in + offset; + + memcpy(in, nfc->dma_buf + nfc->dma_offset, len); + nfc->dma_offset += len; + + return 0; +} + +static const struct nand_op_parser phytium_nfc_op_parser = NAND_OP_PARSER( + /* Naked commands not supported, use a function for each pattern */ + NAND_OP_PARSER_PATTERN( + phytium_nfc_read_id_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)), + NAND_OP_PARSER_PATTERN( + phytium_nfc_erase_cmd_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + phytium_nfc_read_status_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)), + NAND_OP_PARSER_PATTERN( + phytium_nfc_reset_cmd_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + phytium_nfc_naked_waitrdy_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)), + NAND_OP_PARSER_PATTERN( + phytium_nfc_naked_waitrdy_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + phytium_nfc_naked_waitrdy_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 8), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + phytium_nfc_naked_waitrdy_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)), + NAND_OP_PARSER_PATTERN( + phytium_nfc_data_in_type_exec, + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)), + ); + +static int phytium_nfc_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + return nand_op_parser_exec_op(chip, &phytium_nfc_op_parser, + op, check_only); +} + +static int phytium_nfc_reset(struct phytium_nfc *nfc) +{ + u32 value; + + phytium_write(nfc, NDIR_MASK, NDIR_ALL_INT(nfc->caps->int_mask_bits)); + phytium_write(nfc, NDSR, NDIR_ALL_INT(nfc->caps->int_mask_bits)); + + phytium_write(nfc, ND_ERR_CLR, 0x0F); + phytium_write(nfc, NDFIFO_CLR, 1); + + value = phytium_read(nfc, NDCR0); + phytium_write(nfc, NDCR0, value & ~(NDCR0_ECC_EN | NDCR0_SPARE_EN)); + + return 0; +} + +static void phytium_nfc_select_chip(struct nand_chip *chip, int die_nr) +{ + struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip); + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + + dev_dbg(nfc->dev, "Phytium nand selected chip %d\n", die_nr); + + if (chip == nfc->selected_chip && die_nr == phytium_nand->selected_die) + return; + + if (die_nr < 0 || die_nr >= phytium_nand->nsels) { + nfc->selected_chip = NULL; + phytium_nand->selected_die = -1; + return; + } + + 
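/*
 * Editorial note: selecting a new target (re)initialises the
 * controller below via phytium_nfc_reset(): all interrupts are masked
 * and acknowledged, the error and FIFO state is cleared, and ECC and
 * spare-area access are disabled in NDCR0, so the next command starts
 * from a known configuration.
 */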
phytium_nfc_reset(nfc); + + nfc->selected_chip = chip; + phytium_nand->selected_die = die_nr; +} + +static int phytium_nand_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section) + return -ERANGE; + + oobregion->length = chip->ecc.total; + oobregion->offset = mtd->oobsize - oobregion->length; + + return 0; +} + +static int phytium_nand_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section) + return -ERANGE; + + /* + * Bootrom looks in bytes 0 & 5 for bad blocks for the + * 4KB page / 4bit BCH combination. + */ + if (mtd->writesize >= SZ_4K) + oobregion->offset = 6; + else + oobregion->offset = 2; + + oobregion->length = mtd->oobsize - chip->ecc.total - oobregion->offset; + + return 0; +} + +static const struct mtd_ooblayout_ops phytium_nand_ooblayout_ops = { + .ecc = phytium_nand_ooblayout_ecc, + .free = phytium_nand_ooblayout_free, +}; + +static void phytium_nfc_enable_hw_ecc(struct nand_chip *chip) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + u32 ndcr0 = phytium_read(nfc, NDCR0); + + if (!(ndcr0 & NDCR0_ECC_EN)) + phytium_write(nfc, NDCR0, ndcr0 | NDCR0_ECC_EN); +} + +static void phytium_nfc_disable_hw_ecc(struct nand_chip *chip) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + u32 ndcr0 = phytium_read(nfc, NDCR0); + + if (ndcr0 & NDCR0_ECC_EN) + phytium_write(nfc, NDCR0, ndcr0 & ~NDCR0_ECC_EN); +} + +irqreturn_t phytium_nfc_isr(int irq, void *dev_id) +{ + struct phytium_nfc *nfc = dev_id; + u32 st = phytium_read(nfc, NDIR); + u32 ien = (~phytium_read(nfc, NDIR_MASK)) & NDIR_ALL_INT(nfc->caps->int_mask_bits); + + if (!(st & ien)) + return IRQ_NONE; + + nfc_irq_st = st; + nfc_irq_en = ien; + phytium_nfc_disable_int(nfc, st & NDIR_ALL_INT(nfc->caps->int_mask_bits)); + phytium_write(nfc, 0xFD0, 0); + + if (st & (NDIR_CMD_FINISH | NDIR_DMA_FINISH)) { + if (st & NDIR_ECC_ERR) + nfc_ecc_err = 1; + phytium_write(nfc, NDIR, st); + nfc_irq_complete = 1; + } else if (st & (NDIR_FIFO_TIMEOUT | NDIR_PGFINISH)) { + phytium_write(nfc, NDIR, st); + phytium_nfc_enable_int(nfc, (~st) & (NDIR_DMA_FINISH_MASK | + NDIR_PGFINISH_MASK | + NDIR_FIFO_TIMEOUT_MASK | + NDIR_CMD_FINISH_MASK)); + nfc_irq_complete = 0; + } else if (st & NDIR_ECC_ERR) { + phytium_write(nfc, ND_ERR_CLR, 0x08); + phytium_write(nfc, NDIR, st); + phytium_write(nfc, NDFIFO_CLR, 0x01); + nfc_irq_complete = 1; + nfc_ecc_errover = 1; + } else { + phytium_write(nfc, NDIR, st); + nfc_irq_complete = 1; + } + + wake_up(&wait_done); + + return IRQ_HANDLED; +} +EXPORT_SYMBOL(phytium_nfc_isr); + +static int phytium_nfc_hw_ecc_correct(struct nand_chip *chip, + char *buf, int len) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + u32 i, j, value, tmp; + u32 ecc_err_reg_nums; + int stat = 0; + + if (!buf) + return -EINVAL; + + if (nfc->caps->hw_ver == 1) + ecc_err_reg_nums = 2; + else + ecc_err_reg_nums = 4; + + for (i = 0; i < chip->ecc.steps; i++) { + for (j = 0; j < ecc_err_reg_nums; j++) { + value = phytium_read(nfc, 0xB8 + 4 * (ecc_err_reg_nums * i + j)); + dev_info(nfc->dev, "ECC_FLAG: offset:%x value:0x%08x\n", + 0xB8 + 4 * (2 * i + j), value); + + tmp = value & 0xFFFF; + if (tmp && (tmp <= 4096)) { + tmp--; + stat++; + buf[chip->ecc.size * i + (tmp >> 3)] ^= (0x01 << tmp % 8); + dev_info(nfc->dev, "ECC_CORRECT %x %02x\n", + chip->ecc.size * i + (tmp >> 3), + buf[chip->ecc.size * i + (tmp >> 3)]); + 
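/*
 * Editorial note: each 32-bit ECC flag register packs two 16-bit error
 * locators, one per half-word. A non-zero locator is the 1-based
 * position of a flipped bit within the current ECC step, so the
 * correction subtracts one and XORs bit (tmp % 8) of byte (tmp >> 3);
 * locators above 4096 bits (512 bytes) are out of range and only
 * logged.
 */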
dev_info(nfc->dev, "ECC_CORRECT xor %x %02x\n", + 0x01 << (tmp % 8), buf[chip->ecc.size * i + (tmp >> 3)]); + } else if (tmp > 4096) { + dev_info(nfc->dev, "ECC_CORRECT offset > 4096!\n"); + } + + tmp = (value >> 16) & 0xFFFF; + if (tmp && (tmp <= 4096)) { + tmp--; + stat++; + buf[chip->ecc.size * i + (tmp >> 3)] ^= (0x01 << tmp % 8); + dev_info(nfc->dev, "ECC_CORRECT %x %02x\n", + chip->ecc.size * i + (tmp >> 3), + buf[chip->ecc.size * i + (tmp >> 3)]); + dev_info(nfc->dev, "ECC_CORRECT xor %x %02x\n", + chip->ecc.size * i + (tmp >> 3), + buf[chip->ecc.size * i + (tmp >> 3)]); + } else if (tmp > 4096) { + dev_info(nfc->dev, "ECC_CORRECT offset > 4096!\n"); + } + } + } + + return stat; +} + +static int phytium_nand_page_read(struct mtd_info *mtd, struct nand_chip *chip, + u8 *buf, u8 *oob_buf, int oob_len, int page, + bool read) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nand_chip *phytium_nand = NULL; + const struct nand_sdr_timings *sdr = NULL; + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + int ret = 0; + + memset(&nfc_op, 0, sizeof(nfc_op)); + phytium_nand = to_phytium_nand(chip); + sdr = nand_get_sdr_timings(nand_get_interface_config(&phytium_nand->chip)); + + memset(nfc->dma_buf, 0x0, mtd->writesize + mtd->oobsize); + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; + nfc_op.cmd[0] = NAND_CMD_READ0; + nfc_op.cmd[1] = NAND_CMD_READSTART; + nfc_op.cmd_len = 2; + nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); + nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max); + nfc_op.rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min); + + nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1; + nfc_op.addr[2] = page; + nfc_op.addr[3] = page >> 8; + nfc_op.addr[4] = page >> 16; + nfc_op.addr_len = 5; + nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + + nfc_op.page_cnt = mtd->writesize; + nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + nfc_op.cmd_ctrl.nfc_ctrl.ecc_en = 0; + + /* For data read/program */ + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + + ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); + if (ret) + return ret; + + if ((direction == DMA_FROM_DEVICE) && buf) + memcpy(buf, nfc->dma_buf, mtd->writesize); + + return ret; +} + +static int phytium_nand_oob_read(struct mtd_info *mtd, struct nand_chip *chip, + u8 *buf, u8 *oob_buf, int oob_len, int page, + bool read) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nand_chip *phytium_nand = NULL; + const struct nand_sdr_timings *sdr = NULL; + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + int ret = 0; + + memset(&nfc_op, 0, sizeof(nfc_op)); + phytium_nand = to_phytium_nand(chip); + sdr = nand_get_sdr_timings(nand_get_interface_config(&phytium_nand->chip)); + + memset(nfc->dma_buf, 0x00, mtd->writesize + mtd->oobsize); + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; + nfc_op.cmd[0] = NAND_CMD_READ0; + nfc_op.cmd[1] = NAND_CMD_READSTART; + nfc_op.cmd_len = 2; + nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); + nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max); + nfc_op.rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min); + + nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1; + nfc_op.addr[2] = page; + nfc_op.addr[3] = page >> 8; + nfc_op.addr[4] = page >> 16; + nfc_op.addr_len = 5; + nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + + nfc_op.page_cnt = oob_len; + 
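/*
 * Editorial note: the spare area is read with the ordinary
 * READ0/READSTART pair; what differs is the column address set just
 * below (addr[0]/addr[1] = mtd->writesize), which points the transfer
 * at the first OOB byte, and ECC stays disabled.
 */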
nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + nfc_op.cmd_ctrl.nfc_ctrl.ecc_en = 0; + nfc_op.addr[0] = mtd->writesize & 0xFF; + nfc_op.addr[1] = (mtd->writesize >> 8) & 0xFF; + + /* For data read/program */ + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + + ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); + if (ret) + return ret; + + cond_delay(nfc_op.rdy_delay_ns); + + if (direction == DMA_FROM_DEVICE) + memcpy(oob_buf, nfc->dma_buf, oob_len); + + return ret; +} + +static int phytium_nand_get_ecc_total(struct mtd_info *mtd, + struct nand_ecc_ctrl *ecc) +{ + int ecc_total = 0; + + switch (mtd->writesize) { + case 0x200: + if (ecc->strength == 8) + ecc_total = 0x0D; + else if (ecc->strength == 4) + ecc_total = 7; + else if (ecc->strength == 2) + ecc_total = 4; + else + ecc_total = 0; + break; + case 0x800: + if (ecc->strength == 8) + ecc_total = 0x34; + else if (ecc->strength == 4) + ecc_total = 0x1a; + else if (ecc->strength == 2) + ecc_total = 0xd; + else + ecc_total = 0; + break; + case 0x1000: + if (ecc->strength == 8) + ecc_total = 0x68; + else if (ecc->strength == 4) + ecc_total = 0x34; + else if (ecc->strength == 2) + ecc_total = 0x1a; + else + ecc_total = 0; + break; + case 0x2000: + if (ecc->strength == 8) + ecc_total = 0xD0; + else if (ecc->strength == 4) + ecc_total = 0x68; + else if (ecc->strength == 2) + ecc_total = 0x34; + else + ecc_total = 0; + break; + case 0x4000: + if (ecc->strength == 8) + ecc_total = 0x1A0; + if (ecc->strength == 4) + ecc_total = 0xD0; + else if (ecc->strength == 2) + ecc_total = 0x68; + else + ecc_total = 0; + break; + default: + ecc_total = 0; + break; + } + + return ecc_total; +} + +static int phytium_nand_page_read_hwecc(struct mtd_info *mtd, struct nand_chip *chip, + u8 *buf, u8 *oob_buf, int oob_len, int page, + bool read) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nand_chip *phytium_nand = NULL; + const struct nand_sdr_timings *sdr = NULL; + struct phytium_nfc_op *nfc_op = NULL; + enum dma_data_direction direction; + u32 ecc_offset; + int max_bitflips = 0; + u32 nfc_state = 0; + int ret = 0; + int i; + + phytium_nand = to_phytium_nand(chip); + sdr = nand_get_sdr_timings(nand_get_interface_config(&phytium_nand->chip)); + + ecc_offset = phytium_nand->ecc.offset; + memset(nfc->dma_buf, 0x00, mtd->writesize + mtd->oobsize); + nfc_op = kzalloc(2 * sizeof(struct phytium_nfc_op), GFP_KERNEL); + if (!nfc_op) { + dev_err(nfc->dev, "Can't malloc space for phytium_nfc_op\n"); + return 0; + } + + nfc_op->cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); + nfc_op->rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max); + nfc_op->rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min); + + direction = DMA_FROM_DEVICE; + nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; + nfc_op->cmd[0] = NAND_CMD_READ0; + nfc_op->cmd[1] = NAND_CMD_READSTART; + nfc_op->cmd_len = 2; + nfc_op->addr_len = 5; + nfc_op->cmd_ctrl.nfc_ctrl.dbc = 1; + nfc_op->addr[2] = page; + nfc_op->addr[3] = page >> 8; + nfc_op->addr[4] = page >> 16; + nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; + nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; + nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 1; + nfc_op->page_cnt = mtd->writesize; + nfc_op->cmd_ctrl.nfc_ctrl.nc = 1; + for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) + nfc_op->mem_addr_first[i] = (nfc->dma_phy_addr >> (8 * i)) & 0xFF; + + nfc_op++; + memcpy(nfc_op, nfc_op - 1, sizeof(struct phytium_nfc_op)); + nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_READ_COL; + nfc_op->cmd[0] = 
NAND_CMD_RNDOUT; + nfc_op->cmd[1] = NAND_CMD_RNDOUTSTART; + memset(&nfc_op->addr, 0, PHYTIUM_NFC_ADDR_MAX_LEN); + nfc_op->addr_len = 2; + nfc_op->addr[0] = mtd->writesize + phytium_nand->ecc.offset; + nfc_op->addr[1] = (mtd->writesize + phytium_nand->ecc.offset) >> 8; + nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x02; + nfc_op->page_cnt = phytium_nand_get_ecc_total(mtd, &chip->ecc); + nfc_op->cmd_ctrl.nfc_ctrl.nc = 0; + nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 0; + nfc_op->cmd_ctrl.nfc_ctrl.ecc_en = 1; + for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) + nfc_op->mem_addr_first[i] = + ((nfc->dma_phy_addr + mtd->writesize) >> (8 * i)) & 0xFF; + + nfc_op--; + phytium_nfc_prepare_cmd2(chip, nfc_op, direction, 2); + phytium_nfc_send_cmd2(chip, nfc_op, 2); + cond_delay(nfc_op->cle_ale_delay_ns); + + ret = phytium_nfc_wait_op(chip, nfc_op->rdy_timeout_ms); + if (ret) { + kfree(nfc_op); + return ret; + } + + cond_delay(nfc_op->rdy_delay_ns * 1000); + + if ((direction == DMA_FROM_DEVICE) && buf) { + nfc_state = phytium_read(nfc, NDSR); + if ((nfc_state & NDSR_ECC_ERROVER) || (nfc_ecc_errover == 1)) { + for (i = 0; i < mtd->writesize/16; i++) { + if (0xFF != *(u8 *)(nfc->dma_buf + i)) { + dev_info(nfc->dev, "NFC: NDSR_ECC_ERROVER %x\n", page); + mtd->ecc_stats.failed++; + mtd->ecc_stats.corrected += max_bitflips; + break; + } + } + } else if (nfc_state & NDSR_ECC_ERR) { + max_bitflips = phytium_nfc_hw_ecc_correct(chip, + nfc->dma_buf, mtd->writesize); + mtd->ecc_stats.corrected += max_bitflips; + dev_info(nfc->dev, "NFC: NDSR_ECC_ERR page:%x, bit:%d\n", + page, max_bitflips); + } + + memcpy(buf, nfc->dma_buf, mtd->writesize); + } + + kfree(nfc_op); + return max_bitflips; +} + +static int phytium_nand_page_write(struct mtd_info *mtd, struct nand_chip *chip, + const u8 *buf, u8 *oob_buf, int oob_len, int page, + bool read) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nand_chip *phytium_nand = NULL; + const struct nand_sdr_timings *sdr = NULL; + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + int ret = 0; + + memset(&nfc_op, 0, sizeof(nfc_op)); + phytium_nand = to_phytium_nand(chip); + sdr = nand_get_sdr_timings(nand_get_interface_config(&phytium_nand->chip)); + + memcpy(nfc->dma_buf, buf, mtd->writesize); + direction = DMA_TO_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; + nfc_op.cmd[0] = NAND_CMD_SEQIN; + nfc_op.cmd[1] = NAND_CMD_PAGEPROG; + nfc_op.cmd_len = 2; + nfc_op.addr_len = 5; + nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); + nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tPROG_max); + nfc_op.rdy_delay_ns = 0; + + nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1; + nfc_op.addr[2] = page; + nfc_op.addr[3] = page >> 8; + nfc_op.addr[4] = page >> 16; + nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; + nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + nfc_op.page_cnt = mtd->writesize; + + /* For data read/program */ + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + + ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); + if (ret) + goto out; + + cond_delay(nfc_op.rdy_delay_ns); +out: + return ret; +} + +static int phytium_nand_oob_write(struct mtd_info *mtd, struct nand_chip *chip, + u8 *buf, u8 *oob_buf, int oob_len, int page, + bool read) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nand_chip *phytium_nand = NULL; + const struct nand_sdr_timings *sdr = NULL; + struct phytium_nfc_op nfc_op; + enum 
dma_data_direction direction; + int ret = 0; + + memset(&nfc_op, 0, sizeof(nfc_op)); + phytium_nand = to_phytium_nand(chip); + sdr = nand_get_sdr_timings(nand_get_interface_config(&phytium_nand->chip)); + + direction = DMA_TO_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; + nfc_op.cmd[0] = NAND_CMD_SEQIN; + nfc_op.cmd[1] = NAND_CMD_PAGEPROG; + nfc_op.cmd_len = 2; + nfc_op.addr_len = 5; + nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); + nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tPROG_max); + nfc_op.rdy_delay_ns = 0; + + nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1; + nfc_op.addr[2] = page; + nfc_op.addr[3] = page >> 8; + nfc_op.addr[4] = page >> 16; + nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; + nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + + nfc_op.page_cnt = oob_len; + nfc_op.cmd_ctrl.nfc_ctrl.ecc_en = 0; + nfc_op.addr[0] = mtd->writesize & 0xFF; + nfc_op.addr[1] = (mtd->writesize >> 8) & 0xFF; + memcpy(nfc->dma_buf, oob_buf, mtd->oobsize); + + /* For data read/program */ + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + + ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); + if (ret) + goto out; + + cond_delay(nfc_op.rdy_delay_ns); +out: + return ret; +} + +static int phytium_nand_page_write_hwecc(struct mtd_info *mtd, struct nand_chip *chip, + const u8 *buf, u8 *oob_buf, int oob_len, int page, + bool read) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nand_chip *phytium_nand = NULL; + const struct nand_sdr_timings *sdr = NULL; + struct phytium_nfc_op *nfc_op; + enum dma_data_direction direction; + u32 ecc_offset; + int ret = 0; + int i; + + phytium_nand = to_phytium_nand(chip); + sdr = nand_get_sdr_timings(nand_get_interface_config(&phytium_nand->chip)); + ecc_offset = phytium_nand->ecc.offset; + + nfc_op = kzalloc(2 * sizeof(struct phytium_nfc_op), GFP_KERNEL); + if (!nfc_op) + return -ENOMEM; + + nfc_op->cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); + nfc_op->rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max); + nfc_op->rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min); + + direction = DMA_TO_DEVICE; + nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_ROW_ADDR; + nfc_op->cmd[0] = NAND_CMD_SEQIN; + nfc_op->cmd_len = 1; + nfc_op->addr_len = 5; + nfc_op->cmd_ctrl.nfc_ctrl.dbc = 0; + nfc_op->addr[2] = page; + nfc_op->addr[3] = page >> 8; + nfc_op->addr[4] = page >> 16; + nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; + nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 0; + nfc_op->cmd_ctrl.nfc_ctrl.nc = 1; + for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) + nfc_op->mem_addr_first[i] = (nfc->dma_phy_addr >> (8 * i)) & 0xFF; + + /* The first dsp must have data to transfer */ + memcpy(nfc->dma_buf, buf, mtd->writesize); + nfc_op->page_cnt = mtd->writesize; + nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; + + nfc_op++; + memcpy(nfc_op, nfc_op - 1, sizeof(struct phytium_nfc_op)); + nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; + nfc_op->cmd_ctrl.nfc_ctrl.dbc = 1; + nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 1; + nfc_op->cmd[0] = NAND_CMD_RNDIN; + nfc_op->cmd[1] = NAND_CMD_PAGEPROG; + memset(&nfc_op->addr, 0, PHYTIUM_NFC_ADDR_MAX_LEN); + nfc_op->addr_len = 2; + nfc_op->cmd_len = 2; + nfc_op->addr[0] = mtd->writesize + ecc_offset; + nfc_op->addr[1] = (mtd->writesize + ecc_offset) >> 8; + nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x02; + nfc_op->page_cnt = phytium_nand_get_ecc_total(mtd, &chip->ecc); + nfc_op->cmd_ctrl.nfc_ctrl.nc = 0; +
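/* + * Second descriptor of the chain (nc = 0 terminates it): with ecc_en + * set just below, the RNDIN sequence at column (writesize + ecc_offset) + * appears intended to let the controller emit its computed ECC bytes + * into the OOB area; only ecc.length bytes move, no raw OOB payload. + */ +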
nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; + nfc_op->cmd_ctrl.nfc_ctrl.ecc_en = 1; + for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) + nfc_op->mem_addr_first[i] = + ((nfc->dma_phy_addr + mtd->writesize + ecc_offset) >> (8 * i)) & 0xFF; + + /* when enable ECC, must offer ecc_offset of oob, but no oobdata */ + nfc_op--; + phytium_nfc_prepare_cmd2(chip, nfc_op, direction, 2); + phytium_nfc_send_cmd2(chip, nfc_op, 2); + cond_delay(nfc_op->cle_ale_delay_ns); + + ret = phytium_nfc_wait_op(chip, nfc_op->rdy_timeout_ms); + if (ret) + goto out; + + cond_delay(nfc_op->rdy_delay_ns * 1000); +out: + kfree(nfc_op); + return ret; +} + +static int phytium_nfc_hw_ecc_bch_read_page_raw(struct nand_chip *chip, + u8 *buf, int oob_required, + int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + u32 oob_len = oob_required ? mtd->oobsize : 0; + int ret; + + ret = phytium_nand_page_read(mtd, chip, buf, NULL, 0, page, true); + if (oob_required) + ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi, + oob_len, page, true); + + return ret; +} + +static int phytium_nfc_hw_ecc_bch_read_oob_raw(struct nand_chip *chip, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + /* Invalidate page cache */ + chip->pagecache.page = -1; + memset(chip->oob_poi, 0xFF, mtd->oobsize); + + ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi, + mtd->oobsize, page, true); + + return ret; +} + +static int phytium_nfc_hw_ecc_bch_read_page(struct nand_chip *chip, + u8 *buf, int oob_required, + int page) +{ + int ret; + struct mtd_info *mtd = nand_to_mtd(chip); + u32 oob_len = oob_required ? mtd->oobsize : 0; + struct phytium_nand_chip *phytium_nand = NULL; + + phytium_nand = to_phytium_nand(chip); + + phytium_nfc_enable_hw_ecc(chip); + cond_delay(20*1000); + + ret = phytium_nand_page_read_hwecc(mtd, chip, buf, NULL, + 0, page, true); + + phytium_nfc_disable_hw_ecc(chip); + + if (oob_required) { + oob_len = mtd->oobsize; + ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi, + oob_len, page, true); + } + + return ret; +} + +static int phytium_nfc_hw_ecc_bch_read_oob(struct nand_chip *chip, + int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + u32 oob_len = mtd->oobsize; + int ret; + + ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi, + oob_len, page, true); + + return ret; +} + +static int phytium_nfc_hw_ecc_bch_write_page_raw(struct nand_chip *chip, + const u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + void *oob_buf = oob_required ? chip->oob_poi : NULL; + + if (oob_required) + phytium_nand_oob_write(mtd, chip, NULL, oob_buf, + mtd->oobsize, page, false); + + return phytium_nand_page_write(mtd, chip, buf, NULL, + 0, page, false); +} + +static int phytium_nfc_hw_ecc_bch_write_page(struct nand_chip *chip, + const u8 *buf, + int oob_required, int page) +{ + int ret; + struct mtd_info *mtd = nand_to_mtd(chip); + void *oob_buf = oob_required ? 
chip->oob_poi : NULL; + u32 oob_len; + + if (oob_required) { + oob_len = mtd->oobsize; + phytium_nand_oob_write(mtd, chip, NULL, oob_buf, + oob_len, page, false); + } + + phytium_nfc_enable_hw_ecc(chip); + cond_delay(20*1000); + ret = phytium_nand_page_write_hwecc(mtd, chip, buf, NULL, + 0, page, false); + phytium_nfc_disable_hw_ecc(chip); + + return ret; +} + +static int phytium_nfc_hw_ecc_bch_write_oob_raw(struct nand_chip *chip, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + + return phytium_nand_oob_write(mtd, chip, NULL, chip->oob_poi, + mtd->oobsize, page, false); +} + +static int phytium_nfc_hw_ecc_bch_write_oob(struct nand_chip *chip, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip); + u32 oob_len = mtd->oobsize - phytium_nand->ecc.length; + + return phytium_nand_oob_write(mtd, chip, NULL, chip->oob_poi, + oob_len, page, false); +} + +static int phytium_nand_hw_ecc_ctrl_init(struct mtd_info *mtd, + struct nand_ecc_ctrl *ecc) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if ((mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) + return -EOPNOTSUPP; + + chip->ecc.algo = NAND_ECC_ALGO_BCH; + ecc->read_page_raw = phytium_nfc_hw_ecc_bch_read_page_raw; + ecc->read_page = phytium_nfc_hw_ecc_bch_read_page; + ecc->read_oob_raw = phytium_nfc_hw_ecc_bch_read_oob_raw; + ecc->read_oob = phytium_nfc_hw_ecc_bch_read_oob; + ecc->write_page_raw = phytium_nfc_hw_ecc_bch_write_page_raw; + ecc->write_page = phytium_nfc_hw_ecc_bch_write_page; + ecc->write_oob_raw = phytium_nfc_hw_ecc_bch_write_oob_raw; + ecc->write_oob = phytium_nfc_hw_ecc_bch_write_oob; + + return 0; +} + +static int phytium_nand_ecc_init(struct mtd_info *mtd, + struct nand_ecc_ctrl *ecc) +{ + int ret = 0; + + mtd_set_ooblayout(mtd, &phytium_nand_ooblayout_ops); + + switch (ecc->engine_type) { + case NAND_ECC_ENGINE_TYPE_ON_HOST: + ret = phytium_nand_hw_ecc_ctrl_init(mtd, ecc); + break; + case NAND_ECC_ENGINE_TYPE_NONE: + ecc->read_page_raw = phytium_nfc_hw_ecc_bch_read_page_raw; + ecc->read_oob_raw = phytium_nfc_hw_ecc_bch_read_oob; + ecc->write_page_raw = phytium_nfc_hw_ecc_bch_write_page_raw; + ecc->write_oob_raw = phytium_nfc_hw_ecc_bch_write_oob_raw; + ecc->read_page = ecc->read_page_raw; + ecc->read_oob = ecc->read_oob_raw; + ecc->write_page = ecc->write_page_raw; + ecc->write_oob = ecc->write_oob_raw; + break; + case NAND_ECC_ENGINE_TYPE_SOFT: + case NAND_ECC_ENGINE_TYPE_ON_DIE: + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static u8 bbt_pattern[] = {'P', 'H', 'Y', 'b', 't', '0' }; +static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'Y', 'H', 'P' }; + +static struct nand_bbt_descr bbt_main_descr = { + .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | + NAND_BBT_2BIT | NAND_BBT_VERSION, + .offs = 8, + .len = 6, + .veroffs = 14, + .maxblocks = 8, /* Last 8 blocks in each chip */ + .pattern = bbt_pattern +}; + +static struct nand_bbt_descr bbt_mirror_descr = { + .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | + NAND_BBT_2BIT | NAND_BBT_VERSION, + .offs = 8, + .len = 6, + .veroffs = 14, + .maxblocks = 8, /* Last 8 blocks in each chip */ + .pattern = bbt_mirror_pattern +}; + +static int phytium_nand_attach_chip(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip); + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + u32 value; + int ret = 0; + + if (nfc->caps->flash_bbt) + chip->bbt_options |= 
NAND_BBT_USE_FLASH; + + if (chip->bbt_options & NAND_BBT_USE_FLASH) { + /* + * We'll use a bad block table stored in-flash and don't + * allow writing the bad block marker to the flash. + */ + chip->bbt_options |= NAND_BBT_NO_OOB_BBM; + chip->bbt_td = &bbt_main_descr; + chip->bbt_md = &bbt_mirror_descr; + } + + if (chip->options & NAND_BUSWIDTH_16) + phytium_nand->ndcr |= NDCR0_WIDTH; + + /* + * On small page NANDs, only one cycle is needed to pass the + * column address. + */ + if (mtd->writesize <= 512) + phytium_nand->addr_cyc = 1; + else + phytium_nand->addr_cyc = 2; + + /* + * Now add the number of cycles needed to pass the row + * address. + * + * Addressing a chip using CS 2 or 3 would also need the third row + * cycle, but due to inconsistency in the documentation and lack of + * hardware to test this situation, this case is not supported. + */ + if (chip->options & NAND_ROW_ADDR_3) + phytium_nand->addr_cyc += 3; + else + phytium_nand->addr_cyc += 2; + + if (nfc->caps) { + if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) { + chip->ecc.size = nfc->caps->ecc_step_size ? + nfc->caps->ecc_step_size : + chip->ecc.size; + chip->ecc.strength = nfc->caps->ecc_strength ? + nfc->caps->ecc_strength : + chip->ecc.strength; + chip->ecc.bytes = 7; + } else { + chip->ecc.size = 512; + chip->ecc.strength = 1; + chip->ecc.bytes = 0; + } + chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; + } + + if (nfc->caps->hw_ver == 1) { + if (chip->ecc.strength == 0x04) + phytium_nand->ndcr |= NDCR0_ECC_STREN(4); + else if (chip->ecc.strength == 0x02) + phytium_nand->ndcr |= NDCR0_ECC_STREN(2); + else + phytium_nand->ndcr |= NDCR0_ECC_STREN(0); + } else { + if (chip->ecc.strength == 0x08) + phytium_nand->ndcr |= NDCR0_ECC_STREN(7); + else if (chip->ecc.strength == 0x04) + phytium_nand->ndcr |= NDCR0_ECC_STREN(3); + else if (chip->ecc.strength == 0x02) + phytium_nand->ndcr |= NDCR0_ECC_STREN(1); + else + phytium_nand->ndcr |= NDCR0_ECC_STREN(0); + } + + value = phytium_read(nfc, NDCR0); + value &= ~NDCR0_EN; + phytium_write(nfc, NDCR0, value); + + value &= ~NDCR0_ECC_STREN(7); + value |= phytium_nand->ndcr; + phytium_write(nfc, NDCR0, value | NDCR0_EN); + + ret = phytium_nand_ecc_init(mtd, &chip->ecc); + if (ret) { + dev_err(nfc->dev, "ECC init failed: %d\n", ret); + goto out; + } + + /* + * Subpage write is not available with hardware ECC; prohibit subpage + * reads as well, since subpage access from userspace would still be + * allowed and subpage writes, if used, would lead to numerous + * uncorrectable ECC errors. + */ + if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) + chip->options |= NAND_NO_SUBPAGE_WRITE; + + /* + * We keep the MTD name unchanged to avoid breaking platforms + * where the MTD cmdline parser is used and the bootloader + * has not been updated to use the new naming scheme.
+ */ + if (nfc->caps->legacy_of_bindings) + mtd->name = "phytium_nand-0"; + +out: + return ret; +} + +static void phytium_nand_chips_cleanup(struct phytium_nfc *nfc) +{ + struct phytium_nand_chip *entry, *temp; + + list_for_each_entry_safe(entry, temp, &nfc->chips, node) { + /* Unregister device */ + WARN_ON(mtd_device_unregister(nand_to_mtd(&entry->chip))); + nand_cleanup(&entry->chip); + list_del(&entry->node); + } +} + +static int phytium_nfc_init_dma(struct phytium_nfc *nfc) +{ + int ret; + + /* Keep buffers allocated by a previous init (e.g. across resume) */ + if (nfc->dma_buf) + return 0; + + ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + nfc->dsp_addr = dma_alloc_coherent(nfc->dev, PAGE_SIZE, + &nfc->dsp_phy_addr, GFP_KERNEL | GFP_DMA); + if (!nfc->dsp_addr) + return -ENOMEM; + + nfc->dma_buf = dma_alloc_coherent(nfc->dev, MAX_CHUNK_SIZE, + &nfc->dma_phy_addr, GFP_KERNEL | GFP_DMA); + if (!nfc->dma_buf) { + dma_free_coherent(nfc->dev, PAGE_SIZE, nfc->dsp_addr, nfc->dsp_phy_addr); + nfc->dsp_addr = NULL; + return -ENOMEM; + } + + dev_info(nfc->dev, "NFC address dsp_phy_addr:%llx, dma_phy_addr:%llx\n", + nfc->dsp_phy_addr, nfc->dma_phy_addr); + + return 0; +} + +static int phytium_nfc_init(struct phytium_nfc *nfc) +{ + u32 value; + int ret; + + nfc->inter_mode = ASYN_SDR; + nfc->timing_mode = ASY_MODE0; + + value = phytium_read(nfc, NDCR1); + value &= (~NDCR1_SAMPL_PHASE(0xFFFF)); + value &= ~NDCR1_ECC_DATA_FIRST_EN; + value |= NDCR1_SAMPL_PHASE(1); + value |= NDCR1_ECC_BYPASS; + phytium_write(nfc, NDCR1, value); + phytium_write(nfc, ND_INTERVAL_TIME, 1); + phytium_write(nfc, NDFIFO_LEVEL0, 4); + phytium_write(nfc, NDFIFO_LEVEL1, 4); + phytium_write(nfc, NDFIFO_CLR, 1); + phytium_write(nfc, ND_ERR_CLR, 1); + + /* Configure the DMA */ + ret = phytium_nfc_init_dma(nfc); + if (ret) + return ret; + + phytium_nfc_reset(nfc); + + value = phytium_read(nfc, NDCR0); + value &= (~NDCR0_IN_MODE(3)); + value |= NDCR0_IN_MODE(nfc->inter_mode); + value |= NDCR0_EN; + + phytium_write(nfc, NDCR0, value); + + nfc_ecc_errover = 0; + + return 0; +} + +static int phytium_nfc_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + unsigned int period_ns = 2; + const struct nand_sdr_timings *sdr; + struct phytium_nfc_timings nfc_tmg; + int read_delay; + + sdr = nand_get_sdr_timings(conf); + if (IS_ERR(sdr)) + return PTR_ERR(sdr); + + nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1; + nfc_tmg.tRH = nfc_tmg.tRP; + nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1; + nfc_tmg.tWH = nfc_tmg.tWP; + nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns); + nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1; + nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns); + dev_info(nfc->dev, "[nfc_tmg]tRP: %d, tRH:%d, tWP:%d tWH:%d\n", + nfc_tmg.tRP, nfc_tmg.tRH, nfc_tmg.tWP, nfc_tmg.tWH); + dev_info(nfc->dev, "[nfc_tmg]tCS: %d, tCH:%d, tADL:%d\n", + nfc_tmg.tCS, nfc_tmg.tCH, nfc_tmg.tADL); + + read_delay = sdr->tRC_min >= 30000 ?
+ MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH; + + nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns); + nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min), + period_ns) - 2; + nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min), + period_ns); + dev_info(nfc->dev, "[nfc_tmg]tAR: %d, tWHR:%d, tRHW:%d\n", + nfc_tmg.tAR, nfc_tmg.tWHR, nfc_tmg.tRHW); + + nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns); + + if (chipnr < 0) + return 0; + + if (nfc_tmg.tWP > 0x10) + nfc->timing_mode = ASY_MODE1; + else if (nfc_tmg.tWP < 0x0D) + nfc->timing_mode = ASY_MODE3; + + if (nfc->inter_mode == ONFI_DDR) + nfc->timing_mode = SYN_MODE3; + + phytium_nfc_default_data_interface(nfc); + + return 0; +} + +static int phytium_nand_chip_init(struct phytium_nfc *nfc) +{ + struct device *dev = nfc->dev; + struct phytium_nand_chip *phytium_nand; + struct mtd_info *mtd; + struct nand_chip *chip; + int ret; + + /* Alloc the nand chip structure */ + phytium_nand = devm_kzalloc(dev, sizeof(*phytium_nand), GFP_KERNEL); + if (!phytium_nand) + return -ENOMEM; + + phytium_nand->nsels = 1; + phytium_nand->selected_die = -1; + + chip = &phytium_nand->chip; + chip->controller = &nfc->controller; + chip->legacy.select_chip = phytium_nfc_select_chip; + phytium_nfc_default_data_interface(nfc); + + mtd = nand_to_mtd(chip); + mtd->dev.parent = dev; + mtd->owner = THIS_MODULE; + + /* + * Default to HW ECC engine mode. If the nand-ecc-mode property is given + * in the DT node, this entry will be overwritten in nand_scan_ident(). + */ + chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; + + chip->options |= NAND_BUSWIDTH_AUTO; + chip->options |= NAND_SKIP_BBTSCAN; + chip->bbt_options |= NAND_BBT_NO_OOB; + + ret = nand_scan(chip, phytium_nand->nsels); + if (ret) { + dev_err(dev, "could not scan the nand chip\n"); + return ret; + } + + if (nfc->caps->parts) { + ret = mtd_device_register(mtd, nfc->caps->parts, nfc->caps->nr_parts - 1); + } else if (dev->of_node) { + nand_set_flash_node(chip, dev->of_node); + ret = mtd_device_register(mtd, NULL, 0); + } else { + ret = -EINVAL; + } + + if (ret) { + dev_err(dev, "failed to register mtd device: %d\n", ret); + /* Unregister device */ + WARN_ON(mtd_device_unregister(mtd)); + nand_cleanup(mtd_to_nand(mtd)); + return ret; + } + + phytium_nand->ecc.length = phytium_nand_get_ecc_total(mtd, &chip->ecc); + phytium_nand->ecc.offset = mtd->oobsize - phytium_nand->ecc.length; + chip->ecc.total = phytium_nand_get_ecc_total(mtd, &chip->ecc); + + mtd_ooblayout_ecc(mtd, 0, &phytium_nand->ecc); + + dev_info(dev, "ooblayout ecc offset: %x, length: %x\n", + phytium_nand->ecc.offset, phytium_nand->ecc.length); + + list_add_tail(&phytium_nand->node, &nfc->chips); + return 0; +} + +static const struct nand_controller_ops phytium_nand_controller_ops = { + .attach_chip = phytium_nand_attach_chip, + .exec_op = phytium_nfc_exec_op, + .setup_interface = phytium_nfc_setup_interface, +}; + +int phytium_nand_init(struct phytium_nfc *nfc) +{ + int ret; + + nand_controller_init(&nfc->controller); + nfc->controller.ops = &phytium_nand_controller_ops; + INIT_LIST_HEAD(&nfc->chips); + spin_lock_init(&nfc->spinlock); + + /* Init the controller and then probe the chips */ + ret = phytium_nfc_init(nfc); + if (ret) + goto out; + + ret = phytium_nand_chip_init(nfc); + +out: + return ret; +} +EXPORT_SYMBOL_GPL(phytium_nand_init); + +int phytium_nand_remove(struct phytium_nfc *nfc) +{ + phytium_nand_chips_cleanup(nfc); + + return 0; +}
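+ +/* + * Worked example for phytium_nfc_setup_interface() above (numbers are + * illustrative, not taken from a datasheet): with tWC_min = 25000 ps and + * the 2 ns controller period, DIV_ROUND_UP(25000, 2) = 12500 ps, and + * TO_CYCLES(12500, 2) = DIV_ROUND_UP(12500 / 1000, 2) = 6 cycles, so + * nfc_tmg.tWP = 6 - 1 = 5. Note that TO_CYCLES truncates the ps-to-ns + * conversion before rounding up to whole clock cycles. + */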
+EXPORT_SYMBOL_GPL(phytium_nand_remove); + +static int phytium_nfc_wait_ndrun(struct nand_chip *chip) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + int ret = 0; + u32 val; + + ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val, + (val & NDSR_RB) == 0, + 0, 100 * 1000); + if (ret) { + dev_err(nfc->dev, "Timeout on NAND controller run mode\n"); + ret = -EAGAIN; + } + + return ret; +} + +int phytium_nand_prepare(struct phytium_nfc *nfc) +{ + struct phytium_nand_chip *chip = NULL; + + list_for_each_entry(chip, &nfc->chips, node) + phytium_nfc_wait_ndrun(&chip->chip); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nand_prepare); + +int phytium_nand_resume(struct phytium_nfc *nfc) +{ + nfc->selected_chip = NULL; + phytium_nfc_init(nfc); + phytium_nfc_default_data_interface(nfc); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nand_resume); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium NAND controller platform driver"); +MODULE_AUTHOR("Zhu Mingshuai "); diff --git a/drivers/mtd/nand/raw/phytium_nand.h b/drivers/mtd/nand/raw/phytium_nand.h new file mode 100644 index 0000000000000..58f46f6ec7de5 --- /dev/null +++ b/drivers/mtd/nand/raw/phytium_nand.h @@ -0,0 +1,449 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium NAND flash controller driver + * + * Copyright (C) 2020-2023, Phytium Technology, Co., Ltd. + */ +#ifndef PHYTIUM_NAND_H +#define PHYTIUM_NAND_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* NFC does not support transfers of larger chunks at a time */ +#define MAX_PAGE_NUM 16 +#define MAX_CHUNK_SIZE ((1024 + 76) * 16) + +#define POLL_PERIOD 0 +#define POLL_TIMEOUT 100000 +/* Interrupt maximum wait period in ms */ +#define IRQ_TIMEOUT 1000 + +/* Latency in clock cycles between SoC pins and NFC logic */ +#define MIN_RD_DEL_CNT 3 + +#define PHYTIUM_NFC_ADDR_MAX_LEN 5 +#define PHYTIUM_NFC_DSP_SIZE 16 + +/* NAND controller flash control register */ +#define NDCR0 0x00 +#define NDCR0_EN BIT(0) +#define NDCR0_WIDTH BIT(1) +#define NDCR0_IN_MODE(x) (min_t(u32, x, 0x3) << 2) +#define NDCR0_ECC_EN BIT(4) +#define NDCR0_ECC_STREN(x) (min_t(u32, x, 0x7) << 5) +#define NDCR0_SPARE_EN BIT(8) +#define NDCR0_SPARE_SIZE(x) (min_t(u32, x, 0xFFF) << 9) +#define NDCR0_GENERIC_FIELDS_MASK + +#define NDCR1 0x04 +#define NDCR1_SAMPL_PHASE(x) min_t(u32, x, 0xFFFF) +#define NDCR1_ECC_DATA_FIRST_EN BIT(16) +#define NDCR1_RB_SHARE_EN BIT(17) +#define NDCR1_ECC_BYPASS BIT(18) + +#define NDAR0 0x08 + +#define NDAR1 0x0C +#define NDAR1_H8(x) min_t(u32, x, 0xFF) +#define NDAR1_DMA_EN BIT(8) +#define NDAR1_EMPTY(x) (min_t(u32, x, 0x7F) << 9) +#define NDAR1_DMA_RLEN(x) (min_t(u32, x, 0xFF) << 9) +#define NDAR1_DMA_WLEN(x) (min_t(u32, x, 0xFF) << 9) + +#define NDTR0 0x10 +#define NDTR0_TCS_TCLS(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR0_TCLS_TWP(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR1 0x14 +#define NDTR1_TWH(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR1_TWP(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR2 0x18 +#define NDTR2_TCH_TCLH(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR2_TCLH_TWH(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR3 0x1c +#define NDTR3_TDQ_EN(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR3_TCH_TWH(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR4 0x20 +#define NDTR4_TWHR_SMX(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR4_TREH(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR5 0x24 +#define NDTR5_TRC(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR5_TADL_SMX(x) 
(min_t(u32, x, 0xFFFF) << 16) + +#define NDTR6 0x28 +#define NDTR6_TCAD_TCS_SMX(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR6_RES(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR7 0x2c +#define NDTR7_TCK(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR7_TDQ_EN(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR8 0x30 +#define NDTR8_TCAD_TCK_SMX(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR8_HF_TCK(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR9 0x34 +#define NDTR9_TWHR(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR9_TCCS_TCALS_SMX(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR10 0x38 +#define NDTR10_TCK(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR10_MTCK(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR11 0x3c +#define NDTR11_TCK_TCALS(x) (min_t(u32, x, 0xFFFF) << 16) +#define NDTR11_RES(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR12 0x40 +#define NDTR12_TWRCK(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR12_RES(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR13 0x44 +#define NDTR13_TWRHCA(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR13_TRLCA(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR14 0x48 +#define NDTR14_TWRHCE(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR14_RES(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR15 0x4c +#define NDTR15_TCDQSS_TWPRE_TDS(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR15_HFTDSC(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR16 0x50 +#define NDTR16_TWPST_TDH(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR16_TWPSTH(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR17 0x54 +#define NDTR17_TCS_TRPRE(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR17_TRELDQS(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR18 0x58 +#define NDTR18_TRPST_TDQSRE(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR18_RES(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDFIFO 0x5c +#define NDFIFO_REV (min_t(u32, x, 0) << 12) +#define NDFIFO_FULL BIT(11) +#define NDFIFO_EMP BIT(10) +#define NDFIFO_CNT(x) (min_t(u32, x, 0x3F) << 0) + +#define ND_INTERVAL_TIME 0x60 +#define NDCMD_INTERVAL_TIME 0x64 +#define NDFIFO_TIMEOUT 0x68 +#define NDFIFO_LEVEL0 0x6c +#define NDFIFO_LEVEL1 0x70 +#define NDWP 0x74 +#define NDFIFO_CLR 0x78 + +#define NDSR 0x7c +#define NDSR_BUSY BIT(0) +#define NDSR_DMA_BUSY BIT(1) +#define NDSR_DMA_PGFINISH BIT(2) +#define NDSR_DMA_FINISH BIT(3) +#define NDSR_FIFO_EMP BIT(4) +#define NDSR_FIFO_FULL BIT(5) +#define NDSR_FIFO_TIMEOUT BIT(6) +#define NDSR_CS(x) (min_t(u32, x, 0xF) << 7) +#define NDSR_CMD_PGFINISH BIT(11) +#define NDSR_PG_PGFINISH BIT(12) +#define NDSR_RE BIT(13) +#define NDSR_DQS BIT(14) +#define NDSR_RB BIT(15) +#define NDSR_ECC_BUSY BIT(16) +#define NDSR_ECC_FINISH BIT(17) +#define NDSR_ECC_RIGHT BIT(18) +#define NDSR_ECC_ERR BIT(19) +#define NDSR_ECC_ERROVER BIT(20) +#define NDSR_AXI_DSP_ERR BIT(21) +#define NDSR_AXI_RD_ERR BIT(22) +#define NDSR_AXI_WR_ERR BIT(23) +#define NDSR_RB_STATUS(x) (min_t(u32, x, 0xF) << 24) +#define NDSR_PROT_ERR BIT(28) +#define NDSR_ECC_BYPASS BIT(29) + +#define NDIR_MASK 0x80 +#define NDIR_BUSY_MASK BIT(0) +#define NDIR_DMA_BUSY_MASK BIT(1) +#define NDIR_DMA_PGFINISH_MASK BIT(2) +#define NDIR_DMA_FINISH_MASK BIT(3) +#define NDIR_FIFO_EMP_MASK BIT(4) +#define NDIR_FIFO_FULL_MASK BIT(5) +#define NDIR_FIFO_TIMEOUT_MASK BIT(6) +#define NDIR_CMD_FINISH_MASK BIT(7) +#define NDIR_PGFINISH_MASK BIT(8) +#define NDIR_RE_MASK BIT(9) +#define NDIR_DQS_MASK BIT(10) +#define NDIR_RB_MASK BIT(11) +#define NDIR_ECC_FINISH_MASK BIT(12) +#define NDIR_ECC_ERR_MASK BIT(13) + +#define NDIR 0x84 +#define NDIR_ALL_INT(x) GENMASK(x, 0) +#define NDIR_BUSY 
BIT(0) +#define NDIR_DMA_BUSY BIT(1) +#define NDIR_DMA_PGFINISH BIT(2) +#define NDIR_DMA_FINISH BIT(3) +#define NDIR_FIFO_EMP BIT(4) +#define NDIR_FIFO_FULL BIT(5) +#define NDIR_FIFO_TIMEOUT BIT(6) +#define NDIR_CMD_FINISH BIT(7) +#define NDIR_PGFINISH BIT(8) +#define NDIR_RE BIT(9) +#define NDIR_DQS BIT(10) +#define NDIR_RB BIT(11) +#define NDIR_ECC_FINISH BIT(12) +#define NDIR_ECC_ERR BIT(13) + +#define ND_DEBUG 0x88 + +#define ND_ERR_CLR 0x8c +#define ND_DSP_ERR_CLR BIT(0) +#define ND_AXI_RD_ERR_CLR BIT(1) +#define ND_AXI_WR_ERR_CLR BIT(2) +#define ND_ECC_ERR_CLR BIT(3) + +#define NDCR_PAGE_SZ(x) (x >= 2048 ? BIT(24) : 0) + +enum nand_inter_pro { + NAND_ONFI, + NAND_JEDEC, + NAND_OTHER, +}; + +enum nand_inter_mode { + ASYN_SDR, + ONFI_DDR, + TOG_ASYN_DDR, +}; + +enum asy_timing_mode { + ASY_MODE0, + ASY_MODE1, + ASY_MODE2, + ASY_MODE3, + ASY_MODE4, +}; + +enum onfi_syn_timing_mode { + SYN_MODE0 = 0x10, + SYN_MODE1, + SYN_MODE2, + SYN_MODE3, + SYN_MODE4, +}; + +/** + * NAND controller timings expressed in NAND Controller clock cycles + * + * @tRP: ND_nRE pulse width + * @tRH: ND_nRE high duration + * @tWP: ND_nWE pulse time + * @tWH: ND_nWE high duration + * @tCS: Enable signal setup time + * @tCH: Enable signal hold time + * @tADL: Address to write data delay + * @tAR: ND_ALE low to ND_nRE low delay + * @tWHR: ND_nWE high to ND_nRE low for status read + * @tRHW: ND_nRE high duration, read to write delay + * @tR: ND_nWE high to ND_nRE low for read + */ +struct phytium_nfc_timings { + u16 tRP; + u16 tRH; + u16 tWP; /* NDTR1_TWP */ + u16 tWH; /* NDTR1_TWH */ + u16 tCS; + u16 tCH; + u16 tADL; + u16 tAR; + u16 tWHR; + u16 tRHW; + u16 tR; +}; + +/** + * NAND chip structure: stores NAND chip device related information + * + * @chip: Base NAND chip structure + * @node: Used to store NAND chips into a list + * @ndcr: Controller register value for this NAND chip + * @ndtr0: Timing registers 0 value for this NAND chip + * @ndtr1: Timing registers 1 value for this NAND chip + * @addr_cyc: Number of address cycles used by this NAND chip + * @selected_die: Current active CS + * @nsels: Number of CS lines required by the NAND chip + * @ecc: ECC region (offset and length) within the OOB area + */ +struct phytium_nand_chip { + struct nand_chip chip; + struct list_head node; + u32 ndcr; + u32 ndtr0; + u32 ndtr1; + int addr_cyc; + int selected_die; + unsigned int nsels; + struct mtd_oob_region ecc; +}; + +/** + * NAND controller capabilities for distinction between compatible strings + * + * @max_cs_nb: Number of Chip Select lines available + * @max_rb_nb: Number of Ready/Busy lines available + * @legacy_of_bindings: Indicates if DT parsing must be done using the old + * fashioned way + * @flash_bbt: Use a flash-based bad block table + * @ecc_strength: Default ECC strength (correctable bits per ECC step) + * @ecc_step_size: Default ECC step size in bytes + * @parts: Predefined MTD partitions, if any + * @nr_parts: Number of entries in @parts + */ +struct phytium_nfc_caps { + unsigned int hw_ver; + unsigned int int_mask_bits; + unsigned int max_cs_nb; + unsigned int max_rb_nb; + bool legacy_of_bindings; + bool flash_bbt; + int ecc_strength; + int ecc_step_size; + struct mtd_partition *parts; + unsigned int nr_parts; +}; + +/** + * NAND controller structure: stores Phytium NAND controller information + * + * @controller: Base controller structure + * @dev: Parent device (used to print error messages) + * @regs: NAND controller registers + * @chips: List containing all the NAND chips attached to + * this NAND controller + * @caps: NAND controller capabilities for each compatible string + * @dma_buf: Bounce buffer for DMA transfers + */ +struct phytium_nfc { + struct nand_controller
controller; + struct device *dev; + void __iomem *regs; + int irq; + struct list_head chips; + struct nand_chip *selected_chip; + struct phytium_nfc_caps *caps; + + void *dsp_addr; + dma_addr_t dsp_phy_addr; + + void *dma_buf; + u32 dma_offset; + dma_addr_t dma_phy_addr; + + enum nand_inter_pro inter_pro; + enum nand_inter_mode inter_mode; + u32 timing_mode; + + spinlock_t spinlock; +}; + +/** + * Derives a duration in numbers of clock cycles. + * + * @ps: Duration in pico-seconds + * @period_ns: Clock period in nano-seconds + * + * Convert the duration in nano-seconds, then divide by the period and + * return the number of clock periods. + */ +#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns)) +#define TO_CYCLES64(ps, period_ns) (DIV_ROUND_UP_ULL(div_u64(ps, 1000), \ + period_ns)) + +struct phytium_nfc_cmd_ctrl { + u16 csel:4; + u16 dbc:1; + u16 addr_cyc:3; + u16 nc:1; +#define TYPE_RESET 0x00 +#define TYPE_SET_FTR 0x01 +#define TYPE_GET_FTR 0x02 +#define TYPE_READ_ID 0x03 +#define TYPE_PAGE_PRO 0x04 +#define TYPE_ERASE 0x05 +#define TYPE_READ 0x06 +#define TYPE_TOGGLE 0x07 +#define TYPE_READ_PARAM 0x02 +#define TYPE_READ_STATUS 0x03 +#define TYPE_CH_READ_COL 0x03 +#define TYPE_CH_ROW_ADDR 0x01 +#define TYPE_CH_WR_COL 0x01 + u16 cmd_type:4; + u16 dc:1; + u16 auto_rs:1; + u16 ecc_en:1; +}; + +/** + * NAND driver structure filled during the parsing of the ->exec_op() subop + * subset of instructions. + * + * @cle_ale_delay_ns: Optional delay after the last CMD or ADDR cycle + * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin + * @rdy_delay_ns: Optional delay after waiting for the RB pin + * @data_delay_ns: Optional delay after the data xfer + * @data_instr_idx: Index of the data instruction in the subop + * @data_instr: Pointer to the data instruction in the subop + */ +struct phytium_nfc_op { + u8 cmd[2]; + union { + u16 ctrl; + struct phytium_nfc_cmd_ctrl nfc_ctrl; + } cmd_ctrl; + u8 addr[PHYTIUM_NFC_ADDR_MAX_LEN]; + u16 page_cnt; + u8 mem_addr_first[PHYTIUM_NFC_ADDR_MAX_LEN]; + + u32 cmd_len; + u32 addr_len; + + u32 cle_ale_delay_ns; + u32 rdy_timeout_ms; + u32 rdy_delay_ns; + u32 data_delay_ns; + u32 data_instr_idx; + struct nand_op_instr *data_instr; +} __packed; + +#define TIMING_ASY_NUM 12 +#define TIMING_SYN_NUM 14 +#define TIMING_TOG_NUM 12 + +#define TMP_DMA_DEBUG 0 /* Temporary dma space */ + +int phytium_nand_init(struct phytium_nfc *nfc); +int phytium_nand_remove(struct phytium_nfc *nfc); +int phytium_nand_prepare(struct phytium_nfc *nfc); +int phytium_nand_suspend(struct phytium_nfc *nfc); +int phytium_nand_resume(struct phytium_nfc *nfc); + +irqreturn_t phytium_nfc_isr(int irq, void *dev_id); + +#endif /* NAND_PHYTIUM_NAND_H */ diff --git a/drivers/mtd/nand/raw/phytium_nand_pci.c b/drivers/mtd/nand/raw/phytium_nand_pci.c new file mode 100644 index 0000000000000..03dab1faa5f33 --- /dev/null +++ b/drivers/mtd/nand/raw/phytium_nand_pci.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI driver for Phytium NAND flash controller + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ +#include +#include +#include + +#include "phytium_nand.h" + +#define DRV_NAME "phytium_nand_pci" + +static struct mtd_partition partition_info[] = { + { + .name = "Flash partition 1", + .offset = 0x0000000, + .size = 0x4000000 }, + { + .name = "Flash partition 2", + .offset = 0x4000000, + .size = 0x4000000 }, + { + .name = "Flash partition 3", + .offset = 0x8000000, + .size = 0x8000000 }, + { + .name = "Flash partition 4", + .offset = 0x10000000, + .size = 0x2000000 }, + { + .name = "Flash partition 5", + .offset = 0x12000000, + .size = 0x2000000 }, +}; + +static struct phytium_nfc_caps x100_nfc_caps = { + .hw_ver = 1, + .int_mask_bits = 13, + .max_cs_nb = 2, + .max_rb_nb = 1, + .legacy_of_bindings = true, + .ecc_strength = 4, + .ecc_step_size = 512, + .nr_parts = 5, + .parts = partition_info, +}; + +static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct phytium_nfc *nfc; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, 0x1, pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + return ret; + } + + pci_set_master(pdev); + pci_try_set_mwi(pdev); + + nfc = devm_kzalloc(&pdev->dev, sizeof(struct phytium_nfc), + GFP_KERNEL); + if (!nfc) + return -ENOMEM; + + nfc->dev = &pdev->dev; + nfc->regs = pcim_iomap_table(pdev)[0]; + nfc->irq = pdev->irq; + nfc->caps = &x100_nfc_caps; + + ret = devm_request_irq(nfc->dev, nfc->irq, phytium_nfc_isr, + IRQF_SHARED, "phytium-nfc-pci", nfc); + if (ret) { + dev_err(nfc->dev, "Failed to register NFC interrupt.\n"); + return ret; + } + + ret = phytium_nand_init(nfc); + if (ret) + return ret; + + pci_set_drvdata(pdev, nfc); + + return ret; +} + +static void phytium_pci_remove(struct pci_dev *pdev) +{ + struct phytium_nfc *nfc = pci_get_drvdata(pdev); + int ret; + + ret = phytium_nand_remove(nfc); + if (ret) + dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret); +} + +static int __maybe_unused phytium_nfc_prepare(struct device *dev) +{ + struct pci_dev *pci = to_pci_dev(dev); + struct phytium_nfc *nfc = pci_get_drvdata(pci); + int ret; + + ret = phytium_nand_prepare(nfc); + + return ret; +} + +static int __maybe_unused phytium_nfc_resume(struct device *dev) +{ + struct pci_dev *pci = to_pci_dev(dev); + struct phytium_nfc *nfc = pci_get_drvdata(pci); + int ret; + + ret = phytium_nand_resume(nfc); + + return ret; +} + +static const struct dev_pm_ops phytium_pci_dev_pm_ops = { + .prepare = phytium_nfc_prepare, + .resume = phytium_nfc_resume, +}; + +static const struct pci_device_id phytium_pci_id_table[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc29) }, + { } +}; +MODULE_DEVICE_TABLE(pci, phytium_pci_id_table); + +static struct pci_driver phytium_pci_driver = { + .name = DRV_NAME, + .id_table = phytium_pci_id_table, + .probe = phytium_pci_probe, + .remove = phytium_pci_remove, + .driver = { + .pm = &phytium_pci_dev_pm_ops, + }, +}; +module_pci_driver(phytium_pci_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("PCI driver for Phytium NAND controller"); +MODULE_AUTHOR("Zhu Mingshuai "); diff --git a/drivers/mtd/nand/raw/phytium_nand_plat.c b/drivers/mtd/nand/raw/phytium_nand_plat.c new file mode 100644 index 0000000000000..3065cf7691909 --- /dev/null +++ b/drivers/mtd/nand/raw/phytium_nand_plat.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Core driver for Phytium NAND flash controller + * + * Copyright (C) 2020-2023, Phytium Technology Co., Ltd.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "phytium_nand.h" + +#define DRV_NAME "phytium_nand_plat" + +static int phytium_nfc_plat_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *r; + struct phytium_nfc *nfc; + unsigned int ecc_strength = 0; + unsigned int ecc_step_size = 0; + int ret; + + nfc = devm_kzalloc(&pdev->dev, sizeof(struct phytium_nfc), + GFP_KERNEL); + if (!nfc) + return -ENOMEM; + + nfc->dev = dev; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + nfc->regs = devm_ioremap_resource(dev, r); + if (IS_ERR(nfc->regs)) + return PTR_ERR(nfc->regs); + + dev_info(nfc->dev, "NFC register address :%p, phy address:%llx\n", + nfc->regs, r->start); + + nfc->irq = platform_get_irq(pdev, 0); + if (nfc->irq < 0) { + dev_err(dev, "failed to retrieve irq\n"); + return nfc->irq; + } + + ret = devm_request_irq(dev, nfc->irq, phytium_nfc_isr, 0, + "phytium-nfc-plat", nfc); + if (ret) { + dev_err(nfc->dev, "Failed to register NFC interrupt.\n"); + return ret; + } + + nfc->caps = devm_kzalloc(dev, sizeof(struct phytium_nfc_caps), GFP_KERNEL); + if (!nfc->caps) + return -ENOMEM; + + /* Currently hard-coded parameters */ + nfc->caps->hw_ver = 2; + nfc->caps->int_mask_bits = 17; + nfc->caps->max_cs_nb = 4; + nfc->caps->max_rb_nb = 4; + nfc->caps->nr_parts = 0; + nfc->caps->parts = NULL; + + device_property_read_u32(&pdev->dev, "nand-ecc-strength", &ecc_strength); + nfc->caps->ecc_strength = ecc_strength ? ecc_strength : 8; + + device_property_read_u32(&pdev->dev, "nand-ecc-step-size", &ecc_step_size); + nfc->caps->ecc_step_size = ecc_step_size ? ecc_step_size : 512; + + ret = phytium_nand_init(nfc); + if (ret) + return ret; + + platform_set_drvdata(pdev, nfc); + + return ret; +} + +static void phytium_nfc_plat_remove(struct platform_device *pdev) +{ + struct phytium_nfc *nfc = platform_get_drvdata(pdev); + phytium_nand_remove(nfc); +} + +static int __maybe_unused phytium_nfc_plat_prepare(struct device *dev) +{ + struct phytium_nfc *nfc = dev_get_drvdata(dev); + + return phytium_nand_prepare(nfc); +} + +static int __maybe_unused phytium_nfc_plat_resume(struct device *dev) +{ + struct phytium_nfc *nfc = dev_get_drvdata(dev); + int ret; + + ret = phytium_nand_resume(nfc); + + return ret; +} + +static const struct dev_pm_ops phytium_dev_pm_ops = { + .prepare = phytium_nfc_plat_prepare, + .resume = phytium_nfc_plat_resume, +}; + +#ifdef CONFIG_OF +static const struct of_device_id phytium_nfc_of_ids[] = { + { .compatible = "phytium,nfc", }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_nfc_of_ids); +#endif + +static struct platform_driver phytium_nfc_plat_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = phytium_nfc_of_ids, + .pm = &phytium_dev_pm_ops, + }, + .probe = phytium_nfc_plat_probe, + .remove = phytium_nfc_plat_remove, +}; +module_platform_driver(phytium_nfc_plat_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium NAND controller Platform driver"); +MODULE_AUTHOR("Zhu Mingshuai "); diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index d43d566946676..d61394aa4a01b 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -225,6 +225,7 @@ source "drivers/net/can/ifi_canfd/Kconfig" source "drivers/net/can/m_can/Kconfig" source "drivers/net/can/mscan/Kconfig" source "drivers/net/can/peak_canfd/Kconfig" +source "drivers/net/can/phytium/Kconfig" source "drivers/net/can/rcar/Kconfig" source "drivers/net/can/rockchip/Kconfig"
source "drivers/net/can/sja1000/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 56138d8ddfd23..07052edc22107 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -33,5 +33,5 @@ obj-$(CONFIG_CAN_SJA1000) += sja1000/ obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o - +obj-$(CONFIG_CAN_PHYTIUM) += phytium/ subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG diff --git a/drivers/net/can/phytium/Kconfig b/drivers/net/can/phytium/Kconfig new file mode 100644 index 0000000000000..a23216d23c226 --- /dev/null +++ b/drivers/net/can/phytium/Kconfig @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0-only +menuconfig CAN_PHYTIUM + tristate "Phytium CAN support" + help + Say Y here if you want support for the Phytium CAN controller framework. + This is common support for devices that embed the Phytium CAN IP. + + To compile this driver as a module, choose M here: the module will + be called phytium_can. + +if CAN_PHYTIUM + +config CAN_PHYTIUM_PLATFORM + tristate "Phytium CAN support for io-mapped devices" + depends on HAS_IOMEM + help + Say Y here if you want support for the IO-mapped Phytium CAN controller. + This support is for devices that have the Phytium CAN controller IP + embedded into the device and the IP is IO-mapped to the processor. + + To compile this driver as a module, choose M here: the module will + be called phytium_can_platform. + +config CAN_PHYTIUM_PCI + tristate "Phytium CAN support for PCI devices" + depends on PCI + help + Say Y here if you want support for a Phytium CAN controller connected + to the PCI bus. This support is for devices that have the Phytium CAN + controller IP embedded into a PCI device. + + To compile this driver as a module, choose M here: the module will + be called phytium_can_pci. +endif diff --git a/drivers/net/can/phytium/Makefile b/drivers/net/can/phytium/Makefile new file mode 100644 index 0000000000000..7ef554fca58e4 --- /dev/null +++ b/drivers/net/can/phytium/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the Phytium CAN controller drivers. +# +# + +obj-$(CONFIG_CAN_PHYTIUM) += phytium_can.o +obj-$(CONFIG_CAN_PHYTIUM_PLATFORM) += phytium_can_platform.o +obj-$(CONFIG_CAN_PHYTIUM_PCI) += phytium_can_pci.o diff --git a/drivers/net/can/phytium/phytium_can.c b/drivers/net/can/phytium/phytium_can.c new file mode 100644 index 0000000000000..ad90e4fd1e263 --- /dev/null +++ b/drivers/net/can/phytium/phytium_can.c @@ -0,0 +1,1222 @@ +// SPDX-License-Identifier: GPL-2.0 +/* CAN bus driver for Phytium CAN controller + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */ + +#include + +#include "phytium_can.h" + +/* register definition */ +enum phytium_can_reg { + CAN_CTRL = 0x00, /* Global control register */ + CAN_INTR = 0x04, /* Interrupt register */ + CAN_ARB_RATE_CTRL = 0x08, /* Arbitration rate control register */ + CAN_DAT_RATE_CTRL = 0x0c, /* Data rate control register */ + CAN_ACC_ID0 = 0x10, /* Acceptance identifier0 register */ + CAN_ACC_ID1 = 0x14, /* Acceptance identifier1 register */ + CAN_ACC_ID2 = 0x18, /* Acceptance identifier2 register */ + CAN_ACC_ID3 = 0x1c, /* Acceptance identifier3 register */ + CAN_ACC_ID0_MASK = 0x20, /* Acceptance identifier0 mask register */ + CAN_ACC_ID1_MASK = 0x24, /* Acceptance identifier1 mask register */ + CAN_ACC_ID2_MASK = 0x28, /* Acceptance identifier2 mask register */ + CAN_ACC_ID3_MASK = 0x2c, /* Acceptance identifier3 mask register */ + CAN_XFER_STS = 0x30, /* Transfer status register */ + CAN_PHYTIUM_ERR_CNT = 0x34, /* Error counter register */ + CAN_FIFO_CNT = 0x38, /* FIFO counter register */ + CAN_DMA_CTRL = 0x3c, /* DMA request control register */ + CAN_XFER_EN = 0x40, /* Transfer enable register */ + CAN_INTR1 = 0x44, /* Interrupt register 1 */ + CAN_FRM_INFO = 0x48, /* Frame valid number register */ + CAN_TIME_OUT = 0x4c, /* Timeout register */ + CAN_TIME_OUT_CNT = 0x50, /* Timeout counter register */ + CAN_INTR2 = 0x54, /* Interrupt register 2 */ + CAN_TX_FIFO = 0x100, /* TX FIFO shadow register */ + CAN_RX_FIFO = 0x200, /* RX FIFO shadow register */ + CAN_RX_INFO_FIFO = 0x300, /* RX information FIFO shadow register */ + CAN_PIDR4 = 0xfd0, /* Peripheral Identification Register 4 */ + CAN_PIDR0 = 0xfe0, /* Peripheral Identification Register 0 */ + CAN_PIDR1 = 0xfe4, /* Peripheral Identification Register 1 */ + CAN_PIDR2 = 0xfe8, /* Peripheral Identification Register 2 */ + CAN_PIDR3 = 0xfec, /* Peripheral Identification Register 3 */ + CAN_CIDR0 = 0xff0, /* Component Identification Register 0 */ + CAN_CIDR1 = 0xff4, /* Component Identification Register 1 */ + CAN_CIDR2 = 0xff8, /* Component Identification Register 2 */ + CAN_CIDR3 = 0xffc, /* Component Identification Register 3 */ +}; + +/* Global control register (CTRL) */ +#define CTRL_XFER BIT(0) /* Transfer enable */ +#define CTRL_TXREQ BIT(1) /* Transmit request */ +#define CTRL_AIME BIT(2) /* Acceptance identifier mask enable */ +#define CTRL_TTS BIT(3) /* Transmit trigger strategy */ +#define CTRL_RST BIT(7) /* Write 1 to soft reset and self clear */ +#define CTRL_RFEIDF BIT(8) /* Allow RX frame end interrupt during ID filtered frame */ +#define CTRL_RFEDT BIT(9) /* Allow RX frame end interrupt during TX frame */ +#define CTRL_IOF BIT(10) /* Ignore overload flag internally */ +#define CTRL_FDCRC BIT(11) /* CANFD CRC mode */ + +/* Interrupt register (INTR) */ +#define INTR_BOIS BIT(0) /* Bus off interrupt status */ +#define INTR_PWIS BIT(1) /* Passive warning interrupt status */ +#define INTR_PEIS BIT(2) /* Passive error interrupt status */ +#define INTR_RFIS BIT(3) /* RX FIFO full interrupt status */ +#define INTR_TFIS BIT(4) /* TX FIFO empty interrupt status */ +#define INTR_REIS BIT(5) /* RX frame end interrupt status */ +#define INTR_TEIS BIT(6) /* TX frame end interrupt status */ +#define INTR_EIS BIT(7) /* Error interrupt status */ +#define INTR_BOIE BIT(8) /* Bus off interrupt enable */ +#define INTR_PWIE BIT(9) /* Passive warning interrupt enable */ +#define INTR_PEIE BIT(10) /* Passive error interrupt enable */ +#define INTR_RFIE BIT(11) /* RX FIFO full interrupt enable */ +#define INTR_TFIE BIT(12) /* TX FIFO empty 
interrupt enable */ +#define INTR_REIE BIT(13) /* RX frame end interrupt enable */ +#define INTR_TEIE BIT(14) /* TX frame end interrupt enable */ +#define INTR_EIE BIT(15) /* Error interrupt enable */ +#define INTR_BOIC BIT(16) /* Bus off interrupt clear */ +#define INTR_PWIC BIT(17) /* Passive warning interrupt clear */ +#define INTR_PEIC BIT(18) /* Passive error interrupt clear */ +#define INTR_RFIC BIT(19) /* RX FIFO full interrupt clear */ +#define INTR_TFIC BIT(20) /* TX FIFO empty interrupt clear */ +#define INTR_REIC BIT(21) /* RX frame end interrupt clear */ +#define INTR_TEIC BIT(22) /* TX frame end interrupt clear */ +#define INTR_EIC BIT(23) /* Error interrupt clear */ + +#define INTR_STATUS_MASK (INTR_BOIS | INTR_PWIS | INTR_PEIS | INTR_RFIS | \ + INTR_TFIS | INTR_REIS | INTR_TEIS | INTR_EIS) + +#define INTR_EN_MASK (INTR_BOIE | INTR_PWIE | INTR_PEIE | INTR_RFIE | \ + INTR_TFIE | INTR_REIE | INTR_TEIE | INTR_EIE) + +#define INTR_CLEAR_MASK (INTR_BOIC | INTR_PWIC | INTR_PEIC | INTR_RFIC | \ + INTR_TFIC | INTR_REIC | INTR_TEIC | INTR_EIC) + +/* Arbitration rate control register (ARB_RATE_CTRL) */ +#define ARB_RATE_CTRL_ARJW GENMASK(1, 0) /* Arbitration field resync jump width */ +#define ARB_RATE_CTRL_APRS GENMASK(4, 2) /* Arbitration field propagation segment */ +#define ARB_RATE_CTRL_APH1S GENMASK(7, 5) /* Arbitration field phase1 segment */ +#define ARB_RATE_CTRL_APH2S GENMASK(10, 8) /* Arbitration field phase2 segment */ +#define ARB_RATE_CTRL_APD GENMASK(28, 16) /* Arbitration field prescaler divider */ + +/* Data rate control register (DAT_RATE_CTRL) */ +#define DAT_RATE_CTRL_DRJW GENMASK(1, 0) /* Data field resync jump width */ +#define DAT_RATE_CTRL_DPRS GENMASK(4, 2) /* Data field propagation segment */ +#define DAT_RATE_CTRL_DPH1S GENMASK(7, 5) /* Data field phase1 segment */ +#define DAT_RATE_CTRL_DPH2S GENMASK(10, 8) /* Data field phase2 segment */ +#define DAT_RATE_CTRL_DPD GENMASK(28, 16) /* Data field prescaler divider */ + +/* Acceptance identifierX register (ACC_IDX) */ +#define ACC_IDX_AID_MASK GENMASK(28, 0) /* Acceptance identifier */ + +/* Acceptance identifier0 mask register (ACC_ID0_MASK) */ +#define ACC_IDX_MASK_AID_MASK GENMASK(28, 0) /* Acceptance identifier mask */ + +/* Transfer status register (XFER_STS) */ +#define XFER_STS_FRAS GENMASK(2, 0) /* Frame status */ +#define XFER_STS_FIES GENMASK(7, 3) /* Field status */ +#define XFER_STS_FIES_IDLE (0x0) /* idle */ +#define XFER_STS_FIES_ARBITRATION (0x1) /* arbitration */ +#define XFER_STS_FIES_TX_CTRL (0x2) /* transmit control */ +#define XFER_STS_FIES_TX_DATA (0x3) /* transmit data */ +#define XFER_STS_FIES_TX_CRC (0x4) /* transmit crc */ +#define XFER_STS_FIES_TX_FRM (0x5) /* transmit frame */ +#define XFER_STS_FIES_RX_CTRL (0x6) /* receive control */ +#define XFER_STS_FIES_RX_DATA (0x7) /* receive data */ +#define XFER_STS_FIES_RX_CRC (0x8) /* receive crc */ +#define XFER_STS_FIES_RX_FRM (0x9) /* receive frame */ +#define XFER_STS_FIES_INTERMISSION (0xa) /* intermission */ +#define XFER_STS_FIES_TX_SUSPD (0xb) /* transmit suspend */ +#define XFER_STS_FIES_BUS_IDLE (0xc) /* bus idle */ +#define XFER_STS_FIES_OVL_FLAG (0xd) /* overload flag */ +#define XFER_STS_FIES_OVL_DLM (0xe) /* overload delimiter */ +#define XFER_STS_FIES_ERR_FLAG (0xf) /* error flag */ +#define XFER_STS_FIES_ERR_DLM (0x10) /* error delimiter */ +#define XFER_STS_FIES_BUS_OFF (0x11) /* bus off */ +#define XFER_STS_TS BIT(8) /* Transmit status */ +#define XFER_STS_RS BIT(9) /* Receive status */ +#define XFER_STS_XFERS BIT(10) /* 
Transfer status */ + +/* Error counter register (ERR_CNT) */ +#define ERR_CNT_REC GENMASK(8, 0) /* Receive error counter */ +#define ERR_CNT_TEC GENMASK(24, 16) /* Transmit error counter */ + +/* FIFO counter register (FIFO_CNT) */ +#define FIFO_CNT_RFN GENMASK(6, 0) /* Receive FIFO valid data number */ +#define FIFO_CNT_TFN GENMASK(22, 16) /* Transmit FIFO valid data number */ + +/* DMA request control register (DMA_CTRL) */ +#define DMA_CTRL_RFTH GENMASK(5, 0) /* Receive FIFO DMA request threshold */ +#define DMA_CTRL_RFRE BIT(6) /* Receive FIFO DMA request enable */ +#define DMA_CTRL_TFTH GENMASK(21, 16) /* Transmit FIFO DMA request threshold */ +#define DMA_CTRL_TFRE BIT(22) /* Transmit FIFO DMA request enable */ + +/* Transfer enable register (XFER_EN) */ +#define XFER_EN_XFER BIT(0) /* Transfer enable */ + +/* Interrupt register 1 (INTR1) */ +#define INTR1_RF1IS BIT(0) /* RX FIFO 1/4 interrupt status */ +#define INTR1_RF2IS BIT(1) /* RX FIFO 1/2 interrupt status */ +#define INTR1_RF3IS BIT(2) /* RX FIFO 3/4 interrupt status */ +#define INTR1_RF4IS BIT(3) /* RX FIFO full interrupt status */ +#define INTR1_TF1IS BIT(4) /* TX FIFO 1/4 interrupt status */ +#define INTR1_TF2IS BIT(5) /* TX FIFO 1/2 interrupt status */ +#define INTR1_TF3IS BIT(6) /* TX FIFO 3/4 interrupt status */ +#define INTR1_TF4IS BIT(7) /* TX FIFO empty interrupt status */ +#define INTR1_RF1IE BIT(8) /* RX FIFO 1/4 interrupt enable */ +#define INTR1_RF2IE BIT(9) /* RX FIFO 1/2 interrupt enable */ +#define INTR1_RF3IE BIT(10) /* RX FIFO 3/4 interrupt enable */ +#define INTR1_RF4IE BIT(11) /* RX FIFO full interrupt enable */ +#define INTR1_TF1IE BIT(12) /* TX FIFO 1/4 interrupt enable */ +#define INTR1_TF2IE BIT(13) /* TX FIFO 1/2 interrupt enable */ +#define INTR1_TF3IE BIT(14) /* TX FIFO 3/4 interrupt enable */ +#define INTR1_TF4IE BIT(15) /* TX FIFO empty interrupt enable */ +#define INTR1_RF1IC BIT(16) /* RX FIFO 1/4 interrupt clear */ +#define INTR1_RF2IC BIT(17) /* RX FIFO 1/2 interrupt clear */ +#define INTR1_RF3IC BIT(18) /* RX FIFO 3/4 interrupt clear */ +#define INTR1_RF4IC BIT(19) /* RX FIFO full interrupt clear */ +#define INTR1_TF1IC BIT(20) /* TX FIFO 1/4 interrupt clear */ +#define INTR1_TF2IC BIT(21) /* TX FIFO 1/2 interrupt clear */ +#define INTR1_TF3IC BIT(22) /* TX FIFO 3/4 interrupt clear */ +#define INTR1_TF4IC BIT(23) /* TX FIFO empty interrupt clear */ +#define INTR1_RF1RIS BIT(24) /* RX FIFO 1/4 raw interrupt status */ +#define INTR1_RF2RIS BIT(25) /* RX FIFO 1/2 raw interrupt status */ +#define INTR1_RF3RIS BIT(26) /* RX FIFO 3/4 raw interrupt status */ +#define INTR1_RF4RIS BIT(27) /* RX FIFO full raw interrupt status */ +#define INTR1_TF1RIS BIT(28) /* TX FIFO 1/4 raw interrupt status */ +#define INTR1_TF2RIS BIT(29) /* TX FIFO 1/2 raw interrupt status */ +#define INTR1_TF3RIS BIT(30) /* TX FIFO 3/4 raw interrupt status */ +#define INTR1_TF4RIS BIT(31) /* TX FIFO empty raw interrupt status */ + +/* Frame valid number register (FRM_INFO) */ +#define FRM_INFO_RXFC GENMASK(5, 0) /* Valid frame number in RX FIFO */ +#define FRM_INFO_SSPD GENMASK(31, 16) /* Secondary sample point delay */ + +/* Interrupt register 2 (INTR2) */ +#define INTR2_TOIS BIT(0) /* RX FIFO time out interrupt status */ +#define INTR2_TOIM BIT(8) /* RX FIFO time out interrupt mask */ +#define INTR2_TOIC BIT(16) /* RX FIFO time out interrupt clear */ +#define INTR2_TORIS BIT(24) /* RX FIFO time out raw interrupt status */ + +/* RX information FIFO shadow register (RX_INFO_FIFO) */ +#define RX_INFO_FIFO_WNORF GENMASK(4, 0) /* 
Word (4-byte) number of current receive frame */ +#define RX_INFO_FIFO_RORF BIT(5) /* RTR value of current receive frame */ +#define RX_INFO_FIFO_FORF BIT(6) /* FDF value of current receive frame */ +#define RX_INFO_FIFO_IORF BIT(7) /* IDE value of current receive frame */ + +/* Arbitration Bits */ +#define CAN_ID1_MASK GENMASK(31, 21) /* Base identifier */ +/* Standard Remote Transmission Request */ +#define CAN_ID1_RTR_MASK BIT(20) +/* Extended frame Substitute Remote Request */ +#define CAN_ID2_SRR_MASK BIT(20) +#define CAN_IDE_MASK BIT(19) /* Identifier extension flag */ +#define CAN_ID2_MASK GENMASK(18, 1) /* Identifier extension */ +/* Extended frames remote TX request */ +#define CAN_ID2_RTR_MASK BIT(0) +#define CAN_ID1_FDF_MASK BIT(18) +#define CAN_ID1_DLC_MASK GENMASK(17, 14) +#define CANFD_ID1_BRS_MASK BIT(16) +#define CANFD_ID1_ESI_MASK BIT(15) +#define CANFD_ID1_DLC_MASK GENMASK(14, 11) + +#define CAN_ID2_FDF_MASK BIT(31) +#define CAN_ID2_DLC_MASK GENMASK(29, 26) +#define CANFD_ID2_BRS_MASK BIT(29) +#define CANFD_ID2_ESI_MASK BIT(28) +#define CANFD_ID2_DLC_MASK GENMASK(27, 24) + +#define CAN_ID1_DLC_OFF 14 +#define CANFD_ID1_DLC_OFF 11 +#define CAN_ID2_DLC_OFF 26 +#define CANFD_ID2_DLC_OFF 24 + +#define CAN_IDR_ID1_SHIFT 21 /* Standard Message Identifier */ +#define CAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */ +#define CAN_IDR_SDLC_SHIFT 14 +#define CAN_IDR_EDLC_SHIFT 26 + +/* CANFD Standard msg padding 1 */ +#define CANFD_IDR_PAD_MASK 0x000007FF +#define CAN_IDR_PAD_MASK 0x00003FFF /* Standard msg padding 1 */ + +/** + * phytium_can_set_reg_bits - set bits in a device register + * @cdev: Driver private data structure + * @reg: Register offset + * @bs: The bits to set + * + * Read the register, OR in the requested bits and write the + * result back to the CAN register. + */ +static void +phytium_can_set_reg_bits(const struct phytium_can_dev *cdev, + enum phytium_can_reg reg, u32 bs) +{ + u32 val = readl(cdev->base + reg); + + val |= bs; + writel(val, cdev->base + reg); +} + +/** + * phytium_can_clr_reg_bits - clear bits in a device register + * @cdev: Driver private data structure + * @reg: Register offset + * @bs: The bits to clear + * + * Read the register, clear the requested bits and write the + * result back to the CAN register. + */ +static void +phytium_can_clr_reg_bits(const struct phytium_can_dev *cdev, + enum phytium_can_reg reg, u32 bs) +{ + u32 val = readl(cdev->base + reg); + + val &= ~bs; + writel(val, cdev->base + reg); +} + +static inline u32 phytium_can_read(const struct phytium_can_dev *cdev, enum phytium_can_reg reg) +{ + return readl(cdev->base + reg); +} + +static inline void +phytium_can_write(const struct phytium_can_dev *cdev, enum phytium_can_reg reg, u32 val) +{ + writel(val, cdev->base + reg); +} + +static inline void phytium_can_enable_all_interrupts(struct phytium_can_dev *cdev) +{ + phytium_can_write(cdev, CAN_INTR, INTR_EN_MASK); +} + +static inline void phytium_can_disable_all_interrupt(struct phytium_can_dev *cdev) +{ + phytium_can_write(cdev, CAN_INTR, 0x0); +} + +static int phytium_can_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + + bec->rxerr = phytium_can_read(cdev, CAN_PHYTIUM_ERR_CNT) & ERR_CNT_REC; + bec->txerr = (phytium_can_read(cdev, CAN_PHYTIUM_ERR_CNT) & ERR_CNT_TEC) >> 16; + + return 0; +} + +static int phytium_can_read_fifo(struct net_device *dev) +{ + struct net_device_stats *stats = &dev->stats; + struct phytium_can_dev *cdev = netdev_priv(dev); +
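/* + * RX FIFO record layout, as unpacked below: word 0 holds the big-endian + * arbitration field (including DLC/flags for standard frames); when IDE + * is set, a second word carries the extended DLC/FDF/BRS/ESI bits; the + * payload then follows as big-endian 32-bit words. + */ +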
+static int phytium_can_read_fifo(struct net_device *dev) +{ + struct net_device_stats *stats = &dev->stats; + struct phytium_can_dev *cdev = netdev_priv(dev); + struct canfd_frame *cf; + struct sk_buff *skb; + u32 id, dlc, i; + + /* Read the frame header from FIFO */ + id = phytium_can_read(cdev, CAN_RX_FIFO); + id = be32_to_cpup(&id); + if (id & CAN_IDE_MASK) { + /* Received an extended frame */ + dlc = phytium_can_read(cdev, CAN_RX_FIFO); + dlc = be32_to_cpup(&dlc); + if (dlc & CAN_ID2_FDF_MASK) + skb = alloc_canfd_skb(dev, &cf); + else + skb = alloc_can_skb(dev, (struct can_frame **)&cf); + + if (unlikely(!skb)) { + stats->rx_dropped++; + return 0; + } + + if (dlc & CAN_ID2_FDF_MASK) { + /* CAN FD extended frame */ + if (dlc & CANFD_ID2_BRS_MASK) + cf->flags |= CANFD_BRS; + if (dlc & CANFD_ID2_ESI_MASK) + cf->flags |= CANFD_ESI; + cf->len = can_fd_dlc2len((dlc & CANFD_ID2_DLC_MASK) >> CANFD_ID2_DLC_OFF); + } else { + /* CAN extended frame */ + cf->len = can_cc_dlc2len((dlc & CAN_ID2_DLC_MASK) >> CAN_ID2_DLC_OFF); + } + + cf->can_id = (id & CAN_ID1_MASK) >> 3; + cf->can_id |= (id & CAN_ID2_MASK) >> 1; + cf->can_id |= CAN_EFF_FLAG; + + if (id & CAN_ID2_RTR_MASK) + cf->can_id |= CAN_RTR_FLAG; + } else { + /* Received a standard frame */ + if (id & CAN_ID1_FDF_MASK) + skb = alloc_canfd_skb(dev, &cf); + else + skb = alloc_can_skb(dev, (struct can_frame **)&cf); + + if (unlikely(!skb)) { + stats->rx_dropped++; + return 0; + } + + if (id & CAN_ID1_FDF_MASK) { + /* CAN FD standard frame */ + if (id & CANFD_ID1_BRS_MASK) + cf->flags |= CANFD_BRS; + if (id & CANFD_ID1_ESI_MASK) + cf->flags |= CANFD_ESI; + cf->len = can_fd_dlc2len((id & CANFD_ID1_DLC_MASK) >> CANFD_ID1_DLC_OFF); + } else { + /* CAN standard frame */ + cf->len = can_cc_dlc2len((id & CAN_ID1_DLC_MASK) >> CAN_ID1_DLC_OFF); + } + + cf->can_id = (id & CAN_ID1_MASK) >> 21; + + if (id & CAN_ID1_RTR_MASK) + cf->can_id |= CAN_RTR_FLAG; + } + + if (!(cf->can_id & CAN_RTR_FLAG)) + /* Receive data frames */ + for (i = 0; i < cf->len; i += 4) + *(__be32 *)(cf->data + i) = phytium_can_read(cdev, CAN_RX_FIFO); + + stats->rx_packets++; + stats->rx_bytes += cf->len; + netif_receive_skb(skb); + + return 1; +} + +static int phytium_can_do_rx_poll(struct net_device *dev, int quota) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + u32 rxfs, pkts = 0; + int isr; + + isr = cdev->isr; + + rxfs = phytium_can_read(cdev, CAN_FIFO_CNT) & FIFO_CNT_RFN; + if (!rxfs) { + netdev_dbg(dev, "no messages in RX FIFO\n"); + return 0; + } + + while ((rxfs != 0) && (quota > 0)) { + if (isr & INTR_REIS) { + pkts += phytium_can_read_fifo(dev); + quota--; + } else { + break; + } + rxfs = phytium_can_read(cdev, CAN_FIFO_CNT) & FIFO_CNT_RFN; + netdev_dbg(dev, "%d frames left in RX FIFO\n", rxfs); + } + + return pkts; +} + +static int phytium_can_rx_handler(struct net_device *dev, int quota) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + int work_done = 0; + u32 isr; + + isr = cdev->isr | phytium_can_read(cdev, CAN_INTR); + if (!isr) + goto end; + + /* Handle RX IRQ */ + if (isr & INTR_REIS) { + int rx_work_or_err; + + rx_work_or_err = phytium_can_do_rx_poll(dev, (quota - work_done)); + if (rx_work_or_err < 0) + return rx_work_or_err; + + work_done += rx_work_or_err; + } + +end: + return work_done; +}
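/*
 * Editor's note (illustrative, not part of the patch): the poll routine
 * below follows the standard NAPI contract. Minimal sketch of that
 * contract; rx_frame_pending() and rx_one_frame() are placeholder names,
 * not driver API:
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* consume at most `budget` frames in softirq context */
	while (done < budget && rx_frame_pending())
		done += rx_one_frame();

	/* under budget: stop polling and let the RX interrupt rearm NAPI */
	if (done < budget)
		napi_complete_done(napi, done);

	return done;
}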
+static int phytium_can_poll(struct napi_struct *napi, int quota) +{ + struct net_device *dev = napi->dev; + struct phytium_can_dev *cdev = netdev_priv(dev); + int work_done; + unsigned long flags; + + netdev_dbg(dev, "RX processing in progress\n"); + + work_done = phytium_can_rx_handler(dev, quota); + + /* Don't re-enable interrupts if the driver had a fatal error + * (e.g., FIFO read failure) + */ + if (work_done >= 0 && work_done < quota) { + napi_complete_done(napi, work_done); + + spin_lock_irqsave(&cdev->lock, flags); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_REIE); + spin_unlock_irqrestore(&cdev->lock, flags); + } + + return work_done; +} + +static void phytium_can_write_frame(struct phytium_can_dev *cdev) +{ + struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data; + struct net_device *dev = cdev->net; + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb = cdev->tx_skb; + u32 i, id, dlc = 0, frame_head[2] = {0, 0}; + u32 data_len, tmp_len; + + data_len = can_fd_len2dlc(cf->len); + cdev->tx_skb = NULL; + + /* Mind the bit layout of the arbitration field */ + if (cf->can_id & CAN_EFF_FLAG) { + /* Extended CAN ID format */ + id = ((cf->can_id & CAN_EFF_MASK) << 1) & CAN_ID2_MASK; + id |= (((cf->can_id & CAN_EFF_MASK) >> + (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) << + CAN_IDR_ID1_SHIFT) & CAN_ID1_MASK; + + /* The substitute remote TX request bit should be "1" + * for extended frames as in the Phytium CAN datasheet + */ + id |= CAN_IDE_MASK | CAN_ID2_SRR_MASK; + + if (cf->can_id & CAN_RTR_FLAG) + /* Extended frames remote TX request */ + id |= CAN_ID2_RTR_MASK; + if ((cdev->can.ctrlmode & CAN_CTRLMODE_FD) && + can_is_canfd_skb(skb)) + dlc = data_len << CANFD_ID2_DLC_OFF; + else + dlc = data_len << CAN_ID2_DLC_OFF; + + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { + dlc |= CAN_ID2_FDF_MASK; + if (cf->flags & CANFD_BRS) + dlc |= CANFD_ID2_BRS_MASK; + if (cf->flags & CANFD_ESI) + dlc |= CANFD_ID2_ESI_MASK; + } + + frame_head[0] = cpu_to_be32p(&id); + frame_head[1] = cpu_to_be32p(&dlc); + + /* Write the Frame to Phytium CAN TX FIFO */ + phytium_can_write(cdev, CAN_TX_FIFO, frame_head[0]); + phytium_can_write(cdev, CAN_TX_FIFO, frame_head[1]); + netdev_dbg(dev, "Write arbitration field [0]:0x%x [1]:0x%x\n", + frame_head[0], frame_head[1]); + } else { + /* Standard CAN ID format */ + id = ((cf->can_id & CAN_SFF_MASK) << CAN_IDR_ID1_SHIFT) + & CAN_ID1_MASK; + + if (cf->can_id & CAN_RTR_FLAG) + /* Standard frames remote TX request */ + id |= CAN_ID1_RTR_MASK; + + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) + dlc = (data_len << CANFD_ID1_DLC_OFF) + | CANFD_IDR_PAD_MASK; + else + dlc = (data_len << CAN_ID1_DLC_OFF) | CAN_IDR_PAD_MASK; + + id |= dlc; + + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { + id |= CAN_ID1_FDF_MASK; + if (cf->flags & CANFD_BRS) + id |= CANFD_ID1_BRS_MASK; + if (cf->flags & CANFD_ESI) + id |= CANFD_ID1_ESI_MASK; + } + + frame_head[0] = cpu_to_be32p(&id); + /* Write the Frame to Phytium CAN TX FIFO */ + phytium_can_write(cdev, CAN_TX_FIFO, frame_head[0]); + netdev_dbg(dev, "Write arbitration field [0] 0x%x\n", + frame_head[0]); + } + + if (!(cf->can_id & CAN_RTR_FLAG)) { + netdev_dbg(dev, "Write CAN data frame\n"); + for (i = 0; i < cf->len; i += 4) { + phytium_can_write(cdev, CAN_TX_FIFO, + *(__be32 *)(cf->data + i)); + netdev_dbg(dev, "[%d]:%x\n", i, + *(__be32 *)(cf->data + i)); + } + } + + stats->tx_bytes += cf->len; + stats->tx_packets++; + + cdev->is_tx_done = false; + cdev->is_need_stop_xmit = true; + mod_timer(&cdev->timer, jiffies + HZ / 10); + + netdev_dbg(dev, "Trigger send message!\n"); + can_put_echo_skb(skb, dev, 0, 0); + tmp_len = can_get_echo_skb(dev, 0, 0); +}
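/*
 * Editor's worked example (illustrative only): packing the arbitration
 * word of a classic standard frame with the masks defined earlier.
 * For can_id 0x123 and DLC 8 this yields
 * (0x123 << 21) | (8 << 14) | CAN_IDR_PAD_MASK.
 */
static u32 example_pack_sff_arb(u16 sff_id, u8 dlc)
{
	u32 id = ((u32)sff_id << CAN_IDR_ID1_SHIFT) & CAN_ID1_MASK;

	return id | ((u32)dlc << CAN_ID1_DLC_OFF) | CAN_IDR_PAD_MASK;
}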
+static netdev_tx_t phytium_can_tx_handler(struct phytium_can_dev *cdev) +{ + struct net_device *dev = cdev->net; + u32 tx_fifo_used; + unsigned long flags; + + phytium_can_write_frame(cdev); + /* Check if the TX buffer is full */ + tx_fifo_used = 4 * ((phytium_can_read(cdev, CAN_FIFO_CNT) & FIFO_CNT_TFN) >> 16); + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { + if (CAN_FIFO_BYTE_LEN - tx_fifo_used <= KEEP_CANFD_FIFO_MIN_LEN) { + netif_stop_queue(dev); + spin_lock_irqsave(&cdev->lock, flags); + cdev->is_stop_queue_flag = STOP_QUEUE_TRUE; + spin_unlock_irqrestore(&cdev->lock, flags); + } + } else { + if (CAN_FIFO_BYTE_LEN - tx_fifo_used <= KEEP_CAN_FIFO_MIN_LEN) { + netif_stop_queue(dev); + spin_lock_irqsave(&cdev->lock, flags); + cdev->is_stop_queue_flag = STOP_QUEUE_TRUE; + spin_unlock_irqrestore(&cdev->lock, flags); + } + } + + return NETDEV_TX_OK; +} + +/** + * phytium_can_tx_interrupt - TX done interrupt handler + * @ndev: net_device pointer + * @isr: Interrupt status register value + */ +static void phytium_can_tx_interrupt(struct net_device *ndev, u32 isr) +{ + struct phytium_can_dev *cdev = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + u32 tx_fifo_used = 0; + + if (isr & INTR_TEIS) + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_TEIC); + + /* Check if the TX buffer is full */ + if (cdev->is_stop_queue_flag) { + tx_fifo_used = 4 * ((phytium_can_read(cdev, CAN_FIFO_CNT) & FIFO_CNT_TFN) >> 16); + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { + if (CAN_FIFO_BYTE_LEN - tx_fifo_used > KEEP_CANFD_FIFO_MIN_LEN) { + netif_wake_queue(ndev); + cdev->is_stop_queue_flag = STOP_QUEUE_FALSE; + } + } else { + if (CAN_FIFO_BYTE_LEN - tx_fifo_used > KEEP_CAN_FIFO_MIN_LEN) { + netif_wake_queue(ndev); + cdev->is_stop_queue_flag = STOP_QUEUE_FALSE; + } + } + } + + cdev->is_tx_done = true; + cdev->is_need_stop_xmit = false; + timer_delete(&cdev->timer); + + netdev_dbg(ndev, "Finished transmitting %lu packets\n", stats->tx_packets); + + phytium_can_set_reg_bits(cdev, CAN_INTR, (INTR_BOIE | + INTR_PWIE | INTR_PEIE)); +} + +static void phytium_can_tx_done_timeout(struct timer_list *t) +{ + struct phytium_can_dev *priv = timer_container_of(priv, t, timer); + struct net_device *ndev = priv->net; + + if (!priv->is_tx_done) { + if (priv->is_need_stop_xmit) { + netdev_dbg(ndev, "%s stop xmit\n", __func__); + priv->is_need_stop_xmit = false; + phytium_can_clr_reg_bits(priv, CAN_CTRL, CTRL_XFER); + phytium_can_clr_reg_bits(priv, CAN_INTR, (INTR_BOIE | + INTR_PWIE | INTR_PEIE)); + /* stop xmit and restart after 500ms */ + mod_timer(&priv->timer, jiffies + HZ / 2); + } else { + netdev_dbg(ndev, "%s start xmit\n", __func__); + priv->is_need_stop_xmit = true; + phytium_can_set_reg_bits(priv, CAN_CTRL, CTRL_XFER); + /* start xmit and stop after 250ms */ + mod_timer(&priv->timer, jiffies + HZ / 4); + } + } +} + +static void phytium_can_err_interrupt(struct net_device *ndev, u32 isr) +{ + struct phytium_can_dev *cdev = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + u32 txerr = 0, rxerr = 0; + + skb = alloc_can_err_skb(ndev, &cf); + + rxerr = phytium_can_read(cdev, CAN_PHYTIUM_ERR_CNT) & ERR_CNT_REC; + txerr = ((phytium_can_read(cdev, CAN_PHYTIUM_ERR_CNT) & ERR_CNT_TEC) >> 16); + + if (isr & INTR_BOIS) { + netdev_dbg(ndev, "bus_off %s: txerr :%u rxerr :%u\n", + __func__, txerr, rxerr); + cdev->can.state = CAN_STATE_BUS_OFF; + cdev->can.can_stats.bus_off++; + /* Leave device in Config Mode in bus-off state */ + phytium_can_write(cdev, CAN_CTRL, CTRL_RST); + can_bus_off(ndev); + if (skb) + cf->can_id |= CAN_ERR_BUSOFF; + } else if ((isr & INTR_PEIS) == INTR_PEIS) { + netdev_dbg(ndev, "error_passive %s: txerr :%u rxerr :%u\n", + __func__, txerr, rxerr); + cdev->can.state = CAN_STATE_ERROR_PASSIVE; +
cdev->can.can_stats.error_passive++; + /* Clear interrupt condition */ + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_PEIC); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_PWIC); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_TEIC); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_EIC); + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (rxerr > 127) ? + CAN_ERR_CRTL_RX_PASSIVE : + CAN_ERR_CRTL_TX_PASSIVE; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + } else if (isr & INTR_PWIS) { + netdev_dbg(ndev, "error_warning %s: txerr :%u rxerr :%u\n", + __func__, txerr, rxerr); + cdev->can.state = CAN_STATE_ERROR_WARNING; + cdev->can.can_stats.error_warning++; + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_PWIC); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_TEIC); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_EIC); + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= (txerr > rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + } + + /* Check for RX FIFO Overflow interrupt */ + if (isr & INTR_RFIS) { + stats->rx_over_errors++; + stats->rx_errors++; + + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; + } + } + + if (skb) { + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } +} + +/** + * phytium_can_isr - CAN interrupt service routine + * @irq: irq number + * @dev_id: device id pointer + * + * This is the Phytium CAN ISR. It checks for the type of interrupt + * and invokes the corresponding handler. + * + * Return: + * * IRQ_NONE - if no interrupt is pending, IRQ_HANDLED otherwise + */ +static irqreturn_t phytium_can_isr(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct phytium_can_dev *cdev = netdev_priv(dev); + u32 isr; + + /* Get the interrupt status */ + isr = phytium_can_read(cdev, CAN_INTR) & INTR_STATUS_MASK; + if (!isr) + return IRQ_NONE; + + spin_lock(&cdev->lock); + /* Check for RX FIFO full interrupt and handle it */ + if ((isr & INTR_RFIS)) { + netdev_dbg(dev, "RX FIFO is full\n"); + isr &= (~INTR_RFIS); + phytium_can_clr_reg_bits(cdev, CAN_INTR, INTR_RFIE); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_RFIC); + napi_schedule(&cdev->napi); + } + + /* Check for TX FIFO empty interrupt and handle it */ + if ((isr & INTR_TFIS)) { + netdev_dbg(dev, "TX FIFO is empty\n"); + isr &= (~INTR_TFIS); + phytium_can_clr_reg_bits(cdev, CAN_INTR, INTR_TFIE); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_TFIC); + } + + /* Check for the type of error interrupt and process it */ + if (isr & (INTR_EIS | INTR_RFIS | INTR_BOIS | INTR_PWIS | INTR_PEIS)) { + phytium_can_clr_reg_bits(cdev, CAN_INTR, (INTR_EIE | INTR_RFIE | + INTR_BOIE | INTR_PWIE | INTR_PEIE)); + phytium_can_err_interrupt(dev, isr); + phytium_can_set_reg_bits(cdev, CAN_INTR, (INTR_EIC | INTR_RFIC | + INTR_BOIC | INTR_PWIC | INTR_PEIC)); + spin_unlock(&cdev->lock); + return IRQ_HANDLED; + } + + /* Check for TX interrupt and process it */ + if ((isr & INTR_TEIS)) { + isr &= (~INTR_REIS); + phytium_can_tx_interrupt(dev, isr); + } + + /* Check for RX interrupt and process it */ + if (isr & (INTR_REIS)) { + cdev->isr = (isr & INTR_REIS); + phytium_can_clr_reg_bits(cdev, CAN_INTR, INTR_REIE); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_REIC); + napi_schedule(&cdev->napi); + } + spin_unlock(&cdev->lock); + return IRQ_HANDLED; +}
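/*
 * Editor's note: the ISR above follows the usual mask-ack-defer pattern
 * for RX. Distilled sketch of that leg (same driver calls, trimmed for
 * illustration; example_rx_irq_leg() is hypothetical):
 */
static irqreturn_t example_rx_irq_leg(struct phytium_can_dev *cdev, u32 isr)
{
	if (!(isr & INTR_REIS))
		return IRQ_NONE;

	cdev->isr = isr & INTR_REIS;				/* stash cause for poll */
	phytium_can_clr_reg_bits(cdev, CAN_INTR, INTR_REIE);	/* mask source */
	phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_REIC);	/* ack status */
	napi_schedule(&cdev->napi);				/* defer to softirq */

	return IRQ_HANDLED;
}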
+/** + * phytium_can_set_bittiming - CAN bit timing setup routine + * @dev: Pointer to net_device structure + * + * This is the driver's bit timing setup routine. + * Return: 0 on success and failure value on error + */ +static int phytium_can_set_bittiming(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + const struct can_bittiming *bt = &cdev->can.bittiming; + const struct can_bittiming *dbt = &cdev->can.fd.data_bittiming; + u32 btr, dbtr; + u32 is_config_mode; + + /* Bit timing can only be changed while the controller is in + * configuration mode, i.e. while transfers are disabled. + */ + is_config_mode = phytium_can_read(cdev, CAN_CTRL) & CTRL_XFER; + if (is_config_mode) { + netdev_alert(dev, "BUG! Cannot set bittiming - CAN is not in config mode\n"); + return -EPERM; + } + + /* Setting Baud Rate prescaler value in BRPR Register */ + btr = (bt->brp - 1) << 16; + + /* Setting Time Segment 1 in BTR Register */ + btr |= (bt->prop_seg - 1) << 2; + + btr |= (bt->phase_seg1 - 1) << 5; + + /* Setting Time Segment 2 in BTR Register */ + btr |= (bt->phase_seg2 - 1) << 8; + + /* Setting Synchronization Jump Width in BTR Register */ + btr |= (bt->sjw - 1); + + dbtr = (dbt->brp - 1) << 16; + dbtr |= (dbt->prop_seg - 1) << 2; + dbtr |= (dbt->phase_seg1 - 1) << 5; + dbtr |= (dbt->phase_seg2 - 1) << 8; + dbtr |= (dbt->sjw - 1); + + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { + phytium_can_write(cdev, CAN_ARB_RATE_CTRL, btr); + phytium_can_write(cdev, CAN_DAT_RATE_CTRL, dbtr); + } else { + phytium_can_write(cdev, CAN_ARB_RATE_CTRL, btr); + phytium_can_write(cdev, CAN_DAT_RATE_CTRL, btr); + } + + netdev_dbg(dev, "DAT=0x%08x, ARB=0x%08x\n", + phytium_can_read(cdev, CAN_DAT_RATE_CTRL), + phytium_can_read(cdev, CAN_ARB_RATE_CTRL)); + + return 0; +} + +/** + * phytium_can_start - Driver start routine + * @dev: Pointer to net_device structure + * + * Puts the CAN device into the proper mode based on its current state. + */ +static void phytium_can_start(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + u32 ctrl; + + /* Disable transfer */ + ctrl = phytium_can_read(cdev, CAN_CTRL); + ctrl &= ~CTRL_XFER; + phytium_can_write(cdev, CAN_CTRL, ctrl); + + /* XXX: If CANFD, reset the controller */ + phytium_can_write(cdev, CAN_CTRL, (ctrl | CTRL_RST)); + + /* Bittiming setup */ + phytium_can_set_bittiming(dev); + + /* Acceptance identifier mask setup */ + phytium_can_write(cdev, CAN_ACC_ID0_MASK, ACC_IDX_MASK_AID_MASK); + phytium_can_write(cdev, CAN_ACC_ID1_MASK, ACC_IDX_MASK_AID_MASK); + phytium_can_write(cdev, CAN_ACC_ID2_MASK, ACC_IDX_MASK_AID_MASK); + phytium_can_write(cdev, CAN_ACC_ID3_MASK, ACC_IDX_MASK_AID_MASK); + ctrl |= CTRL_AIME; + + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) + ctrl |= CTRL_IOF | CTRL_FDCRC; + + phytium_can_write(cdev, CAN_CTRL, ctrl); + + cdev->can.state = CAN_STATE_ERROR_ACTIVE; + + phytium_can_enable_all_interrupts(cdev); + + if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + ctrl |= CTRL_XFER; + else + ctrl |= CTRL_XFER | CTRL_TXREQ; + + phytium_can_write(cdev, CAN_CTRL, ctrl); +}
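/*
 * Editor's worked example of the BTR packing in phytium_can_set_bittiming()
 * above (offsets as coded there: BRP-1 at bit 16, prop_seg-1 at bit 2,
 * phase_seg1-1 at bit 5, phase_seg2-1 at bit 8, SJW-1 at bit 0).
 * For brp=4, prop_seg=3, phase_seg1=5, phase_seg2=4, sjw=1:
 * (3 << 16) | (2 << 2) | (4 << 5) | (3 << 8) | 0 = 0x30388.
 */
static u32 example_pack_btr(const struct can_bittiming *bt)
{
	return ((bt->brp - 1) << 16) | ((bt->prop_seg - 1) << 2) |
	       ((bt->phase_seg1 - 1) << 5) | ((bt->phase_seg2 - 1) << 8) |
	       (bt->sjw - 1);
}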
+ +/** + * phytium_can_stop - Driver stop routine + * @dev: Pointer to net_device structure + * + * This is the driver's stop routine. It disables the + * interrupts and puts the device into configuration mode. + */ +static void phytium_can_stop(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + u32 ctrl; + + /* Disable all interrupts */ + phytium_can_disable_all_interrupt(cdev); + + /* Disable transfer and switch to receive-only mode */ + ctrl = phytium_can_read(cdev, CAN_CTRL); + ctrl &= ~(CTRL_XFER | CTRL_TXREQ); + phytium_can_write(cdev, CAN_CTRL, ctrl); + + timer_delete(&cdev->timer); + + /* Set the state as STOPPED */ + cdev->can.state = CAN_STATE_STOPPED; +} + +static void phytium_can_clean(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + + if (cdev->tx_skb) { + dev->stats.tx_errors++; + can_free_echo_skb(cdev->net, 0, NULL); + cdev->tx_skb = NULL; + } +} + +static int phytium_can_set_mode(struct net_device *dev, enum can_mode mode) +{ + switch (mode) { + case CAN_MODE_START: + phytium_can_clean(dev); + phytium_can_start(dev); + netif_wake_queue(dev); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +/** + * phytium_can_open - Driver open routine + * @dev: Pointer to net_device structure + * + * This is the driver open routine. + * Return: 0 on success and failure value on error + */ +static int phytium_can_open(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + int ret; + + ret = pm_runtime_get_sync(cdev->dev); + if (ret < 0) { + netdev_err(dev, "%s: pm_runtime_get failed(%d)\n", + __func__, ret); + return ret; + } + + /* Open the CAN device */ + ret = open_candev(dev); + if (ret) { + netdev_err(dev, "failed to open can device\n"); + goto disable_clk; + } + + /* Register interrupt handler */ + ret = request_irq(dev->irq, phytium_can_isr, + IRQF_SHARED, dev->name, dev); + if (ret < 0) { + netdev_err(dev, "failed to request interrupt\n"); + goto fail; + } + + /* Start the controller */ + phytium_can_start(dev); + + netdev_dbg(dev, "%s is going on\n", __func__); + + napi_enable(&cdev->napi); + cdev->is_stop_queue_flag = STOP_QUEUE_FALSE; + netif_start_queue(dev); + + return 0; + +fail: + close_candev(dev); +disable_clk: + pm_runtime_put_sync(cdev->dev); + return ret; +} + +/** + * phytium_can_close - Driver close routine + * @dev: Pointer to net_device structure + * + * Return: 0 always + */ +static int phytium_can_close(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + + netif_stop_queue(dev); + napi_disable(&cdev->napi); + + phytium_can_stop(dev); + free_irq(dev->irq, dev); + pm_runtime_put_sync(cdev->dev); + + close_candev(dev); + + return 0; +} + +/** + * phytium_can_start_xmit - Start the transmission + * @skb: Socket buffer with the CAN frame to send + * @dev: Pointer to net_device structure + * + * Return: NETDEV_TX_OK on success.
+ */ +static netdev_tx_t phytium_can_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + + if (can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; + + cdev->tx_skb = skb; + + return phytium_can_tx_handler(cdev); +} + +static const struct net_device_ops phytium_can_netdev_ops = { + .ndo_open = phytium_can_open, + .ndo_stop = phytium_can_close, + .ndo_start_xmit = phytium_can_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +static int register_phytium_can_dev(struct net_device *dev) +{ + dev->flags |= IFF_ECHO; + dev->netdev_ops = &phytium_can_netdev_ops; + + return register_candev(dev); +} + +static int phytium_can_dev_setup(struct phytium_can_dev *cdev) +{ + struct net_device *dev = cdev->net; + + netif_napi_add(dev, &cdev->napi, phytium_can_poll); + + cdev->can.do_set_mode = phytium_can_set_mode; + cdev->can.do_get_berr_counter = phytium_can_get_berr_counter; + + cdev->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_BERR_REPORTING; + cdev->can.bittiming_const = cdev->bit_timing; + + if (cdev->fdmode) { + cdev->can.ctrlmode_supported |= CAN_CTRLMODE_FD; + dev->mtu = CANFD_MTU; + cdev->can.ctrlmode = CAN_CTRLMODE_FD; + cdev->can.fd.data_bittiming_const = cdev->bit_timing; + } + spin_lock_init(&cdev->lock); + return 0; +} + +struct phytium_can_dev *phytium_can_allocate_dev(struct device *dev, int sizeof_priv, + int tx_fifo_depth) +{ + struct phytium_can_dev *cdev = NULL; + struct net_device *net_dev; + + /* Allocate the can device struct */ + net_dev = alloc_candev(sizeof_priv, tx_fifo_depth); + if (!net_dev) { + dev_err(dev, "Failed to allocate CAN device.\n"); + goto out; + } + + cdev = netdev_priv(net_dev); + cdev->net = net_dev; + cdev->dev = dev; + SET_NETDEV_DEV(net_dev, dev); + +out: + return cdev; +} +EXPORT_SYMBOL(phytium_can_allocate_dev); + +void phytium_can_free_dev(struct net_device *net) +{ + free_candev(net); +} +EXPORT_SYMBOL(phytium_can_free_dev); + +int phytium_can_register(struct phytium_can_dev *cdev) +{ + int ret; + + ret = phytium_can_dev_setup(cdev); + if (ret) + goto fail; + + ret = register_phytium_can_dev(cdev->net); + if (ret) { + dev_err(cdev->dev, "registering %s failed (err=%d)\n", + cdev->net->name, ret); + goto fail; + } + + cdev->is_tx_done = true; + cdev->is_need_stop_xmit = false; + timer_setup(&cdev->timer, phytium_can_tx_done_timeout, 0); + + dev_info(cdev->dev, "%s device registered (irq=%d)\n", + KBUILD_MODNAME, cdev->net->irq); + + /* Probe finished + * Stop clocks. They will be reactivated once the device is opened. 
+ */ + pm_runtime_put_sync(cdev->dev); + + return 0; + +fail: + pm_runtime_put_sync(cdev->dev); + return ret; +} +EXPORT_SYMBOL(phytium_can_register); + +void phytium_can_unregister(struct phytium_can_dev *cdev) +{ + unregister_candev(cdev->net); +} +EXPORT_SYMBOL(phytium_can_unregister); + +int phytium_can_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct phytium_can_dev *cdev = netdev_priv(ndev); + + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + phytium_can_stop(ndev); + pm_runtime_put_sync(cdev->dev); + } + + cdev->can.state = CAN_STATE_SLEEPING; + + return 0; +} +EXPORT_SYMBOL(phytium_can_suspend); + +int phytium_can_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct phytium_can_dev *cdev = netdev_priv(ndev); + int ret; + + cdev->can.state = CAN_STATE_ERROR_ACTIVE; + + if (netif_running(ndev)) { + ret = pm_runtime_resume(cdev->dev); + if (ret) + return ret; + + phytium_can_start(ndev); + netif_device_attach(ndev); + netif_start_queue(ndev); + } + + return 0; +} +EXPORT_SYMBOL(phytium_can_resume); + +MODULE_AUTHOR("Cheng Quan "); +MODULE_AUTHOR("Chen Baozi "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("CAN bus driver for Phytium CAN controller"); + diff --git a/drivers/net/can/phytium/phytium_can.h b/drivers/net/can/phytium/phytium_can.h new file mode 100644 index 0000000000000..802bb36a13116 --- /dev/null +++ b/drivers/net/can/phytium/phytium_can.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium CAN controller driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef _PHYTIUM_CAN_H_ +#define _PHYTIUM_CAN_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define KEEP_CAN_FIFO_MIN_LEN 16 +#define KEEP_CANFD_FIFO_MIN_LEN 128 +#define CAN_FIFO_BYTE_LEN 256 +#define STOP_QUEUE_TRUE 1 +#define STOP_QUEUE_FALSE 0 + +enum phytium_can_ip_type { + PHYTIUM_CAN = 0, + PHYTIUM_CANFD, +}; + +struct phytium_can_devtype { + enum phytium_can_ip_type cantype; + const struct can_bittiming_const *bittiming_const; +}; + +struct phytium_can_dev { + struct can_priv can; + + struct napi_struct napi; + struct net_device *net; + struct device *dev; + struct clk *clk; + + struct sk_buff *tx_skb; + + const struct can_bittiming_const *bit_timing; + spinlock_t lock; /* protects register access and queue state */ + int fdmode; + u32 isr; + u32 tx_fifo_depth; + unsigned int is_stop_queue_flag; + void __iomem *base; + + struct timer_list timer; /* xmit done timer */ + u32 is_tx_done; + u32 is_need_stop_xmit; +}; + +struct phytium_can_dev *phytium_can_allocate_dev(struct device *dev, int sizeof_priv, + int tx_fifo_depth); +void phytium_can_free_dev(struct net_device *net); + +int phytium_can_register(struct phytium_can_dev *cdev); +void phytium_can_unregister(struct phytium_can_dev *cdev); + +int phytium_can_suspend(struct device *dev); +int phytium_can_resume(struct device *dev); +#endif /* _PHYTIUM_CAN_H_ */ diff --git a/drivers/net/can/phytium/phytium_can_pci.c b/drivers/net/can/phytium/phytium_can_pci.c new file mode 100644 index 0000000000000..af7f606f432a0 --- /dev/null +++ b/drivers/net/can/phytium/phytium_can_pci.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* PCI CAN bus driver for Phytium CAN controller + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */ + +#include + +#include "phytium_can.h" + +struct phytium_can_pci_config { + const struct phytium_can_devtype *devtype; + unsigned int clock_freq; + unsigned int tx_fifo_depth; +}; + +#define cdev2priv(dev) container_of(dev, struct phytium_can_pci, cdev) + +struct phytium_can_pci { + struct phytium_can_dev cdev; + + void __iomem *base; +}; + +static const struct can_bittiming_const phytium_bittiming_const_8192 = { + .name = "phytium_can", + .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, /* Synchronisation jump width */ + .brp_min = 1, /* Bit-rate prescaler */ + .brp_max = 8192, + .brp_inc = 2, +}; + +static const struct phytium_can_devtype phytium_can_pci = { + .cantype = PHYTIUM_CAN, + .bittiming_const = &phytium_bittiming_const_8192, +}; + +static const struct phytium_can_pci_config phytium_can_pci_data = { + .devtype = &phytium_can_pci, + .clock_freq = 480000000, + .tx_fifo_depth = 64, +}; + +static int phytium_can_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + const struct phytium_can_pci_config *cfg; + struct phytium_can_dev *cdev; + struct phytium_can_pci *priv; + int ret; + + cfg = (const struct phytium_can_pci_config *)id->driver_data; + + ret = pcim_enable_device(pdev); + if (ret) + goto err; + + ret = pcim_iomap_regions(pdev, 0x1, pci_name(pdev)); + if (ret) + goto err; + + cdev = phytium_can_allocate_dev(&pdev->dev, sizeof(struct phytium_can_pci), + cfg->tx_fifo_depth); + if (!cdev) + return -ENOMEM; + + priv = cdev2priv(cdev); + priv->base = pcim_iomap_table(pdev)[0]; + + cdev->dev = &pdev->dev; + cdev->fdmode = cfg->devtype->cantype; + cdev->bit_timing = cfg->devtype->bittiming_const; + cdev->can.clock.freq = cfg->clock_freq; + cdev->tx_fifo_depth = cfg->tx_fifo_depth; + + cdev->base = priv->base; + cdev->net->irq = pdev->irq; + + pci_set_drvdata(pdev, cdev->net); + + if (!pm_runtime_enabled(cdev->dev)) + pm_runtime_enable(cdev->dev); + ret = pm_runtime_get_sync(cdev->dev); + if (ret < 0) { + netdev_err(cdev->net, "%s: pm_runtime_get failed(%d)\n", + __func__, ret); + goto err_pmdisable; + } + ret = phytium_can_register(cdev); + if (ret) + goto err; + + return 0; + +err_pmdisable: + pm_runtime_disable(&pdev->dev); +err: + return ret; +} + +static void phytium_can_pci_remove(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct phytium_can_dev *cdev = netdev_priv(dev); + + pm_runtime_disable(cdev->dev); + + phytium_can_unregister(cdev); + phytium_can_free_dev(cdev->net); +} + +static __maybe_unused int phytium_can_pci_suspend(struct device *dev) +{ + return phytium_can_suspend(dev); +} + +static __maybe_unused int phytium_can_pci_resume(struct device *dev) +{ + return phytium_can_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(phytium_can_pci_pm_ops, + phytium_can_pci_suspend, phytium_can_pci_resume); + +static const struct pci_device_id phytium_can_pci_id_table[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc2d), (kernel_ulong_t)&phytium_can_pci_data, }, + { /* sentinel */ }, +}; + +static struct pci_driver phytium_can_pci_driver = { + .name = KBUILD_MODNAME, + .probe = phytium_can_pci_probe, + .remove = phytium_can_pci_remove, + .id_table = phytium_can_pci_id_table, + .driver = { + .pm = &phytium_can_pci_pm_ops, + }, +}; + +module_pci_driver(phytium_can_pci_driver); + +MODULE_AUTHOR("Cheng Quan +#include +#include + +#include "phytium_can.h" + +#define cdev2priv(dev) container_of(dev, struct phytium_can_plat, cdev) + 
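/*
 * Editor's note: cdev2priv() above is the usual container_of() glue.
 * Because struct phytium_can_dev is embedded by value in the wrapper,
 * container_of() can subtract the member offset to recover the wrapper
 * from a pointer to the embedded member. Hypothetical usage sketch:
 *
 *	struct phytium_can_plat *priv = cdev2priv(cdev);
 *	// priv->reg_base, priv->irq etc. are now reachable
 */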
+struct phytium_can_plat { + struct phytium_can_dev cdev; + struct phytium_can_devtype *devtype; + + int irq; + void __iomem *reg_base; +}; + +static const struct can_bittiming_const phytium_bittiming_const_512 = { + .name = "phytium_can", + .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, /* Synchronisation jump width */ + .brp_min = 1, /* Bit-rate prescaler */ + .brp_max = 512, + .brp_inc = 2, +}; + +static const struct can_bittiming_const phytium_bittiming_const_8192 = { + .name = "phytium_can", + .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, /* Synchronisation jump width */ + .brp_min = 1, /* Bit-rate prescaler */ + .brp_max = 8192, + .brp_inc = 2, +}; + +static const struct phytium_can_devtype phytium_can_data = { + .cantype = PHYTIUM_CAN, + .bittiming_const = &phytium_bittiming_const_512, +}; + +static const struct phytium_can_devtype phytium_canfd_data = { + .cantype = PHYTIUM_CANFD, + .bittiming_const = &phytium_bittiming_const_8192, +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_can_acpi_ids[] = { + { "PHYT000A", 0 }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(acpi, phytium_can_acpi_ids); +#endif + +#ifdef CONFIG_OF +static const struct of_device_id phytium_can_of_ids[] = { + { .compatible = "phytium,can", .data = &phytium_can_data }, + { .compatible = "phytium,canfd", .data = &phytium_canfd_data }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, phytium_can_of_ids); +#endif + +static int phytium_can_plat_probe(struct platform_device *pdev) +{ + struct phytium_can_dev *cdev; + struct phytium_can_plat *priv; + struct resource *res; + const struct of_device_id *of_id; + const struct phytium_can_devtype *devtype = &phytium_can_data; + u32 tx_fifo_depth; + int ret; + const char *str = "can"; + + ret = fwnode_property_read_u32(dev_fwnode(&pdev->dev), "tx-fifo-depth", &tx_fifo_depth); + if (ret) + tx_fifo_depth = 64; + + cdev = phytium_can_allocate_dev(&pdev->dev, sizeof(struct phytium_can_plat), + tx_fifo_depth); + if (!cdev) + return -ENOMEM; + + priv = cdev2priv(cdev); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->reg_base = devm_ioremap_resource(&pdev->dev, res); + priv->irq = platform_get_irq(pdev, 0); + if (IS_ERR(priv->reg_base) || priv->irq < 0) { + ret = -EINVAL; + goto fail; + } + + if (pdev->dev.of_node) { + cdev->clk = devm_clk_get(&pdev->dev, "can_clk"); + if (IS_ERR(cdev->clk)) { + dev_err(&pdev->dev, "no clock found\n"); + ret = -ENODEV; + goto fail; + } + cdev->can.clock.freq = clk_get_rate(cdev->clk); + + of_id = of_match_device(phytium_can_of_ids, &pdev->dev); + if (of_id && of_id->data) + devtype = of_id->data; + } else if (has_acpi_companion(&pdev->dev)) { + ret = fwnode_property_read_u32(dev_fwnode(&pdev->dev), + "clock-frequency", + &cdev->can.clock.freq); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get clock frequency.\n"); + goto fail; + } + + fwnode_property_read_string(dev_fwnode(&pdev->dev), + "mode-select", &str); + if (!(strcmp(str, "canfd"))) + devtype = &phytium_canfd_data; + } + + cdev->tx_fifo_depth = tx_fifo_depth; + + if (devtype->cantype == PHYTIUM_CANFD) + cdev->fdmode = 1; + else + cdev->fdmode = 0; + + if (fwnode_property_present(dev_fwnode(&pdev->dev), "extend_brp")) + cdev->bit_timing = &phytium_bittiming_const_8192; + else + cdev->bit_timing =
devtype->bittiming_const; + cdev->can.bittiming_const = devtype->bittiming_const; + cdev->base = priv->reg_base; + cdev->net->irq = priv->irq; + + platform_set_drvdata(pdev, cdev->net); + + pm_runtime_enable(cdev->dev); + ret = phytium_can_register(cdev); + if (ret) + goto out_runtime_disable; + + return ret; + +out_runtime_disable: + pm_runtime_disable(cdev->dev); +fail: + phytium_can_free_dev(cdev->net); + return ret; +} + +static __maybe_unused int phytium_can_plat_suspend(struct device *dev) +{ + return phytium_can_suspend(dev); +} + +static __maybe_unused int phytium_can_plat_resume(struct device *dev) +{ + return phytium_can_resume(dev); +} + +static void phytium_can_plat_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct phytium_can_dev *cdev = netdev_priv(dev); + + phytium_can_unregister(cdev); + + phytium_can_free_dev(cdev->net); +} + +static int __maybe_unused phytium_can_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct phytium_can_dev *cdev = netdev_priv(ndev); + + clk_disable_unprepare(cdev->clk); + + return 0; +} + +static int __maybe_unused phytium_can_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct phytium_can_dev *cdev = netdev_priv(ndev); + + return clk_prepare_enable(cdev->clk); +} + +static const struct dev_pm_ops phytium_can_plat_pm_ops = { + SET_RUNTIME_PM_OPS(phytium_can_runtime_suspend, + phytium_can_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(phytium_can_suspend, phytium_can_resume) +}; + +static struct platform_driver phytium_can_plat_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = of_match_ptr(phytium_can_of_ids), + .acpi_match_table = ACPI_PTR(phytium_can_acpi_ids), + .pm = &phytium_can_plat_pm_ops, + }, + .probe = phytium_can_plat_probe, + .remove = phytium_can_plat_remove, +}; + +module_platform_driver(phytium_can_plat_driver); + +MODULE_AUTHOR("Cheng Quan "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium CAN driver for IO Mapped controllers"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 86e912471dead..c0befc3125d46 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1950,13 +1950,18 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, dma_free_coherent(priv->device, dma_conf->dma_rx_size * sizeof(struct dma_extended_desc), rx_q->dma_erx, rx_q->dma_rx_phy); + rx_q->dma_rx = NULL; + rx_q->dma_erx = NULL; if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) xdp_rxq_info_unreg(&rx_q->xdp_rxq); kfree(rx_q->buf_pool); + rx_q->buf_pool = NULL; + if (rx_q->page_pool) page_pool_destroy(rx_q->page_pool); + rx_q->page_pool = NULL; } static void free_dma_rx_desc_resources(struct stmmac_priv *priv, @@ -2002,8 +2007,14 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); + tx_q->dma_etx = NULL; + tx_q->dma_entx = NULL; + tx_q->dma_tx = NULL; + kfree(tx_q->tx_skbuff_dma); + tx_q->tx_skbuff_dma = NULL; kfree(tx_q->tx_skbuff); + tx_q->tx_skbuff = NULL; } static void free_dma_tx_desc_resources(struct stmmac_priv *priv, diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index bc630ab8a2831..0f08f0cdde0a6 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -11,6 +11,7 @@ #include 
#include #include +#include #include "../pci.h" @@ -80,6 +81,20 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_LPC, system_bus_quirk); +static void loongson_d3_quirk(struct pci_dev *pdev) +{ + pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; + pdev->no_d1d2 = 1; +} +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT3, loongson_d3_quirk); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT4, loongson_d3_quirk); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT5, loongson_d3_quirk); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT6, loongson_d3_quirk); + /* * Some Loongson PCIe ports have hardware limitations on their Maximum Read * Request Size. They can't handle anything larger than this. Sane @@ -176,6 +191,91 @@ static void loongson_pci_msi_quirk(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT5, loongson_pci_msi_quirk); +static void loongson_display_quirk(struct pci_dev *dev) +{ + u32 val; + u64 mask, size; + u64 max_size = 0; + int i, num; + struct pci_bus *bus = dev->bus; + + if (!dev->bus->number) { + if (!(dev->vendor == PCI_VENDOR_ID_LOONGSON && dev->device == 0x7a25)) + return; + } else { + while (!pci_is_root_bus(bus->parent)) + bus = bus->parent; + + /* ensure slot is 7a2000 */ + if (bus->self->vendor != PCI_VENDOR_ID_LOONGSON || bus->self->device < 0x7a39) + return; + } + max_size = 0; + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if (dev->resource[i].flags & IORESOURCE_MEM) { + size = dev->resource[i].end - dev->resource[i].start; + if (size > max_size) { + max_size = size; + num = i; + } + } + } + mask = ~(dev->resource[num].end - dev->resource[num].start); + val = (dev->resource[num].start >> (24 - 16)) | ((mask >> 24) & 0xffff); + writel(val, (volatile void *)0x80000efdfb000174UL); + writel(0x80000000, (volatile void *)0x80000efdfb000170UL); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, 0x7a25, loongson_display_quirk); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, + PCI_BASE_CLASS_DISPLAY, 16, loongson_display_quirk); + +static void pci_fixup_aspeed(struct pci_dev *pdev) +{ + struct pci_dev *bridge; + struct pci_bus *bus; + struct pci_dev *vdevp = NULL; + u16 config; + + bus = pdev->bus; + bridge = bus->self; + + /* Is VGA routed to us? */ + if (bridge && (pci_is_bridge(bridge))) { + pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &config); + + /* Yes, this bridge is PCI bridge-to-bridge spec compliant, + * just return! 
+ */ + if (config & PCI_BRIDGE_CTL_VGA) + return; + + dev_warn(&pdev->dev, "VGA bridge control is not enabled\n"); + } + + /* Just return if the system already has a default device */ + if (vga_default_device()) + return; + + /* No default vga device */ + while ((vdevp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, vdevp))) { + if (vdevp->vendor != 0x1a03) { + /* Another VGA device is present in the system, do nothing */ + dev_info(&pdev->dev, + "Another boot vga device: 0x%x:0x%x\n", + vdevp->vendor, vdevp->device); + return; + } + } + + vga_set_default_device(pdev); + + dev_info(&pdev->dev, + "Boot vga device set as 0x%x:0x%x\n", + pdev->vendor, pdev->device); +} +DECLARE_PCI_FIXUP_CLASS_FINAL(0x1a03, 0x2000, + PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_aspeed); + static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus) { struct pci_config_window *cfg; @@ -255,6 +355,36 @@ static void __iomem *pci_loongson_map_bus(struct pci_bus *bus, return NULL; } +static int pci_loongson_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + void __iomem *addr; + + addr = bus->ops->map_bus(bus, devfn, where); + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (size == 1) + *val = readb(addr); + else if (size == 2) + *val = readw(addr); + else + *val = readl(addr); + /* + * Fix some PCIe cards that are not scanned properly when the bus + * number is inconsistent between the firmware and kernel scan phases. + */ + if (*val == 0x0 && where == PCI_VENDOR_ID) { + writel(*val, addr); + *val = readl(addr); + } + + return PCIBIOS_SUCCESSFUL; +} + #ifdef CONFIG_OF static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) @@ -278,7 +408,7 @@ static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) /* LS2K/LS7A accept 8/16/32-bit PCI config operations */ static struct pci_ops loongson_pci_ops = { .map_bus = pci_loongson_map_bus, - .read = pci_generic_config_read, + .read = pci_loongson_config_read, .write = pci_generic_config_write, }; @@ -321,6 +451,7 @@ static int loongson_pci_probe(struct platform_device *pdev) struct device_node *node = dev->of_node; struct pci_host_bridge *bridge; struct resource *regs; + unsigned int num = 0; if (!node) return -ENODEV; @@ -345,7 +476,9 @@ static int loongson_pci_probe(struct platform_device *pdev) } if (priv->data->flags & FLAG_CFG1) { - regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (priv->cfg0_base) + num = 1; + regs = platform_get_resource(pdev, IORESOURCE_MEM, num); if (!regs) dev_info(dev, "missing mem resource for cfg1\n"); else { @@ -402,7 +535,7 @@ const struct pci_ecam_ops loongson_pci_ecam_ops = { .init = loongson_pci_ecam_init, .pci_ops = { .map_bus = pci_loongson_map_bus, - .read = pci_generic_config_read, + .read = pci_loongson_config_read, .write = pci_generic_config_write, } };
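/*
 * Editor's note on pci_loongson_config_read() above: a PCI_VENDOR_ID read
 * returning 0 (not a valid vendor ID) is treated as a stale latch from the
 * firmware scan phase, so the value is written back and re-read once.
 * Standalone sketch of that retry; the helper name is hypothetical:
 */
static u32 example_vendor_id_retry(void __iomem *addr)
{
	u32 val = readl(addr);

	if (val == 0x0) {	/* invalid: nudge the device and re-read */
		writel(val, addr);
		val = readl(addr);
	}

	return val;
}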
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index b14dd064006cc..abd30336d9899 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -30,6 +30,9 @@ #include #include #include +#ifdef CONFIG_MACH_LOONGSON64 +#include +#endif #include "pci.h" DEFINE_MUTEX(pci_slot_mutex); @@ -170,6 +173,15 @@ static bool pci_bridge_d3_disable; /* Force bridge_d3 for all PCIe ports */ static bool pci_bridge_d3_force; +#ifdef CONFIG_MACH_LOONGSON64 + +#ifndef CONFIG_PM_SLEEP +suspend_state_t pm_suspend_target_state; +#define pm_suspend_target_state (PM_SUSPEND_ON) +#endif + +#endif + static int __init pcie_port_pm_setup(char *str) { if (!strcmp(str, "off")) @@ -5882,8 +5894,9 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) u16 v; int ret; unsigned int firstbit; +#ifdef CONFIG_MACH_LOONGSON64 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); - +#endif if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) return -EINVAL; @@ -5904,7 +5917,9 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) return -EINVAL; v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, firstbit - 8); - if (bridge->no_inc_mrrs) { +#ifdef CONFIG_MACH_LOONGSON64 + if (pm_suspend_target_state == PM_SUSPEND_ON && + bridge->no_inc_mrrs) { int max_mrrs = pcie_get_readrq(dev); if (rq > max_mrrs) { @@ -5912,6 +5927,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) return -EINVAL; } } +#endif ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_READRQ, v); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index b9c252aa6fe08..317110522d606 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -5588,6 +5588,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PHYTIUM, 0xdc3a, quirk_no_ext_tags); #ifdef CONFIG_PCI_ATS static void quirk_no_ats(struct pci_dev *pdev) diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 4f8507ebbdacd..676cdd6d268ea 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -688,6 +688,7 @@ source "drivers/pinctrl/berlin/Kconfig" source "drivers/pinctrl/cirrus/Kconfig" source "drivers/pinctrl/freescale/Kconfig" source "drivers/pinctrl/intel/Kconfig" +source "drivers/pinctrl/zhaoxin/Kconfig" source "drivers/pinctrl/mediatek/Kconfig" source "drivers/pinctrl/meson/Kconfig" source "drivers/pinctrl/mvebu/Kconfig" diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index e0cfb9b7c99ba..339e01a207aff 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -71,6 +71,7 @@ obj-$(CONFIG_PINCTRL_BERLIN) += berlin/ obj-y += cirrus/ obj-y += freescale/ obj-$(CONFIG_X86) += intel/ +obj-$(CONFIG_X86) += zhaoxin/ obj-y += mediatek/ obj-$(CONFIG_PINCTRL_MESON) += meson/ obj-y += mvebu/ diff --git a/drivers/pinctrl/zhaoxin/Kconfig b/drivers/pinctrl/zhaoxin/Kconfig new file mode 100644 index 0000000000000..dece29a29423f --- /dev/null +++ b/drivers/pinctrl/zhaoxin/Kconfig @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0 +# Zhaoxin pin control drivers + +if (X86 || COMPILE_TEST) + +config PINCTRL_ZHAOXIN + tristate + select PINMUX + select PINCONF + select GENERIC_PINCONF + select GPIOLIB + select GPIOLIB_IRQCHIP + +config PINCTRL_KX7000 + tristate "Zhaoxin KX7000 pinctrl and GPIO driver" + depends on ACPI + select PINCTRL_ZHAOXIN + help + This pinctrl driver provides an interface that allows configuring + Zhaoxin KX7000 chipset pins and using them as GPIOs.
+ +endif diff --git a/drivers/pinctrl/zhaoxin/Makefile b/drivers/pinctrl/zhaoxin/Makefile new file mode 100644 index 0000000000000..a3acfa66f196b --- /dev/null +++ b/drivers/pinctrl/zhaoxin/Makefile @@ -0,0 +1,4 @@ +# zhaoxin pin control drivers + +obj-$(CONFIG_PINCTRL_ZHAOXIN) += pinctrl-zhaoxin.o +obj-$(CONFIG_PINCTRL_KX7000) += pinctrl-kx7000.o diff --git a/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c b/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c new file mode 100644 index 0000000000000..8e83df0ef2a70 --- /dev/null +++ b/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * zhaoxin KX7000 pinctrl/GPIO driver + * + * Copyright(c) 2023 Shanghai Zhaoxin Corporation. All rights reserved. + * + */ + +#define DRIVER_VERSION "1.0.0" + +#include +#include +#include + +#include + +#include "pinctrl-zhaoxin.h" + +#define ZX_CAL_ARRAY(a, b) \ +{ \ + .pmio_offset = (a), \ + .size = (b), \ +} + +#define PMIO_RX90 100 +#define PMIO_RX8C 200 + +#define ZX_CAL_INDEX_ARRAY(a, b, c) \ +{ \ + .reg_port_base = (PMIO_RX90), \ + .reg_data_base = (PMIO_RX8C), \ + .index = (a), \ + .cal_array = (b), \ + .size = (c), \ +} + +/* kx7000 pin define */ +static const struct pinctrl_pin_desc kx7000_pins[] = { + + PINCTRL_PIN(0, "IOD_CPUTCK"), + PINCTRL_PIN(1, "IOD_CPUTMS"), + PINCTRL_PIN(2, "IOD_CPUTRST"), + PINCTRL_PIN(3, "IOD_CPUTDO"), + PINCTRL_PIN(4, "IOD_CPUTDI"), + PINCTRL_PIN(5, "IOD_ZLSCLK0"), + PINCTRL_PIN(6, "IOD_ZLDATA0"), + PINCTRL_PIN(7, "IOD_ZLSCLK1"), + PINCTRL_PIN(8, "IOD_ZLDATA1"), + PINCTRL_PIN(9, "IOD_CLK27M"), + PINCTRL_PIN(10, "IOD_CPURST"), + PINCTRL_PIN(11, "IOD_PWORK"), + PINCTRL_PIN(12, "IOD_RSMRST"), + PINCTRL_PIN(13, "IOD_THRMTRIP"), + //GPIO range 0 + PINCTRL_PIN(14, "USBHOC0"), + PINCTRL_PIN(15, "USBHOC1"), + PINCTRL_PIN(16, "USBHOC2"), + PINCTRL_PIN(17, "USBHOC3"), + PINCTRL_PIN(18, "USBHOC4"), + PINCTRL_PIN(19, "USBHOC5"), + PINCTRL_PIN(20, "USBHOC6"), + PINCTRL_PIN(21, "USBHOC7"), + //gpio range 1 + PINCTRL_PIN(22, "USB4SBTX0"), + PINCTRL_PIN(23, "USB4SBRX0"), + PINCTRL_PIN(24, "USB4SBTX1"), + PINCTRL_PIN(25, "USB4SBRX1"), + //gpio range 2 + PINCTRL_PIN(26, "I2C1DT"), + PINCTRL_PIN(27, "I2C1CK"), + PINCTRL_PIN(28, "I2C1INT"), + //gpio range 3 + PINCTRL_PIN(29, "I2C2DT"), + PINCTRL_PIN(30, "I2C2CK"), + //gpio range 4 + PINCTRL_PIN(31, "I2C2INT"), + //gpio range 5 + PINCTRL_PIN(32, "SMBDT1"), + PINCTRL_PIN(33, "SMBCK1"), + PINCTRL_PIN(34, "SMBDT2"), + PINCTRL_PIN(35, "SMBCK2"), + PINCTRL_PIN(36, "SMBALRT"), + //gpio range 6 + PINCTRL_PIN(37, "SME_I2CDT"), + PINCTRL_PIN(38, "SME_I2CCK"), + //gpio range 7 + PINCTRL_PIN(39, "PWM"), + PINCTRL_PIN(40, "TACH"), + //gpio range 8 + PINCTRL_PIN(41, "GPIO0"), + PINCTRL_PIN(42, "GPIO1"), + PINCTRL_PIN(43, "GPIO2"), + PINCTRL_PIN(44, "GPIO3"), + PINCTRL_PIN(45, "GPIO4"), + PINCTRL_PIN(46, "GPIO5"), + PINCTRL_PIN(47, "GPIO6"), + PINCTRL_PIN(48, "GPIO7"), + PINCTRL_PIN(49, "GPIO8"), + PINCTRL_PIN(50, "GPIO9"), + PINCTRL_PIN(51, "LPCCLK"), + PINCTRL_PIN(52, "LPCDRQ1"), + //gpio range 9 + PINCTRL_PIN(53, "LPCDRQ0"), + PINCTRL_PIN(54, "LPCFRAME"), + PINCTRL_PIN(55, "LPCAD3"), + PINCTRL_PIN(56, "LPCAD2"), + PINCTRL_PIN(57, "LPCAD1"), + PINCTRL_PIN(58, "LPCAD0"), + //gpio range 10 + PINCTRL_PIN(59, "SERIRQ"), + PINCTRL_PIN(60, "AZRST"), + PINCTRL_PIN(61, "AZBITCLK"), + PINCTRL_PIN(62, "AZSDIN0"), + PINCTRL_PIN(63, "AZSDIN1"), + PINCTRL_PIN(64, "AZSDOUT"), + PINCTRL_PIN(65, "AZSYNC"), + //gpio range 11 + PINCTRL_PIN(66, "I2S1_SCLK"), + PINCTRL_PIN(67, "I2S1_TXD"), + PINCTRL_PIN(68, "I2S1_WS"), + PINCTRL_PIN(69, 
"I2S1_MCLK"), + //gpio range 12 + PINCTRL_PIN(70, "I2S1_RXD"), + //gpio range 13 + PINCTRL_PIN(71, "I2S1_INT"), + PINCTRL_PIN(72, "MSPIDI"), + PINCTRL_PIN(73, "MSPIDO"), + PINCTRL_PIN(74, "MSPIIO2"), + PINCTRL_PIN(75, "MSPIIO3"), + PINCTRL_PIN(76, "MSPICLK"), + PINCTRL_PIN(77, "MSPISS0"), + //gpio range 14 + PINCTRL_PIN(78, "MSPISS1"), + PINCTRL_PIN(79, "MSPISS2"), + //gpio range 15 + PINCTRL_PIN(80, "SPIDEVINT"), + PINCTRL_PIN(81, "BIOSSEL"), + //gpio range 16 + PINCTRL_PIN(82, "THRM"), + PINCTRL_PIN(83, "PEXWAKE"), + PINCTRL_PIN(84, "PWRBTN"), + //gpio range 17 + PINCTRL_PIN(85, "SPKR"), + PINCTRL_PIN(86, "PME"), + //gpio range 18 + PINCTRL_PIN(87, "BATLOW"), + PINCTRL_PIN(88, "EXTSMI"), + PINCTRL_PIN(89, "SUSA"), + PINCTRL_PIN(90, "SUSB"), + PINCTRL_PIN(91, "SUSC"), + PINCTRL_PIN(92, "GPWAKE"), + PINCTRL_PIN(93, "RING"), + PINCTRL_PIN(94, "LID"), + PINCTRL_PIN(95, "SLPS0"), + PINCTRL_PIN(96, "PCIRST"), + PINCTRL_PIN(97, "SVID_VREN"), + //gpio range 19 + PINCTRL_PIN(98, "INTRUDER"), + //gpio range 20 + PINCTRL_PIN(99, "GFX_I2CCLK0"), + PINCTRL_PIN(100, "GFX_I2CDAT0"), + PINCTRL_PIN(101, "GFX_I2CCLK1"), + PINCTRL_PIN(102, "GFX_I2CDAT1"), + PINCTRL_PIN(103, "GFX_I2CCLK2"), + PINCTRL_PIN(104, "GFX_I2CDAT2"), + PINCTRL_PIN(105, "GFX_I2CCLK3"), + PINCTRL_PIN(106, "GFX_I2CDAT3"), + PINCTRL_PIN(107, "GFX_GPIO0"), + PINCTRL_PIN(108, "GFX_GPIO1"), + PINCTRL_PIN(109, "GFX_GPIO2"), + PINCTRL_PIN(110, "GFX_GPIO3"), + PINCTRL_PIN(111, "CRTHSYNC"), + PINCTRL_PIN(112, "CRTVSYNC"), +}; + +#define NOT_DEFINE -30000 + +static int calibrate_int[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 63, 64, 65, 66, 67, 68, + 69, 70, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 +}; + +static int calibrate_sattus[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 63, 64, 65, 66, 67, 68, + 69, 70, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 +}; + +static const struct reg_cal_array kx7000_int_cal[] = { + ZX_CAL_ARRAY(0x58, 16), + ZX_CAL_ARRAY(0x5A, 2), + ZX_CAL_ARRAY(0xDA, 16), + ZX_CAL_ARRAY(0xDE, 16), +}; + +static const struct reg_calibrate int_cal[] = { + { + .reg = kx7000_int_cal, + .reg_cal_size = ARRAY_SIZE(kx7000_int_cal), + .cal_array = calibrate_int, + .size = ARRAY_SIZE(calibrate_int), + } +}; + +static const struct reg_cal_array kx7000_status_cal[] = { + ZX_CAL_ARRAY((0x8), 16), + ZX_CAL_ARRAY((0xE), 2), + ZX_CAL_ARRAY((0xA), 16), + ZX_CAL_ARRAY((0xC), 16), +}; + +static const struct reg_calibrate status_cal[] = { + { + .reg = kx7000_status_cal, + .reg_cal_size = ARRAY_SIZE(kx7000_status_cal), + .cal_array = calibrate_sattus, + .size = ARRAY_SIZE(calibrate_sattus), + } +}; + +static const struct reg_cal_array kx7000_mod_sel_cal[] = { + ZX_CAL_ARRAY((0x0), 16), + ZX_CAL_ARRAY((0x6), 2), + ZX_CAL_ARRAY((0x2), 16), + ZX_CAL_ARRAY((0x4), 16), +}; + +static const struct reg_calibrate mod_sel_cal[] = { + { + .reg = kx7000_mod_sel_cal, + .reg_cal_size = ARRAY_SIZE(kx7000_mod_sel_cal), + .cal_array = calibrate_sattus, + .size = ARRAY_SIZE(calibrate_sattus), + } +}; + +static const struct index_cal_array kx7000_gpio_in_cal[] = { + ZX_CAL_INDEX_ARRAY(0x98, NULL, 71), +}; + +static const struct index_cal_array kx7000_gpio_out_cal[] = { + ZX_CAL_INDEX_ARRAY(0x90, NULL, 71), +}; + +static int calibrate_trigger[] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 18, 19, + 20, 21, 22, 23, + 24, 25, 26, 27, + 28, 29, 30, 31, + 32, 33, 34, 35, + 36, 50, 51, 52, + 53, 54, 55, 56, + 
57, 58, 59, 60, + 61, 62, 63, 64, + 65, 66, 67, 68, + 69, 70 +}; + +static const struct index_cal_array kx7000_trigger_cal[] = { + ZX_CAL_INDEX_ARRAY(0xA0, calibrate_trigger, 50), +}; + +static const struct zhaoxin_pin_topology kx7000_pin_topologys[] = { + { + .int_cal = int_cal, + .status_cal = status_cal, + .mod_sel_cal = mod_sel_cal, + .gpio_in_cal = kx7000_gpio_in_cal, + .gpio_out_cal = kx7000_gpio_out_cal, + .trigger_cal = kx7000_trigger_cal, + } +}; + +#define KX7000_GPP(s, e, g) \ +{ \ + .zhaoxin_range_pin_base = (s), \ + .zhaoxin_range_pin_size = ((e) - (s) + 1), \ + .zhaoxin_range_gpio_base = (g), \ +} + +static const struct zhaoxin_pin_map2_gpio kx7000_pinmap_gpps[] = { + KX7000_GPP(0, 13, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(14, 19, 10), + KX7000_GPP(20, 21, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(22, 25, 65), + KX7000_GPP(26, 28, 43), + KX7000_GPP(29, 30, 41), + KX7000_GPP(31, 31, 49), + KX7000_GPP(32, 36, 16), + KX7000_GPP(37, 38, 69), + KX7000_GPP(39, 40, 67), + KX7000_GPP(41, 50, 0), + KX7000_GPP(51, 52, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(53, 53, 39), + KX7000_GPP(54, 58, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(59, 59, 40), + KX7000_GPP(60, 65, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(66, 69, 35), + KX7000_GPP(70, 70, 46), + KX7000_GPP(71, 71, 64), + KX7000_GPP(72, 77, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(78, 78, 50), + KX7000_GPP(79, 79, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(80, 80, 51), + KX7000_GPP(81, 81, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(82, 82, 52), + KX7000_GPP(83, 84, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(85, 85, 53), + KX7000_GPP(86, 86, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(87, 95, 54), + KX7000_GPP(96, 97, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(98, 98, 63), + KX7000_GPP(99, 112, 21), +}; + +static const struct zhaoxin_pinctrl_soc_data kx7000_soc_data = { + .pins = kx7000_pins, + .npins = ARRAY_SIZE(kx7000_pins), + .pin_topologys = kx7000_pin_topologys, + .zhaoxin_pin_maps = kx7000_pinmap_gpps, + .pin_map_size = ARRAY_SIZE(kx7000_pinmap_gpps), +}; + +static const struct acpi_device_id kx7000_pinctrl_acpi_match[] = { + { "KX8344B", (kernel_ulong_t)&kx7000_soc_data }, + { } +}; +MODULE_DEVICE_TABLE(acpi, kx7000_pinctrl_acpi_match); + +static const struct dev_pm_ops kx7000_pinctrl_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(zhaoxin_pinctrl_suspend_noirq, zhaoxin_pinctrl_resume_noirq) +}; + +static struct platform_driver kx7000_pinctrl_driver = { + .probe = zhaoxin_pinctrl_probe_by_hid, + .driver = { + .name = "kx7000-pinctrl", + .acpi_match_table = kx7000_pinctrl_acpi_match, + .pm = &kx7000_pinctrl_pm_ops, + }, +}; + +module_platform_driver(kx7000_pinctrl_driver); + +MODULE_AUTHOR("www.zhaoxin.com"); +MODULE_DESCRIPTION("Shanghai Zhaoxin pinctrl driver"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c new file mode 100644 index 0000000000000..4a8b4adc9160a --- /dev/null +++ b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c @@ -0,0 +1,745 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * zhaoxin pinctrl common code + * + * Copyright(c) 2021 Shanghai Zhaoxin Corporation. All rights reserved. 
+ * + */ + +#define DRIVER_VERSION "1.0.0" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "../core.h" +#include "pinctrl-zhaoxin.h" + +static u16 zx_pad_read16(struct zhaoxin_pinctrl *pctrl, u8 index) +{ + outb(index, pctrl->pmio_rx90+pctrl->pmio_base); + return inw(pctrl->pmio_rx8c+pctrl->pmio_base); +} + +static void zx_pad_write16(struct zhaoxin_pinctrl *pctrl, u8 index, u16 value) +{ + outb(index, pctrl->pmio_rx90+pctrl->pmio_base); + outw(value, pctrl->pmio_rx8c+pctrl->pmio_base); +} + +static int zhaoxin_get_groups_count(struct pinctrl_dev *pctldev) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->ngroups; +} + +static const char *zhaoxin_get_group_name(struct pinctrl_dev *pctldev, unsigned int group) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->groups[group].name; +} + +static int zhaoxin_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group, + const unsigned int **pins, unsigned int *npins) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + *pins = pctrl->soc->groups[group].pins; + *npins = pctrl->soc->groups[group].npins; + + return 0; +} + +static void zhaoxin_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned int pin) +{ + +} + +static const struct pinctrl_ops zhaoxin_pinctrl_ops = { + .get_groups_count = zhaoxin_get_groups_count, + .get_group_name = zhaoxin_get_group_name, + .get_group_pins = zhaoxin_get_group_pins, + .pin_dbg_show = zhaoxin_pin_dbg_show, +}; + +static int zhaoxin_get_functions_count(struct pinctrl_dev *pctldev) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->nfunctions; +} + +static const char *zhaoxin_get_function_name(struct pinctrl_dev *pctldev, unsigned int function) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->functions[function].name; +} + +static int zhaoxin_get_function_groups(struct pinctrl_dev *pctldev, unsigned int function, + const char *const **groups, unsigned int *const ngroups) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + *groups = pctrl->soc->functions[function].groups; + *ngroups = pctrl->soc->functions[function].ngroups; + + return 0; +} + +static int zhaoxin_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, + unsigned int group) +{ + return 0; +} + +#define ZHAOXIN_PULL_UP_20K 0x80 +#define ZHAOXIN_PULL_UP_10K 0x40 +#define ZHAOXIN_PULL_UP_47K 0x20 +#define ZHAOXIN_PULL_DOWN 0x10 + +#define ZHAOXIN_PULL_UP 0xe0 + +static void zhaoxin_gpio_set_gpio_mode_and_pull(struct zhaoxin_pinctrl *pctrl, unsigned int pin, + bool isup) +{ + u16 tmp = 0; + u16 value; + u16 value_back = 0; + + if (isup) + tmp = ZHAOXIN_PULL_UP_10K|1; + else + tmp = ZHAOXIN_PULL_DOWN|1; + value = zx_pad_read16(pctrl, pin); + + if (pin <= 0x32 && pin >= 0x29) { + if (isup) { + value &= (~(ZHAOXIN_PULL_DOWN)); + value |= tmp; + } else { + value &= (~(ZHAOXIN_PULL_UP)); + value |= tmp; + } + value &= ~(0x1); + zx_pad_write16(pctrl, pin, value); + value_back = zx_pad_read16(pctrl, pin); + } else { + if (isup) { + value &= (~(ZHAOXIN_PULL_DOWN)); + value |= tmp; + } else { + value &= (~(ZHAOXIN_PULL_UP)); + value |= tmp; + } + value |= 0x1; + zx_pad_write16(pctrl, pin, value); + value_back = zx_pad_read16(pctrl, pin); + } +} + +static int zhaoxin_gpio_request_enable(struct pinctrl_dev *pctldev, + 
struct pinctrl_gpio_range *range, unsigned int pin) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + zhaoxin_gpio_set_gpio_mode_and_pull(pctrl, pin, true); + + return 0; +} + +static const struct pinmux_ops zhaoxin_pinmux_ops = { + .get_functions_count = zhaoxin_get_functions_count, + .get_function_name = zhaoxin_get_function_name, + .get_function_groups = zhaoxin_get_function_groups, + .set_mux = zhaoxin_pinmux_set_mux, + .gpio_request_enable = zhaoxin_gpio_request_enable, +}; + +static int zhaoxin_config_get(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *config) +{ + return 0; +} + +static int zhaoxin_config_set(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *configs, + unsigned int nconfigs) +{ + return 0; +} + +static const struct pinconf_ops zhaoxin_pinconf_ops = { + .is_generic = true, + .pin_config_get = zhaoxin_config_get, + .pin_config_set = zhaoxin_config_set, +}; + +static const struct pinctrl_desc zhaoxin_pinctrl_desc = { + .pctlops = &zhaoxin_pinctrl_ops, + .pmxops = &zhaoxin_pinmux_ops, + .confops = &zhaoxin_pinconf_ops, + .owner = THIS_MODULE, +}; + +static int zhaoxin_gpio_to_pin(struct zhaoxin_pinctrl *pctrl, unsigned int offset, + const struct zhaoxin_pin_topology **community, + const struct zhaoxin_pin_map2_gpio **padgrp) +{ + int i; + + for (i = 0; i < pctrl->pin_map_size; i++) { + const struct zhaoxin_pin_map2_gpio *map = &pctrl->pin_maps[i]; + + if (map->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP) + continue; + if (offset >= map->zhaoxin_range_gpio_base && + offset < map->zhaoxin_range_gpio_base + map->zhaoxin_range_pin_size) { + int pin; + + pin = map->zhaoxin_range_pin_base + offset - map->zhaoxin_range_gpio_base; + if (padgrp) + *padgrp = map; + return pin; + } + } + + return -EINVAL; +} + +static __maybe_unused int zhaoxin_pin_to_gpio(struct zhaoxin_pinctrl *pctrl, int pin) +{ + const struct zhaoxin_pin_map2_gpio *pin_maps; + + pin_maps = pctrl->pin_maps; + if (!pin_maps) + return -EINVAL; + + return pin - pin_maps->zhaoxin_range_pin_base + pin_maps->zhaoxin_range_gpio_base; +} + +static int zhaoxin_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(chip); + const struct index_cal_array *gpio_in_cal; + int gap = offset/16; + int bit = offset%16; + int pin; + int value; + + gpio_in_cal = pctrl->pin_topologys->gpio_in_cal; + pin = zhaoxin_gpio_to_pin(pctrl, offset, NULL, NULL); + value = zx_pad_read16(pctrl, gpio_in_cal->index+gap); + value &= (1 << bit); + + return !!value; +} + +static int zhaoxin_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) +{ + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(chip); + const struct index_cal_array *gpio_out_cal; + int gap = offset/16; + int bit = offset%16; + unsigned long flags; + int pin; + u16 org; + + gpio_out_cal = pctrl->pin_topologys->gpio_out_cal; + pin = zhaoxin_gpio_to_pin(pctrl, offset, NULL, NULL); + + raw_spin_lock_irqsave(&pctrl->lock, flags); + + org = zx_pad_read16(pctrl, gpio_out_cal->index+gap); + if (value) + org |= (1 << bit); + else + org &= ~(1 << bit); + zx_pad_write16(pctrl, gpio_out_cal->index+gap, org); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + return 0; +} + +static int zhaoxin_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) +{ + return pinctrl_gpio_direction_input(chip, offset); +} + +static int zhaoxin_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) +{ + return pinctrl_gpio_direction_output(chip, offset); +} + +static int zhaoxin_gpio_request(struct gpio_chip *gc, unsigned int offset) +{ + return gpiochip_generic_request(gc, offset); +} + +static void zhaoxin_gpio_free(struct gpio_chip *gc, unsigned int offset) +{ + gpiochip_generic_free(gc, offset); +} + +static int zhaoxin_gpio_config(struct gpio_chip *gc, unsigned int offset, unsigned long config) +{ + return gpiochip_generic_config(gc, offset, config); +} +
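For reference, the pad-state registers touched by zhaoxin_gpio_get()/zhaoxin_gpio_set() above are banked 16 bits at a time: a GPIO offset is split into a register index (offset / 16, added to the per-SoC gpio_in_cal/gpio_out_cal base index) and a bit position (offset % 16) within that register. A minimal standalone sketch of that decomposition; the base index 0x40 is hypothetical, the real values come from the per-SoC index_cal_array tables:

#include <stdio.h>

/* Mirrors the gap/bit math in zhaoxin_gpio_get()/zhaoxin_gpio_set();
 * illustration only, not driver code. */
static void decompose(unsigned int offset, unsigned int base_index)
{
	unsigned int gap = offset / 16;	/* which 16-bit pad register */
	unsigned int bit = offset % 16;	/* bit inside that register */

	printf("gpio %3u -> pad index 0x%02x, mask 0x%04x\n",
	       offset, base_index + gap, 1u << bit);
}

int main(void)
{
	unsigned int base_index = 0x40;	/* hypothetical gpio_in_cal->index */

	decompose(0, base_index);
	decompose(17, base_index);
	decompose(35, base_index);
	return 0;
}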
+static const struct gpio_chip zhaoxin_gpio_chip = { + .owner = THIS_MODULE, + .request = zhaoxin_gpio_request, + .free = zhaoxin_gpio_free, + .direction_input = zhaoxin_gpio_direction_input, + .direction_output = zhaoxin_gpio_direction_output, + .get = zhaoxin_gpio_get, + .set = zhaoxin_gpio_set, + .set_config = zhaoxin_gpio_config, +}; + +static void zhaoxin_gpio_irq_ack(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc); + const struct reg_calibrate *status_cal; + const struct reg_cal_array *reg_off; + int gpio = irqd_to_hwirq(d); + int i, j; + int offset = 0; + int base_offset = 0; + int bit_off = 0; + u16 value; + u16 value_read; + + status_cal = pctrl->pin_topologys->status_cal; + if (gpio >= 0) { + for (i = 0; i < status_cal->size; i++) + if (gpio == status_cal->cal_array[i]) + break; + for (j = 0; j < status_cal->reg_cal_size; j++) { + if (offset > i) + break; + offset += status_cal->reg[j].size; + } + reg_off = &status_cal->reg[j-1]; + bit_off = i-(offset-reg_off->size); + base_offset = reg_off->pmio_offset; + value = readw(pctrl->pm_pmio_base+reg_off->pmio_offset); + value_read = value; + value |= (1 << bit_off); + writew(value, pctrl->pm_pmio_base+reg_off->pmio_offset); + } +} + +static void zhaoxin_gpio_irq_mask_unmask(struct irq_data *d, bool mask) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc); + const struct reg_calibrate *int_cal; + const struct reg_calibrate *mod_sel_cal; + int gpio = irqd_to_hwirq(d); + int i, j; + int offset = 0; + int base_offset = 0; + const struct reg_cal_array *reg_off, *mod; + int bit_off = 0; + u16 value; + u16 value1; + + int_cal = pctrl->pin_topologys->int_cal; + mod_sel_cal = pctrl->pin_topologys->mod_sel_cal; + + if (gpio >= 0) { + for (i = 0; i < int_cal->size; i++) + if (gpio == int_cal->cal_array[i]) + break; + for (j = 0; j < int_cal->reg_cal_size; j++) { + if (offset > i) + break; + offset += int_cal->reg[j].size; + } + reg_off = &(int_cal->reg[j-1]); + mod = &(mod_sel_cal->reg[j-1]); + bit_off = i-(offset-reg_off->size); + base_offset = reg_off->pmio_offset; + + value = inw(pctrl->pmio_base+reg_off->pmio_offset); + if (mask) + value &= (~(1 << bit_off)); + else + value |= (1 << bit_off); + outw(value, pctrl->pmio_base+reg_off->pmio_offset); + if (mask) { + value1 = readw(pctrl->pm_pmio_base+mod->pmio_offset); + value1 |= (1 << bit_off); + writew(value1, pctrl->pm_pmio_base+mod->pmio_offset); + } else { + value1 = readw(pctrl->pm_pmio_base+mod->pmio_offset); + value1 |= (1 << bit_off); + writew(value1, pctrl->pm_pmio_base+mod->pmio_offset); + } + } +} + +static void zhaoxin_gpio_irq_mask(struct irq_data *d) +{ + zhaoxin_gpio_irq_mask_unmask(d, true); +} + +static void zhaoxin_gpio_irq_unmask(struct irq_data *d) +{ + zhaoxin_gpio_irq_mask_unmask(d, false); +} + +/* + * parent domain interrupt handler + */ +static irqreturn_t zhaoxin_gpio_irq(int irq, void *data) +{ + struct zhaoxin_pinctrl *pctrl = data; + struct gpio_chip *gc = &pctrl->chip; + const struct reg_calibrate *init; + const struct reg_calibrate *stat_cal; + unsigned int i, bit_offset; + u16 status, enable; + unsigned long pending; + int index = 0; + int ret = 0; + int subirq; + unsigned int hwirq; + + init = pctrl->pin_topologys->int_cal; + stat_cal = pctrl->pin_topologys->status_cal; + for (i = 0; i < init->reg_cal_size; i++) { + pending = 0; + status = readw(pctrl->pm_pmio_base + stat_cal->reg[i].pmio_offset); + enable = inw(pctrl->pmio_base + init->reg[i].pmio_offset); + enable &= status; + pending = enable; + for_each_set_bit(bit_offset, &pending, init->reg[i].size) { + hwirq = init->cal_array[index + bit_offset]; + subirq = irq_find_mapping(gc->irq.domain, hwirq); + generic_handle_irq(subirq); + } + + ret += pending ? 1 : 0; + index += init->reg[i].size; + } + + return IRQ_RETVAL(ret); +} + +static int zhaoxin_gpio_irq_type(struct irq_data *d, unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc); + unsigned int gpio = irqd_to_hwirq(d); + const struct index_cal_array *trigger_cal; + unsigned int pin; + unsigned long flags; + u8 index; + int position, point; + u16 value; + bool isup = true; + + trigger_cal = pctrl->pin_topologys->trigger_cal; + pin = zhaoxin_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL); + if (type & IRQ_TYPE_EDGE_FALLING) + isup = true; + else if (type & IRQ_TYPE_EDGE_RISING) + isup = true; + else if (type & IRQ_TYPE_LEVEL_LOW) + isup = true; + else if (type & IRQ_TYPE_LEVEL_HIGH) + isup = false; + + zhaoxin_gpio_set_gpio_mode_and_pull(pctrl, pin, isup); + + for (position = 0; position < trigger_cal->size; position++) + if (trigger_cal->cal_array[position] == gpio) + break; + + index = trigger_cal->index + ALIGN(position+1, 4)/4-1; + point = position % 4; + + raw_spin_lock_irqsave(&pctrl->lock, flags); + + value = zx_pad_read16(pctrl, index); + + if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) + value |= TRIGGER_BOTH_EDGE << (point*4); + else if (type & IRQ_TYPE_EDGE_FALLING) + value |= TRIGGER_FALL_EDGE << (point*4); + else if (type & IRQ_TYPE_EDGE_RISING) + value |= TRIGGER_RISE_EDGE << (point*4); + else if (type & IRQ_TYPE_LEVEL_LOW) + value |= TRIGGER_LOW_LEVEL << (point*4); + else if (type & IRQ_TYPE_LEVEL_HIGH) + value |= TRIGGER_HIGH_LEVEL << (point*4); + else + pr_debug("%s wrong type\n", __func__); + + zx_pad_write16(pctrl, index, value); + + if (type & IRQ_TYPE_EDGE_BOTH) + irq_set_handler_locked(d, handle_edge_irq); + else if (type & IRQ_TYPE_LEVEL_MASK) + irq_set_handler_locked(d, handle_level_irq); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + return 0; +} + +static int zhaoxin_gpio_irq_wake(struct irq_data *d, unsigned int on) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc); + unsigned int pin; + + pin = zhaoxin_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL); + if (pin) { + if (on) + enable_irq_wake(pctrl->irq); + else + disable_irq_wake(pctrl->irq); + } + + return 0; +} + +static int zhaoxin_gpio_add_pin_ranges(struct gpio_chip *gc) +{ + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc); + int ret, i; + + for (i = 0; i < pctrl->pin_map_size; i++) { + struct zhaoxin_pin_map2_gpio *map = &pctrl->pin_maps[i]; + + if (map->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP) + continue; + ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), + map->zhaoxin_range_gpio_base, + map->zhaoxin_range_pin_base, + map->zhaoxin_range_pin_size); + if (ret) { + dev_err(pctrl->dev, "failed to add GPIO pin range\n"); + return ret; + } + } + + return 0; +} + +static unsigned int zhaoxin_gpio_ngpio(const struct zhaoxin_pinctrl *pctrl) +{ + const struct zhaoxin_pin_map2_gpio *pin_maps; + unsigned int ngpio = 0; + int i; + + for (i = 0; i < pctrl->pin_map_size; i++) { + pin_maps = &pctrl->pin_maps[i]; + if (pin_maps->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP) + continue; + if (pin_maps->zhaoxin_range_gpio_base + pin_maps->zhaoxin_range_pin_size > ngpio) + ngpio = pin_maps->zhaoxin_range_gpio_base + + pin_maps->zhaoxin_range_pin_size; + } + + return ngpio; +} + +static int zhaoxin_gpio_probe(struct zhaoxin_pinctrl *pctrl, int irq) +{ + int ret; + struct gpio_irq_chip *girq; + + pctrl->chip = zhaoxin_gpio_chip; + + pctrl->chip.ngpio = zhaoxin_gpio_ngpio(pctrl); + pctrl->chip.label = dev_name(pctrl->dev); + pctrl->chip.parent = pctrl->dev; + pctrl->chip.base = -1; + pctrl->chip.add_pin_ranges = zhaoxin_gpio_add_pin_ranges; + + pctrl->irq = irq; + + pctrl->irqchip.name = dev_name(pctrl->dev); + pctrl->irqchip.irq_ack = zhaoxin_gpio_irq_ack; + pctrl->irqchip.irq_mask = zhaoxin_gpio_irq_mask; + pctrl->irqchip.irq_unmask = zhaoxin_gpio_irq_unmask; + pctrl->irqchip.irq_set_type = zhaoxin_gpio_irq_type; + pctrl->irqchip.irq_set_wake = zhaoxin_gpio_irq_wake; + pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND; + /* + * request the parent domain interrupt + */ + ret = devm_request_irq(pctrl->dev, irq, zhaoxin_gpio_irq, IRQF_SHARED | IRQF_NO_THREAD, + dev_name(pctrl->dev), pctrl); + if (ret) { + dev_err(pctrl->dev, "failed to request interrupt\n"); + return ret; + } + girq = &pctrl->chip.irq; + girq->chip = &pctrl->irqchip; + /* This will let us handle the IRQ in the driver */ + girq->parent_handler = NULL; + girq->num_parents = 0; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_bad_irq; + ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl); + if (ret) { + dev_err(pctrl->dev, "failed to register gpiochip\n"); + return ret; + } + + return 0; +} + +static int zhaoxin_pinctrl_pm_init(struct zhaoxin_pinctrl *pctrl) +{ + return 0; +} + +static int zhaoxin_pinctrl_probe(struct platform_device *pdev, + const struct zhaoxin_pinctrl_soc_data *soc_data) +{ + struct zhaoxin_pinctrl *pctrl; + int ret, i, irq; + struct resource *res; + void __iomem *regs; + + pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL); + if (!pctrl) + return -ENOMEM; + pctrl->dev = &pdev->dev; + pctrl->soc = soc_data; + raw_spin_lock_init(&pctrl->lock); + pctrl->pin_topologys = pctrl->soc->pin_topologys; + pctrl->pin_map_size = pctrl->soc->pin_map_size; + pctrl->pin_maps = devm_kcalloc(&pdev->dev, pctrl->pin_map_size, sizeof(*pctrl->pin_maps), + GFP_KERNEL); + if (!pctrl->pin_maps) + return -ENOMEM; + for (i = 0; i < pctrl->pin_map_size; i++) { + struct zhaoxin_pin_map2_gpio *community = &pctrl->pin_maps[i]; + *community = pctrl->soc->zhaoxin_pin_maps[i]; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + pctrl->pm_pmio_base = regs; + pctrl->pmio_base = 0x800; + pctrl->pmio_rx90 = 0x90; + pctrl->pmio_rx8c = 0x8c; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = zhaoxin_pinctrl_pm_init(pctrl); + if (ret) + return ret; + pctrl->pctldesc = zhaoxin_pinctrl_desc; + pctrl->pctldesc.name = dev_name(&pdev->dev); + pctrl->pctldesc.pins = pctrl->soc->pins; + pctrl->pctldesc.npins = pctrl->soc->npins; + pctrl->pctldev = devm_pinctrl_register(&pdev->dev, &pctrl->pctldesc, pctrl); + if (IS_ERR(pctrl->pctldev)) { + dev_err(&pdev->dev, "failed to register pinctrl driver\n"); + return PTR_ERR(pctrl->pctldev); + } + ret = zhaoxin_gpio_probe(pctrl, irq); + if (ret) + return ret; + platform_set_drvdata(pdev, pctrl); + return 0; +} + +int zhaoxin_pinctrl_probe_by_hid(struct platform_device *pdev) +{ + const struct zhaoxin_pinctrl_soc_data *data; + + data = device_get_match_data(&pdev->dev); + if (!data) + return -ENODATA; + + return zhaoxin_pinctrl_probe(pdev, data); +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_probe_by_hid); + +int zhaoxin_pinctrl_probe_by_uid(struct platform_device 
*pdev) +{ + const struct zhaoxin_pinctrl_soc_data *data; + + data = zhaoxin_pinctrl_get_soc_data(pdev); + if (IS_ERR(data)) + return PTR_ERR(data); + + return zhaoxin_pinctrl_probe(pdev, data); +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_probe_by_uid); + + +const struct zhaoxin_pinctrl_soc_data *zhaoxin_pinctrl_get_soc_data(struct platform_device *pdev) +{ + const struct zhaoxin_pinctrl_soc_data *data = NULL; + const struct zhaoxin_pinctrl_soc_data **table; + struct acpi_device *adev; + unsigned int i; + + adev = ACPI_COMPANION(&pdev->dev); + if (adev) { + const void *match = device_get_match_data(&pdev->dev); + + table = (const struct zhaoxin_pinctrl_soc_data **)match; + for (i = 0; table[i]; i++) { + if (!strcmp(adev->pnp.unique_id, table[i]->uid)) { + data = table[i]; + break; + } + } + } else { + const struct platform_device_id *id; + + id = platform_get_device_id(pdev); + if (!id) + return ERR_PTR(-ENODEV); + + table = (const struct zhaoxin_pinctrl_soc_data **)id->driver_data; + data = table[pdev->id]; + } + + return data ?: ERR_PTR(-ENODATA); +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_get_soc_data); + +#ifdef CONFIG_PM_SLEEP +int zhaoxin_pinctrl_suspend_noirq(struct device *dev) +{ + return 0; +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_suspend_noirq); + +int zhaoxin_pinctrl_resume_noirq(struct device *dev) +{ + return 0; +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_resume_noirq); +#endif /* CONFIG_PM_SLEEP */ + +MODULE_AUTHOR("www.zhaoxin.com"); +MODULE_DESCRIPTION("Shanghai Zhaoxin pinctrl driver"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h new file mode 100644 index 0000000000000..88ef00796729f --- /dev/null +++ b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * zhaoxin pinctrl common code + * Copyright(c) 2023 Shanghai Zhaoxin Corporation. All rights reserved. 
+ */ + +#ifndef PINCTRL_zhaoxin_H +#define PINCTRL_zhaoxin_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct platform_device; +struct device; + +/* + * struct zhaoxin_pingroup pin define + */ +struct zhaoxin_pingroup { + const char *name; + const unsigned int *pins; + size_t npins; + unsigned short mode; + const unsigned int *modes; +}; + +/* + * struct zhaoxin_function + */ +struct zhaoxin_function { + const char *name; + const char * const *groups; + size_t ngroups; +}; + +/* + * struct zhaoxin_pin_map2_gpio + * @zhaoxin_range_pin_base + * @size: pin number + * @zhaoxin_range_gpio_base + */ +struct zhaoxin_pin_map2_gpio { + unsigned int zhaoxin_range_pin_base; + unsigned int zhaoxin_range_pin_size; + int zhaoxin_range_gpio_base; +}; + +#define MAX_GPIO 256 + +struct reg_cal_array { + int pmio_offset; + int size; +}; + +struct reg_calibrate { + const struct reg_cal_array *reg; + const int reg_cal_size; + const int *cal_array; + const int size; +}; + +struct index_cal_array { + int reg_port_base; + int reg_data_base; + int index; + int *cal_array; + int size; +}; + +struct zhaoxin_pin_topology { + const struct reg_calibrate *int_cal; + const struct reg_calibrate *mod_sel_cal; + const struct reg_calibrate *status_cal; + const struct index_cal_array *gpio_in_cal; + const struct index_cal_array *gpio_out_cal; + const struct index_cal_array *gpio_dir_cal; + const struct index_cal_array *trigger_cal; +}; + +#define TRIGGER_FALL_EDGE 0 +#define TRIGGER_RISE_EDGE 1 +#define TRIGGER_BOTH_EDGE 2 +#define TRIGGER_LOW_LEVEL 3 +#define TRIGGER_HIGH_LEVEL 4 + +#define ZHAOXIN_GPIO_BASE_NOMAP -1 + +struct zhaoxin_pinctrl_soc_data { + const char *uid; + const struct pinctrl_pin_desc *pins; + size_t npins; + const struct zhaoxin_pingroup *groups; + size_t ngroups; + const struct zhaoxin_function *functions; + size_t nfunctions; + const struct zhaoxin_pin_topology *pin_topologys; + const struct zhaoxin_pin_map2_gpio *zhaoxin_pin_maps; + size_t pin_map_size; +}; + +const struct zhaoxin_pinctrl_soc_data *zhaoxin_pinctrl_get_soc_data(struct platform_device *pdev); + +struct zhaoxin_pinctrl { + struct device *dev; + raw_spinlock_t lock; + struct pinctrl_desc pctldesc; + struct pinctrl_dev *pctldev; + struct gpio_chip chip; + struct irq_chip irqchip; + const struct zhaoxin_pinctrl_soc_data *soc; + const struct zhaoxin_pin_topology *pin_topologys; + struct zhaoxin_pin_map2_gpio *pin_maps; + size_t pin_map_size; + int irq; + int pmio_base; + void __iomem *pm_pmio_base; + int pmio_rx90; + int pmio_rx8c; +}; + +int zhaoxin_pinctrl_probe_by_hid(struct platform_device *pdev); +int zhaoxin_pinctrl_probe_by_uid(struct platform_device *pdev); + +#ifdef CONFIG_PM_SLEEP +int zhaoxin_pinctrl_suspend_noirq(struct device *dev); +int zhaoxin_pinctrl_resume_noirq(struct device *dev); +#endif + +#endif /* PINCTRL_zhaoxin_H */ diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c index dbd2d5835f002..a84580a840fea 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -12,6 +12,22 @@ #define UIP_RECHECK_DELAY_MS (USEC_PER_MSEC / UIP_RECHECK_DELAY) #define UIP_RECHECK_LOOPS_MS(x) (x / UIP_RECHECK_DELAY_MS) +#ifdef CONFIG_X86 +static inline bool follow_mc146818_divider_reset(void) +{ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) && + (boot_cpu_data.x86 <= 7 && boot_cpu_data.x86_model <= 59)) + return false; + return true; +} +#else +static inline bool 
follow_mc146818_divider_reset(void) +{ + return true; +} +#endif + /* * Execute a function while the UIP (Update-in-progress) bit of the RTC is * unset. The timeout is configurable by the caller in ms. @@ -278,12 +294,13 @@ int mc146818_set_time(struct rtc_time *time) spin_lock_irqsave(&rtc_lock, flags); save_control = CMOS_READ(RTC_CONTROL); CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); - save_freq_select = CMOS_READ(RTC_FREQ_SELECT); - if (apply_amd_register_a_behavior()) - CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT); - else - CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); - + if (follow_mc146818_divider_reset()) { + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); + if (apply_amd_register_a_behavior()) + CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT); + else + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + } #ifdef CONFIG_MACH_DECSTATION CMOS_WRITE(real_yrs, RTC_DEC_YEAR); #endif @@ -300,7 +317,8 @@ int mc146818_set_time(struct rtc_time *time) #endif CMOS_WRITE(save_control, RTC_CONTROL); - CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + if (follow_mc146818_divider_reset()) + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); spin_unlock_irqrestore(&rtc_lock, flags); diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig index 50d2051f7f20b..45dca3c21d907 100644 --- a/drivers/tee/optee/Kconfig +++ b/drivers/tee/optee/Kconfig @@ -30,3 +30,33 @@ config OPTEE_STATIC_PROTMEM_POOL bool depends on HAS_IOMEM && TEE_DMABUF_HEAPS default y + +if OPTEE + +choice + prompt "Default conduit method" + default OPTEE_DEFAULT_METHOD_NONE + help + This option sets the default conduit method for OP-TEE in case + the firmware does not provide a "method" property. If in doubt, + select "none", which relies on the firmware to supply the value. + +config OPTEE_DEFAULT_METHOD_NONE + bool "none" + help + There is no default conduit method used by the driver. The + firmware must provide the method explicitly. + +config OPTEE_DEFAULT_METHOD_HVC + bool "hvc" + help + Use "hvc" as the default conduit method. + +config OPTEE_DEFAULT_METHOD_SMC + bool "smc" + help + Use "smc" as the default conduit method. 
+ +endchoice + +endif diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index 0be663fcd52b1..8bb2bd6b82df9 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -6,6 +6,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/acpi.h> #include #include #include @@ -1474,6 +1475,14 @@ static void optee_smccc_hvc(unsigned long a0, unsigned long a1, arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res); } +#if defined(CONFIG_OPTEE_DEFAULT_METHOD_HVC) +#define DEFAULT_CONDUIT_METHOD optee_smccc_hvc +#elif defined(CONFIG_OPTEE_DEFAULT_METHOD_SMC) +#define DEFAULT_CONDUIT_METHOD optee_smccc_smc +#else +#define DEFAULT_CONDUIT_METHOD ERR_PTR(-ENXIO) +#endif + static optee_invoke_fn *get_invoke_func(struct device *dev) { const char *method; @@ -1482,7 +1491,7 @@ static optee_invoke_fn *get_invoke_func(struct device *dev) if (device_property_read_string(dev, "method", &method)) { pr_warn("missing \"method\" property\n"); - return ERR_PTR(-ENXIO); + return DEFAULT_CONDUIT_METHOD; } if (!strcmp("hvc", method)) @@ -1952,6 +1961,14 @@ static const struct of_device_id optee_dt_match[] = { }; MODULE_DEVICE_TABLE(of, optee_dt_match); +#ifdef CONFIG_ACPI +static const struct acpi_device_id optee_acpi_match[] = { + { "PHYT8003" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, optee_acpi_match); +#endif + static struct platform_driver optee_driver = { .probe = optee_probe, .remove = optee_smc_remove, @@ -1959,6 +1976,7 @@ static struct platform_driver optee_driver = { .driver = { .name = "optee", .of_match_table = optee_dt_match, + .acpi_match_table = ACPI_PTR(optee_acpi_match), }, }; diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 282116765e648..0a98a7b731bf1 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -85,6 +85,18 @@ config SERIAL_EARLYCON_SEMIHOST This is enabled with "earlycon=smh" on the kernel command line. The console is enabled when early_param is processed. +config SERIAL_PHYTIUM_PCI + tristate "Phytium PCI serial port support" + depends on PCI + select SERIAL_CORE + help + This driver supports the Phytium UART controller on PCI/PCIe adapters. + If you want to compile this driver into the kernel, say Y here. To + compile this driver as a module, choose M here. + + If unsure, say N. + + config SERIAL_EARLYCON_RISCV_SBI bool "Early console using RISC-V SBI" depends on RISCV_SBI diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index a2ccbc508ec57..0fcf7dc9dfa34 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -64,6 +64,7 @@ obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o obj-$(CONFIG_SERIAL_OWL) += owl-uart.o obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o +obj-$(CONFIG_SERIAL_PHYTIUM_PCI) += phytium-uart.o obj-$(CONFIG_SERIAL_PIC32) += pic32_uart.o obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o diff --git a/drivers/tty/serial/phytium-uart.c b/drivers/tty/serial/phytium-uart.c new file mode 100644 index 0000000000000..ca28a3677ad03 --- /dev/null +++ b/drivers/tty/serial/phytium-uart.c @@ -0,0 +1,925 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Phytium PCI UART controller + * + * Copyright (c) 2021-2023, Phytium Technology Co., Ltd. + * + * Derived from drivers/tty/serial/amba-pl011.c + * Copyright 1999 ARM Limited + * Copyright (C) 2000 Deep Blue Solutions Ltd. 
+ * Copyright (C) 2010 ST-Ericsson SA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "phytium_uart" + +#define REG_DR 0x00 +#define REG_FR 0x18 +#define REG_IBRD 0x24 +#define REG_FBRD 0x28 +#define REG_LCRH_RX 0x2c +#define REG_LCRH_TX 0x2c +#define REG_CR 0x30 +#define REG_IFLS 0x34 +#define REG_IMSC 0x38 +#define REG_RIS 0x3c +#define REG_MIS 0x40 +#define REG_ICR 0x44 + +#define REG_DR_OE (1 << 11) +#define REG_DR_BE (1 << 10) +#define REG_DR_PE (1 << 9) +#define REG_DR_FE (1 << 8) + +#define REG_LCRH_SPS 0x80 +#define REG_LCRH_WLEN_8 0x60 +#define REG_LCRH_WLEN_7 0x40 +#define REG_LCRH_WLEN_6 0x20 +#define REG_LCRH_WLEN_5 0x00 +#define REG_LCRH_FEN 0x10 +#define REG_LCRH_STP2 0x08 +#define REG_LCRH_EPS 0x04 +#define REG_LCRH_PEN 0x02 +#define REG_LCRH_BRK 0x01 + +#define REG_FR_RI 0x100 +#define REG_FR_TXFE 0x080 +#define REG_FR_RXFF 0x040 +#define REG_FR_TXFF 0x020 +#define REG_FR_RXFE 0x010 +#define REG_FR_BUSY 0x008 +#define REG_FR_DCD 0x004 +#define REG_FR_DSR 0x002 +#define REG_FR_CTS 0x001 +#define REG_FR_TMSK (REG_FR_TXFF + REG_FR_BUSY) + +#define REG_CR_CTSEN 0x8000 /* CTS hardware flow control */ +#define REG_CR_RTSEN 0x4000 /* RTS hardware flow control */ +#define REG_CR_OUT2 0x2000 /* OUT2 */ +#define REG_CR_OUT1 0x1000 /* OUT1 */ +#define REG_CR_RTS 0x0800 /* RTS */ +#define REG_CR_DTR 0x0400 /* DTR */ +#define REG_CR_RXE 0x0200 /* receive enable */ +#define REG_CR_TXE 0x0100 /* transmit enable */ +#define REG_CR_LBE 0x0080 /* loopback enable */ +#define REG_CR_RTIE 0x0040 +#define REG_CR_TIE 0x0020 +#define REG_CR_RIE 0x0010 +#define REG_CR_MSIE 0x0008 +#define REG_CR_IIRLP 0x0004 /* SIR low power mode */ +#define REG_CR_SIREN 0x0002 /* SIR enable */ +#define REG_CR_UARTEN 0x0001 /* UART enable */ + +#define REG_IFLS_RX1_8 (0 << 3) +#define REG_IFLS_RX2_8 (1 << 3) +#define REG_IFLS_RX4_8 (2 << 3) +#define REG_IFLS_RX6_8 (3 << 3) +#define REG_IFLS_RX7_8 (4 << 3) +#define REG_IFLS_TX1_8 (0 << 0) +#define REG_IFLS_TX2_8 (1 << 0) +#define REG_IFLS_TX4_8 (2 << 0) +#define REG_IFLS_TX6_8 (3 << 0) + +#define REG_IMSC_OEIM (1 << 10) /* overrun error interrupt mask */ +#define REG_IMSC_BEIM (1 << 9) /* break error interrupt mask */ +#define REG_IMSC_PEIM (1 << 8) /* parity error interrupt mask */ +#define REG_IMSC_FEIM (1 << 7) /* framing error interrupt mask */ +#define REG_IMSC_RTIM (1 << 6) /* receive timeout interrupt mask */ +#define REG_IMSC_TXIM (1 << 5) /* transmit interrupt mask */ +#define REG_IMSC_RXIM (1 << 4) /* receive interrupt mask */ +#define REG_IMSC_DSRMIM (1 << 3) /* DSR interrupt mask */ +#define REG_IMSC_DCDMIM (1 << 2) /* DCD interrupt mask */ +#define REG_IMSC_CTSMIM (1 << 1) /* CTS interrupt mask */ +#define REG_IMSC_RIMIM (1 << 0) /* RI interrupt mask */ + +#define REG_ICR_OEIS (1 << 10) /* overrun error interrupt status */ +#define REG_ICR_BEIS (1 << 9) /* break error interrupt status */ +#define REG_ICR_PEIS (1 << 8) /* parity error interrupt status */ +#define REG_ICR_FEIS (1 << 7) /* framing error interrupt status */ +#define REG_ICR_RTIS (1 << 6) /* receive timeout interrupt status */ +#define REG_ICR_TXIS (1 << 5) /* transmit interrupt status */ +#define REG_ICR_RXIS (1 << 4) /* receive interrupt status */ +#define REG_ICR_DSRMIS (1 << 3) /* DSR interrupt status */ +#define REG_ICR_DCDMIS (1 << 2) /* DCD interrupt status */ +#define REG_ICR_CTSMIS (1 << 1) /* CTS interrupt status */ +#define REG_ICR_RIMIS (1 << 0) /* RI interrupt status */ + +#define UART_NR 12 
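The REG_IBRD/REG_FBRD pair above holds a PL011-style fractional baud-rate divisor: phytium_set_termios() below computes quot = DIV_ROUND_CLOSEST(uartclk * 4, baud) and splits it into an integer part (quot >> 6) and a 6-bit fractional part (quot & 0x3f). A standalone check of that arithmetic at the default 48 MHz clock; the baud rates are only examples:

#include <stdio.h>

/* Simplified unsigned variant of the kernel macro, for this sketch only. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int uartclk = 48000000;	/* DEFAULT_UARTCLK */
	unsigned int bauds[] = { 9600, 115200, 921600 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int quot = DIV_ROUND_CLOSEST(uartclk * 4, bauds[i]);

		/* IBRD = integer part, FBRD = 6-bit fractional part */
		printf("baud %7u -> quot %5u, IBRD %4u, FBRD %2u\n",
		       bauds[i], quot, quot >> 6, quot & 0x3f);
	}
	return 0;
}

At 115200 baud this yields quot = 1667, i.e. IBRD = 26 and FBRD = 3, matching the classic PL011 divisor of 26 + 3/64.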
+ +#define UART_DR_ERROR (REG_DR_OE|REG_DR_BE|REG_DR_PE|REG_DR_FE) +#define UART_DUMMY_DR_RX (1 << 16) + +#define DEFAULT_UARTCLK 48000000 /* 48 MHz */ + +/* + * We wrap our port structure around the generic uart_port. + */ +struct phytium_uart_port { + struct uart_port port; + unsigned int im; /* interrupt mask */ + unsigned int old_status; + unsigned int old_cr; /* state during shutdown */ + char type[12]; +}; + +static unsigned int phytium_uart_read(const struct phytium_uart_port *pup, + unsigned int reg) +{ + void __iomem *addr = pup->port.membase + reg; + + return readl_relaxed(addr); +} + +static void phytium_uart_write(unsigned int val, const struct phytium_uart_port *pup, + unsigned int reg) +{ + void __iomem *addr = pup->port.membase + reg; + + writel_relaxed(val, addr); +} + +static int phytium_fifo_to_tty(struct phytium_uart_port *pup) +{ + u16 status; + unsigned int ch, flag, fifotaken; + + for (fifotaken = 0; fifotaken < 256; fifotaken++) { + status = phytium_uart_read(pup, REG_FR); + if (status & REG_FR_RXFE) + break; + + /* Take chars from the FIFO and update status */ + ch = phytium_uart_read(pup, REG_DR) | UART_DUMMY_DR_RX; + flag = TTY_NORMAL; + pup->port.icount.rx++; + + if (unlikely(ch & UART_DR_ERROR)) { + if (ch & REG_DR_BE) { + ch &= ~(REG_DR_FE | REG_DR_PE); + pup->port.icount.brk++; + if (uart_handle_break(&pup->port)) + continue; + } else if (ch & REG_DR_PE) + pup->port.icount.parity++; + else if (ch & REG_DR_FE) + pup->port.icount.frame++; + if (ch & REG_DR_OE) + pup->port.icount.overrun++; + + ch &= pup->port.read_status_mask; + + if (ch & REG_DR_BE) + flag = TTY_BREAK; + else if (ch & REG_DR_PE) + flag = TTY_PARITY; + else if (ch & REG_DR_FE) + flag = TTY_FRAME; + } + + if (uart_handle_sysrq_char(&pup->port, ch & 255)) + continue; + + uart_insert_char(&pup->port, ch, REG_DR_OE, ch, flag); + } + + return fifotaken; +} + +static void phytium_rx_chars(struct phytium_uart_port *pup) +__releases(&pup->port.lock) +__acquires(&pup->port.lock) +{ + phytium_fifo_to_tty(pup); + + spin_unlock(&pup->port.lock); + tty_flip_buffer_push(&pup->port.state->port); + spin_lock(&pup->port.lock); +} + +static void phytium_stop_tx(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + pup->im &= ~REG_IMSC_TXIM; + phytium_uart_write(pup->im, pup, REG_IMSC); +} + +static bool phytium_tx_char(struct phytium_uart_port *pup, unsigned char c, + bool from_irq) +{ + if (unlikely(!from_irq) && + phytium_uart_read(pup, REG_FR) & REG_FR_TXFF) + return false; /* unable to transmit character */ + + phytium_uart_write(c, pup, REG_DR); + pup->port.icount.tx++; + + return true; +} + +static bool phytium_tx_chars(struct phytium_uart_port *pup, bool from_irq) +{ + struct tty_port *tport = &pup->port.state->port; + int count = pup->port.fifosize >> 1; + + if (pup->port.x_char) { + if (!phytium_tx_char(pup, pup->port.x_char, from_irq)) + return true; + pup->port.x_char = 0; + --count; + } + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(&pup->port)) { + phytium_stop_tx(&pup->port); + return false; + } + + do { + unsigned char c; + + if (likely(from_irq) && count-- == 0) + break; + + /* peek/transmit/skip so the byte is not lost if the TX FIFO is full */ + if (!kfifo_peek(&tport->xmit_fifo, &c)) + break; + if (!phytium_tx_char(pup, c, from_irq)) + break; + kfifo_skip(&tport->xmit_fifo); + } while (!kfifo_is_empty(&tport->xmit_fifo)); + + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) + uart_write_wakeup(&pup->port); + + if (kfifo_is_empty(&tport->xmit_fifo)) { + phytium_stop_tx(&pup->port); + return false; + } + return true; +} + +static void phytium_modem_status(struct 
phytium_uart_port *pup) +{ + unsigned int status, delta; + + status = phytium_uart_read(pup, REG_FR) & (REG_FR_DCD|REG_FR_DSR|REG_FR_CTS); + + delta = status ^ pup->old_status; + pup->old_status = status; + + if (!delta) + return; + + if (delta & REG_FR_DCD) + uart_handle_dcd_change(&pup->port, status & REG_FR_DCD); + + if (delta & REG_FR_DSR) + pup->port.icount.dsr++; + + if (delta & REG_FR_CTS) + uart_handle_cts_change(&pup->port, status & REG_FR_CTS); + + wake_up_interruptible(&pup->port.state->port.delta_msr_wait); +} + +static irqreturn_t phytium_uart_interrupt(int irq, void *dev_id) +{ + struct phytium_uart_port *pup = dev_id; + unsigned long flags; + unsigned int status, pass_counter = 256; + int handled = 0; + + spin_lock_irqsave(&pup->port.lock, flags); + status = phytium_uart_read(pup, REG_RIS) & pup->im; + if (status) { + do { + phytium_uart_write(status & ~(REG_ICR_TXIS|REG_ICR_RTIS|REG_ICR_RXIS), + pup, REG_ICR); + + if (status & (REG_ICR_RTIS|REG_ICR_RXIS)) + phytium_rx_chars(pup); + + if (status & (REG_ICR_DSRMIS|REG_ICR_DCDMIS| + REG_ICR_CTSMIS|REG_ICR_RIMIS)) + phytium_modem_status(pup); + if (status & REG_ICR_TXIS) + phytium_tx_chars(pup, true); + + if (pass_counter-- == 0) + break; + + status = phytium_uart_read(pup, REG_RIS) & pup->im; + } while (status != 0); + handled = 1; + } + spin_unlock_irqrestore(&pup->port.lock, flags); + + return IRQ_RETVAL(handled); +} + +static unsigned int phytium_tx_empty(struct uart_port *port) +{ + unsigned int status; + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + status = phytium_uart_read(pup, REG_FR) & (REG_FR_BUSY | REG_FR_TXFF); + + return status ? 0 : TIOCSER_TEMT; +} + +static void phytium_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + unsigned int cr; + + cr = phytium_uart_read(pup, REG_CR); + +#define TIOCMBIT(tiocmbit, uartbit) \ + do { \ + if (mctrl & tiocmbit) \ + cr |= uartbit; \ + else \ + cr &= ~uartbit; \ + } while (0) + + TIOCMBIT(TIOCM_RTS, REG_CR_RTS); + TIOCMBIT(TIOCM_DTR, REG_CR_DTR); + TIOCMBIT(TIOCM_OUT1, REG_CR_OUT1); + TIOCMBIT(TIOCM_OUT2, REG_CR_OUT2); + TIOCMBIT(TIOCM_LOOP, REG_CR_LBE); + + if (port->status & UPSTAT_AUTORTS) { + /* We need to disable auto-RTS if we want to turn RTS off */ + TIOCMBIT(TIOCM_RTS, REG_CR_RTSEN); + } +#undef TIOCMBIT + + phytium_uart_write(cr, pup, REG_CR); +} + +static unsigned int phytium_get_mctrl(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + unsigned int cr = 0; + unsigned int status = phytium_uart_read(pup, REG_FR); + +#define TIOCMBIT(uartbit, tiocmbit) \ + do { \ + if (status & uartbit) \ + cr |= tiocmbit; \ + } while (0) + + TIOCMBIT(REG_FR_DCD, TIOCM_CAR); + TIOCMBIT(REG_FR_DSR, TIOCM_DSR); + TIOCMBIT(REG_FR_CTS, TIOCM_CTS); + TIOCMBIT(REG_FR_RI, TIOCM_RNG); +#undef TIOCMBIT + return cr; +} + +static void phytium_start_tx(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + if (phytium_tx_chars(pup, false)) { + pup->im |= REG_IMSC_TXIM; + phytium_uart_write(pup->im, pup, REG_IMSC); + } +} + +static void phytium_stop_rx(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + pup->im &= ~(REG_IMSC_RXIM|REG_IMSC_RTIM|REG_IMSC_FEIM| + REG_IMSC_PEIM|REG_IMSC_BEIM|REG_IMSC_OEIM); + phytium_uart_write(pup->im, pup, REG_IMSC); +} + +static 
void phytium_enable_ms(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + pup->im |= REG_IMSC_RIMIM|REG_IMSC_CTSMIM|REG_IMSC_DCDMIM|REG_IMSC_DSRMIM; + phytium_uart_write(pup->im, pup, REG_IMSC); +} + +static void phytium_break_ctl(struct uart_port *port, int break_state) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + unsigned long flags; + unsigned int lcr_h; + + spin_lock_irqsave(&pup->port.lock, flags); + lcr_h = phytium_uart_read(pup, REG_LCRH_TX); + if (break_state == -1) + lcr_h |= REG_LCRH_BRK; + else + lcr_h &= ~REG_LCRH_BRK; + phytium_uart_write(lcr_h, pup, REG_LCRH_TX); + spin_unlock_irqrestore(&pup->port.lock, flags); +} + +static int phytium_hwinit(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + /* XXX: more configurable setup method in future */ + pup->port.uartclk = DEFAULT_UARTCLK; + + /* Clear pending error and receive interrupts */ + phytium_uart_write(REG_ICR_OEIS | REG_ICR_BEIS | REG_ICR_PEIS | + REG_ICR_FEIS | REG_ICR_RTIS | REG_ICR_RXIS, + pup, REG_ICR); + + /* + * Save the interrupt enable mask, and enable RX interrupts in case + * the interrupt is used for NMI entry. + */ + pup->im = phytium_uart_read(pup, REG_IMSC); + phytium_uart_write(REG_IMSC_RTIM | REG_IMSC_RXIM, pup, REG_IMSC); + + return 0; +} + +static int phytium_uart_allocate_irq(struct phytium_uart_port *pup) +{ + phytium_uart_write(pup->im, pup, REG_IMSC); + + return request_irq(pup->port.irq, phytium_uart_interrupt, IRQF_SHARED, DRV_NAME, pup); +} + +static void phytium_enable_interrupts(struct phytium_uart_port *pup) +{ + unsigned int i; + + spin_lock_irq(&pup->port.lock); + + /* Clear out any spuriously appearing RX interrupts */ + phytium_uart_write(REG_ICR_RTIS | REG_ICR_RXIS, pup, REG_ICR); + + /* + * RXIS is asserted only when the RX FIFO transitions from below + * to above the trigger threshold. If the RX FIFO is already + * full to the threshold this can't happen and RXIS will now be + * stuck off. Drain the RX FIFO explicitly to fix this: + */ + for (i = 0; i < pup->port.fifosize * 2; i++) { + if (phytium_uart_read(pup, REG_FR) & REG_FR_RXFE) + break; + + phytium_uart_read(pup, REG_DR); + } + + pup->im = REG_IMSC_RTIM | REG_IMSC_RXIM; + phytium_uart_write(pup->im, pup, REG_IMSC); + spin_unlock_irq(&pup->port.lock); +} + +static int phytium_startup(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + unsigned int cr; + int ret = 0; + + ret = phytium_hwinit(port); + if (ret) + goto out; + + ret = phytium_uart_allocate_irq(pup); + if (ret) + goto out; + + phytium_uart_write(REG_IFLS_RX4_8|REG_IFLS_TX4_8, pup, REG_IFLS); + + spin_lock_irq(&pup->port.lock); + + /* restore RTS and DTR */ + cr = pup->old_cr & (REG_CR_RTS | REG_CR_DTR); + cr |= REG_CR_UARTEN | REG_CR_RXE | REG_CR_TXE; + phytium_uart_write(cr, pup, REG_CR); + + spin_unlock_irq(&pup->port.lock); + + /* initialise the old status of the modem signals */ + pup->old_status = phytium_uart_read(pup, REG_FR) & (REG_FR_DCD|REG_FR_DSR|REG_FR_CTS); + + phytium_enable_interrupts(pup); + +out: + return ret; +} + +static void phytium_shutdown_channel(struct phytium_uart_port *pup, + unsigned int lcrh) +{ + unsigned long val; + + val = phytium_uart_read(pup, lcrh); + val &= ~(REG_LCRH_BRK | REG_LCRH_FEN); + phytium_uart_write(val, pup, lcrh); +} + +static void phytium_disable_uart(struct phytium_uart_port *pup) +{ + unsigned int cr; + + pup->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); + spin_lock_irq(&pup->port.lock); + cr = phytium_uart_read(pup, REG_CR); + pup->old_cr = cr; + cr &= REG_CR_RTS | REG_CR_DTR; + cr |= REG_CR_UARTEN | REG_CR_TXE; + phytium_uart_write(cr, pup, REG_CR); + spin_unlock_irq(&pup->port.lock); + + /* + * disable break condition and fifos + */ + phytium_shutdown_channel(pup, REG_LCRH_RX); +} + +static void phytium_disable_interrupts(struct phytium_uart_port *pup) +{ + spin_lock_irq(&pup->port.lock); + + /* mask all interrupts and clear all pending ones */ + pup->im = 0; + phytium_uart_write(pup->im, pup, REG_IMSC); + phytium_uart_write(0xffff, pup, REG_ICR); + + spin_unlock_irq(&pup->port.lock); +} + +static void phytium_shutdown(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + phytium_disable_interrupts(pup); + + free_irq(pup->port.irq, pup); + + phytium_disable_uart(pup); + + if (pup->port.ops->flush_buffer) + pup->port.ops->flush_buffer(port); +} + +static void +phytium_setup_status_masks(struct uart_port *port, struct ktermios *termios) +{ + port->read_status_mask = REG_DR_OE | 255; + if (termios->c_iflag & INPCK) + port->read_status_mask |= REG_DR_FE | REG_DR_PE; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= REG_DR_BE; + + /* + * Characters to ignore + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= REG_DR_FE | REG_DR_PE; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= REG_DR_BE; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= REG_DR_OE; + } + + /* + * Ignore all characters if CREAD is not set. 
+ */ + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= UART_DUMMY_DR_RX; +} + +static void +phytium_set_termios(struct uart_port *port, struct ktermios *termios, const struct ktermios *old) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + unsigned int lcr_h, old_cr; + unsigned long flags; + unsigned int baud, quot; + + /* Ask the core to calculate the divisor for us. */ + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); + + if (baud > port->uartclk/16) + quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud); + else + quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud); + + switch (termios->c_cflag & CSIZE) { + case CS5: + lcr_h = REG_LCRH_WLEN_5; + break; + case CS6: + lcr_h = REG_LCRH_WLEN_6; + break; + case CS7: + lcr_h = REG_LCRH_WLEN_7; + break; + default: /* CS8 */ + lcr_h = REG_LCRH_WLEN_8; + break; + } + if (termios->c_cflag & CSTOPB) + lcr_h |= REG_LCRH_STP2; + if (termios->c_cflag & PARENB) { + lcr_h |= REG_LCRH_PEN; + if (!(termios->c_cflag & PARODD)) + lcr_h |= REG_LCRH_EPS; + if (termios->c_cflag & CMSPAR) + lcr_h |= REG_LCRH_SPS; + } + if (pup->port.fifosize > 1) + lcr_h |= REG_LCRH_FEN; + + spin_lock_irqsave(&port->lock, flags); + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + + phytium_setup_status_masks(port, termios); + + if (UART_ENABLE_MS(port, termios->c_cflag)) + phytium_enable_ms(port); + + /* first, disable everything */ + old_cr = phytium_uart_read(pup, REG_CR); + phytium_uart_write(0, pup, REG_CR); + + if (termios->c_cflag & CRTSCTS) { + if (old_cr & REG_CR_RTS) + old_cr |= REG_CR_RTSEN; + + old_cr |= REG_CR_CTSEN; + port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; + } else { + old_cr &= ~(REG_CR_CTSEN | REG_CR_RTSEN); + port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); + } + + /* Set baud rate */ + phytium_uart_write(quot & 0x3f, pup, REG_FBRD); + phytium_uart_write(quot >> 6, pup, REG_IBRD); + + phytium_uart_write(lcr_h, pup, REG_LCRH_RX); + phytium_uart_write(old_cr, pup, REG_CR); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *phytium_type(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + return pup->port.type == PORT_PHYTIUM ? pup->type : NULL; +} + +static void phytium_release_port(struct uart_port *port) +{ + /* Nothing to release ... 
*/ +} + +static int phytium_request_port(struct uart_port *port) +{ + /* UARTs always present */ + return 0; +} + +static void phytium_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_PHYTIUM; + phytium_request_port(port); + } +} + +static int phytium_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + int ret = 0; + + if (ser->type != PORT_UNKNOWN && ser->type != PORT_PHYTIUM) + ret = -EINVAL; + if (ser->irq < 0 || ser->irq >= irq_get_nr_irqs()) + ret = -EINVAL; + if (ser->baud_base < 9600) + ret = -EINVAL; + + return ret; +} + +static const struct uart_ops phytium_uart_ops = { + .tx_empty = phytium_tx_empty, + .set_mctrl = phytium_set_mctrl, + .get_mctrl = phytium_get_mctrl, + .stop_tx = phytium_stop_tx, + .start_tx = phytium_start_tx, + .stop_rx = phytium_stop_rx, + .enable_ms = phytium_enable_ms, + .break_ctl = phytium_break_ctl, + .startup = phytium_startup, + .shutdown = phytium_shutdown, + .set_termios = phytium_set_termios, + .type = phytium_type, + .release_port = phytium_release_port, + .request_port = phytium_request_port, + .config_port = phytium_config_port, + .verify_port = phytium_verify_port, +}; + +static struct phytium_uart_port *uart_ports[UART_NR]; + +static struct uart_driver phytium_uart = { + .owner = THIS_MODULE, + .driver_name = DRV_NAME, + .dev_name = "ttyS", + .nr = UART_NR, +}; + +static void phytium_unregister_port(struct phytium_uart_port *pup) +{ + int i; + bool busy = false; + + for (i = 0; i < ARRAY_SIZE(uart_ports); i++) { + if (uart_ports[i] == pup) + uart_ports[i] = NULL; + else if (uart_ports[i]) + busy = true; + } + + if (!busy) + uart_unregister_driver(&phytium_uart); +} + +static int phytium_find_free_port(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(uart_ports); i++) + if (uart_ports[i] == NULL) + return i; + + return -EBUSY; +} + +static int phytium_register_port(struct phytium_uart_port *pup) +{ + int rc; + + /* Ensure interrupts from this UART are masked and cleared */ + phytium_uart_write(0, pup, REG_IMSC); + phytium_uart_write(0xffff, pup, REG_ICR); + + if (!phytium_uart.state) { + rc = uart_register_driver(&phytium_uart); + if (rc < 0) { + dev_err(pup->port.dev, + "Failed to register Phytium PCI UART driver\n"); + return rc; + } + } + + rc = uart_add_one_port(&phytium_uart, &pup->port); + if (rc) + phytium_unregister_port(pup); + + return rc; +} + +static int phytium_uart_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct phytium_uart_port *pup; + int portnr, rc; + + portnr = phytium_find_free_port(); + if (portnr < 0) + return portnr; + + pup = devm_kzalloc(&pdev->dev, sizeof(struct phytium_uart_port), + GFP_KERNEL); + if (!pup) + return -ENOMEM; + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + rc = pcim_request_all_regions(pdev, pci_name(pdev)); + if (rc) + return rc; + + pup->port.iotype = UPIO_MEM32; + pup->port.irq = pdev->irq; + pup->port.mapbase = pci_resource_start(pdev, 0); + pup->port.membase = pcim_iomap_table(pdev)[0]; + pup->port.ops = &phytium_uart_ops; + pup->port.dev = &pdev->dev; + pup->port.fifosize = 32; + pup->port.flags = UPF_BOOT_AUTOCONF; + pup->port.line = portnr; + + uart_ports[portnr] = pup; + + pup->old_cr = 0; + snprintf(pup->type, sizeof(pup->type), "pci-uart"); + + pci_set_drvdata(pdev, pup); + + return phytium_register_port(pup); +} + +static void phytium_uart_remove(struct pci_dev *pdev) +{ + struct phytium_uart_port *pup = pci_get_drvdata(pdev); + + uart_remove_one_port(&phytium_uart, &pup->port); + 
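/* removing the final port lets phytium_unregister_port() drop the uart_driver as well */ +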
phytium_unregister_port(pup); +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_uart_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_uart_port *pup = pci_get_drvdata(pdev); + + if (pup) + uart_suspend_port(&phytium_uart, &pup->port); + + return 0; +} + +static int phytium_uart_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_uart_port *pup = pci_get_drvdata(pdev); + + if (pup) + uart_resume_port(&phytium_uart, &pup->port); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_dev_pm_ops, phytium_uart_suspend, phytium_uart_resume); + +static const struct pci_device_id pci_ids[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc2e) }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, pci_ids); + +static struct pci_driver phytium_uart_pci_driver = { + .name = DRV_NAME, + .probe = phytium_uart_probe, + .remove = phytium_uart_remove, + .driver = { + .pm = &phytium_dev_pm_ops, + }, + .id_table = pci_ids, +}; + +static int __init phytium_uart_init(void) +{ + pr_info("Serial: Phytium PCI UART driver\n"); + + return pci_register_driver(&phytium_uart_pci_driver); +} + +static void __exit phytium_uart_exit(void) +{ + pci_unregister_driver(&phytium_uart_pci_driver); +} + +module_init(phytium_uart_init); +module_exit(phytium_uart_exit); + +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium PCI serial port driver"); +MODULE_LICENSE("GPL"); diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 9c007a106330b..d99394a5f8047 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -231,6 +231,9 @@ /* Sunplus UART */ #define PORT_SUNPLUS 123 +/* Phytium PCI UART */ +#define PORT_PHYTIUM 124 + /* Generic type identifier for ports which type is not important to userspace. 
*/ #define PORT_GENERIC (-1) diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 444bdfdab7318..1561673200c18 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -2588,6 +2588,17 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att } } +#if IS_ENABLED(CONFIG_X86) + if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) && + (boot_cpu_data.x86 == 7 && boot_cpu_data.x86_model == 0x5b)) { + for_each_cpu(i, cpu_map) { + for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) + sd->flags |= SD_ASYM_PACKING; + } + } +#endif + /* Calculate CPU capacity for physical packages and nodes */ for (i = nr_cpumask_bits-1; i >= 0; i--) { if (!cpumask_test_cpu(i, cpu_map)) diff --git a/lib/crc/x86/crc32.h b/lib/crc/x86/crc32.h index 19a5e3c6c73bb..6986becaeadc6 100644 --- a/lib/crc/x86/crc32.h +++ b/lib/crc/x86/crc32.h @@ -110,6 +110,12 @@ static void crc32_mod_init_arch(void) { if (boot_cpu_has(X86_FEATURE_XMM4_2)) static_branch_enable(&have_crc32); + + struct cpuinfo_x86 *c = &boot_cpu_data; + if ((c->x86_vendor == X86_VENDOR_ZHAOXIN || c->x86_vendor == X86_VENDOR_CENTAUR) && + (c->x86 <= 7 && c->x86_model <= 59)) + return; + if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) { static_branch_enable(&have_pclmulqdq); if (have_vpclmul()) { diff --git a/sound/hda/controllers/intel.c b/sound/hda/controllers/intel.c index 4a2167afcc767..615139c6cef0d 100644 --- a/sound/hda/controllers/intel.c +++ b/sound/hda/controllers/intel.c @@ -1545,7 +1545,8 @@ static int check_position_fix(struct azx *chip, int fix) } /* Check VIA/ATI HD Audio Controller exist */ - if (chip->driver_type == AZX_DRIVER_VIA) { + if (chip->driver_type == AZX_DRIVER_VIA || + chip->driver_type == AZX_DRIVER_ZHAOXIN) { dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n"); return POS_FIX_VIACOMBO; } @@ -1699,7 +1700,7 @@ static void azx_check_snoop_available(struct azx *chip) snoop = true; if (azx_get_snoop_type(chip) == AZX_SNOOP_TYPE_NONE && - chip->driver_type == AZX_DRIVER_VIA) { + (chip->driver_type == AZX_DRIVER_VIA || chip->driver_type == AZX_DRIVER_ZHAOXIN)) { /* force to non-snoop mode for a new VIA controller * when BIOS is set */
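The Centaur/Zhaoxin gate added to rtc-mc146818-lib.c and lib/crc/x86/crc32.h above is the same family <= 7, model <= 59 test written out twice. If it spreads further it could be collapsed into one predicate; a hypothetical sketch (helper name and stub values are ours, not from the patch):

#include <stdbool.h>

/* Stub mirroring the struct cpuinfo_x86 fields the patch tests;
 * the vendor constants are placeholders, not the kernel's values. */
struct cpuinfo_stub {
	int x86_vendor;
	unsigned int x86;	/* CPU family */
	unsigned int x86_model;
};

enum { VENDOR_CENTAUR = 1, VENDOR_ZHAOXIN = 2 };

/* Hypothetical consolidation of the repeated check in the RTC and
 * CRC32 hunks: Centaur/Zhaoxin parts up to family 7, model 59. */
static bool needs_zhaoxin_quirk(const struct cpuinfo_stub *c)
{
	if (c->x86_vendor != VENDOR_CENTAUR && c->x86_vendor != VENDOR_ZHAOXIN)
		return false;
	return c->x86 <= 7 && c->x86_model <= 59;
}

Note that the scheduler hunk is narrower (family == 7, model == 0x5b), so it would keep its own check either way.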