diff --git a/.github/workflows/package-kernel-amd64-daily.yml b/.github/workflows/package-kernel-amd64-daily.yml index 5902521bfe633..944c4f7de3f78 100644 --- a/.github/workflows/package-kernel-amd64-daily.yml +++ b/.github/workflows/package-kernel-amd64-daily.yml @@ -26,9 +26,10 @@ jobs: # .config make deepin_x86_desktop_defconfig make bindeb-pkg -j$(nproc) + mv ../*.deb . - name: 'Upload Kernel Artifact' uses: actions/upload-artifact@v3 with: name: kernel-amd64-deb - path: "../*.deb" + path: "*.deb" diff --git a/Documentation/devicetree/bindings/gpio/phytium,sgpio.yaml b/Documentation/devicetree/bindings/gpio/phytium,sgpio.yaml new file mode 100644 index 0000000000000..fed5c2b634683 --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/phytium,sgpio.yaml @@ -0,0 +1,67 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/gpio/phytium,sgpio.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Phytium SGPIO controller + +description: | + This SGPIO controller is for Phytium Pe220x SoCs, which supports up to + 96 (32x3) Serial GPIOs. + +maintainers: + - Chen Baozi + +properties: + compatible: + const: phytium,sgpio + + reg: + maxItems: 1 + description: Address and length of the register set for the device. + + gpio-controller: true + + '#gpio-cells': + const: 2 + description: | + The first cell is the pin number and the second cell is used to specify + the gpio polarity. + 0 = active high + 1 = active low + + interrupts: + maxItems: 1 + + ngpios: true + + bus-frequency: true + + clocks: + maxItems: 1 + +additionalProperties: false + +required: + - compatible + - reg + - gpio-controller + - '#gpio-cells' + - interrupts + - ngpios + - clocks + - bus-frequency + +examples: + - | + sgpio: sgpio@2807d000 { + compatible = "phytium,sgpio"; + reg = <0x0 0x2807d000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + ngpios = <96>; + bus-frequency = <48000>; + gpio-controller; + #gpio-cells = <2>; + }; diff --git a/Documentation/devicetree/bindings/gpu/phytium,dc.yaml b/Documentation/devicetree/bindings/gpu/phytium,dc.yaml new file mode 100644 index 0000000000000..e5336599307e5 --- /dev/null +++ b/Documentation/devicetree/bindings/gpu/phytium,dc.yaml @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/gpu/phytium,dc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Phytium Display Controller + +maintainers: + - Dai Jingtao + +properties: + compatible: + enum: + - phytium,dc + + reg: + items: + - description: DC registers + + interrupts: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + +examples: + - | + dc@32000000 { + compatible = "phytium,dc"; + reg = <0x0 0x32000000 0x0 0x8000>; + interrupts = ; + }; diff --git a/Documentation/devicetree/bindings/i3c/cdns,i3c-master.yaml b/Documentation/devicetree/bindings/i3c/cdns,i3c-master.yaml index 6fa3078074d02..5195466658b3a 100644 --- a/Documentation/devicetree/bindings/i3c/cdns,i3c-master.yaml +++ b/Documentation/devicetree/bindings/i3c/cdns,i3c-master.yaml @@ -15,7 +15,9 @@ allOf: properties: compatible: oneOf: - - const: cdns,i3c-master + - enum: + - cdns,i3c-master + - phytium,cdns-i3c-master - items: - enum: - axiado,ax3000-i3c diff --git a/Documentation/devicetree/bindings/i3c/phytium,i3c-master.yaml b/Documentation/devicetree/bindings/i3c/phytium,i3c-master.yaml new file mode 100644 index 0000000000000..0e004735fd38d --- /dev/null +++
b/Documentation/devicetree/bindings/i3c/phytium,i3c-master.yaml @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause + +* Phytium I3C controller + +This I3C controller is for Phytium SoCs. + +Required properties: +- compatible: Shall be "phytium,cdns-i3c-master" +- clocks: Shall reference the pclk and sysclk +- clock-names: Shall contain "pclk" and "sysclk" +- interrupts: The interrupt line connected to this I3C master +- reg: I3C master registers +- #address-cells: Shall be set to 1 +- #size-cells: Shall be set to 0 +- i2c-scl-hz: I2C CLK frequency +- i3c-scl-hz: I3C CLK frequency + +Example: + + i3c-master@28045000 { + compatible = "phytium,cdns-i3c-master"; + reg = <0x0 0x28045000 0x0 0x1000>; + interrupts = ; + clocks = <&coreclock>, <&i3csysclock>; + clock-names = "pclk", "sysclk"; + #address-cells = <1>; + #size-cells = <0>; + i2c-scl-hz = <400000>; + i3c-scl-hz = <1000000>; + + nunchuk: nunchuk@52 { + compatible = "nintendo,nunchuk"; + reg = <0x52 0x0 0x10>; + }; + }; diff --git a/Documentation/devicetree/bindings/leds/phytnet_led.yaml b/Documentation/devicetree/bindings/leds/phytnet_led.yaml new file mode 100644 index 0000000000000..24efebc9ba30d --- /dev/null +++ b/Documentation/devicetree/bindings/leds/phytnet_led.yaml @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/leds/phytnet_led.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Phytium MAC LED controller + +maintainers: + - LongShixiang + +description: + This module is used to control the MAC LEDs. + +properties: + compatible: + const: phytium,net_led + net_dev: + maxItems: 1 + description: Phandle of the specified net device + led-gpios: + minItems: 1 + maxItems: 2 + description: |- + The GPIOs used for LED control based on the net_dev state. + One represents the LINK state, the other the ACT state. + +required: + - compatible + - net_dev + - led-gpios + +examples: + - | + gpiochip0: gpio_controller { + ... + } + eth0: ethernet { + ...
+ } + phytium_net_led0 { + compatible = "phytium,net_led"; + net_dev = <&eth1>; + led-gpios = <&gpiochip0 9 GPIO_ACTIVE_HIGH>, /* link */ + <&gpiochip0 11 GPIO_ACTIVE_HIGH>; /* act */ + }; diff --git a/Documentation/devicetree/bindings/net/cdns,macb.yaml b/Documentation/devicetree/bindings/net/cdns,macb.yaml index 1029786a855c5..a58660e164a9c 100644 --- a/Documentation/devicetree/bindings/net/cdns,macb.yaml +++ b/Documentation/devicetree/bindings/net/cdns,macb.yaml @@ -59,6 +59,8 @@ properties: - cdns,emac # Generic - cdns,gem # Generic - cdns,macb # Generic + - cdns,phytium-gem-1.0 # GEM version 1.0 on Phytium SoCs + - cdns,phytium-gem-2.0 # GEM version 2.0 on Phytium SoCs - items: - enum: diff --git a/Documentation/devicetree/bindings/usb/phytium,usb2.yaml b/Documentation/devicetree/bindings/usb/phytium,usb2.yaml new file mode 100644 index 0000000000000..caf762039f04d --- /dev/null +++ b/Documentation/devicetree/bindings/usb/phytium,usb2.yaml @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/usb/phytium,usb2.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Phytium USBHS-DRD controller bindings + +maintainers: + - Chen Baozi + +properties: + compatible: + const: phytium,usb2 + + reg: + items: + - description: USB controller registers + - description: PHY registers + + interrupts: + maxItems: 1 + + dr_mode: + enum: [host, otg, peripheral] + +required: + - compatible + - reg + - interrupts + +additionalProperties: false + +examples: + - | + usb2_0: usb2@31800000 { + compatible = "phytium,usb2"; + reg = <0x0 0x31800000 0x0 0x80000>, + <0x0 0x31990000 0x0 0x10000>; + interrupts = ; + }; diff --git a/Makefile b/Makefile index c2b5cfd2fce01..8c653a789227c 100644 --- a/Makefile +++ b/Makefile @@ -1245,11 +1245,8 @@ vmlinux: vmlinux.o $(KBUILD_LDS) modpost # make sure no implicit rule kicks in $(sort $(KBUILD_LDS) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS)): .
; -ifeq ($(origin KERNELRELEASE),file) -filechk_kernel.release = $(srctree)/scripts/setlocalversion $(srctree) -else -filechk_kernel.release = echo $(KERNELRELEASE) -endif +filechk_kernel.release = \ + echo "$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))" # Store (new) KERNELRELEASE string in include/config/kernel.release include/config/kernel.release: FORCE diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 2670adc5c91d9..51350f0fc7352 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -313,6 +313,7 @@ config ARCH_PENSANDO config ARCH_PHYTIUM bool "Phytium SoC Family" + select ARM_GIC_PHYTIUM_2500 help This enables support for Phytium ARMv8 SoC family, including: - Phytium Server SoC Family diff --git a/arch/arm64/configs/deepin_arm64_desktop_defconfig b/arch/arm64/configs/deepin_arm64_desktop_defconfig index 2e9538807e285..bf4586bdea4f9 100644 --- a/arch/arm64/configs/deepin_arm64_desktop_defconfig +++ b/arch/arm64/configs/deepin_arm64_desktop_defconfig @@ -1,4 +1,4 @@ -CONFIG_COMPILE_TEST=y +CONFIG_WERROR=y CONFIG_BUILD_SALT="-arm64-desktop-hwe" CONFIG_KERNEL_ZSTD=y CONFIG_DEFAULT_HOSTNAME="" @@ -110,13 +110,10 @@ CONFIG_CMDLINE="pcie_aspm=off" CONFIG_SUSPEND_SKIP_SYNC=y CONFIG_HIBERNATION=y CONFIG_PM_STD_PARTITION="/dev/sda2" -CONFIG_PM_DEBUG=y CONFIG_ENERGY_MODEL=y CONFIG_CPU_IDLE_GOV_LADDER=y CONFIG_CPU_IDLE_GOV_MENU=y CONFIG_CPU_IDLE_GOV_TEO=y -CONFIG_ARM_PSCI_CPUIDLE=y -CONFIG_ARM_CLPS711X_CPUIDLE=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y @@ -133,12 +130,10 @@ CONFIG_ARM_SCPI_CPUFREQ=m CONFIG_ARM_IMX_CPUFREQ_DT=m CONFIG_ARM_MEDIATEK_CPUFREQ=m CONFIG_ARM_QCOM_CPUFREQ_HW=m -CONFIG_ARM_RASPBERRYPI_CPUFREQ=m CONFIG_ARM_SCMI_CPUFREQ=m CONFIG_QORIQ_CPUFREQ=m CONFIG_ACPI=y CONFIG_ACPI_FPDT=y -CONFIG_ACPI_EC_DEBUGFS=m CONFIG_ACPI_BUTTON=m CONFIG_ACPI_TINY_POWER_BUTTON=m CONFIG_ACPI_FAN=m @@ -224,6 +219,7 @@ CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 CONFIG_MEMORY_FAILURE=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +CONFIG_CMA=y CONFIG_ZONE_DEVICE=y CONFIG_USERFAULTFD=y CONFIG_LRU_GEN=y @@ -664,6 +660,7 @@ CONFIG_BT_6LOWPAN=m CONFIG_BT_LEDS=y CONFIG_BT_MSFTEXT=y CONFIG_BT_AOSPEXT=y +# CONFIG_BT_DEBUGFS is not set CONFIG_BT_HCIBTUSB=m CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y CONFIG_BT_HCIBTUSB_MTK=y @@ -688,7 +685,6 @@ CONFIG_BT_MRVL_SDIO=m CONFIG_BT_ATH3K=m CONFIG_BT_MTKSDIO=m CONFIG_BT_MTKUART=m -CONFIG_BT_QCOMSMD=m CONFIG_BT_VIRTIO=m CONFIG_BT_NXPUART=m CONFIG_AF_RXRPC_IPV6=y @@ -752,21 +748,21 @@ CONFIG_PCI_AARDVARK=m CONFIG_PCIE_ALTERA=m CONFIG_PCIE_ALTERA_MSI=m CONFIG_PCIE_APPLE=m -CONFIG_PCI_VERSATILE=y CONFIG_PCI_HOST_THUNDER_PEM=y CONFIG_PCI_HOST_THUNDER_ECAM=y CONFIG_PCI_FTPCI100=y CONFIG_PCI_HOST_GENERIC=y +CONFIG_PCIE_HISI_ERR=y CONFIG_PCIE_MEDIATEK=m CONFIG_PCIE_MEDIATEK_GEN3=m -CONFIG_PCIE_MT7621=m +CONFIG_PCIE_PHYTIUM_EP=m CONFIG_PCIE_MICROCHIP_HOST=m +CONFIG_PCI_HYPERV_INTERFACE=m CONFIG_PCI_TEGRA=y CONFIG_PCIE_RCAR_HOST=y CONFIG_PCIE_RCAR_EP=y CONFIG_PCIE_ROCKCHIP_HOST=m CONFIG_PCIE_ROCKCHIP_EP=y -CONFIG_PCI_V3_SEMI=y CONFIG_PCI_XGENE=y CONFIG_PCIE_XILINX=y CONFIG_PCIE_XILINX_NWL=y @@ -776,9 +772,6 @@ CONFIG_PCIE_CADENCE_PLAT_EP=y CONFIG_PCI_J721E_HOST=y CONFIG_PCI_J721E_EP=y CONFIG_PCIE_AL=y -CONFIG_PCIE_ARTPEC6_HOST=y -CONFIG_PCIE_ARTPEC6_EP=y -CONFIG_PCIE_BT1=m CONFIG_PCI_IMX6_HOST=y CONFIG_PCI_IMX6_EP=y CONFIG_PCI_LAYERSCAPE=y @@ -786,22 +779,17 @@ CONFIG_PCI_LAYERSCAPE_EP=y CONFIG_PCI_HISI=y CONFIG_PCIE_KIRIN=y CONFIG_PCIE_HISI_STB=y 
-CONFIG_PCIE_INTEL_GW=y CONFIG_PCIE_KEEMBAY_HOST=y CONFIG_PCIE_KEEMBAY_EP=y CONFIG_PCIE_ARMADA_8K=y -CONFIG_PCIE_TEGRA194_HOST=m -CONFIG_PCIE_TEGRA194_EP=m CONFIG_PCIE_DW_PLAT_HOST=y CONFIG_PCIE_DW_PLAT_EP=y CONFIG_PCIE_QCOM=y CONFIG_PCIE_QCOM_EP=m CONFIG_PCIE_ROCKCHIP_DW_HOST=y CONFIG_PCI_EXYNOS=m -CONFIG_PCIE_FU740=y CONFIG_PCIE_UNIPHIER=y CONFIG_PCIE_UNIPHIER_EP=y -CONFIG_PCIE_SPEAR13XX=y CONFIG_PCI_KEYSTONE_HOST=y CONFIG_PCI_KEYSTONE_EP=y CONFIG_PCIE_VISCONTI_HOST=y @@ -815,6 +803,7 @@ CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FW_LOADER_DEBUG is not set CONFIG_FW_LOADER_USER_HELPER=y CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y CONFIG_FW_LOADER_COMPRESS=y @@ -830,15 +819,12 @@ CONFIG_ISCSI_IBFT=m CONFIG_FW_CFG_SYSFS=m CONFIG_FW_CFG_SYSFS_CMDLINE=y CONFIG_ARM_FFA_TRANSPORT=m -CONFIG_BCM47XX_NVRAM=y -CONFIG_BCM47XX_SPROM=y CONFIG_EFI_VARS_PSTORE=m CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y CONFIG_EFI_ZBOOT=y CONFIG_EFI_BOOTLOADER_CONTROL=m CONFIG_EFI_CAPSULE_LOADER=m CONFIG_RESET_ATTACK_MITIGATION=y -CONFIG_ARM_PSCI_CHECKER=y CONFIG_TEGRA_IVC=y CONFIG_GNSS=m CONFIG_GNSS_MTK_SERIAL=m @@ -846,16 +832,12 @@ CONFIG_GNSS_SIRF_SERIAL=m CONFIG_GNSS_UBX_SERIAL=m CONFIG_GNSS_USB=m CONFIG_MTD=m -CONFIG_MTD_TESTS=m CONFIG_MTD_AR7_PARTS=m -CONFIG_MTD_BCM63XX_PARTS=y CONFIG_MTD_BRCM_U_BOOT=m CONFIG_MTD_CMDLINE_PARTS=m CONFIG_MTD_OF_PARTS_LINKSYS_NS=y CONFIG_MTD_AFS_PARTS=m -CONFIG_MTD_PARSER_TPLINK_SAFELOADER=m CONFIG_MTD_PARSER_TRX=m -CONFIG_MTD_SHARPSL_PARTS=m CONFIG_MTD_REDBOOT_PARTS=m CONFIG_MTD_BLOCK=m CONFIG_MTD_BLOCK_RO=m @@ -899,6 +881,34 @@ CONFIG_MTD_ONENAND_VERIFY_WRITE=y CONFIG_MTD_ONENAND_GENERIC=m CONFIG_MTD_ONENAND_OTP=y CONFIG_MTD_ONENAND_2X_PROGRAM=y +CONFIG_MTD_RAW_NAND=m +CONFIG_MTD_NAND_DENALI_PCI=m +CONFIG_MTD_NAND_DENALI_DT=m +CONFIG_MTD_NAND_CAFE=m +CONFIG_MTD_NAND_PHYTIUM_PCI=m +CONFIG_MTD_NAND_PHYTIUM_PLAT=m +CONFIG_MTD_NAND_MARVELL=m +CONFIG_MTD_NAND_BRCMNAND=m +CONFIG_MTD_NAND_BRCMNAND_BCM63XX=m +CONFIG_MTD_NAND_FSL_IFC=m +CONFIG_MTD_NAND_MXC=m +CONFIG_MTD_NAND_SUNXI=m +CONFIG_MTD_NAND_HISI504=m +CONFIG_MTD_NAND_QCOM=m +CONFIG_MTD_NAND_MXIC=m +CONFIG_MTD_NAND_TEGRA=m +CONFIG_MTD_NAND_STM32_FMC2=m +CONFIG_MTD_NAND_MESON=m +CONFIG_MTD_NAND_GPIO=m +CONFIG_MTD_NAND_PLATFORM=m +CONFIG_MTD_NAND_CADENCE=m +CONFIG_MTD_NAND_ARASAN=m +CONFIG_MTD_NAND_INTEL_LGM=m +CONFIG_MTD_NAND_ROCKCHIP=m +CONFIG_MTD_NAND_RENESAS=m +CONFIG_MTD_NAND_NANDSIM=m +CONFIG_MTD_NAND_RICOH=m +CONFIG_MTD_NAND_DISKONCHIP=m # CONFIG_MTD_NAND_ECC_MXIC is not set CONFIG_MTD_LPDDR=m CONFIG_MTD_SPI_NOR=m @@ -916,7 +926,6 @@ CONFIG_PARPORT_PC_FIFO=y CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_DRBD=m -CONFIG_DRBD_FAULT_INJECTION=y CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=8 @@ -947,13 +956,9 @@ CONFIG_AD525X_DPOT_SPI=m CONFIG_DUMMY_IRQ=m CONFIG_PHANTOM=m CONFIG_ICS932S401=m -CONFIG_ATMEL_SSC=m CONFIG_ENCLOSURE_SERVICES=m -CONFIG_SMPRO_ERRMON=m -CONFIG_SMPRO_MISC=m -CONFIG_GEHC_ACHC=m +CONFIG_HI6421V600_IRQ=m CONFIG_HP_ILO=m -CONFIG_QCOM_COINCELL=m CONFIG_QCOM_FASTRPC=m CONFIG_APDS9802ALS=m CONFIG_ISL29003=m @@ -963,11 +968,8 @@ CONFIG_SENSORS_BH1770=m CONFIG_SENSORS_APDS990X=m CONFIG_HMC6352=m CONFIG_DS1682=m -CONFIG_PCH_PHUB=m CONFIG_LATTICE_ECP3_CONFIG=m -CONFIG_SRAM=y CONFIG_DW_XDATA_PCIE=m -CONFIG_PCI_ENDPOINT_TEST=m CONFIG_XILINX_SDFEC=m CONFIG_HISI_HIKEY_USB=m CONFIG_OPEN_DICE=m @@ -1068,7 +1070,6 @@ CONFIG_SCSI_QLA_ISCSI=m CONFIG_QEDI=m CONFIG_QEDF=m CONFIG_SCSI_LPFC=m 
-CONFIG_SCSI_LPFC_DEBUG_FS=y CONFIG_SCSI_DC395x=m CONFIG_SCSI_AM53C974=m CONFIG_SCSI_WD719X=m @@ -1087,20 +1088,14 @@ CONFIG_SATA_ZPODD=y CONFIG_SATA_AHCI=y CONFIG_SATA_AHCI_PLATFORM=m CONFIG_AHCI_BRCM=m -CONFIG_AHCI_DA850=m -CONFIG_AHCI_DM816=m CONFIG_AHCI_DWC=m -CONFIG_AHCI_ST=m CONFIG_AHCI_IMX=m CONFIG_AHCI_CEVA=m CONFIG_AHCI_MTK=m CONFIG_AHCI_MVEBU=m CONFIG_AHCI_SUNXI=m CONFIG_AHCI_TEGRA=m -CONFIG_AHCI_XGENE=m CONFIG_AHCI_QORIQ=m -CONFIG_SATA_FSL=m -CONFIG_SATA_GEMINI=m CONFIG_SATA_AHCI_SEATTLE=m CONFIG_SATA_INIC162X=m CONFIG_SATA_ACARD_AHCI=m @@ -1111,7 +1106,6 @@ CONFIG_SATA_SX4=m CONFIG_ATA_PIIX=m CONFIG_SATA_DWC=m CONFIG_SATA_DWC_OLD_DMA=y -CONFIG_SATA_HIGHBANK=m CONFIG_SATA_MV=m CONFIG_SATA_NV=m CONFIG_SATA_PROMISE=m @@ -1124,17 +1118,12 @@ CONFIG_SATA_VIA=m CONFIG_SATA_VITESSE=m CONFIG_PATA_ALI=m CONFIG_PATA_AMD=m -CONFIG_PATA_ARASAN_CF=m CONFIG_PATA_ARTOP=m CONFIG_PATA_ATIIXP=m CONFIG_PATA_ATP867X=m CONFIG_PATA_CMD64X=m -CONFIG_PATA_CS5520=m -CONFIG_PATA_CS5530=m -CONFIG_PATA_CS5536=m CONFIG_PATA_CYPRESS=m CONFIG_PATA_EFAR=m -CONFIG_PATA_FTIDE010=m CONFIG_PATA_HPT366=m CONFIG_PATA_HPT37X=m CONFIG_PATA_HPT3X2N=m @@ -1154,17 +1143,14 @@ CONFIG_PATA_PDC2027X=m CONFIG_PATA_PDC_OLD=m CONFIG_PATA_RADISYS=m CONFIG_PATA_RDC=m -CONFIG_PATA_SC1200=m CONFIG_PATA_SCH=m CONFIG_PATA_SERVERWORKS=m CONFIG_PATA_SIL680=m CONFIG_PATA_TOSHIBA=m CONFIG_PATA_TRIFLEX=m CONFIG_PATA_VIA=m -CONFIG_PATA_PXA=m CONFIG_PATA_WINBOND=m CONFIG_PATA_CMD640_PCI=m -CONFIG_PATA_IXP4XX_CF=m CONFIG_PATA_MPIIX=m CONFIG_PATA_NS87410=m CONFIG_PATA_OPTI=m @@ -1197,7 +1183,6 @@ CONFIG_MD_FAULTY=m CONFIG_MD_CLUSTER=m CONFIG_BCACHE=m CONFIG_BLK_DEV_DM=m -CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y CONFIG_DM_UNSTRIPED=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m @@ -1447,6 +1432,10 @@ CONFIG_SMSC911X=m CONFIG_SMSC9420=m CONFIG_STMMAC_ETH=y CONFIG_DWMAC_DWC_QOS_ETH=m +CONFIG_DWMAC_MEDIATEK=m +CONFIG_DWMAC_PHYTIUM=m +CONFIG_DWMAC_INTEL_PLAT=m +CONFIG_DWMAC_TEGRA=m CONFIG_STMMAC_PCI=y CONFIG_HAPPYMEAL=m CONFIG_SUNGEM=m @@ -1464,6 +1453,9 @@ CONFIG_TXGBE=m CONFIG_WIZNET_W5100=m CONFIG_WIZNET_W5300=m CONFIG_WIZNET_BUS_DIRECT=y +CONFIG_PHYTMAC=m +CONFIG_PHYTMAC_PLATFORM=m +CONFIG_PHYTMAC_PCI=m CONFIG_FDDI=m CONFIG_DEFXX=m CONFIG_SKFP=m @@ -1511,6 +1503,9 @@ CONFIG_CAN_CC770_PLATFORM=m CONFIG_CAN_IFI_CANFD=m CONFIG_CAN_M_CAN=m CONFIG_CAN_PEAK_PCIEFD=m +CONFIG_CAN_PHYTIUM=m +CONFIG_CAN_PHYTIUM_PLATFORM=m +CONFIG_CAN_PHYTIUM_PCI=m CONFIG_CAN_SJA1000=m CONFIG_CAN_EMS_PCI=m CONFIG_CAN_KVASER_PCI=m @@ -1604,8 +1599,6 @@ CONFIG_WCN36XX=m CONFIG_ATH11K=m CONFIG_ATH11K_PCI=m CONFIG_ATH12K=m -CONFIG_ATH12K_DEBUG=y -CONFIG_ATH12K_TRACING=y CONFIG_ATMEL=m CONFIG_PCI_ATMEL=m CONFIG_AT76C50X_USB=m @@ -1765,6 +1758,7 @@ CONFIG_INPUT_MOUSEDEV_PSAUX=y CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_ADP5588=m CONFIG_KEYBOARD_ADP5589=m +CONFIG_KEYBOARD_QT1050=m CONFIG_KEYBOARD_QT1070=m CONFIG_KEYBOARD_QT2160=m CONFIG_KEYBOARD_DLINK_DIR685=m @@ -1793,6 +1787,8 @@ CONFIG_KEYBOARD_XTKBD=m CONFIG_KEYBOARD_CAP11XX=m CONFIG_KEYBOARD_BCM=m CONFIG_KEYBOARD_MTK_PMIC=m +CONFIG_KEYBOARD_CYPRESS_SF=m +CONFIG_KEYBOARD_PHYTIUM=m CONFIG_MOUSE_PS2_SENTELIC=y CONFIG_MOUSE_PS2_TOUCHKIT=y CONFIG_MOUSE_SERIAL=m @@ -1878,8 +1874,6 @@ CONFIG_TOUCHSCREEN_IMX6UL_TSC=m CONFIG_TOUCHSCREEN_INEXIO=m CONFIG_TOUCHSCREEN_PENMOUNT=m CONFIG_TOUCHSCREEN_EDT_FT5X06=m -CONFIG_TOUCHSCREEN_RASPBERRYPI_FW=m -CONFIG_TOUCHSCREEN_MIGOR=m CONFIG_TOUCHSCREEN_TOUCHRIGHT=m CONFIG_TOUCHSCREEN_TOUCHWIN=m CONFIG_TOUCHSCREEN_PIXCIR=m @@ -1889,7 +1883,6 @@ CONFIG_TOUCHSCREEN_WM97XX=m CONFIG_TOUCHSCREEN_USB_COMPOSITE=m 
CONFIG_TOUCHSCREEN_MC13783=m CONFIG_TOUCHSCREEN_TOUCHIT213=m -CONFIG_TOUCHSCREEN_TS4800=m CONFIG_TOUCHSCREEN_TSC_SERIO=m CONFIG_TOUCHSCREEN_TSC2004=m CONFIG_TOUCHSCREEN_TSC2005=m @@ -1975,45 +1968,38 @@ CONFIG_SERIO_ALTERA_PS2=m CONFIG_SERIO_PS2MULT=m CONFIG_SERIO_ARC_PS2=m CONFIG_SERIO_APBPS2=m -CONFIG_SERIO_OLPC_APSP=m CONFIG_SERIO_SUN4I_PS2=m CONFIG_SERIO_GPIO_PS2=m CONFIG_USERIO=m CONFIG_GAMEPORT=m CONFIG_GAMEPORT_EMU10K1=m CONFIG_GAMEPORT_FM801=m -CONFIG_VT_HW_CONSOLE_BINDING=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_FINTEK=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=48 CONFIG_SERIAL_8250_RUNTIME_UARTS=32 +CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y -CONFIG_SERIAL_8250_ASPEED_VUART=m CONFIG_SERIAL_8250_PCI1XXXX=m +CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_8250_DETECT_IRQ=y CONFIG_SERIAL_8250_RSA=y CONFIG_SERIAL_8250_BCM2835AUX=m CONFIG_SERIAL_8250_DW=y CONFIG_SERIAL_8250_EM=m -CONFIG_SERIAL_8250_IOC3=m CONFIG_SERIAL_8250_RT288X=y CONFIG_SERIAL_8250_OMAP=m -CONFIG_SERIAL_8250_LPC18XX=m CONFIG_SERIAL_8250_MT6577=m CONFIG_SERIAL_8250_UNIPHIER=m -CONFIG_SERIAL_8250_INGENIC=m -CONFIG_SERIAL_8250_PXA=m CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_AMBA_PL010=y CONFIG_SERIAL_AMBA_PL010_CONSOLE=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -CONFIG_SERIAL_ATMEL=y -CONFIG_SERIAL_ATMEL_CONSOLE=y +CONFIG_SERIAL_PHYTIUM_PCI=m CONFIG_SERIAL_MESON=m CONFIG_SERIAL_MESON_CONSOLE=y -CONFIG_SERIAL_CLPS711X=m CONFIG_SERIAL_SAMSUNG=m CONFIG_SERIAL_SAMSUNG_CONSOLE=y CONFIG_SERIAL_TEGRA=m @@ -2024,43 +2010,26 @@ CONFIG_SERIAL_IMX=m CONFIG_SERIAL_IMX_CONSOLE=m CONFIG_SERIAL_UARTLITE=m CONFIG_SERIAL_SH_SCI=m -CONFIG_SERIAL_HS_LPC32XX=m -CONFIG_SERIAL_ICOM=m CONFIG_SERIAL_JSM=m CONFIG_SERIAL_MSM=m -CONFIG_SERIAL_VT8500=y -CONFIG_SERIAL_VT8500_CONSOLE=y -CONFIG_SERIAL_OMAP=m CONFIG_SERIAL_SIFIVE=m -CONFIG_SERIAL_LANTIQ=m CONFIG_SERIAL_SCCNXP=m CONFIG_SERIAL_SC16IS7XX=m CONFIG_SERIAL_SC16IS7XX_SPI=y -CONFIG_SERIAL_TIMBERDALE=m CONFIG_SERIAL_BCM63XX_CONSOLE=y CONFIG_SERIAL_ALTERA_JTAGUART=m CONFIG_SERIAL_ALTERA_UART=m -CONFIG_SERIAL_PCH_UART=m -CONFIG_SERIAL_MXS_AUART=m CONFIG_SERIAL_XILINX_PS_UART=y CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y -CONFIG_SERIAL_MPS2_UART_CONSOLE=y -CONFIG_SERIAL_MPS2_UART=y CONFIG_SERIAL_ARC=m CONFIG_SERIAL_RP2=m CONFIG_SERIAL_FSL_LPUART=m CONFIG_SERIAL_FSL_LINFLEXUART=m CONFIG_SERIAL_CONEXANT_DIGICOLOR=m -CONFIG_SERIAL_ST_ASC=m CONFIG_SERIAL_SPRD=m CONFIG_SERIAL_STM32=m CONFIG_SERIAL_MVEBU_UART=y CONFIG_SERIAL_OWL=m -CONFIG_SERIAL_RDA=y -CONFIG_SERIAL_MILBEAUT_USIO=m -CONFIG_SERIAL_LITEUART=m -CONFIG_SERIAL_SUNPLUS=m -CONFIG_SERIAL_SUNPLUS_CONSOLE=y CONFIG_SERIAL_NUVOTON_MA35D1=m CONFIG_SERIAL_NONSTANDARD=y CONFIG_MOXA_INTELLIO=m @@ -2071,17 +2040,27 @@ CONFIG_NOZOMI=m CONFIG_HVC_DCC=y CONFIG_RPMSG_TTY=m CONFIG_SERIAL_DEV_BUS=m +CONFIG_TTY_PRINTK=m +CONFIG_TTY_PRINTK_LEVEL=7 +CONFIG_PRINTER=m +CONFIG_LP_CONSOLE=y +CONFIG_PPDEV=m CONFIG_VIRTIO_CONSOLE=y -CONFIG_ASPEED_KCS_IPMI_BMC=m +CONFIG_PHYTIUM_KCS_IPMI_BMC=m +CONFIG_PHYTIUM_BT_IPMI_BMC=m CONFIG_NPCM7XX_KCS_IPMI_BMC=m CONFIG_IPMI_KCS_BMC_CDEV_IPMI=m CONFIG_IPMI_KCS_BMC_SERIO=m -CONFIG_ASPEED_BT_IPMI_BMC=m CONFIG_SSIF_IPMI_BMC=m +CONFIG_IPMB_DEVICE_INTERFACE=m CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_TIMERIOMEM=m CONFIG_HW_RANDOM_VIRTIO=m CONFIG_HW_RANDOM_HISI=m +CONFIG_HW_RANDOM_CCTRNG=m +CONFIG_HW_RANDOM_XIPHERA=m +CONFIG_HW_RANDOM_PHYTIUM=m +CONFIG_APPLICOM=m CONFIG_TCG_TIS=m CONFIG_TCG_TIS_SPI=m CONFIG_TCG_TIS_SPI_CR50=y @@ -2111,7 +2090,6 @@ CONFIG_I2C_MUX_PINCTRL=m 
CONFIG_I2C_MUX_REG=m CONFIG_I2C_DEMUX_PINCTRL=m CONFIG_I2C_MUX_MLXCPLD=m -CONFIG_I2C_ATR=m CONFIG_I2C_ALI1535=m CONFIG_I2C_ALI1563=m CONFIG_I2C_ALI15X3=m @@ -2131,42 +2109,27 @@ CONFIG_I2C_VIA=m CONFIG_I2C_VIAPRO=m CONFIG_I2C_SCMI=m CONFIG_I2C_ALTERA=m -CONFIG_I2C_ASPEED=m -CONFIG_I2C_AT91=m -CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL=m -CONFIG_I2C_AXXIA=m CONFIG_I2C_BCM2835=m -CONFIG_I2C_BCM_KONA=m CONFIG_I2C_CADENCE=m CONFIG_I2C_CBUS_GPIO=m -CONFIG_I2C_DAVINCI=m CONFIG_I2C_DESIGNWARE_SLAVE=y CONFIG_I2C_DESIGNWARE_PCI=m -CONFIG_I2C_DIGICOLOR=m -CONFIG_I2C_EG20T=m CONFIG_I2C_EMEV2=m CONFIG_I2C_GPIO=m -CONFIG_I2C_GXP=m -CONFIG_I2C_IMG=m +CONFIG_I2C_HISI=m CONFIG_I2C_IMX=m CONFIG_I2C_IMX_LPI2C=m -CONFIG_I2C_IOP3XX=m -CONFIG_I2C_JZ4780=m -CONFIG_I2C_LPC2K=m -CONFIG_I2C_LS2X=m CONFIG_I2C_MESON=m -CONFIG_I2C_MICROCHIP_CORE=m CONFIG_I2C_MT65XX=m -CONFIG_I2C_MT7621=m CONFIG_I2C_MV64XXX=m -CONFIG_I2C_MXS=m CONFIG_I2C_NOMADIK=m CONFIG_I2C_NPCM=m CONFIG_I2C_OCORES=m CONFIG_I2C_OMAP=m CONFIG_I2C_OWL=m CONFIG_I2C_PCA_PLATFORM=m -CONFIG_I2C_PNX=m +CONFIG_I2C_PHYTIUM_PCI=m +CONFIG_I2C_PHYTIUM_PLATFORM=m CONFIG_I2C_PXA=m CONFIG_I2C_PXA_SLAVE=y CONFIG_I2C_QCOM_CCI=m @@ -2178,17 +2141,13 @@ CONFIG_I2C_S3C2410=m CONFIG_I2C_SH_MOBILE=m CONFIG_I2C_SIMTEC=m CONFIG_I2C_SPRD=m -CONFIG_I2C_ST=m CONFIG_I2C_STM32F4=m CONFIG_I2C_STM32F7=m -CONFIG_I2C_SUN6I_P2WI=m CONFIG_I2C_SYNQUACER=m CONFIG_I2C_TEGRA=m -CONFIG_I2C_TEGRA_BPMP=m CONFIG_I2C_UNIPHIER=m CONFIG_I2C_UNIPHIER_F=m CONFIG_I2C_VERSATILE=m -CONFIG_I2C_WMT=m CONFIG_I2C_THUNDERX=m CONFIG_I2C_XILINX=m CONFIG_I2C_XLP9XX=m @@ -2207,21 +2166,15 @@ CONFIG_I2C_XGENE_SLIMPRO=m CONFIG_I2C_VIRTIO=m CONFIG_I2C_STUB=m CONFIG_I2C_SLAVE_EEPROM=m -CONFIG_I2C_SLAVE_TESTUNIT=m +CONFIG_I3C=m +CONFIG_PHYTIUM_I3C_MASTER=m CONFIG_SPI=y CONFIG_SPI_ALTERA=m CONFIG_SPI_AMLOGIC_SPIFC_A1=m -CONFIG_SPI_AR934X=m -CONFIG_SPI_ATH79=m CONFIG_SPI_ARMADA_3700=m -CONFIG_SPI_ASPEED_SMC=m -CONFIG_SPI_ATMEL=m -CONFIG_SPI_AT91_USART=m -CONFIG_SPI_ATMEL_QUADSPI=m CONFIG_SPI_AXI_SPI_ENGINE=m CONFIG_SPI_BCM2835=m CONFIG_SPI_BCM2835AUX=m -CONFIG_SPI_BCM63XX=m CONFIG_SPI_BCM63XX_HSSPI=m CONFIG_SPI_BCM_QSPI=m CONFIG_SPI_BCMBCA_HSSPI=m @@ -2229,29 +2182,17 @@ CONFIG_SPI_BUTTERFLY=m CONFIG_SPI_CADENCE=m CONFIG_SPI_CADENCE_QUADSPI=m CONFIG_SPI_CADENCE_XSPI=m -CONFIG_SPI_CLPS711X=m -CONFIG_SPI_DAVINCI=m CONFIG_SPI_DESIGNWARE=m CONFIG_SPI_DW_DMA=y CONFIG_SPI_DW_PCI=m CONFIG_SPI_DW_MMIO=m -CONFIG_SPI_DW_BT1=m -CONFIG_SPI_DW_BT1_DIRMAP=y CONFIG_SPI_DLN2=m -CONFIG_SPI_EP93XX=m CONFIG_SPI_FSL_LPSPI=m CONFIG_SPI_FSL_QUADSPI=m -CONFIG_SPI_GXP=m CONFIG_SPI_NXP_FLEXSPI=m CONFIG_SPI_GPIO=m -CONFIG_SPI_IMG_SPFI=m CONFIG_SPI_IMX=m -CONFIG_SPI_INGENIC=m -CONFIG_SPI_INTEL_PCI=m -CONFIG_SPI_INTEL_PLATFORM=m -CONFIG_SPI_JCORE=m CONFIG_SPI_LM70_LLP=m -CONFIG_SPI_LP8841_RTC=m CONFIG_SPI_FSL_SPI=m CONFIG_SPI_FSL_DSPI=m CONFIG_SPI_MESON_SPICC=m @@ -2259,19 +2200,17 @@ CONFIG_SPI_MESON_SPIFC=m CONFIG_SPI_MICROCHIP_CORE=m CONFIG_SPI_MICROCHIP_CORE_QSPI=m CONFIG_SPI_MT65XX=m -CONFIG_SPI_MT7621=m CONFIG_SPI_MTK_NOR=m CONFIG_SPI_WPCM_FIU=m CONFIG_SPI_NPCM_FIU=m CONFIG_SPI_NPCM_PSPI=m -CONFIG_SPI_LANTIQ_SSC=m CONFIG_SPI_OC_TINY=m CONFIG_SPI_OMAP24XX=m -CONFIG_SPI_TI_QSPI=m CONFIG_SPI_ORION=y CONFIG_SPI_PCI1XXXX=m -CONFIG_SPI_PIC32=m -CONFIG_SPI_PIC32_SQI=m +CONFIG_SPI_PHYTIUM_PLAT=m +CONFIG_SPI_PHYTIUM_PCI=m +CONFIG_SPI_PHYTIUM_QSPI=m CONFIG_SPI_PL022=y CONFIG_SPI_PXA2XX=m CONFIG_SPI_ROCKCHIP=m @@ -2283,7 +2222,6 @@ CONFIG_SPI_QUP=m CONFIG_SPI_S3C64XX=m CONFIG_SPI_SC18IS602=m CONFIG_SPI_SH_MSIOF=m -CONFIG_SPI_SH=m CONFIG_SPI_SH_HSPI=m 
CONFIG_SPI_SIFIVE=m CONFIG_SPI_SN_F_OSPI=m @@ -2291,10 +2229,8 @@ CONFIG_SPI_SPRD=m CONFIG_SPI_SPRD_ADI=m CONFIG_SPI_STM32=m CONFIG_SPI_STM32_QSPI=m -CONFIG_SPI_ST_SSC4=m CONFIG_SPI_SUN4I=m CONFIG_SPI_SUN6I=m -CONFIG_SPI_SUNPLUS_SP7021=m CONFIG_SPI_SYNQUACER=m CONFIG_SPI_MXIC=m CONFIG_SPI_TEGRA210_QUAD=m @@ -2302,13 +2238,10 @@ CONFIG_SPI_TEGRA114=m CONFIG_SPI_TEGRA20_SFLASH=m CONFIG_SPI_TEGRA20_SLINK=m CONFIG_SPI_THUNDERX=m -CONFIG_SPI_TOPCLIFF_PCH=m CONFIG_SPI_UNIPHIER=m CONFIG_SPI_XCOMM=m CONFIG_SPI_XILINX=m CONFIG_SPI_XLP=m -CONFIG_SPI_XTENSA_XTFPGA=m -CONFIG_SPI_ZYNQ_QSPI=m CONFIG_SPI_ZYNQMP_GQSPI=m CONFIG_SPI_AMD=m CONFIG_SPI_MUX=m @@ -2333,7 +2266,22 @@ CONFIG_GPIO_PCA953X=m CONFIG_GPIO_PCF857X=m CONFIG_GPIO_DLN2=m CONFIG_GPIO_LP3943=m +CONFIG_GPIO_BT8XX=m +CONFIG_GPIO_MLXBF=m +CONFIG_GPIO_MLXBF2=m +CONFIG_GPIO_MLXBF3=m +CONFIG_GPIO_PCI_IDIO_16=m +CONFIG_GPIO_PCIE_IDIO_24=m +CONFIG_GPIO_RDC321X=m CONFIG_GPIO_VIPERBOARD=m +CONFIG_W1=m +CONFIG_W1_MASTER_MATROX=m +CONFIG_W1_MASTER_DS2490=m +CONFIG_W1_MASTER_DS2482=m +CONFIG_W1_MASTER_MXC=m +CONFIG_W1_MASTER_GPIO=m +CONFIG_W1_MASTER_PHYTIUM=y +CONFIG_W1_MASTER_SGI=m # CONFIG_POWER_RESET_BRCMSTB is not set CONFIG_POWER_RESET_GPIO=y CONFIG_POWER_RESET_GPIO_RESTART=y @@ -2372,40 +2320,76 @@ CONFIG_SENSORS_ADT7411=m CONFIG_SENSORS_ADT7462=m CONFIG_SENSORS_ADT7470=m CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_AHT10=m +CONFIG_SENSORS_AQUACOMPUTER_D5NEXT=m +CONFIG_SENSORS_AS370=m CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_AXI_FAN_CONTROL=m CONFIG_SENSORS_ARM_SCMI=m CONFIG_SENSORS_ARM_SCPI=m +CONFIG_SENSORS_PHYTIUM=m CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_CORSAIR_CPRO=m +CONFIG_SENSORS_CORSAIR_PSU=m +CONFIG_SENSORS_DRIVETEMP=m CONFIG_SENSORS_DS620=m CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_DA9052_ADC=m +CONFIG_SENSORS_DA9055=m CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_SPARX5=m CONFIG_SENSORS_F71805F=m CONFIG_SENSORS_F71882FG=m CONFIG_SENSORS_F75375S=m CONFIG_SENSORS_MC13783_ADC=m +CONFIG_SENSORS_FTSTEUTATES=m CONFIG_SENSORS_GL518SM=m CONFIG_SENSORS_GL520SM=m CONFIG_SENSORS_G760A=m CONFIG_SENSORS_G762=m CONFIG_SENSORS_GPIO_FAN=m CONFIG_SENSORS_HIH6130=m +CONFIG_SENSORS_HS3001=m +CONFIG_SENSORS_IIO_HWMON=m CONFIG_SENSORS_IT87=m CONFIG_SENSORS_JC42=m CONFIG_SENSORS_POWR1220=m CONFIG_SENSORS_LINEAGE=m CONFIG_SENSORS_LTC2945=m +CONFIG_SENSORS_LTC2947_I2C=m +CONFIG_SENSORS_LTC2947_SPI=m CONFIG_SENSORS_LTC2990=m +CONFIG_SENSORS_LTC2992=m CONFIG_SENSORS_LTC4151=m CONFIG_SENSORS_LTC4215=m CONFIG_SENSORS_LTC4222=m CONFIG_SENSORS_LTC4245=m CONFIG_SENSORS_LTC4260=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX1111=m +CONFIG_SENSORS_MAX127=m +CONFIG_SENSORS_MAX16065=m CONFIG_SENSORS_MAX1619=m CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +CONFIG_SENSORS_MAX31722=m +CONFIG_SENSORS_MAX31730=m +CONFIG_SENSORS_MAX31760=m +CONFIG_MAX31827=m +CONFIG_SENSORS_MAX6620=m +CONFIG_SENSORS_MAX6621=m CONFIG_SENSORS_MAX6639=m CONFIG_SENSORS_MAX6650=m CONFIG_SENSORS_MAX6697=m CONFIG_SENSORS_MAX31790=m +CONFIG_SENSORS_MC34VR500=m +CONFIG_SENSORS_MCP3021=m +CONFIG_SENSORS_MLXREG_FAN=m +CONFIG_SENSORS_TC654=m +CONFIG_SENSORS_TPS23861=m +CONFIG_SENSORS_MENF21BMC_HWMON=m +CONFIG_SENSORS_MR75203=m +CONFIG_SENSORS_ADCXX=m CONFIG_SENSORS_LM63=m CONFIG_SENSORS_LM70=m CONFIG_SENSORS_LM73=m @@ -2424,11 +2408,16 @@ CONFIG_SENSORS_LM95241=m CONFIG_SENSORS_LM95245=m CONFIG_SENSORS_PC87360=m CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m CONFIG_SENSORS_NCT6683=m CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_NCT6775_I2C=m CONFIG_SENSORS_NCT7802=m CONFIG_SENSORS_NCT7904=m 
CONFIG_SENSORS_NPCM7XX=m +CONFIG_SENSORS_NZXT_KRAKEN2=m +CONFIG_SENSORS_NZXT_SMART2=m +CONFIG_SENSORS_OCC_P8_I2C=m CONFIG_SENSORS_PCF8591=m CONFIG_PMBUS=m CONFIG_SENSORS_ADM1275=m @@ -2443,43 +2432,61 @@ CONFIG_SENSORS_UCD9000=m CONFIG_SENSORS_UCD9200=m CONFIG_SENSORS_ZL6100=m CONFIG_SENSORS_PWM_FAN=m +CONFIG_SENSORS_SBTSI=m +CONFIG_SENSORS_SBRMI=m CONFIG_SENSORS_SHT15=m CONFIG_SENSORS_SHT21=m CONFIG_SENSORS_SHT3x=m +CONFIG_SENSORS_SHT4x=m CONFIG_SENSORS_SHTC1=m CONFIG_SENSORS_SIS5595=m CONFIG_SENSORS_DME1737=m CONFIG_SENSORS_EMC1403=m CONFIG_SENSORS_EMC2103=m +CONFIG_SENSORS_EMC2305=m CONFIG_SENSORS_EMC6W201=m CONFIG_SENSORS_SMSC47M1=m CONFIG_SENSORS_SMSC47M192=m CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +CONFIG_SENSORS_STTS751=m CONFIG_SENSORS_ADC128D818=m CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_ADS7871=m CONFIG_SENSORS_AMC6821=m CONFIG_SENSORS_INA209=m CONFIG_SENSORS_INA2XX=m +CONFIG_SENSORS_INA238=m +CONFIG_SENSORS_INA3221=m CONFIG_SENSORS_TC74=m CONFIG_SENSORS_THMC50=m CONFIG_SENSORS_TMP102=m CONFIG_SENSORS_TMP103=m +CONFIG_SENSORS_TMP108=m CONFIG_SENSORS_TMP401=m CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_TMP464=m +CONFIG_SENSORS_TMP513=m CONFIG_SENSORS_VEXPRESS=m CONFIG_SENSORS_VIA686A=m CONFIG_SENSORS_VT1211=m CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83773G=m CONFIG_SENSORS_W83781D=m CONFIG_SENSORS_W83791D=m CONFIG_SENSORS_W83792D=m CONFIG_SENSORS_W83793=m CONFIG_SENSORS_W83795=m +CONFIG_SENSORS_W83795_FANCTRL=y CONFIG_SENSORS_W83L785TS=m CONFIG_SENSORS_W83L786NG=m CONFIG_SENSORS_W83627HF=m CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_WM831X=m +CONFIG_SENSORS_WM8350=m CONFIG_SENSORS_XGENE=m +CONFIG_SENSORS_ACPI_POWER=m CONFIG_THERMAL_GOV_FAIR_SHARE=y CONFIG_THERMAL_GOV_BANG_BANG=y CONFIG_THERMAL_GOV_USER_SPACE=y @@ -2525,7 +2532,8 @@ CONFIG_MFD_RETU=m CONFIG_MFD_PCF50633=m CONFIG_PCF50633_ADC=m CONFIG_PCF50633_GPIO=m -CONFIG_MFD_RDC321X=m +CONFIG_MFD_PHYTIUM_I2S_LSD=y +CONFIG_MFD_PHYTIUM_I2S_MMD=y CONFIG_MFD_RT5033=m CONFIG_MFD_RC5T583=y CONFIG_MFD_RN5T618=m @@ -2734,21 +2742,67 @@ CONFIG_USB_SI4713=m CONFIG_PLATFORM_SI4713=m CONFIG_RADIO_WL128X=m CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_SDR_PLATFORM_DRIVERS=y CONFIG_DVB_PLATFORM_DRIVERS=y CONFIG_V4L_MEM2MEM_DRIVERS=y CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m CONFIG_VIDEO_MUX=m +CONFIG_VIDEO_ALLEGRO_DVT=m +CONFIG_VIDEO_MESON_GE2D=m +CONFIG_VIDEO_AMPHION_VPU=m CONFIG_VIDEO_CADENCE_CSI2RX=m CONFIG_VIDEO_CADENCE_CSI2TX=m +CONFIG_VIDEO_CODA=m CONFIG_VIDEO_CAFE_CCIC=m +CONFIG_VIDEO_MEDIATEK_VPU=m +CONFIG_VIDEO_TEGRA_VDE=m +CONFIG_VIDEO_IMX7_CSI=m +CONFIG_VIDEO_IMX8MQ_MIPI_CSI2=m +CONFIG_VIDEO_IMX_MIPI_CSIS=m +CONFIG_VIDEO_IMX8_ISI=m +CONFIG_VIDEO_IMX8_ISI_M2M=y +CONFIG_VIDEO_IMX_PXP=m +CONFIG_VIDEO_DW100=m +CONFIG_VIDEO_IMX8_JPEG=m +CONFIG_VIDEO_PHYTIUM_JPEG=m +CONFIG_VIDEO_QCOM_CAMSS=m +CONFIG_VIDEO_RCAR_ISP=m +CONFIG_VIDEO_RCAR_CSI2=m +CONFIG_VIDEO_RCAR_VIN=m +CONFIG_VIDEO_RZG2L_CSI2=m +CONFIG_VIDEO_RZG2L_CRU=m +CONFIG_VIDEO_RENESAS_FCP=m +CONFIG_VIDEO_RENESAS_FDP1=m +CONFIG_VIDEO_RENESAS_JPU=m +CONFIG_VIDEO_RENESAS_VSP1=m +CONFIG_VIDEO_RCAR_DRIF=m +CONFIG_VIDEO_ROCKCHIP_RGA=m +CONFIG_VIDEO_ROCKCHIP_ISP1=m +CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m +CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS=m +CONFIG_VIDEO_S5P_FIMC=m +CONFIG_VIDEO_S5P_MIPI_CSIS=m +CONFIG_VIDEO_EXYNOS4_FIMC_IS=m +CONFIG_VIDEO_SAMSUNG_S5P_G2D=m +CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m +CONFIG_VIDEO_SAMSUNG_S5P_MFC=m +CONFIG_VIDEO_STM32_DCMI=m +CONFIG_VIDEO_STM32_DMA2D=m +CONFIG_VIDEO_SUN4I_CSI=m +CONFIG_VIDEO_SUN6I_CSI=m +CONFIG_VIDEO_SUN6I_MIPI_CSI2=m 
+CONFIG_VIDEO_SUN8I_A83T_MIPI_CSI2=m +CONFIG_VIDEO_SUN8I_DEINTERLACE=m +CONFIG_VIDEO_SUN8I_ROTATE=m +CONFIG_VIDEO_TI_CAL=m +CONFIG_VIDEO_HANTRO=m CONFIG_VIDEO_XILINX=m +CONFIG_VIDEO_XILINX_CSI2RXSS=m CONFIG_VIDEO_XILINX_TPG=m CONFIG_SMS_SDIO_DRV=m CONFIG_DVB_FIREDTV=m CONFIG_PANEL=m -CONFIG_IMX_IPUV3_CORE=m CONFIG_DRM=y -# CONFIG_DRM_FBDEV_EMULATION is not set CONFIG_DRM_LOAD_EDID_FIRMWARE=y CONFIG_DRM_DP_AUX_CHARDEV=y CONFIG_DRM_DP_CEC=y @@ -2778,11 +2832,9 @@ CONFIG_DRM_EXYNOS_DPI=y CONFIG_DRM_EXYNOS_DSI=y CONFIG_DRM_EXYNOS_HDMI=y CONFIG_DRM_EXYNOS_MIC=y -CONFIG_DRM_EXYNOS_G2D=y CONFIG_DRM_EXYNOS_FIMC=y CONFIG_DRM_EXYNOS_ROTATOR=y CONFIG_DRM_EXYNOS_SCALER=y -CONFIG_DRM_EXYNOS_GSC=y CONFIG_DRM_VMWGFX=m CONFIG_DRM_UDL=m CONFIG_DRM_AST=m @@ -2793,12 +2845,10 @@ CONFIG_DRM_RCAR_DW_HDMI=m CONFIG_DRM_RZG2L_MIPI_DSI=m CONFIG_DRM_SHMOBILE=m CONFIG_DRM_SUN4I=m -CONFIG_DRM_SUN4I_HDMI_CEC=y CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=y CONFIG_DRM_MSM=m CONFIG_DRM_TEGRA=m -CONFIG_DRM_TEGRA_DEBUG=y CONFIG_DRM_TEGRA_STAGING=y CONFIG_DRM_STM=m CONFIG_DRM_STM_DSI=m @@ -2899,15 +2949,8 @@ CONFIG_DRM_TI_TFP410=m CONFIG_DRM_ANALOGIX_ANX78XX=m CONFIG_DRM_I2C_ADV7511=m CONFIG_DRM_CDNS_DSI=m -CONFIG_DRM_IMX=m -CONFIG_DRM_IMX_PARALLEL_DISPLAY=m -CONFIG_DRM_IMX_TVE=m -CONFIG_DRM_IMX_LDB=m -CONFIG_DRM_IMX_HDMI=m CONFIG_DRM_IMX_LCDC=m CONFIG_DRM_V3D=m -CONFIG_DRM_VC4=m -CONFIG_DRM_VC4_HDMI_CEC=y CONFIG_DRM_HISI_HIBMC=m CONFIG_DRM_HISI_KIRIN=m CONFIG_DRM_MEDIATEK=m @@ -2920,7 +2963,6 @@ CONFIG_DRM_ARCPGU=m CONFIG_DRM_BOCHS=m CONFIG_DRM_CIRRUS_QEMU=m CONFIG_DRM_GM12U320=m -CONFIG_DRM_OFDRM=m CONFIG_DRM_PANEL_MIPI_DBI=m CONFIG_DRM_SIMPLEDRM=m CONFIG_TINYDRM_HX8357D=m @@ -2933,27 +2975,24 @@ CONFIG_TINYDRM_REPAPER=m CONFIG_TINYDRM_ST7586=m CONFIG_TINYDRM_ST7735R=m CONFIG_DRM_PL111=m -CONFIG_DRM_TVE200=m CONFIG_DRM_XEN_FRONTEND=m CONFIG_DRM_LIMA=m CONFIG_DRM_PANFROST=m -CONFIG_DRM_ASPEED_GFX=m -CONFIG_DRM_MCDE=m CONFIG_DRM_TIDSS=m CONFIG_DRM_GUD=m CONFIG_DRM_SSD130X=m CONFIG_DRM_SSD130X_I2C=m CONFIG_DRM_SSD130X_SPI=m CONFIG_DRM_SPRD=m +CONFIG_DRM_PHYTIUM=m CONFIG_FB=m -CONFIG_FB_OMAP2=m -CONFIG_MMP_DISP=m CONFIG_FB_TILEBLITTING=y CONFIG_BACKLIGHT_PWM=m CONFIG_BACKLIGHT_ADP8860=m CONFIG_BACKLIGHT_ADP8870=m CONFIG_BACKLIGHT_LM3630A=m CONFIG_BACKLIGHT_GPIO=m +CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION=y CONFIG_LOGO=y CONFIG_DRM_ACCEL=y CONFIG_DRM_ACCEL_QAIC=m @@ -3027,6 +3066,7 @@ CONFIG_SND_VIRTUOSO=m CONFIG_SND_VX222=m CONFIG_SND_YMFPCI=m CONFIG_SND_HDA_INTEL=y +CONFIG_SND_HDA_PHYTIUM=m CONFIG_SND_HDA_HWDEP=y CONFIG_SND_HDA_INPUT_BEEP=y CONFIG_SND_HDA_PATCH_LOADER=y @@ -3083,6 +3123,10 @@ CONFIG_SND_SOC_IMG_PARALLEL_OUT=m CONFIG_SND_SOC_IMG_SPDIF_IN=m CONFIG_SND_SOC_IMG_SPDIF_OUT=m CONFIG_SND_SOC_IMG_PISTACHIO_INTERNAL_DAC=m +CONFIG_SND_SOC_PHYTIUM_I2S=m +CONFIG_SND_PMDK_ES8388=m +CONFIG_SND_PMDK_ES8336=m +CONFIG_SND_PMDK_DP=m CONFIG_SND_SOC_XTFPGA_I2S=m CONFIG_SND_SOC_AC97_CODEC=m CONFIG_SND_SOC_ADAU1701=m @@ -3306,6 +3350,8 @@ CONFIG_USB_MON=m CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_DBGCAP=y CONFIG_USB_XHCI_HISTB=m +CONFIG_USB_XHCI_MTK=m +CONFIG_USB_XHCI_MVEBU=m CONFIG_USB_EHCI_HCD=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_UHCI_HCD=m @@ -3336,14 +3382,20 @@ CONFIG_USBIP_CORE=m CONFIG_USBIP_VHCI_HCD=m CONFIG_USBIP_HOST=m CONFIG_USB_MUSB_HDRC=m +CONFIG_USB_MUSB_HOST=y CONFIG_MUSB_PIO_ONLY=y CONFIG_USB_DWC3=m CONFIG_USB_DWC3_ULPI=y +CONFIG_USB_DWC3_HOST=y CONFIG_USB_DWC2=m +CONFIG_USB_DWC2_HOST=y CONFIG_USB_DWC2_PCI=m CONFIG_USB_CHIPIDEA=m CONFIG_USB_CHIPIDEA_HOST=y CONFIG_USB_ISP1760=m +CONFIG_USB_ISP1760_HOST_ROLE=y 
+CONFIG_USB_PHYTIUM=m +CONFIG_USB_PHYTIUM_PCI=m CONFIG_USB_SERIAL=y CONFIG_USB_SERIAL_GENERIC=y CONFIG_USB_SERIAL_SIMPLE=m @@ -3421,12 +3473,24 @@ CONFIG_USB_CXACRU=m CONFIG_USB_UEAGLEATM=m CONFIG_USB_XUSBATM=m CONFIG_USB_ISP1301=m +CONFIG_USB_GADGET=m CONFIG_TYPEC=m CONFIG_TYPEC_TCPM=m +CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_RT1711H=m +CONFIG_TYPEC_TCPCI_MAXIM=m CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_QCOM_PMIC=m CONFIG_TYPEC_UCSI=m +CONFIG_UCSI_CCG=m CONFIG_UCSI_ACPI=m +CONFIG_UCSI_STM32G0=m CONFIG_TYPEC_TPS6598X=m +CONFIG_TYPEC_ANX7411=m +CONFIG_TYPEC_RT1719=m +CONFIG_TYPEC_HD3SS3220=m +CONFIG_TYPEC_STUSB160X=m +CONFIG_TYPEC_WUSB3801=m CONFIG_TYPEC_MUX_PI3USB30532=m CONFIG_TYPEC_DP_ALTMODE=m CONFIG_MMC=m @@ -3447,13 +3511,22 @@ CONFIG_MMC_DW_EXYNOS=m CONFIG_MMC_DW_HI3798CV200=m CONFIG_MMC_DW_K3=m CONFIG_MMC_DW_PCI=m +CONFIG_MMC_DW_ROCKCHIP=m +CONFIG_MMC_SH_MMCIF=m CONFIG_MMC_VUB300=m CONFIG_MMC_USHC=m +CONFIG_MMC_USDHI6ROL0=m CONFIG_MMC_REALTEK_PCI=m CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_SUNXI=m +CONFIG_MMC_HSQ=m CONFIG_MMC_TOSHIBA_PCI=m +CONFIG_MMC_BCM2835=m CONFIG_MMC_MTK=m CONFIG_MMC_SDHCI_XENON=m +CONFIG_MMC_SDHCI_AM654=m +CONFIG_MMC_OWL=m +CONFIG_MMC_PHYTIUM_SDCI=m CONFIG_SCSI_UFSHCD=m CONFIG_SCSI_UFSHCD_PCI=m CONFIG_SCSI_UFSHCD_PLATFORM=m @@ -3511,7 +3584,24 @@ CONFIG_INFINIBAND_SRPT=m CONFIG_INFINIBAND_ISER=m CONFIG_INFINIBAND_ISERT=m CONFIG_EDAC=y +CONFIG_EDAC_GHES=m +CONFIG_EDAC_AL_MC=m +CONFIG_EDAC_LAYERSCAPE=m +CONFIG_EDAC_THUNDERX=m +CONFIG_EDAC_ALTERA=y +CONFIG_EDAC_ALTERA_SDRAM=y +CONFIG_EDAC_ALTERA_OCRAM=y +CONFIG_EDAC_ALTERA_ETHERNET=y +CONFIG_EDAC_ALTERA_USB=y +CONFIG_EDAC_ALTERA_QSPI=y +CONFIG_EDAC_ALTERA_SDMMC=y +CONFIG_EDAC_SYNOPSYS=m CONFIG_EDAC_XGENE=m +CONFIG_EDAC_PHYTIUM=m +CONFIG_EDAC_BLUEFIELD=m +CONFIG_EDAC_DMC520=m +CONFIG_EDAC_ZYNQMP=m +CONFIG_EDAC_NPCM=m CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_ABB5ZES3=m CONFIG_RTC_DRV_ABX80X=m @@ -3541,10 +3631,56 @@ CONFIG_RTC_DRV_ZYNQMP=y CONFIG_RTC_DRV_PL030=y CONFIG_RTC_DRV_PL031=y CONFIG_DMADEVICES=y -# CONFIG_BCM_SBA_RAID is not set +CONFIG_ALTERA_MSGDMA=m +CONFIG_AMBA_PL08X=y +CONFIG_AXI_DMAC=m +CONFIG_DMA_BCM2835=m +CONFIG_DMA_SUN6I=m +CONFIG_DW_AXI_DMAC=m +CONFIG_FSL_EDMA=m +CONFIG_FSL_QDMA=m +CONFIG_HISI_DMA=m +CONFIG_IMX_DMA=m +CONFIG_IMX_SDMA=m CONFIG_INTEL_IDMA64=m +CONFIG_K3_DMA=m +CONFIG_MV_XOR=y +CONFIG_MV_XOR_V2=y +CONFIG_MXS_DMA=y +CONFIG_OWL_DMA=m CONFIG_PL330_DMA=m CONFIG_PLX_DMA=m +CONFIG_STM32_DMA=y +CONFIG_STM32_DMAMUX=y +CONFIG_STM32_MDMA=y +CONFIG_SPRD_DMA=m +CONFIG_TEGRA186_GPC_DMA=m +CONFIG_TEGRA20_APB_DMA=m +CONFIG_TEGRA210_ADMA=m +CONFIG_UNIPHIER_MDMAC=m +CONFIG_UNIPHIER_XDMAC=m +CONFIG_XGENE_DMA=m +CONFIG_XILINX_DMA=m +CONFIG_XILINX_XDMA=m +CONFIG_XILINX_ZYNQMP_DMA=m +CONFIG_XILINX_ZYNQMP_DPDMA=m +CONFIG_PHYTIUM_DDMA=y +CONFIG_MTK_HSDMA=m +CONFIG_MTK_CQDMA=m +CONFIG_MTK_UART_APDMA=m +CONFIG_QCOM_BAM_DMA=m +CONFIG_QCOM_GPI_DMA=m +CONFIG_QCOM_HIDMA_MGMT=m +CONFIG_QCOM_HIDMA=m +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=m +CONFIG_DW_EDMA=m +CONFIG_DW_EDMA_PCIE=m +CONFIG_SF_PDMA=m +CONFIG_RCAR_DMAC=m +CONFIG_RENESAS_USB_DMAC=m +CONFIG_TI_K3_UDMA=m +CONFIG_TI_K3_UDMA_GLUE_LAYER=m CONFIG_ASYNC_TX_DMA=y CONFIG_SW_SYNC=y CONFIG_UIO_CIF=m @@ -3569,6 +3705,7 @@ CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y CONFIG_VHOST_NET=m CONFIG_VHOST_SCSI=m CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y CONFIG_HYPERV=m CONFIG_HYPERV_UTILS=m CONFIG_HYPERV_BALLOON=m @@ -3596,7 +3733,9 @@ CONFIG_DVB_BUDGET_PATCH=m CONFIG_LTE_GDM724X=m CONFIG_PI433=m CONFIG_QLGE=m +CONFIG_GOLDFISH=y CONFIG_CHROME_PLATFORMS=y +CONFIG_MELLANOX_PLATFORM=y 
CONFIG_COMMON_CLK_WM831X=m CONFIG_COMMON_CLK_MAX77686=m CONFIG_COMMON_CLK_SCPI=y @@ -3607,15 +3746,38 @@ CONFIG_COMMON_CLK_CDCE706=m CONFIG_COMMON_CLK_CDCE925=m # CONFIG_COMMON_CLK_XGENE is not set CONFIG_COMMON_CLK_PWM=m +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_OMAP=m +CONFIG_HWSPINLOCK_PHYTIUM=m +CONFIG_HWSPINLOCK_QCOM=m +CONFIG_HWSPINLOCK_SPRD=m +CONFIG_HWSPINLOCK_SUN6I=m CONFIG_ARM_MHU=y +CONFIG_ARM_MHU_V2=m +CONFIG_IMX_MBOX=m +CONFIG_PHYTIUM_MBOX=m CONFIG_PLATFORM_MHU=y CONFIG_PL320_MBOX=y +CONFIG_ARMADA_37XX_RWTM_MBOX=m +CONFIG_OMAP2PLUS_MBOX=m +CONFIG_ROCKCHIP_MBOX=y +CONFIG_ALTERA_MBOX=m +CONFIG_BCM2835_MBOX=m CONFIG_HI6220_MBOX=m +CONFIG_QCOM_APCS_IPC=m +CONFIG_TEGRA_HSP_MBOX=y +CONFIG_XGENE_SLIMPRO_MBOX=m +CONFIG_BCM_PDC_MBOX=m +CONFIG_MTK_ADSP_MBOX=m +CONFIG_MTK_CMDQ_MBOX=m +CONFIG_SPRD_MBOX=m +CONFIG_QCOM_IPCC=m CONFIG_IOMMU_IO_PGTABLE_ARMV7S=y CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y CONFIG_ARM_SMMU=y CONFIG_ARM_SMMU_V3=y CONFIG_RPMSG_VIRTIO=m +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_DEVFREQ_GOV_PERFORMANCE=m CONFIG_DEVFREQ_GOV_POWERSAVE=m CONFIG_DEVFREQ_GOV_USERSPACE=m @@ -3630,6 +3792,14 @@ CONFIG_EXTCON_PALMAS=m CONFIG_EXTCON_RT8973A=m CONFIG_EXTCON_SM5502=m CONFIG_ARM_PL172_MPMC=m +CONFIG_IIO=m +CONFIG_IIO_BUFFER_CB=m +CONFIG_IIO_BUFFER_DMAENGINE=m +CONFIG_IIO_BUFFER_HW_CONSUMER=m +CONFIG_IIO_SW_DEVICE=m +CONFIG_IIO_SW_TRIGGER=m +CONFIG_IIO_TRIGGERED_EVENT=m +CONFIG_PHYTIUM_ADC=m CONFIG_NTB=y CONFIG_NTB_TRANSPORT=m CONFIG_PWM=y @@ -3638,8 +3808,12 @@ CONFIG_PWM_FSL_FTM=m CONFIG_PWM_HIBVT=m CONFIG_PWM_LP3943=m CONFIG_PWM_PCA9685=m +CONFIG_PWM_PHYTIUM=m CONFIG_PWM_TWL=m CONFIG_PWM_TWL_LED=m +CONFIG_AL_FIC=y +CONFIG_XILINX_INTC=y +CONFIG_PHYTIUM_IXIC=y CONFIG_IPACK_BUS=m CONFIG_BOARD_TPCI200=m CONFIG_SERIAL_IPOCTAL=m @@ -3652,9 +3826,28 @@ CONFIG_PHY_PXA_28NM_USB2=m CONFIG_PHY_MAPPHONE_MDM6600=m CONFIG_PHY_QCOM_USB_HS=m CONFIG_PHY_QCOM_USB_HSIC=m -CONFIG_PHY_SAMSUNG_USB2=m CONFIG_PHY_TUSB1210=m CONFIG_POWERCAP=y +CONFIG_ARM_CCI_PMU=m +CONFIG_ARM_CCN=m +CONFIG_ARM_CMN=m +CONFIG_ARM_SMMU_V3_PMU=m +CONFIG_ARM_DSU_PMU=m +CONFIG_FSL_IMX8_DDR_PMU=m +CONFIG_FSL_IMX9_DDR_PMU=m +CONFIG_QCOM_L2_PMU=y +CONFIG_QCOM_L3_PMU=y +CONFIG_XGENE_PMU=y +CONFIG_ARM_SPE_PMU=m +CONFIG_ARM_DMC620_PMU=m +CONFIG_MARVELL_CN10K_TAD_PMU=m +CONFIG_APPLE_M1_CPU_PMU=y +CONFIG_ALIBABA_UNCORE_DRW_PMU=m +CONFIG_HNS3_PMU=m +CONFIG_MARVELL_CN10K_DDR_PMU=m +CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU=m +CONFIG_MESON_DDR_PMU=m +CONFIG_PHYTIUM_PMU=y CONFIG_STM=y CONFIG_STM_SOURCE_CONSOLE=y CONFIG_FPGA=m @@ -3857,11 +4050,9 @@ CONFIG_IMA_WRITE_POLICY=y # CONFIG_IMA_READ_POLICY is not set CONFIG_IMA_APPRAISE=y CONFIG_DEFAULT_SECURITY_APPARMOR=y -CONFIG_INIT_STACK_ALL_ZERO=y CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_PCRYPT=m -CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m CONFIG_CRYPTO_SM2=y @@ -3935,9 +4126,11 @@ CONFIG_SIGNED_PE_FILE_VERIFICATION=y CONFIG_INDIRECT_PIO=y CONFIG_CRC4=m CONFIG_LIBCRC32C=y +CONFIG_DMA_CMA=y CONFIG_PRINTK_TIME=y CONFIG_CONSOLE_LOGLEVEL_DEFAULT=4 CONFIG_DYNAMIC_DEBUG=y +# CONFIG_DEBUG_MISC is not set CONFIG_DEBUG_INFO_DWARF5=y CONFIG_DEBUG_INFO_BTF=y CONFIG_GDB_SCRIPTS=y @@ -3948,6 +4141,7 @@ CONFIG_VMLINUX_MAP=y CONFIG_MAGIC_SYSRQ=y # CONFIG_SLUB_DEBUG is not set # CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_RCU_TRACE is not set # CONFIG_STRICT_DEVMEM is not set diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index f1cb2447afc9c..1b439a2112181 100644 --- 
a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -294,6 +294,20 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr) void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) { +#ifdef CONFIG_ARM64 + if (read_cpuid_implementor() == ARM_CPU_IMP_HISI) { + /* For normal memory we already have a cacheable mapping. */ + if (memblock_is_map_memory(phys)) + return (void __iomem *)__phys_to_virt(phys); + + /* + * We should still honor the memory's attribute here because + * crash dump kernel possibly excludes some ACPI (reclaim) + * regions from memblock list. + */ + return ioremap_prot(phys, size, __acpi_get_mem_attribute(phys)); + } +#endif efi_memory_desc_t *md, *region = NULL; pgprot_t prot; diff --git a/arch/loongarch/configs/deepin_loongarch_desktop_defconfig b/arch/loongarch/configs/deepin_loongarch_desktop_defconfig index 1928e203b576b..43f7b114bbbf2 100644 --- a/arch/loongarch/configs/deepin_loongarch_desktop_defconfig +++ b/arch/loongarch/configs/deepin_loongarch_desktop_defconfig @@ -1,4 +1,4 @@ -CONFIG_COMPILE_TEST=y +CONFIG_WERROR=y CONFIG_BUILD_SALT="-loong64-desktop-hwe" CONFIG_KERNEL_ZSTD=y CONFIG_DEFAULT_HOSTNAME="" @@ -69,7 +69,6 @@ CONFIG_PM_AUTOSLEEP=y CONFIG_PM_WAKELOCKS=y CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y CONFIG_ACPI_SPCR_TABLE=y -CONFIG_ACPI_EC_DEBUGFS=y CONFIG_ACPI_TAD=m CONFIG_ACPI_DOCK=y CONFIG_ACPI_IPMI=m @@ -82,6 +81,8 @@ CONFIG_ACPI_CONFIGFS=m CONFIG_ACPI_PFRUT=m CONFIG_ACPI_FFH=y CONFIG_PMIC_OPREGION=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m CONFIG_COMPAT_32BIT_TIME=y CONFIG_LOCK_EVENT_COUNTS=y CONFIG_MODULES=y @@ -100,6 +101,7 @@ CONFIG_BLK_CGROUP_IOLATENCY=y CONFIG_BLK_CGROUP_FC_APPID=y CONFIG_BLK_CGROUP_IOCOST=y CONFIG_BLK_CGROUP_IOPRIO=y +# CONFIG_BLK_DEBUG_FS is not set CONFIG_BLK_SED_OPAL=y CONFIG_BLK_INLINE_ENCRYPTION=y CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y @@ -121,7 +123,6 @@ CONFIG_MINIX_SUBPARTITION=y CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_LDM_PARTITION=y -CONFIG_LDM_DEBUG=y CONFIG_SGI_PARTITION=y CONFIG_ULTRIX_PARTITION=y CONFIG_SUN_PARTITION=y @@ -641,7 +642,7 @@ CONFIG_BT_6LOWPAN=m CONFIG_BT_LEDS=y CONFIG_BT_MSFTEXT=y CONFIG_BT_AOSPEXT=y -CONFIG_BT_SELFTEST=y +# CONFIG_BT_DEBUGFS is not set CONFIG_BT_HCIBTUSB=m CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y CONFIG_BT_HCIBTUSB_MTK=y @@ -670,7 +671,6 @@ CONFIG_BT_MRVL_SDIO=m CONFIG_BT_ATH3K=m CONFIG_BT_MTKSDIO=m CONFIG_BT_MTKUART=m -CONFIG_BT_QCOMSMD=m CONFIG_BT_VIRTIO=m CONFIG_BT_NXPUART=m CONFIG_AF_RXRPC_IPV6=y @@ -770,6 +770,7 @@ CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y # CONFIG_STANDALONE is not set # CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FW_LOADER_DEBUG is not set CONFIG_FW_LOADER_COMPRESS=y CONFIG_FW_LOADER_COMPRESS_ZSTD=y CONFIG_MOXTET=m @@ -789,7 +790,6 @@ CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y CONFIG_EFI_ZBOOT=y CONFIG_EFI_BOOTLOADER_CONTROL=m CONFIG_EFI_CAPSULE_LOADER=m -CONFIG_EFI_TEST=m CONFIG_RESET_ATTACK_MITIGATION=y CONFIG_EFI_COCO_SECRET=y CONFIG_GNSS=m @@ -926,7 +926,6 @@ CONFIG_DS1682=m CONFIG_LATTICE_ECP3_CONFIG=m CONFIG_SRAM=y CONFIG_DW_XDATA_PCIE=m -CONFIG_PCI_ENDPOINT_TEST=m CONFIG_XILINX_SDFEC=m CONFIG_HISI_HIKEY_USB=m CONFIG_OPEN_DICE=m @@ -1025,7 +1024,6 @@ CONFIG_SCSI_EFCT=m CONFIG_SCSI_DC395x=m CONFIG_SCSI_AM53C974=m CONFIG_SCSI_WD719X=m -CONFIG_SCSI_DEBUG=m CONFIG_SCSI_PMCRAID=m CONFIG_SCSI_PM8001=m CONFIG_SCSI_BFA_FC=m @@ -1132,9 +1130,6 @@ CONFIG_MD_CLUSTER=m CONFIG_BCACHE=y CONFIG_BCACHE_ASYNC_REGISTRATION=y CONFIG_BLK_DEV_DM=y -CONFIG_DM_DEBUG=y -CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y 
-CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y CONFIG_DM_UNSTRIPED=y CONFIG_DM_CRYPT=y CONFIG_DM_SNAPSHOT=y @@ -1366,6 +1361,7 @@ CONFIG_SKY2=y CONFIG_OCTEON_EP=m CONFIG_PRESTERA=m CONFIG_MLX4_EN=m +# CONFIG_MLX4_DEBUG is not set CONFIG_MLX5_CORE=m CONFIG_MLX5_FPGA=y CONFIG_MLX5_CORE_EN=y @@ -1383,6 +1379,7 @@ CONFIG_ENC28J60=m CONFIG_ENCX24J600=m CONFIG_LAN743X=m CONFIG_LAN966X_SWITCH=m +CONFIG_YT6801=m CONFIG_MSCC_OCELOT_SWITCH=m CONFIG_MYRI10GE=m CONFIG_FEALNX=m @@ -1415,7 +1412,6 @@ CONFIG_8139TOO=y CONFIG_8139TOO_TUNE_TWISTER=y CONFIG_8139TOO_8129=y CONFIG_R8169=m -CONFIG_YT6801=m CONFIG_ROCKER=m CONFIG_SXGBE_ETH=m CONFIG_SC92031=m @@ -1631,13 +1627,13 @@ CONFIG_ATH6KL_SDIO=m CONFIG_ATH6KL_USB=m CONFIG_AR5523=m CONFIG_WIL6210=m +# CONFIG_WIL6210_DEBUGFS is not set CONFIG_ATH10K=m CONFIG_ATH10K_PCI=m CONFIG_ATH10K_AHB=y CONFIG_ATH10K_SDIO=m CONFIG_ATH10K_USB=m CONFIG_WCN36XX=m -CONFIG_WCN36XX_DEBUGFS=y CONFIG_ATH11K=m CONFIG_ATH11K_AHB=m CONFIG_ATH11K_PCI=m @@ -1757,6 +1753,8 @@ CONFIG_RTW88_8723DU=m CONFIG_RTW88_8821CE=m CONFIG_RTW88_8821CS=m CONFIG_RTW88_8821CU=m +CONFIG_RTW88_DEBUG=y +CONFIG_RTW88_DEBUGFS=y CONFIG_RTW89=m CONFIG_RTW89_8851BE=m CONFIG_RTW89_8852AE=m @@ -1765,6 +1763,7 @@ CONFIG_RTW89_8852CE=m CONFIG_RTW89_DEBUGMSG=y CONFIG_RTW89_DEBUGFS=y CONFIG_RSI_91X=m +# CONFIG_RSI_DEBUGFS is not set CONFIG_WFX=m CONFIG_CW1200=m CONFIG_CW1200_WLAN_SDIO=m @@ -2214,7 +2213,6 @@ CONFIG_I2C_DESIGNWARE_SLAVE=y CONFIG_I2C_DESIGNWARE_PCI=m CONFIG_I2C_EMEV2=m CONFIG_I2C_GPIO=m -CONFIG_I2C_GPIO_FAULT_INJECTOR=y CONFIG_I2C_KEMPLD=m CONFIG_I2C_OCORES=m CONFIG_I2C_PCA_PLATFORM=m @@ -2423,7 +2421,6 @@ CONFIG_MAX8925_POWER=m CONFIG_WM831X_BACKUP=m CONFIG_WM831X_POWER=m CONFIG_WM8350_POWER=m -CONFIG_TEST_POWER=m CONFIG_BATTERY_88PM860X=m CONFIG_CHARGER_ADP5061=m CONFIG_BATTERY_ACT8945A=m @@ -2738,6 +2735,7 @@ CONFIG_DA9062_THERMAL=m CONFIG_GENERIC_ADC_THERMAL=m CONFIG_LOONGSON2_THERMAL=m CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y CONFIG_WATCHDOG_SYSFS=y CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT=y CONFIG_WATCHDOG_PRETIMEOUT_GOV=y @@ -2770,7 +2768,6 @@ CONFIG_STPMIC1_WATCHDOG=m CONFIG_ALIM7101_WDT=m CONFIG_I6300ESB_WDT=m CONFIG_KEMPLD_WDT=m -CONFIG_LOONGSON1_WDT=m CONFIG_MEN_A21_WDT=m CONFIG_PCIPCWATCHDOG=m CONFIG_WDTPCI=m @@ -3121,12 +3118,10 @@ CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y CONFIG_MEDIA_RADIO_SUPPORT=y CONFIG_MEDIA_SDR_SUPPORT=y CONFIG_MEDIA_PLATFORM_SUPPORT=y -CONFIG_VIDEO_ADV_DEBUG=y CONFIG_VIDEO_FIXED_MINOR_RANGES=y CONFIG_V4L2_FLASH_LED_CLASS=m CONFIG_DVB_MMAP=y CONFIG_DVB_DEMUX_SECTION_LOSS_LOG=y -CONFIG_DVB_ULE_DEBUG=y CONFIG_MEDIA_USB_SUPPORT=y CONFIG_USB_GSPCA=m CONFIG_USB_GSPCA_BENQ=m @@ -3595,14 +3590,12 @@ CONFIG_DRM_I2C_ADV7511=m CONFIG_DRM_I2C_ADV7511_AUDIO=y CONFIG_DRM_CDNS_DSI=m CONFIG_DRM_CDNS_MHDP8546=m -CONFIG_DRM_LOONGSON=m CONFIG_DRM_ETNAVIV=m CONFIG_DRM_LOGICVC=m CONFIG_DRM_ARCPGU=m CONFIG_DRM_BOCHS=m CONFIG_DRM_CIRRUS_QEMU=m CONFIG_DRM_GM12U320=m -CONFIG_DRM_OFDRM=m CONFIG_DRM_PANEL_MIPI_DBI=m CONFIG_DRM_SIMPLEDRM=y CONFIG_TINYDRM_HX8357D=m @@ -3735,7 +3728,6 @@ CONFIG_SND_SEQ_DUMMY=m CONFIG_SND_SEQUENCER_OSS=m CONFIG_SND_DUMMY=m CONFIG_SND_ALOOP=m -CONFIG_SND_PCMTEST=m CONFIG_SND_VIRMIDI=m CONFIG_SND_MTPAV=m CONFIG_SND_MTS64=m @@ -4374,16 +4366,12 @@ CONFIG_USB_SISUSBVGA=m CONFIG_USB_LD=m CONFIG_USB_TRANCEVIBRATOR=m CONFIG_USB_IOWARRIOR=m -CONFIG_USB_TEST=m -CONFIG_USB_EHSET_TEST_FIXTURE=m CONFIG_USB_ISIGHTFW=m CONFIG_USB_YUREX=m CONFIG_USB_HUB_USB251XB=m CONFIG_USB_HSIC_USB3503=m CONFIG_USB_HSIC_USB4604=m -CONFIG_USB_LINK_LAYER_TEST=m CONFIG_USB_CHAOSKEY=m -CONFIG_USB_ONBOARD_HUB=m 
CONFIG_USB_ATM=m CONFIG_USB_SPEEDTOUCH=m CONFIG_USB_CXACRU=m @@ -4645,6 +4633,7 @@ CONFIG_INFINIBAND_IRDMA=m CONFIG_MLX4_INFINIBAND=m CONFIG_MLX5_INFINIBAND=m CONFIG_INFINIBAND_MTHCA=m +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set CONFIG_INFINIBAND_OCRDMA=m CONFIG_INFINIBAND_QEDR=m CONFIG_INFINIBAND_VMWARE_PVRDMA=m @@ -4823,7 +4812,6 @@ CONFIG_GREYBUS_ES2=m CONFIG_COMEDI=m CONFIG_COMEDI_MISC_DRIVERS=y CONFIG_COMEDI_BOND=m -CONFIG_COMEDI_TEST=m CONFIG_COMEDI_PARPORT=m CONFIG_COMEDI_ISA_DRIVERS=y CONFIG_COMEDI_PCL711=m @@ -5064,7 +5052,6 @@ CONFIG_COMMON_CLK_FIXED_MMIO=y CONFIG_XILINX_VCU=m CONFIG_COMMON_CLK_XLNX_CLKWZRD=m CONFIG_HWSPINLOCK=y -CONFIG_CLKSRC_LOONGSON1_PWM=y CONFIG_MAILBOX=y CONFIG_PLATFORM_MHU=m CONFIG_PCC=y @@ -5085,6 +5072,7 @@ CONFIG_LOONGSON2_GUTS=m CONFIG_LOONGSON2_PM=y CONFIG_WPCM450_SOC=m CONFIG_QCOM_PMIC_GLINK=m +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_DEVFREQ_GOV_PERFORMANCE=m CONFIG_DEVFREQ_GOV_POWERSAVE=m CONFIG_DEVFREQ_GOV_USERSPACE=m @@ -5545,10 +5533,8 @@ CONFIG_MCB=m CONFIG_MCB_PCI=m CONFIG_MCB_LPC=m CONFIG_USB4=m -CONFIG_USB4_DMA_TEST=m CONFIG_ANDROID_BINDER_IPC=y CONFIG_ANDROID_BINDERFS=y -CONFIG_ANDROID_BINDER_IPC_SELFTEST=y CONFIG_LIBNVDIMM=y CONFIG_DEV_DAX=m CONFIG_NVMEM_LAYOUT_SL28_VPD=m @@ -5602,7 +5588,6 @@ CONFIG_FSI_SCOM=m CONFIG_FSI_SBEFIFO=m CONFIG_FSI_OCC=m CONFIG_I2CR_SCOM=m -CONFIG_TEE=m CONFIG_MUX_ADG792A=m CONFIG_MUX_ADGS1408=m CONFIG_MUX_GPIO=m @@ -5610,6 +5595,7 @@ CONFIG_MUX_MMIO=m CONFIG_SIOX=m CONFIG_SIOX_BUS_GPIO=m CONFIG_SLIM_QCOM_CTRL=m +CONFIG_INTERCONNECT=y CONFIG_COUNTER=m CONFIG_INTERRUPT_CNT=m CONFIG_MOST=m @@ -5644,7 +5630,7 @@ CONFIG_XFS_ONLINE_REPAIR=y CONFIG_GFS2_FS=m CONFIG_GFS2_FS_LOCKING_DLM=y CONFIG_OCFS2_FS=m -CONFIG_OCFS2_DEBUG_FS=y +# CONFIG_OCFS2_DEBUG_MASKLOG is not set CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_BTRFS_ASSERT=y @@ -5663,7 +5649,6 @@ CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_FANOTIFY=y CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y CONFIG_QUOTA_NETLINK_INTERFACE=y -CONFIG_QUOTA_DEBUG=y CONFIG_QFMT_V1=m CONFIG_QFMT_V2=y CONFIG_AUTOFS_FS=y @@ -5764,7 +5749,6 @@ CONFIG_NFSD_V4_2_INTER_SSC=y CONFIG_NFSD_V4_SECURITY_LABEL=y CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA=y CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2=y -CONFIG_SUNRPC_DEBUG=y CONFIG_CEPH_FS=m CONFIG_CEPH_FSCACHE=y CONFIG_CEPH_FS_POSIX_ACL=y @@ -5931,7 +5915,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=y CONFIG_CRYPTO_USER_API_RNG=y CONFIG_CRYPTO_USER_API_AEAD=y CONFIG_CRYPTO_STATS=y -CONFIG_CRYPTO_CRC32_LOONGARCH=m +CONFIG_CRYPTO_CRC32_LOONGARCH=y CONFIG_CRYPTO_DEV_ATMEL_ECC=m CONFIG_CRYPTO_DEV_ATMEL_SHA204A=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m @@ -5947,9 +5931,7 @@ CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_CRYPTO_DEV_SAFEXCEL=m CONFIG_CRYPTO_DEV_CCREE=m CONFIG_CRYPTO_DEV_AMLOGIC_GXL=m -CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG=y CONFIG_PKCS8_PRIVATE_KEY_PARSER=y -CONFIG_PKCS7_TEST_KEY=y CONFIG_SIGNED_PE_FILE_VERIFICATION=y CONFIG_CORDIC=y CONFIG_CRYPTO_LIB_CURVE25519=y @@ -5977,6 +5959,7 @@ CONFIG_FONT_TER16x32=y CONFIG_FONT_6x8=y CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y +# CONFIG_DEBUG_MISC is not set CONFIG_DEBUG_INFO_DWARF5=y CONFIG_GDB_SCRIPTS=y CONFIG_STRIP_ASM_SYMS=y @@ -5984,20 +5967,15 @@ CONFIG_READABLE_ASM=y CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_VMLINUX_MAP=y CONFIG_KGDB=y -CONFIG_DEBUG_PAGE_REF=y -CONFIG_SHRINKER_DEBUG=y -CONFIG_SCHED_STACK_END_CHECK=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_DEBUG_STACKOVERFLOW=y +# CONFIG_SLUB_DEBUG is not set CONFIG_PANIC_TIMEOUT=30 CONFIG_HARDLOCKUP_DETECTOR=y CONFIG_WQ_WATCHDOG=y CONFIG_WQ_CPU_INTENSIVE_REPORT=y +# 
CONFIG_SCHED_DEBUG is not set CONFIG_SCHEDSTATS=y -CONFIG_DEBUG_PREEMPT=y -CONFIG_DEBUG_RT_MUTEXES=y CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=20 -CONFIG_RCU_EQS_DEBUG=y +# CONFIG_RCU_TRACE is not set CONFIG_BOOTTIME_TRACING=y CONFIG_FUNCTION_GRAPH_RETVAL=y CONFIG_FPROBE=y @@ -6010,7 +5988,6 @@ CONFIG_HWLAT_TRACER=y CONFIG_TIMERLAT_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_BPF_KPROBE_OVERRIDE=y CONFIG_SYNTH_EVENTS=y CONFIG_USER_EVENTS=y CONFIG_TRACE_EVENT_INJECT=y @@ -6018,5 +5995,4 @@ CONFIG_RV=y CONFIG_RV_MON_WIP=y CONFIG_RV_MON_WWNR=y # CONFIG_STRICT_DEVMEM is not set -CONFIG_FUNCTION_ERROR_INJECTION=y # CONFIG_RUNTIME_TESTING_MENU is not set diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index 5497262ab827a..e7f2f94b4b919 100644 --- a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -103,6 +103,13 @@ void __init init_IRQ(void) int i, ret; unsigned int order = get_order(IRQ_STACK_SIZE); struct page *page; + unsigned long node; + + if (!acpi_gbl_reduced_hardware) { + for_each_node(node) + writel(0x40000000 | (node << 12), + (volatile void __iomem *)(0x80000efdfb000274UL + (node<<44))); + } clear_csr_ecfg(ECFG0_IM); clear_csr_estat(ESTATF_IP); diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c index 6be04d36ca076..89eeb9a97dd5d 100644 --- a/arch/loongarch/mm/cache.c +++ b/arch/loongarch/mm/cache.c @@ -63,6 +63,27 @@ static void flush_cache_leaf(unsigned int leaf) } while (--nr_nodes > 0); } +static void flush_cache_last_level(unsigned int leaf) +{ + u64 addr; + int i, j, nr_nodes; + struct cache_desc *cdesc = current_cpu_data.cache_leaves + leaf; + + nr_nodes = loongson_sysconf.nr_nodes; + + addr = CSR_DMW1_BASE; + iocsr_write32(0x1, 0x280); + do { + for (i = 0; i < (cdesc->ways * 3); i++) { + for (j = 0; j < (cdesc->sets); j++) { + *(volatile u32 *)addr; + addr += cdesc->linesz; + } + } + addr += 0x100000000000; + } while (--nr_nodes > 0); +} + asmlinkage __visible void __flush_cache_all(void) { int leaf; @@ -71,7 +92,7 @@ asmlinkage __visible void __flush_cache_all(void) leaf = cache_present - 1; if (cache_inclusive(cdesc + leaf)) { - flush_cache_leaf(leaf); + flush_cache_last_level(leaf); return; } diff --git a/arch/x86/configs/deepin_x86_desktop_defconfig b/arch/x86/configs/deepin_x86_desktop_defconfig index 92155ac20cbf5..f22f871e41900 100644 --- a/arch/x86/configs/deepin_x86_desktop_defconfig +++ b/arch/x86/configs/deepin_x86_desktop_defconfig @@ -1,5 +1,4 @@ -CONFIG_COMPILE_TEST=y -CONFIG_UAPI_HEADER_TEST=y +CONFIG_WERROR=y CONFIG_BUILD_SALT="-amd64-desktop-hwe" CONFIG_KERNEL_ZSTD=y CONFIG_SYSVIPC=y @@ -59,7 +58,6 @@ CONFIG_X86_UV=y CONFIG_X86_INTEL_MID=y CONFIG_X86_INTEL_LPSS=y CONFIG_X86_AMD_PLATFORM_DEVICE=y -CONFIG_IOSF_MBI_DEBUG=y CONFIG_PARAVIRT_SPINLOCKS=y CONFIG_XEN=y CONFIG_XEN_PVH=y @@ -69,7 +67,6 @@ CONFIG_PROCESSOR_SELECT=y CONFIG_GART_IOMMU=y CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y CONFIG_X86_MCELOG_LEGACY=y -CONFIG_X86_MCE_INJECT=m CONFIG_PERF_EVENTS_INTEL_RAPL=m CONFIG_PERF_EVENTS_INTEL_CSTATE=m CONFIG_PERF_EVENTS_AMD_POWER=m @@ -91,15 +88,9 @@ CONFIG_EFI_MIXED=y CONFIG_LIVEPATCH=y CONFIG_HIBERNATION=y CONFIG_PM_WAKELOCKS=y -CONFIG_PM_DEBUG=y -CONFIG_PM_ADVANCED_DEBUG=y -CONFIG_PM_TRACE_RTC=y CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y CONFIG_ENERGY_MODEL=y -CONFIG_ACPI_DEBUGGER=y -CONFIG_ACPI_DEBUGGER_USER=y CONFIG_ACPI_FPDT=y -CONFIG_ACPI_EC_DEBUGFS=m CONFIG_ACPI_TAD=m CONFIG_ACPI_DOCK=y CONFIG_ACPI_IPMI=m @@ -113,7 +104,6 @@ CONFIG_ACPI_APEI=y CONFIG_ACPI_APEI_GHES=y CONFIG_ACPI_APEI_PCIEAER=y 
CONFIG_ACPI_APEI_MEMORY_FAILURE=y -CONFIG_ACPI_APEI_EINJ=m CONFIG_ACPI_DPTF=y CONFIG_ACPI_EXTLOG=m CONFIG_ACPI_CONFIGFS=m @@ -157,6 +147,7 @@ CONFIG_BLK_CGROUP_IOLATENCY=y CONFIG_BLK_CGROUP_FC_APPID=y CONFIG_BLK_CGROUP_IOCOST=y CONFIG_BLK_CGROUP_IOPRIO=y +# CONFIG_BLK_DEBUG_FS is not set CONFIG_BLK_SED_OPAL=y CONFIG_BLK_INLINE_ENCRYPTION=y CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y @@ -171,7 +162,6 @@ CONFIG_MINIX_SUBPARTITION=y CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_LDM_PARTITION=y -CONFIG_LDM_DEBUG=y CONFIG_SGI_PARTITION=y CONFIG_ULTRIX_PARTITION=y CONFIG_SUN_PARTITION=y @@ -190,7 +180,6 @@ CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 CONFIG_MEMORY_FAILURE=y -CONFIG_HWPOISON_INJECT=m CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y CONFIG_CMA_AREAS=7 @@ -489,7 +478,6 @@ CONFIG_ATM_LANE=m CONFIG_ATM_MPOA=m CONFIG_ATM_BR2684=m CONFIG_L2TP=m -CONFIG_L2TP_DEBUGFS=m CONFIG_L2TP_V3=y CONFIG_L2TP_IP=m CONFIG_L2TP_ETH=m @@ -603,6 +591,7 @@ CONFIG_BT_6LOWPAN=m CONFIG_BT_LEDS=y CONFIG_BT_MSFTEXT=y CONFIG_BT_AOSPEXT=y +# CONFIG_BT_DEBUGFS is not set CONFIG_BT_HCIBTUSB=m CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y CONFIG_BT_HCIBTUSB_MTK=y @@ -631,7 +620,6 @@ CONFIG_BT_MRVL_SDIO=m CONFIG_BT_ATH3K=m CONFIG_BT_MTKSDIO=m CONFIG_BT_MTKUART=m -CONFIG_BT_QCOMSMD=m CONFIG_BT_VIRTIO=m CONFIG_BT_NXPUART=m CONFIG_AF_RXRPC_IPV6=y @@ -640,7 +628,6 @@ CONFIG_RXPERF=m CONFIG_AF_KCM=m CONFIG_MCTP=y CONFIG_CFG80211=m -CONFIG_CFG80211_DEBUGFS=y CONFIG_CFG80211_WEXT=y CONFIG_MAC80211=m CONFIG_MAC80211_MESH=y @@ -689,7 +676,6 @@ CONFIG_NFC_ST95HF=m CONFIG_NET_IFE=m CONFIG_PCI=y CONFIG_PCIEAER=y -CONFIG_PCIEAER_INJECT=m CONFIG_PCIE_ECRC=y CONFIG_PCIE_DPC=y CONFIG_PCIE_PTM=y @@ -706,8 +692,6 @@ CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m CONFIG_HOTPLUG_PCI_SHPC=y CONFIG_VMD=m -CONFIG_PCIE_KEEMBAY_HOST=y -CONFIG_PCIE_KEEMBAY_EP=y CONFIG_PCIE_DW_PLAT_HOST=y CONFIG_PCIE_DW_PLAT_EP=y CONFIG_PCI_ENDPOINT=y @@ -733,10 +717,10 @@ CONFIG_UEVENT_HELPER=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y # CONFIG_STANDALONE is not set +# CONFIG_FW_LOADER_DEBUG is not set CONFIG_FW_LOADER_COMPRESS=y CONFIG_FW_LOADER_COMPRESS_ZSTD=y CONFIG_DEBUG_DEVRES=y -CONFIG_INTEL_IXP4XX_EB=y CONFIG_MHI_BUS=m CONFIG_MHI_BUS_PCI_GENERIC=m CONFIG_MHI_BUS_EP=m @@ -750,7 +734,6 @@ CONFIG_GOOGLE_FIRMWARE=y CONFIG_EFI_VARS_PSTORE=m CONFIG_EFI_BOOTLOADER_CONTROL=m CONFIG_EFI_CAPSULE_LOADER=m -CONFIG_EFI_TEST=m CONFIG_RESET_ATTACK_MITIGATION=y CONFIG_EFI_RCI2_TABLE=y CONFIG_GNSS=m @@ -972,7 +955,6 @@ CONFIG_SCSI_LPFC=m CONFIG_SCSI_DC395x=m CONFIG_SCSI_AM53C974=m CONFIG_SCSI_WD719X=m -CONFIG_SCSI_DEBUG=m CONFIG_SCSI_PMCRAID=m CONFIG_SCSI_PM8001=m CONFIG_SCSI_BFA_FC=m @@ -992,7 +974,6 @@ CONFIG_ATA=y CONFIG_SATA_ZPODD=y CONFIG_SATA_AHCI=y CONFIG_SATA_AHCI_PLATFORM=m -CONFIG_SATA_AHCI_SEATTLE=m CONFIG_SATA_INIC162X=m CONFIG_SATA_ACARD_AHCI=m CONFIG_SATA_SIL24=m @@ -1267,6 +1248,7 @@ CONFIG_SKGE_GENESIS=y CONFIG_SKY2=m CONFIG_PRESTERA=m CONFIG_MLX4_EN=m +# CONFIG_MLX4_DEBUG is not set CONFIG_MLX5_CORE=m CONFIG_MLX5_FPGA=y CONFIG_MLX5_CORE_EN=y @@ -1406,6 +1388,127 @@ CONFIG_USB_RTL8152=m CONFIG_USB_LAN78XX=m CONFIG_USB_HSO=m CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +CONFIG_USB_NET_AQC111=m +CONFIG_ADM8211=m +CONFIG_ATH5K=m +CONFIG_ATH9K=m +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_CHANNEL_CONTEXT=y +CONFIG_ATH9K_PCI_NO_EEPROM=m +CONFIG_ATH9K_HTC=m +CONFIG_ATH9K_HWRNG=y +CONFIG_CARL9170=m +CONFIG_CARL9170_HWRNG=y 
+CONFIG_ATH6KL=m +CONFIG_ATH6KL_SDIO=m +CONFIG_ATH6KL_USB=m +CONFIG_AR5523=m +CONFIG_WIL6210=m +CONFIG_WIL6210_TRACING=y +# CONFIG_WIL6210_DEBUGFS is not set +CONFIG_ATH10K=m +CONFIG_ATH10K_PCI=m +CONFIG_ATH10K_SDIO=m +CONFIG_ATH10K_USB=m +CONFIG_ATH10K_TRACING=y +CONFIG_WCN36XX=m +CONFIG_ATH11K=m +CONFIG_ATH11K_AHB=m +CONFIG_ATH11K_PCI=m +CONFIG_ATH12K=m +CONFIG_ATMEL=m +CONFIG_PCI_ATMEL=m +CONFIG_PCMCIA_ATMEL=m +CONFIG_AT76C50X_USB=m +CONFIG_B43=m +CONFIG_B43_SDIO=y +CONFIG_B43LEGACY=m +# CONFIG_B43LEGACY_DEBUG is not set +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +CONFIG_BRCM_TRACING=y +CONFIG_AIRO=m +CONFIG_AIRO_CS=m +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y +CONFIG_IPW2200=m +CONFIG_IPW2200_MONITOR=y +CONFIG_IPW2200_PROMISCUOUS=y +CONFIG_IPW2200_QOS=y +CONFIG_IWL4965=m +CONFIG_IWL3945=m +CONFIG_IWLWIFI=m +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +# CONFIG_IWLWIFI_DEVICE_TRACING is not set +CONFIG_HOSTAP=m +CONFIG_HOSTAP_FIRMWARE=y +CONFIG_HOSTAP_FIRMWARE_NVRAM=y +CONFIG_HOSTAP_PLX=m +CONFIG_HOSTAP_PCI=m +CONFIG_HOSTAP_CS=m +CONFIG_HERMES=m +CONFIG_HERMES_PRISM=y +CONFIG_PLX_HERMES=m +CONFIG_TMD_HERMES=m +CONFIG_NORTEL_HERMES=m +CONFIG_PCMCIA_HERMES=m +CONFIG_PCMCIA_SPECTRUM=m +CONFIG_ORINOCO_USB=m +CONFIG_P54_COMMON=m +CONFIG_P54_USB=m +CONFIG_P54_PCI=m +CONFIG_P54_SPI=m +CONFIG_P54_SPI_DEFAULT_EEPROM=y +CONFIG_LIBERTAS=m +CONFIG_LIBERTAS_USB=m +CONFIG_LIBERTAS_CS=m +CONFIG_LIBERTAS_SDIO=m +CONFIG_LIBERTAS_SPI=m +CONFIG_LIBERTAS_MESH=y +CONFIG_LIBERTAS_THINFIRM=m +CONFIG_LIBERTAS_THINFIRM_USB=m +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +CONFIG_MWL8K=m +CONFIG_MT7601U=m +CONFIG_MT76x0U=m +CONFIG_MT76x0E=m +CONFIG_MT76x2E=m +CONFIG_MT76x2U=m +CONFIG_MT7603E=m +CONFIG_MT7615E=m +CONFIG_MT7663U=m +CONFIG_MT7663S=m +CONFIG_MT7915E=m +CONFIG_MT7921E=m +CONFIG_MT7921S=m +CONFIG_MT7921U=m +CONFIG_MT7996E=m +CONFIG_WILC1000_SDIO=m +CONFIG_WILC1000_SPI=m +CONFIG_WILC1000_HW_OOB_INTR=y +CONFIG_PLFXLC=m +CONFIG_RT2X00=m +CONFIG_RT2400PCI=m +CONFIG_RT2500PCI=m +CONFIG_RT61PCI=m +CONFIG_RT2800PCI=m +CONFIG_RT2500USB=m +CONFIG_RT73USB=m +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y CONFIG_RTL8180=m CONFIG_RTL8187=m CONFIG_RTL8192CE=m @@ -1448,8 +1551,26 @@ CONFIG_RTW89_8852BE=m CONFIG_RTW89_8852BU=m CONFIG_RTW89_8852BTE=m CONFIG_RTW89_8852CE=m -CONFIG_RTW89_8922AE=m -CONFIG_RTW89_DEBUGMSG=y +CONFIG_RSI_91X=m +# CONFIG_RSI_DEBUGFS is not set +CONFIG_WFX=m +CONFIG_CW1200=m +CONFIG_CW1200_WLAN_SDIO=m +CONFIG_CW1200_WLAN_SPI=m +CONFIG_WL1251=m +CONFIG_WL1251_SPI=m +CONFIG_WL1251_SDIO=m +CONFIG_WL12XX=m +CONFIG_WL18XX=m +CONFIG_WLCORE_SDIO=m +CONFIG_USB_ZD1201=m +CONFIG_ZD1211RW=m +CONFIG_QTNFMAC_PCIE=m +CONFIG_PCMCIA_RAYCS=m +CONFIG_PCMCIA_WL3501=m +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_MAC80211_HWSIM=m +CONFIG_VIRT_WIFI=m CONFIG_WAN=y CONFIG_HDLC=m CONFIG_HDLC_RAW=m @@ -1462,7 +1583,6 @@ CONFIG_PCI200SYN=m CONFIG_WANXL=m CONFIG_PC300TOO=m CONFIG_FARSYNC=m -CONFIG_SLIC_DS26522=m CONFIG_LAPBETHER=m CONFIG_IEEE802154_FAKELB=m CONFIG_IEEE802154_AT86RF230=m @@ -1471,12 +1591,11 @@ CONFIG_IEEE802154_CC2520=m CONFIG_IEEE802154_ATUSB=m CONFIG_IEEE802154_ADF7242=m CONFIG_IEEE802154_CA8210=m -CONFIG_IEEE802154_CA8210_DEBUGFS=y CONFIG_IEEE802154_MCR20A=m CONFIG_IEEE802154_HWSIM=m CONFIG_WWAN=m +# CONFIG_WWAN_DEBUGFS is not set CONFIG_WWAN_HWSIM=m -CONFIG_QCOM_BAM_DMUX=m CONFIG_RPMSG_WWAN_CTRL=m CONFIG_IOSM=m CONFIG_MTK_T7XX=m @@ -1509,10 +1628,8 @@ 
CONFIG_KEYBOARD_APPLESPI=m CONFIG_KEYBOARD_QT1050=m CONFIG_KEYBOARD_QT1070=m CONFIG_KEYBOARD_QT2160=m -CONFIG_KEYBOARD_CLPS711X=m CONFIG_KEYBOARD_DLINK_DIR685=m CONFIG_KEYBOARD_LKKBD=m -CONFIG_KEYBOARD_EP93XX=m CONFIG_KEYBOARD_GPIO=m CONFIG_KEYBOARD_TCA8418=m CONFIG_KEYBOARD_MATRIX=m @@ -1622,7 +1739,6 @@ CONFIG_TOUCHSCREEN_HYCON_HY46XX=m CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX=m CONFIG_TOUCHSCREEN_ILI210X=m CONFIG_TOUCHSCREEN_ILITEK=m -CONFIG_TOUCHSCREEN_IPROC=m CONFIG_TOUCHSCREEN_S6SY761=m CONFIG_TOUCHSCREEN_GUNZE=m CONFIG_TOUCHSCREEN_EKTF2127=m @@ -1637,11 +1753,9 @@ CONFIG_TOUCHSCREEN_MSG2638=m CONFIG_TOUCHSCREEN_MTOUCH=m CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS=m CONFIG_TOUCHSCREEN_IMAGIS=m -CONFIG_TOUCHSCREEN_IMX6UL_TSC=m CONFIG_TOUCHSCREEN_INEXIO=m CONFIG_TOUCHSCREEN_PENMOUNT=m CONFIG_TOUCHSCREEN_EDT_FT5X06=m -CONFIG_TOUCHSCREEN_MIGOR=m CONFIG_TOUCHSCREEN_TOUCHRIGHT=m CONFIG_TOUCHSCREEN_TOUCHWIN=m CONFIG_TOUCHSCREEN_PIXCIR=m @@ -1662,7 +1776,6 @@ CONFIG_TOUCHSCREEN_SILEAD=m CONFIG_TOUCHSCREEN_SIS_I2C=m CONFIG_TOUCHSCREEN_ST1232=m CONFIG_TOUCHSCREEN_STMFTS=m -CONFIG_TOUCHSCREEN_SUN4I=m CONFIG_TOUCHSCREEN_SUR40=m CONFIG_TOUCHSCREEN_SURFACE3_SPI=m CONFIG_TOUCHSCREEN_SX8654=m @@ -1803,6 +1916,7 @@ CONFIG_HW_RANDOM_INTEL=m CONFIG_HW_RANDOM_AMD=m CONFIG_HW_RANDOM_BA431=m CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_ZHAOXIN=m CONFIG_HW_RANDOM_VIRTIO=m CONFIG_HW_RANDOM_XIPHERA=m CONFIG_APPLICOM=m @@ -1812,7 +1926,6 @@ CONFIG_HANGCHECK_TIMER=m CONFIG_TCG_TIS_SPI=m CONFIG_TCG_TIS_SPI_CR50=y CONFIG_TCG_TIS_I2C=m -CONFIG_TCG_TIS_SYNQUACER=m CONFIG_TCG_TIS_I2C_CR50=m CONFIG_TCG_TIS_I2C_ATMEL=m CONFIG_TCG_TIS_I2C_INFINEON=m @@ -1893,7 +2006,6 @@ CONFIG_SPI_ZYNQMP_GQSPI=m CONFIG_SPI_AMD=m CONFIG_SPI_MUX=m CONFIG_SPI_SPIDEV=m -CONFIG_SPI_LOOPBACK_TEST=m CONFIG_SPI_TLE62X0=m CONFIG_SPI_SLAVE=y CONFIG_SPI_SLAVE_TIME=m @@ -1934,6 +2046,7 @@ CONFIG_PINCTRL_METEORLAKE=m CONFIG_PINCTRL_SUNRISEPOINT=m CONFIG_PINCTRL_TIGERLAKE=m CONFIG_PINCTRL_MERRIFIELD=m +CONFIG_PINCTRL_KX7000=m CONFIG_GPIO_SYSFS=y CONFIG_GPIO_AMDPT=m CONFIG_GPIO_DWAPB=m @@ -2029,7 +2142,6 @@ CONFIG_MAX8925_POWER=m CONFIG_WM831X_BACKUP=m CONFIG_WM831X_POWER=m CONFIG_WM8350_POWER=m -CONFIG_TEST_POWER=m CONFIG_BATTERY_88PM860X=m CONFIG_CHARGER_ADP5061=m CONFIG_BATTERY_CW2015=m @@ -2299,6 +2411,7 @@ CONFIG_SENSORS_TMP421=m CONFIG_SENSORS_TMP464=m CONFIG_SENSORS_TMP513=m CONFIG_SENSORS_VIA_CPUTEMP=m +CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m CONFIG_SENSORS_VIA686A=m CONFIG_SENSORS_VT1211=m CONFIG_SENSORS_VT8231=m @@ -2333,6 +2446,7 @@ CONFIG_INTEL_TCC_COOLING=m CONFIG_INTEL_HFI_THERMAL=y CONFIG_GENERIC_ADC_THERMAL=m CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y CONFIG_WATCHDOG_SYSFS=y CONFIG_WATCHDOG_PRETIMEOUT_GOV=y CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP=y @@ -2831,14 +2945,7 @@ CONFIG_VIDEO_CADENCE_CSI2TX=m CONFIG_VIDEO_CAFE_CCIC=m CONFIG_VIDEO_VIA_CAMERA=m CONFIG_SMS_SDIO_DRV=m -CONFIG_V4L_TEST_DRIVERS=y -CONFIG_VIDEO_VIM2M=m -CONFIG_VIDEO_VICODEC=m -CONFIG_VIDEO_VIMC=m -CONFIG_VIDEO_VIVID=m -CONFIG_VIDEO_VIVID_CEC=y CONFIG_DVB_FIREDTV=m -CONFIG_SMS_SIANO_DEBUGFS=y CONFIG_VIDEO_AR0521=m CONFIG_VIDEO_HI556=m CONFIG_VIDEO_HI846=m @@ -3082,6 +3189,8 @@ CONFIG_BACKLIGHT_RAVE_SP=m CONFIG_FIRMWARE_EDID=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y +CONFIG_DRM_ACCEL=y +CONFIG_DRM_ACCEL_IVPU=m CONFIG_SOUND=m # CONFIG_SOUND_OSS_CORE_PRECLAIM is not set CONFIG_SND=m @@ -3263,7 +3372,6 @@ CONFIG_SND_SOC_INTEL_AVS=m CONFIG_SND_SOC_INTEL_AVS_MACH_DA7219=m CONFIG_SND_SOC_INTEL_AVS_MACH_DMIC=m CONFIG_SND_SOC_INTEL_AVS_MACH_HDAUDIO=m 
-CONFIG_SND_SOC_INTEL_AVS_MACH_I2S_TEST=m CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98927=m CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98357A=m CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98373=m @@ -3738,14 +3846,11 @@ CONFIG_USB_SISUSBVGA=m CONFIG_USB_LD=m CONFIG_USB_TRANCEVIBRATOR=m CONFIG_USB_IOWARRIOR=m -CONFIG_USB_TEST=m -CONFIG_USB_EHSET_TEST_FIXTURE=m CONFIG_USB_ISIGHTFW=m CONFIG_USB_YUREX=m CONFIG_USB_HUB_USB251XB=m CONFIG_USB_HSIC_USB3503=m CONFIG_USB_HSIC_USB4604=m -CONFIG_USB_LINK_LAYER_TEST=m CONFIG_USB_CHAOSKEY=m CONFIG_USB_ATM=m CONFIG_USB_SPEEDTOUCH=m @@ -4096,12 +4201,10 @@ CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_VDPA=m CONFIG_VIRTIO_PMEM=m CONFIG_VIRTIO_BALLOON=y -CONFIG_VIRTIO_MEM=m CONFIG_VIRTIO_INPUT=m CONFIG_VIRTIO_MMIO=y CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y CONFIG_VDPA=m -CONFIG_VDPA_SIM=m CONFIG_MLX5_VDPA_NET=m CONFIG_VHOST_NET=m CONFIG_VHOST_SCSI=m @@ -4120,7 +4223,6 @@ CONFIG_GREYBUS_ES2=m CONFIG_COMEDI=m CONFIG_COMEDI_MISC_DRIVERS=y CONFIG_COMEDI_BOND=m -CONFIG_COMEDI_TEST=m CONFIG_COMEDI_PARPORT=m CONFIG_COMEDI_ISA_DRIVERS=y CONFIG_COMEDI_PCL711=m @@ -4318,8 +4420,8 @@ CONFIG_CROS_EC_ISHTP=m CONFIG_CROS_EC_SPI=m CONFIG_CROS_EC_LPC=m CONFIG_CROS_KBD_LED_BACKLIGHT=m +# CONFIG_CROS_EC_DEBUGFS is not set CONFIG_WILCO_EC=m -CONFIG_WILCO_EC_DEBUGFS=m CONFIG_WILCO_EC_EVENTS=m CONFIG_WILCO_EC_TELEMETRY=m CONFIG_MELLANOX_PLATFORM=y @@ -4369,6 +4471,8 @@ CONFIG_X86_PLATFORM_DRIVERS_HP=y CONFIG_WIRELESS_HOTKEY=m CONFIG_IBM_RTL=m CONFIG_SENSORS_HDAPS=m +CONFIG_THINKPAD_ACPI=m +CONFIG_THINKPAD_LMI=m CONFIG_INTEL_ATOMISP2_LED=m CONFIG_INTEL_SAR_INT1092=m CONFIG_INTEL_SKL_INT3472=m @@ -4452,6 +4556,7 @@ CONFIG_RPMSG_VIRTIO=m CONFIG_SOUNDWIRE=m CONFIG_SOUNDWIRE_QCOM=m CONFIG_SOC_TI=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_DEVFREQ_GOV_PERFORMANCE=y CONFIG_DEVFREQ_GOV_POWERSAVE=y CONFIG_DEVFREQ_GOV_USERSPACE=y @@ -4820,29 +4925,15 @@ CONFIG_SERIAL_IPOCTAL=m CONFIG_RESET_SIMPLE=y CONFIG_RESET_TI_SYSCON=m CONFIG_RESET_TI_TPS380X=m -CONFIG_PHY_PISTACHIO_USB=m CONFIG_USB_LGM_PHY=m -CONFIG_PHY_SUN4I_USB=m -CONFIG_PHY_SUN9I_USB=m CONFIG_BCM_KONA_USB2_PHY=m -CONFIG_PHY_HI6220_USB=m -CONFIG_PHY_HI3660_USB=m -CONFIG_PHY_HI3670_USB=m -CONFIG_PHY_HISI_INNO_USB2=m -CONFIG_PHY_INGENIC_USB=m CONFIG_PHY_PXA_28NM_HSIC=m CONFIG_PHY_PXA_28NM_USB2=m -CONFIG_PHY_PXA_USB=m -CONFIG_PHY_MMP3_USB=m -CONFIG_PHY_MMP3_HSIC=m CONFIG_PHY_CPCAP_USB=m CONFIG_PHY_QCOM_USB_HS=m CONFIG_PHY_QCOM_USB_HSIC=m -CONFIG_PHY_RALINK_USB=m CONFIG_PHY_SAMSUNG_USB2=m CONFIG_PHY_TUSB1210=m -CONFIG_PHY_INTEL_KEEMBAY_EMMC=m -CONFIG_PHY_INTEL_KEEMBAY_USB=m CONFIG_PHY_INTEL_LGM_EMMC=m CONFIG_INTEL_RAPL=m CONFIG_MCB=m @@ -4891,6 +4982,7 @@ CONFIG_MUX_ADGS1408=m CONFIG_MUX_GPIO=m CONFIG_SIOX=m CONFIG_SIOX_BUS_GPIO=m +CONFIG_INTERCONNECT=y CONFIG_104_QUAD_8=m CONFIG_MOST=m CONFIG_MOST_USB_HDM=m @@ -4910,6 +5002,7 @@ CONFIG_XFS_POSIX_ACL=y CONFIG_GFS2_FS=m CONFIG_GFS2_FS_LOCKING_DLM=y CONFIG_OCFS2_FS=m +# CONFIG_OCFS2_DEBUG_MASKLOG is not set CONFIG_BTRFS_FS=m CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_BTRFS_ASSERT=y @@ -4927,7 +5020,6 @@ CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y CONFIG_FANOTIFY=y CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y CONFIG_QUOTA_NETLINK_INTERFACE=y -CONFIG_QUOTA_DEBUG=y CONFIG_QFMT_V1=m CONFIG_QFMT_V2=m CONFIG_AUTOFS_FS=m @@ -5015,7 +5107,6 @@ CONFIG_NFSD_V4_2_INTER_SSC=y CONFIG_NFSD_V4_SECURITY_LABEL=y # CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1 is not set CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2=y -CONFIG_SUNRPC_DEBUG=y CONFIG_CEPH_FS=m CONFIG_CEPH_FSCACHE=y CONFIG_CEPH_FS_POSIX_ACL=y @@ -5025,6 +5116,7 @@ CONFIG_CIFS=m CONFIG_CIFS_UPCALL=y 
CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG is not set CONFIG_CIFS_DFS_UPCALL=y CONFIG_CIFS_SWN_UPCALL=y CONFIG_CIFS_SMB_DIRECT=y @@ -5123,7 +5215,6 @@ CONFIG_EVM_EXTRA_SMACK_XATTRS=y CONFIG_EVM_ADD_XATTRS=y CONFIG_DEFAULT_SECURITY_APPARMOR=y CONFIG_LSM="lockdown,yama,integrity,apparmor,selinux,hookmanager" -CONFIG_INIT_STACK_ALL_ZERO=y CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y CONFIG_FORTIFY_SOURCE=y CONFIG_HARDENED_USERCOPY=y @@ -5133,6 +5224,7 @@ CONFIG_CRYPTO_ECRDSA=m CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SM4_GENERIC=m @@ -5140,7 +5232,9 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_AEGIS128=m CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_SEQIV=y @@ -5168,6 +5262,7 @@ CONFIG_CRYPTO_DES3_EDE_X86_64=m CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_SM4_ZHAOXIN_GMI=m CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64=m CONFIG_CRYPTO_AEGIS128_AESNI_SSE2=m @@ -5175,6 +5270,7 @@ CONFIG_CRYPTO_NHPOLY1305_SSE2=m CONFIG_CRYPTO_NHPOLY1305_AVX2=m CONFIG_CRYPTO_POLYVAL_CLMUL_NI=m CONFIG_CRYPTO_SM3_AVX_X86_64=m +CONFIG_CRYPTO_SM3_ZHAOXIN_GMI=m CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m CONFIG_CRYPTO_DEV_PADLOCK=y CONFIG_CRYPTO_DEV_PADLOCK_AES=m @@ -5193,17 +5289,13 @@ CONFIG_CRYPTO_DEV_CHELSIO=m CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_CRYPTO_DEV_SAFEXCEL=m CONFIG_CRYPTO_DEV_AMLOGIC_GXL=m -CONFIG_CRYPTO_DEV_ASPEED=m -CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH=y -CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO=y -CONFIG_CRYPTO_DEV_ASPEED_ACRY=y CONFIG_CRYPTO_DEV_TSSE=m CONFIG_PKCS8_PRIVATE_KEY_PARSER=m -CONFIG_PKCS7_TEST_KEY=m CONFIG_SIGNED_PE_FILE_VERIFICATION=y CONFIG_SYSTEM_EXTRA_CERTIFICATE=y CONFIG_SECONDARY_TRUSTED_KEYRING=y CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_CRC4=m CONFIG_XZ_DEC_TEST=m CONFIG_FONTS=y CONFIG_FONT_8x8=y @@ -5235,16 +5327,12 @@ CONFIG_KGDB=y CONFIG_KGDB_LOW_LEVEL_TRAP=y CONFIG_KGDB_KDB=y CONFIG_KDB_KEYBOARD=y -CONFIG_PAGE_POISONING=y +# CONFIG_SLUB_DEBUG is not set CONFIG_DEBUG_WX=y -CONFIG_DEBUG_STACK_USAGE=y CONFIG_SCHED_STACK_END_CHECK=y -CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_SCHED_DEBUG is not set CONFIG_SCHEDSTATS=y -CONFIG_DEBUG_PLIST=y -CONFIG_DEBUG_SG=y -CONFIG_DEBUG_NOTIFIERS=y CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_RCU_TRACE is not set CONFIG_FUNCTION_PROFILER=y @@ -5254,21 +5342,16 @@ CONFIG_HWLAT_TRACER=y CONFIG_MMIOTRACE=y CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_BPF_KPROBE_OVERRIDE=y CONFIG_HIST_TRIGGERS=y -CONFIG_PROVIDE_OHCI1394_DMA_INIT=y -CONFIG_SAMPLES=y -CONFIG_SAMPLE_TRACE_PRINTK=m -CONFIG_SAMPLE_FTRACE_DIRECT=m -CONFIG_SAMPLE_TRACE_ARRAY=m -CONFIG_IO_STRICT_DEVMEM=y +# CONFIG_STRICT_DEVMEM is not set # CONFIG_X86_VERBOSE_BOOTUP is not set CONFIG_EARLY_PRINTK_DBGP=y CONFIG_EARLY_PRINTK_USB_XDBC=y CONFIG_IO_DELAY_0XED=y -CONFIG_DEBUG_BOOT_PARAMS=y -CONFIG_PUNIT_ATOM_DEBUG=m +# CONFIG_X86_DEBUG_FPU is not set CONFIG_UNWINDER_FRAME_POINTER=y +# CONFIG_RUNTIME_TESTING_MENU is not set CONFIG_NOTIFIER_ERROR_INJECTION=m CONFIG_FUNCTION_ERROR_INJECTION=y CONFIG_TEST_BPF=m +CONFIG_TEST_BLACKHOLE_DEV=m diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 263439cda8179..cfebc73e8349a 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -329,6 +329,12 @@ struct 
cper_sec_mem_err; extern void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err); +extern void zx_apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err); +struct cper_sec_pcie; +extern void zx_apei_mce_report_pcie_error(int corrected, struct cper_sec_pcie *pcie_err); +struct cper_sec_proc_generic; +extern void zx_apei_mce_report_zdi_error(int corrected, struct cper_sec_proc_generic *zdi_err); + /* * Enumerate new IP types and HWID values in AMD processors which support * Scalable MCA. diff --git a/arch/x86/kernel/acpi/apei.c b/arch/x86/kernel/acpi/apei.c index 0916f00a992e1..26d9963b66bdf 100644 --- a/arch/x86/kernel/acpi/apei.c +++ b/arch/x86/kernel/acpi/apei.c @@ -40,7 +40,29 @@ int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data) void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) { #ifdef CONFIG_X86_MCE - apei_mce_report_mem_error(sev, mem_err); + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) + zx_apei_mce_report_mem_error(sev, mem_err); + else + apei_mce_report_mem_error(sev, mem_err); +#endif +} + +void arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err) +{ +#ifdef CONFIG_X86_MCE + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) + zx_apei_mce_report_pcie_error(sev, pcie_err); +#endif +} + +void arch_apei_report_zdi_error(int sev, struct cper_sec_proc_generic *zdi_err) +{ +#ifdef CONFIG_X86_MCE + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) + zx_apei_mce_report_zdi_error(sev, zdi_err); #endif } diff --git a/arch/x86/kernel/cpu/mce/apei.c b/arch/x86/kernel/cpu/mce/apei.c index 0a89947e47bc8..0e1845a11f0ea 100644 --- a/arch/x86/kernel/cpu/mce/apei.c +++ b/arch/x86/kernel/cpu/mce/apei.c @@ -65,6 +65,177 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) } EXPORT_SYMBOL_GPL(apei_mce_report_mem_error); +void zx_apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) +{ + struct mce_hw_err err; + struct mce *m; + int apei_error = 0; + + if (boot_cpu_data.x86 != 7 || boot_cpu_data.x86_model != 91) + return; + + if (!(mem_err->validation_bits & CPER_MEM_VALID_PA)) + return; + + mce_prep_record(&err); + m = &err.m; + m->misc = 0; + m->misc = mem_err->module; + m->addr = mem_err->physical_addr; + if (mem_err->card == 0) + m->bank = 9; + else + m->bank = 10; + + switch (mem_err->error_type) { + case 2: + m->status = 0x9c20004000010080; + break; + case 3: + m->status = 0xbe40000000020090; + apei_error = apei_write_mce(m); + break; + case 8: + if (mem_err->requestor_id == 2) + m->status = 0x98200040000400b0; + else if (mem_err->requestor_id == 3) { + m->status = 0xba400000000600a0; + apei_error = apei_write_mce(m); + } else if (mem_err->requestor_id == 4) + m->status = 0x98200100000300b0; + else if (mem_err->requestor_id == 5) { + m->status = 0xba000000000500b0; + apei_error = apei_write_mce(m); + } else + pr_info("Undefined Parity error\n"); + break; + case 10: + if (mem_err->requestor_id == 6) { + m->status = 0xba400000000700a0; + apei_error = apei_write_mce(m); + } else if (mem_err->requestor_id == 7) { + m->status = 0xba000000000800b0; + apei_error = apei_write_mce(m); + } else + pr_info("Undefined dvad error\n"); + break; + case 13: + m->status = 0x9c200040000100c0; + break; + case 14: + m->status = 0xbd000000000200c0; + apei_error = apei_write_mce(m); + break; + } + 
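/* Editor's note (a sketch of the encoding, inferred from the standard x86 MCA layout): the hard-coded MCi_STATUS words above appear to leave UC clear for corrected errors (0x9c.../0x98...) and set UC for uncorrected ones (0xba.../0xbd.../0xbe...), which are additionally written to persistent storage via apei_write_mce() so the record survives a panic or reset. */ + 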
mce_log(&err); +} +EXPORT_SYMBOL_GPL(zx_apei_mce_report_mem_error); + +void zx_apei_mce_report_pcie_error(int severity, struct cper_sec_pcie *pcie_err) +{ + struct mce_hw_err err; + struct mce *m; + int apei_error = 0; + + if (boot_cpu_data.x86 != 7 || boot_cpu_data.x86_model != 91) + return; + + mce_prep_record(&err); + m = &err.m; + m->addr = 0; + m->misc = 0; + m->misc |= (u64)pcie_err->device_id.segment << 32; + m->misc |= pcie_err->device_id.bus << 24; + m->misc |= pcie_err->device_id.device << 19; + m->misc |= pcie_err->device_id.function << 16; + m->bank = 6; + + switch (severity) { + case 1: + m->status = 0x9820004000020e0b; + break; + case 2: + m->status = 0xba20000000010e0b; + break; + case 3: + m->status = 0xbd20000000000e0b; + apei_error = apei_write_mce(m); + break; + default: + pr_info("Undefined PCIe error\n"); + break; + } + mce_log(&err); +} +EXPORT_SYMBOL_GPL(zx_apei_mce_report_pcie_error); + +void zx_apei_mce_report_zdi_error(int severity, struct cper_sec_proc_generic *zdi_err) +{ + struct mce_hw_err err; + struct mce *m; + int apei_error = 0; + + if (boot_cpu_data.x86 != 7 || boot_cpu_data.x86_model != 91) + return; + + mce_prep_record(&err); + m = &err.m; + m->misc = 0; + m->misc |= (zdi_err->requestor_id & 0xff) << 19; + m->misc |= ((zdi_err->requestor_id & 0xff00) >> 8) >> 24; + m->bank = 5; + switch (zdi_err->responder_id) { + case 2: + m->status = 0xba00000000040e0f; + apei_error = apei_write_mce(m); + break; + case 3: + m->status = 0xba00000000030e0f; + apei_error = apei_write_mce(m); + break; + case 4: + m->status = 0xba00000000020e0f; + apei_error = apei_write_mce(m); + break; + case 5: + m->status = 0xba00000000010e0f; + apei_error = apei_write_mce(m); + break; + case 6: + m->status = 0x9820004000090e0f; + break; + case 7: + m->status = 0x9820004000080e0f; + break; + case 8: + m->status = 0x9820004000070e0f; + break; + case 9: + m->status = 0x9820004000060e0f; + break; + case 10: + m->status = 0x9820004000050e0f; + break; + case 11: + case 12: + case 13: + case 14: + case 15: + m->status = 0x98200040000b0e0f; + break; + case 16: + case 17: + case 18: + m->status = 0x98200040000c0e0f; + break; + default: + pr_info("Undefined ZDI error\n"); + break; + } + mce_log(&err); +} +EXPORT_SYMBOL_GPL(zx_apei_mce_report_zdi_error); + int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id) { const u64 *i_mce = ((const u64 *) (ctx_info + 1)); diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index ec3aa340d351d..bd46224ef3c05 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -231,6 +231,11 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu) if (bootval != ref->adjusted) { cur->adjusted = ref->adjusted; wrmsrq(MSR_IA32_TSC_ADJUST, ref->adjusted); + } else if (cur->adjusted != bootval) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + cur->adjusted = bootval; + } } /* * We have the TSCs forced to be in sync on this package. 
Skip sync diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 9c84f3da7c090..f652c46dc8dd5 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -773,6 +773,16 @@ void __weak arch_apei_report_mem_error(int sev, } EXPORT_SYMBOL_GPL(arch_apei_report_mem_error); +void __weak arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err) +{ +} +EXPORT_SYMBOL_GPL(arch_apei_report_pcie_error); + +void __weak arch_apei_report_zdi_error(int sev, struct cper_sec_proc_generic *zdi_err) +{ +} +EXPORT_SYMBOL_GPL(arch_apei_report_zdi_error); + int apei_osc_setup(void) { static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c"; diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 56107aa002744..af092f4b27660 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -896,9 +896,12 @@ static void ghes_do_proc(struct ghes *ghes, atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err); - arch_apei_report_mem_error(sev, mem_err); + arch_apei_report_mem_error(sec_sev, mem_err); queued = ghes_handle_memory_failure(gdata, sev, sync); } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) { + struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata); + + arch_apei_report_pcie_error(sec_sev, pcie_err); ghes_handle_aer(gdata); } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) { queued = ghes_handle_arm_hw_error(gdata, sev, sync); @@ -918,6 +921,10 @@ static void ghes_do_proc(struct ghes *ghes, struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec); + } else if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) { + struct cper_sec_proc_generic *zdi_err = acpi_hest_get_payload(gdata); + + arch_apei_report_zdi_error(sec_sev, zdi_err); } else { void *err = acpi_hest_get_payload(gdata); @@ -1302,6 +1309,8 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes, u32 len, node_len; u64 buf_paddr; int sev, rc; + struct acpi_hest_generic_data *gdata; + guid_t *sec_type; if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG)) return -EOPNOTSUPP; @@ -1336,6 +1345,23 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes, sev = ghes_severity(estatus->error_severity); if (sev >= GHES_SEV_PANIC) { + apei_estatus_for_each_section(estatus, gdata) { + sec_type = (guid_t *)gdata->section_type; + if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { + struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata); + + arch_apei_report_mem_error(sev, mem_err); + } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) { + struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata); + + arch_apei_report_pcie_error(sev, pcie_err); + } else if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) { + struct cper_sec_proc_generic *zdi_err = + acpi_hest_get_payload(gdata); + + arch_apei_report_zdi_error(sev, zdi_err); + } + } ghes_print_queued_estatus(); __ghes_panic(ghes, estatus, buf_paddr, fixmap_idx); } diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 3bdeeee3414e6..96008b7a08dae 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -694,7 +694,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) acpi_status status; int ret = -ENODATA; +#ifdef CONFIG_ARM64 + if (read_cpuid_implementor() != ARM_CPU_IMP_HISI && !osc_sb_cppc2_support_acked) { +#else if (!osc_sb_cppc2_support_acked) { +#endif pr_debug("CPPC v2 _OSC not acked\n"); if (!cpc_supported_by_cpu()) { pr_debug("CPPC is not supported by the CPU\n"); diff --git 
a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 505900a80da88..ec7ab3ac2efbb 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -115,6 +115,16 @@ config SATA_AHCI If unsure, say N. +config AHCI_ZHAOXIN_SGPIO + tristate "Zhaoxin AHCI SGPIO support" + depends on SATA_AHCI && (CPU_SUP_ZHAOXIN || CPU_SUP_CENTAUR) + default y + help + This option enables support for the Zhaoxin AHCI SGPIO controller, + adding both SGPIO mode and SGPIO GP mode. + + If unsure, say N. + config SATA_MOBILE_LPM_POLICY int "Default SATA Link Power Management policy" range 0 5 diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 4b846692e3656..ee2cb6367b661 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_AHCI_ST) += ahci_st.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_TEGRA) += ahci_tegra.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_XGENE) += ahci_xgene.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_QORIQ) += ahci_qoriq.o libahci.o libahci_platform.o +obj-$(CONFIG_AHCI_ZHAOXIN_SGPIO) += ahci_zhaoxin_sgpio.o # SFF w/ custom DMA obj-$(CONFIG_PDC_ADMA) += pdc_adma.o diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 7a7f88b3fa2b1..fa2f11fe83e02 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1005,6 +1005,11 @@ static int ahci_pci_device_runtime_suspend(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct ata_host *host = pci_get_drvdata(pdev); + if (pdev->vendor == PCI_VENDOR_ID_ZHAOXIN && pdev->revision <= 0x20) { + dev_err(&pdev->dev, "zx ahci controller does not support runtime pm!\n"); + return -EIO; + } + ahci_pci_disable_interrupts(host); return 0; } diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 293b7fb216b51..8131e0fb08e1f 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -449,6 +449,7 @@ void ahci_print_info(struct ata_host *host, const char *scc_s); int ahci_host_activate(struct ata_host *host, const struct scsi_host_template *sht); void ahci_error_handler(struct ata_port *ap); u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked); +int get_ahci_em_messages(void); static inline void __iomem *__ahci_port_base(struct ahci_host_priv *hpriv, unsigned int port_no) diff --git a/drivers/ata/ahci_zhaoxin_sgpio.c b/drivers/ata/ahci_zhaoxin_sgpio.c new file mode 100644 index 0000000000000..de6806cf8140f --- /dev/null +++ b/drivers/ata/ahci_zhaoxin_sgpio.c @@ -0,0 +1,706 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ahci_zhaoxin_sgpio.c - Driver for Zhaoxin sgpio + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ahci.h" +#include "libata.h" +#include "ahci_zhaoxin_sgpio.h" + +static LIST_HEAD(sgpio_zhaoxin_list); + +static unsigned int zhaoxin_em_type __read_mostly = AHCI_EM_MSG_LED_MODE; /* LED protocol */ +module_param(zhaoxin_em_type, int, 0644); +MODULE_PARM_DESC(zhaoxin_em_type, + "AHCI Enclosure Management Message type control (1 = led on, 2 = sgpio on, 3 = sgpio gp on)"); + +static int ahci_wait_em_reset(struct sgpio_zhaoxin *sgpio_zhaoxin, u32 retry) +{ + void __iomem *mmio; + u32 em_ctl; + + if (!sgpio_zhaoxin || retry == 0) { + pr_err("ahci_wait_em_reset: invalid parameter\n"); + return -EINVAL; + } + + mmio = sgpio_zhaoxin->mmio; + while (retry--) { /* EM_CTL needs at least 64ms to reset */ + em_ctl = readl(mmio + HOST_EM_CTL); + if (em_ctl & EM_CTL_RST) + msleep(10); /* EM_CTL still in reset, sleep another 10ms */ + else + break; + + if (!retry) + pr_err("Wait for EM_CTL reset, time out\n"); + } + 
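/* Note: the wait loop above only logs on timeout; the function still returns 0, and callers treat only negative values as failure. */ + 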
return 0; +} + +static void ahci_zhaoxin_set_em_sgpio(struct sgpio_zhaoxin *sgpio_zhaoxin) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + void __iomem *em_mmio = mmio + SGPIO_OFFSET; + + volatile u32 read; + + sgpio_zhaoxin->sgpio_reg.cfg_0.enable = 1; + + sgpio_zhaoxin->sgpio_reg.cfg_1.blink_gen_a = 0x7; + sgpio_zhaoxin->sgpio_reg.cfg_1.blink_gen_b = 0x3; + sgpio_zhaoxin->sgpio_reg.cfg_1.blink_gen_c = 0x0; + sgpio_zhaoxin->sgpio_reg.cfg_1.stretch_act_on = 0; + sgpio_zhaoxin->sgpio_reg.cfg_1.stretch_act_off = 0; + sgpio_zhaoxin->sgpio_reg.cfg_1.max_act_on = 2; + sgpio_zhaoxin->sgpio_reg.cfg_1.force_act_off = 1; + + sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sload = 0xf; + sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.count = 0x0; + + sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 = 0; + sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 = 0; + sgpio_zhaoxin->sgpio_reg.gp_transmit_reg.sgpio_tx_gp = 0; + + sgpio_zhaoxin->sgpio_reg.receive_reg.sgpio_rx = 0x07070707; + sgpio_zhaoxin->sgpio_reg.gp_receive_reg.sgpio_rx_gp = 0; + + /*Setup SGPIO type*/ + read = readl(mmio + sgpio_zhaoxin->em_loc); + read = read | SGPIO_MESSAGE_HEAD; /*LED register MSG_HEAD, select SGPIO*/ + writel(read, mmio + sgpio_zhaoxin->em_loc); + + /*Setup gp mode*/ + writel(sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sgpio_tx_gp_cfg, em_mmio + 0x38); + + /*Initial SGPIO CFG1*/ + writel(sgpio_zhaoxin->sgpio_reg.cfg_1.sgpio_cfg_1, em_mmio + 0x4); + + /*Initial SGPIO CFG0*/ + read = readl(em_mmio); + read |= sgpio_zhaoxin->sgpio_reg.cfg_0.sgpio_cfg_0; + writel(read, em_mmio); +} + +static void ahci_zhaoxin_set_em_sgpio_gpmode(struct sgpio_zhaoxin *sgpio_zhaoxin) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + void __iomem *em_mmio = mmio + SGPIO_OFFSET; + u32 read; + + sgpio_zhaoxin->sgpio_reg.cfg_0.enable = 1; + + sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sload = 0xf; + sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.count = 0xff; + + sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 = 0; + sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 = 0; + sgpio_zhaoxin->sgpio_reg.gp_transmit_reg.sgpio_tx_gp = 0; + + sgpio_zhaoxin->sgpio_reg.receive_reg.sgpio_rx = 0; + sgpio_zhaoxin->sgpio_reg.gp_receive_reg.sgpio_rx_gp = 0xff0f0000; + + /*Setup SGPIO type*/ + read = readl(mmio + sgpio_zhaoxin->em_loc); + read |= SGPIO_MESSAGE_HEAD; + writel(read, mmio + sgpio_zhaoxin->em_loc); + + /*Setup gp mode*/ + writel(sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sgpio_tx_gp_cfg, em_mmio + 0x38); + + /*Enable SGPIO*/ + writel(sgpio_zhaoxin->sgpio_reg.cfg_0.sgpio_cfg_0, em_mmio); +} + +static ssize_t ahci_em_type_sys_show(struct sgpio_zhaoxin *sgpio_zhaoxin, char *buf) +{ + return sprintf(buf, "0x%x\n", zhaoxin_em_type); +} +static ssize_t ahci_em_type_sys_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, + size_t count) +{ + int code = 0; + int rc = 0; + + if (kstrtouint(buf, 0, &code)) + return count; + + if (code == AHCI_EM_MSG_LED_MODE) { + zhaoxin_em_type = code; + } else if (code == AHCI_EM_MSG_SGPIO_MODE) { + rc = ahci_wait_em_reset(sgpio_zhaoxin, 7); /*wait at least 64ms*/ + if (rc < 0) { + pr_err("ahci wait em reset failed!\n"); + return rc; + } + zhaoxin_em_type = code; + ahci_zhaoxin_set_em_sgpio(sgpio_zhaoxin); + } else if (code == AHCI_EM_MSG_SGPIO_GP_MODE) { + rc = ahci_wait_em_reset(sgpio_zhaoxin, 7); /*wait at least 64ms*/ + if (rc < 0) { + pr_err("ahci wait em reset failed!\n"); + return rc; + } + zhaoxin_em_type = code; + ahci_zhaoxin_set_em_sgpio_gpmode(sgpio_zhaoxin); + } else + pr_err("Incorrect value:1 = LED on, 2 = SGPIO normal on, 3 = SGPIO 
GP on)\n"); + + return count; +} + +static ssize_t ahci_transmit_sgpio_message(unsigned long port_num, + struct sgpio_zhaoxin *sgpio_zhaoxin, u16 state, + ssize_t size) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + void __iomem *em_mmio = mmio + SGPIO_OFFSET; + unsigned long flags; + + if (!(sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO)) + return -EINVAL; + + spin_lock_irqsave(&sgpio_zhaoxin->wr_lock, flags); + + switch (port_num) { + case 0: + writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc); + writew(state, em_mmio + 0x22); + sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 &= 0x0000ffff; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_0_active = (state & 0x3c0) >> 6; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_0_locate = (state & 0x38) >> 3; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_0_error = state & 0x7; + break; + case 1: + writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc); + writew(state, em_mmio + 0x20); + sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 &= 0xffff0000; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_1_active = (state & 0x3c0) >> 6; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_1_locate = (state & 0x38) >> 3; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_1_error = state & 0x7; + break; + case 2: + writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc); + writew(state, em_mmio + 0x26); + sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 &= 0x0000ffff; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_2_active = (state & 0x3c0) >> 6; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_2_locate = (state & 0x38) >> 3; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_2_error = state & 0x7; + break; + case 3: + writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc); + writew(state, em_mmio + 0x24); + sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 &= 0xffff0000; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_3_active = (state & 0x3c0) >> 6; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_3_locate = (state & 0x38) >> 3; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_3_error = state & 0x7; + break; + default: + pr_err("Unsupported port number in this controller\n"); + break; + } + + spin_unlock_irqrestore(&sgpio_zhaoxin->wr_lock, flags); + + return size; +} + +static ssize_t ahci_transmit_sgpio_indicator(unsigned long port_num, + struct sgpio_zhaoxin *sgpio_zhaoxin, + u8 indicator_code, enum SGPIO_INDICATOR type, + ssize_t size) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + void __iomem *em_mmio = mmio + SGPIO_OFFSET; + u16 state; + + if (!(sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO)) + return -EINVAL; + + if (get_ahci_em_messages() && (zhaoxin_em_type != AHCI_EM_MSG_SGPIO_MODE)) { + pr_err("Current setting not SGPIO normal mode, quit\n"); + return -EINVAL; + } + + switch (port_num) { + case 0: + state = readw(em_mmio + 0x22); + break; + case 1: + state = readw(em_mmio + 0x20); + break; + case 2: + state = readw(em_mmio + 0x26); + break; + case 3: + state = readw(em_mmio + 0x24); + break; + default: + return -EINVAL; + } + + if (type == SGPIO_ACTIVITY) { + state &= 0xfc3f; + state |= (indicator_code&0xf) << 6; + } else if (type == SGPIO_LOCATE) { + state &= 0xffc7; + state |= (indicator_code&0x7) << 3; + } else if (type == SGPIO_ERROR) { + state &= 0xfff8; + state |= indicator_code & 0x7; + } else { + return -EINVAL; + } + + return ahci_transmit_sgpio_message(port_num, sgpio_zhaoxin, state, size); +} + +static ssize_t ahci_transmit_sgpio_indicator_gp(unsigned long port_num, + struct sgpio_zhaoxin *sgpio_zhaoxin, + u8 indicator_code, enum SGPIO_INDICATOR type, + ssize_t size) +{ + void 
__iomem *mmio = sgpio_zhaoxin->mmio; + void __iomem *em_mmio = mmio + SGPIO_OFFSET; + union SGPIO_TX_GP state; + unsigned long flags; + + if (!(sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO)) + return -EINVAL; + + if (get_ahci_em_messages() && (zhaoxin_em_type != AHCI_EM_MSG_SGPIO_GP_MODE)) { + pr_err("Current setting not SGPIO_GP mode, quit\n"); + return -EINVAL; + } + + spin_lock_irqsave(&sgpio_zhaoxin->wr_lock, flags); + + state.sgpio_tx_gp = readl(em_mmio + 0x3c); + switch (port_num) { + case 0: + if (type == SGPIO_ACTIVITY) + state.D00 = indicator_code & 0x1; + else if (type == SGPIO_LOCATE) + state.D01 = indicator_code & 0x1; + else if (type == SGPIO_ERROR) + state.D02 = indicator_code & 0x1; + break; + case 1: + if (type == SGPIO_ACTIVITY) + state.D10 = indicator_code & 0x1; + else if (type == SGPIO_LOCATE) + state.D11 = indicator_code & 0x1; + else if (type == SGPIO_ERROR) + state.D12 = indicator_code & 0x1; + break; + case 2: + if (type == SGPIO_ACTIVITY) + state.D20 = indicator_code & 0x1; + else if (type == SGPIO_LOCATE) + state.D21 = indicator_code & 0x1; + else if (type == SGPIO_ERROR) + state.D22 = indicator_code & 0x1; + break; + case 3: + if (type == SGPIO_ACTIVITY) + state.D30 = indicator_code & 0x1; + else if (type == SGPIO_LOCATE) + state.D31 = indicator_code & 0x1; + else if (type == SGPIO_ERROR) + state.D32 = indicator_code & 0x1; + break; + default: + return -EINVAL; + } + + writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc); + writel(state.sgpio_tx_gp, em_mmio + 0x3c); + sgpio_zhaoxin->sgpio_reg.gp_transmit_reg.sgpio_tx_gp = state.sgpio_tx_gp; + + spin_unlock_irqrestore(&sgpio_zhaoxin->wr_lock, flags); + return size; +} + +static ssize_t sgpio_activity_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, + size_t count) +{ + unsigned long val = 0; + unsigned long port_num = 0; + unsigned long code = 0; + + if (kstrtoul(buf, 0, &val)) + return count; + + port_num = val & 0xf; + code = val >> 4; + + if (sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO) { + switch (code) { + case 0x0: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_DISABLE, SGPIO_ACTIVITY, 1); + break; + case 0x1: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_ENABLE, SGPIO_ACTIVITY, 1); + break; + case 0x2: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GA_FON, SGPIO_ACTIVITY, 1); + break; + case 0x3: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GA_FOFF, SGPIO_ACTIVITY, 1); + break; + case 0x4: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_BRIEF_EN_EOF, SGPIO_ACTIVITY, 1); + break; + case 0x5: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_BRIEF_EN_SOF, SGPIO_ACTIVITY, 1); + break; + case 0x6: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GB_FON, SGPIO_ACTIVITY, 1); + break; + case 0x7: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GB_FOFF, SGPIO_ACTIVITY, 1); + break; + case 0x8: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GC_FON, SGPIO_ACTIVITY, 1); + break; + case 0x9: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GC_FOFF, SGPIO_ACTIVITY, 1); + break; + case 0x10: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, + GP_OFF, SGPIO_ACTIVITY, 1); + break; + case 0x11: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, + GP_ON, SGPIO_ACTIVITY, 1); + break; + default: + pr_err("Unsupported command for activity indicator, cmd:0x%lx\n", val); + break; + } + + 
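/* Usage sketch (the sysfs path is illustrative; the attribute is created below under the PCI device as "zx_sgpio", root-writable): the low nibble of the written value selects the port and the remaining bits the code, so "echo 0x21 > /sys/bus/pci/devices/<bdf>/zx_sgpio/sgpio_activity" requests ACTIVITY_GA_FON (code 0x2) on port 1. */ + 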
return count; + } + + return -EINVAL; +} + +static ssize_t sgpio_locate_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, + size_t count) +{ + unsigned long val = 0; + unsigned long port_num = 0; + unsigned long code = 0; + + if (kstrtoul(buf, 0, &val)) + return count; + + port_num = val & 0xf; + code = val >> 4; + + if (sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO) { + switch (code) { + case 0x0: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_DISABLE, SGPIO_LOCATE, 1); + break; + case 0x1: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_ENABLE, SGPIO_LOCATE, 1); + break; + case 0x2: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GA_FON, SGPIO_LOCATE, 1); + break; + case 0x3: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GA_FOFF, SGPIO_LOCATE, 1); + break; + case 0x4: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GB_FON, SGPIO_LOCATE, 1); + break; + case 0x5: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GB_FOFF, SGPIO_LOCATE, 1); + break; + case 0x6: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GC_FON, SGPIO_LOCATE, 1); + break; + case 0x7: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GC_FOFF, SGPIO_LOCATE, 1); + break; + case 0x10: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, + GP_OFF, SGPIO_LOCATE, 1); + break; + case 0x11: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, GP_ON, + SGPIO_LOCATE, 1); + break; + default: + pr_err("Unsupported command for locate indicator, cmd:0x%lx\n", val); + break; + } + + return count; + } + return -EINVAL; +} + +static ssize_t sgpio_error_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, size_t count) +{ + unsigned long val = 0; + unsigned long port_num = 0; + unsigned long code = 0; + + if (kstrtoul(buf, 0, &val)) + return count; + + port_num = val & 0xf; + code = val >> 4; + + if (sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO) { + switch (code) { + case 0x0: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_DISABLE, SGPIO_ERROR, 1); + break; + case 0x1: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_ENABLE, SGPIO_ERROR, 1); + break; + case 0x2: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GA_FON, SGPIO_ERROR, 1); + break; + case 0x3: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GA_FOFF, SGPIO_ERROR, 1); + break; + case 0x4: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GB_FON, SGPIO_ERROR, 1); + break; + case 0x5: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GB_FOFF, SGPIO_ERROR, 1); + break; + case 0x6: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GC_FON, SGPIO_ERROR, 1); + break; + case 0x7: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GC_FOFF, SGPIO_ERROR, 1); + break; + case 0x10: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, + GP_OFF, SGPIO_ERROR, 1); + break; + case 0x11: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, + GP_ON, SGPIO_ERROR, 1); + break; + default: + pr_err("Unsupported command for error indicator, cmd:0x%lx\n", val); + break; + } + + return count; + } + + return -EINVAL; +} + +static struct sgpio_zhaoxin_sysfs_attr dev_attr_ahci_em_type_sys = + __ATTR(ahci_em_type_sys, 0644, ahci_em_type_sys_show, + ahci_em_type_sys_store); +static struct 
sgpio_zhaoxin_sysfs_attr dev_attr_sgpio_activity = + __ATTR(sgpio_activity, 0200, NULL, sgpio_activity_store); +static struct sgpio_zhaoxin_sysfs_attr dev_attr_sgpio_locate = + __ATTR(sgpio_locate, 0200, NULL, sgpio_locate_store); +static struct sgpio_zhaoxin_sysfs_attr dev_attr_sgpio_error = + __ATTR(sgpio_error, 0200, NULL, sgpio_error_store); + +struct attribute *sgpio_attrs[] = { + &dev_attr_ahci_em_type_sys.attr, + &dev_attr_sgpio_activity.attr, + &dev_attr_sgpio_locate.attr, + &dev_attr_sgpio_error.attr, + NULL +}; + +static const struct attribute_group sgpio_attrs_group = { + .attrs = sgpio_attrs +}; +const struct attribute_group *sgpio_groups[] = { + &sgpio_attrs_group, + NULL +}; + +static ssize_t sgpio_zhaoxin_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) +{ + struct sgpio_zhaoxin_sysfs_attr *sgpio_zhaoxin_sysfs_attr = to_sgpio_attr(attr); + struct sgpio_zhaoxin *sgpio_zhaoxin = to_sgpio_obj(kobj); + + if (!sgpio_zhaoxin_sysfs_attr->show) + return -EIO; + + return sgpio_zhaoxin_sysfs_attr->show(sgpio_zhaoxin, buf); +} + +static ssize_t sgpio_zhaoxin_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t len) +{ + struct sgpio_zhaoxin_sysfs_attr *sgpio_zhaoxin_sysfs_attr = to_sgpio_attr(attr); + struct sgpio_zhaoxin *sgpio_zhaoxin = to_sgpio_obj(kobj); + + if (!sgpio_zhaoxin_sysfs_attr->store) + return -EIO; + + return sgpio_zhaoxin_sysfs_attr->store(sgpio_zhaoxin, buf, len); +} + +const struct sysfs_ops sgpio_zhaoxin_sysfs_ops = { + .show = sgpio_zhaoxin_attr_show, + .store = sgpio_zhaoxin_attr_store, +}; + +const struct kobj_type sgpio_zhaoxin_ktype = { + .sysfs_ops = &sgpio_zhaoxin_sysfs_ops, + .default_groups = sgpio_groups, +}; + +static void set_em_messages(struct sgpio_zhaoxin *sgpio_zhaoxin) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + u32 em_loc = readl(mmio + HOST_EM_LOC); + u32 em_ctl = readl(mmio + HOST_EM_CTL); + u8 messages; + + if (!get_ahci_em_messages()) + return; + + messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16; + + if (messages) { + /* store em_loc */ + sgpio_zhaoxin->em_loc = ((em_loc >> 16) * 4); + sgpio_zhaoxin->em_buf_sz = ((em_loc & 0xff) * 4); + sgpio_zhaoxin->em_msg_type = messages; + } +} + +static int add_sgpio_zhaoxin(void) +{ + struct pci_dev *pdev_cur = pci_get_device(PCI_VENDOR_ID_ZHAOXIN, 0x9083, NULL); + struct pci_dev *pdev_next = pdev_cur; + struct sgpio_zhaoxin *sgpio_zhaoxin; + int ret = 0; + + if (!get_ahci_em_messages()) + return 0; + + while (pdev_next) { + pdev_next = pci_get_device(PCI_VENDOR_ID_ZHAOXIN, 0x9083, pdev_cur); + + WARN_ON(MAX_TEST_RESULT_LEN <= 0); + + sgpio_zhaoxin = (struct sgpio_zhaoxin *)get_zeroed_page(GFP_KERNEL); + if (!sgpio_zhaoxin) + return -ENOMEM; + + list_add(&sgpio_zhaoxin->list, &sgpio_zhaoxin_list); + ret = kobject_init_and_add(&sgpio_zhaoxin->kobj, &sgpio_zhaoxin_ktype, + &(&pdev_cur->dev)->kobj, "zx_sgpio"); + if (ret) { + kobject_put(&sgpio_zhaoxin->kobj); + return -1; + } + + kobject_uevent(&sgpio_zhaoxin->kobj, KOBJ_ADD); + spin_lock_init(&sgpio_zhaoxin->wr_lock); + sgpio_zhaoxin->kobj_valid = 1; + sgpio_zhaoxin->mmio = pcim_iomap_table(pdev_cur)[5]; + set_em_messages(sgpio_zhaoxin); + ret = ahci_wait_em_reset(sgpio_zhaoxin, 7); /*wait at least 64ms*/ + if (ret < 0) { + pr_err("ahci wait em reset failed!\n"); + return ret; + } + + sgpio_zhaoxin->kobj_valid = 1; + + if (zhaoxin_em_type == AHCI_EM_MSG_SGPIO_GP_MODE) + ahci_zhaoxin_set_em_sgpio_gpmode(sgpio_zhaoxin); + else if (zhaoxin_em_type == AHCI_EM_MSG_SGPIO_MODE) + 
ahci_zhaoxin_set_em_sgpio(sgpio_zhaoxin); + + pdev_cur = pdev_next; + } + + return 0; +} + + +static void remove_sgpio_zhaoxin(void) +{ + struct sgpio_zhaoxin *cur = NULL, *next = NULL; + + if (!get_ahci_em_messages()) + return; + + list_for_each_entry_safe(cur, next, &sgpio_zhaoxin_list, list) { + list_del(&cur->list); + if (cur->kobj_valid) + kobject_put(&cur->kobj); + + free_page((unsigned long)cur); + if (!next) + break; + } +} + +static int __init zhaoxin_sgpio_init(void) +{ + return add_sgpio_zhaoxin(); +} + +static void __exit zhaoxin_sgpio_exit(void) +{ + remove_sgpio_zhaoxin(); +} + +late_initcall(zhaoxin_sgpio_init); +module_exit(zhaoxin_sgpio_exit); + +MODULE_DESCRIPTION("Zhaoxin SGPIO driver"); +MODULE_AUTHOR("XanderChen"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ata/ahci_zhaoxin_sgpio.h b/drivers/ata/ahci_zhaoxin_sgpio.h new file mode 100644 index 0000000000000..39025d0c57652 --- /dev/null +++ b/drivers/ata/ahci_zhaoxin_sgpio.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ACHI_ZHAOXIN_SGPIO_H +#define _ACHI_ZHAOXIN_SGPIO_H + +#define SGPIO_OFFSET 0x580 + +#define SGPIO_MESSAGE_HEAD 0x3000000 + +#define ACTIVITY_DISABLE 0x0 +#define ACTIVITY_ENABLE 0x1 +#define ACTIVITY_GA_FON 0x2 +#define ACTIVITY_GA_FOFF 0x3 +#define ACTIVITY_BRIEF_EN_EOF 0x4 +#define ACTIVITY_BRIEF_EN_SOF 0x5 +#define ACTIVITY_GB_FON 0x6 +#define ACTIVITY_GB_FOFF 0x7 +#define ACTIVITY_GC_FON 0x8 +#define ACTIVITY_GC_FOFF 0x9 +#define LOCATE_ERROR_DISABLE 0x0 +#define LOCATE_ERROR_ENABLE 0x1 +#define LOCATE_ERROR_GA_FON 0x2 +#define LOCATE_ERROR_GA_FOFF 0x3 +#define LOCATE_ERROR_GB_FON 0x4 +#define LOCATE_ERROR_GB_FOFF 0x5 +#define LOCATE_ERROR_GC_FON 0x6 +#define LOCATE_ERROR_GC_FOFF 0x7 + +#define GP_OFF 0x10 +#define GP_ON 0x11 + +#define to_sgpio_attr(x) container_of(x, struct sgpio_zhaoxin_sysfs_attr, attr) +#define to_sgpio_obj(x) container_of(x, struct sgpio_zhaoxin, kobj) +#define MAX_TEST_RESULT_LEN (PAGE_SIZE - sizeof(struct sgpio_zhaoxin) - 8) + +//SGPIO module parameter: 0-off, 1-LED, 2-SGPIO, 3-SGPIO_GP +enum ahci_em_msg_modes { + AHCI_EM_MSG_OFF = 0, + AHCI_EM_MSG_LED_MODE, + AHCI_EM_MSG_SGPIO_MODE, + AHCI_EM_MSG_SGPIO_GP_MODE, + AHCI_EM_MSG_NULL, +}; + +enum SGPIO_INDICATOR { + SGPIO_ACTIVITY, + SGPIO_LOCATE, + SGPIO_ERROR +}; + +enum SGPIO_CFG1 { + STRETCH_ACTIVITY_OFF, + STRETCH_ACTIVITY_ON, + FORCE_ACTIVITY_OFF, + MAXIMUM_ACTIVITY_ON, + BLINK_GENERATIOR_RATE_B, + BLINK_GENERATIOR_RATE_A, + BLINK_GENERATIOR_RATE_C +}; + +union SGPIO_CFG_0 { + struct { + u32 reserved0 :8; + u32 version :4; + u32 reserved1 :4; + u32 gp_register_count :4; + u32 cfg_register_count :3; + u32 enable :1; + u32 supported_drive_count :8; + }; + u32 sgpio_cfg_0; +}; + +union SGPIO_CFG_1 { + struct { + u32 reserved0 :4; + u32 blink_gen_c :4; + u32 blink_gen_a :4; + u32 blink_gen_b :4; + u32 max_act_on :4; + u32 force_act_off :4; + u32 stretch_act_on :4; + u32 stretch_act_off :4; + }; + u32 sgpio_cfg_1; +}; + +union SGPIO_RX { + struct { + u32 drive_3_input :3; + u32 reserved3 :5; + u32 drive_2_input :3; + u32 reserved2 :5; + u32 drive_1_input :3; + u32 reserved1 :5; + u32 drive_0_input :3; + u32 reserved0 :5; + }; + u32 sgpio_rx; +}; + +union SGPIO_RX_GP_CFG { + struct { + u32 reserved0 :16; + u32 count :8; + u32 reserved1 :8; + }; + u32 sgpio_rx_gp_cfg; +}; +union SGPIO_RX_GP { + struct { + u32 reserved0 :16; + u32 D22 :1; + u32 D30 :1; + u32 D31 :1; + u32 D32 :1; + u32 reserved1:4; + u32 D00 :1; + u32 D01 :1; + u32 D02 :1; + u32 D10 :1; + u32 D11 :1; + u32 D12 :1; + u32 D20 :1; + u32 D21 
:1; + }; + u32 sgpio_rx_gp; +}; + +union SGPIO_TX_0 { + struct { + u32 drive_1_error :3; + u32 drive_1_locate :3; + u32 drive_1_active :4; + u32 reserved1 :6; + u32 drive_0_error :3; + u32 drive_0_locate :3; + u32 drive_0_active :4; + u32 reserved0 :6; + }; + u32 sgpio_tx_0; +}; + +union SGPIO_TX_1 { + struct { + u32 drive_3_error :3; + u32 drive_3_locate :3; + u32 drive_3_active :4; + u32 reserved3 :6; + u32 drive_2_error :3; + u32 drive_2_locate :3; + u32 drive_2_active :4; + u32 reserved2 :6; + }; + u32 sgpio_tx_1; +}; + +union SGPIO_TX_GP_CFG { + struct { + u32 reserved0 :16; + u32 count :8; + u32 sload :4; + u32 reserved1 :4; + }; + u32 sgpio_tx_gp_cfg; +}; + +union SGPIO_TX_GP { + struct { + u32 reserved0 :16; + u32 D22 :1; + u32 D30 :1; + u32 D31 :1; + u32 D32 :1; + u32 reserved1:4; + u32 D00 :1; + u32 D01 :1; + u32 D02 :1; + u32 D10 :1; + u32 D11 :1; + u32 D12 :1; + u32 D20 :1; + u32 D21 :1; + }; + u32 sgpio_tx_gp; +}; + +struct AHCI_SGPIO_REG { + union SGPIO_CFG_0 cfg_0; + union SGPIO_CFG_1 cfg_1; + union SGPIO_RX receive_reg; + union SGPIO_RX_GP_CFG gp_receive_cfg; + union SGPIO_RX_GP gp_receive_reg; + union SGPIO_TX_0 transmit_0; + union SGPIO_TX_1 transmit_1; + union SGPIO_TX_GP_CFG gp_transmit_cfg; + union SGPIO_TX_GP gp_transmit_reg; +}; + +struct sgpio_zhaoxin { + struct kobject kobj; + struct list_head list; + unsigned int kobj_valid; + unsigned int index; + u32 em_loc; /* enclosure management location */ + u32 em_buf_sz; /* EM buffer size in byte */ + u32 em_msg_type; /* EM message type */ + void __iomem *mmio; + spinlock_t wr_lock; /* protects sgpio register */ + struct AHCI_SGPIO_REG sgpio_reg; /* saved sgpio register */ +}; + +struct sgpio_zhaoxin_sysfs_attr { + struct attribute attr; + ssize_t (*show)(struct sgpio_zhaoxin *sgpio_zhaoxin, char *buf); + ssize_t (*store)(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, size_t count); +}; + +#endif /* _ACHI_ZHAOXIN_SGPIO_H */ diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index c79abdfcd7a9b..7b32cb2050d32 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -208,6 +208,12 @@ static int devslp_idle_timeout __read_mostly = 1000; module_param(devslp_idle_timeout, int, 0644); MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout"); +int get_ahci_em_messages(void) +{ + return ahci_em_messages; +} +EXPORT_SYMBOL_GPL(get_ahci_em_messages); + static void ahci_enable_ahci(void __iomem *mmio) { int i; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 2586e77ebf45d..1709ac3933e66 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2125,6 +2125,8 @@ static int ata_eh_link_set_lpm(struct ata_link *link, struct ata_device **r_failed_dev) { struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; + struct device *device = ap ? ap->host->dev : NULL; + struct pci_dev *pdev = (!device || !dev_is_pci(device)) ? 
NULL : to_pci_dev(device); struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; enum ata_lpm_policy old_policy = link->lpm_policy; @@ -2133,6 +2135,11 @@ static int ata_eh_link_set_lpm(struct ata_link *link, unsigned int err_mask; int rc; + /* if controller does not support lpm, then sets no LPM flags*/ + if ((pdev && pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) && + !(~ap->host->flags & (ATA_HOST_NO_PART | ATA_HOST_NO_SSC | ATA_HOST_NO_DEVSLP))) + link->flags |= ATA_LFLAG_NO_LPM; + /* if the link or host doesn't do LPM, noop */ if (!IS_ENABLED(CONFIG_SATA_HOST) || (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index d2cfc584e2023..4d648eeb3e5c7 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -423,4 +423,6 @@ config ADI and SSM (Silicon Secured Memory). Intended consumers of this driver include crash and makedumpfile. +source "drivers/char/phytnetled/Kconfig" + endmenu diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 1291369b91265..9996c79342828 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -44,3 +44,4 @@ obj-$(CONFIG_PS3_FLASH) += ps3flash.o obj-$(CONFIG_XILLYBUS_CLASS) += xillybus/ obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o obj-$(CONFIG_ADI) += adi.o +obj-$(CONFIG_PHYTNET_LED) += phytnetled/ diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 43101f39d3967..2efe92b0e1b4d 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -178,6 +178,19 @@ config HW_RANDOM_VIA If unsure, say Y. +config HW_RANDOM_ZHAOXIN + tristate "Zhaoxin HW Random Number Generator support" + depends on X86 + default HW_RANDOM + help + This driver provides kernel-side support for the Random Number + Generator hardware found on Zhaoxin based motherboards. + + To compile this driver as a module, choose M here: the + module will be called zhaoxin-rng. + + If unsure, say Y. + config HW_RANDOM_IXP4XX tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support" depends on ARCH_IXP4XX || COMPILE_TEST diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 0fa9f0a271d46..8fc3fcdb937b0 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o n2-rng-y := n2-drv.o n2-asm.o obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o +obj-$(CONFIG_HW_RANDOM_ZHAOXIN) += zhaoxin-rng.o obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-trng.o obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index a9a0a3b09c8bd..6d13128458023 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -35,9 +35,6 @@ #include #include - - - enum { VIA_STRFILT_CNT_SHIFT = 16, VIA_STRFILT_FAIL = (1 << 15), @@ -135,7 +132,7 @@ static int via_rng_init(struct hwrng *rng) * is always enabled if CPUID rng_en is set. 
There is no * RNG configuration like it used to be the case in this * register */ - if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || (c->x86 > 6)){ + if ((c->x86 == 6) && (c->x86_model >= 0x0f)) { if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) { pr_err(PFX "can't enable hardware RNG " "if XSTORE is not enabled\n"); @@ -191,19 +188,25 @@ static struct hwrng via_rng = { .data_read = via_rng_data_read, }; +static const struct x86_cpu_id via_rng_cpu_ids[] = { + X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 6, X86_FEATURE_XSTORE, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_ids); static int __init via_rng_mod_init(void) { int err; - if (!boot_cpu_has(X86_FEATURE_XSTORE)) + if (!x86_match_cpu(via_rng_cpu_ids)) { + pr_err(PFX "The CPU does not support XSTORE.\n"); return -ENODEV; + } pr_info("VIA RNG detected\n"); err = hwrng_register(&via_rng); if (err) { - pr_err(PFX "RNG registering failed (%d)\n", - err); + pr_err(PFX "RNG registering failed (%d)\n", err); goto out; } out: @@ -217,11 +220,5 @@ static void __exit via_rng_mod_exit(void) } module_exit(via_rng_mod_exit); -static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = { - X86_MATCH_FEATURE(X86_FEATURE_XSTORE, NULL), - {} -}; -MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id); - MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/hw_random/zhaoxin-rng.c b/drivers/char/hw_random/zhaoxin-rng.c new file mode 100644 index 0000000000000..a189ace19dcaf --- /dev/null +++ b/drivers/char/hw_random/zhaoxin-rng.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RNG driver for Zhaoxin RNGs + * + * Copyright 2023 (c) Zhaoxin Semiconductor Co., Ltd + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + ZHAOXIN_RNG_CHUNK_8 = 0x00, /* 64 rand bits, 64 stored bits */ + ZHAOXIN_RNG_CHUNK_4 = 0x01, /* 32 rand bits, 32 stored bits */ + ZHAOXIN_RNG_CHUNK_2 = 0x02, /* 16 rand bits, 32 stored bits */ + ZHAOXIN_RNG_CHUNK_1 = 0x03, /* 8 rand bits, 32 stored bits */ + ZHAOXIN_RNG_MAX_SIZE = (128 * 1024), +}; + +static int zhaoxin_rng_init(struct hwrng *rng) +{ + if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) { + pr_err(PFX "can't enable hardware RNG if XSTORE is not enabled\n"); + return -ENODEV; + } + + return 0; +} + +/* + * REP XSTORE: stores 'size' bytes of hardware random data at ES:rDI; + * rDX selects the chunk size/quality factor (see ZHAOXIN_RNG_CHUNK_*), + * as encoded in the asm constraints below. + */ +static inline int rep_xstore(size_t size, size_t factor, void *result) +{ + asm(".byte 0xf3, 0x0f, 0xa7, 0xc0" + : "=m"(*(size_t *)result), "+c"(size), "+d"(factor), "+D"(result)); + + return 0; +} + +static int zhaoxin_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + if (max > ZHAOXIN_RNG_MAX_SIZE) + max = ZHAOXIN_RNG_MAX_SIZE; + + rep_xstore(max, ZHAOXIN_RNG_CHUNK_1, data); + + return max; +} + +static struct hwrng zhaoxin_rng = { + .name = "zhaoxin", + .init = zhaoxin_rng_init, + .read = zhaoxin_rng_read, +}; + +static const struct x86_cpu_id zhaoxin_rng_cpu_ids[] = { + X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 6, X86_FEATURE_XSTORE, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 7, X86_FEATURE_XSTORE, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 7, X86_FEATURE_XSTORE, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_rng_cpu_ids); + +static int __init zhaoxin_rng_mod_init(void) +{ + int err; + + if (!x86_match_cpu(zhaoxin_rng_cpu_ids)) + return -ENODEV; + + pr_info("Zhaoxin RNG detected\n"); + + err = hwrng_register(&zhaoxin_rng); + if (err) + pr_err(PFX "RNG registering failed (%d)\n", err); + + return err; +} +module_init(zhaoxin_rng_mod_init); + +static void __exit zhaoxin_rng_mod_exit(void) +{ +
hwrng_unregister(&zhaoxin_rng); +} +module_exit(zhaoxin_rng_mod_exit); + +MODULE_DESCRIPTION("H/W RNG driver for Zhaoxin CPUs"); +MODULE_AUTHOR("YunShen@zhaoxin.com"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/phytnetled/Kconfig b/drivers/char/phytnetled/Kconfig new file mode 100644 index 0000000000000..26691906dbf4d --- /dev/null +++ b/drivers/char/phytnetled/Kconfig @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# PCMCIA character device configuration +# +config PHYTNET_LED + tristate "Phytium mac led control module" + depends on PHYTMAC + depends on GPIO_PHYTIUM_PLAT + default m + help + If you have a network (Ethernet) controller of this type and + want to use it control port led say Y or M here. + diff --git a/drivers/char/phytnetled/Makefile b/drivers/char/phytnetled/Makefile new file mode 100644 index 0000000000000..781bc90c1b9c3 --- /dev/null +++ b/drivers/char/phytnetled/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_PHYTNET_LED) += phytnet_led.o \ No newline at end of file diff --git a/drivers/char/phytnetled/phytnet_led.c b/drivers/char/phytnetled/phytnet_led.c new file mode 100644 index 0000000000000..c0d5ebfd3f949 --- /dev/null +++ b/drivers/char/phytnetled/phytnet_led.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022-2023 Phytium Technology Co.,Ltd. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include "phytnet_led.h" + +#define DRIVER_NAME "phytnet_led" +#define DRIVER_VERSION "1.0" +#define DRIVER_AUTHOR "LongShixiang " +#define DRIVER_DESC "net device led control module" +#define NET_DEV_PROPNAME "net_dev" +#define LED_OF_NAME "led" +#define CHECK_INTERVAL 125 /* Unit: ms */ +#define NDEV_CHECK_DELAY 30000 /* Unit: 30s */ +#define LED_ON 1 +#define LED_OFF 0 +#define LINK_OFFSET 0 +#define ACT_OFFSET 1 + +#if defined(CONFIG_OF) +static const struct of_device_id phytnet_led_of_ids[] = { + { .compatible = "phytium,net_led"}, + {} +}; + +MODULE_DEVICE_TABLE(of, phytnet_led_of_ids); +#endif /* CONFIG_OF */ + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytnet_acpi_ids[] = { + { .id = "PHYT800C"}, + {} +}; +MODULE_DEVICE_TABLE(acpi, phytnet_acpi_ids); +#else +#define phytnet_acpi_ids NULL +#endif /* CONFIG_ACPI */ + +static void +led_on(struct gpio_desc *gd) +{ + gpiod_set_value(gd, LED_ON); +} + +static void +led_off(struct gpio_desc *gd) +{ + gpiod_set_value(gd, LED_OFF); +} + +static void +led_blink(struct led_data *phytnet_led) +{ + phytnet_led->act_val = !phytnet_led->act_val; + gpiod_set_value(phytnet_led->act, phytnet_led->act_val); +} + +static int +port_is_linkup(struct led_data *phytnet_led) +{ + if (netif_carrier_ok(phytnet_led->ndev)) + return true; + else + return false; +} + +static bool +port_is_act(struct led_data *phytnet_led) +{ + bool ret = false; + + if (phytnet_led->ndev_rx != phytnet_led->ndev->stats.rx_packets) { + phytnet_led->ndev_rx = phytnet_led->ndev->stats.rx_packets; + ret = true; + } + + if (phytnet_led->ndev_tx != phytnet_led->ndev->stats.tx_packets) { + phytnet_led->ndev_tx = phytnet_led->ndev->stats.tx_packets; + ret = true; + } + + return ret; +} + +static void +led_control(struct led_data *phytnet_led) +{ + while (!phytnet_led->led_stop) { + msleep(CHECK_INTERVAL); + + if (!netif_running(phytnet_led->ndev)) { + led_off(phytnet_led->link); + led_off(phytnet_led->act); + continue; + } + + if (port_is_linkup(phytnet_led)) + led_on(phytnet_led->link); + else + led_off(phytnet_led->link); + + if (port_is_act(phytnet_led)) + led_blink(phytnet_led); + else 
+ led_off(phytnet_led->act); + } +} + +static int +of_ndev_init(struct led_data *phytnet_led) +{ + struct device_node *net_node; + + net_node = of_parse_phandle(phytnet_led->pdev->dev.of_node, NET_DEV_PROPNAME, 0); + if (!net_node) { + dev_err(&phytnet_led->pdev->dev, "Failed to get netdev ofnode from device tree\n"); + return -ENODEV; + } + + phytnet_led->ndev = of_find_net_device_by_node(net_node); + of_node_put(net_node); + + if (!phytnet_led->ndev) { + dev_err(&phytnet_led->pdev->dev, "Failed to get ndev from DT node\n"); + return -ENODEV; + } + + dev_info(&phytnet_led->pdev->dev, "Successfully got ndev\n"); + dev_hold(phytnet_led->ndev); + + return 0; +} + +static int +acpi_ndev_init(struct led_data *phytnet_led) +{ + int err; + struct net_device *find_ndev; + const char *ndev_acpi_path; + acpi_handle net_handler; + struct acpi_device *adev; + acpi_status status; + struct device *find_dev; + + err = device_property_read_string(&phytnet_led->pdev->dev, + NET_DEV_PROPNAME, + &ndev_acpi_path); + if (err) { + dev_err(&phytnet_led->pdev->dev, "Failed to read net_dev property!\n"); + return -ENODEV; + } + + status = acpi_get_handle(NULL, (acpi_string)ndev_acpi_path, &net_handler); + if (ACPI_FAILURE(status)) { + dev_err(&phytnet_led->pdev->dev, "Failed to get acpi handler path: %s\n", + ndev_acpi_path); + return -ENODEV; + } + + adev = acpi_get_acpi_dev(net_handler); + if (!adev) { + dev_err(&phytnet_led->pdev->dev, "Failed to get adev\n"); + return -ENODEV; + } + + for_each_netdev(&init_net, find_ndev) { + if (find_ndev->dev.parent != NULL) { + find_dev = find_ndev->dev.parent; + if (&adev->fwnode == find_dev->fwnode) + phytnet_led->ndev = find_ndev; + } + } + + if (!phytnet_led->ndev) { + dev_err(&phytnet_led->pdev->dev, "Failed to get acpi ndev\n"); + return -ENODEV; + } + + dev_info(&phytnet_led->pdev->dev, "Successfully got ndev\n"); + dev_hold(phytnet_led->ndev); + + return 0; +} + +static int +gpio_init(struct led_data *phytnet_led) +{ + int err; + + phytnet_led->link = gpiod_get_index(&phytnet_led->pdev->dev, + LED_OF_NAME, + LINK_OFFSET, + GPIOD_OUT_HIGH); + if (IS_ERR(phytnet_led->link)) { + dev_err(&phytnet_led->pdev->dev, "Failed to get link gpio, err: %ld\n", + PTR_ERR(phytnet_led->link)); + return PTR_ERR(phytnet_led->link); + } + + err = gpiod_direction_output(phytnet_led->link, LED_OFF); + if (err) { + dev_err(&phytnet_led->pdev->dev, "Failed to set link dir, err: %d\n", err); + return err; + } + + phytnet_led->act = gpiod_get_index(&phytnet_led->pdev->dev, + LED_OF_NAME, + ACT_OFFSET, + GPIOD_OUT_HIGH); + if (IS_ERR(phytnet_led->act)) { + dev_err(&phytnet_led->pdev->dev, "Failed to get act gpio, err: %ld\n", + PTR_ERR(phytnet_led->act)); + return PTR_ERR(phytnet_led->act); + } + + err = gpiod_direction_output(phytnet_led->act, LED_OFF); + if (err) { + dev_err(&phytnet_led->pdev->dev, "Failed to set act dir, err: %d\n", err); + return err; + } + + return 0; +} + +static void +led_init_and_control(struct work_struct *work) +{ + int err = -1; + struct led_data *phytnet_led = container_of(work, struct led_data, led_control_work.work); + + if (phytnet_led->pdev->dev.of_node) + err = of_ndev_init(phytnet_led); + else if (has_acpi_companion(&phytnet_led->pdev->dev)) + err = acpi_ndev_init(phytnet_led); + + if (err) { + dev_err(&phytnet_led->pdev->dev, "ndev init failed\n"); + return; + } + + err = gpio_init(phytnet_led); + if (err) { + dev_err(&phytnet_led->pdev->dev, "gpio init failed\n"); + return; + } + + led_control(phytnet_led); +}
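+/*
+ * Bring-up flow (see probe below): the delayed work defers
+ * led_init_and_control() by NDEV_CHECK_DELAY (30 s) so that the net device
+ * named by the "net_dev" property has a chance to register first; the
+ * worker then stays in led_control(), sampling link and activity every
+ * CHECK_INTERVAL (125 ms), until net_led_remove() sets led_stop. With
+ * continuous traffic the ACT line therefore blinks at roughly 4 Hz.
+ */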
+ +static int +net_led_probe(struct platform_device *pdev) +{ + struct led_data *phytnet_led = devm_kzalloc(&pdev->dev, + sizeof(struct led_data), + GFP_KERNEL); + + if (!phytnet_led) + return -ENOMEM; + + platform_set_drvdata(pdev, phytnet_led); + + phytnet_led->act_val = LED_OFF; + phytnet_led->pdev = pdev; + phytnet_led->led_stop = 0; + + INIT_DELAYED_WORK(&phytnet_led->led_control_work, led_init_and_control); + schedule_delayed_work(&phytnet_led->led_control_work, msecs_to_jiffies(NDEV_CHECK_DELAY)); + + return 0; +} + +static void +net_led_remove(struct platform_device *pdev) +{ + struct led_data *phytnet_led = platform_get_drvdata(pdev); + + phytnet_led->led_stop = 1; + cancel_delayed_work_sync(&phytnet_led->led_control_work); + + if (phytnet_led->ndev) + dev_put(phytnet_led->ndev); + + if (!IS_ERR_OR_NULL(phytnet_led->link)) { + led_off(phytnet_led->link); + gpiod_put(phytnet_led->link); + } + + if (!IS_ERR_OR_NULL(phytnet_led->act)) { + led_off(phytnet_led->act); + gpiod_put(phytnet_led->act); + } +} + +static struct platform_driver net_led_driver = { + .driver = { + .owner = THIS_MODULE, + .name = DRIVER_NAME, + .of_match_table = of_match_ptr(phytnet_led_of_ids), + .acpi_match_table = ACPI_PTR(phytnet_acpi_ids), + }, + .probe = net_led_probe, + .remove = net_led_remove, +}; + +module_platform_driver(net_led_driver); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/char/phytnetled/phytnet_led.h b/drivers/char/phytnetled/phytnet_led.h new file mode 100644 index 0000000000000..8f2fbadde0f8b --- /dev/null +++ b/drivers/char/phytnetled/phytnet_led.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2022-2023 Phytium Technology Co.,Ltd. + * + */ +struct led_data { + struct platform_device *pdev; + struct net_device *ndev; + unsigned long ndev_rx, ndev_tx; + struct gpio_desc *link, *act; + struct delayed_work led_control_work; + int led_stop; + int act_val; +}; diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 79ba688a64f8d..9d13de31ae240 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -201,6 +201,54 @@ static const char * const proc_flag_strs[] = { "corrected", }; +static const char * const zdi_zpi_err_type_strs[] = { + "No Error", + "Training Error Status (PHY)", + "Data Link Protocol Error Status (DLL)", + "Surprise Down Error Status", + "Flow Control Protocol Error Status (TL)", + "Receiver Overflow Status (TL)", + "Receiver Error Status (PHY)", + "Bad TLP Status (DLL)", + "Bad Data Link Layer Packet (DLLP) Status (DLL)", + "REPLAY_NUM Rollover Status (DLL)", + "Replay Timer Timeout Status (DLL)", + "X16 Link Width Unreliable Status", + "ZPI X8 Link Width Unreliable Status", + "ZPI X4 Link Width Unreliable Status", + "ZPI X2 Link Width Unreliable Status", + "ZPI Gen3 Link Speed Unreliable Status", + "ZPI Gen2 Link Speed Unreliable Status", + "ZDI Gen3 Link Speed Unreliable Status", + "ZDI Gen4 Link Speed Unreliable Status", +}; + +const char *cper_zdi_zpi_err_type_str(unsigned int etype) +{ + return etype < ARRAY_SIZE(zdi_zpi_err_type_strs) ?
+ zdi_zpi_err_type_strs[etype] : "unknown error"; +} +EXPORT_SYMBOL_GPL(cper_zdi_zpi_err_type_str); + +static void __maybe_unused +cper_print_proc_generic_zdi_zpi(const char *pfx, const struct cper_sec_proc_generic *zdi_zpi) +{ + u8 etype = zdi_zpi->responder_id; + + if ((zdi_zpi->requestor_id & 0xff) == 7) { + pr_info("%s general processor error(zpi error)\n", pfx); + } else if ((zdi_zpi->requestor_id & 0xff) == 6) { + pr_info("%s general processor error(zdi error)\n", pfx); + } else { + pr_info("%s general processor error(unknown error)\n", pfx); + return; + } + pr_info("%s bus number %llx device number %llx function number 0\n", pfx, + ((zdi_zpi->requestor_id)>>8) & 0xff, zdi_zpi->requestor_id & 0xff); + pr_info("%s apic id %lld error_type: %s\n", pfx, zdi_zpi->proc_id, + cper_zdi_zpi_err_type_str(etype)); +} + static void cper_print_proc_generic(const char *pfx, const struct cper_sec_proc_generic *proc) { @@ -244,6 +292,11 @@ static void cper_print_proc_generic(const char *pfx, pfx, proc->responder_id); if (proc->validation_bits & CPER_PROC_VALID_IP) printk("%s""IP: 0x%016llx\n", pfx, proc->ip); +#if IS_ENABLED(CONFIG_X86) + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) + cper_print_proc_generic_zdi_zpi(pfx, proc); +#endif } static const char * const mem_err_type_strs[] = { diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index d5a01d6ed496c..badd2d909634f 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -578,6 +578,15 @@ config GPIO_PHYTIUM_PLAT Say yes here to support the on-chip GPIO controller for the Phytium SoC family. +config GPIO_PHYTIUM_SGPIO + tristate "Phytium SGPIO support" + default y if ARCH_PHYTIUM + depends on ARM64 + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + help + Say yes here to enable SGPIO support for Phytium SoCs. + config GPIO_PL061 tristate "PrimeCell PL061 GPIO support" depends on ARM_AMBA || COMPILE_TEST diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 53e8537750d1f..77d9701fd76d6 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -144,6 +144,7 @@ obj-$(CONFIG_GPIO_PCI_IDIO_16) += gpio-pci-idio-16.o obj-$(CONFIG_GPIO_PHYTIUM_CORE) += gpio-phytium-core.o obj-$(CONFIG_GPIO_PHYTIUM_PCI) += gpio-phytium-pci.o obj-$(CONFIG_GPIO_PHYTIUM_PLAT) += gpio-phytium-platform.o +obj-$(CONFIG_GPIO_PHYTIUM_SGPIO) += gpio-phytium-sgpio.o obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o obj-$(CONFIG_GPIO_PMIC_EIC_SPRD) += gpio-pmic-eic-sprd.o diff --git a/drivers/gpio/gpio-phytium-sgpio.c b/drivers/gpio/gpio-phytium-sgpio.c new file mode 100644 index 0000000000000..3e8e8d15059ae --- /dev/null +++ b/drivers/gpio/gpio-phytium-sgpio.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SGPIO Driver + * + * Copyright (c) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SGPIO_CTL0_REG 0x00 +#define SGPIO_CTL0_REG_ENABLE BIT(0) +#define SGPIO_CTL0_REG_RX_DISABLE BIT(1) +#define SGPIO_CTL0_REG_L3_L0 GENMASK(11, 8) +#define SGPIO_CTL0_REG_CLK_DIV_NUM GENMASK(31, 12) +#define SGPIO_CTL1_REG 0x04 +#define SGPIO_CTL1_REG_READY BIT(0) +#define SGPIO_CTL1_REG_W_UPDATA BIT(1) +#define SGPIO_CTL1_REG_OP_MODE BIT(2) +#define SGPIO_CTL1_REG_OP_STATE BIT(3) +#define SGPIO_CTL1_REG_BIT_NUM GENMASK(14, 8) +#define SGPIO_CTL1_REG_INTERVAL_TIMER GENMASK(31, 16) +#define SGPIO_SOFT_RESET_REG 0x08 +#define SGPIO_SOFT_RESET_REG_MASK BIT(0) +#define SGPIO_IRQ_REG 0x0c +#define SGPIO_IRQ_REG_MASK BIT(0) +#define SGPIO_IRQ_M_REG 0x10 +#define SGPIO_IRQ_M_REG_MASK BIT(0) +#define SGPIO_WDATA0_REG 0x14 +#define SGPIO_WDATA_REG(x) (SGPIO_WDATA0_REG + (x) * 4) +#define SGPIO_RDATA0_REG 0x24 +#define SGPIO_RDATA_REG(x) (SGPIO_RDATA0_REG + (x) * 4) + +#define DEFAULT_L3_L0 0 +#define DEFAULT_CLK 50000000 + +#define GPIO_GROUP(x) ((x) >> 6) +#define GPIO_OFFSET(x) ((x) & GENMASK(5, 0)) +#define GPIO_BIT(x) BIT(GPIO_OFFSET(x) >> 1) + +struct phytium_sgpio { + struct gpio_chip gc; + void __iomem *regs; + unsigned int ngpios; + struct clk *pclk; + + struct mutex lock; + struct completion completion; +}; + +static bool phytium_sgpio_is_input(unsigned int offset) +{ + return !(offset % 2); +} + +static int sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val) +{ + struct phytium_sgpio *gpio = gpiochip_get_data(gc); + u32 reg; + int rc = 0; + + if (phytium_sgpio_is_input(offset)) + return -EINVAL; + + reinit_completion(&gpio->completion); + + /* + * Since this is an output, read the cached value from rdata, + * then update value. + */ + reg = readl(gpio->regs + SGPIO_RDATA_REG(GPIO_GROUP(offset))); + if (val) + reg |= GPIO_BIT(offset); + else + reg &= ~GPIO_BIT(offset); + writel(reg, gpio->regs + SGPIO_WDATA_REG(GPIO_GROUP(offset))); + + /* Start transmission and wait for completion */ + writel(readl(gpio->regs + SGPIO_CTL1_REG) | SGPIO_CTL1_REG_W_UPDATA, + gpio->regs + SGPIO_CTL1_REG); + if (!wait_for_completion_timeout(&gpio->completion, msecs_to_jiffies(1000))) + rc = -ETIMEDOUT; + + return rc; +} + +static int phytium_sgpio_direction_input(struct gpio_chip *gc, unsigned int offset) +{ + return phytium_sgpio_is_input(offset) ? 0 : -EINVAL; +}
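+/*
+ * Mapping sketch for the helpers above: even offsets are inputs and odd
+ * offsets are outputs, and each input/output pair shares one serialized
+ * data bit. For example, offset 7 maps to GPIO_GROUP(7) = 0 and
+ * GPIO_BIT(7) = BIT(3), i.e. bit 3 of WDATA0/RDATA0, while offset 70
+ * maps to group 1, bit 3 of WDATA1/RDATA1.
+ */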
+ +static int phytium_sgpio_direction_output(struct gpio_chip *gc, unsigned int offset, int val) +{ + struct phytium_sgpio *gpio = gpiochip_get_data(gc); + int rc; + + mutex_lock(&gpio->lock); + + /* + * No special action is required for setting the direction; we'll + * error-out in sgpio_set_value if this isn't an output GPIO + */ + rc = sgpio_set_value(&gpio->gc, offset, val); + + mutex_unlock(&gpio->lock); + + return rc; +} + +static int phytium_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset) +{ + return !!phytium_sgpio_is_input(offset); +} + +static int phytium_sgpio_get(struct gpio_chip *gc, unsigned int offset) +{ + struct phytium_sgpio *gpio = gpiochip_get_data(gc); + int rc = 0; + u32 val, ctl0; + + mutex_lock(&gpio->lock); + + if (!phytium_sgpio_is_input(offset)) { + val = readl(gpio->regs + SGPIO_WDATA_REG(GPIO_GROUP(offset))); + rc = !!(val & GPIO_BIT(offset)); + mutex_unlock(&gpio->lock); + return rc; + } + + reinit_completion(&gpio->completion); + + /* Enable Rx */ + ctl0 = readl(gpio->regs + SGPIO_CTL0_REG); + writel(ctl0 & ~SGPIO_CTL0_REG_RX_DISABLE, gpio->regs + SGPIO_CTL0_REG); + + /* Start reading transaction and wait for completion */ + writel(readl(gpio->regs + SGPIO_CTL1_REG) | SGPIO_CTL1_REG_W_UPDATA, + gpio->regs + SGPIO_CTL1_REG); + if (!wait_for_completion_timeout(&gpio->completion, msecs_to_jiffies(1000))) { + rc = -ETIMEDOUT; + goto err; + } + + val = readl(gpio->regs + SGPIO_RDATA_REG(GPIO_GROUP(offset))); + rc = !!(val & GPIO_BIT(offset)); + +err: + /* Disable Rx to hold the value */ + writel(ctl0 | SGPIO_CTL0_REG_RX_DISABLE, gpio->regs + SGPIO_CTL0_REG); + mutex_unlock(&gpio->lock); + + return rc; +} + +static int phytium_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val) +{ + struct phytium_sgpio *gpio = gpiochip_get_data(gc); + int rc; + + mutex_lock(&gpio->lock); + + rc = sgpio_set_value(gc, offset, val); + + mutex_unlock(&gpio->lock); + + return rc; +} + +static irqreturn_t phytium_sgpio_irq_handler(int irq, void *data) +{ + struct phytium_sgpio *gpio = data; + + if (!readl(gpio->regs + SGPIO_IRQ_REG)) + return IRQ_NONE; + + /* Clear the interrupt */ + writel(0, gpio->regs + SGPIO_IRQ_REG); + + /* Check if tx/rx has been done */ + if (!(readl(gpio->regs + SGPIO_CTL1_REG) & SGPIO_CTL1_REG_OP_STATE)) + complete(&gpio->completion); + + return IRQ_HANDLED; +} + +static int phytium_sgpio_probe(struct platform_device *pdev) +{ + u32 pclk_freq, sclk_freq, clk_div; + struct phytium_sgpio *gpio; + struct resource *res; + struct device *dev = &pdev->dev; + int rc; + + gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gpio->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->regs)) + return PTR_ERR(gpio->regs); + + if (devm_request_irq(dev, platform_get_irq(pdev, 0), + phytium_sgpio_irq_handler, + IRQF_SHARED, dev_name(dev), gpio)) { + dev_err(dev, "failed to request IRQ\n"); + return -ENOENT; + } + + rc = fwnode_property_read_u32(dev_fwnode(dev), "ngpios", &gpio->ngpios); + if (rc < 0) { + dev_err(dev, "Could not read ngpios property\n"); + return -EINVAL; + } else if (gpio->ngpios % 32) { + dev_err(&pdev->dev, "Number of GPIOs not multiple of 32: %u\n", + gpio->ngpios); + return -EINVAL; + } + + rc = fwnode_property_read_u32(dev_fwnode(dev), "bus-frequency", &sclk_freq); + if (rc < 0) { + dev_err(dev, "Could not read bus-frequency property\n"); + return -EINVAL; + }
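+ + /*
+ * Worked example for the divider programmed below, assuming the
+ * defaults in this file: with pclk_freq = 50 MHz (DEFAULT_CLK) and a
+ * "bus-frequency" of 100 kHz, clk_div = 50000000 / (2 * 100000) - 1
+ * = 249, well within the 20-bit SGPIO_CTL0_REG_CLK_DIV_NUM field.
+ */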
"pclk_freq", &pclk_freq); + if (!pclk_freq || (pclk_freq != 50000000)) { + dev_err(dev, "Could not get APB clock property from acpi, use default clk!\n"); + pclk_freq = DEFAULT_CLK; + } + + } else { + gpio->pclk = devm_clk_get(dev, NULL); + if (IS_ERR(gpio->pclk)) { + dev_err(dev, "Could not get the APB clock property\n"); + return PTR_ERR(gpio->pclk); + } + rc = clk_prepare_enable(gpio->pclk); + if (rc) { + dev_err(dev, "failed to enable pclk: %d\n", rc); + return rc; + } + pclk_freq = clk_get_rate(gpio->pclk); + } + + /* + * From the datasheet: + * (pclk / 2) / (clk_div + 1) = sclk + */ + if (sclk_freq == 0) { + dev_err(dev, "SCLK should not be 0\n"); + return -EINVAL; + } + + clk_div = (pclk_freq / (sclk_freq * 2)) - 1; + if (clk_div > (1 << 20) - 1) { + dev_err(dev, "clk_div is overflow\n"); + return -EINVAL; + } + + writel(FIELD_PREP(SGPIO_CTL0_REG_CLK_DIV_NUM, clk_div) | + FIELD_PREP(SGPIO_CTL0_REG_L3_L0, DEFAULT_L3_L0) | + SGPIO_CTL0_REG_RX_DISABLE | SGPIO_CTL0_REG_ENABLE, + gpio->regs + SGPIO_CTL0_REG); + + writel(FIELD_PREP(SGPIO_CTL1_REG_BIT_NUM, gpio->ngpios) | + SGPIO_CTL1_REG_READY, gpio->regs + SGPIO_CTL1_REG); + + mutex_init(&gpio->lock); + init_completion(&gpio->completion); + platform_set_drvdata(pdev, gpio); + + gpio->gc.parent = dev; + gpio->gc.base = -1; + gpio->gc.ngpio = gpio->ngpios * 2; + gpio->gc.label = dev_name(dev); + gpio->gc.direction_input = phytium_sgpio_direction_input; + gpio->gc.direction_output = phytium_sgpio_direction_output; + gpio->gc.get_direction = phytium_sgpio_get_direction; + gpio->gc.get = phytium_sgpio_get; + gpio->gc.set = phytium_sgpio_set; + + return devm_gpiochip_add_data(dev, &gpio->gc, gpio); +} + +static const struct of_device_id phytium_sgpio_of_match[] = { + { .compatible = "phytium,sgpio", }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_sgpio_of_match); + +static const struct acpi_device_id phytium_sgpio_acpi_match[] = { + { "PHYT0031", 0}, + {}, +}; +MODULE_DEVICE_TABLE(acpi, phytium_sgpio_acpi_match); + +static struct platform_driver phytium_sgpio_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = of_match_ptr(phytium_sgpio_of_match), + .acpi_match_table = ACPI_PTR(phytium_sgpio_acpi_match), + }, + .probe = phytium_sgpio_probe, +}; +module_platform_driver(phytium_sgpio_driver); + +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium SGPIO driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 2b7aba22ecc19..e9a1957806b79 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2125,6 +2125,14 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; bool exec = flags & AMDGPU_FENCE_FLAG_EXEC; + +/* This workaround causes instability for LoongArch/Loongson (MIPS) + * devices based on the 7A1000/2000 chipset under heavy I/O load. + * + * FIXME: Disable this workaround until we find a better fix (possibly in + * the platform-specific PCI code). + */ +#ifndef CONFIG_MACH_LOONGSON64 /* Workaround for cache flush problems. First send a dummy EOP * event down the pipe with seq one below. */ @@ -2138,6 +2146,7 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, DATA_SEL(1) | INT_SEL(0)); amdgpu_ring_write(ring, lower_32_bits(seq - 1)); amdgpu_ring_write(ring, upper_32_bits(seq - 1)); +#endif /* Then send the real EOP event down the pipe. 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 2b7aba22ecc19..e9a1957806b79 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2125,6 +2125,14 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; bool exec = flags & AMDGPU_FENCE_FLAG_EXEC; + +/* This workaround causes instability for LoongArch/Loongson (MIPS) + * devices based on the 7A1000/2000 chipset under heavy I/O load. + * + * FIXME: Disable this workaround until we find a better fix (possibly in + * the platform-specific PCI code). + */ +#ifndef CONFIG_MACH_LOONGSON64 /* Workaround for cache flush problems. First send a dummy EOP * event down the pipe with seq one below. */ @@ -2138,6 +2146,7 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, DATA_SEL(1) | INT_SEL(0)); amdgpu_ring_write(ring, lower_32_bits(seq - 1)); amdgpu_ring_write(ring, upper_32_bits(seq - 1)); +#endif /* Then send the real EOP event down the pipe. */ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 8a81713d97aac..61b999f2e8c80 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6113,6 +6113,13 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; bool exec = flags & AMDGPU_FENCE_FLAG_EXEC; +/* This workaround causes instability for LoongArch/Loongson (MIPS) + * devices based on the 7A1000/2000 chipset under heavy I/O load. + * + * FIXME: Disable this workaround until we find a better fix (possibly in + * the platform-specific PCI code). + */ +#ifndef CONFIG_MACH_LOONGSON64 /* Workaround for cache flush problems. First send a dummy EOP * event down the pipe with seq one below. */ @@ -6127,6 +6134,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, DATA_SEL(1) | INT_SEL(0)); amdgpu_ring_write(ring, lower_32_bits(seq - 1)); amdgpu_ring_write(ring, upper_32_bits(seq - 1)); +#endif /* Then send the real EOP event down the pipe: * EVENT_WRITE_EOP - flush caches, send int */ diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index a7e6d7854b7b2..6cec9438aea02 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -360,6 +360,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); + struct pci_dev *pdev = adev->pdev; enum amd_dpm_forced_level level; int ret = 0; @@ -391,12 +392,20 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, if (ret < 0) return ret; + /* + * Some devices have stability issues with any AMD_DPM_FORCED_LEVEL_* + * other than AMD_DPM_FORCED_LEVEL_AUTO, so refuse the other levels. + */ + if (pdev->device == 0x15d8 && level != AMD_DPM_FORCED_LEVEL_AUTO) { + pr_warn("device 0x%04X does not allow dpm force level %d\n", pdev->device, level); + return count; + } + mutex_lock(&adev->pm.stable_pstate_ctx_lock); if (amdgpu_dpm_force_performance_level(adev, level)) { amdgpu_pm_put_access(adev); mutex_unlock(&adev->pm.stable_pstate_ctx_lock); return -EINVAL; } + /* override whatever a user ctx may have set */ adev->pm.stable_pstate_ctx = NULL; mutex_unlock(&adev->pm.stable_pstate_ctx_lock); diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index e4f0286047360..8e2e9eaa87b4f 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3543,6 +3543,13 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring = &rdev->ring[fence->ring]; u64 addr = rdev->fence_drv[fence->ring].gpu_addr; +/* This workaround causes instability for LoongArch/Loongson (MIPS) + * devices based on the 7A1000/2000 chipset under heavy I/O load. + * + * FIXME: Disable this workaround until we find a better fix (possibly in + * the platform-specific PCI code). + */ +#ifndef CONFIG_MACH_LOONGSON64 /* Workaround for cache flush problems. First send a dummy EOP * event down the pipe with seq one below. */ @@ -3556,6 +3563,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev, DATA_SEL(1) | INT_SEL(0)); radeon_ring_write(ring, fence->seq - 1); radeon_ring_write(ring, 0); +#endif /* Then send the real EOP event down the pipe.
*/ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index ecdf31743fc6e..087bc06f38fcd 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -361,6 +361,17 @@ config I2C_SCMI To compile this driver as a module, choose M here: the module will be called i2c-scmi. +config I2C_ZHAOXIN_SMBUS + tristate "Zhaoxin SMBus Interface" + depends on (PCI && (CPU_SUP_CENTAUR || CPU_SUP_ZHAOXIN)) || COMPILE_TEST + default m + help + If you say yes to this option, support will be included for the + ZHAOXIN SMBus interface + + This driver can also be built as a module. If so, the module + will be called i2c-zhaoxin-smbus. + endif # ACPI comment "Mac SMBus host controller drivers" diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index edc699ff2329d..9490029f3de88 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -148,6 +148,7 @@ obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF) += i2c-robotfuzz-osif.o obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o obj-$(CONFIG_I2C_VIPERBOARD) += i2c-viperboard.o +obj-$(CONFIG_I2C_ZHAOXIN_SMBUS) += i2c-zhaoxin-smbus.o # Other I2C/SMBus bus drivers obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o diff --git a/drivers/i2c/busses/i2c-zhaoxin-smbus.c b/drivers/i2c/busses/i2c-zhaoxin-smbus.c new file mode 100644 index 0000000000000..0f6abf736b603 --- /dev/null +++ b/drivers/i2c/busses/i2c-zhaoxin-smbus.c @@ -0,0 +1,381 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Zhaoxin SMBus controller driver + * + * Copyright(c) 2023 Shanghai Zhaoxin Semiconductor Corporation. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_VERSION "3.1.0" + +#define ZXSMB_NAME "smbus_zhaoxin" + +/* + * registers + */ +/* SMBus MMIO address offsets */ +#define ZXSMB_STS 0x00 +#define ZXSMB_BUSY BIT(0) +#define ZXSMB_CMD_CMPLET BIT(1) +#define ZXSMB_DEV_ERR BIT(2) +#define ZXSMB_BUS_CLSI BIT(3) +#define ZXSMB_FAIL_TRANS BIT(4) +#define ZXSMB_STS_MASK GENMASK(4, 0) +#define ZXSMB_NSMBSRST BIT(5) +#define ZXSMB_CTL 0x02 +#define ZXSMB_CMPLT_EN BIT(0) +#define ZXSMB_KILL_PRG BIT(1) +#define ZXSMB_START BIT(6) +#define ZXSMB_PEC_EN BIT(7) +#define ZXSMB_CMD 0x03 +#define ZXSMB_ADD 0x04 +#define ZXSMB_DAT0 0x05 +#define ZXSMB_DAT1 0x06 +#define ZXSMB_BLKDAT 0x07 + +/* + * platform related informations + */ + /* protocol cmd constants */ +#define ZXSMB_QUICK 0x00 +#define ZXSMB_BYTE 0x04 +#define ZXSMB_BYTE_DATA 0x08 +#define ZXSMB_WORD_DATA 0x0C +#define ZXSMB_PROC_CALL 0x10 +#define ZXSMB_BLOCK_DATA 0x14 +#define ZXSMB_I2C_10_BIT_ADDR 0x18 +#define ZXSMB_I2C_PROC_CALL 0x30 +#define ZXSMB_I2C_BLOCK_DATA 0x34 +#define ZXSMB_I2C_7_BIT_ADDR 0x38 +#define ZXSMB_UNIVERSAL 0x3C + +#define ZXSMB_TIMEOUT 500 + +struct zxsmb { + struct device *dev; + struct i2c_adapter adap; + struct completion complete; + u16 base; + int irq; + u8 status; + int size; + u8 pec; +}; + +static irqreturn_t zxsmb_irq_handle(int irq, void *dev_id) +{ + struct zxsmb *smb = (struct zxsmb *)dev_id; + + smb->status = inb(smb->base + ZXSMB_STS); + if ((smb->status & ZXSMB_STS_MASK) == 0) + return IRQ_NONE; + + /* clear status */ + outb(smb->status, smb->base + ZXSMB_STS); + complete(&smb->complete); + + return IRQ_HANDLED; +} + +static int zxsmb_status_check(struct zxsmb *smb) +{ + if (smb->status & ZXSMB_CMD_CMPLET) + return 0; + + if (smb->status & ZXSMB_BUS_CLSI) { + dev_err(smb->dev, "Lost 
arbitration\n"); + outb(ZXSMB_KILL_PRG, smb->base + ZXSMB_CTL); + return -EAGAIN; + } + + dev_dbg(smb->dev, "Trans failed, status = 0x%X\n", smb->status); + + return -EIO; +} + +static int zxsmb_wait_interrupt_finish(struct zxsmb *smb) +{ + int time_left; + + time_left = wait_for_completion_timeout(&smb->complete, msecs_to_jiffies(ZXSMB_TIMEOUT)); + if (time_left == 0) { + u8 status = inb(smb->base + ZXSMB_STS); + + /* some hosts' IRQ configuration does not work well */ + if (status & ZXSMB_STS_MASK) { + outb(status, smb->base + ZXSMB_STS); + outb(ZXSMB_KILL_PRG, smb->base + ZXSMB_CTL); + devm_free_irq(smb->dev, smb->irq, smb); + smb->irq = 0; + dev_warn(smb->dev, "change to polling mode\n"); + + return -EAGAIN; + } + dev_dbg(smb->dev, "interrupt timeout\n"); + return -EIO; + } + + return zxsmb_status_check(smb); +} + +static int zxsmb_wait_polling_finish(struct zxsmb *smb) +{ + int status; + int time_left = ZXSMB_TIMEOUT * 10; + + do { + usleep_range(100, 200); + status = inb(smb->base + ZXSMB_STS); + } while ((status & ZXSMB_BUSY) && (--time_left)); + + if (time_left == 0) { + dev_dbg(smb->dev, "polling timeout\n"); + return -EIO; + } + + /* clear status */ + outb(status, smb->base + ZXSMB_STS); + smb->status = status; + + return zxsmb_status_check(smb); +} + +static int zxsmb_trans_start(struct zxsmb *smb) +{ + u16 base = smb->base; + int tmp; + + /* Make sure the SMBus host is ready to start transmitting */ + tmp = inb(base + ZXSMB_STS); + if (tmp & ZXSMB_BUSY) { + outb(tmp, base + ZXSMB_STS); + usleep_range(1000, 5000); + tmp = inb(base + ZXSMB_STS); + if (tmp & ZXSMB_BUSY) { + dev_err(smb->dev, "SMBus reset failed! (0x%02x)\n", tmp); + return -EIO; + } + } + + tmp = ZXSMB_START | smb->size; + if (smb->pec) + tmp |= ZXSMB_PEC_EN; + if (smb->irq) + tmp |= ZXSMB_CMPLT_EN; + + reinit_completion(&smb->complete); + smb->status = 0; + outb(tmp, base + ZXSMB_CTL); + return 0; +} + +static int zxsmb_transaction(struct zxsmb *smb) +{ + int err; + + err = zxsmb_trans_start(smb); + if (err) + return err; + + if (smb->irq) + err = zxsmb_wait_interrupt_finish(smb); + else + err = zxsmb_wait_polling_finish(smb); + + outb(0, smb->base + ZXSMB_CTL); + return err; +}
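+/*
+ * Register-level example of the flow implemented above, for a BYTE_DATA
+ * read of command 0x05 from a hypothetical slave at 0x2d:
+ * zxsmb_smbus_xfer() (below) writes 0x05 to ZXSMB_CMD and
+ * (0x2d << 1) | 1 to ZXSMB_ADD, zxsmb_trans_start() then writes
+ * ZXSMB_START | ZXSMB_BYTE_DATA to ZXSMB_CTL, and once ZXSMB_CMD_CMPLET
+ * shows up in ZXSMB_STS (via IRQ or polling) the result is read back
+ * from ZXSMB_DAT0.
+ */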
+ +static int zxsmb_smbus_xfer(struct i2c_adapter *adap, u16 addr, u16 flags, char read, u8 command, + int size, union i2c_smbus_data *data) +{ + int i; + int err; + u8 len; + struct zxsmb *smb = (struct zxsmb *)i2c_get_adapdata(adap); + u16 base = smb->base; + + switch (size) { + case I2C_SMBUS_QUICK: + size = ZXSMB_QUICK; + break; + case I2C_SMBUS_BYTE: + size = ZXSMB_BYTE; + if (!read) + outb(command, base + ZXSMB_CMD); + break; + case I2C_SMBUS_BYTE_DATA: + outb(command, base + ZXSMB_CMD); + if (!read) + outb(data->byte, base + ZXSMB_DAT0); + size = ZXSMB_BYTE_DATA; + break; + case I2C_SMBUS_PROC_CALL: + case I2C_SMBUS_WORD_DATA: + if (read && size == I2C_SMBUS_PROC_CALL) + goto exit_unsupported; + outb(command, base + ZXSMB_CMD); + if (!read) { + outb(data->word & 0xff, base + ZXSMB_DAT0); + outb((data->word & 0xff00) >> 8, base + ZXSMB_DAT1); + } + size = (size == I2C_SMBUS_PROC_CALL) ? + ZXSMB_PROC_CALL : ZXSMB_WORD_DATA; + break; + case I2C_SMBUS_I2C_BLOCK_DATA: + case I2C_SMBUS_BLOCK_DATA: + len = data->block[0]; + if (read && size == I2C_SMBUS_I2C_BLOCK_DATA) + outb(len, base + ZXSMB_DAT1); + outb(command, base + ZXSMB_CMD); + /* Reset ZXSMB_BLKDAT */ + inb(base + ZXSMB_CTL); + if (!read) { + outb(len, base + ZXSMB_DAT0); + outb(0, base + ZXSMB_DAT1); + for (i = 1; i <= len; i++) + outb(data->block[i], base + ZXSMB_BLKDAT); + } + size = (size == I2C_SMBUS_I2C_BLOCK_DATA) ? + ZXSMB_I2C_BLOCK_DATA : ZXSMB_BLOCK_DATA; + break; + default: + goto exit_unsupported; + } + + outb(((addr & 0x7f) << 1) | read, base + ZXSMB_ADD); + smb->size = size; + smb->pec = flags & I2C_CLIENT_PEC; + err = zxsmb_transaction(smb); + if (err) + return err; + + if ((read == I2C_SMBUS_WRITE) || (size == ZXSMB_QUICK)) { + if (unlikely(size == ZXSMB_PROC_CALL)) + goto prepare_read; + return 0; + } + +prepare_read: + switch (size) { + case ZXSMB_BYTE: + case ZXSMB_BYTE_DATA: + data->byte = inb(base + ZXSMB_DAT0); + break; + case ZXSMB_PROC_CALL: + case ZXSMB_WORD_DATA: + data->word = inb(base + ZXSMB_DAT0) + (inb(base + ZXSMB_DAT1) << 8); + break; + case ZXSMB_I2C_BLOCK_DATA: + case ZXSMB_BLOCK_DATA: + data->block[0] = inb(base + ZXSMB_DAT0); + if (data->block[0] > I2C_SMBUS_BLOCK_MAX) + data->block[0] = I2C_SMBUS_BLOCK_MAX; + /* Reset ZXSMB_BLKDAT */ + inb(base + ZXSMB_CTL); + for (i = 1; i <= data->block[0]; i++) + data->block[i] = inb(base + ZXSMB_BLKDAT); + break; + } + + return 0; + +exit_unsupported: + dev_err(smb->dev, "unsupported access, size: 0x%x, dir: %s\n", size, read ? "read" : "write"); + return -EOPNOTSUPP; +} + +static u32 zxsmb_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm smbus_algorithm = { + .smbus_xfer = zxsmb_smbus_xfer, + .functionality = zxsmb_func, +}; + +static int zxsmb_probe(struct platform_device *pdev) +{ + struct zxsmb *smb; + struct resource *res; + struct i2c_adapter *adap; + + smb = devm_kzalloc(&pdev->dev, sizeof(*smb), GFP_KERNEL); + if (!smb) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res) + return -ENODEV; + smb->base = res->start; + if (!devm_request_region(&pdev->dev, res->start, resource_size(res), pdev->name)) { + dev_err(&pdev->dev, "Can't get I/O resource\n"); + return -EBUSY; + } + + smb->irq = platform_get_irq(pdev, 0); + if (smb->irq < 0 || devm_request_irq(&pdev->dev, smb->irq, zxsmb_irq_handle, IRQF_SHARED, + pdev->name, smb)) { + dev_warn(&pdev->dev, "failed to request irq %d\n", smb->irq); + smb->irq = 0; + } else { + init_completion(&smb->complete); + } + + smb->dev = &pdev->dev; + platform_set_drvdata(pdev, smb); + + adap = &smb->adap; + adap->algo = &smbus_algorithm; + adap->retries = 2; + adap->owner = THIS_MODULE; + adap->dev.parent = &pdev->dev; + ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); + snprintf(adap->name, sizeof(adap->name), "zhaoxin-%s-%s", dev_name(pdev->dev.parent), + dev_name(smb->dev)); + i2c_set_adapdata(&smb->adap, smb); + + return i2c_add_adapter(&smb->adap); +} + +static void zxsmb_remove(struct platform_device *pdev) +{ + struct zxsmb *smb = platform_get_drvdata(pdev); + + i2c_del_adapter(&smb->adap); +} + +static const struct acpi_device_id zxsmb_acpi_match[] = { + {"SMB3324", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, zxsmb_acpi_match); + +static struct platform_driver zxsmb_driver = { + .probe = zxsmb_probe, + .remove = zxsmb_remove, +
.driver = { + .name = ZXSMB_NAME, + .acpi_match_table = ACPI_PTR(zxsmb_acpi_match), + }, +}; + +module_platform_driver(zxsmb_driver); + +MODULE_AUTHOR("hanshu@zhaoxin.com"); +MODULE_DESCRIPTION("Zhaoxin SMBus driver"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/i3c/Makefile b/drivers/i3c/Makefile index 11982efbc6d91..02d9b450fb864 100644 --- a/drivers/i3c/Makefile +++ b/drivers/i3c/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 i3c-y := device.o master.o obj-$(CONFIG_I3C) += i3c.o +i3c-$(CONFIG_ACPI) += i3c_master_acpi.o obj-$(CONFIG_I3C) += master/ diff --git a/drivers/i3c/i3c_master_acpi.c b/drivers/i3c/i3c_master_acpi.c new file mode 100644 index 0000000000000..54b6dbfb7acfe --- /dev/null +++ b/drivers/i3c/i3c_master_acpi.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for I3C ACPI Interface + * + * Copyright (C) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include "i3c_master_acpi.h" + +static int i3c_master_acpi_parse_val(union acpi_object *store_elements, char *method, + int count, u32 *value) +{ + int i, package_count, ret = -1; + union acpi_object *acpi_elements; + + for (i = 0; i < count; i++) { + acpi_elements = store_elements; + if (acpi_elements[i].type == ACPI_TYPE_PACKAGE) { + package_count = acpi_elements[i].package.count; + + if (package_count == 2) { + acpi_elements = acpi_elements[i].package.elements; + if ((acpi_elements[0].type == ACPI_TYPE_STRING) && + (!strcmp(acpi_elements[0].string.pointer, method))) { + if (acpi_elements[1].type == ACPI_TYPE_INTEGER) { + *value = acpi_elements[1].integer.value; + ret = 0; + break; + } + } + } + } + } + return ret; +} + +int i3c_master_acpi_get_params(acpi_handle handle, char *method, u32 *value) +{ + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; + acpi_status status = 0; + union acpi_object *obj; + int count, ret = -1; + + status = acpi_evaluate_object(handle, "_DSD", NULL, &buf); + if (ACPI_FAILURE(status)) { + kfree(buf.pointer); + return -2; + } + + obj = (union acpi_object *)buf.pointer; + + if (obj->type == ACPI_TYPE_PACKAGE) { + union acpi_object *acpi_elements; + union acpi_object *store_elements; + + acpi_elements = obj->package.elements; + if ((obj->package.count >= 2) && (acpi_elements[1].type == ACPI_TYPE_PACKAGE)) { + count = acpi_elements[1].package.count; + acpi_elements = acpi_elements[1].package.elements; + + store_elements = acpi_elements; + ret = i3c_master_acpi_parse_val(store_elements, method, count, value); + } + } + + kfree(buf.pointer); + return ret; +} +EXPORT_SYMBOL_GPL(i3c_master_acpi_get_params); + +void i3c_master_acpi_clk_params(acpi_handle handle, char *method, + u32 *pres_ctrl0, u32 *pres_ctrl1, u32 *thd_delay) +{ + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; + union acpi_object *obj; + + if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf))) + return; + + obj = (union acpi_object *)buf.pointer; + if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 3) { + const union acpi_object *objs = obj->package.elements; + + *pres_ctrl0 = (u32)objs[0].integer.value; + *pres_ctrl1 = (u32)objs[1].integer.value; + *thd_delay = (u32)objs[2].integer.value; + } + + kfree(buf.pointer); +} +EXPORT_SYMBOL_GPL(i3c_master_acpi_clk_params); diff --git a/drivers/i3c/i3c_master_acpi.h b/drivers/i3c/i3c_master_acpi.h new file mode 100644 index 0000000000000..61fa62ff12fba --- /dev/null +++ b/drivers/i3c/i3c_master_acpi.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 
*/ +/* + * Driver for I3C ACPI Interface + * + * Copyright (C) 2021-2023 Phytium Technology Co., Ltd. + */ + +#ifndef __I3C_MASTER_ACPI_H +#define __I3C_MASTER_ACPI_H + +#include +#include + +int i3c_master_acpi_get_params(acpi_handle handle, char *method, u32 *value); +void i3c_master_acpi_clk_params(acpi_handle handle, char *method, + u32 *pres_ctrl0, u32 *pres_ctrl1, u32 *thd_delay); + +#endif diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 66513a27e6e77..9c5b05dacee70 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -5,6 +5,8 @@ * Author: Boris Brezillon */ +#include +#include #include #include #include @@ -19,8 +21,25 @@ #include #include +#if defined(CONFIG_ACPI) && defined(CONFIG_ARCH_PHYTIUM) +#include "i3c_master_acpi.h" +#endif #include "internals.h" +struct i3c_acpi_lookup { + u64 pid; + acpi_handle adapter_handle; + acpi_handle device_handle; + acpi_handle search_handle; + int n; + int index; + u32 speed; + u32 min_speed; + u32 force_speed; + u32 slave_address; + u32 lvr; +}; + static DEFINE_IDR(i3c_bus_idr); static DEFINE_MUTEX(i3c_core_lock); static int __i3c_first_dynamic_bus_num; @@ -266,6 +285,88 @@ static ssize_t modalias_show(struct device *dev, } static DEVICE_ATTR_RO(modalias); +static ssize_t reg_read_store(struct device *dev, + struct device_attribute *da, + const char *buf, size_t size) +{ + struct i3c_device *i3c = dev_to_i3cdev(dev); + struct i3c_priv_xfer xfers[2]; + int status; + long value; + __u8 reg; + __u8 val; + + status = kstrtol(buf, 0, &value); + if (status) + return status; + + reg = (__u8)value; + + xfers[0].rnw = false; + xfers[0].len = 1; + xfers[0].data.out = &reg; + + xfers[1].rnw = true; + xfers[1].len = 1; + xfers[1].data.in = &val; + + i3c_bus_normaluse_lock(i3c->bus); + i3c_device_do_priv_xfers(i3c, xfers, 2); + dev_info(dev, "reg read: reg=0x%x, val=0x%x\n", reg, val); + i3c_bus_normaluse_unlock(i3c->bus); + + return size; +} +static DEVICE_ATTR_WO(reg_read); + +static ssize_t reg_write_store(struct device *dev, + struct device_attribute *da, + const char *buf, size_t size) +{ + struct i3c_device *i3c = dev_to_i3cdev(dev); + struct i3c_priv_xfer xfers[1]; + int status; + char *kbuf; + char *p; + char *token; + long value; + __u8 reg; + u16 val; + + kbuf = kmalloc(size, GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + strscpy(kbuf, buf, size); + /* strsep() advances p, so keep kbuf around for kfree() */ + p = kbuf; + token = strsep(&p, " "); + if (!token) { + kfree(kbuf); + return -EINVAL; + } + + status = kstrtol(token, 0, &value); + if (status) { + kfree(kbuf); + return status; + } + + reg = (u8)value; + token = strsep(&p, " "); + if (!token) { + kfree(kbuf); + return -EINVAL; + } + + status = kstrtol(token, 0, &value); + if (status) { + kfree(kbuf); + return status; + } + + val = ((u8)value << 8) | reg; + xfers[0].rnw = false; + xfers[0].len = 2; + xfers[0].data.out = &val; + + i3c_bus_normaluse_lock(i3c->bus); + i3c_device_do_priv_xfers(i3c, xfers, 1); + dev_info(dev, "reg write: reg=0x%x, val=0x%x\n", reg, val); + i3c_bus_normaluse_unlock(i3c->bus); + + kfree(kbuf); + return size; +} +static DEVICE_ATTR_WO(reg_write); + static struct attribute *i3c_device_attrs[] = { &dev_attr_bcr.attr, &dev_attr_dcr.attr, @@ -273,6 +374,8 @@ static struct attribute *i3c_device_attrs[] = { &dev_attr_dynamic_address.attr, &dev_attr_hdrcap.attr, &dev_attr_modalias.attr, + &dev_attr_reg_read.attr, + &dev_attr_reg_write.attr, NULL, }; ATTRIBUTE_GROUPS(i3c_device); @@ -1682,9 +1785,10 @@ i3c_master_register_new_i3c_devs(struct i3c_master_controller *master) dev_set_name(&desc->dev->dev, "%d-%llx", master->bus.id, desc->info.pid); - if (desc->boardinfo) + if (desc->boardinfo) { desc->dev->dev.of_node = desc->boardinfo->of_node; - + 
desc->dev->dev.fwnode = desc->boardinfo->fwnode; + } ret = device_register(&desc->dev->dev); if (ret) { dev_err(&master->dev, @@ -2402,6 +2506,182 @@ static int of_populate_i3c_bus(struct i3c_master_controller *master) return 0; } +#if defined(CONFIG_ACPI) && defined(CONFIG_ARCH_PHYTIUM) +static int i3c_acpi_master_add_i2c_boardinfo(struct i3c_master_controller *master, + struct acpi_device *adev, struct i3c_acpi_lookup *look_up) +{ + struct i2c_dev_boardinfo *boardinfo; + struct device *dev = &master->dev; + u32 addr = look_up->slave_address; + + boardinfo = devm_kzalloc(dev, sizeof(*boardinfo), GFP_KERNEL); + if (!boardinfo) + return -ENOMEM; + + boardinfo->base.addr = addr; + acpi_set_modalias(adev, dev_name(&adev->dev), boardinfo->base.type, + sizeof(boardinfo->base.type)); + boardinfo->base.fwnode = acpi_fwnode_handle(adev); + + /* + * The I3C Specification does not clearly say I2C devices with 10-bit + * address are supported. These devices can't be passed properly through + * DEFSLVS command. + */ + if (boardinfo->base.flags & I2C_CLIENT_TEN) { + dev_err(dev, "I2C device with 10 bit address not supported."); + return -EOPNOTSUPP; + } + + boardinfo->lvr = look_up->lvr; + + /* If the client speed is smaller than controller speed, + * then set the controller speed to the client speed + */ + if (look_up->speed && look_up->speed < master->bus.scl_rate.i2c) + master->bus.scl_rate.i2c = look_up->speed; + + list_add_tail(&boardinfo->node, &master->boardinfo.i2c); + + return 0; +} + +static int i3c_acpi_master_add_i3c_boardinfo(struct i3c_master_controller *master, + struct acpi_device *adev, acpi_handle handle, struct i3c_acpi_lookup *look_up) +{ + struct i3c_dev_boardinfo *boardinfo; + struct device *dev = &master->dev; + enum i3c_addr_slot_status addrstatus; + u32 init_dyn_addr = 0; + + boardinfo = devm_kzalloc(dev, sizeof(*boardinfo), GFP_KERNEL); + if (!boardinfo) + return -ENOMEM; + + if (look_up->slave_address) { + if (look_up->slave_address > I3C_MAX_ADDR) + return -EINVAL; + + addrstatus = i3c_bus_get_addr_slot_status(&master->bus, look_up->slave_address); + if (addrstatus != I3C_ADDR_SLOT_FREE) + return -EINVAL; + } + + boardinfo->static_addr = look_up->slave_address; + boardinfo->pid = look_up->pid; + + if ((boardinfo->pid & GENMASK_ULL(63, 48)) || + I3C_PID_RND_LOWER_32BITS(boardinfo->pid)) + return -EINVAL; + + boardinfo->init_dyn_addr = init_dyn_addr; + boardinfo->fwnode = acpi_fwnode_handle(adev); + + /* If the client speed is smaller than controller speed, + * then set the controller speed to the client speed + */ + if (look_up->speed && look_up->speed < master->bus.scl_rate.i3c) + master->bus.scl_rate.i3c = look_up->speed; + + list_add_tail(&boardinfo->node, &master->boardinfo.i3c); + + return 0; +} + +static int i3c_acpi_master_get_resource(struct acpi_resource *ares, void *data) +{ + struct i3c_acpi_lookup *look_up = (struct i3c_acpi_lookup *)data; + struct acpi_resource_i2c_serialbus *sb; + bool ret; + + ret = i2c_acpi_get_i2c_resource(ares, &sb); + if (ret == true) { + look_up->slave_address = sb->slave_address; + look_up->speed = sb->connection_speed; + } + + return 0; +} + +static int i3c_acpi_master_get_slave_info(struct i3c_master_controller *master, + struct acpi_device *adev, struct i3c_acpi_lookup *look_up) +{ + struct list_head resource_list; + int ret; + + /* Look up for I2cSerialBus resource */ + INIT_LIST_HEAD(&resource_list); + ret = acpi_dev_get_resources(adev, &resource_list, + i3c_acpi_master_get_resource, look_up); + 
acpi_dev_free_resource_list(&resource_list); + + return ret; +} + +static bool i3c_master_check_cpu_workaround(void) +{ + u32 cpu_implementor; + + cpu_implementor = read_cpuid_implementor(); + + if (cpu_implementor == ARM_CPU_IMP_PHYTIUM) + return true; + + return false; +} + +static int i3c_acpi_master_add_dev(struct acpi_device *adev, void *data) +{ + int ret = -1; + acpi_handle handle = adev->handle; + acpi_status status; + struct i3c_master_controller *master = (struct i3c_master_controller *)data; + u64 pid, lvr; + struct i3c_acpi_lookup look_up; + u32 mode_val; + + i3c_acpi_master_get_slave_info(master, adev, &look_up); + + status = acpi_evaluate_integer(handle, "_ADR", NULL, &pid); + + if (ACPI_SUCCESS(status)) + look_up.pid = pid; + else + look_up.pid = 0x202a0a0a0a0; + + if (i3c_master_check_cpu_workaround() == true) { + dev_info(&master->dev, "I3C ACPI SPECIAL KEYS\n"); + status = acpi_evaluate_integer(handle, "_LVR", NULL, &lvr); + + if (ACPI_SUCCESS(status)) + look_up.lvr = (u32)lvr; + else + look_up.lvr = 0; + + ret = i3c_master_acpi_get_params(handle, "i3c-mode", &mode_val); + if (!ret) { + if (!mode_val) + i3c_acpi_master_add_i2c_boardinfo(master, adev, &look_up); + else + i3c_acpi_master_add_i3c_boardinfo(master, adev, handle, &look_up); + } + } + return 0; +} + +static void i3c_acpi_register_devices(struct i3c_master_controller *master) +{ + struct acpi_device *adev = NULL; + + if (!has_acpi_companion(master->dev.parent)) + return; + + adev = acpi_fetch_acpi_dev(ACPI_HANDLE(master->dev.parent)); + if (adev) + acpi_dev_for_each_child(adev, i3c_acpi_master_add_dev, master); +} +#endif + static int i3c_master_i2c_adapter_xfer(struct i2c_adapter *adap, struct i2c_msg *xfers, int nxfers) { @@ -2875,6 +3155,7 @@ int i3c_master_register(struct i3c_master_controller *master, master->dev.parent = parent; master->dev.of_node = of_node_get(parent->of_node); + master->dev.fwnode = parent->fwnode; master->dev.bus = &i3c_bus_type; master->dev.type = &i3c_masterdev_type; master->dev.release = i3c_masterdev_release; @@ -2897,7 +3178,9 @@ int i3c_master_register(struct i3c_master_controller *master, ret = of_populate_i3c_bus(master); if (ret) goto err_put_dev; - +#if defined(CONFIG_ACPI) && defined(CONFIG_ARCH_PHYTIUM) + i3c_acpi_register_devices(master); +#endif list_for_each_entry(i2cbi, &master->boardinfo.i2c, node) { switch (i2cbi->lvr & I3C_LVR_I2C_INDEX_MASK) { case I3C_LVR_I2C_INDEX(0): diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig index 82cf330778d5a..c481b50bee298 100644 --- a/drivers/i3c/master/Kconfig +++ b/drivers/i3c/master/Kconfig @@ -85,3 +85,12 @@ config RENESAS_I3C This driver can also be built as a module. If so, the module will be called renesas-i3c. + +config PHYTIUM_I3C_MASTER + tristate "Phytium I3C master driver" + depends on I3C + depends on HAS_IOMEM + depends on !(ALPHA || PARISC) + help + Enable this driver if you want to support phytium I3C master block. 
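For reference, the ACPI enumeration added to drivers/i3c/master.c above expects each child of the controller node to carry an I2cSerialBus resource (static address and speed), an _ADR holding the 48-bit provisioned ID, and an "i3c-mode" integer in _DSD that selects I2C (0) or I3C (non-zero) enumeration. A hypothetical ASL fragment matching what i3c_acpi_master_add_dev() parses; all names, addresses and values here are illustrative only:

    Device (SLV0) {
        Name (_ADR, 0x04CB00000000)         // 48-bit provisioned ID
        Name (_CRS, ResourceTemplate () {
            I2cSerialBusV2 (0x50, ControllerInitiated, 400000,
                            AddressingMode7Bit, "\\_SB.I3C0",
                            0x00, ResourceConsumer, , Exclusive, )
        })
        Name (_DSD, Package () {
            ToUUID ("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
            Package () {
                Package () { "i3c-mode", 1 },   // 0 = I2C, non-zero = I3C
            }
        })
    }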
+ diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile index 816a227b6f7ac..a2db5d70dc611 100644 --- a/drivers/i3c/master/Makefile +++ b/drivers/i3c/master/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_AST2600_I3C_MASTER) += ast2600-i3c-master.o obj-$(CONFIG_SVC_I3C_MASTER) += svc-i3c-master.o obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci/ obj-$(CONFIG_RENESAS_I3C) += renesas-i3c.o +obj-$(CONFIG_PHYTIUM_I3C_MASTER) += i3c-master-phytium.o diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c index 97b151564d3d3..723b364033051 100644 --- a/drivers/i3c/master/i3c-master-cdns.c +++ b/drivers/i3c/master/i3c-master-cdns.c @@ -395,6 +395,7 @@ struct cdns_i3c_xfer { struct cdns_i3c_data { u8 thd_delay_ns; + u8 halt_disable; }; struct cdns_i3c_master { @@ -713,6 +714,19 @@ static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m, if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000))) cdns_i3c_master_unqueue_xfer(master, xfer); + /*GETMXDS format 1 need retransmission*/ + if ((xfer->ret) && (cmd->id == I3C_CCC_GETMXDS)) { + if (cmd->dests[0].payload.len == 5) { + cmd->dests[0].payload.len = 2; + ccmd->rx_len = cmd->dests[0].payload.len; + ccmd->cmd0 &= 0xfff000fff; + ccmd->cmd0 |= CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len); + cdns_i3c_master_queue_xfer(master, xfer); + if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000))) + cdns_i3c_master_unqueue_xfer(master, xfer); + } + } + ret = xfer->ret; cmd->err = cdns_i3c_cmd_get_err(&xfer->cmds[0]); cdns_i3c_master_free_xfer(xfer); @@ -1273,7 +1287,10 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m) * * We will issue ENTDAA afterwards from the threaded IRQ handler. */ - ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN; + if (master->devdata->halt_disable) + ctrl |= CTRL_HJ_DISEC | CTRL_MCS_EN; + else + ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN; /* * Configure data hold delay based on device-specific data. @@ -1539,10 +1556,17 @@ static void cdns_i3c_master_hj(struct work_struct *work) static struct cdns_i3c_data cdns_i3c_devdata = { .thd_delay_ns = 10, + .halt_disable = 0, +}; + +static struct cdns_i3c_data phytium_i3c_devdata = { + .thd_delay_ns = 10, + .halt_disable = 1, }; static const struct of_device_id cdns_i3c_master_of_ids[] = { { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata }, + { .compatible = "phytium,cdns-i3c-master", .data = &phytium_i3c_devdata}, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, cdns_i3c_master_of_ids); @@ -1619,8 +1643,12 @@ static int cdns_i3c_master_probe(struct platform_device *pdev) writel(MST_INT_IBIR_THR, master->regs + MST_IER); writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL); - return i3c_master_register(&master->base, &pdev->dev, + ret = i3c_master_register(&master->base, &pdev->dev, &cdns_i3c_master_ops, false); + + writel(readl(master->regs + CTRL) | CTRL_HJ_ACK, master->regs + CTRL); + + return ret; } static void cdns_i3c_master_remove(struct platform_device *pdev) diff --git a/drivers/i3c/master/i3c-master-phytium.c b/drivers/i3c/master/i3c-master-phytium.c new file mode 100644 index 0000000000000..6b25beb382ae1 --- /dev/null +++ b/drivers/i3c/master/i3c-master-phytium.c @@ -0,0 +1,1847 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Phytium Technology Co.,Ltd. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../i3c_master_acpi.h" + +#define DEV_ID 0x0 +#define DEV_ID_I3C_MASTER 0x5034 + +#define CONF_STATUS0 0x4 +#define CONF_STATUS0_CMDR_DEPTH(x) (4 << (((x) & GENMASK(31, 29)) >> 29)) +#define CONF_STATUS0_ECC_CHK BIT(28) +#define CONF_STATUS0_INTEG_CHK BIT(27) +#define CONF_STATUS0_CSR_DAP_CHK BIT(26) +#define CONF_STATUS0_TRANS_TOUT_CHK BIT(25) +#define CONF_STATUS0_PROT_FAULTS_CHK BIT(24) +#define CONF_STATUS0_GPO_NUM(x) (((x) & GENMASK(23, 16)) >> 16) +#define CONF_STATUS0_GPI_NUM(x) (((x) & GENMASK(15, 8)) >> 8) +#define CONF_STATUS0_IBIR_DEPTH(x) (4 << (((x) & GENMASK(7, 6)) >> 7)) +#define CONF_STATUS0_SUPPORTS_DDR BIT(5) +#define CONF_STATUS0_SEC_MASTER BIT(4) +#define CONF_STATUS0_DEVS_NUM(x) ((x) & GENMASK(3, 0)) + +#define CONF_STATUS1 0x8 +#define CONF_STATUS1_IBI_HW_RES(x) ((((x) & GENMASK(31, 28)) >> 28) + 1) +#define CONF_STATUS1_CMD_DEPTH(x) (4 << (((x) & GENMASK(27, 26)) >> 26)) +#define CONF_STATUS1_SLVDDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21)) +#define CONF_STATUS1_SLVDDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16)) +#define CONF_STATUS1_IBI_DEPTH(x) (2 << (((x) & GENMASK(12, 10)) >> 10)) +#define CONF_STATUS1_RX_DEPTH(x) (8 << (((x) & GENMASK(9, 5)) >> 5)) +#define CONF_STATUS1_TX_DEPTH(x) (8 << ((x) & GENMASK(4, 0))) + +#define REV_ID 0xc +#define REV_ID_VID(id) (((id) & GENMASK(31, 20)) >> 20) +#define REV_ID_PID(id) (((id) & GENMASK(19, 8)) >> 8) +#define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 4)) >> 4) +#define REV_ID_REV_MINOR(id) ((id) & GENMASK(3, 0)) + +#define CTRL 0x10 +#define CTRL_DEV_EN BIT(31) +#define CTRL_HALT_EN BIT(30) +#define CTRL_MCS BIT(29) +#define CTRL_MCS_EN BIT(28) +#define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24)) +#define CTRL_HJ_DISEC BIT(8) +#define CTRL_MST_ACK BIT(7) +#define CTRL_HJ_ACK BIT(6) +#define CTRL_HJ_INIT BIT(5) +#define CTRL_MST_INIT BIT(4) +#define CTRL_AHDR_OPT BIT(3) +#define CTRL_PURE_BUS_MODE 0 +#define CTRL_MIXED_FAST_BUS_MODE 2 +#define CTRL_MIXED_SLOW_BUS_MODE 3 +#define CTRL_BUS_MODE_MASK GENMASK(1, 0) +#define THD_DELAY_MAX 3 + +#define PRESCL_CTRL0 0x14 +#define PRESCL_CTRL0_I2C(x) ((x) << 16) +#define PRESCL_CTRL0_I3C(x) (x) +#define PRESCL_CTRL0_MAX GENMASK(9, 0) + +#define PRESCL_CTRL1 0x18 +#define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8) +#define PRESCL_CTRL1_PP_LOW(x) ((x) << 8) +#define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0) +#define PRESCL_CTRL1_OD_LOW(x) (x) + +#define MST_IER 0x20 +#define MST_IDR 0x24 +#define MST_IMR 0x28 +#define MST_ICR 0x2c +#define MST_ISR 0x30 +#define MST_INT_HALTED BIT(18) +#define MST_INT_MR_DONE BIT(17) +#define MST_INT_IMM_COMP BIT(16) +#define MST_INT_TX_THR BIT(15) +#define MST_INT_TX_OVF BIT(14) +#define MST_INT_IBID_THR BIT(12) +#define MST_INT_IBID_UNF BIT(11) +#define MST_INT_IBIR_THR BIT(10) +#define MST_INT_IBIR_UNF BIT(9) +#define MST_INT_IBIR_OVF BIT(8) +#define MST_INT_RX_THR BIT(7) +#define MST_INT_RX_UNF BIT(6) +#define MST_INT_CMDD_EMP BIT(5) +#define MST_INT_CMDD_THR BIT(4) +#define MST_INT_CMDD_OVF BIT(3) +#define MST_INT_CMDR_THR BIT(2) +#define MST_INT_CMDR_UNF BIT(1) +#define MST_INT_CMDR_OVF BIT(0) + +#define MST_STATUS0 0x34 +#define MST_STATUS0_IDLE BIT(18) +#define MST_STATUS0_HALTED BIT(17) +#define MST_STATUS0_MASTER_MODE BIT(16) +#define MST_STATUS0_TX_FULL BIT(13) +#define MST_STATUS0_IBID_FULL BIT(12) 
+#define MST_STATUS0_IBIR_FULL BIT(11) +#define MST_STATUS0_RX_FULL BIT(10) +#define MST_STATUS0_CMDD_FULL BIT(9) +#define MST_STATUS0_CMDR_FULL BIT(8) +#define MST_STATUS0_TX_EMP BIT(5) +#define MST_STATUS0_IBID_EMP BIT(4) +#define MST_STATUS0_IBIR_EMP BIT(3) +#define MST_STATUS0_RX_EMP BIT(2) +#define MST_STATUS0_CMDD_EMP BIT(1) +#define MST_STATUS0_CMDR_EMP BIT(0) + +#define CMDR 0x38 +#define CMDR_NO_ERROR 0 +#define CMDR_DDR_PREAMBLE_ERROR 1 +#define CMDR_DDR_PARITY_ERROR 2 +#define CMDR_DDR_RX_FIFO_OVF 3 +#define CMDR_DDR_TX_FIFO_UNF 4 +#define CMDR_M0_ERROR 5 +#define CMDR_M1_ERROR 6 +#define CMDR_M2_ERROR 7 +#define CMDR_MST_ABORT 8 +#define CMDR_NACK_RESP 9 +#define CMDR_INVALID_DA 10 +#define CMDR_DDR_DROPPED 11 +#define CMDR_ERROR(x) (((x) & GENMASK(27, 24)) >> 24) +#define CMDR_XFER_BYTES(x) (((x) & GENMASK(19, 8)) >> 8) +#define CMDR_CMDID_HJACK_DISEC 0xfe +#define CMDR_CMDID_HJACK_ENTDAA 0xff +#define CMDR_CMDID(x) ((x) & GENMASK(7, 0)) + +#define IBIR 0x3c +#define IBIR_ACKED BIT(12) +#define IBIR_SLVID(x) (((x) & GENMASK(11, 8)) >> 8) +#define IBIR_ERROR BIT(7) +#define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2) +#define IBIR_TYPE_IBI 0 +#define IBIR_TYPE_HJ 1 +#define IBIR_TYPE_MR 2 +#define IBIR_TYPE(x) ((x) & GENMASK(1, 0)) + +#define SLV_IER 0x40 +#define SLV_IDR 0x44 +#define SLV_IMR 0x48 +#define SLV_ICR 0x4c +#define SLV_ISR 0x50 +#define SLV_INT_TM BIT(20) +#define SLV_INT_ERROR BIT(19) +#define SLV_INT_EVENT_UP BIT(18) +#define SLV_INT_HJ_DONE BIT(17) +#define SLV_INT_MR_DONE BIT(16) +#define SLV_INT_DA_UPD BIT(15) +#define SLV_INT_SDR_FAIL BIT(14) +#define SLV_INT_DDR_FAIL BIT(13) +#define SLV_INT_M_RD_ABORT BIT(12) +#define SLV_INT_DDR_RX_THR BIT(11) +#define SLV_INT_DDR_TX_THR BIT(10) +#define SLV_INT_SDR_RX_THR BIT(9) +#define SLV_INT_SDR_TX_THR BIT(8) +#define SLV_INT_DDR_RX_UNF BIT(7) +#define SLV_INT_DDR_TX_OVF BIT(6) +#define SLV_INT_SDR_RX_UNF BIT(5) +#define SLV_INT_SDR_TX_OVF BIT(4) +#define SLV_INT_DDR_RD_COMP BIT(3) +#define SLV_INT_DDR_WR_COMP BIT(2) +#define SLV_INT_SDR_RD_COMP BIT(1) +#define SLV_INT_SDR_WR_COMP BIT(0) + +#define SLV_STATUS0 0x54 +#define SLV_STATUS0_REG_ADDR(s) (((s) & GENMASK(23, 16)) >> 16) +#define SLV_STATUS0_XFRD_BYTES(s) ((s) & GENMASK(15, 0)) + +#define SLV_STATUS1 0x58 +#define SLV_STATUS1_AS(s) (((s) & GENMASK(21, 20)) >> 20) +#define SLV_STATUS1_VEN_TM BIT(19) +#define SLV_STATUS1_HJ_DIS BIT(18) +#define SLV_STATUS1_MR_DIS BIT(17) +#define SLV_STATUS1_PROT_ERR BIT(16) +#define SLV_STATUS1_DA(x) (((s) & GENMASK(15, 9)) >> 9) +#define SLV_STATUS1_HAS_DA BIT(8) +#define SLV_STATUS1_DDR_RX_FULL BIT(7) +#define SLV_STATUS1_DDR_TX_FULL BIT(6) +#define SLV_STATUS1_DDR_RX_EMPTY BIT(5) +#define SLV_STATUS1_DDR_TX_EMPTY BIT(4) +#define SLV_STATUS1_SDR_RX_FULL BIT(3) +#define SLV_STATUS1_SDR_TX_FULL BIT(2) +#define SLV_STATUS1_SDR_RX_EMPTY BIT(1) +#define SLV_STATUS1_SDR_TX_EMPTY BIT(0) + +#define CMD0_FIFO 0x60 +#define CMD0_FIFO_IS_DDR BIT(31) +#define CMD0_FIFO_IS_CCC BIT(30) +#define CMD0_FIFO_BCH BIT(29) +#define XMIT_BURST_STATIC_SUBADDR 0 +#define XMIT_SINGLE_INC_SUBADDR 1 +#define XMIT_SINGLE_STATIC_SUBADDR 2 +#define XMIT_BURST_WITHOUT_SUBADDR 3 +#define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27) +#define CMD0_FIFO_SBCA BIT(26) +#define CMD0_FIFO_RSBC BIT(25) +#define CMD0_FIFO_IS_10B BIT(24) +#define CMD0_FIFO_PL_LEN(l) ((l) << 12) +#define CMD0_FIFO_PL_LEN_MAX 4095 +#define CMD0_FIFO_DEV_ADDR(a) ((a) << 1) +#define CMD0_FIFO_RNW BIT(0) + +#define CMD1_FIFO 0x64 +#define CMD1_FIFO_CMDID(id) ((id) << 24) +#define 
CMD1_FIFO_CSRADDR(a) (a) +#define CMD1_FIFO_CCC(id) (id) + +#define TX_FIFO 0x68 + +#define IMD_CMD0 0x70 +#define IMD_CMD0_PL_LEN(l) ((l) << 12) +#define IMD_CMD0_DEV_ADDR(a) ((a) << 1) +#define IMD_CMD0_RNW BIT(0) + +#define IMD_CMD1 0x74 +#define IMD_CMD1_CCC(id) (id) + +#define IMD_DATA 0x78 +#define RX_FIFO 0x80 +#define IBI_DATA_FIFO 0x84 +#define SLV_DDR_TX_FIFO 0x88 +#define SLV_DDR_RX_FIFO 0x8c + +#define CMD_IBI_THR_CTRL 0x90 +#define IBIR_THR(t) ((t) << 24) +#define CMDR_THR(t) ((t) << 16) +#define IBI_THR(t) ((t) << 8) +#define CMD_THR(t) (t) + +#define TX_RX_THR_CTRL 0x94 +#define RX_THR(t) ((t) << 16) +#define TX_THR(t) (t) + +#define SLV_DDR_TX_RX_THR_CTRL 0x98 +#define SLV_DDR_RX_THR(t) ((t) << 16) +#define SLV_DDR_TX_THR(t) (t) + +#define FLUSH_CTRL 0x9c +#define FLUSH_IBI_RESP BIT(23) +#define FLUSH_CMD_RESP BIT(22) +#define FLUSH_SLV_DDR_RX_FIFO BIT(22) +#define FLUSH_SLV_DDR_TX_FIFO BIT(21) +#define FLUSH_IMM_FIFO BIT(20) +#define FLUSH_IBI_FIFO BIT(19) +#define FLUSH_RX_FIFO BIT(18) +#define FLUSH_TX_FIFO BIT(17) +#define FLUSH_CMD_FIFO BIT(16) + +#define TTO_PRESCL_CTRL0 0xb0 +#define TTO_PRESCL_CTRL0_DIVB(x) ((x) << 16) +#define TTO_PRESCL_CTRL0_DIVA(x) (x) + +#define TTO_PRESCL_CTRL1 0xb4 +#define TTO_PRESCL_CTRL1_DIVB(x) ((x) << 16) +#define TTO_PRESCL_CTRL1_DIVA(x) (x) + +#define DEVS_CTRL 0xb8 +#define DEVS_CTRL_DEV_CLR_SHIFT 16 +#define DEVS_CTRL_DEV_CLR_ALL GENMASK(31, 16) +#define DEVS_CTRL_DEV_CLR(dev) BIT(16 + (dev)) +#define DEVS_CTRL_DEV_ACTIVE(dev) BIT(dev) +#define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0) +#define MAX_DEVS 16 + +#define DEV_ID_RR0(d) (0xc0 + ((d) * 0x10)) +#define DEV_ID_RR0_LVR_EXT_ADDR BIT(11) +#define DEV_ID_RR0_HDR_CAP BIT(10) +#define DEV_ID_RR0_IS_I3C BIT(9) +#define DEV_ID_RR0_DEV_ADDR_MASK (GENMASK(6, 0) | GENMASK(15, 13)) +#define DEV_ID_RR0_SET_DEV_ADDR(a) (((a) & GENMASK(6, 0)) | \ + (((a) & GENMASK(9, 7)) << 6)) +#define DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | \ + (((x) >> 6) & GENMASK(9, 7))) + +#define DEV_ID_RR1(d) (0xc4 + ((d) * 0x10)) +#define DEV_ID_RR1_PID_MSB(pid) (pid) + +#define DEV_ID_RR2(d) (0xc8 + ((d) * 0x10)) +#define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16) +#define DEV_ID_RR2_BCR(bcr) ((bcr) << 8) +#define DEV_ID_RR2_DCR(dcr) (dcr) +#define DEV_ID_RR2_LVR(lvr) (lvr) + +#define SIR_MAP(x) (0x180 + ((x) * 4)) +#define SIR_MAP_DEV_REG(d) SIR_MAP((d) / 2) +#define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0)) +#define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0)) +#define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 
16 : 0)) +#define DEV_ROLE_SLAVE 0 +#define DEV_ROLE_MASTER 1 +#define SIR_MAP_DEV_ROLE(role) ((role) << 14) +#define SIR_MAP_DEV_SLOW BIT(13) +#define SIR_MAP_DEV_PL(l) ((l) << 8) +#define SIR_MAP_PL_MAX GENMASK(4, 0) +#define SIR_MAP_DEV_DA(a) ((a) << 1) +#define SIR_MAP_DEV_ACK BIT(0) + +#define GPIR_WORD(x) (0x200 + ((x) * 4)) +#define GPI_REG(val, id) \ + (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0)) + +#define GPOR_WORD(x) (0x220 + ((x) * 4)) +#define GPO_REG(val, id) \ + (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0)) + +#define ASF_INT_STATUS 0x300 +#define ASF_INT_RAW_STATUS 0x304 +#define ASF_INT_MASK 0x308 +#define ASF_INT_TEST 0x30c +#define ASF_INT_FATAL_SELECT 0x310 +#define ASF_INTEGRITY_ERR BIT(6) +#define ASF_PROTOCOL_ERR BIT(5) +#define ASF_TRANS_TIMEOUT_ERR BIT(4) +#define ASF_CSR_ERR BIT(3) +#define ASF_DAP_ERR BIT(2) +#define ASF_SRAM_UNCORR_ERR BIT(1) +#define ASF_SRAM_CORR_ERR BIT(0) + +#define ASF_SRAM_CORR_FAULT_STATUS 0x320 +#define ASF_SRAM_UNCORR_FAULT_STATUS 0x324 +#define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24) +#define ASF_SRAM_CORR_FAULT_ADDR(x) ((x) & GENMASK(23, 0)) + +#define ASF_SRAM_FAULT_STATS 0x328 +#define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16) +#define ASF_SRAM_FAULT_CORR_STATS(x) ((x) & GENMASK(15, 0)) + +#define ASF_TRANS_TOUT_CTRL 0x330 +#define ASF_TRANS_TOUT_EN BIT(31) +#define ASF_TRANS_TOUT_VAL(x) (x) + +#define ASF_TRANS_TOUT_FAULT_MASK 0x334 +#define ASF_TRANS_TOUT_FAULT_STATUS 0x338 +#define ASF_TRANS_TOUT_FAULT_APB BIT(3) +#define ASF_TRANS_TOUT_FAULT_SCL_LOW BIT(2) +#define ASF_TRANS_TOUT_FAULT_SCL_HIGH BIT(1) +#define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0) + +#define ASF_PROTO_FAULT_MASK 0x340 +#define ASF_PROTO_FAULT_STATUS 0x344 +#define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31) +#define ASF_PROTO_FAULT_SLVDDR_FAIL BIT(30) +#define ASF_PROTO_FAULT_S(x) BIT(16 + (x)) +#define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15) +#define ASF_PROTO_FAULT_MSTDDR_FAIL BIT(14) +#define ASF_PROTO_FAULT_M(x) BIT(x) + +#define I3C_CONTROL_DEFAULT_I2C_SCL (1000000) +#define I3C_CONTROL_DEFAULT_I3C_SCL (1000000) +#define PHYTIUM_I3C_DEV_MAX_NUM (12) +#define PHYTIUM_I3C_CMDR_MAX_TIMES (32) + +struct phytium_i3c_master_caps { + u32 cmdfifodepth; + u32 cmdrfifodepth; + u32 txfifodepth; + u32 rxfifodepth; + u32 ibirfifodepth; +}; + +struct phytium_i3c_cmd { + u32 cmd0; + u32 cmd1; + u32 tx_len; + const void *tx_buf; + u32 rx_len; + void *rx_buf; + u32 error; +}; + +struct phytium_i3c_xfer { + struct list_head node; + struct completion comp; + int ret; + unsigned int ncmds; + struct phytium_i3c_cmd cmds[]; +}; + +struct phytium_i3c_data { + u8 thd_delay_ns; +}; + +struct phytium_i3c_slave_dev { + u32 dev_rr0; + u32 dev_rr1; + u32 dev_rr2; +}; + +struct phytium_i3c_master { + struct work_struct hj_work; + struct i3c_master_controller base; + u32 free_rr_slots; + unsigned int maxdevs; + struct { + unsigned int num_slots; + struct i3c_dev_desc **slots; + spinlock_t lock; + } ibi; + struct { + struct list_head list; + struct phytium_i3c_xfer *cur; + spinlock_t lock; + } xferqueue; + void __iomem *regs; + struct clk *sysclk; + struct clk *pclk; + u32 sysclk_rate; + u32 prescl0; + u32 prescl1; + u32 ctrl_thd_del; + u32 ctrl_info; + u32 dev_valid; + struct device *dev; + struct phytium_i3c_master_caps caps; + unsigned long i3c_scl_lim; + const struct phytium_i3c_data *devdata; + struct phytium_i3c_slave_dev devinfo[PHYTIUM_I3C_DEV_MAX_NUM]; +}; + +static inline struct phytium_i3c_master * +to_phytium_i3c_master(struct i3c_master_controller *master) +{ + return 
container_of(master, struct phytium_i3c_master, base); +} + +static void phytium_i3c_master_wr_to_tx_fifo(struct phytium_i3c_master *master, + const u8 *bytes, int nbytes) +{ + writesl(master->regs + TX_FIFO, bytes, nbytes / 4); + if (nbytes & 3) { + u32 tmp = 0; + + memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3); + writesl(master->regs + TX_FIFO, &tmp, 1); + } +} + +static void phytium_i3c_master_rd_from_rx_fifo(struct phytium_i3c_master *master, + u8 *bytes, int nbytes) +{ + readsl(master->regs + RX_FIFO, bytes, nbytes / 4); + if (nbytes & 3) { + u32 tmp; + + readsl(master->regs + RX_FIFO, &tmp, 1); + memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3); + } +} + +static bool phytium_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m, + const struct i3c_ccc_cmd *cmd) +{ + if (cmd->ndests > 1) + return false; + + switch (cmd->id) { + case I3C_CCC_ENEC(true): + case I3C_CCC_ENEC(false): + case I3C_CCC_DISEC(true): + case I3C_CCC_DISEC(false): + case I3C_CCC_ENTAS(0, true): + case I3C_CCC_ENTAS(0, false): + case I3C_CCC_RSTDAA(true): + case I3C_CCC_RSTDAA(false): + case I3C_CCC_ENTDAA: + case I3C_CCC_SETMWL(true): + case I3C_CCC_SETMWL(false): + case I3C_CCC_SETMRL(true): + case I3C_CCC_SETMRL(false): + case I3C_CCC_DEFSLVS: + case I3C_CCC_ENTHDR(0): + case I3C_CCC_SETDASA: + case I3C_CCC_SETNEWDA: + case I3C_CCC_GETMWL: + case I3C_CCC_GETMRL: + case I3C_CCC_GETPID: + case I3C_CCC_GETBCR: + case I3C_CCC_GETDCR: + case I3C_CCC_GETSTATUS: + case I3C_CCC_GETACCMST: + case I3C_CCC_GETMXDS: + case I3C_CCC_GETHDRCAP: + return true; + default: + break; + } + + return false; +} + +static int phytium_i3c_master_disable(struct phytium_i3c_master *master) +{ + u32 status; + + writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, master->regs + CTRL); + + return readl_poll_timeout(master->regs + MST_STATUS0, status, + status & MST_STATUS0_IDLE, 10, 1000000); +} + +static void phytium_i3c_master_enable(struct phytium_i3c_master *master) +{ + writel(readl(master->regs + CTRL) | CTRL_DEV_EN, master->regs + CTRL); +} + +static struct phytium_i3c_xfer * +phytium_i3c_master_alloc_xfer(struct phytium_i3c_master *master, unsigned int ncmds) +{ + struct phytium_i3c_xfer *xfer; + + xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL); + if (!xfer) + return NULL; + + INIT_LIST_HEAD(&xfer->node); + xfer->ncmds = ncmds; + xfer->ret = -ETIMEDOUT; + + return xfer; +} + +static void phytium_i3c_master_free_xfer(struct phytium_i3c_xfer *xfer) +{ + kfree(xfer); +} + +static void phytium_i3c_master_start_xfer_locked(struct phytium_i3c_master *master) +{ + struct phytium_i3c_xfer *xfer = master->xferqueue.cur; + unsigned int i; + + if (!xfer) + return; + + phytium_i3c_master_enable(master); + + writel(MST_INT_CMDD_EMP, master->regs + MST_ICR); + for (i = 0; i < xfer->ncmds; i++) { + struct phytium_i3c_cmd *cmd = &xfer->cmds[i]; + + phytium_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf, + cmd->tx_len); + } + for (i = 0; i < xfer->ncmds; i++) { + struct phytium_i3c_cmd *cmd = &xfer->cmds[i]; + + writel(cmd->cmd1 | CMD1_FIFO_CMDID(i), + master->regs + CMD1_FIFO); + writel(cmd->cmd0, master->regs + CMD0_FIFO); + } + + writel(readl(master->regs + CTRL) | CTRL_MCS, + master->regs + CTRL); + writel(MST_INT_CMDD_EMP, master->regs + MST_IER); +} + +static void phytium_i3c_master_end_xfer_locked(struct phytium_i3c_master *master, + u32 isr) +{ + struct phytium_i3c_xfer *xfer = master->xferqueue.cur; + int i, ret = 0; + u32 status0; + + if (!xfer) + return; + + if (!(isr & MST_INT_CMDD_EMP)) + return; + + 
writel(MST_INT_CMDD_EMP, master->regs + MST_IDR); + + for (status0 = readl(master->regs + MST_STATUS0); + !(status0 & MST_STATUS0_CMDR_EMP); + status0 = readl(master->regs + MST_STATUS0)) { + struct phytium_i3c_cmd *cmd; + u32 cmdr, rx_len, id; + + cmdr = readl(master->regs + CMDR); + id = CMDR_CMDID(cmdr); + if (id == CMDR_CMDID_HJACK_DISEC || + id == CMDR_CMDID_HJACK_ENTDAA || + WARN_ON(id >= xfer->ncmds)) + continue; + + cmd = &xfer->cmds[CMDR_CMDID(cmdr)]; + rx_len = min_t(u32, CMDR_XFER_BYTES(cmdr), cmd->rx_len); + phytium_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len); + cmd->error = CMDR_ERROR(cmdr); + } + + for (i = 0; i < xfer->ncmds; i++) { + switch (xfer->cmds[i].error) { + case CMDR_NO_ERROR: + break; + + case CMDR_DDR_PREAMBLE_ERROR: + case CMDR_DDR_PARITY_ERROR: + case CMDR_M0_ERROR: + case CMDR_M1_ERROR: + case CMDR_M2_ERROR: + case CMDR_MST_ABORT: + case CMDR_NACK_RESP: + case CMDR_DDR_DROPPED: + ret = -EIO; + break; + + case CMDR_DDR_RX_FIFO_OVF: + case CMDR_DDR_TX_FIFO_UNF: + ret = -ENOSPC; + break; + + case CMDR_INVALID_DA: + default: + ret = -EINVAL; + break; + } + } + + xfer->ret = ret; + complete(&xfer->comp); + + xfer = list_first_entry_or_null(&master->xferqueue.list, + struct phytium_i3c_xfer, node); + if (xfer) + list_del_init(&xfer->node); + + master->xferqueue.cur = xfer; + phytium_i3c_master_start_xfer_locked(master); +} + +static void phytium_i3c_master_queue_xfer(struct phytium_i3c_master *master, + struct phytium_i3c_xfer *xfer) +{ + unsigned long flags; + + init_completion(&xfer->comp); + spin_lock_irqsave(&master->xferqueue.lock, flags); + if (master->xferqueue.cur) { + list_add_tail(&xfer->node, &master->xferqueue.list); + } else { + master->xferqueue.cur = xfer; + phytium_i3c_master_start_xfer_locked(master); + } + spin_unlock_irqrestore(&master->xferqueue.lock, flags); +} + +static void phytium_i3c_master_unqueue_xfer(struct phytium_i3c_master *master, + struct phytium_i3c_xfer *xfer) +{ + unsigned long flags; + + spin_lock_irqsave(&master->xferqueue.lock, flags); + if (master->xferqueue.cur == xfer) { + u32 status; + + writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, + master->regs + CTRL); + readl_poll_timeout_atomic(master->regs + MST_STATUS0, status, + status & MST_STATUS0_IDLE, 10, + 1000000); + master->xferqueue.cur = NULL; + writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO | + FLUSH_CMD_RESP, + master->regs + FLUSH_CTRL); + writel(MST_INT_CMDD_EMP, master->regs + MST_IDR); + writel(readl(master->regs + CTRL) | CTRL_DEV_EN, + master->regs + CTRL); + } else { + list_del_init(&xfer->node); + } + spin_unlock_irqrestore(&master->xferqueue.lock, flags); +} + +static enum i3c_error_code phytium_i3c_cmd_get_err(struct phytium_i3c_cmd *cmd) +{ + switch (cmd->error) { + case CMDR_M0_ERROR: + return I3C_ERROR_M0; + + case CMDR_M1_ERROR: + return I3C_ERROR_M1; + + case CMDR_M2_ERROR: + case CMDR_NACK_RESP: + return I3C_ERROR_M2; + + default: + break; + } + + return I3C_ERROR_UNKNOWN; +} + +static int phytium_i3c_master_send_ccc_cmd(struct i3c_master_controller *m, + struct i3c_ccc_cmd *cmd) +{ + struct phytium_i3c_master *master = to_phytium_i3c_master(m); + struct phytium_i3c_xfer *xfer; + struct phytium_i3c_cmd *ccmd; + int ret; + + xfer = phytium_i3c_master_alloc_xfer(master, 1); + if (!xfer) + return -ENOMEM; + + ccmd = xfer->cmds; + ccmd->cmd1 = CMD1_FIFO_CCC(cmd->id); + ccmd->cmd0 = CMD0_FIFO_IS_CCC | + CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len); + + if (cmd->id & I3C_CCC_DIRECT) + ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr); + 
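+ /* + * The payload direction follows cmd->rnw: read data lands in the RX + * FIFO and is copied out on completion, while write data is staged in + * the TX FIFO before the command descriptor is queued. + */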
+ if (cmd->rnw) { + ccmd->cmd0 |= CMD0_FIFO_RNW; + ccmd->rx_buf = cmd->dests[0].payload.data; + ccmd->rx_len = cmd->dests[0].payload.len; + } else { + ccmd->tx_buf = cmd->dests[0].payload.data; + ccmd->tx_len = cmd->dests[0].payload.len; + } + + phytium_i3c_master_queue_xfer(master, xfer); + if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000))) + phytium_i3c_master_unqueue_xfer(master, xfer); + + /* A failed 5-byte GETMXDS (format 2) needs a retransmission as the 2-byte format 1 */ + if (xfer->ret && cmd->id == I3C_CCC_GETMXDS) { + if (cmd->dests[0].payload.len == 5) { + cmd->dests[0].payload.len = 2; + ccmd->rx_len = cmd->dests[0].payload.len; + ccmd->cmd0 &= 0xff000fff; /* clear the CMD0_FIFO_PL_LEN field */ + ccmd->cmd0 |= CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len); + phytium_i3c_master_queue_xfer(master, xfer); + if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000))) + phytium_i3c_master_unqueue_xfer(master, xfer); + } + } + ret = xfer->ret; + cmd->err = phytium_i3c_cmd_get_err(&xfer->cmds[0]); + phytium_i3c_master_free_xfer(xfer); + + return ret; +} + +static int phytium_i3c_master_priv_xfers(struct i3c_dev_desc *dev, + struct i3c_priv_xfer *xfers, + int nxfers) +{ + struct i3c_master_controller *m = i3c_dev_get_master(dev); + struct phytium_i3c_master *master = to_phytium_i3c_master(m); + int txslots = 0, rxslots = 0, i, ret; + struct phytium_i3c_xfer *phytium_xfer; + + for (i = 0; i < nxfers; i++) { + if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX) + return -EOPNOTSUPP; + } + + if (!nxfers) + return 0; + + if (nxfers > master->caps.cmdfifodepth || + nxfers > master->caps.cmdrfifodepth) + return -EOPNOTSUPP; + + /* + * First make sure that all transactions (block of transfers separated + * by a STOP marker) fit in the FIFOs. + */ + for (i = 0; i < nxfers; i++) { + if (xfers[i].rnw) + rxslots += DIV_ROUND_UP(xfers[i].len, 4); + else + txslots += DIV_ROUND_UP(xfers[i].len, 4); + } + + if (rxslots > master->caps.rxfifodepth || + txslots > master->caps.txfifodepth) + return -EOPNOTSUPP; + + phytium_xfer = phytium_i3c_master_alloc_xfer(master, nxfers); + if (!phytium_xfer) + return -ENOMEM; + + for (i = 0; i < nxfers; i++) { + struct phytium_i3c_cmd *ccmd = &phytium_xfer->cmds[i]; + u32 pl_len = xfers[i].len; + + ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(dev->info.dyn_addr) | + CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR); + + if (xfers[i].rnw) { + ccmd->cmd0 |= CMD0_FIFO_RNW; + ccmd->rx_buf = xfers[i].data.in; + ccmd->rx_len = xfers[i].len; + pl_len++; + } else { + ccmd->tx_buf = xfers[i].data.out; + ccmd->tx_len = xfers[i].len; + } + + ccmd->cmd0 |= CMD0_FIFO_PL_LEN(pl_len); + + if (i < nxfers - 1) + ccmd->cmd0 |= CMD0_FIFO_RSBC; + + if (!i) + ccmd->cmd0 |= CMD0_FIFO_BCH; + } + + phytium_i3c_master_queue_xfer(master, phytium_xfer); + if (!wait_for_completion_timeout(&phytium_xfer->comp, + msecs_to_jiffies(1000))) + phytium_i3c_master_unqueue_xfer(master, phytium_xfer); + + ret = phytium_xfer->ret; + + for (i = 0; i < nxfers; i++) + xfers[i].err = phytium_i3c_cmd_get_err(&phytium_xfer->cmds[i]); + + phytium_i3c_master_free_xfer(phytium_xfer); + + return ret; +} + +static int phytium_i3c_master_i2c_xfers(struct i2c_dev_desc *dev, + struct i2c_msg *xfers, int nxfers) +{ + struct i3c_master_controller *m = i2c_dev_get_master(dev); + struct phytium_i3c_master *master = to_phytium_i3c_master(m); + unsigned int nrxwords = 0, ntxwords = 0; + struct phytium_i3c_xfer *xfer; + int i, ret = 0; + + if (nxfers > master->caps.cmdfifodepth) + return -EOPNOTSUPP; + + for (i = 0; i < nxfers; i++) { + if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX) + return
-EOPNOTSUPP; + + if (xfers[i].flags & I2C_M_RD) + nrxwords += DIV_ROUND_UP(xfers[i].len, 4); + else + ntxwords += DIV_ROUND_UP(xfers[i].len, 4); + } + + if (ntxwords > master->caps.txfifodepth || + nrxwords > master->caps.rxfifodepth) + return -EOPNOTSUPP; + + xfer = phytium_i3c_master_alloc_xfer(master, nxfers); + if (!xfer) + return -ENOMEM; + + for (i = 0; i < nxfers; i++) { + struct phytium_i3c_cmd *ccmd = &xfer->cmds[i]; + + ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(xfers[i].addr) | + CMD0_FIFO_PL_LEN(xfers[i].len) | + CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR); + + if (xfers[i].flags & I2C_M_TEN) + ccmd->cmd0 |= CMD0_FIFO_IS_10B; + + if (xfers[i].flags & I2C_M_RD) { + ccmd->cmd0 |= CMD0_FIFO_RNW; + ccmd->rx_buf = xfers[i].buf; + ccmd->rx_len = xfers[i].len; + } else { + ccmd->tx_buf = xfers[i].buf; + ccmd->tx_len = xfers[i].len; + } + } + + phytium_i3c_master_queue_xfer(master, xfer); + if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000))) + phytium_i3c_master_unqueue_xfer(master, xfer); + + ret = xfer->ret; + phytium_i3c_master_free_xfer(xfer); + + return ret; +} + +struct phytium_i3c_i2c_dev_data { + u16 id; + s16 ibi; + struct i3c_generic_ibi_pool *ibi_pool; +}; + +static u32 prepare_rr0_dev_address(u32 addr) +{ + u32 ret = (addr << 1) & 0xff; + + /* RR0[7:1] = addr[6:0] */ + ret |= (addr & GENMASK(6, 0)) << 1; + + /* RR0[15:13] = addr[9:7] */ + ret |= (addr & GENMASK(9, 7)) << 6; + + /* RR0[0] = ~XOR(addr[6:0]) */ + if (!(hweight8(addr & 0x7f) & 1)) + ret |= 1; + + return ret; +} + +static void phytium_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev) +{ + struct i3c_master_controller *m = i3c_dev_get_master(dev); + struct phytium_i3c_master *master = to_phytium_i3c_master(m); + struct phytium_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); + u32 rr; + + rr = prepare_rr0_dev_address(dev->info.dyn_addr ? 
+ dev->info.dyn_addr : + dev->info.static_addr); + writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id)); +} + +static int phytium_i3c_master_get_rr_slot(struct phytium_i3c_master *master, + u8 dyn_addr) +{ + unsigned long activedevs; + u32 rr; + int i; + + if (!dyn_addr) { + if (!master->free_rr_slots) + return -ENOSPC; + + return ffs(master->free_rr_slots) - 1; + } + + activedevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK; + activedevs &= ~BIT(0); + + for_each_set_bit(i, &activedevs, master->maxdevs + 1) { + rr = readl(master->regs + DEV_ID_RR0(i)); + if (!(rr & DEV_ID_RR0_IS_I3C) || + DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr) + continue; + + return i; + } + + return -EINVAL; +} + +static int phytium_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, + u8 old_dyn_addr) +{ + phytium_i3c_master_upd_i3c_addr(dev); + + return 0; +} + +static int phytium_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev) +{ + struct i3c_master_controller *m = i3c_dev_get_master(dev); + struct phytium_i3c_master *master = to_phytium_i3c_master(m); + struct phytium_i3c_i2c_dev_data *data; + int slot; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + slot = phytium_i3c_master_get_rr_slot(master, dev->info.dyn_addr); + if (slot < 0) { + kfree(data); + return slot; + } + + data->ibi = -1; + data->id = slot; + i3c_dev_set_master_data(dev, data); + master->free_rr_slots &= ~BIT(slot); + + if (!dev->info.dyn_addr) { + phytium_i3c_master_upd_i3c_addr(dev); + writel(readl(master->regs + DEVS_CTRL) | + DEVS_CTRL_DEV_ACTIVE(data->id), + master->regs + DEVS_CTRL); + } + + return 0; +} + +static void phytium_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev) +{ + struct i3c_master_controller *m = i3c_dev_get_master(dev); + struct phytium_i3c_master *master = to_phytium_i3c_master(m); + struct phytium_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); + + writel(readl(master->regs + DEVS_CTRL) | + DEVS_CTRL_DEV_CLR(data->id), + master->regs + DEVS_CTRL); + + i3c_dev_set_master_data(dev, NULL); + master->free_rr_slots |= BIT(data->id); + kfree(data); +} + +static int phytium_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev) +{ + struct i3c_master_controller *m = i2c_dev_get_master(dev); + struct phytium_i3c_master *master = to_phytium_i3c_master(m); + struct phytium_i3c_i2c_dev_data *data; + int slot; + + slot = phytium_i3c_master_get_rr_slot(master, 0); + if (slot < 0) + return slot; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->id = slot; + master->free_rr_slots &= ~BIT(slot); + i2c_dev_set_master_data(dev, data); + + writel(prepare_rr0_dev_address(dev->addr), + master->regs + DEV_ID_RR0(data->id)); + writel(dev->lvr, master->regs + DEV_ID_RR2(data->id)); + writel(readl(master->regs + DEVS_CTRL) | + DEVS_CTRL_DEV_ACTIVE(data->id), + master->regs + DEVS_CTRL); + + return 0; +} + +static void phytium_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev) +{ + struct i3c_master_controller *m = i2c_dev_get_master(dev); + struct phytium_i3c_master *master = to_phytium_i3c_master(m); + struct phytium_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev); + + writel(readl(master->regs + DEVS_CTRL) | + DEVS_CTRL_DEV_CLR(data->id), + master->regs + DEVS_CTRL); + master->free_rr_slots |= BIT(data->id); + + i2c_dev_set_master_data(dev, NULL); + kfree(data); +} + +static void phytium_i3c_master_bus_cleanup(struct i3c_master_controller *m) +{ + struct phytium_i3c_master *master = to_phytium_i3c_master(m); + + 
phytium_i3c_master_disable(master); +} + +static void phytium_i3c_master_dev_rr_to_info(struct phytium_i3c_master *master, + unsigned int slot, + struct i3c_device_info *info) +{ + u32 rr; + + memset(info, 0, sizeof(*info)); + rr = readl(master->regs + DEV_ID_RR0(slot)); + info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr); + rr = readl(master->regs + DEV_ID_RR2(slot)); + info->dcr = rr; + info->bcr = rr >> 8; + info->pid = rr >> 16; + info->pid |= (u64)readl(master->regs + DEV_ID_RR1(slot)) << 16; +} + +static void phytium_i3c_master_upd_i3c_scl_lim(struct phytium_i3c_master *master) +{ + struct i3c_master_controller *m = &master->base; + unsigned long i3c_lim_period, pres_step, ncycles; + struct i3c_bus *bus = i3c_master_get_bus(m); + unsigned long new_i3c_scl_lim = 0; + struct i3c_dev_desc *dev; + u32 prescl1, ctrl; + + i3c_bus_for_each_i3cdev(bus, dev) { + unsigned long max_fscl; + + max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds), + I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds)); + switch (max_fscl) { + case I3C_SDR1_FSCL_8MHZ: + max_fscl = 8000000; + break; + case I3C_SDR2_FSCL_6MHZ: + max_fscl = 6000000; + break; + case I3C_SDR3_FSCL_4MHZ: + max_fscl = 4000000; + break; + case I3C_SDR4_FSCL_2MHZ: + max_fscl = 2000000; + break; + case I3C_SDR0_FSCL_MAX: + default: + max_fscl = 0; + break; + } + + if (max_fscl && + (new_i3c_scl_lim > max_fscl || !new_i3c_scl_lim)) + new_i3c_scl_lim = max_fscl; + } + + /* Only update PRESCL_CTRL1 if the I3C SCL limitation has changed. */ + if (new_i3c_scl_lim == master->i3c_scl_lim) + return; + master->i3c_scl_lim = new_i3c_scl_lim; + if (!new_i3c_scl_lim) + return; + pres_step = 1000000000UL / (bus->scl_rate.i3c * 4); + + /* Configure PP_LOW to meet I3C slave limitations. */ + prescl1 = readl(master->regs + PRESCL_CTRL1) & + ~PRESCL_CTRL1_PP_LOW_MASK; + ctrl = readl(master->regs + CTRL); + + i3c_lim_period = DIV_ROUND_UP(1000000000, master->i3c_scl_lim); + ncycles = DIV_ROUND_UP(i3c_lim_period, pres_step); + if (ncycles < 4) + ncycles = 0; + else + ncycles -= 4; + + prescl1 |= PRESCL_CTRL1_PP_LOW(ncycles); + + /* Disable I3C master before updating PRESCL_CTRL1. */ + if (ctrl & CTRL_DEV_EN) + phytium_i3c_master_disable(master); + + writel(prescl1, master->regs + PRESCL_CTRL1); + + if (ctrl & CTRL_DEV_EN) + phytium_i3c_master_enable(master); +} + +static int phytium_i3c_master_do_daa(struct i3c_master_controller *m) +{ + struct phytium_i3c_master *master = to_phytium_i3c_master(m); + unsigned long olddevs, newdevs; + int ret, slot; + u8 addrs[MAX_DEVS] = { }; + u8 last_addr = 0; + + olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK; + olddevs |= BIT(0); + + /* Prepare RR slots before launching DAA. */ + for_each_clear_bit(slot, &olddevs, master->maxdevs + 1) { + ret = i3c_master_get_free_addr(m, last_addr + 1); + if (ret < 0) + return -ENOSPC; + + last_addr = ret; + addrs[slot] = last_addr; + writel(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C, + master->regs + DEV_ID_RR0(slot)); + writel(0, master->regs + DEV_ID_RR1(slot)); + writel(0, master->regs + DEV_ID_RR2(slot)); + } + + ret = i3c_master_entdaa_locked(&master->base); + if (ret && ret != I3C_ERROR_M2) + return ret; + + newdevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK; + newdevs &= ~olddevs; + + /* + * Clear all retaining registers filled during DAA. We already + * have the addressed assigned to them in the addrs array. 
+ */ + for_each_set_bit(slot, &newdevs, master->maxdevs + 1) + i3c_master_add_i3c_dev_locked(m, addrs[slot]); + + /* + * Clear slots that ended up not being used. Can be caused by I3C + * device creation failure or when the I3C device was already known + * by the system but with a different address (in this case the device + * already has a slot and does not need a new one). + */ + writel(readl(master->regs + DEVS_CTRL) | + master->free_rr_slots << DEVS_CTRL_DEV_CLR_SHIFT, + master->regs + DEVS_CTRL); + i3c_master_defslvs_locked(&master->base); + + phytium_i3c_master_upd_i3c_scl_lim(master); + + /* Unmask Hot-Join and Mastership request interrupts. */ + i3c_master_enec_locked(m, I3C_BROADCAST_ADDR, + I3C_CCC_EVENT_HJ | I3C_CCC_EVENT_MR); + + return 0; +} + +static u8 phytium_i3c_master_calculate_thd_delay(struct phytium_i3c_master *master) +{ + unsigned long sysclk_rate = clk_get_rate(master->sysclk); + u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns, + (NSEC_PER_SEC / sysclk_rate)); + + /* Every value greater than 3 is not valid. */ + if (thd_delay > THD_DELAY_MAX) + thd_delay = THD_DELAY_MAX; + + /* CTLR_THD_DEL value is encoded. */ + return (THD_DELAY_MAX - thd_delay); +} + +static int phytium_i3c_master_bus_init(struct i3c_master_controller *m) +{ + struct phytium_i3c_master *master = to_phytium_i3c_master(m); + unsigned long pres_step, sysclk_rate, max_i2cfreq; + struct i3c_bus *bus = i3c_master_get_bus(m); + u32 ctrl, prescl0, prescl1, pres, low; + struct i3c_device_info info = { }; + int ret, ncycles; + + switch (bus->mode) { + case I3C_BUS_MODE_PURE: + ctrl = CTRL_PURE_BUS_MODE; + break; + + case I3C_BUS_MODE_MIXED_FAST: + ctrl = CTRL_MIXED_FAST_BUS_MODE; + break; + + case I3C_BUS_MODE_MIXED_SLOW: + ctrl = CTRL_MIXED_SLOW_BUS_MODE; + break; + + default: + return -EINVAL; + } + + if (has_acpi_companion(master->dev)) { + writel(master->prescl0, master->regs + PRESCL_CTRL0); + writel(master->prescl1, master->regs + PRESCL_CTRL1); + } else { + sysclk_rate = clk_get_rate(master->sysclk); + + if (!sysclk_rate) + return -EINVAL; + + pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1; + if (pres > PRESCL_CTRL0_MAX) + return -ERANGE; + + bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4); + + prescl0 = PRESCL_CTRL0_I3C(pres); + + low = ((I3C_BUS_TLOW_OD_MIN_NS * sysclk_rate) / (pres + 1)) - 2; + prescl1 = PRESCL_CTRL1_OD_LOW(low); + + max_i2cfreq = bus->scl_rate.i2c; + + pres = (sysclk_rate / (max_i2cfreq * 5)) - 1; + if (pres > PRESCL_CTRL0_MAX) + return -ERANGE; + + bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5); + + prescl0 |= PRESCL_CTRL0_I2C(pres); + writel(prescl0, master->regs + PRESCL_CTRL0); + master->prescl0 = prescl0; + + /* Calculate OD and PP low. */ + pres_step = 1000000000 / (bus->scl_rate.i3c * 4); + ncycles = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2; + if (ncycles < 0) + ncycles = 0; + prescl1 = PRESCL_CTRL1_OD_LOW(ncycles); + writel(prescl1, master->regs + PRESCL_CTRL1); + master->prescl1 = prescl1; + } + /* Get an address for the master. */ + ret = i3c_master_get_free_addr(m, 0); + if (ret < 0) + return ret; + + writel(prepare_rr0_dev_address(ret) | DEV_ID_RR0_IS_I3C, + master->regs + DEV_ID_RR0(0)); + + phytium_i3c_master_dev_rr_to_info(master, 0, &info); + if (info.bcr & I3C_BCR_HDR_CAP) + info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR); + + ret = i3c_master_set_info(&master->base, &info); + if (ret) + return ret; + + /* + * Enable Hot-Join, and, when a Hot-Join request happens, disable all + * events coming from this device. 
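+ * Note that CTRL_HJ_ACK is deliberately left clear at this point; probe() + * sets it only once i3c_master_register() has succeeded, so Hot-Join + * requests are not acknowledged before the bus is fully initialized.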
+ * + * We will issue ENTDAA afterwards from the threaded IRQ handler. + */ + + ctrl |= CTRL_HJ_DISEC | CTRL_MCS_EN; + /* + * Configure data hold delay based on device-specific data. + * + * MIPI I3C Specification 1.0 defines a non-zero minimal tHD_PP timing on + * master output. This setting allows that timing to be met on the + * master's SoC outputs, regardless of PCB balancing. + */ + if (has_acpi_companion(master->dev)) + ctrl |= CTRL_THD_DELAY(master->ctrl_thd_del); + else + ctrl |= CTRL_THD_DELAY(phytium_i3c_master_calculate_thd_delay(master)); + + writel(ctrl, master->regs + CTRL); + + phytium_i3c_master_enable(master); + + return 0; +} + +static void phytium_i3c_master_handle_ibi(struct phytium_i3c_master *master, + u32 ibir) +{ + struct phytium_i3c_i2c_dev_data *data; + bool data_consumed = false; + struct i3c_ibi_slot *slot; + u32 id = IBIR_SLVID(ibir); + struct i3c_dev_desc *dev; + size_t nbytes; + u8 *buf; + + /* + * FIXME: maybe we should report the FIFO OVF errors to the upper + * layer. + */ + if (id >= master->ibi.num_slots || (ibir & IBIR_ERROR)) + goto out; + + dev = master->ibi.slots[id]; + spin_lock(&master->ibi.lock); + + data = i3c_dev_get_master_data(dev); + slot = i3c_generic_ibi_get_free_slot(data->ibi_pool); + if (!slot) + goto out_unlock; + + buf = slot->data; + + nbytes = IBIR_XFER_BYTES(ibir); + readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4); + if (nbytes & 3) { /* drain the trailing 1-3 bytes left in the FIFO */ + u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO); + + memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3); + } + + slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir), + dev->ibi->max_payload_len); + i3c_master_queue_ibi(dev, slot); + data_consumed = true; + +out_unlock: + spin_unlock(&master->ibi.lock); + +out: + /* Consume data from the FIFO if it hasn't been done already.
*/ + if (!data_consumed) { + int i; + + for (i = 0; i < IBIR_XFER_BYTES(ibir); i += 4) + readl(master->regs + IBI_DATA_FIFO); + } +} + +static void phytium_i3c_master_demux_ibis(struct phytium_i3c_master *master) +{ + u32 status0; + + writel(MST_INT_IBIR_THR, master->regs + MST_ICR); + + for (status0 = readl(master->regs + MST_STATUS0); + !(status0 & MST_STATUS0_IBIR_EMP); + status0 = readl(master->regs + MST_STATUS0)) { + u32 ibir = readl(master->regs + IBIR); + + switch (IBIR_TYPE(ibir)) { + case IBIR_TYPE_IBI: + phytium_i3c_master_handle_ibi(master, ibir); + break; + + case IBIR_TYPE_HJ: + WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR)); + queue_work(master->base.wq, &master->hj_work); + break; + + case IBIR_TYPE_MR: + WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR)); + break; + default: + break; + } + } +} + +static irqreturn_t phytium_i3c_master_interrupt(int irq, void *data) +{ + struct phytium_i3c_master *master = data; + u32 status; + + status = readl(master->regs + MST_ISR); + if (!(status & readl(master->regs + MST_IMR))) + return IRQ_NONE; + + spin_lock(&master->xferqueue.lock); + phytium_i3c_master_end_xfer_locked(master, status); + spin_unlock(&master->xferqueue.lock); + + if (status & MST_INT_IBIR_THR) + phytium_i3c_master_demux_ibis(master); + + return IRQ_HANDLED; +} + +static int phytium_i3c_master_disable_ibi(struct i3c_dev_desc *dev) +{ + struct i3c_master_controller *m = i3c_dev_get_master(dev); + struct phytium_i3c_master *master = to_phytium_i3c_master(m); + struct phytium_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); + unsigned long flags; + u32 sirmap; + int ret; + + ret = i3c_master_disec_locked(m, dev->info.dyn_addr, + I3C_CCC_EVENT_SIR); + if (ret) + return ret; + + spin_lock_irqsave(&master->ibi.lock, flags); + sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi)); + sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi); + sirmap |= SIR_MAP_DEV_CONF(data->ibi, + SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR)); + writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi)); + spin_unlock_irqrestore(&master->ibi.lock, flags); + + return ret; +} + +static int phytium_i3c_master_enable_ibi(struct i3c_dev_desc *dev) +{ + struct i3c_master_controller *m = i3c_dev_get_master(dev); + struct phytium_i3c_master *master = to_phytium_i3c_master(m); + struct phytium_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); + unsigned long flags; + u32 sircfg, sirmap; + int ret; + + spin_lock_irqsave(&master->ibi.lock, flags); + sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi)); + sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi); + sircfg = SIR_MAP_DEV_ROLE(dev->info.bcr >> 6) | + SIR_MAP_DEV_DA(dev->info.dyn_addr) | + SIR_MAP_DEV_PL(dev->info.max_ibi_len) | + SIR_MAP_DEV_ACK; + + if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM) + sircfg |= SIR_MAP_DEV_SLOW; + + sirmap |= SIR_MAP_DEV_CONF(data->ibi, sircfg); + writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi)); + spin_unlock_irqrestore(&master->ibi.lock, flags); + + ret = i3c_master_enec_locked(m, dev->info.dyn_addr, + I3C_CCC_EVENT_SIR); + if (ret) { + spin_lock_irqsave(&master->ibi.lock, flags); + sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi)); + sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi); + sirmap |= SIR_MAP_DEV_CONF(data->ibi, + SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR)); + writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi)); + spin_unlock_irqrestore(&master->ibi.lock, flags); + } + + return ret; +} + +static int phytium_i3c_master_request_ibi(struct i3c_dev_desc *dev, + const struct i3c_ibi_setup *req) +{ 
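+ /* + * Allocate a generic IBI slot pool for this device and claim a free + * entry in the master's IBI slot table; the interrupt handler later + * uses that index to route incoming IBIs back to the right device. + */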
+ struct i3c_master_controller *m = i3c_dev_get_master(dev); + struct phytium_i3c_master *master = to_phytium_i3c_master(m); + struct phytium_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); + unsigned long flags; + unsigned int i; + + data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req); + if (IS_ERR(data->ibi_pool)) + return PTR_ERR(data->ibi_pool); + + spin_lock_irqsave(&master->ibi.lock, flags); + for (i = 0; i < master->ibi.num_slots; i++) { + if (!master->ibi.slots[i]) { + data->ibi = i; + master->ibi.slots[i] = dev; + break; + } + } + spin_unlock_irqrestore(&master->ibi.lock, flags); + + if (i < master->ibi.num_slots) + return 0; + + i3c_generic_ibi_free_pool(data->ibi_pool); + data->ibi_pool = NULL; + + return -ENOSPC; +} + +static void phytium_i3c_master_free_ibi(struct i3c_dev_desc *dev) +{ + struct i3c_master_controller *m = i3c_dev_get_master(dev); + struct phytium_i3c_master *master = to_phytium_i3c_master(m); + struct phytium_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); + unsigned long flags; + + spin_lock_irqsave(&master->ibi.lock, flags); + master->ibi.slots[data->ibi] = NULL; + data->ibi = -1; + spin_unlock_irqrestore(&master->ibi.lock, flags); + + i3c_generic_ibi_free_pool(data->ibi_pool); +} + +static void phytium_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev, + struct i3c_ibi_slot *slot) +{ + struct phytium_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); + + i3c_generic_ibi_recycle_slot(data->ibi_pool, slot); +} + +static const struct i3c_master_controller_ops phytium_i3c_master_ops = { + .bus_init = phytium_i3c_master_bus_init, + .bus_cleanup = phytium_i3c_master_bus_cleanup, + .do_daa = phytium_i3c_master_do_daa, + .attach_i3c_dev = phytium_i3c_master_attach_i3c_dev, + .reattach_i3c_dev = phytium_i3c_master_reattach_i3c_dev, + .detach_i3c_dev = phytium_i3c_master_detach_i3c_dev, + .attach_i2c_dev = phytium_i3c_master_attach_i2c_dev, + .detach_i2c_dev = phytium_i3c_master_detach_i2c_dev, + .supports_ccc_cmd = phytium_i3c_master_supports_ccc_cmd, + .send_ccc_cmd = phytium_i3c_master_send_ccc_cmd, + .priv_xfers = phytium_i3c_master_priv_xfers, + .i2c_xfers = phytium_i3c_master_i2c_xfers, + .enable_ibi = phytium_i3c_master_enable_ibi, + .disable_ibi = phytium_i3c_master_disable_ibi, + .request_ibi = phytium_i3c_master_request_ibi, + .free_ibi = phytium_i3c_master_free_ibi, + .recycle_ibi_slot = phytium_i3c_master_recycle_ibi_slot, +}; + +static void phytium_i3c_master_hj(struct work_struct *work) +{ + struct phytium_i3c_master *master = container_of(work, + struct phytium_i3c_master, + hj_work); + + i3c_master_do_daa(&master->base); +} + +static struct phytium_i3c_data phytium_i3c_devdata = { + .thd_delay_ns = 10, +}; + +static const struct of_device_id phytium_i3c_master_of_ids[] = { + { .compatible = "phytium,cdns-i3c-master", .data = &phytium_i3c_devdata }, + { /* sentinel */ }, +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_i3c_master_acpi_ids[] = { + { "PHYT0035", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, phytium_i3c_master_acpi_ids); +#endif + +static void phytium_i3c_master_controller_init(struct phytium_i3c_master *master) +{ + int i; + + phytium_i3c_master_disable(master); + + writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO | + FLUSH_CMD_RESP, + master->regs + FLUSH_CTRL); + + writel(0xffffffff, master->regs + MST_IDR); + writel(0xffffffff, master->regs + SLV_IDR); + + writel(master->prescl0, master->regs + PRESCL_CTRL0); + writel(master->prescl1, master->regs + PRESCL_CTRL1); + + writel(IBIR_THR(1), 
master->regs + CMD_IBI_THR_CTRL); + writel(MST_INT_IBIR_THR, master->regs + MST_IER); + writel(master->dev_valid, master->regs + DEVS_CTRL); + + for (i = 0; i < PHYTIUM_I3C_DEV_MAX_NUM; i++) { + writel(master->devinfo[i].dev_rr0, master->regs + DEV_ID_RR0(i)); + writel(master->devinfo[i].dev_rr1, master->regs + DEV_ID_RR1(i)); + writel(master->devinfo[i].dev_rr2, master->regs + DEV_ID_RR2(i)); + } + + for (i = 0; i < PHYTIUM_I3C_CMDR_MAX_TIMES; i++) + readl(master->regs + CMDR); + + writel(master->ctrl_info, master->regs + CTRL); + + phytium_i3c_master_enable(master); +} + +static void phytium_i3c_master_store_dev(struct phytium_i3c_master *master) +{ + int i; + + master->ctrl_info = readl(master->regs + CTRL); + master->dev_valid = readl(master->regs + DEVS_CTRL); + + for (i = 0; i < PHYTIUM_I3C_DEV_MAX_NUM; i++) { + master->devinfo[i].dev_rr0 = readl(master->regs + DEV_ID_RR0(i)); + master->devinfo[i].dev_rr1 = readl(master->regs + DEV_ID_RR1(i)); + master->devinfo[i].dev_rr2 = readl(master->regs + DEV_ID_RR2(i)); + } +} + +static int phytium_i3c_master_probe(struct platform_device *pdev) +{ + struct phytium_i3c_master *master; + struct resource *res; + int ret = -EINVAL; + int irq; + u32 val; + + master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL); + if (!master) + return -ENOMEM; + + master->dev = &pdev->dev; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + master->regs = devm_ioremap_resource(&pdev->dev, res); + + if (IS_ERR(master->regs)) + return PTR_ERR(master->regs); + + if (pdev->dev.of_node) { + master->devdata = of_device_get_match_data(&pdev->dev); + + if (!master->devdata) + return -EINVAL; + + + master->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(master->pclk)) + return PTR_ERR(master->pclk); + + master->sysclk = devm_clk_get(&pdev->dev, "sysclk"); + if (IS_ERR(master->sysclk)) + return PTR_ERR(master->sysclk); + + ret = clk_prepare_enable(master->pclk); + if (ret) + return ret; + + ret = clk_prepare_enable(master->sysclk); + if (ret) + goto err_disable_pclk; +#ifdef CONFIG_ACPI + } else if (has_acpi_companion(&pdev->dev)) { + i3c_master_acpi_clk_params(ACPI_HANDLE(&pdev->dev), "SCLK", &master->prescl0, + &master->prescl1, &master->ctrl_thd_del); + + if (!master->prescl0) + goto err_disable_sysclk; + + master->base.bus.scl_rate.i2c = I3C_CONTROL_DEFAULT_I2C_SCL; + master->base.bus.scl_rate.i3c = I3C_BUS_I3C_SCL_MAX_RATE; + ACPI_COMPANION_SET(master->dev, ACPI_COMPANION(&pdev->dev)); +#endif + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) { + ret = -EINVAL; + goto err_disable_sysclk; + } + + spin_lock_init(&master->xferqueue.lock); + INIT_LIST_HEAD(&master->xferqueue.list); + + INIT_WORK(&master->hj_work, phytium_i3c_master_hj); + phytium_i3c_master_disable(master); + + writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO | + FLUSH_CMD_RESP, + master->regs + FLUSH_CTRL); + writel(0xffffffff, master->regs + MST_IDR); + writel(0xffffffff, master->regs + SLV_IDR); + ret = devm_request_irq(&pdev->dev, irq, phytium_i3c_master_interrupt, 0, + dev_name(&pdev->dev), master); + if (ret) + goto err_disable_sysclk; + + platform_set_drvdata(pdev, master); + + val = readl(master->regs + CONF_STATUS0); + + /* Device ID0 is reserved to describe this master. 
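+ * Slots 1..maxdevs are handed out to attached I3C/I2C devices through + * the free_rr_slots bitmap.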
*/ + master->maxdevs = CONF_STATUS0_DEVS_NUM(val); + master->free_rr_slots = GENMASK(master->maxdevs, 1); + + val = readl(master->regs + CONF_STATUS1); + master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val); + master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val); + master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val); + master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val); + master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val); + + spin_lock_init(&master->ibi.lock); + master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val); + master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots, + sizeof(*master->ibi.slots), + GFP_KERNEL); + if (!master->ibi.slots) { + ret = -ENOMEM; + goto err_disable_sysclk; + } + + writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL); + writel(MST_INT_IBIR_THR, master->regs + MST_IER); + writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL); + + ret = i3c_master_register(&master->base, &pdev->dev, + &phytium_i3c_master_ops, false); + if (ret) + goto err_disable_sysclk; + writel(readl(master->regs + CTRL) | CTRL_HJ_ACK, master->regs + CTRL); + phytium_i3c_master_store_dev(master); + + return 0; + +err_disable_sysclk: + clk_disable_unprepare(master->sysclk); + +err_disable_pclk: + clk_disable_unprepare(master->pclk); + + return ret; +} + +static void phytium_i3c_master_remove(struct platform_device *pdev) +{ + struct phytium_i3c_master *master = platform_get_drvdata(pdev); + + i3c_master_unregister(&master->base); + + clk_disable_unprepare(master->sysclk); + clk_disable_unprepare(master->pclk); +} + +static int __maybe_unused phytium_i3c_plat_suspend(struct device *dev) +{ + struct phytium_i3c_master *master = dev_get_drvdata(dev); + + + phytium_i3c_master_disable(master); + + clk_disable_unprepare(master->sysclk); + clk_disable_unprepare(master->pclk); + + return 0; +} + +static int __maybe_unused phytium_i3c_plat_resume(struct device *dev) +{ + struct phytium_i3c_master *master = dev_get_drvdata(dev); + + phytium_i3c_master_controller_init(master); + + clk_prepare_enable(master->sysclk); + clk_prepare_enable(master->pclk); + + return 0; +} + +static const struct dev_pm_ops phytium_i3c_dev_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(phytium_i3c_plat_suspend, + phytium_i3c_plat_resume) +}; + +static struct platform_driver phytium_i3c_master = { + .probe = phytium_i3c_master_probe, + .remove = phytium_i3c_master_remove, + .driver = { + .name = "phytium-i3c-master", + .of_match_table = phytium_i3c_master_of_ids, + .acpi_match_table = ACPI_PTR(phytium_i3c_master_acpi_ids), + .pm = &phytium_i3c_dev_pm_ops, + }, +}; +module_platform_driver(phytium_i3c_master); + +MODULE_AUTHOR("Feng Jun "); +MODULE_DESCRIPTION("Phytium I3C master driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:phytium-i3c-master"); diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 9780f40ba3e65..257369dc96601 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -3947,6 +3947,13 @@ static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo; doorbell &= MSI_CFG0_ADDR_MASK; +#ifdef CONFIG_PM_SLEEP + /* Saves the msg (base addr of msi irq) and restores it during resume */ + desc->msg.address_lo = msg->address_lo; + desc->msg.address_hi = msg->address_hi; + desc->msg.data = msg->data; +#endif + writeq_relaxed(doorbell, smmu->base + cfg[0]); writel_relaxed(msg->data, smmu->base + cfg[1]); 
writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]); @@ -3989,11 +3996,53 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) devm_add_action_or_reset(dev, arm_smmu_free_msis, dev); } -static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) +#ifdef CONFIG_PM_SLEEP +static void arm_smmu_resume_msis(struct arm_smmu_device *smmu) +{ + struct msi_desc *desc; + struct device *dev = smmu->dev; + + if (!dev->msi.domain) + return; + + msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) { + switch (desc->msi_index) { + case EVTQ_MSI_INDEX: + case GERROR_MSI_INDEX: + case PRIQ_MSI_INDEX: { + phys_addr_t *cfg = arm_smmu_msi_cfg[desc->msi_index]; + struct msi_msg *msg = &desc->msg; + phys_addr_t doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo; + + doorbell &= MSI_CFG0_ADDR_MASK; + writeq_relaxed(doorbell, smmu->base + cfg[0]); + writel_relaxed(msg->data, smmu->base + cfg[1]); + writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, + smmu->base + cfg[2]); + break; + } + default: + break; + } + } +} +#else +static void arm_smmu_resume_msis(struct arm_smmu_device *smmu) +{ +} +#endif + +static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu, bool resume) { int irq, ret; - arm_smmu_setup_msis(smmu); + if (!resume) + arm_smmu_setup_msis(smmu); + else { + /* The irq doesn't need to be re-requested during resume */ + arm_smmu_resume_msis(smmu); + return; + } /* Request interrupt lines */ irq = smmu->evtq.q.irq; @@ -4035,7 +4084,7 @@ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) } } -static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) +static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu, bool resume) { int ret, irq; u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN; @@ -4062,7 +4111,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) if (ret < 0) dev_warn(smmu->dev, "failed to enable combined irq\n"); } else - arm_smmu_setup_unique_irqs(smmu); + arm_smmu_setup_unique_irqs(smmu, resume); if (smmu->features & ARM_SMMU_FEAT_PRI) irqen_flags |= IRQ_CTRL_PRIQ_IRQEN; @@ -4111,7 +4160,7 @@ static void arm_smmu_write_strtab(struct arm_smmu_device *smmu) writel_relaxed(reg, smmu->base + ARM_SMMU_STRTAB_BASE_CFG); } -static int arm_smmu_device_reset(struct arm_smmu_device *smmu) +static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume) { int ret; u32 reg, enables; @@ -4215,7 +4264,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu) } } - ret = arm_smmu_setup_irqs(smmu); + ret = arm_smmu_setup_irqs(smmu, resume); if (ret) { dev_err(smmu->dev, "failed to setup irqs\n"); return ret; @@ -4732,6 +4781,24 @@ static struct arm_smmu_device *arm_smmu_impl_probe(struct arm_smmu_device *smmu) return ERR_PTR(ret); } +#ifdef CONFIG_PM_SLEEP +static int arm_smmu_suspend(struct device *dev) +{ + /* + * The smmu is powered off and related registers are automatically + * cleared when suspend. No need to do anything. 
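+ * Resume rebuilds the configuration via arm_smmu_device_reset(smmu, + * true), which also restores the MSI doorbells saved in + * arm_smmu_write_msi_msg() instead of re-requesting the IRQs.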
+ */ + return 0; +} +static int arm_smmu_resume(struct device *dev) +{ + struct arm_smmu_device *smmu = dev_get_drvdata(dev); + + arm_smmu_device_reset(smmu, true); + return 0; +} +#endif + static int arm_smmu_device_probe(struct platform_device *pdev) { int irq, ret; @@ -4819,7 +4886,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) arm_smmu_rmr_install_bypass_ste(smmu); /* Reset the device */ - ret = arm_smmu_device_reset(smmu); + ret = arm_smmu_device_reset(smmu, false); if (ret) goto err_disable; @@ -4876,10 +4943,21 @@ static void arm_smmu_driver_unregister(struct platform_driver *drv) platform_driver_unregister(drv); } +#ifdef CONFIG_PM_SLEEP +static const struct dev_pm_ops arm_smmu_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(arm_smmu_suspend, + arm_smmu_resume) +}; +#define ARM_SMMU_PM_OPS (&arm_smmu_pm_ops) +#else +#define ARM_SMMU_PM_OPS NULL +#endif + static struct platform_driver arm_smmu_driver = { .driver = { .name = "arm-smmu-v3", .of_match_table = arm_smmu_of_match, + .pm = ARM_SMMU_PM_OPS, .suppress_bind_attrs = true, }, .probe = arm_smmu_device_probe, diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index f1fb27681b0bd..ef532555317e7 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -549,6 +549,37 @@ static int iova_reserve_pci_windows(struct pci_dev *dev, return 0; } +int iova_reserve_domain_addr(struct iommu_domain *domain, dma_addr_t start, dma_addr_t end) +{ + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad; + unsigned long lo, hi; + + if (!cookie) + return -EINVAL; + + iovad = &cookie->iovad; + lo = iova_pfn(iovad, start); + hi = iova_pfn(iovad, end); + + reserve_iova(iovad, lo, hi); + + return 0; +} +EXPORT_SYMBOL_GPL(iova_reserve_domain_addr); + +static int iova_reserve_pci_regions(struct device *dev, struct iommu_domain *domain) +{ + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + int ret = 0; + + if (dev_is_pci(dev)) + ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad); + + return ret; +} + static int iova_reserve_iommu_regions(struct device *dev, struct iommu_domain *domain) { @@ -558,12 +589,6 @@ static int iova_reserve_iommu_regions(struct device *dev, LIST_HEAD(resv_regions); int ret = 0; - if (dev_is_pci(dev)) { - ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad); - if (ret) - return ret; - } - iommu_get_resv_regions(dev, &resv_regions); list_for_each_entry(region, &resv_regions, list) { unsigned long lo, hi; @@ -694,7 +719,8 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev return -EFAULT; } - return 0; + ret = 0; + goto iova_reserve_iommu; } init_iova_domain(iovad, 1UL << order, base_pfn); @@ -709,7 +735,15 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev (!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain))) domain->type = IOMMU_DOMAIN_DMA; - return iova_reserve_iommu_regions(dev, domain); + ret = iova_reserve_pci_regions(dev, domain); + if (ret) + goto done; + +iova_reserve_iommu: + ret = iova_reserve_iommu_regions(dev, domain); + +done: + return ret; } /** diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index ec975c73cfe6c..246192e573e5c 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -768,6 +768,57 @@ static void __init dmar_acpi_insert_dev_scope(u8 device_number, device_number, dev_name(&adev->dev)); } +/* Return: true if a match was found, false otherwise */
+bool dmar_rmrr_acpi_insert_dev_scope(u8 device_number, struct acpi_device *adev, void *start, + void *end, struct dmar_dev_scope *devices, int devices_cnt) +{ + struct acpi_dmar_device_scope *scope; + struct device *tmp; + int i; + struct acpi_dmar_pci_path *path; + + for (; start < end; start += scope->length) { + scope = start; + if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE) + continue; + if (scope->enumeration_id != device_number) + continue; + path = (void *)(scope + 1); + pr_info("ACPI device \"%s\" under DMAR as %02x:%02x.%d\n", dev_name(&adev->dev), + scope->bus, path->device, path->function); + for_each_dev_scope(devices, devices_cnt, i, tmp) + if (tmp == NULL) { + devices[i].bus = scope->bus; + devices[i].devfn = PCI_DEVFN(path->device, path->function); + rcu_assign_pointer(devices[i].dev, get_device(&adev->dev)); + return true; + } + WARN_ON(i >= devices_cnt); + } + + return false; +} + +static int dmar_acpi_bus_add_dev(u8 device_number, struct acpi_device *adev) +{ + struct dmar_drhd_unit *dmaru; + struct acpi_dmar_hardware_unit *drhd; + int ret = 0; /* stays 0 when there is no DRHD unit to scan */ + + for_each_drhd_unit(dmaru) { + drhd = container_of(dmaru->hdr, struct acpi_dmar_hardware_unit, header); + ret = dmar_rmrr_acpi_insert_dev_scope(device_number, adev, (void *)(drhd+1), + ((void *)drhd)+drhd->header.length, + dmaru->devices, dmaru->devices_cnt); + if (ret) + break; + } + if (ret > 0) + ret = dmar_rmrr_add_acpi_dev(device_number, adev); + + return ret; +} + static int __init dmar_acpi_dev_scope_init(void) { struct acpi_dmar_andd *andd; @@ -795,7 +846,11 @@ static int __init dmar_acpi_dev_scope_init(void) andd->device_name); continue; } - dmar_acpi_insert_dev_scope(andd->device_number, adev); + + if (apply_zhaoxin_dmar_acpi_a_behavior()) + dmar_acpi_bus_add_dev(andd->device_number, adev); + else + dmar_acpi_insert_dev_scope(andd->device_number, adev); } } return 0; } diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index e236c7ec221f4..1441974c3f304 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2755,6 +2755,24 @@ static bool dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu) return supported; } +int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev) +{ + int ret; + struct dmar_rmrr_unit *rmrru; + struct acpi_dmar_reserved_memory *rmrr; + + list_for_each_entry(rmrru, &dmar_rmrr_units, list) { + rmrr = container_of(rmrru->hdr, struct acpi_dmar_reserved_memory, header); + ret = dmar_rmrr_acpi_insert_dev_scope(device_number, adev, (void *)(rmrr + 1), + ((void *)rmrr) + rmrr->header.length, + rmrru->devices, rmrru->devices_cnt); + if (ret) + break; + } + + return 0; +} + int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) { int ret; @@ -2975,6 +2993,43 @@ static int __init platform_optin_force_iommu(void) return 1; } +static inline int acpi_rmrr_device_create_direct_mappings(struct iommu_domain *domain, + struct device *dev) +{ + int ret; + + pr_info("RMRR ANDD dev %s entering %s\n", dev_name(dev), __func__); + ret = __acpi_rmrr_device_create_direct_mappings(domain, dev); + + return ret; +} + +static inline int acpi_rmrr_andd_probe(struct device *dev) +{ + struct intel_iommu *iommu = NULL; + struct pci_dev *pci_device = NULL; + u8 bus, devfn; + int ret = 0; + + ret = iommu_probe_device(dev); + + iommu = device_lookup_iommu(dev, &bus, &devfn); + if (!iommu) { + pr_info("cannot get the iommu corresponding to the acpi device\n"); + return -EINVAL; + } + + pci_device = pci_get_domain_bus_and_slot(iommu->segment, bus,
devfn); + if (!pci_device) { + pr_info("dpoint-- cannot get acpi devie corresponding pci_device\n"); + return -EINVAL; + } + ret = acpi_rmrr_device_create_direct_mappings(iommu_get_domain_for_dev(&pci_device->dev), + dev); + + return ret; +} + static int __init probe_acpi_namespace_devices(void) { struct dmar_drhd_unit *drhd; @@ -2998,6 +3053,10 @@ static int __init probe_acpi_namespace_devices(void) list_for_each_entry(pn, &adev->physical_node_list, node) { ret = iommu_probe_device(pn->dev); + + if (apply_zhaoxin_dmar_acpi_a_behavior()) + ret = acpi_rmrr_andd_probe(dev); + if (ret) break; } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 59244c744eabd..f3a8c0e33cd46 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1208,7 +1208,8 @@ static int iommu_create_device_direct_mappings(struct iommu_domain *domain, map_size = 0; } } - + if (apply_zhaoxin_dmar_acpi_a_behavior()) + iova_reserve_domain_addr(domain, start, end); } out: iommu_put_resv_regions(dev, &mappings); @@ -1272,6 +1273,12 @@ static struct group_device *iommu_group_alloc_device(struct iommu_group *group, return ERR_PTR(ret); } +int __acpi_rmrr_device_create_direct_mappings(struct iommu_domain *domain, struct device *dev) +{ + return iommu_create_device_direct_mappings(domain, dev); +} +EXPORT_SYMBOL_GPL(__acpi_rmrr_device_create_direct_mappings); + /** * iommu_group_add_device - add a device to an iommu group * @group: the group into which to add the device (reference should be held) diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index cecfb9b5f294c..f220636c05c19 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -66,6 +66,12 @@ config ARM_GIC_V5 select IRQ_MSI_LIB select ARM_GIC_ITS_PARENT +config ARM_GIC_PHYTIUM_2500 + bool + select IRQ_DOMAIN_HIERARCHY + select PARTITION_PERCPU + select GENERIC_IRQ_EFFECTIVE_AFF_MASK + config ARM_NVIC bool select IRQ_DOMAIN_HIERARCHY diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 756eb5dd87ccc..ac0c7d97771da 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o obj-$(CONFIG_ARM_GIC_ITS_PARENT) += irq-gic-its-msi-parent.o obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v4.o obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o +obj-$(CONFIG_ARM_GIC_PHYTIUM_2500) += irq-gic-phytium-2500.o irq-gic-phytium-2500-its.o obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o obj-$(CONFIG_ARM_GIC_V5) += irq-gic-v5.o irq-gic-v5-irs.o irq-gic-v5-its.o \ irq-gic-v5-iwb.o diff --git a/drivers/irqchip/irq-gic-phytium-2500-its.c b/drivers/irqchip/irq-gic-phytium-2500-its.c new file mode 100644 index 0000000000000..d6529a63d09d9 --- /dev/null +++ b/drivers/irqchip/irq-gic-phytium-2500-its.c @@ -0,0 +1,5694 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023, Phytium Technology Co., Ltd + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "irq-gic-common.h" + +#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) +#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) +#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) + +#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) +#define 
RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) + +#define RD_LOCAL_LPI_ENABLED BIT(0) +#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1) +#define RD_LOCAL_MEMRESERVE_DONE BIT(2) + +static u32 lpi_id_bits; + +/* + * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to + * deal with (one configuration byte per interrupt). PENDBASE has to + * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). + */ +#define LPI_NRBITS lpi_id_bits +#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) +#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) + +#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI + +/* + * Collection structure - just an ID, and a redistributor address to + * ping. We use one per CPU as a bag of interrupts assigned to this + * CPU. + */ +struct its_collection { + u64 target_address; + u16 col_id; +}; + +/* + * The ITS_BASER structure - contains memory information, cached + * value of BASER register configuration and ITS page size. + */ +struct its_baser { + void *base; + u64 val; + u32 order; + u32 psz; +}; + +struct its_device; + +/* + * The ITS structure - contains most of the infrastructure, with the + * top-level MSI domain, the command queue, the collections, and the + * list of devices writing to it. + * + * dev_alloc_lock has to be taken for device allocations, while the + * spinlock must be taken to parse data structures such as the device + * list. + */ +struct its_node { + raw_spinlock_t lock; + struct mutex dev_alloc_lock; + struct list_head entry; + void __iomem *base; + void __iomem *sgir_base; + phys_addr_t phys_base; + struct its_cmd_block *cmd_base; + struct its_cmd_block *cmd_write; + struct its_baser tables[GITS_BASER_NR_REGS]; + struct its_collection *collections; + struct fwnode_handle *fwnode_handle; + u64 (*get_msi_base)(struct its_device *its_dev); + u64 typer; + u64 cbaser_save; + u32 ctlr_save; + u32 mpidr; + struct list_head its_device_list; + u64 flags; + unsigned long list_nr; + int numa_node; + unsigned int msi_domain_flags; + u32 pre_its_base; /* for Socionext Synquacer */ + int vlpi_redist_offset; +}; + +#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) +#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) +#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) + +#define ITS_ITT_ALIGN SZ_256 + +/* The maximum number of VPEID bits supported by VLPI commands */ +#define ITS_MAX_VPEID_BITS \ + ({ \ + int nvpeid = 16; \ + if (gic_rdists->has_rvpeid && \ + gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \ + nvpeid = 1 + (gic_rdists->gicd_typer2 & \ + GICD_TYPER2_VID); \ + \ + nvpeid; \ + }) +#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS)) + +/* Convert page order to size in bytes */ +#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o)) + +struct event_lpi_map { + unsigned long *lpi_map; + u16 *col_map; + irq_hw_number_t lpi_base; + int nr_lpis; + raw_spinlock_t vlpi_lock; + struct its_vm *vm; + struct its_vlpi_map *vlpi_maps; + int nr_vlpis; +}; + +/* + * The ITS view of a device - belongs to an ITS, owns an interrupt + * translation table, and a list of interrupts. If it some of its + * LPIs are injected into a guest (GICv4), the event_map.vm field + * indicates which one. 
+ */ +struct its_device { + struct list_head entry; + struct its_node *its; + struct event_lpi_map event_map; + void *itt; + u32 nr_ites; + u32 device_id; + bool shared; +}; + +static struct { + raw_spinlock_t lock; + struct its_device *dev; + struct its_vpe **vpes; + int next_victim; +} vpe_proxy; + +struct cpu_lpi_count { + atomic_t managed; + atomic_t unmanaged; +}; + +static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count); + +static LIST_HEAD(its_nodes); +static DEFINE_RAW_SPINLOCK(its_lock); +static struct rdists *gic_rdists; +static struct irq_domain *its_parent; + +static unsigned long its_list_map; +static u16 vmovp_seq_num; +static DEFINE_RAW_SPINLOCK(vmovp_lock); + +static DEFINE_IDA(its_vpeid_ida); + +#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) +#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) +#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) +#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) + +/* + * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we + * always have vSGIs mapped. + */ +static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its) +{ + return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]); +} + +static u16 get_its_list(struct its_vm *vm) +{ + struct its_node *its; + unsigned long its_list = 0; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (require_its_list_vmovp(vm, its)) + __set_bit(its->list_nr, &its_list); + } + + return (u16)its_list; +} + +static inline u32 its_get_event_id(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + return d->hwirq - its_dev->event_map.lpi_base; +} + +static struct its_collection *dev_event_to_col(struct its_device *its_dev, + u32 event) +{ + struct its_node *its = its_dev->its; + + return its->collections + its_dev->event_map.col_map[event]; +} + +static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev, + u32 event) +{ + if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis)) + return NULL; + + return &its_dev->event_map.vlpi_maps[event]; +} + +static struct its_vlpi_map *get_vlpi_map(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + return dev_event_to_vlpi_map(its_dev, event); + } + + return NULL; +} + +static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags) +{ + raw_spin_lock_irqsave(&vpe->vpe_lock, *flags); + return vpe->col_idx; +} + +static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags) +{ + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); +} + +static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + int cpu; + + if (map) { + cpu = vpe_to_cpuid_lock(map->vpe, flags); + } else { + /* Physical LPIs are already locked via the irq_desc lock */ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + cpu = its_dev->event_map.col_map[its_get_event_id(d)]; + /* Keep GCC quiet... 
*/ + *flags = 0; + } + + return cpu; +} + +static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + + if (map) + vpe_to_cpuid_unlock(map->vpe, flags); +} + +static struct its_collection *valid_col(struct its_collection *col) +{ + if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0))) + return NULL; + + return col; +} + +static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) +{ + if (valid_col(its->collections + vpe->col_idx)) + return vpe; + + return NULL; +} + +/* + * ITS command descriptors - parameters to be encoded in a command + * block. + */ +struct its_cmd_desc { + union { + struct { + struct its_device *dev; + u32 event_id; + } its_inv_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_clear_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_int_cmd; + + struct { + struct its_device *dev; + int valid; + } its_mapd_cmd; + + struct { + struct its_collection *col; + int valid; + } its_mapc_cmd; + + struct { + struct its_device *dev; + u32 phys_id; + u32 event_id; + } its_mapti_cmd; + + struct { + struct its_device *dev; + struct its_collection *col; + u32 event_id; + } its_movi_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_discard_cmd; + + struct { + struct its_collection *col; + } its_invall_cmd; + + struct { + struct its_vpe *vpe; + } its_vinvall_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + bool valid; + } its_vmapp_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 virt_id; + u32 event_id; + bool db_enabled; + } its_vmapti_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 event_id; + bool db_enabled; + } its_vmovi_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + u16 seq_num; + u16 its_list; + } its_vmovp_cmd; + + struct { + struct its_vpe *vpe; + } its_invdb_cmd; + + struct { + struct its_vpe *vpe; + u8 sgi; + u8 priority; + bool enable; + bool group; + bool clear; + } its_vsgi_cmd; + }; +}; + +/* + * The ITS command block, which is what the ITS actually parses. 
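+ * Each block is four 64-bit words (32 bytes). The raw_cmd_le view plus
+ * its_fixup_cmd() exist because the ITS reads commands as little-endian
+ * regardless of the CPU's endianness.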
+ */ +struct its_cmd_block { + union { + u64 raw_cmd[4]; + __le64 raw_cmd_le[4]; + }; +}; + +#define ITS_CMD_QUEUE_SZ SZ_64K +#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) + +typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) +{ + u64 mask = GENMASK_ULL(h, l); + *raw_cmd &= ~mask; + *raw_cmd |= (val << l) & mask; +} + +static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) +{ + its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0); +} + +static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) +{ + its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32); +} + +static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) +{ + its_mask_encode(&cmd->raw_cmd[1], id, 31, 0); +} + +static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) +{ + its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32); +} + +static void its_encode_size(struct its_cmd_block *cmd, u8 size) +{ + its_mask_encode(&cmd->raw_cmd[1], size, 4, 0); +} + +static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8); +} + +static void its_encode_valid(struct its_cmd_block *cmd, int valid) +{ + its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63); +} + +static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16); +} + +static void its_encode_collection(struct its_cmd_block *cmd, u16 col) +{ + its_mask_encode(&cmd->raw_cmd[2], col, 15, 0); +} + +static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid) +{ + its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32); +} + +static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id) +{ + its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0); +} + +static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id) +{ + its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32); +} + +static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid) +{ + its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0); +} + +static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num) +{ + its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32); +} + +static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list) +{ + its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0); +} + +static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16); +} + +static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); +} + +static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa) +{ + its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16); +} + +static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc) +{ + its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8); +} + +static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz) +{ + its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9); +} + +static void its_encode_vmapp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0); +} + +static void its_encode_vmovp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 
31, 0); +} + +static void its_encode_db(struct its_cmd_block *cmd, bool db) +{ + its_mask_encode(&cmd->raw_cmd[2], db, 63, 63); +} + +static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi) +{ + its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32); +} + +static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio) +{ + its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20); +} + +static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp) +{ + its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10); +} + +static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr) +{ + its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9); +} + +static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en) +{ + its_mask_encode(&cmd->raw_cmd[0], en, 8, 8); +} + +static inline void its_fixup_cmd(struct its_cmd_block *cmd) +{ + /* Let's fixup BE commands */ + cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]); + cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]); + cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]); + cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]); +} + +static struct its_collection *its_build_mapd_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long itt_addr; + u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); + + itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); + itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); + + its_encode_cmd(cmd, GITS_CMD_MAPD); + its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); + its_encode_size(cmd, size - 1); + its_encode_itt(cmd, itt_addr); + its_encode_valid(cmd, desc->its_mapd_cmd.valid); + + its_fixup_cmd(cmd); + + return NULL; +} + +static struct its_collection *its_build_mapc_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_MAPC); + its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); + its_encode_valid(cmd, desc->its_mapc_cmd.valid); + + its_fixup_cmd(cmd); + + return desc->its_mapc_cmd.col; +} + +static struct its_collection *its_build_mapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_mapti_cmd.dev, + desc->its_mapti_cmd.event_id); + if (is_kdump_kernel()) + col->col_id = col->col_id % 65; + else + col->col_id = col->col_id % 64; + + its_encode_cmd(cmd, GITS_CMD_MAPTI); + its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_mapti_cmd.event_id); + its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id); + its_encode_collection(cmd, col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_movi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_movi_cmd.dev, + desc->its_movi_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_MOVI); + its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_movi_cmd.event_id); + its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_discard_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_discard_cmd.dev, + desc->its_discard_cmd.event_id); + + its_encode_cmd(cmd, 
GITS_CMD_DISCARD); + its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_discard_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_inv_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_int_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_int_cmd.dev, + desc->its_int_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INT); + its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_int_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_clear_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_clear_cmd.dev, + desc->its_clear_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_CLEAR); + its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_invall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_INVALL); + its_encode_collection(cmd, desc->its_invall_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return desc->its_invall_cmd.col; +} + +static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_VINVALL); + its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vinvall_cmd.vpe); +} + +static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long vpt_addr, vconf_addr; + u64 target; + bool alloc; + + its_encode_cmd(cmd, GITS_CMD_VMAPP); + its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); + its_encode_valid(cmd, desc->its_vmapp_cmd.valid); + + if (!desc->its_vmapp_cmd.valid) { + if (is_v4_1(its)) { + alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count); + its_encode_alloc(cmd, alloc); + } + + goto out; + } + + vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); + target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + + its_encode_target(cmd, target); + its_encode_vpt_addr(cmd, vpt_addr); + its_encode_vpt_size(cmd, LPI_NRBITS - 1); + + if (!is_v4_1(its)) + goto out; + + vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page)); + + alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count); + + its_encode_alloc(cmd, alloc); + + /* + * GICv4.1 provides a way to get the VLPI state, which needs the vPE + * to be unmapped first, and in this case, we may remap the vPE + * back while the VPT is not empty. So we can't assume that the + * VPT is empty on map. This is why we never advertise PTZ. 
+ */ + its_encode_ptz(cmd, false); + its_encode_vconf_addr(cmd, vconf_addr); + its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi); + +out: + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapp_cmd.vpe); +} + +static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled) + db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMAPTI); + its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapti_cmd.vpe); +} + +static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled) + db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMOVI); + its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_db_valid(cmd, true); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmovi_cmd.vpe); +} + +static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u64 target; + + target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; + its_encode_cmd(cmd, GITS_CMD_VMOVP); + its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); + its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); + its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); + its_encode_target(cmd, target); + + if (is_v4_1(its)) { + its_encode_db(cmd, true); + its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi); + } + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmovp_cmd.vpe); +} + +static struct its_vpe *its_build_vinv_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_vint_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_int_cmd.dev, + desc->its_int_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INT); + its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_int_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_vclear_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev, + desc->its_clear_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_CLEAR); + its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); + + its_fixup_cmd(cmd); + + return 
valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_invdb_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_INVDB); + its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_invdb_cmd.vpe); +} + +static struct its_vpe *its_build_vsgi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_VSGI); + its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id); + its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi); + its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority); + its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group); + its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear); + its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vsgi_cmd.vpe); +} + +static u64 its_cmd_ptr_to_offset(struct its_node *its, + struct its_cmd_block *ptr) +{ + return (ptr - its->cmd_base) * sizeof(*ptr); +} + +static int its_queue_full(struct its_node *its) +{ + int widx; + int ridx; + + widx = its->cmd_write - its->cmd_base; + ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); + + /* This is incredibly unlikely to happen, unless the ITS locks up. */ + if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) + return 1; + + return 0; +} + +static struct its_cmd_block *its_allocate_entry(struct its_node *its) +{ + struct its_cmd_block *cmd; + u32 count = 1000000; /* 1s! */ + + while (its_queue_full(its)) { + count--; + if (!count) { + pr_err_ratelimited("ITS queue not draining\n"); + return NULL; + } + cpu_relax(); + udelay(1); + } + + cmd = its->cmd_write++; + + /* Handle queue wrapping */ + if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) + its->cmd_write = its->cmd_base; + + /* Clear command */ + cmd->raw_cmd[0] = 0; + cmd->raw_cmd[1] = 0; + cmd->raw_cmd[2] = 0; + cmd->raw_cmd[3] = 0; + + return cmd; +} + +static struct its_cmd_block *its_post_commands(struct its_node *its) +{ + u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); + + writel_relaxed(wr, its->base + GITS_CWRITER); + + return its->cmd_write; +} + +static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) +{ + /* + * Make sure the commands written to memory are observable by + * the ITS. + */ + if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) + gic_flush_dcache_to_poc(cmd, sizeof(*cmd)); + else + dsb(ishst); +} + +static int its_wait_for_range_completion(struct its_node *its, + u64 prev_idx, + struct its_cmd_block *to) +{ + u64 rd_idx, to_idx, linear_idx; + u32 count = 1000000; /* 1s! */ + + /* Linearize to_idx if the command set has wrapped around */ + to_idx = its_cmd_ptr_to_offset(its, to); + if (to_idx < prev_idx) + to_idx += ITS_CMD_QUEUE_SZ; + + linear_idx = prev_idx; + + while (1) { + s64 delta; + + rd_idx = readl_relaxed(its->base + GITS_CREADR); + + /* + * Compute the read pointer progress, taking the + * potential wrap-around into account. 
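+		 *
+		 * Worked example, with offsets in bytes into the 64KB
+		 * queue: for prev_idx = 0xff80 and rd_idx = 0x0040, the
+		 * raw delta is negative, so it gains ITS_CMD_QUEUE_SZ
+		 * and becomes 0xc0 bytes, i.e. six 32-byte commands of
+		 * forward progress.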
+ */ + delta = rd_idx - prev_idx; + if (rd_idx < prev_idx) + delta += ITS_CMD_QUEUE_SZ; + + linear_idx += delta; + if (linear_idx >= to_idx) + break; + + count--; + if (!count) { + pr_err_ratelimited("ITS queue timeout (%llu %llu)\n", + to_idx, linear_idx); + return -1; + } + prev_idx = rd_idx; + cpu_relax(); + udelay(1); + } + + return 0; +} + +/* Warning, macro hell follows */ +#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \ +void name(struct its_node *its, \ + buildtype builder, \ + struct its_cmd_desc *desc) \ +{ \ + struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ + synctype *sync_obj; \ + unsigned long flags; \ + u64 rd_idx; \ + \ + raw_spin_lock_irqsave(&its->lock, flags); \ + \ + cmd = its_allocate_entry(its); \ + if (!cmd) { /* We're soooooo screewed... */ \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + return; \ + } \ + sync_obj = builder(its, cmd, desc); \ + its_flush_cmd(its, cmd); \ + \ + if (sync_obj) { \ + sync_cmd = its_allocate_entry(its); \ + if (!sync_cmd) \ + goto post; \ + \ + buildfn(its, sync_cmd, sync_obj); \ + its_flush_cmd(its, sync_cmd); \ + } \ + \ +post: \ + rd_idx = readl_relaxed(its->base + GITS_CREADR); \ + next_cmd = its_post_commands(its); \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + \ + if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \ + pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ +} + +static void its_build_sync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_collection *sync_col) +{ + its_encode_cmd(sync_cmd, GITS_CMD_SYNC); + its_encode_target(sync_cmd, sync_col->target_address); + + its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, + struct its_collection, its_build_sync_cmd) + +static void its_build_vsync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_vpe *sync_vpe) +{ + its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); + its_encode_vpeid(sync_cmd, sync_vpe->vpe_id); + + its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t, + struct its_vpe, its_build_vsync_cmd) + +static void its_send_int(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_int_cmd.dev = dev; + desc.its_int_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_int_cmd, &desc); +} + +static void its_send_clear(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_clear_cmd.dev = dev; + desc.its_clear_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_clear_cmd, &desc); +} + +static void its_send_inv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_inv_cmd, &desc); +} + +static void its_send_mapd(struct its_device *dev, int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapd_cmd.dev = dev; + desc.its_mapd_cmd.valid = !!valid; + + its_send_single_command(dev->its, its_build_mapd_cmd, &desc); +} + +static void its_send_mapc(struct its_node *its, struct its_collection *col, + int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapc_cmd.col = col; + desc.its_mapc_cmd.valid = !!valid; + + its_send_single_command(its, its_build_mapc_cmd, &desc); +} + +static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_mapti_cmd.dev = dev; + desc.its_mapti_cmd.phys_id = irq_id; + 
desc.its_mapti_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_mapti_cmd, &desc); +} + +static void its_send_movi(struct its_device *dev, + struct its_collection *col, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_movi_cmd.dev = dev; + desc.its_movi_cmd.col = col; + desc.its_movi_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_movi_cmd, &desc); +} + +static void its_send_discard(struct its_device *dev, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_discard_cmd.dev = dev; + desc.its_discard_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_discard_cmd, &desc); +} + +static void its_send_invall(struct its_node *its, struct its_collection *col) +{ + struct its_cmd_desc desc; + + desc.its_invall_cmd.col = col; + + its_send_single_command(its, its_build_invall_cmd, &desc); +} + +static void its_send_vmapti(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); + struct its_cmd_desc desc; + + desc.its_vmapti_cmd.vpe = map->vpe; + desc.its_vmapti_cmd.dev = dev; + desc.its_vmapti_cmd.virt_id = map->vintid; + desc.its_vmapti_cmd.event_id = id; + desc.its_vmapti_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc); +} + +static void its_send_vmovi(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); + struct its_cmd_desc desc; + + desc.its_vmovi_cmd.vpe = map->vpe; + desc.its_vmovi_cmd.dev = dev; + desc.its_vmovi_cmd.event_id = id; + desc.its_vmovi_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); +} + +static void its_send_vmapp(struct its_node *its, + struct its_vpe *vpe, bool valid) +{ + struct its_cmd_desc desc; + + desc.its_vmapp_cmd.vpe = vpe; + desc.its_vmapp_cmd.valid = valid; + desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; + + its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); +} + +static void its_send_vmovp(struct its_vpe *vpe) +{ + struct its_cmd_desc desc = {}; + struct its_node *its; + unsigned long flags; + int col_id = vpe->col_idx; + + desc.its_vmovp_cmd.vpe = vpe; + + if (!its_list_map) { + its = list_first_entry(&its_nodes, struct its_node, entry); + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + return; + } + + /* + * Yet another marvel of the architecture. If using the + * its_list "feature", we need to make sure that all ITSs + * receive all VMOVP commands in the same order. The only way + * to guarantee this is to make vmovp a serialization point. + * + * Wall <-- Head. + */ + raw_spin_lock_irqsave(&vmovp_lock, flags); + + desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; + desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); + + /* Emit VMOVPs */ + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (!require_its_list_vmovp(vpe->its_vm, its)) + continue; + + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + + desc.its_vinvall_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); +} + +static void its_send_vinv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VINV command. 
This is just a normal INV, + * with a VSYNC instead of a SYNC. + */ + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc); +} + +static void its_send_vint(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VINT command. This is just a normal INT, + * with a VSYNC instead of a SYNC. + */ + desc.its_int_cmd.dev = dev; + desc.its_int_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc); +} + +static void its_send_vclear(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VCLEAR command. This is just a normal CLEAR, + * with a VSYNC instead of a SYNC. + */ + desc.its_clear_cmd.dev = dev; + desc.its_clear_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc); +} + +static void its_send_invdb(struct its_node *its, struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + + desc.its_invdb_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_invdb_cmd, &desc); +} + +/* + * irqchip functions - assumes MSI, mostly. + */ +static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + irq_hw_number_t hwirq; + void *va; + u8 *cfg; + + if (map) { + va = page_address(map->vm->vprop_page); + hwirq = map->vintid; + + /* Remember the updated property */ + map->properties &= ~clr; + map->properties |= set | LPI_PROP_GROUP1; + } else { + va = gic_rdists->prop_table_va; + hwirq = d->hwirq; + } + + cfg = va + hwirq - 8192; + *cfg &= ~clr; + *cfg |= set | LPI_PROP_GROUP1; + + /* + * Make the above write visible to the redistributors. + * And yes, we're flushing exactly: One. Single. Byte. + * Humpf... 
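+	 *
+	 * (Each LPI is configured by a single byte in the property
+	 * table, at base + (hwirq - 8192): bit 0 is the enable bit,
+	 * LPI_PROP_GROUP1 is always set, and the top bits hold the
+	 * priority. That one byte is all we have written.)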
+ */ + if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) + gic_flush_dcache_to_poc(cfg, sizeof(*cfg)); + else + dsb(ishst); +} + +static void wait_for_syncr(void __iomem *rdbase) +{ + while (readl_relaxed(rdbase + GICR_SYNCR) & 1) + cpu_relax(); +} + +static void direct_lpi_inv(struct irq_data *d) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + void __iomem *rdbase; + unsigned long flags; + u64 val; + int cpu; + + if (map) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + WARN_ON(!is_v4_1(its_dev->its)); + + val = GICR_INVLPIR_V; + val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id); + val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid); + } else { + val = d->hwirq; + } + + /* Target the redistributor this LPI is currently routed to */ + cpu = irq_to_cpuid_lock(d, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVLPIR); + + wait_for_syncr(rdbase); + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + irq_to_cpuid_unlock(d, flags); +} + +static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + lpi_write_config(d, clr, set); + if (gic_rdists->has_direct_lpi && + (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d))) + direct_lpi_inv(d); + else if (!irqd_is_forwarded_to_vcpu(d)) + its_send_inv(its_dev, its_get_event_id(d)); + else + its_send_vinv(its_dev, its_get_event_id(d)); +} + +static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + struct its_vlpi_map *map; + + /* + * GICv4.1 does away with the per-LPI nonsense, nothing to do + * here. + */ + if (is_v4_1(its_dev->its)) + return; + + map = dev_event_to_vlpi_map(its_dev, event); + + if (map->db_enabled == enable) + return; + + map->db_enabled = enable; + + /* + * More fun with the architecture: + * + * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI + * value or to 1023, depending on the enable bit. But that + * would be issuing a mapping for an /existing/ DevID+EventID + * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI + * to the /same/ vPE, using this opportunity to adjust the + * doorbell. Mouahahahaha. We loves it, Precious. 
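+	 *
+	 * (A VMOVI that re-targets the very same vPE changes nothing
+	 * but the doorbell, which is exactly the effect we want.)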
+ */ + its_send_vmovi(its_dev, event); +} + +static void its_mask_irq(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) + its_vlpi_set_doorbell(d, false); + + lpi_update_config(d, LPI_PROP_ENABLED, 0); +} + +static void its_unmask_irq(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) + its_vlpi_set_doorbell(d, true); + + lpi_update_config(d, 0, LPI_PROP_ENABLED); +} + +static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + + return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static void its_inc_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + else + atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static void its_dec_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + else + atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static unsigned int cpumask_pick_least_loaded(struct irq_data *d, + const struct cpumask *cpu_mask) +{ + unsigned int cpu = nr_cpu_ids, tmp; + int count = S32_MAX; + + for_each_cpu(tmp, cpu_mask) { + int this_count = its_read_lpi_count(d, tmp); + + if (this_count < count) { + cpu = tmp; + count = this_count; + } + } + + return cpu; +} + +/* + * As suggested by Thomas Gleixner in: + * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de + */ +static int its_select_cpu(struct irq_data *d, + const struct cpumask *aff_mask) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + static DEFINE_RAW_SPINLOCK(tmpmask_lock); + static struct cpumask __tmpmask; + struct cpumask *tmpmask; + unsigned long flags; + int cpu, node; + + node = its_dev->its->numa_node; + tmpmask = &__tmpmask; + + raw_spin_lock_irqsave(&tmpmask_lock, flags); + + if (!irqd_affinity_is_managed(d)) { + /* First try the NUMA node */ + if (node != NUMA_NO_NODE) { + /* + * Try the intersection of the affinity mask and the + * node mask (and the online mask, just to be safe). + */ + cpumask_and(tmpmask, cpumask_of_node(node), aff_mask); + cpumask_and(tmpmask, tmpmask, cpu_online_mask); + + /* + * Ideally, we would check if the mask is empty, and + * try again on the full node here. + * + * But it turns out that the way ACPI describes the + * affinity for ITSs only deals about memory, and + * not target CPUs, so it cannot describe a single + * ITS placed next to two NUMA nodes. + * + * Instead, just fallback on the online mask. This + * diverges from Thomas' suggestion above. 
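+			 *
+			 * Whichever mask survives, the final pick below
+			 * goes to the CPU carrying the fewest LPIs, as
+			 * counted by its_read_lpi_count().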
+ */ + cpu = cpumask_pick_least_loaded(d, tmpmask); + if (cpu < nr_cpu_ids) + goto out; + + /* If we can't cross sockets, give up */ + if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)) + goto out; + + /* If the above failed, expand the search */ + } + + /* Try the intersection of the affinity and online masks */ + cpumask_and(tmpmask, aff_mask, cpu_online_mask); + + /* If that doesn't fly, the online mask is the last resort */ + if (cpumask_empty(tmpmask)) + cpumask_copy(tmpmask, cpu_online_mask); + + cpu = cpumask_pick_least_loaded(d, tmpmask); + } else { + cpumask_copy(tmpmask, aff_mask); + + /* If we cannot cross sockets, limit the search to that node */ + if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && + node != NUMA_NO_NODE) + cpumask_and(tmpmask, tmpmask, cpumask_of_node(node)); + + cpu = cpumask_pick_least_loaded(d, tmpmask); + } +out: + raw_spin_unlock_irqrestore(&tmpmask_lock, flags); + + pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu); + return cpu; +} + +#define MAX_MARS3_SKT_COUNT 8 + +static int its_cpumask_select(struct its_device *its_dev, + const struct cpumask *mask_val, + const struct cpumask *cpu_mask) +{ + unsigned int skt, skt_id, i; + phys_addr_t its_phys_base; + unsigned int cpu, cpus = 0; + + unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0}; + + its_phys_base = its_dev->its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) { + if ((is_kdump_kernel()) && (skt_id == skt)) + return i; + + skt_cpu_cnt[skt]++; + } else if (skt != 0xff) { + pr_err("socket address: %d is out of range.", skt); + } + } + + if (skt_id) { + for (i = 0; i < skt_id; i++) + cpus += skt_cpu_cnt[i]; + } + + cpu = cpumask_any_and(mask_val, cpu_mask); + cpus = cpus + cpu % skt_cpu_cnt[skt_id]; + + return cpus; +} + +static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + unsigned int cpu; + const struct cpumask *cpu_mask = cpu_online_mask; + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_collection *target_col; + u32 id = its_get_event_id(d); + int prev_cpu; + unsigned int skt_t1, skt_t2, cpu_idx; + + /* A forwarded interrupt should use irq_set_vcpu_affinity */ + if (irqd_is_forwarded_to_vcpu(d)) + return -EINVAL; + + prev_cpu = its_dev->event_map.col_map[id]; + its_dec_lpi_count(d, prev_cpu); + + cpu_idx = its_cpumask_select(its_dev, mask_val, cpu_mask); + skt_t1 = (cpu_logical_map(cpu_idx) >> 16) & 0xff; + if (!force) + cpu = its_select_cpu(d, mask_val); + else + cpu = cpumask_pick_least_loaded(d, mask_val); + + skt_t2 = (cpu_logical_map(cpu) >> 16) & 0xff; + if (skt_t1 != skt_t2) + cpu = cpu_idx; + + if (cpu < 0 || cpu >= nr_cpu_ids) + goto err; + + /* don't set the affinity when the target cpu is same as current one */ + if (cpu != prev_cpu) { + target_col = &its_dev->its->collections[cpu]; + its_send_movi(its_dev, target_col, id); + its_dev->event_map.col_map[id] = cpu; + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + } + + its_inc_lpi_count(d, cpu); + + return IRQ_SET_MASK_OK_DONE; + +err: + its_inc_lpi_count(d, prev_cpu); + return -EINVAL; +} + +static u64 its_irq_get_msi_base(struct its_device *its_dev) +{ + struct its_node *its = its_dev->its; + + return its->phys_base + GITS_TRANSLATER; +} + +static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct its_device *its_dev = 
irq_data_get_irq_chip_data(d); + struct its_node *its; + u64 addr; + + its = its_dev->its; + addr = its->get_msi_base(its_dev); + + msg->address_lo = lower_32_bits(addr); + msg->address_hi = upper_32_bits(addr); + msg->data = its_get_event_id(d); +} + +static int its_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (irqd_is_forwarded_to_vcpu(d)) { + if (state) + its_send_vint(its_dev, event); + else + its_send_vclear(its_dev, event); + } else { + if (state) + its_send_int(its_dev, event); + else + its_send_clear(its_dev, event); + } + + return 0; +} + +static int its_irq_retrigger(struct irq_data *d) +{ + return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + +/* + * Two favourable cases: + * + * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times + * for vSGI delivery + * + * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough + * and we're better off mapping all VPEs always + * + * If neither (a) nor (b) is true, then we map vPEs on demand. + * + */ +static bool gic_requires_eager_mapping(void) +{ + if (!its_list_map || gic_rdists->has_rvpeid) + return true; + + return false; +} + +static void its_map_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + if (gic_requires_eager_mapping()) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + /* + * If the VM wasn't mapped yet, iterate over the vpes and get + * them mapped now. + */ + vm->vlpi_count[its->list_nr]++; + + if (vm->vlpi_count[its->list_nr] == 1) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) { + struct its_vpe *vpe = vm->vpes[i]; + struct irq_data *d = irq_get_irq_data(vpe->irq); + + /* Map the VPE to the first possible CPU */ + vpe->col_idx = cpumask_first(cpu_online_mask); + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + } + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_unmap_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + /* Not using the ITS list? Everything is always mapped. 
*/ + if (gic_requires_eager_mapping()) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + if (!--vm->vlpi_count[its->list_nr]) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) + its_send_vmapp(its, vm->vpes[i], false); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + if (!info->map) + return -EINVAL; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm) { + struct its_vlpi_map *maps; + + maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), + GFP_ATOMIC); + if (!maps) { + ret = -ENOMEM; + goto out; + } + + its_dev->event_map.vm = info->map->vm; + its_dev->event_map.vlpi_maps = maps; + } else if (its_dev->event_map.vm != info->map->vm) { + ret = -EINVAL; + goto out; + } + + /* Get our private copy of the mapping information */ + its_dev->event_map.vlpi_maps[event] = *info->map; + + if (irqd_is_forwarded_to_vcpu(d)) { + /* Already mapped, move it around */ + its_send_vmovi(its_dev, event); + } else { + /* Ensure all the VPEs are mapped on this ITS */ + its_map_vm(its_dev->its, info->map->vm); + + /* + * Flag the interrupt as forwarded so that we can + * start poking the virtual property table. + */ + irqd_set_forwarded_to_vcpu(d); + + /* Write out the property to the prop table */ + lpi_write_config(d, 0xff, info->map->properties); + + /* Drop the physical mapping */ + its_send_discard(its_dev, event); + + /* and install the virtual one */ + its_send_vmapti(its_dev, event); + + /* Increment the number of VLPIs */ + its_dev->event_map.nr_vlpis++; + } + +out: + raw_spin_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_vlpi_map *map; + int ret = 0; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + map = get_vlpi_map(d); + + if (!its_dev->event_map.vm || !map) { + ret = -EINVAL; + goto out; + } + + /* Copy our mapping information to the incoming request */ + *info->map = *map; + +out: + raw_spin_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_unmap(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { + ret = -EINVAL; + goto out; + } + + /* Drop the virtual mapping */ + its_send_discard(its_dev, event); + + /* and restore the physical one */ + irqd_clr_forwarded_to_vcpu(d); + its_send_mapti(its_dev, d->hwirq, event); + lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | + LPI_PROP_ENABLED | + LPI_PROP_GROUP1)); + + /* Potentially unmap the VM from this ITS */ + its_unmap_vm(its_dev->its, its_dev->event_map.vm); + + /* + * Drop the refcount and make the device available again if + * this was the last VLPI. 
+ */
+	if (!--its_dev->event_map.nr_vlpis) {
+		its_dev->event_map.vm = NULL;
+		kfree(its_dev->event_map.vlpi_maps);
+	}
+
+out:
+	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+	return ret;
+}
+
+static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
+		return -EINVAL;
+
+	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
+		lpi_update_config(d, 0xff, info->config);
+	else
+		lpi_write_config(d, 0xff, info->config);
+	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
+
+	return 0;
+}
+
+static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	struct its_cmd_info *info = vcpu_info;
+
+	/* Need a v4 ITS */
+	if (!is_v4(its_dev->its))
+		return -EINVAL;
+
+	/* Unmap request? */
+	if (!info)
+		return its_vlpi_unmap(d);
+
+	switch (info->cmd_type) {
+	case MAP_VLPI:
+		return its_vlpi_map(d, info);
+
+	case GET_VLPI:
+		return its_vlpi_get(d, info);
+
+	case PROP_UPDATE_VLPI:
+	case PROP_UPDATE_AND_INV_VLPI:
+		return its_vlpi_prop_update(d, info);
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static struct irq_chip its_irq_chip = {
+	.name			= "ITS",
+	.irq_mask		= its_mask_irq,
+	.irq_unmask		= its_unmask_irq,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_affinity	= its_set_affinity,
+	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
+	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
+	.irq_retrigger		= its_irq_retrigger,
+	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
+};
+
+/*
+ * How we allocate LPIs:
+ *
+ * lpi_range_list contains ranges of LPIs that are available to
+ * allocate from. To allocate LPIs, just pick the first range that
+ * fits the required allocation, and reduce it by the required
+ * amount. Once empty, remove the range from the list.
+ *
+ * To free a range of LPIs, add a free range to the list, sort it and
+ * merge the result if the new range happens to be adjacent to an
+ * already free block.
+ *
+ * The consequence of the above is that allocation cost is low, but
+ * freeing is expensive. We assume that freeing rarely occurs.
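+ *
+ * For example, starting from a single free range [8192:65535], an
+ * allocation of 32 LPIs returns base 8192 and shrinks the range to
+ * [8224:65535]; freeing those 32 LPIs later re-inserts [8192:8223],
+ * which merge_lpi_ranges() folds back into its neighbour.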
+ */
+#define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */
+
+static DEFINE_MUTEX(lpi_range_lock);
+static LIST_HEAD(lpi_range_list);
+
+struct lpi_range {
+	struct list_head	entry;
+	u32			base_id;
+	u32			span;
+};
+
+static struct lpi_range *mk_lpi_range(u32 base, u32 span)
+{
+	struct lpi_range *range;
+
+	range = kmalloc(sizeof(*range), GFP_KERNEL);
+	if (range) {
+		range->base_id = base;
+		range->span = span;
+	}
+
+	return range;
+}
+
+static int alloc_lpi_range(u32 nr_lpis, u32 *base)
+{
+	struct lpi_range *range, *tmp;
+	int err = -ENOSPC;
+
+	mutex_lock(&lpi_range_lock);
+
+	list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+		if (range->span >= nr_lpis) {
+			*base = range->base_id;
+			range->base_id += nr_lpis;
+			range->span -= nr_lpis;
+
+			if (range->span == 0) {
+				list_del(&range->entry);
+				kfree(range);
+			}
+
+			err = 0;
+			break;
+		}
+	}
+
+	mutex_unlock(&lpi_range_lock);
+
+	pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
+	return err;
+}
+
+static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
+{
+	if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
+		return;
+	if (a->base_id + a->span != b->base_id)
+		return;
+	b->base_id = a->base_id;
+	b->span += a->span;
+	list_del(&a->entry);
+	kfree(a);
+}
+
+static int free_lpi_range(u32 base, u32 nr_lpis)
+{
+	struct lpi_range *new, *old;
+
+	new = mk_lpi_range(base, nr_lpis);
+	if (!new)
+		return -ENOMEM;
+
+	mutex_lock(&lpi_range_lock);
+
+	list_for_each_entry_reverse(old, &lpi_range_list, entry) {
+		if (old->base_id < base)
+			break;
+	}
+	/*
+	 * old is the last element with ->base_id smaller than base,
+	 * so new goes right after it. If there are no elements with
+	 * ->base_id smaller than base, &old->entry ends up pointing
+	 * at the head of the list, and inserting new at the start of
+	 * the list is the right thing to do in that case as well.
+	 */
+	list_add(&new->entry, &old->entry);
+	/*
+	 * Now check if we can merge with the preceding and/or
+	 * following ranges.
+	 */
+	merge_lpi_ranges(old, new);
+	merge_lpi_ranges(new, list_next_entry(new, entry));
+
+	mutex_unlock(&lpi_range_lock);
+	return 0;
+}
+
+static int __init its_lpi_init(u32 id_bits)
+{
+	u32 lpis = (1UL << id_bits) - 8192;
+	u32 numlpis;
+	int err;
+
+	numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
+
+	if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
+		lpis = numlpis;
+		pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
+			lpis);
+	}
+
+	/*
+	 * Initializing the allocator is just the same as freeing the
+	 * full range of LPIs.
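+	 * With id_bits capped at ITS_MAX_LPI_NRBITS (16), this seeds
+	 * the allocator with the single range [8192:65535].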
+ */ + err = free_lpi_range(8192, lpis); + pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); + return err; +} + +static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) +{ + unsigned long *bitmap = NULL; + int err = 0; + + do { + err = alloc_lpi_range(nr_irqs, base); + if (!err) + break; + + nr_irqs /= 2; + } while (nr_irqs > 0); + + if (!nr_irqs) + err = -ENOSPC; + + if (err) + goto out; + + bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC); + if (!bitmap) + goto out; + + *nr_ids = nr_irqs; + +out: + if (!bitmap) + *base = *nr_ids = 0; + + return bitmap; +} + +static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) +{ + WARN_ON(free_lpi_range(base, nr_ids)); + bitmap_free(bitmap); +} + +static void gic_reset_prop_table(void *va) +{ + /* Priority 0xa0, Group-1, disabled */ + memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); + + /* Make sure the GIC will observe the written configuration */ + gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); +} + +static struct page *its_allocate_prop_table(gfp_t gfp_flags) +{ + struct page *prop_page; + + prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); + if (!prop_page) + return NULL; + + gic_reset_prop_table(page_address(prop_page)); + + return prop_page; +} + +static void its_free_prop_table(struct page *prop_page) +{ + free_pages((unsigned long)page_address(prop_page), + get_order(LPI_PROPBASE_SZ)); +} + +static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) +{ + phys_addr_t start, end, addr_end; + u64 i; + + /* + * We don't bother checking for a kdump kernel as by + * construction, the LPI tables are out of this kernel's + * memory map. + */ + if (is_kdump_kernel()) + return true; + + addr_end = addr + size - 1; + + for_each_reserved_mem_range(i, &start, &end) { + if (addr >= start && addr_end <= end) + return true; + } + + /* Not found, not a good sign... 
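
gic_check_reserved_range() above boils down to an interval-containment test: the candidate range must sit entirely inside a single reserved region. A small standalone sketch of that test, with invented region values:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct resv { uint64_t start, end; };      /* inclusive bounds */

    static bool range_reserved(const struct resv *tbl, int n,
                               uint64_t addr, uint64_t size)
    {
        uint64_t addr_end = addr + size - 1;
        int i;

        for (i = 0; i < n; i++) {
            /* the whole [addr, addr_end] must sit in one entry */
            if (addr >= tbl[i].start && addr_end <= tbl[i].end)
                return true;
        }
        return false;
    }

    int main(void)
    {
        struct resv tbl[] = { { 0x80000000, 0x8001ffff } }; /* invented */

        printf("%d\n", range_reserved(tbl, 1, 0x80010000, 0x10000));
        return 0;
    }
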
*/ + pr_warn("GIC-S2500: Expected reserved range [%pa:%pa], not found\n", + &addr, &addr_end); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + return false; +} + +static int gic_reserve_range(phys_addr_t addr, unsigned long size) +{ + if (efi_enabled(EFI_CONFIG_TABLES)) + return efi_mem_reserve_persistent(addr, size); + + return 0; +} + +static int __init its_setup_lpi_prop_table(void) +{ + if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { + u64 val; + + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; + + gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); + gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ, + MEMREMAP_WB); + gic_reset_prop_table(gic_rdists->prop_table_va); + } else { + struct page *page; + + lpi_id_bits = min_t(u32, + GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), + ITS_MAX_LPI_NRBITS); + page = its_allocate_prop_table(GFP_NOWAIT); + if (!page) { + pr_err("Failed to allocate PROPBASE\n"); + return -ENOMEM; + } + + gic_rdists->prop_table_pa = page_to_phys(page); + gic_rdists->prop_table_va = page_address(page); + WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ)); + } + + pr_info("GICv-S2500: using LPI property table @%pa\n", + &gic_rdists->prop_table_pa); + + return its_lpi_init(lpi_id_bits); +} + +static const char *its_base_type_string[] = { + [GITS_BASER_TYPE_DEVICE] = "Devices", + [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", + [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)", + [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", + [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", + [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", + [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", +}; + +static u64 its_read_baser(struct its_node *its, struct its_baser *baser) +{ + u32 idx = baser - its->tables; + + return gits_read_baser(its->base + GITS_BASER + (idx << 3)); +} + +static void its_write_baser(struct its_node *its, struct its_baser *baser, + u64 val) +{ + u32 idx = baser - its->tables; + + gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); + baser->val = its_read_baser(its, baser); +} + +static int its_setup_baser(struct its_node *its, struct its_baser *baser, + u64 cache, u64 shr, u32 order, bool indirect) +{ + u64 val = its_read_baser(its, baser); + u64 esz = GITS_BASER_ENTRY_SIZE(val); + u64 type = GITS_BASER_TYPE(val); + u64 baser_phys, tmp; + u32 alloc_pages, psz; + struct page *page; + void *base; + + psz = baser->psz; + alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); + if (alloc_pages > GITS_BASER_PAGES_MAX) { + pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", + &its->phys_base, its_base_type_string[type], + alloc_pages, GITS_BASER_PAGES_MAX); + alloc_pages = GITS_BASER_PAGES_MAX; + order = get_order(GITS_BASER_PAGES_MAX * psz); + } + + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); + if (!page) + return -ENOMEM; + + base = (void *)page_address(page); + baser_phys = virt_to_phys(base); + + /* Check if the physical address of the memory is above 48bits */ + if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) { + + /* 52bit PA is supported only when PageSize=64K */ + if (psz != SZ_64K) { + pr_err("ITS: no 52bit PA support when psz=%d\n", psz); + free_pages((unsigned long)base, order); + return -ENXIO; + } + + /* Convert 52bit PA to 48bit field */ + baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); + } + +retry_baser: + val = (baser_phys | + (type << GITS_BASER_TYPE_SHIFT) | + ((esz - 
1) << GITS_BASER_ENTRY_SIZE_SHIFT) | + ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | + cache | + shr | + GITS_BASER_VALID); + + val |= indirect ? GITS_BASER_INDIRECT : 0x0; + + switch (psz) { + case SZ_4K: + val |= GITS_BASER_PAGE_SIZE_4K; + break; + case SZ_16K: + val |= GITS_BASER_PAGE_SIZE_16K; + break; + case SZ_64K: + val |= GITS_BASER_PAGE_SIZE_64K; + break; + } + + its_write_baser(its, baser, val); + tmp = baser->val; + + if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { + /* + * Shareability didn't stick. Just use + * whatever the read reported, which is likely + * to be the only thing this redistributor + * supports. If that's zero, make it + * non-cacheable as well. + */ + shr = tmp & GITS_BASER_SHAREABILITY_MASK; + if (!shr) { + cache = GITS_BASER_nC; + gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); + } + goto retry_baser; + } + + if (val != tmp) { + pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", + &its->phys_base, its_base_type_string[type], + val, tmp); + free_pages((unsigned long)base, order); + return -ENXIO; + } + + baser->order = order; + baser->base = base; + baser->psz = psz; + tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz; + + pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n", + &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp), + its_base_type_string[type], + (unsigned long)virt_to_phys(base), + indirect ? "indirect" : "flat", (int)esz, + psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); + + return 0; +} + +static bool its_parse_indirect_baser(struct its_node *its, + struct its_baser *baser, + u32 *order, u32 ids) +{ + u64 tmp = its_read_baser(its, baser); + u64 type = GITS_BASER_TYPE(tmp); + u64 esz = GITS_BASER_ENTRY_SIZE(tmp); + u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; + u32 new_order = *order; + u32 psz = baser->psz; + bool indirect = false; + + /* No need to enable Indirection if memory requirement < (psz * 2) bytes */ + if ((esz << ids) > (psz * 2)) { + /* + * Find out whether hw supports a single or two-level table + * by reading the bit at offset '62' after writing '1' to it. + */ + its_write_baser(its, baser, val | GITS_BASER_INDIRECT); + indirect = !!(baser->val & GITS_BASER_INDIRECT); + + if (indirect) { + /* + * The size of a lvl2 table is equal to the ITS page size, + * 'psz'. To compute the lvl1 table size, subtract the ID + * bits covered by a lvl2 table from the 'ids' reported by + * the ITS hardware; each remaining ID then costs one lvl1 + * entry of GITS_LVL1_ENTRY_SIZE bytes. + */ + ids -= ilog2(psz / (int)esz); + esz = GITS_LVL1_ENTRY_SIZE; + } + } + + /* + * Allocate as many entries as required to fit the + * range of device IDs that the ITS can grok... The ID + * space being incredibly sparse, this results in a + * massive waste of memory if the two-level device table + * feature is not supported by hardware.
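
The retry_baser loop above is the driver's write/read-back probing idiom: program the desired attributes, read the register back, and downgrade whatever did not stick before retrying. A self-contained sketch of the idiom against a simulated register that ignores writes to its shareability field; the field layout and all names here are invented, not the real GITS_BASER encoding.

    #include <stdio.h>
    #include <stdint.h>

    #define SHR_MASK   (3ULL << 10)    /* pretend shareability field */

    static uint64_t hw_reg;

    /* Simulated device: it silently ignores writes to the SHR field. */
    static void reg_write(uint64_t v) { hw_reg = v & ~SHR_MASK; }
    static uint64_t reg_read(void)    { return hw_reg; }

    int main(void)
    {
        uint64_t shr = 2ULL << 10;     /* ask for InnerShareable */
        uint64_t cache = 5ULL << 59;   /* ask for read/write-allocate */
        uint64_t val, tmp;

    retry:
        val = shr | cache | 1;         /* 1 == VALID bit, invented */
        reg_write(val);
        tmp = reg_read();

        if ((val ^ tmp) & SHR_MASK) {
            /*
             * Shareability didn't stick: adopt whatever the device
             * reports. If that is zero, drop cacheability too (the
             * real driver also flushes the table to the PoC here).
             */
            shr = tmp & SHR_MASK;
            if (!shr)
                cache = 0;
            goto retry;
        }

        printf("settled on %#llx\n", (unsigned long long)tmp);
        return 0;
    }
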
+ */ + new_order = max_t(u32, get_order(esz << ids), new_order); + if (new_order >= MAX_PAGE_ORDER) { + new_order = MAX_PAGE_ORDER - 1; + ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); + pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n", + &its->phys_base, its_base_type_string[type], + device_ids(its), ids); + } + + *order = new_order; + + return indirect; +} + +static u32 compute_common_aff(u64 val) +{ + u32 aff, clpiaff; + + aff = FIELD_GET(GICR_TYPER_AFFINITY, val); + clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val); + + return aff & ~(GENMASK(31, 0) >> (clpiaff * 8)); +} + +static u32 compute_its_aff(struct its_node *its) +{ + u64 val; + u32 svpet; + + /* + * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute + * the resulting affinity. We then use that to see if this matches + * our own affinity. + */ + svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); + val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet); + val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr); + return compute_common_aff(val); +} + +static struct its_node *find_sibling_its(struct its_node *cur_its) +{ + struct its_node *its; + u32 aff; + + if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer)) + return NULL; + + aff = compute_its_aff(cur_its); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser; + + if (!is_v4_1(its) || its == cur_its) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + return its; + } + + return NULL; +} + +static void its_free_tables(struct its_node *its) +{ + int i; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + if (its->tables[i].base) { + free_pages((unsigned long)its->tables[i].base, + its->tables[i].order); + its->tables[i].base = NULL; + } + } +} + +static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser) +{ + u64 psz = SZ_64K; + + while (psz) { + u64 val, gpsz; + + val = its_read_baser(its, baser); + val &= ~GITS_BASER_PAGE_SIZE_MASK; + + switch (psz) { + case SZ_64K: + gpsz = GITS_BASER_PAGE_SIZE_64K; + break; + case SZ_16K: + gpsz = GITS_BASER_PAGE_SIZE_16K; + break; + case SZ_4K: + default: + gpsz = GITS_BASER_PAGE_SIZE_4K; + break; + } + + gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT; + + val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz); + its_write_baser(its, baser, val); + + if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz) + break; + + switch (psz) { + case SZ_64K: + psz = SZ_16K; + break; + case SZ_16K: + psz = SZ_4K; + break; + case SZ_4K: + default: + return -1; + } + } + + baser->psz = psz; + return 0; +} + +static int its_alloc_tables(struct its_node *its) +{ + u64 shr = GITS_BASER_InnerShareable; + u64 cache = GITS_BASER_RaWaWb; + int err, i; + + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) + /* erratum 24313: ignore memory access type */ + cache = GITS_BASER_nCnB; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + struct its_baser *baser = its->tables + i; + u64 val = its_read_baser(its, baser); + u64 type = GITS_BASER_TYPE(val); + bool indirect = false; + u32 order; + + if (type == GITS_BASER_TYPE_NONE) + continue; + + if (its_probe_baser_psz(its, baser)) { + its_free_tables(its); + return -ENXIO; + } + + order = get_order(baser->psz); + + switch (type) { + case GITS_BASER_TYPE_DEVICE: + indirect = its_parse_indirect_baser(its, baser, &order, + device_ids(its)); + break; + + case GITS_BASER_TYPE_VCPU:
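
The sizing logic in its_parse_indirect_baser() above is plain arithmetic and can be sanity-checked in isolation. A sketch under assumed numbers, 4K ITS pages, 8-byte entries and 20 device-ID bits, showing why the two-level layout is so much cheaper than the flat one:

    #include <stdio.h>

    static unsigned int ilog2u(unsigned long v)
    {
        unsigned int r = 0;
        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        unsigned long psz = 4096;   /* ITS page size probed earlier      */
        unsigned long esz = 8;      /* bytes per device-table entry      */
        unsigned int ids = 20;      /* DevID bits reported by the ITS    */
        unsigned long flat = esz << ids;

        printf("flat table: %lu bytes\n", flat);

        if (flat > psz * 2) {
            /* each 'psz'-sized lvl2 page covers psz/esz IDs */
            unsigned int lvl2_bits = ilog2u(psz / esz);

            /* 8 bytes per lvl1 entry, one entry per lvl2 page */
            unsigned long lvl1 = 8UL << (ids - lvl2_bits);

            printf("lvl1 table: %lu bytes (+ lvl2 pages on demand)\n",
                   lvl1);
        }
        return 0;
    }

With these inputs the flat table needs 8 MB up front, while the indirect layout needs a 16 KB first level plus second-level pages only for device IDs that actually appear.
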
+ if (is_v4_1(its)) { + struct its_node *sibling; + + WARN_ON(i != 2); + if ((sibling = find_sibling_its(its))) { + *baser = sibling->tables[2]; + its_write_baser(its, baser, baser->val); + continue; + } + } + + indirect = its_parse_indirect_baser(its, baser, &order, + ITS_MAX_VPEID_BITS); + break; + } + + err = its_setup_baser(its, baser, cache, shr, order, indirect); + if (err < 0) { + its_free_tables(its); + return err; + } + + /* Update settings which will be used for next BASERn */ + cache = baser->val & GITS_BASER_CACHEABILITY_MASK; + shr = baser->val & GITS_BASER_SHAREABILITY_MASK; + } + + return 0; +} + +static u64 inherit_vpe_l1_table_from_its(void) +{ + struct its_node *its; + u64 val; + u32 aff; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser, addr; + + if (!is_v4_1(its)) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + /* We have a winner! */ + gic_data_rdist()->vpe_l1_base = its->tables[2].base; + + val = GICR_VPROPBASER_4_1_VALID; + if (baser & GITS_BASER_INDIRECT) + val |= GICR_VPROPBASER_4_1_INDIRECT; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, + FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)); + switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) { + case GIC_PAGE_SIZE_64K: + addr = GITS_BASER_ADDR_48_to_52(baser); + break; + default: + addr = baser & GENMASK_ULL(47, 12); + break; + } + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12); + val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, + FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, + FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); + + return val; + } + + return 0; +} + +static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask) +{ + u32 aff; + u64 val; + int cpu; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + for_each_possible_cpu(cpu) { + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + + if (!base || cpu == smp_processor_id()) + continue; + + val = gic_read_typer(base + GICR_TYPER); + if (aff != compute_common_aff(val)) + continue; + + /* + * At this point, we have a victim. This particular CPU + * has already booted, and has an affinity that matches + * ours wrt CommonLPIAff. Let's use its own VPROPBASER. + * Make sure we don't write the Z bit in that case. 
+ */ + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_Z; + + gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base; + *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; + + return val; + } + + return 0; +} + +static bool allocate_vpe_l2_table(int cpu, u32 id) +{ + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + unsigned int psz, esz, idx, npg, gpsz; + u64 val; + struct page *page; + __le64 *table; + + if (!gic_rdists->has_rvpeid) + return true; + + /* Skip non-present CPUs */ + if (!base) + return true; + + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1; + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1; + + switch (gpsz) { + default: + WARN_ON(1); + fallthrough; + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* Don't allow vpe_id that exceeds single, flat table limit */ + if (!(val & GICR_VPROPBASER_4_1_INDIRECT)) + return (id < (npg * psz / (esz * SZ_8))); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = id >> ilog2(psz / (esz * SZ_8)); + if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = gic_data_rdist_cpu(cpu)->vpe_l1_base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(page_address(page), psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to RD hardware */ + dsb(sy); + } + + return true; +} + +static int allocate_vpe_l1_table(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val, gpsz, npg, pa; + unsigned int psz = SZ_64K; + unsigned int np, epp, esz; + struct page *page; + + if (!gic_rdists->has_rvpeid) + return 0; + + /* + * if VPENDBASER.Valid is set, disable any previously programmed + * VPE by setting PendingLast while clearing Valid. This has the + * effect of making sure no doorbell will be generated and we can + * then safely clear VPROPBASER.Valid. + */ + if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid) + gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, + vlpi_base + GICR_VPENDBASER); + + /* + * If we can inherit the configuration from another RD, let's do + * so. Otherwise, we have to go through the allocation process. We + * assume that all RDs have the exact same requirements, as + * nothing will work otherwise. 
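
The vPE-table maths in allocate_vpe_l2_table() above and in the L1 sizing of allocate_vpe_l1_table() below follows one pattern: work out how many entries fit in a GIC page, then derive either a flat bound or the number of L1/L2 pages. A worked example with assumed values (64K pages, entries of two 8-byte units, a 16-bit vPE ID space); the numbers are illustrative only:

    #include <stdio.h>

    #define MAX_VPEID (1 << 16)     /* 16-bit vPE ID space, as assumed here */

    static unsigned int div_round_up(unsigned int a, unsigned int b)
    {
        return (a + b - 1) / b;
    }

    int main(void)
    {
        unsigned int psz = 65536;              /* probed GIC page size      */
        unsigned int esz = 2;                  /* entry size, 8-byte units  */
        unsigned int epp = psz / (esz * 8);    /* entries per GIC page      */

        if (epp < MAX_VPEID) {
            /* L1 must be indirect: one 8-byte L1 descriptor per L2 page */
            unsigned int nl2 = div_round_up(MAX_VPEID, epp);
            unsigned int npg = div_round_up(nl2 * 8, psz);

            /* an ID maps to L1 slot id / epp, as in the code above */
            printf("indirect: %u L2 pages, %u L1 page(s), ID 1000 -> L1[%u]\n",
                   nl2, npg, 1000 / epp);
        } else {
            printf("flat: one level covers all %u vPEs\n", MAX_VPEID);
        }
        return 0;
    }
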
+ */ + val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC); + if (!gic_data_rdist()->vpe_table_mask) + return -ENOMEM; + + val = inherit_vpe_l1_table_from_its(); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + /* First probe the page size */ + val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K); + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER); + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val); + + switch (gpsz) { + default: + gpsz = GIC_PAGE_SIZE_4K; + fallthrough; + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* + * Start populating the register from scratch, including RO fields + * (which we want to print in debug cases...) + */ + val = 0; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz); + + /* How many entries per GIC page? */ + esz++; + epp = psz / (esz * SZ_8); + + /* + * If we need more than just a single L1 page, flag the table + * as indirect and compute the number of required L1 pages. + */ + if (epp < ITS_MAX_VPEID) { + int nl2; + + val |= GICR_VPROPBASER_4_1_INDIRECT; + + /* Number of L2 pages required to cover the VPEID space */ + nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp); + + /* Number of L1 pages to point to the L2 pages */ + npg = DIV_ROUND_UP(nl2 * SZ_8, psz); + } else { + npg = 1; + } + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1); + + /* Right, that's the number of CPU pages we need for L1 */ + np = DIV_ROUND_UP(npg * psz, PAGE_SIZE); + + pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n", + np, npg, psz, epp, esz); + page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE)); + if (!page) + return -ENOMEM; + + gic_data_rdist()->vpe_l1_base = page_address(page); + pa = virt_to_phys(page_address(page)); + WARN_ON(!IS_ALIGNED(pa, psz)); + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12); + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + val |= GICR_VPROPBASER_4_1_Z; + val |= GICR_VPROPBASER_4_1_VALID; + +out: + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask); + + pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n", + smp_processor_id(), val, + cpumask_pr_args(gic_data_rdist()->vpe_table_mask)); + + return 0; +} + +static int its_alloc_collections(struct its_node *its) +{ + int i; + + its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), + GFP_KERNEL); + if (!its->collections) + return -ENOMEM; + + for (i = 0; i < nr_cpu_ids; i++) + its->collections[i].target_address = ~0ULL; + + return 0; +} + +static struct page *its_allocate_pending_table(gfp_t gfp_flags) +{ + struct page *pend_page; + + pend_page = alloc_pages(gfp_flags | __GFP_ZERO, + get_order(LPI_PENDBASE_SZ)); + if (!pend_page) + return NULL; + + /* Make sure the GIC will observe the zero-ed page */ + gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); + + return pend_page; +} + +static void its_free_pending_table(struct page *pt) +{ + free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); +} + +/* + * Booting with kdump and LPIs enabled is generally fine. 
Any other + * case is wrong in the absence of firmware/EFI support. + */ +static bool enabled_lpis_allowed(void) +{ + phys_addr_t addr; + u64 val; + + /* Check whether the property table is in a reserved region */ + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + addr = val & GENMASK_ULL(51, 12); + + return gic_check_reserved_range(addr, LPI_PROPBASE_SZ); +} + +static int __init allocate_lpi_tables(void) +{ + u64 val; + int err, cpu; + + /* + * If LPIs are enabled while we run this from the boot CPU, + * flag the RD tables as pre-allocated if the stars do align. + */ + val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); + if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { + gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | + RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); + pr_info("GIC-S2500: Using preallocated redistributor tables\n"); + } + + err = its_setup_lpi_prop_table(); + if (err) + return err; + + /* + * We allocate all the pending tables anyway, as we may have a + * mix of RDs that have had LPIs enabled, and some that + * don't. We'll free the unused ones as each CPU comes online. + */ + for_each_possible_cpu(cpu) { + struct page *pend_page; + + pend_page = its_allocate_pending_table(GFP_NOWAIT); + if (!pend_page) { + pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); + return -ENOMEM; + } + + gic_data_rdist_cpu(cpu)->pend_page = pend_page; + } + + return 0; +} + +static u64 read_vpend_dirty_clear(void __iomem *vlpi_base) +{ + u32 count = 1000000; /* 1s! */ + bool clean; + u64 val; + + do { + val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + clean = !(val & GICR_VPENDBASER_Dirty); + if (!clean) { + count--; + cpu_relax(); + udelay(1); + } + } while (!clean && count); + + if (unlikely(!clean)) + pr_err_ratelimited("ITS virtual pending table not cleaning\n"); + + return val; +} + +static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) +{ + u64 val; + + /* Make sure we wait until the RD is done with the initial scan */ + val = read_vpend_dirty_clear(vlpi_base); + val &= ~GICR_VPENDBASER_Valid; + val &= ~clr; + val |= set; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + val = read_vpend_dirty_clear(vlpi_base); + if (unlikely(val & GICR_VPENDBASER_Dirty)) + val |= GICR_VPENDBASER_PendingLast; + + return val; +} + +static void its_cpu_init_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + struct page *pend_page; + phys_addr_t paddr; + u64 val, tmp; + + if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) + return; + + val = readl_relaxed(rbase + GICR_CTLR); + if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && + (val & GICR_CTLR_ENABLE_LPIS)) { + /* + * Check that we get the same property table on all + * RDs. If we don't, this is hopeless. 
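
read_vpend_dirty_clear() above is one instance of the bounded-polling idiom used throughout this file: spin on a status bit for at most a fixed budget (here 1,000,000 iterations of udelay(1), roughly one second) and complain if it never clears. The same shape in a standalone sketch, with the MMIO read stubbed out and the bit position invented:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define DIRTY (1ULL << 60)

    /* Stand-in for the MMIO read; clears Dirty after a few polls. */
    static uint64_t read_reg(void)
    {
        static int polls;
        return (++polls < 5) ? DIRTY : 0;
    }

    static uint64_t poll_dirty_clear(void)
    {
        uint32_t count = 1000000;   /* ~1s at 1us per iteration */
        uint64_t val;
        bool clean;

        do {
            val = read_reg();
            clean = !(val & DIRTY);
            if (!clean) {
                count--;
                /* the kernel does cpu_relax() + udelay(1) here */
            }
        } while (!clean && count);

        if (!clean)
            fprintf(stderr, "table not cleaning\n");
        return val;
    }

    int main(void)
    {
        printf("final value: %#llx\n",
               (unsigned long long)poll_dirty_clear());
        return 0;
    }
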
+ */ + paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); + paddr &= GENMASK_ULL(51, 12); + if (WARN_ON(gic_rdists->prop_table_pa != paddr)) + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); + paddr &= GENMASK_ULL(51, 16); + + WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); + gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED; + + goto out; + } + + pend_page = gic_data_rdist()->pend_page; + paddr = page_to_phys(pend_page); + + /* set PROPBASE */ + val = (gic_rdists->prop_table_pa | + GICR_PROPBASER_InnerShareable | + GICR_PROPBASER_RaWaWb | + ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); + + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); + + if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { + if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. + */ + val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | + GICR_PROPBASER_CACHEABILITY_MASK); + val |= GICR_PROPBASER_nC; + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + } + pr_info_once("GIC: using cache flushing for LPI property table\n"); + gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; + } + + /* set PENDBASE */ + val = (page_to_phys(pend_page) | + GICR_PENDBASER_InnerShareable | + GICR_PENDBASER_RaWaWb); + + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); + + if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must remove the + * cacheability attributes as well. + */ + val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | + GICR_PENDBASER_CACHEABILITY_MASK); + val |= GICR_PENDBASER_nC; + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + } + + /* Enable LPIs */ + val = readl_relaxed(rbase + GICR_CTLR); + val |= GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) { + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + + /* + * It's possible for a CPU to receive VLPIs before it is + * scheduled as a vPE, especially for the first CPU, and a + * VLPI with an INTID larger than 2^(IDbits+1) would be + * considered out of range and dropped by the GIC. + * So we initialize IDbits to a known value to avoid VLPI + * drops. + */ + val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n", + smp_processor_id(), val); + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + /* + * Also clear the Valid bit of GICR_VPENDBASER, in case some + * ancient programming was left in and could corrupt memory. + */ + val = its_clear_vpend_valid(vlpi_base, 0, 0); + } + + if (allocate_vpe_l1_table()) { + /* + * If the allocation has failed, we're in massive trouble. + * Disable direct injection, and pray that no VM was + * already running... + */ + gic_rdists->has_rvpeid = false; + gic_rdists->has_vlpis = false; + } + + /* Make sure the GIC has seen the above */ + dsb(sy); +out: + gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED; + pr_info("GIC-2500: CPU%d: using %s LPI pending table @%pa\n", + smp_processor_id(), + gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?
+ "reserved" : "allocated", + &paddr); +} + +static void its_cpu_init_collection(struct its_node *its) +{ + int cpu = smp_processor_id(); + u64 target; + + /* avoid cross node collections and its mapping */ + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + struct device_node *cpu_node; + + cpu_node = of_get_cpu_node(cpu, NULL); + if (its->numa_node != NUMA_NO_NODE && + its->numa_node != of_node_to_nid(cpu_node)) + return; + } + + /* + * We now have to bind each collection to its target + * redistributor. + */ + if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { + /* + * This ITS wants the physical address of the + * redistributor. + */ + target = gic_data_rdist()->phys_base; + } else { + /* This ITS wants a linear CPU number. */ + target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + target = GICR_TYPER_CPU_NUMBER(target) << 16; + } + + /* Perform collection mapping */ + its->collections[cpu].target_address = target; + if (is_kdump_kernel()) + its->collections[cpu].col_id = cpu % 65; + else + its->collections[cpu].col_id = cpu % 64; + + its_send_mapc(its, &its->collections[cpu], 1); + its_send_invall(its, &its->collections[cpu]); +} + +static void its_cpu_init_collections(void) +{ + struct its_node *its; + + raw_spin_lock(&its_lock); + + list_for_each_entry(its, &its_nodes, entry) + its_cpu_init_collection(its); + + raw_spin_unlock(&its_lock); +} + +static struct its_device *its_find_device(struct its_node *its, u32 dev_id) +{ + struct its_device *its_dev = NULL, *tmp; + unsigned long flags; + + raw_spin_lock_irqsave(&its->lock, flags); + + list_for_each_entry(tmp, &its->its_device_list, entry) { + if (tmp->device_id == dev_id) { + its_dev = tmp; + break; + } + } + + raw_spin_unlock_irqrestore(&its->lock, flags); + + return its_dev; +} + +static struct its_baser *its_get_baser(struct its_node *its, u32 type) +{ + int i; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + if (GITS_BASER_TYPE(its->tables[i].val) == type) + return &its->tables[i]; + } + + return NULL; +} + +static bool its_alloc_table_entry(struct its_node *its, + struct its_baser *baser, u32 id) +{ + struct page *page; + u32 esz, idx; + __le64 *table; + + /* Don't allow device id that exceeds single, flat table limit */ + esz = GITS_BASER_ENTRY_SIZE(baser->val); + if (!(baser->val & GITS_BASER_INDIRECT)) + return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = id >> ilog2(baser->psz / esz); + if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = baser->base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(baser->psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(page_address(page), baser->psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to ITS hardware */ + dsb(sy); + } + + return true; +} + +static bool its_alloc_device_table(struct its_node *its, u32 dev_id) +{ + struct its_baser *baser; + + baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); + + /* Don't allow device id that exceeds ITS 
hardware limit */ + if (!baser) + return (ilog2(dev_id) < device_ids(its)); + + return its_alloc_table_entry(its, baser, dev_id); +} + +static bool its_alloc_vpe_table(u32 vpe_id) +{ + struct its_node *its; + int cpu; + + /* + * Make sure the L2 tables are allocated on *all* v4 ITSs. We + * could try and only do it on ITSs corresponding to devices + * that have interrupts targeted at this VPE, but the + * complexity becomes crazy (and you have tons of memory + * anyway, right?). + */ + list_for_each_entry(its, &its_nodes, entry) { + struct its_baser *baser; + + if (!is_v4(its)) + continue; + + baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); + if (!baser) + return false; + + if (!its_alloc_table_entry(its, baser, vpe_id)) + return false; + } + + /* Non v4.1? No need to iterate RDs and go back early. */ + if (!gic_rdists->has_rvpeid) + return true; + + /* + * Make sure the L2 tables are allocated for all copies of + * the L1 table on *all* v4.1 RDs. + */ + for_each_possible_cpu(cpu) { + if (!allocate_vpe_l2_table(cpu, vpe_id)) + return false; + } + + return true; +} + +static struct its_device *its_create_device(struct its_node *its, u32 dev_id, + int nvecs, bool alloc_lpis) +{ + struct its_device *dev; + unsigned long *lpi_map = NULL; + unsigned long flags; + u16 *col_map = NULL; + void *itt; + int lpi_base; + int nr_lpis; + int nr_ites; + int sz; + + if (!its_alloc_device_table(its, dev_id)) + return NULL; + + if (WARN_ON(!is_power_of_2(nvecs))) + nvecs = roundup_pow_of_two(nvecs); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + /* + * Even if the device wants a single LPI, the ITT must be + * sized as a power of two (and you need at least one bit...). + */ + nr_ites = max(2, nvecs); + sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1); + sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; + itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); + if (alloc_lpis) { + lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); + if (lpi_map) + col_map = kcalloc(nr_lpis, sizeof(*col_map), + GFP_KERNEL); + } else { + col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL); + nr_lpis = 0; + lpi_base = 0; + } + + if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { + kfree(dev); + kfree(itt); + bitmap_free(lpi_map); + kfree(col_map); + return NULL; + } + + gic_flush_dcache_to_poc(itt, sz); + + dev->its = its; + dev->itt = itt; + dev->nr_ites = nr_ites; + dev->event_map.lpi_map = lpi_map; + dev->event_map.col_map = col_map; + dev->event_map.lpi_base = lpi_base; + dev->event_map.nr_lpis = nr_lpis; + raw_spin_lock_init(&dev->event_map.vlpi_lock); + dev->device_id = dev_id; + INIT_LIST_HEAD(&dev->entry); + + raw_spin_lock_irqsave(&its->lock, flags); + list_add(&dev->entry, &its->its_device_list); + raw_spin_unlock_irqrestore(&its->lock, flags); + + /* Map device to its ITT */ + its_send_mapd(dev, 1); + + return dev; +} + +static void its_free_device(struct its_device *its_dev) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&its_dev->its->lock, flags); + list_del(&its_dev->entry); + raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); + kfree(its_dev->event_map.col_map); + kfree(its_dev->itt); + kfree(its_dev); +} + +static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) +{ + int idx; + + /* Find a free LPI region in lpi_map and allocate them. 
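
The ITT sizing in its_create_device() above compresses several constraints into two statements: a power-of-two vector count, a minimum of two entries, and enough slack to align the table to ITS_ITT_ALIGN (256 bytes). Unpacked with an assumed 4-byte ITT entry size (the real size comes from GITS_TYPER):

    #include <stdio.h>

    #define ITS_ITT_ALIGN 256       /* matches the kernel's SZ_256 */

    static int roundup_pow2(int v)
    {
        int r = 1;
        while (r < v)
            r <<= 1;
        return r;
    }

    int main(void)
    {
        int nvecs = 3;              /* what the device asked for        */
        int itt_entry_size = 4;     /* from GITS_TYPER, assumed here    */
        int nr_ites, sz;

        nvecs = roundup_pow2(nvecs);    /* event IDs use a bitfield     */
        nr_ites = nvecs > 2 ? nvecs : 2;/* "at least one bit" => >= 2   */

        /* round the byte size up so the table can be 256B-aligned */
        sz = nr_ites * itt_entry_size;
        sz = (sz > ITS_ITT_ALIGN ? sz : ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;

        printf("nr_ites=%d, allocation size=%d bytes\n", nr_ites, sz);
        return 0;
    }
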
*/ + idx = bitmap_find_free_region(dev->event_map.lpi_map, + dev->event_map.nr_lpis, + get_count_order(nvecs)); + if (idx < 0) + return -ENOSPC; + + *hwirq = dev->event_map.lpi_base + idx; + + return 0; +} + +static int its_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct its_node *its; + struct its_device *its_dev; + struct msi_domain_info *msi_info; + u32 dev_id; + int err = 0; + + /* + * We ignore "dev" entirely, and rely on the dev_id that has + * been passed via the scratchpad. This limits this domain's + * usefulness to upper layers that definitely know that they + * are built on top of the ITS. + */ + dev_id = info->scratchpad[0].ul; + + msi_info = msi_get_domain_info(domain); + its = msi_info->data; + + if (!gic_rdists->has_direct_lpi && + vpe_proxy.dev && + vpe_proxy.dev->its == its && + dev_id == vpe_proxy.dev->device_id) { + /* Bad luck. Get yourself a better implementation */ + WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", + dev_id); + return -EINVAL; + } + + mutex_lock(&its->dev_alloc_lock); + its_dev = its_find_device(its, dev_id); + if (its_dev) { + /* + * We already have seen this ID, probably through + * another alias (PCI bridge of some sort). No need to + * create the device. + */ + its_dev->shared = true; + pr_debug("Reusing ITT for devID %x\n", dev_id); + goto out; + } + + its_dev = its_create_device(its, dev_id, nvec, true); + if (!its_dev) { + err = -ENOMEM; + goto out; + } + + if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE) + its_dev->shared = true; + + pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); +out: + mutex_unlock(&its->dev_alloc_lock); + info->scratchpad[0].ptr = its_dev; + return err; +} + +static struct msi_domain_ops its_msi_domain_ops = { + .msi_prepare = its_msi_prepare, +}; + +static int its_irq_gic_domain_alloc(struct irq_domain *domain, + unsigned int virq, + irq_hw_number_t hwirq) +{ + struct irq_fwspec fwspec; + + if (irq_domain_get_of_node(domain->parent)) { + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 3; + fwspec.param[0] = GIC_IRQ_TYPE_LPI; + fwspec.param[1] = hwirq; + fwspec.param[2] = IRQ_TYPE_EDGE_RISING; + } else if (is_fwnode_irqchip(domain->parent->fwnode)) { + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 2; + fwspec.param[0] = hwirq; + fwspec.param[1] = IRQ_TYPE_EDGE_RISING; + } else { + return -EINVAL; + } + + return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); +} + +static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + msi_alloc_info_t *info = args; + struct its_device *its_dev = info->scratchpad[0].ptr; + struct its_node *its = its_dev->its; + struct irq_data *irqd; + irq_hw_number_t hwirq; + int err; + int i; + + err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); + if (err) + return err; + + err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev)); + if (err) + return err; + + for (i = 0; i < nr_irqs; i++) { + err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); + if (err) + return err; + + irq_domain_set_hwirq_and_chip(domain, virq + i, + hwirq + i, &its_irq_chip, its_dev); + irqd = irq_get_irq_data(virq + i); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); + pr_debug("ID:%d pID:%d vID:%d\n", + (int)(hwirq + i - its_dev->event_map.lpi_base), + (int)(hwirq + i), virq + i); + } + + return 0; +} + +static int its_cpumask_first(struct its_device *its_dev, + const struct cpumask *cpu_mask) +{ + 
unsigned int skt, skt_id, i; + phys_addr_t its_phys_base; + unsigned int cpu, cpus = 0; + unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0}; + + /* Bits [43:41] of the ITS physical base encode its socket */ + its_phys_base = its_dev->its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + /* Bits [23:16] of a CPU's MPIDR encode the socket it sits on */ + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if (skt < MAX_MARS3_SKT_COUNT) { + if (is_kdump_kernel() && (skt_id == skt)) + return i; + + skt_cpu_cnt[skt]++; + } else if (skt != 0xff) { + pr_err("socket address: %u is out of range\n", skt); + } + } + + /* Count the CPUs sitting on the sockets below the ITS's one */ + if (skt_id) { + for (i = 0; i < skt_id; i++) + cpus += skt_cpu_cnt[i]; + } + + cpu = cpumask_first(cpu_mask); + if ((cpu > cpus) && (cpu < (cpus + skt_cpu_cnt[skt_id]))) + cpus = cpu; + + return cpus; +} + +static int its_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + const struct cpumask *cpu_mask = cpu_online_mask; + int cpu; + + cpu = its_cpumask_first(its_dev, cpu_mask); + + if (cpu < 0 || cpu >= nr_cpu_ids) + return -EINVAL; + + its_inc_lpi_count(d, cpu); + its_dev->event_map.col_map[event] = cpu; + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + /* Map the GIC IRQ and event to the device */ + its_send_mapti(its_dev, d->hwirq, event); + return 0; +} + +static void its_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + its_dec_lpi_count(d, its_dev->event_map.col_map[event]); + /* Stop the delivery of interrupts */ + its_send_discard(its_dev, event); +} + +static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_node *its = its_dev->its; + int i; + + bitmap_release_region(its_dev->event_map.lpi_map, + its_get_event_id(irq_domain_get_irq_data(domain, virq)), + get_count_order(nr_irqs)); + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *data = irq_domain_get_irq_data(domain, + virq + i); + /* Nuke the entry in the domain */ + irq_domain_reset_irq_data(data); + } + + mutex_lock(&its->dev_alloc_lock); + + /* + * If all interrupts have been freed, start mopping the + * floor. This is conditioned on the device not being shared. + */ + if (!its_dev->shared && + bitmap_empty(its_dev->event_map.lpi_map, + its_dev->event_map.nr_lpis)) { + its_lpi_free(its_dev->event_map.lpi_map, + its_dev->event_map.lpi_base, + its_dev->event_map.nr_lpis); + + /* Unmap device/itt */ + its_send_mapd(its_dev, 0); + its_free_device(its_dev); + } + + mutex_unlock(&its->dev_alloc_lock); + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +static const struct irq_domain_ops its_domain_ops = { + .alloc = its_irq_domain_alloc, + .free = its_irq_domain_free, + .activate = its_irq_domain_activate, + .deactivate = its_irq_domain_deactivate, +}; + +/* + * This is insane. + * + * If a GICv4.0 doesn't implement Direct LPIs (which is extremely + * likely), the only way to perform an invalidate is to use a fake + * device to issue an INV command, implying that the LPI has first + * been mapped to some event on that device. Since this is not exactly + * cheap, we try to keep that mapping around as long as possible, and + * only issue an UNMAP if we're short on available slots. + * + * Broken by design(tm).
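
its_cpumask_first() above bakes in Phytium-specific topology encodings: bits [43:41] of the ITS physical base give the socket the ITS lives on, and bits [23:16] of a CPU's MPIDR (via cpu_logical_map()) give the CPU's socket. A standalone decode with invented addresses, to make the field extraction visible; this mirrors the assumptions of that function, not a generic GIC property:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_SKT 8

    int main(void)
    {
        uint64_t its_phys_base = 0x80028045000ULL;  /* invented example */
        uint64_t mpidr = 0x00030000;                /* invented example */

        unsigned int its_skt = (its_phys_base >> 41) & 0x7;
        unsigned int cpu_skt = (mpidr >> 16) & 0xff;

        printf("ITS socket %u, CPU socket %u, %s\n", its_skt, cpu_skt,
               (cpu_skt < MAX_SKT && cpu_skt == its_skt)
                    ? "co-located" : "remote");
        return 0;
    }
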
+ * + * GICv4.1, on the other hand, mandates that we're able to invalidate + * by writing to a MMIO register. It doesn't implement the whole of + * DirectLPI, but that's good enough. And most of the time, we don't + * even have to invalidate anything, as the redistributor can be told + * whether to generate a doorbell or not (we thus leave it enabled, + * always). + */ +static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) +{ + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + /* Already unmapped? */ + if (vpe->vpe_proxy_event == -1) + return; + + its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); + vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; + + /* + * We don't track empty slots at all, so let's move the + * next_victim pointer if we can quickly reuse that slot + * instead of nuking an existing entry. Not clear that this is + * always a win though, and this might just generate a ripple + * effect... Let's just hope VPEs don't migrate too often. + */ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + vpe_proxy.next_victim = vpe->vpe_proxy_event; + + vpe->vpe_proxy_event = -1; +} + +static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) +{ + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + if (!gic_rdists->has_direct_lpi) { + unsigned long flags; + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + its_vpe_db_proxy_unmap_locked(vpe); + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); + } +} + +static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) +{ + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + /* Already mapped? */ + if (vpe->vpe_proxy_event != -1) + return; + + /* This slot was already allocated. Kick the other VPE out. */ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); + + /* Map the new VPE instead */ + vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; + vpe->vpe_proxy_event = vpe_proxy.next_victim; + vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; + + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; + its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); +} + +static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) +{ + unsigned long flags; + struct its_collection *target_col; + + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); + wait_for_syncr(rdbase); + + return; + } + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + + its_vpe_db_proxy_map_locked(vpe); + + target_col = &vpe_proxy.dev->its->collections[to]; + its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; + + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); +} + +static int its_vpe_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + int from, cpu = cpumask_first(mask_val); + unsigned long flags; + + /* + * Changing affinity is mega expensive, so let's be as lazy as + * we can and only do it if we really have to. 
Also, if mapped + * into the proxy device, we need to move the doorbell + * interrupt to its new location. + * + * Another thing is that changing the affinity of a vPE affects + * *other interrupts* such as all the vLPIs that are routed to + * this vPE. This means that the irq_desc lock is not enough to + * protect us, and that we must ensure nobody samples vpe->col_idx + * during the update, hence the lock below which must also be + * taken on any vLPI handling path that evaluates vpe->col_idx. + */ + from = vpe_to_cpuid_lock(vpe, &flags); + if (from == cpu) + goto out; + + vpe->col_idx = cpu; + + /* + * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD + * is sharing its VPE table with the current one. + */ + if (gic_data_rdist_cpu(cpu)->vpe_table_mask && + cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) + goto out; + + its_send_vmovp(vpe); + its_vpe_db_proxy_move(vpe, from, cpu); + +out: + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + vpe_to_cpuid_unlock(vpe, flags); + + return IRQ_SET_MASK_OK_DONE; +} + +static void its_wait_vpt_parse_complete(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (!gic_rdists->has_vpend_valid_dirty) + return; + + WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER, + val, + !(val & GICR_VPENDBASER_Dirty), + 1, 500)); +} + +static void its_vpe_schedule(struct its_vpe *vpe) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + /* Schedule the VPE */ + val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & + GENMASK_ULL(51, 12); + val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + val = virt_to_phys(page_address(vpe->vpt_page)) & + GENMASK_ULL(51, 16); + val |= GICR_VPENDBASER_RaWaWb; + val |= GICR_VPENDBASER_InnerShareable; + /* + * There is no good way of finding out if the pending table is + * empty as we can race against the doorbell interrupt very + * easily. So in the end, vpe->pending_last is only an + * indication that the vcpu has something pending, not one + * that the pending table is empty. A good implementation + * would be able to read its coarse map pretty quickly anyway, + * making this a tolerable issue. + */ + val |= GICR_VPENDBASER_PendingLast; + val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; + val |= GICR_VPENDBASER_Valid; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_deschedule(struct its_vpe *vpe) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + val = its_clear_vpend_valid(vlpi_base, 0, 0); + + vpe->idai = !!(val & GICR_VPENDBASER_IDAI); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); +} + +static void its_vpe_invall(struct its_vpe *vpe) +{ + struct its_node *its; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) + continue; + + /* + * Sending a VINVALL to a single ITS is enough, as all + * we need is to reach the redistributors. 
+ */ + its_send_vinvall(its, vpe); + return; + } +} + +static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case SCHEDULE_VPE: + its_vpe_schedule(vpe); + return 0; + + case DESCHEDULE_VPE: + its_vpe_deschedule(vpe); + return 0; + + case COMMIT_VPE: + its_wait_vpt_parse_complete(); + return 0; + + case INVALL_VPE: + its_vpe_invall(vpe); + return 0; + + default: + return -EINVAL; + } +} + +static void its_vpe_send_cmd(struct its_vpe *vpe, + void (*cmd)(struct its_device *, u32)) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + + its_vpe_db_proxy_map_locked(vpe); + cmd(vpe_proxy.dev, vpe->vpe_proxy_event); + + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); +} + +static void its_vpe_send_inv(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + /* Target the redistributor this VPE is currently known on */ + raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); + rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; + gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR); + wait_for_syncr(rdbase); + raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); + } else { + its_vpe_send_cmd(vpe, its_send_inv); + } +} + +static void its_vpe_mask_irq(struct irq_data *d) +{ + /* + * We need to mask the LPI, which is described by the parent + * irq_data. Instead of calling into the parent (which won't + * exactly do the right thing), let's simply use the + * parent_data pointer. Yes, I'm naughty. + */ + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); + its_vpe_send_inv(d); +} + +static void its_vpe_unmask_irq(struct irq_data *d) +{ + /* Same hack as above... */ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_send_inv(d); +} + +static int its_vpe_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; + if (state) { + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); + } else { + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); + wait_for_syncr(rdbase); + } + } else { + if (state) + its_vpe_send_cmd(vpe, its_send_int); + else + its_vpe_send_cmd(vpe, its_send_clear); + } + + return 0; +} + +static int its_vpe_retrigger(struct irq_data *d) +{ + return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + +static struct irq_chip its_vpe_irq_chip = { + .name = "GICv4-vpe", + .irq_mask = its_vpe_mask_irq, + .irq_unmask = its_vpe_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_retrigger = its_vpe_retrigger, + .irq_set_irqchip_state = its_vpe_set_irqchip_state, + .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, +}; + +static struct its_node *find_4_1_its(void) +{ + static struct its_node *its = NULL; + + if (!its) { + list_for_each_entry(its, &its_nodes, entry) { + if (is_v4_1(its)) + return its; + } + + /* Oops?
*/ + its = NULL; + } + + return its; +} + +static void its_vpe_4_1_send_inv(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * GICv4.1 wants doorbells to be invalidated using the + * INVDB command in order to be broadcast to all RDs. Send + * it to the first valid ITS, and let the HW do its magic. + */ + its = find_4_1_its(); + if (its) + its_send_invdb(its, vpe); +} + +static void its_vpe_4_1_mask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_unmask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_schedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val = 0; + + /* Schedule the VPE */ + val |= GICR_VPENDBASER_Valid; + val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0; + val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0; + val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); + + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_4_1_deschedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (info->req_db) { + unsigned long flags; + + /* + * vPE is going to block: make the vPE non-resident with + * PendingLast clear and DB set. The GIC guarantees that if + * we read-back PendingLast clear, then a doorbell will be + * delivered when an interrupt comes. + * + * Note the locking to deal with the concurrent update of + * pending_last from the doorbell interrupt handler that can + * run concurrently. + */ + raw_spin_lock_irqsave(&vpe->vpe_lock, flags); + val = its_clear_vpend_valid(vlpi_base, + GICR_VPENDBASER_PendingLast, + GICR_VPENDBASER_4_1_DB); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); + } else { + /* + * We're not blocking, so just make the vPE non-resident + * with PendingLast set, indicating that we'll be back. 
+ */ + val = its_clear_vpend_valid(vlpi_base, + 0, + GICR_VPENDBASER_PendingLast); + vpe->pending_last = true; + } +} + +static void its_vpe_4_1_invall(struct its_vpe *vpe) +{ + void __iomem *rdbase; + unsigned long flags; + u64 val; + int cpu; + + val = GICR_INVALLR_V; + val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); + + /* Target the redistributor this vPE is currently known on */ + cpu = vpe_to_cpuid_lock(vpe, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVALLR); + + wait_for_syncr(rdbase); + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + vpe_to_cpuid_unlock(vpe, flags); +} + +static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case SCHEDULE_VPE: + its_vpe_4_1_schedule(vpe, info); + return 0; + + case DESCHEDULE_VPE: + its_vpe_4_1_deschedule(vpe, info); + return 0; + + case COMMIT_VPE: + its_wait_vpt_parse_complete(); + return 0; + + case INVALL_VPE: + its_vpe_4_1_invall(vpe); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_vpe_4_1_irq_chip = { + .name = "GICv4.1-vpe", + .irq_mask = its_vpe_4_1_mask_irq, + .irq_unmask = its_vpe_4_1_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity, +}; + +static void its_configure_sgi(struct irq_data *d, bool clear) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_desc desc; + + desc.its_vsgi_cmd.vpe = vpe; + desc.its_vsgi_cmd.sgi = d->hwirq; + desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority; + desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled; + desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group; + desc.its_vsgi_cmd.clear = clear; + + /* + * GICv4.1 allows us to send VSGI commands to any ITS as long as the + * destination VPE is mapped there. Since we map them eagerly at + * activation time, we're pretty sure the first GICv4.1 ITS will do. + */ + its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc); +} + +static void its_sgi_mask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); +} + +static void its_sgi_unmask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = true; + its_configure_sgi(d, false); +} + +static int its_sgi_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + /* + * There is no notion of affinity for virtual SGIs, at least + * not on the host (since they can only be targeting a vPE). + * Tell the kernel we've done whatever it asked for. 
+ */ + irq_data_update_effective_affinity(d, mask_val); + return IRQ_SET_MASK_OK; +} + +static int its_sgi_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (state) { + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its = find_4_1_its(); + u64 val; + + val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id); + val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq); + writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K); + } else { + its_configure_sgi(d, true); + } + + return 0; +} + +static int its_sgi_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + void __iomem *base; + unsigned long flags; + u32 count = 1000000; /* 1s! */ + u32 status; + int cpu; + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + /* + * Locking galore! We can race against two different events: + * + * - Concurrent vPE affinity change: we must make sure it cannot + * happen, or we'll talk to the wrong redistributor. This is + * identical to what happens with vLPIs. + * + * - Concurrent VSGIPENDR access: As it involves accessing two + * MMIO registers, this must be made atomic one way or another. + */ + cpu = vpe_to_cpuid_lock(vpe, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K; + writel_relaxed(vpe->vpe_id, base + GICR_VSGIR); + do { + status = readl_relaxed(base + GICR_VSGIPENDR); + if (!(status & GICR_VSGIPENDR_BUSY)) + goto out; + + count--; + if (!count) { + pr_err_ratelimited("Unable to get SGI status\n"); + goto out; + } + cpu_relax(); + udelay(1); + } while (count); + +out: + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + vpe_to_cpuid_unlock(vpe, flags); + + if (!count) + return -ENXIO; + + *val = !!(status & (1 << d->hwirq)); + + return 0; +} + +static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case PROP_UPDATE_VSGI: + vpe->sgi_config[d->hwirq].priority = info->priority; + vpe->sgi_config[d->hwirq].group = info->group; + its_configure_sgi(d, false); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_sgi_irq_chip = { + .name = "GICv4.1-sgi", + .irq_mask = its_sgi_mask_irq, + .irq_unmask = its_sgi_unmask_irq, + .irq_set_affinity = its_sgi_set_affinity, + .irq_set_irqchip_state = its_sgi_set_irqchip_state, + .irq_get_irqchip_state = its_sgi_get_irqchip_state, + .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity, +}; + +static int its_sgi_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *args) +{ + struct its_vpe *vpe = args; + int i; + + /* Yes, we do want 16 SGIs */ + WARN_ON(nr_irqs != 16); + + for (i = 0; i < 16; i++) { + vpe->sgi_config[i].priority = 0; + vpe->sgi_config[i].enabled = false; + vpe->sgi_config[i].group = false; + + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + &its_sgi_irq_chip, vpe); + irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY); + } + + return 0; +} + +static void its_sgi_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + /* Nothing to do */ +} + +static int its_sgi_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + /* Write out the initial SGI configuration */ + its_configure_sgi(d, false); + 
return 0; +} + +static void its_sgi_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + /* + * The VSGI command is awkward: + * + * - To change the configuration, CLEAR must be set to false, + * leaving the pending bit unchanged. + * - To clear the pending bit, CLEAR must be set to true, leaving + * the configuration unchanged. + * + * You just can't do both at once, hence the two commands below. + */ + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); + its_configure_sgi(d, true); +} + +static const struct irq_domain_ops its_sgi_domain_ops = { + .alloc = its_sgi_irq_domain_alloc, + .free = its_sgi_irq_domain_free, + .activate = its_sgi_irq_domain_activate, + .deactivate = its_sgi_irq_domain_deactivate, +}; + +static int its_vpe_id_alloc(void) +{ + return ida_alloc_max(&its_vpeid_ida, ITS_MAX_VPEID, GFP_KERNEL); +} + +static void its_vpe_id_free(u16 id) +{ + ida_free(&its_vpeid_ida, id); +} + +static int its_vpe_init(struct its_vpe *vpe) +{ + struct page *vpt_page; + int vpe_id; + + /* Allocate vpe_id */ + vpe_id = its_vpe_id_alloc(); + if (vpe_id < 0) + return vpe_id; + + /* Allocate VPT */ + vpt_page = its_allocate_pending_table(GFP_KERNEL); + if (!vpt_page) { + its_vpe_id_free(vpe_id); + return -ENOMEM; + } + + if (!its_alloc_vpe_table(vpe_id)) { + its_vpe_id_free(vpe_id); + its_free_pending_table(vpt_page); + return -ENOMEM; + } + + raw_spin_lock_init(&vpe->vpe_lock); + vpe->vpe_id = vpe_id; + vpe->vpt_page = vpt_page; + if (gic_rdists->has_rvpeid) + atomic_set(&vpe->vmapp_count, 0); + else + vpe->vpe_proxy_event = -1; + + return 0; +} + +static void its_vpe_teardown(struct its_vpe *vpe) +{ + its_vpe_db_proxy_unmap(vpe); + its_vpe_id_free(vpe->vpe_id); + its_free_pending_table(vpe->vpt_page); +} + +static void its_vpe_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + struct its_vm *vm = domain->host_data; + int i; + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *data = irq_domain_get_irq_data(domain, + virq + i); + struct its_vpe *vpe = irq_data_get_irq_chip_data(data); + + BUG_ON(vm != vpe->its_vm); + + clear_bit(data->hwirq, vm->db_bitmap); + its_vpe_teardown(vpe); + irq_domain_reset_irq_data(data); + } + + if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { + its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); + its_free_prop_table(vm->vprop_page); + } +} + +static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct irq_chip *irqchip = &its_vpe_irq_chip; + struct its_vm *vm = args; + unsigned long *bitmap; + struct page *vprop_page; + int base, nr_ids, i, err = 0; + + BUG_ON(!vm); + + bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); + if (!bitmap) + return -ENOMEM; + + if (nr_ids < nr_irqs) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vprop_page = its_allocate_prop_table(GFP_KERNEL); + if (!vprop_page) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vm->db_bitmap = bitmap; + vm->db_lpi_base = base; + vm->nr_db_lpis = nr_ids; + vm->vprop_page = vprop_page; + + if (gic_rdists->has_rvpeid) + irqchip = &its_vpe_4_1_irq_chip; + + for (i = 0; i < nr_irqs; i++) { + vm->vpes[i]->vpe_db_lpi = base + i; + err = its_vpe_init(vm->vpes[i]); + if (err) + break; + err = its_irq_gic_domain_alloc(domain, virq + i, + vm->vpes[i]->vpe_db_lpi); + if (err) + 
break;
+		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
+					      irqchip, vm->vpes[i]);
+		set_bit(i, bitmap);
+	}
+
+	if (err) {
+		if (i > 0)
+			its_vpe_irq_domain_free(domain, virq, i);
+
+		its_lpi_free(bitmap, base, nr_ids);
+		its_free_prop_table(vprop_page);
+	}
+
+	return err;
+}
+
+static int its_vpe_irq_domain_activate(struct irq_domain *domain,
+				       struct irq_data *d, bool reserve)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	struct its_node *its;
+
+	/*
+	 * If we use the list map, we issue VMAPP on demand... Unless
+	 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
+	 * so that VSGIs can work.
+	 */
+	if (!gic_requires_eager_mapping())
+		return 0;
+
+	/* Map the VPE to the first possible CPU */
+	vpe->col_idx = cpumask_first(cpu_online_mask);
+
+	list_for_each_entry(its, &its_nodes, entry) {
+		if (!is_v4(its))
+			continue;
+
+		its_send_vmapp(its, vpe, true);
+		its_send_vinvall(its, vpe);
+	}
+
+	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+
+	return 0;
+}
+
+static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
+					  struct irq_data *d)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	struct its_node *its;
+
+	/*
+	 * If we use the list map on GICv4.0, we unmap the VPE once no
+	 * VLPIs are associated with the VM.
+	 */
+	if (!gic_requires_eager_mapping())
+		return;
+
+	list_for_each_entry(its, &its_nodes, entry) {
+		if (!is_v4(its))
+			continue;
+
+		its_send_vmapp(its, vpe, false);
+	}
+
+	/*
+	 * There may be a direct read to the VPT after unmapping the
+	 * vPE. To guarantee the validity of such a read, make the VPT
+	 * memory coherent with the CPU caches here.
+	 */
+	if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
+		gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
+					LPI_PENDBASE_SZ);
+}
+
+static const struct irq_domain_ops its_vpe_domain_ops = {
+	.alloc			= its_vpe_irq_domain_alloc,
+	.free			= its_vpe_irq_domain_free,
+	.activate		= its_vpe_irq_domain_activate,
+	.deactivate		= its_vpe_irq_domain_deactivate,
+};
+
+static int its_force_quiescent(void __iomem *base)
+{
+	u32 count = 1000000;	/* 1s */
+	u32 val;
+
+	val = readl_relaxed(base + GITS_CTLR);
+	/*
+	 * GIC architecture specification requires the ITS to be both
+	 * disabled and quiescent for writes to GITS_BASER or
+	 * GITS_CBASER to not have UNPREDICTABLE results.
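+	 *
+	 * In other words, the early return below is only taken when
+	 * QUIESCENT is set and ENABLE is clear; anything else forces
+	 * the disable-and-poll sequence that follows.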
+ */ + if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) + return 0; + + /* Disable the generation of all interrupts to this ITS */ + val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); + writel_relaxed(val, base + GITS_CTLR); + + /* Poll GITS_CTLR and wait until ITS becomes quiescent */ + while (1) { + val = readl_relaxed(base + GITS_CTLR); + if (val & GITS_CTLR_QUIESCENT) + return 0; + + count--; + if (!count) + return -EBUSY; + + cpu_relax(); + udelay(1); + } +} + +static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) +{ + struct its_node *its = data; + + /* erratum 22375: only alloc 8MB table size (20 bits) */ + its->typer &= ~GITS_TYPER_DEVBITS; + its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1); + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; + + return true; +} + +static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; + + return true; +} + +static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) +{ + struct its_node *its = data; + + /* On QDF2400, the size of the ITE is 16Bytes */ + its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE; + its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1); + + return true; +} + +static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) +{ + struct its_node *its = its_dev->its; + + /* + * The Socionext Synquacer SoC has a so-called 'pre-ITS', + * which maps 32-bit writes targeted at a separate window of + * size '4 << device_id_bits' onto writes to GITS_TRANSLATER + * with device ID taken from bits [device_id_bits + 1:2] of + * the window offset. + */ + return its->pre_its_base + (its_dev->device_id << 2); +} + +static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) +{ + struct its_node *its = data; + u32 pre_its_window[2]; + u32 ids; + + if (!fwnode_property_read_u32_array(its->fwnode_handle, + "socionext,synquacer-pre-its", + pre_its_window, + ARRAY_SIZE(pre_its_window))) { + + its->pre_its_base = pre_its_window[0]; + its->get_msi_base = its_irq_get_msi_base_pre_its; + + ids = ilog2(pre_its_window[1]) - 2; + if (device_ids(its) > ids) { + its->typer &= ~GITS_TYPER_DEVBITS; + its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1); + } + + /* the pre-ITS breaks isolation, so disable MSI remapping */ + its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI; + return true; + } + return false; +} + +static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) +{ + struct its_node *its = data; + + /* + * Hip07 insists on using the wrong address for the VLPI + * page. Trick it into doing the right thing... 
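+	 *
+	 * The net effect of the assignment below is that all VLPI page
+	 * accesses get a fixed 128kB offset added, matching where the
+	 * HW actually decodes that page.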
+ */ + its->vlpi_redist_offset = SZ_128K; + return true; +} + +static const struct gic_quirk its_quirks[] = { +#ifdef CONFIG_CAVIUM_ERRATUM_22375 + { + .desc = "ITS: Cavium errata 22375, 24313", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_22375, + }, +#endif +#ifdef CONFIG_CAVIUM_ERRATUM_23144 + { + .desc = "ITS: Cavium erratum 23144", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_23144, + }, +#endif +#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 + { + .desc = "ITS: QDF2400 erratum 0065", + .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ + .mask = 0xffffffff, + .init = its_enable_quirk_qdf2400_e0065, + }, +#endif +#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS + { + /* + * The Socionext Synquacer SoC incorporates ARM's own GIC-500 + * implementation, but with a 'pre-ITS' added that requires + * special handling in software. + */ + .desc = "ITS: Socionext Synquacer pre-ITS", + .iidr = 0x0001143b, + .mask = 0xffffffff, + .init = its_enable_quirk_socionext_synquacer, + }, +#endif +#ifdef CONFIG_HISILICON_ERRATUM_161600802 + { + .desc = "ITS: Hip07 erratum 161600802", + .iidr = 0x00000004, + .mask = 0xffffffff, + .init = its_enable_quirk_hip07_161600802, + }, +#endif + { + } +}; + +static void its_enable_quirks(struct its_node *its) +{ + u32 iidr = readl_relaxed(its->base + GITS_IIDR); + + gic_enable_quirks(iidr, its_quirks, its); +} + +static int its_save_disable(void) +{ + struct its_node *its; + int err = 0; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + + base = its->base; + its->ctlr_save = readl_relaxed(base + GITS_CTLR); + err = its_force_quiescent(base); + if (err) { + pr_err("ITS@%pa: failed to quiesce: %d\n", + &its->phys_base, err); + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + goto err; + } + + its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); + } + +err: + if (err) { + list_for_each_entry_continue_reverse(its, &its_nodes, entry) { + void __iomem *base; + + base = its->base; + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + } + } + raw_spin_unlock(&its_lock); + + return err; +} + +static void its_restore_enable(void) +{ + struct its_node *its; + int ret; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + int i; + + base = its->base; + + /* + * Make sure that the ITS is disabled. If it fails to quiesce, + * don't restore it since writing to CBASER or BASER + * registers is undefined according to the GIC v3 ITS + * Specification. + * + * Firmware resuming with the ITS enabled is terminally broken. + */ + WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE); + ret = its_force_quiescent(base); + if (ret) { + pr_err("ITS@%pa: failed to quiesce on resume: %d\n", + &its->phys_base, ret); + continue; + } + + gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); + + /* + * Writing CBASER resets CREADR to 0, so make CWRITER and + * cmd_write line up with it. + */ + its->cmd_write = its->cmd_base; + gits_write_cwriter(0, base + GITS_CWRITER); + + /* Restore GITS_BASER from the value cache. */ + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + struct its_baser *baser = &its->tables[i]; + + if (!(baser->val & GITS_BASER_VALID)) + continue; + + its_write_baser(its, baser, baser->val); + } + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + + /* + * Reinit the collection if it's stored in the ITS. This is + * indicated by the col_id being less than the HCC field. 
+ * CID < HCC as specified in the GIC v3 Documentation. + */ + if (its->collections[smp_processor_id()].col_id < + GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) + its_cpu_init_collection(its); + } + raw_spin_unlock(&its_lock); +} + +static struct syscore_ops its_syscore_ops = { + .suspend = its_save_disable, + .resume = its_restore_enable, +}; + +static void __init __iomem *its_map_one(struct resource *res, int *err) +{ + void __iomem *its_base; + u32 val; + + its_base = ioremap(res->start, SZ_64K); + if (!its_base) { + pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); + *err = -ENOMEM; + return NULL; + } + + val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (val != 0x30 && val != 0x40) { + pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); + *err = -ENODEV; + goto out_unmap; + } + + *err = its_force_quiescent(its_base); + if (*err) { + pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); + goto out_unmap; + } + + return its_base; + +out_unmap: + iounmap(its_base); + return NULL; +} + +static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) +{ + struct irq_domain *inner_domain; + struct msi_domain_info *info; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); + if (!inner_domain) { + kfree(info); + return -ENOMEM; + } + + inner_domain->parent = its_parent; + irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); + inner_domain->flags |= its->msi_domain_flags; + info->ops = &its_msi_domain_ops; + info->data = its; + inner_domain->host_data = info; + + return 0; +} + +static int its_init_vpe_domain(void) +{ + struct its_node *its; + u32 devid; + int entries; + + if (gic_rdists->has_direct_lpi) { + pr_info("ITS: Using DirectLPI for VPE invalidation\n"); + return 0; + } + + /* Any ITS will do, even if not v4 */ + its = list_first_entry(&its_nodes, struct its_node, entry); + + entries = roundup_pow_of_two(nr_cpu_ids); + vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), + GFP_KERNEL); + if (!vpe_proxy.vpes) + return -ENOMEM; + + /* Use the last possible DevID */ + devid = GENMASK(device_ids(its) - 1, 0); + vpe_proxy.dev = its_create_device(its, devid, entries, false); + if (!vpe_proxy.dev) { + kfree(vpe_proxy.vpes); + pr_err("ITS: Can't allocate GICv4 proxy device\n"); + return -ENOMEM; + } + + BUG_ON(entries > vpe_proxy.dev->nr_ites); + + raw_spin_lock_init(&vpe_proxy.lock); + vpe_proxy.next_victim = 0; + pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", + devid, vpe_proxy.dev->nr_ites); + + return 0; +} + +static int __init its_compute_its_list_map(struct resource *res, + void __iomem *its_base) +{ + int its_number; + u32 ctlr; + + /* + * This is assumed to be done early enough that we're + * guaranteed to be single-threaded, hence no + * locking. Should this change, we should address + * this. 
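+	 *
+	 * (Should early bring-up ever become concurrent, the
+	 * find_first_zero_bit()/test_and_set_bit() pair below would
+	 * have to be made atomic.)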
+ */ + its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); + if (its_number >= GICv4_ITS_LIST_MAX) { + pr_err("ITS@%pa: No ITSList entry available!\n", + &res->start); + return -EINVAL; + } + + ctlr = readl_relaxed(its_base + GITS_CTLR); + ctlr &= ~GITS_CTLR_ITS_NUMBER; + ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; + writel_relaxed(ctlr, its_base + GITS_CTLR); + ctlr = readl_relaxed(its_base + GITS_CTLR); + if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { + its_number = ctlr & GITS_CTLR_ITS_NUMBER; + its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; + } + + if (test_and_set_bit(its_number, &its_list_map)) { + pr_err("ITS@%pa: Duplicate ITSList entry %d\n", + &res->start, its_number); + return -EINVAL; + } + + return its_number; +} + +static int __init its_probe_one(struct resource *res, + struct fwnode_handle *handle, int numa_node) +{ + struct its_node *its; + void __iomem *its_base; + u64 baser, tmp, typer; + struct page *page; + u32 ctlr; + int err; + + its_base = its_map_one(res, &err); + if (!its_base) + return err; + + pr_info("ITS %pR\n", res); + + its = kzalloc(sizeof(*its), GFP_KERNEL); + if (!its) { + err = -ENOMEM; + goto out_unmap; + } + + raw_spin_lock_init(&its->lock); + mutex_init(&its->dev_alloc_lock); + INIT_LIST_HEAD(&its->entry); + INIT_LIST_HEAD(&its->its_device_list); + typer = gic_read_typer(its_base + GITS_TYPER); + its->typer = typer; + its->base = its_base; + its->phys_base = res->start; + if (is_v4(its)) { + if (!(typer & GITS_TYPER_VMOVP)) { + err = its_compute_its_list_map(res, its_base); + if (err < 0) + goto out_free_its; + + its->list_nr = err; + + pr_info("ITS@%pa: Using ITS number %d\n", + &res->start, err); + } else { + pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); + } + + if (is_v4_1(its)) { + u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer); + + its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K); + if (!its->sgir_base) { + err = -ENOMEM; + goto out_free_its; + } + + its->mpidr = readl_relaxed(its_base + GITS_MPIDR); + + pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", + &res->start, its->mpidr, svpet); + } + } + + its->numa_node = numa_node; + + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(ITS_CMD_QUEUE_SZ)); + if (!page) { + err = -ENOMEM; + goto out_unmap_sgir; + } + its->cmd_base = (void *)page_address(page); + its->cmd_write = its->cmd_base; + its->fwnode_handle = handle; + its->get_msi_base = its_irq_get_msi_base; + its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI; + + its_enable_quirks(its); + + err = its_alloc_tables(its); + if (err) + goto out_free_cmd; + + err = its_alloc_collections(its); + if (err) + goto out_free_tables; + + baser = (virt_to_phys(its->cmd_base) | + GITS_CBASER_RaWaWb | + GITS_CBASER_InnerShareable | + (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | + GITS_CBASER_VALID); + + gits_write_cbaser(baser, its->base + GITS_CBASER); + tmp = gits_read_cbaser(its->base + GITS_CBASER); + + if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { + if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. 
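+			 *
+			 * Concretely, the RaWaWb + InnerShareable attributes
+			 * requested above degrade to non-cacheable (nC) below,
+			 * and the command queue is then maintained with
+			 * explicit flushes via ITS_FLAGS_CMDQ_NEEDS_FLUSHING.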
+ */ + baser &= ~(GITS_CBASER_SHAREABILITY_MASK | + GITS_CBASER_CACHEABILITY_MASK); + baser |= GITS_CBASER_nC; + gits_write_cbaser(baser, its->base + GITS_CBASER); + } + pr_info("ITS: using cache flushing for cmd queue\n"); + its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; + } + + gits_write_cwriter(0, its->base + GITS_CWRITER); + ctlr = readl_relaxed(its->base + GITS_CTLR); + ctlr |= GITS_CTLR_ENABLE; + if (is_v4(its)) + ctlr |= GITS_CTLR_ImDe; + writel_relaxed(ctlr, its->base + GITS_CTLR); + + err = its_init_domain(handle, its); + if (err) + goto out_free_tables; + + raw_spin_lock(&its_lock); + list_add(&its->entry, &its_nodes); + raw_spin_unlock(&its_lock); + + return 0; + +out_free_tables: + its_free_tables(its); +out_free_cmd: + free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); +out_unmap_sgir: + if (its->sgir_base) + iounmap(its->sgir_base); +out_free_its: + kfree(its); +out_unmap: + iounmap(its_base); + pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); + return err; +} + +static bool gic_rdists_supports_plpis(void) +{ + return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); +} + +static int redist_disable_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + u64 timeout = USEC_PER_SEC; + u64 val; + + if (!gic_rdists_supports_plpis()) { + pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); + return -ENXIO; + } + + val = readl_relaxed(rbase + GICR_CTLR); + if (!(val & GICR_CTLR_ENABLE_LPIS)) + return 0; + + /* + * If coming via a CPU hotplug event, we don't need to disable + * LPIs before trying to re-enable them. They are already + * configured and all is well in the world. + * + * If running with preallocated tables, there is nothing to do. + */ + if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) || + (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) + return 0; + + /* + * From that point on, we only try to do some damage control. + */ + pr_warn("GIC-2500: CPU%d: Booted with LPIs enabled, memory probably corrupted\n", + smp_processor_id()); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + /* Disable LPIs */ + val &= ~GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* Make sure any change to GICR_CTLR is observable by the GIC */ + dsb(sy); + + /* + * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs + * from 1 to 0 before programming GICR_PEND{PROP}BASER registers. + * Error out if we time out waiting for RWP to clear. + */ + while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) { + if (!timeout) { + pr_err("CPU%d: Timeout while disabling LPIs\n", + smp_processor_id()); + return -ETIMEDOUT; + } + udelay(1); + timeout--; + } + + /* + * After it has been written to 1, it is IMPLEMENTATION + * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be + * cleared to 0. Error out if clearing the bit failed. 
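+	 *
+	 * On implementations where the bit turns out to be RES1 there
+	 * is no way to recover at runtime, hence the -EBUSY below
+	 * rather than a retry.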
+ */ + if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) { + pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id()); + return -EBUSY; + } + + return 0; +} + +int phytium_its_cpu_init(void) +{ + if (!list_empty(&its_nodes)) { + int ret; + + ret = redist_disable_lpis(); + if (ret) + return ret; + + its_cpu_init_lpis(); + its_cpu_init_collections(); + } + + return 0; +} + +static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work) +{ + cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state); + gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; +} + +static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work, + rdist_memreserve_cpuhp_cleanup_workfn); + +static int its_cpu_memreserve_lpi(unsigned int cpu) +{ + struct page *pend_page; + int ret = 0; + + /* This gets to run exactly once per CPU */ + if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE) + return 0; + + pend_page = gic_data_rdist()->pend_page; + if (WARN_ON(!pend_page)) { + ret = -ENOMEM; + goto out; + } + /* + * If the pending table was pre-programmed, free the memory we + * preemptively allocated. Otherwise, reserve that memory for + * later kexecs. + */ + if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) { + its_free_pending_table(pend_page); + gic_data_rdist()->pend_page = NULL; + } else { + phys_addr_t paddr = page_to_phys(pend_page); + + WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); + } + +out: + /* Last CPU being brought up gets to issue the cleanup */ + if (!IS_ENABLED(CONFIG_SMP) || + cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask)) + schedule_work(&rdist_memreserve_cpuhp_cleanup_work); + + gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE; + return ret; +} + +/* Mark all the BASER registers as invalid before they get reprogrammed */ +static int __init its_reset_one(struct resource *res) +{ + void __iomem *its_base; + int err, i; + + its_base = its_map_one(res, &err); + if (!its_base) + return err; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) + gits_write_baser(0, its_base + GITS_BASER + (i << 3)); + + iounmap(its_base); + return 0; +} + +static const struct of_device_id its_device_id[] = { + { .compatible = "arm,gic-s2500-its", }, + {}, +}; + +static int __init its_of_probe(struct device_node *node) +{ + struct device_node *np; + struct resource res; + + /* + * Make sure *all* the ITS are reset before we probe any, as + * they may be sharing memory. If any of the ITS fails to + * reset, don't even try to go any further, as this could + * result in something even worse. 
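+	 *
+	 * Hence the two passes below: a first loop that only resets,
+	 * followed by a second one that actually probes.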
+ */ + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + int err; + + if (!of_device_is_available(np) || + !of_property_read_bool(np, "msi-controller") || + of_address_to_resource(np, 0, &res)) + continue; + + err = its_reset_one(&res); + if (err) + return err; + } + + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np)) + continue; + if (!of_property_read_bool(np, "msi-controller")) { + pr_warn("%pOF: no msi-controller property, ITS ignored\n", + np); + continue; + } + + if (of_address_to_resource(np, 0, &res)) { + pr_warn("%pOF: no regs?\n", np); + continue; + } + + its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); + } + return 0; +} + +#ifdef CONFIG_ACPI + +#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) + +#ifdef CONFIG_ACPI_NUMA +struct its_srat_map { + /* numa node id */ + u32 numa_node; + /* GIC ITS ID */ + u32 its_id; +}; + +static struct its_srat_map *its_srat_maps __initdata; +static int its_in_srat __initdata; + +static int __init acpi_get_its_numa_node(u32 its_id) +{ + int i; + + for (i = 0; i < its_in_srat; i++) { + if (its_id == its_srat_maps[i].its_id) + return its_srat_maps[i].numa_node; + } + return NUMA_NO_NODE; +} + +static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + return 0; +} + +static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + int node; + struct acpi_srat_gic_its_affinity *its_affinity; + + its_affinity = (struct acpi_srat_gic_its_affinity *)header; + if (!its_affinity) + return -EINVAL; + + if (its_affinity->header.length < sizeof(*its_affinity)) { + pr_err("SRAT: Invalid header length %d in ITS affinity\n", + its_affinity->header.length); + return -EINVAL; + } + + /* + * Note that in theory a new proximity node could be created by this + * entry as it is an SRAT resource allocation structure. + * We do not currently support doing so. 
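+	 *
+	 * Such an entry is simply rejected below when pxm_to_node()
+	 * cannot resolve it to an already known node.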
+ */ + node = pxm_to_node(its_affinity->proximity_domain); + + if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { + pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); + return 0; + } + + its_srat_maps[its_in_srat].numa_node = node; + its_srat_maps[its_in_srat].its_id = its_affinity->its_id; + its_in_srat++; + pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", + its_affinity->proximity_domain, its_affinity->its_id, node); + + return 0; +} + +static void __init acpi_table_parse_srat_its(void) +{ + int count; + + count = acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_match_srat_its, 0); + if (count <= 0) + return; + + its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map), + GFP_KERNEL); + if (!its_srat_maps) + return; + + acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_parse_srat_its, 0); +} + +/* free the its_srat_maps after ITS probing */ +static void __init acpi_its_srat_maps_free(void) +{ + kfree(its_srat_maps); +} +#else +static void __init acpi_table_parse_srat_its(void) { } +static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } +static void __init acpi_its_srat_maps_free(void) { } +#endif + +static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct fwnode_handle *dom_handle; + struct resource res; + int err; + + its_entry = (struct acpi_madt_generic_translator *)header; + memset(&res, 0, sizeof(res)); + res.start = its_entry->base_address; + res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; + res.flags = IORESOURCE_MEM; + + dom_handle = irq_domain_alloc_fwnode(&res.start); + if (!dom_handle) { + pr_err("ITS@%pa: Unable to allocate GIC-phytium-S2500 ITS domain token\n", + &res.start); + return -ENOMEM; + } + + err = iort_register_domain_token(its_entry->translation_id, res.start, + dom_handle); + if (err) { + pr_err("ITS@%pa: Unable to register GIC-Phytium-S2500 ITS domain token (ITS ID %d) to IORT\n", + &res.start, its_entry->translation_id); + goto dom_err; + } + + err = its_probe_one(&res, dom_handle, + acpi_get_its_numa_node(its_entry->translation_id)); + if (!err) + return 0; + + iort_deregister_domain_token(its_entry->translation_id); +dom_err: + irq_domain_free_fwnode(dom_handle); + return err; +} + +static int __init its_acpi_reset(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct resource res; + + its_entry = (struct acpi_madt_generic_translator *)header; + res = (struct resource) { + .start = its_entry->base_address, + .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1, + .flags = IORESOURCE_MEM, + }; + + return its_reset_one(&res); +} + +static void __init its_acpi_probe(void) +{ + acpi_table_parse_srat_its(); + /* + * Make sure *all* the ITS are reset before we probe any, as + * they may be sharing memory. If any of the ITS fails to + * reset, don't even try to go any further, as this could + * result in something even worse. 
+ */ + if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + its_acpi_reset, 0) > 0) + acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + gic_acpi_parse_madt_its, 0); + acpi_its_srat_maps_free(); +} +#else +static void __init its_acpi_probe(void) { } +#endif + +int __init phytium_its_lpi_memreserve_init(void) +{ + int state; + + if (!efi_enabled(EFI_CONFIG_TABLES)) + return 0; + + if (list_empty(&its_nodes)) + return 0; + + gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; + state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "irqchip/arm/gic-2500/memreserve:online", + its_cpu_memreserve_lpi, + NULL); + if (state < 0) + return state; + + gic_rdists->cpuhp_memreserve_state = state; + + return 0; +} +int __init phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists, + struct irq_domain *parent_domain) +{ + struct device_node *of_node; + struct its_node *its; + bool has_v4 = false; + bool has_v4_1 = false; + int err; + + gic_rdists = rdists; + + its_parent = parent_domain; + of_node = to_of_node(handle); + if (of_node) + its_of_probe(of_node); + else + its_acpi_probe(); + + if (list_empty(&its_nodes)) { + pr_warn("ITS: No ITS available, not enabling LPIs\n"); + return -ENXIO; + } + + err = allocate_lpi_tables(); + if (err) + return err; + + list_for_each_entry(its, &its_nodes, entry) { + has_v4 |= is_v4(its); + has_v4_1 |= is_v4_1(its); + } + + /* Don't bother with inconsistent systems */ + if (WARN_ON(!has_v4_1 && rdists->has_rvpeid)) + rdists->has_rvpeid = false; + + if (has_v4 & rdists->has_vlpis) { + const struct irq_domain_ops *sgi_ops; + + if (has_v4_1) + sgi_ops = &its_sgi_domain_ops; + else + sgi_ops = NULL; + + if (its_init_vpe_domain() || + its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) { + rdists->has_vlpis = false; + pr_err("ITS: Disabling GICv4 support\n"); + } + } + + register_syscore_ops(&its_syscore_ops); + + return 0; +} diff --git a/drivers/irqchip/irq-gic-phytium-2500.c b/drivers/irqchip/irq-gic-phytium-2500.c new file mode 100644 index 0000000000000..4513be823546c --- /dev/null +++ b/drivers/irqchip/irq-gic-phytium-2500.c @@ -0,0 +1,2553 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023, Phytium Technology Co., Ltd + */ + +#define pr_fmt(fmt) "GIC-2500: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "irq-gic-common.h" +#include + +#define MAX_MARS3_SOC_COUNT 8 +#define MARS3_ADDR_SKTID_SHIFT 41 + +struct gic_dist_desc { + void __iomem *dist_base; + phys_addr_t phys_base; + unsigned long size; +}; + +#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0) +#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1) + +#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) + +struct redist_region { + void __iomem *redist_base; + phys_addr_t phys_base; + bool single_redist; +}; + +static struct gic_dist_desc mars3_gic_dists[MAX_MARS3_SOC_COUNT] __read_mostly; + +static unsigned int mars3_sockets_bitmap = 0x1; + +#define mars3_irq_to_skt(hwirq) (((hwirq) - 32) % 8) + +static u8 gic_dist_prio_irq __ro_after_init = GICV3_PRIO_IRQ; +static u8 gic_dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI; + +struct gic_chip_data { + struct fwnode_handle *fwnode; + void __iomem *dist_base; + struct redist_region *redist_regions; + struct rdists rdists; + struct irq_domain *domain; + u64 redist_stride; + u32 nr_redist_regions; + u64 flags; + bool 
has_rss;
+	unsigned int		ppi_nr;
+	struct partition_desc	**ppi_descs;
+};
+
+static struct gic_chip_data gic_data __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
+
+#define GIC_ID_NR	(1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
+#define GIC_LINE_NR	min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+#define GIC_ESPI_NR	GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
+
+/*
+ * The behaviours of RPR and PMR registers differ depending on the value of
+ * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
+ * distributor and redistributors depends on whether security is enabled in the
+ * GIC.
+ *
+ * When security is enabled, non-secure priority values from the (re)distributor
+ * are presented to the GIC CPUIF as follows:
+ *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
+ *
+ * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
+ * EL1 are subject to a similar operation, thus matching the priorities presented
+ * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
+ * these values are unchanged by the GIC.
+ *
+ * see GICv3/GICv4 Architecture Specification (IHI0069D):
+ * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
+ *   priorities.
+ * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
+ *   interrupt.
+ */
+static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
+
+/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
+static refcount_t *ppi_nmi_refs;
+
+static struct gic_kvm_info gic_v3_kvm_info __initdata;
+static DEFINE_PER_CPU(bool, has_rss);
+
+#define MPIDR_RS(mpidr)			(((mpidr) & 0xF0UL) >> 4)
+#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
+#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
+#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)
+
+/* Our default, arbitrary priority value. Linux only uses one anyway. */
+#define DEFAULT_PMR_VALUE	0xf0
+
+enum gic_intid_range {
+	SGI_RANGE,
+	PPI_RANGE,
+	SPI_RANGE,
+	EPPI_RANGE,
+	ESPI_RANGE,
+	LPI_RANGE,
+	__INVALID_RANGE__
+};
+
+static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
+{
+	switch (hwirq) {
+	case 0 ... 15:
+		return SGI_RANGE;
+	case 16 ... 31:
+		return PPI_RANGE;
+	case 32 ... 1019:
+		return SPI_RANGE;
+	case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
+		return EPPI_RANGE;
+	case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
+		return ESPI_RANGE;
+	case 8192 ... GENMASK(23, 0):
+		return LPI_RANGE;
+	default:
+		return __INVALID_RANGE__;
+	}
+}
+
+static enum gic_intid_range get_intid_range(struct irq_data *d)
+{
+	return __get_intid_range(d->hwirq);
+}
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+	return d->hwirq;
+}
+
+static inline bool gic_irq_in_rdist(struct irq_data *d)
+{
+	switch (get_intid_range(d)) {
+	case SGI_RANGE:
+	case PPI_RANGE:
+	case EPPI_RANGE:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+	switch (get_intid_range(d)) {
+	case SGI_RANGE:
+	case PPI_RANGE:
+	case EPPI_RANGE:
+		/* SGI+PPI -> SGI_base for this CPU */
+		return gic_data_rdist_sgi_base();
+
+	case SPI_RANGE:
+	case ESPI_RANGE:
+		/* SPI -> dist_base */
+		return gic_data.dist_base;
+
+	default:
+		return NULL;
+	}
+}
+
+static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
+{
+	u32 count = 1000000;	/* 1s! */
+
+	while (readl_relaxed(base + GICD_CTLR) & bit) {
+		count--;
+		if (!count) {
+			pr_err_ratelimited("RWP timeout, gone fishing\n");
+			return;
+		}
+		cpu_relax();
+		udelay(1);
+	}
+}
+
+/* Wait for completion of a distributor change */
+static void gic_dist_wait_for_rwp(void)
+{
+	gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
+}
+
+/* Wait for completion of a redistributor change */
+static void gic_redist_wait_for_rwp(void)
+{
+	gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
+}
+
+static void gic_enable_redist(bool enable)
+{
+	void __iomem *rbase;
+	u32 count = 1000000;	/* 1s! */
+	u32 val;
+	unsigned long mpidr;
+	int i;
+
+	if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
+		return;
+
+	rbase = gic_data_rdist_rd_base();
+
+	val = readl_relaxed(rbase + GICR_WAKER);
+	if (enable)
+		/* Wake up this CPU redistributor */
+		val &= ~GICR_WAKER_ProcessorSleep;
+	else
+		val |= GICR_WAKER_ProcessorSleep;
+	writel_relaxed(val, rbase + GICR_WAKER);
+
+	if (!enable) {		/* Check that GICR_WAKER is writeable */
+		val = readl_relaxed(rbase + GICR_WAKER);
+		if (!(val & GICR_WAKER_ProcessorSleep))
+			return;	/* No PM support in this redistributor */
+	}
+
+	while (--count) {
+		val = readl_relaxed(rbase + GICR_WAKER);
+		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
+			break;
+		cpu_relax();
+		udelay(1);
+	}
+	if (!count)
+		pr_err_ratelimited("redistributor failed to %s...\n",
+				   enable ? "wakeup" : "sleep");
+
+	mpidr = (unsigned long)cpu_logical_map(smp_processor_id());
+
+	/* Either Aff0 or Aff1 is not zero */
+	if (mpidr & 0xffff)
+		return;
+
+	/* Skip 64 Redistributors */
+	rbase = rbase + 64 * SZ_128K;
+
+	for (i = 0; i < 4; i++) {
+		val = readl_relaxed(rbase + GICR_WAKER);
+		if (enable)
+			val &= ~GICR_WAKER_ProcessorSleep;
+		else
+			val |= GICR_WAKER_ProcessorSleep;
+		writel_relaxed(val, rbase + GICR_WAKER);
+
+		if (!enable) {
+			val = readl_relaxed(rbase + GICR_WAKER);
+			if (!(val & GICR_WAKER_ProcessorSleep))
+				return;
+		}
+
+		count = 1000000;	/* 1s! */
+		while (--count) {
+			val = readl_relaxed(rbase + GICR_WAKER);
+			if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
+				break;
+			cpu_relax();
+			udelay(1);
+		}
+
+		if (!count)
+			pr_err_ratelimited("CPU MPIDR 0x%lx: redistributor %d failed to %s...\n",
+					   mpidr, 64 + i, enable ? "wakeup" : "sleep");
+
+		rbase = rbase + SZ_128K;
+	}
+}
+
+/*
+ * Routines to disable, enable, EOI and route interrupts
+ */
+static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
+{
+	switch (get_intid_range(d)) {
+	case SGI_RANGE:
+	case PPI_RANGE:
+	case SPI_RANGE:
+		*index = d->hwirq;
+		return offset;
+	case EPPI_RANGE:
+		/*
+		 * Contrary to the ESPI range, the EPPI range is contiguous
+		 * to the PPI range in the registers, so let's adjust the
+		 * displacement accordingly. Consistency is overrated.
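+		 *
+		 * For example, hwirq == EPPI_BASE_INTID maps to index 32,
+		 * i.e. the register slot immediately after the last PPI.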
+		 */
+		*index = d->hwirq - EPPI_BASE_INTID + 32;
+		return offset;
+	case ESPI_RANGE:
+		*index = d->hwirq - ESPI_BASE_INTID;
+		switch (offset) {
+		case GICD_ISENABLER:
+			return GICD_ISENABLERnE;
+		case GICD_ICENABLER:
+			return GICD_ICENABLERnE;
+		case GICD_ISPENDR:
+			return GICD_ISPENDRnE;
+		case GICD_ICPENDR:
+			return GICD_ICPENDRnE;
+		case GICD_ISACTIVER:
+			return GICD_ISACTIVERnE;
+		case GICD_ICACTIVER:
+			return GICD_ICACTIVERnE;
+		case GICD_IPRIORITYR:
+			return GICD_IPRIORITYRnE;
+		case GICD_ICFGR:
+			return GICD_ICFGRnE;
+		case GICD_IROUTER:
+			return GICD_IROUTERnE;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	WARN_ON(1);
+	*index = d->hwirq;
+	return offset;
+}
+
+static int gic_peek_irq(struct irq_data *d, u32 offset)
+{
+	void __iomem *base;
+	u32 index, mask, skt;
+
+	offset = convert_offset_index(d, offset, &index);
+	mask = 1 << (index % 32);
+
+	if (gic_irq_in_rdist(d))
+		base = gic_data_rdist_sgi_base();
+	else {
+		skt = mars3_irq_to_skt(gic_irq(d));
+		base = mars3_gic_dists[skt].dist_base;
+	}
+
+	return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
+}
+
+static void gic_poke_irq(struct irq_data *d, u32 offset)
+{
+	void __iomem *base, *rbase;
+	unsigned long mpidr;
+	int i;
+	u32 index, mask, skt;
+
+	offset = convert_offset_index(d, offset, &index);
+	mask = 1 << (index % 32);
+
+	if (gic_irq_in_rdist(d)) {
+		base = gic_data_rdist_sgi_base();
+
+		writel_relaxed(mask, base + offset + (index / 32) * 4);
+		gic_redist_wait_for_rwp();
+
+		mpidr = (unsigned long)cpu_logical_map(smp_processor_id());
+
+		if ((mpidr & 0xffff) == 0) {
+			rbase = base + 64*SZ_128K;
+
+			for (i = 0; i < 4; i++) {
+				writel_relaxed(mask, rbase + offset + (index / 32) * 4);
+				gic_do_wait_for_rwp(rbase - SZ_64K, GICR_CTLR_RWP);
+				rbase = rbase + SZ_128K;
+			}
+		}
+	} else {
+		skt = mars3_irq_to_skt(gic_irq(d));
+		base = mars3_gic_dists[skt].dist_base;
+		writel_relaxed(mask, base + offset + (index / 32) * 4);
+		gic_do_wait_for_rwp(base, GICD_CTLR_RWP);
+	}
+}
+
+static void gic_mask_irq(struct irq_data *d)
+{
+	gic_poke_irq(d, GICD_ICENABLER);
+	if (gic_irq_in_rdist(d))
+		gic_redist_wait_for_rwp();
+	else
+		gic_dist_wait_for_rwp();
+}
+
+static void gic_eoimode1_mask_irq(struct irq_data *d)
+{
+	gic_mask_irq(d);
+	/*
+	 * When masking a forwarded interrupt, make sure it is
+	 * deactivated as well.
+	 *
+	 * This ensures that an interrupt that is getting
+	 * disabled/masked will not get "stuck", because there is
+	 * no one to deactivate it (guest is being terminated).
+	 */
+	if (irqd_is_forwarded_to_vcpu(d))
+		gic_poke_irq(d, GICD_ICACTIVER);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+	gic_poke_irq(d, GICD_ISENABLER);
+}
+
+static inline bool gic_supports_nmi(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
+	       static_branch_likely(&supports_pseudo_nmis);
+}
+
+static int gic_irq_set_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which, bool val)
+{
+	u32 reg;
+
+	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
+		return -EINVAL;
+
+	switch (which) {
+	case IRQCHIP_STATE_PENDING:
+		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
+		break;
+
+	case IRQCHIP_STATE_ACTIVE:
+		reg = val ?
GICD_ISACTIVER : GICD_ICACTIVER; + break; + + case IRQCHIP_STATE_MASKED: + if (val) { + gic_mask_irq(d); + return 0; + } + reg = GICD_ISENABLER; + break; + + default: + return -EINVAL; + } + + gic_poke_irq(d, reg); + return 0; +} + +static int gic_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + if (d->hwirq >= 8192) /* PPI/SPI only */ + return -EINVAL; + + switch (which) { + case IRQCHIP_STATE_PENDING: + *val = gic_peek_irq(d, GICD_ISPENDR); + break; + + case IRQCHIP_STATE_ACTIVE: + *val = gic_peek_irq(d, GICD_ISACTIVER); + break; + + case IRQCHIP_STATE_MASKED: + *val = !gic_peek_irq(d, GICD_ISENABLER); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static void gic_irq_set_prio(struct irq_data *d, u8 prio) +{ + void __iomem *base = gic_dist_base(d); + u32 offset, index; + + offset = convert_offset_index(d, GICD_IPRIORITYR, &index); + + writeb_relaxed(prio, base + offset + index); +} + +static u32 __gic_get_ppi_index(irq_hw_number_t hwirq) +{ + switch (__get_intid_range(hwirq)) { + case PPI_RANGE: + return hwirq - 16; + case EPPI_RANGE: + return hwirq - EPPI_BASE_INTID + 16; + default: + unreachable(); + } +} + +static u32 gic_get_ppi_index(struct irq_data *d) +{ + return __gic_get_ppi_index(d->hwirq); +} + +static int gic_irq_nmi_setup(struct irq_data *d) +{ + struct irq_desc *desc = irq_to_desc(d->irq); + + if (!gic_supports_nmi()) + return -EINVAL; + + if (gic_peek_irq(d, GICD_ISENABLER)) { + pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); + return -EINVAL; + } + + /* + * A secondary irq_chip should be in charge of LPI request, + * it should not be possible to get there + */ + if (WARN_ON(gic_irq(d) >= 8192)) + return -EINVAL; + + /* desc lock should already be held */ + if (gic_irq_in_rdist(d)) { + u32 idx = gic_get_ppi_index(d); + + /* Setting up PPI as NMI, only switch handler for first NMI */ + if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) { + refcount_set(&ppi_nmi_refs[idx], 1); + desc->handle_irq = handle_percpu_devid_fasteoi_nmi; + } + } else { + desc->handle_irq = handle_fasteoi_nmi; + } + + gic_irq_set_prio(d, gic_dist_prio_nmi); + + return 0; +} + +static void gic_irq_nmi_teardown(struct irq_data *d) +{ + struct irq_desc *desc = irq_to_desc(d->irq); + + if (WARN_ON(!gic_supports_nmi())) + return; + + if (gic_peek_irq(d, GICD_ISENABLER)) { + pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); + return; + } + + /* + * A secondary irq_chip should be in charge of LPI request, + * it should not be possible to get there + */ + if (WARN_ON(gic_irq(d) >= 8192)) + return; + + /* desc lock should already be held */ + if (gic_irq_in_rdist(d)) { + u32 idx = gic_get_ppi_index(d); + + /* Tearing down NMI, only switch handler for last NMI */ + if (refcount_dec_and_test(&ppi_nmi_refs[idx])) + desc->handle_irq = handle_percpu_devid_irq; + } else { + desc->handle_irq = handle_fasteoi_irq; + } + + gic_irq_set_prio(d, gic_dist_prio_irq); +} + +static void gic_eoi_irq(struct irq_data *d) +{ + write_gicreg(gic_irq(d), ICC_EOIR1_EL1); + isb(); +} + +static void gic_eoimode1_eoi_irq(struct irq_data *d) +{ + /* + * No need to deactivate an LPI, or an interrupt that + * is getting forwarded to a vcpu. 
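+	 *
+	 * (LPIs have no active state to speak of, and a forwarded
+	 * interrupt is expected to be deactivated by the guest via
+	 * the virtual interface.)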
+	 */
+	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
+		return;
+	gic_write_dir(gic_irq(d));
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+	enum gic_intid_range range;
+	unsigned int irq = gic_irq(d);
+	void __iomem *base, *rbase;
+	u32 offset, index, skt;
+	int ret, i;
+	unsigned long mpidr;
+
+	range = get_intid_range(d);
+	/* Interrupt configuration for SGIs can't be changed */
+	if (range == SGI_RANGE)
+		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
+
+	/* SPIs have restrictions on the supported types */
+	if ((range == SPI_RANGE || range == ESPI_RANGE) &&
+	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+		return -EINVAL;
+
+	offset = convert_offset_index(d, GICD_ICFGR, &index);
+
+	if (gic_irq_in_rdist(d)) {
+		base = gic_data_rdist_sgi_base();
+		ret = gic_configure_irq(index, type, base + offset);
+		gic_redist_wait_for_rwp();
+		mpidr = (unsigned long)cpu_logical_map(smp_processor_id());
+
+		if ((mpidr & 0xffff) == 0) {
+			rbase = base + 64*SZ_128K;
+
+			for (i = 0; i < 4; i++) {
+				ret = gic_configure_irq(index, type, rbase + offset);
+				gic_do_wait_for_rwp(rbase - SZ_64K, GICR_CTLR_RWP);
+				rbase = rbase + SZ_128K;
+			}
+		}
+	} else {
+		skt = mars3_irq_to_skt(gic_irq(d));
+		base = mars3_gic_dists[skt].dist_base;
+		ret = gic_configure_irq(index, type, base + offset);
+		gic_do_wait_for_rwp(base, GICD_CTLR_RWP);
+	}
+
+	if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
+		/* Misconfigured PPIs are usually not fatal */
+		pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
+{
+	if (get_intid_range(d) == SGI_RANGE)
+		return -EINVAL;
+
+	if (vcpu)
+		irqd_set_forwarded_to_vcpu(d);
+	else
+		irqd_clr_forwarded_to_vcpu(d);
+	return 0;
+}
+
+static u64 gic_mpidr_to_affinity(unsigned long mpidr)
+{
+	u64 aff;
+
+	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+	return aff;
+}
+
+static void gic_deactivate_unhandled(u32 irqnr)
+{
+	if (static_branch_likely(&supports_deactivate_key)) {
+		if (irqnr < 8192)
+			gic_write_dir(irqnr);
+	} else {
+		write_gicreg(irqnr, ICC_EOIR1_EL1);
+		isb();
+	}
+}
+
+/*
+ * Follow a read of the IAR with any HW maintenance that needs to happen prior
+ * to invoking the relevant IRQ handler. We must do two things:
+ *
+ * (1) Ensure instruction ordering between a read of IAR and subsequent
+ *     instructions in the IRQ handler using an ISB.
+ *
+ *     It is possible for the IAR to report an IRQ which was signalled *after*
+ *     the CPU took an IRQ exception as multiple interrupts can race to be
+ *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
+ *     later interrupts could be prioritized by the GIC.
+ *
+ *     For devices which are tightly coupled to the CPU, such as PMUs, a
+ *     context synchronization event is necessary to ensure that system
+ *     register state is not stale, as these may have been indirectly written
+ *     *after* exception entry.
+ *
+ * (2) Execute an interrupt priority drop when EOI mode 1 is in use.
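+ *
+ *     With EOI mode 1 the overall flow is thus: read IAR, write EOIR
+ *     (priority drop), ISB, run the handler, and deactivate later via
+ *     a DIR write (see gic_eoimode1_eoi_irq()).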
+ */ +static inline void gic_complete_ack(u32 irqnr) +{ + if (static_branch_likely(&supports_deactivate_key)) + write_gicreg(irqnr, ICC_EOIR1_EL1); + + isb(); +} + +static bool gic_rpr_is_nmi_prio(void) +{ + if (!gic_supports_nmi()) + return false; + + return unlikely(gic_read_rpr() == GICV3_PRIO_NMI); +} + +static bool gic_irqnr_is_special(u32 irqnr) +{ + return irqnr >= 1020 && irqnr <= 1023; +} + +static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs) +{ + if (gic_irqnr_is_special(irqnr)) + return; + + gic_complete_ack(irqnr); + + if (generic_handle_domain_irq(gic_data.domain, irqnr)) { + WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr); + gic_deactivate_unhandled(irqnr); + } +} + +static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs) +{ + if (gic_irqnr_is_special(irqnr)) + return; + + gic_complete_ack(irqnr); + + if (generic_handle_domain_nmi(gic_data.domain, irqnr)) { + WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr); + gic_deactivate_unhandled(irqnr); + } +} + +/* + * An exception has been taken from a context with IRQs enabled, and this could + * be an IRQ or an NMI. + * + * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear + * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning, + * after handling any NMI but before handling any IRQ. + * + * The entry code has performed IRQ entry, and if an NMI is detected we must + * perform NMI entry/exit around invoking the handler. + */ +static void __gic_handle_irq_from_irqson(struct pt_regs *regs) +{ + bool is_nmi; + u32 irqnr; + + irqnr = gic_read_iar(); + + is_nmi = gic_rpr_is_nmi_prio(); + + if (is_nmi) { + nmi_enter(); + __gic_handle_nmi(irqnr, regs); + nmi_exit(); + } + + if (gic_prio_masking_enabled()) { + gic_pmr_mask_irqs(); + gic_arch_enable_irqs(); + } + + if (!is_nmi) + __gic_handle_irq(irqnr, regs); +} + +/* + * An exception has been taken from a context with IRQs disabled, which can only + * be an NMI. + * + * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave + * DAIF.IF (and ICC_PMR_EL1) unchanged. + * + * The entry code has performed NMI entry. + */ +static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs) +{ + u64 pmr; + u32 irqnr; + + /* + * We were in a context with IRQs disabled. However, the + * entry code has set PMR to a value that allows any + * interrupt to be acknowledged, and not just NMIs. This can + * lead to surprising effects if the NMI has been retired in + * the meantime, and that there is an IRQ pending. The IRQ + * would then be taken in NMI context, something that nobody + * wants to debug twice. + * + * Until we sort this, drop PMR again to a level that will + * actually only allow NMIs before reading IAR, and then + * restore it to what it was. 
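+	 *
+	 * The sequence below is therefore: save PMR, mask IRQs at the
+	 * PMR level, ISB, read IAR, then restore the saved PMR value.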
+	 */
+	pmr = gic_read_pmr();
+	gic_pmr_mask_irqs();
+	isb();
+	irqnr = gic_read_iar();
+	gic_write_pmr(pmr);
+
+	__gic_handle_nmi(irqnr, regs);
+}
+
+static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+	if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
+		__gic_handle_irq_from_irqsoff(regs);
+	else
+		__gic_handle_irq_from_irqson(regs);
+}
+
+static u32 gic_get_pribits(void)
+{
+	u32 pribits;
+
+	pribits = gic_read_ctlr();
+	pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
+	pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
+	pribits++;
+
+	return pribits;
+}
+
+static bool gic_has_group0(void)
+{
+	u32 val;
+	u32 old_pmr;
+
+	old_pmr = gic_read_pmr();
+
+	/*
+	 * Let's find out if Group0 is under control of EL3 or not by
+	 * setting the highest possible, non-zero priority in PMR.
+	 *
+	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
+	 * order for the CPU interface to set bit 7, and keep the
+	 * actual priority in the non-secure range. In the process, it
+	 * loses the least significant bit and the actual priority
+	 * becomes 0x80. Reading it back returns 0, indicating that
+	 * we don't have access to Group0.
+	 */
+	gic_write_pmr(BIT(8 - gic_get_pribits()));
+	val = gic_read_pmr();
+
+	gic_write_pmr(old_pmr);
+
+	return val != 0;
+}
+
+static void __init gic_dist_init(void)
+{
+	unsigned int i;
+	u64 affinity;
+	void __iomem *base = gic_data.dist_base;
+	u32 val, skt;
+
+	for (skt = 0; skt < MAX_MARS3_SOC_COUNT; skt++) {
+		if (((1U << skt) & mars3_sockets_bitmap) == 0)
+			continue;
+
+		base = mars3_gic_dists[skt].dist_base;
+
+		/* Disable the distributor */
+		writel_relaxed(0, base + GICD_CTLR);
+		gic_do_wait_for_rwp(base, GICD_CTLR_RWP);
+
+		/*
+		 * Configure SPIs as non-secure Group-1. This will only matter
+		 * if the GIC only has a single security state. This will not
+		 * do the right thing if the kernel is running in secure mode,
+		 * but that's not the intended use case anyway.
+		 */
+		for (i = 32; i < GIC_LINE_NR; i += 32)
+			writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
+
+		/* Extended SPI range, not handled by the GICv2/GICv3 common code */
+		for (i = 0; i < GIC_ESPI_NR; i += 32) {
+			writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
+			writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
+		}
+
+		for (i = 0; i < GIC_ESPI_NR; i += 32)
+			writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);
+
+		for (i = 0; i < GIC_ESPI_NR; i += 16)
+			writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
+
+		for (i = 0; i < GIC_ESPI_NR; i += 4)
+			writel_relaxed(REPEAT_BYTE_U32(gic_dist_prio_irq), base + GICD_IPRIORITYRnE + i);
+
+		/* Now do the common stuff, and wait for the distributor to drain */
+		gic_dist_config(base, GIC_LINE_NR, gic_dist_prio_irq);
+		gic_do_wait_for_rwp(base, GICD_CTLR_RWP); /* do sync outside of gic_dist_config */
+
+		val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
+		if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
+			pr_info("Enabling SGIs without active state\n");
+			val |= GICD_CTLR_nASSGIreq;
+		}
+
+		/* Enable distributor with ARE, Group1 */
+		writel_relaxed(val, base + GICD_CTLR);
+
+		/*
+		 * Set all global interrupts to the boot CPU only. ARE must be
+		 * enabled.
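+		 *
+		 * The same boot-CPU affinity is applied to the extended
+		 * SPI range (GICD_IROUTERnE) right after the regular SPIs.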
+		 */
+		affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
+		for (i = 32; i < GIC_LINE_NR; i++)
+			gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
+
+		for (i = 0; i < GIC_ESPI_NR; i++)
+			gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
+	}
+}
+
+static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
+{
+	int ret = -ENODEV;
+	int i;
+
+	for (i = 0; i < gic_data.nr_redist_regions; i++) {
+		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
+		u64 typer;
+		u32 reg;
+
+		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
+		if (reg != GIC_PIDR2_ARCH_GICv3 &&
+		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
+			pr_warn("No redistributor present @%p\n", ptr);
+			break;
+		}
+
+		do {
+			typer = gic_read_typer(ptr + GICR_TYPER);
+			ret = fn(gic_data.redist_regions + i, ptr);
+			if (!ret)
+				return 0;
+
+			if (gic_data.redist_regions[i].single_redist)
+				break;
+
+			if (gic_data.redist_stride) {
+				ptr += gic_data.redist_stride;
+			} else {
+				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
+				if (typer & GICR_TYPER_VLPIS)
+					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
+			}
+		} while (!(typer & GICR_TYPER_LAST));
+	}
+
+	return ret ? -ENODEV : 0;
+}
+
+static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
+{
+	unsigned long mpidr = cpu_logical_map(smp_processor_id());
+	u64 typer;
+	u32 aff, aff2_skt, rdist_skt;
+
+	/*
+	 * Convert affinity to a 32bit value that can be matched to
+	 * GICR_TYPER bits [63:32].
+	 */
+	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+	       MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+	aff2_skt = MPIDR_AFFINITY_LEVEL(mpidr, 2) & 0x7;
+	rdist_skt = (((u64)region->phys_base >> MARS3_ADDR_SKTID_SHIFT) & 0x7);
+
+	if (aff2_skt != rdist_skt)
+		return 1;
+
+	typer = gic_read_typer(ptr + GICR_TYPER);
+	if ((typer >> 32) == aff) {
+		u64 offset = ptr - region->redist_base;
+
+		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
+		gic_data_rdist_rd_base() = ptr;
+		gic_data_rdist()->phys_base = region->phys_base + offset;
+
+		pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
+			smp_processor_id(), mpidr,
+			(int)(region - gic_data.redist_regions),
+			&gic_data_rdist()->phys_base);
+		return 0;
+	}
+
+	/* Try next one */
+	return 1;
+}
+
+static int gic_populate_rdist(void)
+{
+	if (gic_iterate_rdists(__gic_populate_rdist) == 0)
+		return 0;
+
+	/* We couldn't even deal with ourselves... */
+	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
+	     smp_processor_id(),
+	     (unsigned long)cpu_logical_map(smp_processor_id()));
+	return -ENODEV;
+}
+
+static int __gic_update_rdist_properties(struct redist_region *region,
+					 void __iomem *ptr)
+{
+	u64 typer = gic_read_typer(ptr + GICR_TYPER);
+	u32 ctlr = readl_relaxed(ptr + GICR_CTLR);
+
+	/* Boot-time cleanup */
+	if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
+		u64 val;
+
+		/* Deactivate any present vPE */
+		val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
+		if (val & GICR_VPENDBASER_Valid)
+			gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
+					      ptr + SZ_128K + GICR_VPENDBASER);
+
+		/* Mark the VPE table as invalid */
+		val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
+		val &= ~GICR_VPROPBASER_4_1_VALID;
+		gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
+	}
+
+	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
+
+	/*
+	 * TYPER.RVPEID implies some form of DirectLPI, no matter what the
+	 * doc says... :-/ And CTLR.IR implies another subset of DirectLPI
:-/ And CTLR.IR implies another subset of DirectLPI + * that the ITS driver can make use of for LPIs (and not VLPIs). + * + * These are 3 different ways to express the same thing, depending + * on the revision of the architecture and its relaxations over + * time. Just group them under the 'direct_lpi' banner. + */ + gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID); + gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) | + !!(ctlr & GICR_CTLR_IR) | + gic_data.rdists.has_rvpeid); + gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY); + + /* Detect non-sensical configurations */ + if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) { + gic_data.rdists.has_direct_lpi = false; + gic_data.rdists.has_vlpis = false; + gic_data.rdists.has_rvpeid = false; + } + + gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr); + + return 1; +} + +static void gic_update_rdist_properties(void) +{ + gic_data.ppi_nr = UINT_MAX; + gic_iterate_rdists(__gic_update_rdist_properties); + if (WARN_ON(gic_data.ppi_nr == UINT_MAX)) + gic_data.ppi_nr = 0; + pr_info("GIC-2500 features: %d PPIs%s%s\n", + gic_data.ppi_nr, + gic_data.has_rss ? ", RSS" : "", + gic_data.rdists.has_direct_lpi ? ", DirectLPI" : ""); + + if (gic_data.rdists.has_vlpis) + pr_info("GICv4 features: %s%s%s\n", + gic_data.rdists.has_direct_lpi ? "DirectLPI " : "", + gic_data.rdists.has_rvpeid ? "RVPEID " : "", + gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : ""); +} + +/* Check whether it's single security state view */ +static inline bool gic_dist_security_disabled(void) +{ + return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; +} + +static void gic_cpu_sys_reg_init(void) +{ + int i, cpu = smp_processor_id(); + u64 mpidr = cpu_logical_map(cpu); + u64 need_rss = MPIDR_RS(mpidr); + bool group0; + u32 pribits; + + /* + * Need to check that the SRE bit has actually been set. If + * not, it means that SRE is disabled at EL2. We're going to + * die painfully, and there is nothing we can do about it. + * + * Kindly inform the luser. + */ + if (!gic_enable_sre()) + pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n"); + + pribits = gic_get_pribits(); + + group0 = gic_has_group0(); + + /* Set priority mask register */ + if (!gic_prio_masking_enabled()) { + write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1); + } + /* + * Some firmwares hand over to the kernel with the BPR changed from + * its reset value (and with a value large enough to prevent + * any pre-emptive interrupts from working at all). Writing a zero + * to BPR restores is reset value. + */ + gic_write_bpr1(0); + + if (static_branch_likely(&supports_deactivate_key)) { + /* EOI drops priority only (mode 1) */ + gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop); + } else { + /* EOI deactivates interrupt too (mode 0) */ + gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir); + } + + /* Always whack Group0 before Group1 */ + if (group0) { + switch (pribits) { + case 8: + case 7: + write_gicreg(0, ICC_AP0R3_EL1); + write_gicreg(0, ICC_AP0R2_EL1); + fallthrough; + case 6: + write_gicreg(0, ICC_AP0R1_EL1); + fallthrough; + case 5: + case 4: + write_gicreg(0, ICC_AP0R0_EL1); + } + + isb(); + } + + switch (pribits) { + case 8: + case 7: + write_gicreg(0, ICC_AP1R3_EL1); + write_gicreg(0, ICC_AP1R2_EL1); + fallthrough; + case 6: + write_gicreg(0, ICC_AP1R1_EL1); + fallthrough; + case 5: + case 4: + write_gicreg(0, ICC_AP1R0_EL1); + } + + isb(); + + /* ... and let's hit the road... 
*/ + gic_write_grpen1(1); + + /* Keep the RSS capability status in per_cpu variable */ + per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS); + + /* Check all the CPUs have capable of sending SGIs to other CPUs */ + for_each_online_cpu(i) { + bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu); + + need_rss |= MPIDR_RS(cpu_logical_map(i)); + if (need_rss && (!have_rss)) + pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n", + cpu, (unsigned long)mpidr, + i, (unsigned long)cpu_logical_map(i)); + } + + /** + * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0, + * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED + * UNPREDICTABLE choice of : + * - The write is ignored. + * - The RS field is treated as 0. + */ + if (need_rss && (!gic_data.has_rss)) + pr_crit_once("RSS is required but GICD doesn't support it\n"); +} + +static bool gicv3_nolpi; + +static int __init gicv3_nolpi_cfg(char *buf) +{ + return kstrtobool(buf, &gicv3_nolpi); +} +early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); + +static int gic_dist_supports_lpis(void) +{ + return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && + !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && + !gicv3_nolpi); +} + +static void gic_cpu_init(void) +{ + void __iomem *rbase; + int i; + unsigned long mpidr; + + /* Register ourselves with the rest of the world */ + if (gic_populate_rdist()) + return; + + gic_enable_redist(true); + + WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) && + !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange), + "Distributor has extended ranges, but CPU%d doesn't\n", + smp_processor_id()); + + rbase = gic_data_rdist_sgi_base(); + + /* Configure SGIs/PPIs as non-secure Group-1 */ + for (i = 0; i < gic_data.ppi_nr + 16; i += 32) + writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8); + + gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_dist_prio_irq); + gic_redist_wait_for_rwp(); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if ((mpidr & 0xffff) == 0) { + rbase = rbase + 64*SZ_128K; + + for (i = 0; i < 4; i++) { + /* Configure SGIs/PPIs as non-secure Group-1 */ + writel_relaxed(~0, rbase + GICR_IGROUPR0); + + gic_cpu_config(rbase, gic_data.ppi_nr + 16, GICD_INT_DEF_PRI); + gic_do_wait_for_rwp(rbase - SZ_64K, GICR_CTLR_RWP); + + rbase = rbase + SZ_128K; + } + } + + /* initialise system registers */ + gic_cpu_sys_reg_init(); +} + +#ifdef CONFIG_SMP + +#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) +#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL) + +static int gic_starting_cpu(unsigned int cpu) +{ + gic_cpu_init(); + + if (gic_dist_supports_lpis()) + phytium_its_cpu_init(); + + return 0; +} + +static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, + unsigned long cluster_id) +{ + int next_cpu, cpu = *base_cpu; + unsigned long mpidr = cpu_logical_map(cpu); + u16 tlist = 0; + + while (cpu < nr_cpu_ids) { + tlist |= 1 << (mpidr & 0xf); + + next_cpu = cpumask_next(cpu, mask); + if (next_cpu >= nr_cpu_ids) + goto out; + cpu = next_cpu; + + mpidr = cpu_logical_map(cpu); + + if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { + cpu--; + goto out; + } + } +out: + *base_cpu = cpu; + return tlist; +} + +#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ + (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ + << ICC_SGI1R_AFFINITY_## level ##_SHIFT) + +static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) +{ + u64 val; + + val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | + MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | + 
irq << ICC_SGI1R_SGI_ID_SHIFT | + MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | + MPIDR_TO_SGI_RS(cluster_id) | + tlist << ICC_SGI1R_TARGET_LIST_SHIFT); + + pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); + gic_write_sgi1r(val); +} + +static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) +{ + int cpu; + + if (WARN_ON(d->hwirq >= 16)) + return; + + /* + * Ensure that stores to Normal memory are visible to the + * other CPUs before issuing the IPI. + */ + dsb(ishst); + + for_each_cpu(cpu, mask) { + u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); + u16 tlist; + + tlist = gic_compute_target_list(&cpu, mask, cluster_id); + gic_send_sgi(cluster_id, tlist, d->hwirq); + } + + /* Force the above writes to ICC_SGI1R_EL1 to be executed */ + isb(); +} + +static void __init gic_smp_init(void) +{ + struct irq_fwspec sgi_fwspec = { + .fwnode = gic_data.fwnode, + .param_count = 1, + }; + int base_sgi; + + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, + "irqchip/arm/gic_phytium_2500:starting", + gic_starting_cpu, NULL); + + /* Register all 8 non-secure SGIs */ + base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8, + NUMA_NO_NODE, &sgi_fwspec, + false, NULL); + if (WARN_ON(base_sgi <= 0)) + return; + + set_smp_ipi_range(base_sgi, 8); +} + +static int gic_cpumask_select(struct irq_data *d, const struct cpumask *mask_val) +{ + unsigned int skt, irq_skt, i; + unsigned int cpu, cpus = 0; + unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; + + irq_skt = mars3_irq_to_skt(gic_irq(d)); + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) { + if ((is_kdump_kernel()) && (irq_skt == skt)) + return i; + + skt_cpu_cnt[skt]++; + } else if (skt != 0xff) { + pr_err("socket address: %d is out of range.", skt); + } + } + + if (irq_skt) { + for (i = 0; i < irq_skt; i++) + cpus += skt_cpu_cnt[i]; + } + + cpu = cpumask_any_and(mask_val, cpu_online_mask); + cpus = cpus + cpu % skt_cpu_cnt[irq_skt]; + + return cpus; +} + +static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + unsigned int cpu, skt; + u32 offset, index; + void __iomem *reg; + int enabled; + u64 val; + + if (force) + cpu = cpumask_first(mask_val); + else + cpu = gic_cpumask_select(d, mask_val); + + if (cpu >= nr_cpu_ids) + return -EINVAL; + + if (gic_irq_in_rdist(d)) + return -EINVAL; + + /* If interrupt was enabled, disable it first */ + enabled = gic_peek_irq(d, GICD_ISENABLER); + if (enabled) + gic_mask_irq(d); + + offset = convert_offset_index(d, GICD_IROUTER, &index); + + skt = mars3_irq_to_skt(gic_irq(d)); + reg = mars3_gic_dists[skt].dist_base + offset + (index * 8); + val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); + + gic_write_irouter(val, reg); + + /* + * If the interrupt was enabled, enabled it again. Otherwise, + * just wait for the distributor to have digested our changes. 
+ */ + if (enabled) + gic_unmask_irq(d); + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK_DONE; +} +#else +#define gic_set_affinity NULL +#define gic_ipi_send_mask NULL +#define gic_smp_init() do { } while (0) +#endif + +static int gic_retrigger(struct irq_data *data) +{ + return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); +} + +#ifdef CONFIG_CPU_PM +static int gic_cpu_pm_notifier(struct notifier_block *self, + unsigned long cmd, void *v) +{ + if (cmd == CPU_PM_EXIT) { + if (gic_dist_security_disabled()) + gic_enable_redist(true); + gic_cpu_sys_reg_init(); + } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) { + gic_write_grpen1(0); + gic_enable_redist(false); + } + return NOTIFY_OK; +} + +static struct notifier_block gic_cpu_pm_notifier_block = { + .notifier_call = gic_cpu_pm_notifier, +}; + +static void gic_cpu_pm_init(void) +{ + cpu_pm_register_notifier(&gic_cpu_pm_notifier_block); +} + +#else +static inline void gic_cpu_pm_init(void) { } +#endif /* CONFIG_CPU_PM */ + +static struct irq_chip gic_chip = { + .name = "GIC-Phytium-2500", + .irq_mask = gic_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoi_irq, + .irq_set_type = gic_set_type, + .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_nmi_setup = gic_irq_nmi_setup, + .irq_nmi_teardown = gic_irq_nmi_teardown, + .ipi_send_mask = gic_ipi_send_mask, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static struct irq_chip gic_eoimode1_chip = { + .name = "GIC-Phytium-2500", + .irq_mask = gic_eoimode1_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoimode1_eoi_irq, + .irq_set_type = gic_set_type, + .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, + .irq_nmi_setup = gic_irq_nmi_setup, + .irq_nmi_teardown = gic_irq_nmi_teardown, + .ipi_send_mask = gic_ipi_send_mask, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + struct irq_chip *chip = &gic_chip; + struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); + + if (static_branch_likely(&supports_deactivate_key)) + chip = &gic_eoimode1_chip; + + switch (__get_intid_range(hw)) { + case SGI_RANGE: + case PPI_RANGE: + case EPPI_RANGE: + irq_set_percpu_devid(irq); + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_percpu_devid_irq, NULL, NULL); + break; + + case SPI_RANGE: + case ESPI_RANGE: + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + irq_set_probe(irq); + irqd_set_single_target(irqd); + break; + + case LPI_RANGE: + if (!gic_dist_supports_lpis()) + return -EPERM; + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + break; + + default: + return -EPERM; + } + + /* Prevents SW retriggers which mess up the ACK/EOI ordering */ + irqd_set_handle_enforce_irqctx(irqd); + return 0; +} + +static int gic_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + if (fwspec->param_count == 1 && fwspec->param[0] < 16) { + *hwirq = fwspec->param[0]; 
+ *type = IRQ_TYPE_EDGE_RISING; + return 0; + } + + if (is_of_node(fwspec->fwnode)) { + if (fwspec->param_count < 3) + return -EINVAL; + + switch (fwspec->param[0]) { + case 0: /* SPI */ + *hwirq = fwspec->param[1] + 32; + break; + case 1: /* PPI */ + *hwirq = fwspec->param[1] + 16; + break; + case 2: /* ESPI */ + *hwirq = fwspec->param[1] + ESPI_BASE_INTID; + break; + case 3: /* EPPI */ + *hwirq = fwspec->param[1] + EPPI_BASE_INTID; + break; + case GIC_IRQ_TYPE_LPI: /* LPI */ + *hwirq = fwspec->param[1]; + break; + case GIC_IRQ_TYPE_PARTITION: + *hwirq = fwspec->param[1]; + if (fwspec->param[1] >= 16) + *hwirq += EPPI_BASE_INTID - 16; + else + *hwirq += 16; + break; + default: + return -EINVAL; + } + + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + /* + * Make it clear that broken DTs are... broken. + * Partitioned PPIs are an unfortunate exception. + */ + WARN_ON(*type == IRQ_TYPE_NONE && + fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); + return 0; + } + + if (is_fwnode_irqchip(fwspec->fwnode)) { + if (fwspec->param_count != 2) + return -EINVAL; + + if (fwspec->param[0] < 16) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + + WARN_ON(*type == IRQ_TYPE_NONE); + return 0; + } + + return -EINVAL; +} + +static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = arg; + + ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + ret = gic_irq_domain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } + + return 0; +} + +static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + } +} + +static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec, + irq_hw_number_t hwirq) +{ + enum gic_intid_range range; + + if (!gic_data.ppi_descs) + return false; + + if (!is_of_node(fwspec->fwnode)) + return false; + + if (fwspec->param_count < 4 || !fwspec->param[3]) + return false; + + range = __get_intid_range(hwirq); + if (range != PPI_RANGE && range != EPPI_RANGE) + return false; + + return true; +} + +static int gic_irq_domain_select(struct irq_domain *d, + struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + unsigned int type, ret, ppi_idx; + irq_hw_number_t hwirq; + + /* Not for us */ + if (fwspec->fwnode != d->fwnode) + return 0; + + /* If this is not DT, then we have a single domain */ + if (!is_of_node(fwspec->fwnode)) + return 1; + + ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type); + if (WARN_ON_ONCE(ret)) + return 0; + + if (!fwspec_is_partitioned_ppi(fwspec, hwirq)) + return d == gic_data.domain; + + /* + * If this is a PPI and we have a 4th (non-null) parameter, + * then we need to match the partition domain. 
+ */ + ppi_idx = __gic_get_ppi_index(hwirq); + return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]); +} + +static const struct irq_domain_ops gic_irq_domain_ops = { + .translate = gic_irq_domain_translate, + .alloc = gic_irq_domain_alloc, + .free = gic_irq_domain_free, + .select = gic_irq_domain_select, +}; + +static int partition_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + unsigned long ppi_intid; + struct device_node *np; + unsigned int ppi_idx; + int ret; + + if (!gic_data.ppi_descs) + return -ENOMEM; + + np = of_find_node_by_phandle(fwspec->param[3]); + if (WARN_ON(!np)) + return -EINVAL; + + ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type); + if (WARN_ON_ONCE(ret)) + return 0; + + ppi_idx = __gic_get_ppi_index(ppi_intid); + ret = partition_translate_id(gic_data.ppi_descs[ppi_idx], + of_node_to_fwnode(np)); + if (ret < 0) + return ret; + + *hwirq = ret; + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static const struct irq_domain_ops partition_domain_ops = { + .translate = partition_domain_translate, + .select = gic_irq_domain_select, +}; + +static void gic_enable_nmi_support(void) +{ + int i; + + if (!gic_prio_masking_enabled()) + return; + + ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL); + if (!ppi_nmi_refs) + return; + + for (i = 0; i < gic_data.ppi_nr; i++) + refcount_set(&ppi_nmi_refs[i], 0); + + pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n", + gic_has_relaxed_pmr_sync() ? "relaxed" : "forced"); + + static_branch_enable(&supports_pseudo_nmis); + + if (static_branch_likely(&supports_deactivate_key)) + gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI; + else + gic_chip.flags |= IRQCHIP_SUPPORTS_NMI; +} + +static void __init gic_prio_init(void) +{ + bool cpus_have_security_disabled = gic_dist_security_disabled(); + bool cpus_have_group0 = gic_has_group0(); + + /* + * How priority values are used by the GIC depends on two things: + * the security state of the GIC (controlled by the GICD_CTRL.DS bit) + * and if Group 0 interrupts can be delivered to Linux in the non-secure + * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the + * way priorities are presented in ICC_PMR_EL1 and in the distributor: + * + * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Distributor + * ------------------------------------------------------- + * 1 | - | unchanged | unchanged + * ------------------------------------------------------- + * 0 | 1 | non-secure | non-secure + * ------------------------------------------------------- + * 0 | 0 | unchanged | non-secure + * + * In the non-secure view reads and writes are modified: + * + * - A value written is right-shifted by one and the MSB is set, + * forcing the priority into the non-secure range. + * + * - A value read is left-shifted by one. + * + * In the first two cases, where ICC_PMR_EL1 and the interrupt priority + * are both either modified or unchanged, we can use the same set of + * priorities. + * + * In the last case, where only the interrupt priorities are modified to + * be in the non-secure range, we program the non-secure values into + * the distributor to match the PMR values we want. 
+ */ + if (cpus_have_group0 && !cpus_have_security_disabled) { + gic_dist_prio_irq = __gicv3_prio_to_ns(gic_dist_prio_irq); // 0x80 + gic_dist_prio_nmi = __gicv3_prio_to_ns(gic_dist_prio_nmi); // 0x00 + } + + pr_info("GIC-2500: DS=%d, FIQ=%d, dist_irq_prio=0x%x, dist_nmi_prio=0x%x\n", + cpus_have_security_disabled, cpus_have_group0, + gic_dist_prio_irq, gic_dist_prio_nmi); +} + +static int __init gic_init_bases(void __iomem *dist_base, + struct redist_region *rdist_regs, + u32 nr_redist_regions, + u64 redist_stride, + struct fwnode_handle *handle) +{ + u32 typer; + int err; + + if (!is_hyp_mode_available()) + static_branch_disable(&supports_deactivate_key); + + if (static_branch_likely(&supports_deactivate_key)) + pr_info("GIC: Using split EOI/Deactivate mode\n"); + + gic_data.fwnode = handle; + gic_data.dist_base = dist_base; + gic_data.redist_regions = rdist_regs; + gic_data.nr_redist_regions = nr_redist_regions; + gic_data.redist_stride = redist_stride; + + /* + * Find out how many interrupts are supported. + */ + typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); + gic_data.rdists.gicd_typer = typer; + + pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); + pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); + + gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2); + + gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, + &gic_data); + gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); + gic_data.rdists.has_rvpeid = true; + gic_data.rdists.has_vlpis = true; + gic_data.rdists.has_direct_lpi = true; + gic_data.rdists.has_vpend_valid_dirty = true; + + if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { + err = -ENOMEM; + goto out_free; + } + + irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); + + gic_data.has_rss = !!(typer & GICD_TYPER_RSS); + + if (typer & GICD_TYPER_MBIS) { + err = mbi_init(handle, gic_data.domain); + if (err) + pr_err("Failed to initialize MBIs\n"); + } + + set_handle_irq(gic_handle_irq); + + gic_update_rdist_properties(); + gic_prio_init(); + + gic_dist_init(); + gic_cpu_init(); + gic_smp_init(); + gic_cpu_pm_init(); + + if (gic_dist_supports_lpis()) { + phytium_its_init(handle, &gic_data.rdists, gic_data.domain); + phytium_its_cpu_init(); + phytium_its_lpi_memreserve_init(); + } else { + if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) + gicv2m_init(handle, gic_data.domain); + } + + gic_enable_nmi_support(); + + return 0; + +out_free: + if (gic_data.domain) + irq_domain_remove(gic_data.domain); + free_percpu(gic_data.rdists.rdist); + return err; +} + +static int __init gic_validate_dist_version(void __iomem *dist_base) +{ + u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; + + if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) + return -ENODEV; + + return 0; +} + +/* Create all possible partitions at boot time */ +static void __init gic_populate_ppi_partitions(struct device_node *gic_node) +{ + struct device_node *parts_node, *child_part; + int part_idx = 0, i; + int nr_parts; + struct partition_affinity *parts; + + parts_node = of_get_child_by_name(gic_node, "ppi-partitions"); + if (!parts_node) + return; + + gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); + if (!gic_data.ppi_descs) + goto out_put_node; + + nr_parts = of_get_child_count(parts_node); + + if (!nr_parts) + goto out_put_node; + + parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); + if (WARN_ON(!parts)) + goto out_put_node; + + 
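+	/*
+	 * Not from this patch: an illustrative sketch of the devicetree
+	 * layout the loop below walks. Node and label names are made up;
+	 * only the "ppi-partitions" child and its "affinity" phandle list
+	 * are fixed by this code:
+	 *
+	 *	ppi-partitions {
+	 *		part0: interrupt-partition-0 {
+	 *			affinity = <&cpu0 &cpu1>;
+	 *		};
+	 *	};
+	 */
+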
for_each_child_of_node(parts_node, child_part) { + struct partition_affinity *part; + int n; + + part = &parts[part_idx]; + + part->partition_id = of_node_to_fwnode(child_part); + + pr_info("GIC: PPI partition %pOFn[%d] { ", + child_part, part_idx); + + n = of_property_count_elems_of_size(child_part, "affinity", + sizeof(u32)); + WARN_ON(n <= 0); + + for (i = 0; i < n; i++) { + int err, cpu; + u32 cpu_phandle; + struct device_node *cpu_node; + + err = of_property_read_u32_index(child_part, "affinity", + i, &cpu_phandle); + if (WARN_ON(err)) + continue; + + cpu_node = of_find_node_by_phandle(cpu_phandle); + if (WARN_ON(!cpu_node)) + continue; + + cpu = of_cpu_node_to_id(cpu_node); + if (WARN_ON(cpu < 0)) { + of_node_put(cpu_node); + continue; + } + + pr_cont("%pOF[%d] ", cpu_node, cpu); + + cpumask_set_cpu(cpu, &part->mask); + of_node_put(cpu_node); + } + + pr_cont("}\n"); + part_idx++; + } + + for (i = 0; i < gic_data.ppi_nr; i++) { + unsigned int irq; + struct partition_desc *desc; + struct irq_fwspec ppi_fwspec = { + .fwnode = gic_data.fwnode, + .param_count = 3, + .param = { + [0] = GIC_IRQ_TYPE_PARTITION, + [1] = i, + [2] = IRQ_TYPE_NONE, + }, + }; + + irq = irq_create_fwspec_mapping(&ppi_fwspec); + if (WARN_ON(!irq)) + continue; + desc = partition_create_desc(gic_data.fwnode, parts, nr_parts, + irq, &partition_domain_ops); + if (WARN_ON(!desc)) + continue; + + gic_data.ppi_descs[i] = desc; + } + +out_put_node: + of_node_put(parts_node); +} + +static void __init gic_of_setup_kvm_info(struct device_node *node) +{ + int ret; + struct resource r; + u32 gicv_idx; + + gic_v3_kvm_info.type = GIC_V3; + + gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0); + if (!gic_v3_kvm_info.maint_irq) + return; + + if (of_property_read_u32(node, "#redistributor-regions", + &gicv_idx)) + gicv_idx = 1; + + gicv_idx += 3; /* Also skip GICD, GICC, GICH */ + ret = of_address_to_resource(node, gicv_idx, &r); + if (!ret) + gic_v3_kvm_info.vcpu = r; + + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + vgic_set_kvm_info(&gic_v3_kvm_info); +} + +static void gic_request_region(resource_size_t base, resource_size_t size, + const char *name) +{ + if (!request_mem_region(base, size, name)) + pr_warn_once(FW_BUG "%s region %pa has overlapping address\n", + name, &base); +} + +static void __iomem *gic_of_iomap(struct device_node *node, int idx, + const char *name, struct resource *res) +{ + void __iomem *base; + int ret; + + ret = of_address_to_resource(node, idx, res); + if (ret) + return IOMEM_ERR_PTR(ret); + + gic_request_region(res->start, resource_size(res), name); + base = of_iomap(node, idx); + + return base ?: IOMEM_ERR_PTR(-ENOMEM); +} + +static int __init gic_of_init(struct device_node *node, struct device_node *parent) +{ + void __iomem *dist_base; + struct redist_region *rdist_regs; + u64 redist_stride; + u32 nr_redist_regions; + int err, i; + struct resource res; + unsigned long skt; + + dist_base = gic_of_iomap(node, 0, "GICD", &res); + if (IS_ERR(dist_base)) { + pr_err("%pOF: unable to map gic dist registers\n", node); + return PTR_ERR(dist_base); + } + + err = gic_validate_dist_version(dist_base); + if (err) { + pr_err("%pOF: no distributor detected, giving up\n", node); + goto out_unmap_dist; + } + + if (of_address_to_resource(node, 0, &res)) { + pr_err("Error: No GIC Distributor in FDT\n"); + goto out_unmap_dist; + } + + mars3_gic_dists[0].phys_base = res.start; + mars3_gic_dists[0].size = resource_size(&res); + 
mars3_gic_dists[0].dist_base = dist_base; + + if (of_property_read_u32(node, "#mars3-soc-bitmap", &mars3_sockets_bitmap)) + mars3_sockets_bitmap = 0x1; + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if (((1U << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | + mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, + mars3_gic_dists[skt].size); + } + + if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) + nr_redist_regions = 1; + + rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs), + GFP_KERNEL); + if (!rdist_regs) { + err = -ENOMEM; + goto out_unmap_dist; + } + + for (i = 0; i < nr_redist_regions; i++) { + rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res); + if (IS_ERR(rdist_regs[i].redist_base)) { + pr_err("%pOF: couldn't map region %d\n", node, i); + err = -ENODEV; + goto out_unmap_rdist; + } + rdist_regs[i].phys_base = res.start; + } + + if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) + redist_stride = 0; + + err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions, + redist_stride, &node->fwnode); + if (err) + goto out_unmap_rdist; + + gic_populate_ppi_partitions(node); + + if (static_branch_likely(&supports_deactivate_key)) + gic_of_setup_kvm_info(node); + return 0; + +out_unmap_rdist: + for (i = 0; i < nr_redist_regions; i++) + if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base)) + iounmap(rdist_regs[i].redist_base); + kfree(rdist_regs); +out_unmap_dist: + iounmap(dist_base); + return err; +} + +IRQCHIP_DECLARE(gic_phyt_2500, "arm,gic-phytium-2500", gic_of_init); + +#ifdef CONFIG_ACPI +static struct +{ + void __iomem *dist_base; + struct redist_region *redist_regs; + u32 nr_redist_regions; + bool single_redist; + int enabled_rdists; + u32 maint_irq; + int maint_irq_mode; + phys_addr_t vcpu_base; +} acpi_data __initdata; + +static int gic_mars3_sockets_bitmap(void) +{ + unsigned int skt, i; + int skt_bitmap = 0; + unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; + + for (i = 0; i < max_t(unsigned int, nr_cpu_ids, NR_CPUS); i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) + skt_cpu_cnt[skt]++; + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + for (i = 0; i < MAX_MARS3_SOC_COUNT; i++) { + if (skt_cpu_cnt[i] > 0) + skt_bitmap |= (1 << i); + } + + return skt_bitmap; +} + +static void __init +gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base) +{ + static int count = 0; + + acpi_data.redist_regs[count].phys_base = phys_base; + acpi_data.redist_regs[count].redist_base = redist_base; + acpi_data.redist_regs[count].single_redist = acpi_data.single_redist; + count++; +} + +static int __init +gic_acpi_parse_madt_redist(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_redistributor *redist = + (struct acpi_madt_generic_redistributor *)header; + void __iomem *redist_base; + + redist_base = ioremap(redist->base_address, redist->length); + if (!redist_base) { + pr_err("Couldn't map GICR region @%llx\n", redist->base_address); + return -ENOMEM; + } + gic_request_region(redist->base_address, redist->length, "GICR"); + + gic_acpi_register_redist(redist->base_address, redist_base); + return 0; +} + +static int __init +gic_acpi_parse_madt_gicc(union 
acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; + u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2; + void __iomem *redist_base; + + /* GICC entry which has !ACPI_MADT_ENABLED is not unusable so skip */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + redist_base = ioremap(gicc->gicr_base_address, size); + if (!redist_base) + return -ENOMEM; + gic_request_region(gicc->gicr_base_address, size, "GICR"); + + gic_acpi_register_redist(gicc->gicr_base_address, redist_base); + return 0; +} + +static int __init gic_acpi_collect_gicr_base(void) +{ + acpi_tbl_entry_handler redist_parser; + enum acpi_madt_type type; + + if (acpi_data.single_redist) { + type = ACPI_MADT_TYPE_GENERIC_INTERRUPT; + redist_parser = gic_acpi_parse_madt_gicc; + } else { + type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR; + redist_parser = gic_acpi_parse_madt_redist; + } + + /* Collect redistributor base addresses in GICR entries */ + if (acpi_table_parse_madt(type, redist_parser, 0) > 0) + return 0; + + pr_info("No valid GICR entries exist\n"); + return -ENODEV; +} + +static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header, + const unsigned long end) +{ + /* Subtable presence means that redist exists, that's it */ + return 0; +} + +static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + + /* + * If GICC is enabled and has valid gicr base address, then it means + * GICR base is presented via GICC + */ + if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) { + acpi_data.enabled_rdists++; + return 0; + } + + /* + * It's perfectly valid firmware can pass disabled GICC entry, driver + * should not treat as errors, skip the entry instead of probe fail. + */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + return -ENODEV; +} + +static int __init gic_acpi_count_gicr_regions(void) +{ + int count; + + /* + * Count how many redistributor regions we have. It is not allowed + * to mix redistributor description, GICR and GICC subtables have to be + * mutually exclusive. 
+ */ + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, + gic_acpi_match_gicr, 0); + if (count > 0) { + acpi_data.single_redist = false; + return count; + } + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_match_gicc, 0); + if (count > 0) { + acpi_data.single_redist = true; + count = acpi_data.enabled_rdists; + } + + return count; +} + +static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header, + struct acpi_probe_entry *ape) +{ + struct acpi_madt_generic_distributor *dist; + int count; + + dist = (struct acpi_madt_generic_distributor *)header; + if (dist->version != ape->driver_data) + return false; + + /* We need to do that exercise anyway, the sooner the better */ + count = gic_acpi_count_gicr_regions(); + if (count <= 0) + return false; + + acpi_data.nr_redist_regions = count; + return true; +} + +static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + int maint_irq_mode; + static int first_madt = true; + + /* Skip unusable CPUs */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ? + ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE; + + if (first_madt) { + first_madt = false; + + acpi_data.maint_irq = gicc->vgic_interrupt; + acpi_data.maint_irq_mode = maint_irq_mode; + acpi_data.vcpu_base = gicc->gicv_base_address; + + return 0; + } + + /* + * The maintenance interrupt and GICV should be the same for every CPU + */ + if ((acpi_data.maint_irq != gicc->vgic_interrupt) || + (acpi_data.maint_irq_mode != maint_irq_mode) || + (acpi_data.vcpu_base != gicc->gicv_base_address)) + return -EINVAL; + + return 0; +} + +static bool __init gic_acpi_collect_virt_info(void) +{ + int count; + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_parse_virt_madt_gicc, 0); + + return (count > 0); +} + +#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K) +#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K) +#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K) + +static void __init gic_acpi_setup_kvm_info(void) +{ + int irq; + + if (!gic_acpi_collect_virt_info()) { + pr_warn("Unable to get hardware information used for virtualization\n"); + return; + } + + gic_v3_kvm_info.type = GIC_V3; + + irq = acpi_register_gsi(NULL, acpi_data.maint_irq, + acpi_data.maint_irq_mode, + ACPI_ACTIVE_HIGH); + if (irq <= 0) + return; + + gic_v3_kvm_info.maint_irq = irq; + + if (acpi_data.vcpu_base) { + struct resource *vcpu = &gic_v3_kvm_info.vcpu; + + vcpu->flags = IORESOURCE_MEM; + vcpu->start = acpi_data.vcpu_base; + vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; + } + + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + vgic_set_kvm_info(&gic_v3_kvm_info); +} + +static struct fwnode_handle *gsi_domain_handle; + +static struct fwnode_handle *gic_s2500_get_gsi_domain_id(u32 gsi) +{ + return gsi_domain_handle; +} + +static int __init +gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_madt_generic_distributor *dist; + size_t size; + int i, err, skt; + + /* Get distributor base address */ + dist = (struct acpi_madt_generic_distributor *)header; + acpi_data.dist_base = ioremap(dist->base_address, + ACPI_GICV3_DIST_MEM_SIZE); + if (!acpi_data.dist_base) { + pr_err("Unable to map GICD registers\n"); + return -ENOMEM; + } + 
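+	/*
+	 * Note that the MADT only describes socket 0's distributor; the
+	 * windows of the remaining sockets are derived further down by
+	 * folding the socket id into the physical address
+	 * (MARS3_ADDR_SKTID_SHIFT).
+	 */
+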
gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD"); + + err = gic_validate_dist_version(acpi_data.dist_base); + if (err) { + pr_err("No distributor detected at @%p, giving up\n", + acpi_data.dist_base); + goto out_dist_unmap; + } + + mars3_gic_dists[0].phys_base = dist->base_address; + mars3_gic_dists[0].size = ACPI_GICV3_DIST_MEM_SIZE; + mars3_gic_dists[0].dist_base = acpi_data.dist_base; + + mars3_sockets_bitmap = gic_mars3_sockets_bitmap(); + if (mars3_sockets_bitmap == 0) { + mars3_sockets_bitmap = 0x1; + pr_err("No socket, please check cpus MPIDR_AFFINITY_LEVEL!"); + } else + pr_info("mars3_sockets_bitmap = 0x%x\n", mars3_sockets_bitmap); + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if (((1U << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | + mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, + mars3_gic_dists[skt].size); + } + + size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions; + acpi_data.redist_regs = kzalloc(size, GFP_KERNEL); + if (!acpi_data.redist_regs) { + err = -ENOMEM; + goto out_dist_unmap; + } + + err = gic_acpi_collect_gicr_base(); + if (err) + goto out_redist_unmap; + + gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address); + if (!gsi_domain_handle) { + err = -ENOMEM; + goto out_redist_unmap; + } + + err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs, + acpi_data.nr_redist_regions, 0, gsi_domain_handle); + if (err) + goto out_fwhandle_free; + + acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_s2500_get_gsi_domain_id); + + if (static_branch_likely(&supports_deactivate_key)) + gic_acpi_setup_kvm_info(); + + return 0; + +out_fwhandle_free: + irq_domain_free_fwnode(gsi_domain_handle); +out_redist_unmap: + for (i = 0; i < acpi_data.nr_redist_regions; i++) + if (acpi_data.redist_regs[i].redist_base) + iounmap(acpi_data.redist_regs[i].redist_base); + kfree(acpi_data.redist_regs); +out_dist_unmap: + iounmap(acpi_data.dist_base); + return err; +} +IRQCHIP_ACPI_DECLARE(gic_phyt_2500, ACPI_MADT_TYPE_OEM_RESERVED, + acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3, + gic_acpi_init); +#endif diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 467cb78435a9b..6023a6ff19cfa 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -5032,6 +5032,7 @@ static void its_restore_enable(void) { struct its_node *its; int ret; + int cpu; raw_spin_lock(&its_lock); list_for_each_entry(its, &its_nodes, entry) { @@ -5085,6 +5086,23 @@ static void its_restore_enable(void) GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) its_cpu_init_collection(its); } + + /* + * Enable LPIs:firmware just restore GICR_CTLR_ENABLE_LPIs of boot + * CPU, the other CPUs also should be restored. + */ + for_each_possible_cpu(cpu) { + void __iomem *rbase = gic_data_rdist_cpu(cpu)->rd_base; + u32 val; + + /* Enable LPIs */ + val = readl_relaxed(rbase + GICR_CTLR); + if (val & GICR_CTLR_ENABLE_LPIS) + continue; + + val |= GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + } raw_spin_unlock(&its_lock); } diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 6cec1858947bf..84158b744b724 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1224,6 +1224,20 @@ config MFD_RETU Retu and Tahvo are a multi-function devices found on Nokia Internet Tablets (770, N800 and N810). 
+config MFD_PHYTIUM_I2S_LSD
+	bool "PHYTIUM Px210 I2S LSD MFD driver"
+	depends on (PCI && ARCH_PHYTIUM)
+	select MFD_CORE
+	help
+	  This enables support for the Phytium Px210 LSD I2S controller.
+
+config MFD_PHYTIUM_I2S_MMD
+	bool "PHYTIUM Px210 I2S MMD MFD driver"
+	depends on (PCI && ARCH_PHYTIUM)
+	select MFD_CORE
+	help
+	  This enables support for the Phytium Px210 MMD I2S controllers
+	  for Display Port.
+
 config MFD_PM8XXX
 	tristate "Qualcomm PM8xxx PMIC chips driver"
 	depends on ARM || HEXAGON || COMPILE_TEST
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 865e9f12faff0..521c2c668ec3b 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -303,3 +303,6 @@
 obj-$(CONFIG_MFD_RSMU_SPI)	+= rsmu_spi.o rsmu_core.o
 obj-$(CONFIG_MFD_UPBOARD_FPGA)	+= upboard-fpga.o
 obj-$(CONFIG_MFD_LOONGSON_SE)	+= loongson-se.o
+
+obj-$(CONFIG_MFD_PHYTIUM_I2S_LSD)	+= phytium_px210_i2s_lsd.o
+obj-$(CONFIG_MFD_PHYTIUM_I2S_MMD)	+= phytium_px210_i2s_mmd.o
diff --git a/drivers/mfd/phytium_px210_i2s_lsd.c b/drivers/mfd/phytium_px210_i2s_lsd.c
new file mode 100644
index 0000000000000..58de96a0cdcfb
--- /dev/null
+++ b/drivers/mfd/phytium_px210_i2s_lsd.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Phytium I2S LSD MFD driver over PCI bus
+ *
+ * Copyright (C) 2020-2023, Phytium Technology Co., Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mfd/core.h>
+
+struct phytium_px210_mfd {
+	struct device *dev;
+};
+
+struct pdata_px210_mfd {
+	struct device *dev;
+	char *name;
+	int clk_base;
+};
+
+static struct resource phytium_px210_i2s_res0[] = {
+	[0] = {
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.flags = IORESOURCE_MEM,
+	},
+	[2] = {
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct mfd_cell phytium_px210_mfd_cells[] = {
+	{
+		.id = 0,
+		.name = "phytium-i2s",
+		.of_compatible = "phytium,i2s",
+		.resources = phytium_px210_i2s_res0,
+		.num_resources = ARRAY_SIZE(phytium_px210_i2s_res0),
+		.ignore_resource_conflicts = true,
+	},
+};
+
+static void phytium_px210_i2s_setup(struct pci_dev *pdev)
+{
+	struct mfd_cell *cell = &phytium_px210_mfd_cells[0];
+	struct resource *res = (struct resource *)cell->resources;
+	struct pdata_px210_mfd *pdata;
+
+	res[0].start = pci_resource_start(pdev, 0);
+	res[0].end = pci_resource_start(pdev, 0) + 0x0fff;
+
+	res[1].start = pci_resource_start(pdev, 0) + 0x1000;
+	res[1].end = pci_resource_start(pdev, 0) + 0x1fff;
+
+	res[2].start = pdev->irq;
+	res[2].end = pdev->irq;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+
+	pdata->dev = &pdev->dev;
+	pdata->name = "phytium-i2s-lsd";
+	pdata->clk_base = 480000000;
+
+	cell->platform_data = pdata;
+	cell->pdata_size = sizeof(*pdata);
+}
+
+static int phytium_px210_mfd_probe(struct pci_dev *pdev,
+				   const struct pci_device_id *id)
+{
+	struct phytium_px210_mfd *phytium_mfd;
+	int ret;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	pci_set_master(pdev);
+
+	phytium_mfd = devm_kzalloc(&pdev->dev, sizeof(*phytium_mfd), GFP_KERNEL);
+	if (!phytium_mfd)
+		return -ENOMEM;
+
+	phytium_mfd->dev = &pdev->dev;
+	dev_set_drvdata(&pdev->dev, phytium_mfd);
+
+	phytium_px210_i2s_setup(pdev);
+
+	ret = mfd_add_devices(&pdev->dev, 0, phytium_px210_mfd_cells,
+			      ARRAY_SIZE(phytium_px210_mfd_cells), NULL, 0,
+			      NULL);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void phytium_px210_mfd_remove(struct pci_dev *pdev)
+{
+	mfd_remove_devices(&pdev->dev);
+}
+
+static const struct pci_device_id phytium_px210_mfd_ids[] = {
+	{
+		.vendor = 0x1DB7,
+		.device = 0xDC2B,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+		.class = 0x3,
+		.class_mask = 0,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(pci, phytium_px210_mfd_ids);
+
+static struct pci_driver phytium_i2s_lsd_mfd_driver = {
+	.name = "phytium_px210_mfd_i2s",
+	.id_table = phytium_px210_mfd_ids,
+	.probe = phytium_px210_mfd_probe,
+	.remove = phytium_px210_mfd_remove,
+};
+
+module_pci_driver(phytium_i2s_lsd_mfd_driver);
+
+MODULE_AUTHOR("Zhang Yiqun ");
+MODULE_DESCRIPTION("Phytium Px210 MFD PCI driver for I2S-LSD");
+MODULE_LICENSE("GPL");
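The MFD cell above only publishes resources and platform data; how they are consumed is outside this patch. As a minimal, hypothetical sketch — assuming the child driver simply matches the cell name "phytium-i2s" and mirrors struct pdata_px210_mfd; the real phytium-i2s driver is not shown here — a consumer could look like:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>

/* Mirrors the pdata_px210_mfd layout published by the MFD driver above. */
struct pdata_px210_mfd {
	struct device *dev;
	char *name;
	int clk_base;
};

static int phytium_i2s_probe(struct platform_device *pdev)
{
	struct pdata_px210_mfd *pdata = dev_get_platdata(&pdev->dev);
	struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	if (!pdata || !regs || irq < 0)
		return -EINVAL;

	/* A real driver would ioremap the window and request the irq here. */
	dev_info(&pdev->dev, "%s: regs %pR, irq %d, clk %d Hz\n",
		 pdata->name, regs, irq, pdata->clk_base);
	return 0;
}

static struct platform_driver phytium_i2s_driver = {
	.probe = phytium_i2s_probe,
	.driver = { .name = "phytium-i2s" },	/* matches mfd_cell.name */
};
module_platform_driver(phytium_i2s_driver);
MODULE_LICENSE("GPL");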
diff --git a/drivers/mfd/phytium_px210_i2s_mmd.c b/drivers/mfd/phytium_px210_i2s_mmd.c
new file mode 100644
index 0000000000000..684e588793024
--- /dev/null
+++ b/drivers/mfd/phytium_px210_i2s_mmd.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Phytium I2S MMD MFD driver over PCI bus
+ *
+ * Copyright (C) 2020-2023, Phytium Technology Co., Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mfd/core.h>
+
+struct phytium_px210_mfd {
+	struct device *dev;
+};
+
+struct pdata_px210_mfd {
+	struct device *dev;
+	char *name;
+	int clk_base;
+};
+
+static struct resource phytium_px210_i2s_res0[] = {
+	[0] = {
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.flags = IORESOURCE_MEM,
+	},
+	[2] = {
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct resource phytium_px210_i2s_res1[] = {
+	[0] = {
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.flags = IORESOURCE_MEM,
+	},
+	[2] = {
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct resource phytium_px210_i2s_res2[] = {
+	[0] = {
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.flags = IORESOURCE_MEM,
+	},
+	[2] = {
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct mfd_cell phytium_px210_mfd_cells[] = {
+	{
+		.id = 1,
+		.name = "phytium-i2s",
+		.of_compatible = "phytium,i2s",
+		.resources = phytium_px210_i2s_res0,
+		.num_resources = ARRAY_SIZE(phytium_px210_i2s_res0),
+		.ignore_resource_conflicts = true,
+	},
+	{
+		.id = 2,
+		.name = "phytium-i2s",
+		.of_compatible = "phytium,i2s",
+		.resources = phytium_px210_i2s_res1,
+		.num_resources = ARRAY_SIZE(phytium_px210_i2s_res1),
+		.ignore_resource_conflicts = true,
+	},
+	{
+		.id = 3,
+		.name = "phytium-i2s",
+		.of_compatible = "phytium,i2s",
+		.resources = phytium_px210_i2s_res2,
+		.num_resources = ARRAY_SIZE(phytium_px210_i2s_res2),
+		.ignore_resource_conflicts = true,
+	},
+};
+
+static void phytium_px210_i2s_setup(struct pci_dev *pdev, int i)
+{
+	struct mfd_cell *cell = &phytium_px210_mfd_cells[i];
+	struct resource *res = (struct resource *)cell->resources;
+	struct pdata_px210_mfd *pdata;
+
+	res[0].start = pci_resource_start(pdev, 0) + 0x2000 * i + 0x1000;
+	res[0].end = pci_resource_start(pdev, 0) + 0x2000 * i + 0x1fff;
+
+	res[1].start = pci_resource_start(pdev, 0) + 0x2000 * i;
+	res[1].end = pci_resource_start(pdev, 0) + 0x2000 * i + 0x0fff;
+
+	res[2].start = pdev->irq;
+	res[2].end = pdev->irq;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+
+	pdata->dev = &pdev->dev;
+	pdata->clk_base = 600000000;
+	switch (i) {
+	case 0:
+		pdata->name = "phytium-i2s-dp0";
+		break;
+	case 1:
+		pdata->name = "phytium-i2s-dp1";
+		break;
+	case 2:
+		pdata->name = "phytium-i2s-dp2";
+		break;
+	default:
+		break;
+	}
+
+	cell->platform_data = pdata;
+	cell->pdata_size = sizeof(*pdata);
+}
+
+static int phytium_px210_mfd_probe(struct pci_dev *pdev,
+				   const struct pci_device_id *id)
+{
+	struct phytium_px210_mfd *phytium_mfd;
+	int i;
+	int ret;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	pci_set_master(pdev);
+
+	phytium_mfd = devm_kzalloc(&pdev->dev, sizeof(*phytium_mfd), GFP_KERNEL);
+	if (!phytium_mfd)
+		return -ENOMEM;
+
+	phytium_mfd->dev = &pdev->dev;
+	dev_set_drvdata(&pdev->dev, phytium_mfd);
+
+	for (i = 0; i < 3; i++)
+		phytium_px210_i2s_setup(pdev, i);
+
+	ret = mfd_add_devices(&pdev->dev, 0, phytium_px210_mfd_cells,
+			      ARRAY_SIZE(phytium_px210_mfd_cells), NULL, 0,
+			      NULL);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void phytium_px210_mfd_remove(struct pci_dev *pdev)
+{
+	mfd_remove_devices(&pdev->dev);
+}
+
+static const struct pci_device_id phytium_px210_mfd_ids[] = {
+	{
+		.vendor = 0x1DB7,
+		.device = 0xDC23,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+		.class = 0x3,
+		.class_mask = 0,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(pci, phytium_px210_mfd_ids);
+
+static struct pci_driver phytium_i2s_mmd_mfd_driver = {
+	.name = "phytium_px210_mfd_mmd",
+	.id_table = phytium_px210_mfd_ids,
+	.probe = phytium_px210_mfd_probe,
+	.remove = phytium_px210_mfd_remove,
+};
+
+module_pci_driver(phytium_i2s_mmd_mfd_driver);
+
+MODULE_AUTHOR("Zhang Yiqun ");
+MODULE_DESCRIPTION("Phytium Px210 MFD PCI driver for I2S-DP");
+MODULE_LICENSE("GPL");
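For orientation, and not part of the patch itself: phytium_px210_i2s_setup() above carves the MMD function's single BAR into three 8 KiB slices, one per Display Port stream, with all three cells sharing one interrupt line. Offsets relative to BAR0, per cell i (i = 0..2):

	resource 0 (MEM): 0x2000 * i + 0x1000 .. 0x2000 * i + 0x1fff
	resource 1 (MEM): 0x2000 * i + 0x0000 .. 0x2000 * i + 0x0fff
	resource 2 (IRQ): pdev->irq, shared by all three cells

The patch does not say which of the two MEM windows holds the I2S register block, so no labels are assigned here.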
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 7c82e7364d097..1f984a26c3953 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -204,5 +204,6 @@ source "drivers/net/ethernet/wangxun/Kconfig"
 source "drivers/net/ethernet/wiznet/Kconfig"
 source "drivers/net/ethernet/xilinx/Kconfig"
 source "drivers/net/ethernet/xircom/Kconfig"
+source "drivers/net/ethernet/phytium/Kconfig"
 
 endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index ec269721e28a5..bdb3af2d54c5b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -108,4 +108,5 @@ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
 obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
 obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
 obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/
+obj-$(CONFIG_NET_VENDOR_PHYTIUM) += phytium/
 obj-$(CONFIG_OA_TC6) += oa_tc6.o
diff --git a/drivers/net/ethernet/phytium/Kconfig b/drivers/net/ethernet/phytium/Kconfig
new file mode 100644
index 0000000000000..14a77adbfdc12
--- /dev/null
+++ b/drivers/net/ethernet/phytium/Kconfig
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Phytium device configuration
+#
+
+config NET_VENDOR_PHYTIUM
+	bool "Phytium devices"
+	depends on HAS_IOMEM
+	default y
+	help
+	  If you have a network (Ethernet) card belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all the
+	  remaining Phytium network card questions. If you say Y, you will be
+	  asked for your specific card in the following questions.
+
+if NET_VENDOR_PHYTIUM
+
+config PHYTMAC
+	tristate "Phytium GMAC support"
+	depends on HAS_DMA
+	select PHYLINK
+	select CRC32
+	help
+	  If you have a network (Ethernet) controller of this type, say Y
+	  or M here.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called phytmac.
+
+config PHYTMAC_ENABLE_PTP
+	bool "Enable IEEE 1588 hwstamp"
+	depends on PHYTMAC
+	depends on PTP_1588_CLOCK
+	default y
+	help
+	  Enable IEEE 1588 PTP support for PHYTMAC.
+
+config PHYTMAC_PLATFORM
+	tristate "Phytmac Platform support"
+	depends on PHYTMAC
+	help
+	  This is the platform-device front end of the phytmac driver.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called phytmac_platform.
+
+config PHYTMAC_PCI
+	tristate "Phytmac PCI support"
+	depends on PHYTMAC && PCI
+	help
+	  This is the PCI front end of the phytmac driver.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called phytmac_pci.
+
+endif # NET_VENDOR_PHYTIUM
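As a usage note, not part of the patch: a typical .config fragment enabling the whole phytmac stack as modules would be

	CONFIG_NET_VENDOR_PHYTIUM=y
	CONFIG_PHYTMAC=m
	CONFIG_PHYTMAC_ENABLE_PTP=y
	CONFIG_PHYTMAC_PLATFORM=m
	CONFIG_PHYTMAC_PCI=m

with CONFIG_PTP_1588_CLOCK also enabled, since PHYTMAC_ENABLE_PTP depends on it.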
diff --git a/drivers/net/ethernet/phytium/Makefile b/drivers/net/ethernet/phytium/Makefile
new file mode 100644
index 0000000000000..6e710d4d54b66
--- /dev/null
+++ b/drivers/net/ethernet/phytium/Makefile
@@ -0,0 +1,17 @@
+
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Phytium network device drivers.
+#
+#
+
+obj-$(CONFIG_PHYTMAC) += phytmac.o
+
+phytmac-objs := phytmac_main.o phytmac_ethtool.o phytmac_v1.o phytmac_v2.o
+phytmac-$(CONFIG_PHYTMAC_ENABLE_PTP) += phytmac_ptp.o
+
+obj-$(CONFIG_PHYTMAC_PLATFORM) += phytmac-platform.o
+phytmac-platform-objs := phytmac_platform.o
+
+obj-$(CONFIG_PHYTMAC_PCI) += phytmac-pci.o
+phytmac-pci-objs := phytmac_pci.o
diff --git a/drivers/net/ethernet/phytium/phytmac.h b/drivers/net/ethernet/phytium/phytmac.h
new file mode 100644
index 0000000000000..b90e1551499ef
--- /dev/null
+++ b/drivers/net/ethernet/phytium/phytmac.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _PHYTMAC_H
+#define _PHYTMAC_H
+
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <net/ncsi.h>
+
+#define PHYTMAC_DRV_NAME		"phytium-mac"
+#define PHYTMAC_DRV_DESC		"PHYTIUM Ethernet Driver"
+
+#define PHYTMAC_DEFAULT_MSG_ENABLE	\
+	(NETIF_MSG_DRV |		\
+	 NETIF_MSG_PROBE |		\
+	 NETIF_MSG_LINK |		\
+	 NETIF_MSG_INTR |		\
+	 NETIF_MSG_HW |			\
+	 NETIF_MSG_PKTDATA)
+
+#define IRQ_TYPE_INT			0
+#define IRQ_TYPE_MSI			1
+#define IRQ_TYPE_INTX			2
+
+#define PHYTMAC_MAX_QUEUES		8
+#define DEFAULT_DMA_BURST_LENGTH	16
+#define DEFAULT_JUMBO_MAX_LENGTH	10240
+#define PHYTMAC_MAX_TX_LEN		16320
+#define PHYTMAC_MIN_TX_LEN		64
+#define DEFAULT_TX_RING_SIZE		512
+#define DEFAULT_RX_RING_SIZE		512
+#define MAX_TX_RING_SIZE		1024
+#define MAX_RX_RING_SIZE		4096
+#define MIN_TX_RING_SIZE		64
+#define MIN_RX_RING_SIZE		64
+#define DEFAULT_TX_DESC_MIN_FREE	64
+#define DEFAULT_RX_DESC_MIN_FREE	64
+
+#define MEMORY_SIZE			4096
+#define MHU_SIZE			0x20
+
+#define PHYTMAC_POWEROFF		1
+#define PHYTMAC_POWERON			2
+
+#define PHYTMAC_WOL_MAGIC_PACKET	1
+
+#define DEFAULT_MSG_RING_SIZE		16
+
+#define PHYTMAC_CAPS_JUMBO		0x00000001
+#define PHYTMAC_CAPS_PTP		0x00000002
+#define PHYTMAC_CAPS_BD_RD_PREFETCH	0x00000004
+#define PHYTMAC_CAPS_PCS		0x00000008
+#define PHYTMAC_CAPS_LSO		0x00000010
+#define PHYTMAC_CAPS_SG_DISABLED	0x00000020
+#define PHYTMAC_CAPS_TAILPTR		0x00000040
+#define PHYTMAC_CAPS_START		0x00000080
+#define PHYTMAC_CAPS_NO_WOL		0x00000100
+#define PHYTMAC_CAPS_LPI		0x00000400
+#define PHYTMAC_CAPS_MSG		0x00000800
+
+#define PHYTMAC_TX			0x1
+#define PHYTMAC_RX			0x2
+
+#define PHYTMAC_GREGS_LEN		16
+
+#define PHYTMAC_MTU_MIN_SIZE		ETH_MIN_MTU
+
+#define EQUAL(a, b)			((a) == (b))
+
+#define TXD_USE_COUNT(_pdata, s)	DIV_ROUND_UP((s), (_pdata)->max_tx_length)
+
+/* Bit manipulation macros */
+#define PHYTMAC_BIT(_field)					\
+	(1 << PHYTMAC_##_field##_INDEX)
+
+#define PHYTMAC_BITS(_field, value)				\
+	(((value) & ((1 << PHYTMAC_##_field##_WIDTH) - 1))	\
+	 << PHYTMAC_##_field##_INDEX)
+
+#define PHYTMAC_GET_BITS(_var, _field)				\
+	(((_var) >> (PHYTMAC_##_field##_INDEX))			\
+	 & ((0x1 << (PHYTMAC_##_field##_WIDTH)) - 1))
+
+#define PHYTMAC_SET_BITS(_var, _field, _val)			\
+	(((_var) & ~(((1 << PHYTMAC_##_field##_WIDTH) - 1)	\
+		     << PHYTMAC_##_field##_INDEX))		\
+	 | (((_val) & ((1 << PHYTMAC_##_field##_WIDTH) - 1))	\
+	    << PHYTMAC_##_field##_INDEX))
+
+#define PHYTMAC_READ(_pdata, _reg)				\
+	__raw_readl((_pdata)->mac_regs + (_reg))
+
+#define PHYTMAC_READ_BITS(_pdata, _reg, _field)			\
PHYTMAC_GET_BITS(PHYTMAC_READ((_pdata), _reg), _field) + +#define PHYTMAC_WRITE(_pdata, _reg, _val) \ + __raw_writel((_val), (_pdata)->mac_regs + (_reg)) + +#define PHYTMAC_MSG_READ(_pdata, _reg) \ + __raw_readl((_pdata)->mac_regs + (_reg)) + +#define PHYTMAC_WRITE(_pdata, _reg, _val) \ + __raw_writel((_val), (_pdata)->mac_regs + (_reg)) + +#define LSO_UFO 1 +#define LSO_TSO 2 + +#define PHYTMAC_INT_TX_COMPLETE 0x1 +#define PHYTMAC_INT_TX_ERR 0x2 +#define PHYTMAC_INT_RX_COMPLETE 0x4 +#define PHYTMAC_INT_RX_OVERRUN 0x8 +#define PHYTMAC_INT_RX_DESC_FULL 0x10 +#define PHYTMAC_RX_INT_FLAGS (PHYTMAC_INT_RX_COMPLETE) +#define PHYTMAC_TX_INT_FLAGS (PHYTMAC_INT_TX_COMPLETE \ + | PHYTMAC_INT_TX_ERR) + +#define PHYTMAC_WAKE_MAGIC 0x00000001 +#define PHYTMAC_WAKE_ARP 0x00000002 +#define PHYTMAC_WAKE_UCAST 0x00000004 +#define PHYTMAC_WAKE_MCAST 0x00000008 + +struct packet_info { + int lso; + int desc_cnt; + int hdrlen; + int nocrc; + u32 mss; + u32 seq; +}; + +#define DEV_TYPE_PLATFORM 0 +#define DEV_TYPE_PCI 1 + +struct phytmac_statistics { + char stat_string[ETH_GSTRING_LEN]; +}; + +#define STAT_TITLE(title) { \ + .stat_string = title, \ +} + +static const struct phytmac_statistics phytmac_statistics[] = { + STAT_TITLE("tx_octets"), + STAT_TITLE("tx_packets"), + STAT_TITLE("tx_bcast_packets"), + STAT_TITLE("tx_mcase_packets"), + STAT_TITLE("tx_pause_packets"), + STAT_TITLE("tx_64_byte_packets"), + STAT_TITLE("tx_65_127_byte_packets"), + STAT_TITLE("tx_128_255_byte_packets"), + STAT_TITLE("tx_256_511_byte_packets"), + STAT_TITLE("tx_512_1023_byte_packets"), + STAT_TITLE("tx_1024_1518_byte_packets"), + STAT_TITLE("tx_more_than_1518_byte_packets"), + STAT_TITLE("tx_underrun"), + STAT_TITLE("tx_single_collisions"), + STAT_TITLE("tx_multiple_collisions"), + STAT_TITLE("tx_excessive_collisions"), + STAT_TITLE("tx_late_collisions"), + STAT_TITLE("tx_deferred"), + STAT_TITLE("tx_carrier_sense_errors"), + STAT_TITLE("rx_octets"), + STAT_TITLE("rx_packets"), + STAT_TITLE("rx_bcast_packets"), + STAT_TITLE("rx_mcast_packets"), + STAT_TITLE("rx_pause_packets"), + STAT_TITLE("rx_64_byte_packets"), + STAT_TITLE("rx_65_127_byte_packets"), + STAT_TITLE("rx_128_255_byte_packets"), + STAT_TITLE("rx_256_511_byte_packets"), + STAT_TITLE("rx_512_1023_byte_packets"), + STAT_TITLE("rx_1024_1518_byte_packets"), + STAT_TITLE("rx_more_than_1518_byte_packets"), + STAT_TITLE("rx_undersized_packets"), + STAT_TITLE("rx_oversize_packets"), + STAT_TITLE("rx_jabbers"), + STAT_TITLE("rx_fcs_errors"), + STAT_TITLE("rx_length_errors"), + STAT_TITLE("rx_symbol_errors"), + STAT_TITLE("rx_alignment_errors"), + STAT_TITLE("rx_resource_over"), + STAT_TITLE("rx_overruns"), + STAT_TITLE("rx_iphdr_csum_errors"), + STAT_TITLE("rx_tcp_csum_errors"), + STAT_TITLE("rx_udp_csum_errors"), +}; + +#define PHYTMAC_STATS_LEN ARRAY_SIZE(phytmac_statistics) + +/* per queue statistics, each should be unsigned long type */ +struct phytmac_queue_stats { + unsigned long rx_packets; + unsigned long rx_bytes; + unsigned long rx_dropped; + unsigned long tx_packets; + unsigned long tx_bytes; + unsigned long tx_dropped; +}; + +static const struct phytmac_statistics queue_statistics[] = { + STAT_TITLE("rx_packets"), + STAT_TITLE("rx_bytes"), + STAT_TITLE("rx_dropped"), + STAT_TITLE("tx_packets"), + STAT_TITLE("tx_bytes"), + STAT_TITLE("tx_dropped"), +}; + +#define QUEUE_STATS_LEN ARRAY_SIZE(queue_statistics) + +struct phytmac_config { + struct phytmac_hw_if *hw_if; + u32 caps; + u32 tsu_rate; + u16 queue_num; +}; + +struct phytmac_stats { + u64 tx_octets; + u64 
tx_packets; + u64 tx_bcast_packets; + u64 tx_mcase_packets; + u64 tx_pause_packets; + u64 tx_64_byte_packets; + u64 tx_65_127_byte_packets; + u64 tx_128_255_byte_packets; + u64 tx_256_511_byte_packets; + u64 tx_512_1023_byte_packets; + u64 tx_1024_1518_byte_packets; + u64 tx_more_than_1518_byte_packets; + u64 tx_underrun; + u64 tx_single_collisions; + u64 tx_multiple_collisions; + u64 tx_excessive_collisions; + u64 tx_late_collisions; + u64 tx_deferred; + u64 tx_carrier_sense_errors; + u64 rx_octets; + u64 rx_packets; + u64 rx_bcast_packets; + u64 rx_mcast_packets; + u64 rx_pause_packets; + u64 rx_64_byte_packets; + u64 rx_65_127_byte_packets; + u64 rx_128_255_byte_packets; + u64 rx_256_511_byte_packets; + u64 rx_512_1023_byte_packets; + u64 rx_1024_1518_byte_packets; + u64 rx_more_than_1518_byte_packets; + u64 rx_undersized_packets; + u64 rx_oversize_packets; + u64 rx_jabbers; + u64 rx_fcs_errors; + u64 rx_length_errors; + u64 rx_symbol_errors; + u64 rx_alignment_errors; + u64 rx_resource_over; + u64 rx_overruns; + u64 rx_iphdr_csum_errors; + u64 rx_tcp_csum_errors; + u64 rx_udp_csum_errors; +}; + +struct ts_incr { + u32 sub_ns; + u32 ns; +}; + +enum phytmac_bd_control { + TS_DISABLED, + TS_FRAME_PTP_EVENT_ONLY, + TS_ALL_PTP_FRAMES, + TS_ALL_FRAMES, +}; + +#ifdef CONFIG_PHYTMAC_ENABLE_PTP +struct phytmac_dma_desc { + u32 desc0; + u32 desc1; + u32 desc2; + u32 desc3; + u32 desc4; + u32 desc5; +}; +#else +struct phytmac_dma_desc { + u32 desc0; + u32 desc1; + u32 desc2; + u32 desc3; +}; +#endif + +struct phytmac_tx_skb { + struct sk_buff *skb; + dma_addr_t addr; + size_t length; + bool mapped_as_page; +}; + +struct phytmac_tx_ts { + struct sk_buff *skb; + u32 ts_1; + u32 ts_2; +}; + +struct phytmac_queue { + struct phytmac *pdata; + int irq; + int index; + + /* tx queue info */ + unsigned int tx_head; + unsigned int tx_tail; + unsigned int tx_xmit_more; + dma_addr_t tx_ring_addr; + struct work_struct tx_error_task; + struct napi_struct tx_napi; + struct phytmac_dma_desc *tx_ring; + struct phytmac_tx_skb *tx_skb; + /* Lock to protect tx */ + spinlock_t tx_lock; + + /* rx queue info */ + dma_addr_t rx_ring_addr; + unsigned int rx_head; + unsigned int rx_tail; + struct phytmac_dma_desc *rx_ring; + struct sk_buff **rx_skb; + struct napi_struct rx_napi; + struct phytmac_queue_stats stats; + +#ifdef CONFIG_PHYTMAC_ENABLE_PTP + struct work_struct tx_ts_task; + unsigned int tx_ts_head; + unsigned int tx_ts_tail; + struct phytmac_tx_ts tx_timestamps[128]; +#endif +}; + +struct ethtool_rx_fs_item { + struct ethtool_rx_flow_spec fs; + struct list_head list; +}; + +struct ethtool_rx_fs_list { + struct list_head list; + unsigned int count; +}; + +struct phytmac_msg { + struct completion tx_msg_comp; + u32 tx_msg_ring_size; + u32 rx_msg_ring_size; + u32 tx_msg_head; + u32 tx_msg_tail; + u32 rx_msg_head; + u32 rx_msg_tail; + /* Lock to protect msg */ + spinlock_t msg_lock; +}; + +struct ts_ctrl { + int tx_control; + int rx_control; + int one_step; +}; + +struct phytmac { + void __iomem *mac_regs; + void __iomem *msg_regs; + void __iomem *mhu_regs; + struct pci_dev *pcidev; + struct platform_device *platdev; + struct net_device *ndev; + struct device *dev; + struct ncsi_dev *ncsidev; + struct fwnode_handle *fwnode; + struct phytmac_hw_if *hw_if; + struct phytmac_msg msg_ring; + int dev_type; + int sfp_irq; + int irq_type; + int queue_irq[PHYTMAC_MAX_QUEUES]; + u32 rx_irq_mask; + u32 tx_irq_mask; + u32 msg_enable; + u32 capacities; + u32 max_tx_length; + u32 min_tx_length; + u32 jumbo_len; + u32 wol; + u32 
lpi; + u32 power_state; + struct work_struct restart_task; + /* Lock to protect mac config */ + spinlock_t lock; + /* Lock to protect msg tx */ + spinlock_t msg_lock; + u32 rx_ring_size; + u32 tx_ring_size; + u32 dma_data_width; + u32 dma_addr_width; + u32 dma_burst_length; + int rx_bd_prefetch; + int tx_bd_prefetch; + int rx_buffer_len; + u16 queues_max_num; + u16 queues_num; + struct phytmac_queue queues[PHYTMAC_MAX_QUEUES]; + struct phytmac_stats stats; + u64 ethtool_stats[PHYTMAC_STATS_LEN + + QUEUE_STATS_LEN * PHYTMAC_MAX_QUEUES]; + int use_ncsi; + int use_mii; + struct mii_bus *mii_bus; + struct phylink *phylink; + struct phylink_config phylink_config; + struct phylink_pcs phylink_pcs; + int pause; + phy_interface_t phy_interface; + int speed; + int duplex; + int autoneg; + /* 1588 */ + spinlock_t ts_clk_lock; /* clock locking */ + unsigned int ts_rate; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct ts_incr ts_incr; + struct hwtstamp_config ts_config; + struct ts_ctrl ts_ctrl; + /* RX queue filer rule set */ + struct ethtool_rx_fs_list rx_fs_list; + /* Lock to protect fs */ + spinlock_t rx_fs_lock; + unsigned int max_rx_fs; +}; + +struct phytmac_hw_if { + int (*init_msg_ring)(struct phytmac *pdata); + int (*init_hw)(struct phytmac *pdata); + void (*reset_hw)(struct phytmac *pdata); + int (*init_ring_hw)(struct phytmac *pdata); + int (*poweron)(struct phytmac *pdata, int on); + int (*set_wol)(struct phytmac *pdata, int wake); + int (*get_feature)(struct phytmac *pdata); + int (*set_mac_address)(struct phytmac *pdata, const u8 *addr); + int (*get_mac_address)(struct phytmac *pdata, u8 *addr); + int (*enable_promise)(struct phytmac *pdata, int enable); + int (*enable_multicast)(struct phytmac *pdata, int enable); + int (*set_hash_table)(struct phytmac *pdata, unsigned long *mc_filter); + int (*enable_rx_csum)(struct phytmac *pdata, int enable); + int (*enable_tx_csum)(struct phytmac *pdata, int enable); + int (*enable_pause)(struct phytmac *pdata, int enable); + int (*enable_autoneg)(struct phytmac *pdata, int enable); + int (*enable_network)(struct phytmac *pdata, int enable, int rx_tx); + void (*get_stats)(struct phytmac *pdata); + void (*get_regs)(struct phytmac *pdata, u32 *reg_buff); + int (*set_speed)(struct phytmac *pdata); + + void (*mac_config)(struct phytmac *pdata, u32 mode, + const struct phylink_link_state *state); + int (*mac_linkup)(struct phytmac *pdata, phy_interface_t interface, + int speed, int duplex); + int (*mac_linkdown)(struct phytmac *pdata); + int (*pcs_linkup)(struct phytmac *pdata, phy_interface_t interface, + int speed, int duplex); + int (*pcs_linkdown)(struct phytmac *pdata); + unsigned int (*get_link)(struct phytmac *pdata, phy_interface_t interface); + + /* For RX coalescing */ + int (*config_rx_coalesce)(struct phytmac *pdata); + int (*config_tx_coalesce)(struct phytmac *pdata); + + /* ethtool nfc */ + int (*add_fdir_entry)(struct phytmac *pdata, struct ethtool_rx_flow_spec *rx_flow); + int (*del_fdir_entry)(struct phytmac *pdata, struct ethtool_rx_flow_spec *rx_flow); + + /* mido ops */ + int (*enable_mdio_control)(struct phytmac *pdata, int enable); + int (*mdio_read)(struct phytmac *pdata, int mii_id, int regnum); + int (*mdio_write)(struct phytmac *pdata, int mii_id, + int regnum, u16 data); + int (*mdio_read_c45)(struct phytmac *pdata, int mii_id, int devad, int regnum); + int (*mdio_write_c45)(struct phytmac *pdata, int mii_id, int devad, + int regnum, u16 data); + void (*enable_irq)(struct phytmac *pdata, int 
queue_index, u32 mask); + void (*disable_irq)(struct phytmac *pdata, int queue_index, u32 mask); + void (*clear_irq)(struct phytmac *pdata, int queue_index, u32 mask); + void (*mask_irq)(struct phytmac *pdata, int queue_index, u32 mask); + unsigned int (*get_irq)(struct phytmac *pdata, int queue_index); + unsigned int (*get_intx_mask)(struct phytmac *pdata); + unsigned int (*tx_map)(struct phytmac_queue *pdata, u32 tx_tail, + struct packet_info *packet); + void (*init_rx_map)(struct phytmac_queue *queue, u32 index); + unsigned int (*rx_map)(struct phytmac_queue *queue, u32 index, dma_addr_t addr); + unsigned int (*rx_clean)(struct phytmac_queue *queue, u32 cleaned_count); + void (*transmit)(struct phytmac_queue *queue); + void (*restart)(struct phytmac *pdata); + int (*tx_complete)(const struct phytmac_dma_desc *desc); + int (*rx_complete)(const struct phytmac_dma_desc *desc); + int (*get_rx_pkt_len)(struct phytmac *pdata, const struct phytmac_dma_desc *desc); + dma_addr_t (*get_desc_addr)(const struct phytmac_dma_desc *desc); + bool (*rx_checksum)(const struct phytmac_dma_desc *desc); + void (*set_desc_rxused)(struct phytmac_dma_desc *desc); + bool (*rx_single_buffer)(const struct phytmac_dma_desc *desc); + bool (*rx_pkt_start)(const struct phytmac_dma_desc *desc); + bool (*rx_pkt_end)(const struct phytmac_dma_desc *desc); + void (*clear_rx_desc)(struct phytmac_queue *queue, int begin, int end); + void (*clear_tx_desc)(struct phytmac_queue *queue); + /* ptp */ + void (*init_ts_hw)(struct phytmac *pdata); + void (*get_time)(struct phytmac *pdata, struct timespec64 *ts); + void (*set_time)(struct phytmac *pdata, time64_t sec, long nsec); + int (*set_ts_config)(struct phytmac *pdata, struct ts_ctrl *ctrl); + void (*clear_time)(struct phytmac *pdata); + int (*set_incr)(struct phytmac *pdata, struct ts_incr *incr); + int (*adjust_fine)(struct phytmac *pdata, long ppm, bool negative); + int (*adjust_time)(struct phytmac *pdata, s64 delta, int neg); + int (*ts_valid)(struct phytmac *pdata, struct phytmac_dma_desc *desc, + int direction); + void (*get_timestamp)(struct phytmac *pdata, u32 ts_1, u32 ts_2, + struct timespec64 *ts); + unsigned int (*get_ts_rate)(struct phytmac *pdata); +}; + +/* mhu */ +#define PHYTMAC_MHU_AP_CPP_STAT 0x00 +#define PHYTMAC_MHU_AP_CPP_SET 0x04 +#define PHYTMAC_MHU_CPP_DATA0 0x18 +#define PHYTMAC_MHU_CPP_DATA1 0x1c + +#define PHYTMAC_MHU_STAT_BUSY_INDEX 0 +#define PHYTMAC_MHU_STAT_BUSY_WIDTH 1 + +#define PHYTMAC_MHU_SET_INDEX 0 +#define PHYTMAC_MHU_SET_WIDTH 1 + +#define PHYTMAC_DATA0_FREE_INDEX 0 +#define PHYTMAC_DATA0_FREE_WIDTH 1 +#define PHYTMAC_DATA0_DOMAIN_INDEX 1 +#define PHYTMAC_DATA0_DOMAIN_WIDTH 7 +#define PHYTMAC_DATA0_MSG_INDEX 8 +#define PHYTMAC_DATA0_MSG_WIDTH 8 +#define PHYTMAC_MSG_PM 0x04 +#define PHYTMAC_DATA0_PRO_INDEX 16 +#define PHYTMAC_DATA0_PRO_WIDTH 8 +#define PHYTMAC_PRO_ID 0x11 +#define PHYTMAC_DATA0_PAYLOAD_INDEX 24 +#define PHYTMAC_DATA0_PAYLOAD_WIDTH 8 + +#define PHYTMAC_DATA1_STAT_INDEX 0 +#define PHYTMAC_DATA1_STAT_WIDTH 28 +#define PHYTMAC_STATON 8 +#define PHYTMAC_STATOFF 0 +#define PHYTMAC_DATA1_MUST0_INDEX 28 +#define PHYTMAC_DATA1_MUST0_WIDTH 2 +#define PHYTMAC_DATA1_STATTYPE_INDEX 30 +#define PHYTMAC_DATA1_STATTYPE_WIDTH 1 +#define PHYTMAC_STATTYPE 0x1 +#define PHYTMAC_DATA1_MUST1_INDEX 31 +#define PHYTMAC_DATA1_MUST1_WIDTH 1 + +#define PHYTMAC_MHU_READ(_pdata, _reg) \ + __raw_readl((_pdata)->mhu_regs + (_reg)) +#define PHYTMAC_MHU_WRITE(_pdata, _reg, _val) \ + __raw_writel((_val), (_pdata)->mhu_regs + (_reg)) +#define 
PHYTMAC_READ_STAT(pdata) PHYTMAC_MHU_READ(pdata, PHYTMAC_MHU_AP_CPP_STAT) +#define PHYTMAC_READ_DATA0(pdata) PHYTMAC_MHU_READ(pdata, PHYTMAC_MHU_CPP_DATA0) +#define PHYTMAC_TIMEOUT 1000000000 /* in usecs */ + +struct phytmac_tx_skb *phytmac_get_tx_skb(struct phytmac_queue *queue, + unsigned int index); +inline struct phytmac_dma_desc *phytmac_get_tx_desc(struct phytmac_queue *queue, + unsigned int index); +inline struct phytmac_dma_desc *phytmac_get_rx_desc(struct phytmac_queue *queue, + unsigned int index); +void phytmac_set_ethtool_ops(struct net_device *netdev); +int phytmac_drv_probe(struct phytmac *pdata); +int phytmac_drv_remove(struct phytmac *pdata); +int phytmac_drv_suspend(struct phytmac *pdata); +int phytmac_drv_resume(struct phytmac *pdata); +struct phytmac *phytmac_alloc_pdata(struct device *dev); +void phytmac_free_pdata(struct phytmac *pdata); +int phytmac_reset_ringsize(struct phytmac *pdata, u32 rx_size, u32 tx_size); +#endif diff --git a/drivers/net/ethernet/phytium/phytmac_ethtool.c b/drivers/net/ethernet/phytium/phytmac_ethtool.c new file mode 100644 index 0000000000000..d212451cd682f --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_ethtool.c @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include "phytmac.h" +#include "phytmac_v1.h" +#include "phytmac_v2.h" +#include "phytmac_ptp.h" + +static void phytmac_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, + u64 *data) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + int i = PHYTMAC_STATS_LEN, j; + int q; + struct phytmac_queue *queue; + unsigned long *stat; + + hw_if->get_stats(pdata); + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) + for (j = 0, stat = &queue->stats.rx_packets; j < QUEUE_STATS_LEN; ++j, ++stat) + pdata->ethtool_stats[i++] = *stat; + + memcpy(data, &pdata->ethtool_stats, sizeof(u64) + * (PHYTMAC_STATS_LEN + QUEUE_STATS_LEN * pdata->queues_num)); +} + +static inline int phytmac_get_sset_count(struct net_device *ndev, int sset) +{ + struct phytmac *pdata = netdev_priv(ndev); + + switch (sset) { + case ETH_SS_STATS: + return PHYTMAC_STATS_LEN + QUEUE_STATS_LEN * pdata->queues_num; + default: + return -EOPNOTSUPP; + } +} + +static void phytmac_get_ethtool_strings(struct net_device *ndev, u32 sset, u8 *p) +{ + char stat_string[ETH_GSTRING_LEN]; + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_queue *queue; + unsigned int i; + unsigned int q; + + switch (sset) { + case ETH_SS_STATS: + for (i = 0; i < PHYTMAC_STATS_LEN; i++, p += ETH_GSTRING_LEN) + memcpy(p, phytmac_statistics[i].stat_string, + ETH_GSTRING_LEN); + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) { + snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s", + q, queue_statistics[i].stat_string); + memcpy(p, stat_string, ETH_GSTRING_LEN); + } + } + break; + } +} + +static inline int phytmac_get_regs_len(struct net_device *ndev) +{ + return PHYTMAC_GREGS_LEN; +} + +static void phytmac_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, + void *p) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + u32 *regs_buff = p; + + memset(p, 0, PHYTMAC_GREGS_LEN * sizeof(u32)); + + hw_if->get_regs(pdata, regs_buff); +} + +static void phytmac_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct phytmac *pdata = netdev_priv(ndev); + + 
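/* Query the PHY through phylink first, then merge in the MAC-level
+	 * wake options latched in pdata->wol so that both layers'
+	 * capabilities are reported to ethtool.
+	 */
+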
phylink_ethtool_get_wol(pdata->phylink, wol); + + if (pdata->wol & PHYTMAC_WAKE_MAGIC) { + wol->wolopts |= WAKE_MAGIC; + wol->supported |= WAKE_MAGIC; + } + if (pdata->wol & PHYTMAC_WAKE_ARP) { + wol->wolopts |= WAKE_ARP; + wol->supported |= WAKE_ARP; + } + if (pdata->wol & PHYTMAC_WAKE_UCAST) { + wol->wolopts |= WAKE_UCAST; + wol->supported |= WAKE_UCAST; + } + if (pdata->wol & PHYTMAC_WAKE_MCAST) { + wol->wolopts |= WAKE_MCAST; + wol->supported |= WAKE_MCAST; + } +} + +static int phytmac_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct phytmac *pdata = netdev_priv(ndev); + int ret; + + ret = phylink_ethtool_set_wol(pdata->phylink, wol); + + if (!ret || ret != -EOPNOTSUPP) + return ret; + + pdata->wol = 0; + + if (wol->wolopts & WAKE_MAGIC) + pdata->wol |= PHYTMAC_WAKE_MAGIC; + if (wol->wolopts & WAKE_ARP) + pdata->wol |= PHYTMAC_WAKE_ARP; + if (wol->wolopts & WAKE_UCAST) + pdata->wol |= PHYTMAC_WAKE_UCAST; + if (wol->wolopts & WAKE_MCAST) + pdata->wol |= PHYTMAC_WAKE_MCAST; + + device_set_wakeup_enable(pdata->dev, pdata->wol ? 1 : 0); + + return 0; +} + +static void phytmac_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct phytmac *pdata = netdev_priv(ndev); + + ring->rx_max_pending = MAX_RX_RING_SIZE; + ring->tx_max_pending = MAX_TX_RING_SIZE; + + ring->rx_pending = pdata->rx_ring_size; + ring->tx_pending = pdata->tx_ring_size; +} + +static int phytmac_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct phytmac *pdata = netdev_priv(ndev); + u32 new_rx_size, new_tx_size; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + new_rx_size = clamp_t(u32, ring->rx_pending, + MIN_RX_RING_SIZE, MAX_RX_RING_SIZE); + new_rx_size = roundup_pow_of_two(new_rx_size); + + new_tx_size = clamp_t(u32, ring->tx_pending, + MIN_TX_RING_SIZE, MAX_TX_RING_SIZE); + new_tx_size = roundup_pow_of_two(new_tx_size); + + if (EQUAL(new_tx_size, pdata->tx_ring_size) && + EQUAL(new_rx_size, pdata->rx_ring_size)) { + /* nothing to do */ + return 0; + } + + return phytmac_reset_ringsize(pdata, new_rx_size, new_tx_size); +} + +static int phytmac_get_ts_info(struct net_device *ndev, + struct kernel_ethtool_ts_info *info) +{ + struct phytmac *pdata = netdev_priv(ndev); + + if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) { + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = + (1 << HWTSTAMP_TX_ONESTEP_SYNC) | + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + info->phc_index = pdata->ptp_clock ? 
ptp_clock_index(pdata->ptp_clock) : -1; + + return 0; + } + + return ethtool_op_get_ts_info(ndev, info); +} + +static int phytmac_add_fdir_ethtool(struct net_device *ndev, + struct ethtool_rxnfc *cmd) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct ethtool_rx_flow_spec *fs = &cmd->fs; + struct ethtool_rx_fs_item *item, *newfs; + unsigned long flags; + int ret = -EINVAL; + bool added = false; + struct phytmac_hw_if *hw_if = pdata->hw_if; + + if (cmd->fs.location >= pdata->max_rx_fs || + cmd->fs.ring_cookie >= pdata->queues_num) { + return -EINVAL; + } + + newfs = kmalloc(sizeof(*newfs), GFP_KERNEL); + if (!newfs) + return -ENOMEM; + memcpy(&newfs->fs, fs, sizeof(newfs->fs)); + + netdev_dbg(ndev, "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", + fs->flow_type, (int)fs->ring_cookie, fs->location, + htonl(fs->h_u.tcp_ip4_spec.ip4src), + htonl(fs->h_u.tcp_ip4_spec.ip4dst), + htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst)); + + spin_lock_irqsave(&pdata->rx_fs_lock, flags); + + /* find correct place to add in list */ + list_for_each_entry(item, &pdata->rx_fs_list.list, list) { + if (item->fs.location > newfs->fs.location) { + list_add_tail(&newfs->list, &item->list); + added = true; + break; + } else if (item->fs.location == fs->location) { + netdev_err(ndev, "Rule not added: location %d not free!\n", + fs->location); + ret = -EBUSY; + goto err; + } + } + if (!added) + list_add_tail(&newfs->list, &pdata->rx_fs_list.list); + + hw_if->add_fdir_entry(pdata, fs); + pdata->rx_fs_list.count++; + + spin_unlock_irqrestore(&pdata->rx_fs_lock, flags); + return 0; + +err: + spin_unlock_irqrestore(&pdata->rx_fs_lock, flags); + kfree(newfs); + return ret; +} + +static int phytmac_del_fdir_ethtool(struct net_device *ndev, + struct ethtool_rxnfc *cmd) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct ethtool_rx_fs_item *item; + struct ethtool_rx_flow_spec *fs; + unsigned long flags; + struct phytmac_hw_if *hw_if = pdata->hw_if; + + spin_lock_irqsave(&pdata->rx_fs_lock, flags); + + list_for_each_entry(item, &pdata->rx_fs_list.list, list) { + if (item->fs.location == cmd->fs.location) { + /* disable screener regs for the flow entry */ + fs = &item->fs; + netdev_dbg(ndev, "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", + fs->flow_type, (int)fs->ring_cookie, fs->location, + htonl(fs->h_u.tcp_ip4_spec.ip4src), + htonl(fs->h_u.tcp_ip4_spec.ip4dst), + htons(fs->h_u.tcp_ip4_spec.psrc), + htons(fs->h_u.tcp_ip4_spec.pdst)); + + hw_if->del_fdir_entry(pdata, fs); + + list_del(&item->list); + pdata->rx_fs_list.count--; + spin_unlock_irqrestore(&pdata->rx_fs_lock, flags); + kfree(item); + return 0; + } + } + + spin_unlock_irqrestore(&pdata->rx_fs_lock, flags); + return -EINVAL; +} + +static int phytmac_get_fdir_entry(struct net_device *ndev, + struct ethtool_rxnfc *cmd) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct ethtool_rx_fs_item *item; + + list_for_each_entry(item, &pdata->rx_fs_list.list, list) { + if (item->fs.location == cmd->fs.location) { + memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); + return 0; + } + } + return -EINVAL; +} + +static int phytmac_get_all_fdir_entries(struct net_device *ndev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct ethtool_rx_fs_item *item; + u32 cnt = 0; + + list_for_each_entry(item, &pdata->rx_fs_list.list, list) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = item->fs.location; + cnt++; + } + 
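/* Report the filter-table capacity in cmd->data and the number of
+	 * locations copied into rule_locs in cmd->rule_cnt, as the
+	 * ETHTOOL_GRXCLSRLALL contract expects.
+	 */
+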
cmd->data = pdata->max_rx_fs;
+	cmd->rule_cnt = cnt;
+
+	return 0;
+}
+
+static int phytmac_get_rxnfc(struct net_device *ndev,
+			     struct ethtool_rxnfc *cmd,
+			     u32 *rule_locs)
+{
+	struct phytmac *pdata = netdev_priv(ndev);
+	int ret = 0;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = pdata->queues_num;
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = pdata->rx_fs_list.count;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		ret = phytmac_get_fdir_entry(ndev, cmd);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		ret = phytmac_get_all_fdir_entries(ndev, cmd, rule_locs);
+		break;
+	default:
+		netdev_err(ndev, "Command parameter %d is not supported\n", cmd->cmd);
+		ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+static int phytmac_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd)
+{
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		return phytmac_add_fdir_ethtool(ndev, cmd);
+	case ETHTOOL_SRXCLSRLDEL:
+		return phytmac_del_fdir_ethtool(ndev, cmd);
+	default:
+		netdev_err(ndev, "Command parameter %d is not supported\n", cmd->cmd);
+		return -EOPNOTSUPP;
+	}
+}
+
+static int phytmac_get_link_ksettings(struct net_device *ndev,
+				      struct ethtool_link_ksettings *kset)
+{
+	int ret = 0;
+	struct phytmac *pdata = netdev_priv(ndev);
+	u32 supported = 0;
+	u32 advertising = 0;
+
+	if (!ndev->phydev) {
+		if (pdata->phy_interface == PHY_INTERFACE_MODE_USXGMII ||
+		    pdata->phy_interface == PHY_INTERFACE_MODE_10GBASER) {
+			supported = SUPPORTED_10000baseT_Full
+				    | SUPPORTED_FIBRE | SUPPORTED_Pause;
+			advertising = ADVERTISED_10000baseT_Full
+				      | ADVERTISED_FIBRE | ADVERTISED_Pause;
+			kset->base.port = PORT_FIBRE;
+			kset->base.transceiver = XCVR_INTERNAL;
+			kset->base.duplex = DUPLEX_FULL;
+			kset->base.speed = SPEED_10000;
+		} else if (pdata->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+			supported = SUPPORTED_2500baseX_Full | SUPPORTED_Pause;
+			advertising = ADVERTISED_2500baseX_Full | ADVERTISED_Pause;
+			kset->base.port = PORT_FIBRE;
+			kset->base.transceiver = XCVR_INTERNAL;
+			kset->base.duplex = DUPLEX_FULL;
+			kset->base.speed = SPEED_2500;
+		} else if (pdata->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
+			supported = SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
+				    | SUPPORTED_10baseT_Full | SUPPORTED_FIBRE
+				    | SUPPORTED_Pause;
+			advertising = ADVERTISED_1000baseT_Full | ADVERTISED_100baseT_Full
+				      | ADVERTISED_10baseT_Full | ADVERTISED_FIBRE
+				      | ADVERTISED_Pause;
+			kset->base.port = PORT_FIBRE;
+			kset->base.transceiver = XCVR_INTERNAL;
+			kset->base.duplex = DUPLEX_FULL;
+			kset->base.speed = SPEED_1000;
+		} else if (pdata->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+			supported = SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
+				    | SUPPORTED_10baseT_Full | SUPPORTED_FIBRE | SUPPORTED_Pause;
+			advertising = ADVERTISED_1000baseT_Full | ADVERTISED_100baseT_Full
+				      | ADVERTISED_10baseT_Full | ADVERTISED_FIBRE | ADVERTISED_Pause;
+			kset->base.port = PORT_FIBRE;
+			kset->base.transceiver = XCVR_INTERNAL;
+			kset->base.duplex = DUPLEX_FULL;
+			kset->base.speed = SPEED_1000;
+		}
+
+		ethtool_convert_legacy_u32_to_link_mode(kset->link_modes.supported,
+							supported);
+		ethtool_convert_legacy_u32_to_link_mode(kset->link_modes.advertising,
+							advertising);
+	} else {
+		phy_ethtool_get_link_ksettings(ndev, kset);
+	}
+
+	return ret;
+}
+
+static int phytmac_set_link_ksettings(struct net_device *ndev,
+				      const struct ethtool_link_ksettings *kset)
+{
+	int ret = 0;
+
+	if (!ndev->phydev) {
+		netdev_err(ndev, "cannot set link settings on a fixed-link interface\n");
+		ret = -EOPNOTSUPP;
+	} else {
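+		/* A PHY is attached; hand the request to phylib. */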
+		phy_ethtool_set_link_ksettings(ndev, kset);
+	}
+
+	return ret;
+}
+
+static inline void phytmac_get_pauseparam(struct net_device *ndev,
+					  struct ethtool_pauseparam *pause)
+{
+	struct phytmac *pdata = netdev_priv(ndev);
+
+	pause->rx_pause = pdata->pause;
+	pause->tx_pause = pdata->pause;
+}
+
+static int phytmac_set_pauseparam(struct net_device *ndev,
+				  struct ethtool_pauseparam *pause)
+{
+	struct phytmac *pdata = netdev_priv(ndev);
+	struct phytmac_hw_if *hw_if = pdata->hw_if;
+
+	if (pause->rx_pause != pdata->pause)
+		hw_if->enable_pause(pdata, pause->rx_pause);
+
+	pdata->pause = pause->rx_pause;
+
+	return 0;
+}
+
+static inline void phytmac_get_channels(struct net_device *ndev,
+					struct ethtool_channels *ch)
+{
+	struct phytmac *pdata = netdev_priv(ndev);
+
+	ch->max_combined = pdata->queues_max_num;
+	ch->combined_count = pdata->queues_num;
+}
+
+static int phytmac_set_channels(struct net_device *ndev,
+				struct ethtool_channels *ch)
+{
+	struct phytmac *pdata = netdev_priv(ndev);
+
+	if (netif_running(ndev))
+		return -EBUSY;
+
+	if (ch->combined_count > pdata->queues_max_num) {
+		netdev_err(ndev, "combined channel count cannot exceed %u\n",
+			   pdata->queues_max_num);
+
+		return -EINVAL;
+	}
+
+	pdata->queues_num = ch->combined_count;
+
+	return 0;
+}
+
+static inline u32 phytmac_get_msglevel(struct net_device *ndev)
+{
+	struct phytmac *pdata = netdev_priv(ndev);
+
+	return pdata->msg_enable;
+}
+
+static inline void phytmac_set_msglevel(struct net_device *ndev, u32 level)
+{
+	struct phytmac *pdata = netdev_priv(ndev);
+
+	pdata->msg_enable = level;
+}
+
+static const struct ethtool_ops phytmac_ethtool_ops = {
+	.get_regs_len = phytmac_get_regs_len,
+	.get_regs = phytmac_get_regs,
+	.get_msglevel = phytmac_get_msglevel,
+	.set_msglevel = phytmac_set_msglevel,
+	.get_link = ethtool_op_get_link,
+	.get_ts_info = phytmac_get_ts_info,
+	.get_ethtool_stats = phytmac_get_ethtool_stats,
+	.get_strings = phytmac_get_ethtool_strings,
+	.get_sset_count = phytmac_get_sset_count,
+	.get_link_ksettings = phytmac_get_link_ksettings,
+	.set_link_ksettings = phytmac_set_link_ksettings,
+	.get_ringparam = phytmac_get_ringparam,
+	.set_ringparam = phytmac_set_ringparam,
+	.get_rxnfc = phytmac_get_rxnfc,
+	.set_rxnfc = phytmac_set_rxnfc,
+	.get_pauseparam = phytmac_get_pauseparam,
+	.set_pauseparam = phytmac_set_pauseparam,
+	.get_channels = phytmac_get_channels,
+	.set_channels = phytmac_set_channels,
+	.get_wol = phytmac_get_wol,
+	.set_wol = phytmac_set_wol,
+};
+
+void phytmac_set_ethtool_ops(struct net_device *ndev)
+{
+	ndev->ethtool_ops = &phytmac_ethtool_ops;
+}
+
diff --git a/drivers/net/ethernet/phytium/phytmac_main.c b/drivers/net/ethernet/phytium/phytmac_main.c
new file mode 100644
index 0000000000000..ada04899c9ccd
--- /dev/null
+++ b/drivers/net/ethernet/phytium/phytmac_main.c
@@ -0,0 +1,2187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Phytium Ethernet Controller driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytmac.h" +#include "phytmac_ptp.h" + +static int debug; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +#define RX_BUFFER_MULTIPLE 64 /* bytes */ +#define MAX_MTU 3072 +#define RING_ADDR_INTERVAL 128 + +#define RX_RING_BYTES(pdata) (sizeof(struct phytmac_dma_desc) \ + * (pdata)->rx_ring_size) + +#define TX_RING_BYTES(pdata) (sizeof(struct phytmac_dma_desc)\ + * (pdata)->tx_ring_size) + +/* Max length of transmit frame must be a multiple of 8 bytes */ +#define PHYTMAC_TX_LEN_ALIGN 8 +/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a + * false amba_error in TX path from the DMA assuming there is not enough + * space in the SRAM (16KB) even when there is. + */ + +static int phytmac_change_mtu(struct net_device *ndev, int new_mtu) +{ + if (netif_running(ndev)) + return -EBUSY; + + if (new_mtu > MAX_MTU) { + netdev_info(ndev, "Can not set MTU over %d.\n", MAX_MTU); + return -EINVAL; + } + + ndev->mtu = new_mtu; + + return 0; +} + +static int phytmac_set_mac_address(struct net_device *netdev, void *addr) +{ + struct phytmac *pdata = netdev_priv(netdev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct sockaddr *saddr = addr; + + if (netif_msg_drv(pdata)) + netdev_info(netdev, "phytmac set mac address"); + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, saddr->sa_data); + + hw_if->set_mac_address(pdata, saddr->sa_data); + + return 0; +} + +static int phytmac_get_mac_address(struct phytmac *pdata) +{ + struct phytmac_hw_if *hw_if = pdata->hw_if; + u8 addr[6]; + + hw_if->get_mac_address(pdata, addr); + + if (is_valid_ether_addr(addr)) { + eth_hw_addr_set(pdata->ndev, addr); + return 0; + } + dev_info(pdata->dev, "invalid hw address, using random\n"); + eth_hw_addr_random(pdata->ndev); + + return 0; +} + +static int phytmac_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) +{ + struct phytmac *pdata = bus->priv; + struct phytmac_hw_if *hw_if = pdata->hw_if; + int data; + + data = hw_if->mdio_read(pdata, mii_id, regnum); + + if (netif_msg_link(pdata)) + netdev_info(pdata->ndev, "mdio read c22 mii_id=%d, regnum=%x, data=%x\n", + mii_id, regnum, data); + + return data; +} + +static int phytmac_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, u16 data) +{ + struct phytmac *pdata = bus->priv; + struct phytmac_hw_if *hw_if = pdata->hw_if; + int ret; + + ret = hw_if->mdio_write(pdata, mii_id, regnum, data); + if (ret) + netdev_err(pdata->ndev, "mdio %d, reg %x, data %x write failed!\n", + mii_id, regnum, data); + + if (netif_msg_link(pdata)) + netdev_info(pdata->ndev, "mdio write c22 mii_id=%d, regnum=%x, data=%x\n", + mii_id, regnum, data); + + return 0; +} + +static int phytmac_mdio_read_c45(struct mii_bus *bus, int mii_id, int devad, int regnum) +{ + struct phytmac *pdata = bus->priv; + struct phytmac_hw_if *hw_if = pdata->hw_if; + int data; + + data = hw_if->mdio_read_c45(pdata, mii_id, devad, regnum); + + if (netif_msg_link(pdata)) + netdev_info(pdata->ndev, "mdio read c45 mii_id=%d, regnum=%x, data=%x\n", + mii_id, regnum, data); + + return data; +} + +static int phytmac_mdio_write_c45(struct mii_bus *bus, int mii_id, 
int devad, int regnum, u16 data) +{ + struct phytmac *pdata = bus->priv; + struct phytmac_hw_if *hw_if = pdata->hw_if; + int ret; + + ret = hw_if->mdio_write_c45(pdata, mii_id, devad, regnum, data); + if (ret) + netdev_err(pdata->ndev, "mdio %d, reg %x, data %x write failed!\n", + mii_id, regnum, data); + + if (netif_msg_link(pdata)) + netdev_info(pdata->ndev, "mdio write c45 mii_id=%d, regnum=%x, data=%x\n", + mii_id, regnum, data); + + return 0; +} + +static inline int hash_bit_value(int bitnr, __u8 *addr) +{ + if (addr[bitnr / 8] & (1 << (bitnr % 8))) + return 1; + return 0; +} + +/* Return the hash index value for the specified address. */ +static int phytmac_get_hash_index(__u8 *addr) +{ + int i, j, bitval; + int hash_index = 0; + + for (j = 0; j < 6; j++) { + for (i = 0, bitval = 0; i < 8; i++) + bitval ^= hash_bit_value(i * 6 + j, addr); + + hash_index |= (bitval << j); + } + + return hash_index; +} + +static void phytmac_set_mac_hash_table(struct net_device *ndev) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct netdev_hw_addr *ha; + unsigned long mc_filter[2]; + unsigned int bitnr; + + mc_filter[0] = 0; + mc_filter[1] = 0; + + netdev_for_each_mc_addr(ha, ndev) { + bitnr = phytmac_get_hash_index(ha->addr); + mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); + } + + hw_if->set_hash_table(pdata, mc_filter); +} + +/* Enable/Disable promiscuous and multicast modes. */ +static void phytmac_set_rx_mode(struct net_device *ndev) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + + hw_if->enable_promise(pdata, ndev->flags & IFF_PROMISC); + + hw_if->enable_multicast(pdata, ndev->flags & IFF_ALLMULTI); + if (!netdev_mc_empty(ndev)) + phytmac_set_mac_hash_table(ndev); +} + +static struct net_device_stats *phytmac_get_stats(struct net_device *dev) +{ + struct phytmac *pdata = netdev_priv(dev); + struct net_device_stats *nstat = &pdata->ndev->stats; + struct phytmac_stats *stat = &pdata->stats; + struct phytmac_hw_if *hw_if = pdata->hw_if; + + if (pdata->power_state == PHYTMAC_POWEROFF) + return nstat; + + hw_if->get_stats(pdata); + + nstat->rx_errors = (stat->rx_fcs_errors + + stat->rx_alignment_errors + + stat->rx_overruns + + stat->rx_oversize_packets + + stat->rx_jabbers + + stat->rx_undersized_packets + + stat->rx_length_errors); + nstat->rx_dropped = stat->rx_resource_over; + nstat->tx_errors = (stat->tx_late_collisions + + stat->tx_excessive_collisions + + stat->tx_underrun + + stat->tx_carrier_sense_errors); + nstat->multicast = stat->rx_mcast_packets; + nstat->collisions = (stat->tx_single_collisions + + stat->tx_multiple_collisions + + stat->tx_excessive_collisions + + stat->tx_late_collisions); + nstat->rx_length_errors = (stat->rx_oversize_packets + + stat->rx_jabbers + + stat->rx_undersized_packets + + stat->rx_length_errors); + nstat->rx_over_errors = stat->rx_resource_over; + nstat->rx_crc_errors = stat->rx_fcs_errors; + nstat->rx_frame_errors = stat->rx_alignment_errors; + nstat->rx_fifo_errors = stat->rx_overruns; + nstat->tx_aborted_errors = stat->tx_excessive_collisions; + nstat->tx_carrier_errors = stat->tx_carrier_sense_errors; + nstat->tx_fifo_errors = stat->tx_underrun; + + return nstat; +} + +static int phytmac_calc_rx_buf_len(struct phytmac *pdata, u32 mtu) +{ + unsigned int size = mtu + ETH_HLEN + ETH_FCS_LEN; + int rx_buf_len = roundup(size, RX_BUFFER_MULTIPLE); + + netdev_dbg(pdata->ndev, "mtu [%u] rx_buffer_size [%u]\n", + mtu, rx_buf_len); + + return rx_buf_len; +} + +inline 
struct phytmac_dma_desc *phytmac_get_rx_desc(struct phytmac_queue *queue, + unsigned int index) +{ + return &queue->rx_ring[index & (queue->pdata->rx_ring_size - 1)]; +} + +static struct sk_buff *phytmac_get_rx_skb(struct phytmac_queue *queue, + unsigned int index) +{ + return queue->rx_skb[index & (queue->pdata->rx_ring_size - 1)]; +} + +struct phytmac_tx_skb *phytmac_get_tx_skb(struct phytmac_queue *queue, + unsigned int index) +{ + return &queue->tx_skb[index & (queue->pdata->tx_ring_size - 1)]; +} + +inline struct phytmac_dma_desc *phytmac_get_tx_desc(struct phytmac_queue *queue, + unsigned int index) +{ + return &queue->tx_ring[index & (queue->pdata->tx_ring_size - 1)]; +} + +static int phytmac_free_tx_resource(struct phytmac *pdata) +{ + struct phytmac_queue *queue; + struct phytmac_dma_desc *tx_ring_base = NULL; + dma_addr_t tx_ring_base_addr; + unsigned int q; + int size; + + queue = pdata->queues; + if (queue->tx_ring) { + tx_ring_base = queue->tx_ring; + tx_ring_base_addr = queue->tx_ring_addr; + } + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + kfree(queue->tx_skb); + queue->tx_skb = NULL; + + if (queue->tx_ring) + queue->tx_ring = NULL; + } + + if (tx_ring_base) { + size = pdata->queues_num * (TX_RING_BYTES(pdata) + pdata->tx_bd_prefetch + + RING_ADDR_INTERVAL); + dma_free_coherent(pdata->dev, size, tx_ring_base, tx_ring_base_addr); + } + + return 0; +} + +static int phytmac_free_rx_resource(struct phytmac *pdata) +{ + struct phytmac_queue *queue; + struct sk_buff *skb; + struct phytmac_dma_desc *desc; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_dma_desc *rx_ring_base = NULL; + dma_addr_t rx_ring_base_addr; + dma_addr_t addr; + unsigned int q; + int size, i; + + queue = pdata->queues; + if (queue->rx_ring) { + rx_ring_base = queue->rx_ring; + rx_ring_base_addr = queue->rx_ring_addr; + } + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + if (queue->rx_skb) { + for (i = 0; i < pdata->rx_ring_size; i++) { + skb = phytmac_get_rx_skb(queue, i); + if (skb) { + desc = &queue->rx_ring[i]; + addr = hw_if->get_desc_addr(desc); + dma_unmap_single(pdata->dev, addr, pdata->rx_buffer_len, + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + skb = NULL; + } + } + + kfree(queue->rx_skb); + queue->rx_skb = NULL; + } + + if (queue->rx_ring) + queue->rx_ring = NULL; + } + + if (rx_ring_base) { + size = pdata->queues_num * (RX_RING_BYTES(pdata) + pdata->rx_bd_prefetch + + RING_ADDR_INTERVAL); + dma_free_coherent(pdata->dev, size, rx_ring_base, rx_ring_base_addr); + } + + return 0; +} + +static int phytmac_alloc_tx_resource(struct phytmac *pdata) +{ + struct phytmac_queue *queue; + struct phytmac_dma_desc *tx_ring_base; + dma_addr_t tx_ring_base_addr; + unsigned int q; + int size; + + size = pdata->queues_num * (TX_RING_BYTES(pdata) + pdata->tx_bd_prefetch + + RING_ADDR_INTERVAL); + tx_ring_base = dma_alloc_coherent(pdata->dev, size, + &tx_ring_base_addr, GFP_KERNEL); + if (!tx_ring_base) + goto err; + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + size = TX_RING_BYTES(pdata) + pdata->tx_bd_prefetch + RING_ADDR_INTERVAL; + queue->tx_ring = (void *)tx_ring_base + q * size; + queue->tx_ring_addr = tx_ring_base_addr + q * size; + if (!queue->tx_ring) + goto err; + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, + "Allocated TX ring for queue %u of %d bytes at %08lx\n", + q, size, (unsigned long)queue->tx_ring_addr); + + size = pdata->tx_ring_size * sizeof(struct phytmac_tx_skb); + 
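/* One bookkeeping entry per descriptor: tx_skb[] records each DMA
+		 * mapping (address, length, page flag) and, for the last buffer
+		 * of a frame, the skb to release on TX completion.
+		 */
+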
queue->tx_skb = kzalloc(size, GFP_KERNEL); + if (!queue->tx_skb) + goto err; + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, + "Allocated %d TX struct tx_skb entries at %p\n", + pdata->tx_ring_size, queue->tx_skb); + } + tx_ring_base = NULL; + + return 0; +err: + phytmac_free_tx_resource(pdata); + + return -ENOMEM; +} + +static int phytmac_alloc_rx_resource(struct phytmac *pdata) +{ + struct phytmac_queue *queue; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_dma_desc *rx_ring_base; + dma_addr_t rx_ring_base_addr; + unsigned int q; + int size; + int i; + + size = pdata->queues_num * (RX_RING_BYTES(pdata) + pdata->rx_bd_prefetch + + RING_ADDR_INTERVAL); + rx_ring_base = dma_alloc_coherent(pdata->dev, size, + &rx_ring_base_addr, GFP_KERNEL); + if (!rx_ring_base) + goto err; + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + size = RX_RING_BYTES(pdata) + pdata->rx_bd_prefetch + RING_ADDR_INTERVAL; + queue->rx_ring = (void *)rx_ring_base + q * size; + queue->rx_ring_addr = rx_ring_base_addr + q * size; + if (!queue->rx_ring) + goto err; + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, + "Allocated RX ring for queue %u of %d bytes at %08lx\n", + q, size, (unsigned long)queue->rx_ring_addr); + + for (i = 0; i < pdata->rx_ring_size; i++) + hw_if->init_rx_map(queue, i); + + size = pdata->rx_ring_size * sizeof(struct sk_buff *); + queue->rx_skb = kzalloc(size, GFP_KERNEL); + if (!queue->rx_skb) + goto err; + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, + "Allocated %d RX struct sk_buff entries at %p\n", + pdata->rx_ring_size, queue->rx_skb); + } + rx_ring_base = NULL; + + return 0; +err: + phytmac_free_rx_resource(pdata); + + return -ENOMEM; +} + +static int phytmac_alloc_resource(struct phytmac *pdata) +{ + struct net_device *ndev = pdata->ndev; + int ret; + + pdata->rx_buffer_len = phytmac_calc_rx_buf_len(pdata, ndev->mtu); + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, "alloc resource, rx_buffer_len = %d\n", + pdata->rx_buffer_len); + + ret = phytmac_alloc_tx_resource(pdata); + if (ret) + return ret; + + ret = phytmac_alloc_rx_resource(pdata); + if (ret) { + phytmac_free_tx_resource(pdata); + return ret; + } + + return 0; +} + +static void phytmac_free_resource(struct phytmac *pdata) +{ + phytmac_free_tx_resource(pdata); + phytmac_free_rx_resource(pdata); +} + +static irqreturn_t phytmac_irq(int irq, void *data) +{ + struct phytmac_queue *queue = data; + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + u32 status; + + status = hw_if->get_irq(pdata, queue->index); + + if (netif_msg_intr(pdata)) + netdev_info(pdata->ndev, "phymac irq status = %x\n", status); + + if (unlikely(!status)) + return IRQ_NONE; + + while (status) { + if (status & pdata->rx_irq_mask) { + /* Disable RX interrupts */ + hw_if->disable_irq(pdata, queue->index, pdata->rx_irq_mask); + hw_if->clear_irq(pdata, queue->index, PHYTMAC_INT_RX_COMPLETE); + + if (napi_schedule_prep(&queue->rx_napi)) + __napi_schedule(&queue->rx_napi); + } + + if (status & (PHYTMAC_INT_TX_COMPLETE)) { + /* Disable TX interrupts */ + hw_if->disable_irq(pdata, queue->index, PHYTMAC_INT_TX_COMPLETE); + hw_if->clear_irq(pdata, queue->index, PHYTMAC_INT_TX_COMPLETE); + + if (napi_schedule_prep(&queue->tx_napi)) + __napi_schedule(&queue->tx_napi); + } + + if (status & PHYTMAC_INT_TX_ERR) + hw_if->clear_irq(pdata, queue->index, PHYTMAC_INT_TX_ERR); + + if (status & PHYTMAC_INT_RX_OVERRUN) { + hw_if->clear_irq(pdata, queue->index, 
PHYTMAC_INT_RX_OVERRUN); + pdata->stats.rx_overruns++; + } + status = hw_if->get_irq(pdata, queue->index); + } + + return IRQ_HANDLED; +} + +static irqreturn_t phytmac_intx_irq(int irq, void *data) +{ + struct phytmac *pdata = data; + struct phytmac_hw_if *hw_if = pdata->hw_if; + u32 irq_mask; + int i; + + irq_mask = hw_if->get_intx_mask(pdata); + + if (unlikely(!irq_mask)) + return IRQ_NONE; + + for (i = 0; i < pdata->queues_num; i++) { + if (irq_mask & BIT(i)) + phytmac_irq(irq, &pdata->queues[i]); + } + + return IRQ_HANDLED; +} + +static void phytmac_dump_pkt(struct phytmac *pdata, struct sk_buff *skb, bool tx) +{ + struct net_device *ndev = pdata->ndev; + + if (tx) { + netdev_dbg(ndev, "start_xmit: queue %u len %u head %p data %p tail %p end %p\n", + skb->queue_mapping, skb->len, skb->head, skb->data, + skb_tail_pointer(skb), skb_end_pointer(skb)); + } else { + netdev_dbg(ndev, "queue %u received skb of length %u, csum: %08x\n", + skb->queue_mapping, skb->len, skb->csum); + print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, + skb_mac_header(skb), 16, true); + } + + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb->len, true); +} + +static struct sk_buff *phytmac_rx_single(struct phytmac_queue *queue, struct phytmac_dma_desc *desc) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct sk_buff *skb; + unsigned int len; + dma_addr_t addr; + + skb = phytmac_get_rx_skb(queue, queue->rx_tail); + if (unlikely(!skb)) { + netdev_err(pdata->ndev, + "inconsistent Rx descriptor chain\n"); + pdata->ndev->stats.rx_dropped++; + queue->stats.rx_dropped++; + return NULL; + } + + queue->rx_skb[queue->rx_tail & (pdata->rx_ring_size - 1)] = NULL; + len = hw_if->get_rx_pkt_len(pdata, desc); + addr = hw_if->get_desc_addr(desc); + + skb_put(skb, len); + dma_unmap_single(pdata->dev, addr, + pdata->rx_buffer_len, DMA_FROM_DEVICE); + skb->protocol = eth_type_trans(skb, pdata->ndev); + skb_checksum_none_assert(skb); + + if (pdata->ndev->features & NETIF_F_RXCSUM && + !(pdata->ndev->flags & IFF_PROMISC) && + hw_if->rx_checksum(desc)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (netif_msg_pktdata(pdata)) + phytmac_dump_pkt(pdata, skb, false); + + return skb; +} + +static struct sk_buff *phytmac_rx_frame(struct phytmac_queue *queue, + unsigned int first_frag, unsigned int last_frag, int len) +{ + unsigned int offset = 0; + unsigned int frag = 0; + unsigned int entry = 0; + dma_addr_t addr = 0; + struct sk_buff *skb; + struct phytmac_dma_desc *desc; + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + unsigned int frag_len = pdata->rx_buffer_len; + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, "rx frame %u - %u (len %u)\n", + first_frag, last_frag, len); + + skb = netdev_alloc_skb(pdata->ndev, len); + if (!skb) { + pdata->ndev->stats.rx_dropped++; + netdev_err(pdata->ndev, "rx frame alloc skb failed\n"); + return NULL; + } + + skb_checksum_none_assert(skb); + + if (pdata->ndev->features & NETIF_F_RXCSUM && + !(pdata->ndev->flags & IFF_PROMISC) && + hw_if->rx_checksum(phytmac_get_rx_desc(queue, last_frag))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + skb_put(skb, len); + + for (frag = first_frag; ; frag++) { + if (offset + frag_len > len) { + if (unlikely(frag != last_frag)) { + dev_kfree_skb_any(skb); + return NULL; + } + frag_len = len - offset; + } + + desc = phytmac_get_rx_desc(queue, frag); + addr = hw_if->get_desc_addr(desc); + dma_sync_single_for_cpu(pdata->dev, addr, 
frag_len, + DMA_FROM_DEVICE); + + entry = frag & (pdata->rx_ring_size - 1); + skb_copy_to_linear_data_offset(skb, offset, queue->rx_skb[entry]->data, frag_len); + + offset += pdata->rx_buffer_len; + + dma_sync_single_for_device(pdata->dev, addr, frag_len, + DMA_FROM_DEVICE); + + if (frag == last_frag) + break; + } + + skb->protocol = eth_type_trans(skb, pdata->ndev); + if (netif_msg_pktdata(pdata)) + phytmac_dump_pkt(pdata, skb, false); + + return skb; +} + +static struct sk_buff *phytmac_rx_mbuffer(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_dma_desc *desc; + struct sk_buff *skb = NULL; + unsigned int rx_tail = 0; + int first_frag = -1; + int len; + + for (rx_tail = queue->rx_tail; ; rx_tail++) { + desc = phytmac_get_rx_desc(queue, rx_tail); + if (hw_if->rx_pkt_start(desc)) { + if (first_frag != -1) + hw_if->clear_rx_desc(queue, first_frag, rx_tail); + first_frag = rx_tail; + continue; + } + + if (hw_if->rx_pkt_end(desc)) { + queue->rx_tail = rx_tail; + len = hw_if->get_rx_pkt_len(pdata, desc); + skb = phytmac_rx_frame(queue, first_frag, rx_tail, len); + first_frag = -1; + break; + } + } + return skb; +} + +static void phytmac_rx_clean(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + unsigned int index, space; + dma_addr_t paddr; + struct sk_buff *skb; + unsigned int rx_unclean = 0; + + space = CIRC_SPACE(queue->rx_head, queue->rx_tail, + pdata->rx_ring_size); + + if (space < DEFAULT_RX_DESC_MIN_FREE) + return; + + index = queue->rx_head & (pdata->rx_ring_size - 1); + while (space > 0) { + if (!queue->rx_skb[index]) { + skb = netdev_alloc_skb(pdata->ndev, pdata->rx_buffer_len); + if (unlikely(!skb)) { + netdev_err(pdata->ndev, "rx clean alloc skb failed\n"); + break; + } + + paddr = dma_map_single(pdata->dev, skb->data, + pdata->rx_buffer_len, DMA_FROM_DEVICE); + if (dma_mapping_error(pdata->dev, paddr)) { + dev_kfree_skb(skb); + break; + } + + queue->rx_skb[index] = skb; + + hw_if->rx_map(queue, index, paddr); + } + + index = (index + 1) & (pdata->rx_ring_size - 1); + rx_unclean++; + space--; + } + + /* make newly descriptor to hardware */ + wmb(); + hw_if->rx_clean(queue, rx_unclean); + /* make newly descriptor to hardware */ + wmb(); + queue->rx_head += rx_unclean; + if (queue->rx_head >= pdata->rx_ring_size) + queue->rx_head &= (pdata->rx_ring_size - 1); +} + +static int phytmac_rx(struct phytmac_queue *queue, struct napi_struct *napi, + int budget) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct sk_buff *skb; + struct phytmac_dma_desc *desc; + int count = 0; + + while (count < budget) { + desc = phytmac_get_rx_desc(queue, queue->rx_tail); + /* make newly desc to cpu */ + rmb(); + + if (!hw_if->rx_complete(desc)) + break; + + /* Ensure ctrl is at least as up-to-date as rxused */ + dma_rmb(); + + if (hw_if->rx_single_buffer(desc)) + skb = phytmac_rx_single(queue, desc); + else + skb = phytmac_rx_mbuffer(queue); + + if (!skb) { + netdev_warn(pdata->ndev, "phytmac rx skb is NULL\n"); + break; + } + + pdata->ndev->stats.rx_packets++; + queue->stats.rx_packets++; + pdata->ndev->stats.rx_bytes += skb->len; + queue->stats.rx_bytes += skb->len; + queue->rx_tail = (queue->rx_tail + 1) & (pdata->rx_ring_size - 1); + + count++; + + if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) + phytmac_ptp_rxstamp(pdata, skb, desc); + + napi_gro_receive(napi, skb); + } + + phytmac_rx_clean(queue); + + 
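/* Report how many packets were consumed: count == budget means
+	 * more work may be pending, while count < budget lets the caller
+	 * complete NAPI and re-enable receive interrupts.
+	 */
+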
return count; +} + +static void phytmac_tx_unmap(struct phytmac *pdata, struct phytmac_tx_skb *tx_skb, int budget) +{ + if (tx_skb->addr) { + if (tx_skb->mapped_as_page) + dma_unmap_page(pdata->dev, tx_skb->addr, + tx_skb->length, DMA_TO_DEVICE); + else + dma_unmap_single(pdata->dev, tx_skb->addr, + tx_skb->length, DMA_TO_DEVICE); + tx_skb->addr = 0; + } + + if (tx_skb->skb) { + napi_consume_skb(tx_skb->skb, budget); + tx_skb->skb = NULL; + } +} + +static int phytmac_maybe_stop_tx_queue(struct phytmac_queue *queue, + unsigned int count) +{ + struct phytmac *pdata = queue->pdata; + int space = CIRC_SPACE(queue->tx_tail, queue->tx_head, + pdata->tx_ring_size); + + if (space < count) { + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, "Tx queue %d stopped, not enough descriptors available\n", + queue->index); + + netif_stop_subqueue(pdata->ndev, queue->index); + + return NETDEV_TX_BUSY; + } + + return 0; +} + +static int phytmac_maybe_wake_tx_queue(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + int space = CIRC_CNT(queue->tx_tail, queue->tx_head, + pdata->tx_ring_size); + + return (space <= (3 * pdata->tx_ring_size / 4)) ? 1 : 0; +} + +static int phytmac_tx_clean(struct phytmac_queue *queue, int budget) +{ + struct phytmac *pdata = queue->pdata; + u16 queue_index = queue->index; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_tx_skb *tx_skb; + struct phytmac_dma_desc *desc; + int complete = 0; + int packet_count = 0; + unsigned int tail = queue->tx_tail; + unsigned int head; + + spin_lock(&pdata->lock); + + for (head = queue->tx_head; head != tail && packet_count < budget; ) { + struct sk_buff *skb; + + desc = phytmac_get_tx_desc(queue, head); + /* make newly desc to cpu */ + rmb(); + if (!hw_if->tx_complete(desc)) + break; + + /* Process all buffers of the current transmitted frame */ + for (;; head++) { + tx_skb = phytmac_get_tx_skb(queue, head); + skb = tx_skb->skb; + + if (skb) { + complete = 1; + if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) { + if (unlikely(skb_shinfo(skb)->tx_flags & + SKBTX_HW_TSTAMP) && + !phytmac_ptp_one_step(skb)) { + phytmac_ptp_txstamp(queue, skb, desc); + } + } + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, "desc %u (data %p) tx complete\n", + head, tx_skb->skb->data); + + pdata->ndev->stats.tx_packets++; + queue->stats.tx_packets++; + pdata->ndev->stats.tx_bytes += tx_skb->skb->len; + queue->stats.tx_bytes += tx_skb->skb->len; + packet_count++; + } + + /* Now we can safely release resources */ + phytmac_tx_unmap(pdata, tx_skb, budget); + + if (complete) { + complete = 0; + break; + } + } + + head++; + if (head >= pdata->tx_ring_size) + head &= (pdata->tx_ring_size - 1); + } + + queue->tx_head = head; + if (__netif_subqueue_stopped(pdata->ndev, queue_index) && + (phytmac_maybe_wake_tx_queue(queue))) + netif_wake_subqueue(pdata->ndev, queue_index); + spin_unlock(&pdata->lock); + + return packet_count; +} + +static int phytmac_rx_poll(struct napi_struct *napi, int budget) +{ + struct phytmac_queue *queue = container_of(napi, struct phytmac_queue, rx_napi); + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_dma_desc *desc; + int work_done; + + work_done = phytmac_rx(queue, napi, budget); + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, "RX poll: queue = %u, work_done = %d, budget = %d\n", + (unsigned int)(queue->index), work_done, budget); + if (work_done < budget && napi_complete_done(napi, work_done)) { + hw_if->enable_irq(pdata, queue->index, 
pdata->rx_irq_mask); + + desc = phytmac_get_rx_desc(queue, queue->rx_tail); + /* make newly desc to cpu */ + rmb(); + + if (hw_if->rx_complete(desc)) { + hw_if->disable_irq(pdata, queue->index, pdata->rx_irq_mask); + hw_if->clear_irq(pdata, queue->index, PHYTMAC_INT_RX_COMPLETE); + + napi_schedule(napi); + } + } + + return work_done; +} + +static int phytmac_tx_poll(struct napi_struct *napi, int budget) +{ + struct phytmac_queue *queue = container_of(napi, struct phytmac_queue, tx_napi); + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_dma_desc *desc; + int work_done; + + work_done = phytmac_tx_clean(queue, budget); + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, "TX poll: queue = %u, work_done = %d, budget = %d\n", + (unsigned int)(queue->index), work_done, budget); + if (work_done < budget && napi_complete_done(napi, work_done)) { + hw_if->enable_irq(pdata, queue->index, PHYTMAC_INT_TX_COMPLETE); + if (queue->tx_head != queue->tx_tail) { + desc = phytmac_get_tx_desc(queue, queue->tx_head); + /* make newly desc to cpu */ + rmb(); + + if (hw_if->tx_complete(desc)) { + hw_if->disable_irq(pdata, queue->index, PHYTMAC_INT_TX_COMPLETE); + hw_if->clear_irq(pdata, queue->index, PHYTMAC_INT_TX_COMPLETE); + + napi_schedule(napi); + } + } + } + + return work_done; +} + +static inline int phytmac_clear_csum(struct sk_buff *skb) +{ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + /* make sure we can modify the header */ + if (unlikely(skb_cow_head(skb, 0))) + return -1; + + *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; + return 0; +} + +static int phytmac_add_fcs(struct sk_buff **skb, struct net_device *ndev) +{ + bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) || + skb_is_nonlinear(*skb); + int padlen = ETH_ZLEN - (*skb)->len; + int headroom = skb_headroom(*skb); + int tailroom = skb_tailroom(*skb); + struct sk_buff *nskb; + u32 fcs; + int i; + + if ((ndev->features & NETIF_F_HW_CSUM) || + !((*skb)->ip_summed != CHECKSUM_PARTIAL) || + skb_shinfo(*skb)->gso_size || phytmac_ptp_one_step(*skb)) + return 0; + + if (padlen <= 0) { + if (tailroom >= ETH_FCS_LEN) + goto add_fcs; + else if (!cloned && headroom + tailroom >= ETH_FCS_LEN) + padlen = 0; + else + padlen = ETH_FCS_LEN; + } else { + padlen += ETH_FCS_LEN; + } + + if (!cloned && headroom + tailroom >= padlen) { + (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); + skb_set_tail_pointer(*skb, (*skb)->len); + } else { + nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); + if (!nskb) + return -ENOMEM; + + dev_consume_skb_any(*skb); + *skb = nskb; + } + + if (padlen > ETH_FCS_LEN) + skb_put_zero(*skb, padlen - ETH_FCS_LEN); + +add_fcs: + fcs = crc32_le(~0, (*skb)->data, (*skb)->len); + fcs = ~fcs; + + for (i = 0; i < 4; ++i) + skb_put_u8(*skb, (fcs >> (i * 8)) & 0xff); + return 0; +} + +static int phytmac_packet_info(struct phytmac *pdata, + struct phytmac_queue *queue, struct sk_buff *skb, + struct packet_info *packet) +{ + int is_lso; + unsigned int hdrlen, f; + int desc_cnt; + + memset(packet, 0, sizeof(struct packet_info)); + + is_lso = (skb_shinfo(skb)->gso_size != 0); + + if (is_lso) { + /* length of headers */ + if (ip_hdr(skb)->protocol == IPPROTO_UDP) { + /* only queue eth + ip headers separately for UDP */ + hdrlen = skb_transport_offset(skb); + packet->lso = LSO_UFO; + packet->mss = skb_shinfo(skb)->gso_size + hdrlen + ETH_FCS_LEN; + } else { + hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); + packet->lso = LSO_TSO; + 
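/* TSO passes the raw gso_size as the MSS, whereas the UFO path
+			 * above folds the header and FCS lengths into the fragment
+			 * size handed to hardware.
+			 */
+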
packet->mss = skb_shinfo(skb)->gso_size; + } + + if (skb_headlen(skb) < hdrlen) { + dev_err(pdata->dev, "Error - LSO headers fragmented!!!\n"); + return NETDEV_TX_BUSY; + } + } else { + hdrlen = min(skb_headlen(skb), pdata->max_tx_length); + packet->lso = 0; + packet->mss = 0; + } + + packet->hdrlen = hdrlen; + + if (is_lso && (skb_headlen(skb) > hdrlen)) + desc_cnt = TXD_USE_COUNT(pdata, (skb_headlen(skb) - hdrlen)) + 1; + else + desc_cnt = TXD_USE_COUNT(pdata, hdrlen); + + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + desc_cnt += TXD_USE_COUNT(pdata, skb_frag_size(&skb_shinfo(skb)->frags[f])); + packet->desc_cnt = desc_cnt; + + if ((!(pdata->ndev->features & NETIF_F_HW_CSUM)) && + skb->ip_summed != CHECKSUM_PARTIAL && + !is_lso && + !phytmac_ptp_one_step(skb)) + packet->nocrc = 1; + else + packet->nocrc = 0; + + if (netif_msg_pktdata(pdata)) { + netdev_info(pdata->ndev, "packet info: desc_cnt=%d, nocrc=%d,ip_summed=%d\n", + desc_cnt, packet->nocrc, skb->ip_summed); + netdev_info(pdata->ndev, "packet info: mss=%d, lso=%d,skb_len=%d, nr_frags=%d\n", + packet->mss, packet->lso, skb->len, skb_shinfo(skb)->nr_frags); + } + + return 0; +} + +static unsigned int phytmac_tx_map(struct phytmac *pdata, + struct phytmac_queue *queue, + struct sk_buff *skb, + struct packet_info *packet) +{ + dma_addr_t mapping; + struct phytmac_hw_if *hw_if = pdata->hw_if; + unsigned int len, i, tx_tail = queue->tx_tail; + struct phytmac_tx_skb *tx_skb = NULL; + unsigned int offset, size, count = 0; + unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; + + len = skb_headlen(skb); + size = packet->hdrlen; + + offset = 0; + tx_tail = queue->tx_tail; + while (len) { + tx_skb = phytmac_get_tx_skb(queue, tx_tail); + + mapping = dma_map_single(pdata->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + if (dma_mapping_error(pdata->dev, mapping)) + goto dma_error; + + /* Save info to properly release resources */ + tx_skb->skb = NULL; + tx_skb->addr = mapping; + tx_skb->length = size; + tx_skb->mapped_as_page = false; + + len -= size; + offset += size; + count++; + tx_tail++; + + size = min(len, pdata->max_tx_length); + } + + /* Then, map paged data from fragments */ + for (f = 0; f < nr_frags; f++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + len = skb_frag_size(frag); + offset = 0; + while (len) { + size = min(len, pdata->max_tx_length); + tx_skb = phytmac_get_tx_skb(queue, tx_tail); + mapping = skb_frag_dma_map(pdata->dev, frag, + offset, size, DMA_TO_DEVICE); + if (dma_mapping_error(pdata->dev, mapping)) + goto dma_error; + + /* Save info to properly release resources */ + tx_skb->skb = NULL; + tx_skb->addr = mapping; + tx_skb->length = size; + tx_skb->mapped_as_page = true; + + len -= size; + offset += size; + count++; + tx_tail++; + } + } + + /* Should never happen */ + if (unlikely(!tx_skb)) { + netdev_err(pdata->ndev, "BUG! 
empty skb!\n");
+		return 0;
+	}
+
+	/* This is the last buffer of the frame: save socket buffer */
+	tx_skb->skb = skb;
+
+	if (hw_if->tx_map(queue, tx_tail, packet)) {
+		netdev_err(pdata->ndev, "BUG! hw tx map failed!\n");
+		return 0;
+	}
+
+	queue->tx_tail = tx_tail & (pdata->tx_ring_size - 1);
+
+	return count;
+
+dma_error:
+	netdev_err(pdata->ndev, "TX DMA map failed\n");
+
+	for (i = queue->tx_tail; i != tx_tail; i++) {
+		tx_skb = phytmac_get_tx_skb(queue, i);
+		phytmac_tx_unmap(pdata, tx_skb, 0);
+	}
+
+	return 0;
+}
+
+static inline void phytmac_init_ring(struct phytmac *pdata)
+{
+	struct phytmac_hw_if *hw_if = pdata->hw_if;
+	struct phytmac_queue *queue;
+	unsigned int q = 0;
+
+	for (queue = pdata->queues; q < pdata->queues_num; ++q) {
+		queue->tx_head = 0;
+		queue->tx_tail = 0;
+		hw_if->clear_tx_desc(queue);
+
+		queue->rx_head = 0;
+		queue->rx_tail = 0;
+		phytmac_rx_clean(queue);
+		++queue;
+	}
+
+	hw_if->init_ring_hw(pdata);
+}
+
+static netdev_tx_t phytmac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct phytmac *pdata = netdev_priv(ndev);
+	struct phytmac_hw_if *hw_if = pdata->hw_if;
+	u16 queue_index = skb->queue_mapping;
+	struct phytmac_queue *queue = &pdata->queues[queue_index];
+	netdev_tx_t ret = NETDEV_TX_OK;
+	struct packet_info packet;
+	unsigned long flags;
+
+	if (phytmac_clear_csum(skb)) {
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	if (phytmac_add_fcs(&skb, ndev)) {
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	ret = phytmac_packet_info(pdata, queue, skb, &packet);
+	if (ret) {
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	if (netif_msg_pktdata(pdata))
+		phytmac_dump_pkt(pdata, skb, true);
+
+	spin_lock_irqsave(&pdata->lock, flags);
+	/* Check that there are enough descriptors available */
+	ret = phytmac_maybe_stop_tx_queue(queue, packet.desc_cnt);
+	if (ret)
+		goto tx_return;
+
+	/* Map socket buffer for DMA transfer */
+	if (!phytmac_tx_map(pdata, queue, skb, &packet)) {
+		dev_kfree_skb_any(skb);
+		goto tx_return;
+	}
+
+	skb_tx_timestamp(skb);
+	/* Make sure the descriptor updates are visible to hardware
+	 * before starting DMA.
+	 */
+	wmb();
+
+	hw_if->transmit(queue);
+
+tx_return:
+	spin_unlock_irqrestore(&pdata->lock, flags);
+	return ret;
+}
+
+static int phytmac_phylink_connect(struct phytmac *pdata)
+{
+	struct net_device *ndev = pdata->ndev;
+	struct phy_device *phydev;
+	struct fwnode_handle *fwnode = dev_fwnode(pdata->dev);
+	int ret = 0;
+
+	if (fwnode)
+		ret = phylink_fwnode_phy_connect(pdata->phylink, fwnode, 0);
+
+	if (!fwnode || ret) {
+		if (pdata->mii_bus) {
+			phydev = phy_find_first(pdata->mii_bus);
+			if (!phydev) {
+				dev_err(pdata->dev, "no PHY found\n");
+				return -ENXIO;
+			}
+			/* attach the mac to the phy */
+			ret = phylink_connect_phy(pdata->phylink, phydev);
+		} else {
+			netdev_err(ndev, "no MII bus registered\n");
+			return -ENXIO;
+		}
+	}
+
+	if (ret) {
+		netdev_err(ndev, "Could not attach PHY (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int phytmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+			      phy_interface_t interface,
+			      const unsigned long *advertising,
+			      bool permit_pause_to_mac)
+{
+	return 0;
+}
+
+static void phytmac_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+				phy_interface_t interface, int speed, int duplex)
+{
+	struct phytmac *pdata = container_of(pcs, struct phytmac, phylink_pcs);
+	struct phytmac_hw_if *hw_if = pdata->hw_if;
+
+	if (netif_msg_link(pdata))
+		netdev_info(pdata->ndev, "pcs link up, interface = %s, speed = %d, duplex = %d\n",
+			    phy_modes(interface), speed, duplex);
+	hw_if->pcs_linkup(pdata,
interface, speed, duplex); +} + +static const struct phylink_pcs_ops phytmac_pcs_phylink_ops = { + .pcs_config = phytmac_pcs_config, + .pcs_link_up = phytmac_pcs_link_up, +}; + +static struct phylink_pcs *phytmac_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct phytmac *pdata = netdev_priv(to_net_dev(config->dev)); + + if (interface == PHY_INTERFACE_MODE_USXGMII || + interface == PHY_INTERFACE_MODE_10GBASER || + interface == PHY_INTERFACE_MODE_SGMII || + interface == PHY_INTERFACE_MODE_1000BASEX || + interface == PHY_INTERFACE_MODE_2500BASEX) { + pdata->phylink_pcs.ops = &phytmac_pcs_phylink_ops; + } else { + pdata->phylink_pcs.ops = NULL; + } + + return &pdata->phylink_pcs; +} + +static void phytmac_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct phytmac *pdata = netdev_priv(to_net_dev(config->dev)); + struct phytmac_hw_if *hw_if = pdata->hw_if; + unsigned long flags; + + if (netif_msg_link(pdata)) { + netdev_info(pdata->ndev, "mac config interface=%s, mode=%d\n", + phy_modes(state->interface), mode); + } + + spin_lock_irqsave(&pdata->lock, flags); + hw_if->mac_config(pdata, mode, state); + spin_unlock_irqrestore(&pdata->lock, flags); +} + +static void phytmac_mac_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_queue *queue; + unsigned int q; + unsigned long flags; + struct phytmac_tx_skb *tx_skb; + int i; + + if (netif_msg_link(pdata)) { + netdev_info(ndev, "link down interface:%s, mode=%d\n", + phy_modes(interface), mode); + } + + if (pdata->use_ncsi) + ncsi_stop_dev(pdata->ncsidev); + + spin_lock_irqsave(&pdata->lock, flags); + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + hw_if->disable_irq(pdata, queue->index, pdata->rx_irq_mask | pdata->tx_irq_mask); + hw_if->clear_irq(pdata, queue->index, pdata->rx_irq_mask | pdata->tx_irq_mask); + } + + /* Disable Rx and Tx */ + hw_if->enable_network(pdata, false, PHYTMAC_RX | PHYTMAC_TX); + + /* Tx clean */ + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + for (i = 0; i < pdata->tx_ring_size; i++) { + tx_skb = phytmac_get_tx_skb(queue, i); + if (tx_skb) + phytmac_tx_unmap(pdata, tx_skb, 0); + } + } + + spin_unlock_irqrestore(&pdata->lock, flags); + + netif_tx_stop_all_queues(ndev); +} + +static void phytmac_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_queue *queue; + unsigned long flags; + unsigned int q; + int ret; + + if (netif_msg_link(pdata)) + netdev_info(pdata->ndev, "link up interface:%s, speed:%d, duplex:%s\n", + phy_modes(interface), speed, duplex ? 
"full-duplex" : "half-duplex"); + + spin_lock_irqsave(&pdata->lock, flags); + + hw_if->mac_linkup(pdata, interface, speed, duplex); + + if (rx_pause != pdata->pause) { + hw_if->enable_pause(pdata, rx_pause); + pdata->pause = rx_pause; + } + + phytmac_init_ring(pdata); + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) + hw_if->enable_irq(pdata, queue->index, pdata->rx_irq_mask | pdata->tx_irq_mask); + + /* Enable Rx and Tx */ + hw_if->enable_network(pdata, true, PHYTMAC_RX | PHYTMAC_TX); + spin_unlock_irqrestore(&pdata->lock, flags); + + if (pdata->use_ncsi) { + /* Start the NCSI device */ + ret = ncsi_start_dev(pdata->ncsidev); + if (ret) { + netdev_err(pdata->ndev, "Ncsi start dev failed (error %d)\n", ret); + return; + } + } + + netif_tx_wake_all_queues(ndev); +} + +static int phytmac_mdio_register(struct phytmac *pdata) +{ + struct phytmac_hw_if *hw_if = pdata->hw_if; + int ret; + + pdata->mii_bus = mdiobus_alloc(); + if (!pdata->mii_bus) { + ret = -ENOMEM; + goto err_out; + } + + pdata->mii_bus->name = "phytmac_mii_bus"; + pdata->mii_bus->read = &phytmac_mdio_read_c22; + pdata->mii_bus->write = &phytmac_mdio_write_c22; + pdata->mii_bus->read_c45 = &phytmac_mdio_read_c45; + pdata->mii_bus->write_c45 = &phytmac_mdio_write_c45; + + if (pdata->platdev) { + snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%s-%s", + pdata->mii_bus->name, pdata->platdev->name); + } else if (pdata->pcidev) { + snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%s-%s", + pdata->mii_bus->name, pci_name(pdata->pcidev)); + } else { + ret = -ENOMEM; + goto free_mdio; + } + + pdata->mii_bus->priv = pdata; + pdata->mii_bus->parent = pdata->dev; + + hw_if->enable_mdio_control(pdata, 1); + + return mdiobus_register(pdata->mii_bus); +free_mdio: + mdiobus_free(pdata->mii_bus); + pdata->mii_bus = NULL; + +err_out: + return ret; +} + +static void phytmac_pcs_get_state(struct phylink_config *config, + struct phylink_link_state *state) +{ + struct phytmac *pdata = container_of(config, struct phytmac, phylink_config); + struct phytmac_hw_if *hw_if = pdata->hw_if; + + state->link = hw_if->get_link(pdata, state->interface); +} + +static const struct phylink_mac_ops phytmac_phylink_ops = { + .mac_select_pcs = phytmac_mac_select_pcs, + .mac_config = phytmac_mac_config, + .mac_link_down = phytmac_mac_link_down, + .mac_link_up = phytmac_mac_link_up, +}; + +static inline void set_phy_interface(unsigned long *intf) +{ + __set_bit(PHY_INTERFACE_MODE_SGMII, intf); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, intf); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, intf); + __set_bit(PHY_INTERFACE_MODE_USXGMII, intf); + __set_bit(PHY_INTERFACE_MODE_5GBASER, intf); + __set_bit(PHY_INTERFACE_MODE_10GBASER, intf); + phy_interface_set_rgmii(intf); +} + +static int phytmac_phylink_create(struct phytmac *pdata) +{ + struct fwnode_handle *fw_node = dev_fwnode(pdata->dev); + + pdata->phylink_config.dev = &pdata->ndev->dev; + pdata->phylink_config.type = PHYLINK_NETDEV; + if (pdata->phy_interface == PHY_INTERFACE_MODE_SGMII || + pdata->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + pdata->phy_interface == PHY_INTERFACE_MODE_2500BASEX || + pdata->phy_interface == PHY_INTERFACE_MODE_USXGMII || + pdata->phy_interface == PHY_INTERFACE_MODE_10GBASER) { + pdata->phylink_config.poll_fixed_state = true; + pdata->phylink_config.get_fixed_state = phytmac_pcs_get_state; + pdata->phylink_pcs.ops = &phytmac_pcs_phylink_ops; + } + + set_phy_interface(pdata->phylink_config.supported_interfaces); + pdata->phylink_config.mac_capabilities = 
MAC_ASYM_PAUSE |
+		MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD |
+		MAC_5000FD | MAC_10000FD;
+
+	pdata->phylink = phylink_create(&pdata->phylink_config, fw_node,
+					pdata->phy_interface, &phytmac_phylink_ops);
+	if (IS_ERR(pdata->phylink)) {
+		dev_err(pdata->dev, "Could not create a phylink instance (%ld)\n",
+			PTR_ERR(pdata->phylink));
+		return PTR_ERR(pdata->phylink);
+	}
+
+	return 0;
+}
+
+static int phytmac_open(struct net_device *ndev)
+{
+	struct phytmac *pdata = netdev_priv(ndev);
+	struct phytmac_queue *queue;
+	struct phytmac_hw_if *hw_if = pdata->hw_if;
+	unsigned int q = 0;
+	int ret;
+
+	if (netif_msg_probe(pdata))
+		dev_dbg(pdata->dev, "open\n");
+
+	/* phytmac_powerup */
+	if (pdata->power_state == PHYTMAC_POWEROFF)
+		hw_if->poweron(pdata, PHYTMAC_POWERON);
+
+	if (hw_if->init_msg_ring)
+		hw_if->init_msg_ring(pdata);
+
+	ret = hw_if->get_feature(pdata);
+	if (ret) {
+		netdev_err(ndev, "phytmac get features failed\n");
+		return ret;
+	}
+
+	hw_if->reset_hw(pdata);
+
+	ret = phytmac_get_mac_address(pdata);
+	if (ret) {
+		netdev_err(ndev, "phytmac get mac address failed\n");
+		goto reset_hw;
+	}
+
+	ret = netif_set_real_num_tx_queues(ndev, pdata->queues_num);
+	if (ret) {
+		netdev_err(ndev, "error setting real tx queue number\n");
+		return ret;
+	}
+	ret = netif_set_real_num_rx_queues(ndev, pdata->queues_num);
+	if (ret) {
+		netdev_err(ndev, "error setting real rx queue number\n");
+		return ret;
+	}
+
+	/* RX buffers initialization */
+	ret = phytmac_alloc_resource(pdata);
+	if (ret) {
+		netdev_err(ndev, "Unable to allocate DMA memory (error %d)\n",
+			   ret);
+		goto reset_hw;
+	}
+
+	for (queue = pdata->queues; q < pdata->queues_num; ++q) {
+		napi_enable(&queue->tx_napi);
+		napi_enable(&queue->rx_napi);
+		++queue;
+	}
+
+	phytmac_init_ring(pdata);
+	hw_if->init_hw(pdata);
+
+	ret = phytmac_phylink_connect(pdata);
+	if (ret) {
+		netdev_err(ndev, "phylink connect failed (error %d)\n",
+			   ret);
+		goto reset_hw;
+	}
+
+	phylink_start(pdata->phylink);
+
+	netif_tx_start_all_queues(pdata->ndev);
+
+	if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) {
+		ret = phytmac_ptp_register(pdata);
+		if (ret) {
+			netdev_err(ndev, "ptp register failed (error %d)\n",
+				   ret);
+			goto reset_hw;
+		}
+
+		phytmac_ptp_init(pdata->ndev);
+	}
+
+	return 0;
+
+reset_hw:
+	hw_if->reset_hw(pdata);
+	for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q) {
+		napi_disable(&queue->tx_napi);
+		napi_disable(&queue->rx_napi);
+		++queue;
+	}
+	phytmac_free_resource(pdata);
+	return ret;
+}
+
+static int phytmac_close(struct net_device *ndev)
+{
+	struct phytmac *pdata = netdev_priv(ndev);
+	struct phytmac_queue *queue;
+	struct phytmac_hw_if *hw_if = pdata->hw_if;
+	unsigned long flags;
+	unsigned int q;
+
+	if (netif_msg_probe(pdata))
+		dev_dbg(pdata->dev, "close\n");
+
+	netif_tx_stop_all_queues(ndev);
+
+	for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) {
+		napi_disable(&queue->tx_napi);
+		napi_disable(&queue->rx_napi);
+	}
+
+	phylink_stop(pdata->phylink);
+	phylink_disconnect_phy(pdata->phylink);
+
+	netif_carrier_off(ndev);
+
+	spin_lock_irqsave(&pdata->lock, flags);
+	hw_if->reset_hw(pdata);
+	spin_unlock_irqrestore(&pdata->lock, flags);
+
+	phytmac_free_resource(pdata);
+
+	if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP))
+		phytmac_ptp_unregister(pdata);
+
+	/* phytmac_powerdown */
+	if (pdata->power_state == PHYTMAC_POWERON)
+		hw_if->poweron(pdata, PHYTMAC_POWEROFF);
+
+	return 0;
+}
+
+static int phytmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct phytmac *pdata =
netdev_priv(dev); + int ret; + + if (!netif_running(dev)) + return -EINVAL; + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + ret = phylink_mii_ioctl(pdata->phylink, rq, cmd); + break; +#ifdef CONFIG_PHYTMAC_ENABLE_PTP + case SIOCSHWTSTAMP: + ret = phytmac_ptp_set_ts_config(dev, rq, cmd); + break; + case SIOCGHWTSTAMP: + ret = phytmac_ptp_get_ts_config(dev, rq); + break; +#endif + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static inline int phytmac_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct phytmac *pdata = netdev_priv(netdev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + netdev_features_t changed = features ^ netdev->features; + + /* TX checksum offload */ + if (changed & NETIF_F_HW_CSUM) { + if (features & NETIF_F_HW_CSUM) + hw_if->enable_tx_csum(pdata, 1); + else + hw_if->enable_tx_csum(pdata, 0); + } + + /* RX checksum offload */ + if (changed & NETIF_F_RXCSUM) { + if (features & NETIF_F_RXCSUM && + !(netdev->flags & IFF_PROMISC)) + hw_if->enable_rx_csum(pdata, 1); + else + hw_if->enable_rx_csum(pdata, 0); + } + return 0; +} + +static netdev_features_t phytmac_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + unsigned int nr_frags, f; + unsigned int hdrlen; + + /* there is only one buffer or protocol is not UDP */ + if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) + return features; + + /* length of header */ + hdrlen = skb_transport_offset(skb); + + if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, PHYTMAC_TX_LEN_ALIGN)) + return features & ~NETIF_F_TSO; + + nr_frags = skb_shinfo(skb)->nr_frags; + /* No need to check last fragment */ + nr_frags--; + for (f = 0; f < nr_frags; f++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + if (!IS_ALIGNED(skb_frag_size(frag), PHYTMAC_TX_LEN_ALIGN)) + return features & ~NETIF_F_TSO; + } + return features; +} + +int phytmac_reset_ringsize(struct phytmac *pdata, u32 rx_size, u32 tx_size) +{ + int ret = 0; + int reset = 0; + + if (netif_running(pdata->ndev)) { + reset = 1; + phytmac_close(pdata->ndev); + } + + pdata->rx_ring_size = rx_size; + pdata->tx_ring_size = tx_size; + + if (reset) + phytmac_open(pdata->ndev); + + return ret; +} + +static const struct net_device_ops phytmac_netdev_ops = { + .ndo_open = phytmac_open, + .ndo_stop = phytmac_close, + .ndo_start_xmit = phytmac_start_xmit, + .ndo_set_rx_mode = phytmac_set_rx_mode, + .ndo_get_stats = phytmac_get_stats, + .ndo_eth_ioctl = phytmac_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = phytmac_change_mtu, + .ndo_set_mac_address = phytmac_set_mac_address, + .ndo_set_features = phytmac_set_features, + .ndo_features_check = phytmac_features_check, + .ndo_vlan_rx_add_vid = ncsi_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ncsi_vlan_rx_kill_vid, +}; + +static int phytmac_init(struct phytmac *pdata) +{ + struct net_device *ndev = pdata->ndev; + unsigned int q; + struct phytmac_queue *queue; + int ret; + + if (netif_msg_probe(pdata)) + dev_dbg(pdata->dev, "phytmac init !\n"); + + spin_lock_init(&pdata->lock); + + /* set the queue register mapping once for all: queue0 has a special + * register mapping but we don't want to test the queue index then + * compute the corresponding register offset at run time. 
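+	 * For example, queue 0 is addressed through PHYTMAC_TXPTR_Q0 and
+	 * PHYTMAC_RXPTR_Q0 while queues 1..N use PHYTMAC_TXPTR(q - 1) and
+	 * PHYTMAC_RXPTR(q - 1), as can be seen in phytmac_reset_hw().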
+	 */
+	for (q = 0; q < pdata->queues_num; ++q) {
+		queue = &pdata->queues[q];
+		queue->pdata = pdata;
+		queue->index = q;
+		spin_lock_init(&queue->tx_lock);
+
+		netif_napi_add(ndev, &queue->tx_napi, phytmac_tx_poll);
+		netif_napi_add(ndev, &queue->rx_napi, phytmac_rx_poll);
+
+		if (pdata->irq_type == IRQ_TYPE_INT || pdata->irq_type == IRQ_TYPE_MSI) {
+			queue->irq = pdata->queue_irq[q];
+			if (pdata->irq_type == IRQ_TYPE_INT)
+				ret = devm_request_irq(pdata->dev, queue->irq, phytmac_irq,
+						       IRQF_SHARED, ndev->name, queue);
+			else
+				ret = devm_request_irq(pdata->dev, queue->irq, phytmac_irq,
+						       0, ndev->name, queue);
+
+			if (ret) {
+				dev_err(pdata->dev,
+					"Unable to request IRQ %d (error %d)\n",
+					queue->irq, ret);
+				return ret;
+			}
+		}
+	}
+
+	if (pdata->irq_type == IRQ_TYPE_INTX) {
+		ret = devm_request_irq(pdata->dev, pdata->queue_irq[0], phytmac_intx_irq,
+				       IRQF_SHARED, ndev->name, pdata);
+		if (ret) {
+			dev_err(pdata->dev,
+				"Unable to request INTX IRQ %d (error %d)\n",
+				pdata->queue_irq[0], ret);
+			return ret;
+		}
+	}
+
+	ndev->netdev_ops = &phytmac_netdev_ops;
+	phytmac_set_ethtool_ops(ndev);
+	eth_hw_addr_random(pdata->ndev);
+
+	if (ndev->hw_features & NETIF_F_NTUPLE) {
+		INIT_LIST_HEAD(&pdata->rx_fs_list.list);
+		pdata->rx_fs_list.count = 0;
+		spin_lock_init(&pdata->rx_fs_lock);
+	}
+
+	device_set_wakeup_enable(pdata->dev, pdata->wol ? 1 : 0);
+
+	return 0;
+}
+
+static void phytmac_default_config(struct phytmac *pdata)
+{
+	struct net_device *ndev = pdata->ndev;
+
+	pdata->rx_irq_mask = PHYTMAC_RX_INT_FLAGS;
+	pdata->tx_irq_mask = PHYTMAC_TX_INT_FLAGS;
+	pdata->tx_ring_size = DEFAULT_TX_RING_SIZE;
+	pdata->rx_ring_size = DEFAULT_RX_RING_SIZE;
+	pdata->max_tx_length = PHYTMAC_MAX_TX_LEN;
+	pdata->min_tx_length = PHYTMAC_MIN_TX_LEN;
+	pdata->pause = true;
+
+	ndev->hw_features = NETIF_F_SG;
+
+	if (pdata->capacities & PHYTMAC_CAPS_LSO)
+		ndev->hw_features |= NETIF_F_TSO;
+
+	if (pdata->use_ncsi) {
+		ndev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
+		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+	} else {
+		ndev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+	}
+
+	if (pdata->capacities & PHYTMAC_CAPS_SG_DISABLED)
+		ndev->hw_features &= ~NETIF_F_SG;
+
+	ndev->hw_features |= NETIF_F_NTUPLE;
+
+	ndev->min_mtu = ETH_MIN_MTU;
+	if (pdata->capacities & PHYTMAC_CAPS_JUMBO)
+		ndev->max_mtu = pdata->jumbo_len - ETH_HLEN - ETH_FCS_LEN;
+	else
+		ndev->max_mtu = ETH_DATA_LEN;
+
+	ndev->features = ndev->hw_features;
+}
+
+static void phytmac_ncsi_handler(struct ncsi_dev *nd)
+{
+	if (unlikely(nd->state != ncsi_dev_state_functional))
+		return;
+
+	netdev_dbg(nd->dev, "NCSI interface %s\n",
+		   nd->link_up ? "up" : "down");
+}
+
+int phytmac_drv_probe(struct phytmac *pdata)
+{
+	struct net_device *ndev = pdata->ndev;
+	struct device *dev = pdata->dev;
+	int ret = 0;
+
+	if (netif_msg_probe(pdata))
+		dev_dbg(pdata->dev, "phytmac drv probe start\n");
+
+	phytmac_default_config(pdata);
+
+	if (dma_set_mask(dev, DMA_BIT_MASK(40)) ||
+	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40))) {
+		dev_err(dev, "dma_set_mask or coherent failed\n");
+		return -ENODEV;
+	}
+
+	ret = phytmac_init(pdata);
+	if (ret)
+		goto err_out;
+
+	if (pdata->use_ncsi) {
+		pdata->ncsidev = ncsi_register_dev(ndev, phytmac_ncsi_handler);
+		if (!pdata->ncsidev) {
+			ret = -ENODEV;
+			goto err_out;
+		}
+	}
+
+	netif_carrier_off(ndev);
+	ret = register_netdev(ndev);
+	if (ret) {
+		dev_err(pdata->dev, "Cannot register net device, aborting.\n");
+		goto err_out;
+	}
+
+	if (pdata->use_mii && !pdata->mii_bus) {
+		ret = phytmac_mdio_register(pdata);
+		if (ret) {
+			netdev_err(ndev, "MDIO bus registration failed\n");
+			goto err_out_free_mdiobus;
+		}
+	}
+
+	ret = phytmac_phylink_create(pdata);
+	if (ret) {
+		netdev_err(ndev, "phytmac phylink create failed, error %d\n", ret);
+		goto err_phylink_init;
+	}
+
+	if (netif_msg_probe(pdata))
+		dev_dbg(pdata->dev, "Phytium MAC at 0x%08lx irq %d (%pM) probed successfully\n",
+			ndev->base_addr, ndev->irq, ndev->dev_addr);
+
+	return 0;
+
+err_phylink_init:
+	if (pdata->mii_bus)
+		mdiobus_unregister(pdata->mii_bus);
+
+err_out_free_mdiobus:
+	if (pdata->mii_bus)
+		mdiobus_free(pdata->mii_bus);
+
+	unregister_netdev(ndev);
+
+err_out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(phytmac_drv_probe);
+
+int phytmac_drv_remove(struct phytmac *pdata)
+{
+	struct net_device *ndev = pdata->ndev;
+
+	if (ndev) {
+		if (pdata->use_ncsi && pdata->ncsidev)
+			ncsi_unregister_dev(pdata->ncsidev);
+
+		unregister_netdev(ndev);
+
+		if (pdata->use_mii && pdata->mii_bus) {
+			mdiobus_unregister(pdata->mii_bus);
+			mdiobus_free(pdata->mii_bus);
+		}
+
+		if (pdata->phylink)
+			phylink_destroy(pdata->phylink);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(phytmac_drv_remove);
+
+int phytmac_drv_suspend(struct phytmac *pdata)
+{
+	int q;
+	unsigned long flags;
+	struct phytmac_queue *queue;
+	struct phytmac_hw_if *hw_if = pdata->hw_if;
+
+	if (!netif_running(pdata->ndev))
+		return 0;
+
+	if (pdata->power_state == PHYTMAC_POWEROFF)
+		return 0;
+
+	netif_carrier_off(pdata->ndev);
+	netif_device_detach(pdata->ndev);
+
+	/* napi_disable */
+	for (q = 0, queue = pdata->queues; q < pdata->queues_num;
+	     ++q, ++queue) {
+		napi_disable(&queue->tx_napi);
+		napi_disable(&queue->rx_napi);
+	}
+
+	if (pdata->wol) {
+		hw_if->set_wol(pdata, pdata->wol);
+	} else {
+		rtnl_lock();
+		phylink_stop(pdata->phylink);
+		rtnl_unlock();
+		spin_lock_irqsave(&pdata->lock, flags);
+		hw_if->reset_hw(pdata);
+		hw_if->poweron(pdata, PHYTMAC_POWEROFF);
+		spin_unlock_irqrestore(&pdata->lock, flags);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(phytmac_drv_suspend);
+
+int phytmac_drv_resume(struct phytmac *pdata)
+{
+	int q;
+	struct phytmac_queue *queue;
+	struct phytmac_hw_if *hw_if = pdata->hw_if;
+	struct ethtool_rx_fs_item *item;
+
+	if (!netif_running(pdata->ndev))
+		return 0;
+
+	if (pdata->power_state == PHYTMAC_POWEROFF)
+		hw_if->poweron(pdata, PHYTMAC_POWERON);
+
+	if (hw_if->init_msg_ring)
+		hw_if->init_msg_ring(pdata);
+
+	if (pdata->wol) {
+		hw_if->set_wol(pdata, 0);
+		rtnl_lock();
+		phylink_stop(pdata->phylink);
+		rtnl_unlock();
+	}
+
+	for (q = 0, queue = pdata->queues; q < pdata->queues_num;
+	     ++q, ++queue) {
+		napi_enable(&queue->tx_napi);
+		napi_enable(&queue->rx_napi);
+	}
+
+
hw_if->init_hw(pdata); + phytmac_set_rx_mode(pdata->ndev); + phytmac_set_features(pdata->ndev, pdata->ndev->features); + list_for_each_entry(item, &pdata->rx_fs_list.list, list) + hw_if->add_fdir_entry(pdata, &item->fs); + + rtnl_lock(); + phylink_start(pdata->phylink); + rtnl_unlock(); + + netif_device_attach(pdata->ndev); + + return 0; +} +EXPORT_SYMBOL_GPL(phytmac_drv_resume); + +struct phytmac *phytmac_alloc_pdata(struct device *dev) +{ + struct phytmac *pdata; + struct net_device *netdev; + + netdev = alloc_etherdev_mq(sizeof(struct phytmac), + PHYTMAC_MAX_QUEUES); + if (!netdev) { + dev_err(dev, "alloc_etherdev_mq failed\n"); + return ERR_PTR(-ENOMEM); + } + SET_NETDEV_DEV(netdev, dev); + pdata = netdev_priv(netdev); + pdata->ndev = netdev; + pdata->dev = dev; + + spin_lock_init(&pdata->lock); + spin_lock_init(&pdata->msg_lock); + spin_lock_init(&pdata->ts_clk_lock); + pdata->msg_enable = netif_msg_init(debug, PHYTMAC_DEFAULT_MSG_ENABLE); + + return pdata; +} +EXPORT_SYMBOL_GPL(phytmac_alloc_pdata); + +void phytmac_free_pdata(struct phytmac *pdata) +{ + struct net_device *netdev = pdata->ndev; + + free_netdev(netdev); +} +EXPORT_SYMBOL_GPL(phytmac_free_pdata); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium Ethernet driver"); +MODULE_AUTHOR("Wenting Song"); +MODULE_ALIAS("platform:phytmac"); + diff --git a/drivers/net/ethernet/phytium/phytmac_pci.c b/drivers/net/ethernet/phytium/phytmac_pci.c new file mode 100644 index 0000000000000..e4aeec10e5a15 --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_pci.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Phytium GMAC PCI wrapper. + * + */ + +#include +#include +#include "phytmac.h" +#include "phytmac_v1.h" +#include "phytmac_v2.h" + +#define PCI_DEVICE_ID_GMAC 0xDC3B +#define PCI_SUBDEVICE_ID_SGMII 0x1000 +#define PCI_SUBDEVICE_ID_1000BASEX 0x1001 +#define PCI_SUBDEVICE_ID_2500BASEX 0x1002 +#define PCI_SUBDEVICE_ID_5GBASER 0x1003 +#define PCI_SUBDEVICE_ID_USXGMII 0x1004 +#define PCI_SUBDEVICE_ID_10GBASER 0x1005 + +struct phytmac_data { + struct phytmac_hw_if *hw_if; + u32 caps; + u32 tsu_rate; + u16 queue_num; + int speed; + bool duplex; + bool use_mii; + bool use_ncsi; + phy_interface_t interface; + const struct property_entry *properties; +}; + +static const u32 fixedlink[][5] = { + {0, 1, 1000, 1, 0}, + {0, 1, 2500, 1, 0}, + {0, 1, 5000, 1, 0}, + {0, 1, 10000, 1, 0}, +}; + +static const struct property_entry fl_properties[][2] = { + {PROPERTY_ENTRY_U32_ARRAY("fixed-link", fixedlink[0]), {} }, + {PROPERTY_ENTRY_U32_ARRAY("fixed-link", fixedlink[1]), {} }, + {PROPERTY_ENTRY_U32_ARRAY("fixed-link", fixedlink[2]), {} }, + {PROPERTY_ENTRY_U32_ARRAY("fixed-link", fixedlink[3]), {} }, +}; + +static int phytmac_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct phytmac_data *data = (struct phytmac_data *)id->driver_data; + struct phytmac *pdata; + struct device *dev = &pdev->dev; + void __iomem * const *iomap_table; + struct fwnode_handle *fw_node = NULL; + int bar_mask; + int ret, i; + + pdata = phytmac_alloc_pdata(dev); + if (IS_ERR(pdata)) { + ret = PTR_ERR(pdata); + goto err_alloc; + } + + pdata->pcidev = pdev; + pci_set_drvdata(pdev, pdata); + + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(dev, "pcim_enable_device failed\n"); + goto err_pci_enable; + } + + /* Obtain the mmio areas for the device */ + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); + ret = pcim_iomap_regions(pdev, bar_mask, PHYTMAC_DRV_NAME); + if (ret) { + dev_err(dev, "pcim_iomap_regions failed\n"); + 
goto err_pci_enable;
+	}
+
+	iomap_table = pcim_iomap_table(pdev);
+	if (!iomap_table) {
+		dev_err(dev, "pcim_iomap_table failed\n");
+		ret = -ENOMEM;
+		goto err_pci_enable;
+	}
+
+	pdata->mac_regs = iomap_table[0];
+	if (!pdata->mac_regs) {
+		dev_err(dev, "xgmac ioremap failed\n");
+		ret = -ENOMEM;
+		goto err_pci_enable;
+	}
+
+	pdata->msg_regs = iomap_table[1];
+	if (!pdata->msg_regs) {
+		dev_err(dev, "xpcs ioremap failed\n");
+		ret = -ENOMEM;
+		goto err_pci_enable;
+	}
+
+	pci_set_master(pdev);
+
+	/* para */
+	pdata->dma_burst_length = DEFAULT_DMA_BURST_LENGTH;
+	pdata->jumbo_len = DEFAULT_JUMBO_MAX_LENGTH;
+	pdata->wol |= PHYTMAC_WAKE_MAGIC;
+	pdata->use_ncsi = data->use_ncsi;
+	pdata->use_mii = data->use_mii;
+	pdata->phy_interface = data->interface;
+	pdata->queues_num = data->queue_num;
+	pdata->capacities = data->caps;
+	pdata->hw_if = data->hw_if;
+
+	if (!pdata->use_mii) {
+		fw_node = fwnode_create_software_node(data->properties, NULL);
+		if (IS_ERR(fw_node)) {
+			dev_err(dev, "Failed to create software node\n");
+			ret = PTR_ERR(fw_node);
+			goto err_pci_enable;
+		}
+		pdata->dev->fwnode = fw_node;
+	}
+
+	/* irq */
+	ret = pci_alloc_irq_vectors(pdev, pdata->queues_num, pdata->queues_num, PCI_IRQ_MSI);
+	if (ret < 0) {
+		pdata->irq_type = IRQ_TYPE_INTX;
+		pdata->queue_irq[0] = pdev->irq;
+	} else {
+		pdata->irq_type = IRQ_TYPE_MSI;
+		for (i = 0; i < pdata->queues_num; i++)
+			pdata->queue_irq[i] = pci_irq_vector(pdev, i);
+	}
+
+	/* Configure the netdev resource */
+	ret = phytmac_drv_probe(pdata);
+	if (ret)
+		goto err_irq_vectors;
+
+	netdev_notice(pdata->ndev, "net device enabled\n");
+
+	return 0;
+
+err_irq_vectors:
+	if (fw_node)
+		fwnode_remove_software_node(fw_node);
+	pci_free_irq_vectors(pdata->pcidev);
+
+err_pci_enable:
+	phytmac_free_pdata(pdata);
+
+err_alloc:
+	dev_notice(dev, "net device not enabled\n");
+
+	return ret;
+}
+
+static void phytmac_pci_remove(struct pci_dev *pdev)
+{
+	struct phytmac *pdata = pci_get_drvdata(pdev);
+	struct fwnode_handle *fw_node = dev_fwnode(pdata->dev);
+	int i = 0;
+
+	if (fw_node) {
+		fwnode_remove_software_node(fw_node);
+		pdata->dev->fwnode = NULL;
+	}
+
+	phytmac_drv_remove(pdata);
+
+	for (i = 0; i < pdata->queues_num; i++)
+		free_irq(pci_irq_vector(pdev, i), &pdata->queues[i]);
+	pci_free_irq_vectors(pdev);
+
+	phytmac_free_pdata(pdata);
+
+	pci_disable_device(pdev);
+}
+
+static int __maybe_unused phytmac_pci_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct phytmac *pdata = pci_get_drvdata(pdev);
+
+	return phytmac_drv_suspend(pdata);
+}
+
+static int __maybe_unused phytmac_pci_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct phytmac *pdata = pci_get_drvdata(pdev);
+
+	return phytmac_drv_resume(pdata);
+}
+
+struct phytmac_data phytmac_sgmii = {
+	.hw_if = &phytmac_1p0_hw,
+	.caps = PHYTMAC_CAPS_TAILPTR
+		| PHYTMAC_CAPS_START
+		| PHYTMAC_CAPS_JUMBO
+		| PHYTMAC_CAPS_LSO,
+	.queue_num = 4,
+	.use_ncsi = false,
+	.use_mii = true,
+	.interface = PHY_INTERFACE_MODE_SGMII,
+};
+
+struct phytmac_data phytmac_1000basex = {
+	.hw_if = &phytmac_1p0_hw,
+	.caps = PHYTMAC_CAPS_TAILPTR
+		| PHYTMAC_CAPS_START
+		| PHYTMAC_CAPS_JUMBO
+		| PHYTMAC_CAPS_LSO,
+	.queue_num = 4,
+	.use_ncsi = false,
+	.use_mii = false,
+	.speed = 1000,
+	.duplex = true,
+	.interface = PHY_INTERFACE_MODE_SGMII,
+	.properties = fl_properties[0],
+};
+
+struct phytmac_data phytmac_2500basex = {
+	.hw_if =
&phytmac_1p0_hw, + .caps = PHYTMAC_CAPS_TAILPTR + | PHYTMAC_CAPS_START + | PHYTMAC_CAPS_JUMBO + | PHYTMAC_CAPS_LSO, + .queue_num = 4, + .use_ncsi = false, + .use_mii = false, + .speed = 2500, + .duplex = true, + .interface = PHY_INTERFACE_MODE_2500BASEX, + .properties = fl_properties[1], +}; + +struct phytmac_data phytmac_5000baser = { + .hw_if = &phytmac_1p0_hw, + .caps = PHYTMAC_CAPS_TAILPTR + | PHYTMAC_CAPS_START + | PHYTMAC_CAPS_JUMBO + | PHYTMAC_CAPS_LSO, + .queue_num = 4, + .use_ncsi = false, + .use_mii = false, + .speed = 5000, + .duplex = true, + .interface = PHY_INTERFACE_MODE_5GBASER, + .properties = fl_properties[2], +}; + +struct phytmac_data phytmac_usxgmii = { + .hw_if = &phytmac_1p0_hw, + .caps = PHYTMAC_CAPS_TAILPTR + | PHYTMAC_CAPS_START + | PHYTMAC_CAPS_JUMBO + | PHYTMAC_CAPS_LSO, + .queue_num = 4, + .use_ncsi = false, + .use_mii = false, + .speed = 10000, + .duplex = true, + .interface = PHY_INTERFACE_MODE_USXGMII, + .properties = fl_properties[3], +}; + +static const struct pci_device_id phytmac_pci_table[] = { + { PCI_DEVICE_SUB(PCI_VENDOR_ID_PHYTIUM, PCI_DEVICE_ID_GMAC, + PCI_VENDOR_ID_PHYTIUM, PCI_SUBDEVICE_ID_SGMII), + .driver_data = (kernel_ulong_t)&phytmac_sgmii}, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_PHYTIUM, PCI_DEVICE_ID_GMAC, + PCI_VENDOR_ID_PHYTIUM, PCI_SUBDEVICE_ID_1000BASEX), + .driver_data = (kernel_ulong_t)&phytmac_1000basex}, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_PHYTIUM, PCI_DEVICE_ID_GMAC, + PCI_VENDOR_ID_PHYTIUM, PCI_SUBDEVICE_ID_2500BASEX), + .driver_data = (kernel_ulong_t)&phytmac_2500basex}, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_PHYTIUM, PCI_DEVICE_ID_GMAC, + PCI_VENDOR_ID_PHYTIUM, PCI_SUBDEVICE_ID_5GBASER), + .driver_data = (kernel_ulong_t)&phytmac_5000baser}, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_PHYTIUM, PCI_DEVICE_ID_GMAC, + PCI_VENDOR_ID_PHYTIUM, PCI_SUBDEVICE_ID_USXGMII), + .driver_data = (kernel_ulong_t)&phytmac_usxgmii}, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_PHYTIUM, PCI_DEVICE_ID_GMAC, + PCI_VENDOR_ID_PHYTIUM, PCI_SUBDEVICE_ID_10GBASER), + .driver_data = (kernel_ulong_t)&phytmac_usxgmii}, + /* Last entry must be zero */ + { 0, } +}; +MODULE_DEVICE_TABLE(pci, phytmac_pci_table); + +static const struct dev_pm_ops phytmac_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytmac_pci_suspend, phytmac_pci_resume) +}; + +static struct pci_driver phytmac_driver = { + .name = PHYTMAC_DRV_NAME, + .id_table = phytmac_pci_table, + .probe = phytmac_pci_probe, + .remove = phytmac_pci_remove, + .driver = { + .pm = &phytmac_pci_pm_ops, + } +}; + +module_pci_driver(phytmac_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium NIC PCI wrapper"); diff --git a/drivers/net/ethernet/phytium/phytmac_platform.c b/drivers/net/ethernet/phytium/phytmac_platform.c new file mode 100644 index 0000000000000..f18dee1bae93f --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_platform.c @@ -0,0 +1,256 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Phytium GMAC Platform wrapper. 
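+ *
+ * A minimal device-tree node for this wrapper could look as follows
+ * (illustrative sketch only; the address, size and property values are
+ * placeholders, not taken from this patch):
+ *
+ *	ethernet@32010000 {
+ *		compatible = "phytium,gmac-1.0";
+ *		reg = <0x0 0x32010000 0x0 0x2000>;
+ *		queue-number = <4>;
+ *		dma-burst-length = <16>;
+ *		jumbo-max-length = <9600>;
+ *		magic-packet;
+ *		phy-mode = "sgmii";
+ *	};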
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include "phytmac.h"
+#include "phytmac_v1.h"
+#include "phytmac_v2.h"
+
+static const struct phytmac_config phytium_1p0_config = {
+	.hw_if = &phytmac_1p0_hw,
+	.caps = PHYTMAC_CAPS_TAILPTR
+		| PHYTMAC_CAPS_START
+		| PHYTMAC_CAPS_JUMBO
+		| PHYTMAC_CAPS_LSO,
+	.queue_num = 4,
+	.tsu_rate = 300000000,
+};
+
+static const struct phytmac_config phytium_2p0_config = {
+	.hw_if = &phytmac_2p0_hw,
+	.caps = PHYTMAC_CAPS_TAILPTR
+		| PHYTMAC_CAPS_LPI
+		| PHYTMAC_CAPS_LSO
+		| PHYTMAC_CAPS_MSG
+		| PHYTMAC_CAPS_JUMBO,
+	.queue_num = 2,
+	.tsu_rate = 300000000,
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id phytmac_dt_ids[] = {
+	{ .compatible = "phytium,gmac-1.0", .data = &phytium_1p0_config },
+	{ .compatible = "phytium,gmac-2.0", .data = &phytium_2p0_config },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, phytmac_dt_ids);
+#endif /* CONFIG_OF */
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id phytmac_acpi_ids[] = {
+	{ .id = "PHYT0046", .driver_data = (kernel_ulong_t)&phytium_1p0_config },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(acpi, phytmac_acpi_ids);
+#else
+#define phytmac_acpi_ids NULL
+#endif
+
+static int phytmac_get_phy_mode(struct platform_device *pdev)
+{
+	const char *pm;
+	int err, i;
+
+	err = device_property_read_string(&pdev->dev, "phy-mode", &pm);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
+		if (!strcasecmp(pm, phy_modes(i)))
+			return i;
+	}
+
+	return -ENODEV;
+}
+
+static int phytmac_plat_probe(struct platform_device *pdev)
+{
+	const struct phytmac_config *phytmac_config = &phytium_1p0_config;
+	struct resource *regs;
+	struct phytmac *pdata;
+	int ret, i;
+	u32 queue_num;
+
+	pdata = phytmac_alloc_pdata(&pdev->dev);
+	if (IS_ERR(pdata)) {
+		ret = PTR_ERR(pdata);
+		goto err_alloc;
+	}
+
+	platform_set_drvdata(pdev, pdata);
+
+	pdata->platdev = pdev;
+
+	if (pdev->dev.of_node) {
+		const struct of_device_id *match;
+
+		match = of_match_node(phytmac_dt_ids, pdev->dev.of_node);
+		if (match && match->data) {
+			phytmac_config = match->data;
+			pdata->hw_if = phytmac_config->hw_if;
+			pdata->capacities = phytmac_config->caps;
+			pdata->queues_max_num = phytmac_config->queue_num;
+		}
+	} else if (has_acpi_companion(&pdev->dev)) {
+		const struct acpi_device_id *match;
+
+		match = acpi_match_device(phytmac_acpi_ids, &pdev->dev);
+		if (match && match->driver_data) {
+			phytmac_config = (void *)match->driver_data;
+			pdata->hw_if = phytmac_config->hw_if;
+			pdata->capacities = phytmac_config->caps;
+			pdata->queues_max_num = phytmac_config->queue_num;
+		}
+	}
+
+	i = 0;
+	pdata->mac_regs = devm_platform_get_and_ioremap_resource(pdev, i, &regs);
+	if (IS_ERR(pdata->mac_regs)) {
+		dev_err(&pdev->dev, "mac_regs ioremap failed\n");
+		ret = PTR_ERR(pdata->mac_regs);
+		goto err_mem;
+	}
+	pdata->ndev->base_addr = regs->start;
+
+	if (pdata->capacities & PHYTMAC_CAPS_MSG) {
+		++i;
+		regs = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (regs) {
+			pdata->msg_regs = ioremap_wt(regs->start, MEMORY_SIZE);
+			if (!pdata->msg_regs) {
+				dev_err(&pdev->dev, "msg_regs ioremap failed, i=%d\n", i);
+				ret = -ENOMEM;
+				goto err_mem;
+			}
+		}
+	}
+
+	if (device_property_read_bool(&pdev->dev, "lpi"))
+		pdata->capacities |= PHYTMAC_CAPS_LPI;
+
+	if (pdata->capacities & PHYTMAC_CAPS_LPI) {
+		/* lpi resource */
+		++i;
+		regs = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (regs) {
+
pdata->mhu_regs = ioremap(regs->start, MHU_SIZE); + if (!pdata->mhu_regs) + dev_err(&pdev->dev, "mhu_regs ioremap failed, i=%d\n", i); + } + } + + if (device_property_read_u32(&pdev->dev, "dma-burst-length", &pdata->dma_burst_length)) + pdata->dma_burst_length = DEFAULT_DMA_BURST_LENGTH; + + if (device_property_read_u32(&pdev->dev, "jumbo-max-length", &pdata->jumbo_len)) + pdata->jumbo_len = DEFAULT_JUMBO_MAX_LENGTH; + + if (device_property_read_u32(&pdev->dev, "queue-number", &queue_num)) + pdata->queues_num = pdata->queues_max_num; + else + pdata->queues_num = queue_num; + + pdata->wol = 0; + if (device_property_read_bool(&pdev->dev, "magic-packet")) + pdata->wol |= PHYTMAC_WAKE_MAGIC; + + pdata->use_ncsi = device_property_read_bool(&pdev->dev, "use-ncsi"); + pdata->use_mii = device_property_read_bool(&pdev->dev, "use-mii"); + + pdata->power_state = PHYTMAC_POWEROFF; + + device_set_wakeup_capable(&pdev->dev, pdata->wol & PHYTMAC_WOL_MAGIC_PACKET); + + for (i = 0; i < pdata->queues_num; i++) { + pdata->irq_type = IRQ_TYPE_INT; + pdata->queue_irq[i] = platform_get_irq(pdev, i); + } + + ret = phytmac_get_phy_mode(pdev); + if (ret < 0) + pdata->phy_interface = PHY_INTERFACE_MODE_MII; + else + pdata->phy_interface = ret; + + ret = phytmac_drv_probe(pdata); + if (ret) + goto err_mem; + + if (netif_msg_probe(pdata)) { + dev_notice(&pdev->dev, "phytium net device enabled\n"); + dev_dbg(pdata->dev, "use_ncsi:%d, use_mii:%d, wol:%d, queues_num:%d\n", + pdata->use_ncsi, pdata->use_mii, pdata->wol, pdata->queues_num); + } + + return 0; + +err_mem: + phytmac_free_pdata(pdata); + +err_alloc: + dev_err(&pdev->dev, "phytium net device not enabled\n"); + + return ret; +} + +static void phytmac_plat_remove(struct platform_device *pdev) +{ + struct phytmac *pdata = platform_get_drvdata(pdev); + + phytmac_drv_remove(pdata); + phytmac_free_pdata(pdata); +} + +static int __maybe_unused phytmac_plat_suspend(struct device *dev) +{ + struct phytmac *pdata = dev_get_drvdata(dev); + int ret; + + ret = phytmac_drv_suspend(pdata); + + return ret; +} + +static int __maybe_unused phytmac_plat_resume(struct device *dev) +{ + struct phytmac *pdata = dev_get_drvdata(dev); + int ret; + + ret = phytmac_drv_resume(pdata); + + return ret; +} + +static const struct dev_pm_ops phytmac_plat_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytmac_plat_suspend, phytmac_plat_resume) +}; + +static struct platform_driver phytmac_driver = { + .probe = phytmac_plat_probe, + .remove = phytmac_plat_remove, + .driver = { + .name = PHYTMAC_DRV_NAME, + .of_match_table = of_match_ptr(phytmac_dt_ids), + .acpi_match_table = phytmac_acpi_ids, + .pm = &phytmac_plat_pm_ops, + }, +}; + +module_platform_driver(phytmac_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium Ethernet driver"); +MODULE_AUTHOR("Wenting Song"); +MODULE_ALIAS("platform:phytmac"); diff --git a/drivers/net/ethernet/phytium/phytmac_ptp.c b/drivers/net/ethernet/phytium/phytmac_ptp.c new file mode 100644 index 0000000000000..26b1b75edbde1 --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_ptp.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-2.0-only +/** + * 1588 PTP support for Phytium GMAC device. 
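+ *
+ * Supports both two-step timestamping (HWTSTAMP_TX_ON) and one-step
+ * SYNC (HWTSTAMP_TX_ONESTEP_SYNC): phytmac_ptp_one_step() classifies
+ * outgoing skbs so that one-step SYNC frames can be stamped by the
+ * hardware without a follow-up message.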
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytmac.h" +#include "phytmac_ptp.h" + +bool phytmac_ptp_one_step(struct sk_buff *skb) +{ + struct ptp_header *hdr; + unsigned int ptp_class; + u8 msgtype; + + /* No need to parse packet if PTP TS is not involved */ + if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) + goto not_oss; + + /* Identify and return whether PTP one step sync is being processed */ + ptp_class = ptp_classify_raw(skb); + if (ptp_class == PTP_CLASS_NONE) + goto not_oss; + + hdr = ptp_parse_header(skb, ptp_class); + if (!hdr) + goto not_oss; + + if (hdr->flag_field[0] & 0x2) + goto not_oss; + + msgtype = ptp_get_msgtype(hdr, ptp_class); + if (msgtype == PTP_MSGTYPE_SYNC) + return true; + +not_oss: + return false; +} + +int phytmac_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct phytmac *pdata = container_of(ptp, struct phytmac, ptp_clock_info); + struct phytmac_hw_if *hw_if = pdata->hw_if; + unsigned long flags; + + spin_lock_irqsave(&pdata->ts_clk_lock, flags); + hw_if->get_time(pdata, ts); + spin_unlock_irqrestore(&pdata->ts_clk_lock, flags); + + return 0; +} + +int phytmac_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct phytmac *pdata = container_of(ptp, struct phytmac, ptp_clock_info); + struct phytmac_hw_if *hw_if = pdata->hw_if; + unsigned long flags; + + spin_lock_irqsave(&pdata->ts_clk_lock, flags); + hw_if->set_time(pdata, ts->tv_sec, ts->tv_nsec); + spin_unlock_irqrestore(&pdata->ts_clk_lock, flags); + + return 0; +} + +int phytmac_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct phytmac *pdata = container_of(ptp, struct phytmac, ptp_clock_info); + struct phytmac_hw_if *hw_if = pdata->hw_if; + bool negative = false; + + if (scaled_ppm < 0) { + negative = true; + scaled_ppm = -scaled_ppm; + } + + hw_if->adjust_fine(pdata, scaled_ppm, negative); + return 0; +} + +int phytmac_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct phytmac *pdata = container_of(ptp, struct phytmac, ptp_clock_info); + struct phytmac_hw_if *hw_if = pdata->hw_if; + int negative = 0; + + if (delta < 0) { + negative = 1; + delta = -delta; + } + + spin_lock_irq(&pdata->ts_clk_lock); + hw_if->adjust_time(pdata, delta, negative); + spin_unlock_irq(&pdata->ts_clk_lock); + + return 0; +} + +int phytmac_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +void phytmac_ptp_init_timer(struct phytmac *pdata) +{ + struct phytmac_hw_if *hw_if = pdata->hw_if; + u32 rem = 0; + u64 adj; + + pdata->ts_rate = hw_if->get_ts_rate(pdata); + pdata->ts_incr.ns = div_u64_rem(NSEC_PER_SEC, pdata->ts_rate, &rem); + if (rem) { + adj = rem; + adj <<= 24; + pdata->ts_incr.sub_ns = div_u64(adj, pdata->ts_rate); + } else { + pdata->ts_incr.sub_ns = 0; + } +} + +void phytmac_ptp_rxstamp(struct phytmac *pdata, struct sk_buff *skb, + struct phytmac_dma_desc *desc) +{ + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct timespec64 ts; + + if (hw_if->ts_valid(pdata, desc, PHYTMAC_RX)) { + hw_if->get_timestamp(pdata, desc->desc4, desc->desc5, &ts); + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + } +} + +int phytmac_ptp_txstamp(struct phytmac_queue *queue, struct sk_buff *skb, + struct phytmac_dma_desc *desc) +{ + struct phytmac *pdata = 
queue->pdata;
+	struct phytmac_hw_if *hw_if = pdata->hw_if;
+	struct timespec64 ts;
+	struct skb_shared_hwtstamps shhwtstamps;
+
+	if (queue->pdata->ts_config.tx_type == TS_DISABLED)
+		return -EOPNOTSUPP;
+
+	if (!hw_if->ts_valid(pdata, desc, PHYTMAC_TX))
+		return -EINVAL;
+
+	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+	hw_if->get_timestamp(pdata, desc->desc4, desc->desc5, &ts);
+	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+	shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+	skb_tstamp_tx(skb, &shhwtstamps);
+
+	return 0;
+}
+
+int phytmac_ptp_register(struct phytmac *pdata)
+{
+	pdata->ptp_clock_info.owner = THIS_MODULE;
+	snprintf(pdata->ptp_clock_info.name, sizeof(pdata->ptp_clock_info.name),
+		 "%s", pdata->ndev->name);
+	pdata->ptp_clock_info.max_adj = 64000000; /* In PPB */
+	pdata->ptp_clock_info.n_alarm = 0;
+	pdata->ptp_clock_info.n_ext_ts = 0;
+	pdata->ptp_clock_info.n_per_out = 0;
+	pdata->ptp_clock_info.pps = 1;
+	pdata->ptp_clock_info.adjfine = phytmac_ptp_adjfine;
+	pdata->ptp_clock_info.adjtime = phytmac_ptp_adjtime;
+	pdata->ptp_clock_info.gettime64 = phytmac_ptp_gettime;
+	pdata->ptp_clock_info.settime64 = phytmac_ptp_settime;
+	pdata->ptp_clock_info.enable = phytmac_ptp_enable;
+	pdata->ptp_clock = ptp_clock_register(&pdata->ptp_clock_info, pdata->dev);
+	if (IS_ERR_OR_NULL(pdata->ptp_clock)) {
+		dev_err(pdata->dev, "ptp_clock_register failed %ld\n",
+			PTR_ERR(pdata->ptp_clock));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void phytmac_ptp_unregister(struct phytmac *pdata)
+{
+	struct phytmac_hw_if *hw_if = pdata->hw_if;
+
+	if (pdata->ptp_clock)
+		ptp_clock_unregister(pdata->ptp_clock);
+	pdata->ptp_clock = NULL;
+
+	hw_if->clear_time(pdata);
+
+	dev_info(pdata->dev, "phytmac ptp clock unregistered.\n");
+}
+
+void phytmac_ptp_init(struct net_device *ndev)
+{
+	struct phytmac *pdata = netdev_priv(ndev);
+	struct phytmac_hw_if *hw_if = pdata->hw_if;
+
+	phytmac_ptp_init_timer(pdata);
+
+	hw_if->init_ts_hw(pdata);
+
+	dev_info(pdata->dev, "phytmac ptp clock init success.\n");
+}
+
+int phytmac_ptp_get_ts_config(struct net_device *dev, struct ifreq *rq)
+{
+	struct hwtstamp_config *tstamp_config;
+	struct phytmac *pdata = netdev_priv(dev);
+
+	if (!IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP))
+		return -EOPNOTSUPP;
+
+	tstamp_config = &pdata->ts_config;
+
+	if (copy_to_user(rq->ifr_data, tstamp_config, sizeof(*tstamp_config)))
+		return -EFAULT;
+
+	return 0;
+}
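+
+/*
+ * Illustrative flow (not part of the original patch): userspace enables
+ * timestamping with the standard SIOCSHWTSTAMP ioctl, e.g. on a
+ * hypothetical interface "eth0":
+ *
+ *	struct hwtstamp_config cfg = { 0 };
+ *	struct ifreq ifr = { 0 };
+ *
+ *	cfg.tx_type = HWTSTAMP_TX_ON;
+ *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name));
+ *	ifr.ifr_data = (void *)&cfg;
+ *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
+ *
+ * which lands in phytmac_ptp_set_ts_config() below via phytmac_ioctl().
+ */
+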
+int phytmac_ptp_set_ts_config(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct hwtstamp_config config;
+	struct phytmac *pdata = netdev_priv(dev);
+	struct phytmac_hw_if *hw_if = pdata->hw_if;
+	struct ts_ctrl tstamp_ctrl;
+	int ret;
+
+	memset(&tstamp_ctrl, 0, sizeof(struct ts_ctrl));
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+		break;
+	case HWTSTAMP_TX_ONESTEP_SYNC:
+		tstamp_ctrl.one_step = 1;
+		tstamp_ctrl.tx_control = TS_ALL_FRAMES;
+		break;
+	case HWTSTAMP_TX_ON:
+		tstamp_ctrl.one_step = 0;
+		tstamp_ctrl.tx_control = TS_ALL_FRAMES;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+		tstamp_ctrl.rx_control = TS_ALL_PTP_FRAMES;
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_ALL:
+		tstamp_ctrl.rx_control = TS_ALL_FRAMES;
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+		break;
+	default:
+		config.rx_filter = HWTSTAMP_FILTER_NONE;
+		return -ERANGE;
+	}
+
+	ret = hw_if->set_ts_config(pdata, &tstamp_ctrl);
+	if (ret)
+		return ret;
+
+	/* save these settings for future reference */
+	pdata->ts_config = config;
+
+	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+		return -EFAULT;
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/phytium/phytmac_ptp.h b/drivers/net/ethernet/phytium/phytmac_ptp.h
new file mode 100644
index 0000000000000..72c8b7c674137
--- /dev/null
+++ b/drivers/net/ethernet/phytium/phytmac_ptp.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Phytium Ethernet Controller driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _PHYTMAC_PTP_H
+#define _PHYTMAC_PTP_H
+
+#ifdef CONFIG_PHYTMAC_ENABLE_PTP
+bool phytmac_ptp_one_step(struct sk_buff *skb);
+int phytmac_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts);
+int phytmac_ptp_settime(struct ptp_clock_info *ptp,
+			const struct timespec64 *ts);
+int phytmac_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm);
+int phytmac_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta);
+int phytmac_ptp_enable(struct ptp_clock_info *ptp,
+		       struct ptp_clock_request *rq, int on);
+void phytmac_ptp_init_timer(struct phytmac *pdata);
+void phytmac_ptp_rxstamp(struct phytmac *pdata, struct sk_buff *skb,
+			 struct phytmac_dma_desc *desc);
+int phytmac_ptp_txstamp(struct phytmac_queue *queue, struct sk_buff *skb,
+			struct phytmac_dma_desc *desc);
+int phytmac_ptp_register(struct phytmac *pdata);
+void phytmac_ptp_unregister(struct phytmac *pdata);
+void phytmac_ptp_init(struct net_device *ndev);
+int phytmac_ptp_get_ts_config(struct net_device *dev, struct ifreq *rq);
+int phytmac_ptp_set_ts_config(struct net_device *dev, struct ifreq *ifr, int cmd);
+#else
+static inline bool phytmac_ptp_one_step(struct sk_buff *skb)
+{
+	return true;
+}
+
+static inline void phytmac_ptp_rxstamp(struct phytmac *pdata, struct sk_buff *skb,
+				       struct phytmac_dma_desc *desc) {}
+static inline int phytmac_ptp_txstamp(struct phytmac_queue *queue, struct sk_buff *skb,
+				      struct phytmac_dma_desc *desc)
+{
+	return -1;
+}
+
+static inline int phytmac_ptp_register(struct phytmac *pdata)
+{
+	return 0;
+}
+
+static inline void phytmac_ptp_unregister(struct phytmac *pdata) {}
+static inline void phytmac_ptp_init(struct net_device *ndev) {}
+
+#endif
+#endif
diff --git a/drivers/net/ethernet/phytium/phytmac_v1.c b/drivers/net/ethernet/phytium/phytmac_v1.c
new file mode 100644
index 0000000000000..ec95c6c79b066
--- /dev/null
+++ b/drivers/net/ethernet/phytium/phytmac_v1.c
@@ -0,0 +1,1400 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/phy.h>
+#include "phytmac.h"
+#include "phytmac_v1.h"
+
+static int phytmac_enable_promise(struct phytmac *pdata, int enable)
+{
+	u32 value = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG);
+
+
if (enable) + value |= PHYTMAC_BIT(PROMISC); + else + value &= ~PHYTMAC_BIT(PROMISC); + + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, value); + + return 0; +} + +static int phytmac_enable_multicast(struct phytmac *pdata, int enable) +{ + u32 config; + + if (enable) { + PHYTMAC_WRITE(pdata, PHYTMAC_HASHB, -1); + PHYTMAC_WRITE(pdata, PHYTMAC_HASHT, -1); + config = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + config |= PHYTMAC_BIT(MH_EN); + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, config); + } else { + PHYTMAC_WRITE(pdata, PHYTMAC_HASHB, 0); + PHYTMAC_WRITE(pdata, PHYTMAC_HASHT, 0); + config = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + config &= ~PHYTMAC_BIT(MH_EN); + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, config); + } + + return 0; +} + +static int phytmac_set_mc_hash(struct phytmac *pdata, unsigned long *mc_filter) +{ + u32 config; + + PHYTMAC_WRITE(pdata, PHYTMAC_HASHB, mc_filter[0]); + PHYTMAC_WRITE(pdata, PHYTMAC_HASHT, mc_filter[1]); + config = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + config |= PHYTMAC_BIT(MH_EN); + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, config); + + return 0; +} + +static int phytmac_enable_rxcsum(struct phytmac *pdata, int enable) +{ + u32 value = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + + if (enable) + value |= PHYTMAC_BIT(RCO_EN); + else + value &= ~PHYTMAC_BIT(RCO_EN); + + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, value); + + return 0; +} + +static int phytmac_enable_txcsum(struct phytmac *pdata, int enable) +{ + u32 value = PHYTMAC_READ(pdata, PHYTMAC_DCONFIG); + + if (enable) + value |= PHYTMAC_BIT(TCO_EN); + else + value &= ~PHYTMAC_BIT(TCO_EN); + + PHYTMAC_WRITE(pdata, PHYTMAC_DCONFIG, value); + + return 0; +} + +static int phytmac_enable_mdio(struct phytmac *pdata, int enable) +{ + u32 value = PHYTMAC_READ(pdata, PHYTMAC_NCTRL); + + if (enable) + value |= PHYTMAC_BIT(MPE); + else + value &= ~PHYTMAC_BIT(MPE); + + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, value); + + return 0; +} + +static int phytmac_enable_pause(struct phytmac *pdata, int enable) +{ + u32 value = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + + if (enable) + value |= PHYTMAC_BIT(PAUSE_EN); + else + value &= ~PHYTMAC_BIT(PAUSE_EN); + + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, value); + return 0; +} + +static int phytmac_enable_network(struct phytmac *pdata, int enable, int rx_tx) +{ + u32 old_ctrl = PHYTMAC_READ(pdata, PHYTMAC_NCTRL); + u32 ctrl; + + ctrl = old_ctrl; + + if (rx_tx & PHYTMAC_TX) { + if (enable) + ctrl |= PHYTMAC_BIT(TE); + else + ctrl &= ~PHYTMAC_BIT(TE); + } + + if (rx_tx & PHYTMAC_RX) { + if (enable) + ctrl |= PHYTMAC_BIT(RE); + else + ctrl &= ~PHYTMAC_BIT(RE); + } + + if (ctrl ^ old_ctrl) + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, ctrl); + + return 0; +} + +static int phytmac_enable_autoneg(struct phytmac *pdata, int enable) +{ + u32 value = PHYTMAC_READ(pdata, PHYTMAC_PCSCTRL); + + if (enable) + value |= PHYTMAC_BIT(AUTONEG); + else + value &= ~PHYTMAC_BIT(AUTONEG); + + PHYTMAC_WRITE(pdata, PHYTMAC_PCSCTRL, value); + + return 0; +} + +static int phytmac_pcs_software_reset(struct phytmac *pdata, int reset) +{ + u32 value = PHYTMAC_READ(pdata, PHYTMAC_PCSCTRL); + + if (reset) + value |= PHYTMAC_BIT(PCS_RESET); + else + value &= ~PHYTMAC_BIT(PCS_RESET); + + PHYTMAC_WRITE(pdata, PHYTMAC_PCSCTRL, value); + + return 0; +} + +static int phytmac_mac_linkup(struct phytmac *pdata, phy_interface_t interface, + int speed, int duplex) +{ + u32 ctrl, config; + + config = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + + config &= ~(PHYTMAC_BIT(SPEED) | PHYTMAC_BIT(FD)); + + if (speed == SPEED_100) + config |= PHYTMAC_BIT(SPEED); + else if 
(speed == SPEED_1000 || speed == SPEED_2500) + config |= PHYTMAC_BIT(GM_EN); + + if (duplex) + config |= PHYTMAC_BIT(FD); + + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, config); + + if (speed == SPEED_2500) { + ctrl = PHYTMAC_READ(pdata, PHYTMAC_NCTRL); + ctrl |= PHYTMAC_BIT(2PT5G); + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, ctrl); + } + + if (speed == SPEED_10000) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_10000M); + else if (speed == SPEED_5000) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_5000M); + else if (speed == SPEED_2500) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_2500M); + else if (speed == SPEED_1000) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_1000M); + else + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_100M); + + return 0; +} + +static int phytmac_mac_linkdown(struct phytmac *pdata) +{ + return 0; +} + +static int phytmac_pcs_linkup(struct phytmac *pdata, phy_interface_t interface, + int speed, int duplex) +{ + u32 config; + + if (interface == PHY_INTERFACE_MODE_USXGMII || + interface == PHY_INTERFACE_MODE_10GBASER) { + config = PHYTMAC_READ(pdata, PHYTMAC_USXCTRL); + if (speed == SPEED_10000) { + config = PHYTMAC_SET_BITS(config, SERDES_RATE, PHYTMAC_SERDES_RATE_10G); + config = PHYTMAC_SET_BITS(config, USX_SPEED, PHYTMAC_SPEED_10000M); + } else if (speed == SPEED_5000) { + config = PHYTMAC_SET_BITS(config, SERDES_RATE, PHYTMAC_SERDES_RATE_5G); + config = PHYTMAC_SET_BITS(config, USX_SPEED, PHYTMAC_SPEED_5000M); + } + + /* reset */ + config &= ~(PHYTMAC_BIT(RX_EN) | PHYTMAC_BIT(TX_EN)); + config |= PHYTMAC_BIT(RX_SYNC_RESET); + + PHYTMAC_WRITE(pdata, PHYTMAC_USXCTRL, config); + + /* enable rx and tx */ + config &= ~(PHYTMAC_BIT(RX_SYNC_RESET)); + config |= PHYTMAC_BIT(RX_EN) | PHYTMAC_BIT(TX_EN); + + PHYTMAC_WRITE(pdata, PHYTMAC_USXCTRL, config); + } + + return 0; +} + +static int phytmac_pcs_linkdown(struct phytmac *pdata) +{ + return 0; +} + +static int phytmac_get_mac_addr(struct phytmac *pdata, u8 *addr) +{ + u32 bottom; + u16 top; + + bottom = PHYTMAC_READ(pdata, PHYTMAC_MAC1B); + top = PHYTMAC_READ(pdata, PHYTMAC_MAC1T); + + addr[0] = bottom & 0xff; + addr[1] = (bottom >> 8) & 0xff; + addr[2] = (bottom >> 16) & 0xff; + addr[3] = (bottom >> 24) & 0xff; + addr[4] = top & 0xff; + addr[5] = (top >> 8) & 0xff; + + return 0; +} + +static int phytmac_set_mac_addr(struct phytmac *pdata, const u8 *addr) +{ + u32 bottom; + u16 top; + + bottom = cpu_to_le32(*((u32 *)addr)); + PHYTMAC_WRITE(pdata, PHYTMAC_MAC1B, bottom); + top = cpu_to_le16(*((u16 *)(addr + 4))); + PHYTMAC_WRITE(pdata, PHYTMAC_MAC1T, top); + + return 0; +} + +static void phytmac_reset_hw(struct phytmac *pdata) +{ + struct phytmac_queue *queue; + unsigned int q; + u32 ctrl; + + ctrl = PHYTMAC_READ(pdata, PHYTMAC_NCTRL); + + ctrl &= ~(PHYTMAC_BIT(RE) | PHYTMAC_BIT(TE)); + ctrl |= PHYTMAC_BIT(CLEARSTAT); + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, ctrl); + + /* Disable and clear all interrupts and disable queues */ + for (q = 0, queue = pdata->queues; q < pdata->queues_max_num; ++q, ++queue) { + if (q == 0) { + PHYTMAC_WRITE(pdata, PHYTMAC_ID, -1); + PHYTMAC_WRITE(pdata, PHYTMAC_IS, -1); + PHYTMAC_WRITE(pdata, PHYTMAC_TXPTR_Q0, 1); + PHYTMAC_WRITE(pdata, PHYTMAC_RXPTR_Q0, 1); + } else { + PHYTMAC_WRITE(pdata, PHYTMAC_IDR(q - 1), -1); + PHYTMAC_WRITE(pdata, PHYTMAC_ISR(q - 1), -1); + PHYTMAC_WRITE(pdata, PHYTMAC_TXPTR(q - 1), 1); + PHYTMAC_WRITE(pdata, PHYTMAC_RXPTR(q - 1), 1); + } + + PHYTMAC_WRITE(pdata, PHYTMAC_TXPTRH(q), 0); + PHYTMAC_WRITE(pdata, PHYTMAC_RXPTRH(q), 0); + + if 
(pdata->capacities & PHYTMAC_CAPS_TAILPTR) + PHYTMAC_WRITE(pdata, PHYTMAC_TAILPTR(q), 0); + } +} + +static void phytmac_get_regs(struct phytmac *pdata, u32 *reg_buff) +{ + reg_buff[0] = PHYTMAC_READ(pdata, PHYTMAC_NCTRL); + reg_buff[1] = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + reg_buff[2] = PHYTMAC_READ(pdata, PHYTMAC_NSTATUS); + reg_buff[3] = PHYTMAC_READ(pdata, PHYTMAC_DCONFIG); + reg_buff[4] = PHYTMAC_READ(pdata, PHYTMAC_TXPTR_Q0); + reg_buff[5] = PHYTMAC_READ(pdata, PHYTMAC_RXPTR_Q0); + reg_buff[6] = PHYTMAC_READ(pdata, PHYTMAC_TXPTR(1)); + reg_buff[7] = PHYTMAC_READ(pdata, PHYTMAC_RXPTR(1)); + reg_buff[8] = PHYTMAC_READ(pdata, PHYTMAC_TXPTR(2)); + reg_buff[9] = PHYTMAC_READ(pdata, PHYTMAC_RXPTR(2)); + reg_buff[10] = PHYTMAC_READ(pdata, PHYTMAC_TXPTR(3)); + reg_buff[11] = PHYTMAC_READ(pdata, PHYTMAC_RXPTR(3)); + reg_buff[12] = PHYTMAC_READ(pdata, PHYTMAC_HCONFIG); + reg_buff[13] = PHYTMAC_READ(pdata, PHYTMAC_IM); + if (pdata->phy_interface == PHY_INTERFACE_MODE_USXGMII || + pdata->phy_interface == PHY_INTERFACE_MODE_10GBASER) { + reg_buff[14] = PHYTMAC_READ(pdata, PHYTMAC_USXCTRL); + reg_buff[15] = PHYTMAC_READ(pdata, PHYTMAC_USXSTATUS); + } else { + reg_buff[14] = PHYTMAC_READ(pdata, PHYTMAC_PCSCTRL); + reg_buff[15] = PHYTMAC_READ(pdata, PHYTMAC_PCSSTATUS); + } +} + +static int phytmac_init_hw(struct phytmac *pdata) +{ + u32 config = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + u32 dmaconfig; + u32 nctrlconfig; + + nctrlconfig = PHYTMAC_READ(pdata, PHYTMAC_NCTRL); + nctrlconfig |= PHYTMAC_BIT(MPE); + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, nctrlconfig); + + phytmac_set_mac_addr(pdata, pdata->ndev->dev_addr); + + PHYTMAC_WRITE(pdata, PHYTMAC_AXICTRL, 0x1010); + + /* jumbo */ + if (pdata->capacities & PHYTMAC_CAPS_JUMBO) + config |= PHYTMAC_BIT(JUMBO_EN); + else + config |= PHYTMAC_BIT(RCV_BIG); + /* promisc */ + if (pdata->ndev->flags & IFF_PROMISC) + config |= PHYTMAC_BIT(PROMISC); + if (pdata->ndev->features & NETIF_F_RXCSUM) + config |= PHYTMAC_BIT(RCO_EN); + if (pdata->ndev->flags & IFF_BROADCAST) + config &= ~PHYTMAC_BIT(NO_BCAST); + else + config |= PHYTMAC_BIT(NO_BCAST); + + /* pause enable */ + config |= PHYTMAC_BIT(PAUSE_EN); + /* Rx Fcs remove */ + config |= PHYTMAC_BIT(FCS_REMOVE); + if (pdata->dma_data_width == PHYTMAC_DBW_64) + config |= PHYTMAC_BIT(DBW64); + if (pdata->dma_data_width == PHYTMAC_DBW_128) + config |= PHYTMAC_BIT(DBW128); + /* mdc div */ + config = PHYTMAC_SET_BITS(config, MCD, 6); + netdev_dbg(pdata->ndev, "phytmac configure NetConfig with 0x%08x\n", + config); + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, config); + + /* init dma */ + dmaconfig = PHYTMAC_READ(pdata, PHYTMAC_DCONFIG); + if (pdata->dma_burst_length) + dmaconfig = PHYTMAC_SET_BITS(dmaconfig, BURST, pdata->dma_burst_length); + /* default in small endian */ + dmaconfig &= ~(PHYTMAC_BIT(ENDIA_PKT) | PHYTMAC_BIT(ENDIA_DESC)); + + if (pdata->ndev->features & NETIF_F_HW_CSUM) + dmaconfig |= PHYTMAC_BIT(TCO_EN); + else + dmaconfig &= ~PHYTMAC_BIT(TCO_EN); + + if (pdata->dma_addr_width) + dmaconfig |= PHYTMAC_BIT(ABW); + + /* fdir ethtype -- ipv4 */ + PHYTMAC_WRITE(pdata, PHYTMAC_ETHT(0), (uint16_t)ETH_P_IP); + + if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) + dmaconfig |= PHYTMAC_BIT(RX_EXBD_EN) | PHYTMAC_BIT(TX_EXBD_EN); + + PHYTMAC_WRITE(pdata, PHYTMAC_DCONFIG, dmaconfig); + + if (pdata->capacities & PHYTMAC_CAPS_TAILPTR) + PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_ENABLE, 0x80000001); + + if (phy_interface_mode_is_8023z(pdata->phy_interface)) + phytmac_pcs_software_reset(pdata, 1); + + return 0; +} + +static int 
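+/* Assumed flow, read off the register names: the power state is requested from the PM firmware through the MHU mailbox: poll until the channel and DATA0 are free, write a PM message into DATA0/DATA1, ring the AP-to-CPP doorbell, then read DATA1 back to confirm that the requested state was taken. */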
phytmac_powerup_hw(struct phytmac *pdata, int on) +{ + u32 status, data0, data1, rdata1; + int ret; + + if (pdata->capacities & PHYTMAC_CAPS_LPI) { + ret = readx_poll_timeout(PHYTMAC_READ_STAT, pdata, status, !status, + 1, PHYTMAC_TIMEOUT); + if (ret) + netdev_err(pdata->ndev, "mnh status is busy, status=%x\n", status); + + ret = readx_poll_timeout(PHYTMAC_READ_DATA0, pdata, data0, + data0 & PHYTMAC_BIT(DATA0_FREE), + 1, PHYTMAC_TIMEOUT); + if (ret) + netdev_err(pdata->ndev, "mnh data0 is busy, data0=%x\n", data0); + + data0 = 0; + data0 = PHYTMAC_SET_BITS(data0, DATA0_MSG, PHYTMAC_MSG_PM); + data0 = PHYTMAC_SET_BITS(data0, DATA0_PRO, PHYTMAC_PRO_ID); + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_CPP_DATA0, data0); + data1 = 0; + + if (on == PHYTMAC_POWERON) { + data1 = PHYTMAC_SET_BITS(data1, DATA1_STAT, PHYTMAC_STATON); + data1 = PHYTMAC_SET_BITS(data1, DATA1_STATTYPE, PHYTMAC_STATTYPE); + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_CPP_DATA1, data1); + } else { + data1 = PHYTMAC_SET_BITS(data1, DATA1_STAT, PHYTMAC_STATOFF); + data1 = PHYTMAC_SET_BITS(data1, DATA1_STATTYPE, PHYTMAC_STATTYPE); + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_CPP_DATA1, data1); + } + + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_AP_CPP_SET, 1); + ret = readx_poll_timeout(PHYTMAC_READ_DATA0, pdata, data0, + data0 & PHYTMAC_BIT(DATA0_FREE), + 1, PHYTMAC_TIMEOUT); + if (ret) + netdev_err(pdata->ndev, "mnh data0 is busy\n"); + + rdata1 = PHYTMAC_MHU_READ(pdata, PHYTMAC_MHU_CPP_DATA1); + if (rdata1 == data1) + netdev_info(pdata->ndev, "gmac power %s success, data1 = %x, rdata1=%x\n", + on ? "up" : "down", data1, rdata1); + else + netdev_err(pdata->ndev, "gmac power %s failed, data1 = %x, rdata1=%x\n", + on ? "up" : "down", data1, rdata1); + } + pdata->power_state = on; + + return 0; +} + +static int phytmac_set_wake(struct phytmac *pdata, int wake) +{ + u32 value = 0; + + if (wake & PHYTMAC_WAKE_MAGIC) + value |= PHYTMAC_BIT(MAGIC); + if (wake & PHYTMAC_WAKE_ARP) + value |= PHYTMAC_BIT(ARP); + if (wake & PHYTMAC_WAKE_UCAST) + value |= PHYTMAC_BIT(UCAST); + if (wake & PHYTMAC_WAKE_MCAST) + value |= PHYTMAC_BIT(MCAST); + + PHYTMAC_WRITE(pdata, PHYTMAC_WOL, value); + + return 0; +} + +static void phytmac_mdio_idle(struct phytmac *pdata) +{ + u32 val; + + /* wait for end of transfer */ + val = PHYTMAC_READ(pdata, PHYTMAC_NSTATUS); + while (!(val & PHYTMAC_BIT(MDIO_IDLE))) { + cpu_relax(); + val = PHYTMAC_READ(pdata, PHYTMAC_NSTATUS); + } +} + +static int phytmac_mdio_data_read_c22(struct phytmac *pdata, int mii_id, int regnum) +{ + u16 data; + + PHYTMAC_WRITE(pdata, PHYTMAC_MDATA, (PHYTMAC_BITS(CLAUSE_SEL, PHYTMAC_CLAUSE_C22) + | PHYTMAC_BITS(OPS, PHYTMAC_OPS_C22_READ) + | PHYTMAC_BITS(PHY_ADDR, mii_id) + | PHYTMAC_BITS(REG_ADDR, regnum) + | PHYTMAC_BITS(MUST, 2))); + + phytmac_mdio_idle(pdata); + data = PHYTMAC_READ(pdata, PHYTMAC_MDATA) & 0xffff; + phytmac_mdio_idle(pdata); + + return data; +} + +static int phytmac_mdio_data_write_c22(struct phytmac *pdata, int mii_id, + int regnum, u16 data) +{ + PHYTMAC_WRITE(pdata, PHYTMAC_MDATA, (PHYTMAC_BITS(CLAUSE_SEL, PHYTMAC_CLAUSE_C22) + | PHYTMAC_BITS(OPS, PHYTMAC_OPS_C22_WRITE) + | PHYTMAC_BITS(PHY_ADDR, mii_id) + | PHYTMAC_BITS(REG_ADDR, regnum) + | PHYTMAC_BITS(DATA, data) + | PHYTMAC_BITS(MUST, 2))); + phytmac_mdio_idle(pdata); + + return 0; +} + +static int phytmac_mdio_data_read_c45(struct phytmac *pdata, int mii_id, int devad, int regnum) +{ + u16 data; + + PHYTMAC_WRITE(pdata, PHYTMAC_MDATA, (PHYTMAC_BITS(CLAUSE_SEL, PHYTMAC_CLAUSE_C45) + | PHYTMAC_BITS(OPS, PHYTMAC_OPS_C45_ADDR) + |
PHYTMAC_BITS(PHY_ADDR, mii_id) + | PHYTMAC_BITS(REG_ADDR, devad & 0x1F) + | PHYTMAC_BITS(DATA, regnum & 0xFFFF) + | PHYTMAC_BITS(MUST, 2))); + phytmac_mdio_idle(pdata); + PHYTMAC_WRITE(pdata, PHYTMAC_MDATA, (PHYTMAC_BITS(CLAUSE_SEL, PHYTMAC_CLAUSE_C45) + | PHYTMAC_BITS(OPS, PHYTMAC_OPS_C45_READ) + | PHYTMAC_BITS(PHY_ADDR, mii_id) + | PHYTMAC_BITS(REG_ADDR, devad & 0x1F) + | PHYTMAC_BITS(DATA, regnum & 0xFFFF) + | PHYTMAC_BITS(MUST, 2))); + + phytmac_mdio_idle(pdata); + data = PHYTMAC_READ(pdata, PHYTMAC_MDATA) & 0xffff; + phytmac_mdio_idle(pdata); + + return data; +} + +static int phytmac_mdio_data_write_c45(struct phytmac *pdata, int mii_id, int devad, + int regnum, u16 data) +{ + PHYTMAC_WRITE(pdata, PHYTMAC_MDATA, (PHYTMAC_BITS(CLAUSE_SEL, PHYTMAC_CLAUSE_C45) + | PHYTMAC_BITS(OPS, PHYTMAC_OPS_C45_ADDR) + | PHYTMAC_BITS(PHY_ADDR, mii_id) + | PHYTMAC_BITS(REG_ADDR, devad & 0x1F) + | PHYTMAC_BITS(DATA, regnum & 0xFFFF) + | PHYTMAC_BITS(MUST, 2))); + phytmac_mdio_idle(pdata); + PHYTMAC_WRITE(pdata, PHYTMAC_MDATA, (PHYTMAC_BITS(CLAUSE_SEL, PHYTMAC_CLAUSE_C45) + | PHYTMAC_BITS(OPS, PHYTMAC_OPS_C45_WRITE) + | PHYTMAC_BITS(PHY_ADDR, mii_id) + | PHYTMAC_BITS(REG_ADDR, devad & 0x1F) + | PHYTMAC_BITS(DATA, data) + | PHYTMAC_BITS(MUST, 2))); + phytmac_mdio_idle(pdata); + + return 0; +} + +static int phytmac_get_feature_all(struct phytmac *pdata) +{ + unsigned int queue_mask; + unsigned int num_queues; + int val; + + /* get max queues */ + queue_mask = 0x1; + queue_mask |= PHYTMAC_READ(pdata, PHYTMAC_DEFAULT2) & 0xff; + num_queues = hweight32(queue_mask); + pdata->queues_max_num = num_queues; + + /* get dma desc prefetch number */ + val = PHYTMAC_READ_BITS(pdata, PHYTMAC_DEFAULT4, TXDESCRD); + if (val) + pdata->tx_bd_prefetch = (2 << (val - 1)) * + sizeof(struct phytmac_dma_desc); + + val = PHYTMAC_READ_BITS(pdata, PHYTMAC_DEFAULT4, RXDESCRD); + if (val) + pdata->rx_bd_prefetch = (2 << (val - 1)) * + sizeof(struct phytmac_dma_desc); + + /* dma bus width */ + pdata->dma_data_width = PHYTMAC_READ_BITS(pdata, PHYTMAC_DEFAULT1, DBW); + + /* dma addr width */ + if (PHYTMAC_READ_BITS(pdata, PHYTMAC_DEFAULT2, DAW64)) + pdata->dma_addr_width = 64; + else + pdata->dma_addr_width = 32; + + /* max rx fs */ + pdata->max_rx_fs = PHYTMAC_READ_BITS(pdata, PHYTMAC_DEFAULT3, SCR2CMP); + + if (netif_msg_hw(pdata)) + netdev_info(pdata->ndev, "get feature queue_num=%d, daw=%d, dbw=%d, rx_fs=%d, rx_bd=%d, tx_bd=%d\n", + pdata->queues_num, pdata->dma_addr_width, pdata->dma_data_width, + pdata->max_rx_fs, pdata->rx_bd_prefetch, pdata->tx_bd_prefetch); + return 0; +} + +static int phytmac_add_fdir_entry(struct phytmac *pdata, struct ethtool_rx_flow_spec *rx_flow) +{ + struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; + u16 index = rx_flow->location; + u32 tmp, fdir_ctrl; + bool ipsrc = false; + bool ipdst = false; + bool port = false; + + tp4sp_v = &rx_flow->h_u.tcp_ip4_spec; + tp4sp_m = &rx_flow->m_u.tcp_ip4_spec; + + if (tp4sp_m->ip4src == 0xFFFFFFFF) { + tmp = 0; + tmp = PHYTMAC_SET_BITS(tmp, OFFSET, ETHTYPE_SIP_OFFSET); + tmp = PHYTMAC_SET_BITS(tmp, OFFSET_TYPE, PHYTMAC_OFFSET_AFTER_L2HEAD); + tmp = PHYTMAC_SET_BITS(tmp, DIS_MASK, 1); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP1(3 * index), tp4sp_v->ip4src); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP2(3 * index), tmp); + ipsrc = true; + } + + if (tp4sp_m->ip4dst == 0xFFFFFFFF) { + /* 2nd compare reg - IP destination address */ + tmp = 0; + tmp = PHYTMAC_SET_BITS(tmp, OFFSET, ETHTYPE_DIP_OFFSET); + tmp = PHYTMAC_SET_BITS(tmp, OFFSET_TYPE, PHYTMAC_OFFSET_AFTER_L2HEAD); + tmp = 
PHYTMAC_SET_BITS(tmp, DIS_MASK, 1); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP1(3 * index + 1), tp4sp_v->ip4dst); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP2(3 * index + 1), tmp); + ipdst = true; + } + + if (tp4sp_m->psrc == 0xFFFF || tp4sp_m->pdst == 0xFFFF) { + tmp = 0; + tmp = PHYTMAC_SET_BITS(tmp, OFFSET_TYPE, PHYTMAC_OFFSET_AFTER_L3HEAD); + if (tp4sp_m->psrc == tp4sp_m->pdst) { + tmp = PHYTMAC_SET_BITS(tmp, DIS_MASK, 1); + tmp = PHYTMAC_SET_BITS(tmp, OFFSET, IPHEAD_SPORT_OFFSET); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP1(3 * index + 2), + tp4sp_v->psrc | (u32)(tp4sp_v->pdst << 16)); + } else { + tmp = PHYTMAC_SET_BITS(tmp, DIS_MASK, 0); + if (tp4sp_m->psrc == 0xFFFF) { /* src port */ + tmp = PHYTMAC_SET_BITS(tmp, OFFSET, IPHEAD_SPORT_OFFSET); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP1(3 * index + 2), + tp4sp_m->psrc | (u32)(tp4sp_v->psrc << 16)); + } else { /* dst port */ + tmp = PHYTMAC_SET_BITS(tmp, OFFSET, IPHEAD_DPORT_OFFSET); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP1(3 * index + 2), + tp4sp_m->pdst | (u32)(tp4sp_v->pdst << 16)); + } + } + PHYTMAC_WRITE(pdata, PHYTMAC_COMP2(3 * index + 2), tmp); + port = true; + } + + fdir_ctrl = PHYTMAC_READ(pdata, PHYTMAC_FDIR(rx_flow->location)); + + if (ipsrc) { + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, CA, 3 * index); + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, CA_EN, 1); + } + + if (ipdst) { + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, CB, 3 * index + 1); + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, CB_EN, 1); + } + + if (port) { + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, CC, 3 * index + 2); + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, CC_EN, 1); + } + + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, QUEUE_NUM, (rx_flow->ring_cookie) & 0xFF); + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, ETH_TYPE, 0); + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, ETH_EN, 1); + + PHYTMAC_WRITE(pdata, PHYTMAC_FDIR(rx_flow->location), fdir_ctrl); + + return 0; +} + +static int phytmac_del_fdir_entry(struct phytmac *pdata, struct ethtool_rx_flow_spec *rx_flow) +{ + int i; + int index = rx_flow->location; + + PHYTMAC_WRITE(pdata, PHYTMAC_FDIR(index), 0); + for (i = 0; i < 3; i++) { + PHYTMAC_WRITE(pdata, PHYTMAC_COMP1(3 * index + i), 0); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP2(3 * index + i), 0); + } + return 0; +} + +static int phytmac_init_ring_hw(struct phytmac *pdata) +{ + struct phytmac_queue *queue; + unsigned int q = 0; + u32 buffer_size = pdata->rx_buffer_len / 64; + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + if (q == 0) { + PHYTMAC_WRITE(pdata, PHYTMAC_RXPTR_Q0, + lower_32_bits(queue->rx_ring_addr)); + PHYTMAC_WRITE(pdata, PHYTMAC_TXPTR_Q0, + lower_32_bits(queue->tx_ring_addr)); + PHYTMAC_WRITE(pdata, PHYTMAC_DCONFIG, + PHYTMAC_SET_BITS(PHYTMAC_READ(pdata, PHYTMAC_DCONFIG), + RX_BUF_LEN, buffer_size)); + } else { + PHYTMAC_WRITE(pdata, PHYTMAC_RXPTR(q - 1), + lower_32_bits(queue->rx_ring_addr)); + PHYTMAC_WRITE(pdata, PHYTMAC_TXPTR(q - 1), + lower_32_bits(queue->tx_ring_addr)); + PHYTMAC_WRITE(pdata, PHYTMAC_RBQS(q - 1), buffer_size); + } + + PHYTMAC_WRITE(pdata, PHYTMAC_TXPTRH(q), upper_32_bits(queue->tx_ring_addr)); + PHYTMAC_WRITE(pdata, PHYTMAC_RXPTRH(q), upper_32_bits(queue->rx_ring_addr)); + + if (pdata->capacities & PHYTMAC_CAPS_TAILPTR) + PHYTMAC_WRITE(pdata, PHYTMAC_TAILPTR(q), queue->tx_tail); + } + + return 0; +} + +static u32 phytmac_get_irq_mask(u32 mask) +{ + u32 value = 0; + + value |= (mask & PHYTMAC_INT_TX_COMPLETE) ? PHYTMAC_BIT(TXCOMP) : 0; + value |= (mask & PHYTMAC_INT_TX_ERR) ? PHYTMAC_BIT(BUS_ERR) : 0; + value |= (mask & PHYTMAC_INT_RX_COMPLETE) ? 
PHYTMAC_BIT(RXCOMP) : 0; + value |= (mask & PHYTMAC_INT_RX_OVERRUN) ? PHYTMAC_BIT(RXOVERRUN) : 0; + value |= (mask & PHYTMAC_INT_RX_DESC_FULL) ? PHYTMAC_BIT(RUB) : 0; + + return value; +} + +static u32 phytmac_get_irq_status(u32 value) +{ + u32 status = 0; + + status |= (value & PHYTMAC_BIT(TXCOMP)) ? PHYTMAC_INT_TX_COMPLETE : 0; + status |= (value & PHYTMAC_BIT(BUS_ERR)) ? PHYTMAC_INT_TX_ERR : 0; + status |= (value & PHYTMAC_BIT(RXCOMP)) ? PHYTMAC_INT_RX_COMPLETE : 0; + status |= (value & PHYTMAC_BIT(RXOVERRUN)) ? PHYTMAC_INT_RX_OVERRUN : 0; + status |= (value & PHYTMAC_BIT(RUB)) ? PHYTMAC_INT_RX_DESC_FULL : 0; + + return status; +} + +static void phytmac_enable_irq(struct phytmac *pdata, + int queue_index, u32 mask) +{ + u32 value; + + value = phytmac_get_irq_mask(mask); + + if (queue_index == 0) + PHYTMAC_WRITE(pdata, PHYTMAC_IE, value); + else + PHYTMAC_WRITE(pdata, PHYTMAC_IER(queue_index - 1), value); +} + +static void phytmac_disable_irq(struct phytmac *pdata, + int queue_index, u32 mask) +{ + u32 value; + + value = phytmac_get_irq_mask(mask); + + if (queue_index == 0) + PHYTMAC_WRITE(pdata, PHYTMAC_ID, value); + else + PHYTMAC_WRITE(pdata, PHYTMAC_IDR(queue_index - 1), value); +} + +static void phytmac_clear_irq(struct phytmac *pdata, + int queue_index, u32 mask) +{ + u32 value; + + value = phytmac_get_irq_mask(mask); + + if (queue_index == 0) + PHYTMAC_WRITE(pdata, PHYTMAC_IS, value); + else + PHYTMAC_WRITE(pdata, PHYTMAC_ISR(queue_index - 1), value); +} + +static unsigned int phytmac_get_intx_mask(struct phytmac *pdata) +{ + u32 value; + + value = PHYTMAC_READ(pdata, PHYTMAC_INTX_IRQ_MASK); + PHYTMAC_WRITE(pdata, PHYTMAC_INTX_IRQ_MASK, value); + + return value; +} + +static unsigned int phytmac_get_irq(struct phytmac *pdata, int queue_index) +{ + u32 status, value; + + if (queue_index == 0) + value = PHYTMAC_READ(pdata, PHYTMAC_IS); + else + value = PHYTMAC_READ(pdata, PHYTMAC_ISR(queue_index - 1)); + + status = phytmac_get_irq_status(value); + + return status; +} + +static unsigned int phytmac_tx_map_desc(struct phytmac_queue *queue, + u32 tx_tail, struct packet_info *packet) +{ + unsigned int i, ctrl; + struct phytmac *pdata = queue->pdata; + struct phytmac_dma_desc *desc; + struct phytmac_tx_skb *tx_skb; + unsigned int eof = 1; + + i = tx_tail; + + if (!(pdata->capacities & PHYTMAC_CAPS_TAILPTR)) { + ctrl = PHYTMAC_BIT(TX_USED); + desc = phytmac_get_tx_desc(queue, i); + desc->desc1 = ctrl; + } + + do { + i--; + tx_skb = phytmac_get_tx_skb(queue, i); + desc = phytmac_get_tx_desc(queue, i); + + ctrl = (u32)tx_skb->length; + if (eof) { + ctrl |= PHYTMAC_BIT(TX_LAST); + eof = 0; + } + + if (unlikely(i == (pdata->tx_ring_size - 1))) + ctrl |= PHYTMAC_BIT(TX_WRAP); + + if (i == queue->tx_tail) { + ctrl |= PHYTMAC_BITS(TX_LSO, packet->lso); + ctrl |= PHYTMAC_BITS(TX_TCP_SEQ_SRC, packet->seq); + if (packet->nocrc) + ctrl |= PHYTMAC_BIT(TX_NOCRC); + } else { + ctrl |= PHYTMAC_BITS(MSS_MFS, packet->mss); + } + + desc->desc2 = upper_32_bits(tx_skb->addr); + desc->desc0 = lower_32_bits(tx_skb->addr); + /* Make newly descriptor visible to hardware */ + wmb(); + desc->desc1 = ctrl; + } while (i != queue->tx_tail); + + return 0; +} + +static void phytmac_init_rx_map_desc(struct phytmac_queue *queue, + u32 index) +{ + struct phytmac_dma_desc *desc; + + desc = phytmac_get_rx_desc(queue, index); + + desc->desc1 = 0; + /* Make newly descriptor to hardware */ + dma_wmb(); + desc->desc0 |= PHYTMAC_BIT(RX_USED); +} + +static unsigned int phytmac_rx_map_desc(struct phytmac_queue *queue, + u32 index, 
dma_addr_t addr) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_dma_desc *desc; + + desc = phytmac_get_rx_desc(queue, index); + + if (addr) { + if (unlikely(index == (pdata->rx_ring_size - 1))) + addr |= PHYTMAC_BIT(RX_WRAP); + desc->desc1 = 0; + desc->desc2 = upper_32_bits(addr); + /* RX_USED stays set, so the descriptor remains software-owned + * until phytmac_rx_clean_desc() hands it back to hardware. + */ + desc->desc0 = lower_32_bits(addr) | PHYTMAC_BIT(RX_USED); + } + return 0; +} + +static unsigned int phytmac_rx_clean_desc(struct phytmac_queue *queue, u32 count) +{ + struct phytmac_dma_desc *desc; + u32 index = queue->rx_head + count - 1; + + /* Clear RX_USED from the last descriptor backwards, so the head + * entry is published to hardware only after those behind it. + */ + while (count) { + desc = phytmac_get_rx_desc(queue, index); + desc->desc0 &= ~PHYTMAC_BIT(RX_USED); + dma_wmb(); + index--; + count--; + } + + return 0; +} + +static void phytmac_tx_start(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + + if (pdata->capacities & PHYTMAC_CAPS_TAILPTR) + PHYTMAC_WRITE(pdata, PHYTMAC_TAILPTR(queue->index), + BIT(31) | queue->tx_tail); + + if (pdata->capacities & PHYTMAC_CAPS_START) + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, + PHYTMAC_READ(pdata, PHYTMAC_NCTRL) | PHYTMAC_BIT(TSTART)); +} + +static void phytmac_restart(struct phytmac *pdata) +{ + int q; + struct phytmac_queue *queue; + + for (q = 0; q < pdata->queues_num; q++) { + queue = &pdata->queues[q]; + if (queue->tx_head != queue->tx_tail) { + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, + PHYTMAC_READ(pdata, PHYTMAC_NCTRL) | PHYTMAC_BIT(TSTART)); + break; + } + } +} + +static int phytmac_tx_complete(const struct phytmac_dma_desc *desc) +{ + return PHYTMAC_GET_BITS(desc->desc1, TX_USED); +} + +static int phytmac_rx_complete(const struct phytmac_dma_desc *desc) +{ + return (desc->desc0 & PHYTMAC_BIT(RX_USED)) != 0; +} + +static int phytmac_rx_pkt_len(struct phytmac *pdata, const struct phytmac_dma_desc *desc) +{ + if (pdata->capacities & PHYTMAC_CAPS_JUMBO) + return desc->desc1 & PHYTMAC_JUMBO_FRAME_MASK; + else + return desc->desc1 & PHYTMAC_FRAME_MASK; +} + +static dma_addr_t phytmac_get_desc_addr(const struct phytmac_dma_desc *desc) +{ + dma_addr_t addr = 0; + + addr = ((u64)(desc->desc2) << 32); + + addr |= (desc->desc0 & 0xfffffffc); + return addr; +} + +static bool phytmac_rx_checksum(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + u32 check = value >> PHYTMAC_RX_CSUM_INDEX & 0x3; + + return (check == PHYTMAC_RX_CSUM_IP_TCP || check == PHYTMAC_RX_CSUM_IP_UDP); +} + +static bool phytmac_rx_single_buffer(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + + return ((value & PHYTMAC_BIT(RX_SOF)) && (value & PHYTMAC_BIT(RX_EOF))); +} + +static bool phytmac_rx_sof(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + + return (value & PHYTMAC_BIT(RX_SOF)); +} + +static bool phytmac_rx_eof(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + + return (value & PHYTMAC_BIT(RX_EOF)); +} + +static void phytmac_clear_rx_desc(struct phytmac_queue *queue, int begin, int end) +{ + unsigned int frag; + unsigned int tmp = end; + struct phytmac_dma_desc *desc; + + /* When the range wraps, let the loop index run past the ring end. */ + if (begin > end) + tmp = end + queue->pdata->rx_ring_size; + + for (frag = begin; frag != tmp; frag++) { + desc = phytmac_get_rx_desc(queue, frag); + desc->desc0 &= ~PHYTMAC_BIT(RX_USED); + } +} + +static void phytmac_mac_interface_config(struct phytmac *pdata, unsigned int mode, + const struct phylink_link_state *state) +{ + u32 old_ctrl, old_config; + u32 ctrl, config, usxctl; + + old_ctrl = PHYTMAC_READ(pdata, PHYTMAC_NCTRL); + old_config = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + ctrl = old_ctrl; + config = old_config; + + if (state->speed ==
SPEED_10000) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_10000M); + else if (state->speed == SPEED_5000) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_5000M); + else if (state->speed == SPEED_2500) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_2500M); + else if (state->speed == SPEED_1000) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_1000M); + else if (state->speed == SPEED_100 || state->speed == SPEED_10) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_100M); + + config &= ~(PHYTMAC_BIT(SGMII_EN) | PHYTMAC_BIT(PCS_EN) + | PHYTMAC_BIT(SPEED) | PHYTMAC_BIT(FD) | PHYTMAC_BIT(GM_EN)); + ctrl &= ~(PHYTMAC_BIT(HIGHSPEED) | PHYTMAC_BIT(2PT5G)); + + if (state->interface == PHY_INTERFACE_MODE_SGMII) { + config |= PHYTMAC_BIT(SGMII_EN) | PHYTMAC_BIT(PCS_EN); + if (state->speed == SPEED_1000) + config |= PHYTMAC_BIT(GM_EN); + else if (state->speed == SPEED_2500) + config |= PHYTMAC_BIT(2PT5G); + } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) { + config |= PHYTMAC_BIT(PCS_EN) | PHYTMAC_BIT(GM_EN); + } else if (state->interface == PHY_INTERFACE_MODE_2500BASEX) { + config |= PHYTMAC_BIT(2PT5G) | PHYTMAC_BIT(PCS_EN); + } else if (state->interface == PHY_INTERFACE_MODE_10GBASER || + state->interface == PHY_INTERFACE_MODE_USXGMII || + state->interface == PHY_INTERFACE_MODE_5GBASER) { + usxctl = PHYTMAC_READ(pdata, PHYTMAC_USXCTRL); + if (state->speed == SPEED_10000) { + usxctl = PHYTMAC_SET_BITS(usxctl, SERDES_RATE, PHYTMAC_SERDES_RATE_10G); + usxctl = PHYTMAC_SET_BITS(usxctl, USX_SPEED, PHYTMAC_SPEED_10000M); + } else if (state->speed == SPEED_5000) { + usxctl = PHYTMAC_SET_BITS(usxctl, SERDES_RATE, PHYTMAC_SERDES_RATE_5G); + usxctl = PHYTMAC_SET_BITS(usxctl, USX_SPEED, PHYTMAC_SPEED_5000M); + } + usxctl |= PHYTMAC_BIT(RX_EN) | PHYTMAC_BIT(TX_EN); + PHYTMAC_WRITE(pdata, PHYTMAC_USXCTRL, usxctl); + + config |= PHYTMAC_BIT(PCS_EN); + ctrl |= PHYTMAC_BIT(HIGHSPEED); + } + + if (state->duplex) + config |= PHYTMAC_BIT(FD); + + if (old_ctrl ^ ctrl) + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, ctrl); + + if (old_config ^ config) + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, config); + + /* Disable AN for SGMII fixed link configuration, enable otherwise.*/ + if (state->interface == PHY_INTERFACE_MODE_SGMII) + phytmac_enable_autoneg(pdata, mode == MLO_AN_FIXED ? 
0 : 1); + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) + phytmac_enable_autoneg(pdata, 1); +} + +static unsigned int phytmac_pcs_get_link(struct phytmac *pdata, + phy_interface_t interface) +{ + if (interface == PHY_INTERFACE_MODE_SGMII || + interface == PHY_INTERFACE_MODE_1000BASEX || + interface == PHY_INTERFACE_MODE_2500BASEX) + return PHYTMAC_READ_BITS(pdata, PHYTMAC_NSTATUS, PCS_LINK); + else if (interface == PHY_INTERFACE_MODE_USXGMII || + interface == PHY_INTERFACE_MODE_10GBASER) + return PHYTMAC_READ_BITS(pdata, PHYTMAC_USXSTATUS, USX_PCS_LINK); + + return 0; +} + +static void phytmac_clear_tx_desc(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_dma_desc *desc = NULL; + int i; + + for (i = 0; i < queue->pdata->tx_ring_size; i++) { + desc = phytmac_get_tx_desc(queue, i); + desc->desc2 = 0; + desc->desc0 = 0; + /* make newly desc1 to hardware */ + wmb(); + desc->desc1 = PHYTMAC_BIT(TX_USED); + } + desc->desc1 |= PHYTMAC_BIT(TX_WRAP); + + if (pdata->capacities & PHYTMAC_CAPS_TAILPTR) + PHYTMAC_WRITE(pdata, PHYTMAC_TAILPTR(queue->index), queue->tx_tail); +} + +static void phytmac_get_hw_stats(struct phytmac *pdata) +{ + u32 stats[45]; + int i, j; + u64 val; + u64 *p = &pdata->stats.tx_octets; + + for (i = 0 ; i < 45; i++) + stats[i] = PHYTMAC_READ(pdata, PHYTMAC_OCTTX + i * 4); + + for (i = 0, j = 0; i < 45; i++) { + if (i == 0 || i == 20) { + val = (u64)stats[i + 1] << 32 | stats[i]; + *p += val; + pdata->ethtool_stats[j] = *p; + ++j; + ++p; + } else { + if (i != 1 && i != 21) { + val = stats[i]; + *p += val; + pdata->ethtool_stats[j] = *p; + ++j; + ++p; + } + } + } +} + +static void phytmac_get_time(struct phytmac *pdata, struct timespec64 *ts) +{ + u32 ns, secl, sech; + + ns = PHYTMAC_READ(pdata, PHYTMAC_TN); + secl = PHYTMAC_READ(pdata, PHYTMAC_TSL); + sech = PHYTMAC_READ(pdata, PHYTMAC_TSH); + + ts->tv_nsec = ns; + ts->tv_sec = (((u64)sech << 32) | secl) & SEC_MAX_VAL; +} + +static void phytmac_set_time(struct phytmac *pdata, time64_t sec, long nsec) +{ + u32 secl, sech; + + secl = (u32)sec; + sech = (sec >> 32) & (0xffff); + + PHYTMAC_WRITE(pdata, PHYTMAC_TN, 0); + PHYTMAC_WRITE(pdata, PHYTMAC_TSH, sech); + PHYTMAC_WRITE(pdata, PHYTMAC_TSL, secl); + PHYTMAC_WRITE(pdata, PHYTMAC_TN, nsec); +} + +static void phytmac_clear_time(struct phytmac *pdata) +{ + u32 value; + + pdata->ts_incr.sub_ns = 0; + pdata->ts_incr.ns = 0; + + value = PHYTMAC_READ(pdata, PHYTMAC_TISN); + value = PHYTMAC_SET_BITS(value, SUB_NSECL, 0); + value = PHYTMAC_SET_BITS(value, SUB_NSECH, 0); + PHYTMAC_WRITE(pdata, PHYTMAC_TISN, value); + + value = PHYTMAC_READ(pdata, PHYTMAC_TI); + value = PHYTMAC_SET_BITS(value, INCR_NS, 0); + PHYTMAC_WRITE(pdata, PHYTMAC_TI, value); + + PHYTMAC_WRITE(pdata, PHYTMAC_TA, 0); +} + +static int phytmac_set_tsmode(struct phytmac *pdata, struct ts_ctrl *ctrl) +{ + if (ctrl->rx_control == TS_ALL_PTP_FRAMES) + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, + PHYTMAC_READ(pdata, PHYTMAC_NCTRL) | PHYTMAC_BIT(STORE_RX_TS)); + + PHYTMAC_WRITE(pdata, PHYTMAC_TXBDCTRL, PHYTMAC_BITS(TX_TSMODE, ctrl->tx_control)); + PHYTMAC_WRITE(pdata, PHYTMAC_RXBDCTRL, PHYTMAC_BITS(RX_TSMODE, ctrl->rx_control)); + + return 0; +} + +static int phytmac_set_tsincr(struct phytmac *pdata, struct ts_incr *incr) +{ + u32 value; + + value = PHYTMAC_BITS(SUB_NSECL, incr->sub_ns) | + PHYTMAC_BITS(SUB_NSECH, incr->sub_ns >> 8); + PHYTMAC_WRITE(pdata, PHYTMAC_TISN, value); + PHYTMAC_WRITE(pdata, PHYTMAC_TI, incr->ns); + + return 0; +} + +static void phytmac_ptp_init_hw(struct 
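+/* Seed the PHC from the current wall-clock time, then program the stored tick increment. */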
phytmac *pdata) +{ + struct timespec64 ts; + + ts = ns_to_timespec64(ktime_to_ns(ktime_get_real())); + phytmac_set_time(pdata, ts.tv_sec, ts.tv_nsec); + + phytmac_set_tsincr(pdata, &pdata->ts_incr); +} + +static int phytmac_adjust_fine(struct phytmac *pdata, long ppm, bool negative) +{ + struct ts_incr ts_incr; + u32 tmp; + u64 adj; + + ts_incr.ns = pdata->ts_incr.ns; + ts_incr.sub_ns = pdata->ts_incr.sub_ns; + + tmp = ((u64)ts_incr.ns << PHYTMAC_SUB_NSECL_INDEX) + ts_incr.sub_ns; + adj = ((u64)ppm * tmp + (USEC_PER_SEC >> 1)) >> PHYTMAC_SUB_NSECL_INDEX; + + adj = div_u64(adj, USEC_PER_SEC); + adj = negative ? (tmp - adj) : (tmp + adj); + + ts_incr.ns = (adj >> PHYTMAC_SUB_NSEC_WIDTH) + & ((1 << PHYTMAC_SUB_NSECL_WIDTH) - 1); + ts_incr.sub_ns = adj & ((1 << PHYTMAC_SUB_NSEC_WIDTH) - 1); + + phytmac_set_tsincr(pdata, &ts_incr); + + return 0; +} + +static int phytmac_adjust_time(struct phytmac *pdata, s64 delta, int neg) +{ + u32 adj; + + if (delta > PHYTMAC_ADJUST_SEC_MAX) { + struct timespec64 now, then; + + if (neg) + then = ns_to_timespec64(-delta); + else + then = ns_to_timespec64(delta); + phytmac_get_time(pdata, &now); + now = timespec64_add(now, then); + phytmac_set_time(pdata, now.tv_sec, now.tv_nsec); + } else { + adj = (neg << PHYTMAC_INCR_ADD_INDEX) | delta; + PHYTMAC_WRITE(pdata, PHYTMAC_TA, adj); + } + + return 0; +} + +static int phytmac_ts_valid(struct phytmac *pdata, struct phytmac_dma_desc *desc, int direction) +{ + int ts_valid = 0; + + if (direction == PHYTMAC_TX) + ts_valid = desc->desc1 & PHYTMAC_BIT(TX_TS_VALID); + else if (direction == PHYTMAC_RX) + ts_valid = desc->desc0 & PHYTMAC_BIT(RX_TS_VALID); + return ts_valid; +} + +static void phytmac_get_dma_ts(struct phytmac *pdata, u32 ts_1, u32 ts_2, struct timespec64 *ts) +{ + struct timespec64 ts2; + + ts->tv_sec = (PHYTMAC_GET_BITS(ts_2, DMA_SECH) << 2) | + PHYTMAC_GET_BITS(ts_1, DMA_SECL); + ts->tv_nsec = PHYTMAC_GET_BITS(ts_1, DMA_NSEC); + + phytmac_get_time(pdata, &ts2); + + if (((ts->tv_sec ^ ts2.tv_sec) & (PHYTMAC_DMA_SEC_TOP >> 1)) != 0) + ts->tv_sec -= PHYTMAC_DMA_SEC_TOP; + + ts->tv_sec += (ts2.tv_sec & (~PHYTMAC_DMA_SEC_MASK)); +} + +static unsigned int phytmac_get_ts_rate(struct phytmac *pdata) +{ + return 300000000; +} + +struct phytmac_hw_if phytmac_1p0_hw = { + .reset_hw = phytmac_reset_hw, + .init_hw = phytmac_init_hw, + .init_ring_hw = phytmac_init_ring_hw, + .get_feature = phytmac_get_feature_all, + .get_regs = phytmac_get_regs, + .get_stats = phytmac_get_hw_stats, + .set_mac_address = phytmac_set_mac_addr, + .get_mac_address = phytmac_get_mac_addr, + .mdio_read = phytmac_mdio_data_read_c22, + .mdio_write = phytmac_mdio_data_write_c22, + .mdio_read_c45 = phytmac_mdio_data_read_c45, + .mdio_write_c45 = phytmac_mdio_data_write_c45, + .poweron = phytmac_powerup_hw, + .set_wol = phytmac_set_wake, + .enable_promise = phytmac_enable_promise, + .enable_multicast = phytmac_enable_multicast, + .set_hash_table = phytmac_set_mc_hash, + .enable_rx_csum = phytmac_enable_rxcsum, + .enable_tx_csum = phytmac_enable_txcsum, + .enable_mdio_control = phytmac_enable_mdio, + .enable_autoneg = phytmac_enable_autoneg, + .enable_pause = phytmac_enable_pause, + .enable_network = phytmac_enable_network, + .add_fdir_entry = phytmac_add_fdir_entry, + .del_fdir_entry = phytmac_del_fdir_entry, + /* mac config */ + .mac_config = phytmac_mac_interface_config, + .mac_linkup = phytmac_mac_linkup, + .mac_linkdown = phytmac_mac_linkdown, + .pcs_linkup = phytmac_pcs_linkup, + .pcs_linkdown = phytmac_pcs_linkdown, + .get_link = 
phytmac_pcs_get_link, + /* irq */ + .enable_irq = phytmac_enable_irq, + .disable_irq = phytmac_disable_irq, + .clear_irq = phytmac_clear_irq, + .get_irq = phytmac_get_irq, + .get_intx_mask = phytmac_get_intx_mask, + /* tx and rx */ + .tx_map = phytmac_tx_map_desc, + .transmit = phytmac_tx_start, + .restart = phytmac_restart, + .tx_complete = phytmac_tx_complete, + .rx_complete = phytmac_rx_complete, + .get_rx_pkt_len = phytmac_rx_pkt_len, + .get_desc_addr = phytmac_get_desc_addr, + .init_rx_map = phytmac_init_rx_map_desc, + .rx_map = phytmac_rx_map_desc, + .rx_clean = phytmac_rx_clean_desc, + .rx_checksum = phytmac_rx_checksum, + .rx_single_buffer = phytmac_rx_single_buffer, + .rx_pkt_start = phytmac_rx_sof, + .rx_pkt_end = phytmac_rx_eof, + .clear_rx_desc = phytmac_clear_rx_desc, + .clear_tx_desc = phytmac_clear_tx_desc, + /* ptp */ + .init_ts_hw = phytmac_ptp_init_hw, + .set_time = phytmac_set_time, + .clear_time = phytmac_clear_time, + .get_time = phytmac_get_time, + .set_ts_config = phytmac_set_tsmode, + .set_incr = phytmac_set_tsincr, + .adjust_fine = phytmac_adjust_fine, + .adjust_time = phytmac_adjust_time, + .ts_valid = phytmac_ts_valid, + .get_timestamp = phytmac_get_dma_ts, + .get_ts_rate = phytmac_get_ts_rate, +}; +EXPORT_SYMBOL_GPL(phytmac_1p0_hw); diff --git a/drivers/net/ethernet/phytium/phytmac_v1.h b/drivers/net/ethernet/phytium/phytmac_v1.h new file mode 100644 index 0000000000000..d8de2c26cab4b --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_v1.h @@ -0,0 +1,455 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _PHYTMAC_V1_H +#define _PHYTMAC_V1_H + +extern struct phytmac_hw_if phytmac_1p0_hw; + +#define PHYTMAC_FRAME_MASK 0x1fff +#define PHYTMAC_JUMBO_FRAME_MASK 0x3fff + +#define PHYTMAC_SPEED_100M 0 +#define PHYTMAC_SPEED_1000M 1 +#define PHYTMAC_SPEED_2500M 2 +#define PHYTMAC_SPEED_5000M 3 +#define PHYTMAC_SPEED_10000M 4 +#define PHYTMAC_SERDES_RATE_5G 0 +#define PHYTMAC_SERDES_RATE_10G 1 + +#define PHYTMAC_NCTRL 0x0000 +#define PHYTMAC_NCONFIG 0x0004 +#define PHYTMAC_NSTATUS 0x0008 +#define PHYTMAC_DCONFIG 0x0010 +#define PHYTMAC_RXPTR_Q0 0x0018 +#define PHYTMAC_TXPTR_Q0 0x001C +#define PHYTMAC_IS 0x0024 +#define PHYTMAC_IE 0x0028 +#define PHYTMAC_ID 0x002C +#define PHYTMAC_IM 0x0030 +#define PHYTMAC_MDATA 0x0034 +#define PHYTMAC_HCONFIG 0x0050 +#define PHYTMAC_AXICTRL 0x0054 +#define PHYTMAC_INT_MODERATION 0x005C +#define PHYTMAC_HASHB 0x0080 +#define PHYTMAC_HASHT 0x0084 +#define PHYTMAC_MAC1B 0x0088 +#define PHYTMAC_MAC1T 0x008C +#define PHYTMAC_WOL 0x00B8 +#define PHYTMAC_OCTTX 0x0100 +#define PHYTMAC_TISN 0x01BC +#define PHYTMAC_TSH 0x01C0 +#define PHYTMAC_TSL 0x01D0 +#define PHYTMAC_TN 0x01D4 +#define PHYTMAC_TA 0x01D8 +#define PHYTMAC_TI 0x01DC +#define PHYTMAC_PCSCTRL 0x0200 +#define PHYTMAC_PCSSTATUS 0x0204 +#define PHYTMAC_PCSANLPBASE 0x0214 +#define PHYTMAC_DEFAULT1 0x0280 /* Default HW Config 1 */ +#define PHYTMAC_DEFAULT2 0x0294 /* Default HW Config 2 */ +#define PHYTMAC_DEFAULT3 0x029C /* Default HW Config 3 */ +#define PHYTMAC_DEFAULT4 0x02A4 /* Default HW Config 4 */ +#define PHYTMAC_DEFAULT5 0x02AC /* Default HW Config 5 */ +#define PHYTMAC_USXCTRL 0x0A80 +#define PHYTMAC_USXSTATUS 0x0A88 +#define PHYTMAC_TXBDCTRL 0x04CC +#define PHYTMAC_RXBDCTRL 0x04D0 + +/* Fdir match registers */ +#define PHYTMAC_FDIR(i) (0x0540 + ((i) << 2)) + +/* EtherType registers */ +#define PHYTMAC_ETHT(i) (0x06E0 + ((i) << 2)) + +/* Fdir compare registers */ +#define PHYTMAC_COMP1(i) (0x0700 + ((i) << 3)) +#define PHYTMAC_COMP2(i) (0x0704 + ((i) << 3)) + 
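+/* Per-queue copies of the interrupt and ring registers. Callers index most of them with (queue - 1), queue 0 being served by the base registers above; PHYTMAC_TAILPTR(), PHYTMAC_TXPTRH() and PHYTMAC_RXPTRH() are passed the queue index itself. */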
+#define PHYTMAC_ISR(i) (0x0400 + ((i) << 2)) +#define PHYTMAC_TXPTR(i) (0x0440 + ((i) << 2)) +#define PHYTMAC_RXPTR(i) (0x0480 + ((i) << 2)) +#define PHYTMAC_RBQS(i) (0x04A0 + ((i) << 2)) +#define PHYTMAC_TXPTRH(i) (0x04c8) +#define PHYTMAC_RXPTRH(i) (0x04d4) + +#define PHYTMAC_IER(i) (0x0600 + ((i) << 2)) +#define PHYTMAC_IDR(i) (0x0620 + ((i) << 2)) +#define PHYTMAC_IMR(i) (0x0640 + ((i) << 2)) +#define PHYTMAC_TAIL_ENABLE (0x0e7c) +#define PHYTMAC_TAILPTR(i) (0x0e80 + ((i) << 2)) + +#define PHYTMAC_PHY_INT_ENABLE 0x1C88 +#define PHYTMAC_PHY_INT_CLEAR 0x1C8C +#define PHYTMAC_PHY_INT_STATE 0x1C90 +#define PHYTMAC_INTX_IRQ_MASK 0x1C7C + +#define PHYTMAC_READ_NSTATUS(pdata) PHYTMAC_READ(pdata, PHYTMAC_NSTATUS) + +/* Ethernet Network Control Register */ +#define PHYTMAC_RE_INDEX 2 /* Receive enable */ +#define PHYTMAC_RE_WIDTH 1 +#define PHYTMAC_TE_INDEX 3 /* Transmit enable */ +#define PHYTMAC_TE_WIDTH 1 +#define PHYTMAC_MPE_INDEX 4 /* Management port enable */ +#define PHYTMAC_MPE_WIDTH 1 +#define PHYTMAC_CLEARSTAT_INDEX 5 /* Clear stats regs */ +#define PHYTMAC_CLEARSTAT_WIDTH 1 +#define PHYTMAC_TSTART_INDEX 9 /* Start transmission */ +#define PHYTMAC_TSTART_WIDTH 1 +#define PHYTMAC_THALT_INDEX 10 /* Transmit halt */ +#define PHYTMAC_THALT_WIDTH 1 +#define PHYTMAC_STORE_RX_TS_INDEX 15 +#define PHYTMAC_STORE_RX_TS_WIDTH 1 +#define PHYTMAC_OSSMODE_INDEX 24 /* Enable One Step Synchro Mode */ +#define PHYTMAC_OSSMODE_WIDTH 1 +#define PHYTMAC_2PT5G_INDEX 29 /* 2.5G operation selected */ +#define PHYTMAC_2PT5G_WIDTH 1 +#define PHYTMAC_HIGHSPEED_INDEX 31 /* High speed enable */ +#define PHYTMAC_HIGHSPEED_WIDTH 1 + +/* Ethernet Network Config Register */ +#define PHYTMAC_SPEED_INDEX 0 /* Speed */ +#define PHYTMAC_SPEED_WIDTH 1 +#define PHYTMAC_FD_INDEX 1 /* Full duplex */ +#define PHYTMAC_FD_WIDTH 1 +#define PHYTMAC_JUMBO_EN_INDEX 3 /* Jumbo frames enable */ +#define PHYTMAC_JUMBO_EN_WIDTH 1 +#define PHYTMAC_PROMISC_INDEX 4 /* Copy all frames */ +#define PHYTMAC_PROMISC_WIDTH 1 +#define PHYTMAC_NO_BCAST_INDEX 5 /* No broadcast */ +#define PHYTMAC_NO_BCAST_WIDTH 1 +#define PHYTMAC_MH_EN_INDEX 6 /* Multicast hash enable */ +#define PHYTMAC_MH_EN_WIDTH 1 +#define PHYTMAC_RCV_BIG_INDEX 8 +#define PHYTMAC_RCV_BIG_WIDTH 1 +#define PHYTMAC_GM_EN_INDEX 10 /* Gigabit mode enable */ +#define PHYTMAC_GM_EN_WIDTH 1 +#define PHYTMAC_PCS_EN_INDEX 11 /* PCS select */ +#define PHYTMAC_PCS_EN_WIDTH 1 +#define PHYTMAC_PAUSE_EN_INDEX 13 /* Pause enable */ +#define PHYTMAC_PAUSE_EN_WIDTH 1 +#define PHYTMAC_FCS_REMOVE_INDEX 17 /* FCS remove */ +#define PHYTMAC_FCS_REMOVE_WIDTH 1 +#define PHYTMAC_MCD_INDEX 18 /* MDC clock division */ +#define PHYTMAC_MCD_WIDTH 3 +#define PHYTMAC_DBW64_INDEX 21 /* Data bus width */ +#define PHYTMAC_DBW64_WIDTH 1 +#define PHYTMAC_DBW128_INDEX 22 /* Data bus width */ +#define PHYTMAC_DBW128_WIDTH 1 +#define PHYTMAC_DBW_32 1 +#define PHYTMAC_DBW_64 2 +#define PHYTMAC_DBW_128 4 +#define PHYTMAC_RCO_EN_INDEX 24 /* Receive checksum offload enable */ +#define PHYTMAC_RCO_EN_WIDTH 1 +#define PHYTMAC_SGMII_EN_INDEX 27 /* Sgmii mode enable */ +#define PHYTMAC_SGMII_EN_WIDTH 1 + +/* Ethernet Network Status Register */ +#define PHYTMAC_PCS_LINK_INDEX 0 /* PCS link status */ +#define PHYTMAC_PCS_LINK_WIDTH 1 +#define PHYTMAC_MDIO_IDLE_INDEX 2 /* Mdio idle */ +#define PHYTMAC_MDIO_IDLE_WIDTH 1 +#define PHYTMAC_PCS_FD_INDEX 3 /* PCS auto negotiation duplex resolution */ +#define PHYTMAC_PCS_FD_WIDTH 1 + +/* Ethernet Network Dma config Register */ +#define PHYTMAC_BURST_INDEX 0 /* Amba burst length */
+#define PHYTMAC_BURST_WIDTH 5 +#define PHYTMAC_ENDIA_PKT_INDEX 6 +#define PHYTMAC_ENDIA_PKT_WIDTH 1 +#define PHYTMAC_ENDIA_DESC_INDEX 7 +#define PHYTMAC_ENDIA_DESC_WIDTH 1 +#define PHYTMAC_TCO_EN_INDEX 11 /* Tx Checksum Offload en */ +#define PHYTMAC_TCO_EN_WIDTH 1 +#define PHYTMAC_RX_BUF_LEN_INDEX 16 /* DMA receive buffer size */ +#define PHYTMAC_RX_BUF_LEN_WIDTH 8 +#define PHYTMAC_RX_EXBD_EN_INDEX 28 /* Enable RX extended BD mode */ +#define PHYTMAC_RX_EXBD_EN_WIDTH 1 +#define PHYTMAC_TX_EXBD_EN_INDEX 29 /* Enable TX extended BD mode */ +#define PHYTMAC_TX_EXBD_EN_WIDTH 1 +#define PHYTMAC_ABW_INDEX 30 /* DMA address bus width */ +#define PHYTMAC_ABW_WIDTH 1 + +/* Int status/Enable/Disable/Mask Register */ +#define PHYTMAC_RXCOMP_INDEX 1 /* Rx complete */ +#define PHYTMAC_RXCOMP_WIDTH 1 +#define PHYTMAC_RUB_INDEX 2 /* Rx used bit read */ +#define PHYTMAC_RUB_WIDTH 1 +#define PHYTMAC_BUS_ERR_INDEX 6 /* AMBA error */ +#define PHYTMAC_BUS_ERR_WIDTH 1 +#define PHYTMAC_TXCOMP_INDEX 7 /* Tx complete */ +#define PHYTMAC_TXCOMP_WIDTH 1 +#define PHYTMAC_RXOVERRUN_INDEX 10 /* Rx overrun */ +#define PHYTMAC_RXOVERRUN_WIDTH 1 +#define PHYTMAC_RESP_ERR_INDEX 11 /* Resp not ok */ +#define PHYTMAC_RESP_ERR_WIDTH 1 + +/* Mdio read/write Register */ +#define PHYTMAC_DATA_INDEX 0 /* Data */ +#define PHYTMAC_DATA_WIDTH 16 +#define PHYTMAC_MUST_INDEX 16 /* Must be 0b10 */ +#define PHYTMAC_MUST_WIDTH 2 +#define PHYTMAC_REG_ADDR_INDEX 18 /* Register address */ +#define PHYTMAC_REG_ADDR_WIDTH 5 +#define PHYTMAC_PHY_ADDR_INDEX 23 /* Phy address */ +#define PHYTMAC_PHY_ADDR_WIDTH 5 +#define PHYTMAC_OPS_INDEX 28 +#define PHYTMAC_OPS_WIDTH 2 +#define PHYTMAC_CLAUSE_SEL_INDEX 30 +#define PHYTMAC_CLAUSE_SEL_WIDTH 1 +#define PHYTMAC_CLAUSE_C22 1 +#define PHYTMAC_CLAUSE_C45 0 +#define PHYTMAC_OPS_C45_ADDR 0 +#define PHYTMAC_OPS_C45_WRITE 1 +#define PHYTMAC_OPS_C45_READ 3 +#define PHYTMAC_OPS_C22_WRITE 1 +#define PHYTMAC_OPS_C22_READ 2 + +/* hs mac config register */ +#define PHYTMAC_HS_SPEED_INDEX 0 +#define PHYTMAC_HS_SPEED_WIDTH 3 +#define PHYTMAC_HS_SPEED_100M 0 +#define PHYTMAC_HS_SPEED_1000M 1 +#define PHYTMAC_HS_SPEED_2500M 2 +#define PHYTMAC_HS_SPEED_5000M 3 +#define PHYTMAC_HS_SPEED_10G 4 + +/* WOL register */ +#define PHYTMAC_ARP_IP_INDEX 0 +#define PHYTMAC_ARP_IP_WIDTH 16 +#define PHYTMAC_MAGIC_INDEX 16 +#define PHYTMAC_MAGIC_WIDTH 1 +#define PHYTMAC_ARP_INDEX 17 +#define PHYTMAC_ARP_WIDTH 1 +#define PHYTMAC_UCAST_INDEX 18 +#define PHYTMAC_UCAST_WIDTH 1 +#define PHYTMAC_MCAST_INDEX 19 +#define PHYTMAC_MCAST_WIDTH 1 + +/* PCSCTRL register */ +#define PHYTMAC_AUTONEG_INDEX 12 +#define PHYTMAC_AUTONEG_WIDTH 1 +#define PHYTMAC_PCS_RESET_INDEX 15 +#define PHYTMAC_PCS_RESET_WIDTH 1 + +/* DEFAULT1 register */ +#define PHYTMAC_DBW_INDEX 25 +#define PHYTMAC_DBW_WIDTH 3 + +/* DEFAULT2 register */ +#define PHYTMAC_DAW64_INDEX 23 +#define PHYTMAC_DAW64_WIDTH 1 + +/* DEFAULT3 register */ +#define PHYTMAC_SCR2CMP_INDEX 0 +#define PHYTMAC_SCR2CMP_WIDTH 8 +#define PHYTMAC_SCR2ETH_INDEX 8 +#define PHYTMAC_SCR2ETH_WIDTH 8 + +/* DEFAULT4 register */ +#define PHYTMAC_TXDESCRD_INDEX 12 +#define PHYTMAC_TXDESCRD_WIDTH 4 +#define PHYTMAC_RXDESCRD_INDEX 8 +#define PHYTMAC_RXDESCRD_WIDTH 4 + +/* USXCTRL register */ +#define PHYTMAC_RX_EN_INDEX 0 +#define PHYTMAC_RX_EN_WIDTH 1 +#define PHYTMAC_TX_EN_INDEX 1 +#define PHYTMAC_TX_EN_WIDTH 1 +#define PHYTMAC_RX_SYNC_RESET_INDEX 2 +#define PHYTMAC_RX_SYNC_RESET_WIDTH 1 +#define PHYTMAC_SERDES_RATE_INDEX 12 +#define PHYTMAC_SERDES_RATE_WIDTH 2 +#define PHYTMAC_USX_SPEED_INDEX 14 +#define
PHYTMAC_USX_SPEED_WIDTH 3 + +/* Bitfields in USX_STATUS. */ +#define PHYTMAC_USX_PCS_LINK_INDEX 0 +#define PHYTMAC_USX_PCS_LINK_WIDTH 1 + +/* Bitfields in PHYTMAC_TISN */ +#define PHYTMAC_SUB_NSECH_INDEX 0 +#define PHYTMAC_SUB_NSECH_WIDTH 16 +#define PHYTMAC_SUB_NSECL_INDEX 24 +#define PHYTMAC_SUB_NSECL_WIDTH 8 +#define PHYTMAC_SUB_NSEC_WIDTH 24 + +/* Bitfields in PHYTMAC_TSH */ +#define PHYTMAC_SECH_INDEX 0 +#define PHYTMAC_SECH_WIDTH 16 + +/* Bitfields in PHYTMAC_TSL */ +#define PHYTMAC_SECL_INDEX 0 +#define PHYTMAC_SECL_WIDTH 32 + +/* Bitfields in PHYTMAC_TN */ +#define PHYTMAC_NSEC_INDEX 0 +#define PHYTMAC_NSEC_WIDTH 30 + +/* Bitfields in PHYTMAC_TA */ +#define PHYTMAC_INCR_SEC_INDEX 0 +#define PHYTMAC_INCR_SEC_WIDTH 30 +#define PHYTMAC_INCR_ADD_INDEX 31 +#define PHYTMAC_INCR_ADD_WIDTH 1 +#define PHYTMAC_ADJUST_SEC_MAX ((1 << PHYTMAC_INCR_SEC_WIDTH) - 1) + +/* Bitfields in PHYTMAC_TI */ +#define PHYTMAC_INCR_NS_INDEX 0 +#define PHYTMAC_INCR_NS_WIDTH 8 + +/* PHYTMAC_TXBDCTRL register */ +#define PHYTMAC_TX_TSMODE_INDEX 4 +#define PHYTMAC_TX_TSMODE_WIDTH 2 + +#define PHYTMAC_RX_TSMODE_INDEX 4 +#define PHYTMAC_RX_TSMODE_WIDTH 2 + +/* Bitfields in PHYTMAC_FDIR */ +#define PHYTMAC_QUEUE_NUM_INDEX 0 +#define PHYTMAC_QUEUE_NUM_WIDTH 4 +#define PHYTMAC_VLAN_PRI_INDEX 4 +#define PHYTMAC_VLAN_PRI_WIDTH 3 +#define PHYTMAC_VLAN_EN_INDEX 8 +#define PHYTMAC_VLAN_EN_WIDTH 1 +#define PHYTMAC_ETH_TYPE_INDEX 9 +#define PHYTMAC_ETH_TYPE_WIDTH 3 +#define PHYTMAC_ETH_EN_INDEX 12 +#define PHYTMAC_ETH_EN_WIDTH 1 +#define PHYTMAC_CA_INDEX 13 +#define PHYTMAC_CA_WIDTH 5 +#define PHYTMAC_CA_EN_INDEX 18 +#define PHYTMAC_CA_EN_WIDTH 1 +#define PHYTMAC_CB_INDEX 19 +#define PHYTMAC_CB_WIDTH 5 +#define PHYTMAC_CB_EN_INDEX 24 +#define PHYTMAC_CB_EN_WIDTH 1 +#define PHYTMAC_CC_INDEX 25 +#define PHYTMAC_CC_WIDTH 5 +#define PHYTMAC_CC_EN_INDEX 30 +#define PHYTMAC_CC_EN_WIDTH 1 + +/* Bitfields in PHYTMAC_ETHERTYPE */ +#define PHYTMAC_ETHTYPE_VALUE_INDEX 0 +#define PHYTMAC_ETHTYPE_VALUE_WIDTH 16 + +/* Bitfields in PHYTMAC_COMP1 */ +#define PHYTMAC_SPORT_INDEX 0 +#define PHYTMAC_SPORT_WIDTH 16 +#define PHYTMAC_DPORT_INDEX 16 +#define PHYTMAC_DPORT_WIDTH 16 + +/* Bitfields in PHYTMAC_COMP2 */ +#define PHYTMAC_OFFSET_INDEX 0 +#define PHYTMAC_OFFSET_WIDTH 7 +#define ETHTYPE_SIP_OFFSET 12 +#define ETHTYPE_DIP_OFFSET 16 +#define IPHEAD_SPORT_OFFSET 0 +#define IPHEAD_DPORT_OFFSET 2 +#define PHYTMAC_OFFSET_TYPE_INDEX 7 +#define PHYTMAC_OFFSET_TYPE_WIDTH 2 +#define PHYTMAC_OFFSET_BEGIN 0 +#define PHYTMAC_OFFSET_AFTER_L2HEAD 1 +#define PHYTMAC_OFFSET_AFTER_L3HEAD 2 +#define PHYTMAC_OFFSET_AFTER_L4HEAD 3 +#define PHYTMAC_DIS_MASK_INDEX 9 +#define PHYTMAC_DIS_MASK_WIDTH 1 +#define PHYTMAC_VLAN_ID_INDEX 10 +#define PHYTMAC_VLAN_ID_WIDTH 1 + +#define PHYTMAC_TSEC_WIDTH (PHYTMAC_SECH_WIDTH + PHYTMAC_SECL_WIDTH) +#define SEC_MAX_VAL (((u64)1 << PHYTMAC_TSEC_WIDTH) - 1) +#define NSEC_MAX_VAL ((1 << PHYTMAC_NSEC_WIDTH) - 1) + +/* RX DMA descriptor bitfields */ +#define PHYTMAC_RX_USED_INDEX 0 +#define PHYTMAC_RX_USED_WIDTH 1 +#define PHYTMAC_RX_WRAP_INDEX 1 +#define PHYTMAC_RX_WRAP_WIDTH 1 +#define PHYTMAC_RX_TS_VALID_INDEX 2 +#define PHYTMAC_RX_TS_VALID_WIDTH 1 +#define PHYTMAC_RX_WADDR_INDEX 2 +#define PHYTMAC_RX_WADDR_WIDTH 30 + +#define PHYTMAC_RX_FRMLEN_INDEX 0 +#define PHYTMAC_RX_FRMLEN_WIDTH 12 +#define PHYTMAC_RX_INDEX_INDEX 12 +#define PHYTMAC_RX_INDEX_WIDTH 2 +#define PHYTMAC_RX_SOF_INDEX 14 +#define PHYTMAC_RX_SOF_WIDTH 1 +#define PHYTMAC_RX_EOF_INDEX 15 +#define PHYTMAC_RX_EOF_WIDTH 1 +#define
PHYTMAC_RX_CFI_INDEX 16 +#define PHYTMAC_RX_CFI_WIDTH 1 +#define PHYTMAC_RX_VLAN_PRI_INDEX 17 +#define PHYTMAC_RX_VLAN_PRI_WIDTH 3 +#define PHYTMAC_RX_PRI_TAG_INDEX 20 +#define PHYTMAC_RX_PRI_TAG_WIDTH 1 +#define PHYTMAC_RX_VLAN_TAG_INDEX 21 +#define PHYTMAC_RX_VLAN_TAG_WIDTH 1 +#define PHYTMAC_RX_UHASH_MATCH_INDEX 29 +#define PHYTMAC_RX_UHASH_MATCH_WIDTH 1 +#define PHYTMAC_RX_MHASH_MATCH_INDEX 30 +#define PHYTMAC_RX_MHASH_MATCH_WIDTH 1 +#define PHYTMAC_RX_BROADCAST_INDEX 31 +#define PHYTMAC_RX_BROADCAST_WIDTH 1 + +#define PHYTMAC_RX_FRMLEN_MASK 0x1FFF +#define PHYTMAC_RX_JFRMLEN_MASK 0x3FFF + +/* RX checksum offload disabled: bit 24 clear in NCFGR */ +#define PHYTMAC_RX_TYPEID_MATCH_INDEX 22 +#define PHYTMAC_RX_TYPEID_MATCH_WIDTH 2 + +/* RX checksum offload enabled: bit 24 set in NCFGR */ +#define PHYTMAC_RX_CSUM_INDEX 22 +#define PHYTMAC_RX_CSUM_WIDTH 2 + +/* tx dma desc bit */ +#define PHYTMAC_TX_FRMLEN_INDEX 0 +#define PHYTMAC_TX_FRMLEN_WIDTH 14 +#define PHYTMAC_TX_LAST_INDEX 15 +#define PHYTMAC_TX_LAST_WIDTH 1 +#define PHYTMAC_TX_NOCRC_INDEX 16 +#define PHYTMAC_TX_NOCRC_WIDTH 1 +#define PHYTMAC_MSS_MFS_INDEX 16 +#define PHYTMAC_MSS_MFS_WIDTH 14 +#define PHYTMAC_TX_LSO_INDEX 17 +#define PHYTMAC_TX_LSO_WIDTH 2 +#define PHYTMAC_TX_TCP_SEQ_SRC_INDEX 19 +#define PHYTMAC_TX_TCP_SEQ_SRC_WIDTH 1 +#define PHYTMAC_TX_TS_VALID_INDEX 23 +#define PHYTMAC_TX_TS_VALID_WIDTH 1 +#define PHYTMAC_TX_BUF_EXHAUSTED_INDEX 27 +#define PHYTMAC_TX_BUF_EXHAUSTED_WIDTH 1 +#define PHYTMAC_TX_UNDERRUN_INDEX 28 +#define PHYTMAC_TX_UNDERRUN_WIDTH 1 +#define PHYTMAC_TX_ERROR_INDEX 29 +#define PHYTMAC_TX_ERROR_WIDTH 1 +#define PHYTMAC_TX_WRAP_INDEX 30 +#define PHYTMAC_TX_WRAP_WIDTH 1 +#define PHYTMAC_TX_USED_INDEX 31 +#define PHYTMAC_TX_USED_WIDTH 1 + +/* Transmit DMA buffer descriptor Word 4 */ +#define PHYTMAC_DMA_NSEC_INDEX 0 +#define PHYTMAC_DMA_NSEC_WIDTH 30 +#define PHYTMAC_DMA_SECL_INDEX 30 +#define PHYTMAC_DMA_SECL_WIDTH 2 + +/* Transmit DMA buffer descriptor Word 4 */ +#define PHYTMAC_DMA_SECH_INDEX 0 +#define PHYTMAC_DMA_SECH_WIDTH 4 +#define PHYTMAC_DMA_SEC_MASK 0x3f +#define PHYTMAC_DMA_SEC_TOP 0x40 + +/* Buffer descriptor constants */ +#define PHYTMAC_RX_CSUM_NONE 0 +#define PHYTMAC_RX_CSUM_IP_ONLY 1 +#define PHYTMAC_RX_CSUM_IP_TCP 2 +#define PHYTMAC_RX_CSUM_IP_UDP 3 + +/* limit RX checksum offload to TCP and UDP packets */ +#define PHYTMAC_RX_CSUM_CHECKED_MASK 2 + +#endif diff --git a/drivers/net/ethernet/phytium/phytmac_v2.c b/drivers/net/ethernet/phytium/phytmac_v2.c new file mode 100644 index 0000000000000..77fde5c5e2915 --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_v2.c @@ -0,0 +1,1254 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytmac.h" +#include "phytmac_v2.h" + +static int phytmac_msg_send(struct phytmac *pdata, u16 cmd_id, + u16 cmd_subid, void *data, int len, int wait) +{ + int index = 0; + struct phytmac_msg_info msg; + struct phytmac_msg_info msg_rx; + int ret = 0; + + ++pdata->msg_ring.tx_msg_tail; + if (pdata->msg_ring.tx_msg_tail > pdata->msg_ring.tx_msg_ring_size) + pdata->msg_ring.tx_msg_tail = 1; + index = pdata->msg_ring.tx_msg_tail; + + wait = 1; + memset(&msg, 0, sizeof(msg)); + memset(&msg_rx, 0, sizeof(msg_rx)); + msg.module_id = PHYTMAC_MODULE_ID_GMAC; + msg.cmd_id = cmd_id; + msg.cmd_subid = cmd_subid; + msg.flags = PHYTMAC_FLAGS_MSG_NOINT; + + if (len) + memcpy(&msg.para[0], data, len); + + if (netif_msg_hw(pdata)) { 
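+ /* Trace the outgoing command; below, the message is copied into its ring slot and the doorbell is rung by writing the new tail to PHYTMAC_TX_MSG_TAIL. */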
+ netdev_info(pdata->ndev, "tx msg: cmdid=%d, subid=%d, flags=%d, len=%d, tail=%d\n", + msg.cmd_id, msg.cmd_subid, msg.flags, len, pdata->msg_ring.tx_msg_tail); + } + + memcpy(pdata->msg_regs + PHYTMAC_MSG(index), &msg, sizeof(msg)); + PHYTMAC_WRITE(pdata, PHYTMAC_TX_MSG_TAIL, + pdata->msg_ring.tx_msg_tail | PHYTMAC_BIT(TX_MSG_INT)); + + if (wait) { + memcpy(&msg_rx, pdata->msg_regs + PHYTMAC_MSG(index), MSG_HDR_LEN); + while (!(msg_rx.flags & 0x1)) { + cpu_relax(); + memcpy(&msg_rx, pdata->msg_regs + PHYTMAC_MSG(index), MSG_HDR_LEN); + } + } + + return ret; +} + +static void phytmac_reset_hw(struct phytmac *pdata) +{ + int q; + u16 cmd_id, cmd_subid; + struct phytmac_ring_info ring; + + /* Disable and clear all interrupts and disable queues */ + for (q = 0; q < pdata->queues_max_num; ++q) { + PHYTMAC_WRITE(pdata, PHYTMAC_INT_DR(q), -1); + PHYTMAC_WRITE(pdata, PHYTMAC_INT_SR(q), -1); + PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_PTR(q), 0); + } + + /* reset hw rx/tx enable */ + cmd_id = PHYTMAC_MSG_CMD_DEFAULT; + cmd_subid = PHYTMAC_MSG_CMD_DEFAULT_RESET_HW; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + + /* reset tx ring */ + memset(&ring, 0, sizeof(ring)); + ring.queue_num = pdata->queues_max_num; + cmd_subid = PHYTMAC_MSG_CMD_DEFAULT_RESET_TX_QUEUE; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&ring), sizeof(ring), 0); + + /* reset rx ring */ + cmd_subid = PHYTMAC_MSG_CMD_DEFAULT_RESET_RX_QUEUE; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&ring), sizeof(ring), 1); +} + +static int phytmac_get_mac_addr(struct phytmac *pdata, u8 *addr) +{ + int index; + u16 cmd_id, cmd_subid; + struct phytmac_mac para; + + cmd_id = PHYTMAC_MSG_CMD_GET; + cmd_subid = PHYTMAC_MSG_CMD_GET_ADDR; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + index = pdata->msg_ring.tx_msg_tail; + if (index <= 0) + index += pdata->msg_ring.tx_msg_ring_size; + memcpy(¶, pdata->msg_regs + PHYTMAC_MSG(index) + MSG_HDR_LEN, + sizeof(struct phytmac_mac)); + + addr[0] = para.addrl & 0xff; + addr[1] = (para.addrl >> 8) & 0xff; + addr[2] = (para.addrl >> 16) & 0xff; + addr[3] = (para.addrl >> 24) & 0xff; + addr[4] = para.addrh & 0xff; + addr[5] = (para.addrh >> 8) & 0xff; + + return 0; +} + +static int phytmac_set_mac_addr(struct phytmac *pdata, const u8 *addr) +{ + u16 cmd_id; + u16 cmd_subid; + struct phytmac_mac para; + + memset(¶, 0, sizeof(para)); + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_ADDR; + para.addrl = cpu_to_le32(*((u32 *)addr)); + para.addrh = cpu_to_le16(*((u16 *)(addr + 4))); + + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); + + return 0; +} + +static int phytmac_init_hw(struct phytmac *pdata) +{ + u16 cmd_id, cmd_subid; + struct phytmac_dma_info dma; + struct phytmac_eth_info eth; + + phytmac_set_mac_addr(pdata, pdata->ndev->dev_addr); + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (pdata->capacities & PHYTMAC_CAPS_JUMBO) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_JUMBO; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_1536_FRAMES; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + + if (pdata->ndev->flags & IFF_PROMISC) { + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_PROMISE; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + } + + if (pdata->ndev->features & NETIF_F_RXCSUM) { + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_RXCSUM; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + } + + if (!(pdata->ndev->flags & IFF_BROADCAST)) { + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_BC; + phytmac_msg_send(pdata, cmd_id, cmd_subid, 
NULL, 0, 0); + } + + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_PAUSE; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_STRIPCRC; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + + memset(&dma, 0, sizeof(dma)); + cmd_subid = PHYTMAC_MSG_CMD_SET_DMA; + dma.dma_burst_length = pdata->dma_burst_length; + if (pdata->dma_addr_width) + dma.hw_dma_cap |= HW_DMA_CAP_64B; + if (pdata->ndev->features & NETIF_F_HW_CSUM) + dma.hw_dma_cap |= HW_DMA_CAP_CSUM; + if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) + dma.hw_dma_cap |= HW_DMA_CAP_PTP; + if (pdata->dma_data_width == PHYTMAC_DBW64) + dma.hw_dma_cap |= HW_DMA_CAP_DDW64; + if (pdata->dma_data_width == PHYTMAC_DBW128) + dma.hw_dma_cap |= HW_DMA_CAP_DDW128; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)&dma, sizeof(dma), 0); + + memset(ð, 0, sizeof(eth)); + cmd_subid = PHYTMAC_MSG_CMD_SET_ETH_MATCH; + eth.index = 0; + eth.etype = (uint16_t)ETH_P_IP; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)ð, sizeof(eth), 1); + + return 0; +} + +static int phytmac_enable_multicast(struct phytmac *pdata, int enable) +{ + u16 cmd_id, cmd_subid; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_MC; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_MC; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + return 0; +} + +static int phytmac_set_mc_hash(struct phytmac *pdata, unsigned long *mc_filter) +{ + u16 cmd_id, cmd_subid; + struct phytmac_mc_info para; + + memset(¶, 0, sizeof(para)); + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_HASH_MC; + para.mc_bottom = (u32)mc_filter[0]; + para.mc_top = (u32)mc_filter[1]; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); + + return 0; +} + +static int phytmac_init_ring_hw(struct phytmac *pdata) +{ + u16 cmd_id, cmd_subid; + struct phytmac_ring_info rxring; + struct phytmac_ring_info txring; + struct phytmac_rxbuf_info rxbuf; + struct phytmac_queue *queue; + u32 q; + + memset(&rxring, 0, sizeof(rxring)); + memset(&txring, 0, sizeof(txring)); + memset(&rxbuf, 0, sizeof(rxbuf)); + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_INIT_TX_RING; + txring.queue_num = pdata->queues_num; + rxring.queue_num = pdata->queues_num; + txring.hw_dma_cap |= HW_DMA_CAP_64B; + rxring.hw_dma_cap |= HW_DMA_CAP_64B; + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_PTR(q), queue->tx_head); + txring.addr[q] = queue->tx_ring_addr; + rxring.addr[q] = queue->rx_ring_addr; + } + + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&txring), sizeof(txring), 0); + + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_DMA_RX_BUFSIZE; + rxbuf.queue_num = pdata->queues_num; + rxbuf.buffer_size = pdata->rx_buffer_len / 64; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&rxbuf), sizeof(rxbuf), 0); + + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_INIT_RX_RING; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&rxring), sizeof(rxring), 0); + + return 0; +} + +static int phytmac_init_msg_ring(struct phytmac *pdata) +{ + u32 size = 0; + + pdata->msg_ring.tx_msg_tail = PHYTMAC_READ(pdata, PHYTMAC_TX_MSG_TAIL) & 0xff; + size = PHYTMAC_READ_BITS(pdata, PHYTMAC_SIZE, TXRING_SIZE); + pdata->msg_ring.tx_msg_ring_size = size; + if (pdata->msg_ring.tx_msg_tail == size) + pdata->msg_ring.tx_msg_tail = 0; + + PHYTMAC_WRITE(pdata, PHYTMAC_MSG_IMR, 0xfffffffe); + if (netif_msg_hw(pdata)) + 
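+ /* The PHYTMAC_MSG_IMR write above leaves only message interrupt bit 0 unmasked. */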
netdev_info(pdata->ndev, "mac msg ring: tx_msg_ring_size=%d, tx_msg_tail=%d\n", + size, pdata->msg_ring.tx_msg_tail); + + return 0; +} + +static int phytmac_get_feature_all(struct phytmac *pdata) +{ + u16 cmd_id, cmd_subid; + int index; + struct phytmac_feature para; + + memset(¶, 0, sizeof(para)); + cmd_id = PHYTMAC_MSG_CMD_GET; + cmd_subid = PHYTMAC_MSG_CMD_GET_CAPS; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + index = pdata->msg_ring.tx_msg_tail; + if (index <= 0) + index += pdata->msg_ring.tx_msg_ring_size; + + memcpy(¶, pdata->msg_regs + PHYTMAC_MSG(index) + MSG_HDR_LEN, + sizeof(struct phytmac_feature)); + + pdata->queues_max_num = para.queue_num; + if (para.dma_addr_width) + pdata->dma_addr_width = 64; + else + pdata->dma_addr_width = 32; + pdata->dma_data_width = para.dma_data_width; + pdata->max_rx_fs = para.max_rx_fs; + pdata->tx_bd_prefetch = (2 << (para.tx_bd_prefetch - 1)) * + sizeof(struct phytmac_dma_desc); + pdata->rx_bd_prefetch = (2 << (para.rx_bd_prefetch - 1)) * + sizeof(struct phytmac_dma_desc); + + if (netif_msg_hw(pdata)) { + netdev_info(pdata->ndev, "feature qnum=%d, daw=%d, dbw=%d, rxfs=%d, rxbd=%d, txbd=%d\n", + pdata->queues_num, pdata->dma_addr_width, pdata->dma_data_width, + pdata->max_rx_fs, pdata->rx_bd_prefetch, pdata->tx_bd_prefetch); + } + + return 0; +} + +static void phytmac_get_regs(struct phytmac *pdata, u32 *reg_buff) +{ + u16 cmd_id, cmd_subid; + u16 reg_num; + int index; + + cmd_id = PHYTMAC_MSG_CMD_GET; + cmd_subid = PHYTMAC_MSG_CMD_GET_REG_READ; + reg_num = 16; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)®_num, 2, 1); + + index = pdata->msg_ring.tx_msg_tail; + if (index <= 0) + index += pdata->msg_ring.tx_msg_ring_size; + + memcpy(reg_buff, pdata->msg_regs + PHYTMAC_MSG(index) + MSG_HDR_LEN, 64); +} + +static void phytmac_get_hw_stats(struct phytmac *pdata) +{ + u16 cmd_id, cmd_subid; + u8 count; + int i, j, index; + u32 stats[48]; + u64 val; + u64 *p = &pdata->stats.tx_octets; + + cmd_id = PHYTMAC_MSG_CMD_GET; + cmd_subid = PHYTMAC_MSG_CMD_GET_STATS; + count = 1; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)&count, sizeof(count), 0); + + cmd_id = PHYTMAC_MSG_CMD_GET; + cmd_subid = PHYTMAC_MSG_CMD_GET_STATS; + count = 2; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)&count, sizeof(count), 0); + + cmd_id = PHYTMAC_MSG_CMD_GET; + cmd_subid = PHYTMAC_MSG_CMD_GET_STATS; + count = 3; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)&count, sizeof(count), 1); + + for (i = 0; i < 3; i++) { + index = pdata->msg_ring.tx_msg_tail + i - 2; + if (index <= 0) + index += pdata->msg_ring.tx_msg_ring_size; + memcpy(&stats[i * 16], pdata->msg_regs + PHYTMAC_MSG(index) + MSG_HDR_LEN, 64); + } + + for (i = 0, j = 0; i < 44; i++) { + if (i == 0 || i == 20) { + val = (u64)stats[i + 1] << 32 | stats[i]; + *p += val; + pdata->ethtool_stats[j] = *p; + ++j; + ++p; + } else { + if (i != 1 && i != 21) { + val = stats[i]; + *p += val; + pdata->ethtool_stats[j] = *p; + ++j; + ++p; + } + } + } +} + +static void phytmac_mdio_idle(struct phytmac *pdata) +{ + u32 val; + + /* wait for end of transfer */ + val = PHYTMAC_READ(pdata, PHYTMAC_NETWORK_STATUS); + while (!(val & PHYTMAC_BIT(MIDLE))) { + cpu_relax(); + val = PHYTMAC_READ(pdata, PHYTMAC_NETWORK_STATUS); + } +} + +static int phytmac_mdio_data_read_c22(struct phytmac *pdata, int mii_id, int regnum) +{ + u16 data; + + PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C22) + | PHYTMAC_BITS(MDCOPS, PHYTMAC_C22_READ) + | PHYTMAC_BITS(PHYADDR, mii_id) + | 
PHYTMAC_BITS(REGADDR, regnum) + | PHYTMAC_BITS(CONST, 2))); + phytmac_mdio_idle(pdata); + data = PHYTMAC_READ(pdata, PHYTMAC_MDIO) & 0xffff; + phytmac_mdio_idle(pdata); + return data; +} + +static int phytmac_mdio_data_write_c22(struct phytmac *pdata, int mii_id, + int regnum, u16 data) +{ + PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C22) + | PHYTMAC_BITS(MDCOPS, PHYTMAC_C22_WRITE) + | PHYTMAC_BITS(PHYADDR, mii_id) + | PHYTMAC_BITS(REGADDR, regnum) + | PHYTMAC_BITS(VALUE, data) + | PHYTMAC_BITS(CONST, 2))); + phytmac_mdio_idle(pdata); + + return 0; +} + +static int phytmac_mdio_data_read_c45(struct phytmac *pdata, int mii_id, int devad, int regnum) +{ + u16 data; + + PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C45) + | PHYTMAC_BITS(MDCOPS, PHYTMAC_C45_ADDR) + | PHYTMAC_BITS(PHYADDR, mii_id) + | PHYTMAC_BITS(REGADDR, devad & 0x1F) + | PHYTMAC_BITS(VALUE, regnum & 0xFFFF) + | PHYTMAC_BITS(CONST, 2))); + phytmac_mdio_idle(pdata); + PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C45) + | PHYTMAC_BITS(MDCOPS, PHYTMAC_C45_READ) + | PHYTMAC_BITS(PHYADDR, mii_id) + | PHYTMAC_BITS(REGADDR, devad & 0x1F) + | PHYTMAC_BITS(VALUE, regnum & 0xFFFF) + | PHYTMAC_BITS(CONST, 2))); + phytmac_mdio_idle(pdata); + data = PHYTMAC_READ(pdata, PHYTMAC_MDIO) & 0xffff; + phytmac_mdio_idle(pdata); + return data; +} + +static int phytmac_mdio_data_write_c45(struct phytmac *pdata, int mii_id, int devad, + int regnum, u16 data) +{ + PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C45) + | PHYTMAC_BITS(MDCOPS, PHYTMAC_C45_ADDR) + | PHYTMAC_BITS(PHYADDR, mii_id) + | PHYTMAC_BITS(REGADDR, (regnum >> 16) & 0x1F) + | PHYTMAC_BITS(VALUE, regnum & 0xFFFF) + | PHYTMAC_BITS(CONST, 2))); + phytmac_mdio_idle(pdata); + PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C45) + | PHYTMAC_BITS(MDCOPS, PHYTMAC_C45_WRITE) + | PHYTMAC_BITS(PHYADDR, mii_id) + | PHYTMAC_BITS(REGADDR, (regnum >> 16) & 0x1F) + | PHYTMAC_BITS(VALUE, data) + | PHYTMAC_BITS(CONST, 2))); + phytmac_mdio_idle(pdata); + + return 0; +} + +static int phytmac_powerup_hw(struct phytmac *pdata, int on) +{ + u32 status, data0, data1, rdata1; + int ret; + + if (pdata->capacities & PHYTMAC_CAPS_LPI) { + ret = readx_poll_timeout(PHYTMAC_READ_STAT, pdata, status, !status, + 1, PHYTMAC_TIMEOUT); + if (ret) + netdev_err(pdata->ndev, "mnh status is busy"); + + ret = readx_poll_timeout(PHYTMAC_READ_DATA0, pdata, data0, + data0 & PHYTMAC_BIT(DATA0_FREE), + 1, PHYTMAC_TIMEOUT); + if (ret) + netdev_err(pdata->ndev, "mnh data0 is busy"); + + data0 = 0; + data0 = PHYTMAC_SET_BITS(data0, DATA0_MSG, PHYTMAC_MSG_PM); + data0 = PHYTMAC_SET_BITS(data0, DATA0_PRO, PHYTMAC_PRO_ID); + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_CPP_DATA0, data0); + data1 = 0; + + if (on == PHYTMAC_POWERON) { + data1 = PHYTMAC_SET_BITS(data1, DATA1_STAT, PHYTMAC_STATON); + data1 = PHYTMAC_SET_BITS(data1, DATA1_STATTYPE, PHYTMAC_STATTYPE); + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_CPP_DATA1, data1); + } else { + data1 = PHYTMAC_SET_BITS(data1, DATA1_STAT, PHYTMAC_STATOFF); + data1 = PHYTMAC_SET_BITS(data1, DATA1_STATTYPE, PHYTMAC_STATTYPE); + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_CPP_DATA1, data1); + } + + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_AP_CPP_SET, 1); + ret = readx_poll_timeout(PHYTMAC_READ_DATA0, pdata, data0, + data0 & PHYTMAC_BIT(DATA0_FREE), + 1, PHYTMAC_TIMEOUT); + if (ret) + netdev_err(pdata->ndev, "mnh data0 is busy\n"); + + rdata1 = PHYTMAC_MHU_READ(pdata, PHYTMAC_MHU_CPP_DATA1); + if (rdata1 == 
data1) + netdev_err(pdata->ndev, "gmac power %s success, data1 = %x, rdata1=%x\n", + on ? "up" : "down", data1, rdata1); + else + netdev_err(pdata->ndev, "gmac power %s failed, data1 = %x, rdata1=%x\n", + on ? "up" : "down", data1, rdata1); + } + + pdata->power_state = on; + + return 0; +} + +static int phytmac_set_wake(struct phytmac *pdata, int wake) +{ + u16 cmd_id, cmd_subid; + u8 wol = (u8)wake; + + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_WOL; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&wol), 1, 1); + + return 0; +} + +static int phytmac_enable_promise(struct phytmac *pdata, int enable) +{ + u16 cmd_id, cmd_subid; + u8 rxcsum = 0; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) { + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_PROMISE; + } else { + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_PROMISE; + if (pdata->ndev->features & NETIF_F_RXCSUM) + rxcsum = 1; + } + + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&rxcsum), 1, 1); + + return 0; +} + +static int phytmac_enable_rxcsum(struct phytmac *pdata, int enable) +{ + u16 cmd_id, cmd_subid; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_RXCSUM; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_RXCSUM; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + return 0; +} + +static int phytmac_enable_txcsum(struct phytmac *pdata, int enable) +{ + u16 cmd_id, cmd_subid; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_TXCSUM; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_TXCSUM; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + return 0; +} + +static int phytmac_enable_mdio(struct phytmac *pdata, int enable) +{ + u16 cmd_id, cmd_subid; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_MDIO; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_MDIO; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + return 0; +} + +static int phytmac_enable_autoneg(struct phytmac *pdata, int enable) +{ + u16 cmd_id, cmd_subid; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_AUTONEG; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_AUTONEG; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + pdata->autoneg = enable; + return 0; +} + +static int phytmac_enable_pause(struct phytmac *pdata, int enable) +{ + u16 cmd_id, cmd_subid; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_PAUSE; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_PAUSE; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + return 0; +} + +static int phytmac_enable_network(struct phytmac *pdata, int enable, int rx_tx) +{ + u16 cmd_id, cmd_subid; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_NETWORK; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_NETWORK; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + return 0; +} + +static int phytmac_add_fdir_entry(struct phytmac *pdata, struct ethtool_rx_flow_spec *rx_flow) +{ + struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; + struct phytmac_fdir_info fdir; + u16 cmd_id, cmd_subid; + + memset(&fdir, 0, sizeof(fdir)); + + tp4sp_v = &rx_flow->h_u.tcp_ip4_spec; + tp4sp_m = &rx_flow->m_u.tcp_ip4_spec; + if (tp4sp_m->ip4src == 0xFFFFFFFF) { + fdir.ipsrc_en = true; + fdir.ip4src = tp4sp_v->ip4src; + } + + if (tp4sp_m->ip4dst == 0xFFFFFFFF) { + fdir.ipdst_en = true; + fdir.ip4dst = tp4sp_v->ip4dst; + } + + if 
(tp4sp_m->psrc == 0xFFFF || tp4sp_m->pdst == 0xFFFF) { + fdir.port_en = true; + fdir.dstport = tp4sp_v->pdst; + fdir.srcport = tp4sp_v->psrc; + fdir.dstport_mask = tp4sp_m->pdst; + fdir.srcport_mask = tp4sp_m->psrc; + } + + fdir.location = rx_flow->location; + fdir.queue = rx_flow->ring_cookie; + + if (fdir.ipsrc_en || fdir.ipdst_en || fdir.port_en) { + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_ADD_FDIR; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&fdir), sizeof(fdir), 1); + } + + return 0; +} + +static int phytmac_del_fdir_entry(struct phytmac *pdata, struct ethtool_rx_flow_spec *rx_flow) +{ + struct phytmac_fdir_info fdir; + u16 cmd_id, cmd_subid; + + memset(&fdir, 0, sizeof(fdir)); + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_DEL_FDIR; + fdir.location = (u8)rx_flow->location; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&fdir), sizeof(fdir), 1); + + return 0; +} + +static void phytmac_tx_start(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + + PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_PTR(queue->index), queue->tx_tail); + queue->tx_xmit_more = 0; +} + +static u32 phytmac_get_irq_mask(u32 mask) +{ + u32 value = 0; + + value |= (mask & PHYTMAC_INT_TX_COMPLETE) ? PHYTMAC_BIT(TXCOMP) : 0; + value |= (mask & PHYTMAC_INT_TX_ERR) ? PHYTMAC_BIT(DMA_ERR) : 0; + value |= (mask & PHYTMAC_INT_RX_COMPLETE) ? PHYTMAC_BIT(RXCOMP) : 0; + value |= (mask & PHYTMAC_INT_RX_OVERRUN) ? PHYTMAC_BIT(RXOVERRUN) : 0; + value |= (mask & PHYTMAC_INT_RX_DESC_FULL) ? PHYTMAC_BIT(RUSED) : 0; + + return value; +} + +static u32 phytmac_get_irq_status(u32 value) +{ + u32 status = 0; + + status |= (value & PHYTMAC_BIT(TXCOMP)) ? PHYTMAC_INT_TX_COMPLETE : 0; + status |= (value & PHYTMAC_BIT(DMA_ERR)) ? PHYTMAC_INT_TX_ERR : 0; + status |= (value & PHYTMAC_BIT(RXCOMP)) ? PHYTMAC_INT_RX_COMPLETE : 0; + status |= (value & PHYTMAC_BIT(RXOVERRUN)) ? PHYTMAC_INT_RX_OVERRUN : 0; + status |= (value & PHYTMAC_BIT(RUSED)) ? PHYTMAC_INT_RX_DESC_FULL : 0; + + return status; +} + +static void phytmac_enable_irq(struct phytmac *pdata, + int queue_index, u32 mask) +{ + u32 value; + + value = phytmac_get_irq_mask(mask); + PHYTMAC_WRITE(pdata, PHYTMAC_INT_ER(queue_index), value); +} + +static void phytmac_disable_irq(struct phytmac *pdata, + int queue_index, u32 mask) +{ + u32 value; + + value = phytmac_get_irq_mask(mask); + PHYTMAC_WRITE(pdata, PHYTMAC_INT_DR(queue_index), value); +} + +static void phytmac_clear_irq(struct phytmac *pdata, + int queue_index, u32 mask) +{ + u32 value; + + value = phytmac_get_irq_mask(mask); + PHYTMAC_WRITE(pdata, PHYTMAC_INT_SR(queue_index), value); +} + +static unsigned int phytmac_get_irq(struct phytmac *pdata, int queue_index) +{ + u32 status; + u32 value; + + value = PHYTMAC_READ(pdata, PHYTMAC_INT_SR(queue_index)); + status = phytmac_get_irq_status(value); + + return status; +} + +static void phytmac_interface_config(struct phytmac *pdata, unsigned int mode, + const struct phylink_link_state *state) +{ + struct phytmac_interface_info para; + u16 cmd_id, cmd_subid; + + memset(&para, 0, sizeof(para)); + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_MAC_CONFIG; + para.interface = state->interface; + para.autoneg = (mode == MLO_AN_FIXED ? 
0 : 1); + para.speed = state->speed; + para.duplex = state->duplex; + pdata->autoneg = para.autoneg; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&para), sizeof(para), 1); +} + +static int phytmac_interface_linkup(struct phytmac *pdata, phy_interface_t interface, + int speed, int duplex) +{ + struct phytmac_interface_info para; + u16 cmd_id, cmd_subid; + + memset(&para, 0, sizeof(para)); + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_MAC_LINK_CONFIG; + para.interface = interface; + para.duplex = duplex; + para.speed = speed; + para.autoneg = pdata->autoneg; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&para), sizeof(para), 1); + + return 0; +} + +static int phytmac_interface_linkdown(struct phytmac *pdata) +{ + return 0; +} + +static int phytmac_pcs_linkup(struct phytmac *pdata, phy_interface_t interface, + int speed, int duplex) +{ + struct phytmac_interface_info para; + u16 cmd_id, cmd_subid; + + if (interface == PHY_INTERFACE_MODE_USXGMII || + interface == PHY_INTERFACE_MODE_10GBASER) { + memset(&para, 0, sizeof(para)); + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_PCS_LINK_UP; + para.interface = interface; + para.duplex = duplex; + para.speed = speed; + para.autoneg = 0; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&para), sizeof(para), 1); + } + + return 0; +} + +static int phytmac_pcs_linkdown(struct phytmac *pdata) +{ + return 0; +} + +static unsigned int phytmac_pcs_get_link(struct phytmac *pdata, phy_interface_t interface) +{ + if (interface == PHY_INTERFACE_MODE_SGMII || + interface == PHY_INTERFACE_MODE_2500BASEX) + return PHYTMAC_READ_BITS(pdata, PHYTMAC_NETWORK_STATUS, LINK); + else if (interface == PHY_INTERFACE_MODE_USXGMII || + interface == PHY_INTERFACE_MODE_10GBASER) + return PHYTMAC_READ_BITS(pdata, PHYTMAC_USX_LINK_STATUS, USX_LINK); + + return 0; +} + +static unsigned int phytmac_tx_map_desc(struct phytmac_queue *queue, + u32 tx_tail, struct packet_info *packet) +{ + unsigned int i, ctrl; + struct phytmac *pdata = queue->pdata; + struct phytmac_dma_desc *desc; + struct phytmac_tx_skb *tx_skb; + unsigned int eof = 1; + + i = tx_tail; + + do { + i--; + tx_skb = phytmac_get_tx_skb(queue, i); + desc = phytmac_get_tx_desc(queue, i); + + ctrl = (u32)tx_skb->length; + if (eof) { + ctrl |= PHYTMAC_BIT(TXLAST); + eof = 0; + } + + if (unlikely(i == (pdata->tx_ring_size - 1))) + ctrl |= PHYTMAC_BIT(TXWRAP); + + if (i == queue->tx_tail) { + ctrl |= PHYTMAC_BITS(TXLSO, packet->lso); + ctrl |= PHYTMAC_BITS(TXTCP_SEQ_SRC, packet->seq); + if (packet->nocrc) + ctrl |= PHYTMAC_BIT(TXNOCRC); + } else { + ctrl |= PHYTMAC_BITS(MSSMFS, packet->mss); + } + + desc->desc2 = upper_32_bits(tx_skb->addr); + desc->desc0 = lower_32_bits(tx_skb->addr); + /* Make the buffer address visible before desc1 is written */ + wmb(); + desc->desc1 = ctrl; + } while (i != queue->tx_tail); + + return 0; +} + +static void phytmac_init_rx_map_desc(struct phytmac_queue *queue, + u32 index) +{ + struct phytmac_dma_desc *desc; + + desc = phytmac_get_rx_desc(queue, index); + + desc->desc1 = 0; + /* Make the new descriptor visible to hardware */ + dma_wmb(); + desc->desc0 |= PHYTMAC_BIT(RXUSED); +} + +static unsigned int phytmac_rx_map_desc(struct phytmac_queue *queue, u32 index, dma_addr_t addr) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_dma_desc *desc; + + desc = phytmac_get_rx_desc(queue, index); + + if (addr) { + if (unlikely(index == (pdata->rx_ring_size - 1))) + addr |= PHYTMAC_BIT(RXWRAP); + desc->desc1 = 0; + desc->desc2 = upper_32_bits(addr); + /* Make the new descriptor visible to hardware */ 
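+ /* desc0 carries the RXUSED ownership bit, so it must be written last; the dma_wmb() below orders the desc1/desc2 setup writes ahead of the handoff to hardware. */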
+ dma_wmb(); + desc->desc0 = lower_32_bits(addr); + } else { + desc->desc1 = 0; + /* make the new descriptor visible to hardware */ + dma_wmb(); + desc->desc0 &= ~PHYTMAC_BIT(RXUSED); + } + + return 0; +} + +static int phytmac_tx_complete(const struct phytmac_dma_desc *desc) +{ + return PHYTMAC_GET_BITS(desc->desc1, TXUSED); +} + +static int phytmac_rx_complete(const struct phytmac_dma_desc *desc) +{ + return PHYTMAC_GET_BITS(desc->desc0, RXUSED); +} + +static int phytmac_rx_pkt_len(struct phytmac *pdata, const struct phytmac_dma_desc *desc) +{ + if (pdata->capacities & PHYTMAC_CAPS_JUMBO) + return desc->desc1 & PHYTMAC_RXJFRMLEN_MASK; + else + return desc->desc1 & PHYTMAC_RXFRMLEN_MASK; +} + +static dma_addr_t phytmac_get_desc_addr(const struct phytmac_dma_desc *desc) +{ + dma_addr_t addr = 0; + + addr = ((u64)(desc->desc2) << 32); + + addr |= (desc->desc0 & 0xfffffffc); + return addr; +} + +static bool phytmac_rx_checksum(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + u32 check = value >> PHYTMAC_RXCSUM_INDEX & 0x3; + + return (check == PHYTMAC_RXCSUM_IP_TCP || check == PHYTMAC_RXCSUM_IP_UDP); +} + +static bool phytmac_rx_single_buffer(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + + return ((value & PHYTMAC_BIT(RXSOF)) && (value & PHYTMAC_BIT(RXEOF))); +} + +static bool phytmac_rx_sof(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + + return (value & PHYTMAC_BIT(RXSOF)); +} + +static bool phytmac_rx_eof(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + + return (value & PHYTMAC_BIT(RXEOF)); +} + +static void phytmac_clear_rx_desc(struct phytmac_queue *queue, int begin, int end) +{ + unsigned int frag; + unsigned int tmp = end; + struct phytmac_dma_desc *desc; + + /* 'tmp' unwraps the ring index so the walk terminates when begin > end */ + if (begin > end) + tmp = end + queue->pdata->rx_ring_size; + + for (frag = begin; frag != tmp; frag++) { + desc = phytmac_get_rx_desc(queue, frag); + desc->desc0 &= ~PHYTMAC_BIT(RXUSED); + } +} + +static void phytmac_clear_tx_desc(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_dma_desc *desc = NULL; + struct phytmac_tx_skb *tx_skb = NULL; + int i; + + for (i = 0; i < queue->pdata->tx_ring_size; i++) { + desc = phytmac_get_tx_desc(queue, i); + tx_skb = phytmac_get_tx_skb(queue, i); + desc->desc2 = upper_32_bits(tx_skb->addr); + desc->desc0 = lower_32_bits(tx_skb->addr); + /* make the new descriptor visible to hardware */ + wmb(); + desc->desc1 = PHYTMAC_BIT(TXUSED); + } + desc->desc1 |= PHYTMAC_BIT(TXWRAP); + PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_PTR(queue->index), queue->tx_tail); +} + +static void phytmac_get_time(struct phytmac *pdata, struct timespec64 *ts) +{ + u32 ns, secl, sech; + + ns = PHYTMAC_READ(pdata, PHYTMAC_TIMER_NSEC); + secl = PHYTMAC_READ(pdata, PHYTMAC_TIMER_SEC); + sech = PHYTMAC_READ(pdata, PHYTMAC_TIMER_MSB_SEC); + + ts->tv_nsec = ns; + ts->tv_sec = (((u64)sech << 32) | secl) & TIMER_SEC_MAX_VAL; +} + +static void phytmac_set_time(struct phytmac *pdata, time64_t sec, long nsec) +{ + u32 secl, sech; + + secl = (u32)sec; + sech = (sec >> 32) & (0xffff); + + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_NSEC, 0); + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_MSB_SEC, sech); + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_SEC, secl); + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_NSEC, nsec); +} + +static void phytmac_clear_time(struct phytmac *pdata) +{ + u32 value; + + pdata->ts_incr.sub_ns = 0; + pdata->ts_incr.ns = 0; + + value = PHYTMAC_READ(pdata, PHYTMAC_TIMER_INCR); + value = PHYTMAC_SET_BITS(value, INCR_NSEC, 0); + PHYTMAC_WRITE(pdata, 
PHYTMAC_TIMER_INCR, value); + + value = PHYTMAC_READ(pdata, PHYTMAC_TIMER_INCR_SUB_NSEC); + value = PHYTMAC_SET_BITS(value, INCR_SNSEC, 0); + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_INCR_SUB_NSEC, value); + + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_ADJUST, 0); +} + +static int phytmac_set_tsmode(struct phytmac *pdata, struct ts_ctrl *ctrl) +{ + u16 cmd_id, cmd_subid; + struct phytmac_ts_config para; + + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_TS_CONFIG; + para.tx_mode = ctrl->tx_control; + para.rx_mode = ctrl->rx_control; + para.one_step = ctrl->one_step; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&para), sizeof(para), 1); + + return 0; +} + +static int phytmac_set_tsincr(struct phytmac *pdata, struct ts_incr *incr) +{ + u32 value; + + value = PHYTMAC_BITS(INCR_SNSEC, incr->sub_ns); + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_INCR_SUB_NSEC, value); + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_INCR, incr->ns); + + return 0; +} + +static void phytmac_ptp_init_hw(struct phytmac *pdata) +{ + struct timespec64 ts; + + ts = ns_to_timespec64(ktime_to_ns(ktime_get_real())); + phytmac_set_time(pdata, ts.tv_sec, ts.tv_nsec); + + phytmac_set_tsincr(pdata, &pdata->ts_incr); +} + +static int phytmac_adjust_fine(struct phytmac *pdata, long ppm, bool negative) +{ + struct ts_incr ts_incr; + u32 tmp; + u64 adj; + + ts_incr.ns = pdata->ts_incr.ns; + ts_incr.sub_ns = pdata->ts_incr.sub_ns; + + /* scaled layout: ns(8 bits) | sub-ns fraction(24 bits) */ + tmp = ((u64)ts_incr.ns << PHYTMAC_INCR_SNSECL_INDEX) + ts_incr.sub_ns; + adj = ((u64)ppm * tmp + (USEC_PER_SEC >> 1)) >> PHYTMAC_INCR_SNSECL_INDEX; + + adj = div_u64(adj, USEC_PER_SEC); + adj = negative ? (tmp - adj) : (tmp + adj); + + ts_incr.ns = (adj >> PHYTMAC_INCR_SNSEC_WIDTH) + & ((1 << PHYTMAC_INCR_NSEC_WIDTH) - 1); + ts_incr.sub_ns = adj & ((1 << PHYTMAC_INCR_SNSEC_WIDTH) - 1); + + phytmac_set_tsincr(pdata, &ts_incr); + + return 0; +} + +static int phytmac_adjust_time(struct phytmac *pdata, s64 delta, int neg) +{ + u32 adj; + + if (delta > PHYTMAC_ASEC_MAX) { + struct timespec64 now, then; + + then = ns_to_timespec64(delta); + phytmac_get_time(pdata, &now); + now = timespec64_add(now, then); + phytmac_set_time(pdata, now.tv_sec, now.tv_nsec); + } else { + adj = (neg << PHYTMAC_AADD_INDEX) | delta; + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_ADJUST, adj); + } + + return 0; +} + +static int phytmac_ts_valid(struct phytmac *pdata, struct phytmac_dma_desc *desc, int direction) +{ + int ts_valid = 0; + + if (direction == PHYTMAC_TX) + ts_valid = desc->desc1 & PHYTMAC_BIT(TXTSVALID); + else if (direction == PHYTMAC_RX) + ts_valid = desc->desc0 & PHYTMAC_BIT(RXTSVALID); + return ts_valid; +} + +static void phytmac_get_dma_ts(struct phytmac *pdata, u32 ts_1, u32 ts_2, struct timespec64 *ts) +{ + struct timespec64 ts2; + + ts->tv_sec = (PHYTMAC_GET_BITS(ts_2, TS_SECH) << 2) | + PHYTMAC_GET_BITS(ts_1, TS_SECL); + ts->tv_nsec = PHYTMAC_GET_BITS(ts_1, TS_NSEC); + + phytmac_get_time(pdata, &ts2); + + if (((ts->tv_sec ^ ts2.tv_sec) & (PHYTMAC_TS_SEC_TOP >> 1)) != 0) + ts->tv_sec -= PHYTMAC_TS_SEC_TOP; + + ts->tv_sec += (ts2.tv_sec & (~PHYTMAC_TS_SEC_MASK)); +} + +static unsigned int phytmac_get_ts_rate(struct phytmac *pdata) +{ + return 300000000; +} + +struct phytmac_hw_if phytmac_2p0_hw = { + .init_msg_ring = phytmac_init_msg_ring, + .reset_hw = phytmac_reset_hw, + .init_hw = phytmac_init_hw, + .init_ring_hw = phytmac_init_ring_hw, + .get_feature = phytmac_get_feature_all, + .get_regs = phytmac_get_regs, + .get_stats = phytmac_get_hw_stats, + .set_mac_address = 
phytmac_set_mac_addr, + .get_mac_address = phytmac_get_mac_addr, + .mdio_read = phytmac_mdio_data_read_c22, + .mdio_write = phytmac_mdio_data_write_c22, + .mdio_read_c45 = phytmac_mdio_data_read_c45, + .mdio_write_c45 = phytmac_mdio_data_write_c45, + .poweron = phytmac_powerup_hw, + .set_wol = phytmac_set_wake, + .enable_promise = phytmac_enable_promise, + .enable_multicast = phytmac_enable_multicast, + .set_hash_table = phytmac_set_mc_hash, + .enable_rx_csum = phytmac_enable_rxcsum, + .enable_tx_csum = phytmac_enable_txcsum, + .enable_mdio_control = phytmac_enable_mdio, + .enable_autoneg = phytmac_enable_autoneg, + .enable_pause = phytmac_enable_pause, + .enable_network = phytmac_enable_network, + .add_fdir_entry = phytmac_add_fdir_entry, + .del_fdir_entry = phytmac_del_fdir_entry, + + /* mac config */ + .mac_config = phytmac_interface_config, + .mac_linkup = phytmac_interface_linkup, + .mac_linkdown = phytmac_interface_linkdown, + .pcs_linkup = phytmac_pcs_linkup, + .pcs_linkdown = phytmac_pcs_linkdown, + .get_link = phytmac_pcs_get_link, + + /* irq */ + .enable_irq = phytmac_enable_irq, + .disable_irq = phytmac_disable_irq, + .clear_irq = phytmac_clear_irq, + .get_irq = phytmac_get_irq, + + /* tx and rx */ + .tx_map = phytmac_tx_map_desc, + .transmit = phytmac_tx_start, + .tx_complete = phytmac_tx_complete, + .rx_complete = phytmac_rx_complete, + .get_rx_pkt_len = phytmac_rx_pkt_len, + .get_desc_addr = phytmac_get_desc_addr, + .init_rx_map = phytmac_init_rx_map_desc, + .rx_map = phytmac_rx_map_desc, + .rx_checksum = phytmac_rx_checksum, + .rx_single_buffer = phytmac_rx_single_buffer, + .rx_pkt_start = phytmac_rx_sof, + .rx_pkt_end = phytmac_rx_eof, + .clear_rx_desc = phytmac_clear_rx_desc, + .clear_tx_desc = phytmac_clear_tx_desc, + + /* ptp */ + .init_ts_hw = phytmac_ptp_init_hw, + .set_time = phytmac_set_time, + .clear_time = phytmac_clear_time, + .get_time = phytmac_get_time, + .set_ts_config = phytmac_set_tsmode, + .set_incr = phytmac_set_tsincr, + .adjust_fine = phytmac_adjust_fine, + .adjust_time = phytmac_adjust_time, + .ts_valid = phytmac_ts_valid, + .get_timestamp = phytmac_get_dma_ts, + .get_ts_rate = phytmac_get_ts_rate, +}; +EXPORT_SYMBOL_GPL(phytmac_2p0_hw); diff --git a/drivers/net/ethernet/phytium/phytmac_v2.h b/drivers/net/ethernet/phytium/phytmac_v2.h new file mode 100644 index 0000000000000..92e4806ac2c1f --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_v2.h @@ -0,0 +1,362 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _PHYTMAC_V2_H +#define _PHYTMAC_V2_H + +extern struct phytmac_hw_if phytmac_2p0_hw; + +#define PHYTMAC_MSG_SRAM_SIZE 4096 +#define MSG_HDR_LEN 8 + +#define PHYTMAC_TX_MSG_HEAD 0x000 +#define PHYTMAC_TX_MSG_TAIL 0x004 +#define PHYTMAC_RX_MSG_HEAD 0x008 +#define PHYTMAC_RX_MSG_TAIL 0x00c +#define PHYTMAC_MSG_IMR 0x020 +#define PHYTMAC_MSG_ISR 0x02c + +#define PHYTMAC_SIZE 0x0048 +#define PHYTMAC_NETWORK_STATUS 0x0240 +#define PHYTMAC_PCS_AN_LP 0x0244 +#define PHYTMAC_USX_LINK_STATUS 0x0248 +#define PHYTMAC_MDIO 0x0264 +#define PHYTMAC_TIMER_INCR_SUB_NSEC 0x024c +#define PHYTMAC_TIMER_INCR 0x0250 +#define PHYTMAC_TIMER_MSB_SEC 0x0254 +#define PHYTMAC_TIMER_SEC 0x0258 +#define PHYTMAC_TIMER_NSEC 0x025c +#define PHYTMAC_TIMER_ADJUST 0x0260 +#define PHYTMAC_MSG(i) (((i) - 1) * 0x48) + +#define PHYTMAC_MODULE_ID_GMAC 0x60 +#define PHYTMAC_FLAGS_MSG_COMP 0x1 +#define PHYTMAC_FLAGS_MSG_NOINT 0x2 + +/* Bitfields in PHYTMAC_TX_MSG_TAIL */ +#define PHYTMAC_TX_MSG_INT_INDEX 16 +#define PHYTMAC_TX_MSG_INT_WIDTH 1 + +/* Bitfields in PHYTMAC_MSG_ISR */ 
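+/* ISR bit 0 flags command/message completion; phytmac_init_msg_ring() writes IMR = 0xfffffffe, which presumably masks every other source and leaves only this bit enabled. */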
+#define PHYTMAC_MSG_COMPLETE_INDEX 0 +#define PHYTMAC_MSG_COMPLETE_WIDTH 1 + +/* Bitfields in PHYTMAC_SIZE */ +#define PHYTMAC_MEM_SIZE_INDEX 0 +#define PHYTMAC_MEM_SIZE_WIDTH 4 +#define PHYTMAC_TXRING_SIZE_INDEX 8 +#define PHYTMAC_TXRING_SIZE_WIDTH 6 + +/* Bitfields in PHYTMAC_TIMER_INCR_SUB_NSEC */ +#define PHYTMAC_INCR_SNSECH_INDEX 0 +#define PHYTMAC_INCR_SNSECH_WIDTH 16 +#define PHYTMAC_INCR_SNSECL_INDEX 24 +#define PHYTMAC_INCR_SNSECL_WIDTH 8 +#define PHYTMAC_INCR_SNSEC_WIDTH 24 + +/* Bitfields in PHYTMAC_TIMER_INCR_SUB_NSEC */ +#define PHYTMAC_INCR_SNSEC_INDEX 0 +#define PHYTMAC_INCR_SNSEC_WIDTH 24 + +/* Bitfields in PHYTMAC_TIMER_INCR */ +#define PHYTMAC_INCR_NSEC_INDEX 0 +#define PHYTMAC_INCR_NSEC_WIDTH 8 + +/* Bitfields in PHYTMAC_TIMER_MSB_SEC */ +#define PHYTMAC_TIMER_SECH_INDEX 0 +#define PHYTMAC_TIMER_SECH_WIDTH 16 + +/* Bitfields in PHYTMAC_TIMER_SEC */ +#define PHYTMAC_TIMER_SECL_INDEX 0 +#define PHYTMAC_TIMER_SECL_WIDTH 32 + +/* Bitfields in PHYTMAC_TIMER_NSEC */ +#define PHYTMAC_TIMER_NSEC_INDEX 0 +#define PHYTMAC_TIMER_NSEC_WIDTH 30 + +/* Bitfields in PHYTMAC_TIMER_ADJUST */ +#define PHYTMAC_ASEC_INDEX 0 +#define PHYTMAC_ASEC_WIDTH 30 +#define PHYTMAC_AADD_INDEX 31 +#define PHYTMAC_AADD_WIDTH 1 +#define PHYTMAC_ASEC_MAX ((1 << PHYTMAC_ASEC_WIDTH) - 1) + +#define PHYTMAC_TIMER_SEC_WIDTH (PHYTMAC_TIMER_SECH_WIDTH + PHYTMAC_TIMER_SECL_WIDTH) +#define TIMER_SEC_MAX_VAL (((u64)1 << PHYTMAC_TIMER_SEC_WIDTH) - 1) +#define TIMER_NSEC_MAX_VAL ((1 << PHYTMAC_TIMER_NSEC_WIDTH) - 1) + +#define PHYTMAC_TAIL_PTR(i) (0x0100 + ((i) * 4)) +#define PHYTMAC_INT_ER(i) (0x0140 + ((i) * 4)) +#define PHYTMAC_INT_DR(i) (0x0180 + ((i) * 4)) +#define PHYTMAC_INT_MR(i) (0x01c0 + ((i) * 4)) +#define PHYTMAC_INT_SR(i) (0x0200 + ((i) * 4)) + +#define PHYTMAC_LINK_INDEX 0 /* PCS link status */ +#define PHYTMAC_LINK_WIDTH 1 +#define PHYTMAC_MIDLE_INDEX 2 /* Mdio idle */ +#define PHYTMAC_MIDLE_WIDTH 1 + +/* Int status/Enable/Disable/Mask Register */ +#define PHYTMAC_RXCOMP_INDEX 1 /* Rx complete */ +#define PHYTMAC_RXCOMP_WIDTH 1 +#define PHYTMAC_RUSED_INDEX 2 /* Rx used bit read */ +#define PHYTMAC_RUSED_WIDTH 1 +#define PHYTMAC_DMA_ERR_INDEX 6 /* AMBA error */ +#define PHYTMAC_DMA_ERR_WIDTH 1 +#define PHYTMAC_TXCOMP_INDEX 7 /* Tx complete */ +#define PHYTMAC_TXCOMP_WIDTH 1 +#define PHYTMAC_RXOVERRUN_INDEX 10 /* Rx overrun */ +#define PHYTMAC_RXOVERRUN_WIDTH 1 +#define PHYTMAC_RESP_ERR_INDEX 11 /* Resp not ok */ +#define PHYTMAC_RESP_ERR_WIDTH 1 + +/* PCS auto-negotiation link partner */ +#define PHYTMAC_AUTO_NEG_INDEX 12 +#define PHYTMAC_AUTO_NEG_WIDTH 1 + +/* Bitfields in PHYTMAC_USX_LINK_STATUS. 
*/ +#define PHYTMAC_USX_LINK_INDEX 0 +#define PHYTMAC_USX_LINK_WIDTH 1 + +/* Mdio read/write Register */ +#define PHYTMAC_VALUE_INDEX 0 /* value */ +#define PHYTMAC_VALUE_WIDTH 16 +#define PHYTMAC_CONST_INDEX 16 /* Must Be 10 */ +#define PHYTMAC_CONST_WIDTH 2 +#define PHYTMAC_REGADDR_INDEX 18 /* Register address */ +#define PHYTMAC_REGADDR_WIDTH 5 +#define PHYTMAC_PHYADDR_INDEX 23 /* Phy address */ +#define PHYTMAC_PHYADDR_WIDTH 5 +#define PHYTMAC_MDCOPS_INDEX 28 +#define PHYTMAC_MDCOPS_WIDTH 2 +#define PHYTMAC_CLAUSESEL_INDEX 30 +#define PHYTMAC_CLAUSESEL_WIDTH 1 +#define PHYTMAC_C22 1 +#define PHYTMAC_C45 0 +#define PHYTMAC_C45_ADDR 0 +#define PHYTMAC_C45_WRITE 1 +#define PHYTMAC_C45_READ 3 +#define PHYTMAC_C22_WRITE 1 +#define PHYTMAC_C22_READ 2 + +/* rx dma desc bit */ +/* DMA descriptor bitfields */ +#define PHYTMAC_RXUSED_INDEX 0 +#define PHYTMAC_RXUSED_WIDTH 1 +#define PHYTMAC_RXWRAP_INDEX 1 +#define PHYTMAC_RXWRAP_WIDTH 1 +#define PHYTMAC_RXTSVALID_INDEX 2 +#define PHYTMAC_RXTSVALID_WIDTH 1 +#define PHYTMAC_RXWADDR_INDEX 2 +#define PHYTMAC_RXWADDR_WIDTH 30 + +#define PHYTMAC_RXFRMLEN_INDEX 0 +#define PHYTMAC_RXFRMLEN_WIDTH 12 +#define PHYTMAC_RXINDEX_INDEX 12 +#define PHYTMAC_RXINDEX_WIDTH 2 +#define PHYTMAC_RXSOF_INDEX 14 +#define PHYTMAC_RXSOF_WIDTH 1 +#define PHYTMAC_RXEOF_INDEX 15 +#define PHYTMAC_RXEOF_WIDTH 1 + +#define PHYTMAC_RXFRMLEN_MASK 0x1FFF +#define PHYTMAC_RXJFRMLEN_MASK 0x3FFF + +#define PHYTMAC_RXTYPEID_MATCH_INDEX 22 +#define PHYTMAC_RXTYPEID_MATCH_WIDTH 2 +#define PHYTMAC_RXCSUM_INDEX 22 +#define PHYTMAC_RXCSUM_WIDTH 2 + +/* Buffer descriptor constants */ +#define PHYTMAC_RXCSUM_NONE 0 +#define PHYTMAC_RXCSUM_IP 1 +#define PHYTMAC_RXCSUM_IP_TCP 2 +#define PHYTMAC_RXCSUM_IP_UDP 3 + +#define PHYTMAC_TXFRMLEN_INDEX 0 +#define PHYTMAC_TXFRMLEN_WIDTH 14 +#define PHYTMAC_TXLAST_INDEX 15 +#define PHYTMAC_TXLAST_WIDTH 1 +#define PHYTMAC_TXNOCRC_INDEX 16 +#define PHYTMAC_TXNOCRC_WIDTH 1 +#define PHYTMAC_MSSMFS_INDEX 16 +#define PHYTMAC_MSSMFS_WIDTH 14 +#define PHYTMAC_TXLSO_INDEX 17 +#define PHYTMAC_TXLSO_WIDTH 2 +#define PHYTMAC_TXTCP_SEQ_SRC_INDEX 19 +#define PHYTMAC_TXTCP_SEQ_SRC_WIDTH 1 +#define PHYTMAC_TXTSVALID_INDEX 23 +#define PHYTMAC_TXTSVALID_WIDTH 1 +#define PHYTMAC_TXWRAP_INDEX 30 +#define PHYTMAC_TXWRAP_WIDTH 1 +#define PHYTMAC_TXUSED_INDEX 31 +#define PHYTMAC_TXUSED_WIDTH 1 + +/* dma ts */ +#define PHYTMAC_TS_NSEC_INDEX 0 +#define PHYTMAC_TS_NSEC_WIDTH 30 +#define PHYTMAC_TS_SECL_INDEX 30 +#define PHYTMAC_TS_SECL_WIDTH 2 +#define PHYTMAC_TS_SECH_INDEX 0 +#define PHYTMAC_TS_SECH_WIDTH 4 +#define PHYTMAC_TS_SEC_MASK 0x3f +#define PHYTMAC_TS_SEC_TOP 0x40 + +#define HW_DMA_CAP_64B 0x1 +#define HW_DMA_CAP_CSUM 0x2 +#define HW_DMA_CAP_PTP 0x4 +#define HW_DMA_CAP_DDW64 0x8 +#define HW_DMA_CAP_DDW128 0x10 + +#define PHYTMAC_DBW64 2 +#define PHYTMAC_DBW128 4 + +enum phytmac_msg_cmd_id { + PHYTMAC_MSG_CMD_DEFAULT = 0, + PHYTMAC_MSG_CMD_SET, + PHYTMAC_MSG_CMD_GET, + PHYTMAC_MSG_CMD_DATA, + PHYTMAC_MSG_CMD_REPORT, +}; + +enum phytmac_default_subid { + PHYTMAC_MSG_CMD_DEFAULT_RESET_HW = 0, + PHYTMAC_MSG_CMD_DEFAULT_RESET_TX_QUEUE, + PHYTMAC_MSG_CMD_DEFAULT_RESET_RX_QUEUE, +}; + +enum phytmac_set_subid { + PHYTMAC_MSG_CMD_SET_INIT_ALL = 0, + PHYTMAC_MSG_CMD_SET_INIT_RING = 1, + PHYTMAC_MSG_CMD_SET_INIT_TX_RING = 2, + PHYTMAC_MSG_CMD_SET_INIT_RX_RING = 3, + PHYTMAC_MSG_CMD_SET_MAC_CONFIG = 4, + PHYTMAC_MSG_CMD_SET_ADDR = 5, + PHYTMAC_MSG_CMD_SET_DMA_RX_BUFSIZE = 6, + PHYTMAC_MSG_CMD_SET_DMA = 7, + PHYTMAC_MSG_CMD_SET_CAPS = 8, + PHYTMAC_MSG_CMD_SET_TS_CONFIG = 9, + 
PHYTMAC_MSG_CMD_SET_INIT_TX_ENABLE_TRANSMIT = 10, + PHYTMAC_MSG_CMD_SET_INIT_RX_ENABLE_RECEIVE = 11, + PHYTMAC_MSG_CMD_SET_ENABLE_NETWORK = 12, + PHYTMAC_MSG_CMD_SET_DISABLE_NETWORK = 13, + PHYTMAC_MSG_CMD_SET_ENABLE_MDIO = 14, + PHYTMAC_MSG_CMD_SET_DISABLE_MDIO = 15, + PHYTMAC_MSG_CMD_SET_ENABLE_TXCSUM = 16, + PHYTMAC_MSG_CMD_SET_DISABLE_TXCSUM = 17, + PHYTMAC_MSG_CMD_SET_ENABLE_RXCSUM = 18, + PHYTMAC_MSG_CMD_SET_DISABLE_RXCSUM = 19, + PHYTMAC_MSG_CMD_SET_ENABLE_PROMISE = 20, + PHYTMAC_MSG_CMD_SET_DISABLE_PROMISE = 21, + PHYTMAC_MSG_CMD_SET_ENABLE_MC = 22, + PHYTMAC_MSG_CMD_SET_DISABLE_MC = 23, + PHYTMAC_MSG_CMD_SET_ENABLE_HASH_MC = 24, + PHYTMAC_MSG_CMD_SET_ENABLE_PAUSE = 25, + PHYTMAC_MSG_CMD_SET_DISABLE_PAUSE = 26, + PHYTMAC_MSG_CMD_SET_ENABLE_JUMBO = 27, + PHYTMAC_MSG_CMD_SET_DISABLE_JUMBO = 28, + PHYTMAC_MSG_CMD_SET_ENABLE_1536_FRAMES = 29, + PHYTMAC_MSG_CMD_SET_ENABLE_STRIPCRC = 30, + PHYTMAC_MSG_CMD_SET_DISABLE_STRIPCRC = 31, + PHYTMAC_MSG_CMD_SET_PCS_LINK_UP = 32, + PHYTMAC_MSG_CMD_SET_PCS_LINK_DOWN = 33, + PHYTMAC_MSG_CMD_SET_MAC_LINK_CONFIG = 34, + PHYTMAC_MSG_CMD_SET_REG_WRITE = 35, + PHYTMAC_MSG_CMD_SET_ENABLE_BC = 36, + PHYTMAC_MSG_CMD_SET_DISABLE_BC = 37, + PHYTMAC_MSG_CMD_SET_ETH_MATCH = 38, + PHYTMAC_MSG_CMD_SET_ADD_FDIR = 39, + PHYTMAC_MSG_CMD_SET_DEL_FDIR = 40, + PHYTMAC_MSG_CMD_SET_ENABLE_AUTONEG = 41, + PHYTMAC_MSG_CMD_SET_DISABLE_AUTONEG = 42, + PHYTMAC_MSG_CMD_SET_RX_DATA_OFFSET = 43, + PHYTMAC_MSG_CMD_SET_WOL = 44, +}; + +enum phytmac_get_subid { + PHYTMAC_MSG_CMD_GET_ADDR, + PHYTMAC_MSG_CMD_GET_QUEUENUMS, + PHYTMAC_MSG_CMD_GET_CAPS, + PHYTMAC_MSG_CMD_GET_BD_PREFETCH, + PHYTMAC_MSG_CMD_GET_STATS, + PHYTMAC_MSG_CMD_GET_REG_READ, +}; + +struct phytmac_interface_info { + u8 interface; + u8 autoneg; + u16 duplex; + u32 speed; +} __packed; + +struct phytmac_mc_info { + u32 mc_bottom; + u32 mc_top; +} __packed; + +struct phytmac_fdir_info { + u32 ip4src; + u32 ip4dst; + u16 srcport; + u16 srcport_mask; + u16 dstport; + u16 dstport_mask; + u8 location; + u8 queue; + u8 ipsrc_en; + u8 ipdst_en; + u8 port_en; +} __packed; + +struct phytmac_ts_config { + u8 tx_mode; + u8 rx_mode; + u8 one_step; +} __packed; + +struct phytmac_ring_info { + u64 addr[4]; + u8 queue_num; + u8 hw_dma_cap; +} __packed; + +struct phytmac_rxbuf_info { + u8 queue_num; + u8 buffer_size; +} __packed; + +struct phytmac_dma_info { + u16 dma_burst_length; + u8 hw_dma_cap; +} __packed; + +struct phytmac_eth_info { + u16 index; + u16 etype; +} __packed; + +struct phytmac_mac { + u32 addrl; + u16 addrh; +} __packed; + +struct phytmac_feature { + u8 irq_read_clear; + u8 dma_data_width; + u8 dma_addr_width; + u8 tx_pkt_buffer; + u8 rx_pkt_buffer; + u8 pbuf_lso; + u8 queue_num; + u8 tx_bd_prefetch; + u8 rx_bd_prefetch; + u8 max_rx_fs; +} __packed; + +struct phytmac_msg_info { + u16 module_id; + u16 cmd_id; + u16 cmd_subid; + u16 flags; + u8 para[64]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c index c13f200fc1b9e..77f67ff0ba3cb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c @@ -65,6 +65,21 @@ static int phytium_dwmac_probe(struct platform_device *pdev) return -ENOMEM; plat->phy_interface = phytium_get_mac_mode(fwnode); +#ifdef CONFIG_ACPI + static const struct acpi_device_id phytium_old_acpi_id[] = { + { .id = "FTGM0001" }, // compat FT2000/4 id + { } + }; + /* "phy-mode" in phytium platform DSDT is not correct in some old device. 
+ * Force the PHY mode to rgmii-rxid and log a notice of its use. + * If the rgmii phy-mode is really needed, a blacklist may need to be added. + */ + if (acpi_match_device_ids(to_acpi_device(&pdev->dev), phytium_old_acpi_id) && + plat->phy_interface == PHY_INTERFACE_MODE_RGMII) { + plat->phy_interface = PHY_INTERFACE_MODE_RGMII_RXID; + dev_info(&pdev->dev, "phytium workaround: phy-mode from rgmii to rgmii-rxid\n"); + } +#endif /* Configure PHY if using device-tree */ if (pdev->dev.of_node) { @@ -147,7 +162,15 @@ static int phytium_dwmac_probe(struct platform_device *pdev) plat->dma_cfg->aal = fwnode_property_read_bool(fwnode, "snps,aal"); plat->dma_cfg->fixed_burst = fwnode_property_read_bool(fwnode, "snps,fixed-burst"); plat->dma_cfg->mixed_burst = fwnode_property_read_bool(fwnode, "snps,mixed-burst"); - +#ifdef CONFIG_ACPI + /* Some old Phytium FT-2000/4 (FTGM0001) devices fail with the default + * stmmac DMA settings and log the kernel error 'DMA descriptors allocation failed'. + */ + if (acpi_match_device_ids(to_acpi_device(&pdev->dev), phytium_old_acpi_id)) { + pdev->dev.dma_ops = NULL; // avoids the "set DMA mask" failure + plat->host_dma_width = 32; + } +#endif plat->axi->axi_lpi_en = false; plat->axi->axi_xit_frm = false; plat->axi->axi_wr_osr_lmt = 7; diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 638321fc9800c..4eee24327a146 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -311,4 +311,6 @@ config MARVELL_PEM_PMU Enable support for PCIe Interface performance monitoring on Marvell platform. +source "drivers/perf/phytium/Kconfig" + endmenu diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index ea52711a87e32..b2c0f7113e007 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -35,3 +35,4 @@ obj-$(CONFIG_DWC_PCIE_PMU) += dwc_pcie_pmu.o obj-$(CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU) += arm_cspmu/ obj-$(CONFIG_MESON_DDR_PMU) += amlogic/ obj-$(CONFIG_CXL_PMU) += cxl_pmu.o +obj-$(CONFIG_PHYTIUM_PMU) += phytium/ diff --git a/drivers/perf/phytium/Kconfig b/drivers/perf/phytium/Kconfig new file mode 100644 index 0000000000000..e9dabba7227eb --- /dev/null +++ b/drivers/perf/phytium/Kconfig @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0 +menuconfig PHYTIUM_PMU + bool "Phytium PMU support" + help + Say Y here if you want to support Phytium performance monitoring + unit (PMU) drivers. + +if PHYTIUM_PMU + +config PHYT_DDR_PMU + tristate "Phytium SoC DDR PMU driver" + depends on (ARCH_PHYTIUM && ACPI) || COMPILE_TEST + default m + help + Provides support for Phytium SoC DDR Controller performance + monitoring unit (PMU). + +config PHYT_PCIE_PMU + tristate "Phytium SoC PCIe PMU driver" + depends on (ARCH_PHYTIUM && ACPI) || COMPILE_TEST + default m + help + Provides support for Phytium SoC PCIe Controller performance + monitoring unit (PMU). + +endif + diff --git a/drivers/perf/phytium/Makefile b/drivers/perf/phytium/Makefile new file mode 100644 index 0000000000000..af37afc6920c5 --- /dev/null +++ b/drivers/perf/phytium/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +# +obj-$(CONFIG_PHYT_DDR_PMU) += phytium_ddr_pmu.o +obj-$(CONFIG_PHYT_PCIE_PMU) += phytium_pcie_pmu.o diff --git a/drivers/perf/phytium/phytium_ddr_pmu.c b/drivers/perf/phytium/phytium_ddr_pmu.c new file mode 100644 index 0000000000000..931acf29a559d --- /dev/null +++ b/drivers/perf/phytium/phytium_ddr_pmu.c @@ -0,0 +1,724 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SoC DDR performance monitoring unit support + * + * Copyright (c) 2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#undef pr_fmt +#define pr_fmt(fmt) "phytium_ddr_pmu: " fmt + +#define PHYTIUM_DDR_MAX_COUNTERS 8 + +#define DDR_START_TIMER 0x000 +#define DDR_STOP_TIMER 0x004 +#define DDR_CLEAR_EVENT 0x008 +#define DDR_SET_TIMER_L 0x00c +#define DDR_SET_TIMER_H 0x010 +#define DDR_TRIG_MODE 0x014 +#define DDR_NOW_STATE 0x0e0 +#define DDR_EVENT_CYCLES 0x0e4 +#define DDR_TPOINT_END_L 0x0e4 +#define DDR_TPOINT_END_H 0x0e8 +#define DDR_STATE_STOP 0x0ec +#define DDR_EVENT_RXREQ 0x100 +#define DDR_EVENT_RXDAT 0x104 +#define DDR_EVENT_TXDAT 0x108 +#define DDR_EVENT_RXREQ_RNS 0x10c +#define DDR_EVENT_RXREQ_WNSP 0x110 +#define DDR_EVENT_RXREQ_WNSF 0x114 +#define DDR_EVENT_BANDWIDTH 0x200 +#define DDR_W_DATA_BASE 0x200 +#define DDR_CLK_FRE 0xe00 +#define DDR_DATA_WIDTH 0xe04 + +#define to_phytium_ddr_pmu(p) (container_of(p, struct phytium_ddr_pmu, pmu)) + +static int phytium_ddr_pmu_hp_state; + +struct phytium_ddr_pmu_hwevents { + struct perf_event *hw_events[PHYTIUM_DDR_MAX_COUNTERS]; + DECLARE_BITMAP(used_mask, PHYTIUM_DDR_MAX_COUNTERS); +}; + +struct phytium_ddr_pmu { + struct device *dev; + void __iomem *base; + void __iomem *csr_base; + struct pmu pmu; + struct phytium_ddr_pmu_hwevents pmu_events; + u32 die_id; + u32 ddr_id; + u32 pmu_id; + int bit_idx; + int on_cpu; + int irq; + struct hlist_node node; +}; + +#define GET_DDR_EVENTID(hwc) (hwc->config_base & 0x7) +#define EVENT_VALID(idx) ((idx >= 0) && (idx < PHYTIUM_DDR_MAX_COUNTERS)) + +static const u32 ddr_counter_reg_offset[] = { + DDR_EVENT_CYCLES, DDR_EVENT_RXREQ, DDR_EVENT_RXDAT, + DDR_EVENT_TXDAT, DDR_EVENT_RXREQ_RNS, DDR_EVENT_RXREQ_WNSP, + DDR_EVENT_RXREQ_WNSF, DDR_EVENT_BANDWIDTH +}; + +static ssize_t phytium_ddr_pmu_format_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sprintf(buf, "%s\n", (char *)eattr->var); +} + +static ssize_t phytium_ddr_pmu_event_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var); +} + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct phytium_ddr_pmu *ddr_pmu = + to_phytium_ddr_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(ddr_pmu->on_cpu)); +} + +#define PHYTIUM_PMU_ATTR(_name, _func, _config) \ + (&((struct dev_ext_attribute[]){ \ + { __ATTR(_name, 0444, _func, NULL), (void *)_config } })[0] \ + .attr.attr) + +#define PHYTIUM_DDR_PMU_FORMAT_ATTR(_name, _config) \ + PHYTIUM_PMU_ATTR(_name, phytium_ddr_pmu_format_sysfs_show, \ + (void *)_config) + +#define PHYTIUM_DDR_PMU_EVENT_ATTR(_name, _config) \ + PHYTIUM_PMU_ATTR(_name, phytium_ddr_pmu_event_sysfs_show, \ + (unsigned long)_config) + +static struct attribute *phytium_ddr_pmu_format_attr[] = { + PHYTIUM_DDR_PMU_FORMAT_ATTR(event, "config:0-2"), + PHYTIUM_DDR_PMU_FORMAT_ATTR(timer, "config1:0-31"), + NULL, +}; + +static const struct attribute_group phytium_ddr_pmu_format_group = { + .name = "format", + .attrs = phytium_ddr_pmu_format_attr, +}; + +static struct attribute *phytium_ddr_pmu_events_attr[] = { + 
PHYTIUM_DDR_PMU_EVENT_ATTR(cycles, 0x00), + PHYTIUM_DDR_PMU_EVENT_ATTR(rxreq, 0x01), + PHYTIUM_DDR_PMU_EVENT_ATTR(rxdat, 0x02), + PHYTIUM_DDR_PMU_EVENT_ATTR(txdat, 0x03), + PHYTIUM_DDR_PMU_EVENT_ATTR(rxreq_RNS, 0x04), + PHYTIUM_DDR_PMU_EVENT_ATTR(rxreq_WNSP, 0x05), + PHYTIUM_DDR_PMU_EVENT_ATTR(rxreq_WNSF, 0x06), + PHYTIUM_DDR_PMU_EVENT_ATTR(bandwidth, 0x07), + NULL, +}; + +static const struct attribute_group phytium_ddr_pmu_events_group = { + .name = "events", + .attrs = phytium_ddr_pmu_events_attr, +}; + +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *phytium_ddr_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group phytium_ddr_pmu_cpumask_attr_group = { + .attrs = phytium_ddr_pmu_cpumask_attrs, +}; + +static const struct attribute_group *phytium_ddr_pmu_attr_groups[] = { + &phytium_ddr_pmu_format_group, + &phytium_ddr_pmu_events_group, + &phytium_ddr_pmu_cpumask_attr_group, + NULL, +}; + +static u32 phytium_ddr_pmu_get_event_timer(struct perf_event *event) +{ + return FIELD_GET(GENMASK(31, 0), event->attr.config1); +} + +static u64 phytium_ddr_pmu_read_counter(struct phytium_ddr_pmu *ddr_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = GET_DDR_EVENTID(hwc); + u32 cycle_l, cycle_h, w_data, ddr_data_width; + u64 val64 = 0; + int i; + u32 counter_offset = ddr_counter_reg_offset[idx]; + + if (!EVENT_VALID(idx)) { + dev_err(ddr_pmu->dev, "Unsupported event index:%d!\n", idx); + return 0; + } + + switch (idx) { + case 0: + cycle_l = readl(ddr_pmu->base + counter_offset); + cycle_h = readl(ddr_pmu->base + counter_offset + 4); + val64 = (u64)cycle_h << 32 | (u64)cycle_l; + break; + case 7: + ddr_data_width = readl(ddr_pmu->base + DDR_DATA_WIDTH); + for (i = 0; i < (ddr_data_width / 8); i++) { + w_data = readl(ddr_pmu->base + counter_offset + 4 * i); + val64 += w_data; + } + break; + default: + val64 = readl(ddr_pmu->base + counter_offset); + break; + } + + return val64; +} + +static void phytium_ddr_pmu_enable_clk(struct phytium_ddr_pmu *ddr_pmu) +{ + u32 val; + + val = readl(ddr_pmu->csr_base); + val |= 0xF; + writel(val, ddr_pmu->csr_base); +} + +static void phytium_ddr_pmu_disable_clk(struct phytium_ddr_pmu *ddr_pmu) +{ + u32 val; + + val = readl(ddr_pmu->csr_base); + val &= ~(0xF); + writel(val, ddr_pmu->csr_base); +} + +static void phytium_ddr_pmu_clear_all_counters(struct phytium_ddr_pmu *ddr_pmu) +{ + writel(0x1, ddr_pmu->base + DDR_CLEAR_EVENT); +} + +static void phytium_ddr_pmu_start_all_counters(struct phytium_ddr_pmu *ddr_pmu) +{ + writel(0x1, ddr_pmu->base + DDR_START_TIMER); +} + +static void phytium_ddr_pmu_stop_all_counters(struct phytium_ddr_pmu *ddr_pmu) +{ + writel(0x1, ddr_pmu->base + DDR_STOP_TIMER); +} + +static void phytium_ddr_pmu_set_timer(struct phytium_ddr_pmu *ddr_pmu, + u32 th_val) +{ + u32 val; + + val = readl(ddr_pmu->base + DDR_SET_TIMER_L); + val = readl(ddr_pmu->base + DDR_SET_TIMER_H); + + writel(th_val, ddr_pmu->base + DDR_SET_TIMER_L); + writel(0, ddr_pmu->base + DDR_SET_TIMER_H); +} + +static void phytium_ddr_pmu_reset_timer(struct phytium_ddr_pmu *ddr_pmu) +{ + u32 val; + + val = readl(ddr_pmu->base + DDR_SET_TIMER_L); + val = readl(ddr_pmu->base + DDR_SET_TIMER_H); + + writel(0xFFFFFFFF, ddr_pmu->base + DDR_SET_TIMER_L); + writel(0xFFFFFFFF, ddr_pmu->base + DDR_SET_TIMER_H); +} + +static unsigned long +phytium_ddr_pmu_get_stop_state(struct phytium_ddr_pmu *ddr_pmu) +{ + unsigned long val; + + val = (unsigned long)readl(ddr_pmu->base + DDR_STATE_STOP); + return val; +} + +static unsigned long 
+phytium_ddr_pmu_get_irq_flag(struct phytium_ddr_pmu *ddr_pmu) +{ + unsigned long val; + + val = (unsigned long)readl(ddr_pmu->csr_base + 4); + return val; +} + +static int phytium_ddr_pmu_mark_event(struct perf_event *event) +{ + struct phytium_ddr_pmu *ddr_pmu = to_phytium_ddr_pmu(event->pmu); + unsigned long *used_mask = ddr_pmu->pmu_events.used_mask; + struct hw_perf_event *hwc = &event->hw; + + int idx = GET_DDR_EVENTID(hwc); + + if (test_bit(idx, used_mask)) + return -EAGAIN; + + set_bit(idx, used_mask); + + return idx; +} + +static void phytium_ddr_pmu_unmark_event(struct phytium_ddr_pmu *ddr_pmu, + int idx) +{ + if (!EVENT_VALID(idx)) { + dev_err(ddr_pmu->dev, "Unsupported event index:%d!\n", idx); + return; + } + + clear_bit(idx, ddr_pmu->pmu_events.used_mask); +} + +static int phytium_ddr_pmu_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct phytium_ddr_pmu *ddr_pmu; + u32 event_timer; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + ddr_pmu = to_phytium_ddr_pmu(event->pmu); + + if (event->cpu < 0) { + dev_warn(ddr_pmu->dev, "Can't provide per-task data!\n"); + return -EINVAL; + } + + if (event->attr.config > PHYTIUM_DDR_MAX_COUNTERS) + return -EINVAL; + + if (ddr_pmu->on_cpu == -1) + return -EINVAL; + + event_timer = phytium_ddr_pmu_get_event_timer(event); + if (event_timer != 0) + phytium_ddr_pmu_set_timer(ddr_pmu, event_timer); + + hwc->idx = -1; + hwc->config_base = event->attr.config; + + event->cpu = ddr_pmu->on_cpu; + + return 0; +} + +static void phytium_ddr_pmu_event_update(struct perf_event *event) +{ + struct phytium_ddr_pmu *ddr_pmu = to_phytium_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 delta; + + delta = phytium_ddr_pmu_read_counter(ddr_pmu, hwc); + local64_add(delta, &event->count); +} + +static void phytium_ddr_pmu_event_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + hwc->state = 0; + perf_event_update_userpage(event); +} + +static void phytium_ddr_pmu_event_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + hwc->state |= PERF_HES_STOPPED; + + if (flags & PERF_EF_UPDATE) + phytium_ddr_pmu_event_update(event); +} + +static int phytium_ddr_pmu_event_add(struct perf_event *event, int flags) +{ + struct phytium_ddr_pmu *ddr_pmu = to_phytium_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + + hwc->state |= PERF_HES_STOPPED; + + idx = phytium_ddr_pmu_mark_event(event); + if (idx < 0) + return idx; + + event->hw.idx = idx; + ddr_pmu->pmu_events.hw_events[idx] = event; + + return 0; +} + +static void phytium_ddr_pmu_event_del(struct perf_event *event, int flags) +{ + struct phytium_ddr_pmu *ddr_pmu = to_phytium_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + unsigned long val; + u32 event_timer; + + phytium_ddr_pmu_event_stop(event, PERF_EF_UPDATE); + val = phytium_ddr_pmu_get_irq_flag(ddr_pmu); + val = phytium_ddr_pmu_get_stop_state(ddr_pmu); + phytium_ddr_pmu_unmark_event(ddr_pmu, hwc->idx); + + event_timer = phytium_ddr_pmu_get_event_timer(event); + if (event_timer != 0) + phytium_ddr_pmu_reset_timer(ddr_pmu); + + perf_event_update_userpage(event); + ddr_pmu->pmu_events.hw_events[hwc->idx] = NULL; +} + +static void phytium_ddr_pmu_enable(struct pmu *pmu) +{ + struct phytium_ddr_pmu *ddr_pmu = to_phytium_ddr_pmu(pmu); + int event_added = 
bitmap_weight(ddr_pmu->pmu_events.used_mask, + PHYTIUM_DDR_MAX_COUNTERS); + + if (event_added) { + phytium_ddr_pmu_clear_all_counters(ddr_pmu); + phytium_ddr_pmu_start_all_counters(ddr_pmu); + } +} + +static void phytium_ddr_pmu_disable(struct pmu *pmu) +{ + struct phytium_ddr_pmu *ddr_pmu = to_phytium_ddr_pmu(pmu); + int event_added = bitmap_weight(ddr_pmu->pmu_events.used_mask, + PHYTIUM_DDR_MAX_COUNTERS); + + if (event_added) + phytium_ddr_pmu_stop_all_counters(ddr_pmu); +} + +static const struct acpi_device_id phytium_ddr_pmu_acpi_match[] = { + { + "PHYT0043", + }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, phytium_ddr_pmu_acpi_match); + +static irqreturn_t phytium_ddr_pmu_overflow_handler(int irq, void *dev_id) +{ + struct phytium_ddr_pmu *ddr_pmu = dev_id; + struct perf_event *event; + unsigned long overflown, stop_state; + int idx; + unsigned long *used_mask = ddr_pmu->pmu_events.used_mask; + + int event_added = bitmap_weight(used_mask, PHYTIUM_DDR_MAX_COUNTERS); + + overflown = phytium_ddr_pmu_get_irq_flag(ddr_pmu); + + if (!test_bit(ddr_pmu->bit_idx, &overflown)) + return IRQ_NONE; + + stop_state = phytium_ddr_pmu_get_stop_state(ddr_pmu); + + if (bitmap_weight(&stop_state, 6)) { + for_each_set_bit(idx, used_mask, PHYTIUM_DDR_MAX_COUNTERS) { + event = ddr_pmu->pmu_events.hw_events[idx]; + if (!event) + continue; + phytium_ddr_pmu_event_update(event); + } + phytium_ddr_pmu_clear_all_counters(ddr_pmu); + phytium_ddr_pmu_start_all_counters(ddr_pmu); + + return IRQ_HANDLED; + } + + if (!event_added) { + phytium_ddr_pmu_clear_all_counters(ddr_pmu); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int phytium_ddr_pmu_init_irq(struct phytium_ddr_pmu *ddr_pmu, + struct platform_device *pdev) +{ + int irq, ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(&pdev->dev, irq, + phytium_ddr_pmu_overflow_handler, + IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_SHARED, + dev_name(&pdev->dev), ddr_pmu); + if (ret < 0) { + dev_err(&pdev->dev, "Fail to request IRQ:%d ret:%d\n", irq, + ret); + return ret; + } + + ddr_pmu->irq = irq; + + return 0; +} + +static int phytium_ddr_pmu_init_data(struct platform_device *pdev, + struct phytium_ddr_pmu *ddr_pmu) +{ + struct resource *res, *clkres; + + if (device_property_read_u32(&pdev->dev, "phytium,die-id", + &ddr_pmu->die_id)) { + dev_err(&pdev->dev, "Can not read phytium,die-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "phytium,ddr-id", + &ddr_pmu->ddr_id)) { + dev_err(&pdev->dev, "Can not read phytium,ddr-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "phytium,pmu-id", + &ddr_pmu->pmu_id)) { + dev_err(&pdev->dev, "Can not read ddr pmu-id!\n"); + return -EINVAL; + } + + ddr_pmu->bit_idx = ddr_pmu->ddr_id * 2 + ddr_pmu->pmu_id; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ddr_pmu->base = devm_ioremap_resource(&pdev->dev, res); + + if (IS_ERR(ddr_pmu->base)) { + dev_err(&pdev->dev, + "ioremap failed for ddr_pmu base resource\n"); + return PTR_ERR(ddr_pmu->base); + } + + clkres = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!clkres) { + dev_err(&pdev->dev, "failed for get ddr_pmu clk resource.\n"); + return -EINVAL; + } + + ddr_pmu->csr_base = + devm_ioremap(&pdev->dev, clkres->start, resource_size(clkres)); + if (IS_ERR(ddr_pmu->csr_base)) { + dev_err(&pdev->dev, + "ioremap failed for ddr_pmu csr resource\n"); + return PTR_ERR(ddr_pmu->csr_base); + } + + return 0; +} + +static int phytium_ddr_pmu_dev_probe(struct platform_device 
*pdev, + struct phytium_ddr_pmu *ddr_pmu) +{ + int ret; + + ret = phytium_ddr_pmu_init_data(pdev, ddr_pmu); + if (ret) + return ret; + + ret = phytium_ddr_pmu_init_irq(ddr_pmu, pdev); + if (ret) + return ret; + + ddr_pmu->dev = &pdev->dev; + ddr_pmu->on_cpu = raw_smp_processor_id(); + WARN_ON(irq_set_affinity(ddr_pmu->irq, cpumask_of(ddr_pmu->on_cpu))); + + return 0; +} + +static int phytium_ddr_pmu_probe(struct platform_device *pdev) +{ + struct phytium_ddr_pmu *ddr_pmu; + char *name; + int ret; + + ddr_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddr_pmu), GFP_KERNEL); + if (!ddr_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, ddr_pmu); + + ret = phytium_ddr_pmu_dev_probe(pdev, ddr_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance_nocalls( + phytium_ddr_pmu_hp_state, &ddr_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret); + return ret; + } + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "phyt%u_ddr%u_pmu%u", + ddr_pmu->die_id, ddr_pmu->ddr_id, + ddr_pmu->pmu_id); + ddr_pmu->pmu = (struct pmu){ + .name = name, + .module = THIS_MODULE, + .task_ctx_nr = perf_invalid_context, + .event_init = phytium_ddr_pmu_event_init, + .pmu_enable = phytium_ddr_pmu_enable, + .pmu_disable = phytium_ddr_pmu_disable, + .add = phytium_ddr_pmu_event_add, + .del = phytium_ddr_pmu_event_del, + .start = phytium_ddr_pmu_event_start, + .stop = phytium_ddr_pmu_event_stop, + .read = phytium_ddr_pmu_event_update, + .attr_groups = phytium_ddr_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + ret = perf_pmu_register(&ddr_pmu->pmu, name, -1); + if (ret) { + dev_err(ddr_pmu->dev, "DDR PMU register failed!\n"); + cpuhp_state_remove_instance_nocalls( + phytium_ddr_pmu_hp_state, + &ddr_pmu->node); + } + + phytium_ddr_pmu_enable_clk(ddr_pmu); + + pr_info("Phytium DDR PMU: "); + pr_info(" die_id = %d ddr_id = %d pmu_id = %d.\n", ddr_pmu->die_id, + ddr_pmu->ddr_id, ddr_pmu->pmu_id); + + return ret; +} + +static void phytium_ddr_pmu_remove(struct platform_device *pdev) +{ + struct phytium_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev); + + phytium_ddr_pmu_disable_clk(ddr_pmu); + + perf_pmu_unregister(&ddr_pmu->pmu); + cpuhp_state_remove_instance_nocalls( + phytium_ddr_pmu_hp_state, &ddr_pmu->node); +} + +static struct platform_driver phytium_ddr_pmu_driver = { + .driver = { + .name = "phytium_ddr_pmu", + .acpi_match_table = ACPI_PTR(phytium_ddr_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + .probe = phytium_ddr_pmu_probe, + .remove = phytium_ddr_pmu_remove, +}; + +static int phytium_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct phytium_ddr_pmu *ddr_pmu = + hlist_entry_safe(node, struct phytium_ddr_pmu, node); + unsigned int target; + cpumask_t available_cpus; + + if (ddr_pmu->on_cpu != cpu) + return 0; + + cpumask_and(&available_cpus, + cpumask_of_node(ddr_pmu->die_id), cpu_online_mask); + target = cpumask_any_but(&available_cpus, cpu); + if (target >= nr_cpu_ids) { + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + } + + perf_pmu_migrate_context(&ddr_pmu->pmu, cpu, target); + WARN_ON(irq_set_affinity(ddr_pmu->irq, cpumask_of(target))); + ddr_pmu->on_cpu = target; + + return 0; +} + +static int __init phytium_ddr_pmu_module_init(void) +{ + int ret; + + phytium_ddr_pmu_hp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "perf/phytium/ddrpmu:offline", + NULL, phytium_ddr_pmu_offline_cpu); + if (phytium_ddr_pmu_hp_state < 0) { + pr_err("DDR PMU: setup hotplug, 
phytium_ddr_pmu_hp_state = %d\n", + phytium_ddr_pmu_hp_state); + return phytium_ddr_pmu_hp_state; + } + + ret = platform_driver_register(&phytium_ddr_pmu_driver); + if (ret) + cpuhp_remove_multi_state( + phytium_ddr_pmu_hp_state); + + return ret; +} +module_init(phytium_ddr_pmu_module_init); + +static void __exit phytium_ddr_pmu_module_exit(void) +{ + platform_driver_unregister(&phytium_ddr_pmu_driver); + cpuhp_remove_multi_state(phytium_ddr_pmu_hp_state); +} +module_exit(phytium_ddr_pmu_module_exit); + +MODULE_DESCRIPTION("Phytium DDR PMU driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Hu Xianghua "); diff --git a/drivers/perf/phytium/phytium_pcie_pmu.c b/drivers/perf/phytium/phytium_pcie_pmu.c new file mode 100644 index 0000000000000..929518402c227 --- /dev/null +++ b/drivers/perf/phytium/phytium_pcie_pmu.c @@ -0,0 +1,871 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium Soc PCIe performance monitoring unit support + * + * Copyright (c) 2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#undef pr_fmt +#define pr_fmt(fmt) "phytium_pcie_pmu: " fmt + +#define PHYTIUM_PCIE_MAX_COUNTERS 18 + +#define PCIE_START_TIMER 0x000 +#define PCIE_STOP_TIMER 0x004 +#define PCIE_CLEAR_EVENT 0x008 +#define PCIE_SET_TIMER_L 0x00c +#define PCIE_SET_TIMER_H 0x010 +#define PCIE_TRIG_MODE 0x014 + +#define PCIE_NOW_STATE 0x0e0 +#define PCIE_EVENT_CYCLES 0x0e4 +#define PCIE_TPOINT_END_L 0x0e4 +#define PCIE_TPOINT_END_H 0x0e8 +#define PCIE_STATE_STOP 0x0ec + +#define PCIE_EVENT_AW 0x100 +#define PCIE_EVENT_W_LAST 0x104 +#define PCIE_EVENT_B 0x108 +#define PCIE_EVENT_AR 0x10c +#define PCIE_EVENT_R_LAST 0x110 +#define PCIE_EVENT_R_FULL 0x114 +#define PCIE_EVENT_R_ERR 0x118 +#define PCIE_EVENT_W_ERR 0x11c +#define PCIE_EVENT_DELAY_RD 0x120 +#define PCIE_EVENT_DELAY_WR 0x124 +#define PCIE_EVENT_RD_MAX 0x128 +#define PCIE_EVENT_RD_MIN 0x12c +#define PCIE_EVENT_WR_MAX 0x130 +#define PCIE_EVENT_WR_MIN 0x134 + +#define PCIE_EVENT_W_DATA 0x200 +#define PCIE_W_DATA_BASE 0x200 + +#define PCIE_EVENT_RDELAY_TIME 0x300 +#define PCIE_RDELAY_TIME_BASE 0x300 + +#define PCIE_EVENT_WDELAY_TIME 0x700 +#define PCIE_WDELAY_TIME_BASE 0x700 + +#define PCIE_CLK_FRE 0xe00 +#define PCIE_DATA_WIDTH 0xe04 + +#define to_phytium_pcie_pmu(p) (container_of(p, struct phytium_pcie_pmu, pmu)) + +static int phytium_pcie_pmu_hp_state; + +struct phytium_pcie_pmu_hwevents { + struct perf_event *hw_events[PHYTIUM_PCIE_MAX_COUNTERS]; + DECLARE_BITMAP(used_mask, PHYTIUM_PCIE_MAX_COUNTERS); +}; + +struct phytium_pcie_pmu { + struct device *dev; + void __iomem *base; + void __iomem *csr_base; + void __iomem *irq_reg; + struct pmu pmu; + struct phytium_pcie_pmu_hwevents pmu_events; + u32 die_id; + u32 pmu_id; + int on_cpu; + int irq; + struct hlist_node node; + int ctrler_id; + int real_ctrler; + u32 clk_bits; +}; + +#define GET_PCIE_EVENTID(hwc) (hwc->config_base & 0x1F) + +#define EVENT_VALID(idx) ((idx >= 0) && (idx < PHYTIUM_PCIE_MAX_COUNTERS)) + +static const u32 pcie_counter_reg_offset[] = { + PCIE_EVENT_CYCLES, PCIE_EVENT_AW, PCIE_EVENT_W_LAST, + PCIE_EVENT_B, PCIE_EVENT_AR, PCIE_EVENT_R_LAST, + PCIE_EVENT_R_FULL, PCIE_EVENT_R_ERR, PCIE_EVENT_W_ERR, + PCIE_EVENT_DELAY_RD, PCIE_EVENT_DELAY_WR, PCIE_EVENT_RD_MAX, + PCIE_EVENT_RD_MIN, PCIE_EVENT_WR_MAX, PCIE_EVENT_WR_MIN, + PCIE_EVENT_W_DATA, PCIE_EVENT_RDELAY_TIME, PCIE_EVENT_WDELAY_TIME +}; + +static 
ssize_t phytium_pcie_pmu_format_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sprintf(buf, "%s\n", (char *)eattr->var); +} + +static ssize_t phytium_pcie_pmu_event_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var); +} + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct phytium_pcie_pmu *pcie_pmu = + to_phytium_pcie_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu)); +} + +#define PHYTIUM_PMU_ATTR(_name, _func, _config) \ + (&((struct dev_ext_attribute[]){ \ + { __ATTR(_name, 0444, _func, NULL), (void *)_config } })[0] \ + .attr.attr) + +#define PHYTIUM_PCIE_PMU_FORMAT_ATTR(_name, _config) \ + PHYTIUM_PMU_ATTR(_name, phytium_pcie_pmu_format_sysfs_show, \ + (void *)_config) + +#define PHYTIUM_PCIE_PMU_EVENT_ATTR(_name, _config) \ + PHYTIUM_PMU_ATTR(_name, phytium_pcie_pmu_event_sysfs_show, \ + (unsigned long)_config) + +static struct attribute *phytium_pcie_pmu_format_attr[] = { + PHYTIUM_PCIE_PMU_FORMAT_ATTR(event, "config:0-4"), + PHYTIUM_PCIE_PMU_FORMAT_ATTR(ctrler, "config:8-10"), + PHYTIUM_PCIE_PMU_FORMAT_ATTR(timer, "config1:0-31"), + NULL, +}; + +static const struct attribute_group phytium_pcie_pmu_format_group = { + .name = "format", + .attrs = phytium_pcie_pmu_format_attr, +}; + +static struct attribute *phytium_pcie_pmu_events_attr[] = { + PHYTIUM_PCIE_PMU_EVENT_ATTR(cycles, 0x00), + PHYTIUM_PCIE_PMU_EVENT_ATTR(aw, 0x01), + PHYTIUM_PCIE_PMU_EVENT_ATTR(w_last, 0x02), + PHYTIUM_PCIE_PMU_EVENT_ATTR(b, 0x03), + PHYTIUM_PCIE_PMU_EVENT_ATTR(ar, 0x04), + PHYTIUM_PCIE_PMU_EVENT_ATTR(r_last, 0x05), + PHYTIUM_PCIE_PMU_EVENT_ATTR(r_full, 0x06), + PHYTIUM_PCIE_PMU_EVENT_ATTR(r_err, 0x07), + PHYTIUM_PCIE_PMU_EVENT_ATTR(w_err, 0x08), + PHYTIUM_PCIE_PMU_EVENT_ATTR(delay_rd, 0x09), + PHYTIUM_PCIE_PMU_EVENT_ATTR(delay_wr, 0x0a), + PHYTIUM_PCIE_PMU_EVENT_ATTR(rd_max, 0x0b), + PHYTIUM_PCIE_PMU_EVENT_ATTR(rd_min, 0x0c), + PHYTIUM_PCIE_PMU_EVENT_ATTR(wr_max, 0x0d), + PHYTIUM_PCIE_PMU_EVENT_ATTR(wr_min, 0x0e), + PHYTIUM_PCIE_PMU_EVENT_ATTR(w_data, 0x0f), + PHYTIUM_PCIE_PMU_EVENT_ATTR(rdelay_time, 0x10), + PHYTIUM_PCIE_PMU_EVENT_ATTR(wdelay_time, 0x11), + NULL, +}; + +static const struct attribute_group phytium_pcie_pmu_events_group = { + .name = "events", + .attrs = phytium_pcie_pmu_events_attr, +}; + +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *phytium_pcie_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group phytium_pcie_pmu_cpumask_attr_group = { + .attrs = phytium_pcie_pmu_cpumask_attrs, +}; + +static const struct attribute_group *phytium_pcie_pmu_attr_groups[] = { + &phytium_pcie_pmu_format_group, + &phytium_pcie_pmu_events_group, + &phytium_pcie_pmu_cpumask_attr_group, + NULL, +}; + +static u32 phytium_pcie_pmu_get_event_ctrler(struct perf_event *event) +{ + return FIELD_GET(GENMASK(10, 8), event->attr.config); +} + +static u32 phytium_pcie_pmu_get_event_timer(struct perf_event *event) +{ + return FIELD_GET(GENMASK(31, 0), event->attr.config1); +} + +static u64 phytium_pcie_pmu_read_counter(struct phytium_pcie_pmu *pcie_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = GET_PCIE_EVENTID(hwc); + 
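/*
+	 * Most events are plain 32-bit counters read in the default case
+	 * below. Three encodings are special: idx 0 (cycles) is a 64-bit
+	 * low/high pair, idx 15 (w_data) sums PCIE_DATA_WIDTH/8 word
+	 * counters (presumably one per byte lane of the AXI data width),
+	 * and idx 16/17 sum arrays of 64-bit delay accumulators.
+	 */
+	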
u32 cycle_l, cycle_h, rdelay_l, rdelay_h, w_data, wdelay_l, wdelay_h,
+		pcie_data_width;
+	u64 val64 = 0;
+	int i;
+	u32 counter_offset;
+
+	if (!EVENT_VALID(idx)) {
+		dev_err(pcie_pmu->dev, "Unsupported event index:%d!\n", idx);
+		return 0;
+	}
+	/* only index the offset table after idx has been validated */
+	counter_offset = pcie_counter_reg_offset[idx];
+
+	switch (idx) {
+	case 0:
+		cycle_l = readl(pcie_pmu->base + counter_offset);
+		cycle_h = readl(pcie_pmu->base + counter_offset + 4);
+		val64 = (u64)cycle_h << 32 | (u64)cycle_l;
+		break;
+	case 15:
+		pcie_data_width = readl(pcie_pmu->base + PCIE_DATA_WIDTH);
+		for (i = 0; i < (pcie_data_width / 8); i++) {
+			w_data = readl(pcie_pmu->base + counter_offset + 4 * i);
+			val64 += w_data;
+		}
+		break;
+	case 16:
+		for (i = 0; i <= 127; i = i + 2) {
+			rdelay_l =
+				readl(pcie_pmu->base + counter_offset + 4 * i);
+			rdelay_h = readl(pcie_pmu->base + counter_offset +
+					 4 * i + 4);
+			val64 += (u64)rdelay_h << 32 | (u64)rdelay_l;
+		}
+		break;
+	case 17:
+		/* low/high pairs: step two words at a time, as in case 16 */
+		for (i = 0; i <= 63; i = i + 2) {
+			wdelay_l =
+				readl(pcie_pmu->base + counter_offset + 4 * i);
+			wdelay_h = readl(pcie_pmu->base + counter_offset +
+					 4 * i + 4);
+			val64 += (u64)wdelay_h << 32 | (u64)wdelay_l;
+		}
+		break;
+	default:
+		val64 = readl(pcie_pmu->base + counter_offset);
+		break;
+	}
+	return val64;
+}
+
+static void phytium_pcie_pmu_enable_clk(struct phytium_pcie_pmu *pcie_pmu)
+{
+	u32 val;
+
+	val = readl(pcie_pmu->csr_base);
+	val |= (pcie_pmu->clk_bits);
+	writel(val, pcie_pmu->csr_base);
+}
+
+static void phytium_pcie_pmu_disable_clk(struct phytium_pcie_pmu *pcie_pmu)
+{
+	u32 val;
+
+	val = readl(pcie_pmu->csr_base);
+	val &= ~(pcie_pmu->clk_bits);
+	writel(val, pcie_pmu->csr_base);
+}
+
+static void phytium_pcie_pmu_select_ctrler(struct phytium_pcie_pmu *pcie_pmu)
+{
+	u32 val, offset = 0;
+
+	if (pcie_pmu->pmu_id != 2)
+		offset = 0xc;
+
+	val = readl(pcie_pmu->csr_base + offset);
+
+	if (pcie_pmu->pmu_id == 2) {
+		val &= 0xffffffcf;
+		val |= pcie_pmu->real_ctrler;
+	} else {
+		val &= 0xfffffffc;
+		val |= pcie_pmu->real_ctrler;
+	}
+
+	writel(val, pcie_pmu->csr_base + offset);
+}
+
+static void
+phytium_pcie_pmu_clear_all_counters(struct phytium_pcie_pmu *pcie_pmu)
+{
+	writel(0x1, pcie_pmu->base + PCIE_CLEAR_EVENT);
+}
+
+static void
+phytium_pcie_pmu_start_all_counters(struct phytium_pcie_pmu *pcie_pmu)
+{
+	writel(0x1, pcie_pmu->base + PCIE_START_TIMER);
+}
+
+static void
+phytium_pcie_pmu_stop_all_counters(struct phytium_pcie_pmu *pcie_pmu)
+{
+	writel(0x1, pcie_pmu->base + PCIE_STOP_TIMER);
+}
+
+static void phytium_pcie_pmu_set_timer(struct phytium_pcie_pmu *pcie_pmu,
+				       u32 th_val)
+{
+	/* read back both halves (values unused) before reprogramming */
+	readl(pcie_pmu->base + PCIE_SET_TIMER_L);
+	readl(pcie_pmu->base + PCIE_SET_TIMER_H);
+
+	writel(th_val, pcie_pmu->base + PCIE_SET_TIMER_L);
+	writel(0, pcie_pmu->base + PCIE_SET_TIMER_H);
+}
+
+static void phytium_pcie_pmu_reset_timer(struct phytium_pcie_pmu *pcie_pmu)
+{
+	/* read back both halves (values unused) before reprogramming */
+	readl(pcie_pmu->base + PCIE_SET_TIMER_L);
+	readl(pcie_pmu->base + PCIE_SET_TIMER_H);
+
+	writel(0xFFFFFFFF, pcie_pmu->base + PCIE_SET_TIMER_L);
+	writel(0xFFFFFFFF, pcie_pmu->base + PCIE_SET_TIMER_H);
+}
+
+static unsigned long
+phytium_pcie_pmu_get_stop_state(struct phytium_pcie_pmu *pcie_pmu)
+{
+	unsigned long val;
+
+	val = (unsigned long)readl(pcie_pmu->base + PCIE_STATE_STOP);
+	return val;
+}
+
+static unsigned long
+phytium_pcie_pmu_get_irq_flag(struct phytium_pcie_pmu *pcie_pmu)
+{
+	unsigned long val;
+
+	val = (unsigned long)readl(pcie_pmu->irq_reg);
+	return val;
+}
+
+static int phytium_pcie_pmu_mark_event(struct perf_event *event)
+{
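+	/*
+	 * Counter slots are fixed per event type, so "allocation" is just a
+	 * test-and-set on the used_mask bitmap; -EAGAIN means this event is
+	 * already being counted on the PMU.
+	 */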
+	struct phytium_pcie_pmu *pcie_pmu = to_phytium_pcie_pmu(event->pmu);
+	unsigned long *used_mask = pcie_pmu->pmu_events.used_mask;
+	struct hw_perf_event *hwc = &event->hw;
+
+	int idx = GET_PCIE_EVENTID(hwc);
+
+	if (test_bit(idx, used_mask))
+		return -EAGAIN;
+
+	set_bit(idx, used_mask);
+
+	return idx;
+}
+
+static void phytium_pcie_pmu_unmark_event(struct phytium_pcie_pmu *pcie_pmu,
+					  int idx)
+{
+	if (!EVENT_VALID(idx)) {
+		dev_err(pcie_pmu->dev, "Unsupported event index:%d!\n", idx);
+		return;
+	}
+
+	clear_bit(idx, pcie_pmu->pmu_events.used_mask);
+}
+
+static int phytium_pcie_pmu_event_init(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct phytium_pcie_pmu *pcie_pmu;
+	u32 event_ctrler, event_timer;
+
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+		return -EOPNOTSUPP;
+
+	pcie_pmu = to_phytium_pcie_pmu(event->pmu);
+
+	if (event->cpu < 0) {
+		dev_warn(pcie_pmu->dev, "Can't provide per-task data!\n");
+		return -EINVAL;
+	}
+
+	/* valid event indices run from 0 to PHYTIUM_PCIE_MAX_COUNTERS - 1 */
+	if ((event->attr.config & 0x1F) >= PHYTIUM_PCIE_MAX_COUNTERS)
+		return -EINVAL;
+
+	if (pcie_pmu->on_cpu == -1)
+		return -EINVAL;
+
+	event_timer = phytium_pcie_pmu_get_event_timer(event);
+	if (event_timer != 0)
+		phytium_pcie_pmu_set_timer(pcie_pmu, event_timer);
+
+	event_ctrler = phytium_pcie_pmu_get_event_ctrler(event);
+	switch (pcie_pmu->pmu_id) {
+	case 0:
+		if (event_ctrler != 0) {
+			dev_warn(pcie_pmu->dev,
+				 "Wrong ctrler id(%d) for pcie-pmu0!\n",
+				 event_ctrler);
+			return -EINVAL;
+		}
+		break;
+	case 1:
+		if ((event_ctrler < 1) || (event_ctrler > 3)) {
+			dev_warn(pcie_pmu->dev,
+				 "Wrong ctrler id(%d) for pcie-pmu1!\n",
+				 event_ctrler);
+			return -EINVAL;
+		}
+		break;
+	case 2:
+		if ((event_ctrler < 4) || (event_ctrler > 7)) {
+			dev_warn(pcie_pmu->dev,
+				 "Wrong ctrler id(%d) for pcie-pmu2!\n",
+				 event_ctrler);
+			return -EINVAL;
+		}
+		break;
+	default:
+		dev_err(pcie_pmu->dev, "Unsupported pmu id:%d!\n",
+			pcie_pmu->pmu_id);
+		return -EINVAL;
+	}
+
+	pcie_pmu->ctrler_id = event_ctrler;
+	switch (pcie_pmu->pmu_id) {
+	case 0:
+	case 1:
+		pcie_pmu->real_ctrler = pcie_pmu->ctrler_id;
+		break;
+	case 2:
+		pcie_pmu->real_ctrler = (pcie_pmu->ctrler_id - 4) * 16;
+		break;
+	default:
+		dev_err(pcie_pmu->dev, "Unsupported pmu id:%d!\n",
+			pcie_pmu->pmu_id);
+		return -EINVAL;
+	}
+	phytium_pcie_pmu_select_ctrler(pcie_pmu);
+
+	hwc->idx = -1;
+	hwc->config_base = event->attr.config;
+
+	event->cpu = pcie_pmu->on_cpu;
+	return 0;
+}
+
+static void phytium_pcie_pmu_event_update(struct perf_event *event)
+{
+	struct phytium_pcie_pmu *pcie_pmu = to_phytium_pcie_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	u64 delta;
+
+	delta = phytium_pcie_pmu_read_counter(pcie_pmu, hwc);
+	local64_add(delta, &event->count);
+}
+
+static void phytium_pcie_pmu_event_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	hwc->state = 0;
+	perf_event_update_userpage(event);
+}
+
+static void phytium_pcie_pmu_event_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	hwc->state |= PERF_HES_STOPPED;
+
+	if (flags & PERF_EF_UPDATE)
+		phytium_pcie_pmu_event_update(event);
+}
+
+static int phytium_pcie_pmu_event_add(struct perf_event *event, int flags)
+{
+	struct phytium_pcie_pmu *pcie_pmu = to_phytium_pcie_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+
+	hwc->state |= PERF_HES_STOPPED;
+
+	idx = phytium_pcie_pmu_mark_event(event);
+	if (idx < 0)
+		return idx;
+
+	event->hw.idx = idx;
+	pcie_pmu->pmu_events.hw_events[idx] = event;
+
+	return 0;
+}
+
+static void phytium_pcie_pmu_event_del(struct perf_event *event, int flags)
+{
+	struct phytium_pcie_pmu *pcie_pmu = to_phytium_pcie_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	u32 event_timer;
+
+	phytium_pcie_pmu_event_stop(event, PERF_EF_UPDATE);
+	/* read the irq flag and stop state back (the values are unused here) */
+	phytium_pcie_pmu_get_irq_flag(pcie_pmu);
+	phytium_pcie_pmu_get_stop_state(pcie_pmu);
+	phytium_pcie_pmu_unmark_event(pcie_pmu, hwc->idx);
+
+	event_timer = phytium_pcie_pmu_get_event_timer(event);
+	if (event_timer != 0)
+		phytium_pcie_pmu_reset_timer(pcie_pmu);
+
+	perf_event_update_userpage(event);
+	pcie_pmu->pmu_events.hw_events[hwc->idx] = NULL;
+}
+
+static void phytium_pcie_pmu_enable(struct pmu *pmu)
+{
+	struct phytium_pcie_pmu *pcie_pmu = to_phytium_pcie_pmu(pmu);
+	int event_added = bitmap_weight(pcie_pmu->pmu_events.used_mask,
+					PHYTIUM_PCIE_MAX_COUNTERS);
+
+	if (event_added) {
+		phytium_pcie_pmu_clear_all_counters(pcie_pmu);
+		phytium_pcie_pmu_start_all_counters(pcie_pmu);
+	}
+}
+
+static void phytium_pcie_pmu_disable(struct pmu *pmu)
+{
+	struct phytium_pcie_pmu *pcie_pmu = to_phytium_pcie_pmu(pmu);
+	int event_added = bitmap_weight(pcie_pmu->pmu_events.used_mask,
+					PHYTIUM_PCIE_MAX_COUNTERS);
+
+	if (event_added)
+		phytium_pcie_pmu_stop_all_counters(pcie_pmu);
+}
+
+static const struct acpi_device_id phytium_pcie_pmu_acpi_match[] = {
+	{
+		"PHYT0044",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, phytium_pcie_pmu_acpi_match);
+
+static irqreturn_t phytium_pcie_pmu_overflow_handler(int irq, void *dev_id)
+{
+	struct phytium_pcie_pmu *pcie_pmu = dev_id;
+	struct perf_event *event;
+	unsigned long overflown, stop_state;
+	int idx;
+	unsigned long *used_mask = pcie_pmu->pmu_events.used_mask;
+	int event_added = bitmap_weight(used_mask, PHYTIUM_PCIE_MAX_COUNTERS);
+
+	overflown = phytium_pcie_pmu_get_irq_flag(pcie_pmu);
+
+	if (!test_bit(pcie_pmu->pmu_id + 4, &overflown))
+		return IRQ_NONE;
+
+	stop_state = phytium_pcie_pmu_get_stop_state(pcie_pmu);
+
+	if (bitmap_weight(&stop_state, 6)) {
+		for_each_set_bit(idx, used_mask, PHYTIUM_PCIE_MAX_COUNTERS) {
+			event = pcie_pmu->pmu_events.hw_events[idx];
+			if (!event)
+				continue;
+			phytium_pcie_pmu_event_update(event);
+		}
+		phytium_pcie_pmu_clear_all_counters(pcie_pmu);
+		phytium_pcie_pmu_start_all_counters(pcie_pmu);
+
+		return IRQ_HANDLED;
+	}
+	if (!event_added) {
+		phytium_pcie_pmu_clear_all_counters(pcie_pmu);
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
+static int phytium_pcie_pmu_init_irq(struct phytium_pcie_pmu *pcie_pmu,
+				     struct platform_device *pdev)
+{
+	int irq, ret;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	ret = devm_request_irq(&pdev->dev, irq,
+			       phytium_pcie_pmu_overflow_handler,
+			       IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_SHARED,
+			       dev_name(&pdev->dev), pcie_pmu);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to request IRQ:%d ret:%d\n", irq,
+			ret);
+		return ret;
+	}
+
+	pcie_pmu->irq = irq;
+
+	return 0;
+}
+
+static int phytium_pcie_pmu_init_data(struct platform_device *pdev,
+				      struct phytium_pcie_pmu *pcie_pmu)
+{
+	struct resource *res, *clkres, *irqres;
+
+	if (device_property_read_u32(&pdev->dev, "phytium,die-id",
+				     &pcie_pmu->die_id)) {
+		dev_err(&pdev->dev, "Cannot read phytium,die-id!\n");
+		return -EINVAL;
+	}
+
+	if (device_property_read_u32(&pdev->dev, "phytium,pmu-id",
+				     &pcie_pmu->pmu_id)) {
+		dev_err(&pdev->dev, "Cannot read phytium,pmu-id!\n");
+		return -EINVAL;
+	}
+
+	switch (pcie_pmu->pmu_id) {
+	case 0:
+		pcie_pmu->clk_bits = 0x1;
+		break;
+	case 1:
+		pcie_pmu->clk_bits = 0xe;
+		break;
+	case 2:
+		pcie_pmu->clk_bits = 0xf;
+		break;
+	default:
+		dev_err(&pdev->dev, "Unsupported pmu id:%d!\n",
+			pcie_pmu->pmu_id);
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pcie_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pcie_pmu->base)) {
+		dev_err(&pdev->dev, "ioremap failed for pcie_pmu resource\n");
+		return PTR_ERR(pcie_pmu->base);
+	}
+
+	clkres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!clkres) {
+		dev_err(&pdev->dev, "failed to get pcie_pmu clk resource.\n");
+		return -EINVAL;
+	}
+
+	/* devm_ioremap() returns NULL on failure, not an ERR_PTR */
+	pcie_pmu->csr_base =
+		devm_ioremap(&pdev->dev, clkres->start, resource_size(clkres));
+	if (!pcie_pmu->csr_base) {
+		dev_err(&pdev->dev,
+			"ioremap failed for pcie_pmu csr resource\n");
+		return -ENOMEM;
+	}
+
+	irqres = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	if (!irqres) {
+		dev_err(&pdev->dev,
+			"failed to get pcie_pmu irq reg resource.\n");
+		return -EINVAL;
+	}
+
+	pcie_pmu->irq_reg =
+		devm_ioremap(&pdev->dev, irqres->start, resource_size(irqres));
+	if (!pcie_pmu->irq_reg) {
+		dev_err(&pdev->dev,
+			"ioremap failed for pcie_pmu irq resource\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int phytium_pcie_pmu_dev_probe(struct platform_device *pdev,
+				      struct phytium_pcie_pmu *pcie_pmu)
+{
+	int ret;
+
+	ret = phytium_pcie_pmu_init_data(pdev, pcie_pmu);
+	if (ret)
+		return ret;
+
+	ret = phytium_pcie_pmu_init_irq(pcie_pmu, pdev);
+	if (ret)
+		return ret;
+	pcie_pmu->dev = &pdev->dev;
+	pcie_pmu->on_cpu = raw_smp_processor_id();
+	WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(pcie_pmu->on_cpu)));
+	pcie_pmu->ctrler_id = -1;
+
+	return 0;
+}
+
+static int phytium_pcie_pmu_probe(struct platform_device *pdev)
+{
+	struct phytium_pcie_pmu *pcie_pmu;
+	char *name;
+	int ret;
+
+	pcie_pmu = devm_kzalloc(&pdev->dev, sizeof(*pcie_pmu), GFP_KERNEL);
+	if (!pcie_pmu)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, pcie_pmu);
+
+	ret = phytium_pcie_pmu_dev_probe(pdev, pcie_pmu);
+	if (ret)
+		return ret;
+
+	ret = cpuhp_state_add_instance_nocalls(
+		phytium_pcie_pmu_hp_state, &pcie_pmu->node);
+	if (ret) {
+		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+		return ret;
+	}
+
+	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "phyt%u_pcie_pmu%u",
+			      pcie_pmu->die_id, pcie_pmu->pmu_id);
+	pcie_pmu->pmu = (struct pmu){
+		.name = name,
+		.module = THIS_MODULE,
+		.task_ctx_nr = perf_invalid_context,
+		.event_init = phytium_pcie_pmu_event_init,
+		.pmu_enable = phytium_pcie_pmu_enable,
+		.pmu_disable = phytium_pcie_pmu_disable,
+		.add = phytium_pcie_pmu_event_add,
+		.del = phytium_pcie_pmu_event_del,
+		.start = phytium_pcie_pmu_event_start,
+		.stop = phytium_pcie_pmu_event_stop,
+		.read = phytium_pcie_pmu_event_update,
+		.attr_groups = phytium_pcie_pmu_attr_groups,
+		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+	};
+
+	ret = perf_pmu_register(&pcie_pmu->pmu, name, -1);
+	if (ret) {
+		dev_err(pcie_pmu->dev, "PCIE PMU register failed!\n");
+		cpuhp_state_remove_instance_nocalls(
+			phytium_pcie_pmu_hp_state,
+			&pcie_pmu->node);
+		return ret;
+	}
+
+	phytium_pcie_pmu_enable_clk(pcie_pmu);
+
+	pr_info("Phytium PCIe PMU: die_id = %d pmu_id = %d.\n",
+		pcie_pmu->die_id, pcie_pmu->pmu_id);
+
+	return 0;
+}
+
+static void phytium_pcie_pmu_remove(struct platform_device *pdev)
+{
+	struct phytium_pcie_pmu *pcie_pmu = platform_get_drvdata(pdev);
+
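+	/*
+	 * Tear down in reverse order of probe: gate the counter clock first
+	 * so the hardware stops counting, then unregister the PMU and drop
+	 * the hotplug instance.
+	 */
+	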
phytium_pcie_pmu_disable_clk(pcie_pmu); + + perf_pmu_unregister(&pcie_pmu->pmu); + cpuhp_state_remove_instance_nocalls( + phytium_pcie_pmu_hp_state, &pcie_pmu->node); +} + +static struct platform_driver phytium_pcie_pmu_driver = { + .driver = { + .name = "phytium_pcie_pmu", + .acpi_match_table = ACPI_PTR(phytium_pcie_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + .probe = phytium_pcie_pmu_probe, + .remove = phytium_pcie_pmu_remove, +}; + +static int phytium_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct phytium_pcie_pmu *pcie_pmu = + hlist_entry_safe(node, struct phytium_pcie_pmu, node); + unsigned int target; + cpumask_t available_cpus; + + if (pcie_pmu->on_cpu != cpu) + return 0; + + cpumask_and(&available_cpus, + cpumask_of_node(pcie_pmu->die_id), cpu_online_mask); + + target = cpumask_any_but(&available_cpus, cpu); + if (target >= nr_cpu_ids) { + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + } + + perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target); + WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(target))); + pcie_pmu->on_cpu = target; + + return 0; +} + +static int __init phytium_pcie_pmu_module_init(void) +{ + int ret; + + phytium_pcie_pmu_hp_state = + cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "perf/phytium/pciepmu:offline", NULL, + phytium_pcie_pmu_offline_cpu); + if (phytium_pcie_pmu_hp_state < 0) { + pr_err("PCIE PMU: setup hotplug, ret = %d\n", + phytium_pcie_pmu_hp_state); + return phytium_pcie_pmu_hp_state; + } + + ret = platform_driver_register(&phytium_pcie_pmu_driver); + if (ret) + cpuhp_remove_multi_state( + phytium_pcie_pmu_hp_state); + + return ret; +} +module_init(phytium_pcie_pmu_module_init); + +static void __exit phytium_pcie_pmu_module_exit(void) +{ + platform_driver_unregister(&phytium_pcie_pmu_driver); + cpuhp_remove_multi_state(phytium_pcie_pmu_hp_state); +} +module_exit(phytium_pcie_pmu_module_exit); + +MODULE_DESCRIPTION("Phytium PCIe PMU driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Hu Xianghua "); diff --git a/drivers/spi/spi-phytium.c b/drivers/spi/spi-phytium.c index 6b9a2351c545b..b4df5cdf411a7 100644 --- a/drivers/spi/spi-phytium.c +++ b/drivers/spi/spi-phytium.c @@ -405,13 +405,6 @@ int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts) fts->dma_addr = (dma_addr_t)(fts->paddr + DR); snprintf(fts->name, sizeof(fts->name), "phytium_spi%d", fts->bus_num); - ret = request_irq(fts->irq, phytium_spi_irq, IRQF_SHARED, - fts->name, master); - if (ret < 0) { - dev_err(dev, "can not get IRQ\n"); - goto err_free_master; - } - master->use_gpio_descriptors = true; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP; master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); @@ -440,6 +433,14 @@ int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts) } spi_controller_set_devdata(master, fts); + + ret = request_irq(fts->irq, phytium_spi_irq, IRQF_SHARED, + fts->name, master); + if (ret < 0) { + dev_err(dev, "can not get IRQ\n"); + goto err_free_master; + } + ret = spi_register_controller(master); if (ret) { dev_err(&master->dev, "problem registering spi master\n"); diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index abf8c6cdea9ea..89e529900c1f3 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -139,6 +139,8 @@ source "drivers/usb/chipidea/Kconfig" source "drivers/usb/isp1760/Kconfig" +source "drivers/usb/phytium/Kconfig" + comment "USB port drivers" if USB diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index 
949eca0adebea..0bb5d64f16b0c 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_USB_CDNS3)	+= cdns3/
 obj-$(CONFIG_USB_CDNSP_PCI)	+= cdns3/
 obj-$(CONFIG_USB_FOTG210)	+= fotg210/
+obj-$(CONFIG_USB_PHYTIUM)	+= phytium/
 obj-$(CONFIG_USB_MON)		+= mon/
 obj-$(CONFIG_USB_MTU3)		+= mtu3/
diff --git a/drivers/usb/phytium/Kconfig b/drivers/usb/phytium/Kconfig
new file mode 100644
index 0000000000000..324c8eb8d188d
--- /dev/null
+++ b/drivers/usb/phytium/Kconfig
@@ -0,0 +1,22 @@
+config USB_PHYTIUM
+	tristate "Phytium USB Support"
+	depends on USB
+	depends on USB_GADGET
+	help
+	  Say Y or M here if your system has an OTG USB controller based on
+	  a Phytium SoC, like the Pe220x.
+
+	  If you choose to build this driver as a dynamically linked module,
+	  the module will be called phytium-usb.ko.
+
+config USB_PHYTIUM_PCI
+	tristate "Phytium PCI USB Support"
+	default n
+	depends on USB
+	depends on USB_GADGET
+	help
+	  Say Y or M here if your system has an OTG USB controller based on
+	  a Phytium SoC, like the Pe220x.
+
+	  If you choose to build this driver as a dynamically linked module,
+	  the module will be called phytium-usb-pci.ko.
diff --git a/drivers/usb/phytium/Makefile b/drivers/usb/phytium/Makefile
new file mode 100644
index 0000000000000..e176c334cba56
--- /dev/null
+++ b/drivers/usb/phytium/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_USB_PHYTIUM) += phytium-usb.o
+obj-$(CONFIG_USB_PHYTIUM_PCI) += phytium-usb-pci.o
+
+phytium-usb-y := core.o dma.o platform.o host.o gadget.o
+phytium-usb-pci-y := core.o dma.o pci.o host.o gadget.o
diff --git a/drivers/usb/phytium/core.c b/drivers/usb/phytium/core.c
new file mode 100644
index 0000000000000..c0182c0770e5e
--- /dev/null
+++ b/drivers/usb/phytium/core.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "core.h"
+
+int phytium_core_reset(struct phytium_cusb *config, bool skip_wait)
+{
+	if (!config)
+		return 0;
+
+	spin_lock_init(&config->lock);
+
+	return 0;
+}
+
+uint32_t phytium_read32(uint32_t *address)
+{
+	return readl(address);
+}
+
+void phytium_write32(uint32_t *address, uint32_t value)
+{
+	writel(value, address);
+}
+
+uint16_t phytium_read16(uint16_t *address)
+{
+	return readw(address);
+}
+
+void phytium_write16(uint16_t *address, uint16_t value)
+{
+	writew(value, address);
+}
+
+uint8_t phytium_read8(uint8_t *address)
+{
+	return readb(address);
+}
+
+void phytium_write8(uint8_t *address, uint8_t value)
+{
+	writeb(value, address);
+}
+
diff --git a/drivers/usb/phytium/core.h b/drivers/usb/phytium/core.h
new file mode 100644
index 0000000000000..f563672ccaa99
--- /dev/null
+++ b/drivers/usb/phytium/core.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __PHYTIUM_CORE_H__
+#define __PHYTIUM_CORE_H__
+
+#include
+#include
+#include "host_api.h"
+#include "gadget.h"
+
+#define MAX_EPS_CHANNELS 16
+
+struct phytium_ep {
+	struct phytium_cusb *config;
+	u16 max_packet;
+	u8 ep_num;
+	struct GADGET_EP *gadget_ep;
+	struct list_head req_list;
+	struct usb_ep end_point;
+	char name[12];
+	u8 is_tx;
+	const struct usb_endpoint_descriptor *desc;
+	u8 busy;
+};
+
+struct phytium_request {
+	struct usb_request request;
+	struct GADGET_REQ *gadget_request;
+	struct list_head list;
+	struct phytium_ep *ep;
+	struct phytium_cusb *config;
+	u8 is_tx;
+	u8 epnum;
+};
+
+struct phytium_cusb {
+	struct device *dev;
+	void __iomem *regs;
+	void __iomem *phy_regs;
+	int irq;
+	spinlock_t lock;
+	enum usb_dr_mode dr_mode;
+
+	struct GADGET_OBJ *gadget_obj;
+	struct 
GADGET_CFG gadget_cfg; + struct GADGET_CALLBACKS gadget_callbacks; + struct GADGET_SYSREQ gadget_sysreq; + struct GADGET_DEV *gadget_dev; + void *gadget_priv; + + struct usb_gadget gadget; + struct usb_gadget_driver *gadget_driver; + struct phytium_ep endpoints_tx[MAX_EPS_CHANNELS]; + struct phytium_ep endpoints_rx[MAX_EPS_CHANNELS]; + u8 ep0_data_stage_is_tx; + + struct HOST_OBJ *host_obj; + struct HOST_CFG host_cfg; + struct HOST_CALLBACKS host_callbacks; + struct HOST_SYSREQ host_sysreq; + void *host_priv; + struct usb_hcd *hcd; + + struct DMA_OBJ *dma_obj; + struct DMA_CFG dma_cfg; + struct DMA_CALLBACKS dma_callbacks; + struct DMA_SYSREQ dma_sysreq; + bool isVhubHost; +}; + +int phytium_core_reset(struct phytium_cusb *config, bool skip_wait); + +int phytium_host_init(struct phytium_cusb *config); +int phytium_host_uninit(struct phytium_cusb *config); + +#ifdef CONFIG_PM +int phytium_host_resume(void *priv); +int phytium_host_suspend(void *priv); +int phytium_gadget_resume(void *priv); +int phytium_gadget_suspend(void *priv); +#endif + +int phytium_gadget_init(struct phytium_cusb *config); +int phytium_gadget_uninit(struct phytium_cusb *config); + +uint32_t phytium_read32(uint32_t *address); + +void phytium_write32(uint32_t *address, uint32_t value); + +uint16_t phytium_read16(uint16_t *address); + +void phytium_write16(uint16_t *address, uint16_t value); + +uint8_t phytium_read8(uint8_t *address); + +void phytium_write8(uint8_t *address, uint8_t value); + +#endif diff --git a/drivers/usb/phytium/dma.c b/drivers/usb/phytium/dma.c new file mode 100644 index 0000000000000..664d5646ab9b1 --- /dev/null +++ b/drivers/usb/phytium/dma.c @@ -0,0 +1,783 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "core.h" +#include "dma.h" +#include "hw-regs.h" + +#define TRB_POOL_SIZE (sizeof(struct DMA_Trb) * (NUM_OF_TRB)) +#define TRB_BURST_LENGTH (0x80 << 24) + +#define BUILD_NORMAL_TRB_NO_IOC(trb, data_ptr, data_size, stream_id) { \ + trb.dmaAddr = data_ptr; \ + trb.dmaSize = data_size; \ + trb.ctrl = (stream_id << 16) | TD_TYPE_NORMAL | TDF_CYCLE_BIT; } + +#define BUILD_NORMAL_TRB_NO_IOC_CHAIN(trb, data_ptr, data_size, stream_id) { \ + trb.dmaAddr = data_ptr; \ + trb.dmaSize = data_size; \ + trb.ctrl = (stream_id << 16) | TD_TYPE_NORMAL | TDF_CYCLE_BIT | TDF_CHAIN_BIT; } + +#define BUILD_NORMAL_TRB(trb, data_ptr, data_size, stream_id) { \ + trb.dmaAddr = data_ptr; \ + trb.dmaSize = (data_size) | TRB_BURST_LENGTH; \ + trb.ctrl = (stream_id << 16) | TD_TYPE_NORMAL | TDF_CYCLE_BIT |\ + TDF_INT_ON_COMPLECTION | TDF_INT_ON_SHORT_PACKET; } + +#define BUILD_LINK_TRB(trb, target_ptr) { \ + trb.dmaAddr = target_ptr; \ + trb.dmaSize = 0; \ + trb.ctrl = TD_TYPE_LINK | TDF_CYCLE_BIT | TDF_CHAIN_BIT; } + +#define BUILD_EMPTY_TRB(trb) { \ + trb.dmaAddr = 0; \ + trb.dmaSize = 0; \ + trb.ctrl = 0; } + +static uint32_t divRoundUp(uint32_t divident, uint32_t divisor) +{ + return divisor ? 
((divident + divisor - 1) / divisor) : 0; +} + +static inline struct DMA_TrbChainDesc *GetTrbChainDescEntry(struct list_head *list) +{ + return (struct DMA_TrbChainDesc *)((uintptr_t)list - + (uintptr_t)&(((struct DMA_TrbChainDesc *)0)->chainNode)); +} + +static int32_t phytium_dma_probe(struct DMA_CFG *config, struct DMA_SYSREQ *sysReq) +{ + if (!sysReq) + return -EINVAL; + + sysReq->trbMemSize = TRB_POOL_SIZE; + sysReq->privDataSize = sizeof(struct DMA_CONTROLLER); + + return 0; +} + +static int32_t phytium_dma_init(struct DMA_CONTROLLER *priv, + const struct DMA_CFG *config, struct DMA_CALLBACKS *callbacks) +{ + if (!priv || !config || !callbacks) + return -EINVAL; + + if (!config->trbAddr || !config->trbDmaAddr) + return -EINVAL; + + memset((void *)priv, 0, sizeof(struct DMA_CONTROLLER)); + memset((void *)config->trbAddr, 0, TRB_POOL_SIZE); + + priv->trbDMAPoolAddr = config->trbDmaAddr; + priv->trbPoolAddr = config->trbAddr; + priv->regs = (struct DMARegs *)config->regBase; + priv->dmaCfg = *config; + priv->dmaDrv = DMA_GetInstance(); + priv->dmaCallbacks = *callbacks; + priv->isHostCtrlMode = 0; + return 0; +} + +static void phytium_dma_destroy(struct DMA_CONTROLLER *priv) +{ + +} + +static int32_t phytium_dma_start(struct DMA_CONTROLLER *priv) +{ + int i; + + if (!priv) + return -EINVAL; + + priv->dmaMode = DMA_MODE_CHANNEL_INDIVIDUAL; + if ((priv->dmaCfg.dmaModeRx & priv->dmaCfg.dmaModeTx) == 0xFFFF) { + priv->dmaMode = DMA_MODE_GLOBAL_DMULT; + phytium_write32(&priv->regs->conf, DMARF_DMULT); + } else if ((priv->dmaCfg.dmaModeRx | priv->dmaCfg.dmaModeTx)) { + priv->dmaMode = DMA_MODE_GLOBAL_DSING; + phytium_write32(&priv->regs->conf, DMARF_DSING); + } + + for (i = 0; i < MAX_DMA_CHANNELS; i++) { + if (priv->dmaCfg.dmaModeRx & (1 << i)) { + priv->rx[i].dmultEnabled = 1; + priv->rx[i].maxTrbLen = TD_DMULT_MAX_TRB_DATA_SIZE; + priv->rx[i].maxTdLen = TD_DMULT_MAX_TD_DATA_SIZE; + } else { + priv->rx[i].dmultEnabled = 0; + priv->rx[i].maxTrbLen = TD_SING_MAX_TRB_DATA_SIZE; + priv->rx[i].maxTdLen = TD_SING_MAX_TD_DATA_SIZE; + } + priv->rx[i].lastTransferLength = 0; + + if (priv->dmaCfg.dmaModeTx & (1 << i)) { + priv->tx[i].dmultEnabled = 1; + priv->tx[i].maxTrbLen = TD_DMULT_MAX_TRB_DATA_SIZE; + priv->tx[i].maxTdLen = TD_DMULT_MAX_TD_DATA_SIZE; + } else { + priv->tx[i].dmultEnabled = 0; + priv->tx[i].maxTrbLen = TD_SING_MAX_TRB_DATA_SIZE; + priv->tx[i].maxTdLen = TD_SING_MAX_TD_DATA_SIZE; + } + priv->tx[i].lastTransferLength = 0; + } + + return 0; +} + +static uint32_t phytium_dma_stop(struct DMA_CONTROLLER *priv) +{ + + return 0; +} + +static int32_t updateDescBuffer(struct DMA_CONTROLLER *priv, + struct DMA_TrbChainDesc *trbChainDesc, uint16_t status) +{ + uint32_t ep_sel, i; + struct DMA_Channel *channel; + + if (!priv || !trbChainDesc) + return -EINVAL; + + channel = trbChainDesc->channel; + if (!channel) + return -EINVAL; + + if (channel->isIsoc && trbChainDesc->lastTrbIsLink) { + if (channel->trbChainDescList.prev == channel->trbChainDescList.next) + return 0; + } + + for (i = 0; i < trbChainDesc->numOfTrbs; i++) { + if (trbChainDesc->framesDesc) { + if (trbChainDesc->isoError) + trbChainDesc->framesDesc[i].length = 0; + else + trbChainDesc->framesDesc[i].length = + (uint32_t)trbChainDesc->trbPool[i].dmaSize & TD_SIZE_MASK; + } + + trbChainDesc->actualLen += + (uint32_t)trbChainDesc->trbPool[i].dmaSize & TD_SIZE_MASK; + } + + if (trbChainDesc->isoError) + trbChainDesc->actualLen = 0; + + ep_sel = phytium_read32(&priv->regs->ep_sel); + channel->lastTransferLength = 
trbChainDesc->actualLen; + + if (!trbChainDesc->lastTrbIsLink) { + if (!list_empty(&trbChainDesc->chainNode)) { + for (i = 0; i < trbChainDesc->mapSize; i++) + priv->trbChainFreeMap[(trbChainDesc->start >> 3) + i] = 0; + + trbChainDesc->channel->numOfTrbChain--; + trbChainDesc->reserved = 0; + list_del(&trbChainDesc->chainNode); + trbChainDesc = NULL; + } + } + + if (channel->trbChainDescList.prev == &channel->trbChainDescList) + channel->status = DMA_STATUS_FREE; + + if (priv->dmaCallbacks.complete) + priv->dmaCallbacks.complete(priv->parent, channel->hwUsbEppNum, + channel->isDirTx, false); + + if (channel->trbChainDescList.next != &channel->trbChainDescList) { + trbChainDesc = GetTrbChainDescEntry(channel->trbChainDescList.next); + if (trbChainDesc && trbChainDesc->lastTrbIsLink) { + if (!list_empty(&trbChainDesc->chainNode)) { + for (i = 0; i < trbChainDesc->mapSize; i++) + priv->trbChainFreeMap[(trbChainDesc->start >> 3) + i] = 0; + + trbChainDesc->channel->numOfTrbChain--; + trbChainDesc->reserved = 0; + list_del(&trbChainDesc->chainNode); + } + } + } + phytium_write32(&priv->regs->ep_sel, ep_sel); + + return 0; +} + +static void phytium_dma_isr(struct DMA_CONTROLLER *priv) +{ + uint32_t ep_sts, ep_ists, ep_cfg; + int i; + uint8_t isDirTx, epNum; + struct DMA_Channel *channel; + struct DMA_TrbChainDesc *trbChainDesc; + + if (!priv) + return; + + ep_ists = phytium_read32(&priv->regs->ep_ists); + if (!ep_ists) { + phytium_write32(&priv->regs->ep_sts, DMARF_EP_IOC | + DMARF_EP_ISP | DMARF_EP_TRBERR); + return; + } + + for (i = 0; i < 32; i++) { + if (!(ep_ists & (1 << i))) + continue; + + isDirTx = i > 15 ? DMARD_EP_TX : 0u; + epNum = isDirTx ? i - 16 : i; + phytium_write32(&priv->regs->ep_sel, epNum | isDirTx); + + if (isDirTx) + channel = &priv->tx[epNum]; + else + channel = &priv->rx[epNum]; + + ep_sts = phytium_read32(&priv->regs->ep_sts); + + if (ep_sts & DMARF_EP_TRBERR) + phytium_write32(&priv->regs->ep_sts, DMARF_EP_TRBERR); + + if ((ep_sts & DMARF_EP_IOC) || (ep_sts & DMARF_EP_ISP) || (channel->dmultGuard + & DMARF_EP_IOC) || (channel->dmultGuard & DMARF_EP_ISP)) { +retransmit: + phytium_write32(&priv->regs->ep_sts, DMARF_EP_IOC | DMARF_EP_ISP); + trbChainDesc = GetTrbChainDescEntry(channel->trbChainDescList.next); + if (!(ep_sts & DMARF_EP_TRBERR) && channel->dmultEnabled + && !trbChainDesc->lastTrbIsLink) { + if (!priv->isHostCtrlMode && !channel->isDirTx) { + channel->dmultGuard = 0; + phytium_write32(&priv->regs->ep_sts, DMARF_EP_TRBERR); + ep_cfg = phytium_read32(&priv->regs->ep_cfg); + ep_cfg &= ~DMARV_EP_ENABLED; + phytium_write32(&priv->regs->ep_cfg, (uint32_t)ep_cfg); + updateDescBuffer(priv, trbChainDesc, 0); + break; + } + channel->dmultGuard = ep_sts; + break; + } + + channel->dmultGuard = 0; + updateDescBuffer(priv, trbChainDesc, 0); + } + + if (ep_sts & DMARF_EP_OUTSMM) + phytium_write32(&priv->regs->ep_sts, DMARF_EP_OUTSMM); + + if (ep_sts & DMARF_EP_DESCMIS) + phytium_write32(&priv->regs->ep_sts, DMARF_EP_DESCMIS); + + if (ep_sts & DMARF_EP_ISOERR) { + phytium_write32(&priv->regs->ep_sts, DMARF_EP_ISOERR); + trbChainDesc = GetTrbChainDescEntry(channel->trbChainDescList.next); + trbChainDesc->isoError = 1; + goto retransmit; + } + } +} + +static void phytium_dma_errIsr(struct DMA_CONTROLLER *priv, uint8_t irqNr, uint8_t isDirTx) +{ + struct DMA_Channel *channel; + + if (!priv) + return; + + if (isDirTx) + channel = &priv->tx[irqNr]; + else + channel = &priv->rx[irqNr]; + + if (channel->status >= DMA_STATUS_BUSY) + channel->status = DMA_STATUS_ABORT; + + if 
(priv->dmaCallbacks.complete) { + if (!irqNr && priv->resubmit) { + priv->dmaCallbacks.complete(priv->parent, channel->hwUsbEppNum, + channel->isDirTx, true); + priv->resubmit = false; + } else + priv->dmaCallbacks.complete(priv->parent, channel->hwUsbEppNum, + channel->isDirTx, false); + } +} + +static void *phytium_dma_channelAlloc(struct DMA_CONTROLLER *priv, + uint8_t isDirTx, uint8_t hwEpNum, uint8_t isIsoc) +{ + struct DMA_Channel *channel; + uint32_t ep_ien, ep_cfg; + uint16_t dmaMode; + + if (!priv || (hwEpNum >= MAX_DMA_CHANNELS)) + return NULL; + + if (!priv->regs) { + pr_err("dma regs is null\n"); + return NULL; + } + + ep_ien = phytium_read32(&priv->regs->ep_ien); + + if (isDirTx) { + if (priv->tx[hwEpNum].status > DMA_STATUS_FREE) + return NULL; + + channel = &priv->tx[hwEpNum]; + channel->isDirTx = 0x80; + ep_ien |= (0x01 << (hwEpNum + 16)); + dmaMode = priv->dmaCfg.dmaModeTx; + } else { + if (priv->rx[hwEpNum].status > DMA_STATUS_FREE) + return NULL; + + channel = &priv->rx[hwEpNum]; + channel->isDirTx = 0x00; + ep_ien |= (0x01 << hwEpNum); + dmaMode = priv->dmaCfg.dmaModeRx; + } + + channel->isIsoc = 0; + if (isIsoc) + channel->isIsoc = 1; + + INIT_LIST_HEAD(&channel->trbChainDescList); + channel->numOfTrbChain = 0; + channel->controller = priv; + channel->dmultGuard = 0; + channel->status = DMA_STATUS_FREE; + channel->hwUsbEppNum = hwEpNum; + + phytium_write32(&priv->regs->ep_sel, (uint32_t)hwEpNum | channel->isDirTx); + ep_cfg = phytium_read32(&priv->regs->ep_cfg); + + if (priv->dmaMode == DMA_MODE_CHANNEL_INDIVIDUAL) { + if (dmaMode & (1 << hwEpNum)) + ep_cfg |= DMARV_EP_DMULT; + else + ep_cfg |= DMARV_EP_DSING; + } + + phytium_write32(&priv->regs->ep_sts, DMARF_EP_IOC | DMARF_EP_ISP | DMARF_EP_TRBERR); + phytium_write32(&priv->regs->ep_cfg, (uint32_t)DMARV_EP_ENABLED | ep_cfg); + phytium_write32(&priv->regs->ep_ien, ep_ien); + phytium_write32(&priv->regs->ep_sts_en, DMARF_EP_TRBERR); + + return channel; +} + +static int32_t phytium_dma_channelRelease(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel) +{ + uint32_t ep_ien, i; + struct DMA_TrbChainDesc *trbChainDesc; + + if (!channel || !priv) + return -EINVAL; + + ep_ien = phytium_read32(&priv->regs->ep_ien); + if (channel->isDirTx) + ep_ien &= ~(0x01 << (channel->hwUsbEppNum + 16)); + else + ep_ien &= ~(0x01 << channel->hwUsbEppNum); + + phytium_write32(&priv->regs->ep_sel, (uint32_t)channel->hwUsbEppNum | channel->isDirTx); + phytium_write32(&priv->regs->ep_cfg, (uint32_t)0); + phytium_write32(&priv->regs->ep_ien, ep_ien); + phytium_write32(&priv->regs->ep_sts_en, 0x0); + phytium_write32(&priv->regs->ep_cmd, DMARF_EP_EPRST); + phytium_write32(&priv->regs->ep_sts, DMARF_EP_IOC | DMARF_EP_ISP | + DMARF_EP_TRBERR | DMARF_EP_ISOERR); + + if (channel->status >= DMA_STATUS_BUSY) + channel->status = DMA_STATUS_ABORT; + + priv->dmaDrv->dma_channelAbort(priv, channel); + + while (channel->trbChainDescList.next != &channel->trbChainDescList) { + trbChainDesc = GetTrbChainDescEntry(channel->trbChainDescList.next); + if (trbChainDesc) { + if (!list_empty(&trbChainDesc->chainNode)) { + for (i = 0; i < trbChainDesc->mapSize; i++) + priv->trbChainFreeMap[(trbChainDesc->start >> 3) + i] = 0; + + trbChainDesc->channel->numOfTrbChain--; + trbChainDesc->reserved = 0; + list_del(&trbChainDesc->chainNode); + } + } + } + + channel->status = DMA_STATUS_UNKNOWN; + + return 0; +} + +static void ShowTrbChain(struct DMA_TrbChainDesc *trbChainDesc) +{ + int i; + + if (!trbChainDesc) + return; + pr_debug("Trb Chain %p for channel %p\n", 
trbChainDesc, trbChainDesc->channel); + pr_debug("idx | trb size | trb addr | FLAG: NORMAL LINK CHAIN ALL\n"); + for (i = 0; i < trbChainDesc->numOfTrbs + 2; i++) + pr_debug("%02d | %08x | %08x | %d %d %d %08x\n", + i, + trbChainDesc->trbPool[i].dmaSize & TD_SIZE_MASK, + trbChainDesc->trbPool[i].dmaAddr, + (trbChainDesc->trbPool[i].ctrl & TD_TYPE_NORMAL) ? 1 : 0, + (trbChainDesc->trbPool[i].ctrl & TD_TYPE_LINK) ? 1 : 0, + (trbChainDesc->trbPool[i].ctrl & TDF_CHAIN_BIT) ? 1 : 0, + trbChainDesc->trbPool[i].ctrl); +} + +static uint32_t phytium_dma_NewTd(struct DMA_CONTROLLER *priv, + struct DMA_TrbChainDesc *trbChainDesc) +{ + uint32_t startAddress, dataSize; + struct DMA_Channel *channel; + int i = 0; + uint32_t tmp = 0; + + if (!priv || !trbChainDesc) + return 0; + + startAddress = trbChainDesc->dwStartAddress; + channel = trbChainDesc->channel; + dataSize = channel->maxTrbLen; + + if (trbChainDesc->numOfTrbs > 1) { + for (i = 0; i < (trbChainDesc->numOfTrbs - 1); i++) { + if (channel->dmultEnabled) { + if (trbChainDesc->framesDesc && priv->isHostCtrlMode + && channel->isIsoc) { + BUILD_NORMAL_TRB_NO_IOC(trbChainDesc->trbPool[i], + trbChainDesc->dwStartAddress + + trbChainDesc->framesDesc[i].offset, + trbChainDesc->framesDesc[i].length, 0); + continue; + } else + BUILD_NORMAL_TRB_NO_IOC(trbChainDesc->trbPool[i], + startAddress, dataSize, 0); + } else + BUILD_NORMAL_TRB_NO_IOC_CHAIN(trbChainDesc->trbPool[i], + startAddress, dataSize, 0); + startAddress += dataSize; + tmp += dataSize; + } + } + + if (trbChainDesc->framesDesc && priv->isHostCtrlMode && channel->isIsoc) { + BUILD_NORMAL_TRB(trbChainDesc->trbPool[i], + trbChainDesc->dwStartAddress + trbChainDesc->framesDesc[i].offset, + trbChainDesc->framesDesc[i].length, 0); + } else + BUILD_NORMAL_TRB(trbChainDesc->trbPool[i], startAddress, + trbChainDesc->len - tmp, 0); + + trbChainDesc->lastTrbIsLink = 0; + if (channel->dmultEnabled) { + if (channel->isIsoc) { + trbChainDesc->lastTrbIsLink = 1; + BUILD_LINK_TRB(trbChainDesc->trbPool[i + 1], trbChainDesc->trbDMAAddr); + } else + BUILD_EMPTY_TRB(trbChainDesc->trbPool[i + 1]); + } + + ShowTrbChain(trbChainDesc); + + return 0; +} + +static void phytium_dma_ArmTd(struct DMA_CONTROLLER *priv, struct DMA_TrbChainDesc *trbChainDesc) +{ + uint32_t ep_sts, ep_cfg, ep_cmd; + struct DMA_TrbChainDesc *startedChain; + struct DMA_Trb *lastPrevTrb; + struct DMA_Channel *channel; + + if (!priv || !trbChainDesc) + return; + + channel = trbChainDesc->channel; + phytium_write32(&priv->regs->ep_sel, (channel->isDirTx | channel->hwUsbEppNum)); + + ep_sts = phytium_read32(&priv->regs->ep_sts); + + if (channel->status == DMA_STATUS_FREE) { + phytium_write32(&priv->regs->ep_sts, DMARF_EP_TRBERR); + phytium_write32(&priv->regs->traddr, trbChainDesc->trbDMAAddr); + phytium_write32(&priv->regs->ep_sts, DMARF_EP_TRBERR); + + ep_cfg = phytium_read32(&priv->regs->ep_cfg); + phytium_write32(&priv->regs->ep_cmd, DMARF_EP_DRDY); + if (!(ep_cfg & DMARV_EP_ENABLED)) { + ep_cfg |= DMARV_EP_ENABLED; + phytium_write32(&priv->regs->ep_cfg, ep_cfg); + } + } else { + if (channel->trbChainDescList.prev != channel->trbChainDescList.next) { + startedChain = GetTrbChainDescEntry(channel->trbChainDescList.prev->prev); + lastPrevTrb = &startedChain->trbPool[startedChain->numOfTrbs]; + startedChain->lastTrbIsLink = 1; + BUILD_LINK_TRB((*lastPrevTrb), trbChainDesc->trbDMAAddr); + ep_cmd = phytium_read32(&priv->regs->ep_cmd); + if (!(ep_cmd & DMARF_EP_DRDY)) { + phytium_write32(&priv->regs->traddr, trbChainDesc->trbDMAAddr); + 
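/*
+				 * DRDY is already clear, i.e. the engine went
+				 * idle before the new chain was linked in, so
+				 * reload traddr and set DRDY to restart it.
+				 */
+				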
phytium_write32(&priv->regs->ep_cmd, DMARF_EP_DRDY);
+			}
+			ShowTrbChain(startedChain);
+		}
+	}
+}
+
+static int32_t phytium_dma_TrbChainAlloc(struct DMA_CONTROLLER *priv,
+	struct DMA_Channel *channel, uint32_t numOfTrbs, struct DMA_TrbChainDesc **chain)
+{
+	struct DMA_TrbChainDesc *trbChainDesc = NULL;
+	int i, j;
+	uint32_t mapcount, count = 0;
+
+	if (!priv || !channel)
+		return -ENOMEM;
+
+	for (i = 0; i < TAB_SIZE_OF_DMA_CHAIN; i++) {
+		if (!priv->trbChainDesc[i].reserved) {
+			trbChainDesc = &priv->trbChainDesc[i];
+			break;
+		}
+	}
+
+	if (!trbChainDesc) {
+		pr_err("No Free TRB Chain Descriptor\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&trbChainDesc->chainNode);
+	*chain = NULL;
+	mapcount = (numOfTrbs + 2 + 7) >> 3;
+
+	for (i = 0; i < TRB_MAP_SIZE; i++) {
+		if (priv->trbChainFreeMap[i] == 0x0)
+			count++;
+		else
+			count = 0;
+
+		if (count == mapcount)
+			break;
+	}
+
+	if (count != mapcount) {
+		pr_err("No Free TRBs count:0x%x, mapcount:0x%x\n", count, mapcount);
+		return -ENOMEM;
+	}
+
+	trbChainDesc->reserved = 1;
+	trbChainDesc->channel = channel;
+	trbChainDesc->actualLen = 0;
+	trbChainDesc->mapSize = mapcount;
+	trbChainDesc->numOfTrbs = numOfTrbs;
+	trbChainDesc->start = (i + 1 - mapcount) << 3;
+	trbChainDesc->end = trbChainDesc->start;
+	trbChainDesc->trbPool = (struct DMA_Trb *)priv->trbPoolAddr + trbChainDesc->start;
+	trbChainDesc->trbDMAAddr = (uint32_t)(uintptr_t)((struct DMA_Trb *)priv->trbDMAPoolAddr +
+			trbChainDesc->start);
+	trbChainDesc->isoError = 0;
+
+	list_add_tail(&trbChainDesc->chainNode, &channel->trbChainDescList);
+
+	channel->numOfTrbChain++;
+
+	for (j = 0; j < count; j++)
+		priv->trbChainFreeMap[i - j] = 0xFF;
+
+	*chain = trbChainDesc;
+
+	return 0;
+}
+
+static int32_t phytium_dma_channelProgram(struct DMA_CONTROLLER *priv,
+	struct DMA_Channel *channel, uint16_t packetSize, uintptr_t dmaAddr,
+	uint32_t len, void *framesDesc, uint32_t framesNumber)
+{
+	uint32_t numOfTrbs;
+	int32_t retval;
+	struct DMA_TrbChainDesc *trbChainDesc;
+
+	if (!priv || !channel)
+		return -EINVAL;
+
+	if (framesDesc && priv->isHostCtrlMode && channel->isIsoc)
+		numOfTrbs = framesNumber;
+	else
+		numOfTrbs = divRoundUp(len, channel->maxTrbLen);
+
+	retval = phytium_dma_TrbChainAlloc(priv, channel, numOfTrbs, &trbChainDesc);
+	if (retval)
+		return retval;
+
+	trbChainDesc->dwStartAddress = dmaAddr;
+	trbChainDesc->len = len;
+	trbChainDesc->framesDesc = framesDesc;
+
+	channel->wMaxPacketSize = packetSize;
+	phytium_dma_NewTd(priv, trbChainDesc);
+	channel->lastTransferLength = 0;
+	phytium_dma_ArmTd(priv, trbChainDesc);
+
+	channel->status = DMA_STATUS_BUSY;
+
+	return 0;
+}
+
+static enum DMA_Status phytium_dma_getChannelStatus(struct DMA_CONTROLLER *priv,
+	struct DMA_Channel *channel)
+{
+	uint32_t ep_cmd, ep_sts;
+
+	if (!priv || !channel)
+		return DMA_STATUS_UNKNOWN;
+
+	if (channel->status >= DMA_STATUS_BUSY) {
+		phytium_write32(&priv->regs->ep_sel, channel->isDirTx | channel->hwUsbEppNum);
+		ep_cmd = phytium_read32(&priv->regs->ep_cmd);
+		ep_sts = phytium_read32(&priv->regs->ep_sts);
+
+		if ((ep_cmd & DMARF_EP_DRDY) || (ep_sts & DMARF_EP_DBUSY))
+			channel->status = DMA_STATUS_ARMED;
+		else
+			channel->status = DMA_STATUS_BUSY;
+	}
+
+	return channel->status;
+}
+
+static int32_t phytium_dma_channelAbort(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel)
+{
+	struct DMA_TrbChainDesc *trbChainDesc;
+	uint32_t ep_cfg, i;
+
+	if (!priv || !channel)
+		return -EINVAL;
+
+	if (phytium_dma_getChannelStatus(priv, 
channel) >= DMA_STATUS_BUSY) + phytium_write32(&priv->regs->conf, DMARF_RESET); + + phytium_write32(&priv->regs->ep_sel, (uint32_t)(channel->isDirTx | channel->hwUsbEppNum)); + ep_cfg = phytium_read32(&priv->regs->ep_cfg); + ep_cfg &= ~DMARV_EP_ENABLED; + phytium_write32(&priv->regs->ep_cfg, ep_cfg); + phytium_write32(&priv->regs->ep_sts, 0xFFFFFFFF); + + while (channel->trbChainDescList.next != &channel->trbChainDescList) { + trbChainDesc = GetTrbChainDescEntry(channel->trbChainDescList.next); + if (trbChainDesc) { + if (!list_empty(&trbChainDesc->chainNode)) { + for (i = 0; i < trbChainDesc->mapSize; i++) + priv->trbChainFreeMap[(trbChainDesc->start >> 3) + i] = 0; + + trbChainDesc->channel->numOfTrbChain--; + trbChainDesc->reserved = 0; + list_del(&trbChainDesc->chainNode); + } + } + } + if (channel->status != DMA_STATUS_UNKNOWN) + channel->status = DMA_STATUS_FREE; + + return 0; +} + +static int32_t phytium_dma_getActualLength(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel) +{ + if (!priv || !channel) + return -EINVAL; + + return channel->lastTransferLength; +} + +static int32_t phytium_dma_getMaxLength(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel) +{ + if (!priv || !channel) + return -EINVAL; + + return channel->maxTdLen; +} + +static int32_t phytium_dma_setMaxLength(struct DMA_CONTROLLER *priv, + struct DMA_Channel *channel, uint32_t val) +{ + if (!priv || !channel) + return -EINVAL; + + if (channel->dmultEnabled) + channel->maxTrbLen = val; + else + channel->maxTrbLen = (val > TD_SING_MAX_TRB_DATA_SIZE) ? + TD_SING_MAX_TRB_DATA_SIZE : val; + + return 0; +} + +static void phytium_dma_setParentPriv(struct DMA_CONTROLLER *priv, void *parent) +{ + if (!priv) + return; + + priv->parent = parent; +} + +static void phytium_dma_controllerReset(struct DMA_CONTROLLER *priv) +{ + uint32_t conf; + + if (!priv) + return; + + conf = phytium_read32(&priv->regs->conf); + conf |= DMARF_RESET; + phytium_write32(&priv->regs->conf, conf); + + priv->resubmit = true; +} + +static void phytium_dma_setHostMode(struct DMA_CONTROLLER *priv) +{ + if (!priv) + return; + + priv->isHostCtrlMode = 1; +} + +struct DMA_OBJ phytium_dma_Drv = { + .dma_probe = phytium_dma_probe, + .dma_init = phytium_dma_init, + .dma_destroy = phytium_dma_destroy, + .dma_start = phytium_dma_start, + .dma_stop = phytium_dma_stop, + .dma_isr = phytium_dma_isr, + .dma_errIsr = phytium_dma_errIsr, + .dma_channelAlloc = phytium_dma_channelAlloc, + .dma_channelRelease = phytium_dma_channelRelease, + .dma_channelProgram = phytium_dma_channelProgram, + .dma_channelAbort = phytium_dma_channelAbort, + .dma_getChannelStatus = phytium_dma_getChannelStatus, + .dma_getActualLength = phytium_dma_getActualLength, + .dma_getMaxLength = phytium_dma_getMaxLength, + .dma_setMaxLength = phytium_dma_setMaxLength, + .dma_setParentPriv = phytium_dma_setParentPriv, + .dma_controllerReset = phytium_dma_controllerReset, + .dma_setHostMode = phytium_dma_setHostMode, +}; + +struct DMA_OBJ *DMA_GetInstance(void) +{ + return &phytium_dma_Drv; +} diff --git a/drivers/usb/phytium/dma.h b/drivers/usb/phytium/dma.h new file mode 100644 index 0000000000000..218c99d3d0155 --- /dev/null +++ b/drivers/usb/phytium/dma.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __PHYTIUM_DMA_H__ +#define __PHYTIUM_DMA_H__ + +#include + +#define NUM_OF_TRB 1024 +#define TRB_MAP_SIZE ((NUM_OF_TRB + (sizeof(uint8_t) * 8) - 1) / (sizeof(uint8_t) * 8)) +#define MAX_DMA_CHANNELS 16 +#define TAB_SIZE_OF_DMA_CHAIN (MAX_DMA_CHANNELS * 2) + +#define 
DMARD_EP_TX 0x80ul +#define DMARD_EP_RX 0x00ul + +#define DMARF_EP_EPRST 0x00000001ul +#define DMARF_EP_DRDY 0x00000040ul +#define DMARF_EP_DFLUSH 0x00000080ul + +#define DMARF_EP_IOC 0x4ul +#define DMARF_EP_ISP 0x8ul +#define DMARF_EP_DESCMIS 0x10ul +#define DMARF_EP_TRBERR 0x80ul +#define DMARF_EP_DBUSY 0x200ul +#define DMARF_EP_CCS 0x800ul +#define DMARF_EP_OUTSMM 0x4000ul +#define DMARF_EP_ISOERR 0x8000ul +#define DMARF_EP_DTRANS 0x80000000ul + +#define DMARV_EP_DISABLED 0ul +#define DMARV_EP_ENABLED 1ul +#define DMARV_EP_DSING 0x1000ul +#define DMARV_EP_DMULT 0x2000ul + +#define TD_SIZE_MASK 0x00001FFFF + +#define DMARF_RESET 0x00000001ul +#define DMARF_DSING 0x00000100ul +#define DMARF_DMULT 0x00000200ul + +#define TD_DMULT_MAX_TRB_DATA_SIZE 65536u +#define TD_DMULT_MAX_TD_DATA_SIZE (~1u) +#define TD_SING_MAX_TRB_DATA_SIZE 65536u +#define TD_SING_MAX_TD_DATA_SIZE 65536u + +#define TD_TYPE_NORMAL 0x400L +#define TD_TYPE_LINK 0x1800L +#define TDF_CYCLE_BIT 0x1L +#define TDF_TOGGLE_CYCLE_BIT 0x2L +#define TDF_INT_ON_SHORT_PACKET 0x4L +#define TDF_FIFO_MODE 0x8L +#define TDF_CHAIN_BIT 0x10L +#define TDF_INT_ON_COMPLECTION 0x20L +#define TDF_STREAMID_VALID 0x200L + +struct DMA_Trb { + uint32_t dmaAddr; + uint32_t dmaSize; + uint32_t ctrl; +}; + +enum DMA_Status { + DMA_STATUS_UNKNOWN, + DMA_STATUS_FREE, + DMA_STATUS_ABORT, + DMA_STATUS_BUSY, + DMA_STATUS_ARMED +}; + +struct DMA_CFG { + uintptr_t regBase; + uint16_t dmaModeTx; + uint16_t dmaModeRx; + void *trbAddr; + uintptr_t trbDmaAddr; +}; + +struct DMA_SYSREQ { + uint32_t privDataSize; + uint32_t trbMemSize; +}; + +struct DMA_CALLBACKS { + void (*complete)(void *pD, uint8_t epNum, uint8_t dir, bool resubmit); +}; + +struct DMA_CONTROLLER; + +struct DMA_Channel { + struct DMA_CONTROLLER *controller; + uint16_t wMaxPacketSize; + uint8_t hwUsbEppNum; + uint8_t isDirTx; + uint32_t maxTdLen; + uint32_t maxTrbLen; + enum DMA_Status status; + void *priv; + uint32_t dmultGuard; + uint8_t dmultEnabled; + uint8_t numOfTrbChain; + struct list_head trbChainDescList; + uint32_t lastTransferLength; + uint8_t isIsoc; +}; + +struct DMA_OBJ { + int32_t (*dma_probe)(struct DMA_CFG *config, struct DMA_SYSREQ *sysReq); + + int32_t (*dma_init)(struct DMA_CONTROLLER *priv, const struct DMA_CFG *config, + struct DMA_CALLBACKS *callbacks); + + void (*dma_destroy)(struct DMA_CONTROLLER *priv); + + int32_t (*dma_start)(struct DMA_CONTROLLER *priv); + + uint32_t (*dma_stop)(struct DMA_CONTROLLER *priv); + + void (*dma_isr)(struct DMA_CONTROLLER *priv); + + void (*dma_errIsr)(struct DMA_CONTROLLER *priv, uint8_t irqNr, uint8_t isDirTx); + + void * (*dma_channelAlloc)(struct DMA_CONTROLLER *priv, + uint8_t isDirTx, uint8_t hwEpNum, uint8_t isIso); + + int32_t (*dma_channelRelease)(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel); + + int32_t (*dma_channelProgram)(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel, + uint16_t packetSize, uintptr_t dmaAddr, + uint32_t len, void *framesDesc, uint32_t framesNumber); + + int32_t (*dma_channelAbort)(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel); + + enum DMA_Status (*dma_getChannelStatus)(struct DMA_CONTROLLER *priv, + struct DMA_Channel *channel); + + int32_t (*dma_getActualLength)(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel); + + int32_t (*dma_getMaxLength)(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel); + + int32_t (*dma_setMaxLength)(struct DMA_CONTROLLER *priv, + struct DMA_Channel *channel, uint32_t val); + + void (*dma_setParentPriv)(struct DMA_CONTROLLER *priv, 
void *parent); + + void (*dma_controllerReset)(struct DMA_CONTROLLER *priv); + + void (*dma_setHostMode)(struct DMA_CONTROLLER *priv); +}; + +enum DMA_Mode { + DMA_MODE_GLOBAL_DMULT, + DMA_MODE_GLOBAL_DSING, + DMA_MODE_CHANNEL_INDIVIDUAL, +}; + +struct DMA_TrbFrameDesc { + uint32_t length; + uint32_t offset; +}; + +struct DMA_TrbChainDesc { + uint8_t reserved; + struct DMA_Channel *channel; + struct DMA_Trb *trbPool; + uint32_t trbDMAAddr; + uint32_t len; + uint32_t dwStartAddress; + uint32_t actualLen; + uint8_t isoError; + uint8_t lastTrbIsLink; + uint32_t mapSize; + uint32_t numOfTrbs; + uint32_t start; + uint32_t end; + struct DMA_TrbFrameDesc *framesDesc; + struct list_head chainNode; +}; + +struct DMA_CONTROLLER { + struct DMARegs *regs; + struct DMA_OBJ *dmaDrv; + struct DMA_CFG dmaCfg; + struct DMA_CALLBACKS dmaCallbacks; + struct DMA_Channel rx[MAX_DMA_CHANNELS]; + struct DMA_Channel tx[MAX_DMA_CHANNELS]; + enum DMA_Mode dmaMode; + uint8_t isHostCtrlMode; + void *parent; + void *trbPoolAddr; + uintptr_t trbDMAPoolAddr; + uint8_t trbChainFreeMap[TRB_MAP_SIZE]; + struct DMA_TrbChainDesc trbChainDesc[TAB_SIZE_OF_DMA_CHAIN]; + bool resubmit; +}; + +struct DMA_OBJ *DMA_GetInstance(void); +#endif diff --git a/drivers/usb/phytium/gadget.c b/drivers/usb/phytium/gadget.c new file mode 100644 index 0000000000000..d966387a4b48d --- /dev/null +++ b/drivers/usb/phytium/gadget.c @@ -0,0 +1,2538 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include "gadget.h" +#include "dma.h" +#include "core.h" +#include "hw-regs.h" + +#define DRV_NAME "phytium_gadget" + +#define GADGET_PRIV_BUFFER_SIZE 64 +#define GADGET_USB_EP_NUMBER_MASK 0xf +#define DMA_ADDR_INVALID (~(dma_addr_t)0) + +#define GADGET_ESTALL 1 +#define GADGET_EUNHANDLED 2 +#define GADGET_EAUTOACK 3 +#define GADGET_ESHUTDOWN 4 +#define GADGET_ECONNRESET 5 +#define GADGET_EAGAIN 6 + +static inline struct GadgetEp *toGadgetEp(struct GADGET_EP *gadget_Ep) +{ + return (struct GadgetEp *)((uintptr_t)gadget_Ep - + ((uintptr_t)&((struct GadgetEp *)0)->gadgetEp)); +} + +static inline struct GadgetRequest *requestToGadgetRequest(struct GADGET_REQ *req) +{ + return (struct GadgetRequest *)((uintptr_t)req - + ((uintptr_t)&((struct GadgetRequest *)0)->request)); +} + +static inline struct GADGET_REQ *listToGadgetRequest(struct list_head *list) +{ + return (struct GADGET_REQ *)((uintptr_t)list - + ((uintptr_t)&((struct GADGET_REQ *)0)->list)); +} + +#define listBrowsingRequest(iterator, head, memeber) \ + for (iterator = listToGadgetRequest((head)->next); \ + &iterator->list != (head); \ + iterator = listToGadgetRequest(iterator->list.next)) + +static inline struct GADGET_REQ *gadgetGetNextReq(struct GadgetEp *gadgetEp) +{ + struct list_head *list = &gadgetEp->request; + + if (list_empty(list)) { + pr_debug("no request available for %s\n", gadgetEp->gadgetEp.name); + return NULL; + } + + return listToGadgetRequest(list->next); +} + +static inline struct GADGET_REQ *gadgetGetNextEp0Req(struct GADGET_CTRL *priv) +{ + struct list_head *queue; + + if (!priv) + return NULL; + + queue = &priv->in[0].request; + + if (list_empty(queue)) + return NULL; + + return listToGadgetRequest(queue->next); +} + +static void gadget_giveback(struct phytium_ep *phy_ep, struct usb_request *usb_req, int status) +{ + struct phytium_request *phy_req; + struct phytium_cusb *config; + int busy; + + if (!phy_ep || !usb_req) + return; + + busy = phy_ep->busy; + phy_req = usb_req ? 
container_of(usb_req, struct phytium_request, request) : NULL; + config = phy_req->config; + + list_del(&phy_req->list); + + if (usb_req->status == -EINPROGRESS) { + if (status == GADGET_ESHUTDOWN) + usb_req->status = -ESHUTDOWN; + else if (status == GADGET_ECONNRESET) + usb_req->status = -ECONNRESET; + else + usb_req->status = -phy_req->gadget_request->status; + } + + if (usb_req->status == 0) + pr_debug("%s done request %p, %d/%d\n", phy_ep->end_point.name, + usb_req, usb_req->actual, usb_req->length); + else + pr_debug("%s request %p, %d/%d fault %d\n", phy_ep->end_point.name, + usb_req, usb_req->actual, usb_req->length, usb_req->status); + + usb_gadget_unmap_request(&config->gadget, &phy_req->request, phy_req->is_tx); + + busy = phy_ep->busy; + phy_ep->busy = 1; + + spin_unlock(&config->lock); + + if (phy_req->request.complete) + phy_req->request.complete(&phy_req->ep->end_point, usb_req); + + spin_lock(&config->lock); + + phy_ep->busy = busy; +} + +static void gadget_callback_complete(struct GADGET_EP *gadget_ep, struct GADGET_REQ *gadget_req) +{ + struct phytium_ep *phy_ep; + struct phytium_request *phy_req; + struct usb_request *usb_req; + + if (!gadget_ep || !gadget_req) + return; + + phy_req = gadget_req->context; + usb_req = &phy_req->request; + phy_ep = phy_req->ep; + usb_req->actual = gadget_req->actual; + usb_req->length = gadget_req->length; + + gadget_giveback(phy_ep, usb_req, gadget_req->status); +} + +static void gadgetDisconnect(struct GADGET_CTRL *priv) +{ + pr_info("Disconnect USB Device Driver\n"); + + if (!priv) + return; + + priv->gadgetDev.speed = USB_SPEED_UNKNOWN; + priv->gadgetDev.state = USB_STATE_NOTATTACHED; + + if (priv->eventCallback.disconnect) + priv->eventCallback.disconnect(priv); +} + +static int gadget_get_frame(struct usb_gadget *gadget) +{ + pr_info("%s %d\n", __func__, __LINE__); + + return 0; +} + +static int gadget_wakeup(struct usb_gadget *gadget) +{ + pr_info("%s %d\n", __func__, __LINE__); + + return 0; +} + +static int gadget_vbus_session(struct usb_gadget *gadget, int is_active) +{ + pr_info("%s %d\n", __func__, __LINE__); + + return 0; +} + +static int gadget_vbus_draw(struct usb_gadget *gadget, unsigned int mA) +{ + pr_info("%s %d\n", __func__, __LINE__); + + return 0; +} + +static int gadget_pullup(struct usb_gadget *gadget, int is_on) +{ + pr_info("%s %d\n", __func__, __LINE__); + + return 0; +} + +static int gadget_udc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver) +{ + unsigned long flags; + struct phytium_cusb *config; + struct GADGET_CTRL *priv; + uint32_t gen_cfg; + + if (!gadget || !driver) + return -EINVAL; + + if (driver->max_speed < USB_SPEED_HIGH) + return -EINVAL; + + config = container_of(gadget, struct phytium_cusb, gadget); + + pr_info("registering driver %s\n", driver->function); + + spin_lock_irqsave(&config->lock, flags); + config->gadget_driver = driver; + spin_unlock_irqrestore(&config->lock, flags); + + config->gadget_obj->gadget_start(config->gadget_priv); + + priv = (struct GADGET_CTRL *)config->gadget_priv; + if (priv->phy_regs) { + gen_cfg = phytium_read32(&priv->phy_regs->gen_cfg); + gen_cfg = gen_cfg & (~BIT(7)); + phytium_write32(&priv->phy_regs->gen_cfg, gen_cfg); + } + + return 0; +} + +static int gadget_udc_stop(struct usb_gadget *gadget) +{ + struct phytium_cusb *config; + unsigned long flags; + struct GADGET_CTRL *priv; + uint32_t gen_cfg; + + if (!gadget) + return -EINVAL; + + config = container_of(gadget, struct phytium_cusb, gadget); + + priv = (struct GADGET_CTRL 
*)config->gadget_priv; + if (priv->phy_regs) { + gen_cfg = phytium_read32(&priv->phy_regs->gen_cfg); + gen_cfg = gen_cfg | BIT(7); + phytium_write32(&priv->phy_regs->gen_cfg, gen_cfg); + } + spin_lock_irqsave(&config->lock, flags); + config->gadget_driver = NULL; + spin_unlock_irqrestore(&config->lock, flags); + + return 0; +} + +static struct usb_gadget_ops phytium_gadget_ops = { + .get_frame = gadget_get_frame, + .wakeup = gadget_wakeup, + .vbus_session = gadget_vbus_session, + .vbus_draw = gadget_vbus_draw, + .pullup = gadget_pullup, + .udc_start = gadget_udc_start, + .udc_stop = gadget_udc_stop, +}; + +static int gadget_ep_enable(struct usb_ep *ls_ep, const struct usb_endpoint_descriptor *desc) +{ + struct phytium_ep *phy_ep = NULL; + struct phytium_cusb *config; + unsigned long flags; + + if (!ls_ep || !desc) + return -EINVAL; + + phy_ep = ls_ep ? container_of(ls_ep, struct phytium_ep, end_point) : NULL; + config = phy_ep->config; + + if (phy_ep->desc) + return -EBUSY; + + spin_lock_irqsave(&config->lock, flags); + + phy_ep->desc = desc; + phy_ep->busy = 0; + config->gadget_obj->gadget_epEnable(config->gadget_priv, phy_ep->gadget_ep, desc); + + spin_unlock_irqrestore(&config->lock, flags); + + return 0; +} + +static int gadget_ep_disable(struct usb_ep *ls_ep) +{ + struct phytium_ep *phy_ep = NULL; + struct phytium_cusb *config; + unsigned long flags; + + if (!ls_ep) + return -EBUSY; + + pr_info("%s %d\n", __func__, __LINE__); + + phy_ep = ls_ep ? container_of(ls_ep, struct phytium_ep, end_point) : NULL; + config = phy_ep->config; + + spin_lock_irqsave(&config->lock, flags); + + phy_ep->desc = NULL; + phy_ep->busy = 0; + phy_ep->end_point.desc = NULL; + config->gadget_obj->gadget_epDisable(config->gadget_priv, phy_ep->gadget_ep); + + spin_unlock_irqrestore(&config->lock, flags); + + return 0; +} + +static struct usb_request *gadget_ep_alloc_request(struct usb_ep *ls_ep, gfp_t gfp_flags) +{ + struct phytium_ep *phy_ep; + struct phytium_cusb *config; + struct GADGET_EP *gadget_ep; + struct phytium_request *phy_request; + + if (!ls_ep) + return NULL; + + pr_info("%s %d\n", __func__, __LINE__); + phy_request = kzalloc(sizeof(*phy_request), gfp_flags); + if (!phy_request) { + pr_err("not enough memory\n"); + return NULL; + } + + phy_ep = ls_ep ? container_of(ls_ep, struct phytium_ep, end_point) : NULL; + config = phy_ep->config; + gadget_ep = phy_ep->gadget_ep; + + INIT_LIST_HEAD(&phy_request->list); + phy_request->request.dma = DMA_ADDR_INVALID; + phy_request->epnum = phy_ep->ep_num; + phy_request->ep = phy_ep; + phy_request->config = phy_ep->config; + + config->gadget_obj->gadget_reqAlloc(config->gadget_priv, gadget_ep, + &phy_request->gadget_request); + + return &phy_request->request; +} + +static void gadget_ep_free_request(struct usb_ep *ls_ep, struct usb_request *ls_req) +{ + struct phytium_ep *phy_ep; + struct phytium_cusb *config; + struct phytium_request *phy_request; + + if (!ls_ep || !ls_req) + return; + + pr_info("%s %d\n", __func__, __LINE__); + phy_request = ls_req ? container_of(ls_req, struct phytium_request, request) : NULL; + config = phy_request->config; + phy_ep = ls_ep ?
container_of(ls_ep, struct phytium_ep, end_point) : NULL; + + config->gadget_obj->gadget_reqFree(config->gadget_priv, phy_ep->gadget_ep, + phy_request->gadget_request); + kfree(phy_request); +} + +static int gadget_ep_enqueue(struct usb_ep *ls_ep, struct usb_request *ls_req, gfp_t gfp_flags) +{ + struct phytium_ep *phy_ep; + struct phytium_cusb *config; + struct phytium_request *phy_request; + unsigned long flags; + int status = 0; + + if (!ls_ep || !ls_req) + return -EINVAL; + + if (!ls_req->buf) + return -ENODATA; + + phy_ep = ls_ep ? container_of(ls_ep, struct phytium_ep, end_point) : NULL; + config = phy_ep->config; + phy_request = ls_req ? container_of(ls_req, struct phytium_request, request) : NULL; + phy_request->config = config; + + if (phy_request->ep != phy_ep) + return -EINVAL; + + phy_request->request.actual = 0; + phy_request->request.status = -EINPROGRESS; + phy_request->epnum = phy_ep->ep_num; + phy_request->is_tx = phy_ep->is_tx; + + phy_request->gadget_request->length = ls_req->length; + phy_request->gadget_request->status = 0; + phy_request->gadget_request->complete = gadget_callback_complete; + phy_request->gadget_request->buf = ls_req->buf; + phy_request->gadget_request->context = ls_req; + + status = usb_gadget_map_request(&config->gadget, &phy_request->request, phy_request->is_tx); + + if (!phy_ep->desc) { + pr_debug("req %p queued to %s while ep %s\n", phy_request, ls_ep->name, "disabled"); + status = -ESHUTDOWN; + usb_gadget_unmap_request(&config->gadget, &phy_request->request, + phy_request->is_tx); + return status; + } + + spin_lock_irqsave(&config->lock, flags); + + phy_request->gadget_request->dma = phy_request->request.dma; + list_add_tail(&phy_request->list, &phy_ep->req_list); + + pr_debug("queue to %s (%s), length = %d\n", phy_ep->name, + phy_ep->is_tx ? "IN/TX" : "OUT/RX", phy_request->request.length); + + status = config->gadget_obj->gadget_reqQueue(config->gadget_priv, phy_ep->gadget_ep, + phy_request->gadget_request); + + spin_unlock_irqrestore(&config->lock, flags); + + return status; +} + +static int gadget_ep_dequeue(struct usb_ep *ls_ep, struct usb_request *ls_req) +{ + struct phytium_ep *phy_ep; + struct phytium_cusb *config; + unsigned long flags; + int status = 0; + struct phytium_request *phy_request; + struct phytium_request *phy_next_request; + + if (!ls_ep || !ls_req) + return -EINVAL; + + phy_ep = ls_ep ? container_of(ls_ep, struct phytium_ep, end_point) : NULL; + config = phy_ep->config; + phy_request = ls_req ? container_of(ls_req, struct phytium_request, request) : NULL; + + if (phy_request->ep != phy_ep) + return -EINVAL; + + spin_lock_irqsave(&config->lock, flags); + + list_for_each_entry(phy_next_request, &phy_ep->req_list, list) { + if (phy_next_request == phy_request) + break; + } + + if (phy_next_request != phy_request) { + pr_info("request %p not queued to %s\n", phy_request, ls_ep->name); + status = -EINVAL; + goto done; + } + + status = config->gadget_obj->gadget_reqDequeue(config->gadget_priv, phy_ep->gadget_ep, + phy_request->gadget_request); +done: + spin_unlock_irqrestore(&config->lock, flags); + return status; +} + +static int gadget_ep_set_halt(struct usb_ep *ls_ep, int value) +{ + struct phytium_ep *phy_ep; + struct phytium_cusb *config; + struct GADGET_EP *gadget_ep = NULL; + unsigned long flags; + int status = 0; + + if (!ls_ep) + return -EINVAL; + + phy_ep = ls_ep ? 
container_of(ls_ep, struct phytium_ep, end_point) : NULL; + config = phy_ep->config; + gadget_ep = phy_ep->gadget_ep; + + spin_lock_irqsave(&config->lock, flags); + + status = config->gadget_obj->gadget_epSetHalt(config->gadget_priv, + phy_ep->gadget_ep, value); + if (status > 0) { + spin_unlock_irqrestore(&config->lock, flags); + return -status; + } + + spin_unlock_irqrestore(&config->lock, flags); + + return 0; +} + +static const struct usb_ep_ops gadget_ep_ops = { + .enable = gadget_ep_enable, + .disable = gadget_ep_disable, + .alloc_request = gadget_ep_alloc_request, + .free_request = gadget_ep_free_request, + .queue = gadget_ep_enqueue, + .dequeue = gadget_ep_dequeue, + .set_halt = gadget_ep_set_halt, +}; + +static int gadget_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) +{ + return -EINVAL; +} + +static int gadget_ep0_disable(struct usb_ep *ep) +{ + return -EINVAL; +} + +static int gadget_ep0_enqueue(struct usb_ep *ls_ep, struct usb_request *ls_req, gfp_t gfp_flags) +{ + struct phytium_ep *phy_ep; + struct phytium_request *phy_request; + struct phytium_cusb *config; + int status = 0; + unsigned long flags; + + if (!ls_ep || !ls_req) + return -EINVAL; + + phy_ep = ls_ep ? container_of(ls_ep, struct phytium_ep, end_point) : NULL; + config = phy_ep->config; + phy_request = ls_req ? container_of(ls_req, struct phytium_request, request) : NULL; + + spin_lock_irqsave(&config->lock, flags); + + if (!list_empty(&phy_ep->req_list)) { + status = -EBUSY; + goto cleanup; + } + + phy_request->config = config; + phy_request->request.actual = 0; + phy_request->gadget_request->actual = 0; + phy_request->is_tx = config->ep0_data_stage_is_tx; + phy_request->gadget_request->length = phy_request->request.length; + phy_request->gadget_request->status = 0; + phy_request->gadget_request->complete = gadget_callback_complete; + phy_request->gadget_request->buf = phy_request->request.buf; + phy_request->gadget_request->context = phy_request; + + status = usb_gadget_map_request(&config->gadget, &phy_request->request, phy_request->is_tx); + if (status) { + pr_info("failed to map request\n"); + status = -EINVAL; + goto cleanup; + } + + phy_request->gadget_request->dma = phy_request->request.dma; + list_add_tail(&phy_request->list, &phy_ep->req_list); + + pr_debug("queue to %s (%s), length = %d\n", phy_ep->name, + phy_ep->is_tx ? 
"IN/TX" : "OUT/RX", phy_request->request.length); + + status = config->gadget_obj->gadget_reqQueue(config->gadget_priv, phy_ep->gadget_ep, + phy_request->gadget_request); + if (status > 0) { + status = -status; + usb_gadget_unmap_request(&config->gadget, &phy_request->request, + phy_request->is_tx); + list_del(&phy_request->list); + goto cleanup; + } + +cleanup: + spin_unlock_irqrestore(&config->lock, flags); + return status; +} + +static int gadget_ep0_dequeue(struct usb_ep *ep, struct usb_request *ls_request) +{ + return -EOPNOTSUPP; +} + +static int gadget_ep0_set_halt(struct usb_ep *ep, int value) +{ + return -EINVAL; +} + + +static const struct usb_ep_ops gadget_ep0_ops = { + .enable = gadget_ep0_enable, + .disable = gadget_ep0_disable, + .alloc_request = gadget_ep_alloc_request, + .free_request = gadget_ep_free_request, + .queue = gadget_ep0_enqueue, + .dequeue = gadget_ep0_dequeue, + .set_halt = gadget_ep0_set_halt, +}; + +static int32_t gadgetWaitForBusyBit(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep) +{ + struct GadgetEp *gadgetEp; + uint8_t epNum; + uint8_t txcs = CS_BUSY; + uint8_t buf = 0; + uint8_t bufflag = 0; + + if (!priv || !gadget_Ep) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + epNum = gadgetEp->hwEpNum; + + if (gadgetEp->isInEp || gadgetEp->hwEpNum == 0) + return 0; + + buf = phytium_read8(&priv->regs->ep[epNum - 1].txcon) & CON_BUF; + + while ((txcs & CS_BUSY) || (bufflag == 0)) { + txcs = phytium_read8(&priv->regs->ep[epNum - 1].txcs); + + if (((txcs & CS_NPAK) >> CS_NPAK_OFFSET) == buf || buf == 0) + bufflag = 1; + else + bufflag = 0; + } + + return 0; +} + +static inline void gadgetEpXDataReceive(struct GADGET_CTRL *priv, + struct GadgetRequest *gadgetRequest) +{ + struct GadgetEp *gadgetEp; + struct GADGET_REQ *gadgetReq; + uint8_t epType; + uint32_t requestSize, channelStatus, chMaxLen; + + if (!priv || !gadgetRequest) + return; + + gadgetEp = gadgetRequest->ep; + chMaxLen = priv->dmaDrv->dma_getMaxLength(priv->dmaController, gadgetEp->channel); + epType = gadgetEp->gadgetEp.desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + channelStatus = priv->dmaDrv->dma_getChannelStatus(priv->dmaController, gadgetEp->channel); + + gadgetReq = &gadgetRequest->request; + if (gadgetReq->actual < gadgetReq->length || gadgetRequest->zlp) { + gadgetRequest->zlp = 0; + if ((gadgetReq->length - gadgetReq->actual) < chMaxLen) + requestSize = gadgetReq->length - gadgetReq->actual; + else + requestSize = chMaxLen; + + priv->dmaDrv->dma_channelProgram(priv->dmaController, gadgetEp->channel, + gadgetEp->gadgetEp.maxPacket, + gadgetReq->dma + gadgetReq->actual, requestSize, NULL, 0); + } +} + +static inline void gadgetEpXDataSend(struct GADGET_CTRL *priv, struct GadgetRequest *gadgetRequest) +{ + struct GadgetEp *gadgetEp; + struct GADGET_REQ *gadgetReq; + uint8_t epType; + uint32_t requestSize, channelStatus, chMaxLen; + + if (!priv || !gadgetRequest) + return; + + gadgetEp = gadgetRequest->ep; + chMaxLen = priv->dmaDrv->dma_getMaxLength(priv->dmaController, gadgetEp->channel); + epType = gadgetEp->gadgetEp.desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + channelStatus = priv->dmaDrv->dma_getChannelStatus(priv->dmaController, gadgetEp->channel); + + gadgetReq = &gadgetRequest->request; + if ((gadgetReq->length - gadgetReq->actual) < chMaxLen) + requestSize = gadgetReq->length - gadgetReq->actual; + else + requestSize = chMaxLen; + pr_debug("Transmit/IN %s gadgetReq %p gadgetRequest:%p requestSize:0x%x packetSize:0x%x\n", + gadgetEp->gadgetEp.name, gadgetReq, 
gadgetRequest, + requestSize, gadgetEp->gadgetEp.maxPacket); + + gadgetRequest->zlp = 0; + priv->dmaDrv->dma_channelProgram(priv->dmaController, gadgetEp->channel, + gadgetEp->gadgetEp.maxPacket, + gadgetReq->dma + gadgetReq->actual, requestSize, NULL, 0); +} + +static int32_t gadgetEpXSetHalt(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, + uint8_t value) +{ + struct GadgetEp *gadgetEp; + uint8_t epType; + struct GADGET_REQ *req = NULL; + struct GadgetRequest *gadgetRequest = NULL; + uint8_t epNum, txcon, rxcon; + uint32_t status = DMA_STATUS_ARMED; + + if (!priv || !gadget_Ep) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + epType = gadgetEp->gadgetEp.desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + if (epType == USB_ENDPOINT_XFER_ISOC) + return -EINVAL; + + pr_debug("%s: %s stall\n", gadget_Ep->name, value ? "set" : "clear"); + req = gadgetGetNextReq(gadgetEp); + + if (!value) + gadgetEp->wedged = 0; + + if (value && gadgetEp->isInEp && req && gadgetEp->state == GADGET_EP_BUSY) { + while (status == DMA_STATUS_ARMED) + status = priv->dmaDrv->dma_getChannelStatus(priv->dmaController, + gadgetEp->channel); + + gadgetWaitForBusyBit(priv, gadget_Ep); + } + + epNum = gadgetEp->hwEpNum; + + if (gadgetEp->isInEp) { + txcon = phytium_read8(&priv->regs->ep[epNum - 1].txcon); + if (value) { + phytium_write8(&priv->regs->ep[epNum - 1].txcon, txcon | CON_STALL); + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX | epNum); + phytium_write8(&priv->regs->endprst, + ENDPRST_IO_TX | epNum | ENDPRST_FIFORST); + + } else { + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX | epNum); + phytium_write8(&priv->regs->endprst, + ENDPRST_IO_TX | epNum | ENDPRST_TOGRST); + phytium_write8(&priv->regs->ep[epNum - 1].txcon, txcon & (~CON_STALL)); + } + } else { + rxcon = phytium_read8(&priv->regs->ep[epNum - 1].rxcon); + if (value) { + phytium_write8(&priv->regs->ep[epNum - 1].rxcon, rxcon | CON_STALL); + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX | epNum); + phytium_write8(&priv->regs->endprst, epNum | ENDPRST_FIFORST); + } else { + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX | epNum); + phytium_write8(&priv->regs->endprst, + epNum | ENDPRST_TOGRST | ENDPRST_FIFORST); + phytium_write8(&priv->regs->ep[epNum - 1].rxcon, rxcon & (~CON_STALL)); + } + } + + if (gadgetEp->state != GADGET_EP_BUSY && !value && req) { + gadgetRequest = requestToGadgetRequest(req); + if (gadgetEp->isInEp) + gadgetEpXDataSend(priv, gadgetRequest); + else + gadgetEpXDataReceive(priv, gadgetRequest); + } + + return 0; +} + +static int32_t gadgetEp0SetHalt(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, + uint8_t value) +{ + struct GadgetEp *gadgetEp; + + if (!priv || !gadget_Ep) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + if (!list_empty(&gadgetEp->request)) + return -EBUSY; + + switch (priv->ep0State) { + case GADGET_EP0_STAGE_IN: + case GADGET_EP0_STAGE_OUT: + case GADGET_EP0_STAGE_ACK: + case GADGET_EP0_STAGE_STATUSIN: + case GADGET_EP0_STAGE_STATUSOUT: + priv->ep0State = GADGET_EP0_STAGE_SETUP; + break; + default: + return -EINVAL; + } + + return 0; +} + + +static void gadgetEp0Callback(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp, + struct GADGET_REQ *req, uint8_t status) +{ + if (!priv || !gadgetEp || !req) + return; + + priv->ep0State = GADGET_EP0_STAGE_SETUP; + list_del(&req->list); + gadgetEp->requestsInList--; + + if (req->status == EINPROGRESS) + req->status = status; + + if (req->complete) + req->complete(&gadgetEp->gadgetEp, req); +} + +static enum usb_device_speed 
gadgetGetActualSpeed(struct GADGET_CTRL *priv) +{ + uint8_t speedctrl; + + if (!priv) + return USB_SPEED_UNKNOWN; + + speedctrl = phytium_read8(&priv->regs->speedctrl) & (~SPEEDCTRL_HSDISABLE); + switch (speedctrl) { + case SPEEDCTRL_HS: + return USB_SPEED_HIGH; + case SPEEDCTRL_FS: + return USB_SPEED_FULL; + case SPEEDCTRL_LS: + return USB_SPEED_LOW; + default: + return USB_SPEED_UNKNOWN; + } +} + +static int32_t gadgetServiceSetFeatureReq(struct GADGET_CTRL *priv, struct usb_ctrlrequest *setup) +{ + uint8_t epNum, isIn; + struct GadgetEp *gadgetEp; + + if (!priv || !setup) + return 0; + + switch (setup->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + switch (setup->wValue) { + case USB_DEVICE_REMOTE_WAKEUP: + pr_info("set feature - remote wakeup\n"); + priv->isRemoteWakeup = 1; + break; + case USB_DEVICE_B_HNP_ENABLE: + pr_info("set feature - B HNP Enable\n"); + pr_info("otg not implemented\n"); + return -EINVAL; + case USB_DEVICE_A_HNP_SUPPORT: + pr_info("set feature - A HNP support\n"); + pr_info("otg not implemented\n"); + return -EINVAL; + } + break; + case USB_RECIP_INTERFACE: + break; + case USB_RECIP_ENDPOINT: + epNum = setup->wIndex & 0x0f; + isIn = setup->wIndex & USB_DIR_IN; + if (epNum == 0 || epNum > 15 || setup->wValue != 0) + return -EINVAL; + + gadgetEp = isIn ? &priv->in[epNum] : &priv->out[epNum]; + + gadgetEpXSetHalt(priv, &gadgetEp->gadgetEp, 1); + break; + default: + return -EINVAL; + } + return 0; +} + +static int32_t gadgetServiceClearFeatureReq(struct GADGET_CTRL *priv, struct usb_ctrlrequest *setup) +{ + uint8_t epNum, isIn; + struct GadgetEp *gadgetEp; + + if (!priv || !setup) + return -EINVAL; + + switch (setup->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + if (setup->wValue == USB_DEVICE_B_HNP_ENABLE) { + pr_err("otg not implemented\n"); + return -EINVAL; + } + + if (setup->wValue != USB_DEVICE_REMOTE_WAKEUP) + return GADGET_EUNHANDLED; + + priv->isRemoteWakeup = 0; + return GADGET_EAUTOACK; + case USB_RECIP_INTERFACE: + break; + case USB_RECIP_ENDPOINT: + pr_info("clear feature wIndex:0x%x wValue:0x%x\n", setup->wIndex, setup->wValue); + epNum = setup->wIndex & 0x7f; + isIn = setup->wIndex & USB_DIR_IN; + if (epNum == 0 || epNum > 15 || setup->wValue != 0) + return GADGET_EUNHANDLED; + + gadgetEp = isIn ? &priv->in[epNum] : &priv->out[epNum]; + if (!gadgetEp->gadgetEp.desc) + return -EINVAL; + + if (gadgetEp->wedged) + break; + gadgetEpXSetHalt(priv, &gadgetEp->gadgetEp, 0); + break; + default: + return GADGET_EUNHANDLED; + } + + return 0; +} + +static int32_t gadgetServiceSetupReq(struct GADGET_CTRL *priv, struct usb_ctrlrequest *setup) +{ + struct GadgetEp *gadgetEp; + uint8_t isIn, epNum; + uint8_t rxcon, txcon; + int len; + + if (!priv || !setup) + return -EINVAL; + + if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { + if (setup->bRequest != USB_REQ_GET_STATUS) + return GADGET_EUNHANDLED; + } else + return GADGET_EUNHANDLED; + + priv->privBuffAddr[1] = 0; + priv->privBuffAddr[0] = 0; + + switch (setup->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + pr_info("wIndex:0x%x isSelfPowered:%d isRemoteWakeup:%d\n", + setup->wIndex, priv->isSelfPowered, priv->isRemoteWakeup); + + if (setup->wIndex == OTG_STS_SELECTOR) + priv->privBuffAddr[0] = priv->hostRequestFlag; + else { + priv->privBuffAddr[0] = priv->isSelfPowered ? USB_DEVICE_SELF_POWERED : 0; + priv->privBuffAddr[1] = priv->isRemoteWakeup ?
USB_DEVICE_REMOTE_WAKEUP : 0; + } + break; + case USB_RECIP_INTERFACE: + break; + case USB_RECIP_ENDPOINT: + epNum = setup->wIndex & 0x0f; + if (!epNum) + break; + + isIn = setup->wIndex & USB_DIR_IN; + + gadgetEp = isIn ? &priv->in[epNum] : &priv->out[epNum]; + if (!gadgetEp->gadgetEp.desc) + return -EINVAL; + + if (isIn) { + txcon = phytium_read8(&priv->regs->ep[epNum - 1].txcon); + priv->privBuffAddr[0] = (txcon & CON_STALL) ? 1 : 0; + } else { + rxcon = phytium_read8(&priv->regs->ep[epNum - 1].rxcon); + priv->privBuffAddr[0] = (rxcon & CON_STALL) ? 1 : 0; + } + break; + default: + return GADGET_EUNHANDLED; + } + + len = setup->wLength; + if (len > 2) + len = 2; + + priv->dmaDrv->dma_channelProgram(priv->dmaController, priv->in[0].channel, + priv->in[0].gadgetEp.maxPacket, priv->privBuffDma, len, NULL, 0); + + return 0; +} + +static int32_t gadgetGetSetup(struct GADGET_CTRL *priv, struct usb_ctrlrequest *setup) +{ + int i; + uint8_t ep0cs; + struct GADGET_REQ *request = NULL; + struct GadgetRequest *gadgetRequest; + + if (!priv || !setup) + return -EINVAL; + + phytium_write8(&priv->regs->ep0cs, EP0CS_CHGSET); + + for (i = 0; i < 8; i++) + ((char *)setup)[i] = phytium_read8(&priv->regs->setupdat[i]); + + ep0cs = phytium_read8(&priv->regs->ep0cs); + if (ep0cs & EP0CS_CHGSET) { + pr_info("setup flags changed: not an error\n"); + return GADGET_EAUTOACK; + } + + phytium_write8(&priv->regs->usbirq, USBIR_SUDAV); + pr_debug("setup packet: req%02x.%02x v:%04x i:%04x I%d\n", setup->bRequestType, + setup->bRequest, setup->wValue, setup->wIndex, setup->wLength); + + request = gadgetGetNextEp0Req(priv); + if (request) { + gadgetRequest = requestToGadgetRequest(request); + pr_info("previous request not finished, but a new one was received\n"); + gadgetEp0Callback(priv, gadgetRequest->ep, request, 0); + } + + if (setup->wLength) { + if (setup->bRequestType & USB_DIR_IN) + priv->ep0State = GADGET_EP0_STAGE_IN; + else + priv->ep0State = GADGET_EP0_STAGE_OUT; + } else + priv->ep0State = GADGET_EP0_STAGE_ACK; + + return 0; +} + +static void gadgetEp0StageSetup(struct GADGET_CTRL *priv) +{ + struct usb_ctrlrequest setup; + int32_t retval; + uint8_t ep0cs; + + if (!priv) + return; + + retval = gadgetGetSetup(priv, &setup); + + priv->gadgetDev.speed = gadgetGetActualSpeed(priv); + + switch (priv->ep0State) { + case GADGET_EP0_STAGE_ACK: + if ((setup.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) + retval = GADGET_EUNHANDLED; + else { + switch (setup.bRequest) { + case USB_REQ_SET_ADDRESS: + priv->deviceAddress = setup.wValue & 0x7F; + priv->gadgetDev.state = USB_STATE_ADDRESS; + pr_info("set address: %d\n", priv->deviceAddress); + retval = GADGET_EAUTOACK; + break; + case USB_REQ_SET_FEATURE: + retval = gadgetServiceSetFeatureReq(priv, &setup); + break; + case USB_REQ_CLEAR_FEATURE: + retval = gadgetServiceClearFeatureReq(priv, &setup); + break; + default: + retval = GADGET_EUNHANDLED; + break; + } + } + + if (retval == GADGET_EUNHANDLED) + break; + else if (retval == 0) + phytium_write8(&priv->regs->ep0cs, EP0CS_HSNAK); + + priv->ep0State = GADGET_EP0_STAGE_SETUP; + break; + case GADGET_EP0_STAGE_IN: + pr_debug("setup data stage in\n"); + retval = gadgetServiceSetupReq(priv, &setup); + + if (retval == 0) + priv->ep0State = GADGET_EP0_STAGE_STATUSOUT; + break; + case GADGET_EP0_STAGE_OUT: + pr_debug("setup data stage out\n"); + phytium_write8(&priv->regs->ep0Rxbc, 0); + retval = GADGET_EUNHANDLED; + break; + default: + if (retval == GADGET_EAUTOACK) + return; + + pr_debug("forward request\n"); + retval =
GADGET_EUNHANDLED; + break; + } + + if (retval == GADGET_EUNHANDLED) { + if (priv->eventCallback.setup) + retval = priv->eventCallback.setup(priv, &setup); + + if (retval == 0x7FFF) { + pr_debug("delayed response not finished yet\n"); + return; + } + + if (retval) + retval = GADGET_ESTALL; + } + + if (retval == GADGET_EUNHANDLED || retval == GADGET_ESTALL) { + pr_debug("request not handled - send stall\n"); + ep0cs = phytium_read8(&priv->regs->ep0cs); + ep0cs |= EP0CS_STALL; + phytium_write8(&priv->regs->ep0cs, ep0cs); + priv->ep0State = GADGET_EP0_STAGE_SETUP; + } else if (priv->ep0State == GADGET_EP0_STAGE_ACK) { + priv->ep0State = GADGET_EP0_STAGE_SETUP; + phytium_write8(&priv->regs->ep0cs, EP0CS_HSNAK); + pr_debug("setup transfer completed\n"); + } +} + +static void gadgetEp0DataSend(struct GADGET_CTRL *priv) +{ + struct GADGET_REQ *request; + uint32_t chMaxLen, requestSize; + + if (!priv) + return; + + request = gadgetGetNextEp0Req(priv); + if (!request) { + pr_debug("Ep0 queue is empty\n"); + return; + } + + if (priv->dmaDrv->dma_getChannelStatus(priv->dmaController, priv->in[0].channel) + >= DMA_STATUS_BUSY) { + pr_err("transfer is pending now\n"); + return; + } + + chMaxLen = priv->dmaDrv->dma_getMaxLength(priv->dmaController, priv->in[0].channel); + requestSize = (request->length < chMaxLen) ? request->length : chMaxLen; + pr_debug("usbRequest:%p requestSize:%d packetSize:%d\n", request, requestSize, + priv->in[0].gadgetEp.maxPacket); + priv->ep0State = GADGET_EP0_STAGE_STATUSOUT; + + priv->dmaDrv->dma_channelProgram(priv->dmaController, priv->in[0].channel, + priv->in[0].gadgetEp.maxPacket, request->dma, requestSize, NULL, 0); +} + +static void gadgetEp0DataReceive(struct GADGET_CTRL *priv) +{ + uint32_t chMaxLen, requestSize; + struct GADGET_REQ *request; + + if (!priv) + return; + + request = gadgetGetNextEp0Req(priv); + if (!request) { + pr_debug("Ep0 queue is empty\n"); + return; + } + + chMaxLen = priv->dmaDrv->dma_getMaxLength(priv->dmaController, priv->out[0].channel); + requestSize = (request->length < chMaxLen) ?
request->length : chMaxLen; + pr_debug("usbRequest:%p requestSize:%d packetSize:%d\n", request, requestSize, + priv->out[0].gadgetEp.maxPacket); + priv->ep0State = GADGET_EP0_STAGE_STATUSIN; + + priv->dmaDrv->dma_channelProgram(priv->dmaController, priv->out[0].channel, + priv->out[0].gadgetEp.maxPacket, request->dma, requestSize, NULL, 0); +} + +static uint32_t gadgetEp0Irq(struct GADGET_CTRL *priv) +{ + uint8_t usbcs; + struct GADGET_REQ *request; + + if (!priv) + return 0; + + switch (priv->ep0State) { + case GADGET_EP0_STAGE_IN://send data + pr_debug("DATA Stage IN\n"); + gadgetEp0DataSend(priv); + break; + case GADGET_EP0_STAGE_OUT://receive data + pr_debug("DATA Stage OUT\n"); + gadgetEp0DataReceive(priv); + break; + case GADGET_EP0_STAGE_STATUSIN: + pr_debug("DATA Stage STATUS IN\n"); + request = gadgetGetNextEp0Req(priv); + if (request) + request->actual = priv->dmaDrv->dma_getActualLength(priv->dmaController, + priv->out[0].channel); + + priv->ep0State = GADGET_EP0_STAGE_SETUP; + phytium_write8(&priv->regs->ep0cs, EP0CS_HSNAK); + gadgetEp0Callback(priv, &priv->out[0], request, 0); + break; + case GADGET_EP0_STAGE_STATUSOUT: + pr_debug("DATA Stage STATUS OUT\n"); + request = gadgetGetNextEp0Req(priv); + if (request) + request->actual = priv->dmaDrv->dma_getActualLength(priv->dmaController, + priv->in[0].channel); + + phytium_write8(&priv->regs->ep0cs, EP0CS_HSNAK); + if (request) + gadgetEp0Callback(priv, &priv->in[0], request, 0); + else + priv->ep0State = GADGET_EP0_STAGE_SETUP; + break; + case GADGET_EP0_STAGE_SETUP: + pr_debug("DATA Stage SETUP\n"); + gadgetEp0StageSetup(priv); + break; + case GADGET_EP0_STAGE_ACK: + pr_debug("DATA Stage ACK\n"); + break; + default: + pr_debug("DATA Stage UNKNOWN\n"); + usbcs = phytium_read8(&priv->regs->usbcs); + usbcs |= EP0CS_STALL; + phytium_write8(&priv->regs->usbcs, usbcs); + break; + } + + return 0; +} + +static void gadgetEpXCallback(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp, + struct GADGET_REQ *req, uint32_t status) +{ + if (!gadgetEp || !req) + return; + + list_del(&req->list); + gadgetEp->requestsInList--; + req->status = status; + + if (req->complete) + req->complete(&gadgetEp->gadgetEp, req); +} + +static void gadgetEpXDataCallback(struct GADGET_CTRL *priv, uint8_t epNum, uint8_t epDir) +{ + struct GadgetEp *gadgetEp; + struct GADGET_REQ *gadgetReq; + uint32_t actual_length = 0; + uint32_t chMaxLen = 0; + uint8_t epType; + + if (!priv) + return; + + pr_debug("%s %d epNum:%d epDir:%d\n", __func__, __LINE__, epNum, epDir); + gadgetEp = epDir ?
&priv->in[epNum] : &priv->out[epNum]; + + gadgetReq = gadgetGetNextReq(gadgetEp); + if (!gadgetReq) { + pr_debug("%s queue is empty\n", gadgetEp->gadgetEp.name); + return; + } + + if (gadgetEp->channel) { + actual_length = priv->dmaDrv->dma_getActualLength(priv->dmaController, + gadgetEp->channel); + chMaxLen = priv->dmaDrv->dma_getMaxLength(priv->dmaController, gadgetEp->channel); + gadgetReq->actual += actual_length; + } + + if (gadgetReq->actual == gadgetReq->length || actual_length < chMaxLen) { + gadgetEpXCallback(priv, gadgetEp, gadgetReq, 0); + + if (gadgetEp->gadgetEp.desc) { + epType = gadgetEp->gadgetEp.desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + if (epType == USB_ENDPOINT_XFER_ISOC) + return; + + gadgetReq = gadgetGetNextReq(gadgetEp); + gadgetEp->state = GADGET_EP_ALLOCATED; + if (!gadgetReq) { + pr_debug("%s queue is empty\n", gadgetEp->gadgetEp.name); + return; + } + gadgetEp->state = GADGET_EP_BUSY; + if (epDir) + gadgetEpXDataSend(priv, requestToGadgetRequest(gadgetReq)); + else + gadgetEpXDataReceive(priv, requestToGadgetRequest(gadgetReq)); + } + } else { + if (epDir) + gadgetEpXDataSend(priv, requestToGadgetRequest(gadgetReq)); + else + gadgetEpXDataReceive(priv, requestToGadgetRequest(gadgetReq)); + } +} + +static void gadget_CallbackTransfer(void *priv, uint8_t epNum, uint8_t epDir, bool resubmit) +{ + if (!epNum) + gadgetEp0Irq(priv); + else + gadgetEpXDataCallback(priv, epNum, epDir); +} + +static void gadgetAbortEndpoint(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp) +{ + struct GADGET_REQ *gadgetReq; + + pr_debug("Abort Device endpoint: %s, dma channel: %p\n", gadgetEp->gadgetEp.name, + gadgetEp->channel); + + if (gadgetEp->channel && gadgetEp->hwEpNum != 0) { + priv->dmaDrv->dma_channelRelease(priv->dmaController, gadgetEp->channel); + gadgetEp->channel = NULL; + } + + if (gadgetEp->channel && gadgetEp->hwEpNum == 0) { + if (priv->releaseEp0Flag == 1) { + priv->dmaDrv->dma_channelAbort(priv->dmaController, gadgetEp->channel); + priv->dmaDrv->dma_channelAlloc(priv->dmaController, gadgetEp->isInEp, + gadgetEp->hwEpNum, 0); + } + } + + while (gadgetEp->request.next != &gadgetEp->request) { + gadgetReq = listToGadgetRequest(gadgetEp->request.next); + pr_debug("shutdown request %p from epName:%s\n", gadgetReq, + gadgetEp->gadgetEp.name); + if (!gadgetEp->gadgetEp.address) + gadgetEp0Callback(priv, gadgetEp, gadgetReq, GADGET_ESHUTDOWN); + else + gadgetEpXCallback(priv, gadgetEp, gadgetReq, GADGET_ESHUTDOWN); + } + + gadgetEp->state = GADGET_EP_FREE; +} + +static void gadgetStopActivity(struct GADGET_CTRL *priv) +{ + int i = 0; + struct GadgetEp *gadgetEp; + + pr_debug("USB Stop Activity\n"); + + if (!priv) + return; + + for (i = 0; i < 16; i++) { + gadgetEp = &priv->in[i]; + if (gadgetEp->state != GADGET_EP_NOT_IMPLEMENTED) + gadgetAbortEndpoint(priv, gadgetEp); + + gadgetEp = &priv->out[i]; + if (gadgetEp->state != GADGET_EP_NOT_IMPLEMENTED) + gadgetAbortEndpoint(priv, gadgetEp); + } +} + + +static int32_t gadgetEp0Enable(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp) +{ + uint8_t txien, rxien; + + if (!priv || !gadgetEp) + return -EINVAL; + + if (gadgetEp->state != GADGET_EP_FREE) + return -EBUSY; + + if (!gadgetEp->isInEp) { + rxien = phytium_read16(&priv->regs->rxien); + rxien &= ~(1 << gadgetEp->hwEpNum); + phytium_write16(&priv->regs->rxien, rxien); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_IO_TX | FIFOCTRL_FIFOAUTO); + } else { + txien = phytium_read16(&priv->regs->txien); + txien &= ~(1 << gadgetEp->hwEpNum); +
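/* ep0 completions are reported through the DMA callback (gadget_CallbackTransfer routes endpoint 0 to gadgetEp0Irq), so the per-endpoint IRQ bit is masked here before the mask is written back. */ +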
phytium_write16(&priv->regs->txien, txien); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO); + } + + gadgetEp->gadgetEp.desc = NULL; + gadgetEp->state = GADGET_EP_ALLOCATED; + gadgetEp->channel = priv->dmaDrv->dma_channelAlloc(priv->dmaController, + gadgetEp->isInEp, gadgetEp->hwEpNum, 0); + phytium_write8(&priv->regs->ep0maxpack, 0x40); + + return 0; +} + +static int32_t gadgetEpXEnable(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp, + const struct usb_endpoint_descriptor *desc) +{ + uint32_t status = -EINVAL; + uint32_t payload; + uint16_t type, iso = 0; + uint8_t epNum = 0; + + if (!priv || !gadgetEp || !desc) + return -EINVAL; + + pr_debug("enable endpoint %s\n", gadgetEp->gadgetEp.name); + if (gadgetEp->state != GADGET_EP_FREE) { + status = -EBUSY; + goto fail; + } + + payload = desc->wMaxPacketSize & 0x7ff; + if (!payload) { + status = -EINVAL; + goto fail; + } + + epNum = gadgetEp->hwEpNum; + + type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + switch (type) { + case USB_ENDPOINT_XFER_ISOC: + type = CON_TYPE_ISOC; + switch (payload >> 11) { + case 0: + iso = CON_TYPE_ISOC_1_ISOD; + break; + case 1: + payload *= 2; + iso = CON_TYPE_ISOC_2_ISOD; + break; + case 2: + payload *= 3; + iso = CON_TYPE_ISOC_3_ISOD; + break; + } + break; + case USB_ENDPOINT_XFER_INT: + type = CON_TYPE_INT; + break; + case USB_ENDPOINT_XFER_BULK: + type = CON_TYPE_BULK; + break; + } + + if (desc->bEndpointAddress & USB_DIR_IN) { + if (!gadgetEp->isInEp) { + status = -ENODEV; + goto fail; + } + + if (payload > priv->gadgetCfg.epIN[epNum].maxPacketSize) { + status = -EINVAL; + goto fail; + } + + phytium_write16(&priv->regs->txmaxpack[epNum - 1], payload); + phytium_write8(&priv->regs->ep[epNum - 1].txcon, CON_VAL | type + | iso | (priv->gadgetCfg.epIN[epNum - 1].bufferingValue - 1)); + + phytium_write8(&priv->regs->fifoctrl, FIFOCTRL_FIFOAUTO | FIFOCTRL_IO_TX | epNum); + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX | epNum); + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX | epNum | + ENDPRST_FIFORST | ENDPRST_TOGRST); + } else { + if (gadgetEp->isInEp) { + status = -ENODEV; + goto fail; + } + + if (payload > priv->gadgetCfg.epOUT[epNum].maxPacketSize) { + status = -EINVAL; + goto fail; + } + + phytium_write16(&priv->regs->rxmaxpack[epNum - 1], payload); + phytium_write8(&priv->regs->ep[epNum - 1].rxcon, CON_VAL | type + | iso | (priv->gadgetCfg.epOUT[epNum - 1].bufferingValue - 1)); + + phytium_write8(&priv->regs->fifoctrl, FIFOCTRL_FIFOAUTO | epNum); + phytium_write8(&priv->regs->endprst, epNum); + phytium_write8(&priv->regs->endprst, epNum | ENDPRST_FIFORST | ENDPRST_TOGRST); + } + + if (priv->dmaController) + gadgetEp->channel = priv->dmaDrv->dma_channelAlloc(priv->dmaController, + gadgetEp->isInEp, epNum, (type == CON_TYPE_ISOC) ?
1 : 0); + + if (type == CON_TYPE_ISOC) { + if (gadgetEp->isInEp) { + phytium_write16(&priv->regs->isoautodump, 1 << epNum); + phytium_write16(&priv->regs->isodctrl, 1 << epNum); + } + + priv->dmaDrv->dma_setMaxLength(priv->dmaController, gadgetEp->channel, payload); + } + + gadgetEp->state = GADGET_EP_ALLOCATED; + gadgetEp->gadgetEp.desc = desc; + status = 0; +fail: + return status; +} + +static int32_t gadgetEpEnable(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, + const struct usb_endpoint_descriptor *desc) +{ + struct GadgetEp *gadgetEp = NULL; + + if (!priv || !gadget_Ep || !desc) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + gadgetEp->wedged = 0; + + if (gadgetEp->hwEpNum) + return gadgetEpXEnable(priv, gadgetEp, desc); + else + return gadgetEp0Enable(priv, gadgetEp); +} + +static int32_t gadgetEpXDisable(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp) +{ + uint8_t txcon, rxcon; + + if (!priv || !gadgetEp) + return -EINVAL; + + pr_debug("disable endpoint %s\n", gadgetEp->gadgetEp.name); + if (gadgetEp->isInEp) { + txcon = phytium_read8(&priv->regs->ep[gadgetEp->hwEpNum - 1].txcon); + txcon &= ~CON_VAL; + phytium_write8(&priv->regs->ep[gadgetEp->hwEpNum - 1].txcon, txcon); + } else { + rxcon = phytium_read8(&priv->regs->ep[gadgetEp->hwEpNum - 1].rxcon); + rxcon &= ~CON_VAL; + phytium_write8(&priv->regs->ep[gadgetEp->hwEpNum - 1].rxcon, rxcon); + } + gadgetAbortEndpoint(priv, gadgetEp); + gadgetEp->gadgetEp.desc = NULL; + gadgetEp->state = GADGET_EP_FREE; + return 0; +} + +static int32_t gadgetEp0Disable(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp) +{ + if (!priv || !gadgetEp) + return -EINVAL; + + pr_debug("disable endpoint %s\n", gadgetEp->gadgetEp.name); + gadgetAbortEndpoint(priv, gadgetEp); + + return 0; +} + +static int32_t gadgetEpDisable(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep) +{ + struct GadgetEp *gadgetEp = NULL; + + if (!priv || !gadget_Ep) + return -EINVAL; + + if (gadget_Ep->address == 0) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + gadgetEp->wedged = 0; + if (gadgetEp->hwEpNum) + return gadgetEpXDisable(priv, gadgetEp); + else + return gadgetEp0Disable(priv, gadgetEp); +} + +static int32_t gadgetEpXQueue(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp, + struct GADGET_REQ *req) +{ + struct GadgetRequest *gadgetRequest; + + if (!priv || !gadgetEp || !req) + return -EINVAL; + + req->actual = 0; + req->status = EINPROGRESS; + + gadgetRequest = requestToGadgetRequest(req); + gadgetRequest->ep = gadgetEp; + + if (req->length == 0) + gadgetRequest->zlp = 1; + + if (gadgetEp->gadgetEp.desc == NULL) { + pr_info("%s is disabled - cannot queue request %p\n", + gadgetEp->gadgetEp.name, req); + return -EINVAL; + } + + list_add_tail(&req->list, &gadgetEp->request); + pr_debug("queue to %s (%s), length:%d\n", gadgetEp->gadgetEp.name, + (gadgetEp->isInEp ?
"IN/TX" : "OUT/RX"), req->length); + + if ((gadgetEp->state == GADGET_EP_ALLOCATED) && (&req->list == gadgetEp->request.next)) { + if (gadgetEp->isInEp) { + if (!(phytium_read8(&priv->regs->ep[gadgetEp->hwEpNum - 1].txcon) + & CON_STALL)) { + gadgetEp->state = GADGET_EP_BUSY; + gadgetEpXDataSend(priv, gadgetRequest); + } + } else { + if (!(phytium_read8(&priv->regs->ep[gadgetEp->hwEpNum - 1].rxcon) + & CON_STALL)) { + gadgetEp->state = GADGET_EP_BUSY; + gadgetEpXDataReceive(priv, gadgetRequest); + } + } + } else if (gadgetEp->state == GADGET_EP_BUSY) { + if (usb_endpoint_xfer_isoc(gadgetEp->gadgetEp.desc)) { + if (gadgetEp->isInEp) + gadgetEpXDataSend(priv, gadgetRequest); + else + gadgetEpXDataReceive(priv, gadgetRequest); + } + } + + pr_debug("endpoint %s (%s) now is busy - transfer will be waiting in Queue\n", + gadgetEp->gadgetEp.name, (gadgetEp->isInEp ? "IN/TX" : "OUT/RX")); + + return 0; +} + +static int32_t gadgetEp0Queue(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp, + struct GADGET_REQ *req) +{ + struct GadgetRequest *gadgetRequest; + + if (!priv || !gadgetEp || !req) + return -EINVAL; + + req->actual = 0; + req->status = EINPROGRESS; + + if (!list_empty(&gadgetEp->request)) + return -EBUSY; + + gadgetRequest = requestToGadgetRequest(req); + gadgetRequest->ep = gadgetEp; + + switch (priv->ep0State) { + case GADGET_EP0_STAGE_OUT: + case GADGET_EP0_STAGE_IN: + case GADGET_EP0_STAGE_ACK: + break; + default: + return -EINVAL; + } + + list_add_tail(&req->list, &gadgetEp->request); + gadgetEp->requestsInList++; + + pr_debug("queue to %s (%s), length:%d stage:%d\n", gadgetEp->gadgetEp.name, + gadgetEp->isInEp ? "IN/TX" : "OUT/RX", req->length, priv->ep0State); + + switch (priv->ep0State) { + case GADGET_EP0_STAGE_OUT: + gadgetEp0DataReceive(priv); + break; + case GADGET_EP0_STAGE_IN: + gadgetEp0DataSend(priv); + break; + case GADGET_EP0_STAGE_ACK: + if (req->length) + return -EINVAL; + phytium_write8(&priv->regs->ep0cs, EP0CS_HSNAK); + gadgetEp0Callback(priv, gadgetRequest->ep, req, 0); + pr_info("control transfer completed\n"); + break; + default: + break; + } + + return 0; +} + +static int32_t gadgetEpQueue(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, + struct GADGET_REQ *req) +{ + struct GadgetEp *gadgetEp = NULL; + + if (!priv || !gadget_Ep || !req) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + + if (gadget_Ep->address & GADGET_USB_EP_NUMBER_MASK) + return gadgetEpXQueue(priv, gadgetEp, req); + else + return gadgetEp0Queue(priv, gadgetEp, req); +} + +static int32_t gadgetEpXDequeue(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp, + struct GADGET_REQ *req) +{ + struct GadgetRequest *gadgetRequest; + struct GADGET_REQ *iterator; + + if (!priv || !gadgetEp || !req) + return -EINVAL; + + gadgetRequest = requestToGadgetRequest(req); + if (gadgetRequest->ep != gadgetEp) + return -EINVAL; + + pr_debug("Dequeue request %p form %s\n", req, gadgetEp->gadgetEp.name); + + listBrowsingRequest(iterator, &gadgetEp->request, list) { + if (req == iterator) + break; + } + + if (req != iterator) { + pr_info("request %p not queued to %s\n", req, gadgetEp->gadgetEp.name); + return -EINVAL; + } + + if (gadgetEp->state == GADGET_EP_BUSY) { + priv->dmaDrv->dma_channelAbort(priv->dmaController, gadgetEp->channel); + gadgetEp->state = GADGET_EP_ALLOCATED; + } + + gadgetEpXCallback(priv, gadgetEp, req, GADGET_ECONNRESET); + + return 0; +} + +static int32_t gadgetEp0Dequeue(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp, + struct GADGET_REQ *req) +{ + return 0; +} + +static 
int32_t gadgetEpDequeue(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, + struct GADGET_REQ *req) +{ + struct GadgetEp *gadgetEp = NULL; + + if (!priv || !gadget_Ep || !req) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + + if (gadget_Ep->address & GADGET_USB_EP_NUMBER_MASK) + return gadgetEpXDequeue(priv, gadgetEp, req); + else + return gadgetEp0Dequeue(priv, gadgetEp, req); +} + +static int32_t gadgetEpSetHalt(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, uint8_t value) +{ + if (!priv || !gadget_Ep) + return -EINVAL; + + if (gadget_Ep->address & GADGET_USB_EP_NUMBER_MASK) + return gadgetEpXSetHalt(priv, gadget_Ep, value); + else + return gadgetEp0SetHalt(priv, gadget_Ep, value); +} + +static int32_t gadgetEpSetWedge(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep) +{ + struct GadgetEp *gadgetEp = NULL; + + if (!priv || !gadget_Ep) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + gadgetEp->wedged = 1; + + return gadgetEpSetHalt(priv, gadget_Ep, 1); +} + +static int32_t gadgetEpFifoStatus(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep) +{ + if (!priv || !gadget_Ep) + return -EINVAL; + + return 0; +} + +static void gadgetEpFifoFlush(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep) +{ + if (!priv || !gadget_Ep) + return; +} + +static int32_t gadgetEpAllocRequest(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, + struct GADGET_REQ **req) +{ + struct GadgetEp *gadgetEp = NULL; + struct GadgetRequest *gadgetRequest = NULL; + + if (!priv || !gadget_Ep || !req) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + if (priv->eventCallback.usbRequestMemAlloc) + gadgetRequest = priv->eventCallback.usbRequestMemAlloc(priv, + sizeof(*gadgetRequest)); + if (!gadgetRequest) + return -ENOMEM; + + memset(gadgetRequest, 0, sizeof(*gadgetRequest)); + *req = &gadgetRequest->request; + INIT_LIST_HEAD(&gadgetRequest->request.list); + gadgetRequest->ep = gadgetEp; + + return 0; +} + +static void gadgetEpFreeRequest(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, + struct GADGET_REQ *req) +{ + struct GadgetRequest *gadgetRequest = NULL; + + if (!priv || !gadget_Ep || !req) + return; + + gadgetRequest = requestToGadgetRequest(req); + + if (priv->eventCallback.usbRequestMemFree) + priv->eventCallback.usbRequestMemFree(priv, gadgetRequest); +} + +static struct GADGET_EP_OPS gadgetEpOps = { + .epEnable = gadgetEpEnable, + .epDisable = gadgetEpDisable, + .reqQueue = gadgetEpQueue, + .reqDequeue = gadgetEpDequeue, + .epSetHalt = gadgetEpSetHalt, + .epSetWedge = gadgetEpSetWedge, + .epFifoStatus = gadgetEpFifoStatus, + .reqAlloc = gadgetEpAllocRequest, + .reqFree = gadgetEpFreeRequest +}; + +static void gadgetInitDeviceEp(struct GADGET_CTRL *priv, uint8_t isInEp) +{ + uint8_t num; + struct GadgetEp *gadgetEp; + struct GADGET_EP_CFG epCfg; + + if (!priv) + return; + + for (num = 0; num < 16; num++) { + gadgetEp = isInEp ? &priv->in[num] : &priv->out[num]; + if (num) { + epCfg = isInEp ? priv->gadgetCfg.epIN[num] : priv->gadgetCfg.epOUT[num]; + if (!epCfg.bufferingValue) { + gadgetEp->state = GADGET_EP_NOT_IMPLEMENTED; + gadgetEp->hwEpNum = num; + continue; + } + } + INIT_LIST_HEAD(&gadgetEp->gadgetEp.epList); + INIT_LIST_HEAD(&gadgetEp->request); + gadgetEp->state = GADGET_EP_FREE; + gadgetEp->hwEpNum = num; + gadgetEp->isInEp = isInEp; + gadgetEp->requestsInList = 0; + snprintf(gadgetEp->gadgetEp.name, sizeof(gadgetEp->gadgetEp.name), + "Ep%d%s", num, isInEp ? 
"in" : "out"); + if (!num) { + gadgetEp->gadgetEp.maxPacket = priv->gadgetCfg.epIN[num].maxPacketSize; + if (isInEp) + priv->gadgetDev.ep0 = &gadgetEp->gadgetEp; + gadgetEp->gadgetEp.ops = &gadgetEpOps; + gadgetEp0Enable(priv, gadgetEp); + continue; + } + + if (isInEp) { + if (epCfg.startBuf) + phytium_write16(&priv->regs->txstaddr[num - 1].addr, + epCfg.startBuf); + phytium_write8(&priv->regs->ep[num - 1].txcon, 0); + gadgetEp->gadgetEp.maxPacket = epCfg.maxPacketSize; + } else { + if (epCfg.startBuf) + phytium_write16(&priv->regs->rxstaddr[num - 1].addr, + epCfg.startBuf); + phytium_write8(&priv->regs->ep[num - 1].rxcon, 0); + gadgetEp->gadgetEp.maxPacket = epCfg.maxPacketSize; + } + + gadgetEp->gadgetEp.ops = &gadgetEpOps; + gadgetEp->gadgetEp.address = isInEp ? (0x80 | num) : num; + gadgetEp->gadgetEp.maxburst = 0; + gadgetEp->gadgetEp.mult = 0; + gadgetEp->gadgetEp.maxStreams = 0; + priv->endpointInList++; + list_add_tail(&gadgetEp->gadgetEp.epList, &priv->gadgetDev.epList); + } +} + +static void gadgetInitEndpoint(struct GADGET_CTRL *priv) +{ + if (!priv) + return; + + gadgetInitDeviceEp(priv, 1); + gadgetInitDeviceEp(priv, 0); +} + +static void gadgetSetup(struct GADGET_CTRL *priv) +{ + if (!priv) + return; + + INIT_LIST_HEAD(&priv->gadgetDev.epList); + priv->gadgetDev.state = USB_STATE_NOTATTACHED; + priv->gadgetDev.maxSpeed = USB_SPEED_HIGH; + priv->gadgetDev.speed = USB_SPEED_FULL; + snprintf(priv->gadgetDev.name, sizeof(priv->gadgetDev.name), "Phytium USB SD Driver"); + + gadgetInitEndpoint(priv); + + phytium_write8(&priv->regs->ep0maxpack, 0x40); + phytium_write16(&priv->regs->rxien, 0); + phytium_write16(&priv->regs->txien, 0); + phytium_write16(&priv->regs->rxirq, 0xFFFF); + phytium_write16(&priv->regs->txirq, 0xFFFF); + phytium_write8(&priv->regs->usbirq, 0xEF); + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | ENDPRST_TOGRST | ENDPRST_IO_TX); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | ENDPRST_TOGRST); + priv->isReady = 1; +} +static int32_t gadgetInit(struct GADGET_CTRL *priv, struct GADGET_CFG *config, + struct GADGET_CALLBACKS *callbacks, struct device *pdev) +{ + struct DMA_SYSREQ dmaSysReq; + uint8_t usbcs; + + if (!priv || !config || !callbacks) + return -EINVAL; + + priv->dev = pdev; + priv->eventCallback = *callbacks; + priv->gadgetCfg = *config; + priv->regs = (struct HW_REGS *)config->regBase; + priv->phy_regs = (struct VHUB_REGS *)config->phy_regBase; + priv->gadgetDrv = GADGET_GetInstance(); + priv->dmaDrv = DMA_GetInstance(); + priv->dmaController = (void *)(priv + 1); + priv->dmaCfg.dmaModeRx = 0xFFFF; + priv->dmaCfg.dmaModeTx = 0xFFFF; + priv->dmaCfg.regBase = config->regBase + 0x400; + priv->dmaCfg.trbAddr = config->trbAddr; + priv->dmaCfg.trbDmaAddr = config->trbDmaAddr; + + priv->dmaDrv->dma_probe(NULL, &dmaSysReq); + priv->privBuffAddr = (uint8_t *)((uintptr_t)config->trbAddr + dmaSysReq.trbMemSize); + priv->privBuffDma = (uintptr_t)((uintptr_t)config->trbDmaAddr + dmaSysReq.trbMemSize); + priv->dmaCallback.complete = gadget_CallbackTransfer; + priv->dmaDrv->dma_init(priv->dmaController, &priv->dmaCfg, &priv->dmaCallback); + priv->dmaDrv->dma_setParentPriv(priv->dmaController, priv); + + usbcs = phytium_read8(&priv->regs->usbcs); + usbcs |= USBCS_DISCON | USBCS_LPMNYET; + phytium_write8(&priv->regs->usbcs, usbcs); + + gadgetSetup(priv); + + usbcs = phytium_read8(&priv->regs->usbcs); + usbcs &= USBCS_DISCON; + phytium_write8(&priv->regs->usbcs, usbcs); + + return 0; +} + +static 
void gadgetDestroy(struct GADGET_CTRL *priv) +{ + pr_debug("Destroy Device Controller driver\n"); + + if (!priv) + return; + + gadgetDisconnect(priv); + + gadgetStopActivity(priv); + + phytium_write8(&priv->regs->usbcs, USBCS_DISCON); + + priv->isReady = 0; +} + +static void gadgetStart(struct GADGET_CTRL *priv) +{ + uint8_t usbien, usbcs; + + pr_debug("Usb Device Controller start\n"); + if (!priv) + return; + + usbien = phytium_read8(&priv->regs->usbien); + usbien |= USBIR_URES | USBIR_SUDAV | USBIR_LPMIR; + phytium_write8(&priv->regs->usbien, usbien); + + usbcs = phytium_read8(&priv->regs->usbcs); + usbcs &= ~USBCS_DISCON; + phytium_write8(&priv->regs->usbcs, usbcs); + + priv->dmaDrv->dma_start(priv->dmaController); +} + +static void gadgetReset(struct GADGET_CTRL *priv) +{ + int i = 0; + + pr_debug("Usb Disable Device Activity\n"); + + if (!priv) + return; + + if (priv->gadgetDev.speed != USB_SPEED_UNKNOWN) + gadgetDisconnect(priv); + + priv->gadgetDev.aHnpSupport = 0; + priv->gadgetDev.bHnpEnable = 0; + priv->gadgetDev.state = USB_STATE_DEFAULT; + priv->deviceAddress = 0; + priv->ep0State = GADGET_EP0_STAGE_SETUP; + + gadgetStopActivity(priv); + + for (i = 0; i < 1000; i++) { + priv->gadgetDev.speed = gadgetGetActualSpeed(priv); + if (priv->gadgetDev.speed == USB_SPEED_HIGH) + return; + } +} + +static void gadgetStop(struct GADGET_CTRL *priv) +{ + pr_debug("Usb Device Controller stop\n"); + + if (!priv) + return; + + if (!priv->isReady) + return; + + gadgetReset(priv); + + phytium_write8(&priv->regs->usbien, 0); + priv->dmaDrv->dma_stop(priv->dmaController); + + priv->isReady = 0; +} + +static void gadgetIsr(struct GADGET_CTRL *priv) +{ + uint8_t usbirq, usbien, usbcs; + + if (!priv) + return; + + usbirq = phytium_read8(&priv->regs->usbirq); + usbien = phytium_read8(&priv->regs->usbien); + + pr_debug("usbirq:0x%x usbien:0x%x\n", usbirq, usbien); + + usbirq = usbirq & usbien; + + if (!usbirq) + goto DMA_IRQ; + + if (usbirq & USBIR_LPMIR) { + pr_debug("USBIRQ LPM\n"); + usbcs = phytium_read8(&priv->regs->usbcs); + usbcs &= ~USBCS_LPMNYET; + phytium_write8(&priv->regs->usbcs, usbcs); + phytium_write8(&priv->regs->usbirq, USBIR_LPMIR); + } + + if (usbirq & USBIR_URES) { + pr_debug("USBIRQ RESET\n"); + phytium_write8(&priv->regs->usbirq, USBIR_URES); + priv->releaseEp0Flag = 1; + gadgetReset(priv); + priv->releaseEp0Flag = 0; + priv->gadgetDev.state = USB_STATE_DEFAULT; + if (priv->eventCallback.connect) + priv->eventCallback.connect(priv); + } + + if (usbirq & USBIR_HSPEED) { + pr_debug("USBIRQ HighSpeed\n"); + phytium_write8(&priv->regs->usbirq, USBIR_HSPEED); + priv->gadgetDev.speed = USB_SPEED_HIGH; + } + + if (usbirq & USBIR_SUDAV) { + pr_debug("USBIRQ SUDAV\n"); + priv->ep0State = GADGET_EP0_STAGE_SETUP; + gadgetEp0Irq(priv); + } + + if (usbirq & USBIR_SOF) { + pr_debug("USBIRQ SOF\n"); + phytium_write8(&priv->regs->usbirq, USBIR_SOF); + } + + if (usbirq & USBIR_SUTOK) { + pr_debug("USBIRQ SUTOK\n"); + phytium_write8(&priv->regs->usbirq, USBIR_SUTOK); + } + + if (usbirq & USBIR_SUSP) { + pr_debug("USBIRQ SUSPEND\n"); + phytium_write8(&priv->regs->usbirq, USBIR_SUSP); + } + + return; +DMA_IRQ: + priv->dmaDrv->dma_isr(priv->dmaController); +} + +static void gadgetGetDevInstance(struct GADGET_CTRL *priv, struct GADGET_DEV **dev) +{ + if (!priv || !dev) + return; + + *dev = &priv->gadgetDev; +} + +static int32_t gadgetGetFrame(struct GADGET_CTRL *priv, uint32_t *numOfFrame) +{ + if (!priv || !numOfFrame) + return -EINVAL; + + *numOfFrame = phytium_read16(&priv->regs->frmnr); + + return 0; +}
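+ +/* The device operations below are deliberately thin stubs: remote wakeup, VBUS session/draw and pull-up all return -EOPNOTSUPP so the UDC core treats those features as unsupported; only the self-powered flag is tracked in software. */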
+ +static int32_t gadgetWakeUp(struct GADGET_CTRL *priv) +{ + if (!priv) + return -EINVAL; + + return -EOPNOTSUPP; +} + +static int32_t gadgetSetSelfPowered(struct GADGET_CTRL *priv) +{ + if (!priv) + return -EINVAL; + + priv->isSelfPowered = 1; + + return 0; +} + +static int32_t gadgetClearSelfPowered(struct GADGET_CTRL *priv) +{ + if (!priv) + return -EINVAL; + + priv->isSelfPowered = 0; + + return 0; +} + +static int32_t gadgetVbusSession(struct GADGET_CTRL *priv, uint8_t isActive) +{ + if (!priv) + return -EINVAL; + + return -EOPNOTSUPP; +} + +static int32_t gadgetVbusDraw(struct GADGET_CTRL *priv, uint8_t mA) +{ + if (!priv) + return -EINVAL; + + return -EOPNOTSUPP; +} + +static int32_t gadgetPullUp(struct GADGET_CTRL *priv, uint8_t isOn) +{ + if (!priv) + return -EINVAL; + + return -EOPNOTSUPP; +} + +struct GADGET_OBJ GadgetObj = { + .gadget_init = gadgetInit, + .gadget_destroy = gadgetDestroy, + .gadget_start = gadgetStart, + .gadget_stop = gadgetStop, + .gadget_isr = gadgetIsr, + //endpoint operation + .gadget_epEnable = gadgetEpEnable, + .gadget_epDisable = gadgetEpDisable, + .gadget_epSetHalt = gadgetEpSetHalt, + .gadget_epSetWedge = gadgetEpSetWedge, + .gadget_epFifoStatus = gadgetEpFifoStatus, + .gadget_epFifoFlush = gadgetEpFifoFlush, + .gadget_reqQueue = gadgetEpQueue, + .gadget_reqDequeue = gadgetEpDequeue, + .gadget_reqAlloc = gadgetEpAllocRequest, + .gadget_reqFree = gadgetEpFreeRequest, + + //Device operations + .gadget_getDevInstance = gadgetGetDevInstance, + .gadget_dGetFrame = gadgetGetFrame, + .gadget_dWakeUp = gadgetWakeUp, + .gadget_dSetSelfpowered = gadgetSetSelfPowered, + .gadget_dClearSelfpowered = gadgetClearSelfPowered, + .gadget_dVbusSession = gadgetVbusSession, + .gadget_dVbusDraw = gadgetVbusDraw, + .gadget_dPullUp = gadgetPullUp, +}; + +struct GADGET_OBJ *GADGET_GetInstance(void) +{ + return &GadgetObj; +} + +static int phytium_gadget_set_default_cfg(struct phytium_cusb *config) +{ + int index; + + config->gadget_cfg.regBase = (uintptr_t)config->regs; + config->gadget_cfg.phy_regBase = (uintptr_t)config->phy_regs; + config->gadget_cfg.dmaInterfaceWidth = GADGET_DMA_32_WIDTH; + + for (index = 0; index < 16; index++) { + if (index == 0) { + config->gadget_cfg.epIN[index].bufferingValue = 1; + config->gadget_cfg.epIN[index].maxPacketSize = 64; + config->gadget_cfg.epIN[index].startBuf = 0; + + config->gadget_cfg.epOUT[index].bufferingValue = 1; + config->gadget_cfg.epOUT[index].maxPacketSize = 64; + config->gadget_cfg.epOUT[index].startBuf = 0; + } else { + config->gadget_cfg.epIN[index].bufferingValue = 4; + config->gadget_cfg.epIN[index].maxPacketSize = 1024; + config->gadget_cfg.epIN[index].startBuf = 64 + 4096 * (index - 1); + + config->gadget_cfg.epOUT[index].bufferingValue = 4; + config->gadget_cfg.epOUT[index].maxPacketSize = 1024; + config->gadget_cfg.epOUT[index].startBuf = 64 + 4096 * (index - 1); + } + } + + return 0; +} + +static void gadget_callback_connect(struct GADGET_CTRL *priv) +{ + if (!priv) + return; +} + +static void gadget_callback_disconnect(struct GADGET_CTRL *priv) +{ + if (!priv) + return; +} + +static int32_t gadget_callback_setup(struct GADGET_CTRL *priv, struct usb_ctrlrequest *ctrl) +{ + struct phytium_cusb *config; + int ret = 0; + + if (!priv || !ctrl) + return -EINVAL; + + config = dev_get_drvdata(priv->dev); + if (!config) + return -1; + + if (!config->gadget_driver) + return -EOPNOTSUPP; + + if (ctrl->bRequestType & USB_DIR_IN) + config->ep0_data_stage_is_tx = 1; + else + config->ep0_data_stage_is_tx = 0; + + 
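/* Drop the lock across the composite driver's setup() callback: it may re-enter this driver (for example by queueing the ep0 data stage through gadget_ep0_enqueue), which takes config->lock again. */ +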
spin_unlock(&config->lock); + ret = config->gadget_driver->setup(&config->gadget, ctrl); + spin_lock(&config->lock); + + if (ret == 0x7FFF) + return ret; + + if (ret < 0) + return 1; + + return 0; +} + +static void *gadget_callback_usbRequestMemAlloc(struct GADGET_CTRL *priv, u32 size) +{ + struct GADGET_REQ *gadget_req = NULL; + + gadget_req = kzalloc(size, GFP_NOWAIT); + if (!gadget_req) + return NULL; + + return gadget_req; +} + +static void gadget_callback_usbRequestMemFree(struct GADGET_CTRL *priv, void *usbReq) +{ + if (!usbReq) + return; + + kfree(usbReq); +} + +static void init_peripheral_ep(struct phytium_cusb *config, + struct phytium_ep *phy_ep, struct GADGET_EP *gadget_ep, int is_tx) +{ + if (!config || !phy_ep || !gadget_ep) + return; + + memset(phy_ep, 0, sizeof(*phy_ep)); + phy_ep->config = config; + phy_ep->is_tx = is_tx; + phy_ep->gadget_ep = gadget_ep; + phy_ep->ep_num = gadget_ep->address & 0xF; + phy_ep->end_point.maxpacket = gadget_ep->maxPacket; + phy_ep->end_point.maxpacket_limit = 1024; + + INIT_LIST_HEAD(&phy_ep->req_list); + sprintf(phy_ep->name, "ep%d%s", phy_ep->ep_num, is_tx ? "in" : "out"); + + switch (phy_ep->ep_num) { + case 0: + phy_ep->end_point.caps.type_control = 1; + break; + case 1: + phy_ep->end_point.caps.type_bulk = 1; + break; + case 2: + phy_ep->end_point.caps.type_int = 1; + break; + case 3: + phy_ep->end_point.caps.type_iso = 1; + break; + default: + phy_ep->end_point.caps.type_int = 1; + phy_ep->end_point.caps.type_bulk = 1; + break; + } + + if (is_tx) { + phy_ep->end_point.caps.dir_in = 1; + phy_ep->end_point.caps.dir_out = 0; + } else { + phy_ep->end_point.caps.dir_in = 0; + phy_ep->end_point.caps.dir_out = 1; + } + + phy_ep->end_point.name = phy_ep->name; + + INIT_LIST_HEAD(&phy_ep->end_point.ep_list); + + if (!phy_ep->ep_num) { + phy_ep->end_point.ops = &gadget_ep0_ops; + config->gadget.ep0 = &phy_ep->end_point; + + if (config->gadget_dev->maxSpeed > USB_SPEED_HIGH) + config->gadget.ep0->maxpacket = 9; + } else { + phy_ep->end_point.ops = &gadget_ep_ops; + list_add_tail(&phy_ep->end_point.ep_list, &config->gadget.ep_list); + } +} + +static void gadget_init_endpoint(struct phytium_cusb *config) +{ + struct list_head *list; + struct GADGET_EP *gadget_ep; + + if (!config) + return; + + INIT_LIST_HEAD(&(config->gadget.ep_list)); + + init_peripheral_ep(config, &config->endpoints_tx[0], config->gadget_dev->ep0, 1); + init_peripheral_ep(config, &config->endpoints_rx[0], config->gadget_dev->ep0, 0); + + list_for_each(list, &config->gadget_dev->epList) { + gadget_ep = (struct GADGET_EP *)list; + if (gadget_ep->address & USB_DIR_IN) + init_peripheral_ep(config, &config->endpoints_tx[gadget_ep->address & 0xf], + gadget_ep, 1); + else + init_peripheral_ep(config, &config->endpoints_rx[gadget_ep->address & 0xf], + gadget_ep, 0); + } +} + +static int gadget_setup(struct phytium_cusb *config) +{ + int ret = -1; + + config->gadget_obj->gadget_getDevInstance(config->gadget_priv, &config->gadget_dev); + config->gadget.ops = &phytium_gadget_ops; + config->gadget.max_speed = config->gadget_dev->maxSpeed; + config->gadget.speed = USB_SPEED_HIGH; + config->gadget.name = "phytium_gadget"; + config->gadget.is_otg = 0; + + gadget_init_endpoint(config); + + ret = usb_add_gadget_udc(config->dev, &config->gadget); + if (ret) + goto err; + + return 0; + +err: + config->gadget.dev.parent = NULL; + device_unregister(&config->gadget.dev); + return ret; +} + +static int phytium_gadget_reinit(struct phytium_cusb *config) +{ + struct GADGET_CTRL *ctrl; + + if (!config) + 
return 0; + + ctrl = (struct GADGET_CTRL *)config->gadget_priv; + if (!ctrl) + return 0; + + gadgetStop(ctrl); + + config->gadget_obj->gadget_init(config->gadget_priv, &config->gadget_cfg, + &config->gadget_callbacks, config->dev); + + return 0; +} + +int phytium_gadget_init(struct phytium_cusb *config) +{ + int ret; + + if (!config) + return 0; + + phytium_gadget_set_default_cfg(config); + config->gadget_obj = &GadgetObj; + + config->dma_cfg.regBase = config->gadget_cfg.regBase + 0x400; + config->dma_obj = DMA_GetInstance(); + config->dma_obj->dma_probe(&config->dma_cfg, &config->dma_sysreq); + + config->gadget_sysreq.privDataSize = sizeof(struct GADGET_CTRL); + config->gadget_sysreq.trbMemSize = config->dma_sysreq.trbMemSize + GADGET_PRIV_BUFFER_SIZE; + config->gadget_sysreq.privDataSize += config->dma_sysreq.privDataSize; + + config->gadget_priv = devm_kzalloc(config->dev, + config->gadget_sysreq.privDataSize, GFP_KERNEL); + if (!config->gadget_priv) { + ret = -ENOMEM; + goto err_probe; + } + config->gadget_cfg.trbAddr = dma_alloc_coherent(config->dev, + config->gadget_sysreq.trbMemSize, + (dma_addr_t *)&config->gadget_cfg.trbDmaAddr, GFP_KERNEL); + if (!config->gadget_cfg.trbAddr) { + ret = -ENOMEM; + goto err_dma_coherent; + } + + config->gadget_callbacks.connect = gadget_callback_connect; + config->gadget_callbacks.disconnect = gadget_callback_disconnect; + config->gadget_callbacks.setup = gadget_callback_setup; + config->gadget_callbacks.usbRequestMemAlloc = gadget_callback_usbRequestMemAlloc; + config->gadget_callbacks.usbRequestMemFree = gadget_callback_usbRequestMemFree; + + ret = config->gadget_obj->gadget_init(config->gadget_priv, &config->gadget_cfg, + &config->gadget_callbacks, config->dev); + if (ret) { + ret = -ENODEV; + goto err_init; + } + + //dev_set_drvdata(config->dev, config); + + gadget_setup(config); + + return 0; + +err_init: + dma_free_coherent(config->dev, config->gadget_sysreq.trbMemSize, + config->gadget_cfg.trbAddr, config->gadget_cfg.trbDmaAddr); +err_dma_coherent: +err_probe: + dev_set_drvdata(config->dev, NULL); + + return ret; +} + +int phytium_gadget_uninit(struct phytium_cusb *config) +{ + if (config) + usb_del_gadget_udc(&config->gadget); + + return 0; +} + +#ifdef CONFIG_PM +int phytium_gadget_resume(void *priv) +{ + struct GADGET_CTRL *ctrl; + uint32_t gen_cfg; + unsigned long flags = 0; + struct phytium_cusb *config = (struct phytium_cusb *)priv; + + if (!config) + return 0; + + ctrl = (struct GADGET_CTRL *)config->gadget_priv; + if (!ctrl) + return 0; + + spin_lock_irqsave(&config->lock, flags); + phytium_gadget_reinit(config); + + if (config->gadget_driver) { + config->gadget_obj->gadget_start(config->gadget_priv); + if (ctrl->phy_regs) { + gen_cfg = phytium_read32(&ctrl->phy_regs->gen_cfg); + gen_cfg = gen_cfg & (~BIT(7)); + phytium_write32(&ctrl->phy_regs->gen_cfg, gen_cfg); + } + } + spin_unlock_irqrestore(&config->lock, flags); + + return 0; +} + +int phytium_gadget_suspend(void *priv) +{ + return 0; +} +#endif diff --git a/drivers/usb/phytium/gadget.h b/drivers/usb/phytium/gadget.h new file mode 100644 index 0000000000000..d87b55ade7a70 --- /dev/null +++ b/drivers/usb/phytium/gadget.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __PHYTIUM_GADGET_H_ +#define __PHYTIUM_GADGET_H_ + +#include +#include +#include "dma.h" + +struct GADGET_CTRL; +struct GADGET_EP; +struct GADGET_REQ; + +enum GADGET_EP_STATE { + GADGET_EP_FREE, + GADGET_EP_ALLOCATED, + GADGET_EP_BUSY, + GADGET_EP_NOT_IMPLEMENTED +}; + +enum GADGET_EP0_STAGE { + 
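+ /*
+  * Stages of an ep0 control transfer, per the standard USB model:
+  * SETUP -> optional data stage (IN or OUT) -> status stage in the
+  * opposite direction (STATUSIN after OUT or no-data transfers,
+  * STATUSOUT after IN transfers) -> ACK.
+  */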
GADGET_EP0_STAGE_SETUP, + GADGET_EP0_STAGE_IN, + GADGET_EP0_STAGE_OUT, + GADGET_EP0_STAGE_STATUSIN, + GADGET_EP0_STAGE_STATUSOUT, + GADGET_EP0_STAGE_ACK +}; + +struct GADGET_EP_OPS { + int32_t (*epEnable)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + const struct usb_endpoint_descriptor *desc); + + int32_t (*epDisable)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + int32_t (*epSetHalt)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, uint8_t value); + + int32_t (*epSetWedge)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + int32_t (*epFifoStatus)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + int32_t (*epFifoFlush)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + int32_t (*reqQueue)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ *req); + + int32_t (*reqDequeue)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ *req); + + int32_t (*reqAlloc)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ **req); + + void (*reqFree)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ *req); +}; + +struct GADGET_EP { + struct list_head epList; + char name[255]; + struct GADGET_EP_OPS *ops; + uint16_t maxPacket; + uint16_t maxStreams; + uint8_t mult; + uint8_t maxburst; + uint8_t address; + const struct usb_endpoint_descriptor *desc; + const struct usb_ss_ep_comp_descriptor *compDesc; +}; + +enum GADGET_DMAInterfaceWidth { + GADGET_DMA_32_WIDTH = 4, + GADGET_DMA_64_WIDTH = 8, +}; + +struct GADGET_EP_CFG { + uint8_t bufferingValue; + uint16_t startBuf; + uint16_t maxPacketSize; +}; + +struct GADGET_CFG { + uintptr_t regBase; + uintptr_t phy_regBase; + struct GADGET_EP_CFG epIN[16]; + struct GADGET_EP_CFG epOUT[16]; + enum GADGET_DMAInterfaceWidth dmaInterfaceWidth; + void *trbAddr; + uintptr_t trbDmaAddr; +}; + +struct GADGET_SYSREQ { + uint32_t privDataSize; + uint32_t trbMemSize; +}; + +struct GadgetEp { + struct GADGET_EP gadgetEp; + enum GADGET_EP_STATE state; + uint8_t hwEpNum; + uint8_t isInEp; + struct list_head request; + uint8_t iso_flag; + void *channel; + uint32_t requestsInList; + uint8_t wedged; +}; + +struct GADGET_DEV { + struct list_head epList; + struct GADGET_EP *ep0; + unsigned int speed; + unsigned int maxSpeed; + enum usb_device_state state; + uint8_t sgSupported; + uint8_t bHnpEnable; + uint8_t aHnpSupport; + char name[255]; +}; + +struct GADGET_SgList { + uintptr_t link; + uint32_t offset; + uint32_t length; + uintptr_t dmaAddress; +}; + +struct GADGET_REQ { + struct list_head list; + void *buf; + uint32_t length; + uintptr_t dma; + uint32_t numOfSgs; + uint32_t numMappedSgs; + uint16_t streamId; + uint8_t oInterrupt; + uint8_t zero; + uint8_t shortNotOk; + void *context; + uint32_t status; + uint32_t actual; + struct GADGET_SgList *sg; + void (*complete)(struct GADGET_EP *ep, struct GADGET_REQ *req); +}; + + +struct GADGET_CALLBACKS { + void (*disconnect)(struct GADGET_CTRL *priv); + + void (*connect)(struct GADGET_CTRL *priv); + + int32_t (*setup)(struct GADGET_CTRL *priv, + struct usb_ctrlrequest *ctrl); + + void *(*usbRequestMemAlloc)(struct GADGET_CTRL *priv, + uint32_t requiredSize); + + void (*usbRequestMemFree)(struct GADGET_CTRL *priv, void *usbRequest); +}; + +struct GADGET_OBJ { + int32_t (*gadget_init)(struct GADGET_CTRL *priv, struct GADGET_CFG *config, + struct GADGET_CALLBACKS *callbacks, struct device *pdev); + + void (*gadget_destroy)(struct GADGET_CTRL *priv); + + void (*gadget_start)(struct GADGET_CTRL *priv); + + void (*gadget_stop)(struct GADGET_CTRL *priv); + + void 
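+ /* top-half interrupt service routine for the controller core */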
(*gadget_isr)(struct GADGET_CTRL *priv); + + int32_t (*gadget_epEnable)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + const struct usb_endpoint_descriptor *desc); + + int32_t (*gadget_epDisable)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + int32_t (*gadget_epSetHalt)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, uint8_t value); + + int32_t (*gadget_epSetWedge)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + int32_t (*gadget_epFifoStatus)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + void (*gadget_epFifoFlush)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + int32_t (*gadget_reqQueue)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ *req); + + int32_t (*gadget_reqDequeue)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ *req); + + int32_t (*gadget_reqAlloc)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ **req); + + void (*gadget_reqFree)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ *req); + + void (*gadget_getDevInstance)(struct GADGET_CTRL *priv, struct GADGET_DEV **dev); + + int32_t (*gadget_dGetFrame)(struct GADGET_CTRL *priv, uint32_t *numOfFrame); + + int32_t (*gadget_dWakeUp)(struct GADGET_CTRL *priv); + + int32_t (*gadget_dSetSelfpowered)(struct GADGET_CTRL *priv); + + int32_t (*gadget_dClearSelfpowered)(struct GADGET_CTRL *priv); + + int32_t (*gadget_dVbusSession)(struct GADGET_CTRL *priv, uint8_t isActive); + + int32_t (*gadget_dVbusDraw)(struct GADGET_CTRL *priv, uint8_t mA); + + int32_t (*gadget_dPullUp)(struct GADGET_CTRL *priv, uint8_t isOn); + + void (*gadget_dGetConfigParams)(struct GADGET_CTRL *priv, + struct usb_dcd_config_params *configParams); +}; + +struct GADGET_CTRL { + struct device *dev; + struct GADGET_DEV gadgetDev; + struct HW_REGS *regs; + struct GADGET_OBJ *gadgetDrv; + struct GADGET_CFG gadgetCfg; + struct GADGET_CALLBACKS eventCallback; + struct GadgetEp in[16]; + struct GadgetEp out[16]; + enum GADGET_EP0_STAGE ep0State; + uint8_t isRemoteWakeup; + uint8_t isSelfPowered; + uint8_t deviceAddress; + struct DMA_OBJ *dmaDrv; + void *dmaController; + struct DMA_CFG dmaCfg; + struct DMA_CALLBACKS dmaCallback; + uint8_t releaseEp0Flag; + uint8_t isReady; + uint8_t *privBuffAddr; + uintptr_t privBuffDma; + uint8_t endpointInList; + uint8_t hostRequestFlag; + struct VHUB_REGS *phy_regs; +}; + +struct GadgetRequest { + struct GADGET_REQ request; + struct GadgetEp *ep; + struct GADGET_DEV *dev; + uint8_t zlp; +}; + +struct GADGET_OBJ *GADGET_GetInstance(void); + +#endif /* __LINUX_PHYTIUM_GADGET */ + diff --git a/drivers/usb/phytium/host.c b/drivers/usb/phytium/host.c new file mode 100644 index 0000000000000..e5833af08d841 --- /dev/null +++ b/drivers/usb/phytium/host.c @@ -0,0 +1,2761 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +//#include "list.h" +#include "core.h" +#include "dma.h" +#include "hw-regs.h" + +#define DRV_NAME "phytium_usb" + +#define HOST_GENERIC_EP_CONTROL 0x00 +#define HOST_GENERIC_EP_ISOC 0x01 +#define HOST_GENERIC_EP_BULK 0x02 +#define HOST_GENERIC_EP_INT 0x03 + +#define HOST_ESTALL 1 +#define HOST_EUNHANDLED 2 +#define HOST_EAUTOACK 3 +#define HOST_ESHUTDOWN 4 + +#define HOST_EP_NUM 16 + +static int get_epnum_from_pool(struct HOST_CTRL *priv, int real_epNum, bool dirIn) +{ + int index, dir = 0; + int ret = 0; + + if (!priv) + return 0; + + if (!dirIn) + dir = 1; + + if (real_epNum <= MAX_INSTANCE_EP_NUM) { + if (!priv->ep_remap_pool[dir][real_epNum]) { + priv->ep_remap_pool[dir][real_epNum] = 
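+ /* slot is free: identity-map the device endpoint number */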
real_epNum; + ret = real_epNum; + goto out; + } + + if (priv->ep_remap_pool[dir][real_epNum] == real_epNum) { + ret = real_epNum; + goto out; + } + } else { + for (index = 1; index <= MAX_INSTANCE_EP_NUM; index++) { + if (priv->ep_remap_pool[dir][index] == real_epNum) { + ret = index; + goto out; + } + } + } + + for (index = 1; index <= MAX_INSTANCE_EP_NUM; index++) { + if (!priv->ep_remap_pool[dir][index]) { + priv->ep_remap_pool[dir][index] = real_epNum; + ret = index; + goto out; + } + } + +out: + return ret; +} + +static int release_epnum_from_pool(struct HOST_CTRL *priv, int real_epNum, bool dirIn) +{ + int index = 0; + int dir = 0; + + if (!priv) + return 0; + + if (!dirIn) + dir = 1; + + for (index = 1; index <= MAX_INSTANCE_EP_NUM; index++) { + if (priv->ep_remap_pool[dir][index] == real_epNum) { + priv->ep_remap_pool[dir][index] = 0; + + return 0; + } + } + + return 0; +} + +static inline struct HOST_REQ *getUsbRequestEntry(struct list_head *list) +{ + return (struct HOST_REQ *)((uintptr_t)list - (uintptr_t)&(((struct HOST_REQ *)0)->list)); +} + +static inline struct HOST_EP_PRIV *getUsbHEpPrivEntry(struct list_head *list) +{ + struct HOST_EP_PRIV *hostEpPriv; + + if (list_empty(list)) + return NULL; + + hostEpPriv = (struct HOST_EP_PRIV *)((uintptr_t)list - + (uintptr_t)&(((struct HOST_EP_PRIV *)0)->node)); + + return hostEpPriv; +} + +static struct HOST_REQ *getNextReq(struct HOST_EP *usbEp) +{ + struct list_head *queue; + + if (!usbEp) + return NULL; + + queue = &usbEp->reqList; + + if (list_empty(queue)) + return NULL; + + return getUsbRequestEntry(queue->next); +} + +static void host_SetVbus(struct HOST_CTRL *priv, uint8_t isOn) +{ + uint8_t otgctrl = phytium_read8(&priv->regs->otgctrl); + + if (isOn) { + if (!(otgctrl & OTGCTRL_BUSREQ) || (otgctrl & OTGCTRL_ABUSDROP)) { + otgctrl &= ~OTGCTRL_ABUSDROP; + otgctrl |= OTGCTRL_BUSREQ; + phytium_write8(&priv->regs->otgctrl, otgctrl); + } + priv->otgState = HOST_OTG_STATE_A_WAIT_BCON; + } else { + if ((otgctrl & OTGCTRL_BUSREQ) || (otgctrl & OTGCTRL_ABUSDROP)) { + otgctrl |= OTGCTRL_ABUSDROP; + otgctrl &= ~OTGCTRL_BUSREQ; + phytium_write8(&priv->regs->otgctrl, otgctrl); + } + priv->otgState = HOST_OTG_STATE_A_IDLE; + } +} + +static inline void disconnectHostDetect(struct HOST_CTRL *priv) +{ + uint8_t otgctrl, otgstate; + uint32_t gen_cfg; + + if (!priv) + return; + + memset(priv->ep_remap_pool, 0, sizeof(priv->ep_remap_pool)); + otgctrl = phytium_read8(&priv->regs->otgctrl); + if ((otgctrl & OTGCTRL_ASETBHNPEN) && priv->otgState == HOST_OTG_STATE_A_SUSPEND) + pr_info("Device no Response\n"); + + phytium_write8(&priv->regs->otgirq, OTGIRQ_CONIRQ); +retry: + otgstate = phytium_read8(&priv->regs->otgstate); + if ((otgstate == HOST_OTG_STATE_A_HOST || otgstate == HOST_OTG_STATE_B_HOST)) { + pr_info("IRQ OTG: DisconnIrq Babble\n"); + goto retry; + } + + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | ENDPRST_TOGRST | ENDPRST_IO_TX); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | ENDPRST_TOGRST); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | 0 | 0x04); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | FIFOCTRL_IO_TX | 0 | 0x04); + + priv->portStatus = USB_PORT_STAT_POWER; + priv->portStatus |= USB_PORT_STAT_C_CONNECTION << 16; + + if (priv->hostCallbacks.portStatusChange) + priv->hostCallbacks.portStatusChange(priv); + + if (priv->otgState == HOST_OTG_STATE_A_SUSPEND) + host_SetVbus(priv, 1); + + priv->otgState = HOST_OTG_STATE_A_IDLE; + 
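+ /*
+  * Re-arm the PHY wakeup logic now that the port is empty: the
+  * standalone register block has a dedicated wakeup register, while the
+  * vhub block multiplexes the same control onto gen_cfg bit 1
+  * (connectHostDetect() performs the inverse writes on attach).
+  */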
if (priv->custom_regs) { + phytium_write32(&priv->custom_regs->wakeup, 1); + } else { + gen_cfg = phytium_read32(&priv->vhub_regs->gen_cfg); + gen_cfg |= BIT(1); + phytium_write32(&priv->vhub_regs->gen_cfg, gen_cfg); + } +} + +static inline void A_IdleDetect(struct HOST_CTRL *priv, uint8_t otgstate) +{ + uint8_t otgctrl; + + if (!priv) + return; + + phytium_write8(&priv->regs->otgirq, OTGIRQ_IDLEIRQ); + + if (otgstate != HOST_OTG_STATE_A_IDLE) { + pr_info("IRQ OTG: A_IDLE Babble\n"); + return; + } + + priv->portStatus = 0; + otgctrl = phytium_read8(&priv->regs->otgctrl); + otgctrl &= ~OTGCTRL_ASETBHNPEN; + phytium_write8(&priv->regs->otgctrl, otgctrl); + + host_SetVbus(priv, 1); + + priv->otgState = HOST_OTG_STATE_A_IDLE; +} + +static inline void B_IdleDetect(struct HOST_CTRL *priv, uint8_t otgstate) +{ + uint8_t otgctrl, usbcs; + + if (!priv) + return; + + phytium_write8(&priv->regs->otgirq, OTGIRQ_IDLEIRQ); + + if (otgstate != HOST_OTG_STATE_B_IDLE) { + pr_info("IRQ OTG: B_IDLE Babble\n"); + return; + } + + otgctrl = phytium_read8(&priv->regs->otgctrl); + otgctrl &= ~OTGCTRL_ASETBHNPEN; + phytium_write8(&priv->regs->otgctrl, otgctrl); + + host_SetVbus(priv, 0); + + priv->otgState = HOST_OTG_STATE_B_IDLE; + + usbcs = phytium_read8(&priv->regs->usbcs); + usbcs &= ~USBCS_DISCON; + phytium_write8(&priv->regs->usbcs, usbcs); +} + +static uint32_t waitForBusyBit(struct HOST_CTRL *priv, struct HostEp *hwEp) +{ + uint8_t *csReg; + uint8_t flag = CS_BUSY; + uint8_t buf = 0; + uint8_t val = CS_BUSY; + uint8_t otgstate; + uint8_t bufflag = 0; + + if (!priv || !hwEp) + return 0; + + if (hwEp->isInEp) + return 0; + + if (hwEp->hwEpNum == 0) { + csReg = &priv->regs->ep0cs; + flag = EP0CS_TXBUSY_MASK; + buf = 0; + } else { + csReg = &priv->regs->ep[hwEp->hwEpNum - 1].txcs; + buf = phytium_read8(&priv->regs->ep[hwEp->hwEpNum - 1].txcon) & CON_BUF; + } + + while ((val & flag) || bufflag == 0) { + otgstate = phytium_read8(&priv->regs->otgstate); + if (otgstate != HOST_OTG_STATE_B_HOST && otgstate != HOST_OTG_STATE_A_HOST) { + priv->ep0State = HOST_EP0_STAGE_IDLE; + return HOST_ESHUTDOWN; + } + + val = phytium_read8(csReg); + if (((val & CS_NPAK) >> CS_NPAK_OFFSET) == buf || buf == 0) + bufflag = 1; + else + bufflag = 0; + } + + return 0; +} + +static inline void connectHostDetect(struct HOST_CTRL *priv, uint8_t otgState) +{ + uint32_t gen_cfg; + + if (!priv) + return; + pr_debug("otgState:0x%x pirv->otgState:0x%x\n", otgState, priv->otgState); + if (priv->custom_regs) { + phytium_write32(&priv->custom_regs->wakeup, 0); + } else { + gen_cfg = phytium_read32(&priv->vhub_regs->gen_cfg); + gen_cfg &= ~BIT(1); + phytium_write32(&priv->vhub_regs->gen_cfg, gen_cfg); + } + + phytium_write8(&priv->regs->otgirq, OTGIRQ_CONIRQ); + + if ((otgState != HOST_OTG_STATE_A_HOST) && (otgState != HOST_OTG_STATE_B_HOST)) + return; + + if ((priv->otgState == HOST_OTG_STATE_A_PERIPHERAL) + || (priv->otgState == HOST_OTG_STATE_B_PERIPHERAL)) + priv->otgState = otgState; + + priv->ep0State = HOST_EP0_STAGE_IDLE; + + priv->portStatus &= ~(USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED | + USB_PORT_STAT_ENABLE); + + priv->portStatus |= USB_PORT_STAT_C_CONNECTION | (USB_PORT_STAT_C_CONNECTION << 16); + priv->dmaDrv->dma_controllerReset(priv->dmaController); + priv->port_resetting = 1; + host_SetVbus(priv, 1); + + switch (phytium_read8(&priv->regs->speedctrl)) { + case SPEEDCTRL_HS: + priv->portStatus |= USB_PORT_STAT_HIGH_SPEED; + pr_debug("detect High speed device\n"); + break; + case SPEEDCTRL_FS: + priv->portStatus &= 
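+ /* drop stale speed/enable bits; speed is re-read from speedctrl below */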
~(USB_PORT_STAT_HIGH_SPEED | USB_PORT_STAT_LOW_SPEED); + pr_debug("detect Full speed device\n"); + break; + case SPEEDCTRL_LS: + priv->portStatus |= USB_PORT_STAT_LOW_SPEED; + pr_debug("detect Low speed device\n"); + break; + } + + priv->vBusErrCnt = 0; + priv->dmaDrv->dma_setHostMode(priv->dmaController); + + if (priv->hostCallbacks.portStatusChange) + priv->hostCallbacks.portStatusChange(priv); + + priv->otgState = otgState; +} + +static void hostOtgIrq(struct HOST_CTRL *priv) +{ + uint8_t otgirq, otgien; + uint8_t otgstatus, otgstate; + uint8_t otgctrl; + + if (!priv) + return; + + otgirq = phytium_read8(&priv->regs->otgirq); + otgien = phytium_read8(&priv->regs->otgien); + otgstatus = phytium_read8(&priv->regs->otgstatus); + otgstate = phytium_read8(&priv->regs->otgstate); + otgirq &= otgien; + + if (!otgirq) + return; + + if (otgirq & OTGIRQ_BSE0SRPIRQ) { + otgirq &= ~OTGIRQ_BSE0SRPIRQ; + phytium_write8(&priv->regs->otgirq, OTGIRQ_BSE0SRPIRQ); + + otgctrl = phytium_read8(&priv->regs->otgctrl); + otgctrl &= ~OTGIRQ_BSE0SRPIRQ; + phytium_write8(&priv->regs->otgctrl, otgctrl); + } + + if (otgirq & OTGIRQ_SRPDETIRQ) { + otgirq &= ~OTGIRQ_SRPDETIRQ; + phytium_write8(&priv->regs->otgirq, OTGIRQ_SRPDETIRQ); + + otgctrl = phytium_read8(&priv->regs->otgctrl); + otgctrl &= ~OTGIRQ_SRPDETIRQ; + phytium_write8(&priv->regs->otgctrl, otgctrl); + } + + if (otgirq & OTGIRQ_VBUSERRIRQ) { + otgirq &= ~OTGIRQ_VBUSERRIRQ; + phytium_write8(&priv->regs->otgirq, OTGIRQ_VBUSERRIRQ); + + if (otgstate != HOST_OTG_STATE_A_VBUS_ERR) { + pr_info("IRQ OTG: VBUS ERROR Babble\n"); + return; + } + + host_SetVbus(priv, 0); + priv->otgState = HOST_OTG_STATE_A_VBUS_ERR; + if (priv->portStatus & USB_PORT_STAT_CONNECTION) { + priv->portStatus = USB_PORT_STAT_POWER | (USB_PORT_STAT_C_CONNECTION << 16); + if (priv->hostCallbacks.portStatusChange) + priv->hostCallbacks.portStatusChange(priv); + return; + } + + if (priv->vBusErrCnt >= 3) { + priv->vBusErrCnt = 0; + pr_info("%s %d VBUS OVER CURRENT\n", __func__, __LINE__); + priv->portStatus |= USB_PORT_STAT_OVERCURRENT | + (USB_PORT_STAT_C_OVERCURRENT << 16); + + phytium_write8(&priv->regs->otgirq, OTGIRQ_IDLEIRQ); + } else { + priv->vBusErrCnt++; + host_SetVbus(priv, 1); + phytium_write8(&priv->regs->otgirq, OTGIRQ_IDLEIRQ); + } + } + + if (otgirq & OTGIRQ_CONIRQ) { + if (priv->otgState == HOST_OTG_STATE_A_HOST || + priv->otgState == HOST_OTG_STATE_B_HOST || + priv->otgState == HOST_OTG_STATE_A_SUSPEND) { + if (otgstate == HOST_OTG_STATE_A_WAIT_VFALL || + otgstate == HOST_OTG_STATE_A_WAIT_BCON || + otgstate == HOST_OTG_STATE_A_SUSPEND) + disconnectHostDetect(priv); + } else if (priv->otgState != HOST_OTG_STATE_A_HOST && + priv->otgState != HOST_OTG_STATE_B_HOST && + priv->otgState != HOST_OTG_STATE_A_SUSPEND) + connectHostDetect(priv, otgstate); + + phytium_write8(&priv->regs->otgirq, OTGIRQ_CONIRQ); + } + + if (otgirq & OTGIRQ_IDLEIRQ) { + if (!(otgstatus & OTGSTATUS_ID)) + A_IdleDetect(priv, otgstate); + else + B_IdleDetect(priv, otgstate); + } + + phytium_write8(&priv->regs->otgirq, OTGIRQ_IDCHANGEIRQ | + OTGIRQ_SRPDETIRQ); +} + +static void hostErrorIrq(struct HOST_CTRL *priv) +{ + uint16_t txerrirq, txerrien; + uint16_t rxerrirq, rxerrien; + uint16_t i, mask; + + if (!priv) + return; + + txerrirq = phytium_read16(&priv->regs->txerrirq); + txerrien = phytium_read16(&priv->regs->txerrien); + txerrirq &= txerrien; + + rxerrirq = phytium_read16(&priv->regs->rxerrirq); + rxerrien = phytium_read16(&priv->regs->rxerrien); + rxerrirq &= rxerrien; + if (!txerrirq && !rxerrirq) + 
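+ /* no enabled TX/RX error interrupt is pending */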
return; + + for (i = 0; i < HOST_EP_NUM; i++) { + mask = 1 << i; + if (rxerrirq & mask) { + phytium_write16(&priv->regs->rxerrirq, mask); + rxerrien &= ~mask; + phytium_write16(&priv->regs->rxerrien, rxerrien); + priv->dmaDrv->dma_errIsr(priv->dmaController, i, 0); + } + + if (txerrirq & mask) { + phytium_write16(&priv->regs->txerrirq, mask); + txerrien &= ~mask; + phytium_write16(&priv->regs->txerrien, txerrien); + priv->dmaDrv->dma_errIsr(priv->dmaController, i, 1); + } + } +} + +static uint32_t decodeErrorCode(uint8_t code) +{ + uint32_t status = 0; + + switch (code) { + case ERR_NONE: + status = 0; + break; + case ERR_CRC: + pr_info("CRC Error\n"); + status = HOST_ESHUTDOWN; + break; + case ERR_DATA_TOGGLE_MISMATCH: + pr_info("Toggle MisMatch Error\n"); + status = HOST_ESHUTDOWN; + break; + case ERR_STALL: + pr_debug("Stall Error\n"); + status = HOST_ESTALL; + break; + case ERR_TIMEOUT: + pr_debug("Timeout Error\n"); + status = HOST_ESHUTDOWN; + break; + case ERR_PID: + pr_info("PID Error\n"); + status = HOST_ESHUTDOWN; + break; + case ERR_TOO_LONG_PACKET: + pr_info("TOO_LONG_PACKET Error\n"); + status = HOST_ESHUTDOWN; + break; + case ERR_DATA_UNDERRUN: + pr_info("UNDERRUN Error\n"); + status = HOST_ESHUTDOWN; + break; + } + + return status; +} + +static struct HOST_EP_PRIV *getIntTransfer(struct list_head *head) +{ + struct list_head *listEntry = NULL; + struct HOST_EP_PRIV *usbHEpPriv = NULL; + struct HOST_EP_PRIV *usbHEpPrivActual = NULL; + + list_for_each(listEntry, head) { + usbHEpPriv = getUsbHEpPrivEntry(listEntry); + if (!usbHEpPrivActual) + usbHEpPrivActual = usbHEpPriv; + + if (usbHEpPriv->frame < usbHEpPrivActual->frame) + usbHEpPrivActual = usbHEpPriv; + } + + return usbHEpPrivActual; +} + +static void givebackRequest(struct HOST_CTRL *priv, struct HOST_REQ *usbReq, uint32_t status) +{ + if (!priv || !usbReq) + return; + + list_del(&usbReq->list); + + if (usbReq->status == EINPROGRESS) + usbReq->status = status; + + if (priv->hostCallbacks.givebackRequest) + priv->hostCallbacks.givebackRequest(priv, usbReq, status); +} + +static void hostEpProgram(struct HOST_CTRL *priv, struct HostEp *hwEp, + struct HOST_REQ *usbReq, uintptr_t dmaBuff, uint32_t length) +{ + struct HOST_EP *usbHEp; + struct HOST_EP_PRIV *usbEpPriv; + uint32_t chMaxLen; + uint8_t regCon = 0; + uint8_t ep0cs; + uint16_t txerrien = 0; + uint16_t rxerrien = 0; + uint32_t result; + uint8_t txsoftimer, rxsoftimer; + u8 retval = 0; + + if (!priv || !hwEp || !usbReq) + return; + + usbHEp = hwEp->scheduledUsbHEp; + usbEpPriv = (struct HOST_EP_PRIV *)usbHEp->hcPriv; + + if (!hwEp->channel) { + if (usbEpPriv->type == USB_ENDPOINT_XFER_ISOC) + hwEp->channel = priv->dmaDrv->dma_channelAlloc(priv->dmaController, + !hwEp->isInEp, hwEp->hwEpNum, 1); + else + hwEp->channel = priv->dmaDrv->dma_channelAlloc(priv->dmaController, + !hwEp->isInEp, hwEp->hwEpNum, 0); + } + + chMaxLen = priv->dmaDrv->dma_getMaxLength(priv->dmaController, hwEp->channel); + + pr_debug("chMaxLen:0x%x buffLength:0x%x\n", chMaxLen, usbReq->buffLength); + if (usbReq->buffLength > chMaxLen) + length = chMaxLen; + + switch (usbEpPriv->type) { + case USB_ENDPOINT_XFER_CONTROL: + regCon = CON_TYPE_CONTROL; + break; + case USB_ENDPOINT_XFER_BULK: + regCon = CON_TYPE_BULK; + break; + case USB_ENDPOINT_XFER_INT: + regCon = CON_TYPE_INT; + break; + case USB_ENDPOINT_XFER_ISOC: + if (usbEpPriv->isocEpConfigured) + goto dma_program; + + usbEpPriv->isocEpConfigured = 1; + regCon = CON_TYPE_ISOC; + switch (usbHEp->desc.wMaxPacketSize >> 11) { + case 0: + regCon |= 
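+ /*
+  * Bits 12:11 of wMaxPacketSize give the number of additional
+  * transactions per microframe for high-bandwidth isochronous
+  * endpoints (USB 2.0 spec, section 9.6.6); the DMA burst length is
+  * scaled to a 1x/2x/3x packet budget accordingly.
+  */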
CON_TYPE_ISOC_1_ISOD; + priv->dmaDrv->dma_setMaxLength(priv->dmaController, + hwEp->channel, usbEpPriv->maxPacketSize); + break; + case 1: + regCon |= CON_TYPE_ISOC_2_ISOD; + priv->dmaDrv->dma_setMaxLength(priv->dmaController, + hwEp->channel, 2 * 1024); + break; + case 2: + priv->dmaDrv->dma_setMaxLength(priv->dmaController, + hwEp->channel, 3 * 1024); + regCon |= CON_TYPE_ISOC_3_ISOD; + break; + } + break; + } + + if (usbEpPriv->type != USB_ENDPOINT_XFER_ISOC) { + if (!hwEp->hwEpNum) { + if (phytium_read8(&priv->regs->ep0cs) & 0x4) { + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | + 0 | 0x4); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | + FIFOCTRL_IO_TX | 0 | 0x4); + } + } + if (waitForBusyBit(priv, hwEp) > 0) { + usbReq->status = HOST_ESHUTDOWN; + givebackRequest(priv, usbReq, HOST_ESHUTDOWN); + pr_info("something error happen\n"); + return; + } + } + + if (!hwEp->isInEp) { + if (hwEp->hwEpNum) { + regCon |= hwEp->hwBuffers - 1; + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].txcon, regCon); + if (usbEpPriv->type != USB_ENDPOINT_XFER_ISOC) { + retval = priv->hostCallbacks.getEpToggle(priv, + usbReq->usbDev, usbHEp->device_epNum, 0); + if (retval) { + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | + ENDPRST_IO_TX); + + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | + ENDPRST_TOGSETQ | ENDPRST_IO_TX | ENDPRST_FIFORST); + } else { + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | + ENDPRST_IO_TX); + + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | + ENDPRST_TOGRST | ENDPRST_IO_TX | ENDPRST_FIFORST); + } + } + + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].txcon, regCon | CON_VAL); + phytium_write16(&priv->regs->txmaxpack[hwEp->hwEpNum - 1], + usbEpPriv->maxPacketSize); + + phytium_write8(&priv->regs->epExt[hwEp->hwEpNum - 1].txctrl, + usbHEp->device_epNum); + + phytium_write8(&priv->regs->fnaddr, usbEpPriv->faddress); + + txsoftimer = phytium_read8(&priv->regs->txsoftimer[hwEp->hwEpNum].ctrl); + txsoftimer = txsoftimer | BIT(1); + phytium_write8(&priv->regs->txsoftimer[hwEp->hwEpNum].ctrl, txsoftimer); + + phytium_write16(&priv->regs->txsoftimer[hwEp->hwEpNum].timer, + usbEpPriv->frame); + + phytium_write8(&priv->regs->txsoftimer[hwEp->hwEpNum].ctrl, 0x83); + } else { + phytium_write8(&priv->regs->fnaddr, usbEpPriv->faddress); + phytium_write8(&priv->regs->ep0maxpack, usbEpPriv->maxPacketSize); + phytium_write8(&priv->regs->ep0ctrl, usbEpPriv->epNum); + + if (priv->ep0State == HOST_EP0_STAGE_SETUP) { + ep0cs = phytium_read8(&priv->regs->ep0cs); + ep0cs |= EP0CS_HCSET; + phytium_write8(&priv->regs->ep0cs, ep0cs); + } + } + + phytium_write16(&priv->regs->txerrirq, 1 << hwEp->hwEpNum); + txerrien = phytium_read16(&priv->regs->txerrien); + txerrien |= 1 << hwEp->hwEpNum; + phytium_write16(&priv->regs->txerrien, txerrien); + } else { + if (hwEp->hwEpNum) { + regCon |= hwEp->hwBuffers - 1; + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcon, regCon); + + if (usbEpPriv->type != USB_ENDPOINT_XFER_ISOC) { + if (priv->hostCallbacks.getEpToggle) { + retval = priv->hostCallbacks.getEpToggle(priv, + usbReq->usbDev, usbHEp->device_epNum, 1); + if (retval) { + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum); + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | + ENDPRST_TOGSETQ | ENDPRST_FIFORST); + } else { + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum); + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | + ENDPRST_TOGRST | ENDPRST_FIFORST); + } + } + } + + phytium_write16(&priv->regs->rxmaxpack[hwEp->hwEpNum - 1], + 
usbEpPriv->maxPacketSize); + + phytium_write8(&priv->regs->epExt[hwEp->hwEpNum - 1].rxctrl, + usbHEp->device_epNum); + + phytium_write8(&priv->regs->fnaddr, usbEpPriv->faddress); + + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcs, 1); + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcs, 1); + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcs, 1); + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcs, 1); + + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcon, regCon | CON_VAL); + rxsoftimer = phytium_read8(&priv->regs->rxsoftimer[hwEp->hwEpNum].ctrl); + rxsoftimer = rxsoftimer | BIT(1); + phytium_write8(&priv->regs->rxsoftimer[hwEp->hwEpNum].ctrl, rxsoftimer); + + phytium_write16(&priv->regs->rxsoftimer[hwEp->hwEpNum].timer, + usbEpPriv->frame); + + phytium_write8(&priv->regs->rxsoftimer[hwEp->hwEpNum].ctrl, 0x83); + } else { + phytium_write8(&priv->regs->fnaddr, usbEpPriv->faddress); + phytium_write8(&priv->regs->ep0maxpack, usbEpPriv->maxPacketSize); + phytium_write8(&priv->regs->ep0ctrl, usbEpPriv->epNum); + + if (priv->ep0State == HOST_EP0_STAGE_IN + || priv->ep0State == HOST_EP0_STAGE_STATUSIN) + phytium_write8(&priv->regs->ep0cs, EP0CS_HCSETTOGGLE); + } + + phytium_write16(&priv->regs->rxerrirq, 1 << hwEp->hwEpNum); + rxerrien = phytium_read16(&priv->regs->rxerrien); + rxerrien |= 1 << hwEp->hwEpNum; + phytium_write16(&priv->regs->rxerrien, rxerrien); + } +dma_program: + result = priv->dmaDrv->dma_channelProgram(priv->dmaController, hwEp->channel, + usbEpPriv->maxPacketSize, dmaBuff, length, + (void *)usbReq->isoFramesDesc, usbReq->isoFramesNumber); + if (result) { + if (!hwEp->isInEp) { + txerrien &= ~(1 << hwEp->hwEpNum); + if (hwEp->hwEpNum) + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].txcon, 0x08); + phytium_write16(&priv->regs->txerrien, txerrien); + } else { + rxerrien &= ~(1 << hwEp->hwEpNum); + if (hwEp->hwEpNum) + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcon, 0x08); + phytium_write16(&priv->regs->rxerrien, rxerrien); + } + + priv->dmaDrv->dma_channelRelease(priv->dmaController, hwEp->channel); + hwEp->channel = NULL; + } +} + +static void hostStartReq(struct HOST_CTRL *priv, struct HOST_REQ *req) +{ + uintptr_t dmaBuff; + uint32_t length; + struct HOST_EP *hostEp; + struct HOST_EP_PRIV *hostEpPriv; + struct HOST_EP_PRIV *hostNewEpPriv; + struct HOST_REQ *usbReq = NULL; + struct HostEp *hwEp = NULL; + int num; + + if (!priv || !req) + return; + + hostEp = req->hcPriv; + if (hostEp) { + hostEpPriv = (struct HOST_EP_PRIV *)hostEp->hcPriv; + if (hostEpPriv) { + hostEpPriv->genericHwEp->state = HOST_EP_BUSY; + switch (hostEpPriv->type) { + case USB_ENDPOINT_XFER_CONTROL: + usbReq = getNextReq(hostEp); + + priv->in[HOST_GENERIC_EP_CONTROL].scheduledUsbHEp = hostEp; + priv->ep0State = HOST_EP0_STAGE_SETUP; + hostEpPriv->currentHwEp = hostEpPriv->genericHwEp; + hostEpPriv->genericHwEp->scheduledUsbHEp = hostEp; + dmaBuff = usbReq->setupDma; + length = 8; + pr_debug("packet info: %02x %02x %04x %04x %04x\n", + usbReq->setup->bRequestType, + usbReq->setup->bRequest, + usbReq->setup->wValue, + usbReq->setup->wIndex, + usbReq->setup->wLength); + hostEpProgram(priv, hostEpPriv->genericHwEp, + usbReq, dmaBuff, length); + break; + case USB_ENDPOINT_XFER_BULK: + hwEp = hostEpPriv->genericHwEp; + usbReq = getNextReq(hostEp); + hostEpPriv->currentHwEp = hwEp; + hwEp->scheduledUsbHEp = hostEp; + dmaBuff = usbReq->buffDma + usbReq->actualLength; + length = usbReq->buffLength - usbReq->actualLength; + hostEpProgram(priv, hwEp, usbReq, dmaBuff, length); 
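+ /* bulk requests resume from actualLength, so a partially completed
+  * transfer is re-programmed for the remaining bytes only */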
+ break; + case USB_ENDPOINT_XFER_INT: + if (hostEpPriv->genericHwEp->scheduledUsbHEp) + return; + + num = req->epNum - 1; + if (hostEpPriv->isIn) + hostNewEpPriv = getIntTransfer(&priv->intInHEpQueue[num]); + else + hostNewEpPriv = getIntTransfer(&priv->intOutHEpQueue[num]); + + hostNewEpPriv->currentHwEp = hostEpPriv->genericHwEp; + hostEpPriv->genericHwEp->scheduledUsbHEp = hostNewEpPriv->usbHEp; + usbReq = getNextReq(hostEp); + dmaBuff = usbReq->buffDma + usbReq->actualLength; + length = usbReq->buffLength - usbReq->actualLength; + hostEpProgram(priv, hostEpPriv->genericHwEp, + usbReq, dmaBuff, length); + break; + case USB_ENDPOINT_XFER_ISOC: + hostEpPriv->currentHwEp = hostEpPriv->genericHwEp; + hostEpPriv->genericHwEp->scheduledUsbHEp = hostEp; + dmaBuff = req->buffDma + req->actualLength; + length = req->buffLength - req->actualLength; + hostEpProgram(priv, hostEpPriv->genericHwEp, req, dmaBuff, length); + break; + } + } + } +} + +static void abortTransfer(struct HOST_CTRL *priv, + struct HOST_REQ *usbReq, struct HostEp *hwEp) +{ + struct HOST_EP *usbEp; + struct HOST_EP_PRIV *usbHEpPriv; + uint32_t status; + + if (!priv || !usbReq || !hwEp || !hwEp->scheduledUsbHEp) + return; + + usbEp = hwEp->scheduledUsbHEp; + usbHEpPriv = (struct HOST_EP_PRIV *)usbEp->hcPriv; + if (!usbHEpPriv) + return; + + status = (usbReq->status == EINPROGRESS) ? 0 : usbReq->status; + givebackRequest(priv, usbReq, status); + + if (list_empty(&usbEp->reqList)) { + usbHEpPriv->epIsReady = 0; + usbHEpPriv->currentHwEp = NULL; + hwEp->scheduledUsbHEp = NULL; + + if (hwEp->channel) { + priv->dmaDrv->dma_channelRelease(priv->dmaController, hwEp->channel); + hwEp->channel = NULL; + } + + if (usb_endpoint_xfer_int(&usbEp->desc)) + list_del(&usbHEpPriv->node); + } +} + +static void scheduleNextTransfer(struct HOST_CTRL *priv, + struct HOST_REQ *usbReq, struct HostEp *hwEp) +{ + struct HOST_EP *usbEp; + struct HOST_EP_PRIV *usbHEpPriv; + uint8_t endprst; + uint32_t status; + struct HOST_REQ *usbNextReq = NULL; + + if (!priv || !usbReq || !hwEp) + return; + + usbEp = hwEp->scheduledUsbHEp; + usbHEpPriv = (struct HOST_EP_PRIV *)usbEp->hcPriv; + if (!usbHEpPriv) + return; + + status = (usbReq->status == EINPROGRESS) ? 0 : usbReq->status; + switch (usbHEpPriv->type) { + case USB_ENDPOINT_XFER_BULK: + case USB_ENDPOINT_XFER_INT: + if (hwEp->isInEp) { + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum); + endprst = (phytium_read8(&priv->regs->endprst) & ENDPRST_TOGSETQ) ? 1 : 0; + if (priv->hostCallbacks.setEpToggle) + priv->hostCallbacks.setEpToggle(priv, usbReq->usbDev, + usbEp->device_epNum, usbHEpPriv->isIn, endprst); + } else { + if (waitForBusyBit(priv, hwEp) > 0) { + usbReq->status = HOST_ESHUTDOWN; + givebackRequest(priv, usbReq, HOST_ESHUTDOWN); + return; + } + + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | ENDPRST_IO_TX); + endprst = (phytium_read8(&priv->regs->endprst) & ENDPRST_TOGSETQ) ? 
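+ /* latch the data toggle the controller expects next */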
1 : 0; + if (priv->hostCallbacks.setEpToggle) + priv->hostCallbacks.setEpToggle(priv, usbReq->usbDev, + usbEp->device_epNum, usbHEpPriv->isIn, endprst); + } + break; + } + + if (usbHEpPriv->transferFinished) + givebackRequest(priv, usbReq, status); + + if (list_empty(&usbEp->reqList)) { + if (usbHEpPriv->type == USB_ENDPOINT_XFER_CONTROL) + hwEp->state = HOST_EP_ALLOCATED; + else { + if (usbHEpPriv->genericHwEp == hwEp) + hwEp->state = HOST_EP_ALLOCATED; + else + hwEp->state = HOST_EP_FREE; + } + + usbHEpPriv->epIsReady = 0; + usbHEpPriv->currentHwEp = NULL; + hwEp->scheduledUsbHEp = NULL; + + if (hwEp->channel) { + priv->dmaDrv->dma_channelRelease(priv->dmaController, hwEp->channel); + hwEp->channel = NULL; + } + + if (usb_endpoint_xfer_int(&usbEp->desc)) + list_del(&usbHEpPriv->node); + usbHEpPriv = NULL; + } else { + if (usbHEpPriv->type == USB_ENDPOINT_XFER_INT) { + usbHEpPriv->currentHwEp = NULL; + hwEp->scheduledUsbHEp = NULL; + } + + if (usbHEpPriv->type == USB_ENDPOINT_XFER_ISOC) + return; + } + + if (usbHEpPriv) { + usbNextReq = getNextReq(usbHEpPriv->usbHEp); + hostStartReq(priv, usbNextReq); + } +} + +static int32_t hostEp0Irq(struct HOST_CTRL *priv, uint8_t isIn) +{ + struct HostEp *hwEp; + struct HOST_EP *hostEp; + struct HOST_REQ *usbReq = NULL; + struct HOST_EP_PRIV *usbHEpPriv; + uint32_t status, length; + uint8_t usbError; + uint8_t nextStage = 0; + int ret = 0; + + if (!priv) + return 0; + + hwEp = isIn ? &priv->in[HOST_GENERIC_EP_CONTROL] : &priv->out[HOST_GENERIC_EP_CONTROL]; + hostEp = hwEp->scheduledUsbHEp; + usbHEpPriv = (struct HOST_EP_PRIV *)hostEp->hcPriv; + + usbReq = getNextReq(hostEp); + if (!usbReq) + return 0; + + usbError = isIn ? phytium_read8(&priv->regs->rx0err) : phytium_read8(&priv->regs->tx0err); + usbError = (usbError & ERR_TYPE) >> 2; + status = decodeErrorCode(usbError); + if (status) { + usbReq->status = status; + + if (status == HOST_ESTALL) { + ret = 1; + priv->dmaDrv->dma_controllerReset(priv->dmaController); + } + + phytium_write16(&priv->regs->rxerrirq, 1 << hwEp->hwEpNum); + phytium_write16(&priv->regs->txerrirq, 1 << hwEp->hwEpNum); + + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | 0 | 0x4); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | + FIFOCTRL_IO_TX | 0 | 0x4); + } + + length = priv->dmaDrv->dma_getActualLength(priv->dmaController, hwEp->channel); + priv->dmaDrv->dma_channelRelease(priv->dmaController, hwEp->channel); + hwEp->channel = NULL; + + if (usbReq->status == EINPROGRESS && priv->ep0State < HOST_EP0_STAGE_STATUSIN) { + nextStage = 0; + switch (priv->ep0State) { + case HOST_EP0_STAGE_IN: + pr_debug("Ep0 Data IN\n"); + usbHEpPriv->currentHwEp = &priv->out[HOST_GENERIC_EP_CONTROL]; + usbReq->actualLength = length; + priv->ep0State = HOST_EP0_STAGE_STATUSOUT; + break; + case HOST_EP0_STAGE_OUT: + pr_debug("Ep0 Data OUT\n"); + usbHEpPriv->currentHwEp = &priv->in[HOST_GENERIC_EP_CONTROL]; + usbReq->actualLength = length; + priv->ep0State = HOST_EP0_STAGE_STATUSIN; + break; + case HOST_EP0_STAGE_SETUP: + pr_debug("Ep0 Stage Setup\n"); + if (!usbReq->setup->wLength) { + pr_debug("EP0_STAGE_STATUSIN\n"); + priv->ep0State = HOST_EP0_STAGE_STATUSIN; + usbHEpPriv->currentHwEp = &priv->in[HOST_GENERIC_EP_CONTROL]; + break; + } else if (usbReq->setup->bRequestType & USB_DIR_IN) { + pr_debug("EP0_STAGE_STAGE_IN\n"); + priv->ep0State = HOST_EP0_STAGE_IN; + usbHEpPriv->currentHwEp = &priv->in[HOST_GENERIC_EP_CONTROL]; + nextStage = 1; + break; + } + priv->ep0State = HOST_EP0_STAGE_OUT; + nextStage = 1; + break; + 
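+ /*
+  * The status/idle labels below are defensive: the enclosing check
+  * (ep0State < HOST_EP0_STAGE_STATUSIN) filters those stages out
+  * before the switch, assuming the host enum orders its stages like
+  * the gadget enum above, so they only log.
+  */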
case HOST_EP0_STAGE_STATUSIN: + case HOST_EP0_STAGE_STATUSOUT: + case HOST_EP0_STAGE_ACK: + case HOST_EP0_STAGE_IDLE: + default: + pr_debug("EP0 STAGE is %d\n", priv->ep0State); + break; + } + + if (nextStage) { + length = usbReq->buffLength; + hostEpProgram(priv, usbHEpPriv->currentHwEp, usbReq, + usbReq->buffDma, length); + } else + hostEpProgram(priv, usbHEpPriv->currentHwEp, usbReq, usbReq->setupDma, 0); + } else + priv->ep0State = HOST_EP0_STAGE_IDLE; + + if (priv->ep0State == HOST_EP0_STAGE_IDLE || usbReq->status != EINPROGRESS) { + usbHEpPriv->transferFinished = 1; + scheduleNextTransfer(priv, usbReq, hwEp); + } + + return ret; +} + +static void updateTimeIntTransfer(struct list_head *head, struct HOST_EP_PRIV *lastFinished) +{ + struct list_head *listEntry = NULL; + struct HOST_EP_PRIV *usbHEpPriv = NULL; + uint16_t time = lastFinished->frame; + + list_for_each(listEntry, head) { + usbHEpPriv = getUsbHEpPrivEntry(listEntry); + if (usbHEpPriv == lastFinished) { + lastFinished->frame = lastFinished->interval; + continue; + } + + if (usbHEpPriv->frame < time) + usbHEpPriv->frame = 0; + else + lastFinished->interval = usbHEpPriv->frame; + } +} + +static int32_t hostEpXIrq(struct HOST_CTRL *priv, uint8_t hwEpNum, uint8_t isIn, bool resubmit) +{ + struct HostEp *hwEp; + struct HOST_EP *hostEp; + struct HOST_EP_PRIV *usbHEpPriv; + struct HOST_REQ *usbReq; + + uint8_t usbError, rxcon, txcon; + uint32_t status, length, chMaxLen; + + if (!priv) + return -EINVAL; + + hwEp = isIn ? &priv->in[hwEpNum] : &priv->out[hwEpNum]; + hostEp = hwEp->scheduledUsbHEp; + if (!hostEp) + return -EINVAL; + + usbHEpPriv = (struct HOST_EP_PRIV *)hostEp->hcPriv; + + usbReq = getNextReq(hostEp); + if (!usbReq) + return 0; + + if (isIn) + usbError = phytium_read8(&priv->regs->epExt[hwEpNum - 1].rxerr); + else + usbError = phytium_read8(&priv->regs->epExt[hwEpNum - 1].txerr); + + usbError = (usbError & ERR_TYPE) >> 2; + status = decodeErrorCode(usbError); + if (status) { + pr_debug("%s %d Aborting\n", __func__, __LINE__); + if (isIn) + phytium_write16(&priv->regs->rxerrirq, 1 << hwEpNum); + else + phytium_write16(&priv->regs->txerrirq, 1 << hwEpNum); + priv->dmaDrv->dma_channelAbort(priv->dmaController, hwEp->channel); + } + + length = priv->dmaDrv->dma_getActualLength(priv->dmaController, hwEp->channel); + chMaxLen = priv->dmaDrv->dma_getMaxLength(priv->dmaController, hwEp->channel); + + if (status != 0) + usbReq->status = status; + + usbReq->actualLength += length; + + if (!resubmit) + usbHEpPriv->transferFinished = 1; + + if (length == chMaxLen) { + if ((usbReq->buffLength - usbReq->actualLength) == 0) + usbHEpPriv->transferFinished = 1; + else + usbHEpPriv->transferFinished = 0; + } + + if (usbHEpPriv->type != USB_ENDPOINT_XFER_ISOC) { + if (!hwEp->isInEp) { + txcon = phytium_read8(&priv->regs->ep[hwEp->hwEpNum - 1].txcon); + txcon &= ~CON_VAL; + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].txcon, txcon); + } else { + rxcon = phytium_read8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcon); + rxcon &= ~CON_VAL; + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcon, rxcon); + } + priv->dmaDrv->dma_channelRelease(priv->dmaController, hwEp->channel); + hwEp->channel = NULL; + } + + if (usbHEpPriv->type == USB_ENDPOINT_XFER_INT) { + if (usbHEpPriv->isIn) + updateTimeIntTransfer(&priv->intInHEpQueue[hwEp->hwEpNum - 1], usbHEpPriv); + else + updateTimeIntTransfer(&priv->intOutHEpQueue[hwEp->hwEpNum - 1], usbHEpPriv); + } + + scheduleNextTransfer(priv, usbReq, hwEp); + + return 0; +} + +static void 
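+ /*
+  * DMA completion callback (wired up as dmaCallback.complete in
+  * hostInit()): ep0 completions drive the control-transfer state
+  * machine; on a stall, or when the DMA layer requests a resubmit,
+  * every other endpoint is rescanned, since the controller reset
+  * issued in the stall path also affects in-flight transfers on the
+  * remaining endpoints.
+  */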
host_CallbackTransfer(void *priv, uint8_t epNum, uint8_t isDirTx, bool resubmit) +{ + struct HOST_CTRL *host_priv = (struct HOST_CTRL *)priv; + int i; + int ret = 0; + + if (!epNum) { + ret = hostEp0Irq(host_priv, !isDirTx); + if (resubmit | ret) { + for (i = 1; i < HOST_EP_NUM; i++) { + hostEpXIrq(host_priv, i, !isDirTx, true); + hostEpXIrq(host_priv, i, isDirTx, true); + } + } + } else + hostEpXIrq(host_priv, epNum, !isDirTx, false); +} + +static int hc_reset(struct usb_hcd *hcd) +{ + hcd->speed = HCD_USB2; + hcd->self.root_hub->speed = USB_SPEED_HIGH; + + return 0; +} + +static int hc_start(struct usb_hcd *hcd) +{ + hcd->state = HC_STATE_RUNNING; + return 0; +} + +static void hc_stop(struct usb_hcd *hcd) +{ + struct phytium_cusb *config = *(struct phytium_cusb **)hcd->hcd_priv; + + if (config) + config->host_obj->host_stop(config->host_priv); + + if (config->host_cfg.trbAddr) { + dma_free_coherent(config->dev, config->host_sysreq.trbMemSize, + config->host_cfg.trbAddr, config->host_cfg.trbDmaAddr); + config->host_cfg.trbAddr = NULL; + } + + if (config->host_priv) { + devm_kfree(config->dev, config->host_priv); + config->host_priv = NULL; + } + + hcd->state = HC_STATE_HALT; +} + +static void hc_shutdown(struct usb_hcd *hcd) +{ +} + +static void host_endpoint_update(struct phytium_cusb *config, + struct HOST_USB_DEVICE *udev, struct usb_host_endpoint *ep) +{ + int epnum; + struct HOST_EP *hostEp; + + if (!config || !udev || !ep) + return; + + epnum = get_epnum_from_pool(config->host_priv, usb_endpoint_num(&ep->desc), + usb_endpoint_dir_in(&ep->desc)); + + if (usb_endpoint_dir_out(&ep->desc)) { + if (udev->out_ep[epnum] == NULL) { + hostEp = kzalloc(sizeof(struct HOST_EP) + + config->host_obj->host_epGetPrivateDataSize(config->host_priv), + GFP_ATOMIC); + udev->out_ep[epnum] = hostEp; + } else + hostEp = udev->out_ep[epnum]; + } else { + if (udev->in_ep[epnum] == NULL) { + hostEp = kzalloc(sizeof(struct HOST_EP) + + config->host_obj->host_epGetPrivateDataSize(config->host_priv), + GFP_ATOMIC); + udev->in_ep[epnum] = hostEp; + } else + hostEp = udev->in_ep[epnum]; + } + + hostEp->desc = *(struct usb_endpoint_descriptor *)(&ep->desc); + hostEp->userExt = (void *)ep; + INIT_LIST_HEAD(&hostEp->reqList); + hostEp->hcPriv = &((uint8_t *)hostEp)[sizeof(struct HOST_EP)]; +} + +static int hc_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) +{ + unsigned long flags; + u32 isoFrameSize; + int retval; + int index; + struct HOST_REQ *req; + struct HOST_CTRL *priv; + struct HOST_USB_DEVICE *usbDev; + struct usb_host_endpoint *host_ep; + struct usb_endpoint_descriptor *host_ep_desc; + struct phytium_cusb *config = *(struct phytium_cusb **)hcd->hcd_priv; + + if (!config) + return -ENODEV; + + priv = config->host_priv; + if (!priv) + return -ENODEV; + + usbDev = priv->host_devices_table[urb->dev->slot_id]; + if (!usbDev) + return -ENODEV; + + host_ep = urb->ep; + host_ep_desc = &host_ep->desc; + + spin_lock_irqsave(&config->lock, flags); + retval = usb_hcd_link_urb_to_ep(hcd, urb); + if (retval < 0) { + spin_unlock_irqrestore(&config->lock, flags); + return retval; + } + spin_unlock_irqrestore(&config->lock, flags); + isoFrameSize = urb->number_of_packets * sizeof(struct HOST_ISOFRAMESDESC); + req = kzalloc((sizeof(struct HOST_REQ) + + isoFrameSize), mem_flags); + if (!req) { + spin_lock_irqsave(&config->lock, flags); + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&config->lock, flags); + return -ENOMEM; + } + + req->isoFramesDesc = NULL; + req->isoFramesNumber = 
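+ /* one HOST_ISOFRAMESDESC per URB iso packet; filled in below */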
urb->number_of_packets; + req->epNum = get_epnum_from_pool(config->host_priv, usb_endpoint_num(host_ep_desc), + usb_endpoint_dir_in(host_ep_desc)); + + if (usb_endpoint_dir_in(host_ep_desc)) { + if (!usbDev->in_ep[req->epNum]) + host_endpoint_update(config, usbDev, host_ep); + } else { + if (!usbDev->out_ep[req->epNum]) + host_endpoint_update(config, usbDev, host_ep); + } + + if (usb_endpoint_xfer_isoc(host_ep_desc)) { + req->isoFramesDesc = (struct HOST_ISOFRAMESDESC *)(&req[1]); + for (index = 0; index < urb->number_of_packets; index++) { + req->isoFramesDesc[index].length = urb->iso_frame_desc[index].length; + req->isoFramesDesc[index].offset = urb->iso_frame_desc[index].offset; + } + } + + spin_lock_irqsave(&config->lock, flags); + INIT_LIST_HEAD(&req->list); + req->userExt = (void *)urb; + req->actualLength = urb->actual_length; + req->bufAddress = urb->transfer_buffer; + req->buffDma = urb->transfer_dma; + req->buffLength = urb->transfer_buffer_length; + req->epIsIn = usb_endpoint_dir_in(host_ep_desc); + req->eptype = usb_endpoint_type(host_ep_desc); + req->faddress = usb_pipedevice(urb->pipe); + req->interval = urb->interval; + req->reqUnlinked = 0; + req->setup = (struct usb_ctrlrequest *)urb->setup_packet; + req->setupDma = urb->setup_dma; + req->status = EINPROGRESS; + req->usbDev = &usbDev->udev; + req->usbEp = req->epIsIn ? usbDev->in_ep[req->epNum] : usbDev->out_ep[req->epNum]; + req->usbEp->device_epNum = usb_endpoint_num(host_ep_desc); + if (!req->epNum) + usbDev->ep0_hep.desc.wMaxPacketSize = urb->dev->ep0.desc.wMaxPacketSize; + + req->hcPriv = (void *)req->usbEp; + urb->hcpriv = req; + host_ep->hcpriv = (void *)usbDev; + retval = config->host_obj->host_reqQueue(config->host_priv, req); + if (retval) { + usb_hcd_unlink_urb_from_ep(hcd, urb); + urb->hcpriv = NULL; + spin_unlock_irqrestore(&config->lock, flags); + kfree(req); + return -retval; + } + spin_unlock_irqrestore(&config->lock, flags); + + return 0; +} + +static int hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) +{ + unsigned long flags; + struct HOST_CTRL *priv; + int ret = 0; + struct phytium_cusb *config = *(struct phytium_cusb **)hcd->hcd_priv; + + if (!config) + return -ENODEV; + + priv = config->host_priv; + if (!priv->host_devices_table[urb->dev->slot_id]) + return -ENODEV; + + spin_lock_irqsave(&config->lock, flags); + if (usb_hcd_check_unlink_urb(hcd, urb, status)) + goto done; + + if (!urb->hcpriv) + goto err_giveback; + + ret = config->host_obj->host_reqDequeue(priv, urb->hcpriv, status); + + release_epnum_from_pool(config->host_priv, usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe)); + + kfree(urb->hcpriv); + urb->hcpriv = NULL; +done: + spin_unlock_irqrestore(&config->lock, flags); + return ret; + +err_giveback: + kfree(urb->hcpriv); + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&config->lock, flags); + usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN); + return ret; +} + +static void hc_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ld_ep) +{ + struct HOST_USB_DEVICE *usbDev; + int ep_num; + struct phytium_cusb *config; + + config = *(struct phytium_cusb **)hcd->hcd_priv; + if (!config) + return; + + ep_num = get_epnum_from_pool(config->host_priv, usb_endpoint_num(&ld_ep->desc), + usb_endpoint_dir_in(&ld_ep->desc)); + + usbDev = (struct HOST_USB_DEVICE *)ld_ep->hcpriv; + if (!usbDev) + return; + + if (ld_ep->desc.bEndpointAddress) { + if (usb_endpoint_dir_in(&ld_ep->desc)) { + if (usbDev->in_ep[ep_num]) { + usbDev->in_ep[ep_num]->userExt = NULL; + 
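+ /* detach bookkeeping before the endpoint memory is freed */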
INIT_LIST_HEAD(&usbDev->in_ep[ep_num]->reqList); + kfree(usbDev->in_ep[ep_num]); + usbDev->in_ep[ep_num] = NULL; + } + } else { + if (usbDev->out_ep[ep_num]) { + usbDev->out_ep[ep_num]->userExt = NULL; + INIT_LIST_HEAD(&usbDev->out_ep[ep_num]->reqList); + kfree(usbDev->out_ep[ep_num]); + usbDev->out_ep[ep_num] = NULL; + } + } + } +} + +static int hc_get_frame(struct usb_hcd *hcd) +{ + return 0; +} + +static int hc_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct HOST_USB_DEVICE *usbDev; + struct phytium_cusb *config; + struct HOST_CTRL *priv; + unsigned long flags = 0; + int index; + int slot = -EINVAL; + + if (!hcd || !udev) + return -EINVAL; + + config = *(struct phytium_cusb **)hcd->hcd_priv; + if (!config) + return 0; + + spin_lock_irqsave(&config->lock, flags); + priv = config->host_priv; + for (index = 0; index < MAX_SUPPORTED_DEVICES; index++) { + if (priv->host_devices_table[index] == NULL) { + slot = index; + break; + } + } + spin_unlock_irqrestore(&config->lock, flags); + + if (slot < 0) + return -ENOMEM; + + usbDev = kzalloc((sizeof(struct HOST_USB_DEVICE) + + config->host_obj->host_epGetPrivateDataSize(priv)), GFP_KERNEL); + if (!usbDev) + return -ENOMEM; + + usbDev->ep0_hep.hcPriv = &((uint8_t *)usbDev)[sizeof(struct HOST_USB_DEVICE)]; + usbDev->udev.userExt = (void *)usbDev; + usbDev->ld_udev = udev; + usbDev->ld_udev->slot_id = slot; + usbDev->ep0_hep.desc.bLength = 7; + usbDev->ep0_hep.desc.bDescriptorType = USB_DT_ENDPOINT; + usbDev->in_ep[0] = &usbDev->ep0_hep; + usbDev->out_ep[0] = &usbDev->ep0_hep; + INIT_LIST_HEAD(&usbDev->ep0_hep.reqList); + + priv->host_devices_table[slot] = usbDev; + + return 1; +} + +static void hc_free_dev(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct HOST_CTRL *priv; + struct HOST_USB_DEVICE *usbDev; + struct phytium_cusb *config; + int i; + + if (!hcd || !udev) + return; + + config = *(struct phytium_cusb **)(hcd->hcd_priv); + if (!config) + return; + + priv = (struct HOST_CTRL *)config->host_priv; + usbDev = priv->host_devices_table[udev->slot_id]; + if (!usbDev) + return; + + usbDev->in_ep[0] = NULL; + usbDev->out_ep[0] = NULL; + for (i = 1; i < HOST_EP_NUM; i++) { + if (usbDev->in_ep[i]) + hc_endpoint_disable(hcd, + (struct usb_host_endpoint *)usbDev->in_ep[i]->userExt); + if (usbDev->out_ep[i]) + hc_endpoint_disable(hcd, + (struct usb_host_endpoint *)usbDev->out_ep[i]->userExt); + } + + priv->host_devices_table[udev->slot_id] = NULL; + usbDev->ld_udev->slot_id = 0; + usbDev->udev.userExt = (void *)NULL; + kfree(usbDev); +} + +static int hc_reset_device(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct HOST_CTRL *priv; + struct HOST_USB_DEVICE *usb_device; + struct phytium_cusb *config; + + if (!hcd || !udev) + return -EINVAL; + + config = *(struct phytium_cusb **)hcd->hcd_priv; + priv = (struct HOST_CTRL *)config->host_priv; + usb_device = priv->host_devices_table[udev->slot_id]; + if (!usb_device) + return -ENODEV; + + usb_device->udev.speed = usb_device->ld_udev->speed; + usb_device->udev.devnum = usb_device->ld_udev->devnum; + + if (USB_SPEED_HIGH == usb_device->udev.speed || USB_SPEED_FULL == usb_device->udev.speed) + usb_device->ep0_hep.desc.wMaxPacketSize = 64; + else + usb_device->ep0_hep.desc.wMaxPacketSize = 8; + + pr_debug("speed:%d ep0 wMaxPacketSize:%d\n", usb_device->udev.speed, + usb_device->ep0_hep.desc.wMaxPacketSize); + + return 0; +} + +static int hc_update_device(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct HOST_CTRL *priv; + struct HOST_USB_DEVICE *usb_device; + struct 
phytium_cusb *config; + + if (!hcd || !udev) + return -EINVAL; + + config = *(struct phytium_cusb **)(hcd->hcd_priv); + priv = (struct HOST_CTRL *)config->host_priv; + + usb_device = priv->host_devices_table[udev->slot_id]; + if (!usb_device) + return -ENODEV; + + usb_device->udev.devnum = udev->devnum; + + return 0; +} + +static int hc_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + return 0; +} + +static int hc_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + return 0; +} + +static int hc_hub_status_data(struct usb_hcd *hcd, char *buf) +{ + struct HOST_CTRL *priv; + struct phytium_cusb *config = *(struct phytium_cusb **)hcd->hcd_priv; + + if (!config) + return 0; + + priv = (struct HOST_CTRL *)config->host_priv; + if (!priv) + return 0; + + if (priv->portStatus & 0xffff0000) { + *buf = 0x02; + return 1; + } + + return 0; +} + +#ifdef CONFIG_PM +static int hc_bus_suspend(struct usb_hcd *hcd) +{ + unsigned long flags; + struct phytium_cusb *config = *(struct phytium_cusb **)(hcd->hcd_priv); + + if (!config) + return 0; + + spin_lock_irqsave(&config->lock, flags); + hcd->state = HC_STATE_SUSPENDED; + spin_unlock_irqrestore(&config->lock, flags); + + return 0; +} + +static int hc_bus_resume(struct usb_hcd *hcd) +{ + unsigned long flags; + struct phytium_cusb *config = *(struct phytium_cusb **)(hcd->hcd_priv); + + if (!config) + return 0; + + spin_lock_irqsave(&config->lock, flags); + hcd->state = HC_STATE_RESUMING; + spin_unlock_irqrestore(&config->lock, flags); + return 0; +} +#endif + +static void host_giveback_request(struct HOST_CTRL *priv, + struct HOST_REQ *req, uint32_t status) +{ + struct urb *urb_req; + int urb_status = 0; + int i = 0; + struct phytium_cusb *config; + + if (!priv || !req) + return; + + urb_req = req->userExt; + urb_req->actual_length = req->actualLength; + + switch (status) { + case HOST_ESTALL: + urb_status = -EPIPE; + break; + case HOST_EUNHANDLED: + urb_status = -EPROTO; + break; + case HOST_ESHUTDOWN: + urb_status = -ESHUTDOWN; + break; + default: + urb_status = status; + } + pr_debug("complete %p %pS (%d) dev%d ep%d%s %d/%d\n", + urb_req, urb_req->complete, urb_status, + usb_pipedevice(urb_req->pipe), + usb_pipeendpoint(urb_req->pipe), + usb_pipein(urb_req->pipe) ? 
"in" : "out", + urb_req->actual_length, + urb_req->transfer_buffer_length); + + if (usb_endpoint_xfer_isoc(&urb_req->ep->desc)) { + for (i = 0; i < urb_req->number_of_packets; i++) { + urb_req->iso_frame_desc[i].status = 0; + urb_req->iso_frame_desc[i].actual_length = req->isoFramesDesc[i].length; + } + } + + config = dev_get_drvdata(priv->dev); + usb_hcd_unlink_urb_from_ep(config->hcd, urb_req); + spin_unlock(&config->lock); + usb_hcd_giveback_urb(config->hcd, urb_req, urb_status); + kfree(req); + spin_lock(&config->lock); +} + +static void host_rh_port_status_change(struct HOST_CTRL *priv) +{ + uint32_t statusHub; + char *status; + uint32_t retval; + struct usb_ctrlrequest setup; + struct phytium_cusb *config; + + if (!priv) + return; + + config = dev_get_drvdata(priv->dev); + if (!config) + return; + + status = (char *)&statusHub; + hc_hub_status_data(config->hcd, status); + + if (status) { + setup.bRequestType = USB_TYPE_CLASS | USB_RECIP_OTHER | USB_DIR_IN;//to host + setup.bRequest = USB_REQ_GET_STATUS; + setup.wValue = 0; + setup.wIndex = 1; + setup.wLength = 4; + retval = config->host_obj->host_vhubControl(priv, &setup, (uint8_t *)&statusHub); + if (retval) + return; + + if (status[1] & USB_PORT_STAT_C_CONNECTION) { + if (status[0] & USB_PORT_STAT_CONNECTION) { + if (config->hcd->status_urb) + usb_hcd_poll_rh_status(config->hcd); + else + usb_hcd_resume_root_hub(config->hcd); + } else { + usb_hcd_resume_root_hub(config->hcd); + usb_hcd_poll_rh_status(config->hcd); + } + } + } else + usb_hcd_resume_root_hub(config->hcd); +} + +static void host_set_ep_toggle(struct HOST_CTRL *priv, + struct HOST_DEVICE *udev, u8 ep_num, u8 is_in, u8 toggle) +{ + struct HOST_USB_DEVICE *device; + + if (!priv || !udev) + return; + + device = (struct HOST_USB_DEVICE *)udev->userExt; + + usb_settoggle(device->ld_udev, ep_num, !is_in, toggle); +} + +static u8 host_get_ep_toggle(struct HOST_CTRL *priv, + struct HOST_DEVICE *udev, u8 ep_num, u8 is_in) +{ + struct HOST_USB_DEVICE *device; + + if (!priv || !udev) + return 0; + + device = (struct HOST_USB_DEVICE *)udev->userExt; + + return usb_gettoggle(device->ld_udev, ep_num, !is_in); +} + +static uint32_t initEndpoints(struct HOST_CTRL *priv) +{ + int epNum; + + if (!priv) + return 0; + + priv->hwEpInCount = 0; + priv->hwEpOutCount = 0; + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | 0); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | FIFOCTRL_IO_TX | 0); + memset(priv->ep_remap_pool, 0, sizeof(priv->ep_remap_pool)); + + for (epNum = 0; epNum < HOST_EP_NUM; epNum++) { + priv->in[epNum].isInEp = 1; + priv->in[epNum].hwEpNum = epNum; + if (priv->hostCfg.epIN[epNum].bufferingValue) { + priv->in[epNum].hwMaxPacketSize = priv->hostCfg.epIN[epNum].maxPacketSize; + priv->in[epNum].hwBuffers = priv->hostCfg.epIN[epNum].bufferingValue; + priv->in[epNum].state = HOST_EP_FREE; + priv->in[epNum].channel = NULL; + priv->hwEpInCount++; + + if (epNum) { + phytium_write16(&priv->regs->rxstaddr[epNum - 1].addr, + priv->hostCfg.epIN[epNum].startBuf); + phytium_write8(&priv->regs->ep[epNum - 1].rxcon, 0x08); + phytium_write8(&priv->regs->fifoctrl, FIFOCTRL_FIFOAUTO | epNum); + phytium_write8(&priv->regs->irqmode[epNum - 1].inirqmode, 0x80); + } + } else + priv->in[epNum].state = HOST_EP_NOT_IMPLEMENTED; + + priv->out[epNum].isInEp = 0; + priv->out[epNum].hwEpNum = epNum; + if (priv->hostCfg.epOUT[epNum].bufferingValue) { + priv->out[epNum].hwMaxPacketSize = priv->hostCfg.epOUT[epNum].maxPacketSize; + priv->out[epNum].hwBuffers = 
priv->hostCfg.epOUT[epNum].bufferingValue; + priv->out[epNum].state = HOST_EP_FREE; + priv->out[epNum].channel = NULL; + priv->hwEpOutCount++; + + if (epNum) { + phytium_write16(&priv->regs->txstaddr[epNum - 1].addr, + priv->hostCfg.epOUT[epNum].startBuf); + phytium_write8(&priv->regs->ep[epNum - 1].txcon, 0x08); + phytium_write8(&priv->regs->fifoctrl, + FIFOCTRL_FIFOAUTO | FIFOCTRL_IO_TX | epNum); + phytium_write8(&priv->regs->irqmode[epNum - 1].outirqmode, 0x80); + } + } else + priv->out[epNum].state = HOST_EP_NOT_IMPLEMENTED; + } + + return 0; +} + +static int32_t hostInit(struct HOST_CTRL *priv, struct HOST_CFG *config, + struct HOST_CALLBACKS *callbacks, struct device *pdev, bool isVhubHost) +{ + int index; + + if (!config || !priv || !callbacks || !pdev) + return -EINVAL; + + priv->dev = pdev; + priv->hostCallbacks = *callbacks; + priv->hostCfg = *config; + priv->regs = (struct HW_REGS *)config->regBase; + priv->hostDrv = HOST_GetInstance(); + + priv->dmaDrv = DMA_GetInstance(); + priv->dmaCfg.dmaModeRx = 0xFFFF; + priv->dmaCfg.dmaModeTx = 0xFFFF; + priv->dmaCfg.regBase = config->regBase + 0x400; + priv->dmaCfg.trbAddr = config->trbAddr; + priv->dmaCfg.trbDmaAddr = config->trbDmaAddr; + priv->dmaCallback.complete = host_CallbackTransfer; + + priv->dmaController = (void *)(priv + 1); + priv->dmaDrv->dma_init(priv->dmaController, &priv->dmaCfg, &priv->dmaCallback); + priv->dmaDrv->dma_setParentPriv(priv->dmaController, priv); + + INIT_LIST_HEAD(&priv->ctrlHEpQueue); + + for (index = 0; index < MAX_INSTANCE_EP_NUM; index++) { + INIT_LIST_HEAD(&priv->isoInHEpQueue[index]); + INIT_LIST_HEAD(&priv->isoOutHEpQueue[index]); + INIT_LIST_HEAD(&priv->intInHEpQueue[index]); + INIT_LIST_HEAD(&priv->intOutHEpQueue[index]); + INIT_LIST_HEAD(&priv->bulkInHEpQueue[index]); + INIT_LIST_HEAD(&priv->bulkOutHEpQueue[index]); + } + + phytium_write8(&priv->regs->cpuctrl, BIT(1)); + phytium_write8(&priv->regs->otgctrl, OTGCTRL_ABUSDROP); + phytium_write8(&priv->regs->ep0maxpack, 0x40); + + //disable interrupts + phytium_write8(&priv->regs->otgien, 0x0); + phytium_write8(&priv->regs->usbien, 0x0); + phytium_write16(&priv->regs->txerrien, 0x0); + phytium_write16(&priv->regs->rxerrien, 0x0); + + //clear all interrupts except the otg idle irq + phytium_write8(&priv->regs->otgirq, 0xFE); + phytium_write8(&priv->regs->usbirq, 0xFF); + phytium_write16(&priv->regs->txerrirq, 0xFF); + phytium_write16(&priv->regs->rxerrirq, 0xFF); + + phytium_write8(&priv->regs->tawaitbcon, 0x80); + + initEndpoints(priv); + priv->otgState = HOST_OTG_STATE_B_IDLE; + + //reset all endpoints + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | ENDPRST_TOGRST | ENDPRST_IO_TX); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | ENDPRST_TOGRST); + + if (isVhubHost) + priv->vhub_regs = (struct VHUB_REGS *)(config->phy_regBase); + else + priv->custom_regs = (struct CUSTOM_REGS *)(config->phy_regBase); + + return 0; +} + +static void hostDestroy(struct HOST_CTRL *priv) +{ +} + +static void hostStart(struct HOST_CTRL *priv) +{ + uint8_t otgstate, usbien; + + if (!priv) + return; + + priv->dmaDrv->dma_start(priv->dmaController); + usbien = phytium_read8(&priv->regs->usbien); + usbien = usbien | USBIR_URES | USBIR_SUSP; + phytium_write8(&priv->regs->usbien, usbien); +retry: + otgstate = phytium_read8(&priv->regs->otgstate); + switch (otgstate) { + case HOST_OTG_STATE_A_IDLE: + priv->ep0State = HOST_EP0_STAGE_IDLE; + priv->otgState = HOST_OTG_STATE_A_IDLE; +
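/* otgirq is write-one-to-clear (hostInit() above deliberately leaves bit 0 pending): ack the stale idle interrupt before requesting VBUS */ +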
phytium_write8(&priv->regs->otgirq, OTGIRQ_IDLEIRQ); + host_SetVbus(priv, 1); + break; + case HOST_OTG_STATE_B_IDLE: + host_SetVbus(priv, 1); + break; + case HOST_OTG_STATE_A_WAIT_VFALL: + goto retry; + } + + phytium_write8(&priv->regs->otgien, OTGIRQ_CONIRQ | + OTGIRQ_VBUSERRIRQ | OTGIRQ_SRPDETIRQ); +} + +static void hostStop(struct HOST_CTRL *priv) +{ + if (!priv) + return; + + phytium_write8(&priv->regs->otgien, 0x0); + phytium_write8(&priv->regs->usbien, 0x0); + phytium_write16(&priv->regs->txerrien, 0x0); + phytium_write16(&priv->regs->rxerrien, 0x0); + + phytium_write8(&priv->regs->otgirq, 0xFE); + phytium_write8(&priv->regs->usbirq, 0xFF); + phytium_write16(&priv->regs->txerrirq, 0xFF); + phytium_write16(&priv->regs->rxerrirq, 0xFF); + + host_SetVbus(priv, 0); + priv->dmaDrv->dma_stop(priv->dmaController); +} + +static void handleReset(struct HOST_CTRL *priv) +{ + if (!priv) + return; + + if (priv->otgState == HOST_OTG_STATE_A_WAIT_BCON + || priv->otgState == HOST_OTG_STATE_B_WAIT_ACON) { + switch (phytium_read8(&priv->regs->speedctrl)) { + case SPEEDCTRL_HS: + priv->portStatus |= USB_PORT_STAT_HIGH_SPEED; + break; + case SPEEDCTRL_FS: + priv->portStatus &= ~(USB_PORT_STAT_HIGH_SPEED | USB_PORT_STAT_LOW_SPEED); + break; + case SPEEDCTRL_LS: + priv->portStatus |= USB_PORT_STAT_LOW_SPEED; + break; + } + + priv->dmaDrv->dma_setHostMode(priv->dmaController); + + if (priv->hostCallbacks.portStatusChange) + priv->hostCallbacks.portStatusChange(priv); + + switch (phytium_read8(&priv->regs->otgstate)) { + case HOST_OTG_STATE_B_HOST: + priv->otgState = HOST_OTG_STATE_B_HOST; + break; + case HOST_OTG_STATE_A_HOST: + break; + default: + priv->otgState = HOST_OTG_STATE_A_HOST; + break; + } + } +} + +static void hostIsr(struct HOST_CTRL *priv) +{ + uint8_t usbirq, usbien; + + if (!priv) + return; + + usbirq = phytium_read8(&priv->regs->usbirq); + usbien = phytium_read8(&priv->regs->usbien); + pr_debug("raw usbirq:0x%x usbien:0x%x\n", usbirq, usbien); + usbirq = usbirq & usbien; + + hostErrorIrq(priv); + hostOtgIrq(priv); + + if (!usbirq) + goto DMA_IRQ; + + if (usbirq & USBIR_URES) { + phytium_write8(&priv->regs->usbirq, USBIR_URES); + priv->port_resetting = 0; + handleReset(priv); + } + + if (usbirq & USBIR_SOF) + phytium_write8(&priv->regs->usbirq, USBIR_SOF); + + if (usbirq & USBIR_SUSP) { + pr_debug("clear suspend irq\n"); + phytium_write8(&priv->regs->usbirq, USBIR_SUSP); + phytium_write8(&priv->regs->clkgate, 0x7); + } + + return; +DMA_IRQ: + priv->dmaDrv->dma_isr(priv->dmaController); +} + +static int32_t hostEpDisable(struct HOST_CTRL *priv, struct HOST_EP *ep) +{ + return 0; +} + +static unsigned int get_endpoint_interval(struct usb_endpoint_descriptor desc, + int speed) +{ + unsigned int interval = 0; + + switch (speed) { + case USB_SPEED_HIGH: + if (usb_endpoint_xfer_control(&desc) || usb_endpoint_xfer_bulk(&desc)) { + if (desc.bInterval == 0) + return interval; + interval = fls(desc.bInterval) - 1; + interval = clamp_val(interval, 0, 15); + if ((1 << interval) != desc.bInterval) + pr_debug("rounding to %d microframes, desc %d microframes\n", + 1 << interval, desc.bInterval); + interval = 1 << interval; + break; + } + + if (usb_endpoint_xfer_isoc(&desc) || usb_endpoint_xfer_int(&desc)) { + interval = clamp_val(desc.bInterval, 1, 16) - 1; + if (interval != desc.bInterval - 1) + pr_debug("rounding to %d %sframes\n", 1 << interval, + speed == USB_SPEED_FULL ?
"" : "micro"); + } + break; + case USB_SPEED_FULL: + if (usb_endpoint_xfer_isoc(&desc)) { + interval = clamp_val(desc.bInterval, 1, 16) - 1; + if (interval != desc.bInterval - 1) + pr_debug("rounding to %d %sframes\n", 1 << interval, + speed == USB_SPEED_FULL ? "" : "micro"); + interval += 3; + break; + } + fallthrough; + case USB_SPEED_LOW: + if (usb_endpoint_xfer_int(&desc) || usb_endpoint_xfer_isoc(&desc)) { + interval = fls(desc.bInterval * 8) - 1; + interval = clamp_val(interval, 3, 10); + if ((1 << interval) != desc.bInterval * 8) + pr_debug("rounding to %d microframes, desc %d microframes\n", + 1 << interval, desc.bInterval); + } + } + + return interval; +} + +static int32_t hostReqQueue(struct HOST_CTRL *priv, struct HOST_REQ *req) +{ + struct HOST_EP_PRIV *hostEpPriv; + struct list_head *hEpQueue = NULL; + uint8_t idleQueue = 0; + + if (!priv || !req) + return -EINVAL; + + list_add_tail((struct list_head *)&req->list, (struct list_head *)&req->usbEp->reqList); + hostEpPriv = (struct HOST_EP_PRIV *)req->usbEp->hcPriv; + + if (hostEpPriv->epIsReady) { + if (usb_endpoint_xfer_isoc(&req->usbEp->desc)) { + hostEpPriv->isocEpConfigured = 1; + hostStartReq(priv, req); + } + return 0; + } + + hostEpPriv->epIsReady = 1; + hostEpPriv->maxPacketSize = req->usbEp->desc.wMaxPacketSize; + hostEpPriv->interval = req->interval; + hostEpPriv->epNum = req->usbEp->desc.bEndpointAddress & 0xf; + hostEpPriv->faddress = req->faddress; + hostEpPriv->usbHEp = req->usbEp; + hostEpPriv->isIn = req->epIsIn; + + hostEpPriv->frame = get_endpoint_interval(req->usbEp->desc, req->usbDev->speed); + hostEpPriv->interval = hostEpPriv->frame; + switch (usb_endpoint_type(&req->usbEp->desc)) { + case USB_ENDPOINT_XFER_CONTROL: + hostEpPriv->isIn = 0; + hEpQueue = &priv->ctrlHEpQueue; + hostEpPriv->type = USB_ENDPOINT_XFER_CONTROL; + break; + case USB_ENDPOINT_XFER_BULK: + hEpQueue = hostEpPriv->isIn ? &priv->bulkInHEpQueue[req->epNum - 1] : + &priv->bulkOutHEpQueue[req->epNum - 1]; + hostEpPriv->type = USB_ENDPOINT_XFER_BULK; + break; + case USB_ENDPOINT_XFER_INT: + hEpQueue = hostEpPriv->isIn ? &priv->intInHEpQueue[req->epNum - 1] : + &priv->intOutHEpQueue[req->epNum - 1]; + hostEpPriv->type = USB_ENDPOINT_XFER_INT; + break; + case USB_ENDPOINT_XFER_ISOC: + hEpQueue = hostEpPriv->isIn ? 
&priv->isoInHEpQueue[req->epNum - 1] : + &priv->isoOutHEpQueue[req->epNum - 1]; + hostEpPriv->type = USB_ENDPOINT_XFER_ISOC; + break; + default: + break; + } + + if (hostEpPriv->isIn) + hostEpPriv->genericHwEp = &priv->in[req->epNum]; + else + hostEpPriv->genericHwEp = &priv->out[req->epNum]; + + hostEpPriv->genericHwEp->state = HOST_EP_ALLOCATED; + hostEpPriv->genericHwEp->refCount++; + + if (list_empty(hEpQueue)) { + if (hostEpPriv->genericHwEp->state == HOST_EP_BUSY) { + pr_err("Error:Hardware endpoint %d is busy\n", + hostEpPriv->genericHwEp->hwEpNum); + return -EINVAL; + } + idleQueue = 1; + } + + if (usb_endpoint_xfer_int(&req->usbEp->desc)) + list_add_tail(&hostEpPriv->node, hEpQueue); + + if (idleQueue) + hostStartReq(priv, req); + + return 0; +} + +static int abortActuallyUsbRequest(struct HOST_CTRL *priv, + struct HOST_REQ *req, struct HOST_EP *usbEp) +{ + struct HOST_EP_PRIV *usbEpPriv; + struct HostEp *hostEp; + uint8_t rxcon, txcon; + + if (!priv || !req || !usbEp) + return -EINVAL; + + usbEpPriv = (struct HOST_EP_PRIV *)usbEp->hcPriv; + hostEp = usbEpPriv->currentHwEp; + + usbEpPriv->transferFinished = 1; + if (hostEp->isInEp) { + if (hostEp->hwEpNum) { + rxcon = phytium_read8(&priv->regs->ep[hostEp->hwEpNum - 1].rxcon); + rxcon = rxcon & (~BIT(7)); + phytium_write8(&priv->regs->ep[hostEp->hwEpNum - 1].rxcon, rxcon); + } + } else { + if (hostEp->hwEpNum) { + txcon = phytium_read8(&priv->regs->ep[hostEp->hwEpNum - 1].txcon); + txcon = txcon & (~BIT(7)); + phytium_write8(&priv->regs->ep[hostEp->hwEpNum - 1].txcon, txcon); + } + } + abortTransfer(priv, req, hostEp); + + return 0; +} + +static int32_t hostReqDequeue(struct HOST_CTRL *priv, struct HOST_REQ *req, uint32_t status) +{ + struct HOST_EP *usbEp; + struct HOST_EP_PRIV *usbEpPriv; + int ret = 0; + + if (!priv || !req) + return -EINVAL; + + usbEp = req->usbEp; + usbEpPriv = (struct HOST_EP_PRIV *)usbEp->hcPriv; + + pr_debug("Dequeue usbReq:%p dev%d usbEp%d%s\n", req, req->faddress, req->epNum, + req->epIsIn ? 
"in" : "out"); + + if (!usbEpPriv->epIsReady) + return 0; + + if (!usbEpPriv->currentHwEp || req->list.prev != &usbEp->reqList) { + givebackRequest(priv, req, status); + if (list_empty(&usbEp->reqList)) { + usbEpPriv->epIsReady = 0; + usbEpPriv->currentHwEp = NULL; + if (usb_endpoint_xfer_int(&usbEp->desc)) + list_del(&usbEpPriv->node); + } + } else + ret = abortActuallyUsbRequest(priv, req, usbEp); + + if (usbEpPriv->isocEpConfigured) + usbEpPriv->isocEpConfigured = 0; + + return ret; +} + +static int32_t hostVHubStatusData(struct HOST_CTRL *priv, uint8_t *status) +{ + return 0; +} + +static int32_t hostGetDevicePD(struct HOST_CTRL *priv) +{ + return 0; +} + +static int32_t hostGetPrivateDataSize(struct HOST_CTRL *priv) +{ + if (!priv) + return 0; + + return sizeof(struct HOST_EP_PRIV); +} + +static int hub_descriptor(struct usb_hub_descriptor *buf) +{ + buf->bDescLength = 9; + buf->bDescriptorType = 0x29; + buf->bNbrPorts = 1; + buf->wHubCharacteristics = 0x11; + buf->bPwrOn2PwrGood = 5; + buf->bHubContrCurrent = 0; + buf->u.hs.DeviceRemovable[0] = 0x2; + + return 0; +} + +static int HubPortSuspend(struct HOST_CTRL *priv, u16 on) +{ + uint8_t otgctrl; + + if (!priv) + return 0; + + otgctrl = phytium_read8(&priv->regs->otgctrl); + + if (on) { + otgctrl &= ~OTGCTRL_BUSREQ; + otgctrl &= ~OTGCTRL_BHNPEN; + + priv->portStatus |= USB_PORT_STAT_SUSPEND; + + switch (phytium_read8(&priv->regs->otgstate)) { + case HOST_OTG_STATE_A_HOST: + priv->otgState = HOST_OTG_STATE_A_SUSPEND; + otgctrl |= OTGCTRL_ASETBHNPEN; + break; + case HOST_OTG_STATE_B_HOST: + priv->otgState = HOST_OTG_STATE_B_HOST_2; + break; + default: + break; + } + phytium_write8(&priv->regs->otgctrl, otgctrl); + } else { + otgctrl |= OTGCTRL_BUSREQ; + otgctrl &= ~OTGCTRL_ASETBHNPEN; + phytium_write8(&priv->regs->otgctrl, otgctrl); + priv->portStatus |= USB_PORT_STAT_RESUME; + } + return 0; +} + +static void HubPortReset(struct HOST_CTRL *priv, uint8_t on) +{ + uint8_t speed; + + if (!priv) + return; + + if (on) { + phytium_write16(&priv->regs->txerrirq, 0xFFFF); + phytium_write16(&priv->regs->txirq, 0xFFFF); + phytium_write16(&priv->regs->rxerrirq, 0xFFFF); + phytium_write16(&priv->regs->rxirq, 0xFFFF); + + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | + ENDPRST_TOGRST | ENDPRST_IO_TX); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | ENDPRST_TOGRST); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | 0 | 0x04); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | + FIFOCTRL_IO_TX | 0 | 0x04); + + priv->portStatus |= USB_PORT_STAT_RESET; + priv->portStatus &= ~USB_PORT_STAT_ENABLE; + priv->port_resetting = 0; + } else { + speed = phytium_read8(&priv->regs->speedctrl); + if (speed == SPEEDCTRL_HS) + priv->portStatus |= USB_PORT_STAT_HIGH_SPEED; + else if (speed == SPEEDCTRL_FS) + priv->portStatus &= ~(USB_PORT_STAT_HIGH_SPEED | USB_PORT_STAT_LOW_SPEED); + else + priv->portStatus |= USB_PORT_STAT_LOW_SPEED; + + priv->portStatus &= ~USB_PORT_STAT_RESET; + priv->portStatus |= USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET << 16) + | (USB_PORT_STAT_C_ENABLE << 16); + + if (priv->hostCallbacks.portStatusChange) + priv->hostCallbacks.portStatusChange(priv); + } +} + +static int get_PortStatus(struct HOST_CTRL *priv, u16 wIndex, uint8_t *buff) +{ + uint32_t temp = 0; + + if (!priv || !buff) + return 0; + + if ((wIndex & 0xff) != 1) + return 1; + + if (priv->portStatus & USB_PORT_STAT_RESET) { + if (!priv->port_resetting) + HubPortReset(priv, 0); + } 
+ + if (priv->portStatus & USB_PORT_STAT_RESUME) { + priv->portStatus &= ~(USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESUME); + priv->portStatus |= USB_PORT_STAT_C_SUSPEND << 16; + if (priv->hostCallbacks.portStatusChange) + priv->hostCallbacks.portStatusChange(priv); + } + + temp = priv->portStatus & (~USB_PORT_STAT_RESUME); + buff[0] = temp; + buff[1] = temp >> 8; + buff[2] = temp >> 16; + buff[3] = temp >> 24; + + return 0; +} + +static int set_PortFeature(struct HOST_CTRL *priv, u16 wValue, u16 wIndex) +{ + if ((wIndex & 0xff) != 1) + return 1; + + switch (wValue) { + case USB_PORT_FEAT_CONNECTION: + break; + case USB_PORT_FEAT_ENABLE: + break; + case USB_PORT_FEAT_SUSPEND: + HubPortSuspend(priv, 1); + break; + case USB_PORT_FEAT_OVER_CURRENT: + break; + case USB_PORT_FEAT_RESET: + HubPortReset(priv, 1); + break; + case USB_PORT_FEAT_L1: + break; + case USB_PORT_FEAT_POWER: + hostStart(priv); + break; + case USB_PORT_FEAT_LOWSPEED: + break; + case USB_PORT_FEAT_C_CONNECTION: + break; + case USB_PORT_FEAT_C_ENABLE: + break; + case USB_PORT_FEAT_C_SUSPEND: + break; + case USB_PORT_FEAT_C_OVER_CURRENT: + break; + case USB_PORT_FEAT_INDICATOR: + break; + case USB_PORT_FEAT_C_PORT_L1: + break; + default: + break; + } + priv->portStatus |= 1 << wValue; + + return 0; +} + +static int Clear_PortFeature(struct HOST_CTRL *priv, u16 wValue, u16 wIndex) +{ + if ((wIndex & 0xff) != 1) + return 1; + + switch (wValue) { + case USB_PORT_FEAT_CONNECTION: + break; + case USB_PORT_FEAT_ENABLE: + break; + case USB_PORT_FEAT_SUSPEND: + HubPortSuspend(priv, 0); + break; + case USB_PORT_FEAT_OVER_CURRENT: + break; + case USB_PORT_FEAT_RESET: + break; + case USB_PORT_FEAT_L1: + break; + case USB_PORT_FEAT_POWER: + break; + case USB_PORT_FEAT_LOWSPEED: + break; + case USB_PORT_FEAT_C_CONNECTION: + break; + case USB_PORT_FEAT_C_ENABLE: + break; + case USB_PORT_FEAT_C_SUSPEND: + break; + case USB_PORT_FEAT_C_OVER_CURRENT: + break; + case USB_PORT_FEAT_INDICATOR: + break; + case USB_PORT_FEAT_C_PORT_L1: + break; + default: + break; + } + priv->portStatus &= ~(1 << wValue); + + return 0; +} + +static int hc_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, + u16 wIndex, char *buf, u16 wLength) +{ + unsigned long flags = 0; + int retval = 0; + struct HOST_CTRL *priv; + struct phytium_cusb *config = *(struct phytium_cusb **)hcd->hcd_priv; + + if (!config) + return -EINVAL; + + if (!buf) + return -EINVAL; + + if (unlikely(!HCD_HW_ACCESSIBLE(hcd))) + return -ESHUTDOWN; + + priv = (struct HOST_CTRL *)config->host_priv; + if (!priv) + return -EINVAL; + + spin_lock_irqsave(&config->lock, flags); + switch (typeReq) { + case GetHubStatus: + break; + case GetPortStatus: + get_PortStatus(priv, wIndex, (uint8_t *)buf); + break; + case GetHubDescriptor: + hub_descriptor((struct usb_hub_descriptor *)buf); + break; + case SetPortFeature: + set_PortFeature(priv, wValue, wIndex); + break; + case ClearPortFeature: + retval = Clear_PortFeature(priv, wValue, wIndex); + break; + case SetHubFeature: + break; + case ClearHubFeature: + break; + default: + break; + } + + spin_unlock_irqrestore(&config->lock, flags); + + return retval; +} + +static int32_t hostVHubControl(struct HOST_CTRL *priv, struct usb_ctrlrequest *setup, uint8_t *buff) +{ + uint16_t request; + int32_t retval = 0; + + if (!priv || !setup || !buff) + return -EINVAL; + + request = setup->bRequestType << 0x08 | setup->bRequest; + switch (request) { + case ClearHubFeature: + break; + case SetHubFeature: + break; +
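/* hub-class features are accepted but not latched; the per-port requests below reuse the same helpers as hc_hub_control() */ +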
case ClearPortFeature: + retval = Clear_PortFeature(priv, setup->wValue, setup->wIndex); + break; + case SetPortFeature: + retval = set_PortFeature(priv, setup->wValue, setup->wIndex); + break; + case GetHubDescriptor: + retval = hub_descriptor((struct usb_hub_descriptor *)buff); + break; + case GetHubStatus: + break; + case GetPortStatus: + retval = get_PortStatus(priv, setup->wIndex, (uint8_t *)buff); + break; + default: + retval = -EOPNOTSUPP; + break; + } + + return retval; +} + +struct HOST_OBJ hostDrv = { + .host_init = hostInit, + .host_destroy = hostDestroy, + .host_start = hostStart, + .host_stop = hostStop, + .host_isr = hostIsr, + + //endpoint operations + .host_epDisable = hostEpDisable, + .host_reqQueue = hostReqQueue, + .host_reqDequeue = hostReqDequeue, + .host_vhubStatusData = hostVHubStatusData, + .host_vhubControl = hostVHubControl, + .host_getDevicePD = hostGetDevicePD, + .host_epGetPrivateDataSize = hostGetPrivateDataSize, +}; + +static struct hc_driver host_driver = { + .description = "phytium-hcd", + .product_desc = "Phytium Host USB Driver", + .hcd_priv_size = sizeof(struct phytium_cusb *), + .flags = HCD_MEMORY | HCD_USB2 | HCD_DMA, + .reset = hc_reset, + .start = hc_start, + .stop = hc_stop, + .shutdown = hc_shutdown, + .urb_enqueue = hc_urb_enqueue, + .urb_dequeue = hc_urb_dequeue, + .endpoint_disable = hc_endpoint_disable, + .get_frame_number = hc_get_frame, + .alloc_dev = hc_alloc_dev, + .free_dev = hc_free_dev, + .reset_device = hc_reset_device, + .update_device = hc_update_device, + .add_endpoint = hc_add_endpoint, + .drop_endpoint = hc_drop_endpoint, + .hub_status_data = hc_hub_status_data, + .hub_control = hc_hub_control, +#ifdef CONFIG_PM + .bus_suspend = hc_bus_suspend, + .bus_resume = hc_bus_resume, +#endif +}; + +static int phytium_host_set_default_cfg(struct phytium_cusb *config) +{ + int index; + + config->host_cfg.regBase = (uintptr_t)config->regs; + config->host_cfg.phy_regBase = (uintptr_t)config->phy_regs; + config->host_cfg.dmultEnabled = 1; + config->host_cfg.memoryAlignment = 0; + config->host_cfg.dmaSupport = 1; + config->host_cfg.isEmbeddedHost = 1; + + for (index = 0; index < HOST_EP_NUM; index++) { + if (index == 0) { + config->host_cfg.epIN[index].bufferingValue = 1; + config->host_cfg.epIN[index].maxPacketSize = 64; + config->host_cfg.epIN[index].startBuf = 0; + + config->host_cfg.epOUT[index].bufferingValue = 1; + config->host_cfg.epOUT[index].maxPacketSize = 64; + config->host_cfg.epOUT[index].startBuf = 0; + } else { + config->host_cfg.epIN[index].bufferingValue = 2; + config->host_cfg.epIN[index].maxPacketSize = 1024; + config->host_cfg.epIN[index].startBuf = 64 + 2 * 1024 * (index - 1); + + config->host_cfg.epOUT[index].bufferingValue = 2; + config->host_cfg.epOUT[index].maxPacketSize = 1024; + config->host_cfg.epOUT[index].startBuf = 64 + 2 * 1024 * (index - 1); + } + } + + return 0; +} + +static int phytium_host_reinit(struct phytium_cusb *config) +{ + struct HOST_CTRL *ctrl; + + if (!config || !config->host_priv) + return 0; + + ctrl = (struct HOST_CTRL *)config->host_priv; + + usb_root_hub_lost_power(config->hcd->self.root_hub); + hostStop(ctrl); + + ctrl->portStatus = 0; + + config->host_obj->host_init(config->host_priv, &config->host_cfg, + &config->host_callbacks, config->dev, config->isVhubHost); + + return 0; +} + +int phytium_host_init(struct phytium_cusb *config) +{ + int ret; + + if (!config) + return 0; + + phytium_host_set_default_cfg(config); + config->host_obj = HOST_GetInstance(); + config->dma_cfg.regBase =
config->host_cfg.regBase + 0x400; + + config->dma_obj = DMA_GetInstance(); + config->dma_obj->dma_probe(&config->dma_cfg, &config->dma_sysreq); + + config->host_sysreq.privDataSize = sizeof(struct HOST_CTRL); + config->host_sysreq.trbMemSize = config->dma_sysreq.trbMemSize; + config->host_sysreq.privDataSize += config->dma_sysreq.privDataSize; + + config->host_priv = devm_kzalloc(config->dev, config->host_sysreq.privDataSize, GFP_KERNEL); + if (!config->host_priv) { + ret = -ENOMEM; + goto err_probe; + } + + config->host_cfg.trbAddr = dma_alloc_coherent(config->dev, config->host_sysreq.trbMemSize, + (dma_addr_t *)&config->host_cfg.trbDmaAddr, GFP_KERNEL); + if (!config->host_cfg.trbAddr) { + ret = -ENOMEM; + goto err_dma_coherent; + } + + config->host_callbacks.portStatusChange = host_rh_port_status_change; + config->host_callbacks.getEpToggle = host_get_ep_toggle; + config->host_callbacks.setEpToggle = host_set_ep_toggle; + config->host_callbacks.givebackRequest = host_giveback_request; + + config->host_obj->host_init(config->host_priv, &config->host_cfg, + &config->host_callbacks, config->dev, config->isVhubHost); + + config->hcd = usb_create_hcd(&host_driver, config->dev, dev_name(config->dev)); + if (!config->hcd) { + ret = -ENODEV; + goto err_host; + } + + *config->hcd->hcd_priv = (unsigned long)config; + config->hcd->self.uses_pio_for_control = 0; + config->hcd->uses_new_polling = 1; + config->hcd->has_tt = 1; + + dev_set_drvdata(config->dev, config); + + config->hcd->self.otg_port = 1; + config->hcd->power_budget = 500; + ret = usb_add_hcd(config->hcd, 0, 0); + if (ret < 0) + goto err_setup; + + return 0; +err_setup: + usb_put_hcd(config->hcd); +err_host: + config->host_obj->host_destroy(config->host_priv); + dma_free_coherent(config->dev, config->host_sysreq.trbMemSize, + config->host_cfg.trbAddr, config->host_cfg.trbDmaAddr); +err_dma_coherent: +err_probe: + dev_set_drvdata(config->dev, NULL); + return ret; +} + +int phytium_host_uninit(struct phytium_cusb *config) +{ + if (!config) + return 0; + + if (config->hcd) { + usb_remove_hcd(config->hcd); + usb_put_hcd(config->hcd); + config->hcd = NULL; + } + + return 0; +} + +#ifdef CONFIG_PM +int phytium_host_resume(void *priv) +{ + int otgctrl; + struct phytium_cusb *config = (struct phytium_cusb *)priv; + struct HOST_CTRL *ctrl = (struct HOST_CTRL *)config->host_priv; + + if (!ctrl) + return -EINVAL; + + otgctrl = phytium_read8(&ctrl->regs->otgctrl); + otgctrl |= 1; + phytium_write8(&ctrl->regs->otgctrl, otgctrl); + + phytium_host_reinit(config); + + return 0; +} + +int phytium_host_suspend(void *priv) +{ + int otgctrl; + struct phytium_cusb *config = (struct phytium_cusb *)priv; + struct HOST_CTRL *ctrl = (struct HOST_CTRL *)config->host_priv; + + if (!ctrl) + return -EINVAL; + + otgctrl = phytium_read8(&ctrl->regs->otgctrl); + otgctrl = otgctrl & (~1); + phytium_write8(&ctrl->regs->otgctrl, otgctrl); + + return 0; +} +#endif + +struct HOST_OBJ *HOST_GetInstance(void) +{ + return &hostDrv; +} diff --git a/drivers/usb/phytium/host_api.h b/drivers/usb/phytium/host_api.h new file mode 100644 index 0000000000000..c47cb04ede444 --- /dev/null +++ b/drivers/usb/phytium/host_api.h @@ -0,0 +1,250 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __PHYTIUM_HOST_API_H_ +#define __PHYTIUM_HOST_API_H_ + +#include +#include "dma.h" + +#define MAX_SUPPORTED_DEVICES 16 +#define USB_PORT_STAT_RESUME (1 << 31) +#define MAX_INSTANCE_EP_NUM 6 +#define ENDPOINT_DIR 2 + +enum HOST_OtgState { + HOST_OTG_STATE_A_IDLE, + HOST_OTG_STATE_A_WAIT_VRISE, + 
HOST_OTG_STATE_A_WAIT_BCON, + HOST_OTG_STATE_A_HOST, + HOST_OTG_STATE_A_SUSPEND, + HOST_OTG_STATE_A_PERIPHERAL, + HOST_OTG_STATE_A_VBUS_ERR, + HOST_OTG_STATE_A_WAIT_VFALL, + HOST_OTG_STATE_B_IDLE = 0x10, + HOST_OTG_STATE_B_PERIPHERAL, + HOST_OTG_STATE_B_WAIT_ACON, + HOST_OTG_STATE_B_HOST, + HOST_OTG_STATE_B_HOST_2, + HOST_OTG_STATE_B_SRP_INT1, + HOST_OTG_STATE_B_SRP_INT2, + HOST_OTG_STATE_B_DISCHRG1, + HOST_OTG_STATE_B_DISCHRG2, + HOST_OTG_STATE_UNKNOWN, +}; + +enum HOST_EP0_STAGE { + HOST_EP0_STAGE_IDLE, + HOST_EP0_STAGE_SETUP, + HOST_EP0_STAGE_IN, + HOST_EP0_STAGE_OUT, + HOST_EP0_STAGE_STATUSIN, + HOST_EP0_STAGE_STATUSOUT, + HOST_EP0_STAGE_ACK, +}; + +enum HOST_EP_STATE { + HOST_EP_FREE, + HOST_EP_ALLOCATED, + HOST_EP_BUSY, + HOST_EP_NOT_IMPLEMENTED +}; + +struct HOST_DEVICE { + uint8_t devnum; + uint8_t hubPort; + unsigned int speed; + struct HOST_DEVICE *parent; + void *hcPriv; + void *userExt; +}; + +struct HOST_EP { + struct usb_endpoint_descriptor desc; + struct list_head reqList; + void *userExt; + uint8_t *hcPriv; + uint8_t device_epNum; +}; + +struct HOST_USB_DEVICE { + struct HOST_EP ep0_hep; + struct HOST_EP *in_ep[16]; + struct HOST_EP *out_ep[16]; + struct HOST_DEVICE udev; + struct usb_device *ld_udev; +}; + +struct HostEp { + uint8_t name[255]; + uint8_t hwEpNum; + uint8_t hwBuffers; + uint16_t hwMaxPacketSize; + uint8_t isInEp; + void *channel; + uint8_t usbEpNum; + uint8_t type; + uint8_t usbPacketSize; + enum HOST_EP_STATE state; + struct HOST_EP *scheduledUsbHEp; + uint8_t refCount; +}; + +struct HOST_ISOFRAMESDESC { + uint32_t length; + uint32_t offset; +}; + +struct HOST_EP_PRIV { + struct list_head node; + struct HostEp *genericHwEp; + struct HostEp *currentHwEp; + uint8_t epIsReady; + uint8_t isIn; + uint8_t type; + uint8_t interval; + uint8_t epNum; + uint8_t faddress; + uint16_t maxPacketSize; + uint32_t frame; + uint8_t hubAddress; + uint8_t portAddress; + uint8_t split; + struct HOST_EP *usbHEp; + uint8_t isocEpConfigured; + uint8_t transferFinished; +}; + +struct HOST_REQ { + struct list_head list; + struct HOST_EP *usbEp; + void *userExt; + void *hcPriv; + struct HOST_DEVICE *usbDev; + struct usb_ctrlrequest *setup; + uintptr_t setupDma; + void *bufAddress; + uintptr_t buffDma; + uint32_t buffLength; + uint32_t actualLength; + uint8_t epIsIn; + uint8_t eptype; + uint8_t epNum; + uint8_t faddress; + uint8_t interval; + uint8_t status; + uint8_t reqUnlinked; + struct HOST_ISOFRAMESDESC *isoFramesDesc; + uint32_t isoFramesNumber; +}; + +struct HOST_SYSREQ { + uint32_t privDataSize; + uint32_t trbMemSize; +}; + +struct HOST_EP_CFG { + uint8_t bufferingValue; + uint16_t startBuf; + uint16_t maxPacketSize; +}; + +struct HOST_CFG { + uintptr_t regBase; + uintptr_t phy_regBase; + struct HOST_EP_CFG epIN[16]; + struct HOST_EP_CFG epOUT[16]; + uint8_t dmultEnabled; + uint8_t memoryAlignment; + uint8_t dmaSupport; + uint8_t isEmbeddedHost; + void *trbAddr; + uintptr_t trbDmaAddr; +}; + +struct HOST_CTRL; + +struct HOST_CALLBACKS { + void (*portStatusChange)(struct HOST_CTRL *priv); + + uint8_t (*getEpToggle)(struct HOST_CTRL *priv, + struct HOST_DEVICE *usbDev, uint8_t epNum, uint8_t isIn); + + void (*setEpToggle)(struct HOST_CTRL *priv, struct HOST_DEVICE *usbDev, + uint8_t epNum, uint8_t isIn, uint8_t toggle); + + void (*givebackRequest)(struct HOST_CTRL *priv, + struct HOST_REQ *usbReq, uint32_t status); + + void (*setTimer)(struct HOST_CTRL *priv, uint32_t time, uint8_t id); +}; + +struct HOST_OBJ { + int32_t (*host_probe)(struct HOST_CFG *config, struct HOST_SYSREQ 
*sysReq); + + int32_t (*host_init)(struct HOST_CTRL *priv, struct HOST_CFG *config, + struct HOST_CALLBACKS *callbacks, struct device *pdev, bool isVhubHost); + + void (*host_destroy)(struct HOST_CTRL *priv); + + void (*host_start)(struct HOST_CTRL *priv); + + void (*host_stop)(struct HOST_CTRL *priv); + + void (*host_isr)(struct HOST_CTRL *priv); + + int32_t (*host_epDisable)(struct HOST_CTRL *priv, struct HOST_EP *ep); + + int32_t (*host_reqQueue)(struct HOST_CTRL *priv, struct HOST_REQ *req); + + int32_t (*host_reqDequeue)(struct HOST_CTRL *priv, + struct HOST_REQ *req, uint32_t status); + + int32_t (*host_vhubStatusData)(struct HOST_CTRL *priv, uint8_t *status); + + int32_t (*host_vhubControl)(struct HOST_CTRL *priv, + struct usb_ctrlrequest *setup, uint8_t *buff); + + int32_t (*host_getDevicePD)(struct HOST_CTRL *priv); + + int32_t (*host_epGetPrivateDataSize)(struct HOST_CTRL *priv); +}; + +struct HOST_CTRL { + struct device *dev; + struct HW_REGS *regs; + struct HOST_OBJ *hostDrv; + struct HOST_CFG hostCfg; + struct HOST_CALLBACKS hostCallbacks; + struct HostEp in[16]; + struct HostEp out[16]; + uint32_t portStatus; + struct list_head ctrlHEpQueue; + struct list_head isoInHEpQueue[MAX_INSTANCE_EP_NUM]; + struct list_head isoOutHEpQueue[MAX_INSTANCE_EP_NUM]; + struct list_head intInHEpQueue[MAX_INSTANCE_EP_NUM]; + struct list_head intOutHEpQueue[MAX_INSTANCE_EP_NUM]; + struct list_head bulkInHEpQueue[MAX_INSTANCE_EP_NUM]; + struct list_head bulkOutHEpQueue[MAX_INSTANCE_EP_NUM]; + uint8_t hwEpInCount; + uint8_t hwEpOutCount; + unsigned int speed; + enum HOST_OtgState otgState; + enum HOST_EP0_STAGE ep0State; + uint8_t vBusErrCnt; + uint8_t isRemoteWakeup; + uint8_t isSelfPowered; + uint8_t deviceAddress; + struct DMA_OBJ *dmaDrv; + void *dmaController; + struct DMA_CFG dmaCfg; + struct DMA_CALLBACKS dmaCallback; + uint8_t port_resetting; + struct HOST_USB_DEVICE *host_devices_table[MAX_SUPPORTED_DEVICES]; + struct CUSTOM_REGS *custom_regs; + struct VHUB_REGS *vhub_regs; + int ep_remap_pool[ENDPOINT_DIR][MAX_INSTANCE_EP_NUM + 1]; +}; + +struct HOST_OBJ *HOST_GetInstance(void); + +#endif diff --git a/drivers/usb/phytium/hw-regs.h b/drivers/usb/phytium/hw-regs.h new file mode 100644 index 0000000000000..8da9f8e9b9253 --- /dev/null +++ b/drivers/usb/phytium/hw-regs.h @@ -0,0 +1,297 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PHYTIUM_HW_REGS +#define __LINUX_PHYTIUM_HW_REGS + +#define USBIEN 0x198 +#define USBIEN_SUDAVIE BIT(0) +#define USBIEN_SOFIE BIT(1) +#define USBIEN_SUTOKIE BIT(2) +#define USBIEN_SUSPIE BIT(3) +#define USBIEN_URESIE BIT(4) +#define USBIEN_HSPEEDIE BIT(5) +#define USBIEN_LPMIE BIT(7) + +#define USBCS 0x1a3 +#define USBCS_LSMODE BIT(0) +#define USBCS_LPMNYET BIT(1) +#define USBCS_SIGSUME BIT(5) +#define USBCS_DISCON BIT(6) +#define USBCS_WAKESRC BIT(7) + +#define USBIR_SOF BIT(1) +#define USBIR_SUSP BIT(3) +#define USBIR_URES BIT(4) +#define USBIR_HSPEED BIT(5) + +#define USBIR_SUDAV BIT(0) +#define USBIR_SUTOK BIT(2) +#define USBIR_LPMIR BIT(7) + +#define OTGIRQ_IDLEIRQ BIT(0) +#define OTGIRQ_SRPDETIRQ BIT(1) +#define OTGIRQ_CONIRQ BIT(2) +#define OTGIRQ_LOCSOFIRQ BIT(2) +#define OTGIRQ_VBUSERRIRQ BIT(3) +#define OTGIRQ_PERIPHIRQ BIT(4) +#define OTGIRQ_IDCHANGEIRQ BIT(5) +#define OTGIRQ_HOSTDISCON BIT(6) +#define OTGIRQ_BSE0SRPIRQ BIT(7) + +#define OTGCTRL_BUSREQ BIT(0) +#define OTGCTRL_ABUSDROP BIT(1) +#define OTGCTRL_ASETBHNPEN BIT(2) +#define OTGCTRL_BHNPEN BIT(3) +#define OTGCTRL_SRPVBUSDETEN BIT(4) +#define OTGCTRL_SRPDATDETEN BIT(5) +#define 
OTGCTRL_FORCEBCONN BIT(7) + +#define OTGSTATUS_ID BIT(6) + +#define ENDPRST_EP 0x0f +#define ENDPRST_IO_TX BIT(4) +#define ENDPRST_TOGRST BIT(5) +#define ENDPRST_FIFORST BIT(6) +#define ENDPRST_TOGSETQ BIT(7) + +#define FIFOCTRL_EP 0x0f +#define FIFOCTRL_IO_TX BIT(4) +#define FIFOCTRL_FIFOAUTO BIT(5) + +#define SPEEDCTRL_LS BIT(0) +#define SPEEDCTRL_FS BIT(1) +#define SPEEDCTRL_HS BIT(2) +#define SPEEDCTRL_HSDISABLE BIT(7) + +#define CON_TYPE_CONTROL 0x00 +#define CON_TYPE_ISOC 0x04 +#define CON_TYPE_BULK 0x08 +#define CON_TYPE_INT 0x0C +#define CON_TYPE_ISOC_1_ISOD 0x00 +#define CON_TYPE_ISOC_2_ISOD 0x10 +#define CON_TYPE_ISOC_3_ISOD 0x20 +#define CON_STALL 0x40 +#define CON_VAL 0x80 + +#define ERR_TYPE 0x1c +#define ERR_COUNT 0x03 +#define ERR_RESEND BIT(6) +#define ERR_UNDERRIEN BIT(7) + +#define ERR_NONE 0 +#define ERR_CRC 1 +#define ERR_DATA_TOGGLE_MISMATCH 2 +#define ERR_STALL 3 +#define ERR_TIMEOUT 4 +#define ERR_PID 5 +#define ERR_TOO_LONG_PACKET 6 +#define ERR_DATA_UNDERRUN 7 + +#define EP0CS_HCSETTOGGLE BIT(6) +#define EP0CS_HCSET BIT(4) +#define EP0CS_RXBUSY_MASK BIT(3) +#define EP0CS_TXBUSY_MASK BIT(2) +#define EP0CS_STALL BIT(0) +#define EP0CS_HSNAK BIT(1) +#define EP0CS_DSTALL BIT(4) +#define EP0CS_CHGSET BIT(7) + +#define CS_ERR 0x01 +#define CS_BUSY 0x02 +#define CS_NPAK 0x0c +#define CS_NPAK_OFFSET 0x02 +#define CS_AUTO 0x10 + +#define CON_BUF_SINGLE 0x00 +#define CON_BUF_DOUBLE 0x01 +#define CON_BUF_TRIPLE 0x02 +#define CON_BUF_QUAD 0x03 +#define CON_BUF 0x03 + +struct HW_REGS { + uint8_t ep0Rxbc; /*address 0x00*/ + uint8_t ep0Txbc; /*address 0x01*/ + uint8_t ep0cs; /*address 0x02*/ + int8_t reserved0; /*address 0x03*/ + uint8_t lpmctrll; /*address 0x04*/ + uint8_t lpmctrlh; /*address 0x05*/ + uint8_t lpmclock; + uint8_t ep0fifoctrl; + struct ep { /*address 0x08*/ + uint16_t rxbc; //outbc (hcinbc) + uint8_t rxcon; + uint8_t rxcs; + uint16_t txbc; //inbc (hcoutbc + uint8_t txcon; + uint8_t txcs; + } ep[15]; + uint8_t reserved1[4]; + uint32_t fifodat[15]; /*address 0x84*/ + uint8_t ep0ctrl; /*address 0xC0*/ + uint8_t tx0err; /*address 0xC1*/ + uint8_t reserved2; + uint8_t rx0err; /*address 0xC3*/ + struct epExt { + uint8_t txctrl; + uint8_t txerr; + uint8_t rxctrl; + uint8_t rxerr; + } epExt[15]; + uint8_t ep0datatx[64]; /*address 0x100*/ + uint8_t ep0datarx[64]; /*address 0x140*/ + uint8_t setupdat[8]; /*address 0x180*/ + uint16_t txirq; /*address 0x188*/ + uint16_t rxirq; /*address 0x18A*/ + uint8_t usbirq; /*address 0x18C*/ + uint8_t reserved4; + uint16_t rxpngirq; /*address 0x18E*/ + uint16_t txfullirq; /*address 0x190*/ + uint16_t rxemptirq; /*address 0x192*/ + uint16_t txien; /*address 0x194*/ + uint16_t rxien; /*address 0x196*/ + uint8_t usbien; /*address 0x198*/ + uint8_t reserved6; + uint16_t rxpngien; /*address 0x19A*/ + uint16_t txfullien; /*address 0x19C*/ + uint16_t rxemptien; /*address 0x19E*/ + uint8_t usbivect; /*address 0x1A0*/ + uint8_t fifoivect; /*address 0x1A1*/ + uint8_t endprst; /*address 0x1A2*/ + uint8_t usbcs; /*address 0x1A3*/ + uint16_t frmnr; /*address 0x1A4*/ + uint8_t fnaddr; /*address 0x1A6*/ + uint8_t clkgate; /*address 0x1A7*/ + uint8_t fifoctrl; /*address 0x1A8*/ + uint8_t speedctrl; /*address 0x1A9*/ + uint8_t reserved8[1]; /*address 0x1AA*/ + uint8_t portctrl; /*address 0x1AB*/ + uint16_t hcfrmnr; /*address 0x1AC*/ + uint16_t hcfrmremain; /*address 0x1AE*/ + uint8_t reserved9[4]; /*address 0x1B0*/ + uint16_t rxerrirq; /*address 0x1B4*/ + uint16_t txerrirq; /*address 0x1B6*/ + uint16_t rxerrien; /*address 0x1B8*/ + uint16_t txerrien; 
/*address 0x1BA*/ + /*OTG extension*/ + uint8_t otgirq; /*address 0x1BC*/ + uint8_t otgstate; /*address 0x1BD*/ + uint8_t otgctrl; /*address 0x1BE*/ + uint8_t otgstatus; /*address 0x1BF*/ + uint8_t otgien; /*address 0x1C0*/ + uint8_t taaidlbdis; /*address 0x1C1*/ + uint8_t tawaitbcon; /*address 0x1C2*/ + uint8_t tbvbuspls; /*address 0x1C3*/ + uint8_t otg2ctrl; /*address 0x1C4*/ + uint8_t reserved10[2]; /*address 0x1C5*/ + uint8_t tbvbusdispls; /*address 0x1C7*/ + uint8_t traddr; /*address 0x1C8*/ + uint8_t trwdata; /*address 0x1C9*/ + uint8_t trrdata; /*address 0x1CA*/ + uint8_t trctrl; /*address 0x1CB*/ + uint16_t isoautoarm; /*address 0x1CC*/ + uint8_t adpbc1ien; /*address 0x1CE*/ + uint8_t adpbc2ien; /*address 0x1CF*/ + uint8_t adpbcctr0; /*address 0x1D0*/ + uint8_t adpbcctr1; /*address 0x1D1*/ + uint8_t adpbcctr2; /*address 0x1D2*/ + uint8_t adpbc1irq; /*address 0x1D3*/ + uint8_t adpbc0status; /*address 0x1D4*/ + uint8_t adpbc1status; /*address 0x1D5*/ + uint8_t adpbc2status; /*address 0x1D6*/ + uint8_t adpbc2irq; /*address 0x1D7*/ + uint16_t isodctrl; /*address 0x1D8*/ + uint8_t reserved11[2]; + uint16_t isoautodump; /*address 0x1DC*/ + uint8_t reserved12[2]; + uint8_t ep0maxpack; /*address 0x1E0*/ + uint8_t reserved13; + uint16_t rxmaxpack[15]; /*address 0x1E2*/ + struct rxsoftimer { /*address 0x200 to 0x23F*/ + uint16_t timer; + uint8_t reserved; + uint8_t ctrl; + } rxsoftimer[16]; + + struct txsoftimer { /*address 0x240 to 0x27F*/ + uint16_t timer; + uint8_t reserved; + uint8_t ctrl; + } txsoftimer[16]; + uint8_t reserved14[132]; + struct rxstaddr { /*address 0x304*/ + uint16_t addr; + uint16_t reserved; + } rxstaddr[15]; + uint8_t reserved15[4]; + struct txstaddr { /*address 0x344*/ + uint16_t addr; + uint16_t reserved; + } txstaddr[15]; + int8_t reserved16[4]; /*address 0x380*/ + struct irqmode { /*address 0x384*/ + int8_t inirqmode; + int8_t reserved21; + int8_t outirqmode; + int8_t reserved22; + } irqmode[15]; + /*The Microprocessor control*/ + uint8_t cpuctrl; /*address 0x3C0*/ + int8_t reserved17[15]; + /*The debug counters and workarounds*/ + uint8_t debug_rx_bcl; /*address 0x3D0*/ + uint8_t debug_rx_bch; /*address 0x3D1*/ + uint8_t debug_rx_status; /*address 0x3D2*/ + uint8_t debug_irq; /*address 0x3D3*/ + uint8_t debug_tx_bcl; /*address 0x3D4*/ + uint8_t debug_tx_bch; /*address 0x3D5*/ + uint8_t debug_tx_status; /*address 0x3D6*/ + uint8_t debug_ien; /*address 0x3D7*/ + uint8_t phywa_en; /*address 0x3D8*/ + /*endian*/ + uint8_t wa1_cnt; /*address 0x3D9*/ + int8_t reserved18[2]; /*address 0x3DA*/ + uint8_t endian_sfr_cs; /*address 0x3DC*/ + int8_t reserved19[2]; /*address 0x3DD*/ + uint8_t endian_sfr_s; /*address 0x3DF*/ + int8_t reserved20[2]; /*address 0x3E0*/ + uint16_t txmaxpack[15]; /*address 0x3E2*/ +}; + +struct CUSTOM_REGS { + uint32_t secure_ctrl; /*address 0x80000*/ + uint32_t secsid_atst; /*address 0x80004*/ + uint32_t nsaid_smmuid; /*address 0x80008*/ + uint32_t ace; /*address 0x8000c*/ + uint32_t wakeup; /*address 0x80010*/ + uint32_t debug; /*address 0x80014*/ +}; + +struct VHUB_REGS { + uint32_t gen_cfg; /*address 0x00*/ + uint32_t gen_st; /*address 0x04*/ + uint32_t bc_cfg; /*address 0x08*/ + uint32_t bc_st; /*address 0x0c*/ + uint32_t adp_cfg; /*address 0x10*/ + uint32_t adp_st; /*address 0x14*/ + uint32_t dbg_cfg; /*address 0x18*/ + uint32_t dbg_st; /*address 0x1c*/ + uint32_t utmip_cfg; /*address 0x20*/ + uint32_t utmip_st; /*address 0x24*/ +}; + +struct DMARegs { + uint32_t conf; /*address 0x400*/ + uint32_t sts; /*address 0x404*/ + uint32_t 
reserved5[5]; + uint32_t ep_sel; /*address 0x41C*/ + uint32_t traddr; /*address 0x420*/ + uint32_t ep_cfg; /*address 0x424*/ + uint32_t ep_cmd; /*address 0x428*/ + uint32_t ep_sts; /*address 0x42c*/ + uint32_t ep_sts_sid;/*address 0x430*/ + uint32_t ep_sts_en; /*address 0x434*/ + uint32_t drbl; /*address 0x438*/ + uint32_t ep_ien; /*address 0x43C*/ + uint32_t ep_ists; /*address 0x440*/ +}; + +#endif diff --git a/drivers/usb/phytium/pci.c b/drivers/usb/phytium/pci.c new file mode 100644 index 0000000000000..b4d675effb88b --- /dev/null +++ b/drivers/usb/phytium/pci.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include "core.h" + +#define PHYTIUM_OTG_USB_LOADED 3 + +static bool phytium_hw_is_device(struct phytium_cusb *config) +{ + pr_info("%s %d\n", __func__, __LINE__); + + return false; +} + +static bool phytium_hw_is_host(struct phytium_cusb *config) +{ + pr_info("%s %d\n", __func__, __LINE__); + + return true; +} + +static int phytium_get_dr_mode(struct phytium_cusb *config) +{ + enum usb_dr_mode mode; + + config->dr_mode = usb_get_dr_mode(config->dev); + if (config->dr_mode == USB_DR_MODE_UNKNOWN) + config->dr_mode = USB_DR_MODE_OTG; + + mode = config->dr_mode; + if (phytium_hw_is_device(config)) { + if (IS_ENABLED(CONFIG_USB_PHYTIUM_HOST)) { + dev_err(config->dev, "Controller does not support host mode.\n"); + return -EINVAL; + } + + mode = USB_DR_MODE_PERIPHERAL; + } else if (phytium_hw_is_host(config)) { + if (IS_ENABLED(CONFIG_USB_PHYTIUM_PERIPHERAL)) { + dev_err(config->dev, "Controller does not support device mode.\n"); + return -EINVAL; + } + mode = USB_DR_MODE_HOST; + } else { + if (IS_ENABLED(CONFIG_USB_PHYTIUM_HOST)) + mode = USB_DR_MODE_HOST; + else if (IS_ENABLED(CONFIG_USB_PHYTIUM_PERIPHERAL)) + mode = USB_DR_MODE_PERIPHERAL; + } + + if (mode != config->dr_mode) { + dev_warn(config->dev, "Configuration mismatch. dr_mode forced to %s\n", + mode == USB_DR_MODE_HOST ? 
"host" : "device"); + config->dr_mode = mode; + } + + return 0; +} + +static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct phytium_cusb *config; + int retval = 0; + + if (usb_disabled()) + return -ENODEV; + + retval = pcim_enable_device(pdev); + if (retval < 0) { + dev_err(&pdev->dev, "pcim_enable_device failed\n"); + return -ENODEV; + } + pci_set_master(pdev); + + config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL); + if (!config) + return -ENOMEM; + + spin_lock_init(&config->lock); + config->dev = &pdev->dev; + + config->irq = pdev->irq; + if (config->irq <= 0) { + dev_err(config->dev, "getting usb irq failed\n"); + return config->irq; + } + + config->regs = pci_iomap(pdev, 0, 0); + if (IS_ERR(config->regs)) { + dev_err(config->dev, "map IOMEM resource failed\n"); + return -1; + } + + if (!pdev->dev.dma_mask) + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) + dev_err(&pdev->dev, "failed to set 64-bit dma\n"); + else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) + dev_err(&pdev->dev, "failed to set 32-bit dma\n"); + + pci_enable_msi(pdev); + + phytium_get_dr_mode(config); + + phytium_core_reset(config, false); + + if (config->dr_mode == USB_DR_MODE_HOST || config->dr_mode == USB_DR_MODE_OTG) + phytium_host_init(config); + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || config->dr_mode == USB_DR_MODE_OTG) + phytium_gadget_init(config); + + dev_set_drvdata(config->dev, config); + + return 0; +} + +static void phytium_pci_remove(struct pci_dev *pdev) +{ + struct phytium_cusb *config = dev_get_drvdata(&pdev->dev); + + phytium_get_dr_mode(config); + if (config->dr_mode == USB_DR_MODE_HOST || config->dr_mode == USB_DR_MODE_OTG) + phytium_host_uninit(config); + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || config->dr_mode == USB_DR_MODE_OTG) + phytium_gadget_uninit(config); + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || config->dr_mode == USB_DR_MODE_OTG) + usb_del_gadget_udc(&config->gadget); + + dev_set_drvdata(&pdev->dev, NULL); + pr_info("%s %d\n", __func__, __LINE__); +} + +static void phytium_pci_shutdown(struct pci_dev *pdev) +{ + struct phytium_cusb *config; + + config = dev_get_drvdata(&pdev->dev); + + phytium_get_dr_mode(config); + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || config->dr_mode == USB_DR_MODE_OTG) + usb_del_gadget_udc(&config->gadget); +} + +#ifdef CONFIG_PM +static int phytium_pci_resume(struct pci_dev *pdev) +{ + unsigned long flags = 0; + struct phytium_cusb *config; + int ret = 0; + + config = dev_get_drvdata(&pdev->dev); + + spin_lock_irqsave(&config->lock, flags); + ret = phytium_host_resume(config); + spin_unlock_irqrestore(&config->lock, flags); + + return ret; +} + +static int phytium_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + unsigned long flags = 0; + struct phytium_cusb *config; + int ret; + + config = dev_get_drvdata(&pdev->dev); + + spin_lock_irqsave(&config->lock, flags); + ret = phytium_host_suspend(config); + spin_unlock_irqrestore(&config->lock, flags); + + return 0; +} +#endif + +const struct pci_device_id phytium_pci_id_table[] = { + {0x10ee, 0x8012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {} +}; + +static struct pci_driver phytium_otg_driver = { + .name = "phytium_usb", + .id_table = phytium_pci_id_table, + .probe = phytium_pci_probe, + .remove = phytium_pci_remove, + .shutdown = phytium_pci_shutdown, +#ifdef CONFIG_PM + .resume = phytium_pci_resume, + .suspend = phytium_pci_suspend, 
+#endif +}; + +module_pci_driver(phytium_otg_driver); + +MODULE_AUTHOR("Chen Zhenhua "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium usb pci wrapper"); diff --git a/drivers/usb/phytium/platform.c b/drivers/usb/phytium/platform.c new file mode 100644 index 0000000000000..67326392de11a --- /dev/null +++ b/drivers/usb/phytium/platform.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include "core.h" +#include "hw-regs.h" + +#define PHYTIUM_OTG_USB_LOADED 3 +#define USB2_2_BASE_ADDRESS 0x31800000 + +static const struct of_device_id phytium_otg_of_match[] = { + { + .compatible = "phytium,usb2", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, phytium_otg_of_match); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_otg_acpi_match[] = { + { "PHYT0037", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, phytium_otg_acpi_match); +#endif + +static int phytium_get_dr_mode(struct phytium_cusb *config) +{ + config->dr_mode = usb_get_dr_mode(config->dev); + if (config->dr_mode == USB_DR_MODE_UNKNOWN) + config->dr_mode = USB_DR_MODE_PERIPHERAL; + + return 0; +} + +static irqreturn_t platform_usb_irq(int irq, void *dev_id) +{ + unsigned long flags; + uint8_t otgstate; + + struct phytium_cusb *config = (struct phytium_cusb *)dev_id; + struct GADGET_CTRL *gadget_ctrl = config->gadget_priv; + struct HOST_CTRL *host_ctrl = config->host_priv; + + if (gadget_ctrl || host_ctrl) { + if (host_ctrl) + otgstate = phytium_read8(&host_ctrl->regs->otgstate); + else + otgstate = phytium_read8(&gadget_ctrl->regs->otgstate); + + spin_lock_irqsave(&config->lock, flags); + if (otgstate > HOST_OTG_STATE_A_WAIT_VFALL) + config->gadget_obj->gadget_isr(config->gadget_priv); + else + config->host_obj->host_isr(config->host_priv); + spin_unlock_irqrestore(&config->lock, flags); + } + + return IRQ_HANDLED; +} + +static int phytium_driver_probe(struct platform_device *pdev) +{ + struct phytium_cusb *config; + struct resource *res, *phy_res; + int retval = 0; + + config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL); + if (!config) + return -ENOMEM; + + spin_lock_init(&config->lock); + config->dev = &pdev->dev; + config->isVhubHost = false; + + config->irq = platform_get_irq(pdev, 0); + if (config->irq <= 0) { + dev_err(config->dev, "getting usb irq failed\n"); + return config->irq; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + config->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!config->regs) { + dev_err(config->dev, "map IOMEM resource failed\n"); + return -ENOMEM; + } + + phy_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!phy_res) + return -ENODEV; + + config->phy_regs = devm_ioremap(&pdev->dev, phy_res->start, resource_size(phy_res)); + if (!config->phy_regs) { + dev_err(config->dev, "map IOMEM phy resource failed\n"); + return -ENOMEM; + } + + phytium_get_dr_mode(config); + + phytium_core_reset(config, false); + + if (config->dr_mode == USB_DR_MODE_HOST || + config->dr_mode == USB_DR_MODE_OTG) { + if (res->start == USB2_2_BASE_ADDRESS) + config->isVhubHost = true; + + phytium_host_init(config); + } + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || + config->dr_mode == USB_DR_MODE_OTG) + phytium_gadget_init(config); + + dev_set_drvdata(&pdev->dev, config); + + if (config->irq > 0) { + retval = devm_request_irq(config->dev, config->irq, platform_usb_irq, + IRQF_SHARED, "phytium_otg", config); + if (retval != 0) { + dev_err(config->dev, "request irq %d err %d\n", config->irq, retval); +
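/* drop the unusable IRQ number; the error in retval is still propagated below */ +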
config->irq = 0; + } + } + + return retval; +} + +static void phytium_driver_remove(struct platform_device *dev) +{ + struct phytium_cusb *config = platform_get_drvdata(dev); + + if (!config) + return; + + phytium_get_dr_mode(config); + + if (config->dr_mode == USB_DR_MODE_HOST || + config->dr_mode == USB_DR_MODE_OTG) + phytium_host_uninit(config); + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || + config->dr_mode == USB_DR_MODE_OTG) + phytium_gadget_uninit(config); + + dev_set_drvdata(&dev->dev, NULL); +} + +static void phytium_driver_shutdown(struct platform_device *dev) +{ + pr_info("%s %d\n", __func__, __LINE__); +} + +#ifdef CONFIG_PM +static int phytium_driver_suspend(struct device *dev) +{ + struct phytium_cusb *config; + int ret = 0; + + config = dev_get_drvdata(dev); + + if (config->dr_mode == USB_DR_MODE_HOST || + config->dr_mode == USB_DR_MODE_OTG) + ret = phytium_host_suspend(config); + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || + config->dr_mode == USB_DR_MODE_OTG) + ret = phytium_gadget_suspend(config); + + return ret; +} + +static int phytium_driver_resume(struct device *dev) +{ + struct phytium_cusb *config; + int ret = 0; + + config = dev_get_drvdata(dev); + if (config->dr_mode == USB_DR_MODE_HOST || + config->dr_mode == USB_DR_MODE_OTG) + ret = phytium_host_resume(config); + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || + config->dr_mode == USB_DR_MODE_OTG) + ret = phytium_gadget_resume(config); + + return ret; +} + +static const struct dev_pm_ops phytium_usb_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_driver_suspend, phytium_driver_resume) +}; +#endif + +static struct platform_driver phytium_otg_driver = { + .driver = { + .name = "phytium-otg", + .of_match_table = of_match_ptr(phytium_otg_of_match), + .acpi_match_table = ACPI_PTR(phytium_otg_acpi_match), +#ifdef CONFIG_PM + .pm = &phytium_usb_pm_ops, +#endif + }, + .probe = phytium_driver_probe, + .remove = phytium_driver_remove, + .shutdown = phytium_driver_shutdown, +}; + +module_platform_driver(phytium_otg_driver); + +MODULE_AUTHOR("Chen Zhenhua "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium usb platform wrapper"); diff --git a/include/acpi/apei.h b/include/acpi/apei.h index dc60f7db5524f..fcb5814a3f43b 100644 --- a/include/acpi/apei.h +++ b/include/acpi/apei.h @@ -52,6 +52,8 @@ int erst_clear(u64 record_id); int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data); void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err); +void arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err); +void arch_apei_report_zdi_error(int sev, struct cper_sec_proc_generic *zdi_err); #endif #endif diff --git a/include/linux/cper.h b/include/linux/cper.h index 5b1236d8c65bb..edafa243c54c3 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -611,4 +611,5 @@ struct cxl_cper_sec_prot_err; void cxl_cper_print_prot_err(const char *pfx, const struct cxl_cper_sec_prot_err *prot_err); +const char *cper_zdi_zpi_err_type_str(unsigned int etype); #endif diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 692b2b4457618..8d7455aa567ba 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -112,6 +112,9 @@ extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, void *start, void*end, u16 segment, struct dmar_dev_scope *devices, int devices_cnt); +extern bool dmar_rmrr_acpi_insert_dev_scope(u8 device_number, + struct acpi_device *adev, void *start, void *end, + struct dmar_dev_scope *devices, int devices_cnt); extern int 
dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment, struct dmar_dev_scope *devices, int count); @@ -144,6 +147,7 @@ extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg); extern int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg); extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg); extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert); +extern int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev); extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); #else /* !CONFIG_INTEL_IOMMU: */ static inline int intel_iommu_init(void) { return -ENODEV; } @@ -155,6 +159,11 @@ static inline void intel_iommu_shutdown(void) { } #define dmar_release_one_atsr dmar_res_noop #define dmar_parse_one_satc dmar_res_noop +static inline int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev) +{ + return 0; +} + static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) { return 0; diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h index c52a82dd79a63..9fb09709f6439 100644 --- a/include/linux/i3c/master.h +++ b/include/linux/i3c/master.h @@ -190,6 +190,7 @@ struct i3c_dev_boardinfo { u8 static_addr; u64 pid; struct device_node *of_node; + struct fwnode_handle *fwnode; }; /** diff --git a/include/linux/iommu.h b/include/linux/iommu.h index c30d12e16473d..58f5ec82595eb 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -969,6 +969,21 @@ void iommu_set_dma_strict(void); extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); +static inline bool apply_zhaoxin_dmar_acpi_a_behavior(void) +{ +#if defined(CONFIG_CPU_SUP_ZHAOXIN) || defined(CONFIG_CPU_SUP_CENTAUR) + if (((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) || + (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)) && + ((boot_cpu_data.x86 == 7) && (boot_cpu_data.x86_model == 0x3b))) + return true; +#endif + return false; +} + +extern int iova_reserve_domain_addr(struct iommu_domain *domain, dma_addr_t start, dma_addr_t end); + +int __acpi_rmrr_device_create_direct_mappings(struct iommu_domain *domain, struct device *dev); + static inline void iommu_flush_iotlb_all(struct iommu_domain *domain) { if (domain->ops->flush_iotlb_all) diff --git a/include/linux/irqchip/arm-gic-phytium-2500.h b/include/linux/irqchip/arm-gic-phytium-2500.h new file mode 100644 index 0000000000000..3688d93a5c3e2 --- /dev/null +++ b/include/linux/irqchip/arm-gic-phytium-2500.h @@ -0,0 +1,661 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023, Phytium Technology Co., Ltd + */ + +#ifndef __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H +#define __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H + +/* + * Distributor registers. We assume we're running non-secure, with ARE + * being set. Secure-only and non-ARE registers are not described. 
+ */ +#define GICD_CTLR 0x0000 +#define GICD_TYPER 0x0004 +#define GICD_IIDR 0x0008 +#define GICD_TYPER2 0x000C +#define GICD_STATUSR 0x0010 +#define GICD_SETSPI_NSR 0x0040 +#define GICD_CLRSPI_NSR 0x0048 +#define GICD_SETSPI_SR 0x0050 +#define GICD_CLRSPI_SR 0x0058 +#define GICD_IGROUPR 0x0080 +#define GICD_ISENABLER 0x0100 +#define GICD_ICENABLER 0x0180 +#define GICD_ISPENDR 0x0200 +#define GICD_ICPENDR 0x0280 +#define GICD_ISACTIVER 0x0300 +#define GICD_ICACTIVER 0x0380 +#define GICD_IPRIORITYR 0x0400 +#define GICD_ICFGR 0x0C00 +#define GICD_IGRPMODR 0x0D00 +#define GICD_NSACR 0x0E00 +#define GICD_IGROUPRnE 0x1000 +#define GICD_ISENABLERnE 0x1200 +#define GICD_ICENABLERnE 0x1400 +#define GICD_ISPENDRnE 0x1600 +#define GICD_ICPENDRnE 0x1800 +#define GICD_ISACTIVERnE 0x1A00 +#define GICD_ICACTIVERnE 0x1C00 +#define GICD_IPRIORITYRnE 0x2000 +#define GICD_ICFGRnE 0x3000 +#define GICD_IROUTER 0x6000 +#define GICD_IROUTERnE 0x8000 +#define GICD_IDREGS 0xFFD0 +#define GICD_PIDR2 0xFFE8 + +#define ESPI_BASE_INTID 4096 + +/* + * Those registers are actually from GICv2, but the spec demands that they + * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3). + */ +#define GICD_ITARGETSR 0x0800 +#define GICD_SGIR 0x0F00 +#define GICD_CPENDSGIR 0x0F10 +#define GICD_SPENDSGIR 0x0F20 + +#define GICD_CTLR_RWP (1U << 31) +#define GICD_CTLR_nASSGIreq (1U << 8) +#define GICD_CTLR_DS (1U << 6) +#define GICD_CTLR_ARE_NS (1U << 4) +#define GICD_CTLR_ENABLE_G1A (1U << 1) +#define GICD_CTLR_ENABLE_G1 (1U << 0) + +#define GICD_IIDR_IMPLEMENTER_SHIFT 0 +#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) +#define GICD_IIDR_REVISION_SHIFT 12 +#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT) +#define GICD_IIDR_VARIANT_SHIFT 16 +#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT) +#define GICD_IIDR_PRODUCT_ID_SHIFT 24 +#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT) + + +/* + * In systems with a single security state (what we emulate in KVM) + * the meaning of the interrupt group enable bits is slightly different + */ +#define GICD_CTLR_ENABLE_SS_G1 (1U << 1) +#define GICD_CTLR_ENABLE_SS_G0 (1U << 0) + +#define GICD_TYPER_RSS (1U << 26) +#define GICD_TYPER_LPIS (1U << 17) +#define GICD_TYPER_MBIS (1U << 16) +#define GICD_TYPER_ESPI (1U << 8) + +#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) +#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) +#define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32) +#define GICD_TYPER_ESPIS(typer) \ + (((typer) & GICD_TYPER_ESPI) ? 
GICD_TYPER_SPIS((typer) >> 27) : 0) + +#define GICD_TYPER2_nASSGIcap (1U << 8) +#define GICD_TYPER2_VIL (1U << 7) +#define GICD_TYPER2_VID GENMASK(4, 0) + +#define GICD_IROUTER_SPI_MODE_ONE (0U << 31) +#define GICD_IROUTER_SPI_MODE_ANY (1U << 31) + +#define GIC_PIDR2_ARCH_MASK 0xf0 +#define GIC_PIDR2_ARCH_GICv3 0x30 +#define GIC_PIDR2_ARCH_GICv4 0x40 + +#define GIC_V3_DIST_SIZE 0x10000 + +#define GIC_PAGE_SIZE_4K 0ULL +#define GIC_PAGE_SIZE_16K 1ULL +#define GIC_PAGE_SIZE_64K 2ULL +#define GIC_PAGE_SIZE_MASK 3ULL + +/* + * Re-Distributor registers, offsets from RD_base + */ +#define GICR_CTLR GICD_CTLR +#define GICR_IIDR 0x0004 +#define GICR_TYPER 0x0008 +#define GICR_STATUSR GICD_STATUSR +#define GICR_WAKER 0x0014 +#define GICR_SETLPIR 0x0040 +#define GICR_CLRLPIR 0x0048 +#define GICR_PROPBASER 0x0070 +#define GICR_PENDBASER 0x0078 +#define GICR_INVLPIR 0x00A0 +#define GICR_INVALLR 0x00B0 +#define GICR_SYNCR 0x00C0 +#define GICR_IDREGS GICD_IDREGS +#define GICR_PIDR2 GICD_PIDR2 + +#define GICR_CTLR_ENABLE_LPIS (1UL << 0) +#define GICR_CTLR_CES (1UL << 1) +#define GICR_CTLR_IR (1UL << 2) +#define GICR_CTLR_RWP (1UL << 3) + +#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) + +#define EPPI_BASE_INTID 1056 + +#define GICR_TYPER_NR_PPIS(r) \ + ({ \ + unsigned int __ppinum = ((r) >> 27) & 0x1f; \ + unsigned int __nr_ppis = 16; \ + if (__ppinum == 1 || __ppinum == 2) \ + __nr_ppis += __ppinum * 32; \ + \ + __nr_ppis; \ + }) + +#define GICR_WAKER_ProcessorSleep (1U << 1) +#define GICR_WAKER_ChildrenAsleep (1U << 2) + +#define GIC_BASER_CACHE_nCnB 0ULL +#define GIC_BASER_CACHE_SameAsInner 0ULL +#define GIC_BASER_CACHE_nC 1ULL +#define GIC_BASER_CACHE_RaWt 2ULL +#define GIC_BASER_CACHE_RaWb 3ULL +#define GIC_BASER_CACHE_WaWt 4ULL +#define GIC_BASER_CACHE_WaWb 5ULL +#define GIC_BASER_CACHE_RaWaWt 6ULL +#define GIC_BASER_CACHE_RaWaWb 7ULL +#define GIC_BASER_CACHE_MASK 7ULL +#define GIC_BASER_NonShareable 0ULL +#define GIC_BASER_InnerShareable 1ULL +#define GIC_BASER_OuterShareable 2ULL +#define GIC_BASER_SHAREABILITY_MASK 3ULL + +#define GIC_BASER_CACHEABILITY(reg, inner_outer, type) \ + (GIC_BASER_CACHE_##type << reg##_##inner_outer##_CACHEABILITY_SHIFT) + +#define GIC_BASER_SHAREABILITY(reg, type) \ + (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT) + +/* encode a size field of width @w containing @n - 1 units */ +#define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0)) + +#define GICR_PROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, SHAREABILITY_MASK) +#define GICR_PROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, MASK) +#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, MASK) +#define GICR_PROPBASER_CACHEABILITY_MASK GICR_PROPBASER_INNER_CACHEABILITY_MASK + +#define GICR_PROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable) + +#define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB) +#define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC) +#define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) +#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) +#define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt) +#define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, 
INNER, WaWb) +#define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt) +#define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb) + +#define GICR_PROPBASER_IDBITS_MASK (0x1f) +#define GICR_PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 12)) +#define GICR_PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 16)) + +#define GICR_PENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, SHAREABILITY_MASK) +#define GICR_PENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, MASK) +#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, MASK) +#define GICR_PENDBASER_CACHEABILITY_MASK GICR_PENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_PENDBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable) + +#define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB) +#define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC) +#define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) +#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) +#define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt) +#define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb) +#define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt) +#define GICR_PENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWb) + +#define GICR_PENDBASER_PTZ BIT_ULL(62) + +/* + * Re-Distributor registers, offsets from SGI_base + */ +#define GICR_IGROUPR0 GICD_IGROUPR +#define GICR_ISENABLER0 GICD_ISENABLER +#define GICR_ICENABLER0 GICD_ICENABLER +#define GICR_ISPENDR0 GICD_ISPENDR +#define GICR_ICPENDR0 GICD_ICPENDR +#define GICR_ISACTIVER0 GICD_ISACTIVER +#define GICR_ICACTIVER0 GICD_ICACTIVER +#define GICR_IPRIORITYR0 GICD_IPRIORITYR +#define GICR_ICFGR0 GICD_ICFGR +#define GICR_IGRPMODR0 GICD_IGRPMODR +#define GICR_NSACR GICD_NSACR + +#define GICR_TYPER_PLPIS (1U << 0) +#define GICR_TYPER_VLPIS (1U << 1) +#define GICR_TYPER_DIRTY (1U << 2) +#define GICR_TYPER_DirectLPIS (1U << 3) +#define GICR_TYPER_LAST (1U << 4) +#define GICR_TYPER_RVPEID (1U << 7) +#define GICR_TYPER_COMMON_LPI_AFF GENMASK_ULL(25, 24) +#define GICR_TYPER_AFFINITY GENMASK_ULL(63, 32) + +#define GICR_INVLPIR_INTID GENMASK_ULL(31, 0) +#define GICR_INVLPIR_VPEID GENMASK_ULL(47, 32) +#define GICR_INVLPIR_V GENMASK_ULL(63, 63) + +#define GICR_INVALLR_VPEID GICR_INVLPIR_VPEID +#define GICR_INVALLR_V GICR_INVLPIR_V + +#define GIC_V3_REDIST_SIZE 0x20000 + +#define LPI_PROP_GROUP1 (1 << 1) +#define LPI_PROP_ENABLED (1 << 0) + +/* + * Re-Distributor registers, offsets from VLPI_base + */ +#define GICR_VPROPBASER 0x0070 + +#define GICR_VPROPBASER_IDBITS_MASK 0x1f + +#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56) + +#define GICR_VPROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK) +#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK) +#define GICR_VPROPBASER_CACHEABILITY_MASK \ + GICR_VPROPBASER_INNER_CACHEABILITY_MASK + 
+#define GICR_VPROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable) + +#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB) +#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC) +#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) +#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb) +#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt) +#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb) +#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt) +#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb) + +/* + * GICv4.1 VPROPBASER reinvention. A subtle mix between the old + * VPROPBASER and ITS_BASER. Just not quite any of the two. + */ +#define GICR_VPROPBASER_4_1_VALID (1ULL << 63) +#define GICR_VPROPBASER_4_1_ENTRY_SIZE GENMASK_ULL(61, 59) +#define GICR_VPROPBASER_4_1_INDIRECT (1ULL << 55) +#define GICR_VPROPBASER_4_1_PAGE_SIZE GENMASK_ULL(54, 53) +#define GICR_VPROPBASER_4_1_Z (1ULL << 52) +#define GICR_VPROPBASER_4_1_ADDR GENMASK_ULL(51, 12) +#define GICR_VPROPBASER_4_1_SIZE GENMASK_ULL(6, 0) + +#define GICR_VPENDBASER 0x0078 + +#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_VPENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK) +#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK) +#define GICR_VPENDBASER_CACHEABILITY_MASK \ + GICR_VPENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_VPENDBASER_NonShareable \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable) + +#define GICR_VPENDBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable) + +#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) +#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) +#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) +#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb) +#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt) +#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb) +#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt) +#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb) + +#define GICR_VPENDBASER_Dirty (1ULL << 60) +#define GICR_VPENDBASER_PendingLast (1ULL << 61) +#define GICR_VPENDBASER_IDAI (1ULL << 62) +#define GICR_VPENDBASER_Valid (1ULL << 63) + +/* + * GICv4.1 VPENDBASER, used for VPE residency. On top of these fields, + * also use the above Valid, PendingLast and Dirty. 
+ */ +#define GICR_VPENDBASER_4_1_DB (1ULL << 62) +#define GICR_VPENDBASER_4_1_VGRP0EN (1ULL << 59) +#define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58) +#define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0) + +#define GICR_VSGIR 0x0080 + +#define GICR_VSGIR_VPEID GENMASK(15, 0) + +#define GICR_VSGIPENDR 0x0088 + +#define GICR_VSGIPENDR_BUSY (1U << 31) +#define GICR_VSGIPENDR_PENDING GENMASK(15, 0) + +/* + * ITS registers, offsets from ITS_base + */ +#define GITS_CTLR 0x0000 +#define GITS_IIDR 0x0004 +#define GITS_TYPER 0x0008 +#define GITS_MPIDR 0x0018 +#define GITS_CBASER 0x0080 +#define GITS_CWRITER 0x0088 +#define GITS_CREADR 0x0090 +#define GITS_BASER 0x0100 +#define GITS_IDREGS_BASE 0xffd0 +#define GITS_PIDR0 0xffe0 +#define GITS_PIDR1 0xffe4 +#define GITS_PIDR2 GICR_PIDR2 +#define GITS_PIDR4 0xffd0 +#define GITS_CIDR0 0xfff0 +#define GITS_CIDR1 0xfff4 +#define GITS_CIDR2 0xfff8 +#define GITS_CIDR3 0xfffc + +#define GITS_TRANSLATER 0x10040 + +#define GITS_SGIR 0x20020 + +#define GITS_SGIR_VPEID GENMASK_ULL(47, 32) +#define GITS_SGIR_VINTID GENMASK_ULL(3, 0) + +#define GITS_CTLR_ENABLE (1U << 0) +#define GITS_CTLR_ImDe (1U << 1) +#define GITS_CTLR_ITS_NUMBER_SHIFT 4 +#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT) +#define GITS_CTLR_QUIESCENT (1U << 31) + +#define GITS_TYPER_PLPIS (1UL << 0) +#define GITS_TYPER_VLPIS (1UL << 1) +#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 +#define GITS_TYPER_ITT_ENTRY_SIZE GENMASK_ULL(7, 4) +#define GITS_TYPER_IDBITS_SHIFT 8 +#define GITS_TYPER_DEVBITS_SHIFT 13 +#define GITS_TYPER_DEVBITS GENMASK_ULL(17, 13) +#define GITS_TYPER_PTA (1UL << 19) +#define GITS_TYPER_HCC_SHIFT 24 +#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff) +#define GITS_TYPER_VMOVP (1ULL << 37) +#define GITS_TYPER_VMAPP (1ULL << 40) +#define GITS_TYPER_SVPET GENMASK_ULL(42, 41) + +#define GITS_IIDR_REV_SHIFT 12 +#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) +#define GITS_IIDR_REV(r) (((r) >> GITS_IIDR_REV_SHIFT) & 0xf) +#define GITS_IIDR_PRODUCTID_SHIFT 24 + +#define GITS_CBASER_VALID (1ULL << 63) +#define GITS_CBASER_SHAREABILITY_SHIFT (10) +#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) +#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_CBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_CBASER, SHAREABILITY_MASK) +#define GITS_CBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, MASK) +#define GITS_CBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, OUTER, MASK) +#define GITS_CBASER_CACHEABILITY_MASK GITS_CBASER_INNER_CACHEABILITY_MASK + +#define GITS_CBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_CBASER, InnerShareable) + +#define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB) +#define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC) +#define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) +#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb) +#define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt) +#define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb) +#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) +#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb) + +#define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12)) + +#define GITS_BASER_NR_REGS 8 + +#define GITS_BASER_VALID (1ULL << 63) +#define GITS_BASER_INDIRECT (1ULL << 62) + +#define GITS_BASER_INNER_CACHEABILITY_SHIFT (59) +#define 
GITS_BASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_BASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, INNER, MASK) +#define GITS_BASER_CACHEABILITY_MASK GITS_BASER_INNER_CACHEABILITY_MASK +#define GITS_BASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, MASK) +#define GITS_BASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_BASER, SHAREABILITY_MASK) + +#define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB) +#define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC) +#define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) +#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) +#define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt) +#define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb) +#define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt) +#define GITS_BASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWb) + +#define GITS_BASER_TYPE_SHIFT (56) +#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) +#define GITS_BASER_ENTRY_SIZE_SHIFT (48) +#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) +#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) +#define GITS_BASER_PHYS_52_to_48(phys) \ + (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12) +#define GITS_BASER_ADDR_48_to_52(baser) \ + (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48) + +#define GITS_BASER_SHAREABILITY_SHIFT (10) +#define GITS_BASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) +#define GITS_BASER_PAGE_SIZE_SHIFT (8) +#define __GITS_BASER_PSZ(sz) (GIC_PAGE_SIZE_ ## sz << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_4K __GITS_BASER_PSZ(4K) +#define GITS_BASER_PAGE_SIZE_16K __GITS_BASER_PSZ(16K) +#define GITS_BASER_PAGE_SIZE_64K __GITS_BASER_PSZ(64K) +#define GITS_BASER_PAGE_SIZE_MASK __GITS_BASER_PSZ(MASK) +#define GITS_BASER_PAGES_MAX 256 +#define GITS_BASER_PAGES_SHIFT (0) +#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1) + +#define GITS_BASER_TYPE_NONE 0 +#define GITS_BASER_TYPE_DEVICE 1 +#define GITS_BASER_TYPE_VCPU 2 +#define GITS_BASER_TYPE_RESERVED3 3 +#define GITS_BASER_TYPE_COLLECTION 4 +#define GITS_BASER_TYPE_RESERVED5 5 +#define GITS_BASER_TYPE_RESERVED6 6 +#define GITS_BASER_TYPE_RESERVED7 7 + +#define GITS_LVL1_ENTRY_SIZE (8UL) + +/* + * ITS commands + */ +#define GITS_CMD_MAPD 0x08 +#define GITS_CMD_MAPC 0x09 +#define GITS_CMD_MAPTI 0x0a +#define GITS_CMD_MAPI 0x0b +#define GITS_CMD_MOVI 0x01 +#define GITS_CMD_DISCARD 0x0f +#define GITS_CMD_INV 0x0c +#define GITS_CMD_MOVALL 0x0e +#define GITS_CMD_INVALL 0x0d +#define GITS_CMD_INT 0x03 +#define GITS_CMD_CLEAR 0x04 +#define GITS_CMD_SYNC 0x05 + +/* + * GICv4 ITS specific commands + */ +#define GITS_CMD_GICv4(x) ((x) | 0x20) +#define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL) +#define GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC) +#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI) +#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI) +#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC) +/* VMOVP, VSGI and INVDB are the odd ones, as they dont have a physical counterpart */ +#define GITS_CMD_VMOVP GITS_CMD_GICv4(2) +#define GITS_CMD_VSGI GITS_CMD_GICv4(3) +#define GITS_CMD_INVDB GITS_CMD_GICv4(0xe) + +/* + * ITS error numbers + */ +#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107 +#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109 +#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307 
+#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507 +#define E_ITS_MAPD_DEVICE_OOR 0x010801 +#define E_ITS_MAPD_ITTSIZE_OOR 0x010802 +#define E_ITS_MAPC_PROCNUM_OOR 0x010902 +#define E_ITS_MAPC_COLLECTION_OOR 0x010903 +#define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04 +#define E_ITS_MAPTI_ID_OOR 0x010a05 +#define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06 +#define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07 +#define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09 +#define E_ITS_MOVALL_PROCNUM_OOR 0x010e01 +#define E_ITS_DISCARD_UNMAPPED_INTERRUPT 0x010f07 + +/* + * CPU interface registers + */ +#define ICC_CTLR_EL1_EOImode_SHIFT (1) +#define ICC_CTLR_EL1_EOImode_drop_dir (0U << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_EOImode_drop (1U << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_CBPR_SHIFT 0 +#define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT) +#define ICC_CTLR_EL1_PMHE_SHIFT 6 +#define ICC_CTLR_EL1_PMHE_MASK (1 << ICC_CTLR_EL1_PMHE_SHIFT) +#define ICC_CTLR_EL1_PRI_BITS_SHIFT 8 +#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT) +#define ICC_CTLR_EL1_ID_BITS_SHIFT 11 +#define ICC_CTLR_EL1_ID_BITS_MASK (0x7 << ICC_CTLR_EL1_ID_BITS_SHIFT) +#define ICC_CTLR_EL1_SEIS_SHIFT 14 +#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT) +#define ICC_CTLR_EL1_A3V_SHIFT 15 +#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) +#define ICC_CTLR_EL1_RSS (0x1 << 18) +#define ICC_CTLR_EL1_ExtRange (0x1 << 19) +#define ICC_PMR_EL1_SHIFT 0 +#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) +#define ICC_BPR0_EL1_SHIFT 0 +#define ICC_BPR0_EL1_MASK (0x7 << ICC_BPR0_EL1_SHIFT) +#define ICC_BPR1_EL1_SHIFT 0 +#define ICC_BPR1_EL1_MASK (0x7 << ICC_BPR1_EL1_SHIFT) +#define ICC_IGRPEN0_EL1_SHIFT 0 +#define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT) +#define ICC_IGRPEN1_EL1_SHIFT 0 +#define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT) +#define ICC_SRE_EL1_DIB (1U << 2) +#define ICC_SRE_EL1_DFB (1U << 1) +#define ICC_SRE_EL1_SRE (1U << 0) + +/* These are for GICv2 emulation only */ +#define GICH_LR_VIRTUALID (0x3ffUL << 0) +#define GICH_LR_PHYSID_CPUID_SHIFT (10) +#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) + +#define ICC_IAR1_EL1_SPURIOUS 0x3ff + +#define ICC_SRE_EL2_SRE (1 << 0) +#define ICC_SRE_EL2_ENABLE (1 << 3) + +#define ICC_SGI1R_TARGET_LIST_SHIFT 0 +#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT) +#define ICC_SGI1R_AFFINITY_1_SHIFT 16 +#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) +#define ICC_SGI1R_SGI_ID_SHIFT 24 +#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT) +#define ICC_SGI1R_AFFINITY_2_SHIFT 32 +#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) +#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 +#define ICC_SGI1R_RS_SHIFT 44 +#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT) +#define ICC_SGI1R_AFFINITY_3_SHIFT 48 +#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) + +#include + +#ifndef __ASSEMBLY__ + +/* + * We need a value to serve as a irq-type for LPIs. Choose one that will + * hopefully pique the interest of the reviewer. 
+ */ +#define GIC_IRQ_TYPE_LPI 0xa110c8ed + +struct rdists { + struct { + raw_spinlock_t rd_lock; + void __iomem *rd_base; + struct page *pend_page; + phys_addr_t phys_base; + u64 flags; + cpumask_t *vpe_table_mask; + void *vpe_l1_base; + } __percpu *rdist; + phys_addr_t prop_table_pa; + void *prop_table_va; + u64 flags; + u32 gicd_typer; + u32 gicd_typer2; + int cpuhp_memreserve_state; + bool has_vlpis; + bool has_rvpeid; + bool has_direct_lpi; + bool has_vpend_valid_dirty; +}; + +struct irq_domain; +struct fwnode_handle; +int __init phytium_its_lpi_memreserve_init(void); +int phytium_its_cpu_init(void); +int phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists, + struct irq_domain *domain); +int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent); + +static inline bool gic_enable_sre(void) +{ + u32 val; + + val = gic_read_sre(); + if (val & ICC_SRE_EL1_SRE) + return true; + + val |= ICC_SRE_EL1_SRE; + gic_write_sre(val); + val = gic_read_sre(); + + return !!(val & ICC_SRE_EL1_SRE); +} + +#endif + +#endif diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index af5d401d579e3..a2684ea89bacc 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -352,6 +352,7 @@ struct hdac_bus { bool not_use_interrupts:1; /* prohibiting the RIRB IRQ */ bool access_sdnctl_in_dword:1; /* accessing the sdnctl register by dword */ bool use_pio_for_commands:1; /* Use PIO instead of CORB for commands */ + bool cmd_resend:1; /* resend CORB commands that get no RIRB response */ bool hygon_dword_access:1; int poll_count; diff --git a/scripts/build-version b/scripts/build-version index 537d458150832..0fbc0cd46f444 100755 --- a/scripts/build-version +++ b/scripts/build-version @@ -3,8 +3,14 @@ prev_ver=$(cat .version 2>/dev/null) && ver=$(expr ${prev_ver} + 1 2>/dev/null) || -ver=1 +# expr failed: keep $prev_ver as the build version, or fall back to 1 when .version is empty or missing. +if [ "X$prev_ver" = "X" ] +then + ver=1 +else + ver=$prev_ver +fi echo ${ver} > .version echo ${ver} diff --git a/sound/hda/common/controller.c b/sound/hda/common/controller.c index b1cfd9bd4dcb7..fc19a77895ada 100644 --- a/sound/hda/common/controller.c +++ b/sound/hda/common/controller.c @@ -17,6 +17,8 @@ #include #include +#include "../controllers/phytium.h" + #ifdef CONFIG_X86 /* for art-tsc conversion */ #include @@ -161,6 +163,9 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) struct hda_spdif_out *spdif = snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid); unsigned short ctls = spdif ? spdif->ctls : 0; + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + + hda->substream = substream; trace_azx_pcm_prepare(chip, azx_dev); guard_dsp_lock(azx_dev); diff --git a/sound/hda/controllers/Kconfig b/sound/hda/controllers/Kconfig index 34721f50b055a..c2e9431092e2a 100644 --- a/sound/hda/controllers/Kconfig +++ b/sound/hda/controllers/Kconfig @@ -14,6 +14,22 @@ config SND_HDA_INTEL To compile this driver as a module, choose M here: the module will be called snd-hda-intel. +config SND_HDA_PHYTIUM + tristate "Phytium HD Audio" + depends on SOUND + select SND_HDA + select SND_HDA_ALIGNED_MMIO + help + Say Y here to support the HDA controller present in Phytium + SoCs. + + This option enables support for the HD Audio controller + present in some Phytium SoCs, used to communicate audio + to the "High Definition Audio" codec. + + To compile this driver as a module, choose M here: the module + will be called snd-hda-phytium.
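The cmd_resend bit added to struct hdac_bus above gates a bounded retry in the CORB send path (see the snd_hdac_bus_send_cmd_corb() hunk in sound/hda/core/controller.c later in this patch): after a verb is queued, the driver polls RIRBWP and re-queues the same verb while no response shows up, giving up after a small retry cap or a one-second timeout. Below is a minimal, self-contained sketch of that retry pattern; queue_verb(), response_arrived() and delay_us() are hypothetical stand-ins for the real CORB/RIRB register accessors, not functions from this patch.

#include <stdbool.h>

#define CMD_RESEND_MAX	5	/* mirrors the "i > 5" cap in the patch */
#define POLL_DELAY_US	80	/* mirrors the udelay(80) polling interval */

/* Hypothetical helpers standing in for the real CORB/RIRB accessors. */
extern bool queue_verb(unsigned int val);	/* false when the CORB ring is full */
extern bool response_arrived(void);		/* true once RIRBWP has advanced */
extern void delay_us(unsigned int us);

/*
 * Bounded resend: queue the verb, then re-queue the same verb up to
 * CMD_RESEND_MAX times while the controller returns no response.
 * Returns 0 on success, -1 when the command was never answered.
 */
static int send_cmd_with_resend(unsigned int val)
{
	int tries;

	if (!queue_verb(val))
		return -1;

	for (tries = 0; tries < CMD_RESEND_MAX; tries++) {
		delay_us(POLL_DELAY_US);
		if (response_arrived())
			return 0;
		if (!queue_verb(val))	/* controller ignored it; send again */
			return -1;
	}

	return response_arrived() ? 0 : -1;
}

The retry cap bounds the time spent on a dead link while still papering over commands the controller occasionally drops.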
+ config SND_HDA_TEGRA tristate "NVIDIA Tegra HD Audio" depends on ARCH_TEGRA diff --git a/sound/hda/controllers/Makefile b/sound/hda/controllers/Makefile index a4bcd055e9aef..85df102433b16 100644 --- a/sound/hda/controllers/Makefile +++ b/sound/hda/controllers/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 snd-hda-intel-y := intel.o +snd-hda-phytium-y := phytium.o snd-hda-tegra-y := tegra.o snd-hda-acpi-y := acpi.o @@ -9,5 +10,6 @@ subdir-ccflags-y += -I$(src)/../common CFLAGS_intel.o := -I$(src) obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-intel.o +obj-$(CONFIG_SND_HDA_PHYTIUM) += snd-hda-phytium.o obj-$(CONFIG_SND_HDA_TEGRA) += snd-hda-tegra.o obj-$(CONFIG_SND_HDA_ACPI) += snd-hda-acpi.o diff --git a/sound/hda/controllers/phytium.c b/sound/hda/controllers/phytium.c new file mode 100644 index 0000000000000..4da5e06be9adc --- /dev/null +++ b/sound/hda/controllers/phytium.c @@ -0,0 +1,1083 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Implementation of primary ALSA driver code for Phytium HD Audio. + * + * Copyright(c) 2018-2022, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium.h" + +#include "intel_trace.h" + +/* position fix mode */ +enum { + POS_FIX_AUTO, + POS_FIX_LPIB, + POS_FIX_POSBUF, + POS_FIX_VIACOMBO, + POS_FIX_COMBO, +}; + +/* Define IN stream 0 FIFO size offset in VIA controller */ +#define VIA_IN_STREAM0_FIFO_SIZE_OFFSET 0x90 + +/* FT has 4 playback and 4 capture streams */ +#define FT4C_NUM_CAPTURE 4 +#define FT4C_NUM_PLAYBACK 4 + +#define DWORD_BYTE_WIDTH 4 +#define BYTE_BIT_WIDTH 8 + +static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; +static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; +static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; +static char *model[SNDRV_CARDS]; +static int position_fix[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = 1}; +static int bdl_pos_adj[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = -1}; +static int probe_mask[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = -1}; +static int probe_only[SNDRV_CARDS]; +static int jackpoll_ms[SNDRV_CARDS]; +static int single_cmd = -1; +static int enable_msi = -1; +#ifdef CONFIG_SND_HDA_INPUT_BEEP +static bool beep_mode[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = + CONFIG_SND_HDA_INPUT_BEEP_MODE}; +#endif + +module_param_array(index, int, NULL, 0444); +MODULE_PARM_DESC(index, "Index value for Phytium HD audio interface."); +module_param_array(id, charp, NULL, 0444); +MODULE_PARM_DESC(id, "ID string for Phytium HD audio interface."); +module_param_array(enable, bool, NULL, 0444); +MODULE_PARM_DESC(enable, "Enable Phytium HD audio interface."); +module_param_array(model, charp, NULL, 0444); +MODULE_PARM_DESC(model, "Use the given board model."); +module_param_array(position_fix, int, NULL, 0444); +MODULE_PARM_DESC(position_fix, "DMA pointer read method.
(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO)."); +module_param_array(bdl_pos_adj, int, NULL, 0644); +MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset."); +module_param_array(probe_mask, int, NULL, 0444); +MODULE_PARM_DESC(probe_mask, "Bitmask to probe codecs (default = -1)."); +module_param_array(probe_only, int, NULL, 0444); +MODULE_PARM_DESC(probe_only, "Only probing and no codec initialization."); +module_param_array(jackpoll_ms, int, NULL, 0444); +MODULE_PARM_DESC(jackpoll_ms, "Ms between polling for jack events (default = 0, using unsol events only)"); +module_param(single_cmd, bint, 0444); +MODULE_PARM_DESC(single_cmd, "Use single command to communicate with codecs (for debugging only)."); +module_param(enable_msi, bint, 0444); +MODULE_PARM_DESC(enable_msi, "Enable Message Signaled Interrupt (MSI)"); +#ifdef CONFIG_SND_HDA_INPUT_BEEP +module_param_array(beep_mode, bool, NULL, 0444); +MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode (0=off, 1=on) (default=1)."); +#endif + +#define power_save 0 + +static int align_buffer_size = -1; +module_param(align_buffer_size, bint, 0644); +MODULE_PARM_DESC(align_buffer_size, + "Force buffer and period sizes to be multiple of 128 bytes."); + +/* driver types */ +enum { + AZX_DRIVER_ICH, + AZX_DRIVER_PCH, + AZX_DRIVER_SCH, + AZX_DRIVER_HDMI, + AZX_DRIVER_ATI, + AZX_DRIVER_ATIHDMI, + AZX_DRIVER_ATIHDMI_NS, + AZX_DRIVER_VIA, + AZX_DRIVER_SIS, + AZX_DRIVER_ULI, + AZX_DRIVER_NVIDIA, + AZX_DRIVER_TERA, + AZX_DRIVER_CTX, + AZX_DRIVER_CTHDA, + AZX_DRIVER_CMEDIA, + AZX_DRIVER_GENERIC, + AZX_DRIVER_FT, + AZX_NUM_DRIVERS, /* keep this as last entry */ +}; + +/* NOP for other archs */ +static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf, + bool on) +{ +} + +static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev, + struct snd_pcm_substream *substream, bool on) +{ +} + +static int azx_acquire_irq(struct azx *chip, int do_disconnect); + +/* calculate runtime delay from LPIB */ +static int azx_get_delay_from_lpib(struct azx *chip, struct azx_dev *azx_dev, + unsigned int pos) +{ + struct snd_pcm_substream *substream = azx_dev->core.substream; + int stream = substream->stream; + unsigned int lpib_pos = azx_get_pos_lpib(chip, azx_dev); + int delay; + + if (stream == SNDRV_PCM_STREAM_PLAYBACK) + delay = pos - lpib_pos; + else + delay = lpib_pos - pos; + if (delay < 0) { + if (delay >= azx_dev->core.delay_negative_threshold) + delay = 0; + else + delay += azx_dev->core.bufsize; + } + + if (delay >= azx_dev->core.period_bytes) { + dev_info(chip->card->dev, + "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n", + delay, azx_dev->core.period_bytes); + delay = 0; + chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY; + chip->get_delay[stream] = NULL; + } + + return bytes_to_frames(substream->runtime, delay); +} + +static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev); + +/* called from IRQ */ +static int azx_position_check(struct azx *chip, struct azx_dev *azx_dev) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + int ok; + + ok = azx_position_ok(chip, azx_dev); + if (ok == 1) { + azx_dev->irq_pending = 0; + return ok; + } else if (ok == 0) { + /* bogus IRQ, process it later */ + azx_dev->irq_pending = 1; + schedule_work(&hda->irq_pending_work); + } + return 0; +} + +static int azx_ft_link_power(struct azx *chip, bool enable) +{ + return 0; +} + +/* + * Check whether the current DMA position is acceptable for updating + * 
periods. Returns non-zero if it's OK. + * + * Many HD-audio controllers appear pretty inaccurate about + * the update-IRQ timing. The IRQ is issued before the data + * is actually processed. So, we need to process it afterwards in a + * workqueue. + */ +static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev) +{ + struct snd_pcm_substream *substream = azx_dev->core.substream; + int stream = substream->stream; + u32 wallclk; + unsigned int pos; + + wallclk = (azx_readl(chip, WALLCLK) - azx_dev->core.start_wallclk); + + if (wallclk < (azx_dev->core.period_wallclk * 2) / 3) + return -1; /* bogus (too early) interrupt */ + + if (chip->get_position[stream]) + pos = chip->get_position[stream](chip, azx_dev); + else { /* use the position buffer as default */ + pos = azx_get_pos_posbuf(chip, azx_dev); + if (!pos || pos == (u32)-1) { + dev_info(chip->card->dev, + "Invalid position buffer, using LPIB read method instead.\n"); + chip->get_position[stream] = azx_get_pos_lpib; + if (chip->get_position[0] == azx_get_pos_lpib && + chip->get_position[1] == azx_get_pos_lpib) + azx_bus(chip)->use_posbuf = false; + pos = azx_get_pos_lpib(chip, azx_dev); + chip->get_delay[stream] = NULL; + } else { + chip->get_position[stream] = azx_get_pos_posbuf; + if (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY) + chip->get_delay[stream] = azx_get_delay_from_lpib; + } + } + + if (pos >= azx_dev->core.bufsize) + pos = 0; + + if (WARN_ONCE(!azx_dev->core.period_bytes, + "hda-ft: zero azx_dev->period_bytes")) + return -1; /* this shouldn't happen! */ + if (wallclk < (azx_dev->core.period_wallclk * 5) / 4 && + pos % azx_dev->core.period_bytes > azx_dev->core.period_bytes / 2) + /* NG - it's below the first next period boundary */ + return chip->bdl_pos_adj ? 0 : -1; + + azx_dev->core.start_wallclk += wallclk; + + return 1; /* OK, it's fine */ +} + +/* The work for pending PCM period updates. */ +static void azx_irq_pending_work(struct work_struct *work) +{ + struct hda_ft *hda = container_of(work, struct hda_ft, irq_pending_work); + struct azx *chip = &hda->chip; + struct hdac_bus *bus = azx_bus(chip); + struct hdac_stream *s; + int pending, ok; + + if (!hda->irq_pending_warned) { + dev_info(chip->card->dev, + "IRQ timing workaround is activated for card #%d.
Suggest a bigger bdl_pos_adj.\n", + chip->card->number); + hda->irq_pending_warned = 1; + } + + for (;;) { + pending = 0; + spin_lock_irq(&bus->reg_lock); + list_for_each_entry(s, &bus->stream_list, list) { + struct azx_dev *azx_dev = stream_to_azx_dev(s); + + if (!azx_dev->irq_pending || + !s->substream || !s->running) + continue; + ok = azx_position_ok(chip, azx_dev); + if (ok > 0) { + azx_dev->irq_pending = 0; + spin_unlock(&bus->reg_lock); + snd_pcm_period_elapsed(s->substream); + spin_lock(&bus->reg_lock); + } else if (ok < 0) { + pending = 0; /* too early */ + } else { + pending++; + } + } + spin_unlock_irq(&bus->reg_lock); + if (!pending) + return; + udelay(1000); + } +} + +/* clear irq_pending flags and assure no on-going workq */ +static void azx_clear_irq_pending(struct azx *chip) +{ + struct hdac_bus *bus = azx_bus(chip); + struct hdac_stream *s; + + spin_lock_irq(&bus->reg_lock); + list_for_each_entry(s, &bus->stream_list, list) { + struct azx_dev *azx_dev = stream_to_azx_dev(s); + + azx_dev->irq_pending = 0; + } + spin_unlock_irq(&bus->reg_lock); +} + +static int azx_acquire_irq(struct azx *chip, int do_disconnect) +{ + struct hdac_bus *bus = azx_bus(chip); + + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + struct platform_device *pdev = to_platform_device(hda->dev); + int irq_id = platform_get_irq(pdev, 0); + int err; + + err = request_irq(irq_id, azx_interrupt, + IRQF_SHARED, KBUILD_MODNAME, chip); + if (err) { + dev_err(chip->card->dev, + "unable to request IRQ %d, disabling device\n", + irq_id); + if (do_disconnect) + snd_card_disconnect(chip->card); + return err; + } + bus->irq = irq_id; + + return 0; +} + +/* get the current DMA position with correction on VIA chips */ +static unsigned int azx_via_get_position(struct azx *chip, + struct azx_dev *azx_dev) +{ + unsigned int link_pos, mini_pos, bound_pos; + unsigned int mod_link_pos, mod_dma_pos, mod_mini_pos; + unsigned int fifo_size; + + link_pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev)); + if (azx_dev->core.substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + /* Playback, no problem using link position */ + return link_pos; + } + + /* Capture */ + /* For new chipset, + * use mod to get the DMA position just like old chipset + */ + mod_dma_pos = le32_to_cpu(*azx_dev->core.posbuf); + mod_dma_pos %= azx_dev->core.period_bytes; + + /* azx_dev->fifo_size can't get FIFO size of in stream. + * Get from base address + offset. 
+ */ + fifo_size = readw(azx_bus(chip)->remap_addr + + VIA_IN_STREAM0_FIFO_SIZE_OFFSET); + + if (azx_dev->insufficient) { + /* Link position is never greater than FIFO size */ + if (link_pos <= fifo_size) + return 0; + + azx_dev->insufficient = 0; + } + + if (link_pos <= fifo_size) + mini_pos = azx_dev->core.bufsize + link_pos - fifo_size; + else + mini_pos = link_pos - fifo_size; + + /* Find nearest previous boundary */ + mod_mini_pos = mini_pos % azx_dev->core.period_bytes; + mod_link_pos = link_pos % azx_dev->core.period_bytes; + if (mod_link_pos >= fifo_size) + bound_pos = link_pos - mod_link_pos; + else if (mod_dma_pos >= mod_mini_pos) + bound_pos = mini_pos - mod_mini_pos; + else { + bound_pos = mini_pos - mod_mini_pos + azx_dev->core.period_bytes; + if (bound_pos >= azx_dev->core.bufsize) + bound_pos = 0; + } + + /* Calculate real DMA position we want */ + return bound_pos + mod_dma_pos; +} + +#ifdef CONFIG_PM +static DEFINE_MUTEX(card_list_lock); +static LIST_HEAD(card_list); + +static void azx_add_card_list(struct azx *chip) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + + mutex_lock(&card_list_lock); + list_add(&hda->list, &card_list); + mutex_unlock(&card_list_lock); +} + +static void azx_del_card_list(struct azx *chip) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + + mutex_lock(&card_list_lock); + list_del_init(&hda->list); + mutex_unlock(&card_list_lock); +} + +#else +#define azx_add_card_list(chip) /* NOP */ +#define azx_del_card_list(chip) /* NOP */ +#endif /* CONFIG_PM */ + +#if defined(CONFIG_PM_SLEEP) +/* power management */ +static int azx_suspend(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_ft *hda; + struct hdac_bus *bus; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + if (chip->disabled || !chip->running) + return 0; + + bus = azx_bus(chip); + snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); + azx_clear_irq_pending(chip); + azx_stop_chip(chip); + if (bus->irq >= 0) { + free_irq(bus->irq, (void *)chip); + bus->irq = -1; + } + + return 0; +} + +static int azx_resume(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_ft *hda; + struct hdac_bus *bus; + int index; + struct snd_pcm_substream *substream; + struct azx_dev *azx_dev; + int err; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + bus = azx_bus(chip); + if (chip->disabled || !chip->running) + return 0; + + if (azx_acquire_irq(chip, 1) < 0) + return -EIO; + + index = chip->dev_index; + + snd_hdac_bus_exit_link_reset(bus); + usleep_range(1000, 1200); + + azx_init_chip(chip, 0); + + snd_power_change_state(card, SNDRV_CTL_POWER_D0); + + if (hda->substream && hda->substream->runtime) { + substream = hda->substream; + + if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) { + substream->runtime->status->state = + substream->runtime->status->suspended_state; + err = substream->ops->prepare(substream); + if (err < 0) + return err; + } + + azx_dev = get_azx_dev(substream); + hda->substream = NULL; + } + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static int azx_runtime_suspend(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_ft *hda; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + if (chip->disabled) +
return 0; + + if (!azx_has_pm_runtime(chip)) + return 0; + + azx_stop_chip(chip); + azx_enter_link_reset(chip); + azx_clear_irq_pending(chip); + + return 0; +} + +static int azx_runtime_resume(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_ft *hda; + struct hdac_bus *bus; + struct hda_codec *codec; + int status; + int index; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + bus = azx_bus(chip); + if (chip->disabled) + return 0; + + if (!azx_has_pm_runtime(chip)) + return 0; + + /* Read STATESTS before controller reset */ + status = azx_readw(chip, STATESTS); + + index = chip->dev_index; + + snd_hdac_bus_exit_link_reset(bus); + usleep_range(1000, 1200); + + azx_init_chip(chip, 0); + + if (status) { + list_for_each_codec(codec, &chip->bus) + if (status & (1 << codec->addr)) + schedule_delayed_work(&codec->jackpoll_work, + codec->jackpoll_interval); + } + + return 0; +} + +static int azx_runtime_idle(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_ft *hda; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + if (chip->disabled) + return 0; + + if (!azx_has_pm_runtime(chip) || + azx_bus(chip)->codec_powered || !chip->running) + return -EBUSY; + + return 0; +} + +static const struct dev_pm_ops azx_pm = { + SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume) + SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle) +}; + +#define hda_ft_pm (&azx_pm) +#else +#define hda_ft_pm NULL +#endif /* CONFIG_PM */ + +static int azx_probe_continue(struct azx *chip); + +/* + * destructor + */ +static int azx_free(struct azx *chip) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + struct hdac_bus *bus = azx_bus(chip); + struct platform_device *pdev = to_platform_device(hda->dev); + struct device *hddev = hda->dev; + + if (azx_has_pm_runtime(chip) && chip->running) + pm_runtime_get_noresume(&pdev->dev); + + azx_del_card_list(chip); + + complete_all(&hda->probe_wait); + + if (bus->chip_init) { + azx_clear_irq_pending(chip); + azx_stop_all_streams(chip); + azx_stop_chip(chip); + } + + if (bus->irq >= 0) { + free_irq(bus->irq, (void *)chip); + bus->irq = -1; + } + + devm_iounmap(hddev, bus->remap_addr); + + azx_free_stream_pages(chip); + azx_free_streams(chip); + snd_hdac_bus_exit(bus); + + return 0; +} + +static int azx_dev_disconnect(struct snd_device *device) +{ + struct azx *chip = device->device_data; + + chip->bus.shutdown = 1; + return 0; +} + +static int azx_dev_free(struct snd_device *device) +{ + return azx_free(device->device_data); +} + +static int check_position_fix(struct azx *chip, int fix) +{ + switch (fix) { + case POS_FIX_AUTO: + case POS_FIX_LPIB: + case POS_FIX_POSBUF: + case POS_FIX_VIACOMBO: + case POS_FIX_COMBO: + return fix; + } + + if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) { + dev_dbg(chip->card->dev, "Using LPIB position fix\n"); + return POS_FIX_LPIB; + } + return POS_FIX_AUTO; +} + +static void assign_position_fix(struct azx *chip, int fix) +{ + static azx_get_pos_callback_t callbacks[] = { + [POS_FIX_AUTO] = NULL, + [POS_FIX_LPIB] = azx_get_pos_lpib, + [POS_FIX_POSBUF] = azx_get_pos_posbuf, + [POS_FIX_VIACOMBO] = azx_via_get_position, + [POS_FIX_COMBO] = azx_get_pos_lpib, + }; + + chip->get_position[0] = chip->get_position[1] = callbacks[fix]; + + /* combo mode uses LPIB only for playback */ + if (fix == POS_FIX_COMBO) 
+ chip->get_position[1] = NULL; + + if (fix == POS_FIX_POSBUF && + (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY)) { + chip->get_delay[0] = chip->get_delay[1] = + azx_get_delay_from_lpib; + } + +} + +#define AZX_FORCE_CODEC_MASK 0x100 + +static void check_probe_mask(struct azx *chip, int dev) +{ + chip->codec_probe_mask = probe_mask[dev]; + + /* check forced option */ + if (chip->codec_probe_mask != -1 && + (chip->codec_probe_mask & AZX_FORCE_CODEC_MASK)) { + azx_bus(chip)->codec_mask = chip->codec_probe_mask & 0xff; + dev_info(chip->card->dev, "codec_mask forced to 0x%x\n", + (int)azx_bus(chip)->codec_mask); + } +} + +static void azx_probe_work(struct work_struct *work) +{ + struct hda_ft *hda = container_of(work, struct hda_ft, probe_work); + + azx_probe_continue(&hda->chip); +} + +/* + * constructor + */ +static const struct hda_controller_ops axi_hda_ops; + +static int hda_ft_create(struct snd_card *card, struct platform_device *pdev, + int dev, unsigned int driver_caps, + struct azx **rchip) +{ + static struct snd_device_ops ops = { + .dev_disconnect = azx_dev_disconnect, + .dev_free = azx_dev_free, + }; + struct hda_ft *hda; + struct azx *chip; + int err; + + *rchip = NULL; + + hda = devm_kzalloc(&pdev->dev, sizeof(*hda), GFP_KERNEL); + if (!hda) + return -ENOMEM; + hda->dev = &pdev->dev; + chip = &hda->chip; + mutex_init(&chip->open_mutex); + chip->card = card; + chip->ops = &axi_hda_ops; + chip->driver_caps = driver_caps; + chip->driver_type = driver_caps & 0xff; + chip->dev_index = dev; + if (jackpoll_ms[dev] >= 50 && jackpoll_ms[dev] <= 60000) + chip->jackpoll_interval = msecs_to_jiffies(jackpoll_ms[dev]); + INIT_LIST_HEAD(&chip->pcm_list); + INIT_WORK(&hda->irq_pending_work, azx_irq_pending_work); + INIT_LIST_HEAD(&hda->list); + + init_completion(&hda->probe_wait); + assign_position_fix(chip, check_position_fix(chip, position_fix[dev])); + check_probe_mask(chip, dev); + + if (single_cmd < 0) /* allow fallback to single_cmd at errors */ + chip->fallback_to_single_cmd = 0; + else /* explicitly set to single_cmd or not */ + chip->single_cmd = single_cmd; + + if (bdl_pos_adj[dev] < 0) { + switch (chip->driver_type) { + case AZX_DRIVER_FT: + bdl_pos_adj[dev] = 32; + break; + default: + bdl_pos_adj[dev] = 32; + break; + } + } + chip->bdl_pos_adj = bdl_pos_adj[dev]; + + err = azx_bus_init(chip, model[dev]); + if (err < 0) + return err; + + chip->bus.core.aligned_mmio = 1; + + err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); + if (err < 0) { + dev_err(card->dev, "Error creating device [card]!\n"); + azx_free(chip); + return err; + } + + /* continue probing in work context as may trigger request module */ + INIT_WORK(&hda->probe_work, azx_probe_work); + + *rchip = chip; + + return 0; +} + +static int azx_first_init(struct azx *chip) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + struct platform_device *pdev = to_platform_device(hda->dev); + struct device *hddev = hda->dev; + + int dev = chip->dev_index; + bool full_reset; + + struct snd_card *card = chip->card; + struct hdac_bus *bus = azx_bus(chip); + int err; + unsigned short gcap; + unsigned int dma_bits = 64; + + struct resource *res; + const struct acpi_device_id *match; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hda->regs = devm_ioremap_resource(hddev, res); + if (IS_ERR(hda->regs)) + return PTR_ERR(hda->regs); + + bus->addr = res->start; + bus->remap_addr = hda->regs; + if (bus->remap_addr == NULL) { + dev_err(card->dev, "ioremap error\n"); + return -ENXIO; + } + + 
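+	/* Phytium HDA can drop CORB verbs; set the resend flag so snd_hdac_bus_send_cmd_corb() re-queues commands that get no RIRB response (see the sound/hda/core/controller.c hunk below). */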
bus->cmd_resend = 1; + + if (azx_acquire_irq(chip, 0) < 0) + return -EBUSY; + + synchronize_irq(bus->irq); + + gcap = azx_readw(chip, GCAP); + dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap); + + /* disable 64bit DMA address on some devices */ + if (chip->driver_caps & AZX_DCAPS_NO_64BIT) { + dev_dbg(card->dev, "Disabling 64bit DMA\n"); + gcap &= ~AZX_GCAP_64OK; + } + + /* disable buffer size rounding to 128-byte multiples if supported */ + if (align_buffer_size >= 0) + chip->align_buffer_size = !!align_buffer_size; + else { + if (chip->driver_caps & AZX_DCAPS_NO_ALIGN_BUFSIZE) + chip->align_buffer_size = 0; + else + chip->align_buffer_size = 1; + } + + if (has_acpi_companion(hddev)) { + match = acpi_match_device(hddev->driver->acpi_match_table, hddev); + if (!match) { + dev_err(hddev, "Error ACPI match data is missing\n"); + return -ENODEV; + } + set_dma_ops(hddev, NULL); + acpi_dma_configure(hddev, DEV_DMA_NON_COHERENT); + } + + /* allow 64bit DMA address if supported by H/W */ + if (!(gcap & AZX_GCAP_64OK)) + dma_bits = 32; + if (!dma_set_mask(hddev, DMA_BIT_MASK(dma_bits))) { + dma_set_coherent_mask(hddev, DMA_BIT_MASK(dma_bits)); + } else { + dma_set_mask(hddev, DMA_BIT_MASK(32)); + dma_set_coherent_mask(hddev, DMA_BIT_MASK(32)); + } + + /* read number of streams from GCAP register instead of using + * hardcoded value + */ + chip->capture_streams = (gcap >> 8) & 0x0f; + chip->playback_streams = (gcap >> 12) & 0x0f; + if (!chip->playback_streams && !chip->capture_streams) { + /* gcap didn't give any info, switching to old method */ + chip->playback_streams = FT4C_NUM_PLAYBACK; + chip->capture_streams = FT4C_NUM_CAPTURE; + } + chip->capture_index_offset = 0; + chip->playback_index_offset = chip->capture_streams; + chip->num_streams = chip->playback_streams + chip->capture_streams; + + /* initialize streams */ + err = azx_init_streams(chip); + if (err < 0) + return err; + + err = azx_alloc_stream_pages(chip); + if (err < 0) + return err; + + full_reset = (probe_only[dev] & 2) ? 
false : true; + azx_init_chip(chip, full_reset); + + /* codec detection */ + if (!azx_bus(chip)->codec_mask) { + dev_err(card->dev, "no codecs found!\n"); + return -ENODEV; + } + + strscpy(card->driver, "ft-hda", sizeof(card->driver)); + strscpy(card->shortname, "ft-hda", sizeof(card->shortname)); + snprintf(card->longname, sizeof(card->longname), + "%s at 0x%lx irq %i", + card->shortname, bus->addr, bus->irq); + + return 0; +} + + +static const struct hda_controller_ops axi_hda_ops = { + .position_check = azx_position_check, + .link_power = azx_ft_link_power, +}; + +static DECLARE_BITMAP(probed_devs, SNDRV_CARDS); + +static int hda_ft_probe(struct platform_device *pdev) +{ + const unsigned int driver_flags = AZX_DRIVER_FT; + struct snd_card *card; + struct hda_ft *hda; + struct azx *chip; + bool schedule_probe; + int err; + int dev; + + dev = find_first_zero_bit(probed_devs, SNDRV_CARDS); + + if (dev >= SNDRV_CARDS) + return -ENODEV; + if (!enable[dev]) { + set_bit(dev, probed_devs); + return -ENOENT; + } + + err = snd_card_new(&pdev->dev, index[dev], id[dev], THIS_MODULE, + 0, &card); + if (err < 0) { + dev_err(&pdev->dev, "Error creating card!\n"); + return err; + } + + err = hda_ft_create(card, pdev, dev, driver_flags, &chip); + if (err < 0) + goto out_free; + card->private_data = chip; + hda = container_of(chip, struct hda_ft, chip); + + dev_set_drvdata(&pdev->dev, card); + + schedule_probe = !chip->disabled; + + if (schedule_probe) + schedule_work(&hda->probe_work); + + set_bit(dev, probed_devs); + if (chip->disabled) + complete_all(&hda->probe_wait); + return 0; + +out_free: + snd_card_free(card); + return err; +} + +/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */ +static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { + [AZX_DRIVER_FT] = 4, +}; + +static int azx_probe_continue(struct azx *chip) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + struct device *hddev = hda->dev; + int dev = chip->dev_index; + int err; + struct hdac_bus *bus = azx_bus(chip); + + hda->probe_continued = 1; + + err = azx_first_init(chip); + if (err < 0) + goto out_free; + +#ifdef CONFIG_SND_HDA_INPUT_BEEP + chip->beep_mode = beep_mode[dev]; +#endif + + /* create codec instances */ + err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]); + if (err < 0) + goto out_free; + + if ((probe_only[dev] & 1) == 0) { + err = azx_codec_configure(chip); + if (err < 0) + goto out_free; + } + + err = snd_card_register(chip->card); + if (err < 0) + goto out_free; + + chip->running = 1; + azx_add_card_list(chip); + snd_hda_set_power_save(&chip->bus, power_save * 1000); + + if (azx_has_pm_runtime(chip)) + pm_runtime_put_noidle(hddev); + return err; + +out_free: + if (bus->irq >= 0) { + free_irq(bus->irq, (void *)chip); + bus->irq = -1; + } + return err; +} + +static void hda_ft_remove(struct platform_device *pdev) +{ + struct snd_card *card = dev_get_drvdata(&pdev->dev); + struct azx *chip; + struct hda_ft *hda; + + if (card) { + /* cancel the pending probing work */ + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + cancel_work_sync(&hda->probe_work); + clear_bit(chip->dev_index, probed_devs); + + snd_card_free(card); + } +} + +static void hda_ft_shutdown(struct platform_device *pdev) +{ + struct snd_card *card = dev_get_drvdata(&pdev->dev); + struct azx *chip; + + if (!card) + return; + chip = card->private_data; + if (chip && chip->running) + azx_stop_chip(chip); +} + +static const struct of_device_id hda_ft_of_match[] = { + { .compatible 
= "phytium,hda" }, + {}, +}; +MODULE_DEVICE_TABLE(of, hda_ft_of_match); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id hda_ft_acpi_match[] = { + { .id = "PHYT0006" }, + {} +}; +MODULE_DEVICE_TABLE(acpi, hda_ft_acpi_match); +#else +#define hda_ft_acpi_match NULL +#endif + +static struct platform_driver ft_platform_hda = { + .driver = { + .name = "ft-hda", + .pm = hda_ft_pm, + .of_match_table = hda_ft_of_match, + .acpi_match_table = hda_ft_acpi_match, + }, + .probe = hda_ft_probe, + .remove = hda_ft_remove, + .shutdown = hda_ft_shutdown, +}; + +module_platform_driver(ft_platform_hda); + +MODULE_DESCRIPTION("FT HDA bus driver"); +MODULE_LICENSE("GPL"); diff --git a/sound/hda/controllers/phytium.h b/sound/hda/controllers/phytium.h new file mode 100644 index 0000000000000..a2183ef5e0d83 --- /dev/null +++ b/sound/hda/controllers/phytium.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Implementation of primary ALSA driver code base for Phytium HD Audio. + * + * Copyright(c) 2018-2022, Phytium Technology Co., Ltd. + */ +#ifndef __SOUND_HDA_PHYTIUM_H__ +#define __SOUND_HDA_PHYTIUM_H__ + +#include "hda_controller.h" + +struct hda_ft { + struct azx chip; + struct snd_pcm_substream *substream; + struct device *dev; + void __iomem *regs; + + /* for pending irqs */ + struct work_struct irq_pending_work; + + /* sync probing */ + struct completion probe_wait; + struct work_struct probe_work; + + /* card list (for power_save trigger) */ + struct list_head list; + + /* extra flags */ + unsigned int irq_pending_warned:1; + unsigned int probe_continued:1; + +}; + +#endif diff --git a/sound/hda/core/controller.c b/sound/hda/core/controller.c index 675462b573d25..d25d274815300 100644 --- a/sound/hda/core/controller.c +++ b/sound/hda/core/controller.c @@ -220,6 +220,9 @@ static int snd_hdac_bus_send_cmd_corb(struct hdac_bus *bus, unsigned int val) { unsigned int addr = azx_command_addr(val); unsigned int wp, rp; + unsigned long timeout; + unsigned int rirb_wp; + int i = 0; guard(spinlock_irq)(&bus->reg_lock); @@ -244,6 +247,40 @@ static int snd_hdac_bus_send_cmd_corb(struct hdac_bus *bus, unsigned int val) bus->corb.buf[wp] = cpu_to_le32(val); snd_hdac_chip_writew(bus, CORBWP, wp); + if (bus->cmd_resend) { + timeout = jiffies + msecs_to_jiffies(1000); + udelay(80); + rirb_wp = snd_hdac_chip_readw(bus, RIRBWP); + while (rirb_wp == bus->rirb.wp) { + udelay(80); + rirb_wp = snd_hdac_chip_readw(bus, RIRBWP); + if (rirb_wp != bus->rirb.wp) + break; + if (i > 5) + break; + if (time_after(jiffies, timeout)) + break; + + /* add command to corb */ + wp = snd_hdac_chip_readw(bus, CORBWP); + if (wp == 0xffff) { + /* something wrong, controller likely turned to D3 */ + return -EIO; + } + wp++; + wp %= AZX_MAX_CORB_ENTRIES; + + rp = snd_hdac_chip_readw(bus, CORBRP); + if (wp == rp) { + /* oops, it's full */ + return -EAGAIN; + } + bus->corb.buf[wp] = cpu_to_le32(val); + snd_hdac_chip_writew(bus, CORBWP, wp); + i++; + } + } + return 0; } diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig index ce74818bd7152..40964d70964a8 100644 --- a/sound/soc/Kconfig +++ b/sound/soc/Kconfig @@ -121,6 +121,7 @@ source "sound/soc/intel/Kconfig" source "sound/soc/mediatek/Kconfig" source "sound/soc/meson/Kconfig" source "sound/soc/mxs/Kconfig" +source "sound/soc/phytium/Kconfig" source "sound/soc/pxa/Kconfig" source "sound/soc/qcom/Kconfig" source "sound/soc/renesas/Kconfig" diff --git a/sound/soc/Makefile b/sound/soc/Makefile index 462322c38aa42..34fd226645309 100644 --- a/sound/soc/Makefile +++ 
b/sound/soc/Makefile @@ -63,6 +63,7 @@ obj-$(CONFIG_SND_SOC) += mediatek/ obj-$(CONFIG_SND_SOC) += meson/ obj-$(CONFIG_SND_SOC) += mxs/ obj-$(CONFIG_SND_SOC) += kirkwood/ +obj-$(CONFIG_SND_SOC) += phytium/ obj-$(CONFIG_SND_SOC) += pxa/ obj-$(CONFIG_SND_SOC) += qcom/ obj-$(CONFIG_SND_SOC) += renesas/ diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 91ac99bc3edb2..46f2a60aaf6c3 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -1253,6 +1253,15 @@ config SND_SOC_FS210X Digital Input Class-D Power Amplifiers with Enhanced Signal Processing. The amplifiers support I2C and I2S/TDM. +config SND_SOC_ES8336 + tristate "Everest Semi ES8336 CODEC" + depends on I2C + select GPIO_PHYTIUM_PCI + +config SND_SOC_ES8388 + tristate "Everest Semi ES8388 CODEC" + depends on I2C + config SND_SOC_GTM601 tristate 'GTM601 UMTS modem audio codec' diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index da4077463278e..cecf5d6322629 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -418,6 +418,8 @@ snd-soc-wsa881x-y := wsa881x.o snd-soc-wsa883x-y := wsa883x.o snd-soc-wsa884x-y := wsa884x.o snd-soc-zl38060-y := zl38060.o +snd-soc-es8336-objs := es8336.o +snd-soc-es8388-objs := es8388.o # Amp snd-soc-max9877-y := max9877.o snd-soc-max98504-y := max98504.o @@ -567,6 +569,8 @@ obj-$(CONFIG_SND_SOC_ES8328) += snd-soc-es8328.o obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o obj-$(CONFIG_SND_SOC_ES8328_SPI)+= snd-soc-es8328-spi.o obj-$(CONFIG_SND_SOC_ES8375) += snd-soc-es8375.o +obj-$(CONFIG_SND_SOC_ES8336)+= snd-soc-es8336.o +obj-$(CONFIG_SND_SOC_ES8388) += snd-soc-es8388.o obj-$(CONFIG_SND_SOC_ES8389) += snd-soc-es8389.o obj-$(CONFIG_SND_SOC_FRAMER) += snd-soc-framer.o obj-$(CONFIG_SND_SOC_FS_AMP_LIB)+= snd-soc-fs-amp-lib.o diff --git a/sound/soc/codecs/es8336.c b/sound/soc/codecs/es8336.c new file mode 100644 index 0000000000000..7626300ae2b7f --- /dev/null +++ b/sound/soc/codecs/es8336.c @@ -0,0 +1,1083 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * es8336 ALSA SoC audio driver + * + * Copyright (C) Everest Semiconductor Co.,Ltd + * Copyright (C) 2022-2023, Phytium Technology Co., Ltd. 
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/regmap.h>
+#include <linux/acpi.h>
+#include <linux/property.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+#include <sound/soc.h>
+#include "es8336.h"
+
+#define INVALID_GPIO -1
+#define GPIO_LOW  0
+#define GPIO_HIGH 1
+#define ES8336_MUTE (1 << 5)
+
+static struct snd_soc_component *es8336_component;
+
+static const struct reg_default es8336_reg_defaults[] = {
+	{0x00, 0x03}, {0x01, 0x03}, {0x02, 0x00}, {0x03, 0x20},
+	{0x04, 0x11}, {0x05, 0x00}, {0x06, 0x11}, {0x07, 0x00},
+	{0x08, 0x00}, {0x09, 0x01}, {0x0a, 0x00}, {0x0b, 0x00},
+	{0x0c, 0xf8}, {0x0d, 0x3f}, {0x0e, 0x00}, {0x0f, 0x00},
+	{0x10, 0x01}, {0x11, 0xfc}, {0x12, 0x28}, {0x13, 0x00},
+	{0x14, 0x00}, {0x15, 0x33}, {0x16, 0x00}, {0x17, 0x00},
+	{0x18, 0x88}, {0x19, 0x06}, {0x1a, 0x22}, {0x1b, 0x03},
+	{0x1c, 0x0f}, {0x1d, 0x00}, {0x1e, 0x80}, {0x1f, 0x80},
+	{0x20, 0x00}, {0x21, 0x00}, {0x22, 0xc0}, {0x23, 0x00},
+	{0x24, 0x01}, {0x25, 0x08}, {0x26, 0x10}, {0x27, 0xc0},
+	{0x28, 0x00}, {0x29, 0x1c}, {0x2a, 0x00}, {0x2b, 0xb0},
+	{0x2c, 0x32}, {0x2d, 0x03}, {0x2e, 0x00}, {0x2f, 0x11},
+	{0x30, 0x10}, {0x31, 0x00}, {0x32, 0x00}, {0x33, 0xc0},
+	{0x34, 0xc0}, {0x35, 0x1f}, {0x36, 0xf7}, {0x37, 0xfd},
+	{0x38, 0xff}, {0x39, 0x1f}, {0x3a, 0xf7}, {0x3b, 0xfd},
+	{0x3c, 0xff}, {0x3d, 0x1f}, {0x3e, 0xf7}, {0x3f, 0xfd},
+	{0x40, 0xff}, {0x41, 0x1f}, {0x42, 0xf7}, {0x43, 0xfd},
+	{0x44, 0xff}, {0x45, 0x1f}, {0x46, 0xf7}, {0x47, 0xfd},
+	{0x48, 0xff}, {0x49, 0x1f}, {0x4a, 0xf7}, {0x4b, 0xfd},
+	{0x4c, 0xff}, {0x4d, 0x00}, {0x4e, 0x00}, {0x4f, 0xff},
+	{0x50, 0x00}, {0x51, 0x00}, {0x52, 0x00}, {0x53, 0x00},
+};
+
+/* codec private data */
+struct es8336_priv {
+	struct regmap *regmap;
+	unsigned int dmic_amic;
+	unsigned int sysclk;
+	struct snd_pcm_hw_constraint_list *sysclk_constraints;
+	struct clk *mclk;
+	int debounce_time;
+	int hp_det_invert;
+	struct delayed_work work;
+
+	int spk_ctl_gpio;
+	int hp_det_gpio;
+	bool muted;
+	bool hp_inserted;
+	bool spk_active_level;
+
+	int pwr_count;
+};
+
+/*
+ * es8336_reset
+ * write 0x3F to reg 0x00 to put the chip into reset mode,
+ * then write 0x03 to reg 0x00 to release it from reset
+ */
+static int es8336_reset(struct snd_soc_component *component)
+{
+	snd_soc_component_write(component, ES8336_RESET_REG00, 0x3F);
+	usleep_range(5000, 5500);
+	return snd_soc_component_write(component, ES8336_RESET_REG00, 0x03);
+}
+
+static void es8336_enable_spk(struct es8336_priv *es8336, bool enable)
+{
+	bool level;
+
+	if (es8336->spk_ctl_gpio == INVALID_GPIO)
+		return;
+
+	level = enable ?
es8336->spk_active_level : !es8336->spk_active_level; + gpio_set_value(es8336->spk_ctl_gpio, level); +} + +static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -9600, 50, 1); +static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -9600, 50, 1); +static const DECLARE_TLV_DB_SCALE(hpmixer_gain_tlv, -1200, 150, 0); +static const DECLARE_TLV_DB_SCALE(mic_bst_tlv, 0, 1200, 0); + +static unsigned int linin_pga_tlv[] = { + TLV_DB_RANGE_HEAD(9), + 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0), + 1, 1, TLV_DB_SCALE_ITEM(300, 0, 0), + 2, 2, TLV_DB_SCALE_ITEM(600, 0, 0), + 3, 3, TLV_DB_SCALE_ITEM(900, 0, 0), + 4, 4, TLV_DB_SCALE_ITEM(1200, 0, 0), + 5, 5, TLV_DB_SCALE_ITEM(1500, 0, 0), + 6, 6, TLV_DB_SCALE_ITEM(1800, 0, 0), + 7, 7, TLV_DB_SCALE_ITEM(2100, 0, 0), + 8, 8, TLV_DB_SCALE_ITEM(2400, 0, 0), +}; + +static unsigned int hpout_vol_tlv[] = { + TLV_DB_RANGE_HEAD(1), + 0, 3, TLV_DB_SCALE_ITEM(-4800, 1200, 0), +}; + +static const char *const alc_func_txt[] = { "Off", "On" }; + +static const struct soc_enum alc_func = + SOC_ENUM_SINGLE(ES8336_ADC_ALC1_REG29, 6, 2, alc_func_txt); + +static const char *const ng_type_txt[] = { + "Constant PGA Gain", "Mute ADC Output" }; + +static const struct soc_enum ng_type = + SOC_ENUM_SINGLE(ES8336_ADC_ALC6_REG2E, 6, 2, ng_type_txt); + +static const char *const adcpol_txt[] = { "Normal", "Invert" }; + +static const struct soc_enum adcpol = + SOC_ENUM_SINGLE(ES8336_ADC_MUTE_REG26, 1, 2, adcpol_txt); + +static const char *const dacpol_txt[] = { + "Normal", "R Invert", "L Invert", "L + R Invert" }; + +static const struct soc_enum dacpol = + SOC_ENUM_SINGLE(ES8336_DAC_SET1_REG30, 0, 4, dacpol_txt); + +static const struct snd_kcontrol_new es8336_snd_controls[] = { + /* HP OUT VOLUME */ + SOC_DOUBLE_TLV("HP Playback Volume", ES8336_CPHP_ICAL_VOL_REG18, + 4, 0, 4, 1, hpout_vol_tlv), + /* HPMIXER VOLUME Control */ + SOC_DOUBLE_TLV("HPMixer Gain", ES8336_HPMIX_VOL_REG16, + 0, 4, 7, 0, hpmixer_gain_tlv), + + /* DAC Digital controls */ + SOC_DOUBLE_R_TLV("DAC Playback Volume", ES8336_DAC_VOLL_REG33, + ES8336_DAC_VOLR_REG34, 0, 0xC0, 1, dac_vol_tlv), + + SOC_SINGLE("Enable DAC Soft Ramp", ES8336_DAC_SET1_REG30, 4, 1, 1), + SOC_SINGLE("DAC Soft Ramp Rate", ES8336_DAC_SET1_REG30, 2, 4, 0), + + SOC_ENUM("Playback Polarity", dacpol), + SOC_SINGLE("DAC Notch Filter", ES8336_DAC_SET2_REG31, 6, 1, 0), + SOC_SINGLE("DAC Double Fs Mode", ES8336_DAC_SET2_REG31, 7, 1, 0), + SOC_SINGLE("DAC Volume Control-LeR", ES8336_DAC_SET2_REG31, 2, 1, 0), + SOC_SINGLE("DAC Stereo Enhancement", ES8336_DAC_SET3_REG32, 0, 7, 0), + + /* +20dB D2SE PGA Control */ + SOC_SINGLE_TLV("MIC Boost", ES8336_ADC_D2SEPGA_REG24, + 0, 1, 0, mic_bst_tlv), + /* 0-+24dB Lineinput PGA Control */ + SOC_SINGLE_TLV("Input PGA", ES8336_ADC_PGAGAIN_REG23, + 4, 8, 0, linin_pga_tlv), +}; + +/* Analog Input MUX */ +static const char * const es8336_analog_in_txt[] = { + "lin1-rin1", + "lin2-rin2", + "lin1-rin1 with 20db Boost", + "lin2-rin2 with 20db Boost" +}; + +static const unsigned int es8336_analog_in_values[] = { 0, 1, 2, 3 }; + +static const struct soc_enum es8336_analog_input_enum = + SOC_VALUE_ENUM_SINGLE(ES8336_ADC_PDN_LINSEL_REG22, 4, 3, + ARRAY_SIZE(es8336_analog_in_txt), + es8336_analog_in_txt, + es8336_analog_in_values); + +static const struct snd_kcontrol_new es8336_analog_in_mux_controls = + SOC_DAPM_ENUM("Route", es8336_analog_input_enum); + +/* Dmic MUX */ +static const char * const es8336_dmic_txt[] = { + "dmic disable", + "dmic data at high level", + "dmic data at low level", +}; + +static const unsigned int es8336_dmic_values[] = { 0, 
2, 3 }; + +static const struct soc_enum es8336_dmic_src_enum = + SOC_VALUE_ENUM_SINGLE(ES8336_ADC_DMIC_REG25, 0, 3, + ARRAY_SIZE(es8336_dmic_txt), + es8336_dmic_txt, + es8336_dmic_values); + +static const struct snd_kcontrol_new es8336_dmic_src_controls = + SOC_DAPM_ENUM("Route", es8336_dmic_src_enum); + +/* hp mixer mux */ +static const char *const es8336_hpmux_texts[] = { + "lin1-rin1", + "lin2-rin2", + "lin-rin with Boost", + "lin-rin with Boost and PGA" +}; + +static const unsigned int es8336_hpmux_values[] = { 0, 1, 2, 3 }; + +static const struct soc_enum es8336_left_hpmux_enum = + SOC_VALUE_ENUM_SINGLE(ES8336_HPMIX_SEL_REG13, 4, 7, + ARRAY_SIZE(es8336_hpmux_texts), + es8336_hpmux_texts, + es8336_hpmux_values); + +static const struct snd_kcontrol_new es8336_left_hpmux_controls = + SOC_DAPM_ENUM("Route", es8336_left_hpmux_enum); + +static const struct soc_enum es8336_right_hpmux_enum = + SOC_VALUE_ENUM_SINGLE(ES8336_HPMIX_SEL_REG13, 0, 7, + ARRAY_SIZE(es8336_hpmux_texts), + es8336_hpmux_texts, + es8336_hpmux_values); + +static const struct snd_kcontrol_new es8336_right_hpmux_controls = + SOC_DAPM_ENUM("Route", es8336_right_hpmux_enum); + +/* headphone Output Mixer */ +static const struct snd_kcontrol_new es8336_out_left_mix[] = { + SOC_DAPM_SINGLE("LLIN Switch", ES8336_HPMIX_SWITCH_REG14, + 6, 1, 0), + SOC_DAPM_SINGLE("Left DAC Switch", ES8336_HPMIX_SWITCH_REG14, + 7, 1, 0), +}; + +static const struct snd_kcontrol_new es8336_out_right_mix[] = { + SOC_DAPM_SINGLE("RLIN Switch", ES8336_HPMIX_SWITCH_REG14, + 2, 1, 0), + SOC_DAPM_SINGLE("Right DAC Switch", ES8336_HPMIX_SWITCH_REG14, + 3, 1, 0), +}; + +/* DAC data source mux */ +static const char * const es8336_dacsrc_texts[] = { + "LDATA TO LDAC, RDATA TO RDAC", + "LDATA TO LDAC, LDATA TO RDAC", + "RDATA TO LDAC, RDATA TO RDAC", + "RDATA TO LDAC, LDATA TO RDAC", +}; + +static const unsigned int es8336_dacsrc_values[] = { 0, 1, 2, 3 }; + +static const struct soc_enum es8336_dacsrc_mux_enum = + SOC_VALUE_ENUM_SINGLE(ES8336_DAC_SET1_REG30, 6, 4, + ARRAY_SIZE(es8336_dacsrc_texts), + es8336_dacsrc_texts, + es8336_dacsrc_values); +static const struct snd_kcontrol_new es8336_dacsrc_mux_controls = + SOC_DAPM_ENUM("Route", es8336_dacsrc_mux_enum); + +static const struct snd_soc_dapm_widget es8336_dapm_widgets[] = { + /* Input Lines */ + SND_SOC_DAPM_INPUT("DMIC"), + SND_SOC_DAPM_INPUT("MIC1"), + SND_SOC_DAPM_INPUT("MIC2"), + + SND_SOC_DAPM_MICBIAS("micbias", SND_SOC_NOPM, + 0, 0), + /* Input MUX */ + SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0, + &es8336_analog_in_mux_controls), + + SND_SOC_DAPM_PGA("Line input PGA", ES8336_ADC_PDN_LINSEL_REG22, + 7, 1, NULL, 0), + + /* ADCs */ + SND_SOC_DAPM_ADC("Mono ADC", NULL, ES8336_ADC_PDN_LINSEL_REG22, 6, 1), + + /* Dmic MUX */ + SND_SOC_DAPM_MUX("Digital Mic Mux", SND_SOC_NOPM, 0, 0, + &es8336_dmic_src_controls), + + /* Digital Interface */ + SND_SOC_DAPM_AIF_OUT("I2S OUT", "I2S1 Capture", 0, + SND_SOC_NOPM, 0, 0), + + SND_SOC_DAPM_AIF_IN("I2S IN", "I2S1 Playback", 0, + SND_SOC_NOPM, 0, 0), + + /* DACs DATA SRC MUX */ + SND_SOC_DAPM_MUX("DAC SRC Mux", SND_SOC_NOPM, 0, 0, + &es8336_dacsrc_mux_controls), + /* DACs */ + SND_SOC_DAPM_DAC("Right DAC", NULL, ES8336_DAC_PDN_REG2F, 0, 1), + SND_SOC_DAPM_DAC("Left DAC", NULL, ES8336_DAC_PDN_REG2F, 4, 1), + + /* Headphone Output Side */ + /* hpmux for hp mixer */ + SND_SOC_DAPM_MUX("Left Hp mux", SND_SOC_NOPM, 0, 0, + &es8336_left_hpmux_controls), + SND_SOC_DAPM_MUX("Right Hp mux", SND_SOC_NOPM, 0, 0, + &es8336_right_hpmux_controls), + /* Output mixer */ 
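+	/*
+	 * Playback output chain, as wired up in the routes below: each
+	 * DAC feeds an Hp mixer, which drives the charge-pump stage
+	 * (HPCP) and then the output driver (HPVOL) on the way to the
+	 * HPOL/HPOR pins.
+	 */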
+ SND_SOC_DAPM_MIXER("Left Hp mixer", ES8336_HPMIX_PDN_REG15, + 4, 1, &es8336_out_left_mix[0], + ARRAY_SIZE(es8336_out_left_mix)), + SND_SOC_DAPM_MIXER("Right Hp mixer", ES8336_HPMIX_PDN_REG15, + 0, 1, &es8336_out_right_mix[0], + ARRAY_SIZE(es8336_out_right_mix)), + + /* Output charge pump */ + SND_SOC_DAPM_PGA("HPCP L", ES8336_CPHP_OUTEN_REG17, + 6, 0, NULL, 0), + SND_SOC_DAPM_PGA("HPCP R", ES8336_CPHP_OUTEN_REG17, + 2, 0, NULL, 0), + + /* Output Driver */ + SND_SOC_DAPM_PGA("HPVOL L", ES8336_CPHP_OUTEN_REG17, + 5, 0, NULL, 0), + SND_SOC_DAPM_PGA("HPVOL R", ES8336_CPHP_OUTEN_REG17, + 1, 0, NULL, 0), + /* Output Lines */ + SND_SOC_DAPM_OUTPUT("HPOL"), + SND_SOC_DAPM_OUTPUT("HPOR"), +}; + +static const struct snd_soc_dapm_route es8336_dapm_routes[] = { + /* + * record route map + */ + {"MIC1", NULL, "micbias"}, + {"MIC2", NULL, "micbias"}, + {"DMIC", NULL, "micbias"}, + + {"Differential Mux", "lin1-rin1", "MIC1"}, + {"Differential Mux", "lin2-rin2", "MIC2"}, + {"Differential Mux", "lin1-rin1 with 20db Boost", "MIC1"}, + {"Differential Mux", "lin2-rin2 with 20db Boost", "MIC2"}, + {"Line input PGA", NULL, "Differential Mux"}, + + {"Mono ADC", NULL, "Line input PGA"}, + + {"Digital Mic Mux", "dmic disable", "Mono ADC"}, + {"Digital Mic Mux", "dmic data at high level", "DMIC"}, + {"Digital Mic Mux", "dmic data at low level", "DMIC"}, + + {"I2S OUT", NULL, "Digital Mic Mux"}, + /* + * playback route map + */ + {"DAC SRC Mux", "LDATA TO LDAC, RDATA TO RDAC", "I2S IN"}, + {"DAC SRC Mux", "LDATA TO LDAC, LDATA TO RDAC", "I2S IN"}, + {"DAC SRC Mux", "RDATA TO LDAC, RDATA TO RDAC", "I2S IN"}, + {"DAC SRC Mux", "RDATA TO LDAC, LDATA TO RDAC", "I2S IN"}, + + {"Left DAC", NULL, "DAC SRC Mux"}, + {"Right DAC", NULL, "DAC SRC Mux"}, + + {"Left Hp mux", "lin1-rin1", "MIC1"}, + {"Left Hp mux", "lin2-rin2", "MIC2"}, + {"Left Hp mux", "lin-rin with Boost", "Differential Mux"}, + {"Left Hp mux", "lin-rin with Boost and PGA", "Line input PGA"}, + + {"Right Hp mux", "lin1-rin1", "MIC1"}, + {"Right Hp mux", "lin2-rin2", "MIC2"}, + {"Right Hp mux", "lin-rin with Boost", "Differential Mux"}, + {"Right Hp mux", "lin-rin with Boost and PGA", "Line input PGA"}, + + {"Left Hp mixer", "LLIN Switch", "Left Hp mux"}, + {"Left Hp mixer", "Left DAC Switch", "Left DAC"}, + + {"Right Hp mixer", "RLIN Switch", "Right Hp mux"}, + {"Right Hp mixer", "Right DAC Switch", "Right DAC"}, + + {"HPCP L", NULL, "Left Hp mixer"}, + {"HPCP R", NULL, "Right Hp mixer"}, + + {"HPVOL L", NULL, "HPCP L"}, + {"HPVOL R", NULL, "HPCP R"}, + + {"HPOL", NULL, "HPVOL L"}, + {"HPOR", NULL, "HPVOL R"}, +}; + + +/* The set of rates we can generate from the above for each SYSCLK */ + +static unsigned int rates_12288[] = { + 8000, 12000, 16000, 24000, 24000, 32000, 48000, 96000, +}; + +static struct snd_pcm_hw_constraint_list constraints_12288 = { + .count = ARRAY_SIZE(rates_12288), + .list = rates_12288, +}; + +static unsigned int rates_112896[] = { + 8000, 11025, 22050, 44100, +}; + +static struct snd_pcm_hw_constraint_list constraints_112896 = { + .count = ARRAY_SIZE(rates_112896), + .list = rates_112896, +}; + +static unsigned int rates_12[] = { + 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, + 48000, 88235, 96000, +}; + +static struct snd_pcm_hw_constraint_list constraints_12 = { + .count = ARRAY_SIZE(rates_12), + .list = rates_12, +}; + +/* + * Note that this should be called from init rather than from hw_params. 
+ */ +static int es8336_set_dai_sysclk(struct snd_soc_dai *codec_dai, + int clk_id, unsigned int freq, int dir) +{ + struct snd_soc_component *component = codec_dai->component; + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + + switch (freq) { + case 11289600: + case 18432000: + case 22579200: + case 36864000: + es8336->sysclk_constraints = &constraints_112896; + es8336->sysclk = freq; + return 0; + case 12288000: + case 19200000: + case 16934400: + case 24576000: + case 33868800: + es8336->sysclk_constraints = &constraints_12288; + es8336->sysclk = freq; + return 0; + case 12000000: + case 24000000: + es8336->sysclk_constraints = &constraints_12; + es8336->sysclk = freq; + return 0; + } + + return 0; +} + +static int es8336_set_dai_fmt(struct snd_soc_dai *codec_dai, + unsigned int fmt) +{ + struct snd_soc_component *component = codec_dai->component; + u8 iface = 0; + u8 adciface = 0; + u8 daciface = 0; + + iface = snd_soc_component_read(component, ES8336_IFACE); + adciface = snd_soc_component_read(component, ES8336_ADC_IFACE); + daciface = snd_soc_component_read(component, ES8336_DAC_IFACE); + + /* set master/slave audio interface */ + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBP_CFP: + iface |= 0x80; + break; + case SND_SOC_DAIFMT_CBC_CFC: + iface &= 0x7F; + break; + default: + return -EINVAL; + } + + /* interface format */ + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_I2S: + adciface &= 0xFC; + daciface &= 0xFC; + break; + case SND_SOC_DAIFMT_RIGHT_J: + return -EINVAL; + case SND_SOC_DAIFMT_LEFT_J: + adciface &= 0xFC; + daciface &= 0xFC; + adciface |= 0x01; + daciface |= 0x01; + break; + case SND_SOC_DAIFMT_DSP_A: + adciface &= 0xDC; + daciface &= 0xDC; + adciface |= 0x03; + daciface |= 0x03; + break; + case SND_SOC_DAIFMT_DSP_B: + adciface &= 0xDC; + daciface &= 0xDC; + adciface |= 0x23; + daciface |= 0x23; + break; + default: + return -EINVAL; + } + + /* clock inversion */ + switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_NB_NF: + iface &= 0xDF; + adciface &= 0xDF; + daciface &= 0xDF; + break; + case SND_SOC_DAIFMT_IB_IF: + iface |= 0x20; + adciface |= 0x20; + daciface |= 0x20; + break; + case SND_SOC_DAIFMT_IB_NF: + iface |= 0x20; + adciface &= 0xDF; + daciface &= 0xDF; + break; + case SND_SOC_DAIFMT_NB_IF: + iface &= 0xDF; + adciface |= 0x20; + daciface |= 0x20; + break; + default: + return -EINVAL; + } + snd_soc_component_write(component, ES8336_IFACE, iface); + snd_soc_component_write(component, ES8336_ADC_IFACE, adciface); + snd_soc_component_write(component, ES8336_DAC_IFACE, daciface); + return 0; +} + +static int es8336_pcm_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + bool playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); + + snd_soc_component_write(component, ES8336_RESET_REG00, 0xC0); + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x00); + /* es8336: both playback and capture need dac mclk */ + snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, + ES8336_CLKMGR_MCLK_DIV_MASK | + ES8336_CLKMGR_DAC_MCLK_MASK, + ES8336_CLKMGR_MCLK_DIV_NML | + ES8336_CLKMGR_DAC_MCLK_EN); + es8336->pwr_count++; + + if (playback) { + snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0x3F); + snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0x1F); + snd_soc_component_write(component, 
ES8336_HPMIX_SWITCH_REG14, 0x88); + snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0xBB); + snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x10); + snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x30); + snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x02); + snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x00); + snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x66); + snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, + ES8336_CLKMGR_DAC_MCLK_MASK | + ES8336_CLKMGR_DAC_ANALOG_MASK, + ES8336_CLKMGR_DAC_MCLK_EN | + ES8336_CLKMGR_DAC_ANALOG_EN); + msleep(50); + } else { + snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, + ES8336_CLKMGR_ADC_MCLK_MASK | + ES8336_CLKMGR_ADC_ANALOG_MASK, + ES8336_CLKMGR_ADC_MCLK_EN | + ES8336_CLKMGR_ADC_ANALOG_EN); + } + + return 0; +} + +static void es8336_pcm_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + bool playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); + + if (playback) { + snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00); + snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); + snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03); + snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22); + snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06); + snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33); + snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00); + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x00); + snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0xFF); + snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0xFF); + snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, + ES8336_CLKMGR_DAC_ANALOG_MASK, + ES8336_CLKMGR_DAC_ANALOG_DIS); + } else { + snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, + ES8336_CLKMGR_ADC_MCLK_MASK | + ES8336_CLKMGR_ADC_ANALOG_MASK, + ES8336_CLKMGR_ADC_MCLK_DIS | + ES8336_CLKMGR_ADC_ANALOG_DIS); + } + + if (--es8336->pwr_count == 0) { + if (!es8336->hp_inserted) + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x3F); + snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0xF3); + } +} + + +static int es8336_pcm_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + int val = 0; + + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S16_LE: + val = ES8336_DACWL_16; + break; + case SNDRV_PCM_FORMAT_S20_3LE: + val = ES8336_DACWL_20; + break; + case SNDRV_PCM_FORMAT_S24_LE: + val = ES8336_DACWL_24; + break; + case SNDRV_PCM_FORMAT_S32_LE: + val = ES8336_DACWL_32; + break; + default: + val = ES8336_DACWL_16; + break; + } + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + snd_soc_component_update_bits(component, ES8336_SDP_DACFMT_REG0B, + ES8336_DACWL_MASK, val); + else + snd_soc_component_update_bits(component, ES8336_SDP_ADCFMT_REG0A, + ES8336_ADCWL_MASK, val); + + return 0; +} + +static int es8336_mute(struct snd_soc_dai *dai, int mute, int direction) +{ + struct snd_soc_component *component = dai->component; + struct 
es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + + es8336->muted = mute; + if (!es8336->hp_inserted) + es8336_enable_spk(es8336, true); + else + es8336_enable_spk(es8336, false); + if (direction) + return snd_soc_component_update_bits(dai->component, ES8336_ADC_MUTE_REG26, + ES8336_MUTE, + mute ? ES8336_MUTE : 0); + else + return snd_soc_component_update_bits(dai->component, ES8336_DAC_SET1_REG30, + ES8336_MUTE, + mute ? ES8336_MUTE : 0); +} + +static int es8336_set_bias_level(struct snd_soc_component *component, + enum snd_soc_bias_level level) +{ + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + + switch (level) { + case SND_SOC_BIAS_ON: + break; + + case SND_SOC_BIAS_PREPARE: + break; + + case SND_SOC_BIAS_STANDBY: + break; + + case SND_SOC_BIAS_OFF: + snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00); + snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); + snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03); + snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22); + snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06); + snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33); + snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00); + if (!es8336->hp_inserted) + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x3F); + snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0x3F); + snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0x1F); + snd_soc_component_write(component, ES8336_RESET_REG00, 0x00); + break; + } + + return 0; +} + +#define es8336_RATES SNDRV_PCM_RATE_8000_96000 + +#define es8336_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ + SNDRV_PCM_FMTBIT_S24_LE) + +static const struct snd_soc_dai_ops es8336_ops = { + .startup = es8336_pcm_startup, + .hw_params = es8336_pcm_hw_params, + .set_fmt = es8336_set_dai_fmt, + .set_sysclk = es8336_set_dai_sysclk, + .mute_stream = es8336_mute, + .shutdown = es8336_pcm_shutdown, + .no_capture_mute = 1, +}; + +static struct snd_soc_dai_driver es8336_dai = { + .name = "es8336-hifi", + .playback = { + .stream_name = "Playback", + .channels_min = 1, + .channels_max = 2, + .rates = es8336_RATES, + .formats = es8336_FORMATS, + }, + .capture = { + .stream_name = "Capture", + .channels_min = 1, + .channels_max = 2, + .rates = es8336_RATES, + .formats = es8336_FORMATS, + }, + .ops = &es8336_ops, + .symmetric_rate = 1, +}; + +static int es8336_init_regs(struct snd_soc_component *component) +{ + snd_soc_component_write(component, ES8336_RESET_REG00, 0x3f); + usleep_range(5000, 5500); + snd_soc_component_write(component, ES8336_RESET_REG00, 0x00); + snd_soc_component_write(component, ES8336_SYS_VMIDSEL_REG0C, 0xFF); + msleep(30); + snd_soc_component_write(component, ES8336_CLKMGR_CLKSEL_REG02, 0x08); + snd_soc_component_write(component, ES8336_CLKMGR_ADCOSR_REG03, 0x20); + snd_soc_component_write(component, ES8336_CLKMGR_ADCDIV1_REG04, 0x11); + snd_soc_component_write(component, ES8336_CLKMGR_ADCDIV2_REG05, 0x00); + snd_soc_component_write(component, ES8336_CLKMGR_DACDIV1_REG06, 0x11); + snd_soc_component_write(component, ES8336_CLKMGR_DACDIV2_REG07, 0x00); + snd_soc_component_write(component, ES8336_CLKMGR_CPDIV_REG08, 0x00); + snd_soc_component_write(component, ES8336_SDP_MS_BCKDIV_REG09, 0x04); + snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0x7F); + snd_soc_component_write(component, 
ES8336_CAL_TYPE_REG1C, 0x0F);
+	snd_soc_component_write(component, ES8336_CAL_HPLIV_REG1E, 0x90);
+	snd_soc_component_write(component, ES8336_CAL_HPRIV_REG1F, 0x90);
+	snd_soc_component_write(component, ES8336_ADC_VOLUME_REG27, 0x00);
+	snd_soc_component_write(component, ES8336_ADC_PDN_LINSEL_REG22, 0xc0);
+	snd_soc_component_write(component, ES8336_ADC_D2SEPGA_REG24, 0x00);
+	snd_soc_component_write(component, ES8336_ADC_DMIC_REG25, 0x08);
+	snd_soc_component_write(component, ES8336_DAC_SET2_REG31, 0x20);
+	snd_soc_component_write(component, ES8336_DAC_SET3_REG32, 0x00);
+	snd_soc_component_write(component, ES8336_DAC_VOLL_REG33, 0x00);
+	snd_soc_component_write(component, ES8336_DAC_VOLR_REG34, 0x00);
+	snd_soc_component_write(component, ES8336_SDP_ADCFMT_REG0A, 0x00);
+	snd_soc_component_write(component, ES8336_SDP_DACFMT_REG0B, 0x00);
+	snd_soc_component_write(component, ES8336_SYS_VMIDLOW_REG10, 0x11);
+	snd_soc_component_write(component, ES8336_SYS_VSEL_REG11, 0xFC);
+	snd_soc_component_write(component, ES8336_SYS_REF_REG12, 0x28);
+	snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0x04);
+	snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0x0C);
+	snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11);
+	snd_soc_component_write(component, ES8336_HPMIX_SEL_REG13, 0x00);
+	snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x88);
+	snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x00);
+	snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0xBB);
+	snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x10);
+	snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x30);
+	snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x02);
+	snd_soc_component_write(component, ES8336_CPHP_ICAL_VOL_REG18, 0x00);
+	snd_soc_component_write(component, ES8336_GPIO_SEL_REG4D, 0x02);
+	snd_soc_component_write(component, ES8336_GPIO_DEBUNCE_INT_REG4E, 0x02);
+	snd_soc_component_write(component, ES8336_TESTMODE_REG50, 0xA0);
+	snd_soc_component_write(component, ES8336_TEST1_REG51, 0x00);
+	snd_soc_component_write(component, ES8336_TEST2_REG52, 0x00);
+	snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x00);
+	snd_soc_component_write(component, ES8336_RESET_REG00, 0xC0);
+	msleep(50);
+	snd_soc_component_write(component, ES8336_ADC_PGAGAIN_REG23, 0x60);
+	snd_soc_component_write(component, ES8336_ADC_D2SEPGA_REG24, 0x01);
+	/* adc ds mode, HPF enable */
+	snd_soc_component_write(component, ES8336_ADC_DMIC_REG25, 0x08);
+	snd_soc_component_write(component, ES8336_ADC_ALC1_REG29, 0xcd);
+	snd_soc_component_write(component, ES8336_ADC_ALC2_REG2A, 0x08);
+	snd_soc_component_write(component, ES8336_ADC_ALC3_REG2B, 0xa0);
+	snd_soc_component_write(component, ES8336_ADC_ALC4_REG2C, 0x05);
+	snd_soc_component_write(component, ES8336_ADC_ALC5_REG2D, 0x06);
+	snd_soc_component_write(component, ES8336_ADC_ALC6_REG2E, 0x61);
+	return 0;
+}
+
+static int es8336_suspend(struct snd_soc_component *component)
+{
+	return 0;
+}
+
+static int es8336_resume(struct snd_soc_component *component)
+{
+	struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component);
+	int ret;
+
+	es8336_reset(component); /* UPDATED BY DAVID,15-3-5 */
+	ret = snd_soc_component_read(component, ES8336_CLKMGR_ADCDIV2_REG05);
+	if (!ret) {
+		es8336_init_regs(component);
+		snd_soc_component_write(component, ES8336_GPIO_SEL_REG4D, 0x02);
+		/* max debounce time, enable interrupt, low active */
+		snd_soc_component_write(component, ES8336_GPIO_DEBUNCE_INT_REG4E, 0xf3);
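+
+		/*
+		 * The register writes that follow appear to replicate the
+		 * SND_SOC_BIAS_OFF sequence from es8336_set_bias_level();
+		 * the direct call is left commented out below, presumably
+		 * to avoid going through the DAPM bias-level machinery on
+		 * the resume path.
+		 */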
+		/* es8336_set_bias_level(component, SND_SOC_BIAS_OFF); */
+		snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00);
+		snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11);
+		snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03);
+		snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22);
+		snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06);
+		snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00);
+		snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33);
+		snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00);
+		if (!es8336->hp_inserted)
+			snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x3F);
+		snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0xFF);
+		snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0xFF);
+		snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0xF3);
+		snd_soc_component_write(component, ES8336_ADC_PDN_LINSEL_REG22, 0xc0);
+	}
+	return 0;
+}
+
+static irqreturn_t es8336_irq_handler(int irq, void *data)
+{
+	struct es8336_priv *es8336 = data;
+
+	queue_delayed_work(system_power_efficient_wq, &es8336->work,
+			   msecs_to_jiffies(es8336->debounce_time));
+
+	return IRQ_HANDLED;
+}
+
+static void hp_work(struct work_struct *work)
+{
+	struct es8336_priv *es8336;
+	int enable;
+
+	es8336 = container_of(work, struct es8336_priv, work.work);
+	enable = gpio_get_value(es8336->hp_det_gpio);
+	if (es8336->hp_det_invert)
+		enable = !enable;
+
+	es8336->hp_inserted = !enable;
+	if (!es8336->muted) {
+		if (es8336->hp_inserted)
+			es8336_enable_spk(es8336, false);
+		else
+			es8336_enable_spk(es8336, true);
+	}
+}
+
+static int es8336_probe(struct snd_soc_component *component)
+{
+	struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component);
+	int ret = 0;
+
+	es8336_component = component;
+	ret = snd_soc_component_read(component, ES8336_CLKMGR_ADCDIV2_REG05);
+	if (!ret) {
+		es8336_reset(component); /* UPDATED BY DAVID,15-3-5 */
+		ret = snd_soc_component_read(component, ES8336_CLKMGR_ADCDIV2_REG05);
+		if (!ret) {
+			es8336_init_regs(component);
+			snd_soc_component_write(component, ES8336_GPIO_SEL_REG4D, 0x02);
+			/* max debounce time, enable interrupt, low active */
+			snd_soc_component_write(component,
+						ES8336_GPIO_DEBUNCE_INT_REG4E, 0xf3);
+
+			/* es8336_set_bias_level(codec, SND_SOC_BIAS_OFF); */
+			snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00);
+			snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11);
+			snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03);
+			snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22);
+			snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06);
+			snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00);
+			snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33);
+			snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00);
+			if (!es8336->hp_inserted)
+				snd_soc_component_write(component, ES8336_SYS_PDN_REG0D,
+							0x3F);
+			snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0xFF);
+			snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0xFF);
+			snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0xF3);
+			snd_soc_component_write(component,
+						ES8336_ADC_PDN_LINSEL_REG22, 0xc0);
+		}
+	}
+
+	return ret;
+}
+
+static void es8336_remove(struct snd_soc_component *component)
+{
+	es8336_set_bias_level(component, SND_SOC_BIAS_OFF);
+}
+
+const struct regmap_config es8336_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register =
ES8336_TEST3_REG53, + .cache_type = REGCACHE_RBTREE, + .reg_defaults = es8336_reg_defaults, + .num_reg_defaults = ARRAY_SIZE(es8336_reg_defaults), +}; + +static const struct snd_soc_component_driver soc_component_dev_es8336 = { + .probe = es8336_probe, + .remove = es8336_remove, + .suspend = es8336_suspend, + .resume = es8336_resume, + .set_bias_level = es8336_set_bias_level, + + .controls = es8336_snd_controls, + .num_controls = ARRAY_SIZE(es8336_snd_controls), + .dapm_widgets = es8336_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(es8336_dapm_widgets), + .dapm_routes = es8336_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(es8336_dapm_routes), +}; + +static int es8336_i2c_probe(struct i2c_client *i2c) +{ + struct es8336_priv *es8336; + struct gpio_desc *gpiod; + int ret = -1; + int hp_irq; + int active_level = 0; + + es8336 = devm_kzalloc(&i2c->dev, sizeof(*es8336), GFP_KERNEL); + if (!es8336) + return -ENOMEM; + + es8336->debounce_time = 200; + es8336->hp_det_invert = 0; + es8336->pwr_count = 0; + es8336->hp_inserted = false; + es8336->muted = true; + + es8336->regmap = devm_regmap_init_i2c(i2c, &es8336_regmap_config); + if (IS_ERR(es8336->regmap)) { + ret = PTR_ERR(es8336->regmap); + dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret); + return ret; + } + + i2c_set_clientdata(i2c, es8336); + + gpiod = devm_gpiod_get_index_optional(&i2c->dev, "sel", 0, + GPIOD_OUT_HIGH); + device_property_read_u32(&i2c->dev, "spk-active-level", &active_level); + if (!gpiod) { + dev_info(&i2c->dev, "Can not get spk_ctl_gpio\n"); + es8336->spk_ctl_gpio = INVALID_GPIO; + } else { + es8336->spk_ctl_gpio = desc_to_gpio(gpiod); + es8336->spk_active_level = active_level; + es8336_enable_spk(es8336, false); + } + + gpiod = devm_gpiod_get_index_optional(&i2c->dev, "det", 0, + GPIOD_IN); + + if (!gpiod) { + dev_info(&i2c->dev, "Can not get hp_det_gpio\n"); + es8336->hp_det_gpio = INVALID_GPIO; + } else { + es8336->hp_det_gpio = desc_to_gpio(gpiod); + INIT_DELAYED_WORK(&es8336->work, hp_work); + es8336->hp_det_invert = 0; + hp_irq = gpio_to_irq(es8336->hp_det_gpio); + ret = devm_request_threaded_irq(&i2c->dev, hp_irq, NULL, + es8336_irq_handler, + IRQF_TRIGGER_FALLING | + IRQF_TRIGGER_RISING | + IRQF_ONESHOT, + "es8336_interrupt", es8336); + if (ret < 0) { + dev_err(&i2c->dev, "request_irq failed: %d\n", ret); + return ret; + } + + schedule_delayed_work(&es8336->work, + msecs_to_jiffies(es8336->debounce_time)); + } + + ret = devm_snd_soc_register_component(&i2c->dev, + &soc_component_dev_es8336, + &es8336_dai, 1); + + return ret; +} + +static void es8336_i2c_remove(struct i2c_client *client) +{ + +} + +static void es8336_i2c_shutdown(struct i2c_client *client) +{ + struct es8336_priv *es8336 = i2c_get_clientdata(client); + + if (es8336_component != NULL) { + es8336_enable_spk(es8336, false); + msleep(20); + es8336_set_bias_level(es8336_component, SND_SOC_BIAS_OFF); + } +} + +static const struct i2c_device_id es8336_i2c_id[] = { + {"es8336", 0}, + {"10ES8336:00", 0}, + {"10ES8336", 0}, + { } +}; +MODULE_DEVICE_TABLE(i2c, es8336_i2c_id); + +static const struct of_device_id es8336_of_match[] = { + { .compatible = "everest,es8336", }, + { } +}; +MODULE_DEVICE_TABLE(of, es8336_of_match); + +static const struct acpi_device_id es8336_acpi_match[] = { + { "ESSX8336", 0}, + { } +}; +MODULE_DEVICE_TABLE(acpi, es8336_acpi_match); + +static struct i2c_driver es8336_i2c_driver = { + .driver = { + .name = "es8336", + .of_match_table = es8336_of_match, + .acpi_match_table = es8336_acpi_match, + }, + .probe = es8336_i2c_probe, + 
.remove = es8336_i2c_remove, + .shutdown = es8336_i2c_shutdown, + .id_table = es8336_i2c_id, +}; + +module_i2c_driver(es8336_i2c_driver); +MODULE_DESCRIPTION("ASoC es8336 driver"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/es8336.h b/sound/soc/codecs/es8336.h new file mode 100644 index 0000000000000..368a0a4accdde --- /dev/null +++ b/sound/soc/codecs/es8336.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Everest Semiconductor Co.,Ltd + * Copyright (C) 2022-2023, Phytium Technology Co., Ltd. + */ + +#ifndef _ES8336_H +#define _ES8336_H + +/* ES8336 register space */ +/* + * RESET Control + */ +#define ES8336_RESET_REG00 0x00 +/* + * Clock Managerment + */ +#define ES8336_CLKMGR_CLKSW_REG01 0x01 +#define ES8336_CLKMGR_CLKSEL_REG02 0x02 +#define ES8336_CLKMGR_ADCOSR_REG03 0x03 +#define ES8336_CLKMGR_ADCDIV1_REG04 0x04 +#define ES8336_CLKMGR_ADCDIV2_REG05 0x05 +#define ES8336_CLKMGR_DACDIV1_REG06 0x06 +#define ES8336_CLKMGR_DACDIV2_REG07 0x07 +#define ES8336_CLKMGR_CPDIV_REG08 0x08 +/* + * SDP Control + */ +#define ES8336_SDP_MS_BCKDIV_REG09 0x09 +#define ES8336_SDP_ADCFMT_REG0A 0x0a +#define ES8336_SDP_DACFMT_REG0B 0x0b +/* + * System Control + */ +#define ES8336_SYS_VMIDSEL_REG0C 0x0c +#define ES8336_SYS_PDN_REG0D 0x0d +#define ES8336_SYS_LP1_REG0E 0x0e +#define ES8336_SYS_LP2_REG0F 0x0f +#define ES8336_SYS_VMIDLOW_REG10 0x10 +#define ES8336_SYS_VSEL_REG11 0x11 +#define ES8336_SYS_REF_REG12 0x12 +/* + * HP Mixer + */ +#define ES8336_HPMIX_SEL_REG13 0x13 +#define ES8336_HPMIX_SWITCH_REG14 0x14 +#define ES8336_HPMIX_PDN_REG15 0x15 +#define ES8336_HPMIX_VOL_REG16 0x16 +/* + * Charge Pump Headphone driver + */ +#define ES8336_CPHP_OUTEN_REG17 0x17 +#define ES8336_CPHP_ICAL_VOL_REG18 0x18 +#define ES8336_CPHP_PDN1_REG19 0x19 +#define ES8336_CPHP_PDN2_REG1A 0x1a +#define ES8336_CPHP_LDOCTL_REG1B 0x1b +/* + * Calibration + */ +#define ES8336_CAL_TYPE_REG1C 0x1c +#define ES8336_CAL_SET_REG1D 0x1d +#define ES8336_CAL_HPLIV_REG1E 0x1e +#define ES8336_CAL_HPRIV_REG1F 0x1f +#define ES8336_CAL_HPLMV_REG20 0x20 +#define ES8336_CAL_HPRMV_REG21 0x21 +/* + * ADC Control + */ +#define ES8336_ADC_PDN_LINSEL_REG22 0x22 +#define ES8336_ADC_PGAGAIN_REG23 0x23 +#define ES8336_ADC_D2SEPGA_REG24 0x24 +#define ES8336_ADC_DMIC_REG25 0x25 +#define ES8336_ADC_MUTE_REG26 0x26 +#define ES8336_ADC_VOLUME_REG27 0x27 +#define ES8336_ADC_ALC1_REG29 0x29 +#define ES8336_ADC_ALC2_REG2A 0x2a +#define ES8336_ADC_ALC3_REG2B 0x2b +#define ES8336_ADC_ALC4_REG2C 0x2c +#define ES8336_ADC_ALC5_REG2D 0x2d +#define ES8336_ADC_ALC6_REG2E 0x2e +/* + * DAC Control + */ +#define ES8336_DAC_PDN_REG2F 0x2f +#define ES8336_DAC_SET1_REG30 0x30 +#define ES8336_DAC_SET2_REG31 0x31 +#define ES8336_DAC_SET3_REG32 0x32 +#define ES8336_DAC_VOLL_REG33 0x33 +#define ES8336_DAC_VOLR_REG34 0x34 +/* + * GPIO + */ +#define ES8336_GPIO_SEL_REG4D 0x4D +#define ES8336_GPIO_DEBUNCE_INT_REG4E 0x4E +#define ES8336_GPIO_FLAG 0x4F +/* + * TEST MODE + */ +#define ES8336_TESTMODE_REG50 0x50 +#define ES8336_TEST1_REG51 0x51 +#define ES8336_TEST2_REG52 0x52 +#define ES8336_TEST3_REG53 0x53 + +#define ES8336_IFACE ES8336_SDP_MS_BCKDIV_REG09 +#define ES8336_ADC_IFACE ES8336_SDP_ADCFMT_REG0A +#define ES8336_DAC_IFACE ES8336_SDP_DACFMT_REG0B + +#define ES8336_REGNUM 84 + +/* REGISTER 0X01 CLOCK MANAGER */ +#define ES8336_CLKMGR_MCLK_DIV_MASK (0X1<<7) +#define ES8336_CLKMGR_MCLK_DIV_NML (0X0<<7) +#define ES8336_CLKMGR_MCLK_DIV_1 (0X1<<7) +#define ES8336_CLKMGR_ADC_MCLK_MASK (0X1<<3) +#define ES8336_CLKMGR_ADC_MCLK_EN (0X1<<3) 
+#define ES8336_CLKMGR_ADC_MCLK_DIS	(0X0<<3)
+#define ES8336_CLKMGR_DAC_MCLK_MASK	(0X1<<2)
+#define ES8336_CLKMGR_DAC_MCLK_EN	(0X1<<2)
+#define ES8336_CLKMGR_DAC_MCLK_DIS	(0X0<<2)
+#define ES8336_CLKMGR_ADC_ANALOG_MASK	(0X1<<1)
+#define ES8336_CLKMGR_ADC_ANALOG_EN	(0X1<<1)
+#define ES8336_CLKMGR_ADC_ANALOG_DIS	(0X0<<1)
+#define ES8336_CLKMGR_DAC_ANALOG_MASK	(0X1<<0)
+#define ES8336_CLKMGR_DAC_ANALOG_EN	(0X1<<0)
+#define ES8336_CLKMGR_DAC_ANALOG_DIS	(0X0<<0)
+
+/* REGISTER 0X0A */
+#define ES8336_ADCWL_MASK	(0x7 << 2)
+#define ES8336_ADCWL_32		(0x4 << 2)
+#define ES8336_ADCWL_24		(0x0 << 2)
+#define ES8336_ADCWL_20		(0x1 << 2)
+#define ES8336_ADCWL_18		(0x2 << 2)
+#define ES8336_ADCWL_16		(0x3 << 2)
+#define ES8336_ADCFMT_MASK	(0x3 << 0)
+#define ES8336_ADCFMT_I2S	(0x0 << 0)
+#define ES8336_ADCWL_LEFT	(0x1 << 0)
+#define ES8336_ADCWL_RIGHT	(0x2 << 0)
+#define ES8336_ADCWL_PCM	(0x3 << 0)
+
+/* REGISTER 0X0B */
+#define ES8336_DACWL_MASK	(0x7 << 2)
+#define ES8336_DACWL_32		(0x4 << 2)
+#define ES8336_DACWL_24		(0x0 << 2)
+#define ES8336_DACWL_20		(0x1 << 2)
+#define ES8336_DACWL_18		(0x2 << 2)
+#define ES8336_DACWL_16		(0x3 << 2)
+#define ES8336_DACFMT_MASK	(0x3 << 0)
+#define ES8336_DACFMT_I2S	(0x0 << 0)
+#define ES8336_DACWL_LEFT	(0x1 << 0)
+#define ES8336_DACWL_RIGHT	(0x2 << 0)
+#define ES8336_DACWL_PCM	(0x3 << 0)
+
+#endif
diff --git a/sound/soc/codecs/es8388.c b/sound/soc/codecs/es8388.c
new file mode 100644
index 0000000000000..7a0ff9fa8a01e
--- /dev/null
+++ b/sound/soc/codecs/es8388.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * es8388.c -- ES8388 ALSA SoC Audio driver
+ *
+ * Copyright 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include "es8388.h"
+#include <sound/tlv.h>
+#include <linux/of.h>
+
+static const unsigned int rates_12288[] = {
+	8000, 12000, 16000, 24000, 32000, 48000, 96000,
+};
+
+static const int ratios_12288[] = {
+	10, 7, 6, 4, 3, 2, 0,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_12288 = {
+	.count = ARRAY_SIZE(rates_12288),
+	.list = rates_12288,
+};
+
+static const unsigned int rates_11289[] = {
+	8018, 11025, 22050, 44100, 88200,
+};
+
+static const int ratios_11289[] = {
+	9, 7, 4, 2, 0,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_11289 = {
+	.count = ARRAY_SIZE(rates_11289),
+	.list = rates_11289,
+};
+
+#define ES8388_RATES (SNDRV_PCM_RATE_192000 | \
+		SNDRV_PCM_RATE_96000 | \
+		SNDRV_PCM_RATE_88200 | \
+		SNDRV_PCM_RATE_8000_48000)
+#define ES8388_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+		SNDRV_PCM_FMTBIT_S18_3LE | \
+		SNDRV_PCM_FMTBIT_S20_3LE | \
+		SNDRV_PCM_FMTBIT_S24_LE | \
+		SNDRV_PCM_FMTBIT_S32_LE)
+
+struct es8388_priv {
+	struct regmap *regmap;
+	struct clk *clk;
+	int playback_fs;
+	bool deemph;
+	int mclkdiv2;
+	const struct snd_pcm_hw_constraint_list *sysclk_constraints;
+	const int *mclk_ratios;
+	bool master;
+};
+
+/*
+ * ES8388 Controls
+ */
+static const char * const adcpol_txt[] = {"Normal", "L Invert", "R Invert",
+					  "L + R Invert"};
+static SOC_ENUM_SINGLE_DECL(adcpol,
+			    ES8388_ADCCONTROL6, 6, adcpol_txt);
+
+static const DECLARE_TLV_DB_SCALE(play_tlv, -3000, 100, 0);
+static const DECLARE_TLV_DB_SCALE(dac_adc_tlv, -9600, 50, 0);
+static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 300, 0);
+static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
+static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 300, 0);
+
+static const struct {
+	int rate;
+	unsigned int val;
+} deemph_settings[] =
{ + { 0, ES8388_DACCONTROL6_DEEMPH_OFF }, + { 32000, ES8388_DACCONTROL6_DEEMPH_32k }, + { 44100, ES8388_DACCONTROL6_DEEMPH_44_1k }, + { 48000, ES8388_DACCONTROL6_DEEMPH_48k }, +}; + +static int es8388_set_deemph(struct snd_soc_component *component) +{ + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + int val, i, best; + + /* + * If we're using deemphasis select the nearest available sample + * rate. + */ + if (es8388->deemph) { + best = 0; + for (i = 1; i < ARRAY_SIZE(deemph_settings); i++) { + if (abs(deemph_settings[i].rate - es8388->playback_fs) < + abs(deemph_settings[best].rate - es8388->playback_fs)) + best = i; + } + + val = deemph_settings[best].val; + } else { + val = ES8388_DACCONTROL6_DEEMPH_OFF; + } + + dev_dbg(component->dev, "Set deemphasis %d\n", val); + + return snd_soc_component_update_bits(component, ES8388_DACCONTROL6, + ES8388_DACCONTROL6_DEEMPH_MASK, val); +} + +static int es8388_get_deemph(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + + ucontrol->value.integer.value[0] = es8388->deemph; + return 0; +} + +static int es8388_put_deemph(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + unsigned int deemph = ucontrol->value.integer.value[0]; + int ret; + + if (deemph > 1) + return -EINVAL; + + ret = es8388_set_deemph(component); + if (ret < 0) + return ret; + + es8388->deemph = deemph; + + return 0; +} + +static const struct snd_kcontrol_new es8388_snd_controls[] = { + SOC_DOUBLE_R_TLV("Capture Digital Volume", + ES8388_ADCCONTROL8, ES8388_ADCCONTROL9, + 0, 0xc0, 1, dac_adc_tlv), + + SOC_SINGLE_BOOL_EXT("DAC Deemphasis Switch", 0, + es8388_get_deemph, es8388_put_deemph), + + SOC_ENUM("Capture Polarity", adcpol), + + SOC_SINGLE_TLV("Left Mixer Left Bypass Volume", + ES8388_DACCONTROL17, 3, 7, 1, bypass_tlv), + SOC_SINGLE_TLV("Left Mixer Right Bypass Volume", + ES8388_DACCONTROL19, 3, 7, 1, bypass_tlv), + SOC_SINGLE_TLV("Right Mixer Left Bypass Volume", + ES8388_DACCONTROL18, 3, 7, 1, bypass_tlv), + SOC_SINGLE_TLV("Right Mixer Right Bypass Volume", + ES8388_DACCONTROL20, 3, 7, 1, bypass_tlv), + + SOC_DOUBLE_R_TLV("PCM Volume", + ES8388_LDACVOL, ES8388_RDACVOL, + 0, ES8388_DACVOL_MAX, 1, dac_adc_tlv), + + SOC_DOUBLE_R_TLV("Output 1 Playback Volume", + ES8388_LOUT1VOL, ES8388_ROUT1VOL, + 0, ES8388_OUT1VOL_MAX, 0, play_tlv), + + SOC_DOUBLE_R_TLV("Output 2 Playback Volume", + ES8388_LOUT2VOL, ES8388_ROUT2VOL, + 0, ES8388_OUT2VOL_MAX, 0, play_tlv), + + SOC_DOUBLE_TLV("Mic PGA Volume", ES8388_ADCCONTROL1, + 4, 0, 8, 0, mic_tlv), +}; + +/* + * DAPM Controls + */ +static const char * const es8388_line_texts[] = { + "Line 1", "Line 2", "PGA", "Differential"}; + +static const struct soc_enum es8388_lline_enum = + SOC_ENUM_SINGLE(ES8388_DACCONTROL16, 3, + ARRAY_SIZE(es8388_line_texts), + es8388_line_texts); +static const struct snd_kcontrol_new es8388_left_line_controls = + SOC_DAPM_ENUM("Route", es8388_lline_enum); + +static const struct soc_enum es8388_rline_enum = + SOC_ENUM_SINGLE(ES8388_DACCONTROL16, 0, + ARRAY_SIZE(es8388_line_texts), + es8388_line_texts); +static const struct snd_kcontrol_new es8388_right_line_controls = + SOC_DAPM_ENUM("Route", es8388_lline_enum); + +/* Left Mixer */ +static const struct 
snd_kcontrol_new es8388_left_mixer_controls[] = { + SOC_DAPM_SINGLE("Playback Switch", ES8388_DACCONTROL17, 7, 1, 0), + SOC_DAPM_SINGLE("Left Bypass Switch", ES8388_DACCONTROL17, 6, 1, 0), + SOC_DAPM_SINGLE("Right Playback Switch", ES8388_DACCONTROL18, 7, 1, 0), + SOC_DAPM_SINGLE("Right Bypass Switch", ES8388_DACCONTROL18, 6, 1, 0), +}; + +/* Right Mixer */ +static const struct snd_kcontrol_new es8388_right_mixer_controls[] = { + SOC_DAPM_SINGLE("Left Playback Switch", ES8388_DACCONTROL19, 7, 1, 0), + SOC_DAPM_SINGLE("Left Bypass Switch", ES8388_DACCONTROL19, 6, 1, 0), + SOC_DAPM_SINGLE("Playback Switch", ES8388_DACCONTROL20, 7, 1, 0), + SOC_DAPM_SINGLE("Right Bypass Switch", ES8388_DACCONTROL20, 6, 1, 0), +}; + +static const char * const es8388_pga_sel[] = { + "Line 1", "Line 2", "Line 3", "Differential"}; + +/* Left PGA Mux */ +static const struct soc_enum es8388_lpga_enum = + SOC_ENUM_SINGLE(ES8388_ADCCONTROL2, 6, + ARRAY_SIZE(es8388_pga_sel), + es8388_pga_sel); +static const struct snd_kcontrol_new es8388_left_pga_controls = + SOC_DAPM_ENUM("Route", es8388_lpga_enum); + +/* Right PGA Mux */ +static const struct soc_enum es8388_rpga_enum = + SOC_ENUM_SINGLE(ES8388_ADCCONTROL2, 4, + ARRAY_SIZE(es8388_pga_sel), + es8388_pga_sel); +static const struct snd_kcontrol_new es8388_right_pga_controls = + SOC_DAPM_ENUM("Route", es8388_rpga_enum); + +/* Differential Mux */ +static const char * const es8388_diff_sel[] = {"Line 1", "Line 2"}; +static SOC_ENUM_SINGLE_DECL(diffmux, + ES8388_ADCCONTROL3, 7, es8388_diff_sel); +static const struct snd_kcontrol_new es8388_diffmux_controls = + SOC_DAPM_ENUM("Route", diffmux); + +/* Mono ADC Mux */ +static const char * const es8388_mono_mux[] = {"Stereo", "Mono (Left)", + "Mono (Right)", "Digital Mono"}; +static SOC_ENUM_SINGLE_DECL(monomux, + ES8388_ADCCONTROL3, 3, es8388_mono_mux); +static const struct snd_kcontrol_new es8388_monomux_controls = + SOC_DAPM_ENUM("Route", monomux); + +static const struct snd_soc_dapm_widget es8388_dapm_widgets[] = { + SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0, + &es8388_diffmux_controls), + SND_SOC_DAPM_MUX("Left ADC Mux", SND_SOC_NOPM, 0, 0, + &es8388_monomux_controls), + SND_SOC_DAPM_MUX("Right ADC Mux", SND_SOC_NOPM, 0, 0, + &es8388_monomux_controls), + + SND_SOC_DAPM_MUX("Left PGA Mux", ES8388_ADCPOWER, + ES8388_ADCPOWER_AINL_OFF, 1, + &es8388_left_pga_controls), + SND_SOC_DAPM_MUX("Right PGA Mux", ES8388_ADCPOWER, + ES8388_ADCPOWER_AINR_OFF, 1, + &es8388_right_pga_controls), + + SND_SOC_DAPM_MUX("Left Line Mux", SND_SOC_NOPM, 0, 0, + &es8388_left_line_controls), + SND_SOC_DAPM_MUX("Right Line Mux", SND_SOC_NOPM, 0, 0, + &es8388_right_line_controls), + + SND_SOC_DAPM_ADC("Right ADC", "Right Capture", ES8388_ADCPOWER, + ES8388_ADCPOWER_ADCR_OFF, 1), + SND_SOC_DAPM_ADC("Left ADC", "Left Capture", ES8388_ADCPOWER, + ES8388_ADCPOWER_ADCL_OFF, 1), + + SND_SOC_DAPM_SUPPLY("DAC STM", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_DACSTM_RESET, 1, NULL, 0), + SND_SOC_DAPM_SUPPLY("ADC STM", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_ADCSTM_RESET, 1, NULL, 0), + + SND_SOC_DAPM_SUPPLY("DAC DIG", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_DACDIG_OFF, 1, NULL, 0), + SND_SOC_DAPM_SUPPLY("ADC DIG", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_ADCDIG_OFF, 1, NULL, 0), + + SND_SOC_DAPM_SUPPLY("DAC DLL", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_DACDLL_OFF, 1, NULL, 0), + SND_SOC_DAPM_SUPPLY("ADC DLL", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_ADCDLL_OFF, 1, NULL, 0), + + SND_SOC_DAPM_SUPPLY("ADC Vref", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_ADCVREF_OFF, 1, NULL, 0), 
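+	/*
+	 * The SUPPLY widgets here (STM, DIG, DLL and Vref, for both the
+	 * ADC and DAC sides) model power and reset bits in
+	 * ES8388_CHIPPOWER; the routes below tie them to the ADC and DAC
+	 * widgets so DAPM sequences them automatically.
+	 */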
+ SND_SOC_DAPM_SUPPLY("DAC Vref", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_DACVREF_OFF, 1, NULL, 0), + + SND_SOC_DAPM_DAC("Right DAC", "Right Playback", ES8388_DACPOWER, + ES8388_DACPOWER_RDAC_OFF, 1), + SND_SOC_DAPM_DAC("Left DAC", "Left Playback", ES8388_DACPOWER, + ES8388_DACPOWER_LDAC_OFF, 1), + + SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0, + &es8388_left_mixer_controls[0], + ARRAY_SIZE(es8388_left_mixer_controls)), + SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0, + &es8388_right_mixer_controls[0], + ARRAY_SIZE(es8388_right_mixer_controls)), + + SND_SOC_DAPM_PGA("Right Out 2", ES8388_DACPOWER, + ES8388_DACPOWER_ROUT2_ON, 0, NULL, 0), + SND_SOC_DAPM_PGA("Left Out 2", ES8388_DACPOWER, + ES8388_DACPOWER_LOUT2_ON, 0, NULL, 0), + SND_SOC_DAPM_PGA("Right Out 1", ES8388_DACPOWER, + ES8388_DACPOWER_ROUT1_ON, 0, NULL, 0), + SND_SOC_DAPM_PGA("Left Out 1", ES8388_DACPOWER, + ES8388_DACPOWER_LOUT1_ON, 0, NULL, 0), + + SND_SOC_DAPM_OUTPUT("LOUT1"), + SND_SOC_DAPM_OUTPUT("ROUT1"), + SND_SOC_DAPM_OUTPUT("LOUT2"), + SND_SOC_DAPM_OUTPUT("ROUT2"), + + SND_SOC_DAPM_INPUT("LINPUT1"), + SND_SOC_DAPM_INPUT("LINPUT2"), + SND_SOC_DAPM_INPUT("RINPUT1"), + SND_SOC_DAPM_INPUT("RINPUT2"), +}; + +static const struct snd_soc_dapm_route es8388_dapm_routes[] = { + { "Left Line Mux", "Line 1", "LINPUT1" }, + { "Left Line Mux", "Line 2", "LINPUT2" }, + { "Left Line Mux", "PGA", "Left PGA Mux" }, + { "Left Line Mux", "Differential", "Differential Mux" }, + + { "Right Line Mux", "Line 1", "RINPUT1" }, + { "Right Line Mux", "Line 2", "RINPUT2" }, + { "Right Line Mux", "PGA", "Right PGA Mux" }, + { "Right Line Mux", "Differential", "Differential Mux" }, + + { "Left PGA Mux", "Line 1", "LINPUT1" }, + { "Left PGA Mux", "Line 2", "LINPUT2" }, + { "Left PGA Mux", "Differential", "Differential Mux" }, + + { "Right PGA Mux", "Line 1", "RINPUT1" }, + { "Right PGA Mux", "Line 2", "RINPUT2" }, + { "Right PGA Mux", "Differential", "Differential Mux" }, + + { "Differential Mux", "Line 1", "LINPUT1" }, + { "Differential Mux", "Line 1", "RINPUT1" }, + { "Differential Mux", "Line 2", "LINPUT2" }, + { "Differential Mux", "Line 2", "RINPUT2" }, + + { "Left ADC Mux", "Stereo", "Left PGA Mux" }, + { "Left ADC Mux", "Mono (Left)", "Left PGA Mux" }, + { "Left ADC Mux", "Digital Mono", "Left PGA Mux" }, + + { "Right ADC Mux", "Stereo", "Right PGA Mux" }, + { "Right ADC Mux", "Mono (Right)", "Right PGA Mux" }, + { "Right ADC Mux", "Digital Mono", "Right PGA Mux" }, + + { "Left ADC", NULL, "Left ADC Mux" }, + { "Right ADC", NULL, "Right ADC Mux" }, + + { "ADC DIG", NULL, "ADC STM" }, + { "ADC DIG", NULL, "ADC Vref" }, + { "ADC DIG", NULL, "ADC DLL" }, + + { "Left ADC", NULL, "ADC DIG" }, + { "Right ADC", NULL, "ADC DIG" }, + + { "Left Line Mux", "Line 1", "LINPUT1" }, + { "Left Line Mux", "Line 2", "LINPUT2" }, + { "Left Line Mux", "PGA", "Left PGA Mux" }, + { "Left Line Mux", "Differential", "Differential Mux" }, + + { "Right Line Mux", "Line 1", "RINPUT1" }, + { "Right Line Mux", "Line 2", "RINPUT2" }, + { "Right Line Mux", "PGA", "Right PGA Mux" }, + { "Right Line Mux", "Differential", "Differential Mux" }, + + { "Left Out 1", NULL, "Left DAC" }, + { "Right Out 1", NULL, "Right DAC" }, + { "Left Out 2", NULL, "Left DAC" }, + { "Right Out 2", NULL, "Right DAC" }, + + { "Left Mixer", "Playback Switch", "Left DAC" }, + { "Left Mixer", "Left Bypass Switch", "Left Line Mux" }, + { "Left Mixer", "Right Playback Switch", "Right DAC" }, + { "Left Mixer", "Right Bypass Switch", "Right Line Mux" }, + + { "Right Mixer", "Left Playback 
Switch", "Left DAC" }, + { "Right Mixer", "Left Bypass Switch", "Left Line Mux" }, + { "Right Mixer", "Playback Switch", "Right DAC" }, + { "Right Mixer", "Right Bypass Switch", "Right Line Mux" }, + + { "DAC DIG", NULL, "DAC STM" }, + { "DAC DIG", NULL, "DAC Vref" }, + { "DAC DIG", NULL, "DAC DLL" }, + + { "Left DAC", NULL, "DAC DIG" }, + { "Right DAC", NULL, "DAC DIG" }, + + { "Left Out 1", NULL, "Left Mixer" }, + { "LOUT1", NULL, "Left Out 1" }, + { "Right Out 1", NULL, "Right Mixer" }, + { "ROUT1", NULL, "Right Out 1" }, + + { "Left Out 2", NULL, "Left Mixer" }, + { "LOUT2", NULL, "Left Out 2" }, + { "Right Out 2", NULL, "Right Mixer" }, + { "ROUT2", NULL, "Right Out 2" }, +}; + +static int es8388_mute(struct snd_soc_dai *dai, int mute, int direction) +{ + if (direction) + return snd_soc_component_update_bits(dai->component, ES8388_ADCCONTROL7, + ES8388_ADCCONTROL7_ADC_MUTE, + mute ? ES8388_ADCCONTROL7_ADC_MUTE : 0); + else + return snd_soc_component_update_bits(dai->component, ES8388_DACCONTROL3, + ES8388_DACCONTROL3_DACMUTE, + mute ? ES8388_DACCONTROL3_DACMUTE : 0); +} + +static int es8388_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + + if (es8388->master && es8388->sysclk_constraints) + snd_pcm_hw_constraint_list(substream->runtime, 0, + SNDRV_PCM_HW_PARAM_RATE, + es8388->sysclk_constraints); + + return 0; +} + +static int es8388_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + int i; + int reg; + int wl; + int ratio; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + reg = ES8388_DACCONTROL2; + else + reg = ES8388_ADCCONTROL5; + + if (es8388->master) { + if (!es8388->sysclk_constraints) { + dev_err(component->dev, "No MCLK configured\n"); + return -EINVAL; + } + + for (i = 0; i < es8388->sysclk_constraints->count; i++) + if (es8388->sysclk_constraints->list[i] == + params_rate(params)) + break; + + if (i == es8388->sysclk_constraints->count) { + dev_err(component->dev, + "LRCLK %d unsupported with current clock\n", + params_rate(params)); + return -EINVAL; + } + ratio = es8388->mclk_ratios[i]; + } else { + ratio = 0; + es8388->mclkdiv2 = 0; + } + + snd_soc_component_update_bits(component, ES8388_MASTERMODE, + ES8388_MASTERMODE_MCLKDIV2, + es8388->mclkdiv2 ? 
ES8388_MASTERMODE_MCLKDIV2 : 0); + + switch (params_width(params)) { + case 16: + wl = 3; + break; + case 18: + wl = 2; + break; + case 20: + wl = 1; + break; + case 24: + wl = 0; + break; + case 32: + wl = 4; + break; + default: + return -EINVAL; + } + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + snd_soc_component_update_bits(component, ES8388_DACCONTROL1, + ES8388_DACCONTROL1_DACWL_MASK, + wl << ES8388_DACCONTROL1_DACWL_SHIFT); + + es8388->playback_fs = params_rate(params); + es8388_set_deemph(component); + } else + snd_soc_component_update_bits(component, ES8388_ADCCONTROL4, + ES8388_ADCCONTROL4_ADCWL_MASK, + wl << ES8388_ADCCONTROL4_ADCWL_SHIFT); + + return snd_soc_component_update_bits(component, reg, ES8388_RATEMASK, ratio); +} + +static int es8388_set_sysclk(struct snd_soc_dai *codec_dai, + int clk_id, unsigned int freq, int dir) +{ + struct snd_soc_component *component = codec_dai->component; + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + int mclkdiv2 = 0; + + switch (freq) { + case 0: + es8388->sysclk_constraints = NULL; + es8388->mclk_ratios = NULL; + break; + case 22579200: + mclkdiv2 = 1; + fallthrough; + case 11289600: + es8388->sysclk_constraints = &constraints_11289; + es8388->mclk_ratios = ratios_11289; + break; + case 24576000: + mclkdiv2 = 1; + fallthrough; + case 12288000: + es8388->sysclk_constraints = &constraints_12288; + es8388->mclk_ratios = ratios_12288; + break; + default: + return -EINVAL; + } + + es8388->mclkdiv2 = mclkdiv2; + return 0; +} + +static int es8388_set_dai_fmt(struct snd_soc_dai *codec_dai, + unsigned int fmt) +{ + struct snd_soc_component *component = codec_dai->component; + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + u8 dac_mode = 0; + u8 adc_mode = 0; + + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBP_CFP: + /* Master serial port mode, with BCLK generated automatically */ + snd_soc_component_update_bits(component, ES8388_MASTERMODE, + ES8388_MASTERMODE_MSC, + ES8388_MASTERMODE_MSC); + es8388->master = true; + break; + case SND_SOC_DAIFMT_CBC_CFC: + /* Slave serial port mode */ + snd_soc_component_update_bits(component, ES8388_MASTERMODE, + ES8388_MASTERMODE_MSC, 0); + es8388->master = false; + break; + default: + return -EINVAL; + } + + /* interface format */ + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_I2S: + dac_mode |= ES8388_DACCONTROL1_DACFORMAT_I2S; + adc_mode |= ES8388_ADCCONTROL4_ADCFORMAT_I2S; + break; + case SND_SOC_DAIFMT_RIGHT_J: + dac_mode |= ES8388_DACCONTROL1_DACFORMAT_RJUST; + adc_mode |= ES8388_ADCCONTROL4_ADCFORMAT_RJUST; + break; + case SND_SOC_DAIFMT_LEFT_J: + dac_mode |= ES8388_DACCONTROL1_DACFORMAT_LJUST; + adc_mode |= ES8388_ADCCONTROL4_ADCFORMAT_LJUST; + break; + default: + return -EINVAL; + } + + /* clock inversion */ + if ((fmt & SND_SOC_DAIFMT_INV_MASK) != SND_SOC_DAIFMT_NB_NF) + return -EINVAL; + + snd_soc_component_update_bits(component, ES8388_DACCONTROL1, + ES8388_DACCONTROL1_DACFORMAT_MASK, dac_mode); + snd_soc_component_update_bits(component, ES8388_ADCCONTROL4, + ES8388_ADCCONTROL4_ADCFORMAT_MASK, adc_mode); + + return 0; +} + +static int es8388_set_bias_level(struct snd_soc_component *component, + enum snd_soc_bias_level level) +{ + switch (level) { + case SND_SOC_BIAS_ON: + break; + + case SND_SOC_BIAS_PREPARE: + /* VREF, VMID=2x50k, digital enabled */ + snd_soc_component_write(component, ES8388_CHIPPOWER, 0); + snd_soc_component_update_bits(component, ES8388_CONTROL1, + 
ES8388_CONTROL1_VMIDSEL_MASK | + ES8388_CONTROL1_ENREF, + ES8388_CONTROL1_VMIDSEL_50k | + ES8388_CONTROL1_ENREF); + break; + + case SND_SOC_BIAS_STANDBY: + if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) { + snd_soc_component_update_bits(component, ES8388_CONTROL1, + ES8388_CONTROL1_VMIDSEL_MASK | + ES8388_CONTROL1_ENREF, + ES8388_CONTROL1_VMIDSEL_5k | + ES8388_CONTROL1_ENREF); + + /* Charge caps */ + msleep(100); + } + + snd_soc_component_write(component, ES8388_CONTROL2, + ES8388_CONTROL2_OVERCURRENT_ON | + ES8388_CONTROL2_THERMAL_SHUTDOWN_ON); + + /* VREF, VMID=2*500k, digital stopped */ + snd_soc_component_update_bits(component, ES8388_CONTROL1, + ES8388_CONTROL1_VMIDSEL_MASK | + ES8388_CONTROL1_ENREF, + ES8388_CONTROL1_VMIDSEL_500k | + ES8388_CONTROL1_ENREF); + break; + + case SND_SOC_BIAS_OFF: + snd_soc_component_update_bits(component, ES8388_CONTROL1, + ES8388_CONTROL1_VMIDSEL_MASK | + ES8388_CONTROL1_ENREF, + 0); + break; + } + return 0; +} + +static const struct snd_soc_dai_ops es8388_dai_ops = { + .startup = es8388_startup, + .hw_params = es8388_hw_params, + .mute_stream = es8388_mute, + .set_sysclk = es8388_set_sysclk, + .set_fmt = es8388_set_dai_fmt, + .no_capture_mute = 1, +}; + +static struct snd_soc_dai_driver es8388_dai = { + .name = "es8388-hifi", + .playback = { + .stream_name = "Playback", + .channels_min = 2, + .channels_max = 2, + .rates = ES8388_RATES, + .formats = ES8388_FORMATS, + }, + .capture = { + .stream_name = "Capture", + .channels_min = 2, + .channels_max = 2, + .rates = ES8388_RATES, + .formats = ES8388_FORMATS, + }, + .ops = &es8388_dai_ops, + .symmetric_rate = 1, +}; + +static int es8388_suspend(struct snd_soc_component *component) +{ + return 0; +} + +static int es8388_resume(struct snd_soc_component *component) +{ + struct regmap *regmap = dev_get_regmap(component->dev, NULL); + struct es8388_priv *es8388; + int ret; + + es8388 = snd_soc_component_get_drvdata(component); + + regcache_mark_dirty(regmap); + ret = regcache_sync(regmap); + if (ret) { + dev_err(component->dev, "unable to sync regcache\n"); + return ret; + } + + return 0; +} + +static int es8388_component_probe(struct snd_soc_component *component) +{ + snd_soc_component_write(component, ES8388_ADCPOWER, 0xf0); + snd_soc_component_write(component, ES8388_CONTROL1, 0x30); + snd_soc_component_write(component, ES8388_DACCONTROL21, 0x80); + snd_soc_component_write(component, ES8388_ADCCONTROL10, 0xda); + + return 0; +} + +static void es8388_remove(struct snd_soc_component *component) +{ +} + +const struct regmap_config es8388_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = ES8388_REG_MAX, + .cache_type = REGCACHE_RBTREE, + .use_single_read = true, + .use_single_write = true, +}; +EXPORT_SYMBOL_GPL(es8388_regmap_config); + +static const struct snd_soc_component_driver es8388_component_driver = { + .probe = es8388_component_probe, + .remove = es8388_remove, + .suspend = es8388_suspend, + .resume = es8388_resume, + .set_bias_level = es8388_set_bias_level, + .controls = es8388_snd_controls, + .num_controls = ARRAY_SIZE(es8388_snd_controls), + .dapm_widgets = es8388_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(es8388_dapm_widgets), + .dapm_routes = es8388_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(es8388_dapm_routes), + .suspend_bias_off = 1, + .idle_bias_on = 1, + .use_pmdown_time = 1, + .endianness = 1, +}; + +int es8388_probe(struct device *dev, struct regmap *regmap) +{ + struct es8388_priv *es8388; + + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + 
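/* note: the I2C glue below passes devm_regmap_init_i2c()'s raw return value straight in, so an ERR_PTR regmap is rejected by the check above */ +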
es8388 = devm_kzalloc(dev, sizeof(*es8388), GFP_KERNEL); + if (es8388 == NULL) + return -ENOMEM; + + es8388->regmap = regmap; + + dev_set_drvdata(dev, es8388); + + return devm_snd_soc_register_component(dev, + &es8388_component_driver, &es8388_dai, 1); +} +EXPORT_SYMBOL_GPL(es8388_probe); + +static const struct i2c_device_id es8388_id[] = { + { "es8388", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, es8388_id); + +static const struct of_device_id es8388_of_match[] = { + { .compatible = "everest,es8388", }, + { } +}; +MODULE_DEVICE_TABLE(of, es8388_of_match); + +static const struct acpi_device_id es8388_acpi_match[] = { + { "ESSX8388", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, es8388_acpi_match); + +static int es8388_i2c_probe(struct i2c_client *i2c) +{ + return es8388_probe(&i2c->dev, + devm_regmap_init_i2c(i2c, &es8388_regmap_config)); +} + +static struct i2c_driver es8388_i2c_driver = { + .driver = { + .name = "es8388", + .of_match_table = es8388_of_match, + .acpi_match_table = es8388_acpi_match, + }, + .probe = es8388_i2c_probe, + .id_table = es8388_id, +}; + +module_i2c_driver(es8388_i2c_driver); + +MODULE_DESCRIPTION("ASoC ES8388 driver"); +MODULE_AUTHOR("Yiqun Zhang "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/es8388.h b/sound/soc/codecs/es8388.h new file mode 100644 index 0000000000000..5858a71261fba --- /dev/null +++ b/sound/soc/codecs/es8388.h @@ -0,0 +1,290 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * es8388.h -- ES8388 ALSA SoC Audio driver + */ + +#ifndef _ES8388_H +#define _ES8388_H + +#include <linux/regmap.h> + +struct device; + +extern const struct regmap_config es8388_regmap_config; +int es8388_probe(struct device *dev, struct regmap *regmap); + +#define ES8388_DACLVOL 46 +#define ES8388_DACRVOL 47 +#define ES8388_DACCTL 28 +#define ES8388_RATEMASK (0x1f << 0) + +#define ES8388_CONTROL1 0x00 +#define ES8388_CONTROL1_VMIDSEL_OFF (0 << 0) +#define ES8388_CONTROL1_VMIDSEL_50k (1 << 0) +#define ES8388_CONTROL1_VMIDSEL_500k (2 << 0) +#define ES8388_CONTROL1_VMIDSEL_5k (3 << 0) +#define ES8388_CONTROL1_VMIDSEL_MASK (3 << 0) +#define ES8388_CONTROL1_ENREF (1 << 2) +#define ES8388_CONTROL1_SEQEN (1 << 3) +#define ES8388_CONTROL1_SAMEFS (1 << 4) +#define ES8388_CONTROL1_DACMCLK_ADC (0 << 5) +#define ES8388_CONTROL1_DACMCLK_DAC (1 << 5) +#define ES8388_CONTROL1_LRCM (1 << 6) +#define ES8388_CONTROL1_SCP_RESET (1 << 7) + +#define ES8388_CONTROL2 0x01 +#define ES8388_CONTROL2_VREF_BUF_OFF (1 << 0) +#define ES8388_CONTROL2_VREF_LOWPOWER (1 << 1) +#define ES8388_CONTROL2_IBIASGEN_OFF (1 << 2) +#define ES8388_CONTROL2_ANALOG_OFF (1 << 3) +#define ES8388_CONTROL2_VREF_BUF_LOWPOWER (1 << 4) +#define ES8388_CONTROL2_VCM_MOD_LOWPOWER (1 << 5) +#define ES8388_CONTROL2_OVERCURRENT_ON (1 << 6) +#define ES8388_CONTROL2_THERMAL_SHUTDOWN_ON (1 << 7) + +#define ES8388_CHIPPOWER 0x02 +#define ES8388_CHIPPOWER_DACVREF_OFF 0 +#define ES8388_CHIPPOWER_ADCVREF_OFF 1 +#define ES8388_CHIPPOWER_DACDLL_OFF 2 +#define ES8388_CHIPPOWER_ADCDLL_OFF 3 +#define ES8388_CHIPPOWER_DACSTM_RESET 4 +#define ES8388_CHIPPOWER_ADCSTM_RESET 5 +#define ES8388_CHIPPOWER_DACDIG_OFF 6 +#define ES8388_CHIPPOWER_ADCDIG_OFF 7 + +#define ES8388_ADCPOWER 0x03 +#define ES8388_ADCPOWER_INT1_LOWPOWER 0 +#define ES8388_ADCPOWER_FLASH_ADC_LOWPOWER 1 +#define ES8388_ADCPOWER_ADC_BIAS_GEN_OFF 2 +#define ES8388_ADCPOWER_MIC_BIAS_OFF 3 +#define ES8388_ADCPOWER_ADCR_OFF 4 +#define ES8388_ADCPOWER_ADCL_OFF 5 +#define ES8388_ADCPOWER_AINR_OFF 6 +#define ES8388_ADCPOWER_AINL_OFF 7 + +#define ES8388_DACPOWER 0x04 +#define ES8388_DACPOWER_OUT3_ON 0
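+/* the CHIPPOWER, ADCPOWER and DACPOWER fields above and below are bit positions, not masks */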
+#define ES8388_DACPOWER_MONO_ON 1 +#define ES8388_DACPOWER_ROUT2_ON 2 +#define ES8388_DACPOWER_LOUT2_ON 3 +#define ES8388_DACPOWER_ROUT1_ON 4 +#define ES8388_DACPOWER_LOUT1_ON 5 +#define ES8388_DACPOWER_RDAC_OFF 6 +#define ES8388_DACPOWER_LDAC_OFF 7 + +#define ES8388_CHIPLOPOW1 0x05 +#define ES8388_CHIPLOPOW2 0x06 +#define ES8388_ANAVOLMANAG 0x07 + +#define ES8388_MASTERMODE 0x08 +#define ES8388_MASTERMODE_BCLKDIV (0 << 0) +#define ES8388_MASTERMODE_BCLK_INV (1 << 5) +#define ES8388_MASTERMODE_MCLKDIV2 (1 << 6) +#define ES8388_MASTERMODE_MSC (1 << 7) + +#define ES8388_ADCCONTROL1 0x09 +#define ES8388_ADCCONTROL2 0x0a +#define ES8388_ADCCONTROL3 0x0b + +#define ES8388_ADCCONTROL4 0x0c +#define ES8388_ADCCONTROL4_ADCFORMAT_MASK (3 << 0) +#define ES8388_ADCCONTROL4_ADCFORMAT_I2S (0 << 0) +#define ES8388_ADCCONTROL4_ADCFORMAT_LJUST (1 << 0) +#define ES8388_ADCCONTROL4_ADCFORMAT_RJUST (2 << 0) +#define ES8388_ADCCONTROL4_ADCFORMAT_PCM (3 << 0) +#define ES8388_ADCCONTROL4_ADCWL_SHIFT 2 +#define ES8388_ADCCONTROL4_ADCWL_MASK (7 << 2) +#define ES8388_ADCCONTROL4_ADCLRP_I2S_POL_NORMAL (0 << 5) +#define ES8388_ADCCONTROL4_ADCLRP_I2S_POL_INV (1 << 5) +#define ES8388_ADCCONTROL4_ADCLRP_PCM_MSB_CLK2 (0 << 5) +#define ES8388_ADCCONTROL4_ADCLRP_PCM_MSB_CLK1 (1 << 5) + +#define ES8388_ADCCONTROL5 0x0d +#define ES8388_ADCCONTROL5_RATEMASK (0x1f << 0) + +#define ES8388_ADCCONTROL6 0x0e + +#define ES8388_ADCCONTROL7 0x0f +#define ES8388_ADCCONTROL7_ADC_MUTE (1 << 2) +#define ES8388_ADCCONTROL7_ADC_LER (1 << 3) +#define ES8388_ADCCONTROL7_ADC_ZERO_CROSS (1 << 4) +#define ES8388_ADCCONTROL7_ADC_SOFT_RAMP (1 << 5) +#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_4 (0 << 6) +#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_8 (1 << 6) +#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_16 (2 << 6) +#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_32 (3 << 6) + +#define ES8388_ADCCONTROL8 0x10 +#define ES8388_ADCCONTROL9 0x11 +#define ES8388_ADCCONTROL10 0x12 +#define ES8388_ADCCONTROL11 0x13 +#define ES8388_ADCCONTROL12 0x14 +#define ES8388_ADCCONTROL13 0x15 +#define ES8388_ADCCONTROL14 0x16 + +#define ES8388_DACCONTROL1 0x17 +#define ES8388_DACCONTROL1_DACFORMAT_MASK (3 << 1) +#define ES8388_DACCONTROL1_DACFORMAT_I2S (0 << 1) +#define ES8388_DACCONTROL1_DACFORMAT_LJUST (1 << 1) +#define ES8388_DACCONTROL1_DACFORMAT_RJUST (2 << 1) +#define ES8388_DACCONTROL1_DACFORMAT_PCM (3 << 1) +#define ES8388_DACCONTROL1_DACWL_SHIFT 3 +#define ES8388_DACCONTROL1_DACWL_MASK (7 << 3) +#define ES8388_DACCONTROL1_DACLRP_I2S_POL_NORMAL (0 << 6) +#define ES8388_DACCONTROL1_DACLRP_I2S_POL_INV (1 << 6) +#define ES8388_DACCONTROL1_DACLRP_PCM_MSB_CLK2 (0 << 6) +#define ES8388_DACCONTROL1_DACLRP_PCM_MSB_CLK1 (1 << 6) +#define ES8388_DACCONTROL1_LRSWAP (1 << 7) + +#define ES8388_DACCONTROL2 0x18 +#define ES8388_DACCONTROL2_RATEMASK (0x1f << 0) +#define ES8388_DACCONTROL2_DOUBLESPEED (1 << 5) + +#define ES8388_DACCONTROL3 0x19 +#define ES8388_DACCONTROL3_AUTOMUTE (1 << 2) +#define ES8388_DACCONTROL3_DACMUTE (1 << 2) +#define ES8388_DACCONTROL3_LEFTGAINVOL (1 << 3) +#define ES8388_DACCONTROL3_DACZEROCROSS (1 << 4) +#define ES8388_DACCONTROL3_DACSOFTRAMP (1 << 5) +#define ES8388_DACCONTROL3_DACRAMPRATE (3 << 6) + +#define ES8388_LDACVOL 0x1a +#define ES8388_LDACVOL_MASK (0 << 0) +#define ES8388_LDACVOL_MAX (0xc0) + +#define ES8388_RDACVOL 0x1b +#define ES8388_RDACVOL_MASK (0 << 0) +#define ES8388_RDACVOL_MAX (0xc0) + +#define ES8388_DACVOL_MAX (0xc0) + +#define ES8388_DACCONTROL4 0x1a +#define ES8388_DACCONTROL5 0x1b + +#define ES8388_DACCONTROL6 0x1c +#define 
ES8388_DACCONTROL6_CLICKFREE (1 << 3) +#define ES8388_DACCONTROL6_DAC_INVR (1 << 4) +#define ES8388_DACCONTROL6_DAC_INVL (1 << 5) +#define ES8388_DACCONTROL6_DEEMPH_MASK (3 << 6) +#define ES8388_DACCONTROL6_DEEMPH_OFF (0 << 6) +#define ES8388_DACCONTROL6_DEEMPH_32k (1 << 6) +#define ES8388_DACCONTROL6_DEEMPH_44_1k (2 << 6) +#define ES8388_DACCONTROL6_DEEMPH_48k (3 << 6) + +#define ES8388_DACCONTROL7 0x1d +#define ES8388_DACCONTROL7_VPP_SCALE_3p5 (0 << 0) +#define ES8388_DACCONTROL7_VPP_SCALE_4p0 (1 << 0) +#define ES8388_DACCONTROL7_VPP_SCALE_3p0 (2 << 0) +#define ES8388_DACCONTROL7_VPP_SCALE_2p5 (3 << 0) +#define ES8388_DACCONTROL7_SHELVING_STRENGTH (1 << 2) /* In eights */ +#define ES8388_DACCONTROL7_MONO (1 << 5) +#define ES8388_DACCONTROL7_ZEROR (1 << 6) +#define ES8388_DACCONTROL7_ZEROL (1 << 7) + +/* Shelving filter */ +#define ES8388_DACCONTROL8 0x1e +#define ES8388_DACCONTROL9 0x1f +#define ES8388_DACCONTROL10 0x20 +#define ES8388_DACCONTROL11 0x21 +#define ES8388_DACCONTROL12 0x22 +#define ES8388_DACCONTROL13 0x23 +#define ES8388_DACCONTROL14 0x24 +#define ES8388_DACCONTROL15 0x25 + +#define ES8388_DACCONTROL16 0x26 +#define ES8388_DACCONTROL16_RMIXSEL_RIN1 (0 << 0) +#define ES8388_DACCONTROL16_RMIXSEL_RIN2 (1 << 0) +#define ES8388_DACCONTROL16_RMIXSEL_RIN3 (2 << 0) +#define ES8388_DACCONTROL16_RMIXSEL_RADC (3 << 0) +#define ES8388_DACCONTROL16_LMIXSEL_LIN1 (0 << 3) +#define ES8388_DACCONTROL16_LMIXSEL_LIN2 (1 << 3) +#define ES8388_DACCONTROL16_LMIXSEL_LIN3 (2 << 3) +#define ES8388_DACCONTROL16_LMIXSEL_LADC (3 << 3) + +#define ES8388_DACCONTROL17 0x27 +#define ES8388_DACCONTROL17_LI2LOVOL (7 << 3) +#define ES8388_DACCONTROL17_LI2LO (1 << 6) +#define ES8388_DACCONTROL17_LD2LO (1 << 7) + +#define ES8388_DACCONTROL18 0x28 +#define ES8388_DACCONTROL18_RI2LOVOL (7 << 3) +#define ES8388_DACCONTROL18_RI2LO (1 << 6) +#define ES8388_DACCONTROL18_RD2LO (1 << 7) + +#define ES8388_DACCONTROL19 0x29 +#define ES8388_DACCONTROL19_LI2ROVOL (7 << 3) +#define ES8388_DACCONTROL19_LI2RO (1 << 6) +#define ES8388_DACCONTROL19_LD2RO (1 << 7) + +#define ES8388_DACCONTROL20 0x2a +#define ES8388_DACCONTROL20_RI2ROVOL (7 << 3) +#define ES8388_DACCONTROL20_RI2RO (1 << 6) +#define ES8388_DACCONTROL20_RD2RO (1 << 7) + +#define ES8388_DACCONTROL21 0x2b +#define ES8388_DACCONTROL21_LI2MOVOL (7 << 3) +#define ES8388_DACCONTROL21_LI2MO (1 << 6) +#define ES8388_DACCONTROL21_LD2MO (1 << 7) + +#define ES8388_DACCONTROL22 0x2c +#define ES8388_DACCONTROL22_RI2MOVOL (7 << 3) +#define ES8388_DACCONTROL22_RI2MO (1 << 6) +#define ES8388_DACCONTROL22_RD2MO (1 << 7) + +#define ES8388_DACCONTROL23 0x2d +#define ES8388_DACCONTROL23_MOUTINV (1 << 1) +#define ES8388_DACCONTROL23_HPSWPOL (1 << 2) +#define ES8388_DACCONTROL23_HPSWEN (1 << 3) +#define ES8388_DACCONTROL23_VROI_1p5k (0 << 4) +#define ES8388_DACCONTROL23_VROI_40k (1 << 4) +#define ES8388_DACCONTROL23_OUT3_VREF (0 << 5) +#define ES8388_DACCONTROL23_OUT3_ROUT1 (1 << 5) +#define ES8388_DACCONTROL23_OUT3_MONOOUT (2 << 5) +#define ES8388_DACCONTROL23_OUT3_RIGHT_MIXER (3 << 5) +#define ES8388_DACCONTROL23_ROUT2INV (1 << 7) + +/* LOUT1 Amplifier */ +#define ES8388_LOUT1VOL 0x2e +#define ES8388_LOUT1VOL_MASK (0 << 5) +#define ES8388_LOUT1VOL_MAX (0x24) + +/* ROUT1 Amplifier */ +#define ES8388_ROUT1VOL 0x2f +#define ES8388_ROUT1VOL_MASK (0 << 5) +#define ES8388_ROUT1VOL_MAX (0x24) + +#define ES8388_OUT1VOL_MAX (0x24) + +/* LOUT2 Amplifier */ +#define ES8388_LOUT2VOL 0x30 +#define ES8388_LOUT2VOL_MASK (0 << 5) +#define ES8388_LOUT2VOL_MAX (0x24) + +/* ROUT2 Amplifier */ +#define 
ES8388_ROUT2VOL 0x31 +#define ES8388_ROUT2VOL_MASK (0 << 5) +#define ES8388_ROUT2VOL_MAX (0x24) + +#define ES8388_OUT2VOL_MAX (0x24) + +/* Mono Out Amplifier */ +#define ES8388_MONOOUTVOL 0x32 +#define ES8388_MONOOUTVOL_MASK (0 << 5) +#define ES8388_MONOOUTVOL_MAX (0x24) + +#define ES8388_DACCONTROL29 0x33 +#define ES8388_DACCONTROL30 0x34 + +#define ES8388_SYSCLK 0 + +#define ES8388_REG_MAX 0x35 + +#define ES8388_1536FS 1536 +#define ES8388_1024FS 1024 +#define ES8388_768FS 768 +#define ES8388_512FS 512 +#define ES8388_384FS 384 +#define ES8388_256FS 256 +#define ES8388_128FS 128 + +#endif diff --git a/sound/soc/phytium/Kconfig b/sound/soc/phytium/Kconfig new file mode 100644 index 0000000000000..b4fd9ae0b654b --- /dev/null +++ b/sound/soc/phytium/Kconfig @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: GPL-2.0 +config SND_SOC_PHYTIUM_I2S + tristate "Phytium I2S Device Driver" + depends on ARCH_PHYTIUM + help + Say Y or M if you want to add support for the I2S driver for + the Phytium I2S device. The device supports 2 channels each + for playback and capture. + +config SND_PMDK_ES8388 + tristate "Phytium machine support with ES8388" + depends on I2C && SND_SOC_PHYTIUM_I2S + select SND_SOC_ES8388 + help + Say Y if you want to add Phytium machine support for + ES8388 codecs. + +config SND_PMDK_ES8336 + tristate "Phytium machine support with ES8336" + depends on I2C && SND_SOC_PHYTIUM_I2S + select SND_SOC_ES8336 + help + Say Y if you want to add Phytium machine support for + ES8336 codecs. + +config SND_PMDK_DP + tristate "Phytium machine support with DP" + depends on I2C && SND_SOC_PHYTIUM_I2S + select SND_SOC_HDMI_CODEC + help + Say Y if you want to add Phytium machine support for + DisplayPort. diff --git a/sound/soc/phytium/Makefile b/sound/soc/phytium/Makefile new file mode 100644 index 0000000000000..45cd2ed9d420c --- /dev/null +++ b/sound/soc/phytium/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 +# PHYTIUM Platform Support + +snd-soc-phytium-i2s-objs := phytium_i2s.o +obj-$(CONFIG_SND_SOC_PHYTIUM_I2S) += snd-soc-phytium-i2s.o + +snd-soc-pmdk-es8388-objs := pmdk_es8388.o +obj-$(CONFIG_SND_PMDK_ES8388) += snd-soc-pmdk-es8388.o + +snd-soc-pmdk-es8336-objs := pmdk_es8336.o +obj-$(CONFIG_SND_PMDK_ES8336) += snd-soc-pmdk-es8336.o + +snd-soc-pmdk-dp-objs := pmdk_dp.o +obj-$(CONFIG_SND_PMDK_DP) += snd-soc-pmdk-dp.o diff --git a/sound/soc/phytium/local.h b/sound/soc/phytium/local.h new file mode 100644 index 0000000000000..47c3c6c8cfbb0 --- /dev/null +++ b/sound/soc/phytium/local.h @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023, Phytium Technology Co., Ltd.
+ */ + +#ifndef __PHYTIUM_I2S_LOCAL_H +#define __PHYTIUM_I2S_LOCAL_H + +#include +#include +#include +#include + +/* I2S clock settings */ +#define CLK_CFG0 0xc00 +#define CLK_CFG1 0xc04 + +/* Common registers for all channels */ +#define I2S_IER 0x000 +#define IRER 0x004 +#define ITER 0x008 +#define CER 0x00C + +#define RXFFR 0x014 +#define TXFFR 0x018 + +/* Interrupt status register fields */ +#define ISR_TXFO BIT(5) +#define ISR_TXFE BIT(4) +#define ISR_RXFO BIT(1) +#define ISR_RXDA BIT(0) + +/* I2S Tx/Rx registers for all channels */ +#define LRBR_LTHR(x) (0x40 * (x) + 0x020) +#define RRBR_RTHR(x) (0x40 * (x) + 0x024) +#define RER(x) (0x40 * (x) + 0x028) + +#define RCR(x) (0x40 * (x) + 0x030) + +#define ISR(x) (0x40 * (x) + 0x038) +#define IMR(x) (0x40 * (x) + 0x03C) +#define ROR(x) (0x40 * (x) + 0x040) +#define TOR(x) (0x40 * (x) + 0x044) +#define RFCR(x) (0x40 * (x) + 0x048) +#define TFCR(x) (0x40 * (x) + 0x04C) +#define RFF(x) (0x40 * (x) + 0x050) +#define TFF(x) (0x40 * (x) + 0x054) + +/* Enable txd and rxd blocks for channels 0~3 */ +#define TER(x) (0x40 * (x) + 0x02C) +#define CCR 0x010 +#define TCR(x) (0x40 * (x) + 0x034) + +/* I2S component registers */ +#define I2S_COMP_PARAM_2 0x01F0 +#define I2S_COMP_PARAM_1 0x01F4 +#define I2S_COMP_VERSION 0x01F8 +#define I2S_COMP_TYPE 0x01FC + +/* I2S and DMA */ +#define DMA_GCAP 0x0024 + +#define DMA_CHAL_CONFG1 0x0028 + +#define DMA_CHAL_CONFG0 0x0004 +#define DMA_MASK_INT 0x000c +#define DMA_BDLPU(x) (0x40 * (x) + 0x0040) +#define DMA_BDLPL(x) (0x40 * (x) + 0x0044) +#define DMA_CHALX_DEV_ADDR(x) (0x40 * (x) + 0x0048) +#define DMA_CHALX_CBL(x) (0x40 * (x) + 0x0054) +#define DMA_CHALX_LVI(x) (0x40 * (x) + 0x004c) + +#define DMA_CHALX_DSIZE(x) (0x40 * (x) + 0x0064) +#define DMA_CHALX_DLENTH(x) (0x40 * (x) + 0x0068) +#define DMA_CHALX_CTL(x) (0x40 * (x) + 0x0058) + +#define DMA_CTL 0x0000 + +#define DMA_LPIB(x) (0x40 * (x) + 0x0050) + +#define DMA_STS 0x0008 + +/* max number of fragments - we may use more if allocating more pages for BDL */ +#define BDL_SIZE 4096 +#define AZX_MAX_BDL_ENTRIES (BDL_SIZE / 16) + +/* + * Component parameter register fields - define the I2S block's + * configuration.
+ */ +#define COMP1_TX_WORDSIZE_3(r) (((r) & GENMASK(27, 25)) >> 25) +#define COMP1_TX_WORDSIZE_2(r) (((r) & GENMASK(24, 22)) >> 22) +#define COMP1_TX_WORDSIZE_1(r) (((r) & GENMASK(21, 19)) >> 19) +#define COMP1_TX_WORDSIZE_0(r) (((r) & GENMASK(18, 16)) >> 16) +#define COMP1_TX_CHANNELS(r) (((r) & GENMASK(10, 9)) >> 9) +#define COMP1_RX_CHANNELS(r) (((r) & GENMASK(8, 7)) >> 7) +#define COMP1_RX_ENABLED(r) (((r) & BIT(6)) >> 6) +#define COMP1_TX_ENABLED(r) (((r) & BIT(5)) >> 5) +#define COMP1_MODE_EN(r) (((r) & BIT(4)) >> 4) +#define COMP1_FIFO_DEPTH_GLOBAL(r) (((r) & GENMASK(3, 2)) >> 2) +#define COMP1_APB_DATA_WIDTH(r) (((r) & GENMASK(1, 0)) >> 0) + +#define COMP2_RX_WORDSIZE_3(r) (((r) & GENMASK(12, 10)) >> 10) +#define COMP2_RX_WORDSIZE_2(r) (((r) & GENMASK(9, 7)) >> 7) +#define COMP2_RX_WORDSIZE_1(r) (((r) & GENMASK(5, 3)) >> 3) +#define COMP2_RX_WORDSIZE_0(r) (((r) & GENMASK(2, 0)) >> 0) + +/* Number of entries in WORDSIZE and DATA_WIDTH parameter registers */ +#define COMP_MAX_WORDSIZE (1 << 3) +#define COMP_MAX_DATA_WIDTH (1 << 2) + +#define MAX_CHANNEL_NUM 8 +#define MIN_CHANNEL_NUM 2 + +#define azx_bus(chip) (&(chip)->bus.core) +#define bus_to_azx(_bus) container_of(_bus, struct azx, bus.core) + +#define I2S_UNSOL_QUEUE_SIZE 64 +#define I2S_MAX_CODECS 8 /* limit by controller side */ + +#define azx_stream(dev) (&(dev)->core) + +struct i2s_clk_config_data { + int chan_nr; + u32 data_width; + u32 sample_rate; +}; + +struct i2sc_bus { + struct device *dev; + const struct i2s_bus_ops *ops; + const struct i2s_io_ops *io_ops; + const struct i2s_ext_bus_ops *ext_ops; + + /* h/w resources */ + unsigned long addr; + void __iomem *remap_addr; + int irq; + + /* codec linked list */ + struct list_head codec_list; + unsigned int num_codecs; + + unsigned int unsol_rp, unsol_wp; + struct work_struct unsol_work; + + struct snd_dma_buffer bdl0; + struct snd_dma_buffer bdl1; + + /* i2s_stream linked list */ + struct list_head stream_list; + + bool reverse_assign; /* assign devices in reverse order */ + + int bdl_pos_adj; /* BDL position adjustment */ + + /* locks */ + spinlock_t reg_lock; +}; + +struct i2s_bus { + struct i2sc_bus core; + + struct snd_card *card; + + struct pci_dev *pci; + + struct mutex prepare_mutex; +}; + + +/* + * i2s stream + */ +struct i2s_stream { + struct i2sc_bus *bus; + struct snd_dma_buffer bdl; /* BDL buffer */ + __le32 *posbuf; /* position buffer pointer */ + int direction; /* playback / capture (SNDRV_PCM_STREAM_*) */ + + unsigned int bufsize; /* size of the play buffer in bytes */ + unsigned int period_bytes; /* size of the period in bytes */ + unsigned int frags; /* number for period in the play buffer */ + unsigned int fifo_size; /* FIFO size */ + + void __iomem *sd_addr; /* stream descriptor pointer */ + + u32 sd_int_sta_mask; /* stream int status mask */ + + /* pcm support */ + struct snd_pcm_substream *substream; /* assigned substream, + * set in PCM open + */ + unsigned int format_val; /* format value to be set in the + * controller and the codec + */ + unsigned char stream_tag; /* assigned stream */ + unsigned char index; /* stream index */ + int assigned_key; /* last device# key assigned to */ + + bool opened; + bool running; + bool prepared; + bool no_period_wakeup; + + int delay_negative_threshold; + + struct list_head list; + +}; + +struct azx_dev { + struct i2s_stream core; + unsigned int irq_pending:1; +}; + +/* PCM setup */ +static inline struct azx_dev *get_azx_dev(struct snd_pcm_substream *substream) +{ + return substream->runtime->private_data; +} + 
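+/* get_azx_dev() relies on phytium_pcm_open() storing the assigned stream in runtime->private_data */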
+#define AZX_MAX_CODECS HDA_MAX_CODECS +#define AZX_DEFAULT_CODECS 4 + +#define stream_to_azx_dev(s) container_of(s, struct azx_dev, core) + +struct azx; + +struct i2s_controller_ops { + int (*substream_alloc_pages)(struct azx *chip, + struct snd_pcm_substream *substream, + size_t size); + int (*substream_free_pages)(struct azx *chip, + struct snd_pcm_substream *substream); + int (*position_check)(struct azx *chip, struct azx_dev *azx_dev); +}; + +struct i2s_io_ops { + int (*dma_alloc_pages)(struct i2sc_bus *bus, int type, size_t size, + struct snd_dma_buffer *buf); + void (*dma_free_pages)(struct i2sc_bus *bus, + struct snd_dma_buffer *buf); +}; + +struct azx { + struct i2s_bus bus; + + struct snd_card *card; + struct pci_dev *pci; + int dev_index; + + int playback_streams; + int playback_index_offset; + int capture_streams; + int capture_index_offset; + int num_streams; + + /* Register interaction. */ + const struct i2s_controller_ops *ops; + + /* locks */ + struct mutex open_mutex; /* Prevents concurrent open/close operations */ + + /* PCM */ + struct list_head pcm_list; /* azx_pcm list */ + + /* flags */ + int bdl_pos_adj; + unsigned int running:1; + unsigned int region_requested:1; + unsigned int disabled:1; +}; + +struct i2s_phytium { + struct azx chip; + struct snd_pcm_substream *substream; + struct device *dev; + struct device *pdev; + u32 paddr; + void __iomem *regs; + void __iomem *regs_db; + int irq_id; + + /* for pending irqs */ + struct work_struct irq_pending_work; + + /* sync probing */ + struct completion probe_wait; + struct work_struct probe_work; + + /* extra flags */ + unsigned int pcie:1; + unsigned int irq_pending_warned:1; + unsigned int probe_continued:1; + unsigned int i2s_dp:1; + + unsigned int i2s_reg_comp1; + unsigned int i2s_reg_comp2; + struct clk *clk; + unsigned int capability; + unsigned int quirks; + u32 fifo_th; + int active; + u32 xfer_resolution; + u32 ccr; + u32 clk_base; + u32 cfg; + struct i2s_clk_config_data config; + + /*azx_dev*/ + struct i2s_stream core; +}; + +#define azx_alloc_stream_pages(chip) \ + snd_i2s_bus_alloc_stream_pages(azx_bus(chip)) + +#endif diff --git a/sound/soc/phytium/phytium_i2s.c b/sound/soc/phytium/phytium_i2s.c new file mode 100644 index 0000000000000..b68d8f4349706 --- /dev/null +++ b/sound/soc/phytium/phytium_i2s.c @@ -0,0 +1,1432 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium I2S ASoC driver + * + * Copyright (C) 2020-2023, Phytium Technology Co., Ltd. 
+ * + * Derived from sound/soc/dwc/dwc-i2s.c + * Copyright (C) 2010 ST Microelectronics + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "local.h" + +#define NUM_CAPTURE 1 +#define NUM_PLAYBACK 1 + +#define PHYTIUM_I2S_PLAY (1 << 0) +#define PHYTIUM_I2S_RECORD (1 << 1) +#define PHYTIUM_I2S_SLAVE (1 << 2) +#define PHYTIUM_I2S_MASTER (1 << 3) + +#define PHYTIUM_I2S_QUIRK_16BIT_IDX_OVERRIDE (1 << 2) + +#define TWO_CHANNEL_SUPPORT 2 /* up to 2.0 */ +#define FOUR_CHANNEL_SUPPORT 4 /* up to 3.1 */ +#define SIX_CHANNEL_SUPPORT 6 /* up to 5.1 */ +#define EIGHT_CHANNEL_SUPPORT 8 /* up to 7.1 */ + +struct pdata_px210_mfd { + struct device *dev; + char *name; + int clk_base; +}; + +static inline void i2s_write_reg(void __iomem *io_base, int reg, u32 val) +{ + writel(val, io_base + reg); +} + +static inline u32 i2s_read_reg(void __iomem *io_base, int reg) +{ + return readl(io_base + reg); +} + +static inline void i2s_disable_channels(struct i2s_phytium *dev, u32 stream) +{ + u32 i = 0; + + if (stream == SNDRV_PCM_STREAM_PLAYBACK) { + for (i = 0; i < 4; i++) + i2s_write_reg(dev->regs, TER(i), 0); + } else { + for (i = 0; i < 4; i++) + i2s_write_reg(dev->regs, RER(i), 0); + } +} + +static int substream_free_pages(struct azx *chip, + struct snd_pcm_substream *substream) +{ + return snd_pcm_lib_free_pages(substream); +} + +static void stream_update(struct i2sc_bus *bus, struct i2s_stream *s) +{ + struct azx *chip = bus_to_azx(bus); + + struct azx_dev *azx_dev = stream_to_azx_dev(s); + + /* check whether this IRQ is really acceptable */ + if (!chip->ops->position_check || + chip->ops->position_check(chip, azx_dev)) { + spin_unlock(&bus->reg_lock); + snd_pcm_period_elapsed(azx_stream(azx_dev)->substream); + spin_lock(&bus->reg_lock); + } +} + +static int snd_i2s_bus_handle_stream_irq(struct i2sc_bus *bus, unsigned int status, + void (*ack)(struct i2sc_bus *, struct i2s_stream *)) +{ + struct i2s_stream *azx_dev; + u32 sd_status, qc_sd_status; + int handled = 0; + + list_for_each_entry(azx_dev, &bus->stream_list, list) { + if (status & azx_dev->sd_int_sta_mask) { + sd_status = i2s_read_reg(azx_dev->sd_addr, DMA_STS); + i2s_write_reg(azx_dev->sd_addr, DMA_STS, azx_dev->sd_int_sta_mask); + qc_sd_status = i2s_read_reg(azx_dev->sd_addr, DMA_STS); + handled |= 1 << azx_dev->index; + azx_dev->running = 1; + if (!azx_dev->substream || !azx_dev->running || + !(sd_status & 0xffffffff)) { + continue; + } + if (ack) + ack(bus, azx_dev); + } + } + + return handled; +} + +static irqreturn_t azx_i2s_interrupt(int irq, void *dev_id) +{ + struct azx *chip = dev_id; + struct i2sc_bus *bus = azx_bus(chip); + u32 status; + bool active, handled = false; + int repeat = 0; /* count for avoiding endless loop */ + + spin_lock(&bus->reg_lock); + + if (chip->disabled) + goto unlock; + + do { + status = i2s_read_reg(bus->remap_addr, DMA_STS); + + if (status == 0) + break; + + handled = true; + active = false; + if (snd_i2s_bus_handle_stream_irq(bus, status, stream_update)) + active = true; + } while (active && ++repeat < 1); + + unlock: + spin_unlock(&bus->reg_lock); + return IRQ_RETVAL(handled); +} + +static int azx_acquire_irq(struct azx *chip, int do_disconnect) +{ + struct i2sc_bus *bus = azx_bus(chip); + struct i2s_phytium *i2s = container_of(chip, struct i2s_phytium, chip); + int err; + + err = devm_request_irq(i2s->dev, i2s->irq_id, azx_i2s_interrupt, 
IRQF_SHARED, + "phytium i2s", chip); + + if (err < 0) { + dev_err(i2s->dev, "failed to request irq\n"); + return err; + } + + bus->irq = i2s->irq_id; + + return 0; +} + +static void i2s_start(struct i2s_phytium *dev, + struct snd_pcm_substream *substream) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(dev->regs, ITER, 1); + else + i2s_write_reg(dev->regs, IRER, 1); + + /* enable the clock */ + i2s_write_reg(dev->regs, CER, 1); + + /* enable the I2S block */ + i2s_write_reg(dev->regs, I2S_IER, 1); +} + +static void i2s_stop(struct i2s_phytium *dev, struct snd_pcm_substream *substream) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(dev->regs, ITER, 0); + else + i2s_write_reg(dev->regs, IRER, 0); + + if (!dev->active) { + i2s_write_reg(dev->regs, CER, 0); + i2s_write_reg(dev->regs, I2S_IER, 0); + } +} + +static void phytium_i2s_config(struct i2s_phytium *dev, int stream) +{ + i2s_disable_channels(dev, stream); + + if (stream == SNDRV_PCM_STREAM_PLAYBACK) { + i2s_write_reg(dev->regs, TCR(0), dev->xfer_resolution); + i2s_write_reg(dev->regs, TER(0), 1); + } else { + i2s_write_reg(dev->regs, RCR(0), dev->xfer_resolution); + i2s_write_reg(dev->regs, RER(0), 1); + } +} + +static int phytium_i2s_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(dai); + struct i2s_clk_config_data *config = &dev->config; + u64 fix, point; + u32 cfg = 0; + + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S16_LE: + config->data_width = 16; + dev->ccr = 0x00; + dev->xfer_resolution = 0x02; + break; + + case SNDRV_PCM_FORMAT_S24_LE: + config->data_width = 24; + dev->ccr = 0x08; + dev->xfer_resolution = 0x04; + break; + + case SNDRV_PCM_FORMAT_S32_LE: + config->data_width = 32; + dev->ccr = 0x10; + dev->xfer_resolution = 0x05; + break; + + default: + dev_err(dev->dev, "phytium-i2s: unsupported PCM fmt\n"); + return -EINVAL; + } + + config->chan_nr = params_channels(params); + + switch (config->chan_nr) { + case EIGHT_CHANNEL_SUPPORT: + case SIX_CHANNEL_SUPPORT: + case FOUR_CHANNEL_SUPPORT: + case TWO_CHANNEL_SUPPORT: + break; + default: + dev_err(dev->dev, "channel not supported\n"); + return -EINVAL; + } + + phytium_i2s_config(dev, substream->stream); + + i2s_write_reg(dev->regs, CCR, dev->ccr); + + config->sample_rate = params_rate(params); + if (dev->capability & PHYTIUM_I2S_MASTER) { + /* program CLK_CFG0 with the integer part of the clock divider in the upper 16 bits and the scaled fractional remainder in the lower 16 bits */ + fix = dev->clk_base / config->sample_rate / config->data_width / 32; + point = ((dev->clk_base / config->sample_rate) << 10) / config->data_width / 32; + point = (point - (fix << 10)) * 10; + cfg = ((u16) fix << 16) | (u16) point; + i2s_write_reg(dev->regs, CLK_CFG0, cfg); + i2s_write_reg(dev->regs, CLK_CFG1, 0xf); + } + dev->cfg = cfg; + return 0; +} + +static int phytium_i2s_prepare(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(dai); + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(dev->regs, TXFFR, 1); + else + i2s_write_reg(dev->regs, RXFFR, 1); + + return 0; +} + +static int phytium_i2s_trigger(struct snd_pcm_substream *substream, + int cmd, struct snd_soc_dai *dai) +{ + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(dai); + int ret = 0; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + dev->active++; + i2s_start(dev, substream); + break; + case SNDRV_PCM_TRIGGER_STOP: + case
SNDRV_PCM_TRIGGER_SUSPEND: + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + dev->active--; + i2s_stop(dev, substream); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int phytium_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) +{ + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(cpu_dai); + int ret = 0; + + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_BC_FC: + if (dev->capability & PHYTIUM_I2S_SLAVE) + ret = 0; + else + ret = -EINVAL; + break; + case SND_SOC_DAIFMT_BP_FP: + if (dev->capability & PHYTIUM_I2S_MASTER) + ret = 0; + else + ret = -EINVAL; + break; + case SND_SOC_DAIFMT_CBP_CFC: + case SND_SOC_DAIFMT_CBC_CFP: + ret = -EINVAL; + break; + default: + dev_dbg(dev->dev, "phytium/i2s: Invalid master/slave format\n"); + ret = -EINVAL; + break; + } + return ret; +} + +static const struct snd_soc_dai_ops phytium_i2s_dai_ops = { + .hw_params = phytium_i2s_hw_params, + .prepare = phytium_i2s_prepare, + .trigger = phytium_i2s_trigger, + .set_fmt = phytium_i2s_set_fmt, +}; + +#ifdef CONFIG_PM +static int phytium_i2s_suspend(struct snd_soc_component *component) +{ + return 0; +} + +static int phytium_i2s_resume(struct snd_soc_component *component) +{ + struct i2s_phytium *dev = snd_soc_component_get_drvdata(component); + struct snd_soc_dai *dai; + + for_each_component_dais(component, dai) { + if (snd_soc_dai_stream_active(dai, SNDRV_PCM_STREAM_PLAYBACK)) + phytium_i2s_config(dev, SNDRV_PCM_STREAM_PLAYBACK); + if (snd_soc_dai_stream_active(dai, SNDRV_PCM_STREAM_CAPTURE)) + phytium_i2s_config(dev, SNDRV_PCM_STREAM_CAPTURE); + } + i2s_write_reg(dev->regs, CLK_CFG0, dev->cfg); + i2s_write_reg(dev->regs, CLK_CFG1, 0xf); + return 0; +} +#else +#define phytium_i2s_suspend NULL +#define phytium_i2s_resume NULL +#endif + +static struct snd_soc_dai_driver phytium_i2s_dai = { + .playback = { + .stream_name = "i2s-Playback", + .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_8000_192000, + .formats = SNDRV_PCM_FMTBIT_S8 | + SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S20_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE, + }, + .capture = { + .stream_name = "i2s-Capture", + .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_8000_192000, + .formats = SNDRV_PCM_FMTBIT_S8 | + SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S20_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE, + }, + .ops = &phytium_i2s_dai_ops, + .symmetric_rate = 1, +}; + +static const struct snd_pcm_hardware phytium_pcm_hardware = { + .info = SNDRV_PCM_INFO_INTERLEAVED | + SNDRV_PCM_INFO_MMAP | + SNDRV_PCM_INFO_MMAP_VALID | + SNDRV_PCM_INFO_BLOCK_TRANSFER, + .rates = SNDRV_PCM_RATE_8000 | + SNDRV_PCM_RATE_32000 | + SNDRV_PCM_RATE_44100 | + SNDRV_PCM_RATE_48000, + .rate_min = 8000, + .rate_max = 48000, + .formats = (SNDRV_PCM_FMTBIT_S8 | + SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S20_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE), + .channels_min = 2, + .channels_max = 2, + .buffer_bytes_max = 4096*16, + .period_bytes_min = 1024, + .period_bytes_max = 4096*4, + .periods_min = 2, + .periods_max = 16, + .fifo_size = 16, +}; + +static struct i2s_stream *snd_i2s_stream_assign(struct i2sc_bus *bus, + struct snd_pcm_substream *substream) +{ + struct i2s_stream *azx_dev; + struct i2s_stream *res = NULL; + + /* make a non-zero unique key for the substream */ + int key = (substream->pcm->device << 16) | (substream->number << 2) | + (substream->stream + 1); + + list_for_each_entry(azx_dev, &bus->stream_list, list) { + if 
(azx_dev->direction != substream->stream) + continue; + + azx_dev->opened = 0; + + if (azx_dev->assigned_key == key) { + res = azx_dev; + break; + } + + if (!res || bus->reverse_assign) + res = azx_dev; + } + + if (res) { + spin_lock_irq(&bus->reg_lock); + res->opened = 1; + res->running = 0; + res->assigned_key = key; + res->substream = substream; + spin_unlock_irq(&bus->reg_lock); + } + + return res; +} + +/* assign a stream for the PCM */ +static inline struct azx_dev * +azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream) +{ + struct i2s_stream *s; + + s = snd_i2s_stream_assign(azx_bus(chip), substream); + if (!s) + return NULL; + return stream_to_azx_dev(s); +} + +static int phytium_pcm_open(struct snd_soc_component *component, + struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0)); + + struct azx *chip = &dev->chip; + struct azx_dev *azx_dev; + struct snd_pcm_runtime *runtime = substream->runtime; + + azx_dev = azx_assign_device(chip, substream); + if (azx_dev == NULL) + return -EBUSY; + + snd_soc_set_runtime_hwparams(substream, &phytium_pcm_hardware); + snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); + snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128); + runtime->private_data = azx_dev; + + return 0; +} + +static int phytium_pcm_close(struct snd_soc_component *component, + struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0)); + struct azx *chip = &dev->chip; + struct azx_dev *azx_dev = get_azx_dev(substream); + + mutex_lock(&chip->open_mutex); + azx_stream(azx_dev)->opened = 0; + azx_stream(azx_dev)->running = 0; + azx_stream(azx_dev)->substream = NULL; + + mutex_unlock(&chip->open_mutex); + return 0; +} + +static int phytium_pcm_new(struct snd_soc_component *component, + struct snd_soc_pcm_runtime *rtd) +{ + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0)); + size_t size = phytium_pcm_hardware.buffer_bytes_max; + + snd_pcm_set_managed_buffer_all(rtd->pcm, + SNDRV_DMA_TYPE_DEV, + dev->pdev, size, size); + + return 0; +} + +static const struct i2s_io_ops axi_i2s_io_ops; +static const struct i2s_controller_ops axi_i2s_ops; + +static int phytium_pcm_hw_params(struct snd_soc_component *component, + struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *hw_params) +{ + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0)); + struct azx *chip = &dev->chip; + struct azx_dev *azx_dev = get_azx_dev(substream); + int ret; + + azx_dev->core.bufsize = 0; + azx_dev->core.period_bytes = 0; + azx_dev->core.format_val = 0; + + switch (params_format(hw_params)) { + case SNDRV_PCM_FORMAT_S16_LE: + azx_dev->core.format_val = 2; + break; + + /* 24-bit and 32-bit samples share DMA data-size code 0 */ + case SNDRV_PCM_FORMAT_S24_LE: + azx_dev->core.format_val = 0; + break; + + case SNDRV_PCM_FORMAT_S32_LE: + azx_dev->core.format_val = 0; + break; + + default: + dev_err(dev->dev, "phytium-i2s: unsupported PCM fmt\n"); + return -EINVAL; + } + + ret = chip->ops->substream_alloc_pages(chip, substream, + params_buffer_bytes(hw_params)); + + return ret; +} + +/* + * set up a BDL entry + */ +static int setup_bdle(struct i2sc_bus *bus, + struct snd_dma_buffer *dmab, + struct i2s_stream *azx_dev,
__le32 **bdlp, + int ofs, int size, int with_ioc) +{ + struct snd_pcm_substream *substream = azx_dev->substream; + struct snd_pcm_runtime *runtime = substream->runtime; + __le32 *bdl = *bdlp; + + dmab->addr = runtime->dma_addr; + while (size > 0) { + dma_addr_t addr; + int chunk; + + if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES) + return -EINVAL; + + addr = snd_sgbuf_get_addr(dmab, ofs); + + /* program the address field of the BDL entry */ + bdl[0] = cpu_to_le32((u32)addr); + + bdl[1] = cpu_to_le32(upper_32_bits(addr)); + + /* program the size field of the BDL entry */ + chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size); + + bdl[2] = cpu_to_le32(chunk); + + /* program the IOC to enable interrupt + * only when the whole fragment is processed + */ + size -= chunk; + bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01); + + bdl += 4; + azx_dev->frags++; + ofs += chunk; + } + *bdlp = bdl; + return ofs; +} + +static int snd_i2s_stream_setup_periods(struct i2s_stream *azx_dev) +{ + struct i2sc_bus *bus = azx_dev->bus; + struct snd_pcm_substream *substream = azx_dev->substream; + struct snd_pcm_runtime *runtime = substream->runtime; + __le32 *bdl; + int i, ofs, periods, period_bytes; + int pos_adj, pos_align; + + period_bytes = azx_dev->period_bytes; + periods = azx_dev->bufsize / period_bytes; + + /* program the initial BDL entries */ + bdl = (__le32 *)azx_dev->bdl.area; + + ofs = 0; + azx_dev->frags = 0; + + pos_adj = bus->bdl_pos_adj; + + if (!azx_dev->no_period_wakeup && pos_adj > 0) { + + pos_align = pos_adj; + pos_adj = (pos_adj * runtime->rate + 47999) / 48000; + + if (!pos_adj) + pos_adj = pos_align; + else + pos_adj = ((pos_adj + pos_align - 1) / pos_align) * + pos_align; + + pos_adj = frames_to_bytes(runtime, pos_adj); + if (pos_adj >= period_bytes) { + dev_warn(bus->dev, "Too big adjustment %d\n", + pos_adj); + pos_adj = 0; + } else { + ofs = setup_bdle(bus, snd_pcm_get_dma_buf(substream), + azx_dev, + &bdl, ofs, pos_adj, true); + if (ofs < 0) + goto error; + } + } else { + pos_adj = 0; + } + + for (i = 0; i < periods; i++) { + if (i == periods - 1 && pos_adj) + ofs = setup_bdle(bus, snd_pcm_get_dma_buf(substream), + azx_dev, &bdl, ofs, + period_bytes - pos_adj, 0); + else + ofs = setup_bdle(bus, snd_pcm_get_dma_buf(substream), + azx_dev, &bdl, ofs, + period_bytes, + !azx_dev->no_period_wakeup); + if (ofs < 0) + goto error; + } + + return 0; + error: + dev_err(bus->dev, "Too many BDL entries: buffer=%d, period=%d\n", + azx_dev->bufsize, period_bytes); + return -EINVAL; +} + +static int snd_i2s_stream_set_params(struct i2s_stream *azx_dev, + unsigned int format_val) +{ + unsigned int bufsize, period_bytes; + struct snd_pcm_substream *substream = azx_dev->substream; + struct snd_pcm_runtime *runtime; + int err; + + if (!substream) + return -EINVAL; + + runtime = substream->runtime; + bufsize = snd_pcm_lib_buffer_bytes(substream); + period_bytes = snd_pcm_lib_period_bytes(substream); + if (bufsize != azx_dev->bufsize || + period_bytes != azx_dev->period_bytes || + runtime->no_period_wakeup != azx_dev->no_period_wakeup) { + azx_dev->bufsize = bufsize; + azx_dev->period_bytes = period_bytes; + azx_dev->no_period_wakeup = runtime->no_period_wakeup; + err = snd_i2s_stream_setup_periods(azx_dev); + if (err < 0) + return err; + } + + return 0; +} + +static int snd_i2s_stream_setup(struct i2s_stream *azx_dev, int pcie, u32 paddr) +{ + struct snd_pcm_runtime *runtime; + + if (azx_dev->substream) + runtime = azx_dev->substream->runtime; + else + runtime = NULL; + + i2s_write_reg(azx_dev->sd_addr, 
DMA_CHAL_CONFG0, 0x8081); + i2s_write_reg(azx_dev->sd_addr, DMA_MASK_INT, 0x80000003); + + if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) { + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(1), (u32)azx_dev->bdl.addr); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(1), upper_32_bits(azx_dev->bdl.addr)); + if (pcie) + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(1), 0x1c8); + else + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(1), paddr + 0x1c8); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CBL(1), azx_dev->bufsize); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_LVI(1), azx_dev->frags - 1); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DSIZE(1), azx_dev->format_val);//0x2 + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DLENTH(1), 0x0);//0x0 + } else { + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(0), (u32)azx_dev->bdl.addr); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(0), upper_32_bits(azx_dev->bdl.addr)); + if (pcie) + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(0), 0x1c0); + else + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(0), paddr + 0x1c0); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CBL(0), azx_dev->bufsize); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_LVI(0), azx_dev->frags - 1); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DSIZE(0), azx_dev->format_val << 2);//0x8 + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DLENTH(0), 0x0); + } + + if (runtime && runtime->period_size > 64) + azx_dev->delay_negative_threshold = + -frames_to_bytes(runtime, 64); + else + azx_dev->delay_negative_threshold = 0; + + return 0; +} + +static int phytium_pcm_prepare(struct snd_soc_component *component, + struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0)); + struct azx *chip = &dev->chip; + struct azx_dev *azx_dev = get_azx_dev(substream); + struct i2sc_bus *bus = azx_bus(chip); + struct i2s_stream *hstr_p; + int err; + + dev->substream = substream; + azx_dev->core.substream = substream; + azx_dev->core.sd_addr = dev->regs_db; + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + azx_dev->core.bdl.area = bus->bdl0.area; + azx_dev->core.bdl.addr = bus->bdl0.addr; + } else { + azx_dev->core.bdl.area = bus->bdl1.area; + azx_dev->core.bdl.addr = bus->bdl1.addr; + } + + if (!substream) + return -EINVAL; + + hstr_p = azx_stream(azx_dev); + hstr_p->direction = substream->stream; + + err = snd_i2s_stream_set_params(azx_stream(azx_dev), 0); + if (err < 0) + goto unlock; + + snd_i2s_stream_setup(azx_stream(azx_dev), dev->pcie, dev->paddr); + + unlock: + if (!err) + azx_stream(azx_dev)->prepared = 1; + + return err; +} + +static void snd_i2s_stream_clear(struct i2s_stream *azx_dev) +{ + if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0x0); + else + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0x0); + + azx_dev->running = false; +} + +static void snd_i2s_stream_stop(struct i2s_stream *azx_dev) +{ + snd_i2s_stream_clear(azx_dev); +} + +static void snd_i2s_stream_start(struct i2s_stream *azx_dev, bool fresh_start) +{ + if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0x1); + else + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0x5); + + azx_dev->running = true; +} + +static int phytium_pcm_trigger(struct snd_soc_component *component, + struct snd_pcm_substream *substream, int cmd) +{ + struct snd_soc_pcm_runtime *rtd = 
snd_soc_substream_to_rtd(substream); + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0)); + struct azx *chip = &dev->chip; + struct i2sc_bus *bus = azx_bus(chip); + struct azx_dev *azx_dev = get_azx_dev(substream); + struct snd_pcm_substream *s; + struct i2s_stream *hstr; + bool start; + int sbits = 0; + + hstr = azx_stream(azx_dev); + hstr->direction = substream->stream; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + case SNDRV_PCM_TRIGGER_RESUME: + start = true; + break; + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + case SNDRV_PCM_TRIGGER_SUSPEND: + case SNDRV_PCM_TRIGGER_STOP: + start = false; + break; + default: + return -EINVAL; + } + + snd_pcm_group_for_each_entry(s, substream) { + if (s->pcm->card != substream->pcm->card) + continue; + azx_dev = get_azx_dev(s); + sbits |= 1 << azx_dev->core.index; + snd_pcm_trigger_done(s, substream); + } + + spin_lock(&bus->reg_lock); + + snd_pcm_group_for_each_entry(s, substream) { + if (s->pcm->card != substream->pcm->card) + continue; + azx_dev = get_azx_dev(s); + if (start) + snd_i2s_stream_start(azx_stream(azx_dev), true); + else + snd_i2s_stream_stop(azx_stream(azx_dev)); + } + + i2s_write_reg(dev->regs_db, DMA_CTL, 0x1); + spin_unlock(&bus->reg_lock); + + return 0; +} + +static void snd_i2s_stream_cleanup(struct i2s_stream *azx_dev) +{ + int cnt = 10; + u32 mask; + + if (azx_dev->sd_addr) { + if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) { + mask = i2s_read_reg(azx_dev->sd_addr, DMA_MASK_INT); + mask &= ~BIT(1); + i2s_write_reg(azx_dev->sd_addr, DMA_MASK_INT, mask); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0); + while (cnt--) { + if (i2s_read_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1)) == 0) + break; + } + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 2); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(1), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(1), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_STS, azx_dev->sd_int_sta_mask); + } else { + mask = i2s_read_reg(azx_dev->sd_addr, DMA_MASK_INT); + mask &= ~BIT(0); + i2s_write_reg(azx_dev->sd_addr, DMA_MASK_INT, mask); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0); + while (cnt--) { + if (i2s_read_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0)) == 0) + break; + } + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 2); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(0), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(0), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_STS, azx_dev->sd_int_sta_mask); + } + } +} + +static int phytium_pcm_hw_free(struct snd_soc_component *component, + struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0)); + struct azx *chip = &dev->chip; + struct i2s_stream *hstr_p; + struct azx_dev *azx_dev = get_azx_dev(substream); + int err; + + hstr_p = azx_stream(azx_dev); + hstr_p->direction = substream->stream; + snd_i2s_stream_cleanup(azx_stream(azx_dev)); + + err = chip->ops->substream_free_pages(chip, substream); + azx_stream(azx_dev)->prepared = 0; + + return err; +} + +static snd_pcm_uframes_t phytium_pcm_pointer(struct snd_soc_component *component, + struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0)); + u32 pos 
= 0; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + pos = i2s_read_reg(dev->regs_db, DMA_LPIB(1)); + else + pos = i2s_read_reg(dev->regs_db, DMA_LPIB(0)); + + return bytes_to_frames(substream->runtime, pos); +} + +static const struct snd_soc_component_driver phytium_i2s_component = { + .name = "phytium-i2s", + .pcm_construct = phytium_pcm_new, + .open = phytium_pcm_open, + .close = phytium_pcm_close, + .hw_params = phytium_pcm_hw_params, + .hw_free = phytium_pcm_hw_free, + .prepare = phytium_pcm_prepare, + .trigger = phytium_pcm_trigger, + .pointer = phytium_pcm_pointer, + .suspend = phytium_i2s_suspend, + .resume = phytium_i2s_resume, + .legacy_dai_naming = 1, +}; + +/* Maximum bit resolution of a channel - not uniformly spaced */ +static const u32 fifo_width[COMP_MAX_WORDSIZE] = { + 12, 16, 20, 24, 32, 0, 0, 0 +}; + +/* Width of (DMA) bus */ +static const u32 __maybe_unused bus_widths[COMP_MAX_DATA_WIDTH] = { + DMA_SLAVE_BUSWIDTH_1_BYTE, + DMA_SLAVE_BUSWIDTH_2_BYTES, + DMA_SLAVE_BUSWIDTH_4_BYTES, + DMA_SLAVE_BUSWIDTH_UNDEFINED +}; + +/* PCM format to support channel resolution */ +static const u32 __maybe_unused formats[COMP_MAX_WORDSIZE] = { + SNDRV_PCM_FMTBIT_S16_LE, + SNDRV_PCM_FMTBIT_S16_LE, + SNDRV_PCM_FMTBIT_S24_LE, + SNDRV_PCM_FMTBIT_S24_LE, + SNDRV_PCM_FMTBIT_S32_LE, + 0, + 0, + 0 +}; + +static int phytium_configure_dai(struct i2s_phytium *dev) +{ + u32 comp1 = i2s_read_reg(dev->regs, dev->i2s_reg_comp1); + u32 comp2 = i2s_read_reg(dev->regs, dev->i2s_reg_comp2); + u32 fifo_depth = 1 << (1 + COMP1_FIFO_DEPTH_GLOBAL(comp1)); + u32 idx; + + if (COMP1_TX_ENABLED(comp1)) { + dev_dbg(dev->dev, " phytium: play supported\n"); + idx = COMP1_TX_WORDSIZE_0(comp1); + if (WARN_ON(idx >= ARRAY_SIZE(formats))) + return -EINVAL; + } + + if (COMP1_RX_ENABLED(comp1)) { + dev_dbg(dev->dev, "phytium: record supported\n"); + idx = COMP2_RX_WORDSIZE_0(comp2); + if (WARN_ON(idx >= ARRAY_SIZE(formats))) + return -EINVAL; + if (dev->quirks & PHYTIUM_I2S_QUIRK_16BIT_IDX_OVERRIDE) + idx = 1; + } + + if (COMP1_MODE_EN(comp1)) { + dev_dbg(dev->dev, "phytium: i2s master mode supported\n"); + dev->capability |= PHYTIUM_I2S_MASTER; + } else { + dev_dbg(dev->dev, "phytium: i2s slave mode supported\n"); + dev->capability |= PHYTIUM_I2S_SLAVE; + } + + dev->fifo_th = fifo_depth / 2; + return 0; +} + +static int phytium_configure_dai_by_dt(struct i2s_phytium *dev) +{ + u32 comp1 = i2s_read_reg(dev->regs, I2S_COMP_PARAM_1); + u32 comp2 = i2s_read_reg(dev->regs, I2S_COMP_PARAM_2); + u32 idx = COMP1_APB_DATA_WIDTH(comp1); + u32 idx2; + int ret; + + if (WARN_ON(idx >= ARRAY_SIZE(bus_widths))) + return -EINVAL; + + ret = phytium_configure_dai(dev); + if (ret < 0) + return ret; + + if (COMP1_TX_ENABLED(comp1)) { + idx2 = COMP1_TX_WORDSIZE_0(comp1); + dev->capability |= PHYTIUM_I2S_PLAY; + } + if (COMP1_RX_ENABLED(comp1)) { + idx2 = COMP2_RX_WORDSIZE_0(comp2); + dev->capability |= PHYTIUM_I2S_RECORD; + } + + return 0; +} + +static int phytium_i2s_dma_alloc_pages(struct i2sc_bus *bus, int type, size_t size, + struct snd_dma_buffer *buf) +{ + int err; + + err = snd_dma_alloc_pages(type, bus->dev, size, buf); + if (err < 0) + return err; + + return 0; +} + +static int snd_i2s_bus_alloc_stream_pages(struct i2sc_bus *bus) +{ + struct i2s_stream *s; + int num_streams = 0; + int err; + + list_for_each_entry(s, &bus->stream_list, list) { + /* allocate memory for the BDL for each stream */ + err = bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, + BDL_SIZE, &s->bdl); + if (num_streams == 0) { + bus->bdl0.addr = 
s->bdl.addr; + bus->bdl0.area = s->bdl.area; + } else { + bus->bdl1.addr = s->bdl.addr; + bus->bdl1.area = s->bdl.area; + } + num_streams++; + if (err < 0) + return -ENOMEM; + } + + if (WARN_ON(!num_streams)) + return -EINVAL; + + return 0; +} + +static int stream_direction(struct azx *chip, unsigned char index) +{ + if (index >= chip->playback_index_offset && + index < chip->playback_index_offset + chip->playback_streams) + return SNDRV_PCM_STREAM_PLAYBACK; + return SNDRV_PCM_STREAM_CAPTURE; +} + +static void snd_i2s_stream_init(struct i2sc_bus *bus, struct i2s_stream *azx_dev, + int idx, int direction, int tag) +{ + azx_dev->bus = bus; + azx_dev->sd_addr = bus->remap_addr; + + if (idx == 0) + azx_dev->sd_int_sta_mask = 1 << 8; + else + azx_dev->sd_int_sta_mask = 1; + + azx_dev->index = idx; + azx_dev->direction = direction; + azx_dev->stream_tag = tag; + + list_add_tail(&azx_dev->list, &bus->stream_list); +} + +static int azx_i2s_init_streams(struct azx *chip) +{ + int i; + + for (i = 0; i < chip->num_streams; i++) { + struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL); + int dir, tag; + + if (!azx_dev) + return -ENOMEM; + + dir = stream_direction(chip, i); + + tag = i + 1; + + snd_i2s_stream_init(azx_bus(chip), azx_stream(azx_dev), i, dir, tag); + } + + return 0; +} + +static int azx_first_init(struct azx *chip) +{ + struct i2s_phytium *i2s = container_of(chip, struct i2s_phytium, chip); + struct platform_device *pdev = to_platform_device(i2s->dev); + struct device *i2sdev = i2s->dev; + struct i2sc_bus *bus = azx_bus(chip); + struct resource *res; + int err; + unsigned int dma_bits = 64; + + chip->region_requested = 1; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + bus->addr = res->start; + bus->remap_addr = i2s->regs_db; + bus->dev = i2s->pdev; + + if (bus->remap_addr == NULL) { + dev_err(i2sdev, "ioremap error\n"); + return -ENXIO; + } + + if (azx_acquire_irq(chip, 0) < 0) + return -EBUSY; + + synchronize_irq(bus->irq); + + spin_lock_init(&bus->reg_lock); + + if (!dma_set_mask(i2sdev, DMA_BIT_MASK(dma_bits))) { + err = dma_set_coherent_mask(i2sdev, DMA_BIT_MASK(dma_bits)); + } else { + err = dma_set_mask(i2sdev, DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(i2sdev, DMA_BIT_MASK(32)); + } + + chip->playback_streams = NUM_PLAYBACK; + chip->capture_streams = NUM_CAPTURE; + + chip->playback_index_offset = 0; + chip->capture_index_offset = chip->playback_streams; + chip->num_streams = chip->playback_streams + chip->capture_streams; + + err = azx_i2s_init_streams(chip); + if (err < 0) + return err; + + err = azx_alloc_stream_pages(chip); + if (err < 0) + return err; + + return 0; +} + +static int azx_probe_continue(struct azx *chip) +{ + struct i2s_phytium *i2s = container_of(chip, struct i2s_phytium, chip); + int err; + + i2s->probe_continued = 1; + + err = azx_first_init(chip); + if (err < 0) + goto out_free; + + chip->running = 1; + +out_free: + return err; +} + +static void azx_probe_work(struct work_struct *work) +{ + struct i2s_phytium *i2s = container_of(work, struct i2s_phytium, probe_work); + + azx_probe_continue(&i2s->chip); +} + +static int azx_i2s_bus_init(struct azx *chip, + const struct i2s_io_ops *io_ops) +{ + struct i2s_bus *bus = &chip->bus; + + bus->core.io_ops = io_ops; + + INIT_LIST_HEAD(&bus->core.stream_list); + bus->card = chip->card; + mutex_init(&bus->prepare_mutex); + bus->pci = chip->pci; + + bus->core.bdl_pos_adj = chip->bdl_pos_adj; + return 0; +} + +static int i2s_phytium_create(struct platform_device *pdev, + int dev, struct azx 
+{
+	struct azx *chip;
+	int err;
+
+	*rchip = NULL;
+
+	if (!i2s)
+		return -ENOMEM;
+	chip = &i2s->chip;
+
+	mutex_init(&chip->open_mutex);
+
+	chip->ops = &axi_i2s_ops;
+	chip->dev_index = dev;
+
+	INIT_LIST_HEAD(&chip->pcm_list);
+	init_completion(&i2s->probe_wait);
+
+	chip->bdl_pos_adj = 32;
+	err = azx_i2s_bus_init(chip, &axi_i2s_io_ops);
+	if (err < 0) {
+		kfree(i2s);
+		return err;
+	}
+
+	INIT_WORK(&i2s->probe_work, azx_probe_work);
+	*rchip = chip;
+	return 0;
+}
+
+static int substream_alloc_pages(struct azx *chip,
+				 struct snd_pcm_substream *substream,
+				 size_t size)
+{
+	int ret;
+
+	ret = snd_pcm_lib_malloc_pages(substream, size);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static void phytium_i2s_dma_free_pages(struct i2sc_bus *bus,
+				       struct snd_dma_buffer *buf)
+{
+	snd_dma_free_pages(buf);
+}
+
+static const struct i2s_io_ops axi_i2s_io_ops = {
+	.dma_alloc_pages = phytium_i2s_dma_alloc_pages,
+	.dma_free_pages = phytium_i2s_dma_free_pages,
+};
+
+static const struct i2s_controller_ops axi_i2s_ops = {
+	.substream_alloc_pages = substream_alloc_pages,
+	.substream_free_pages = substream_free_pages,
+};
+
+static int phytium_i2s_probe(struct platform_device *pdev)
+{
+	struct i2s_phytium *i2s;
+	struct azx *chip;
+	struct resource *res;
+	struct pdata_px210_mfd *pdata;
+	struct snd_soc_dai_driver *dai_drv;
+	struct clk *clk;
+	int err, ret;
+	int card_num = 1;
+	bool schedule_probe;
+	struct fwnode_handle *np;
+
+	i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL);
+	if (!i2s)
+		return -ENOMEM;
+
+	dai_drv = devm_kzalloc(&pdev->dev, sizeof(*dai_drv), GFP_KERNEL);
+	if (!dai_drv)
+		return -ENOMEM;
+	memcpy(dai_drv, &phytium_i2s_dai, sizeof(phytium_i2s_dai));
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+	i2s->paddr = res->start;
+	i2s->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(i2s->regs))
+		return PTR_ERR(i2s->regs);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	i2s->regs_db = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(i2s->regs_db))
+		return PTR_ERR(i2s->regs_db);
+
+	i2s->irq_id = platform_get_irq(pdev, 0);
+	if (i2s->irq_id < 0)
+		return i2s->irq_id;
+
+	i2s->i2s_reg_comp1 = I2S_COMP_PARAM_1;
+	i2s->i2s_reg_comp2 = I2S_COMP_PARAM_2;
+
+	ret = phytium_configure_dai_by_dt(i2s);
+	if (ret < 0)
+		return ret;
+
+	err = i2s_phytium_create(pdev, card_num, &chip, i2s);
+	if (err < 0)
+		return err;
+	i2s = container_of(chip, struct i2s_phytium, chip);
+	schedule_probe = !chip->disabled;
+
+	dev_set_drvdata(&pdev->dev, i2s);
+
+	pdata = dev_get_platdata(&pdev->dev);
+	i2s->dev = &pdev->dev;
+	if (pdata) {
+		dai_drv->name = pdata->name;
+		i2s->pdev = pdata->dev;
+		i2s->clk_base = pdata->clk_base;
+		i2s->pcie = 1;
+	} else if (pdev->dev.of_node) {
+		device_property_read_string(&pdev->dev, "dai-name", &dai_drv->name);
+		i2s->pdev = &pdev->dev;
+		clk = devm_clk_get(&pdev->dev, NULL);
+		i2s->clk_base = clk_get_rate(clk);
+	} else if (has_acpi_companion(&pdev->dev)) {
+		np = dev_fwnode(&pdev->dev);
+		ret = fwnode_property_read_string(np, "dai-name", &dai_drv->name);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "missing dai-name property from acpi\n");
+			goto failed_get_dai_name;
+		}
+
+		i2s->pdev = &pdev->dev;
+		ret = fwnode_property_read_u32(np, "i2s_clk", &i2s->clk_base);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "missing i2s_clk property from acpi\n");
+			goto failed_get_dai_name;
+		}
+	}
+
+	ret = devm_snd_soc_register_component(&pdev->dev, &phytium_i2s_component,
+					      dai_drv, 1);
+	if (ret != 0)
+		dev_err(&pdev->dev, "not able to register dai\n");
+
+	if (schedule_probe)
+		schedule_work(&i2s->probe_work);
+
+	if (chip->disabled)
+		complete_all(&i2s->probe_wait);
+
+	return 0;
+
+failed_get_dai_name:
+	return ret;
+}
+
+static void phytium_i2s_remove(struct platform_device *pdev)
+{
+	pm_runtime_disable(&pdev->dev);
+}
+
+static const struct of_device_id phytium_i2s_of_match[] = {
+	{ .compatible = "phytium,i2s", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, phytium_i2s_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id phytium_i2s_acpi_match[] = {
+	{ "PHYT0016", 0 },
+	{ }
+};
+#else
+#define phytium_i2s_acpi_match NULL
+#endif
+
+static struct platform_driver phytium_i2s_driver = {
+	.probe = phytium_i2s_probe,
+	.remove = phytium_i2s_remove,
+	.driver = {
+		.name = "phytium-i2s",
+		.of_match_table = of_match_ptr(phytium_i2s_of_match),
+		.acpi_match_table = phytium_i2s_acpi_match,
+	},
+};
+
+module_platform_driver(phytium_i2s_driver);
+
+MODULE_DESCRIPTION("Phytium I2S Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhang Yiqun");
diff --git a/sound/soc/phytium/pmdk_dp.c b/sound/soc/phytium/pmdk_dp.c
new file mode 100644
index 0000000000000..74488df3d21ab
--- /dev/null
+++ b/sound/soc/phytium/pmdk_dp.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+
+struct pmdk_dp_private {
+	struct snd_soc_jack jack0;
+	struct snd_soc_jack jack1;
+	struct snd_soc_jack jack2;
+};
+
+/* PMDK widgets */
+static const struct snd_soc_dapm_widget pmdk_dp_dapm_widgets[] = {
+	SND_SOC_DAPM_LINE("DP", NULL),
+};
+
+/* PMDK control */
+static const struct snd_kcontrol_new pmdk_controls[] = {
+	SOC_DAPM_PIN_SWITCH("DP"),
+};
+
+/* PMDK connections */
+static const struct snd_soc_dapm_route pmdk_dp_audio_map[] = {
+	{"DP", NULL, "TX"},
+};
+
+static struct snd_soc_jack_pin dp0_pins[] = {
+	{
+		.pin = "DP/HDMI 0",
+		.mask = SND_JACK_LINEOUT,
+	},
+};
+
+static struct snd_soc_jack_pin dp1_pins[] = {
+	{
+		.pin = "DP/HDMI 1",
+		.mask = SND_JACK_LINEOUT,
+	},
+};
+
+static struct snd_soc_jack_pin dp2_pins[] = {
+	{
+		.pin = "DP/HDMI 2",
+		.mask = SND_JACK_LINEOUT,
+	},
+};
+
+#define SMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \
+		      SND_SOC_DAIFMT_BC_FC)
+
+static int pmdk_dp0_init(struct snd_soc_pcm_runtime *runtime)
+{
+	struct snd_soc_card *card = runtime->card;
+	struct pmdk_dp_private *priv = snd_soc_card_get_drvdata(card);
+	struct snd_soc_component *component = snd_soc_rtd_to_codec(runtime, 0)->component;
+	int ret;
+
+	ret = snd_soc_card_jack_new_pins(card, "DP/HDMI 0",
+					 SND_JACK_LINEOUT, &priv->jack0,
+					 dp0_pins, ARRAY_SIZE(dp0_pins));
+	if (ret) {
+		dev_err(card->dev, "Jack creation failed %d\n", ret);
+		return ret;
+	}
+	snd_soc_component_set_jack(component, &priv->jack0, NULL);
+	return ret;
+}
+
+static int pmdk_dp1_init(struct snd_soc_pcm_runtime *runtime)
+{
+	struct snd_soc_card *card = runtime->card;
+	struct pmdk_dp_private *priv = snd_soc_card_get_drvdata(card);
+	struct snd_soc_component *component = snd_soc_rtd_to_codec(runtime, 0)->component;
+	int ret;
+
+	ret = snd_soc_card_jack_new_pins(card, "DP/HDMI 1",
+					 SND_JACK_LINEOUT, &priv->jack1,
+					 dp1_pins, ARRAY_SIZE(dp1_pins));
+	if (ret) {
+		dev_err(card->dev, "Jack creation failed %d\n", ret);
+		return ret;
+	}
+	snd_soc_component_set_jack(component, &priv->jack1, NULL);
+	return ret;
+}
+
+static int pmdk_dp2_init(struct snd_soc_pcm_runtime *runtime)
+{
+	struct snd_soc_card *card = runtime->card;
+	struct pmdk_dp_private *priv = snd_soc_card_get_drvdata(card);
+	struct snd_soc_component *component = snd_soc_rtd_to_codec(runtime, 0)->component;
+	int ret;
+
+	ret = snd_soc_card_jack_new_pins(card, "DP/HDMI 2",
+					 SND_JACK_LINEOUT, &priv->jack2,
+					 dp2_pins, ARRAY_SIZE(dp2_pins));
+	if (ret) {
+		dev_err(card->dev, "Jack creation failed %d\n", ret);
+		return ret;
+	}
+	snd_soc_component_set_jack(component, &priv->jack2, NULL);
+	return ret;
+}
+
+SND_SOC_DAILINK_DEFS(pmdk_dp0_dai,
+	DAILINK_COMP_ARRAY(COMP_CPU("phytium-i2s-dp0")),
+	DAILINK_COMP_ARRAY(COMP_CODEC("hdmi-audio-codec.1346918656", "i2s-hifi")),
+	DAILINK_COMP_ARRAY(COMP_PLATFORM("snd-soc-dummy")));
+
+SND_SOC_DAILINK_DEFS(pmdk_dp1_dai,
+	DAILINK_COMP_ARRAY(COMP_CPU("phytium-i2s-dp1")),
+	DAILINK_COMP_ARRAY(COMP_CODEC("hdmi-audio-codec.1346918657", "i2s-hifi")),
+	DAILINK_COMP_ARRAY(COMP_PLATFORM("snd-soc-dummy")));
+
+SND_SOC_DAILINK_DEFS(pmdk_dp2_dai,
+	DAILINK_COMP_ARRAY(COMP_CPU("phytium-i2s-dp2")),
+	DAILINK_COMP_ARRAY(COMP_CODEC("hdmi-audio-codec.1346918658", "i2s-hifi")),
+	DAILINK_COMP_ARRAY(COMP_PLATFORM("snd-soc-dummy")));
+
+static struct snd_soc_dai_link pmdk_dai0 = {
+	.name = "Phytium dp0-audio",
+	.stream_name = "Playback",
+	.dai_fmt = SMDK_DAI_FMT,
+	.init = pmdk_dp0_init,
+	SND_SOC_DAILINK_REG(pmdk_dp0_dai),
+};
+
+static struct snd_soc_dai_link pmdk_dai1 = {
+	.name = "Phytium dp1-audio",
+	.stream_name = "Playback",
+	.dai_fmt = SMDK_DAI_FMT,
+	.init = pmdk_dp1_init,
+	SND_SOC_DAILINK_REG(pmdk_dp1_dai),
+};
+
+static struct snd_soc_dai_link pmdk_dai2 = {
+	.name = "Phytium dp2-audio",
+	.stream_name = "Playback",
+	.dai_fmt = SMDK_DAI_FMT,
+	.init = pmdk_dp2_init,
+	SND_SOC_DAILINK_REG(pmdk_dp2_dai),
+};
+
+static struct snd_soc_card pmdk = {
+	.name = "PMDK-I2S",
+	.owner = THIS_MODULE,
+
+	.dapm_widgets = pmdk_dp_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(pmdk_dp_dapm_widgets),
+	.controls = pmdk_controls,
+	.num_controls = ARRAY_SIZE(pmdk_controls),
+	.dapm_routes = pmdk_dp_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(pmdk_dp_audio_map),
+};
+
+static int pmdk_sound_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = &pmdk;
+	struct pmdk_dp_private *priv;
+	struct snd_soc_dai_link *pmdk_dai;
+	int num_dp = 2;
+
+	card->dev = &pdev->dev;
+	device_property_read_u32(&pdev->dev, "num-dp", &num_dp);
+	pmdk_dai = devm_kcalloc(&pdev->dev, num_dp, sizeof(*pmdk_dai), GFP_KERNEL);
+	if (!pmdk_dai)
+		return -ENOMEM;
+
+	switch (num_dp) {
+	case 1:
+		pmdk_dai[0] = pmdk_dai0;
+		break;
+	case 2:
+		pmdk_dai[0] = pmdk_dai0;
+		pmdk_dai[1] = pmdk_dai1;
+		break;
+	case 3:
+		pmdk_dai[0] = pmdk_dai0;
+		pmdk_dai[1] = pmdk_dai1;
+		pmdk_dai[2] = pmdk_dai2;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	card->dai_link = pmdk_dai;
+	card->num_links = num_dp;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	snd_soc_card_set_drvdata(card, priv);
+
+	return devm_snd_soc_register_card(&pdev->dev, card);
+}
+
+static const struct of_device_id pmdk_sound_of_match[] = {
+	{ .compatible = "phytium,pmdk-dp",},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, pmdk_sound_of_match);
+
+static const struct acpi_device_id pmdk_sound_acpi_match[] = {
+	{ "PHYT8006", 0},
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, pmdk_sound_acpi_match);
+
+static struct platform_driver pmdk_sound_driver = {
+	.probe = pmdk_sound_probe,
+	.driver = {
+		.name = "pmdk_dp",
+		.acpi_match_table = pmdk_sound_acpi_match,
+		.of_match_table = pmdk_sound_of_match,
+#ifdef CONFIG_PM
+		.pm = &snd_soc_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(pmdk_sound_driver);
+
+MODULE_AUTHOR("Zhang Yiqun");
+MODULE_DESCRIPTION("ALSA SoC PMDK DP");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/phytium/pmdk_es8336.c b/sound/soc/phytium/pmdk_es8336.c
new file mode 100644
index 0000000000000..fb0bcc034b10c
--- /dev/null
+++ b/sound/soc/phytium/pmdk_es8336.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+/* PMDK widgets */
+static const struct snd_soc_dapm_widget pmdk_es8336_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("HP", NULL),
+	SND_SOC_DAPM_MIC("Int Mic", NULL),
+	SND_SOC_DAPM_MIC("Mic In", NULL),
+};
+
+/* PMDK control */
+static const struct snd_kcontrol_new pmdk_controls[] = {
+	SOC_DAPM_PIN_SWITCH("HP"),
+	SOC_DAPM_PIN_SWITCH("Int Mic"),
+	SOC_DAPM_PIN_SWITCH("Mic In"),
+};
+
+/* PMDK connections */
+static const struct snd_soc_dapm_route pmdk_es8336_audio_map[] = {
+	{"DMIC", NULL, "Int Mic"},
+	{"MIC1", NULL, "Mic In"},
+	{"MIC2", NULL, "Mic In"},
+
+	{"HP", NULL, "HPOL"},
+	{"HP", NULL, "HPOR"},
+};
+
+#define PMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \
+		      SND_SOC_DAIFMT_CBC_CFC)
+
+SND_SOC_DAILINK_DEFS(pmdk_es8366,
+	DAILINK_COMP_ARRAY(COMP_CPU("phytium-i2s-lsd")),
+	DAILINK_COMP_ARRAY(COMP_CODEC("i2c-ESSX8336:00", "es8336-hifi")),
+	DAILINK_COMP_ARRAY(COMP_PLATFORM("snd-soc-dummy")));
+
+static struct snd_soc_dai_link pmdk_dai[] = {
+	{
+		.name = "ES8336 HIFI",
+		.stream_name = "ES8336 HIFI",
+		.dai_fmt = PMDK_DAI_FMT,
+		SND_SOC_DAILINK_REG(pmdk_es8366),
+	},
+};
+
+static struct snd_soc_card pmdk = {
+	.name = "PMDK-I2S",
+	.owner = THIS_MODULE,
+	.dai_link = pmdk_dai,
+	.num_links = ARRAY_SIZE(pmdk_dai),
+
+	.dapm_widgets = pmdk_es8336_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(pmdk_es8336_dapm_widgets),
+	.controls = pmdk_controls,
+	.num_controls = ARRAY_SIZE(pmdk_controls),
+	.dapm_routes = pmdk_es8336_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(pmdk_es8336_audio_map),
+};
+
+static int pmdk_sound_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = &pmdk;
+	struct device *dev = &pdev->dev;
+
+	card->dev = dev;
+
+	return devm_snd_soc_register_card(&pdev->dev, card);
+}
+
+static const struct acpi_device_id pmdk_sound_acpi_match[] = {
+	{ "PHYT8005", 0},
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, pmdk_sound_acpi_match);
+
+static struct platform_driver pmdk_sound_driver = {
+	.probe = pmdk_sound_probe,
+	.driver = {
+		.name = "pmdk_es8336",
+		.acpi_match_table = pmdk_sound_acpi_match,
+#ifdef CONFIG_PM
+		.pm = &snd_soc_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(pmdk_sound_driver);
+MODULE_AUTHOR("Zhang Yiqun");
+MODULE_DESCRIPTION("ALSA SoC PMDK ES8336");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/phytium/pmdk_es8388.c b/sound/soc/phytium/pmdk_es8388.c
new file mode 100644
index 0000000000000..b408226dca928
--- /dev/null
+++ b/sound/soc/phytium/pmdk_es8388.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+
+static struct snd_soc_jack hs_jack;
+
+/* Headset jack detection DAPM pins */
+static struct snd_soc_jack_pin hs_jack_pins[] = {
+	{
+		.pin = "FrontIn",
+		.mask = SND_JACK_MICROPHONE,
+	},
+	{
+		.pin = "RearIn",
+		.mask = SND_JACK_MICROPHONE,
+		.invert = 1
+	},
+	{
+		.pin = "Front",
+		.mask = SND_JACK_HEADPHONE,
+	},
+	{
+		.pin = "Rear",
+		.mask = SND_JACK_HEADPHONE,
+		.invert = 1
+	},
+};
+
+/* Headset jack detection gpios */
+static struct snd_soc_jack_gpio hs_jack_gpios[] = {
+	{
+		.name = "det",
+		.report = SND_JACK_HEADSET,
+		.debounce_time = 200,
+		.invert = 1,
+	},
+};
+
+/* PMDK widgets */
+static const struct snd_soc_dapm_widget pmdk_es8388_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Front", NULL),
+	SND_SOC_DAPM_HP("Rear", NULL),
+
+	SND_SOC_DAPM_MIC("FrontIn", NULL),
+	SND_SOC_DAPM_MIC("RearIn", NULL),
+};
+
+/* PMDK control */
+static const struct snd_kcontrol_new pmdk_controls[] = {
+	SOC_DAPM_PIN_SWITCH("Front"),
+	SOC_DAPM_PIN_SWITCH("Rear"),
+	SOC_DAPM_PIN_SWITCH("FrontIn"),
+	SOC_DAPM_PIN_SWITCH("RearIn"),
+};
+
+/* PMDK connections */
+static const struct snd_soc_dapm_route pmdk_es8388_audio_map[] = {
+	{"LINPUT1", NULL, "FrontIn"},
+	{"RINPUT1", NULL, "FrontIn"},
+
+	{"LINPUT2", NULL, "RearIn"},
+	{"RINPUT2", NULL, "RearIn"},
+
+	{"Front", NULL, "LOUT1"},
+	{"Front", NULL, "ROUT1"},
+
+	{"Rear", NULL, "LOUT2"},
+	{"Rear", NULL, "ROUT2"},
+};
+
+static int pmdk_es8388_init(struct snd_soc_pcm_runtime *rtd)
+{
+	int ret;
+
+	/* Jack detection API stuff */
+	ret = snd_soc_card_jack_new_pins(rtd->card, "Headset Jack",
+					 SND_JACK_HEADSET, &hs_jack,
+					 hs_jack_pins, ARRAY_SIZE(hs_jack_pins));
+	if (ret)
+		goto err;
+
+	ret = snd_soc_jack_add_gpios(&hs_jack, ARRAY_SIZE(hs_jack_gpios),
+				     hs_jack_gpios);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	return ret;
+}
+
+#define PMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \
+		      SND_SOC_DAIFMT_CBC_CFC)
+
+SND_SOC_DAILINK_DEFS(pmdk_es8388,
+	DAILINK_COMP_ARRAY(COMP_CPU("phytium-i2s-lsd")),
+	DAILINK_COMP_ARRAY(COMP_CODEC("i2c-ESSX8388:00", "es8388-hifi")),
+	DAILINK_COMP_ARRAY(COMP_PLATFORM("snd-soc-dummy")));
+
+static struct snd_soc_dai_link pmdk_dai[] = {
+	{
+		.name = "ES8388 HIFI",
+		.stream_name = "ES8388 HIFI",
+		.dai_fmt = PMDK_DAI_FMT,
+		.init = pmdk_es8388_init,
+		SND_SOC_DAILINK_REG(pmdk_es8388),
+	},
+};
+
+static struct snd_soc_card pmdk = {
+	.name = "PMDK-I2S",
+	.owner = THIS_MODULE,
+	.dai_link = pmdk_dai,
+	.num_links = ARRAY_SIZE(pmdk_dai),
+
+	.dapm_widgets = pmdk_es8388_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(pmdk_es8388_dapm_widgets),
+	.controls = pmdk_controls,
+	.num_controls = ARRAY_SIZE(pmdk_controls),
+	.dapm_routes = pmdk_es8388_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(pmdk_es8388_audio_map),
+};
+
+static int pmdk_sound_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = &pmdk;
+	struct device *dev = &pdev->dev;
+	int n;
+
+	card->dev = dev;
+	hs_jack_gpios[0].gpiod_dev = dev;
+	n = gpiod_count(dev, "det");
+	/* no jack-detect GPIO described for this board: skip jack setup */
+	if (n < 0)
+		pmdk_dai[0].init = NULL;
+
+	return devm_snd_soc_register_card(&pdev->dev, card);
+}
+
+static const struct acpi_device_id pmdk_sound_acpi_match[] = {
+	{ "PHYT8004", 0},
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, pmdk_sound_acpi_match);
+
+static struct platform_driver pmdk_sound_driver = {
+	.probe = pmdk_sound_probe,
+	.driver = {
+		.name = "pmdk_es8388",
+		.acpi_match_table = pmdk_sound_acpi_match,
+#ifdef CONFIG_PM
+		.pm = &snd_soc_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(pmdk_sound_driver);
+
+MODULE_AUTHOR("Zhang Yiqun");
+MODULE_DESCRIPTION("ALSA SoC PMDK ES8388");
+MODULE_LICENSE("GPL");
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b7a0ae2a7b205..bad73626a5902 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5567,7 +5567,7 @@ static struct miscdevice kvm_dev = {
 };
 
 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
-bool enable_virt_at_load = true;
+bool enable_virt_at_load = false;
 module_param(enable_virt_at_load, bool, 0444);
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_virt_at_load);