diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index d94f36400ad9c..f12556b9d4b88 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -530,6 +530,13 @@ config I2C_CADENCE Say yes here to select Cadence I2C Host Controller. This controller is e.g. used by Xilinx Zynq. +config I2C_CIX + tristate "Cadence I2C Controller for Cix" + depends on ARCH_ZYNQ || ARM64 || XTENSA || COMPILE_TEST + help + Say yes here to select Cadence I2C Host Controller on Cix platform. + This controller is e.g. used by Cix sky1. + config I2C_CBUS_GPIO tristate "CBUS I2C driver" depends on GPIOLIB || COMPILE_TEST diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 2149e9aad00a5..28866a70d2e08 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -49,6 +49,7 @@ obj-$(CONFIG_I2C_AXXIA) += i2c-axxia.o obj-$(CONFIG_I2C_BCM2835) += i2c-bcm2835.o obj-$(CONFIG_I2C_BCM_IPROC) += i2c-bcm-iproc.o obj-$(CONFIG_I2C_CADENCE) += i2c-cadence.o +obj-$(CONFIG_I2C_CIX) += i2c-cix.o obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o obj-$(CONFIG_I2C_CPM) += i2c-cpm.o obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o diff --git a/drivers/i2c/busses/i2c-cix.c b/drivers/i2c/busses/i2c-cix.c new file mode 100644 index 0000000000000..9d0a6a3c19981 --- /dev/null +++ b/drivers/i2c/busses/i2c-cix.c @@ -0,0 +1,1578 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * I2C bus driver for the Cadence I2C controller. + * + * Copyright (C) 2009 - 2014 Xilinx, Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Register offsets for the I2C device. */ +#define CDNS_I2C_CR_OFFSET 0x00 /* Control Register, RW */ +#define CDNS_I2C_SR_OFFSET 0x04 /* Status Register, RO */ +#define CDNS_I2C_ADDR_OFFSET 0x08 /* I2C Address Register, RW */ +#define CDNS_I2C_DATA_OFFSET 0x0C /* I2C Data Register, RW */ +#define CDNS_I2C_ISR_OFFSET 0x10 /* IRQ Status Register, RW */ +#define CDNS_I2C_XFER_SIZE_OFFSET 0x14 /* Transfer Size Register, RW */ +#define CDNS_I2C_TIME_OUT_OFFSET 0x1C /* Time Out Register, RW */ +#define CDNS_I2C_IMR_OFFSET 0x20 /* IRQ Mask Register, RO */ +#define CDNS_I2C_IER_OFFSET 0x24 /* IRQ Enable Register, WO */ +#define CDNS_I2C_IDR_OFFSET 0x28 /* IRQ Disable Register, WO */ + +/* Control Register Bit mask definitions */ +#define CDNS_I2C_CR_HOLD BIT(4) /* Hold Bus bit */ +#define CDNS_I2C_CR_ACK_EN BIT(3) +#define CDNS_I2C_CR_NEA BIT(2) +#define CDNS_I2C_CR_MS BIT(1) +/* Read or Write Master transfer 0 = Transmitter, 1 = Receiver */ +#define CDNS_I2C_CR_RW BIT(0) +/* 1 = Auto init FIFO to zeroes */ +#define CDNS_I2C_CR_CLR_FIFO BIT(6) +#define CDNS_I2C_CR_DIVA_SHIFT 14 +#define CDNS_I2C_CR_DIVA_MASK (3 << CDNS_I2C_CR_DIVA_SHIFT) +#define CDNS_I2C_CR_DIVB_SHIFT 8 +#define CDNS_I2C_CR_DIVB_MASK (0x3f << CDNS_I2C_CR_DIVB_SHIFT) + +#define CDNS_I2C_CR_MASTER_EN_MASK (CDNS_I2C_CR_NEA | \ + CDNS_I2C_CR_ACK_EN | \ + CDNS_I2C_CR_MS) + +#define CDNS_I2C_CR_SLAVE_EN_MASK ~CDNS_I2C_CR_MASTER_EN_MASK + +/* Status Register Bit mask definitions */ +#define CDNS_I2C_SR_BA BIT(8) +#define CDNS_I2C_SR_TXDV BIT(6) +#define CDNS_I2C_SR_RXDV BIT(5) +#define CDNS_I2C_SR_RXRW BIT(3) + +/* + * I2C Address Register Bit mask definitions + * Normal addressing mode uses [6:0] bits. Extended addressing mode uses [9:0] + * bits. A write access to this register always initiates a transfer if the I2C + * is in master mode. 
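+ * (Descriptive note: the CR register's NEA bit selects between the two
+ * modes; cdns_i2c_process_msg() below clears NEA for I2C_M_TEN (10-bit)
+ * messages and sets it for 7-bit ones.)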
+ */ +#define CDNS_I2C_ADDR_MASK 0x000003FF /* I2C Address Mask */ + +/* + * I2C Interrupt Registers Bit mask definitions + * All the four interrupt registers (Status/Mask/Enable/Disable) have the same + * bit definitions. + */ +#define CDNS_I2C_IXR_ARB_LOST BIT(9) +#define CDNS_I2C_IXR_RX_UNF BIT(7) +#define CDNS_I2C_IXR_TX_OVF BIT(6) +#define CDNS_I2C_IXR_RX_OVF BIT(5) +#define CDNS_I2C_IXR_SLV_RDY BIT(4) +#define CDNS_I2C_IXR_TO BIT(3) +#define CDNS_I2C_IXR_NACK BIT(2) +#define CDNS_I2C_IXR_DATA BIT(1) +#define CDNS_I2C_IXR_COMP BIT(0) + +#define CDNS_I2C_IXR_ALL_INTR_MASK (CDNS_I2C_IXR_ARB_LOST | \ + CDNS_I2C_IXR_RX_UNF | \ + CDNS_I2C_IXR_TX_OVF | \ + CDNS_I2C_IXR_RX_OVF | \ + CDNS_I2C_IXR_SLV_RDY | \ + CDNS_I2C_IXR_TO | \ + CDNS_I2C_IXR_NACK | \ + CDNS_I2C_IXR_DATA | \ + CDNS_I2C_IXR_COMP) + +#define CDNS_I2C_IXR_ERR_INTR_MASK (CDNS_I2C_IXR_ARB_LOST | \ + CDNS_I2C_IXR_RX_UNF | \ + CDNS_I2C_IXR_TX_OVF | \ + CDNS_I2C_IXR_RX_OVF | \ + CDNS_I2C_IXR_NACK) + +#define CDNS_I2C_ENABLED_INTR_MASK (CDNS_I2C_IXR_ARB_LOST | \ + CDNS_I2C_IXR_RX_UNF | \ + CDNS_I2C_IXR_TX_OVF | \ + CDNS_I2C_IXR_RX_OVF | \ + CDNS_I2C_IXR_NACK | \ + CDNS_I2C_IXR_DATA | \ + CDNS_I2C_IXR_COMP) + +#define CDNS_I2C_IXR_SLAVE_INTR_MASK (CDNS_I2C_IXR_RX_UNF | \ + CDNS_I2C_IXR_TX_OVF | \ + CDNS_I2C_IXR_RX_OVF | \ + CDNS_I2C_IXR_TO | \ + CDNS_I2C_IXR_NACK | \ + CDNS_I2C_IXR_DATA | \ + CDNS_I2C_IXR_COMP) + +#define CDNS_I2C_TIMEOUT msecs_to_jiffies(1000) +/* timeout for pm runtime autosuspend */ +#define CNDS_I2C_PM_TIMEOUT 1000 /* ms */ + +#define CDNS_I2C_FIFO_DEPTH_DEFAULT 16 +#define CDNS_I2C_MAX_TRANSFER_SIZE 255 +/* Transfer size in multiples of data interrupt depth */ +#define CDNS_I2C_TRANSFER_SIZE(max) ((max) - 3) + +#define DRIVER_NAME "cix-i2c" + +#define CDNS_I2C_DIVA_MAX 4 +#define CDNS_I2C_DIVB_MAX 64 + +#define CDNS_I2C_TIMEOUT_MAX 0xFF + +#define CDNS_I2C_BROKEN_HOLD_BIT BIT(0) +#define CDNS_I2C_POLL_US 100000 +#define CDNS_I2C_TIMEOUT_US 500000 + +#define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset) +#define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset) + +#if IS_ENABLED(CONFIG_I2C_SLAVE) +/** + * enum cdns_i2c_mode - I2C Controller current operating mode + * + * @CDNS_I2C_MODE_SLAVE: I2C controller operating in slave mode + * @CDNS_I2C_MODE_MASTER: I2C Controller operating in master mode + */ +enum cdns_i2c_mode { + CDNS_I2C_MODE_SLAVE, + CDNS_I2C_MODE_MASTER, +}; + +/** + * enum cdns_i2c_slave_state - Slave state when I2C is operating in slave mode + * + * @CDNS_I2C_SLAVE_STATE_IDLE: I2C slave idle + * @CDNS_I2C_SLAVE_STATE_SEND: I2C slave sending data to master + * @CDNS_I2C_SLAVE_STATE_RECV: I2C slave receiving data from master + */ +enum cdns_i2c_slave_state { + CDNS_I2C_SLAVE_STATE_IDLE, + CDNS_I2C_SLAVE_STATE_SEND, + CDNS_I2C_SLAVE_STATE_RECV, +}; +#endif + +/** + * struct cdns_i2c - I2C device private data structure + * + * @dev: Pointer to device structure + * @membase: Base address of the I2C device + * @adap: I2C adapter instance + * @p_msg: Message pointer + * @err_status: Error status in Interrupt Status Register + * @xfer_done: Transfer complete status + * @p_send_buf: Pointer to transmit buffer + * @p_recv_buf: Pointer to receive buffer + * @send_count: Number of bytes still expected to send + * @recv_count: Number of bytes still expected to receive + * @curr_recv_count: Number of bytes to be received in current transfer + * @input_clk: Input clock to I2C controller + * @i2c_clk: Maximum I2C clock speed + * @bus_hold_flag: Flag used in repeated start 
for clearing HOLD bit + * @clk: Pointer to struct clk + * @clk_rate_change_nb: Notifier block for clock rate changes + * @reset: Reset control for the device + * @quirks: flag for broken hold bit usage in r1p10 + * @ctrl_reg: Cached value of the control register. + * @rinfo: I2C GPIO recovery information + * @ctrl_reg_diva_divb: value of fields DIV_A and DIV_B from CR register + * @slave: Registered slave instance. + * @dev_mode: I2C operating role(master/slave). + * @slave_state: I2C Slave state(idle/read/write). + * @fifo_depth: The depth of the transfer FIFO + * @transfer_size: The maximum number of bytes in one transfer + */ +struct cdns_i2c { + struct device *dev; + void __iomem *membase; + struct i2c_adapter adap; + struct i2c_msg *p_msg; + int err_status; + struct completion xfer_done; + unsigned char *p_send_buf; + unsigned char *p_recv_buf; + unsigned int send_count; + unsigned int recv_count; + unsigned int curr_recv_count; + unsigned long input_clk; + unsigned int i2c_clk; + unsigned int bus_hold_flag; + struct clk *clk; + struct notifier_block clk_rate_change_nb; + struct reset_control *reset; + u32 quirks; + u32 ctrl_reg; + struct i2c_bus_recovery_info rinfo; +#if IS_ENABLED(CONFIG_I2C_SLAVE) + u16 ctrl_reg_diva_divb; + struct i2c_client *slave; + enum cdns_i2c_mode dev_mode; + enum cdns_i2c_slave_state slave_state; +#endif + u32 fifo_depth; + unsigned int transfer_size; +}; + +struct cdns_platform_data { + u32 quirks; +}; + +#define to_cdns_i2c(_nb) container_of(_nb, struct cdns_i2c, \ + clk_rate_change_nb) + +/** + * cdns_i2c_clear_bus_hold - Clear bus hold bit + * @id: Pointer to driver data struct + * + * Helper to clear the controller's bus hold bit. + */ +static void cdns_i2c_clear_bus_hold(struct cdns_i2c *id) +{ + u32 reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); + + if (reg & CDNS_I2C_CR_HOLD) + cdns_i2c_writereg(reg & ~CDNS_I2C_CR_HOLD, CDNS_I2C_CR_OFFSET); +} + +static inline bool cdns_is_holdquirk(struct cdns_i2c *id, bool hold_wrkaround) +{ + return (hold_wrkaround && + (id->curr_recv_count == id->fifo_depth + 1)); +} + +#if IS_ENABLED(CONFIG_I2C_SLAVE) +static void cdns_i2c_set_mode(enum cdns_i2c_mode mode, struct cdns_i2c *id) +{ + /* Disable all interrupts */ + cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK, CDNS_I2C_IDR_OFFSET); + + /* Clear FIFO and transfer size */ + cdns_i2c_writereg(CDNS_I2C_CR_CLR_FIFO, CDNS_I2C_CR_OFFSET); + + /* Update device mode and state */ + id->dev_mode = mode; + id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE; + + switch (mode) { + case CDNS_I2C_MODE_MASTER: + /* Enable i2c master */ + cdns_i2c_writereg(id->ctrl_reg_diva_divb | + CDNS_I2C_CR_MASTER_EN_MASK, + CDNS_I2C_CR_OFFSET); + /* + * This delay is needed to give the IP some time to switch to + * the master mode. With lower values(like 110 us) i2cdetect + * will not detect any slave and without this delay, the IP will + * trigger a timeout interrupt. 
+ */ + usleep_range(115, 125); + break; + case CDNS_I2C_MODE_SLAVE: + /* Enable i2c slave */ + cdns_i2c_writereg(id->ctrl_reg_diva_divb & + CDNS_I2C_CR_SLAVE_EN_MASK, + CDNS_I2C_CR_OFFSET); + + /* Setting slave address */ + cdns_i2c_writereg(id->slave->addr & CDNS_I2C_ADDR_MASK, + CDNS_I2C_ADDR_OFFSET); + + /* Enable slave send/receive interrupts */ + cdns_i2c_writereg(CDNS_I2C_IXR_SLAVE_INTR_MASK, + CDNS_I2C_IER_OFFSET); + break; + } +} + +static void cdns_i2c_slave_rcv_data(struct cdns_i2c *id) +{ + u8 bytes; + unsigned char data; + + /* Prepare backend for data reception */ + if (id->slave_state == CDNS_I2C_SLAVE_STATE_IDLE) { + id->slave_state = CDNS_I2C_SLAVE_STATE_RECV; + i2c_slave_event(id->slave, I2C_SLAVE_WRITE_REQUESTED, NULL); + } + + /* Fetch number of bytes to receive */ + bytes = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); + + /* Read data and send to backend */ + while (bytes--) { + data = cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); + i2c_slave_event(id->slave, I2C_SLAVE_WRITE_RECEIVED, &data); + } +} + +static void cdns_i2c_slave_send_data(struct cdns_i2c *id) +{ + u8 data; + + /* Prepare backend for data transmission */ + if (id->slave_state == CDNS_I2C_SLAVE_STATE_IDLE) { + id->slave_state = CDNS_I2C_SLAVE_STATE_SEND; + i2c_slave_event(id->slave, I2C_SLAVE_READ_REQUESTED, &data); + } else { + i2c_slave_event(id->slave, I2C_SLAVE_READ_PROCESSED, &data); + } + + /* Send data over bus */ + cdns_i2c_writereg(data, CDNS_I2C_DATA_OFFSET); +} + +/** + * cdns_i2c_slave_isr - Interrupt handler for the I2C device in slave role + * @ptr: Pointer to I2C device private data + * + * This function handles the data interrupt and transfer complete interrupt of + * the I2C device in slave role. + * + * Return: IRQ_HANDLED always + */ +static irqreturn_t cdns_i2c_slave_isr(void *ptr) +{ + struct cdns_i2c *id = ptr; + unsigned int isr_status, i2c_status; + + /* Fetch the interrupt status */ + isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); + cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET); + + /* Ignore masked interrupts */ + isr_status &= ~cdns_i2c_readreg(CDNS_I2C_IMR_OFFSET); + + /* Fetch transfer mode (send/receive) */ + i2c_status = cdns_i2c_readreg(CDNS_I2C_SR_OFFSET); + + /* Handle data send/receive */ + if (i2c_status & CDNS_I2C_SR_RXRW) { + /* Send data to master */ + if (isr_status & CDNS_I2C_IXR_DATA) + cdns_i2c_slave_send_data(id); + + if (isr_status & CDNS_I2C_IXR_COMP) { + id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE; + i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL); + } + } else { + /* Receive data from master */ + if (isr_status & CDNS_I2C_IXR_DATA) + cdns_i2c_slave_rcv_data(id); + + if (isr_status & CDNS_I2C_IXR_COMP) { + cdns_i2c_slave_rcv_data(id); + id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE; + i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL); + } + } + + /* Master indicated xfer stop or fifo underflow/overflow */ + if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_RX_OVF | + CDNS_I2C_IXR_RX_UNF | CDNS_I2C_IXR_TX_OVF)) { + id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE; + i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL); + cdns_i2c_writereg(CDNS_I2C_CR_CLR_FIFO, CDNS_I2C_CR_OFFSET); + } + + return IRQ_HANDLED; +} +#endif + +/** + * cdns_i2c_master_isr - Interrupt handler for the I2C device in master role + * @ptr: Pointer to I2C device private data + * + * This function handles the data interrupt, transfer complete interrupt and + * the error interrupts of the I2C device in master role. 
+ * + * Return: IRQ_HANDLED always + */ +static irqreturn_t cdns_i2c_master_isr(void *ptr) +{ + unsigned int isr_status, avail_bytes; + unsigned int bytes_to_send; + bool updatetx; + struct cdns_i2c *id = ptr; + /* Signal completion only after everything is updated */ + int done_flag = 0; + irqreturn_t status = IRQ_NONE; + + isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); + cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET); + id->err_status = 0; + + /* Handling nack and arbitration lost interrupt */ + if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) { + done_flag = 1; + status = IRQ_HANDLED; + } + + /* + * Check if transfer size register needs to be updated again for a + * large data receive operation. + */ + updatetx = id->recv_count > id->curr_recv_count; + + /* When receiving, handle data interrupt and completion interrupt */ + if (id->p_recv_buf && + ((isr_status & CDNS_I2C_IXR_COMP) || + (isr_status & CDNS_I2C_IXR_DATA))) { + /* Read data if receive data valid is set */ + while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & + CDNS_I2C_SR_RXDV) { + if (id->recv_count > 0) { + *(id->p_recv_buf)++ = + cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); + id->recv_count--; + id->curr_recv_count--; + + /* + * Clear hold bit that was set for FIFO control + * if RX data left is less than or equal to + * FIFO DEPTH unless repeated start is selected + */ + if (id->recv_count <= id->fifo_depth && + !id->bus_hold_flag) + cdns_i2c_clear_bus_hold(id); + + } else { + dev_err(id->adap.dev.parent, + "xfer_size reg rollover. xfer aborted!\n"); + id->err_status |= CDNS_I2C_IXR_TO; + break; + } + + if (cdns_is_holdquirk(id, updatetx)) + break; + } + + /* + * The controller sends NACK to the slave when transfer size + * register reaches zero without considering the HOLD bit. + * This workaround is implemented for large data transfers to + * maintain transfer size non-zero while performing a large + * receive operation. + */ + if (cdns_is_holdquirk(id, updatetx)) { + /* wait while fifo is full */ + while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) != + (id->curr_recv_count - id->fifo_depth)) + ; + + /* + * Check number of bytes to be received against maximum + * transfer size and update register accordingly. + */ + if (((int)(id->recv_count) - id->fifo_depth) > + id->transfer_size) { + cdns_i2c_writereg(id->transfer_size, + CDNS_I2C_XFER_SIZE_OFFSET); + id->curr_recv_count = id->transfer_size + + id->fifo_depth; + } else { + cdns_i2c_writereg(id->recv_count - + id->fifo_depth, + CDNS_I2C_XFER_SIZE_OFFSET); + id->curr_recv_count = id->recv_count; + } + } + + /* Clear hold (if not repeated start) and signal completion */ + if ((isr_status & CDNS_I2C_IXR_COMP) && !id->recv_count) { + if (!id->bus_hold_flag) + cdns_i2c_clear_bus_hold(id); + done_flag = 1; + } + + status = IRQ_HANDLED; + } + + /* When sending, handle transfer complete interrupt */ + if ((isr_status & CDNS_I2C_IXR_COMP) && !id->p_recv_buf) { + /* + * If there is more data to be sent, calculate the + * space available in FIFO and fill with that many bytes. + */ + if (id->send_count) { + avail_bytes = id->fifo_depth - + cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); + if (id->send_count > avail_bytes) + bytes_to_send = avail_bytes; + else + bytes_to_send = id->send_count; + + while (bytes_to_send--) { + cdns_i2c_writereg( + (*(id->p_send_buf)++), + CDNS_I2C_DATA_OFFSET); + id->send_count--; + } + } else { + /* + * Signal the completion of transaction and + * clear the hold bus bit if there are no + * further messages to be processed. 
+ */ + done_flag = 1; + } + if (!id->send_count && !id->bus_hold_flag) + cdns_i2c_clear_bus_hold(id); + + status = IRQ_HANDLED; + } + + /* Update the status for errors */ + id->err_status |= isr_status & CDNS_I2C_IXR_ERR_INTR_MASK; + if (id->err_status) + status = IRQ_HANDLED; + + if (done_flag) + complete(&id->xfer_done); + + return status; +} + +/** + * cdns_i2c_isr - Interrupt handler for the I2C device + * @irq: irq number for the I2C device + * @ptr: void pointer to cdns_i2c structure + * + * This function passes the control to slave/master based on current role of + * i2c controller. + * + * Return: IRQ_HANDLED always + */ +static irqreturn_t cdns_i2c_isr(int irq, void *ptr) +{ +#if IS_ENABLED(CONFIG_I2C_SLAVE) + struct cdns_i2c *id = ptr; + + if (id->dev_mode == CDNS_I2C_MODE_SLAVE) + return cdns_i2c_slave_isr(ptr); +#endif + return cdns_i2c_master_isr(ptr); +} + +/** + * cdns_i2c_mrecv - Prepare and start a master receive operation + * @id: pointer to the i2c device structure + */ +static void cdns_i2c_mrecv(struct cdns_i2c *id) +{ + unsigned int ctrl_reg; + unsigned int isr_status; + unsigned long flags; + bool hold_clear = false; + bool irq_save = false; + + u32 addr; + + id->p_recv_buf = id->p_msg->buf; + id->recv_count = id->p_msg->len; + + /* Put the controller in master receive mode and clear the FIFO */ + ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); + ctrl_reg |= CDNS_I2C_CR_RW | CDNS_I2C_CR_CLR_FIFO; + + /* + * Receive up to I2C_SMBUS_BLOCK_MAX data bytes, plus one message length + * byte, plus one checksum byte if PEC is enabled. p_msg->len will be 2 if + * PEC is enabled, otherwise 1. + */ + if (id->p_msg->flags & I2C_M_RECV_LEN) + id->recv_count = I2C_SMBUS_BLOCK_MAX + id->p_msg->len; + + id->curr_recv_count = id->recv_count; + + /* + * Check for the message size against FIFO depth and set the + * 'hold bus' bit if it is greater than FIFO depth. + */ + if (id->recv_count > id->fifo_depth) + ctrl_reg |= CDNS_I2C_CR_HOLD; + + cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); + + /* Clear the interrupts in interrupt status register */ + isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); + cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET); + + /* + * The no. of bytes to receive is checked against the limit of + * max transfer size. Set transfer size register with no of bytes + * receive if it is less than transfer size and transfer size if + * it is more. Enable the interrupts. + */ + if (id->recv_count > id->transfer_size) { + cdns_i2c_writereg(id->transfer_size, + CDNS_I2C_XFER_SIZE_OFFSET); + id->curr_recv_count = id->transfer_size; + } else { + cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET); + } + + /* Determine hold_clear based on number of bytes to receive and hold flag */ + if (!id->bus_hold_flag && id->recv_count <= id->fifo_depth) { + if (ctrl_reg & CDNS_I2C_CR_HOLD) { + hold_clear = true; + if (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) + irq_save = true; + } + } + + addr = id->p_msg->addr; + addr &= CDNS_I2C_ADDR_MASK; + + if (hold_clear) { + ctrl_reg &= ~CDNS_I2C_CR_HOLD; + ctrl_reg &= ~CDNS_I2C_CR_CLR_FIFO; + /* + * In case of Xilinx Zynq SOC, clear the HOLD bit before transfer size + * register reaches '0'. This is an IP bug which causes transfer size + * register overflow to 0xFF. To satisfy this timing requirement, + * disable the interrupts on current processor core between register + * writes to slave address register and control register. 
+ */ + if (irq_save) + local_irq_save(flags); + + cdns_i2c_writereg(addr, CDNS_I2C_ADDR_OFFSET); + cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); + /* Read it back to avoid bufferring and make sure write happens */ + cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); + + if (irq_save) + local_irq_restore(flags); + } else { + cdns_i2c_writereg(addr, CDNS_I2C_ADDR_OFFSET); + } + + cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET); +} + +/** + * cdns_i2c_msend - Prepare and start a master send operation + * @id: pointer to the i2c device + */ +static void cdns_i2c_msend(struct cdns_i2c *id) +{ + unsigned int avail_bytes; + unsigned int bytes_to_send; + unsigned int ctrl_reg; + unsigned int isr_status; + + id->p_recv_buf = NULL; + id->p_send_buf = id->p_msg->buf; + id->send_count = id->p_msg->len; + + /* Set the controller in Master transmit mode and clear the FIFO. */ + ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); + ctrl_reg &= ~CDNS_I2C_CR_RW; + ctrl_reg |= CDNS_I2C_CR_CLR_FIFO; + + /* + * Check for the message size against FIFO depth and set the + * 'hold bus' bit if it is greater than FIFO depth. + */ + if (id->send_count > id->fifo_depth) + ctrl_reg |= CDNS_I2C_CR_HOLD; + cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); + + /* Clear the interrupts in interrupt status register. */ + isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); + cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET); + + /* + * Calculate the space available in FIFO. Check the message length + * against the space available, and fill the FIFO accordingly. + * Enable the interrupts. + */ + avail_bytes = id->fifo_depth - + cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); + + if (id->send_count > avail_bytes) + bytes_to_send = avail_bytes; + else + bytes_to_send = id->send_count; + + while (bytes_to_send--) { + cdns_i2c_writereg((*(id->p_send_buf)++), CDNS_I2C_DATA_OFFSET); + id->send_count--; + } + + /* + * Clear the bus hold flag if there is no more data + * and if it is the last message. + */ + if (!id->bus_hold_flag && !id->send_count) + cdns_i2c_clear_bus_hold(id); + /* Set the slave address in address register - triggers operation. */ + cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK, + CDNS_I2C_ADDR_OFFSET); + + cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET); +} + +/** + * cdns_i2c_master_reset - Reset the interface + * @adap: pointer to the i2c adapter driver instance + * + * This function cleanup the fifos, clear the hold bit and status + * and disable the interrupts. 
+ */ +static void cdns_i2c_master_reset(struct i2c_adapter *adap) +{ + struct cdns_i2c *id = adap->algo_data; + u32 regval; + + /* Disable the interrupts */ + cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK, CDNS_I2C_IDR_OFFSET); + /* Clear the hold bit and fifos */ + regval = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); + regval &= ~CDNS_I2C_CR_HOLD; + regval |= CDNS_I2C_CR_CLR_FIFO; + cdns_i2c_writereg(regval, CDNS_I2C_CR_OFFSET); + /* Update the transfercount register to zero */ + cdns_i2c_writereg(0, CDNS_I2C_XFER_SIZE_OFFSET); + /* Clear the interrupt status register */ + regval = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); + cdns_i2c_writereg(regval, CDNS_I2C_ISR_OFFSET); + /* Clear the status register */ + regval = cdns_i2c_readreg(CDNS_I2C_SR_OFFSET); + cdns_i2c_writereg(regval, CDNS_I2C_SR_OFFSET); +} + +static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg, + struct i2c_adapter *adap) +{ + unsigned long time_left, msg_timeout; + u32 reg; + + id->p_msg = msg; + id->err_status = 0; + reinit_completion(&id->xfer_done); + + /* Check for the TEN Bit mode on each msg */ + reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); + if (msg->flags & I2C_M_TEN) { + if (reg & CDNS_I2C_CR_NEA) + cdns_i2c_writereg(reg & ~CDNS_I2C_CR_NEA, + CDNS_I2C_CR_OFFSET); + } else { + if (!(reg & CDNS_I2C_CR_NEA)) + cdns_i2c_writereg(reg | CDNS_I2C_CR_NEA, + CDNS_I2C_CR_OFFSET); + } + + /* Check for the R/W flag on each msg */ + if (msg->flags & I2C_M_RD) + cdns_i2c_mrecv(id); + else + cdns_i2c_msend(id); + + /* Minimal time to execute this message */ + msg_timeout = msecs_to_jiffies((1000 * msg->len * BITS_PER_BYTE) / id->i2c_clk); + /* Plus some wiggle room */ + msg_timeout += msecs_to_jiffies(500); + + if (msg_timeout < adap->timeout) + msg_timeout = adap->timeout; + + /* Wait for the signal of completion */ + time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout); + if (time_left == 0) { + cdns_i2c_master_reset(adap); + dev_err(id->adap.dev.parent, + "timeout waiting on completion\n"); + return -ETIMEDOUT; + } + + cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK, + CDNS_I2C_IDR_OFFSET); + + /* If it is bus arbitration error, try again */ + if (id->err_status & CDNS_I2C_IXR_ARB_LOST) + return -EAGAIN; + + if (msg->flags & I2C_M_RECV_LEN) + msg->len += min_t(unsigned int, msg->buf[0], I2C_SMBUS_BLOCK_MAX); + + return 0; +} + +/** + * cdns_i2c_master_xfer - The main i2c transfer function + * @adap: pointer to the i2c adapter driver instance + * @msgs: pointer to the i2c message structure + * @num: the number of messages to transfer + * + * Initiates the send/recv activity based on the transfer message received. 
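+ * Each message handed to cdns_i2c_process_msg() is given its own timeout,
+ * roughly (len * 8 bits) / bus rate plus 500 ms of margin, and never less
+ * than adap->timeout. With illustrative numbers: a 128-byte message at
+ * 100 kHz works out to about 10 ms + 500 ms, which is then raised to the
+ * 1 s adapter default.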
+ * + * Return: number of msgs processed on success, negative error otherwise + */ +static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num) +{ + int ret, count; + u32 reg; + struct cdns_i2c *id = adap->algo_data; + bool hold_quirk; +#if IS_ENABLED(CONFIG_I2C_SLAVE) + bool change_role = false; +#endif + + ret = pm_runtime_resume_and_get(id->dev); + if (ret < 0) + return ret; + +#if IS_ENABLED(CONFIG_I2C_SLAVE) + /* Check i2c operating mode and switch if possible */ + if (id->dev_mode == CDNS_I2C_MODE_SLAVE) { + if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE) { + ret = -EAGAIN; + goto out; + } + + /* Set mode to master */ + cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id); + + /* Mark flag to change role once xfer is completed */ + change_role = true; + } +#endif + + /* Check if the bus is free */ + + ret = readl_relaxed_poll_timeout(id->membase + CDNS_I2C_SR_OFFSET, + reg, + !(reg & CDNS_I2C_SR_BA), + CDNS_I2C_POLL_US, CDNS_I2C_TIMEOUT_US); + if (ret) { + ret = -EAGAIN; + if (id->adap.bus_recovery_info) + i2c_recover_bus(adap); + goto out; + } + + hold_quirk = !!(id->quirks & CDNS_I2C_BROKEN_HOLD_BIT); + /* + * Set the flag to one when multiple messages are to be + * processed with a repeated start. + */ + if (num > 1) { + /* + * This controller does not give completion interrupt after a + * master receive message if HOLD bit is set (repeated start), + * resulting in SW timeout. Hence, if a receive message is + * followed by any other message, an error is returned + * indicating that this sequence is not supported. + */ + for (count = 0; (count < num - 1 && hold_quirk); count++) { + if (msgs[count].flags & I2C_M_RD) { + dev_warn(adap->dev.parent, + "Can't do repeated start after a receive message\n"); + ret = -EOPNOTSUPP; + goto out; + } + } + id->bus_hold_flag = 1; + reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); + reg |= CDNS_I2C_CR_HOLD; + cdns_i2c_writereg(reg, CDNS_I2C_CR_OFFSET); + } else { + id->bus_hold_flag = 0; + } + + /* Process the msg one by one */ + for (count = 0; count < num; count++, msgs++) { + if (count == (num - 1)) + id->bus_hold_flag = 0; + + ret = cdns_i2c_process_msg(id, msgs, adap); + if (ret) + goto out; + + /* Report the other error interrupts to application */ + if (id->err_status) { + cdns_i2c_master_reset(adap); + + if (id->err_status & CDNS_I2C_IXR_NACK) { + ret = -ENXIO; + goto out; + } + ret = -EIO; + goto out; + } + } + + ret = num; + +out: + +#if IS_ENABLED(CONFIG_I2C_SLAVE) + /* Switch i2c mode to slave */ + if (change_role) + cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id); +#endif + + pm_runtime_mark_last_busy(id->dev); + pm_runtime_put_autosuspend(id->dev); + return ret; +} + +/** + * cdns_i2c_func - Returns the supported features of the I2C driver + * @adap: pointer to the i2c adapter structure + * + * Return: 32 bit value, each bit corresponding to a feature + */ +static u32 cdns_i2c_func(struct i2c_adapter *adap) +{ + u32 func = I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | + (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) | + I2C_FUNC_SMBUS_BLOCK_DATA; + +#if IS_ENABLED(CONFIG_I2C_SLAVE) + func |= I2C_FUNC_SLAVE; +#endif + + return func; +} + +#if IS_ENABLED(CONFIG_I2C_SLAVE) +static int cdns_reg_slave(struct i2c_client *slave) +{ + int ret; + struct cdns_i2c *id = container_of(slave->adapter, struct cdns_i2c, + adap); + + if (id->slave) + return -EBUSY; + + if (slave->flags & I2C_CLIENT_TEN) + return -EAFNOSUPPORT; + + ret = pm_runtime_resume_and_get(id->dev); + if (ret < 0) + return ret; + + /* Store slave information */ + id->slave 
= slave; + + /* Enable I2C slave */ + cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id); + + return 0; +} + +static int cdns_unreg_slave(struct i2c_client *slave) +{ + struct cdns_i2c *id = container_of(slave->adapter, struct cdns_i2c, + adap); + + pm_runtime_put(id->dev); + + /* Remove slave information */ + id->slave = NULL; + + /* Enable I2C master */ + cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id); + + return 0; +} +#endif + +static void cdns_i2c_prepare_recovery(struct i2c_adapter *adapter) +{ + struct cdns_i2c *id = container_of(adapter, struct cdns_i2c, adap); + + pinctrl_select_state(id->rinfo.pinctrl, id->rinfo.pins_gpio); +} + +static void cdns_i2c_unprepare_recovery(struct i2c_adapter *adapter) +{ + struct cdns_i2c *id = container_of(adapter, struct cdns_i2c, adap); + + pinctrl_select_state(id->rinfo.pinctrl, id->rinfo.pins_default); +} + +/* + * We switch SCL and SDA to their GPIO function and do some bitbanging + * for bus recovery. These alternative pinmux settings can be + * described in the device tree by a separate pinctrl state "gpio". If + * this is missing this is not a big problem, the only implication is + * that we can't do bus recovery. + */ +static int cdns_i2c_init_recovery_info(struct cdns_i2c *id, + struct platform_device *pdev) +{ + struct i2c_bus_recovery_info *rinfo = &id->rinfo; + + rinfo->pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(rinfo->pinctrl)) { + dev_info(&pdev->dev, "can't get pinctrl, bus recovery not supported\n"); + return PTR_ERR(id->rinfo.pinctrl); + } + + rinfo->pins_default = pinctrl_lookup_state(rinfo->pinctrl, + PINCTRL_STATE_DEFAULT); + rinfo->pins_gpio = pinctrl_lookup_state(rinfo->pinctrl, + "gpio"); + + rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN); + rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN); + + if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER || + PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) { + /* Give it another chance if pinctrl used is not ready yet */ + return -EPROBE_DEFER; + } else if (IS_ERR(rinfo->sda_gpiod) || + IS_ERR(rinfo->scl_gpiod) || + IS_ERR(rinfo->pins_default) || + IS_ERR(rinfo->pins_gpio)) { + dev_info(&pdev->dev, "recovery information incomplete\n"); + return 0; + } + + dev_dbg(&pdev->dev, "using scl%s for recovery\n", + rinfo->sda_gpiod ? ",sda" : ""); + + rinfo->prepare_recovery = cdns_i2c_prepare_recovery; + rinfo->unprepare_recovery = cdns_i2c_unprepare_recovery; + rinfo->recover_bus = i2c_generic_scl_recovery; + id->adap.bus_recovery_info = rinfo; + + return 0; +} + +static const struct i2c_algorithm cdns_i2c_algo = { + .master_xfer = cdns_i2c_master_xfer, + .functionality = cdns_i2c_func, +#if IS_ENABLED(CONFIG_I2C_SLAVE) + .reg_slave = cdns_reg_slave, + .unreg_slave = cdns_unreg_slave, +#endif +}; + +/** + * cdns_i2c_calc_divs - Calculate clock dividers + * @f: I2C clock frequency + * @input_clk: Input clock frequency + * @a: First divider (return value) + * @b: Second divider (return value) + * + * f is used as input and output variable. As input it is used as target I2C + * frequency. On function exit f holds the actually resulting I2C frequency. + * + * Return: 0 on success, negative errno otherwise. 
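+ * SCL is derived as input_clk / (22 * (div_a + 1) * (div_b + 1)), so the
+ * loop below walks div_a (0..3) and, for each, picks the div_b that gets
+ * closest to the requested rate without exceeding it. As an illustration
+ * with assumed numbers: a 100 MHz input clock and a 400 kHz target yield
+ * div_a = 0, div_b = 11, i.e. 100 MHz / (22 * 1 * 12) ~= 378.8 kHz.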
+ */ +static int cdns_i2c_calc_divs(unsigned long *f, unsigned long input_clk, + unsigned int *a, unsigned int *b) +{ + unsigned long fscl = *f, best_fscl = *f, actual_fscl, temp; + unsigned int div_a, div_b, calc_div_a = 0, calc_div_b = 0; + unsigned int last_error, current_error; + + /* calculate (divisor_a+1) x (divisor_b+1) */ + temp = input_clk / (22 * fscl); + + /* + * If the calculated value is negative or 0, the fscl input is out of + * range. Return error. + */ + if (!temp || (temp > (CDNS_I2C_DIVA_MAX * CDNS_I2C_DIVB_MAX))) + return -EINVAL; + + last_error = -1; + for (div_a = 0; div_a < CDNS_I2C_DIVA_MAX; div_a++) { + div_b = DIV_ROUND_UP(input_clk, 22 * fscl * (div_a + 1)); + + if ((div_b < 1) || (div_b > CDNS_I2C_DIVB_MAX)) + continue; + div_b--; + + actual_fscl = input_clk / (22 * (div_a + 1) * (div_b + 1)); + + if (actual_fscl > fscl) + continue; + + current_error = fscl - actual_fscl; + + if (last_error > current_error) { + calc_div_a = div_a; + calc_div_b = div_b; + best_fscl = actual_fscl; + last_error = current_error; + } + } + + *a = calc_div_a; + *b = calc_div_b; + *f = best_fscl; + + return 0; +} + +/** + * cdns_i2c_setclk - This function sets the serial clock rate for the I2C device + * @clk_in: I2C clock input frequency in Hz + * @id: Pointer to the I2C device structure + * + * The device must be idle rather than busy transferring data before setting + * these device options. + * The data rate is set by values in the control register. + * The formula for determining the correct register values is + * Fscl = Fpclk/(22 x (divisor_a+1) x (divisor_b+1)) + * See the hardware data sheet for a full explanation of setting the serial + * clock rate. The clock can not be faster than the input clock divide by 22. + * The two most common clock rates are 100KHz and 400KHz. + * + * Return: 0 on success, negative error otherwise + */ +static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id) +{ + unsigned int div_a, div_b; + unsigned int ctrl_reg; + int ret = 0; + unsigned long fscl = id->i2c_clk; + + ret = cdns_i2c_calc_divs(&fscl, clk_in, &div_a, &div_b); + if (ret) + return ret; + + ctrl_reg = id->ctrl_reg; + ctrl_reg &= ~(CDNS_I2C_CR_DIVA_MASK | CDNS_I2C_CR_DIVB_MASK); + ctrl_reg |= ((div_a << CDNS_I2C_CR_DIVA_SHIFT) | + (div_b << CDNS_I2C_CR_DIVB_SHIFT)); + id->ctrl_reg = ctrl_reg; + cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); +#if IS_ENABLED(CONFIG_I2C_SLAVE) + id->ctrl_reg_diva_divb = ctrl_reg & (CDNS_I2C_CR_DIVA_MASK | + CDNS_I2C_CR_DIVB_MASK); +#endif + return 0; +} + +/** + * cdns_i2c_clk_notifier_cb - Clock rate change callback + * @nb: Pointer to notifier block + * @event: Notification reason + * @data: Pointer to notification data object + * + * This function is called when the cdns_i2c input clock frequency changes. + * The callback checks whether a valid bus frequency can be generated after the + * change. If so, the change is acknowledged, otherwise the change is aborted. + * New dividers are written to the HW in the pre- or post change notification + * depending on the scaling direction. + * + * Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK + * to acknowledge the change, NOTIFY_DONE if the notification is + * considered irrelevant. 
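+ * (Descriptive note: the dividers are rewritten with cdns_i2c_setclk()
+ * before the change when the rate goes up and after it when the rate goes
+ * down, so SCL never exceeds the configured frequency during the switch.)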
+ */ +static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long + event, void *data) +{ + struct clk_notifier_data *ndata = data; + struct cdns_i2c *id = to_cdns_i2c(nb); + + if (pm_runtime_suspended(id->dev)) + return NOTIFY_OK; + + switch (event) { + case PRE_RATE_CHANGE: + { + unsigned long input_clk = ndata->new_rate; + unsigned long fscl = id->i2c_clk; + unsigned int div_a, div_b; + int ret; + + ret = cdns_i2c_calc_divs(&fscl, input_clk, &div_a, &div_b); + if (ret) { + dev_warn(id->adap.dev.parent, + "clock rate change rejected\n"); + return NOTIFY_STOP; + } + + /* scale up */ + if (ndata->new_rate > ndata->old_rate) + cdns_i2c_setclk(ndata->new_rate, id); + + return NOTIFY_OK; + } + case POST_RATE_CHANGE: + id->input_clk = ndata->new_rate; + /* scale down */ + if (ndata->new_rate < ndata->old_rate) + cdns_i2c_setclk(ndata->new_rate, id); + return NOTIFY_OK; + case ABORT_RATE_CHANGE: + /* scale up */ + if (ndata->new_rate > ndata->old_rate) + cdns_i2c_setclk(ndata->old_rate, id); + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + +/** + * cdns_i2c_runtime_suspend - Runtime suspend method for the driver + * @dev: Address of the I2C device structure + * + * Put the driver into low power mode. + * + * Return: 0 always + */ +static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev) +{ + struct cdns_i2c *xi2c = dev_get_drvdata(dev); + + clk_disable_unprepare(xi2c->clk); + + return 0; +} + +/** + * cdns_i2c_init - Controller initialisation + * @id: Device private data structure + * + * Initialise the i2c controller. + * + */ +static void cdns_i2c_init(struct cdns_i2c *id) +{ + cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET); + /* + * Cadence I2C controller has a bug wherein it generates + * invalid read transaction after HW timeout in master receiver mode. + * HW timeout is not used by this driver and the interrupt is disabled. + * But the feature itself cannot be disabled. Hence maximum value + * is written to this register to reduce the chances of error. + */ + cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET); +} + +/** + * cdns_i2c_runtime_resume - Runtime resume + * @dev: Address of the I2C device structure + * + * Runtime resume callback. 
+ * + * Return: 0 on success and error value on error + */ +static int __maybe_unused cdns_i2c_runtime_resume(struct device *dev) +{ + struct cdns_i2c *xi2c = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(xi2c->clk); + if (ret) { + dev_err(dev, "Cannot enable clock.\n"); + return ret; + } + cdns_i2c_init(xi2c); + + return 0; +} + +/** + * cdns_i2c_suspend - Suspend method for the I2C driver + * @dev: Address of the I2C device structure + * + * This function disables the I2C controller and + * changes the driver state to "suspend" + * + * Return: 0 on success and error value on error + */ +static int __maybe_unused cdns_i2c_suspend(struct device *dev) +{ + int ret; + + ret = pinctrl_pm_select_sleep_state(dev); + if (ret) + dev_err(dev, "%s: failed to set pins.\n", + __func__); + + ret = pm_runtime_force_suspend(dev); + if (ret) { + dev_err(dev, "Force suspend error.\n"); + return ret; + } + + return 0; +} + +/** + * cdns_i2c_resume - Resume method for the I2C driver + * @dev: Address of the I2C device structure + * + * This function changes the driver state to "ready" + * + * Return: 0 on success and error value on error + */ +static int __maybe_unused cdns_i2c_resume(struct device *dev) +{ + struct cdns_i2c *xi2c = dev_get_drvdata(dev); + int ret; + + ret = pinctrl_pm_select_default_state(dev); + if (ret) + dev_err(dev, "%s: failed to set pins.\n", + __func__); + + /* reset */ + reset_control_assert(xi2c->reset); + /* release reset */ + reset_control_deassert(xi2c->reset); + + ret = pm_runtime_force_resume(dev); + if (ret) { + dev_err(dev, "Force resume error.\n"); + return ret; + } + + return 0; +} + +static const struct dev_pm_ops cdns_i2c_dev_pm_ops = { + SET_RUNTIME_PM_OPS(cdns_i2c_runtime_suspend, + cdns_i2c_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(cdns_i2c_suspend, cdns_i2c_resume) +}; + +static const struct cdns_platform_data r1p10_i2c_def = { + .quirks = CDNS_I2C_BROKEN_HOLD_BIT, +}; + +static const struct of_device_id cdns_i2c_of_match[] = { + { .compatible = "cdns,i2c-r1p10", .data = &r1p10_i2c_def }, + { .compatible = "cdns,i2c-r1p14",}, + { /* end of table */ } +}; +MODULE_DEVICE_TABLE(of, cdns_i2c_of_match); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id cdns_i2c_acpi_ids[] = { + {"CIXH200B", 0}, + { } +}; +MODULE_DEVICE_TABLE(acpi, cdns_i2c_acpi_ids); +#endif + +/** + * cdns_i2c_detect_transfer_size - Detect the maximum transfer size supported + * @id: Device private data structure + * + * Detect the maximum transfer size that is supported by this instance of the + * Cadence I2C controller. + */ +static void cdns_i2c_detect_transfer_size(struct cdns_i2c *id) +{ + u32 val; + + /* + * Writing to the transfer size register is only possible if these two bits + * are set in the control register. + */ + cdns_i2c_writereg(CDNS_I2C_CR_MS | CDNS_I2C_CR_RW, CDNS_I2C_CR_OFFSET); + + /* + * The number of writable bits of the transfer size register can be between + * 4 and 8. This is a controlled through a synthesis parameter of the IP + * core and can vary from instance to instance. The unused MSBs always read + * back as 0. Writing 0xff and then reading the value back will report the + * maximum supported transfer size. 
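+ * For example (assumed configuration): an instance synthesized with a
+ * 6-bit transfer size register reads back 0x3F after the 0xFF write, and
+ * CDNS_I2C_TRANSFER_SIZE() reserves 3 bytes of headroom, giving a usable
+ * per-transfer maximum of 60 bytes.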
+ */ + cdns_i2c_writereg(CDNS_I2C_MAX_TRANSFER_SIZE, CDNS_I2C_XFER_SIZE_OFFSET); + val = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); + id->transfer_size = CDNS_I2C_TRANSFER_SIZE(val); + cdns_i2c_writereg(0, CDNS_I2C_XFER_SIZE_OFFSET); + cdns_i2c_writereg(0, CDNS_I2C_CR_OFFSET); +} + +/** + * cdns_i2c_probe - Platform registration call + * @pdev: Handle to the platform device structure + * + * This function does all the memory allocation and registration for the i2c + * device. User can modify the address mode to 10 bit address mode using the + * ioctl call with option I2C_TENBIT. + * + * Return: 0 on success, negative error otherwise + */ +static int cdns_i2c_probe(struct platform_device *pdev) +{ + struct resource *r_mem; + struct cdns_i2c *id; + int ret, irq; + const struct of_device_id *match; + + id = devm_kzalloc(&pdev->dev, sizeof(*id), GFP_KERNEL); + if (!id) + return -ENOMEM; + + id->dev = &pdev->dev; + platform_set_drvdata(pdev, id); + + match = of_match_node(cdns_i2c_of_match, pdev->dev.of_node); + if (match && match->data) { + const struct cdns_platform_data *data = match->data; + id->quirks = data->quirks; + } + + /* Init optional bus recovery function */ + ret = cdns_i2c_init_recovery_info(id, pdev); + if (ret && ret != ENODEV) + return ret; + + id->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &r_mem); + if (IS_ERR(id->membase)) + return PTR_ERR(id->membase); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + id->adap.owner = THIS_MODULE; + id->adap.dev.of_node = pdev->dev.of_node; + id->adap.algo = &cdns_i2c_algo; + id->adap.timeout = CDNS_I2C_TIMEOUT; + id->adap.retries = 3; /* Default retry value. */ + id->adap.algo_data = id; + id->adap.dev.parent = &pdev->dev; + ACPI_COMPANION_SET(&id->adap.dev, ACPI_COMPANION(id->adap.dev.parent)); + + init_completion(&id->xfer_done); + snprintf(id->adap.name, sizeof(id->adap.name), + "Cadence I2C at %08lx", (unsigned long)r_mem->start); + + id->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(id->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(id->clk), + "input clock not found.\n"); + + ret = clk_prepare_enable(id->clk); + if (ret) + dev_err(&pdev->dev, "Unable to enable clock.\n"); + + id->reset = devm_reset_control_array_get_optional_exclusive(&pdev->dev); + if (IS_ERR(id->reset)) { + dev_err(&pdev->dev, "[%s:%d]get reset error\n", __func__, __LINE__); + ret = PTR_ERR(id->reset); + goto err_clk_dis; + } + /* reset */ + reset_control_assert(id->reset); + /* release reset */ + reset_control_deassert(id->reset); + + pm_runtime_set_autosuspend_delay(id->dev, CNDS_I2C_PM_TIMEOUT); + pm_runtime_use_autosuspend(id->dev); + pm_runtime_set_active(id->dev); + pm_runtime_enable(id->dev); + + id->clk_rate_change_nb.notifier_call = cdns_i2c_clk_notifier_cb; + if (clk_notifier_register(id->clk, &id->clk_rate_change_nb)) + dev_warn(&pdev->dev, "Unable to register clock notifier.\n"); + id->input_clk = clk_get_rate(id->clk); + + ret = device_property_read_u32(&pdev->dev, "clock-frequency", + &id->i2c_clk); + if (ret || (id->i2c_clk > I2C_MAX_FAST_MODE_FREQ)) + id->i2c_clk = I2C_MAX_STANDARD_MODE_FREQ; + +#if IS_ENABLED(CONFIG_I2C_SLAVE) + /* Set initial mode to master */ + id->dev_mode = CDNS_I2C_MODE_MASTER; + id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE; +#endif + id->ctrl_reg = CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS; + + id->fifo_depth = CDNS_I2C_FIFO_DEPTH_DEFAULT; + of_property_read_u32(pdev->dev.of_node, "fifo-depth", &id->fifo_depth); + + cdns_i2c_detect_transfer_size(id); + + ret = 
cdns_i2c_setclk(id->input_clk, id); + if (ret) { + dev_err(&pdev->dev, "invalid SCL clock: %u Hz\n", id->i2c_clk); + ret = -EINVAL; + goto err_clk_notifier_unregister; + } + + ret = devm_request_irq(&pdev->dev, irq, cdns_i2c_isr, 0, + DRIVER_NAME, id); + if (ret) { + dev_err(&pdev->dev, "cannot get irq %d\n", irq); + goto err_clk_notifier_unregister; + } + cdns_i2c_init(id); + + ret = i2c_add_adapter(&id->adap); + if (ret < 0) + goto err_clk_notifier_unregister; + + dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n", + id->i2c_clk / 1000, (unsigned long)r_mem->start, irq); + + return 0; + +err_clk_notifier_unregister: + clk_notifier_unregister(id->clk, &id->clk_rate_change_nb); + reset_control_assert(id->reset); +err_clk_dis: + clk_disable_unprepare(id->clk); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + return ret; +} + +/** + * cdns_i2c_remove - Unregister the device after releasing the resources + * @pdev: Handle to the platform device structure + * + * This function frees all the resources allocated to the device. + * + * Return: 0 always + */ +static void cdns_i2c_remove(struct platform_device *pdev) +{ + struct cdns_i2c *id = platform_get_drvdata(pdev); + + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); + + i2c_del_adapter(&id->adap); + clk_notifier_unregister(id->clk, &id->clk_rate_change_nb); + reset_control_assert(id->reset); + clk_disable_unprepare(id->clk); +} + +static struct platform_driver cdns_i2c_drv = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = cdns_i2c_of_match, + .acpi_match_table = ACPI_PTR(cdns_i2c_acpi_ids), + .pm = &cdns_i2c_dev_pm_ops, + }, + .probe = cdns_i2c_probe, + .remove_new = cdns_i2c_remove, +}; + +module_platform_driver(cdns_i2c_drv); + +MODULE_AUTHOR("Xilinx Inc."); +MODULE_DESCRIPTION("Cadence I2C bus driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 160a670f5fdb1..7315ceb614bf3 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -361,6 +361,14 @@ config IMX_GPCV2 help Enables the wakeup IRQs for IMX platforms with GPCv2 block +config SKY1_PDC + bool + depends on ARM_GIC + default y + select IRQ_DOMAIN + help + Enables the wakeup IRQs for SKY1 platforms with PDC block + config IRQ_MXS def_bool y if MACH_ASM9260 || ARCH_MXS select IRQ_DOMAIN diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 8a72dc73346de..29dee8b97ea14 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -75,6 +75,7 @@ obj-$(CONFIG_ARCH_SA1100) += irq-sa11x0.o obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o obj-$(CONFIG_INGENIC_TCU_IRQ) += irq-ingenic-tcu.o obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o +obj-$(CONFIG_SKY1_PDC) += irq-sky1-pdc.o obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o obj-$(CONFIG_MSCC_OCELOT_IRQ) += irq-mscc-ocelot.o obj-$(CONFIG_MVEBU_GICP) += irq-mvebu-gicp.o diff --git a/drivers/irqchip/irq-sky1-pdc.c b/drivers/irqchip/irq-sky1-pdc.c new file mode 100644 index 0000000000000..619a1a4d5f895 --- /dev/null +++ b/drivers/irqchip/irq-sky1-pdc.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include +#include + +#define SKY1_SIP_PDC 0xC2000009 +#define SKY1_SIP_CONFIG_PDC_SET_WAKE 0x02 + +#define PDC_MAX_IRQS 1000 + +struct irq_domain *cix_domain; + +struct pdcv1_irqchip_data { + struct raw_spinlock rlock; + void __iomem *pdc_base; +}; + +static int pdcv1_wakeup_source_save(void) +{ + return 0; +} + +static void 
pdcv1_wakeup_source_restore(void) +{ +} + +static struct syscore_ops sky1_pdc_syscore_ops = { + .suspend = pdcv1_wakeup_source_save, + .resume = pdcv1_wakeup_source_restore, +}; + +static int sky1_pdc_irq_set_wake(struct irq_data *d, unsigned int on) +{ + struct pdcv1_irqchip_data *cd = d->chip_data; + struct arm_smccc_res res; + unsigned long flags; + + raw_spin_lock_irqsave(&cd->rlock, flags); + arm_smccc_smc(SKY1_SIP_PDC, SKY1_SIP_CONFIG_PDC_SET_WAKE, + d->hwirq, on, 0, 0, 0, 0, &res); + raw_spin_unlock_irqrestore(&cd->rlock, flags); + + return 0; +} + +static struct irq_chip pdcv1_irqchip_data_chip = { + .name = "PDCv1", + .irq_eoi = irq_chip_eoi_parent, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_set_wake = sky1_pdc_irq_set_wake, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_type = irq_chip_set_type_parent, +#ifdef CONFIG_SMP + .irq_set_affinity = irq_chip_set_affinity_parent, +#endif +}; + +static int sky1_pdc_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + if (is_of_node(fwspec->fwnode)) { + if (fwspec->param_count != 3) + return -EINVAL; + + /* No PPI should point to this domain */ + if (fwspec->param[0] != 0) + return -EINVAL; + + *hwirq = fwspec->param[1]; + *type = fwspec->param[2]; + return 0; + } else if (is_acpi_device_node(fwspec->fwnode)) { + if (fwspec->param_count != 2) + return -EINVAL; + + if (fwspec->param[0] < 32) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + /* In ACPI asl file, using GSI to configure interrupt resource. */ + *hwirq = fwspec->param[0] - 32; + *type = fwspec->param[1]; + + WARN_ON(*type == IRQ_TYPE_NONE); + return 0; + } + + return -EINVAL; +} + +static int sky1_pdc_domain_alloc(struct irq_domain *domain, + unsigned int irq, unsigned int nr_irqs, + void *data) +{ + struct irq_fwspec *fwspec = data; + struct irq_fwspec parent_fwspec; + irq_hw_number_t hwirq; + unsigned int type; + int err; + int i; + + err = sky1_pdc_domain_translate(domain, fwspec, &hwirq, &type); + if (err) + return err; + + if (hwirq >= PDC_MAX_IRQS) + return -EINVAL; + + for (i = 0; i < nr_irqs; i++) { + irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i, + &pdcv1_irqchip_data_chip, domain->host_data); + } + + parent_fwspec = *fwspec; + parent_fwspec.fwnode = domain->parent->fwnode; + + return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, + &parent_fwspec); +} + +static const struct irq_domain_ops pdcv1_irqchip_data_domain_ops = { + .translate = sky1_pdc_domain_translate, + .alloc = sky1_pdc_domain_alloc, + .free = irq_domain_free_irqs_common, +}; + +static const struct of_device_id pdcv1_of_match[] = { + { .compatible = "cix,sky1-pdc", .data = (const void *) 2 }, + { /* END */ } +}; + +static int __init sky1_pdc_irqchip_init(struct device_node *node, + struct device_node *parent) +{ + struct irq_domain *parent_domain, *domain; + struct pdcv1_irqchip_data *cd; + const struct of_device_id *id; + + if (!parent) { + pr_err("%pOF: no parent, giving up\n", node); + return -ENODEV; + } + + id = of_match_node(pdcv1_of_match, node); + if (!id) { + pr_err("%pOF: unknown compatibility string\n", node); + return -ENODEV; + } + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("%pOF: unable to get parent domain\n", node); + return -ENXIO; + } + + cd = kzalloc(sizeof(struct pdcv1_irqchip_data), GFP_KERNEL); + if (!cd) + return -ENOMEM; + + raw_spin_lock_init(&cd->rlock); + + 
cd->pdc_base = of_iomap(node, 0); + if (!cd->pdc_base) { + pr_err("%pOF: unable to map pdc registers\n", node); + kfree(cd); + return -ENOMEM; + } + + domain = irq_domain_add_hierarchy(parent_domain, 0, PDC_MAX_IRQS, + node, &pdcv1_irqchip_data_domain_ops, cd); + if (!domain) { + iounmap(cd->pdc_base); + kfree(cd); + return -ENOMEM; + } + irq_set_default_host(domain); + + cix_domain = domain; + + register_syscore_ops(&sky1_pdc_syscore_ops); + + /* + * Clear the OF_POPULATED flag set in of_irq_init so that + * later the pdc power domain driver will not be skipped. + */ + of_node_clear_flag(node, OF_POPULATED); + + return 0; +} + +#ifdef CONFIG_ACPI +static int __init sky1_acpi_pdc_irqchip_init(struct platform_device *pdev) +{ + struct irq_domain *domain; + struct pdcv1_irqchip_data *cd; + struct resource *res_dp; + + cd = kzalloc(sizeof(struct pdcv1_irqchip_data), GFP_KERNEL); + if (!cd) + return -ENOMEM; + + raw_spin_lock_init(&cd->rlock); + res_dp = platform_get_resource(pdev, IORESOURCE_MEM, 0); + cd->pdc_base = devm_ioremap_resource(&pdev->dev, res_dp); + if (!cd->pdc_base) { + pr_err("%p ACPI: unable to map pdc registers\n", pdev); + kfree(cd); + return -ENOMEM; + } + + domain = acpi_irq_create_hierarchy(0, PDC_MAX_IRQS, pdev->dev.fwnode, + &pdcv1_irqchip_data_domain_ops, cd); + + if (!domain) { + iounmap(cd->pdc_base); + kfree(cd); + return -ENOMEM; + } + + irq_set_default_host(domain); + cix_domain = domain; + + register_syscore_ops(&sky1_pdc_syscore_ops); + + return 0; +} + +static const struct acpi_device_id pdcv1_acpi_match[] = { + { .id = "CIXHA019", .driver_data = 0 }, + { /* END */ }, +}; + +static int pdc_probe(struct platform_device *p_dev) +{ + return sky1_acpi_pdc_irqchip_init(p_dev); +} + +static struct platform_driver pdc_platform_driver = { + .probe = pdc_probe, + .driver = { .name = "sky1-pdc", + .owner = THIS_MODULE, + .acpi_match_table = ACPI_PTR(pdcv1_acpi_match) }, +}; + +static int __init sky1_pdc_init(void) +{ + return platform_driver_register(&pdc_platform_driver); +} + +core_initcall(sky1_pdc_init); +#endif + +IRQCHIP_DECLARE(sky1_pdc, "cix,sky1-pdc", sky1_pdc_irqchip_init); + +MODULE_AUTHOR("Copyright 2024 Cix Technology Group Co., Ltd."); +MODULE_DESCRIPTION("Cix Sky1 irq pdc driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig index 0a514b5915272..8511963d8a28c 100644 --- a/drivers/usb/cdns3/Kconfig +++ b/drivers/usb/cdns3/Kconfig @@ -22,6 +22,17 @@ config USB_CDNS3 If you choose to build this driver is a dynamically linked as module, the module will be called cdns3.ko. + +config USB_CDNSP_CIX + tristate "Cadence USBSSP Dual-Role Controller for Cix platform" + depends on USB_CDNS_SUPPORT + help + Say Y here if your system has a Cadence USBSSP dual-role controller. + It supports: dual-role switch, Host-only, and Peripheral-only. + + If you choose to build this driver is a dynamically linked + as module, the module will be called cdnsp-cix.ko. + endif if USB_CDNS3 @@ -130,4 +141,14 @@ config USB_CDNSP_HOST Host controller is compliant with XHCI so it uses standard XHCI driver. +config USB_CDNSP_SKY1 + tristate "sky1 base on Cadence CDNSP Dual-Role Controller" + depends on OF || COMPILE_TEST + select USB_CDNSP_HOST + help + Say Y here if sky1 has a Cadence CDNSP dual-role controller. + + If you choose to build this driver is a dynamically linked + module, the module will be called cdnsp-sky1.ko. 
+ endif diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile index 48dfae75b5aaf..9d8af6e907cc4 100644 --- a/drivers/usb/cdns3/Makefile +++ b/drivers/usb/cdns3/Makefile @@ -6,6 +6,8 @@ CFLAGS_cdnsp-trace.o := -I$(src) cdns-usb-common-y := core.o drd.o cdns3-y := cdns3-plat.o +obj-$(CONFIG_USB_CDNSP_CIX) += cdnsp-cix.o + ifeq ($(CONFIG_USB),m) obj-m += cdns-usb-common.o obj-m += cdns3.o @@ -42,3 +44,5 @@ cdnsp-udc-pci-$(CONFIG_USB_CDNSP_GADGET) += cdnsp-ring.o cdnsp-gadget.o \ ifneq ($(CONFIG_USB_CDNSP_GADGET),) cdnsp-udc-pci-$(CONFIG_TRACING) += cdnsp-trace.o endif + +obj-$(CONFIG_USB_CDNSP_SKY1) += cdnsp-sky1.o diff --git a/drivers/usb/cdns3/cdnsp-cix.c b/drivers/usb/cdns3/cdnsp-cix.c new file mode 100644 index 0000000000000..a94ea026db165 --- /dev/null +++ b/drivers/usb/cdns3/cdnsp-cix.c @@ -0,0 +1,441 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include +#include +#include +#include +#include +#include +#include + +#include "../host/xhci-plat.h" +#include "../host/xhci.h" +#include "core.h" +#include "gadget-export.h" +#include "drd.h" + +static int set_phy_power_on(struct cdns *cdns) +{ + int ret; + + ret = phy_power_on(cdns->usb2_phy); + if (ret) + return ret; + + ret = phy_power_on(cdns->usb3_phy); + if (ret) + phy_power_off(cdns->usb2_phy); + + return ret; +} + +static void set_phy_power_off(struct cdns *cdns) +{ + phy_power_off(cdns->usb3_phy); + phy_power_off(cdns->usb2_phy); +} + +static void devm_phy_release(struct device *dev, void *res) +{ + struct phy *phy = *(struct phy **)res; + + phy_put(dev, phy); +} + +struct phy *devm_phy_optional_ref_get(struct device *dev, + const char *string) +{ + struct phy **ptr, *phy; + struct fwnode_handle *fwnode; + struct device *rdev; + + fwnode = fwnode_find_reference(dev_fwnode(dev), string, 0); + if (IS_ERR_OR_NULL(fwnode)) + return NULL; + + ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + rdev = get_dev_from_fwnode(fwnode_get_parent(fwnode)); + if (IS_ERR_OR_NULL(rdev)) + return NULL; + phy = phy_get(rdev, fwnode_get_name(fwnode)); + if (!IS_ERR_OR_NULL(phy)) { + *ptr = phy; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + put_device(rdev); + + return phy; +} + +static int cdnsp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct cdns *cdns; + void __iomem *regs; + int ret; + + cdns = devm_kzalloc(dev, sizeof(*cdns), GFP_KERNEL); + if (!cdns) + return -ENOMEM; + + cdns->dev = dev; + cdns->pdata = dev_get_platdata(dev); + + platform_set_drvdata(pdev, cdns); + + ret = platform_get_irq_byname(pdev, "host"); + if (ret < 0) { + dev_err(dev, "couldn't get host irq\n"); + return ret; + } + + cdns->xhci_res[0].start = ret; + cdns->xhci_res[0].end = ret; + cdns->xhci_res[0].flags = IORESOURCE_IRQ | irq_get_trigger_type(ret); + cdns->xhci_res[0].name = "host"; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci"); + if (!res) { + dev_err(dev, "couldn't get xhci resource\n"); + return -ENXIO; + } + + cdns->xhci_res[1] = *res; + + cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral"); + + if (cdns->dev_irq < 0) + return cdns->dev_irq; + + regs = devm_platform_ioremap_resource_byname(pdev, "dev"); + if (IS_ERR(regs)) + return PTR_ERR(regs); + cdns->dev_regs = regs; + + cdns->otg_irq = platform_get_irq_byname(pdev, "otg"); + if (cdns->otg_irq < 0) + return cdns->otg_irq; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg"); + if (!res) { + dev_err(dev, "couldn't get otg 
resource\n"); + return -ENXIO; + } + + cdns->otg_res = *res; + + cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup"); + if (cdns->wakeup_irq == -EPROBE_DEFER) + return cdns->wakeup_irq; + + if (cdns->wakeup_irq < 0) { + dev_dbg(dev, "couldn't get wakeup irq\n"); + cdns->wakeup_irq = 0x0; + } + cdns->usb2_phy = devm_phy_optional_get(dev, "cdnsp,usb2-phy"); + if (IS_ERR(cdns->usb2_phy)) { + dev_err(dev, "couldn't get usb2_phy\n"); + return PTR_ERR(cdns->usb2_phy); + } + + ret = phy_init(cdns->usb2_phy); + if (ret) + return ret; + + cdns->usb3_phy = devm_phy_optional_get(dev, "cdnsp,usb3-phy"); + if (IS_ERR_OR_NULL(cdns->usb3_phy)) + cdns->usb3_phy = + devm_phy_optional_ref_get(dev, "cdnsp,usb3-phy"); + if (IS_ERR(cdns->usb3_phy)) + return PTR_ERR(cdns->usb3_phy); + + ret = phy_init(cdns->usb3_phy); + if (ret) + goto err_phy3_init; + + ret = set_phy_power_on(cdns); + + if (ret) + goto err_phy_power_on; + + cdns->gadget_init = cdnsp_gadget_init; + + ret = cdns_init(cdns); + + if (ret) + goto err_cdns_init; + + device_set_wakeup_capable(dev, true); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + if (!(cdns->pdata && (cdns->pdata->quirks & CDNS3_DEFAULT_PM_RUNTIME_ALLOW))) + pm_runtime_forbid(dev); + + /* + * The controller needs less time between bus and controller suspend, + * and we also needs a small delay to avoid frequently entering low + * power mode. + */ + pm_runtime_set_autosuspend_delay(dev, 20); + pm_runtime_mark_last_busy(dev); + pm_runtime_use_autosuspend(dev); + + return 0; + +err_cdns_init: + set_phy_power_off(cdns); +err_phy_power_on: + phy_exit(cdns->usb3_phy); +err_phy3_init: + phy_exit(cdns->usb2_phy); + + return ret; +} + +static int cdnsp_remove(struct platform_device *pdev) +{ + struct cdns *cdns = platform_get_drvdata(pdev); + struct device *dev = cdns->dev; + + pm_runtime_get_sync(dev); + pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); + cdns_remove(cdns); + set_phy_power_off(cdns); + phy_exit(cdns->usb2_phy); + phy_exit(cdns->usb3_phy); + return 0; +} +#ifdef CONFIG_PM + +static int cdnsp_set_platform_suspend(struct device *dev, + bool suspend, bool wakeup) +{ + struct cdns *cdns = dev_get_drvdata(dev); + int ret = 0; + + if (cdns->pdata && cdns->pdata->platform_suspend) + ret = cdns->pdata->platform_suspend(dev, suspend, wakeup); + + return ret; +} + +static int cdnsp_controller_suspend(struct device *dev, pm_message_t msg) +{ + struct cdns *cdns = dev_get_drvdata(dev); + bool wakeup; + unsigned long flags; + + if (cdns->in_lpm) + return 0; + + if (PMSG_IS_AUTO(msg)) + wakeup = true; + else + wakeup = device_may_wakeup(dev); + + cdnsp_set_platform_suspend(cdns->dev, true, wakeup); + set_phy_power_off(cdns); + spin_lock_irqsave(&cdns->lock, flags); + cdns->in_lpm = true; + spin_unlock_irqrestore(&cdns->lock, flags); + dev_dbg(cdns->dev, "%s ends\n", __func__); + + return 0; +} + +static int cdnsp_controller_resume(struct device *dev, pm_message_t msg) +{ + struct cdns *cdns = dev_get_drvdata(dev); + int ret; + unsigned long flags; + + if (!cdns->in_lpm) + return 0; + + ret = set_phy_power_on(cdns); + if (ret) + return ret; + + cdnsp_set_platform_suspend(cdns->dev, false, false); + + spin_lock_irqsave(&cdns->lock, flags); + cdns_resume(cdns); + cdns->in_lpm = false; + spin_unlock_irqrestore(&cdns->lock, flags); + + return ret; +} + +static int cdnsp_plat_runtime_suspend(struct device *dev) +{ + int ret = 0; + struct cdns *cdns = dev_get_drvdata(dev); + struct platform_device *xhci_dev; + struct usb_hcd *hcd; + struct xhci_hcd *xhci; + u32 
command = 0; + u32 result; + + if (cdns->role == USB_ROLE_HOST) { + xhci_dev = cdns->host_dev; + hcd = dev_get_drvdata(&xhci_dev->dev); + xhci = hcd_to_xhci(hcd); + + /* XHCI irq and Wakeup irq are the same interrupt,set Run/Stop bit, + * Otherwise, can not receive interrupt after entering runtime suspend. + */ + command = readl(&xhci->op_regs->command); + command |= CMD_RUN; + writel(command, &xhci->op_regs->command); + readl_poll_timeout_atomic(&xhci->op_regs->status, result, + (result & STS_HALT) == 0 || result == U32_MAX, + 1, 250 * 1000); + if (result == U32_MAX) + dev_err(dev, "set controller run timeout\n"); + } + ret = cdnsp_controller_suspend(dev, PMSG_AUTO_SUSPEND); + + return ret; +} + +static int cdnsp_plat_runtime_resume(struct device *dev) +{ + int ret = 0; + struct cdns *cdns = dev_get_drvdata(dev); + struct platform_device *xhci_dev; + struct usb_hcd *hcd; + struct xhci_hcd *xhci; + + ret = cdnsp_controller_resume(dev, PMSG_AUTO_RESUME); + + if (cdns->role == USB_ROLE_HOST) { + xhci_dev = cdns->host_dev; + hcd = dev_get_drvdata(&xhci_dev->dev); + xhci = hcd_to_xhci(hcd); + set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + if (xhci->shared_hcd) + set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); + if (cdns->wakeup_pending) { + enable_irq(cdns->wakeup_irq); + cdns->wakeup_pending = false; + } + } + return ret; +} + +#ifdef CONFIG_PM_SLEEP +static int cdnsp_plat_suspend(struct device *dev) +{ + struct cdns *cdns = dev_get_drvdata(dev); + int ret; + + cdns_suspend(cdns); + + ret = cdnsp_controller_suspend(dev, PMSG_SUSPEND); + if (ret) + return ret; + + if (cdns->role == USB_ROLE_HOST) { + if (device_may_wakeup(dev) && cdns->wakeup_irq) { + disable_irq(cdns->wakeup_irq); + enable_irq_wake(cdns->wakeup_irq); + dev_info(cdns->dev, "dis irq,enable wake\n"); + } + } + + return ret; +} + +static void cdnsp_cdnsp_plat_shutdown(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cdns *cdns = dev_get_drvdata(dev); + int ret; + + cdns_suspend(cdns); + + /* suspend the usb controller and config it to D3. */ + ret = cdnsp_controller_suspend(dev, PMSG_SUSPEND); + if (ret) + return; + + /* if the user configures the usb device as a wake-up device, then enable_irq_wake + * will enable the corresponding bit for USB to wake up the system. 
+ */ + if (cdns->role == USB_ROLE_HOST) { + if (device_may_wakeup(dev) && cdns->wakeup_irq) { + disable_irq(cdns->wakeup_irq); + enable_irq_wake(cdns->wakeup_irq); + dev_info(cdns->dev, "dis irq,enable wake\n"); + } + } +} + +static int cdnsp_plat_resume(struct device *dev) +{ + int ret; + struct cdns *cdns = dev_get_drvdata(dev); + + ret = cdnsp_controller_resume(dev, PMSG_RESUME); + if (cdns->role == USB_ROLE_HOST) { + if (device_may_wakeup(dev) && cdns->wakeup_irq) { + disable_irq_wake(cdns->wakeup_irq); + enable_irq(cdns->wakeup_irq); + dev_info(cdns->dev, "dis wake,enable irq\n"); + } + } + return ret; +} +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops cdnsp_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(cdnsp_plat_suspend, cdnsp_plat_resume) + SET_RUNTIME_PM_OPS(cdnsp_plat_runtime_suspend, + cdnsp_plat_runtime_resume, NULL) +}; + +#ifdef CONFIG_OF +static const struct of_device_id of_cdnsp_match[] = { + { .compatible = "cdns,usbssp" }, + { }, +}; +MODULE_DEVICE_TABLE(of, of_cdnsp_match); +#endif + +static const struct acpi_device_id acpi_cdnsp_match[] = { + { "CIXH2031" }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, acpi_cdnsp_match); + +static struct platform_driver cdnsp_driver = { + .probe = cdnsp_probe, + .remove = cdnsp_remove, + .driver = { + .name = "cdns-cix", + .of_match_table = of_match_ptr(of_cdnsp_match), + .acpi_match_table = ACPI_PTR(acpi_cdnsp_match), + .pm = &cdnsp_pm_ops, + }, + /* some users need to wake up the system through a USB device after the system shutting + * down, add shutdown callback to suspend the usb controller and config it to D3, and + * enable_irq_wake will enable the corresponding bit for USB to wake up the system. + */ + .shutdown = cdnsp_cdnsp_plat_shutdown, +}; + +module_platform_driver(cdnsp_driver); + +MODULE_AUTHOR("Chao Zeng "); +MODULE_AUTHOR("Matthew MA "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform: cdnsp"); +MODULE_DESCRIPTION("Cdnsp common platform driver"); diff --git a/drivers/usb/cdns3/cdnsp-sky1.c b/drivers/usb/cdns3/cdnsp-sky1.c new file mode 100644 index 0000000000000..62cefb2c1ba69 --- /dev/null +++ b/drivers/usb/cdns3/cdnsp-sky1.c @@ -0,0 +1,748 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * cdnsp-sky1 - USB controller driver for CIX's sky1 SoCs + * + * Author: Chao Zeng + * Author: Matthew MA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core.h" +#include "../host/xhci.h" +#include "../host/xhci-plat.h" +#include "cdnsp-sky1.h" + +static const char *cix_usb_clk_names[CIX_USB_CLK_NUM] = { + "sof_clk", "usb_aclk", "lpm_clk", "usb_pclk" +}; + +struct sky1_src_signal { + unsigned int offset, bit; +}; + +static const struct sky1_src_signal sky1_usb_signals[SKY1_USB_S5_NUM] = { + /* usb config in s5 domain */ + [U3_TYPEC_DRD_ID] = { USB_MODE_STRAP_S5_DOMAIN, U3_TYPEC_DRD_MODE_STRAP_BIT }, + [U3_TYPEC_HOST0_ID] = { USB_MODE_STRAP_S5_DOMAIN, U3_TYPEC_HOST0_MODE_STRAP_BIT }, + [U3_TYPEC_HOST1_ID] = { USB_MODE_STRAP_S5_DOMAIN, U3_TYPEC_HOST1_MODE_STRAP_BIT }, + [U3_TYPEC_HOST2_ID] = { USB_MODE_STRAP_S5_DOMAIN, U3_TYPEC_HOST2_MODE_STRAP_BIT }, + [U3_TYPEA_CTRL0_ID] = { USB_MODE_STRAP_S5_DOMAIN, U3_TYPEA_CTRL0_MODE_STRAP_BIT }, + [U3_TYPEA_CTRL1_ID] = { USB_MODE_STRAP_S5_DOMAIN, U3_TYPEA_CTRL1_MODE_STRAP_BIT}, + [U2_HOST0_ID] = { USB_MODE_STRAP_S5_DOMAIN, U2_HOST0_MODE_STRAP_BIT }, + [U2_HOST1_ID] = { USB_MODE_STRAP_S5_DOMAIN, U2_HOST1_MODE_STRAP_BIT }, + [U2_HOST2_ID] = { 
USB_MODE_STRAP_S5_DOMAIN, U2_HOST2_MODE_STRAP_BIT }, + [U2_HOST3_ID] = { USB_MODE_STRAP_S5_DOMAIN, U2_HOST3_MODE_STRAP_BIT }, +}; + +int sky1_set_mode_by_id(struct device *dev, int mode) +{ + struct cdnsp_sky1 *data = dev_get_drvdata(dev); + + return regmap_update_bits(data->usb_syscon, + sky1_usb_signals[data->id].offset, + GENMASK(sky1_usb_signals[data->id].bit+1, + sky1_usb_signals[data->id].bit), + mode << sky1_usb_signals[data->id].bit); +} + +/** + * sky1_usb_clk_enable_all() - enable all clocks for usb controller + * @dev: Pointer to the device of platform_device + * + */ + +int sky1_usb_clk_enable_all(struct device *dev) +{ + int i, ret; + struct cdnsp_sky1 *data = dev_get_drvdata(dev); + struct clk **cix_usb_clks = data->cix_usb_clks; + + dev_info(dev, "-------%s--------\n", __func__); + WARN_ON(!dev); + + for (i = 0; i < CIX_USB_CLK_NUM; i++) { + cix_usb_clks[i] = devm_clk_get_optional(dev, cix_usb_clk_names[i]); + dev_info(dev, "[%s:%d]get cix_usb_clks[%d] %s:%llx\n", __func__, __LINE__, + i, cix_usb_clk_names[i], (unsigned long long)cix_usb_clks[i]); + if (IS_ERR(cix_usb_clks[i])) { + ret = dev_err_probe(dev, PTR_ERR(cix_usb_clks[i]), + "could not get %s clock\n", cix_usb_clk_names[i]); + goto err_usb_clks; + } + + if (cix_usb_clks[i] == 0) + continue; + + ret = clk_prepare_enable(cix_usb_clks[i]); + if (ret) { + dev_err(dev, "%s enable failed:%d\n", cix_usb_clk_names[i], ret); + goto err_usb_clks; + } + } + + dev_info(dev, "enable sky1 USB clock done\n"); + return 0; + +err_usb_clks: + while (--i >= 0) { + clk_disable_unprepare(cix_usb_clks[i]); + cix_usb_clks[i] = 0; + } + + return ret; +}; + +/** + * sky1_usb_clk_disable_all() - disable all clocks for usb controller + * @dev: Pointer to the device of platform_device + * + */ + +void sky1_usb_clk_disable_all(struct device *dev) +{ + int i; + struct cdnsp_sky1 *data = dev_get_drvdata(dev); + struct clk **cix_usb_clks = data->cix_usb_clks; + + dev_info(dev, "------%s-------\n", __func__); + WARN_ON(!dev); + + for (i = 0; i < CIX_USB_CLK_NUM; i++) + clk_disable_unprepare(cix_usb_clks[i]); +}; + +/** + * sky1_usb_clk_enable_resume() - enable the clocks that are turned + * off while suspend + * @dev: Pointer to the device of platform_device + * + */ + +static int sky1_usb_clk_enable_resume(struct device *dev) +{ + int i, ret; + struct cdnsp_sky1 *data = dev_get_drvdata(dev); + struct clk **cix_usb_clks = data->cix_usb_clks; + + dev_info(dev, "-------%s--------\n", __func__); + WARN_ON(!dev); + + for (i = 0; i < CIX_USB_CLK_OFF_NUM; i++) { + ret = clk_prepare_enable(cix_usb_clks[i]); + if (ret) { + dev_err(dev, "[%s:%d] enable clock:%s error:%d\n", __func__, __LINE__, + cix_usb_clk_names[i], ret); + goto err_usb_clks; + } + } + return 0; + +err_usb_clks: + while (--i >= 0) { + clk_disable_unprepare(cix_usb_clks[i]); + cix_usb_clks[i] = 0; + } + return ret; +}; + +/** + * sky1_usb_clk_disable_suspend() - disable the clocks which are not + * needed when suspend + * @dev: Pointer to the device of platform_device + * + */ + +static void sky1_usb_clk_disable_suspend(struct device *dev) +{ + int i; + struct cdnsp_sky1 *data = dev_get_drvdata(dev); + struct clk **cix_usb_clks = data->cix_usb_clks; + + dev_info(dev, "-------%s--------\n", __func__); + WARN_ON(!dev); + + for (i = 0; i < CIX_USB_CLK_OFF_NUM; i++) + clk_disable_unprepare(cix_usb_clks[i]); +}; + +#define XECP_PM_PMCSR 0x2240 +/* XECP_PM_PMCSR */ +#define PS_MASK GENMASK(1, 0) +#define PS_D0 0 +#define PS_D1 1 +#define PS_D2 2 +#define PS_D3 3 +#define PS_PME_En (1 << 8) + +int 
sky1_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us) +{ + u32 result; + int ret; + + ret = readl_poll_timeout_atomic(ptr, result, + (result & mask) == done || + result == U32_MAX, + 1, timeout_us); + if (result == U32_MAX) /* card removed */ + return -ENODEV; + + return ret; +} + +static int cdns_sky1_platform_suspend(struct device *dev, + bool suspend, bool wakeup) +{ + + struct cdns *cdns = dev_get_drvdata(dev); + struct platform_device *xhci_dev = cdns->host_dev; + struct usb_hcd *hcd; + struct xhci_hcd *xhci; + struct device *parent = cdns->dev->parent; + struct cdnsp_sky1 *data = dev_get_drvdata(parent); + u32 value; + int ret = 0; + + if (cdns->role != USB_ROLE_HOST) + return 0; + + hcd = dev_get_drvdata(&xhci_dev->dev); + if (!hcd) { + dev_info(dev, "host controller have not registered\n"); + return 0; + } + xhci = hcd_to_xhci(hcd); + dev_info(dev, "[%s:%d]dev name:%s, xhci_dev name:%s\n", __func__, __LINE__, dev_name(dev), + dev_name(&xhci_dev->dev)); + + dev_info(dev, "[%s:%d]hcd->regs:%llx\n", __func__, __LINE__, (unsigned long long)hcd->regs); + if (suspend) { + /* SW request low power when all usb ports allow to it ??? */ + value = readl(hcd->regs + XECP_PM_PMCSR); + value &= ~PS_MASK; + value |= PS_D3 | PS_PME_En; + writel(value, hcd->regs + XECP_PM_PMCSR); + dev_info(dev, "[%s:%d] enter D3 write hcd regs(offset):%x:%x\n", __func__, __LINE__, + XECP_PM_PMCSR, value); + /* after controller enter D3, disable axi and sof until axi valid flag change to 0. + */ + if (sky1_handshake(data->ctst_base, AXI_CLOCK_VALID, 0, 100 * 1000)) + dev_err(dev, "[%s:%d] enter D3 failed,register value:%x\n", + __func__, __LINE__, readl(data->ctst_base)); + else + dev_info(dev, "[%s:%d] enter D3 succeed\n", __func__, __LINE__); + + } else { + value = readl(hcd->regs + XECP_PM_PMCSR); + value &= ~PS_MASK; + value |= PS_D0; + value &= ~PS_PME_En; + writel(value, hcd->regs + XECP_PM_PMCSR); + dev_info(dev, "[%s:%d] exit D3 write hcd regs(offset):%x:%x\n", __func__, __LINE__, + XECP_PM_PMCSR, value); + + /* Wait power state back to D0.*/ + if (sky1_handshake(hcd->regs + XECP_PM_PMCSR, PS_MASK, 0, 100 * 1000)) + dev_err(dev, "[%s:%d] exit D3 timeout, power state=0x%lx\n", + __func__, __LINE__, readl(hcd->regs + XECP_PM_PMCSR) & PS_MASK); + else + dev_info(dev, "[%s:%d] exit D3 succeed\n", __func__, __LINE__); + + } + + return ret; + +} + +static int cdnsp_sky1_drd_init(struct cdnsp_sky1 *data) +{ + int ret; + int clk; + int v0, v1, v2; + + reset_control_assert(data->reset); + reset_control_assert(data->preset); + sky1_usb_clk_disable_all(data->dev); + ret = sky1_usb_clk_enable_all(data->dev); + if (ret) + return ret; + writel(CIX_USB_AXI_WR_CACHE_VALUE, data->axi_base); + sky1_set_mode_by_id(data->dev, MODE_STRAP_OTG); + reset_control_deassert(data->preset); + if (data->u3_disable) { + dev_info(data->dev, "[%s:%d]disable u3 port\n", __func__, __LINE__); + writel(D_XEC_CFG_3XPORT_MODE_VALUE, (void *)(data->device_base) + + D_XEC_CFG_3XPORT_MODE); + } + writel(AXI_HALT, (void *)(data->device_base) + D_XEC_AXI_CAP); + writel(AXI_HALT, (void *)(data->xhci_base) + D_XEC_AXI_CAP); + writel(data->axi_bmax_value, (void *)(data->device_base) + D_XEC_AXI_CTRL0); + writel(data->axi_bmax_value, (void *)(data->xhci_base) + D_XEC_AXI_CTRL0); + writel((unsigned int)(~(AXI_HALT)), (void *)(data->device_base) + D_XEC_AXI_CAP); + writel((unsigned int)(~(AXI_HALT)), (void *)(data->xhci_base) + D_XEC_AXI_CAP); + + /* v0 = (int) (250 * data->sof_clk_freq / 1000000000) = (int) ((250/10) * + * 
(data->sof_clk_freq ) / (1000000000/10)) = (int) (25 * clk / 100000000) 250ns + * v1 = (int) (100 * data->sof_clk_freq / 1000000) = (int) (clk / 10000) 100us + * v2 = (int) (100 * data->sof_clk_freq / 1000) = (int) (clk / 10) 100ms + */ + + clk = (int)data->sof_clk_freq; + v0 = (int)(25*clk / 100000000); + v1 = (int)(clk / 10000); + v2 = (int)(clk/10); + + writel((unsigned int)((v0 > 1) ? v0 - 1 : 1), (void *)data->device_base + + D_XEC_PRE_REG_250NS); + writel((unsigned int)((v1 / 100 > 1) > 0 ? v1 / 100 - 1 : 1), + (void *)data->device_base + D_XEC_PRE_REG_1US); + writel((unsigned int)((v1 / 10 > 1) > 0 ? v1 / 10 - 1 : 1), + (void *)data->device_base + D_XEC_PRE_REG_10US); + writel((unsigned int)((v1) > 1 ? v1 - 1 : 1), (void *)data->device_base + + D_XEC_PRE_REG_100US); + writel((unsigned int)((125 * clk / 1000000) > 1 ? (125 * clk / 1000000) : 1), + (void *)data->device_base + D_XEC_PRE_REG_125US); + writel((unsigned int)((v2 / 100 > 1) ? v2 / 100 - 1 : 1), (void *)data->device_base + + D_XEC_PRE_REG_1MS); + writel((unsigned int)((v2 / 10) > 1 ? v2 / 10 - 1 : 1), (void *)data->device_base + + D_XEC_PRE_REG_10MS); + writel((unsigned int)(v2 > 1 ? v2 - 1 : 1), (void *)data->device_base + + D_XEC_PRE_REG_100MS); + dev_info(data->dev, "[%s:%d]readl:%x, %x ,%x, %x, %x, %x, %x, %x\n", __func__, __LINE__, + readl((void *)data->device_base + D_XEC_PRE_REG_250NS), + readl((void *)data->device_base + D_XEC_PRE_REG_1US), + readl((void *)data->device_base + D_XEC_PRE_REG_10US), + readl((void *)data->device_base + D_XEC_PRE_REG_100US), + readl((void *)data->device_base + D_XEC_PRE_REG_125US), + readl((void *)data->device_base + D_XEC_PRE_REG_1MS), + readl((void *)data->device_base + D_XEC_PRE_REG_10MS), + readl((void *)data->device_base + D_XEC_PRE_REG_100MS)); + + clk = (int)data->lpm_clk_freq; + v0 = (int)(25*clk / 100000000); + v1 = (int)(clk / 10000); + v2 = (int)(clk/10); + + writel((unsigned int)((v0 > 1) ? v0 - 1 : 1), (void *)data->device_base + + D_XEC_LPM_PRE_REG_250NS); + writel((unsigned int)((v1 / 100 > 1) > 0 ? v1 / 100 - 1 : 1), (void *)data->device_base + + D_XEC_LPM_PRE_REG_1US); + writel((unsigned int)((v1 / 10 > 1) > 0 ? v1 / 10 - 1 : 1), (void *)data->device_base + + D_XEC_LPM_PRE_REG_10US); + writel((unsigned int)((v1) > 1 ? v1 - 1 : 1), (void *)data->device_base + + D_XEC_LPM_PRE_REG_100US); + writel((unsigned int)((125 * clk / 1000000) > 1 ? (125 * clk / 1000000) : 1), + (void *)data->device_base + D_XEC_LPM_PRE_REG_125US); + writel((unsigned int)((v2 / 100 > 1) ? v2 / 100 - 1 : 1), (void *)data->device_base + + D_XEC_LPM_PRE_REG_1MS); + writel((unsigned int)((v2 / 10) > 1 ? v2 / 10 - 1 : 1), (void *)data->device_base + + D_XEC_LPM_PRE_REG_10MS); + writel((unsigned int)(v2 > 1 ? 
v2 - 1 : 1), (void *)data->device_base + + D_XEC_LPM_PRE_REG_100MS); + + dev_info(data->dev, "[%s:%d]readl:%x, %x ,%x, %x, %x, %x, %x, %x\n", __func__, __LINE__, + readl((void *)data->device_base + D_XEC_LPM_PRE_REG_250NS), + readl((void *)data->device_base + D_XEC_LPM_PRE_REG_1US), + readl((void *)data->device_base + D_XEC_LPM_PRE_REG_10US), + readl((void *)data->device_base + D_XEC_LPM_PRE_REG_100US), + readl((void *)data->device_base + D_XEC_LPM_PRE_REG_125US), + readl((void *)data->device_base + D_XEC_LPM_PRE_REG_1MS), + readl((void *)data->device_base + D_XEC_LPM_PRE_REG_10MS), + readl((void *)data->device_base + D_XEC_LPM_PRE_REG_100MS)); + + + v0 = readl((void *)data->xhci_base + XEC_USBSSP_CHICKEN_BITS_3); + v0 &= APB_TIMEOUT_MASK; + v0 |= APB_TIMEOUT_VALUE_50MS_FREQ_200M; + writel(v0, (void *)data->xhci_base + XEC_USBSSP_CHICKEN_BITS_3); + if (data->u3_disable) { + dev_info(data->dev, "[%s:%d]disable u3 port\n", __func__, __LINE__); + writel(D_XEC_CFG_3XPORT_MODE_VALUE, (void *)(data->xhci_base) + + XEC_CFG_3XPORT_MODE); + } + + /* v0 = (int) (250 * data->sof_clk_freq / 1000000000) = (int) ((250/10) * + * (data->sof_clk_freq ) / (1000000000/10)) = (int) (25 * clk / 100000000) 250ns + * v1 = (int) (100 * data->sof_clk_freq / 1000000) = (int) (clk / 10000) 100us + * v2 = (int) (100 * data->sof_clk_freq / 1000) = (int) (clk / 10) 100ms + */ + + clk = (int)data->sof_clk_freq; + v0 = (int)(25*clk / 100000000); + v1 = (int)(clk / 10000); + v2 = (int)(clk/10); + + writel((unsigned int)((v0 > 1) ? v0 - 1 : 0), (void *)data->xhci_base + + XEC_PRE_REG_250NS); + writel((unsigned int)((v1 / 100 > 1) > 0 ? v1 / 100 - 1 : 0), (void *)data->xhci_base + + XEC_PRE_REG_1US); + writel((unsigned int)((v1 / 10 > 1) > 0 ? v1 / 10 - 1 : 0), (void *)data->xhci_base + + XEC_PRE_REG_10US); + writel((unsigned int)((v1) > 1 ? v1 - 1 : 0), (void *)data->xhci_base + + XEC_PRE_REG_100US); + writel((unsigned int)((125 * clk / 1000000) > 1 ? (125 * clk / 1000000) : 0), + (void *)data->xhci_base + XEC_PRE_REG_125US); + writel((unsigned int)((v2 / 100 > 1) ? v2 / 100 - 1 : 0), (void *)data->xhci_base + + XEC_PRE_REG_1MS); + writel((unsigned int)((v2 / 10) > 1 ? v2 / 10 - 1 : 0), (void *)data->xhci_base + + XEC_PRE_REG_10MS); + writel((unsigned int)(v2 > 1 ? v2 - 1 : 0), (void *)data->xhci_base + + XEC_PRE_REG_100MS); + + dev_info(data->dev, "[%s:%d]readl:%x, %x ,%x, %x, %x, %x, %x, %x\n", __func__, __LINE__, + readl((void *)data->xhci_base + XEC_PRE_REG_250NS), + readl((void *)data->xhci_base + XEC_PRE_REG_1US), + readl((void *)data->xhci_base + XEC_PRE_REG_10US), + readl((void *)data->xhci_base + XEC_PRE_REG_100US), + readl((void *)data->xhci_base + XEC_PRE_REG_125US), + readl((void *)data->xhci_base + XEC_PRE_REG_1MS), + readl((void *)data->xhci_base + XEC_PRE_REG_10MS), + readl((void *)data->xhci_base + XEC_PRE_REG_100MS)); + + clk = (int)data->lpm_clk_freq; + v0 = (int)(25*clk / 100000000); + v1 = (int)(clk / 10000); + v2 = (int)(clk/10); + writel((unsigned int)((v0 > 1) ? v0 - 1 : 0), (void *)data->xhci_base + + XEC_LPM_PRE_REG_250NS); + writel((unsigned int)((v1 / 100 > 1) > 0 ? v1 / 100 - 1 : 0), (void *)data->xhci_base + + XEC_LPM_PRE_REG_1US); + writel((unsigned int)((v1 / 10 > 1) > 0 ? v1 / 10 - 1 : 0), (void *)data->xhci_base + + XEC_LPM_PRE_REG_10US); + writel((unsigned int)((v1) > 1 ? v1 - 1 : 0), (void *)data->xhci_base + + XEC_LPM_PRE_REG_100US); + writel((unsigned int)((125 * clk / 1000000) > 1 ? 
(125 * clk / 1000000) : 0), + (void *)data->xhci_base + XEC_LPM_PRE_REG_125US); + writel((unsigned int)((v2 / 100 > 1) ? v2 / 100 - 1 : 0), (void *)data->xhci_base + + XEC_LPM_PRE_REG_1MS); + writel((unsigned int)((v2 / 10) > 1 ? v2 / 10 - 1 : 0), (void *)data->xhci_base + + XEC_LPM_PRE_REG_10MS); + writel((unsigned int)(v2 > 1 ? v2 - 1 : 0), (void *)data->xhci_base + + XEC_LPM_PRE_REG_100MS); + + dev_info(data->dev, "[%s:%d]readl:%x, %x ,%x, %x, %x, %x, %x, %x\n", __func__, __LINE__, + readl((void *)data->xhci_base + XEC_LPM_PRE_REG_250NS), + readl((void *)data->xhci_base + XEC_LPM_PRE_REG_1US), + readl((void *)data->xhci_base + XEC_LPM_PRE_REG_10US), + readl((void *)data->xhci_base + XEC_LPM_PRE_REG_100US), + readl((void *)data->xhci_base + XEC_LPM_PRE_REG_125US), + readl((void *)data->xhci_base + XEC_LPM_PRE_REG_1MS), + readl((void *)data->xhci_base + XEC_LPM_PRE_REG_10MS), + readl((void *)data->xhci_base + XEC_LPM_PRE_REG_100MS)); + reset_control_deassert(data->reset); + return 0; +} + +int cdns_sky1_platform_reset(struct device *dev) +{ + int ret; + struct device *parent = dev->parent; + struct cdnsp_sky1 *data = dev_get_drvdata(parent); + + if (data) + ret = cdnsp_sky1_drd_init(data); + return ret; +} + +static void *sky1_of_get_addr_by_name(struct device_node *parent, char *name) +{ + struct device_node *node; + int index; + + node = of_get_next_child(parent, NULL); + if (node) { + index = of_property_match_string(node, "reg-names", name); + if (index >= 0) + return of_iomap(node, index); + } + + return NULL; +} + +static void *sky1_acpi_get_addr_by_name(struct device *dev, char *name) +{ + struct fwnode_handle *fwnode; + struct platform_device *pdev; + struct device *device; + struct resource *res; + + fwnode = device_get_next_child_node(dev, NULL); + if (!fwnode) + return NULL; + + device = bus_find_device_by_fwnode(&platform_bus_type, fwnode); + if (!device) + return NULL; + + pdev = to_platform_device(device); + if (!pdev) + return NULL; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + if (!res) + return NULL; + + return ioremap(res->start, resource_size(res)); +} + +static void *sky1_get_addr_by_name(struct device *dev, char *name) +{ + if (!ACPI_COMPANION(dev)) + return sky1_of_get_addr_by_name(dev->of_node, name); + else + return sky1_acpi_get_addr_by_name(dev, name); +} + +static void sky1_put_addr(void __iomem *regs) +{ + if (regs) + iounmap(regs); +} + +static struct of_dev_auxdata cdns_sky1_auxdata[] = { + { + .compatible = "cdns,usbssp", + }, + {}, +}; + +static const struct acpi_device_id cdns_sky1_sub_match[] = { + { "CIXH2031", }, + {}, +}; + +static int cdnsp_sky1_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct cdnsp_sky1 *data; + int ret; + struct cdns3_platform_data *cdns_sky1_pdata; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->axi_base = devm_platform_ioremap_resource_byname(pdev, "axi_property"); + if (IS_ERR(data->axi_base)) { + dev_err(dev, "can't map IOMEM resource\n"); + return PTR_ERR(data->axi_base); + } + + data->ctst_base = devm_platform_ioremap_resource_byname(pdev, "controller_status"); + if (IS_ERR(data->ctst_base)) { + dev_err(dev, "can't map IOMEM resource\n"); + return PTR_ERR(data->ctst_base); + } + + data->reset = devm_reset_control_get(&pdev->dev, "usb_reset"); + if (IS_ERR(data->reset)) { + ret = PTR_ERR(data->reset); + dev_err(dev, "[%s:%d]get reset error:%d\n", __func__, __LINE__, ret); + 
return ret; + } + data->preset = devm_reset_control_get(&pdev->dev, "usb_preset"); + if (IS_ERR(data->preset)) { + ret = PTR_ERR(data->preset); + dev_err(dev, "[%s:%d]get reset error:%d\n", __func__, __LINE__, ret); + return ret; + } + + platform_set_drvdata(pdev, data); + data->dev = dev; + + ret = of_alias_get_id(dev->of_node, "usb"); + if (ret == -ENODEV) { + if (device_property_read_u32(dev, "id", &ret)) + ret = -ENODEV; + } + if (ret < 0 || ret > 9) { + dev_err(dev, "failed to get a valid usb controller id\n"); + return ret < 0 ? ret : -EINVAL; + } + data->id = ret; + + data->usb_syscon = device_syscon_regmap_lookup_by_property(&pdev->dev, + "cix,usb_syscon"); + + if (IS_ERR(data->usb_syscon)) { + dev_err(dev, "Unable to get cix,usb_syscon regmap\n"); + return PTR_ERR(data->usb_syscon); + } + + data->u3_disable = device_property_read_bool(dev, "u3-port-disable"); + + if (!device_property_read_u32(dev, "sof_clk_freq", &ret)) + data->sof_clk_freq = ret; + else + data->sof_clk_freq = CIX_USB_CLK_32K; + + if (!device_property_read_u32(dev, "lpm_clk_freq", &ret)) + data->lpm_clk_freq = ret; + else + data->lpm_clk_freq = CIX_USB_CLK_8M; + + if (!device_property_read_u32(dev, "axi_bmax_value", &ret)) + data->axi_bmax_value = ret; + else + data->axi_bmax_value = AXI_BMAX_VALUE_DEFAULT; + + data->xhci_base = sky1_get_addr_by_name(dev, "xhci"); + if (!data->xhci_base) + return -ENOMEM; + + data->device_base = sky1_get_addr_by_name(dev, "dev"); + if (!data->device_base) + return -ENOMEM; + + ret = cdnsp_sky1_drd_init(data); + if (ret) + return ret; + + /* released by platform_device_release */ + cdns_sky1_pdata = kzalloc(sizeof(struct cdns3_platform_data), GFP_KERNEL); + if (!cdns_sky1_pdata) + return -ENOMEM; + + cdns_sky1_pdata->platform_suspend = cdns_sky1_platform_suspend; + if (!ACPI_COMPANION(dev)) { + cdns_sky1_auxdata->platform_data = cdns_sky1_pdata; + ret = of_platform_populate(node, NULL, cdns_sky1_auxdata, dev); + if (ret) { + dev_err(dev, "failed to create children: %d\n", ret); + goto err; + } + } else { + /* + * ACPI populates all child devices at once. Use another + * mechanism, such as device links, to ensure the probe + * sequence. Just set the platform data here. + */ + struct fwnode_handle *child; + + device_for_each_child_node(dev, child) { + const struct acpi_device_id *id; + struct device *cdev; + + cdev = bus_find_device_by_fwnode(&platform_bus_type, + child); + if (!cdev) + continue; + + id = acpi_match_device(cdns_sky1_sub_match, cdev); + if (id && !cdev->platform_data) { + cdev->platform_data = cdns_sky1_pdata; + } else { + ret = -ENODEV; + goto err; + } + } + } + + device_set_wakeup_capable(dev, true); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + return 0; +err: + kfree(cdns_sky1_pdata); + return ret; +} + +static int cdnsp_sky1_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cdnsp_sky1 *data = dev_get_drvdata(dev); + + pm_runtime_get_sync(dev); + of_platform_depopulate(dev); + sky1_put_addr(data->xhci_base); + sky1_put_addr(data->device_base); + reset_control_deassert(data->reset); + reset_control_deassert(data->preset); + sky1_usb_clk_disable_all(dev); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +#ifdef CONFIG_PM +/* Because the wake-up interrupt and the host interrupt are the same interrupt, gating the axi + * and sof clocks here would prevent port status change interrupts from being generated. 
+ */ + +static int cdnsp_sky1_resume(struct device *dev) +{ + return 0; +} + +static int cdnsp_sky1_suspend(struct device *dev) +{ + return 0; +} + +static int cdnsp_sky1_system_suspend(struct device *dev) +{ + sky1_usb_clk_disable_suspend(dev); + return 0; +} + +static int cdnsp_sky1_system_resume(struct device *dev) +{ + int ret = 0; + struct cdnsp_sky1 *data; + + ret = sky1_usb_clk_enable_resume(dev); + data = dev_get_drvdata(dev); + writel(CIX_USB_AXI_WR_CACHE_VALUE, data->axi_base); + return ret; +} + +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops cdnsp_sky1_pm_ops = { + SET_RUNTIME_PM_OPS(cdnsp_sky1_suspend, cdnsp_sky1_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(cdnsp_sky1_system_suspend, cdnsp_sky1_system_resume) +}; + +static const struct of_device_id cdns_sky1_of_match[] = { + { .compatible = "cix,sky1-usbssp", }, + {}, +}; +MODULE_DEVICE_TABLE(of, cdns_sky1_of_match); + +static const struct acpi_device_id cdnsp_sky1_acpi_match[] = { + { "CIXH2030" }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, cdnsp_sky1_acpi_match); + +static struct platform_driver cdnsp_sky1_driver = { + .probe = cdnsp_sky1_probe, + .remove = cdnsp_sky1_remove, + .driver = { + .name = "cdnsp-sky1", + .of_match_table = cdns_sky1_of_match, + .acpi_match_table = ACPI_PTR(cdnsp_sky1_acpi_match), + .pm = &cdnsp_sky1_pm_ops, + }, +}; + +module_platform_driver(cdnsp_sky1_driver); + +MODULE_ALIAS("platform: cdnsp-sky1"); +MODULE_AUTHOR("Chao Zeng "); +MODULE_AUTHOR("Matthew MA "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Cdnsp Sky1 Glue Layer"); diff --git a/drivers/usb/cdns3/cdnsp-sky1.h b/drivers/usb/cdns3/cdnsp-sky1.h new file mode 100644 index 0000000000000..700736b73051b --- /dev/null +++ b/drivers/usb/cdns3/cdnsp-sky1.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_CDNSP_SKY1_H +#define __LINUX_CDNSP_SKY1_H +#include +#include +#include + +#define USB_MODE_STRAP_S5_DOMAIN 0x424 + +#define MODE_STRAP_OTG 0 +#define MODE_STRAP_HOST 0x1 +#define MODE_STRAP_DEVICE 0x2 + +#define U3_TYPEC_DRD_ID 0 +#define U3_TYPEC_HOST0_ID 1 +#define U3_TYPEC_HOST1_ID 2 +#define U3_TYPEC_HOST2_ID 3 +#define U3_TYPEA_CTRL0_ID 4 +#define U3_TYPEA_CTRL1_ID 5 +#define U2_HOST0_ID 6 +#define U2_HOST1_ID 7 +#define U2_HOST2_ID 8 +#define U2_HOST3_ID 9 +#define SKY1_USB_S5_NUM 10 + +#define U3_TYPEC_DRD_MODE_STRAP_BIT 12 +#define U3_TYPEC_HOST0_MODE_STRAP_BIT 14 +#define U3_TYPEC_HOST1_MODE_STRAP_BIT 16 +#define U3_TYPEC_HOST2_MODE_STRAP_BIT 18 +#define U3_TYPEA_CTRL0_MODE_STRAP_BIT 8 +#define U3_TYPEA_CTRL1_MODE_STRAP_BIT 10 +#define U2_HOST0_MODE_STRAP_BIT 0 +#define U2_HOST1_MODE_STRAP_BIT 2 +#define U2_HOST2_MODE_STRAP_BIT 4 +#define U2_HOST3_MODE_STRAP_BIT 6 + +#define AXI_HALT BIT(31) +#define AXI_BMAX_VALUE_DEFAULT 0x7 + +#define D_XEC_CFG_3XPORT_MODE 0x2040 +#define D_XEC_AXI_CAP 0x2174 +#define D_XEC_AXI_CTRL0 0x217C +#define D_XEC_PRE_REG_250NS 0x21E8 +#define D_XEC_PRE_REG_1US 0x21EC +#define D_XEC_PRE_REG_10US 0x21F0 +#define D_XEC_PRE_REG_100US 0x21F4 +#define D_XEC_PRE_REG_125US 0x21F8 +#define D_XEC_PRE_REG_1MS 0x21FC +#define D_XEC_PRE_REG_10MS 0x2200 +#define D_XEC_PRE_REG_100MS 0x2204 +#define D_XEC_LPM_PRE_REG_250NS 0x2208 +#define D_XEC_LPM_PRE_REG_1US 0x220C +#define D_XEC_LPM_PRE_REG_10US 0x2210 +#define D_XEC_LPM_PRE_REG_100US 0x2214 +#define D_XEC_LPM_PRE_REG_125US 0x2218 +#define D_XEC_LPM_PRE_REG_1MS 0x221C +#define D_XEC_LPM_PRE_REG_10MS 0x2220 +#define D_XEC_LPM_PRE_REG_100MS 0x2224 + +#define XEC_CFG_3XPORT_MODE 0x2040 +#define XEC_PRE_REG_250NS 0x21E8 +#define XEC_PRE_REG_1US 
0x21EC +#define XEC_PRE_REG_10US 0x21F0 +#define XEC_PRE_REG_100US 0x21F4 +#define XEC_PRE_REG_125US 0x21F8 +#define XEC_PRE_REG_1MS 0x21FC +#define XEC_PRE_REG_10MS 0x2200 +#define XEC_PRE_REG_100MS 0x2204 +#define XEC_LPM_PRE_REG_250NS 0x2208 +#define XEC_LPM_PRE_REG_1US 0x220C +#define XEC_LPM_PRE_REG_10US 0x2210 +#define XEC_LPM_PRE_REG_100US 0x2214 +#define XEC_LPM_PRE_REG_125US 0x2218 +#define XEC_LPM_PRE_REG_1MS 0x221C +#define XEC_LPM_PRE_REG_10MS 0x2220 +#define XEC_LPM_PRE_REG_100MS 0x2224 +#define XEC_USBSSP_CHICKEN_BITS_3 0x2230 + +#define D_XEC_CFG_3XPORT_MODE_VALUE 0xa0031e03 +#define APB_TIMEOUT_VALUE_50MS_FREQ_200M 0x2710 +#define APB_TIMEOUT_MASK (~((1 << 22) - 1)) + +#define CIX_USB_CLK_NUM (4) +#define CIX_USB_CLK_OFF_NUM (2) +#define CIX_USB_AXI_WR_CACHE_VALUE 0X33 +#define CIX_USB_CLK_32K 32000 +#define CIX_USB_CLK_4M 4000000 +#define CIX_USB_CLK_8M 8000000 +#define CIX_USB_CLK_24M 24000000 + +#define AXI_CLOCK_ENABLE BIT(0) +#define AXI_CLOCK_VALID BIT(1) +#define AXI_CLOCK_REQ BIT(2) + +struct cdnsp_sky1 { + struct device *dev; + void __iomem *axi_base; + void __iomem *ctst_base; + void __iomem *dr_base; + void __iomem *xhci_base; + void __iomem *device_base; + struct platform_device *cdnsp_pdev; + struct reset_control *reset; + struct reset_control *preset; + struct clk *cix_usb_clks[CIX_USB_CLK_NUM]; + int id; + struct regmap *usb_syscon; + int lpm_clk_freq; + int sof_clk_freq; + bool u3_disable; + int axi_bmax_value; +}; + +int sky1_set_mode_by_id(struct device *dev, int mode); +int sky1_usb_clk_enable_all(struct device *dev); +void sky1_usb_clk_disable_all(struct device *dev); + +#endif /* __LINUX_CDNSP_SKY1_H */ diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig index 2f80c2792dbda..2953924094f5d 100644 --- a/drivers/usb/typec/Kconfig +++ b/drivers/usb/typec/Kconfig @@ -110,6 +110,13 @@ config TYPEC_WUSB3801 If you choose to build this driver as a dynamically linked module, the module will be called wusb3801.ko. +config TYPEC_RTS5453 + tristate "Realtek rts5453 Type-C port controller driver" + depends on I2C + select REGMAP_I2C + help + Say Y or M here if your system has a rts5453 Type-C port controller. 
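The mode-strap defines in cdnsp-sky1.h above make sky1_set_mode_by_id() (earlier in cdnsp-sky1.c) a single regmap_update_bits() call on the 2-bit field that belongs to the port id. As a worked example, assuming a struct cdnsp_sky1 *data as in the probe path and the Type-C DRD port (U3_TYPEC_DRD_ID, strap bit 12), strapping that port as host would amount to the write below; the driver itself writes MODE_STRAP_OTG during cdnsp_sky1_drd_init().

/* Illustrative only: offset 0x424 (USB_MODE_STRAP_S5_DOMAIN),
 * mask GENMASK(13, 12), value MODE_STRAP_HOST << 12 = 0x1000. */
regmap_update_bits(data->usb_syscon, USB_MODE_STRAP_S5_DOMAIN,
		   GENMASK(U3_TYPEC_DRD_MODE_STRAP_BIT + 1,
			   U3_TYPEC_DRD_MODE_STRAP_BIT),
		   MODE_STRAP_HOST << U3_TYPEC_DRD_MODE_STRAP_BIT);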
+ source "drivers/usb/typec/mux/Kconfig" source "drivers/usb/typec/altmodes/Kconfig" diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile index 7a368fea61bc9..432cadc6bf060 100644 --- a/drivers/usb/typec/Makefile +++ b/drivers/usb/typec/Makefile @@ -11,4 +11,5 @@ obj-$(CONFIG_TYPEC_HD3SS3220) += hd3ss3220.o obj-$(CONFIG_TYPEC_STUSB160X) += stusb160x.o obj-$(CONFIG_TYPEC_RT1719) += rt1719.o obj-$(CONFIG_TYPEC_WUSB3801) += wusb3801.o +obj-$(CONFIG_TYPEC_RTS5453) += rts5453.o obj-$(CONFIG_TYPEC) += mux/ diff --git a/drivers/usb/typec/rts5453.c b/drivers/usb/typec/rts5453.c new file mode 100644 index 0000000000000..b7fb53ed2c11c --- /dev/null +++ b/drivers/usb/typec/rts5453.c @@ -0,0 +1,607 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PD driver for typec connection + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "class.h" + +#define RTS_FIELD_GET(_mask, _reg) ((typeof(_mask))((_reg) & (_mask)) >> __bf_shf(_mask)) +#define RTS5453_REG_VID 0x00 +#define RTS5453_REG_PID 0x00 +#define RTS_DATA_CONTROL 0x50 +#define I2C_INT_ACK_BIT BIT(2) +#define HPD_IRQ_ACK BIT(5) +#define RTS_DATA_STATUS 0x5F +//byte 1 +#define CONNECTION_ORIENTATION BIT(1) +#define USB2_CONNECTION BIT(4) +#define USB3_2_CONNECTION BIT(5) +#define USB_DATA_ROLE BIT(7) +//byte 2 +#define DP_CONNECTION BIT(0) +#define DP_SOURCE_SINK BIT(1) +#define HPD_IRQ BIT(6) +#define HPD_STATE BIT(7) +//byte 3 +#define POWER_SOURCE BIT(0) +#define POWER_SINK BIT(1) +#define NO_POWER 0 +#define POWER_MASK 3 + +#define DATA_STATUS_COUNT 6 + +enum { + ALTMODE_DP = 0, + ALTMODE_MAX, +}; + +struct rts5453h { + struct device *dev; + struct regmap *regmap; + struct mutex lock; /* device lock */ + u8 i2c_protocol:1; /*i2c or smbus*/ + int id; + u8 data_status[DATA_STATUS_COUNT]; + struct typec_mux_state state; + struct typec_capability caps; + struct typec_switch *ori_sw; + struct typec_mux *mux; + struct usb_role_switch *role_sw; + struct typec_altmode p_altmode[ALTMODE_MAX]; + struct typec_port *port; + enum usb_role role; + unsigned long mode; + u32 hpd_status; + struct typec_partner *partner; + struct typec_partner_desc desc; +}; + +#define RTS_MAX_LEN 64 + +static int +rts5453h_block_read(struct rts5453h *rts, u8 reg, void *val, size_t len) +{ + return regmap_bulk_read(rts->regmap, reg, val, len); +} +static int rts5453h_block_write(struct rts5453h *rts, u8 reg, + const void *val, size_t len) +{ + return regmap_bulk_write(rts->regmap, reg, val, len); +} +static inline int rts5453h_read16(struct rts5453h *rts, u8 reg, u16 *val) +{ + return rts5453h_block_read(rts, reg, val, sizeof(u16)); +} +static inline int rts5453h_write16(struct rts5453h *rts, u8 reg, u16 val) +{ + return rts5453h_block_write(rts, reg, &val, sizeof(u16)); +} +static inline int rts5453h_read32(struct rts5453h *rts, u8 reg, u32 *val) +{ + return rts5453h_block_read(rts, reg, val, sizeof(u32)); +} +static inline int rts5453h_write32(struct rts5453h *rts, u8 reg, u32 val) +{ + return rts5453h_block_write(rts, reg, &val, sizeof(u32)); +} + +static const struct regmap_config rts5453h_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x7F, +}; + +char dp_only_mode[] = "Alt Mode:DP"; +char dp_usb_mode[] = "Alt Mode:DP + USB"; +char usb_mode[] = "Alt Mode:USB"; + +char power_source[] = "power source"; +char power_sink[] = "power 
sink"; + +char oritation_normal[] = "oritation normal"; +char oritation_reverse[] = "oritation reverse"; + +char data_host[] = "usb host"; +char data_device[] = "usb device"; + + +static int rts5453h_typec_port_update(struct rts5453h *typec) +{ + enum typec_orientation orientation; + int ret = 0; + struct typec_displayport_data dp_data; + enum usb_role role; + + if (!(typec->data_status[1] & USB3_2_CONNECTION) && !(typec->data_status[2] & DP_CONNECTION)) + orientation = TYPEC_ORIENTATION_NONE; + else if (typec->data_status[1] & CONNECTION_ORIENTATION) + orientation = TYPEC_ORIENTATION_REVERSE; + else + orientation = TYPEC_ORIENTATION_NORMAL; + + ret = typec_switch_set(typec->ori_sw, orientation); + + if (ret) + return ret; + + if ((!(typec->data_status[1] & USB3_2_CONNECTION)) && (!(typec->data_status[1] & USB2_CONNECTION))) + role = USB_ROLE_NONE; + else if (typec->data_status[1] & USB_DATA_ROLE) + role = USB_ROLE_DEVICE; + else + role = USB_ROLE_HOST; + + if ((typec->data_status[1] & USB3_2_CONNECTION) && + (typec->data_status[2] & DP_CONNECTION)) { + //usb + dp + dp_data.status = DP_STATUS_ENABLED; + if (typec->data_status[2] & HPD_STATE) + dp_data.status |= DP_STATUS_HPD_STATE; + if (typec->data_status[2] & HPD_IRQ) + dp_data.status |= DP_STATUS_IRQ_HPD; + + typec->state.data = &dp_data; + typec->state.mode = TYPEC_DP_STATE_D; + typec->state.alt = &typec->p_altmode[ALTMODE_DP]; + } else if (typec->data_status[2] & DP_CONNECTION) { + //dp only + dp_data.status = DP_STATUS_ENABLED; + if (typec->data_status[2] & HPD_STATE) + dp_data.status |= DP_STATUS_HPD_STATE; + if (typec->data_status[2] & HPD_IRQ) + dp_data.status |= DP_STATUS_IRQ_HPD; + typec->state.data = &dp_data; + typec->state.mode = TYPEC_DP_STATE_E; + typec->state.alt = &typec->p_altmode[ALTMODE_DP]; + } else if (typec->data_status[1] & USB3_2_CONNECTION) { + //usb only + dp_data.status = 0; + typec->state.mode = TYPEC_STATE_USB; + typec->state.alt = NULL; + typec->state.data = NULL; + } else { + //disconnect + dp_data.status = 0; + typec->state.mode = TYPEC_STATE_SAFE; + typec->state.alt = NULL; + typec->state.data = NULL; + } + + ret = typec_mux_set(typec->mux, &typec->state); + + if (ret) + return ret; + + if ((typec->data_status[3] & POWER_MASK) == POWER_SOURCE) { + typec->caps.type = TYPEC_PORT_SRC; + typec_set_pwr_role(typec->port, TYPEC_SOURCE); + } else if ((typec->data_status[3] & POWER_MASK) == POWER_SINK) { + typec->caps.type = TYPEC_PORT_SNK; + typec_set_pwr_role(typec->port, TYPEC_SINK); + } else { + dev_dbg(typec->dev, "typec port(%d): unkonwn power state %d\n", typec->id, typec->data_status[3]); + } + + if (typec->role != role) { + typec->role = role; + ret = usb_role_switch_set_role(typec->role_sw, role); + if (role == USB_ROLE_DEVICE) + typec_set_data_role(typec->port, TYPEC_DEVICE); + else + typec_set_data_role(typec->port, TYPEC_HOST); + } + + if (typec->mode == typec->state.mode && typec->hpd_status == dp_data.status) + return ret; + + if (typec->state.mode == TYPEC_DP_STATE_D) { + dev_info(typec->dev, "typec port(%d): alt mode(%s)\n oritation(%s)," + "usb_data_role(%s)\n power role(%s), hpd_state(%d)\n", + typec->id, + dp_usb_mode, + orientation == TYPEC_ORIENTATION_REVERSE ? oritation_reverse : oritation_normal, + role == USB_ROLE_DEVICE ? data_device : data_host, + typec->caps.type == TYPEC_PORT_SRC ? 
power_source:power_sink, + (int)(dp_data.status & DP_STATUS_HPD_STATE)); + } else if (typec->state.mode == TYPEC_DP_STATE_E) { + dev_info(typec->dev, "typec port(%d): alt mode(%s)\n oritation(%s)\n power role(%s), hpd_state(%d)\n", + typec->id, + dp_only_mode, + orientation == TYPEC_ORIENTATION_REVERSE ? oritation_reverse : oritation_normal, + typec->caps.type == TYPEC_PORT_SRC ? power_source:power_sink, + (int)(dp_data.status & DP_STATUS_HPD_STATE)); + } else if (typec->state.mode == TYPEC_STATE_USB) { + dev_info(typec->dev, "typec port(%d): alt mode(%s) oritation(%s) power role(%s), usb_data_role(%s)\n", + typec->id, usb_mode, + orientation == TYPEC_ORIENTATION_REVERSE ? oritation_reverse : oritation_normal, + typec->caps.type == TYPEC_PORT_SRC ? power_source:power_sink, + role == USB_ROLE_DEVICE ? data_device : data_host); + } else if (typec->state.mode == TYPEC_STATE_SAFE) { + dev_info(typec->dev, "typec port(%d): disconnect state\n", typec->id); + } + + typec->mode = typec->state.mode; + typec->hpd_status = dp_data.status; + + if ((typec->data_status[1] == 0) && (typec->data_status[2] == 0)) { + dev_info(typec->dev, "typec port(%d): unregister partner\n", typec->id); + typec_unregister_partner(typec->partner); + typec->partner = NULL; + } else { + if (typec->partner == NULL) { + dev_info(typec->dev, "typec port(%d): register partner\n", typec->id); + typec->partner = typec_register_partner(typec->port, &typec->desc); + } + + if (IS_ERR(typec->partner)) + dev_info(typec->dev, "typec port(%d): register partner error\n", typec->id); + } + return ret; +} + +static void rts5453h_typec_register_port_altmodes(struct rts5453h *typec) +{ + /* All PD capable CrOS devices are assumed to support DP altmode. */ + typec->p_altmode[ALTMODE_DP].svid = USB_TYPEC_DP_SID; + typec->p_altmode[ALTMODE_DP].mode = USB_TYPEC_DP_MODE; + typec->state.alt = NULL; + typec->state.mode = TYPEC_STATE_USB; + typec->state.data = NULL; + + typec->partner = NULL; + typec->desc.accessory = TYPEC_ACCESSORY_NONE; + typec->desc.usb_pd = 0; +} + +static int rts5453h_event_handler(struct rts5453h *rts, u8 clear_irq) +{ + int ret = 0; + u8 data_control[3] = {0}; + + mutex_lock(&rts->lock); + if (clear_irq) { + data_control[0] = 2; + data_control[1] = I2C_INT_ACK_BIT; + data_control[2] = 0; + + ret = rts5453h_block_write(rts, RTS_DATA_CONTROL, data_control, 3); + if (ret != 0) + goto i2c_err; + } + + ret = rts5453h_block_read(rts, RTS_DATA_STATUS, rts->data_status, 6); + if (ret != 0) + goto i2c_err; + else + dev_info(rts->dev, "typec port(%d):data status = 0x%x , 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", + rts->id, rts->data_status[0], rts->data_status[1], rts->data_status[2], + rts->data_status[3], rts->data_status[4], rts->data_status[5]); + + rts5453h_typec_port_update(rts); + + if (rts->data_status[2] & HPD_IRQ) { + data_control[0] = 2; + data_control[1] = 0; + data_control[2] = HPD_IRQ_ACK; + ret = rts5453h_block_write(rts, RTS_DATA_CONTROL, data_control, 3); + if (ret != 0) + goto i2c_err; + } + +i2c_err: + if (ret != 0) { + dev_err(rts->dev, "typec port(%d):i2c transfer error = %d, disable irq\n", rts->id, ret); + ret = -ESHUTDOWN; + } + + mutex_unlock(&rts->lock); + return ret; +} + +static int rts5453h_typec_parse_port_props(struct typec_capability *cap, + struct fwnode_handle *fwnode, + struct device *dev) +{ + const char *buf; + int ret; + + memset(cap, 0, sizeof(*cap)); + ret = fwnode_property_read_string(fwnode, "power-role", &buf); + if (ret) { + dev_err(dev, "power-role not found: %d\n", ret); + return ret; + } + ret = 
typec_find_port_power_role(buf); + if (ret < 0) + return ret; + cap->type = ret; + ret = fwnode_property_read_string(fwnode, "data-role", &buf); + if (ret) { + dev_err(dev, "data-role not found: %d\n", ret); + return ret; + } + ret = typec_find_port_data_role(buf); + if (ret < 0) + return ret; + cap->data = ret; + ret = fwnode_property_read_string(fwnode, "try-power-role", &buf); + if (ret) { + dev_err(dev, "try-power-role not found: %d\n", ret); + return ret; + } + ret = typec_find_power_role(buf); + if (ret < 0) + return ret; + cap->prefer_role = ret; + cap->fwnode = fwnode; + return 0; +} + +static int rts5453h_typec_get_switch_handles(struct rts5453h *typec, + struct fwnode_handle *fwnode, + struct device *dev) +{ + typec->mux = typec->port->mux; + typec->ori_sw = typec->port->sw; + typec->role_sw = fwnode_usb_role_switch_get(fwnode); + if (IS_ERR(typec->role_sw)) { + dev_err(dev, "USB role switch handle is error.\n"); + return PTR_ERR(typec->role_sw); + } + if (!typec->role_sw || !typec->mux || !typec->ori_sw) + dev_warn(dev, "mux or switch or role switch has not found\n"); + + return 0; +} + +static void rts5453h_unregister_ports(struct rts5453h *typec) +{ + if (typec->port) { + typec->state.alt = NULL; + typec->state.mode = TYPEC_STATE_USB; + typec->state.data = NULL; + usb_role_switch_set_role(typec->role_sw, USB_ROLE_NONE); + typec_switch_set(typec->ori_sw, TYPEC_ORIENTATION_NONE); + typec_mux_set(typec->mux, &typec->state); + usb_role_switch_put(typec->role_sw); + typec_unregister_port(typec->port); + } +} + +static irqreturn_t rts5453h_irq_handle(int irq, void *data) +{ + struct rts5453h *rts = (struct rts5453h *)data; + int ret; + + ret = rts5453h_event_handler(rts, 1); + + if (ret == -ESHUTDOWN) + disable_irq_nosync(irq); + + return IRQ_HANDLED; +} + +static int rts5453h_init_ports(struct rts5453h *typec) +{ + struct device *dev = typec->dev; + struct fwnode_handle *fwnode; + + int ret; + int nports; + + nports = device_get_child_node_count(dev); + if (nports == 0) { + dev_err(dev, "No port entries found.\n"); + return -ENODEV; + } + + if (nports > 1) { + dev_err(dev, "More ports listed than can be supported.\n"); + return -EINVAL; + } + + device_for_each_child_node(dev, fwnode) { + ret = rts5453h_typec_parse_port_props(&typec->caps, fwnode, dev); + if (ret < 0) + goto unregister_ports; + + typec->port = typec_register_port(dev, &typec->caps); + if (IS_ERR(typec->port)) { + dev_err(dev, "Failed to register port\n"); + ret = PTR_ERR(typec->port); + goto unregister_ports; + } + + ret = rts5453h_typec_get_switch_handles(typec, fwnode, dev); + if (ret) { + dev_err(dev, "No switch control for port\n"); + goto unregister_ports; + } + } + + return ret; + +unregister_ports: + rts5453h_unregister_ports(typec); + return ret; +} + +static int rts5453h_probe(struct i2c_client *client) +{ + struct rts5453h *rts; + int ret; + + dev_dbg(&client->dev, "IRQ %d supplied\n", client->irq); + rts = devm_kzalloc(&client->dev, sizeof(*rts), GFP_KERNEL); + if (!rts) + return -ENOMEM; + + ret = device_property_read_u32(&client->dev, "id", &rts->id); + if (ret < 0) + return ret; + + rts5453h_typec_register_port_altmodes(rts); + + mutex_init(&rts->lock); + + rts->dev = &client->dev; + rts->regmap = devm_regmap_init_i2c(client, &rts5453h_regmap_config); + + if (IS_ERR(rts->regmap)) + return PTR_ERR(rts->regmap); + if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + rts->i2c_protocol = true; + } else { + dev_err(&client->dev, "i2c check functionality failed."); + return -ENODEV; + } + + 
i2c_set_clientdata(client, rts); + + ret = rts5453h_init_ports(rts); + if (ret) + return -EPROBE_DEFER; + + ret = rts5453h_event_handler(rts, 0); + if (ret) { + dev_err(&client->dev, "typec event handling error %d", ret); + return ret; + } + /* + * TBD Check do anything about in init schedule like enable interrupt + * or check the firmware running status before request irq + */ + if (client->irq) { + ret = devm_request_threaded_irq(&client->dev, + client->irq, NULL, + rts5453h_irq_handle, + IRQF_TRIGGER_LOW | IRQF_SHARED | IRQF_ONESHOT, + dev_name(&client->dev), rts); + + if (ret < 0) + dev_err(&client->dev, "request irq %d failed %d", client->irq, ret); + } + + device_set_wakeup_capable(&client->dev, true); + + return ret; +} + +static void rts5453h_remove(struct i2c_client *client) +{ + struct rts5453h *rts = i2c_get_clientdata(client); + + if (rts->partner) { + typec_unregister_partner(rts->partner); + rts->partner = NULL; + } + + if (rts->role_sw) + usb_role_switch_put(rts->role_sw); + + if (rts->port) + typec_unregister_port(rts->port); +} + +static const struct of_device_id rts5453h_of_match[] = { + { .compatible = "realtek,rts5453h", }, + {} +}; + +static const struct acpi_device_id rts5453h_acpi_match[] = { + { "CIXH200D" }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, rts5453h_acpi_match); + +static const struct i2c_device_id rts5453h_id[] = { + { "rts5453h" }, + { } +}; + +#ifdef CONFIG_PM_SLEEP +static int rts5453h_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + if (device_may_wakeup(dev) && client->irq) { + disable_irq(client->irq); + enable_irq_wake(client->irq); + + dev_info(&client->dev, "enable wake"); + } + + return 0; +} + +static int rts5453h_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + if (device_may_wakeup(dev) && client->irq) { + disable_irq_wake(client->irq); + enable_irq(client->irq); + + dev_info(&client->dev, "enable irq"); + } + + return 0; +} +#endif + +static const struct dev_pm_ops rts5453h_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(rts5453h_suspend, rts5453h_resume) +}; + +static void rts5453h_shutdown(struct i2c_client *client) +{ + if (device_may_wakeup(&client->dev) && client->irq) { + disable_irq(client->irq); + enable_irq_wake(client->irq); + dev_info(&client->dev, "enable wake"); + } +} + +MODULE_DEVICE_TABLE(i2c, rts5453h_id); +static struct i2c_driver rts5453h_i2c_driver = { + .driver = { + .name = "rts5453h", + .of_match_table = rts5453h_of_match, + .acpi_match_table = rts5453h_acpi_match, + .pm = &rts5453h_pm_ops + }, + .probe = rts5453h_probe, + .remove = rts5453h_remove, + .id_table = rts5453h_id, + .shutdown = rts5453h_shutdown +}; + +module_i2c_driver(rts5453h_i2c_driver); +MODULE_AUTHOR("Chao Zeng "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("REALTEK RTS5453H USB PD Controller Driver");
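As a reading aid for rts5453h_typec_port_update() above, the mux state selection reduces to the decision tree sketched below; this is a summary of the code in the patch, not an additional hunk. TYPEC_DP_STATE_D corresponds to DP pin assignment D (USB3 plus two DP lanes) and TYPEC_DP_STATE_E to pin assignment E (four DP lanes).

/* Sketch only; assumes <linux/usb/typec.h> and <linux/usb/typec_dp.h>
 * plus the USB3_2_CONNECTION/DP_CONNECTION bits defined in rts5453.c. */
static unsigned long rts5453h_pick_mux_mode(const u8 *status)
{
	bool usb3 = status[1] & USB3_2_CONNECTION;
	bool dp = status[2] & DP_CONNECTION;

	if (usb3 && dp)
		return TYPEC_DP_STATE_D;	/* USB3 + 2-lane DP */
	if (dp)
		return TYPEC_DP_STATE_E;	/* 4-lane DP only */
	if (usb3)
		return TYPEC_STATE_USB;
	return TYPEC_STATE_SAFE;		/* nothing connected */
}

Both acknowledgement writes in rts5453h_event_handler() go to RTS_DATA_CONTROL (0x50) as 3-byte blocks whose first byte is the payload length: { 2, I2C_INT_ACK_BIT, 0 } before reading the status registers, and { 2, 0, HPD_IRQ_ACK } after an HPD IRQ has been forwarded.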