// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 Cai Huoqing
 * Synopsys DesignWare HDMA v0 core
 */

#include <linux/bitfield.h>
#include <linux/irqreturn.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "dw-edma-core.h"
#include "dw-hdma-v0-core.h"
#include "dw-hdma-v0-regs.h"

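/* Control bits used when building the HDMA linked-list elements */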
enum dw_hdma_control {
        DW_HDMA_V0_CB = BIT(0),
        DW_HDMA_V0_TCB = BIT(1),
        DW_HDMA_V0_LLP = BIT(2),
        DW_HDMA_V0_LIE = BIT(3),
        DW_HDMA_V0_RIE = BIT(4),
        DW_HDMA_V0_CCS = BIT(8),
        DW_HDMA_V0_LLE = BIT(9),
};

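/* Accessors for the controller and per-channel (direction, id) register blocks */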
static inline struct dw_hdma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
        return dw->chip->reg_base;
}

static inline struct dw_hdma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
        if (dir == EDMA_DIR_WRITE)
                return &(__dw_regs(dw)->ch[ch].wr);
        else
                return &(__dw_regs(dw)->ch[ch].rd);
}

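/* 32-bit read/write helpers for the channel registers of one or both directions */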
#define SET_CH_32(dw, dir, ch, name, value) \
        writel(value, &(__dw_ch_regs(dw, dir, ch)->name))

#define GET_CH_32(dw, dir, ch, name) \
        readl(&(__dw_ch_regs(dw, dir, ch)->name))

#define SET_BOTH_CH_32(dw, ch, name, value) \
        do {                                                                   \
                writel(value, &(__dw_ch_regs(dw, EDMA_DIR_WRITE, ch)->name));  \
                writel(value, &(__dw_ch_regs(dw, EDMA_DIR_READ, ch)->name));   \
        } while (0)

/* HDMA management callbacks */
static void dw_hdma_v0_core_off(struct dw_edma *dw)
{
        int id;

        for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
                SET_BOTH_CH_32(dw, id, int_setup,
                               HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
                SET_BOTH_CH_32(dw, id, int_clear,
                               HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
                SET_BOTH_CH_32(dw, id, ch_en, 0);
        }
}

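/* Count the channels currently enabled in the given direction */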
static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
        u32 num_ch = 0;
        int id;

        for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
                if (GET_CH_32(dw, dir, id, ch_en) & BIT(0))
                        num_ch++;
        }

        if (num_ch > HDMA_V0_MAX_NR_CH)
                num_ch = HDMA_V0_MAX_NR_CH;

        return (u16)num_ch;
}

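/* Translate the hardware channel status field into a dmaengine status */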
static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan)
{
        struct dw_edma *dw = chan->dw;
        u32 tmp;

        tmp = FIELD_GET(HDMA_V0_CH_STATUS_MASK,
                        GET_CH_32(dw, chan->dir, chan->id, ch_stat));

        if (tmp == 1)
                return DMA_IN_PROGRESS;
        else if (tmp == 3)
                return DMA_COMPLETE;
        else
                return DMA_ERROR;
}

static void dw_hdma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
        struct dw_edma *dw = chan->dw;

        SET_CH_32(dw, chan->dir, chan->id, int_clear, HDMA_V0_STOP_INT_MASK);
}

static void dw_hdma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
        struct dw_edma *dw = chan->dw;

        SET_CH_32(dw, chan->dir, chan->id, int_clear, HDMA_V0_ABORT_INT_MASK);
}

static u32 dw_hdma_v0_core_status_int(struct dw_edma_chan *chan)
{
        struct dw_edma *dw = chan->dw;

        return GET_CH_32(dw, chan->dir, chan->id, int_stat);
}

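/*
 * Shared interrupt handler: scan the channels of one direction and invoke
 * the done/abort callbacks for each channel that raised an interrupt.
 */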
static irqreturn_t
dw_hdma_v0_core_handle_int(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir,
                           dw_edma_handler_t done, dw_edma_handler_t abort)
{
        struct dw_edma *dw = dw_irq->dw;
        unsigned long total, pos, val;
        irqreturn_t ret = IRQ_NONE;
        struct dw_edma_chan *chan;
        unsigned long off, mask;

        if (dir == EDMA_DIR_WRITE) {
                total = dw->wr_ch_cnt;
                off = 0;
                mask = dw_irq->wr_mask;
        } else {
                total = dw->rd_ch_cnt;
                off = dw->wr_ch_cnt;
                mask = dw_irq->rd_mask;
        }

        for_each_set_bit(pos, &mask, total) {
                chan = &dw->chan[pos + off];

                val = dw_hdma_v0_core_status_int(chan);
                if (FIELD_GET(HDMA_V0_STOP_INT_MASK, val)) {
                        dw_hdma_v0_core_clear_done_int(chan);
                        done(chan);

                        ret = IRQ_HANDLED;
                }

                if (FIELD_GET(HDMA_V0_ABORT_INT_MASK, val)) {
                        dw_hdma_v0_core_clear_abort_int(chan);
                        abort(chan);

                        ret = IRQ_HANDLED;
                }
        }

        return ret;
}

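/*
 * Write one data element of the linked list. For a local (CPU-mapped)
 * setup the list lives in plain memory; for a remote setup it is written
 * through MMIO accessors.
 */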
static void dw_hdma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i,
                                     u32 control, u32 size, u64 sar, u64 dar)
{
        ptrdiff_t ofs = i * sizeof(struct dw_hdma_v0_lli);

        if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
                struct dw_hdma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs;

                lli->control = control;
                lli->transfer_size = size;
                lli->sar.reg = sar;
                lli->dar.reg = dar;
        } else {
                struct dw_hdma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs;

                writel(control, &lli->control);
                writel(size, &lli->transfer_size);
                writeq(sar, &lli->sar.reg);
                writeq(dar, &lli->dar.reg);
        }
}

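/* Write the trailing link element pointing at the next linked-list location */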
static void dw_hdma_v0_write_ll_link(struct dw_edma_chunk *chunk,
                                     int i, u32 control, u64 pointer)
{
        ptrdiff_t ofs = i * sizeof(struct dw_hdma_v0_lli);

        if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
                struct dw_hdma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs;

                llp->control = control;
                llp->llp.reg = pointer;
        } else {
                struct dw_hdma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs;

                writel(control, &llp->control);
                writeq(pointer, &llp->llp.reg);
        }
}

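/*
 * Build the linked list for a chunk: one data element per burst, with the
 * interrupt bits set on the last one, followed by a link element pointing
 * back to the start of the list.
 */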
static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
        struct dw_edma_burst *child;
        struct dw_edma_chan *chan = chunk->chan;
        u32 control = 0, i = 0;
        int j;

        if (chunk->cb)
                control = DW_HDMA_V0_CB;

        j = chunk->bursts_alloc;
        list_for_each_entry(child, &chunk->burst->list, list) {
                j--;
                if (!j) {
                        control |= DW_HDMA_V0_LIE;
                        if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
                                control |= DW_HDMA_V0_RIE;
                }

                dw_hdma_v0_write_ll_data(chunk, i++, control, child->sz,
                                         child->sar, child->dar);
        }

        control = DW_HDMA_V0_LLP | DW_HDMA_V0_TCB;
        if (!chunk->cb)
                control |= DW_HDMA_V0_CB;

        dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}

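/*
 * Kick off a transfer: write the linked list, and on the first chunk also
 * enable the channel, unmask the stop/abort interrupts and program the
 * linked-list base address before ringing the doorbell.
 */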
static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
        struct dw_edma_chan *chan = chunk->chan;
        struct dw_edma *dw = chan->dw;
        u32 tmp;

        dw_hdma_v0_core_write_chunk(chunk);

        if (first) {
                /* Enable engine */
                SET_CH_32(dw, chan->dir, chan->id, ch_en, BIT(0));
                /* Interrupt enable & unmask - done, abort */
                tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
                      HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
                      HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
                SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
                /* Channel control */
                SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
                /* Linked list */
                /* The llp register is not 64-bit aligned - keep 32-bit accesses */
                SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
                          lower_32_bits(chunk->ll_region.paddr));
                SET_CH_32(dw, chan->dir, chan->id, llp.msb,
                          upper_32_bits(chunk->ll_region.paddr));
        }
        /* Set consumer cycle */
        SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
                  HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
        /* Doorbell */
        SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
}

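/* Program the per-channel MSI address/data used for the stop and abort interrupts */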
static void dw_hdma_v0_core_ch_config(struct dw_edma_chan *chan)
{
        struct dw_edma *dw = chan->dw;

        /* MSI done addr - low, high */
        SET_CH_32(dw, chan->dir, chan->id, msi_stop.lsb, chan->msi.address_lo);
        SET_CH_32(dw, chan->dir, chan->id, msi_stop.msb, chan->msi.address_hi);
        /* MSI abort addr - low, high */
        SET_CH_32(dw, chan->dir, chan->id, msi_abort.lsb, chan->msi.address_lo);
        SET_CH_32(dw, chan->dir, chan->id, msi_abort.msb, chan->msi.address_hi);
        /* Config MSI data */
        SET_CH_32(dw, chan->dir, chan->id, msi_msgdata, chan->msi.data);
}

/* HDMA debugfs callbacks */
static void dw_hdma_v0_core_debugfs_on(struct dw_edma *dw)
{
}

static const struct dw_edma_core_ops dw_hdma_v0_core = {
        .off = dw_hdma_v0_core_off,
        .ch_count = dw_hdma_v0_core_ch_count,
        .ch_status = dw_hdma_v0_core_ch_status,
        .handle_int = dw_hdma_v0_core_handle_int,
        .start = dw_hdma_v0_core_start,
        .ch_config = dw_hdma_v0_core_ch_config,
        .debugfs_on = dw_hdma_v0_core_debugfs_on,
};

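/* Hook the HDMA v0 callbacks into the generic DW eDMA core */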
void dw_hdma_v0_core_register(struct dw_edma *dw)
{
        dw->core = &dw_hdma_v0_core;
}