diff options
Diffstat (limited to 'arch/mips/lantiq/xway/dma.c')
-rw-r--r-- | arch/mips/lantiq/xway/dma.c | 271 |
1 files changed, 271 insertions, 0 deletions
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c new file mode 100644 index 000000000..ab13e2571 --- /dev/null +++ b/arch/mips/lantiq/xway/dma.c | |||
@@ -0,0 +1,271 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * | ||
4 | * Copyright (C) 2011 John Crispin <john@phrozen.org> | ||
5 | */ | ||
6 | |||
7 | #include <linux/init.h> | ||
8 | #include <linux/platform_device.h> | ||
9 | #include <linux/io.h> | ||
10 | #include <linux/dma-mapping.h> | ||
11 | #include <linux/export.h> | ||
12 | #include <linux/spinlock.h> | ||
13 | #include <linux/clk.h> | ||
14 | #include <linux/delay.h> | ||
15 | #include <linux/err.h> | ||
16 | |||
17 | #include <lantiq_soc.h> | ||
18 | #include <xway_dma.h> | ||
19 | |||
/* DMA engine register offsets, relative to ltq_dma_membase */
#define LTQ_DMA_ID		0x08
#define LTQ_DMA_CTRL		0x10
#define LTQ_DMA_CPOLL		0x14
#define LTQ_DMA_CS		0x18
#define LTQ_DMA_CCTRL		0x1C
#define LTQ_DMA_CDBA		0x20
#define LTQ_DMA_CDLEN		0x24
#define LTQ_DMA_CIS		0x28
#define LTQ_DMA_CIE		0x2C
#define LTQ_DMA_PS		0x40
#define LTQ_DMA_PCTRL		0x44
#define LTQ_DMA_IRNEN		0xf4

#define DMA_ID_CHNR		GENMASK(26, 20)	/* channel number */
#define DMA_DESCPT		BIT(3)		/* descriptor complete irq */
#define DMA_TX			BIT(8)		/* TX channel direction */
#define DMA_CHAN_ON		BIT(0)		/* channel on / off bit */
#define DMA_PDEN		BIT(6)		/* enable packet drop */
#define DMA_CHAN_RST		BIT(1)		/* per-channel reset bit */
#define DMA_RESET		BIT(0)		/* global engine sw reset */
#define DMA_IRQ_ACK		0x7e		/* IRQ status register */
#define DMA_POLL		BIT(31)		/* turn on channel polling */
#define DMA_CLK_DIV4		BIT(6)		/* polling clock divider */
#define DMA_PCTRL_2W_BURST	0x1		/* 2 word burst length */
#define DMA_PCTRL_4W_BURST	0x2		/* 4 word burst length */
#define DMA_PCTRL_8W_BURST	0x3		/* 8 word burst length */
#define DMA_TX_BURST_SHIFT	4		/* tx burst shift */
#define DMA_RX_BURST_SHIFT	2		/* rx burst shift */
#define DMA_ETOP_ENDIANNESS	(0xf << 8)	/* endianness swap etop channels */
#define DMA_WEIGHT	(BIT(17) | BIT(16))	/* default channel weight */

/*
 * Register accessors.  ltq_dma_w32_mask(clear, set, reg) first clears the
 * bits in @clear, then sets the bits in @set (see the enable/disable irq
 * callers below for the usage pattern).
 */
#define ltq_dma_r32(x)			ltq_r32(ltq_dma_membase + (x))
#define ltq_dma_w32(x, y)		ltq_w32(x, ltq_dma_membase + (y))
#define ltq_dma_w32_mask(x, y, z)	ltq_w32_mask(x, y, \
						ltq_dma_membase + (z))

/* base of the memory-mapped register window, set once in ltq_dma_init() */
static void __iomem *ltq_dma_membase;
/* serializes the CS (channel select) register and all per-channel accesses */
static DEFINE_SPINLOCK(ltq_dma_lock);
58 | |||
59 | void | ||
60 | ltq_dma_enable_irq(struct ltq_dma_channel *ch) | ||
61 | { | ||
62 | unsigned long flags; | ||
63 | |||
64 | spin_lock_irqsave(<q_dma_lock, flags); | ||
65 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
66 | ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); | ||
67 | spin_unlock_irqrestore(<q_dma_lock, flags); | ||
68 | } | ||
69 | EXPORT_SYMBOL_GPL(ltq_dma_enable_irq); | ||
70 | |||
71 | void | ||
72 | ltq_dma_disable_irq(struct ltq_dma_channel *ch) | ||
73 | { | ||
74 | unsigned long flags; | ||
75 | |||
76 | spin_lock_irqsave(<q_dma_lock, flags); | ||
77 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
78 | ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN); | ||
79 | spin_unlock_irqrestore(<q_dma_lock, flags); | ||
80 | } | ||
81 | EXPORT_SYMBOL_GPL(ltq_dma_disable_irq); | ||
82 | |||
83 | void | ||
84 | ltq_dma_ack_irq(struct ltq_dma_channel *ch) | ||
85 | { | ||
86 | unsigned long flags; | ||
87 | |||
88 | spin_lock_irqsave(<q_dma_lock, flags); | ||
89 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
90 | ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS); | ||
91 | spin_unlock_irqrestore(<q_dma_lock, flags); | ||
92 | } | ||
93 | EXPORT_SYMBOL_GPL(ltq_dma_ack_irq); | ||
94 | |||
95 | void | ||
96 | ltq_dma_open(struct ltq_dma_channel *ch) | ||
97 | { | ||
98 | unsigned long flag; | ||
99 | |||
100 | spin_lock_irqsave(<q_dma_lock, flag); | ||
101 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
102 | ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL); | ||
103 | spin_unlock_irqrestore(<q_dma_lock, flag); | ||
104 | } | ||
105 | EXPORT_SYMBOL_GPL(ltq_dma_open); | ||
106 | |||
107 | void | ||
108 | ltq_dma_close(struct ltq_dma_channel *ch) | ||
109 | { | ||
110 | unsigned long flag; | ||
111 | |||
112 | spin_lock_irqsave(<q_dma_lock, flag); | ||
113 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
114 | ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); | ||
115 | ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN); | ||
116 | spin_unlock_irqrestore(<q_dma_lock, flag); | ||
117 | } | ||
118 | EXPORT_SYMBOL_GPL(ltq_dma_close); | ||
119 | |||
120 | static void | ||
121 | ltq_dma_alloc(struct ltq_dma_channel *ch) | ||
122 | { | ||
123 | unsigned long flags; | ||
124 | |||
125 | ch->desc = 0; | ||
126 | ch->desc_base = dma_alloc_coherent(ch->dev, | ||
127 | LTQ_DESC_NUM * LTQ_DESC_SIZE, | ||
128 | &ch->phys, GFP_ATOMIC); | ||
129 | |||
130 | spin_lock_irqsave(<q_dma_lock, flags); | ||
131 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
132 | ltq_dma_w32(ch->phys, LTQ_DMA_CDBA); | ||
133 | ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN); | ||
134 | ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); | ||
135 | wmb(); | ||
136 | ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL); | ||
137 | while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST) | ||
138 | ; | ||
139 | spin_unlock_irqrestore(<q_dma_lock, flags); | ||
140 | } | ||
141 | |||
142 | void | ||
143 | ltq_dma_alloc_tx(struct ltq_dma_channel *ch) | ||
144 | { | ||
145 | unsigned long flags; | ||
146 | |||
147 | ltq_dma_alloc(ch); | ||
148 | |||
149 | spin_lock_irqsave(<q_dma_lock, flags); | ||
150 | ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE); | ||
151 | ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); | ||
152 | ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL); | ||
153 | spin_unlock_irqrestore(<q_dma_lock, flags); | ||
154 | } | ||
155 | EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx); | ||
156 | |||
157 | void | ||
158 | ltq_dma_alloc_rx(struct ltq_dma_channel *ch) | ||
159 | { | ||
160 | unsigned long flags; | ||
161 | |||
162 | ltq_dma_alloc(ch); | ||
163 | |||
164 | spin_lock_irqsave(<q_dma_lock, flags); | ||
165 | ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE); | ||
166 | ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); | ||
167 | ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL); | ||
168 | spin_unlock_irqrestore(<q_dma_lock, flags); | ||
169 | } | ||
170 | EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx); | ||
171 | |||
172 | void | ||
173 | ltq_dma_free(struct ltq_dma_channel *ch) | ||
174 | { | ||
175 | if (!ch->desc_base) | ||
176 | return; | ||
177 | ltq_dma_close(ch); | ||
178 | dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE, | ||
179 | ch->desc_base, ch->phys); | ||
180 | } | ||
181 | EXPORT_SYMBOL_GPL(ltq_dma_free); | ||
182 | |||
183 | void | ||
184 | ltq_dma_init_port(int p) | ||
185 | { | ||
186 | ltq_dma_w32(p, LTQ_DMA_PS); | ||
187 | switch (p) { | ||
188 | case DMA_PORT_ETOP: | ||
189 | /* | ||
190 | * Tell the DMA engine to swap the endianness of data frames and | ||
191 | * drop packets if the channel arbitration fails. | ||
192 | */ | ||
193 | ltq_dma_w32_mask(0, DMA_ETOP_ENDIANNESS | DMA_PDEN, | ||
194 | LTQ_DMA_PCTRL); | ||
195 | break; | ||
196 | |||
197 | case DMA_PORT_DEU: | ||
198 | ltq_dma_w32((DMA_PCTRL_2W_BURST << DMA_TX_BURST_SHIFT) | | ||
199 | (DMA_PCTRL_2W_BURST << DMA_RX_BURST_SHIFT), | ||
200 | LTQ_DMA_PCTRL); | ||
201 | break; | ||
202 | |||
203 | default: | ||
204 | break; | ||
205 | } | ||
206 | } | ||
207 | EXPORT_SYMBOL_GPL(ltq_dma_init_port); | ||
208 | |||
/*
 * Probe: map the register window, clock the engine, perform a global
 * software reset and put every channel into a known (reset, polled,
 * disabled) state.  Failures are treated as fatal via panic() — this
 * engine is a core platform dependency.
 */
static int
ltq_dma_init(struct platform_device *pdev)
{
	struct clk *clk;
	struct resource *res;
	unsigned int id, nchannels;
	int i;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ltq_dma_membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ltq_dma_membase))
		panic("Failed to remap dma resource");

	/* power up and reset the dma engine */
	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		panic("Failed to get dma clock");

	/* NOTE(review): the clk reference is never released — presumably the
	 * engine must stay clocked for the system's lifetime; confirm. */
	clk_enable(clk);
	ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL);

	/* brief settle time after the global software reset */
	usleep_range(1, 10);

	/* disable all interrupts */
	ltq_dma_w32(0, LTQ_DMA_IRNEN);

	/* reset/configure each channel */
	id = ltq_dma_r32(LTQ_DMA_ID);
	nchannels = ((id & DMA_ID_CHNR) >> 20);	/* 20 = low bit of DMA_ID_CHNR */
	for (i = 0; i < nchannels; i++) {
		ltq_dma_w32(i, LTQ_DMA_CS);	/* select channel i */
		ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL);
		ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
		ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
	}

	dev_info(&pdev->dev,
		"Init done - hw rev: %X, ports: %d, channels: %d\n",
		id & 0x1f, (id >> 16) & 0xf, nchannels);

	return 0;
}
251 | |||
/* devicetree match table for the xway DMA engine */
static const struct of_device_id dma_match[] = {
	{ .compatible = "lantiq,dma-xway" },
	{},
};

/* built-in platform driver; probed once via the initcall below */
static struct platform_driver dma_driver = {
	.probe = ltq_dma_init,
	.driver = {
		.name = "dma-xway",
		.of_match_table = dma_match,
	},
};
264 | |||
/* register the platform driver for the DMA engine */
int __init
dma_init(void)
{
	return platform_driver_register(&dma_driver);
}

/*
 * Registered at postcore level so the DMA engine is set up before the
 * regular device initcalls of drivers that use it run.
 */
postcore_initcall(dma_init);