Linux Kernel 3.7.1
pci-sh7780.c
/*
 * Low-Level PCI Support for the SH7780
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include "pci-sh4.h"
#include <asm/mmu.h>
#include <asm/sizes.h>

#if defined(CONFIG_CPU_BIG_ENDIAN)
# define PCICR_ENDIANNESS SH4_PCICR_BSWP
#else
# define PCICR_ENDIANNESS 0
#endif


static struct resource sh7785_pci_resources[] = {
        {
                .name   = "PCI IO",
                .start  = 0x1000,
                .end    = SZ_4M - 1,
                .flags  = IORESOURCE_IO,
        }, {
                .name   = "PCI MEM 0",
                .start  = 0xfd000000,
                .end    = 0xfd000000 + SZ_16M - 1,
                .flags  = IORESOURCE_MEM,
        }, {
                .name   = "PCI MEM 1",
                .start  = 0x10000000,
                .end    = 0x10000000 + SZ_64M - 1,
                .flags  = IORESOURCE_MEM,
        }, {
                /*
                 * 32-bit only resources must be last.
                 */
                .name   = "PCI MEM 2",
                .start  = 0xc0000000,
                .end    = 0xc0000000 + SZ_512M - 1,
                .flags  = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
        },
};

static struct pci_channel sh7780_pci_controller = {
        .pci_ops        = &sh4_pci_ops,
        .resources      = sh7785_pci_resources,
        .nr_resources   = ARRAY_SIZE(sh7785_pci_resources),
        .io_offset      = 0,
        .mem_offset     = 0,
        .io_map_base    = 0xfe200000,
        .serr_irq       = evt2irq(0xa00),
        .err_irq        = evt2irq(0xaa0),
};

struct pci_errors {
        unsigned int    mask;
        const char      *str;
} pci_arbiter_errors[] = {
        { SH4_PCIAINT_MBKN,     "master broken" },
        { SH4_PCIAINT_TBTO,     "target bus time out" },
        { SH4_PCIAINT_MBTO,     "master bus time out" },
        { SH4_PCIAINT_TABT,     "target abort" },
        { SH4_PCIAINT_MABT,     "master abort" },
        { SH4_PCIAINT_RDPE,     "read data parity error" },
        { SH4_PCIAINT_WDPE,     "write data parity error" },
}, pci_interrupt_errors[] = {
        { SH4_PCIINT_MLCK,      "master lock error" },
        { SH4_PCIINT_TABT,      "target-target abort" },
        { SH4_PCIINT_TRET,      "target retry time out" },
        { SH4_PCIINT_MFDE,      "master function disable error" },
        { SH4_PCIINT_PRTY,      "address parity error" },
        { SH4_PCIINT_SERR,      "SERR" },
        { SH4_PCIINT_TWDP,      "data parity error for target write" },
        { SH4_PCIINT_TRDP,      "PERR detected for target read" },
        { SH4_PCIINT_MTABT,     "target abort for master" },
        { SH4_PCIINT_MMABT,     "master abort for master" },
        { SH4_PCIINT_MWPD,      "master write data parity error" },
        { SH4_PCIINT_MRPD,      "master read data parity error" },
};

static irqreturn_t sh7780_pci_err_irq(int irq, void *dev_id)
{
        struct pci_channel *hose = dev_id;
        unsigned long addr;
        unsigned int status;
        unsigned int cmd;
        int i;

        addr = __raw_readl(hose->reg_base + SH4_PCIALR);

        /*
         * Handle status errors.
         */
        status = __raw_readw(hose->reg_base + PCI_STATUS);
        if (status & (PCI_STATUS_PARITY |
                      PCI_STATUS_DETECTED_PARITY |
                      PCI_STATUS_SIG_TARGET_ABORT |
                      PCI_STATUS_REC_TARGET_ABORT |
                      PCI_STATUS_REC_MASTER_ABORT)) {
                cmd = pcibios_handle_status_errors(addr, status, hose);
                if (likely(cmd))
                        __raw_writew(cmd, hose->reg_base + PCI_STATUS);
        }

        /*
         * Handle arbiter errors.
         */
        status = __raw_readl(hose->reg_base + SH4_PCIAINT);
        for (i = cmd = 0; i < ARRAY_SIZE(pci_arbiter_errors); i++) {
                if (status & pci_arbiter_errors[i].mask) {
                        printk(KERN_DEBUG "PCI: %s, addr=%08lx\n",
                               pci_arbiter_errors[i].str, addr);
                        cmd |= pci_arbiter_errors[i].mask;
                }
        }
        __raw_writel(cmd, hose->reg_base + SH4_PCIAINT);

        /*
         * Handle the remaining PCI errors.
         */
        status = __raw_readl(hose->reg_base + SH4_PCIINT);
        for (i = cmd = 0; i < ARRAY_SIZE(pci_interrupt_errors); i++) {
                if (status & pci_interrupt_errors[i].mask) {
                        printk(KERN_DEBUG "PCI: %s, addr=%08lx\n",
                               pci_interrupt_errors[i].str, addr);
                        cmd |= pci_interrupt_errors[i].mask;
                }
        }
        __raw_writel(cmd, hose->reg_base + SH4_PCIINT);

        return IRQ_HANDLED;
}

static irqreturn_t sh7780_pci_serr_irq(int irq, void *dev_id)
{
        struct pci_channel *hose = dev_id;

        printk(KERN_DEBUG "PCI: system error received: ");
        pcibios_report_status(PCI_STATUS_SIG_SYSTEM_ERROR, 1);
        printk("\n");

        /* Deassert SERR */
        __raw_writel(SH4_PCIINTM_SDIM, hose->reg_base + SH4_PCIINTM);

        /* Back off the IRQ for awhile */
        disable_irq_nosync(irq);
        hose->serr_timer.expires = jiffies + HZ;
        add_timer(&hose->serr_timer);

        return IRQ_HANDLED;
}
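
/*
 * Note: hose->serr_timer is presumably initialized by the common SH PCI
 * code; when it expires (one second after the add_timer() above) its
 * handler is expected to re-enable the SERR IRQ disabled here with
 * disable_irq_nosync(), throttling a storm of system errors to roughly
 * one report per second.
 */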

static int __init sh7780_pci_setup_irqs(struct pci_channel *hose)
{
        int ret;

        /* Clear out PCI arbiter IRQs */
        __raw_writel(0, hose->reg_base + SH4_PCIAINT);

        /* Clear all error conditions */
        __raw_writew(PCI_STATUS_DETECTED_PARITY  | \
                     PCI_STATUS_SIG_SYSTEM_ERROR | \
                     PCI_STATUS_REC_MASTER_ABORT | \
                     PCI_STATUS_REC_TARGET_ABORT | \
                     PCI_STATUS_SIG_TARGET_ABORT | \
                     PCI_STATUS_PARITY, hose->reg_base + PCI_STATUS);

        ret = request_irq(hose->serr_irq, sh7780_pci_serr_irq, 0,
                          "PCI SERR interrupt", hose);
        if (unlikely(ret)) {
                printk(KERN_ERR "PCI: Failed hooking SERR IRQ\n");
                return ret;
        }

        /*
         * The PCI ERR IRQ needs to be IRQF_SHARED since all of the power
         * down IRQ vectors are routed through the ERR IRQ vector. We
         * only request_irq() once as there is only a single masking
         * source for multiple events.
         */
        ret = request_irq(hose->err_irq, sh7780_pci_err_irq, IRQF_SHARED,
                          "PCI ERR interrupt", hose);
        if (unlikely(ret)) {
                free_irq(hose->serr_irq, hose);
                return ret;
        }

        /* Unmask all of the arbiter IRQs. */
        __raw_writel(SH4_PCIAINT_MBKN | SH4_PCIAINT_TBTO | SH4_PCIAINT_MBTO | \
                     SH4_PCIAINT_TABT | SH4_PCIAINT_MABT | SH4_PCIAINT_RDPE | \
                     SH4_PCIAINT_WDPE, hose->reg_base + SH4_PCIAINTM);

        /* Unmask all of the PCI IRQs */
        __raw_writel(SH4_PCIINTM_TTADIM  | SH4_PCIINTM_TMTOIM  | \
                     SH4_PCIINTM_MDEIM   | SH4_PCIINTM_APEDIM  | \
                     SH4_PCIINTM_SDIM    | SH4_PCIINTM_DPEITWM | \
                     SH4_PCIINTM_PEDITRM | SH4_PCIINTM_TADIMM  | \
                     SH4_PCIINTM_MADIMM  | SH4_PCIINTM_MWPDIM  | \
                     SH4_PCIINTM_MRDPEIM, hose->reg_base + SH4_PCIINTM);

        return ret;
}

static inline void __init sh7780_pci_teardown_irqs(struct pci_channel *hose)
{
        free_irq(hose->err_irq, hose);
        free_irq(hose->serr_irq, hose);
}

static void __init sh7780_pci66_init(struct pci_channel *hose)
{
        unsigned int tmp;

        if (!pci_is_66mhz_capable(hose, 0, 0))
                return;

        /* Enable register access */
        tmp = __raw_readl(hose->reg_base + SH4_PCICR);
        tmp |= SH4_PCICR_PREFIX;
        __raw_writel(tmp, hose->reg_base + SH4_PCICR);

        /* Enable 66MHz operation */
        tmp = __raw_readw(hose->reg_base + PCI_STATUS);
        tmp |= PCI_STATUS_66MHZ;
        __raw_writew(tmp, hose->reg_base + PCI_STATUS);

        /* Done */
        tmp = __raw_readl(hose->reg_base + SH4_PCICR);
        tmp |= SH4_PCICR_PREFIX | SH4_PCICR_CFIN;
        __raw_writel(tmp, hose->reg_base + SH4_PCICR);
}

static int __init sh7780_pci_init(void)
{
        struct pci_channel *chan = &sh7780_pci_controller;
        phys_addr_t memphys;
        size_t memsize;
        unsigned int id;
        const char *type;
        int ret, i;

        printk(KERN_NOTICE "PCI: Starting initialization.\n");

        chan->reg_base = 0xfe040000;

        /* Enable CPU access to the PCIC registers. */
        __raw_writel(PCIECR_ENBL, PCIECR);

        /* Reset */
        __raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_PRST | PCICR_ENDIANNESS,
                     chan->reg_base + SH4_PCICR);

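        /*
         * Note: the SH4_PCICR writes in this function all carry
         * SH4_PCICR_PREFIX (the "CR prefix for write" in pci-sh4.h),
         * which appears to act as a key the PCIC requires before it will
         * accept a write to this register, rather than a functional
         * mode bit.
         */
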
        /*
         * Wait for it to come back up. The spec says to allow for up to
         * 1 second after toggling the reset pin, but in practice 100ms
         * is more than enough.
         */
        mdelay(100);

        id = __raw_readw(chan->reg_base + PCI_VENDOR_ID);
        if (id != PCI_VENDOR_ID_RENESAS) {
                printk(KERN_ERR "PCI: Unknown vendor ID 0x%04x.\n", id);
                return -ENODEV;
        }

        id = __raw_readw(chan->reg_base + PCI_DEVICE_ID);
        type = (id == PCI_DEVICE_ID_RENESAS_SH7763) ? "SH7763" :
               (id == PCI_DEVICE_ID_RENESAS_SH7780) ? "SH7780" :
               (id == PCI_DEVICE_ID_RENESAS_SH7781) ? "SH7781" :
               (id == PCI_DEVICE_ID_RENESAS_SH7785) ? "SH7785" :
                                                      NULL;
        if (unlikely(!type)) {
                printk(KERN_ERR "PCI: Found an unsupported Renesas host "
                       "controller, device id 0x%04x.\n", id);
                return -EINVAL;
        }

        printk(KERN_NOTICE "PCI: Found a Renesas %s host "
               "controller, revision %d.\n", type,
               __raw_readb(chan->reg_base + PCI_REVISION_ID));

        /*
         * Now throw it in to register initialization mode and
         * start the real work.
         */
        __raw_writel(SH4_PCICR_PREFIX | PCICR_ENDIANNESS,
                     chan->reg_base + SH4_PCICR);

        memphys = __pa(memory_start);
        memsize = roundup_pow_of_two(memory_end - memory_start);

        /*
         * If there's more than 512MB of memory, we need to roll over to
         * LAR1/LSR1.
         */
        if (memsize > SZ_512M) {
                __raw_writel(memphys + SZ_512M, chan->reg_base + SH4_PCILAR1);
                __raw_writel((((memsize - SZ_512M) - SZ_1M) & 0x1ff00000) | 1,
                             chan->reg_base + SH4_PCILSR1);
                memsize = SZ_512M;
        } else {
                /*
                 * Otherwise just zero it out and disable it.
                 */
                __raw_writel(0, chan->reg_base + SH4_PCILAR1);
                __raw_writel(0, chan->reg_base + SH4_PCILSR1);
        }

        /*
         * LAR0/LSR0 covers up to the first 512MB, which is enough to
         * cover all of lowmem on most platforms.
         */
        __raw_writel(memphys, chan->reg_base + SH4_PCILAR0);
        __raw_writel(((memsize - SZ_1M) & 0x1ff00000) | 1,
                     chan->reg_base + SH4_PCILSR0);
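
        /*
         * Sanity check of the LSR encoding used above (the field layout
         * is inferred from the mask): bits 20-28 hold the window size
         * minus 1MB, in 1MB units, and bit 0 enables the window. A 64MB
         * local memory window thus encodes as
         *
         *   (SZ_64M - SZ_1M) & 0x1ff00000 = 0x03f00000, |1 -> 0x03f00001
         *
         * and a full 512MB window as 0x1ff00001.
         */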

        /*
         * Hook up the ERR and SERR IRQs.
         */
        ret = sh7780_pci_setup_irqs(chan);
        if (unlikely(ret))
                return ret;

        /*
         * Disable the cache snoop controller for non-coherent DMA.
         */
        __raw_writel(0, chan->reg_base + SH7780_PCICSCR0);
        __raw_writel(0, chan->reg_base + SH7780_PCICSAR0);
        __raw_writel(0, chan->reg_base + SH7780_PCICSCR1);
        __raw_writel(0, chan->reg_base + SH7780_PCICSAR1);

        /*
         * Setup the memory BARs
         */
        for (i = 1; i < chan->nr_resources; i++) {
                struct resource *res = chan->resources + i;
                resource_size_t size;

                if (unlikely(res->flags & IORESOURCE_IO))
                        continue;

                /*
                 * Make sure we're in the right physical addressing mode
                 * for dealing with the resource.
                 */
                if ((res->flags & IORESOURCE_MEM_32BIT) && __in_29bit_mode()) {
                        chan->nr_resources--;
                        continue;
                }

                size = resource_size(res);

                /*
                 * The MBMR mask is calculated in units of 256kB, which
                 * keeps things pretty simple.
                 */
                __raw_writel(((roundup_pow_of_two(size) / SZ_256K) - 1) << 18,
                             chan->reg_base + SH7780_PCIMBMR(i - 1));
                __raw_writel(res->start, chan->reg_base + SH7780_PCIMBR(i - 1));
        }
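
        /*
         * For example, the 64MB "PCI MEM 1" window above yields
         * (SZ_64M / SZ_256K - 1) << 18 = 255 << 18 = 0x03fc0000 in the
         * PCIMBMR register, while the 16MB "PCI MEM 0" window yields
         * 63 << 18 = 0x00fc0000.
         */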

        /*
         * And I/O.
         */
        __raw_writel(0, chan->reg_base + PCI_BASE_ADDRESS_0);
        __raw_writel(0, chan->reg_base + SH7780_PCIIOBR);
        __raw_writel(0, chan->reg_base + SH7780_PCIIOBMR);

        __raw_writew(PCI_COMMAND_SERR   | PCI_COMMAND_WAIT   | \
                     PCI_COMMAND_PARITY | PCI_COMMAND_MASTER | \
                     PCI_COMMAND_MEMORY, chan->reg_base + PCI_COMMAND);

        /*
         * Initialization mode complete, release the control register and
         * enable round robin mode to stop device overruns/starvation.
         */
        __raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_CFIN | SH4_PCICR_FTO |
                     PCICR_ENDIANNESS,
                     chan->reg_base + SH4_PCICR);

        ret = register_pci_controller(chan);
        if (unlikely(ret))
                goto err;

        sh7780_pci66_init(chan);

        printk(KERN_NOTICE "PCI: Running at %dMHz.\n",
               (__raw_readw(chan->reg_base + PCI_STATUS) & PCI_STATUS_66MHZ) ?
               66 : 33);

        return 0;

err:
        sh7780_pci_teardown_irqs(chan);
        return ret;
}
arch_initcall(sh7780_pci_init);