/* pbm.h: UltraSparc PCI controller software state.
 *
 * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net)
 */

#ifndef __SPARC64_PBM_H
#define __SPARC64_PBM_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/msi.h>

#include <asm/io.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/of_device.h>
#include <asm/iommu.h>

/* The abstraction used here is that there are PCI controllers,
 * each with one (Sabre) or two (PSYCHO/SCHIZO) PCI bus modules
 * underneath.  Each PCI bus module uses an IOMMU (shared by both
 * PBMs of a controller, or per-PBM), and if a streaming buffer
 * is present, each PCI bus module has its own (i.e. the IOMMU
 * might be shared between PBMs, but the STC is never shared).
 * Furthermore, each PCI bus module controls its own autonomous
 * PCI bus.
 */

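/* In pointer terms, the abstraction above means: given a
 * struct pci_pbm_info *pbm, pbm->parent is its controller, and the
 * sibling bus module, when present, is the other of
 * pbm->parent->pbm_A and pbm->parent->pbm_B (see the structure
 * definitions below).
 */
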
extern void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask);
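
/* A hedged sketch of the expected call, as made from chip-specific
 * probe code; the TSB sizing arithmetic and the vdma_base/dma_mask
 * names are illustrative assumptions, not guarantees of this API:
 *
 *	pci_iommu_table_init(iommu, tsbsize * 8 * 1024,
 *			     vdma_base, dma_mask);
 */
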
#define PCI_STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define PCI_STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)
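
/* Note that PCI_STC_FLUSHFLAG_SET is a predicate, not a setter: the
 * streaming cache hardware writes the flag word when a flush
 * completes.  A minimal polling sketch, assuming the chip-specific
 * code has already queued the flush and posted the physical address
 * of the flag word to the flush-sync register:
 *
 *	PCI_STC_FLUSHFLAG_INIT(strbuf);
 *	... program the STC flush and fsync registers ...
 *	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
 *		rmb();
 */
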
/* There can be quite a few ranges and interrupt maps on a PCI
 * segment.  Thus...
 */
#define PROM_PCIRNG_MAX		64
#define PROM_PCIIMAP_MAX	64

struct pci_controller_info;

struct pci_pbm_info {
	/* PCI controller we sit under. */
	struct pci_controller_info	*parent;

	/* Physical address base of controller registers. */
	unsigned long			controller_regs;

	/* Physical address base of PBM registers. */
	unsigned long			pbm_regs;

	/* Physical address of DMA sync register, if any. */
	unsigned long			sync_reg;

	/* Opaque 32-bit system bus Port ID. */
	u32				portid;

	/* Opaque 32-bit handle used for hypervisor calls. */
	u32				devhandle;

	/* Chipset version information. */
	int				chip_type;
#define PBM_CHIP_TYPE_SABRE		1
#define PBM_CHIP_TYPE_PSYCHO		2
#define PBM_CHIP_TYPE_SCHIZO		3
#define PBM_CHIP_TYPE_SCHIZO_PLUS	4
#define PBM_CHIP_TYPE_TOMATILLO		5
	int				chip_version;
	int				chip_revision;

	/* Name used for top-level resources. */
	char				*name;

	/* OBP specific information. */
	struct device_node		*prom_node;
	u64				ino_bitmap;

	/* PBM I/O and Memory space resources. */
	struct resource			io_space;
	struct resource			mem_space;

	/* Base of PCI Config space, can be per-PBM or shared. */
	unsigned long			config_space;

	/* State of 66MHz capabilities on this PBM. */
	int				is_66mhz_capable;
	int				all_devs_66mhz;

#ifdef CONFIG_PCI_MSI
	/* MSI info. */
	u32				msiq_num;
	u32				msiq_ent_count;
	u32				msiq_first;
	u32				msiq_first_devino;
	u32				msi_num;
	u32				msi_first;
	u32				msi_data_mask;
	u32				msix_data_width;
	u64				msi32_start;
	u64				msi64_start;
	u32				msi32_len;
	u32				msi64_len;
	void				*msi_queues;
	unsigned long			*msi_bitmap;
#endif /* CONFIG_PCI_MSI */

	/* This PBM's streaming buffer. */
	struct strbuf			stc;

	/* IOMMU state, potentially shared by both PBM segments. */
	struct iommu			*iommu;

	/* Now things for the actual PCI bus probes. */
	unsigned int			pci_first_busno;
	unsigned int			pci_last_busno;
	struct pci_bus			*pci_bus;
};
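
/* msi_bitmap above tracks which of the msi_num MSIs starting at
 * msi_first are in use.  A hedged allocation sketch (the function
 * name is illustrative; the real logic lives in the controller
 * drivers):
 *
 *	static int alloc_msi(struct pci_pbm_info *pbm)
 *	{
 *		int i;
 *
 *		for (i = 0; i < pbm->msi_num; i++) {
 *			if (!test_and_set_bit(i, pbm->msi_bitmap))
 *				return i + pbm->msi_first;
 *		}
 *		return -ENOENT;
 *	}
 */
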
struct pci_controller_info {
	/* List of all PCI controllers. */
	struct pci_controller_info	*next;

	/* Each controller gets a unique index, used mostly for
	 * error logging purposes.
	 */
	int				index;

	/* The PCI bus modules controlled by us. */
	struct pci_pbm_info		pbm_A;
	struct pci_pbm_info		pbm_B;

	/* Operations which are controller specific. */
	void (*scan_bus)(struct pci_controller_info *);

#ifdef CONFIG_PCI_MSI
	int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev,
			     struct msi_desc *entry);
	void (*teardown_msi_irq)(unsigned int virt_irq, struct pci_dev *pdev);
#endif

	/* Now things for the actual PCI bus probes. */
	struct pci_ops			*pci_ops;
	unsigned int			pci_first_busno;
	unsigned int			pci_last_busno;
};
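
/* Controllers are chained through ->next.  A hedged sketch of how a
 * probe pass might walk the chain and invoke the controller-specific
 * bus scan; the list head name pci_controller_root is an assumption
 * about the surrounding implementation:
 *
 *	struct pci_controller_info *p;
 *
 *	for (p = pci_controller_root; p; p = p->next)
 *		p->scan_bus(p);
 */
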
#endif /* !(__SPARC64_PBM_H) */