linux-stable-rt/include/asm-sparc64/ldc.h

#ifndef _SPARC64_LDC_H
#define _SPARC64_LDC_H

#include <asm/hypervisor.h>

extern int ldom_domaining_enabled;
extern void ldom_set_var(const char *var, const char *value);
extern void ldom_reboot(const char *boot_command);
extern void ldom_power_off(void);

/* The event handler will be invoked when the link state changes
 * or data becomes available on the receive side.
 *
 * For non-RAW links, if the LDC_EVENT_RESET event arrives the
 * driver should reset all of its internal state and reinvoke
 * ldc_connect() to try to bring the link up again.
 *
 * For RAW links, ldc_connect() is not used.  Instead the driver
 * just waits for the LDC_EVENT_UP event.
 *
 * (An example handler sketch follows the event definitions below.)
 */
struct ldc_channel_config {
        void (*event)(void *arg, int event);

        u32 mtu;
        unsigned int rx_irq;
        unsigned int tx_irq;
        u8 mode;
#define LDC_MODE_RAW		0x00
#define LDC_MODE_UNRELIABLE	0x01
#define LDC_MODE_RESERVED	0x02
#define LDC_MODE_STREAM		0x03

        u8 debug;
#define LDC_DEBUG_HS	0x01
#define LDC_DEBUG_STATE	0x02
#define LDC_DEBUG_RX	0x04
#define LDC_DEBUG_TX	0x08
#define LDC_DEBUG_DATA	0x10
};

#define LDC_EVENT_RESET		0x01
#define LDC_EVENT_UP		0x02
#define LDC_EVENT_DATA_READY	0x04

#define LDC_STATE_INVALID	0x00
#define LDC_STATE_INIT		0x01
#define LDC_STATE_BOUND		0x02
#define LDC_STATE_READY		0x03
#define LDC_STATE_CONNECTED	0x04
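
/* Example: a minimal event callback sketch (illustrative only; the
 * my_dev structure and the my_dev_* helpers are hypothetical).  "arg"
 * is the event_arg pointer that was passed to ldc_alloc().  The sketch
 * assumes a single event code per invocation; whether event bits may
 * be combined in one call should be verified against the
 * implementation.
 *
 *	static void my_ldc_event(void *arg, int event)
 *	{
 *		struct my_dev *dp = arg;
 *
 *		if (event == LDC_EVENT_RESET) {
 *			my_dev_reset_protocol_state(dp);
 *			ldc_connect(dp->lp);
 *			return;
 *		}
 *		if (event == LDC_EVENT_UP)
 *			my_dev_link_up(dp);
 *		else if (event == LDC_EVENT_DATA_READY)
 *			my_dev_handle_rx(dp);
 *	}
 */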

struct ldc_channel;

/* Allocate state for a channel. */
extern struct ldc_channel *ldc_alloc(unsigned long id,
                                     const struct ldc_channel_config *cfgp,
                                     void *event_arg);

/* Shut down and free state for a channel. */
extern void ldc_free(struct ldc_channel *lp);

/* Register TX and RX queues of the link with the hypervisor. */
extern int ldc_bind(struct ldc_channel *lp, const char *name);

/* For non-RAW protocols we need to complete a handshake before
 * communication can proceed.  ldc_connect() does that; if the
 * handshake completes successfully, an LDC_EVENT_UP event will
 * be sent up to the driver.
 *
 * (A channel bring-up sketch follows the declarations below.)
 */
extern int ldc_connect(struct ldc_channel *lp);
extern int ldc_disconnect(struct ldc_channel *lp);
extern int ldc_state(struct ldc_channel *lp);
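
/* Example: typical channel bring-up (sketch only).  The channel id,
 * the "my-service" name, and the my_ldc_event callback are
 * placeholders; rx_irq/tx_irq would also be filled in from the
 * device's interrupt resources.  ERR_PTR-style error returns from
 * ldc_alloc() are an assumption to verify against the implementation.
 *
 *	struct ldc_channel_config my_cfg = {
 *		.event	= my_ldc_event,
 *		.mtu	= 4096,
 *		.mode	= LDC_MODE_UNRELIABLE,
 *	};
 *	struct ldc_channel *lp;
 *	int err;
 *
 *	lp = ldc_alloc(channel_id, &my_cfg, dp);
 *	if (IS_ERR(lp))
 *		return PTR_ERR(lp);
 *
 *	err = ldc_bind(lp, "my-service");
 *	if (!err)
 *		err = ldc_connect(lp);
 *	if (err)
 *		ldc_free(lp);
 *
 * On a successful handshake, LDC_EVENT_UP is delivered through the
 * event callback.
 */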

/* Read and write operations.  Only valid when the link is up. */
extern int ldc_write(struct ldc_channel *lp, const void *buf,
                     unsigned int size);
extern int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size);
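
/* Example: sending a message once the link is up (sketch only).  The
 * pkt buffer and pkt_len are placeholders, and the return-value
 * convention (bytes transferred vs. negative errno) is an assumption
 * to check against the implementation.
 *
 *	int err;
 *
 *	if (ldc_state(lp) != LDC_STATE_CONNECTED)
 *		return -ENOTCONN;
 *	err = ldc_write(lp, pkt, pkt_len);
 *	if (err < 0)
 *		return err;
 */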

#define LDC_MAP_SHADOW	0x01
#define LDC_MAP_DIRECT	0x02
#define LDC_MAP_IO	0x04
#define LDC_MAP_R	0x08
#define LDC_MAP_W	0x10
#define LDC_MAP_X	0x20
#define LDC_MAP_RW	(LDC_MAP_R | LDC_MAP_W)
#define LDC_MAP_RWX	(LDC_MAP_R | LDC_MAP_W | LDC_MAP_X)
#define LDC_MAP_ALL	0x03f

struct ldc_trans_cookie {
        u64 cookie_addr;
        u64 cookie_size;
};

struct scatterlist;
extern int ldc_map_sg(struct ldc_channel *lp,
                      struct scatterlist *sg, int num_sg,
                      struct ldc_trans_cookie *cookies, int ncookies,
                      unsigned int map_perm);

extern int ldc_map_single(struct ldc_channel *lp,
                          void *buf, unsigned int len,
                          struct ldc_trans_cookie *cookies, int ncookies,
                          unsigned int map_perm);

extern void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
                      int ncookies);
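
/* Example: exporting a single kernel buffer to the peer (sketch only).
 * The buffer, its length, the two-entry cookie array, and the
 * my_advertise_cookies_to_peer() helper are placeholders; real code
 * would publish the cookies through its own higher-level protocol.
 * The sketch assumes ldc_map_single() returns the number of cookies
 * used on success and a negative errno on failure; ldc_unmap() undoes
 * the mapping once the peer is finished with the buffer.
 *
 *	struct ldc_trans_cookie cookies[2];
 *	int ncookies;
 *
 *	ncookies = ldc_map_single(lp, buf, buf_len,
 *				  cookies, ARRAY_SIZE(cookies),
 *				  LDC_MAP_RW);
 *	if (ncookies < 0)
 *		return ncookies;
 *	my_advertise_cookies_to_peer(dp, cookies, ncookies);
 */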

/* Copy data to or from a remotely mapped area described by a cookie
 * list.  The copy_dir argument is one of the LDC_COPY_* values below.
 */
extern int ldc_copy(struct ldc_channel *lp, int copy_dir,
                    void *buf, unsigned int len, unsigned long offset,
                    struct ldc_trans_cookie *cookies, int ncookies);
#define LDC_COPY_IN	0
#define LDC_COPY_OUT	1

static inline int ldc_get_dring_entry(struct ldc_channel *lp,
                                      void *buf, unsigned int len,
                                      unsigned long offset,
                                      struct ldc_trans_cookie *cookies,
                                      int ncookies)
{
        return ldc_copy(lp, LDC_COPY_IN, buf, len, offset, cookies, ncookies);
}

static inline int ldc_put_dring_entry(struct ldc_channel *lp,
                                      void *buf, unsigned int len,
                                      unsigned long offset,
                                      struct ldc_trans_cookie *cookies,
                                      int ncookies)
{
        return ldc_copy(lp, LDC_COPY_OUT, buf, len, offset, cookies, ncookies);
}
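
/* Example: reading a descriptor ring entry exported by the peer
 * (sketch only).  The my_desc type, the idx index, and the
 * peer_cookies array describing the peer's ring are placeholders
 * obtained from the higher-level protocol; the offset is assumed to be
 * the byte offset of the entry within the mapped area.
 *
 *	struct my_desc d;
 *	int err;
 *
 *	err = ldc_get_dring_entry(lp, &d, sizeof(d), idx * sizeof(d),
 *				  peer_cookies, peer_ncookies);
 *	if (err < 0)
 *		return err;
 */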

extern void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
                                 struct ldc_trans_cookie *cookies,
                                 int *ncookies, unsigned int map_perm);

extern void ldc_free_exp_dring(struct ldc_channel *lp, void *buf,
                               unsigned int len,
                               struct ldc_trans_cookie *cookies, int ncookies);
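
/* Example: exporting a descriptor ring to the peer (sketch only).  The
 * ring length, the cookie array size, and the
 * my_advertise_dring_to_peer() helper are placeholders, and
 * ERR_PTR-style error returns from ldc_alloc_exp_dring() are an
 * assumption to verify against the implementation.  The cookies
 * filled in here describe the ring and are what the peer would use
 * with ldc_get_dring_entry()/ldc_put_dring_entry() on its side.
 *
 *	struct ldc_trans_cookie cookies[2];
 *	int ncookies;
 *	void *ring;
 *
 *	ring = ldc_alloc_exp_dring(lp, ring_len, cookies, &ncookies,
 *				   LDC_MAP_RW);
 *	if (IS_ERR(ring))
 *		return PTR_ERR(ring);
 *	my_advertise_dring_to_peer(dp, cookies, ncookies);
 *
 *	ldc_free_exp_dring(lp, ring, ring_len, cookies, ncookies);
 */
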
#endif /* _SPARC64_LDC_H */