chushi: kabi: add reserved fields in core structures.
Add reserved fields to key data structures to preserve kernel ABI (kABI) compatibility; the reserved fields can also be used for urgent fixes.

The following structures are involved:

PCI: struct pci_sriov, struct pci_dev, struct pci_bus, struct pci_driver

block: struct bio_integrity_payload, struct bio_set, struct bdi_writeback, struct backing_dev_info, struct blkcg, struct blkcg_policy, struct queue_limits, struct request_queue, struct block_device_operations, struct blk_mq_hw_ctx, struct blk_mq_tag_set, struct blk_mq_queue_data, struct blk_mq_ops, struct block_device, struct bio, struct bsg_job, struct hd_struct, struct gendisk

cgroup: struct cgroup_subsys_state, struct css_set, struct cgroup_root, struct cgroup_subsys

io-sched: struct elevator_mq_ops, struct elevator_type

fs: struct address_space, struct readahead_control, struct writeback_control

firmware: struct fwnode_handle

timer: struct hrtimer, struct timer_list

irq: struct irq_desc, struct irq_domain, struct irq_data, struct irq_work

mm: struct mempolicy, struct mempool_s, struct vmem_altmap, struct dev_pagemap_ops, struct dev_pagemap, struct vm_fault, struct vm_operations_struct, struct vm_area_struct, struct mm_struct, struct zone, struct pglist_data, struct shrinker, struct swap_info_struct

net: struct net_device_ops, struct xt_target, struct sk_buff, struct dst_entry, struct dst_ops, struct fib_rule, struct sock

sched: struct sched_info, struct load_weight, struct sched_avg, struct sched_statistics, struct sched_entity, struct sched_rt_entity, struct sched_dl_entity, struct task_struct, struct sched_domain_shared, struct sched_domain, struct user_struct, struct cpuacct, struct cpudl, struct numa_group, struct rt_bandwidth, struct dl_bandwidth, struct dl_bw, struct cfs_bandwidth, struct task_group, struct cfs_rq, struct rt_rq, struct dl_rq, struct root_domain, struct rq, struct sched_group_capacity, struct sched_group, struct sched_class

signal: struct signal_struct

scsi: struct scsi_disk, struct scsi_cmnd, struct scsi_device, struct scsi_target, struct scsi_host_template, struct Scsi_Host

Signed-off-by: hejiaolong <hejiaolong@kernelsoft.com>
parent f62513e5d0
commit 71d84b1ac1

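For context, a minimal sketch of how a padding word like this is typically consumed later without breaking kABI (struct example_dev, the field new_feature_flags, and the surrounding code are hypothetical illustrations, not part of this commit): a later fix overlays its new member on one of the reserved words, so sizeof() and the offsets of all pre-existing members stay exactly what out-of-tree modules were compiled against.

    /* Hypothetical sketch only; no such struct exists in this commit. */
    struct example_dev {
            unsigned long existing_member;

            /* One reserved word, repurposed by a later hotfix. */
            union {
                    unsigned long chushi_reserve1;   /* original padding */
                    unsigned long new_feature_flags; /* hypothetical new use */
            };
            unsigned long chushi_reserve2;           /* still unused */
    };

    /* Layout guard: the union adds no bytes, so the kABI is unchanged. */
    _Static_assert(sizeof(struct example_dev) == 3 * sizeof(unsigned long),
                   "reserved-field repurposing must not change the layout");

Repurposing a slot this way, rather than appending a new member, is what keeps the structure size and every existing member offset stable for modules built against the old headers.
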
@@ -310,6 +310,15 @@ struct pci_sriov {
        u16 subsystem_device; /* VF subsystem device */
        resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
        bool drivers_autoprobe; /* Auto probing of VFs by driver */

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

#ifdef CONFIG_PCI_DOE

@@ -151,6 +151,11 @@ struct scsi_disk {
        unsigned urswrz : 1;
        unsigned security : 1;
        unsigned ignore_medium_access_errors : 1;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
};
#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev)

@@ -158,6 +158,10 @@ struct bdi_writeback {
                struct rcu_head rcu;
        };
#endif
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
};

struct backing_dev_info {

@@ -201,6 +205,9 @@ struct backing_dev_info {
#ifdef CONFIG_DEBUG_FS
        struct dentry *debug_dir;
#endif

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
};

struct wb_lock_cookie {

@@ -346,6 +346,10 @@ struct bio_integrity_payload {
        struct work_struct bip_work; /* I/O completion */

        struct bio_vec *bip_vec;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;

        struct bio_vec bip_inline_vecs[];/* embedded bvec array */
};

@@ -705,6 +709,11 @@ struct bio_set {
         * Hot un-plug notifier for the per-cpu cache, if used
         */
        struct hlist_node cpuhp_dead;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
};

static inline bool bioset_initialized(struct bio_set *bs)

@@ -304,6 +304,15 @@ struct blk_mq_hw_ctx {
                unsigned long state;
        } ____cacheline_aligned_in_smp;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;

        /**
         * @run_work: Used for scheduling a hardware queue run at a later time.
         */

@@ -515,6 +524,15 @@ struct blk_mq_tag_set {
        struct mutex tag_list_lock;
        struct list_head tag_list;
        struct srcu_struct *srcu;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

/**

@@ -526,6 +544,8 @@ struct blk_mq_tag_set {
struct blk_mq_queue_data {
        struct request *rq;
        bool last;

        unsigned long chushi_reserve1;
};

typedef bool (busy_tag_iter_fn)(struct request *, void *);

@@ -645,6 +665,15 @@ struct blk_mq_ops {
         */
        void (*show_rq)(struct seq_file *m, struct request *rq);
#endif

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

enum {

@@ -75,6 +75,8 @@ struct block_device {
         * path
         */
        struct device bd_device;
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
} __randomize_layout;

#define bdev_whole(_bdev) \

@@ -316,6 +318,8 @@ struct bio {

        struct bio_set *bi_pool;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        /*
         * We can inline a number of vecs at the end of the bio, to avoid
         * double allocations for a small number of bio_vecs. This member

@@ -337,6 +337,15 @@ struct queue_limits {
         * due to possible offsets.
         */
        unsigned int dma_alignment;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,

@@ -536,6 +545,10 @@ struct request_queue {
        struct mutex debugfs_mutex;

        bool mq_sysfs_init_done;
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */

@@ -1418,6 +1431,11 @@ struct block_device_operations {
         * driver.
         */
        int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
};

#ifdef CONFIG_COMPAT

@@ -60,6 +60,9 @@ struct bsg_job {
        struct bio *bidi_bio;

        void *dd_data; /* Used for driver-specific storage */

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
};

void bsg_job_done(struct bsg_job *job, int result,

@@ -195,6 +195,9 @@ struct cgroup_subsys_state {
        struct work_struct destroy_work;
        struct rcu_work destroy_rwork;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;

        /*
         * PI: the parent css. Placed here for cache proximity to following
         * fields of the containing structure.

@@ -297,6 +300,9 @@ struct css_set {

        /* For RCU-protected deletion */
        struct rcu_head rcu_head;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
};

struct cgroup_base_stat {

@@ -562,6 +568,9 @@ struct cgroup_root {
        /* Hierarchy-specific flags */
        unsigned int flags;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;

        /* The path to use for release notifications. */
        char release_agent_path[PATH_MAX];

@@ -690,6 +699,9 @@ struct cgroup_subsys {
        void (*release)(struct task_struct *task);
        void (*bind)(struct cgroup_subsys_state *root_css);

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;

        bool early_init:1;

        /*

@@ -489,6 +489,10 @@ struct address_space {
        spinlock_t private_lock;
        struct list_head private_list;
        void *private_data;
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
        /*
         * On most architectures that alignment is already the case; but

@@ -45,6 +45,8 @@ struct fwnode_handle {
        struct list_head suppliers;
        struct list_head consumers;
        u8 flags;
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
};

/*

@@ -124,6 +124,8 @@ struct hrtimer {
        u8 is_rel;
        u8 is_soft;
        u8 is_hard;
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
};

/**

@@ -187,6 +187,10 @@ struct irq_data {
        struct irq_data *parent_data;
#endif
        void *chip_data;
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
};

/*

@@ -18,6 +18,11 @@ struct irq_work {
        struct __call_single_node node;
        void (*func)(struct irq_work *);
        struct rcuwait irqwait;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
};

#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \

@@ -105,6 +105,10 @@ struct irq_desc {
#ifdef CONFIG_HARDIRQS_SW_RESEND
        struct hlist_node resend_node;
#endif
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
} ____cacheline_internodealigned_in_smp;

#ifdef CONFIG_SPARSE_IRQ

@@ -170,6 +170,11 @@ struct irq_domain {
        const struct msi_parent_ops *msi_parent_ops;
#endif

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;

        /* reverse map data. The linear map gets appended to the irq_domain */
        irq_hw_number_t hwirq_max;
        unsigned int revmap_size;

@@ -52,6 +52,9 @@ struct mempolicy {
                nodemask_t cpuset_mems_allowed; /* relative to these nodes */
                nodemask_t user_nodemask; /* nodemask passed by user */
        } w;
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
};

/*

@@ -23,6 +23,9 @@ typedef struct mempool_s {
        mempool_alloc_t *alloc;
        mempool_free_t *free;
        wait_queue_head_t wait;
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
} mempool_t;

static inline bool mempool_initialized(mempool_t *pool)

@@ -25,6 +25,8 @@ struct vmem_altmap {
        unsigned long free;
        unsigned long align;
        unsigned long alloc;
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
};

/*

@@ -99,6 +101,8 @@ struct dev_pagemap_ops {
         */
        int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
                              unsigned long nr_pages, int mf_flags);
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
};

#define PGMAP_ALTMAP_VALID (1 << 0)

@@ -133,6 +137,10 @@ struct dev_pagemap {
        const struct dev_pagemap_ops *ops;
        void *owner;
        int nr_range;
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        union {
                struct range range;
                DECLARE_FLEX_ARRAY(struct range, ranges);

@@ -549,6 +549,9 @@ struct vm_fault {
                                 * page table to avoid allocation from
                                 * atomic context.
                                 */
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;

};

/*

@@ -628,6 +631,10 @@ struct vm_operations_struct {
         */
        struct page *(*find_special_page)(struct vm_area_struct *vma,
                                          unsigned long addr);
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
};

#ifdef CONFIG_NUMA_BALANCING

@@ -660,6 +660,11 @@ struct vm_area_struct {
        struct vma_numab_state *numab_state; /* NUMA Balancing state */
#endif
        struct vm_userfaultfd_ctx vm_userfaultfd_ctx;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
} __randomize_layout;

#ifdef CONFIG_SCHED_MM_CID

@@ -919,6 +924,11 @@ struct mm_struct {
#endif /* CONFIG_LRU_GEN */
        } __randomize_layout;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;

        /*
         * The mm_cpumask needs to be at the end of mm_struct, because it
         * is dynamically sized based on nr_cpu_ids.

@@ -986,6 +986,9 @@ struct zone {
        /* Zone statistics */
        atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
        atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {

@@ -1404,6 +1407,9 @@ typedef struct pglist_data {
#ifdef CONFIG_MEMORY_FAILURE
        struct memory_failure_stats mf_stats;
#endif

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
} pg_data_t;

#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)

@@ -1645,6 +1645,9 @@ struct net_device_ops {
        int (*ndo_hwtstamp_set)(struct net_device *dev,
                                struct kernel_hwtstamp_config *kernel_config,
                                struct netlink_ext_ack *extack);

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
};

/**

@@ -217,6 +217,9 @@ struct xt_target {
        unsigned short proto;

        unsigned short family;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
};

/* Furniture shopping... */

@@ -1270,6 +1270,9 @@ struct readahead_control {
        struct file *file;
        struct address_space *mapping;
        struct file_ra_state *ra;
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        /* private: use the readahead_* accessors instead */
        pgoff_t _index;
        unsigned int _nr_pages;

@@ -529,6 +529,23 @@ struct pci_dev {

        /* These methods index pci_reset_fn_methods[] */
        u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
        unsigned long chushi_reserve9;
        unsigned long chushi_reserve10;
        unsigned long chushi_reserve11;
        unsigned long chushi_reserve12;
        unsigned long chushi_reserve13;
        unsigned long chushi_reserve14;
        unsigned long chushi_reserve15;
        unsigned long chushi_reserve16;
};

static inline struct pci_dev *pci_physfn(struct pci_dev *dev)

@@ -678,6 +695,15 @@ struct pci_bus {
        struct bin_attribute *legacy_mem; /* Legacy mem */
        unsigned int is_added:1;
        unsigned int unsafe_warn:1; /* warned about RW1C config write */

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

#define to_pci_bus(n) container_of(n, struct pci_bus, dev)

@@ -933,6 +959,15 @@ struct pci_driver {
        struct device_driver driver;
        struct pci_dynids dynids;
        bool driver_managed_dma;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

static inline struct pci_driver *to_pci_driver(struct device_driver *drv)

@@ -388,6 +388,14 @@ struct sched_info {
        /* When were we last queued to run? */
        unsigned long long last_queued;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
#endif /* CONFIG_SCHED_INFO */
};

@@ -408,6 +416,15 @@ struct sched_info
struct load_weight {
        unsigned long weight;
        u32 inv_weight;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

/**

@@ -501,6 +518,15 @@ struct sched_avg {
        unsigned long runnable_avg;
        unsigned long util_avg;
        struct util_est util_est;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
} ____cacheline_aligned;

struct sched_statistics {

@@ -542,6 +568,15 @@ struct sched_statistics {
#ifdef CONFIG_SCHED_CORE
        u64 core_forceidle_sum;
#endif

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
#endif /* CONFIG_SCHEDSTATS */
} ____cacheline_aligned;

@@ -584,6 +619,15 @@ struct sched_entity {
         */
        struct sched_avg avg;
#endif

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

struct sched_rt_entity {

@@ -602,6 +646,14 @@ struct sched_rt_entity {
        /* rq "owned" by this entity/group: */
        struct rt_rq *my_q;
#endif
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
} __randomize_layout;

struct sched_dl_entity {

@@ -675,6 +727,14 @@ struct sched_dl_entity {
         */
        struct sched_dl_entity *pi_se;
#endif
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

#ifdef CONFIG_UCLAMP_TASK

@@ -1545,6 +1605,15 @@ struct task_struct {
         */
        randomized_struct_fields_end

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;

        /* CPU-specific state of this task: */
        struct thread_struct thread;

@@ -245,6 +245,10 @@ struct signal_struct {
         * and may have inconsistent
         * permissions.
         */
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
} __randomize_layout;

/*

@@ -82,6 +82,14 @@ struct sched_domain_shared {
        atomic_t nr_busy_cpus;
        int has_idle_cores;
        int nr_idle_scan;
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

struct sched_domain {

@@ -151,6 +159,14 @@ struct sched_domain {
        };
        struct sched_domain_shared *shared;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
        unsigned int span_weight;
        /*
         * Span of all CPUs in this domain.

@@ -34,6 +34,9 @@ struct user_struct {

        /* Miscellaneous per-user rate limit */
        struct ratelimit_state ratelimit;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
};

extern int uids_sysfs_init(void);

@@ -83,6 +83,8 @@ struct shrinker {
#endif
        /* objs pending delete, per node */
        atomic_long_t *nr_deferred;
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */

@@ -1043,6 +1043,9 @@ struct sk_buff {

        ); /* end headers group */

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;

        /* These elements must be at the end, see alloc_skb() for details. */
        sk_buff_data_t tail;
        sk_buff_data_t end;

@@ -321,6 +321,9 @@ struct swap_info_struct {
         */
        struct work_struct discard_work; /* discard worker */
        struct swap_cluster_list discard_clusters; /* discard clusters list */
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        struct plist_node avail_lists[]; /*
                                          * entries in swap_avail_heads, one
                                          * entry per node.

@@ -21,6 +21,8 @@ struct timer_list {
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
#endif
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
};

#ifdef CONFIG_LOCKDEP

@@ -89,6 +89,9 @@ struct writeback_control {
        size_t wb_lcand_bytes; /* bytes written by last candidate */
        size_t wb_tcand_bytes; /* bytes written by this candidate */
#endif

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
};

static inline blk_opf_t wbc_to_write_flags(struct writeback_control *wbc)

@@ -92,6 +92,8 @@ struct dst_entry {
#ifdef CONFIG_64BIT
        struct lwtunnel_state *lwtstate;
#endif

        unsigned long chushi_reserve1;
};

struct dst_metrics {

@@ -40,6 +40,9 @@ struct dst_ops {

        struct kmem_cache *kmem_cachep;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;

        struct percpu_counter pcpuc_entries ____cacheline_aligned_in_smp;
};

@@ -44,6 +44,8 @@ struct fib_rule {
        struct fib_rule_port_range sport_range;
        struct fib_rule_port_range dport_range;
        struct rcu_head rcu;

        unsigned long chushi_reserve1;
};

struct fib_lookup_arg {

@@ -545,6 +545,9 @@ struct sock {
        struct rcu_head sk_rcu;
        netns_tracker ns_tracker;
        struct hlist_node sk_bind2_node;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
};

enum sk_pacing {

@@ -141,6 +141,11 @@ struct scsi_cmnd {
         * to be at an address < 16Mb). */

        int result; /* Status code from lower level driver */

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
};

/* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. */

@@ -281,6 +281,14 @@ struct scsi_device {
        struct mutex state_mutex;
        enum scsi_device_state sdev_state;
        struct task_struct *quiesced_by;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;

        unsigned long sdev_data[];
} __attribute__((aligned(sizeof(unsigned long))));

@@ -367,6 +375,12 @@ struct scsi_target {
        char scsi_level;
        enum scsi_target_state state;
        void *hostdata; /* available to low-level driver */

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;

        unsigned long starget_data[]; /* for the transport */
        /* starget_data must be the last element!!!! */
} __attribute__((aligned(sizeof(unsigned long))));

@@ -497,6 +497,11 @@ struct scsi_host_template {

        /* Delay for runtime autosuspend */
        int rpm_autosuspend_delay;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
};

/*

@@ -710,6 +715,13 @@ struct Scsi_Host {
         */
        struct device *dma_dev;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;

        /*
         * We should ensure that this is aligned, both for better performance
         * and also because some compilers (m68k) don't automatically force

@@ -26,6 +26,11 @@ struct cpuacct {
        /* cpuusage holds pointer to a u64-type object on every CPU */
        u64 __percpu *cpuusage;
        struct kernel_cpustat __percpu *cpustat;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
};

static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)

@@ -13,6 +13,11 @@ struct cpudl {
        int size;
        cpumask_var_t free_cpus;
        struct cpudl_item *elements;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
};

#ifdef CONFIG_SMP

@@ -1413,6 +1413,12 @@ struct numa_group {
        struct rcu_head rcu;
        unsigned long total_faults;
        unsigned long max_faults_cpu;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;

        /*
         * faults[] array is split into two regions: faults_mem and faults_cpu.
         *

@@ -284,6 +284,14 @@ struct rt_bandwidth {
        u64 rt_runtime;
        struct hrtimer rt_period_timer;
        unsigned int rt_period_active;
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

void __dl_clear_params(struct task_struct *p);

@@ -315,6 +323,14 @@ struct dl_bw {
        raw_spinlock_t lock;
        u64 bw;
        u64 total_bw;
        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

extern void init_dl_bw(struct dl_bw *dl_b);

@@ -358,6 +374,15 @@ struct cfs_bandwidth {
        int nr_burst;
        u64 throttled_time;
        u64 burst_time;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
#endif
};

@@ -414,6 +439,14 @@ struct task_group {
        struct uclamp_se uclamp[UCLAMP_CNT];
#endif

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

#ifdef CONFIG_FAIR_GROUP_SCHED

@@ -651,6 +684,15 @@ struct cfs_rq {
#endif
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

static inline int rt_bandwidth_enabled(void)

@@ -697,6 +739,15 @@ struct rt_rq {
        struct rq *rq;
        struct task_group *tg;
#endif

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)

@@ -765,6 +816,15 @@ struct dl_rq {
         * by the GRUB algorithm.
         */
        u64 bw_ratio;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

#ifdef CONFIG_FAIR_GROUP_SCHED

@@ -891,6 +951,15 @@ struct root_domain {
         * CPUs of the rd. Protected by RCU.
         */
        struct perf_domain __rcu *pd;

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

extern void init_defrootdomain(void);

@@ -1168,6 +1237,15 @@ struct rq {
        call_single_data_t cfsb_csd;
        struct list_head cfsb_csd_list;
#endif

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

#ifdef CONFIG_FAIR_GROUP_SCHED

@@ -1897,6 +1975,16 @@ struct sched_group_capacity {
        int id;
#endif


        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;

        unsigned long cpumask[]; /* Balance mask */
};

@@ -1910,6 +1998,16 @@ struct sched_group {
        int asym_prefer_cpu; /* CPU of highest priority in group */
        int flags;


        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;

        /*
         * The CPUs this group covers.
         *

@@ -2292,6 +2390,15 @@ struct sched_class {
#ifdef CONFIG_SCHED_CORE
        int (*task_is_throttled)(struct task_struct *p, int cpu);
#endif

        unsigned long chushi_reserve1;
        unsigned long chushi_reserve2;
        unsigned long chushi_reserve3;
        unsigned long chushi_reserve4;
        unsigned long chushi_reserve5;
        unsigned long chushi_reserve6;
        unsigned long chushi_reserve7;
        unsigned long chushi_reserve8;
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)