[POWERPC] rheap - eliminates internal fragments caused by alignment
The patch adds fragments caused by rh_alloc_align() back to the free list, instead of allocating the whole chunk of memory. This will greatly improve the utilization of memory managed by rheap. It solves the problem of MURAM being insufficient with 3 UCCs enabled on MPC8323. Signed-off-by: Li Yang <leoli@freescale.com> Acked-by: Joakim Tjernlund <joakim.tjernlund@transmode.se> Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
This commit is contained in:
parent
7b7a57c77d
commit
7c8545e984
|
@ -437,27 +437,26 @@ unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const ch
|
||||||
struct list_head *l;
|
struct list_head *l;
|
||||||
rh_block_t *blk;
|
rh_block_t *blk;
|
||||||
rh_block_t *newblk;
|
rh_block_t *newblk;
|
||||||
unsigned long start;
|
unsigned long start, sp_size;
|
||||||
|
|
||||||
/* Validate size, and alignment must be power of two */
|
/* Validate size, and alignment must be power of two */
|
||||||
if (size <= 0 || (alignment & (alignment - 1)) != 0)
|
if (size <= 0 || (alignment & (alignment - 1)) != 0)
|
||||||
return (unsigned long) -EINVAL;
|
return (unsigned long) -EINVAL;
|
||||||
|
|
||||||
/* given alignment larger that default rheap alignment */
|
|
||||||
if (alignment > info->alignment)
|
|
||||||
size += alignment - 1;
|
|
||||||
|
|
||||||
/* Align to configured alignment */
|
/* Align to configured alignment */
|
||||||
size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
|
size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
|
||||||
|
|
||||||
if (assure_empty(info, 1) < 0)
|
if (assure_empty(info, 2) < 0)
|
||||||
return (unsigned long) -ENOMEM;
|
return (unsigned long) -ENOMEM;
|
||||||
|
|
||||||
blk = NULL;
|
blk = NULL;
|
||||||
list_for_each(l, &info->free_list) {
|
list_for_each(l, &info->free_list) {
|
||||||
blk = list_entry(l, rh_block_t, list);
|
blk = list_entry(l, rh_block_t, list);
|
||||||
if (size <= blk->size)
|
if (size <= blk->size) {
|
||||||
break;
|
start = (blk->start + alignment - 1) & ~(alignment - 1);
|
||||||
|
if (start + size <= blk->start + blk->size)
|
||||||
|
break;
|
||||||
|
}
|
||||||
blk = NULL;
|
blk = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -470,25 +469,36 @@ unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const ch
|
||||||
list_del(&blk->list);
|
list_del(&blk->list);
|
||||||
newblk = blk;
|
newblk = blk;
|
||||||
} else {
|
} else {
|
||||||
|
/* Fragment caused, split if needed */
|
||||||
|
/* Create block for fragment in the beginning */
|
||||||
|
sp_size = start - blk->start;
|
||||||
|
if (sp_size) {
|
||||||
|
rh_block_t *spblk;
|
||||||
|
|
||||||
|
spblk = get_slot(info);
|
||||||
|
spblk->start = blk->start;
|
||||||
|
spblk->size = sp_size;
|
||||||
|
/* add before the blk */
|
||||||
|
list_add(&spblk->list, blk->list.prev);
|
||||||
|
}
|
||||||
newblk = get_slot(info);
|
newblk = get_slot(info);
|
||||||
newblk->start = blk->start;
|
newblk->start = start;
|
||||||
newblk->size = size;
|
newblk->size = size;
|
||||||
|
|
||||||
/* blk still in free list, with updated start, size */
|
/* blk still in free list, with updated start and size
|
||||||
blk->start += size;
|
* for fragment in the end */
|
||||||
blk->size -= size;
|
blk->start = start + size;
|
||||||
|
blk->size -= sp_size + size;
|
||||||
|
/* No fragment in the end, remove blk */
|
||||||
|
if (blk->size == 0) {
|
||||||
|
list_del(&blk->list);
|
||||||
|
release_slot(info, blk);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
newblk->owner = owner;
|
newblk->owner = owner;
|
||||||
start = newblk->start;
|
|
||||||
attach_taken_block(info, newblk);
|
attach_taken_block(info, newblk);
|
||||||
|
|
||||||
/* for larger alignment return fixed up pointer */
|
|
||||||
/* this is no problem with the deallocator since */
|
|
||||||
/* we scan for pointers that lie in the blocks */
|
|
||||||
if (alignment > info->alignment)
|
|
||||||
start = (start + alignment - 1) & ~(alignment - 1);
|
|
||||||
|
|
||||||
return start;
|
return start;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue