DragonFly kernel List (threaded) for 2003-08
[
Date Prev][
Date Next]
[
Thread Prev][
Thread Next]
[
Date Index][
Thread Index]
Re: Getting rid of kmem_map
:
: Another thing that would be nice to see is more pro-active action to
: reduce _map_ fragmentation in VM.
:--
:Bosko Milekic * bmilekic@xxxxxxxxxxxxxxxx * bmilekic@xxxxxxxxxxx
One thing that might help here is adding an alignment feature to
vm_map_findspace(). Last night I added such a feature (not yet
tested or committed) as part of the support for the slab allocator
I am bringing in. I have included the patch set below for reference.
Some pretty cool things can be done with this sort of alignment feature.
I am using it to super-align (e.g. 32K, 128K align) the slab reservations
so the base of each slab can be calculated simply by truncating the
pointer being allocated or freed.
In applications which mmap() large files, like databases, having
mmap() reserve VM space with a segment-sized alignment (4MB on IA32)
would allow the page directory page to be shared between unrelated
processes which are mapping the same file, leading to a phenomenal
savings of kernel memory for these applications.
For the buffer cache, power-of-2 aligned buffers will theoretically
reduce the fragmentation that occurs when you have mismatched buffer
sizes by preventing larger buffers from being separated by small holes.
-Matt
Matthew Dillon
<dillon@xxxxxxxxxxxxx>
Index: kern/vfs_bio.c
===================================================================
RCS file: /cvs/src/sys/kern/vfs_bio.c,v
retrieving revision 1.11
diff -u -r1.11 vfs_bio.c
--- kern/vfs_bio.c 26 Jul 2003 19:42:11 -0000 1.11
+++ kern/vfs_bio.c 25 Aug 2003 02:27:22 -0000
@@ -1713,7 +1713,8 @@
vm_map_lock(buffer_map);
if (vm_map_findspace(buffer_map,
- vm_map_min(buffer_map), maxsize, &addr)) {
+ vm_map_min(buffer_map), maxsize,
+ maxsize, &addr)) {
/*
* Uh oh. Buffer map is to fragmented. We
* must defragment the map.
Index: vm/vm_kern.c
===================================================================
RCS file: /cvs/src/sys/vm/vm_kern.c,v
retrieving revision 1.5
diff -u -r1.5 vm_kern.c
--- vm/vm_kern.c 26 Jul 2003 22:10:02 -0000 1.5
+++ vm/vm_kern.c 25 Aug 2003 02:36:31 -0000
@@ -167,7 +167,7 @@
* offset within the kernel map.
*/
vm_map_lock(map);
- if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
+ if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr)) {
vm_map_unlock(map);
return (0);
}
@@ -319,7 +319,7 @@
* offset within the kernel map.
*/
vm_map_lock(map);
- if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
+ if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr)) {
vm_map_unlock(map);
if (map == mb_map) {
mb_map_full = TRUE;
@@ -441,7 +441,7 @@
* to lock out sleepers/wakers.
*/
vm_map_lock(map);
- if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
+ if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr) == 0)
break;
/* no space now; see if we can ever get space */
if (vm_map_max(map) - vm_map_min(map) < size) {
Index: vm/vm_map.c
===================================================================
RCS file: /cvs/src/sys/vm/vm_map.c,v
retrieving revision 1.8
diff -u -r1.8 vm_map.c
--- vm/vm_map.c 20 Aug 2003 08:03:01 -0000 1.8
+++ vm/vm_map.c 25 Aug 2003 16:19:16 -0000
@@ -662,16 +662,24 @@
/*
* Find sufficient space for `length' bytes in the given map, starting at
* `start'. The map must be locked. Returns 0 on success, 1 on no space.
+ *
+ * This function will return an arbitrarily aligned pointer. If no
+ * particular alignment is required you should pass align as 1. Note that
+ * the map may return PAGE_SIZE aligned pointers if all the lengths used in
+ * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
+ * argument. The align argument will be adjusted upwards to a power of 2.
*/
int
-vm_map_findspace(map, start, length, addr)
+vm_map_findspace(map, start, length, align, addr)
vm_map_t map;
vm_offset_t start;
vm_size_t length;
+ vm_offset_t align;
vm_offset_t *addr;
{
vm_map_entry_t entry, next;
vm_offset_t end;
+ vm_offset_t align_mask;
if (start < map->min_offset)
start = map->min_offset;
@@ -679,6 +687,15 @@
return (1);
/*
+ * If the alignment is not a power of 2 we will have to use
+ * a mod/division, set align_mask to a special value.
+ */
+ if ((align | (align - 1)) + 1 != (align << 1))
+ align_mask = (vm_offset_t)-1;
+ else
+ align_mask = align - 1;
+
+ /*
* Look for the first possible address; if there's already something
* at this address, we have to start after it.
*/
@@ -699,11 +716,22 @@
*/
for (;; start = (entry = next)->end) {
/*
+ * Adjust the proposed start by the requested alignment,
+ * be sure that we didn't wrap the address.
+ */
+ if (align_mask == (vm_offset_t)-1)
+ end = ((start + align - 1) / align) * align;
+ else
+ end = (start + align_mask) & ~align_mask;
+ if (end < start)
+ return (1);
+ start = end;
+ /*
* Find the end of the proposed new region. Be sure we didn't
- * go beyond the end of the map, or wrap around the address;
- * if so, we lose. Otherwise, if this is the last entry, or
- * if the proposed new region fits before the next entry, we
- * win.
+ * go beyond the end of the map, or wrap around the address.
+ * Then check to see if this is the last entry or if the
+ * proposed end fits in the gap between this and the next
+ * entry.
*/
end = start + length;
if (end > map->max_offset || end < start)
@@ -748,7 +776,7 @@
vm_map_lock(map);
if (find_space) {
- if (vm_map_findspace(map, start, length, addr)) {
+ if (vm_map_findspace(map, start, length, 1, addr)) {
vm_map_unlock(map);
if (map == kmem_map || map == mb_map)
splx(s);
Index: vm/vm_map.h
===================================================================
RCS file: /cvs/src/sys/vm/vm_map.h,v
retrieving revision 1.5
diff -u -r1.5 vm_map.h
--- vm/vm_map.h 20 Aug 2003 08:03:01 -0000 1.5
+++ vm/vm_map.h 25 Aug 2003 02:29:09 -0000
@@ -363,7 +363,7 @@
vm_map_t vm_map_create (struct pmap *, vm_offset_t, vm_offset_t);
int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t);
int vm_map_find (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t, vm_prot_t, vm_prot_t, int);
-int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *);
+int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_offset_t, vm_offset_t *);
int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t);
int vm_map_insert (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int);
Index: vm/vm_page.c
===================================================================
RCS file: /cvs/src/sys/vm/vm_page.c,v
retrieving revision 1.7
diff -u -r1.7 vm_page.c
--- vm/vm_page.c 19 Jul 2003 21:14:53 -0000 1.7
+++ vm/vm_page.c 25 Aug 2003 02:29:44 -0000
@@ -1848,7 +1848,7 @@
* return kernel VM pointer.
*/
vm_map_lock(map);
- if (vm_map_findspace(map, vm_map_min(map), size, &addr) !=
+ if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr) !=
KERN_SUCCESS) {
/*
* XXX We almost never run out of kernel virtual
[
Date Prev][
Date Next]
[
Thread Prev][
Thread Next]
[
Date Index][
Thread Index]