[Codefragments-commits] bryce commits r7 - trunk/fragment-slab
svn-commits at oss.oracle.com
Fri Jan 25 11:57:53 PST 2008
Author: bryce
Date: 2008-01-25 11:57:53 -0800 (Fri, 25 Jan 2008)
New Revision: 7
Added:
trunk/fragment-slab/frag-limit.txt
trunk/fragment-slab/frag-normal.txt
trunk/fragment-slab/fragment.stp
Modified:
trunk/fragment-slab/README
Log:
systemtap update from elena/Wenji Huang
Modified: trunk/fragment-slab/README
===================================================================
--- trunk/fragment-slab/README 2005-11-15 13:10:16 UTC (rev 6)
+++ trunk/fragment-slab/README 2008-01-25 19:57:53 UTC (rev 7)
@@ -1,4 +1,5 @@
-This modeule is used to fragment the slab as much as possible
+This module is used to fragment the slab as much as possible
+(also note this is based on 2.4.x kernels)
insmod fragment.ko (debug=1 for noisy debug)
@@ -9,6 +10,9 @@
cat /proc/slabinfo | grep -v DMA | grep size
+A systemtap version written by Wenji Huang, which works with 2.6.x kernels,
+is provided as well
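+(for example: stap -g fragment.stp 1000; see the header comment in
+fragment.stp for the full procedure, including the overcommit_memory setup)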
+
Phil
=--=
Added: trunk/fragment-slab/frag-limit.txt
===================================================================
--- trunk/fragment-slab/frag-limit.txt (rev 0)
+++ trunk/fragment-slab/frag-limit.txt 2008-01-25 19:57:53 UTC (rev 7)
@@ -0,0 +1,3 @@
+Attain to Max 2017
+So free 1031 elements to make kernel recovery
+
Added: trunk/fragment-slab/frag-normal.txt
===================================================================
--- trunk/fragment-slab/frag-normal.txt (rev 0)
+++ trunk/fragment-slab/frag-normal.txt 2008-01-25 19:57:53 UTC (rev 7)
@@ -0,0 +1,4 @@
+Kernel Test: Memory allocation 1000 elements
+Allocated 1000 elements
+Free remain 1000 elements
+
Added: trunk/fragment-slab/fragment.stp
===================================================================
--- trunk/fragment-slab/fragment.stp (rev 0)
+++ trunk/fragment-slab/fragment.stp 2008-01-25 19:57:53 UTC (rev 7)
@@ -0,0 +1,238 @@
+#!/usr/bin/env stap
+#
+# Copyright (C) 2007 Oracle Corp.
+#
+# TEST script for memory fragmentation;
+# Originally from oss.oracle.com/projects/codefragments/src/chunk
+# by Philip Copeland <Philip.Copeland at oracle.com>
+#
+# Adapted to a stap script by Wenji Huang <wenji.huang at oracle.com>
+#
+# GPL License
+#
+# Instructions: (need to disable the oom-killer)
+#
+# old_val=`cat /proc/sys/vm/overcommit_memory`
+# echo 2 > /proc/sys/vm/overcommit_memory
+# stap -g fragment.stp 1234
+# [ -f /proc/slabinfo ] && cat /proc/slabinfo|grep -v DMA|grep size
+# echo $old_val > /proc/sys/vm/overcommit_memory
+#
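+# Rough idea (what the embedded C below does): build a linked list of up to
+# N nodes, each holding the largest slab chunk kmalloc will give us (starting
+# at 128KB and halving down to 32 bytes); once allocations start failing,
+# free every other chunk so the slab caches are left full of holes.
+# Guru mode (-g) is required because of the embedded C.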
+#
+
+global result
+
+%{
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/stat.h>
+#include <linux/sched.h>
+#include <linux/slab.h>   // for kmalloc()/kfree()
+
+// This is where we get alloc_buffer_head from
+#include <linux/buffer_head.h>
+
+// structure that's going to be buried in linked list hell
+typedef struct node *node_p;
+
+struct node
+{
+ int idx; // node number
+ int chunksize; // node represents X amount of memory
+ long location; // mem address
+ char *page; // a memory chunk
+ struct list_head list; // kernel's list structure
+} node;
+
+struct list_head *pos, *q;
+
+char *malloc_slab_slice (int chunk, int *chunksize);
+
+/* Allocate buffer_head stuff -------------------- */
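+// allocation(requested): grow the global list until it holds 'requested'
+// nodes, each backed by a chunk from malloc_slab_slice(). If either kmalloc
+// fails along the way we jump to 'interleave', which frees roughly every
+// other chunk to leave the slab fragmented. Returns the number of nodes
+// added by this call.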
+int allocation (int requested)
+{
+ // Note: we don't care about tracking the nodes in any detail;
+ // all we care about is that the allocations get made and stored
+
+ int x = 0;
+ int loop = 0;
+ int diff = 0;
+ int *ptr = NULL;
+ struct node *tmp = NULL;
+
+ // how many items are there in the list?
+ list_for_each (pos, &node.list) loop++;
+
+ // do we need to add more records?
+ if (loop < requested)
+ {
+ // how many more do we need?
+ diff = (requested - loop);
+ for (x = 0; x < diff; x++)
+ {
+ // malloc some space for the struct
+ // __GFP flags: NORETRY
+ // add | __GFP_NOWARN if you don't like the memory stats dump when
+ // kmalloc fails
+ tmp =
+ (struct node *) kmalloc (sizeof (struct node),
+ GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
+ if (tmp == NULL)
+ {
+ //printk
+ // ("WHOA! kmalloc failed. no memory left?"
+ // " bailing out having only created %d elements\n",
+ // (loop + x));
+
+ // abandon the loop; we've taken all the memory
+ // we can handle, so before 'dying' we should
+ // try freeing up holes rather than pressing on and
+ // trying malloc again in the next bit of code
+
+ goto interleave;
+ }
+
+ // track what number this node is
+ tmp->idx = loop + x;
+
+ // Finally we get to do the one line of code we actually
+ // truly care about...
+ ptr = &(tmp->chunksize);
+ tmp->page = malloc_slab_slice (131072, ptr);
+
+
+ if ((tmp->page) == NULL)
+ {
+ //printk("WHOA! kmalloc failed. no memory left?"
+ // " bailing out having only created %d elements\n",
+ // (loop + x));
+ kfree (tmp); // free the node struct we just allocated so it doesn't leak
+ goto interleave; // abandon routine and recover some memory
+ }
+
+ // keep track of where this allocation was in memory
+ tmp->location = (long) tmp->page;
+
+ // add to the end of the list
+ list_add_tail (&(tmp->list), &(node.list));
+ } //end for
+
+ return x;
+
+ interleave:
+ // excellent, we've now consumed all available memory
+ // next up, let's delete every other page
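+ // (freeing alternating chunks leaves the slab caches full of free holes
+ // between live objects, which is exactly the fragmentation we're after)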
+ printk("Attain to Max %d \n",x);
+ loop = 0;
+ list_for_each_safe (pos, q, &node.list)
+ {
+ tmp = list_entry (pos, struct node, list);
+ // Assume we have roughly equal numbers of 'even' and 'odd' chunks;
+ // we decide even/odd by taking the address modulo (chunksize * 2)
+
+ if (tmp->location % ((tmp->chunksize) * 2) == 0)
+ {
+ list_del (pos);
+ kfree (tmp->page); // This is a pointer to a kmalloced area
+ kfree (tmp);
+ loop++;
+ }
+ }
+ if (loop)
+ printk("Free %d elements \n",loop);
+ }
+
+ // how many items are there in the list?
+ // Just so Joe Bloggs gets an idea this is actually working
+ //loop = 0;
+ //list_for_each (pos, &node.list) loop++;
+ // printk ("List request of %d elements is now composed of %d elements\n",requested, loop);
+
+ return (x);
+}
+
+char *malloc_slab_slice (int chunk, int *chunksize)
+{
+ char *tmp = NULL;
+
+ // Stride down from 'chunk', halving it each time, trying to wedge in a malloc.
+ // Aggressively try to hammer in the big slices as much as possible
+
+ while (chunk >= 32) // don't try anything below 32 bytes
+ {
+ // malloc some space for the struct
+ // __GFP flags: NORETRY; __GFP_NOWARN is included so we don't get the
+ // memory stats dump when kmalloc fails
+
+ tmp = kmalloc (chunk, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN| __GFP_THISNODE);
+
+ //make a note of how big this chunk request was
+ *chunksize = chunk;
+
+ if (tmp == NULL) // if we can't get a malloc, try at 1/2 the size
+ {
+ chunk = (chunk >> 1); // 131072 -> 65536 -> ... -> 128 -> 64 -> 32
+ if (chunk > 32)
+ *chunksize = 0;
+ }
+ else
+ {
+ break; // we got what we wanted, let's barge out of the while loop
+ }
+ }
+ return (tmp);
+}
+
+
+/* Entry and Exit -------------------------------- */
+
+static int test_init (void)
+{
+ // Create a start record
+ INIT_LIST_HEAD (&node.list);
+
+ return 0;
+}
+
+
+static int test_exit (void)
+{
+ struct node *tmp;
+ int loop = 0;
+
+ // Clear out the list
+
+ list_for_each_safe (pos, q, &node.list)
+ {
+ tmp = list_entry (pos, struct node, list);
+ list_del (pos);
+ kfree (tmp->page);
+ kfree (tmp);
+ loop++;
+ }
+ // printk ("Freed up %d list elements\n",loop);
+ return loop;
+}
+%}
+
+function start:long(val:long)
+%{ /* pure */
+ test_init();
+ THIS->__retvalue = allocation(THIS->val);
+%}
+
+function exit:long()
+%{ /* pure */
+ THIS->__retvalue = test_exit();
+%}
+
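+# probe begin runs once at startup: it initializes the list and does the
+# allocation pass. probe end fires when the script is stopped (e.g. Ctrl-C)
+# and frees whatever is still on the list.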
+probe begin{
+ loops = $1
+ printf("Kernel Test: Memory allocation %d elements \n",loops)
+ result = start(loops)
+ printf("Allocated %d elements\n",result)
+}
+
+probe end{
+ printf("Free remain %d elements\n",exit())
+}
+
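+# Sample runs appear to be captured alongside this script: frag-normal.txt
+# (all 1000 requested elements were allocated and later freed) and
+# frag-limit.txt (allocation topped out at 2017 elements, after which 1031
+# chunks were freed to let the kernel recover).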