git.pld-linux.org Git - packages/xfsprogs.git/commitdiff
- up to 3.1.7; po patch merged upstream; add patches: 1) avoid hanging on futexes...
tags: auto/th/xfsprogs-3_1_7-1, auto/ti/xfsprogs-3_1_7-1
author Arkadiusz Miśkiewicz <arekm@maven.pl>
Fri, 18 Nov 2011 07:07:52 +0000 (07:07 +0000)
committer cvs2git <feedback@pld-linux.org>
Sun, 24 Jun 2012 12:13:13 +0000 (12:13 +0000)
Changed files:
    xfsprogs-repair-mem.patch -> 1.1
    xfsprogs-repair-nofutexhang.patch -> 1.1
    xfsprogs-repair-tcmalloc.patch -> 1.1
    xfsprogs.spec -> 1.146

diff --git a/xfsprogs-repair-mem.patch b/xfsprogs-repair-mem.patch
new file mode 100644 (file)
index 0000000..f055a9a
--- /dev/null
@@ -0,0 +1,264 @@
+
+Instead of allocating inode records in chunks and keeping a freelist of them
+which gets released to the system memory allocator in one go use plain malloc
+and free for them.  The freelist just means adding a global lock instead
+of relying on malloc and free which could be implemented lockless.  In
+addition smart allocators like tcmalloc have far less overhead than our
+chunk and linked list.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+
+Index: xfsprogs-dev/repair/incore_ext.c
+===================================================================
+--- xfsprogs-dev.orig/repair/incore_ext.c      2011-11-10 14:01:04.905470023 +0000
++++ xfsprogs-dev/repair/incore_ext.c   2011-11-14 10:14:57.696692843 +0000
+@@ -26,20 +26,6 @@
+ #include "err_protos.h"
+ #include "avl64.h"
+ #include "threads.h"
+-#define ALLOC_NUM_EXTS                100
+-
+-/*
+- * paranoia -- account for any weird padding, 64/32-bit alignment, etc.
+- */
+-typedef struct extent_alloc_rec  {
+-      struct list_head        list;
+-      extent_tree_node_t      extents[ALLOC_NUM_EXTS];
+-} extent_alloc_rec_t;
+-
+-typedef struct rt_extent_alloc_rec  {
+-      struct list_head        list;
+-      rt_extent_tree_node_t   extents[ALLOC_NUM_EXTS];
+-} rt_extent_alloc_rec_t;
+ /*
+  * note:  there are 4 sets of incore things handled here:
+@@ -57,21 +43,9 @@
+  * phase 5.  The uncertain inode list goes away at the end of
+  * phase 3.  The inode tree and bno/bnct trees go away after phase 5.
+  */
+-typedef struct ext_flist_s  {
+-      extent_tree_node_t      *list;
+-      int                     cnt;
+-} ext_flist_t;
+-
+-static ext_flist_t ext_flist;
+-
+-typedef struct rt_ext_flist_s  {
+-      rt_extent_tree_node_t   *list;
+-      int                     cnt;
+-} rt_ext_flist_t;
+-
+-static rt_ext_flist_t rt_ext_flist;
+ static avl64tree_desc_t       *rt_ext_tree_ptr;       /* dup extent tree for rt */
++static pthread_mutex_t        rt_ext_tree_lock;
+ static struct btree_root **dup_extent_trees;  /* per ag dup extent trees */
+ static pthread_mutex_t *dup_extent_tree_locks;
+@@ -89,19 +63,6 @@
+                                                */
+ /*
+- * list of allocated "blocks" for easy freeing later
+- */
+-static struct list_head       ba_list;
+-static struct list_head       rt_ba_list;
+-
+-/*
+- * locks.
+- */
+-static pthread_mutex_t        ext_flist_lock;
+-static pthread_mutex_t        rt_ext_tree_lock;
+-static pthread_mutex_t        rt_ext_flist_lock;
+-
+-/*
+  * duplicate extent tree functions
+  */
+@@ -167,60 +128,26 @@
+ mk_extent_tree_nodes(xfs_agblock_t new_startblock,
+       xfs_extlen_t new_blockcount, extent_state_t new_state)
+ {
+-      int i;
+       extent_tree_node_t *new;
+-      extent_alloc_rec_t *rec;
+-
+-      pthread_mutex_lock(&ext_flist_lock);
+-      if (ext_flist.cnt == 0)  {
+-              ASSERT(ext_flist.list == NULL);
+-
+-              if ((rec = malloc(sizeof(extent_alloc_rec_t))) == NULL)
+-                      do_error(
+-                      _("couldn't allocate new extent descriptors.\n"));
+-
+-              list_add(&rec->list, &ba_list);
+-
+-              new = &rec->extents[0];
+-              for (i = 0; i < ALLOC_NUM_EXTS; i++)  {
+-                      new->avl_node.avl_nextino = (avlnode_t *)
+-                                                      ext_flist.list;
+-                      ext_flist.list = new;
+-                      ext_flist.cnt++;
+-                      new++;
+-              }
+-      }
+-
+-      ASSERT(ext_flist.list != NULL);
++      new = malloc(sizeof(*new));
++      if (!new)
++              do_error(_("couldn't allocate new extent descriptor.\n"));
+-      new = ext_flist.list;
+-      ext_flist.list = (extent_tree_node_t *) new->avl_node.avl_nextino;
+-      ext_flist.cnt--;
+       new->avl_node.avl_nextino = NULL;
+-      pthread_mutex_unlock(&ext_flist_lock);
+-
+-      /* initialize node */
+-
+       new->ex_startblock = new_startblock;
+       new->ex_blockcount = new_blockcount;
+       new->ex_state = new_state;
+       new->next = NULL;
+       new->last = NULL;
+-      return(new);
++      return new;
+ }
+ void
+ release_extent_tree_node(extent_tree_node_t *node)
+ {
+-      pthread_mutex_lock(&ext_flist_lock);
+-      node->avl_node.avl_nextino = (avlnode_t *) ext_flist.list;
+-      ext_flist.list = node;
+-      ext_flist.cnt++;
+-      pthread_mutex_unlock(&ext_flist_lock);
+-
+-      return;
++      free(node);
+ }
+ /*
+@@ -630,57 +557,24 @@
+ mk_rt_extent_tree_nodes(xfs_drtbno_t new_startblock,
+       xfs_extlen_t new_blockcount, extent_state_t new_state)
+ {
+-      int i;
+       rt_extent_tree_node_t *new;
+-      rt_extent_alloc_rec_t *rec;
+-      pthread_mutex_lock(&rt_ext_flist_lock);
+-      if (rt_ext_flist.cnt == 0)  {
+-              ASSERT(rt_ext_flist.list == NULL);
+-
+-              if ((rec = malloc(sizeof(rt_extent_alloc_rec_t))) == NULL)
+-                      do_error(
+-                      _("couldn't allocate new extent descriptors.\n"));
++      new = malloc(sizeof(*new));
++      if (!new)
++              do_error(_("couldn't allocate new extent descriptor.\n"));
+-              list_add(&rec->list, &rt_ba_list);
+-
+-              new = &rec->extents[0];
+-
+-              for (i = 0; i < ALLOC_NUM_EXTS; i++)  {
+-                      new->avl_node.avl_nextino = (avlnode_t *)
+-                                                      rt_ext_flist.list;
+-                      rt_ext_flist.list = new;
+-                      rt_ext_flist.cnt++;
+-                      new++;
+-              }
+-      }
+-
+-      ASSERT(rt_ext_flist.list != NULL);
+-
+-      new = rt_ext_flist.list;
+-      rt_ext_flist.list = (rt_extent_tree_node_t *) new->avl_node.avl_nextino;
+-      rt_ext_flist.cnt--;
+       new->avl_node.avl_nextino = NULL;
+-      pthread_mutex_unlock(&rt_ext_flist_lock);
+-
+-      /* initialize node */
+-
+       new->rt_startblock = new_startblock;
+       new->rt_blockcount = new_blockcount;
+       new->rt_state = new_state;
+-
+-      return(new);
++      return new;
+ }
+ #if 0
+ void
+ release_rt_extent_tree_node(rt_extent_tree_node_t *node)
+ {
+-      node->avl_node.avl_nextino = (avlnode_t *) rt_ext_flist.list;
+-      rt_ext_flist.list = node;
+-      rt_ext_flist.cnt++;
+-
+-      return;
++      free(node);
+ }
+ void
+@@ -719,18 +613,9 @@
+ void
+ free_rt_dup_extent_tree(xfs_mount_t *mp)
+ {
+-      rt_extent_alloc_rec_t *cur, *tmp;
+-
+       ASSERT(mp->m_sb.sb_rblocks != 0);
+-
+-      list_for_each_entry_safe(cur, tmp, &rt_ba_list, list)
+-              free(cur);
+-
+       free(rt_ext_tree_ptr);
+-
+       rt_ext_tree_ptr = NULL;
+-
+-      return;
+ }
+ /*
+@@ -862,11 +747,7 @@
+       int i;
+       xfs_agnumber_t agcount = mp->m_sb.sb_agcount;
+-      list_head_init(&ba_list);
+-      list_head_init(&rt_ba_list);
+-      pthread_mutex_init(&ext_flist_lock, NULL);
+       pthread_mutex_init(&rt_ext_tree_lock, NULL);
+-      pthread_mutex_init(&rt_ext_flist_lock, NULL);
+       dup_extent_trees = calloc(agcount, sizeof(struct btree_root *));
+       if (!dup_extent_trees)
+@@ -908,11 +789,6 @@
+               do_error(_("couldn't malloc dup rt extent tree descriptor\n"));
+       avl64_init_tree(rt_ext_tree_ptr, &avl64_extent_tree_ops);
+-
+-      ext_flist.cnt = 0;
+-      ext_flist.list = NULL;
+-
+-      return;
+ }
+ /*
+@@ -921,12 +797,8 @@
+ void
+ incore_ext_teardown(xfs_mount_t *mp)
+ {
+-      extent_alloc_rec_t *cur, *tmp;
+       xfs_agnumber_t i;
+-      list_for_each_entry_safe(cur, tmp, &ba_list, list)
+-              free(cur);
+-
+       for (i = 0; i < mp->m_sb.sb_agcount; i++)  {
+               btree_destroy(dup_extent_trees[i]);
+               free(extent_bno_ptrs[i]);
+
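The patch above boils the extent-node allocator down to one malloc per node and a plain free on release, with no shared state between threads. As a standalone illustration only (not part of the commit), a minimal sketch of that shape in C, where struct ext_node, startblock and blockcount are placeholder names rather than the real xfsprogs definitions:

#include <stdio.h>
#include <stdlib.h>

struct ext_node {
	struct ext_node *next;		/* stand-in for the avl_nextino linkage */
	unsigned long	 startblock;	/* stand-in for ex_startblock */
	unsigned long	 blockcount;	/* stand-in for ex_blockcount */
};

/* One malloc per node: no freelist, no global lock. */
static struct ext_node *
mk_ext_node(unsigned long startblock, unsigned long blockcount)
{
	struct ext_node *new = malloc(sizeof(*new));

	if (!new) {
		fprintf(stderr, "couldn't allocate new extent descriptor\n");
		exit(EXIT_FAILURE);
	}
	new->next = NULL;
	new->startblock = startblock;
	new->blockcount = blockcount;
	return new;
}

static void
release_ext_node(struct ext_node *node)
{
	free(node);	/* memory goes straight back to the allocator */
}

int main(void)
{
	struct ext_node *n = mk_ext_node(10, 4);

	release_ext_node(n);
	return 0;
}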
diff --git a/xfsprogs-repair-nofutexhang.patch b/xfsprogs-repair-nofutexhang.patch
new file mode 100644 (file)
index 0000000..21add3c
--- /dev/null
@@ -0,0 +1,293 @@
+
+Instead of allocating inode records in chunks and keeping a freelist of them
+which never gets released to the system memory allocator use plain malloc
+and free for them.  The freelist just means adding a global lock instead
+of relying on malloc and free which could be implemented lockless, and the
+freelist is almost completely worthless as we are done allocating new
+inode records once we start freeing them in major quantities.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+
+Index: xfsprogs-dev/repair/incore_ino.c
+===================================================================
+--- xfsprogs-dev.orig/repair/incore_ino.c      2011-11-09 18:52:15.041861085 +0000
++++ xfsprogs-dev/repair/incore_ino.c   2011-11-09 19:36:39.389806408 +0000
+@@ -25,7 +25,6 @@
+ #include "threads.h"
+ #include "err_protos.h"
+-static pthread_mutex_t        ino_flist_lock;
+ extern avlnode_t      *avl_firstino(avlnode_t *root);
+ /*
+@@ -38,18 +37,6 @@
+  */
+ static avltree_desc_t **inode_uncertain_tree_ptrs;
+-#define ALLOC_NUM_INOS                100
+-
+-/* free lists -- inode nodes and extent nodes */
+-
+-typedef struct ino_flist_s  {
+-      ino_tree_node_t         *list;
+-      ino_tree_node_t         *last;
+-      long long               cnt;
+-} ino_flist_t;
+-
+-static ino_flist_t ino_flist; /* free list must be initialized before use */
+-
+ /* memory optimised nlink counting for all inodes */
+ static void nlink_grow_8_to_16(ino_tree_node_t *irec);
+@@ -238,102 +225,63 @@
+ }
+ /*
+- * next is the uncertain inode list -- a sorted (in ascending order)
++ * Next is the uncertain inode list -- a sorted (in ascending order)
+  * list of inode records sorted on the starting inode number.  There
+  * is one list per ag.
+  */
+ /*
+- * common code for creating inode records for use by trees and lists.
++ * Common code for creating inode records for use by trees and lists.
+  * called only from add_inodes and add_inodes_uncertain
+  *
+  * IMPORTANT:  all inodes (inode records) start off as free and
+  *            unconfirmed.
+  */
+-/* ARGSUSED */
+-static ino_tree_node_t *
+-mk_ino_tree_nodes(
++static struct ino_tree_node *
++alloc_ino_node(
+       xfs_agino_t             starting_ino)
+ {
+-      int                     i;
+-      ino_tree_node_t         *ino_rec;
+-      avlnode_t               *node;
+-
+-      pthread_mutex_lock(&ino_flist_lock);
+-      if (ino_flist.cnt == 0)  {
+-              ASSERT(ino_flist.list == NULL);
+-
+-              if ((ino_rec = malloc(sizeof(ino_tree_node_t[ALLOC_NUM_INOS])))
+-                                      == NULL)
+-                      do_error(_("inode map malloc failed\n"));
+-
+-              for (i = 0; i < ALLOC_NUM_INOS; i++)  {
+-                      ino_rec->avl_node.avl_nextino =
+-                              (avlnode_t *) ino_flist.list;
+-                      ino_flist.list = ino_rec;
+-                      ino_flist.cnt++;
+-                      ino_rec++;
+-              }
+-      }
++      struct ino_tree_node    *irec;
+-      ASSERT(ino_flist.list != NULL);
+-
+-      ino_rec = ino_flist.list;
+-      ino_flist.list = (ino_tree_node_t *) ino_rec->avl_node.avl_nextino;
+-      ino_flist.cnt--;
+-      node = &ino_rec->avl_node;
+-      node->avl_nextino = node->avl_forw = node->avl_back = NULL;
+-      pthread_mutex_unlock(&ino_flist_lock);
+-
+-      /* initialize node */
+-
+-      ino_rec->ino_startnum = 0;
+-      ino_rec->ino_confirmed = 0;
+-      ino_rec->ino_isa_dir = 0;
+-      ino_rec->ir_free = (xfs_inofree_t) - 1;
+-      ino_rec->ino_un.ex_data = NULL;
+-      ino_rec->nlinkops = &nlinkops[0];
+-      ino_rec->disk_nlinks = calloc(1, nlinkops[0].nlink_size);
+-      if (ino_rec->disk_nlinks == NULL)
++      irec = malloc(sizeof(*irec));
++      if (!irec)
++              do_error(_("inode map malloc failed\n"));
++
++      irec->avl_node.avl_nextino = NULL;
++      irec->avl_node.avl_forw = NULL;
++      irec->avl_node.avl_back = NULL;
++
++      irec->ino_startnum = starting_ino;
++      irec->ino_confirmed = 0;
++      irec->ino_isa_dir = 0;
++      irec->ir_free = (xfs_inofree_t) - 1;
++      irec->ino_un.ex_data = NULL;
++      irec->nlinkops = &nlinkops[0];
++      irec->disk_nlinks = calloc(1, nlinkops[0].nlink_size);
++      if (!irec->disk_nlinks)
+               do_error(_("could not allocate nlink array\n"));
+-
+-      return(ino_rec);
++      return irec;
+ }
+-/*
+- * return inode record to free list, will be initialized when
+- * it gets pulled off list
+- */
+ static void
+-free_ino_tree_node(ino_tree_node_t *ino_rec)
++free_ino_tree_node(
++      struct ino_tree_node    *irec)
+ {
+-      ino_rec->avl_node.avl_nextino = NULL;
+-      ino_rec->avl_node.avl_forw = NULL;
+-      ino_rec->avl_node.avl_back = NULL;
+-
+-      pthread_mutex_lock(&ino_flist_lock);
+-      if (ino_flist.list != NULL)  {
+-              ASSERT(ino_flist.cnt > 0);
+-              ino_rec->avl_node.avl_nextino = (avlnode_t *) ino_flist.list;
+-      } else  {
+-              ASSERT(ino_flist.cnt == 0);
+-              ino_rec->avl_node.avl_nextino = NULL;
+-      }
++      irec->avl_node.avl_nextino = NULL;
++      irec->avl_node.avl_forw = NULL;
++      irec->avl_node.avl_back = NULL;
+-      ino_flist.list = ino_rec;
+-      ino_flist.cnt++;
+-
+-      free(ino_rec->disk_nlinks);
+-
+-      if (ino_rec->ino_un.ex_data != NULL)  {
++      free(irec->disk_nlinks);
++      if (irec->ino_un.ex_data != NULL)  {
+               if (full_ino_ex_data) {
+-                      free(ino_rec->ino_un.ex_data->parents);
+-                      free(ino_rec->ino_un.ex_data->counted_nlinks);
++                      free(irec->ino_un.ex_data->parents);
++                      free(irec->ino_un.ex_data->counted_nlinks);
+               }
+-              free(ino_rec->ino_un.ex_data);
++              free(irec->ino_un.ex_data);
+       }
+-      pthread_mutex_unlock(&ino_flist_lock);
++
++      free(irec);
+ }
+ /*
+@@ -379,17 +327,15 @@
+        * check to see if record containing inode is already in the tree.
+        * if not, add it
+        */
+-      if ((ino_rec = (ino_tree_node_t *)
+-                      avl_findrange(inode_uncertain_tree_ptrs[agno],
+-                              s_ino)) == NULL)  {
+-              ino_rec = mk_ino_tree_nodes(s_ino);
+-              ino_rec->ino_startnum = s_ino;
+-
+-              if (avl_insert(inode_uncertain_tree_ptrs[agno],
+-                              (avlnode_t *) ino_rec) == NULL)  {
+-                      do_error(_("add_aginode_uncertain - "
+-                                 "duplicate inode range\n"));
+-              }
++      ino_rec = (ino_tree_node_t *)
++              avl_findrange(inode_uncertain_tree_ptrs[agno], s_ino);
++      if (!ino_rec) {
++              ino_rec = alloc_ino_node(s_ino);
++
++              if (!avl_insert(inode_uncertain_tree_ptrs[agno],
++                              &ino_rec->avl_node))
++                      do_error(
++      _("add_aginode_uncertain - duplicate inode range\n"));
+       }
+       if (free)
+@@ -454,43 +400,38 @@
+ /*
+- * next comes the inode trees.  One per ag.  AVL trees
+- * of inode records, each inode record tracking 64 inodes
++ * Next comes the inode trees.  One per AG,  AVL trees of inode records, each
++ * inode record tracking 64 inodes
+  */
++
+ /*
+- * set up an inode tree record for a group of inodes that will
+- * include the requested inode.
+- *
+- * does NOT error-check for duplicate records.  Caller is
+- * responsible for checking that.
++ * Set up an inode tree record for a group of inodes that will include the
++ * requested inode.
+  *
+- * ino must be the start of an XFS_INODES_PER_CHUNK (64) inode chunk
++ * This does NOT do error-check for duplicate records.  The caller is
++ * responsible for checking that. Ino must be the start of an
++ * XFS_INODES_PER_CHUNK (64) inode chunk
+  *
+- * Each inode resides in a 64-inode chunk which can be part
+- * one or more chunks (MAX(64, inodes-per-block).  The fs allocates
+- * in chunks (as opposed to 1 chunk) when a block can hold more than
+- * one chunk (inodes per block > 64).  Allocating in one chunk pieces
+- * causes us problems when it takes more than one fs block to contain
+- * an inode chunk because the chunks can start on *any* block boundary.
+- * So we assume that the caller has a clue because at this level, we
+- * don't.
+- */
+-static ino_tree_node_t *
+-add_inode(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino)
++ * Each inode resides in a 64-inode chunk which can be part one or more chunks
++ * (MAX(64, inodes-per-block).  The fs allocates in chunks (as opposed to 1
++ * chunk) when a block can hold more than one chunk (inodes per block > 64).
++ * Allocating in one chunk pieces causes us problems when it takes more than
++ * one fs block to contain an inode chunk because the chunks can start on
++ * *any* block boundary. So we assume that the caller has a clue because at
++ * this level, we don't.
++ */
++static struct ino_tree_node *
++add_inode(
++      struct xfs_mount        *mp,
++      xfs_agnumber_t          agno,
++      xfs_agino_t             agino)
+ {
+-      ino_tree_node_t *ino_rec;
+-
+-      /* no record exists, make some and put them into the tree */
+-
+-      ino_rec = mk_ino_tree_nodes(ino);
+-      ino_rec->ino_startnum = ino;
++      struct ino_tree_node    *irec;
+-      if (avl_insert(inode_tree_ptrs[agno],
+-                      (avlnode_t *) ino_rec) == NULL)  {
++      irec = alloc_ino_node(agino);
++      if (!avl_insert(inode_tree_ptrs[agno],  &irec->avl_node))
+               do_warn(_("add_inode - duplicate inode range\n"));
+-      }
+-
+-      return(ino_rec);
++      return irec;
+ }
+ /*
+@@ -816,7 +757,6 @@
+       int i;
+       int agcount = mp->m_sb.sb_agcount;
+-      pthread_mutex_init(&ino_flist_lock, NULL);
+       if ((inode_tree_ptrs = malloc(agcount *
+                                       sizeof(avltree_desc_t *))) == NULL)
+               do_error(_("couldn't malloc inode tree descriptor table\n"));
+@@ -839,9 +779,6 @@
+               avl_init_tree(inode_uncertain_tree_ptrs[i], &avl_ino_tree_ops);
+       }
+-      ino_flist.cnt = 0;
+-      ino_flist.list = NULL;
+-
+       if ((last_rec = malloc(sizeof(ino_tree_node_t *) * agcount)) == NULL)
+               do_error(_("couldn't malloc uncertain inode cache area\n"));
+
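The inode-record change follows the same idea. For contrast, here is a minimal standalone sketch (again with stand-in types, not the real xfsprogs code) of the freelist pattern both patches remove: every allocation and release funnels through one global mutex, and the chunk memory is never handed back to the system before teardown, which is exactly the overhead the patch description points at.

#include <pthread.h>
#include <stdlib.h>

#define ALLOC_NUM	100

struct rec {
	struct rec *next;	/* stand-in for avl_node.avl_nextino */
};

static struct rec *freelist;
static long long freelist_cnt;
static pthread_mutex_t freelist_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every repair thread serializes on freelist_lock for each allocation. */
static struct rec *
alloc_rec(void)
{
	struct rec *r;
	int i;

	pthread_mutex_lock(&freelist_lock);
	if (freelist_cnt == 0) {
		struct rec *chunk = malloc(ALLOC_NUM * sizeof(*chunk));

		if (!chunk)
			abort();
		for (i = 0; i < ALLOC_NUM; i++) {
			chunk[i].next = freelist;
			freelist = &chunk[i];
			freelist_cnt++;
		}
	}
	r = freelist;
	freelist = r->next;
	freelist_cnt--;
	pthread_mutex_unlock(&freelist_lock);
	return r;
}

static void
free_rec(struct rec *r)
{
	pthread_mutex_lock(&freelist_lock);	/* even "free" contends here */
	r->next = freelist;
	freelist = r;
	freelist_cnt++;
	pthread_mutex_unlock(&freelist_lock);
}

int main(void)
{
	free_rec(alloc_rec());
	return 0;	/* chunk memory is only released at process exit */
}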
diff --git a/xfsprogs-repair-tcmalloc.patch b/xfsprogs-repair-tcmalloc.patch
new file mode 100644 (file)
index 0000000..9d4a72a
--- /dev/null
@@ -0,0 +1,83 @@
+From hch@infradead.org Mon Nov 14 16:57:45 2011
+Delivered-To: arekm@maven.pl
+Received: from gmail-pop.l.google.com [74.125.39.108]
+       by localhost with POP3 (fetchmail-6.3.21)
+       for <arekm@localhost> (single-drop); Mon, 14 Nov 2011 18:12:06 +0100 (CET)
+Received: by 10.142.223.21 with SMTP id v21cs23581wfg;
+        Mon, 14 Nov 2011 08:02:21 -0800 (PST)
+Received: by 10.229.247.139 with SMTP id mc11mr3086235qcb.238.1321286539560;
+        Mon, 14 Nov 2011 08:02:19 -0800 (PST)
+Return-Path: <BATV+f273fab3737168e2a79b+3004+infradead.org+hch@bombadil.srs.infradead.org>
+Received: from bombadil.infradead.org (173-166-109-252-newengland.hfc.comcastbusiness.net. [173.166.109.252])
+        by mx.google.com with ESMTPS id l7si5065356qcw.108.2011.11.14.08.02.18
+        (version=TLSv1/SSLv3 cipher=OTHER);
+        Mon, 14 Nov 2011 08:02:18 -0800 (PST)
+Received-SPF: neutral (google.com: 173.166.109.252 is neither permitted nor denied by best guess record for domain of BATV+f273fab3737168e2a79b+3004+infradead.org+hch@bombadil.srs.infradead.org) client-ip=173.166.109.252;
+Authentication-Results: mx.google.com; spf=neutral (google.com: 173.166.109.252 is neither permitted nor denied by best guess record for domain of BATV+f273fab3737168e2a79b+3004+infradead.org+hch@bombadil.srs.infradead.org) smtp.mail=BATV+f273fab3737168e2a79b+3004+infradead.org+hch@bombadil.srs.infradead.org
+Received: from hch by bombadil.infradead.org with local (Exim 4.76 #1 (Red Hat Linux))
+       id 1RPyzB-00079E-KT; Mon, 14 Nov 2011 16:02:17 +0000
+Message-Id: <20111114160217.591812422@bombadil.infradead.org>
+User-Agent: quilt/0.48-1
+Date: Mon, 14 Nov 2011 10:57:45 -0500
+From: Christoph Hellwig <hch@infradead.org>
+To: xfs@oss.sgi.com
+Cc: arekm@maven.pl
+Subject: [PATCH 3/3] xfsprogs: allow linking against libtcmalloc
+References: <20111114155742.285135418@bombadil.infradead.org>
+Content-Disposition: inline; filename=xfsprogs-use-tcmalloc
+X-SRS-Rewrite: SMTP reverse-path rewritten from <hch@infradead.org> by bombadil.infradead.org
+       See http://www.infradead.org/rpr.html
+Status: R
+X-Status: NT
+X-KMail-EncryptionState:  
+X-KMail-SignatureState:  
+X-KMail-MDN-Sent:  
+
+Allow linking against the libtcmalloc library from Google's performance
+tools, which at least for repair reduces the memory usage dramatically.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+
+--- xfsprogs-3.1.7/configure.in.org    2011-11-18 07:58:49.815439699 +0100
++++ xfsprogs-3.1.7/configure.in        2011-11-18 07:59:49.360093001 +0100
+@@ -31,6 +31,26 @@
+ AC_SUBST(libeditline)
+ AC_SUBST(enable_editline)
++AC_ARG_ENABLE(tcmalloc,
++[ --enable-tcmalloc=[yes/no] Enable tcmalloc [default=no]],,
++      enable_tcmalloc=check)
++
++if test x$enable_tcmalloc != xno; then
++    saved_CPPFLAGS="$CPPFLAGS"
++    CPPFLAGS="$CPPFLAGS -fno-builtin-malloc"
++    AC_CHECK_LIB([tcmalloc_minimal], [malloc], [libtcmalloc="-ltcmalloc_minimal"],
++      [AC_CHECK_LIB([tcmalloc], [malloc], [libtcmalloc="-ltcmalloc"], [
++      if test x$enable_tcmalloc = xyes; then
++              AC_MSG_ERROR([libtcmalloc_minimal or libtcmalloc library not found], 1)
++      fi]
++      )]
++    )
++    if test x$libtcmalloc = x; then
++      CPPFLAGS="$saved_CPPFLAGS"
++    fi
++fi
++AC_SUBST(libtcmalloc)
++
+ AC_ARG_ENABLE(termcap,
+ [ --enable-termcap=[yes/no] Enable terminal capabilities library [default=no]],
+       test $enable_termcap = yes && libtermcap="-ltermcap",)
+Index: xfsprogs-dev/include/builddefs.in
+===================================================================
+--- xfsprogs-dev.orig/include/builddefs.in     2011-08-14 17:00:02.000000000 +0000
++++ xfsprogs-dev/include/builddefs.in  2011-11-14 12:09:52.000000000 +0000
+@@ -22,7 +22,7 @@ _BUILDDEFS_INCLUDED_ = 1
+ DEBUG = @debug_build@
+ OPTIMIZER = @opt_build@
+-MALLOCLIB = @malloc_lib@
++MALLOCLIB = @malloc_lib@ @libtcmalloc@
+ LOADERFLAGS = @LDFLAGS@
+ LTLDFLAGS = @LDFLAGS@
+ CFLAGS = @CFLAGS@
+
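With the configure.in hunk above applied, tcmalloc support can be requested explicitly with --enable-tcmalloc=yes; as written, the default action is to probe for libtcmalloc_minimal first, then libtcmalloc, and fall back to the system malloc if neither is found. Whichever library is detected lands in @libtcmalloc@ and is appended to MALLOCLIB by the builddefs.in hunk, so no source changes are needed beyond the -fno-builtin-malloc compile flag added during the check.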
diff --git a/xfsprogs.spec b/xfsprogs.spec
index 1728c9aaa5da370cb65e924730b22d19c738f4ea..8816b143197b8cfb03f67232d8348bdb861c3774 100644 (file)
@@ -6,18 +6,21 @@
 Summary:       Tools for the XFS filesystem
 Summary(pl.UTF-8):     Narzędzia do systemu plików XFS
 Name:          xfsprogs
-Version:       3.1.6
-Release:       2
+Version:       3.1.7
+Release:       1
 License:       LGPL v2.1 (libhandle), GPL v2 (the rest)
 Group:         Applications/System
 Source0:       ftp://linux-xfs.sgi.com/projects/xfs/cmd_tars/%{name}-%{version}.tar.gz
-# Source0-md5: fbd2c1c5abed4b11047bea6ce53bc6e4
+# Source0-md5: 049cf9873794ea49d0bb3f12d45748a4
 Source1:       xfs_lsprojid
 Patch0:                %{name}-miscfix-v2.patch
 Patch2:                %{name}-sharedlibs.patch
 Patch3:                %{name}-pl.po-update.patch
 Patch4:                %{name}-dynamic_exe.patch
 Patch5:                %{name}-diet.patch
+Patch6:                xfsprogs-repair-mem.patch
+Patch7:                xfsprogs-repair-nofutexhang.patch
+Patch8:                xfsprogs-repair-tcmalloc.patch
 URL:           http://www.xfs.org/
 BuildRequires: autoconf
 BuildRequires: automake
@@ -32,6 +35,7 @@ BuildRequires:        libuuid-static
        %endif
 %endif
 BuildRequires: gettext-devel
+BuildRequires: google-perftools-devel
 BuildRequires: libblkid-devel
 BuildRequires: libtool
 BuildRequires: libuuid-devel
@@ -123,9 +127,12 @@ Zbiór komend do użytku z systemem plików XFS, włączając w to mkfs.xfs
 %setup -q
 %patch0 -p1
 %patch2 -p1
-%patch3 -p1
+#%patch3 -p1
 %patch4 -p1
 %patch5 -p1
+%patch6 -p1
+%patch7 -p1
+%patch8 -p1
 
 %build
 %{__aclocal} -I m4