git.pld-linux.org Git - packages/db.git/commitdiff
- fuckfuckfuckfuckfuckfuckfuck
author Mariusz Mazur <mmazur@pld-linux.org>
Thu, 17 Oct 2002 14:40:39 +0000 (14:40 +0000)
committer cvs2git <feedback@pld-linux.org>
Sun, 24 Jun 2012 12:13:13 +0000 (12:13 +0000)
Changed files:
    db-rpm.patch -> 1.3

db-rpm.patch [new file with mode: 0644]

diff --git a/db-rpm.patch b/db-rpm.patch
new file mode 100644 (file)
index 0000000..d8f977b
--- /dev/null
@@ -0,0 +1,96 @@
+Index: db/hash/hash_dup.c
+===================================================================
+RCS file: /cvs/devel/rpm/db/hash/hash_dup.c,v
+retrieving revision 1.1.1.2.2.3
+retrieving revision 1.1.1.2.2.4
+diff -u -1 -0 -r1.1.1.2.2.3 -r1.1.1.2.2.4
+--- db/hash/hash_dup.c 2002/01/08 04:07:30     1.1.1.2.2.3
++++ db/hash/hash_dup.c 2002/02/13 23:22:52     1.1.1.2.2.4
+@@ -606,20 +606,25 @@
+           PGNO(next_pagep), NUM_ENT(next_pagep) - 2)) != 0)
+               goto out;
+       /* Now delete the pair from the current page. */
+       ret = __ham_del_pair(dbc, 0);
+       /*
+        * __ham_del_pair decremented nelem.  This is incorrect;  we
+        * manually copied the element elsewhere, so the total number
+        * of elements hasn't changed.  Increment it again.
++       *
++       * !!!
++       * Note that we still have the metadata page pinned, and
++       * __ham_del_pair dirtied it, so we don't need to set the dirty
++       * flag again.
+        */
+       if (!STD_LOCKING(dbc))
+               hcp->hdr->nelem++;
+ out:
+       (void)mpf->put(mpf, hcp->page, DB_MPOOL_DIRTY);
+       hcp->page = next_pagep;
+       hcp->pgno = PGNO(hcp->page);
+       hcp->indx = NUM_ENT(hcp->page) - 2;
+       F_SET(hcp, H_EXPAND);
+Index: db/hash/hash_page.c
+===================================================================
+RCS file: /cvs/devel/rpm/db/hash/hash_page.c,v
+retrieving revision 1.1.1.2.2.3
+retrieving revision 1.1.1.2.2.4
+diff -u -1 -0 -r1.1.1.2.2.3 -r1.1.1.2.2.4
+--- db/hash/hash_page.c        2002/01/08 04:07:30     1.1.1.2.2.3
++++ db/hash/hash_page.c        2002/02/13 23:22:52     1.1.1.2.2.4
+@@ -636,22 +636,25 @@
+       if ((ret = __ham_c_update(dbc, 0, 0, 0)) != 0)
+               return (ret);
+       /*
+        * If we are locking, we will not maintain this, because it is
+        * a hot spot.
+        *
+        * XXX
+        * Perhaps we can retain incremental numbers and apply them later.
+        */
+-      if (!STD_LOCKING(dbc))
++      if (!STD_LOCKING(dbc)) {
+               --hcp->hdr->nelem;
++              if ((ret = __ham_dirty_meta(dbc)) != 0)
++                      return (ret);
++      }
+       /*
+        * If we need to reclaim the page, then check if the page is empty.
+        * There are two cases.  If it's empty and it's not the first page
+        * in the bucket (i.e., the bucket page) then we can simply remove
+        * it. If it is the first chain in the bucket, then we need to copy
+        * the second page into it and remove the second page.
+        * If its the only page in the bucket we leave it alone.
+        */
+       if (!reclaim_page ||
+@@ -1407,22 +1410,25 @@
+        * next time we come in here.  For other operations, this shouldn't
+        * matter, since odds are this is the last thing that happens before
+        * we return to the user program.
+        */
+       hcp->pgno = PGNO(hcp->page);
+       /*
+        * XXX
+        * Maybe keep incremental numbers here.
+        */
+-      if (!STD_LOCKING(dbc))
++      if (!STD_LOCKING(dbc)) {
+               hcp->hdr->nelem++;
++              if ((ret = __ham_dirty_meta(dbc)) != 0)
++                      return (ret);
++      }
+       if (do_expand || (hcp->hdr->ffactor != 0 &&
+           (u_int32_t)H_NUMPAIRS(hcp->page) > hcp->hdr->ffactor))
+               F_SET(hcp, H_EXPAND);
+       return (0);
+ }
+ /*
+  * Special __putitem call used in splitting -- copies one entry to
+  * another.  Works for all types of hash entries (H_OFFPAGE, H_KEYDATA,
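
For readers skimming the patch: both hunks pair an in-place change to hcp->hdr->nelem with marking the hash metadata page dirty (via __ham_dirty_meta, or by noting the page was already dirtied), so the buffer pool knows the cached meta page must be written back. Below is a minimal, self-contained sketch of that discipline; the toy cache, the struct, and the function names (toy_meta_page, page_get, dirty_meta, page_put) are invented for illustration and are not the Berkeley DB mpool API.

/*
 * Toy sketch (not Berkeley DB code): a one-page "buffer pool" whose
 * backing store is a plain variable.  The cached value is written back
 * only when the page was marked dirty, which is the behaviour the
 * patch relies on: bump nelem, then mark the meta page dirty, or the
 * increment never reaches disk.
 */
#include <stdio.h>

struct toy_meta_page {
	unsigned int nelem;	/* element count, like hcp->hdr->nelem */
	int          dirty;	/* set when the cached copy was modified */
};

static unsigned int disk_nelem = 41;	/* pretend on-disk copy */

/* Pin the page: read the on-disk value into the cache. */
static void page_get(struct toy_meta_page *pg)
{
	pg->nelem = disk_nelem;
	pg->dirty = 0;
}

/*
 * Analogous in spirit to __ham_dirty_meta: record that the cached
 * copy now differs from the on-disk copy.
 */
static void dirty_meta(struct toy_meta_page *pg)
{
	pg->dirty = 1;
}

/* Unpin the page: write back only if somebody marked it dirty. */
static void page_put(struct toy_meta_page *pg)
{
	if (pg->dirty)
		disk_nelem = pg->nelem;
}

int main(void)
{
	struct toy_meta_page pg;

	/* Without the dirty flag, the increment is lost on eviction. */
	page_get(&pg);
	pg.nelem++;
	page_put(&pg);
	printf("without dirty flag: disk nelem = %u\n", disk_nelem);	/* 41 */

	/* With the flag (what the patch adds), the update survives. */
	page_get(&pg);
	pg.nelem++;
	dirty_meta(&pg);
	page_put(&pg);
	printf("with dirty flag:    disk nelem = %u\n", disk_nelem);	/* 42 */

	return 0;
}

The hash_dup.c hunk only adds a comment because that code path already passes DB_MPOOL_DIRTY to mpf->put(); the hash_page.c hunks are the ones that add the missing dirty-marking around the nelem updates.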