git.pld-linux.org Git - packages/db4.1.git/commitdiff
- added 4.1.25.[23] patches (branch: master)
author Jakub Bogusz <qboosh@pld-linux.org>
Thu, 15 Dec 2011 20:09:55 +0000 (20:09 +0000)
committer cvs2git <feedback@pld-linux.org>
Sun, 24 Jun 2012 12:13:13 +0000 (12:13 +0000)
- unified, added default_db,static_libs,tcl bconds and cxx-static,java-devel subpackages
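
The bconds added here are build-time switches: %bcond_without features (tcl, static_libs) are built by default and turned off with --without, while %bcond_with features (java, default_db) stay off unless requested with --with. A minimal sketch of how the rebuilt package might be driven (bcond names taken from the spec diff below; the exact rpmbuild invocation depends on the local PLD builder setup):

    # default build: Tcl bindings and static libraries on, Java and default_db off
    rpmbuild -bb db4.1.spec
    # make this db the default system db and skip the static libraries
    rpmbuild -bb db4.1.spec --with default_db --without static_libs
    # additionally enable the Java library and its -devel subpackage
    rpmbuild -bb db4.1.spec --with java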

Changed files:
    db4.1.spec -> 1.16
    patch.4.1.25.2 -> 1.1
    patch.4.1.25.3 -> 1.1

db4.1.spec
patch.4.1.25.2 [new file with mode: 0644]
patch.4.1.25.3 [new file with mode: 0644]

index 8c4858c70d9fed8782c90c062c4ffc133d12df5c..e7aa72219f4edc3245bbdf37a1addbf34a317045 100644 (file)
@@ -1,31 +1,41 @@
 #
 # Conditional build:
-%bcond_with    java    # build db-java
+%bcond_with    java            # build Java library (Java 1.4 source with no option specified)
+%bcond_without tcl             # don't build Tcl bindings
+%bcond_without static_libs     # don't build static libraries
+%bcond_with    default_db      # use this db as default system db
 #
-Summary:       BSD database library for C
+%define                ver             4.1.25
+%define                patchlevel      3
+Summary:       Berkeley DB database library for C
 Summary(pl.UTF-8):     Biblioteka C do obsługi baz Berkeley DB
 Name:          db4.1
-Version:       4.1.25
+Version:       %{ver}.%{patchlevel}
 Release:       1
 License:       BSD
 Group:         Libraries
-# alternative site (sometimes working): http://www.berkeleydb.com/
-#Source0Download: http://dev.sleepycat.com/downloads/releasehistorybdb.html
-Source0:       http://downloads.sleepycat.com/db-%{version}.tar.gz
+#Source0Download: http://www.oracle.com/technetwork/database/berkeleydb/downloads/index-082944.html
+Source0:       http://download.oracle.com/berkeley-db/db-%{ver}.tar.gz
 # Source0-md5: df71961002b552c0e72c6e4e358f27e1
+%patchset_source -f http://download.oracle.com/berkeley-db/patches/db/%{ver}/patch.%{ver}.%g 1 %{patchlevel}
 Patch0:                db-o_direct.patch
-Patch1:                http://www.sleepycat.com/update/4.1.25/patch.4.1.25.1
-URL:           http://www.sleepycat.com/
+URL:           http://www.oracle.com/technetwork/database/berkeleydb/downloads/index.html
 BuildRequires: autoconf
 BuildRequires: ed
-%{?with_java:BuildRequires:    gcc-java}
+%{?with_java:BuildRequires:    jdk}
 BuildRequires: libstdc++-devel
-BuildRequires: tcl-devel >= 8.3.2
+BuildRequires: rpmbuild(macros) >= 1.426
+BuildRequires: sed >= 4.0
+%{?with_tcl:BuildRequires:     tcl-devel >= 8.3.2}
+%if %{with default_db}
 Provides:      db = %{version}-%{release}
 Obsoletes:     db4
+%endif
 BuildRoot:     %{tmpdir}/%{name}-%{version}-root-%(id -u -n)
 
+%if %{without default_db}
 %define                _includedir     %{_prefix}/include/db4.1
+%endif
 
 %description
 The Berkeley Database (Berkeley DB) is a programmatic toolkit that
@@ -44,7 +54,12 @@ Summary:     Header files for Berkeley database library
 Summary(pl.UTF-8):     Pliki nagłówkowe do biblioteki Berkeley Database
 Group:         Development/Libraries
 Requires:      %{name} = %{version}-%{release}
+%if %{with default_db}
+Provides:      db-devel = %{version}-%{release}
+Obsoletes:     db-devel
+Obsoletes:     db3-devel
 Obsoletes:     db4-devel
+%endif
 
 %description devel
 The Berkeley Database (Berkeley DB) is a programmatic toolkit that
@@ -73,7 +88,12 @@ Summary:     Static libraries for Berkeley database library
 Summary(pl.UTF-8):     Statyczne biblioteki Berkeley Database
 Group:         Development/Libraries
 Requires:      %{name}-devel = %{version}-%{release}
+%if %{with default_db}
+Provides:      db-static = %{version}-%{release}
+Obsoletes:     db-static
+Obsoletes:     db3-static
 Obsoletes:     db4-static
+%endif
 
 %description static
 The Berkeley Database (Berkeley DB) is a programmatic toolkit that
@@ -101,8 +121,10 @@ używających Berkeley DB.
 Summary:       Berkeley database library for C++
 Summary(pl.UTF-8):     Biblioteka baz danych Berkeley dla C++
 Group:         Libraries
+%if %{with default_db}
 Provides:      db-cxx = %{version}-%{release}
 Obsoletes:     db4-cxx
+%endif
 
 %description cxx
 Berkeley database library for C++.
@@ -111,25 +133,49 @@ Berkeley database library for C++.
 Biblioteka baz danych Berkeley dla C++.
 
 %package cxx-devel
-Summary:       Berkeley database library for C++
-Summary(pl.UTF-8):     Biblioteka baz danych Berkeley dla C++
-Group:         Libraries
+Summary:       Header files for db-cxx library
+Summary(pl.UTF-8):     Pliki nagłówkowe biblioteki db-cxx
+Group:         Development/Libraries
 Requires:      %{name}-cxx = %{version}-%{release}
 Requires:      %{name}-devel = %{version}-%{release}
-Obsoletes:     db4-cxx
-Conflicts:     db-devel < 4.1.25-3
+%if %{with default_db}
+Provides:      db-cxx-devel = %{version}-%{release}
+Obsoletes:     db-cxx-devel
+%endif
+Conflicts:     db4.1-devel < 4.1.25-3
 
 %description cxx-devel
-Berkeley database library for C++.
+Header files for db-cxx library.
 
 %description cxx-devel -l pl.UTF-8
-Biblioteka baz danych Berkeley dla C++.
+Pliki nagłówkowe biblioteki db-cxx.
+
+%package cxx-static
+Summary:       Static version of db-cxx library
+Summary(pl.UTF-8):     Statyczna wersja biblioteki db-cxx
+Group:         Development/Libraries
+Requires:      %{name}-cxx-devel = %{version}-%{release}
+%if %{with default_db}
+Provides:      db-cxx-static = %{version}-%{release}
+Obsoletes:     db-cxx-static
+%endif
+Conflicts:     db-static < 4.1.25.3
+
+%description cxx-static
+Static version of db-cxx library.
+
+%description cxx-static -l pl.UTF-8
+Statyczna wersja biblioteki db-cxx.
 
 %package java
 Summary:       Berkeley database library for Java
 Summary(pl.UTF-8):     Biblioteka baz danych Berkeley dla Javy
 Group:         Libraries
+Requires:      jpackage-utils
+%if %{with default_db}
 Provides:      db-java = %{version}-%{release}
+Obsoletes:     db-java
+%endif
 
 %description java
 Berkeley database library for Java.
@@ -137,13 +183,32 @@ Berkeley database library for Java.
 %description java -l pl.UTF-8
 Biblioteka baz danych Berkeley dla Javy.
 
+%package java-devel
+Summary:       Development files for db-java library
+Summary(pl.UTF-8):     Pliki programistyczne biblioteki db-java
+Group:         Development/Languages/Java
+Requires:      %{name}-java = %{version}-%{release}
+%if %{with default_db}
+Provides:      db-java-devel = %{version}-%{release}
+Obsoletes:     db-java-devel
+%endif
+Conflicts:     db4.1-devel < 4.1.25.3
+
+%description java-devel
+Development files for db-java library.
+
+%description java-devel -l pl.UTF-8
+Pliki programistyczne biblioteki db-java.
+
 %package tcl
 Summary:       Berkeley database library for Tcl
 Summary(pl.UTF-8):     Biblioteka baz danych Berkeley dla Tcl
 Group:         Development/Languages/Tcl
 Requires:      tcl
+%if %{with default_db}
 Provides:      db-tcl = %{version}-%{release}
 Obsoletes:     db4-tcl
+%endif
 
 %description tcl
 Berkeley database library for Tcl.
@@ -152,28 +217,33 @@ Berkeley database library for Tcl.
 Biblioteka baz danych Berkeley dla Tcl.
 
 %package tcl-devel
-Summary:       Berkeley database library for Tcl
-Summary(pl.UTF-8):     Biblioteka baz danych Berkeley dla Tcl
+Summary:       Development files for db-tcl library
+Summary(pl.UTF-8):     Pliki programistyczne biblioteki db-tcl
 Group:         Development/Languages/Tcl
 Requires:      %{name}-tcl = %{version}-%{release}
-Requires:      tcl
-Obsoletes:     db4-tcl
+%if %{with default_db}
+Provides:      db-tcl-devel = %{version}-%{release}
+Obsoletes:     db-tcl-devel
+%endif
 Conflicts:     db-devel < 4.1.25-3
 
 %description tcl-devel
-Berkeley database library for Tcl.
+Development files for db-tcl library.
 
 %description tcl-devel -l pl.UTF-8
-Biblioteka baz danych Berkeley dla Tcl.
+Pliki programistyczne biblioteki db-tcl.
 
 %package utils
 Summary:       Command line tools for managing Berkeley DB databases
 Summary(pl.UTF-8):     Narzędzia do obsługi baz Berkeley DB z linii poleceń
 Group:         Applications/Databases
 Requires:      %{name} = %{version}-%{release}
+%if %{with default_db}
 Provides:      db-utils = %{version}-%{release}
-Obsoletes:     db-utils < 4.2
+Obsoletes:     db-utils
+Obsoletes:     db3-utils
 Obsoletes:     db4-utils
+%endif
 
 %description utils
 The Berkeley Database (Berkeley DB) is a programmatic toolkit that
@@ -198,15 +268,18 @@ Ten pakiet zawiera narzędzia do obsługi baz Berkeley DB z linii
 poleceń.
 
 %prep
-%setup -q -n db-%{version}
+%setup -q -n db-%{ver}
+# official patches
+%patchset_patch 1 %{patchlevel}
+
 %patch0 -p1
-%patch1 -p0
 
 %build
 cd dist
 sh s_config
 cd ..
 
+%if %{with static_libs}
 cp -a build_unix build_unix.static
 
 cd build_unix.static
@@ -215,68 +288,129 @@ CC="%{__cc}"
 CXX="%{__cxx}"
 CFLAGS="%{rpmcflags}"
 CXXFLAGS="%{rpmcflags} -fno-implicit-templates"
-export CC CXX CFLAGS CXXFLAGS
+LDFLAGS="%{rpmcflags} %{rpmldflags}"
+export CC CXX CFLAGS CXXFLAGS LDFLAGS
 
 ../dist/%configure \
-       --enable-compat185 \
        --disable-shared \
        --enable-static \
-       --enable-rpc \
-       --enable-cxx
+       --enable-compat185 \
+       --enable-cxx \
+       --enable-rpc
 
 # (temporarily?) disabled because of compilation errors:
 #      --enable-dump185 \
 
 %{__make} library_build
+cd ..
+%endif
 
-cd ../build_unix
+cd build_unix
 
 ../dist/%configure \
-       --enable-compat185 \
+       --prefix=%{_prefix} \
+       --libdir=%{_libdir} \
        --enable-shared \
        --disable-static \
-       --enable-rpc \
+       --enable-compat185 \
        --enable-cxx \
-       --enable-tcl \
-       --with-tcl=/usr/lib \
-       %{?with_java:--enable-java}
+       --enable-rpc \
+       %{?with_java:--enable-java} \
+       %{?with_tcl:--enable-tcl --with-tcl=/usr/lib}
 
-%{__make} library_build \
+%{__make} -j1 library_build \
        TCFLAGS='-I$(builddir) -I%{_includedir}'
 
 %install
 rm -rf $RPM_BUILD_ROOT
 install -d $RPM_BUILD_ROOT{%{_includedir},%{_libdir},%{_bindir}}
+%if %{with java}
+install -d $RPM_BUILD_ROOT%{_javadir}
+%endif
 
-cd build_unix.static
-
-%{__make} library_install \
+%if %{with static_libs}
+%{__make} -C build_unix.static library_install \
        bindir=$RPM_BUILD_ROOT%{_bindir} \
+       docdir=$RPM_BUILD_ROOT%{_docdir}/db-%{version}-docs \
        prefix=$RPM_BUILD_ROOT%{_prefix} \
        libdir=$RPM_BUILD_ROOT%{_libdir} \
        includedir=$RPM_BUILD_ROOT%{_includedir}
+%endif
 
-cd ../build_unix
-
-%{__make} library_install \
+%{__make} -C build_unix library_install \
+       LIB_INSTALL_FILE_LIST="" \
        bindir=$RPM_BUILD_ROOT%{_bindir} \
+       docdir=$RPM_BUILD_ROOT%{_docdir}/db-%{version}-docs \
        prefix=$RPM_BUILD_ROOT%{_prefix} \
        libdir=$RPM_BUILD_ROOT%{_libdir} \
-       includedir=$RPM_BUILD_ROOT%{_includedir} \
-       LIB_INSTALL_FILE_LIST=""
+       includedir=$RPM_BUILD_ROOT%{_includedir}
 
-#rm -rf examples_java
-#cp -a java/src/com/sleepycat/examples examples_java
+%if %{with default_db}
+install -d $RPM_BUILD_ROOT/%{_lib}
+mv $RPM_BUILD_ROOT%{_libdir}/libdb-4.1.so $RPM_BUILD_ROOT/%{_lib}
+%endif
 
 cd $RPM_BUILD_ROOT%{_libdir}
+%if %{with static_libs}
 mv -f libdb.a libdb-4.1.a
 mv -f libdb_cxx.a libdb_cxx-4.1.a
+%endif
+%if %{with java}
+mv -f $RPM_BUILD_ROOT%{_libdir}/db.jar $RPM_BUILD_ROOT%{_javadir}/db-4.1.jar
+%endif
+%if %{with default_db}
+ln -sf /%{_lib}/libdb-4.1.so libdb.so
+ln -sf /%{_lib}/libdb-4.1.so libdb4.so
+ln -sf /%{_lib}/libdb-4.1.so libdb-4.1.so
+ln -sf /%{_lib}/libdb-4.1.so libndbm.so
+ln -sf libdb-4.1.la libdb.la
+ln -sf libdb-4.1.la libdb4.la
+ln -sf libdb-4.1.la libndbm.la
+ln -sf libdb_cxx-4.1.so libdb_cxx.so
+ln -sf libdb_cxx-4.1.la libdb_cxx.la
+%if %{with java}
+ln -sf libdb_java-4.1.la libdb_java.la
+ln -sf db-4.1.jar $RPM_BUILD_ROOT%{_javadir}/db.jar
+%endif
+%if %{with tcl}
+ln -sf libdb_tcl-4.1.so libdb_tcl.so
+ln -sf libdb_tcl-4.1.la libdb_tcl.la
+%endif
+%if %{with static_libs}
+ln -sf libdb-4.1.a libdb.a
+ln -sf libdb-4.1.a libdb4.a
+ln -sf libdb-4.1.a libndbm.a
+ln -sf libdb_cxx-4.1.a libdb_cxx.a
+%endif
+%endif
+
+sed -i "s/old_library=''/old_library='libdb-4.1.a'/" libdb-4.1.la
+sed -i "s/old_library=''/old_library='libdb_cxx-4.1.a'/" libdb_cxx-4.1.la
+
+cd -
+
+cd $RPM_BUILD_ROOT%{_bindir}
+mv -f berkeley_db_svc berkeley_db_svc-4.1
+%{?with_default_db:ln -sf berkeley_db_svc-4.1 berkeley_db_svc}
+for F in db_*; do
+  Fver=$(echo $F|sed 's/db_/db4.1_/')
+  mv $F $Fver
+  %{?with_default_db:ln -sf $Fver $F}
+done
+cd -
+rm -f examples_c*/tags
+install -d $RPM_BUILD_ROOT%{_examplesdir}/db-%{version}
+cp -rf examples_c/* $RPM_BUILD_ROOT%{_examplesdir}/db-%{version}
 
-mv -f libdb-4.1.la libdb-4.1.la.tmp
-mv -f libdb_cxx-4.1.la libdb_cxx-4.1.la.tmp
-sed -e "s/old_library=''/old_library='libdb-4.1.a'/" libdb-4.1.la.tmp > libdb-4.1.la
-sed -e "s/old_library=''/old_library='libdb_cxx-4.1.a'/" libdb_cxx-4.1.la.tmp > libdb_cxx-4.1.la
-rm -f libdb*.la.tmp
+install -d $RPM_BUILD_ROOT%{_examplesdir}/db-cxx-%{version}
+cp -rf examples_cxx/* $RPM_BUILD_ROOT%{_examplesdir}/db-cxx-%{version}
+
+%if %{with java}
+install -d $RPM_BUILD_ROOT%{_examplesdir}/db-java-%{version}
+cp -rf examples_java/* $RPM_BUILD_ROOT%{_examplesdir}/db-java-%{version}
+%else
+%{__rm} -r $RPM_BUILD_ROOT%{_docdir}/db-%{version}-docs/api_java
+%endif
 
 %clean
 rm -rf $RPM_BUILD_ROOT
@@ -293,19 +427,47 @@ rm -rf $RPM_BUILD_ROOT
 %files
 %defattr(644,root,root,755)
 %doc LICENSE README
+%if %{with default_db}
+%attr(755,root,root) /%{_lib}/libdb-4.1.so
+%else
 %attr(755,root,root) %{_libdir}/libdb-4.1.so
+%endif
+%dir %{_docdir}/db-%{version}-docs
+%{_docdir}/db-%{version}-docs/sleepycat
+%{_docdir}/db-%{version}-docs/index.html
 
 %files devel
 %defattr(644,root,root,755)
-%doc docs/{api*,ref,index.html,sleepycat,images} examples_c*
 %{_libdir}/libdb-4.1.la
+%if %{with default_db}
+%attr(755,root,root) %{_libdir}/libdb-4.1.so
+%attr(755,root,root) %{_libdir}/libdb4.so
+%attr(755,root,root) %{_libdir}/libdb.so
+%attr(755,root,root) %{_libdir}/libndbm.so
+%{_libdir}/libdb4.la
+%{_libdir}/libdb.la
+%{_libdir}/libndbm.la
+%else
 %dir %{_includedir}
+%endif
 %{_includedir}/db.h
 %{_includedir}/db_185.h
+%{_docdir}/db-%{version}-docs/api_c
+%{_docdir}/db-%{version}-docs/images
+%{_docdir}/db-%{version}-docs/ref
+%{_docdir}/db-%{version}-docs/reftoc.html
+%{_examplesdir}/db-%{version}
 
+%if %{with static_libs}
 %files static
 %defattr(644,root,root,755)
-%{_libdir}/lib*-4.1.a
+%{_libdir}/libdb-4.1.a
+%if %{with default_db}
+%{_libdir}/libdb4.a
+%{_libdir}/libdb.a
+%{_libdir}/libndbm.a
+%endif
+%endif
 
 %files cxx
 %defattr(644,root,root,755)
@@ -314,17 +476,46 @@ rm -rf $RPM_BUILD_ROOT
 %files cxx-devel
 %defattr(644,root,root,755)
 %{_libdir}/libdb_cxx-4.1.la
+%if %{with default_db}
+%attr(755,root,root) %{_libdir}/libdb_cxx.so
+%{_libdir}/libdb_cxx.la
+%endif
 %{_includedir}/cxx_common.h
 %{_includedir}/cxx_except.h
 %{_includedir}/db_cxx.h
+%{_docdir}/db-%{version}-docs/api_cxx
+%{_examplesdir}/db-cxx-%{version}
+
+%if %{with static_libs}
+%files cxx-static
+%defattr(644,root,root,755)
+%{_libdir}/libdb_cxx-4.1.a
+%if %{with default_db}
+%{_libdir}/libdb_cxx.a
+%endif
+%endif
 
 %if %{with java}
 %files java
 %defattr(644,root,root,755)
 
-%{_libdir}/db.jar
+%attr(755,root,root) %{_libdir}/libdb_java-4.1.so
+%{_javadir}/db-4.1.jar
+%if %{with default_db}
+%{_javadir}/db.jar
+%endif
+
+%files java-devel
+%defattr(644,root,root,755)
+%{_libdir}/libdb_java-4.1.la
+%if %{with default_db}
+%attr(755,root,root) %{_libdir}/libdb_java.so
+%{_libdir}/libdb_java.la
+%endif
+%{_docdir}/db-%{version}-docs/java
+%{_examplesdir}/db-java-%{version}
 %endif
 
+%if %{with tcl}
 %files tcl
 %defattr(644,root,root,755)
 %attr(755,root,root) %{_libdir}/libdb_tcl-4.1.so
@@ -332,19 +523,39 @@ rm -rf $RPM_BUILD_ROOT
 %files tcl-devel
 %defattr(644,root,root,755)
 %{_libdir}/libdb_tcl-4.1.la
+%if %{with default_db}
+%attr(755,root,root) %{_libdir}/libdb_tcl.so
+%{_libdir}/libdb_tcl.la
+%endif
+%{_docdir}/db-%{version}-docs/api_tcl
+%endif
 
 %files utils
 %defattr(644,root,root,755)
-%doc docs/utility/*
+%attr(755,root,root) %{_bindir}/berkeley_db_svc-4.1
+%attr(755,root,root) %{_bindir}/db4.1_archive
+%attr(755,root,root) %{_bindir}/db4.1_checkpoint
+%attr(755,root,root) %{_bindir}/db4.1_deadlock
+%attr(755,root,root) %{_bindir}/db4.1_dump
+#%attr(755,root,root) %{_bindir}/db4.1_dump185
+%attr(755,root,root) %{_bindir}/db4.1_load
+%attr(755,root,root) %{_bindir}/db4.1_printlog
+%attr(755,root,root) %{_bindir}/db4.1_recover
+%attr(755,root,root) %{_bindir}/db4.1_stat
+%attr(755,root,root) %{_bindir}/db4.1_upgrade
+%attr(755,root,root) %{_bindir}/db4.1_verify
+%if %{with default_db}
 %attr(755,root,root) %{_bindir}/berkeley_db_svc
-%attr(755,root,root) %{_bindir}/db*_archive
-%attr(755,root,root) %{_bindir}/db*_checkpoint
-%attr(755,root,root) %{_bindir}/db*_deadlock
-%attr(755,root,root) %{_bindir}/db*_dump
-#%attr(755,root,root) %{_bindir}/db*_dump185
-%attr(755,root,root) %{_bindir}/db*_load
-%attr(755,root,root) %{_bindir}/db*_printlog
-%attr(755,root,root) %{_bindir}/db*_recover
-%attr(755,root,root) %{_bindir}/db*_stat
-%attr(755,root,root) %{_bindir}/db*_upgrade
-%attr(755,root,root) %{_bindir}/db*_verify
+%attr(755,root,root) %{_bindir}/db_archive
+%attr(755,root,root) %{_bindir}/db_checkpoint
+%attr(755,root,root) %{_bindir}/db_deadlock
+%attr(755,root,root) %{_bindir}/db_dump
+#%attr(755,root,root) %{_bindir}/db_dump185
+%attr(755,root,root) %{_bindir}/db_load
+%attr(755,root,root) %{_bindir}/db_printlog
+%attr(755,root,root) %{_bindir}/db_recover
+%attr(755,root,root) %{_bindir}/db_stat
+%attr(755,root,root) %{_bindir}/db_upgrade
+%attr(755,root,root) %{_bindir}/db_verify
+%endif
+%{_docdir}/db-%{version}-docs/utility
diff --git a/patch.4.1.25.2 b/patch.4.1.25.2
new file mode 100644 (file)
index 0000000..64b5d71
--- /dev/null
@@ -0,0 +1,617 @@
+*** dbinc/mp.h.orig    2004-02-02 10:24:53.000000000 -0800
+--- dbinc/mp.h 2004-02-02 10:26:27.000000000 -0800
+***************
+*** 149,154 ****
+--- 149,161 ----
+        * region lock).
+        */
+       DB_MPOOL_STAT stat;             /* Per-cache mpool statistics. */
++  
++       /*
++        * We track page puts so that we can decide when allocation is never
++        * going to succeed.  We don't lock the field, all we care about is
++        * if it changes.
++        */
++       u_int32_t  put_counter;                /* Count of page put calls. */
+  };
+  
+  struct __db_mpool_hash {
+*** mp/mp_fput.c.orig  2002-08-13 06:26:41.000000000 -0700
+--- mp/mp_fput.c       2004-02-02 10:22:35.000000000 -0800
+***************
+*** 19,24 ****
+--- 19,26 ----
+  #include "dbinc/db_shash.h"
+  #include "dbinc/mp.h"
+  
++ static void __memp_reset_lru __P((DB_ENV *, REGINFO *));
++ 
+  /*
+   * __memp_fput --
+   *   Mpool file put function.
+***************
+*** 198,202 ****
+--- 200,255 ----
+  
+       MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+  
++      /*
++       * On every buffer put we update the buffer generation number and check
++       * for wraparound.
++       */
++      if (++c_mp->lru_count == UINT32_T_MAX)
++              __memp_reset_lru(dbenv, dbmp->reginfo);
++ 
+       return (0);
+  }
++ 
++ /*
++  * __memp_reset_lru --
++  *   Reset the cache LRU counter.
++  */
++ static void
++ __memp_reset_lru(dbenv, memreg)
++      DB_ENV *dbenv;
++      REGINFO *memreg;
++ {
++      BH *bhp;
++      DB_MPOOL_HASH *hp;
++      MPOOL *c_mp;
++      int bucket;
++ 
++      c_mp = memreg->primary;
++ 
++      /*
++       * Update the counter so all future allocations will start at the
++       * bottom.
++       */
++      c_mp->lru_count -= MPOOL_BASE_DECREMENT;
++ 
++      /* Adjust the priority of every buffer in the system. */
++      for (hp = R_ADDR(memreg, c_mp->htab),
++          bucket = 0; bucket < c_mp->htab_buckets; ++hp, ++bucket) {
++              /*
++               * Skip empty buckets.
++               *
++               * We can check for empty buckets before locking as we
++               * only care if the pointer is zero or non-zero.
++               */
++              if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
++                      continue;
++ 
++              MUTEX_LOCK(dbenv, &hp->hash_mutex);
++              for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
++                  bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
++                      if (bhp->priority != UINT32_T_MAX &&
++                          bhp->priority > MPOOL_BASE_DECREMENT)
++                              bhp->priority -= MPOOL_BASE_DECREMENT;
++              MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
++      }
++ }
+*** mp/mp_alloc.c.orig 2002-08-17 07:23:25.000000000 -0700
+--- mp/mp_alloc.c      2004-02-02 10:28:15.000000000 -0800
+***************
+*** 25,31 ****
+  } HS;
+  
+  static void __memp_bad_buffer __P((DB_MPOOL_HASH *));
+- static void __memp_reset_lru __P((DB_ENV *, REGINFO *, MPOOL *));
+  
+  /*
+   * __memp_alloc --
+--- 25,30 ----
+***************
+*** 50,57 ****
+       MPOOL *c_mp;
+       MPOOLFILE *bh_mfp;
+       size_t freed_space;
+!      u_int32_t buckets, buffers, high_priority, max_na, priority;
+!      int aggressive, ret;
+       void *p;
+  
+       dbenv = dbmp->dbenv;
+--- 49,57 ----
+       MPOOL *c_mp;
+       MPOOLFILE *bh_mfp;
+       size_t freed_space;
+!      u_int32_t buckets, buffers, high_priority, priority, put_counter;
+!      u_int32_t total_buckets;
+!      int aggressive, giveup, ret;
+       void *p;
+  
+       dbenv = dbmp->dbenv;
+***************
+*** 59,76 ****
+       dbht = R_ADDR(memreg, c_mp->htab);
+       hp_end = &dbht[c_mp->htab_buckets];
+  
+!      buckets = buffers = 0;
+!      aggressive = 0;
+  
+       c_mp->stat.st_alloc++;
+  
+       /*
+-       * Get aggressive if we've tried to flush the number of pages as are
+-       * in the system without finding space.
+-       */
+-      max_na = 5 * c_mp->htab_buckets;
+- 
+-      /*
+        * If we're allocating a buffer, and the one we're discarding is the
+        * same size, we don't want to waste the time to re-integrate it into
+        * the shared memory free list.  If the DB_MPOOLFILE argument isn't
+--- 59,71 ----
+       dbht = R_ADDR(memreg, c_mp->htab);
+       hp_end = &dbht[c_mp->htab_buckets];
+  
+!      buckets = buffers = put_counter = total_buckets = 0;
+!      aggressive = giveup = 0;
+!      hp_tmp = NULL;
+  
+       c_mp->stat.st_alloc++;
+  
+       /*
+        * If we're allocating a buffer, and the one we're discarding is the
+        * same size, we don't want to waste the time to re-integrate it into
+        * the shared memory free list.  If the DB_MPOOLFILE argument isn't
+***************
+*** 81,99 ****
+               len = (sizeof(BH) - sizeof(u_int8_t)) + mfp->stat.st_pagesize;
+  
+       R_LOCK(dbenv, memreg);
+- 
+-      /*
+-       * On every buffer allocation we update the buffer generation number
+-       * and check for wraparound.
+-       */
+-      if (++c_mp->lru_count == UINT32_T_MAX)
+-              __memp_reset_lru(dbenv, memreg, c_mp);
+- 
+       /*
+        * Anything newer than 1/10th of the buffer pool is ignored during
+        * allocation (unless allocation starts failing).
+        */
+-      DB_ASSERT(c_mp->lru_count > c_mp->stat.st_pages / 10);
+       high_priority = c_mp->lru_count - c_mp->stat.st_pages / 10;
+  
+       /*
+--- 76,85 ----
+***************
+*** 120,129 ****
+                * We're not holding the region locked here, these statistics
+                * can't be trusted.
+                */
+!              if (buckets != 0) {
+!                      if (buckets > c_mp->stat.st_alloc_max_buckets)
+!                              c_mp->stat.st_alloc_max_buckets = buckets;
+!                      c_mp->stat.st_alloc_buckets += buckets;
+               }
+               if (buffers != 0) {
+                       if (buffers > c_mp->stat.st_alloc_max_pages)
+--- 106,116 ----
+                * We're not holding the region locked here, these statistics
+                * can't be trusted.
+                */
+!              total_buckets += buckets;
+!              if (total_buckets != 0) {
+!                      if (total_buckets > c_mp->stat.st_alloc_max_buckets)
+!                              c_mp->stat.st_alloc_max_buckets = total_buckets;
+!                      c_mp->stat.st_alloc_buckets += total_buckets;
+               }
+               if (buffers != 0) {
+                       if (buffers > c_mp->stat.st_alloc_max_pages)
+***************
+*** 131,136 ****
+--- 118,129 ----
+                       c_mp->stat.st_alloc_pages += buffers;
+               }
+               return (0);
++      } else if (giveup || c_mp->stat.st_pages == 0) {
++              R_UNLOCK(dbenv, memreg);
++ 
++              __db_err(dbenv,
++                  "unable to allocate space from the buffer cache");
++              return (ret);
+       }
+  
+       /*
+***************
+*** 138,163 ****
+        * we need.  Reset our free-space counter.
+        */
+       freed_space = 0;
+  
+       /*
+        * Walk the hash buckets and find the next two with potentially useful
+        * buffers.  Free the buffer with the lowest priority from the buckets'
+        * chains.
+        */
+!      for (hp_tmp = NULL;;) {
+               /* Check for wrap around. */
+               hp = &dbht[c_mp->last_checked++];
+               if (hp >= hp_end) {
+                       c_mp->last_checked = 0;
+! 
+!                      /*
+!                       * If we've gone through all of the hash buckets, try
+!                       * an allocation.  If the cache is small, the old page
+!                       * size is small, and the new page size is large, we
+!                       * might have freed enough memory (but not 3 times the
+!                       * memory).
+!                       */
+!                      goto alloc;
+               }
+  
+               /*
+--- 131,154 ----
+        * we need.  Reset our free-space counter.
+        */
+       freed_space = 0;
++      total_buckets += buckets;
++      buckets = 0;
+  
+       /*
+        * Walk the hash buckets and find the next two with potentially useful
+        * buffers.  Free the buffer with the lowest priority from the buckets'
+        * chains.
+        */
+!      for (;;) {
+!              /* All pages have been freed, make one last try */
+!              if (c_mp->stat.st_pages == 0)
+!                      goto alloc;
+! 
+               /* Check for wrap around. */
+               hp = &dbht[c_mp->last_checked++];
+               if (hp >= hp_end) {
+                       c_mp->last_checked = 0;
+!                      hp = &dbht[c_mp->last_checked++];
+               }
+  
+               /*
+***************
+*** 172,210 ****
+               /*
+                * The failure mode is when there are too many buffers we can't
+                * write or there's not enough memory in the system.  We don't
+!               * have a metric for deciding if allocation has no possible way
+!               * to succeed, so we don't ever fail, we assume memory will be
+!               * available if we wait long enough.
+                *
+!               * Get aggressive if we've tried to flush 5 times the number of
+!               * hash buckets as are in the system -- it's possible we have
+!               * been repeatedly trying to flush the same buffers, although
+!               * it's unlikely.  Aggressive means:
+                *
+                * a: set a flag to attempt to flush high priority buffers as
+                *    well as other buffers.
+                * b: sync the mpool to force out queue extent pages.  While we
+                *    might not have enough space for what we want and flushing
+                *    is expensive, why not?
+!               * c: sleep for a second -- hopefully someone else will run and
+!               *    free up some memory.  Try to allocate memory too, in case
+!               *    the other thread returns its memory to the region.
+!               * d: look at a buffer in every hash bucket rather than choose
+                *    the more preferable of two.
+                *
+                * !!!
+                * This test ignores pathological cases like no buffers in the
+                * system -- that shouldn't be possible.
+                */
+!              if ((++buckets % max_na) == 0) {
+!                      aggressive = 1;
+! 
+                       R_UNLOCK(dbenv, memreg);
+  
+!                      (void)__memp_sync_int(
+!                          dbenv, NULL, 0, DB_SYNC_ALLOC, NULL);
+! 
+!                      (void)__os_sleep(dbenv, 1, 0);
+  
+                       R_LOCK(dbenv, memreg);
+                       goto alloc;
+--- 163,221 ----
+               /*
+                * The failure mode is when there are too many buffers we can't
+                * write or there's not enough memory in the system.  We don't
+!               * have a way to know that allocation has no way to succeed.
+!               * We fail if there were no pages returned to the cache after
+!               * we've been trying for a relatively long time.
+                *
+!               * Get aggressive if we've tried to flush the number of hash
+!               * buckets as are in the system and have not found any more
+!               * space.  Aggressive means:
+                *
+                * a: set a flag to attempt to flush high priority buffers as
+                *    well as other buffers.
+                * b: sync the mpool to force out queue extent pages.  While we
+                *    might not have enough space for what we want and flushing
+                *    is expensive, why not?
+!               * c: look at a buffer in every hash bucket rather than choose
+                *    the more preferable of two.
++               * d: start to think about giving up.
++               *
++               * If we get here twice, sleep for a second, hopefully someone
++               * else will run and free up some memory.
++               *
++               * Always try to allocate memory too, in case some other thread
++               * returns its memory to the region.
+                *
+                * !!!
+                * This test ignores pathological cases like no buffers in the
+                * system -- that shouldn't be possible.
+                */
+!              if ((++buckets % c_mp->htab_buckets) == 0) {
+!                      if (freed_space > 0)
+!                              goto alloc;
+                       R_UNLOCK(dbenv, memreg);
+  
+!                      switch (++aggressive) {
+!                      case 1:
+!                              break;
+!                      case 2:
+!                              put_counter = c_mp->put_counter;
+!                              /* FALLTHROUGH */
+!                      case 3:
+!                      case 4:
+!                      case 5:
+!                      case 6:
+!                              (void)__memp_sync_int(
+!                                  dbenv, NULL, 0, DB_SYNC_ALLOC, NULL);
+! 
+!                              (void)__os_sleep(dbenv, 1, 0);
+!                              break;
+!                      default:
+!                              aggressive = 1;
+!                              if (put_counter == c_mp->put_counter)
+!                                      giveup = 1;
+!                              break;
+!                      }
+  
+                       R_LOCK(dbenv, memreg);
+                       goto alloc;
+***************
+*** 277,283 ****
+                * thread may have acquired this buffer and incremented the ref
+                * count after we wrote it, in which case we can't have it.
+                *
+!               * If there's a write error, avoid selecting this buffer again
+                * by making it the bucket's least-desirable buffer.
+                */
+               if (ret != 0 || bhp->ref != 0) {
+--- 288,295 ----
+                * thread may have acquired this buffer and incremented the ref
+                * count after we wrote it, in which case we can't have it.
+                *
+!               * If there's a write error and we're having problems finding
+!               * something to allocate, avoid selecting this buffer again
+                * by making it the bucket's least-desirable buffer.
+                */
+               if (ret != 0 || bhp->ref != 0) {
+***************
+*** 301,306 ****
+--- 313,320 ----
+  
+               freed_space += __db_shsizeof(bhp);
+               __memp_bhfree(dbmp, hp, bhp, 1);
++              if (aggressive > 1)
++                      aggressive = 1;
+  
+               /*
+                * Unlock this hash bucket and re-acquire the region lock. If
+***************
+*** 362,415 ****
+       hp->hash_priority = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+  }
+  
+- /*
+-  * __memp_reset_lru --
+-  *   Reset the cache LRU counter.
+-  */
+- static void
+- __memp_reset_lru(dbenv, memreg, c_mp)
+-      DB_ENV *dbenv;
+-      REGINFO *memreg;
+-      MPOOL *c_mp;
+- {
+-      BH *bhp;
+-      DB_MPOOL_HASH *hp;
+-      int bucket;
+- 
+-      /*
+-       * Update the counter so all future allocations will start at the
+-       * bottom.
+-       */
+-      c_mp->lru_count -= MPOOL_BASE_DECREMENT;
+- 
+-      /* Release the region lock. */
+-      R_UNLOCK(dbenv, memreg);
+- 
+-      /* Adjust the priority of every buffer in the system. */
+-      for (hp = R_ADDR(memreg, c_mp->htab),
+-          bucket = 0; bucket < c_mp->htab_buckets; ++hp, ++bucket) {
+-              /*
+-               * Skip empty buckets.
+-               *
+-               * We can check for empty buckets before locking as we
+-               * only care if the pointer is zero or non-zero.
+-               */
+-              if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
+-                      continue;
+- 
+-              MUTEX_LOCK(dbenv, &hp->hash_mutex);
+-              for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+-                  bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
+-                      if (bhp->priority != UINT32_T_MAX &&
+-                          bhp->priority > MPOOL_BASE_DECREMENT)
+-                              bhp->priority -= MPOOL_BASE_DECREMENT;
+-              MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+-      }
+- 
+-      /* Reacquire the region lock. */
+-      R_LOCK(dbenv, memreg);
+- }
+- 
+  #ifdef DIAGNOSTIC
+  /*
+   * __memp_check_order --
+--- 376,381 ----
+*** dbreg/dbreg_rec.c.orig     2002-08-17 07:22:52.000000000 -0700
+--- dbreg/dbreg_rec.c  2003-11-08 10:59:19.000000000 -0800
+***************
+*** 174,192 ****
+                        * Typically, closes should match an open which means
+                        * that if this is a close, there should be a valid
+                        * entry in the dbentry table when we get here,
+!                       * however there is an exception.  If this is an
+                        * OPENFILES pass, then we may have started from
+                        * a log file other than the first, and the
+                        * corresponding open appears in an earlier file.
+!                       * We can ignore that case, but all others are errors.
+                        */
+                       dbe = &dblp->dbentry[argp->fileid];
+                       if (dbe->dbp == NULL && !dbe->deleted) {
+                               /* No valid entry here. */
+!                              if ((argp->opcode != LOG_CLOSE &&
+!                                  argp->opcode != LOG_RCLOSE) ||
+!                                  (op != DB_TXN_OPENFILES &&
+!                                  op !=DB_TXN_POPENFILES)) {
+                                       __db_err(dbenv,
+                                           "Improper file close at %lu/%lu",
+                                           (u_long)lsnp->file,
+--- 174,193 ----
+                        * Typically, closes should match an open which means
+                        * that if this is a close, there should be a valid
+                        * entry in the dbentry table when we get here,
+!                       * however there are exceptions.  1. If this is an
+                        * OPENFILES pass, then we may have started from
+                        * a log file other than the first, and the
+                        * corresponding open appears in an earlier file.
+!                       * 2. If we are undoing an open on an abort or
+!                       * recovery, it's possible that we failed after
+!                       * the log record, but before we actually entered
+!                       * a handle here.
+                        */
+                       dbe = &dblp->dbentry[argp->fileid];
+                       if (dbe->dbp == NULL && !dbe->deleted) {
+                               /* No valid entry here. */
+!                              if (DB_REDO(op) ||
+!                                  argp->opcode == LOG_CHECKPOINT) {
+                                       __db_err(dbenv,
+                                           "Improper file close at %lu/%lu",
+                                           (u_long)lsnp->file,
+*** env/env_recover.c.orig.1   2002-08-22 14:52:51.000000000 -0700
+--- env/env_recover.c  2003-11-15 08:20:59.000000000 -0800
+***************
+*** 232,243 ****
+        * we'll still need to do a vtruncate based on information we haven't
+        * yet collected.
+        */
+!      if (ret == DB_NOTFOUND) {
+               ret = 0;
+!              if (max_lsn == NULL)
+!                      goto done;
+!      }
+!      if (ret != 0)
+               goto err;
+  
+       hi_txn = txnid;
+--- 232,240 ----
+        * we'll still need to do a vtruncate based on information we haven't
+        * yet collected.
+        */
+!      if (ret == DB_NOTFOUND) 
+               ret = 0;
+!      else if (ret != 0)
+               goto err;
+  
+       hi_txn = txnid;
+***************
+*** 331,337 ****
+  
+       /* Find a low txnid. */
+       ret = 0;
+!      do {
+               /* txnid is after rectype, which is a u_int32. */
+               memcpy(&txnid,
+                   (u_int8_t *)data.data + sizeof(u_int32_t), sizeof(txnid));
+--- 328,334 ----
+  
+       /* Find a low txnid. */
+       ret = 0;
+!      if (hi_txn != 0) do {
+               /* txnid is after rectype, which is a u_int32. */
+               memcpy(&txnid,
+                   (u_int8_t *)data.data + sizeof(u_int32_t), sizeof(txnid));
+***************
+*** 344,354 ****
+        * There are no transactions and we're not recovering to an LSN (see
+        * above), so there is nothing to do.
+        */
+!      if (ret == DB_NOTFOUND) {
+               ret = 0;
+-              if (max_lsn == NULL)
+-                      goto done;
+-      }
+  
+       /* Reset to the first lsn. */
+       if (ret != 0 || (ret = logc->get(logc, &first_lsn, &data, DB_SET)) != 0)
+--- 341,348 ----
+        * There are no transactions and we're not recovering to an LSN (see
+        * above), so there is nothing to do.
+        */
+!      if (ret == DB_NOTFOUND) 
+               ret = 0;
+  
+       /* Reset to the first lsn. */
+       if (ret != 0 || (ret = logc->get(logc, &first_lsn, &data, DB_SET)) != 0)
+***************
+*** 367,372 ****
+--- 361,370 ----
+           txninfo, &data, &first_lsn, &last_lsn, nfiles, 1)) != 0)
+               goto err;
+  
++      /* If there were no transactions, then we can bail out early. */
++      if (hi_txn == 0 && max_lsn == NULL)
++              goto done;
++              
+       /*
+        * Pass #2.
+        *
+***************
+*** 483,488 ****
+--- 481,487 ----
+       if ((ret = __dbreg_close_files(dbenv)) != 0)
+               goto err;
+  
++ done:
+       if (max_lsn != NULL) {
+               region->last_ckp = ((DB_TXNHEAD *)txninfo)->ckplsn;
+  
+***************
+*** 538,544 ****
+               __db_err(dbenv, "Recovery complete at %.24s", ctime(&now));
+               __db_err(dbenv, "%s %lx %s [%lu][%lu]",
+                   "Maximum transaction ID",
+!                  ((DB_TXNHEAD *)txninfo)->maxid,
+                   "Recovery checkpoint",
+                   (u_long)region->last_ckp.file,
+                   (u_long)region->last_ckp.offset);
+--- 537,544 ----
+               __db_err(dbenv, "Recovery complete at %.24s", ctime(&now));
+               __db_err(dbenv, "%s %lx %s [%lu][%lu]",
+                   "Maximum transaction ID",
+!                  txninfo == NULL ? TXN_MINIMUM :
+!                      ((DB_TXNHEAD *)txninfo)->maxid,
+                   "Recovery checkpoint",
+                   (u_long)region->last_ckp.file,
+                   (u_long)region->last_ckp.offset);
+***************
+*** 550,556 ****
+                   (u_long)lsn.file, (u_long)lsn.offset, pass);
+       }
+  
+- done:
+  err: if (lockid != DB_LOCK_INVALIDID) {
+               if ((t_ret = __rep_unlockpages(dbenv, lockid)) != 0 && ret == 0)
+                       ret = t_ret;
+--- 550,555 ----
diff --git a/patch.4.1.25.3 b/patch.4.1.25.3
new file mode 100644 (file)
index 0000000..5b370ea
--- /dev/null
@@ -0,0 +1,50 @@
+*** mp/mp_fget.c.orig  2002-08-07 08:23:01.000000000 -0700
+--- mp/mp_fget.c       2006-05-30 20:32:20.000000000 -0700
+***************
+*** 506,513 ****
+        */
+       if (state != SECOND_MISS && bhp->ref == 1) {
+               bhp->priority = UINT32_T_MAX;
+!              SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
+!              SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
+               hp->hash_priority =
+                   SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+       }
+--- 506,517 ----
+        */
+       if (state != SECOND_MISS && bhp->ref == 1) {
+               bhp->priority = UINT32_T_MAX;
+!              /* Move the buffer if there are others in the bucket. */
+!              if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) != bhp
+!                  || SH_TAILQ_NEXT(bhp, hq, __bh) != NULL) {
+!                      SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
+!                      SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
+!              }
+               hp->hash_priority =
+                   SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+       }
+*** mp/mp_fput.c.orig  2002-08-13 06:26:41.000000000 -0700
+--- mp/mp_fput.c       2006-05-30 20:55:11.000000000 -0700
+***************
+*** 166,171 ****
+--- 166,176 ----
+        * to the correct position in the list.
+        */
+       argbhp = bhp;
++      /* Move the buffer if there are others in the bucket. */
++      if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == bhp
++          && SH_TAILQ_NEXT(bhp, hq, __bh) != NULL)
++              goto done;
++ 
+       SH_TAILQ_REMOVE(&hp->hash_bucket, argbhp, hq, __bh);
+  
+       prev = NULL;
+***************
+*** 178,183 ****
+--- 183,189 ----
+       else
+               SH_TAILQ_INSERT_AFTER(&hp->hash_bucket, prev, argbhp, hq, __bh);
+  
++ done:
+       /* Reset the hash bucket's priority. */
+       hp->hash_priority = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
This page took 0.103961 seconds and 4 git commands to generate.