%bcond_with accelio # Accelio transport support [needs update for internal API changes]
%bcond_with cryptopp # use cryptopp instead of NSS crypto/SSL
%bcond_with dpdk # DPDK messaging (requires cryptopp instead of nss)
-%bcond_with fcgi # RADOS Gateway FCGI frontend
-%bcond_with fio # FIO engines support
+%bcond_without fcgi # RADOS Gateway FCGI frontend
+%bcond_without fio # FIO engines support
%bcond_without pmem # PMDK (persistent memory) support
%bcond_with spdk # Ceph SPDK support (DPDK based)
%bcond_without system_rocksdb # system RocksDB storage support
-%bcond_with zfs # ZFS support [not ready for zfs 0.8.x]
+%bcond_without zfs # ZFS support (zfs patch ports code to the 0.8.x API; needs zfs-devel >= 0.8.0)
%bcond_without lttng # LTTng tracing
%bcond_without babeltrace # Babeltrace traces support
%bcond_without tcmalloc # tcmalloc allocator
Summary(pl.UTF-8): Działające w przestrzeni użytkownika elementy systemu plików Ceph
Name: ceph
Version: 12.2.13
-Release: 1
+Release: 4
License: LGPL v2.1 (libraries), GPL v2 (some programs)
Group: Base
Source0: http://download.ceph.com/tarballs/%{name}-%{version}.tar.gz
# Source0-md5: 38bd01cf8224c9ca081298e19ab6e5a1
Source1: ceph.sysconfig
-Source2: cephctl
Source3: ceph.tmpfiles
Patch0: %{name}-init-fix.patch
-Patch1: %{name}.logrotate.patch
Patch2: boost.patch
Patch3: %{name}-python.patch
Patch4: %{name}-types.patch
Patch5: %{name}-tcmalloc.patch
Patch6: %{name}-rocksdb.patch
Patch7: %{name}-fcgi.patch
+Patch8: %{name}-fio.patch
+Patch9: %{name}-zfs.patch
+Patch10: %{name}-includes.patch
URL: https://ceph.io/
%{?with_accelio:BuildRequires: accelio-devel}
%{?with_babeltrace:BuildRequires: babeltrace-devel}
BuildRequires: boost-devel >= 1.66
+BuildRequires: boost-python-devel >= 1.66
BuildRequires: cmake >= 2.8.11
%{?with_cryptopp:BuildRequires: cryptopp-devel}
BuildRequires: curl-devel
BuildRequires: python-devel >= 1:2.7
BuildRequires: python-Cython
BuildRequires: python3-devel >= 1:3.2
-%{?with_rocksdb:BuildRequires: rocksdb-devel >= 3.0.0}
+BuildRequires: python3-Cython
+# upstream needs only rocksdb >= 3.0.0; the rocksdb patch adapts the code to the 5.6.0 API, hence the higher BuildRequires
+%{?with_system_rocksdb:BuildRequires: rocksdb-devel >= 5.6.0}
BuildRequires: rpmbuild(macros) >= 1.671
BuildRequires: sed >= 4.0
BuildRequires: snappy-devel
%ifarch %{x8664}
BuildRequires: yasm
%endif
-%{?with_zfs:BuildRequires: zfs-devel}
+# zfs patch updates to 0.8.0 API
+%{?with_zfs:BuildRequires: zfs-devel >= 0.8.0}
BuildRequires: zlib-devel
Requires(post,preun): /sbin/chkconfig
Requires(preun): rc-scripts
Obsoletes: hadoop-cephfs
BuildRoot: %{tmpdir}/%{name}-%{version}-root-%(id -u -n)
-%define skip_post_check_so libceph_lz4.so.* libceph_snappy.so.* libceph_zlib.so.* libceph_zstd.so.* libcls_.*.so.* libec_.*.so.*
+%define skip_post_check_so libceph_crypto_isal.so.* libceph_lz4.so.* libceph_snappy.so.* libceph_zlib.so.* libceph_zstd.so.* libcls_.*.so.* libec_.*.so.*
%description
Ceph is a distributed network file system designed to provide
%description resource-agents -l pl.UTF-8
Agenci OCF do monitorowania procesów Cepha.
+%package -n fio-ceph-objectstore
+Summary: FIO engine module for Ceph ObjectStore
+Summary(pl.UTF-8): Moduł silnika FIO do używania Ceph ObjectStore
+Group: Libraries
+Requires: %{name}-libs = %{version}-%{release}
+%if %{with fio}
+# NOTE(review): %%requires_ge_to presumably generates "Requires: fio >= <version of fio-devel
+# present at build time>" — verify against the rpmbuild(macros) definition
+%requires_ge_to fio fio-devel
+%endif
+
+%description -n fio-ceph-objectstore
+This FIO engine allows you to mount and use a ceph object store
+directly, without having to build a ceph cluster or start any daemons.
+
+%description -n fio-ceph-objectstore -l pl.UTF-8
+Ten silnik FIO pozwala na bezpośrednie montowanie i używanie
+przestrzeni obiektów ceph, bez potrzeby budowania klastra ceph czy
+uruchamiania demonów.
+
%prep
%setup -q
# logrotate patch (Patch1) dropped together with its Patch1: definition above;
# fio, zfs and includes patches (Patch8-10) added below
%patch0 -p1
-%patch1 -p1
%patch2 -p0
%patch3 -p1
%patch4 -p1
%patch5 -p1
%patch6 -p1
%patch7 -p1
+%patch8 -p1
+%patch9 -p1
+%patch10 -p1
%{__sed} -i -e '1s,/usr/bin/env python$,%{__python},' \
src/{ceph-create-keys,ceph-rest-api,mount.fuse.ceph} \
%{?with_system_rocksdb:-DWITH_SYSTEM_ROCKSDB=ON} \
-DWITH_SYSTEMD=ON \
%{?with_accelio:-DWITH_XIO=ON} \
- %{?with_zfs:-DWITH_ZFS=ON}
+ %{?with_zfs:-DWITH_ZFS=ON} \
+ -DWITH_REENTRANT_STRSIGNAL=ON
%{__make}
cp -p src/logrotate.conf $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/ceph
cp -p %{SOURCE1} $RPM_BUILD_ROOT/etc/sysconfig/ceph
-install %{SOURCE2} $RPM_BUILD_ROOT%{_bindir}
ln -sf /dev/null $RPM_BUILD_ROOT%{systemdunitdir}/ceph.service
cp -p %{SOURCE3} $RPM_BUILD_ROOT%{systemdtmpfilesdir}/ceph.conf
%attr(755,root,root) %{_bindir}/ceph_smalliobenchrbd
%attr(755,root,root) %{_bindir}/ceph_tpbench
%attr(755,root,root) %{_bindir}/ceph_xattr_bench
-%attr(755,root,root) %{_bindir}/cephctl
%attr(755,root,root) %{_bindir}/cephfs-data-scan
%attr(755,root,root) %{_bindir}/cephfs-journal-tool
%attr(755,root,root) %{_bindir}/cephfs-table-tool
%if "%{_libexecdir}" != "%{_libdir}"
%dir %{_libexecdir}/ceph
%endif
-%attr(755,root,root) %{_libexecdir}/ceph/ceph-monstore-update-crush.sh
+%{_libexecdir}/ceph/ceph_common.sh
%attr(755,root,root) %{_libexecdir}/ceph/ceph-osd-prestart.sh
-%{_libdir}/ceph/ceph_common.sh
+%attr(755,root,root) %{_libdir}/ceph/ceph-monstore-update-crush.sh
%{_libdir}/ceph/mgr
%dir %{_libdir}/ceph/compressor
%attr(755,root,root) %{_libdir}/ceph/compressor/libceph_lz4.so*
%attr(755,root,root) %{_libdir}/ceph/compressor/libceph_snappy.so*
%attr(755,root,root) %{_libdir}/ceph/compressor/libceph_zlib.so*
%attr(755,root,root) %{_libdir}/ceph/compressor/libceph_zstd.so*
+%ifarch %{x8664}
+%dir %{_libdir}/ceph/crypto
+%attr(755,root,root) %{_libdir}/ceph/crypto/libceph_crypto_isal.so*
+%endif
%dir %{_libdir}/ceph/erasure-code
%ifarch %{x8664}
%attr(755,root,root) %{_libdir}/ceph/erasure-code/libec_isa.so*
%defattr(644,root,root,755)
%dir %{_prefix}/lib/ocf/resource.d/ceph
%attr(755,root,root) %{_prefix}/lib/ocf/resource.d/ceph/rbd
+
+# fio engine plugin: packaged only when the fio bcond is enabled
+%if %{with fio}
+%files -n fio-ceph-objectstore
+%defattr(644,root,root,755)
+%doc src/test/fio/{README.md,ceph-*.conf,ceph-*.fio}
+%attr(755,root,root) %{_libdir}/libfio_ceph_objectstore.so
+%endif