types.patch — packages/ceph.git (PLD Linux, git.pld-linux.org)

Fix size_t / uint64_t type mismatches (ambiguous overloads and template
argument deduction failures) that break the Ceph build with gcc 11+.
--- ceph-16.2.7/src/SimpleRADOSStriper.cc~	2021-12-07 17:15:49.000000000 +0100
+++ ceph-16.2.7/src/SimpleRADOSStriper.cc	2022-02-12 21:59:28.261721940 +0100
@@ -140,7 +140,7 @@
   return 0;
 }
 
-int SimpleRADOSStriper::truncate(uint64_t size)
+int SimpleRADOSStriper::truncate(size_t size)
 {
   d(5) << size << dendl;
 
--- ceph-17.2.7/src/os/bluestore/BlueFS.cc.orig	2024-02-24 21:45:42.755706899 +0100
+++ ceph-17.2.7/src/os/bluestore/BlueFS.cc	2024-02-24 21:51:20.641018002 +0100
@@ -4617,7 +4617,7 @@ size_t BlueFS::probe_alloc_avail(int dev
     if (dist_to_alignment >= len)
       return;
     len -= dist_to_alignment;
-    total += p2align(len, alloc_size);
+    total += p2align((uint64_t)len, alloc_size);
   };
   if (alloc[dev]) {
     alloc[dev]->foreach(iterated_allocation);
--- ceph-16.2.7/src/tools/neorados.cc~	2021-12-07 17:15:49.000000000 +0100
+++ ceph-16.2.7/src/tools/neorados.cc	2022-02-12 22:23:25.836643956 +0100
@@ -205,7 +205,7 @@
 
   std::size_t off = 0;
   ceph::buffer::list bl;
-  while (auto toread = std::max(len - off, io_size)) {
+  while (auto toread = std::max(len - off, (uint64_t)io_size)) {
     R::ReadOp op;
     op.read(off, toread, &bl);
     r.execute(obj, pool, std::move(op), nullptr, y[ec]);
--- ceph-16.2.7/src/tools/cephfs_mirror/FSMirror.cc.orig	2021-12-07 17:15:49.000000000 +0100
+++ ceph-16.2.7/src/tools/cephfs_mirror/FSMirror.cc	2022-02-12 22:30:46.487298972 +0100
@@ -345,7 +345,7 @@
   std::scoped_lock locker(m_lock);
   m_directories.emplace(dir_path);
   m_service_daemon->add_or_update_fs_attribute(m_filesystem.fscid, SERVICE_DAEMON_DIR_COUNT_KEY,
-                                               m_directories.size());
+                                               (uint64_t)m_directories.size());
 
   for (auto &[peer, peer_replayer] : m_peer_replayers) {
     dout(10) << ": peer=" << peer << dendl;
@@ -363,7 +363,7 @@
   if (it != m_directories.end()) {
     m_directories.erase(it);
     m_service_daemon->add_or_update_fs_attribute(m_filesystem.fscid, SERVICE_DAEMON_DIR_COUNT_KEY,
-                                                 m_directories.size());
+                                                 (uint64_t)m_directories.size());
     for (auto &[peer, peer_replayer] : m_peer_replayers) {
       dout(10) << ": peer=" << peer << dendl;
       peer_replayer->remove_directory(dir_path);
--- ceph-17.2.3/src/seastar/src/core/file.cc.orig	2021-12-19 23:02:10.000000000 +0100
+++ ceph-17.2.3/src/seastar/src/core/file.cc	2022-08-28 09:19:17.258501014 +0200
@@ -313,7 +313,7 @@ posix_file_impl::close() noexcept {
 
 future<uint64_t>
 blockdev_file_impl::size(void) noexcept {
-    return engine()._thread_pool->submit<syscall_result_extra<size_t>>([this] {
+    return engine()._thread_pool->submit<syscall_result_extra<uint64_t>>([this] {
         uint64_t size;
         int ret = ::ioctl(_fd, BLKGETSIZE64, &size);
         return wrap_syscall(ret, size);
@@ -908,7 +908,7 @@ append_challenged_posix_file_impl::trunc
 
 future<uint64_t>
 append_challenged_posix_file_impl::size() noexcept {
-    return make_ready_future<size_t>(_logical_size);
+    return make_ready_future<uint64_t>(_logical_size);
 }
 
 future<>
--- ceph-17.2.3/src/seastar/src/core/fstream.cc.orig	2021-12-19 23:02:10.000000000 +0100
+++ ceph-17.2.3/src/seastar/src/core/fstream.cc	2022-08-28 09:22:32.072057177 +0200
@@ -419,7 +419,7 @@ private:
         if ((buf.size() & (_file.disk_write_dma_alignment() - 1)) != 0) {
             // If buf size isn't aligned, copy its content into a new aligned buf.
             // This should only happen when the user calls output_stream::flush().
-            auto tmp = allocate_buffer(align_up(buf.size(), _file.disk_write_dma_alignment()));
+            auto tmp = allocate_buffer(align_up<uint64_t>(buf.size(), _file.disk_write_dma_alignment()));
             ::memcpy(tmp.get_write(), buf.get(), buf.size());
             ::memset(tmp.get_write() + buf.size(), 0, tmp.size() - buf.size());
             buf = std::move(tmp);
--- ceph-18.2.3/src/osd/OSDMap.cc.orig	2024-04-24 21:57:21.000000000 +0200
+++ ceph-18.2.3/src/osd/OSDMap.cc	2024-06-01 18:51:51.915566430 +0200
@@ -6170,14 +6170,14 @@ int OSDMap::calc_read_balance_score(Ceph
     }
     if (prim_pgs_by_osd.count(osd)) {
       auto n_prims = prim_pgs_by_osd.at(osd).size();
-      max_prims_per_osd = std::max(max_prims_per_osd, n_prims);
+      max_prims_per_osd = std::max<uint64_t>(max_prims_per_osd, n_prims);
       if (osd_pa == 0.) {
         prim_on_zero_pa = true;
       }
     }
     if (acting_prims_by_osd.count(osd)) {
       auto n_aprims = acting_prims_by_osd.at(osd).size();
-      max_acting_prims_per_osd = std::max(max_acting_prims_per_osd, n_aprims);
+      max_acting_prims_per_osd = std::max<uint64_t>(max_acting_prims_per_osd, n_aprims);
       if (osd_pa != 0.) {
         max_osd_score = std::max(max_osd_score, float(n_aprims) / osd_pa);
       }
--- ceph-18.2.3/src/cls/rgw/cls_rgw_types.h.orig	2024-04-24 21:57:21.000000000 +0200
+++ ceph-18.2.3/src/cls/rgw/cls_rgw_types.h	2024-06-01 20:43:58.381041708 +0200
@@ -1226,7 +1226,7 @@ struct cls_rgw_lc_obj_head
     uint64_t t = start_date;
     encode(t, bl);
     encode(marker, bl);
-    encode(shard_rollover_date, bl);
+    encode(static_cast<uint64_t>(shard_rollover_date), bl);
     ENCODE_FINISH(bl);
   }
 