]>
Commit | Line | Data |
---|---|---|
19c20e80 JR |
1 | diff -ur ceph-0.94.10/src/client/SyntheticClient.cc ceph-0.94.10-cxx/src/client/SyntheticClient.cc |
2 | --- ceph-0.94.10/src/client/SyntheticClient.cc 2017-02-21 16:13:19.000000000 +0100 | |
3 | +++ ceph-0.94.10-cxx/src/client/SyntheticClient.cc 2017-04-29 12:25:26.633497617 +0200 | |
4 | @@ -601,7 +601,7 @@ | |
5 | int size = iargs.front(); iargs.pop_front(); | |
6 | int inflight = iargs.front(); iargs.pop_front(); | |
7 | if (run_me()) { | |
8 | - dout(2) << "createobjects " << cout << " of " << size << " bytes" | |
9 | + dout(2) << "createobjects " << count << " of " << size << " bytes" | |
10 | << ", " << inflight << " in flight" << dendl; | |
11 | create_objects(count, size, inflight); | |
12 | } | |
13 | @@ -617,7 +617,7 @@ | |
14 | int rskew = iargs.front(); iargs.pop_front(); | |
15 | int wskew = iargs.front(); iargs.pop_front(); | |
16 | if (run_me()) { | |
17 | - dout(2) << "objectrw " << cout << " " << size << " " << wrpc | |
18 | + dout(2) << "objectrw " << count << " " << size << " " << wrpc | |
19 | << " " << overlap << " " << rskew << " " << wskew << dendl; | |
20 | object_rw(count, size, wrpc, overlap, rskew, wskew); | |
21 | } | |
22 | diff -ur ceph-0.94.10/src/common/admin_socket.cc ceph-0.94.10-cxx/src/common/admin_socket.cc | |
23 | --- ceph-0.94.10/src/common/admin_socket.cc 2017-02-21 16:13:19.000000000 +0100 | |
24 | +++ ceph-0.94.10-cxx/src/common/admin_socket.cc 2017-04-29 09:57:25.062251956 +0200 | |
25 | @@ -352,7 +352,7 @@ | |
26 | stringstream errss; | |
27 | cmdvec.push_back(cmd); | |
28 | if (!cmdmap_from_json(cmdvec, &cmdmap, errss)) { | |
29 | - ldout(m_cct, 0) << "AdminSocket: " << errss << dendl; | |
30 | + ldout(m_cct, 0) << "AdminSocket: " << errss.rdbuf() << dendl; | |
31 | return false; | |
32 | } | |
33 | cmd_getval(m_cct, cmdmap, "format", format); | |
34 | diff -ur ceph-0.94.10/src/common/ceph_json.h ceph-0.94.10-cxx/src/common/ceph_json.h | |
35 | --- ceph-0.94.10/src/common/ceph_json.h 2017-02-21 16:13:19.000000000 +0100 | |
36 | +++ ceph-0.94.10-cxx/src/common/ceph_json.h 2017-04-29 11:13:38.925899980 +0200 | |
37 | @@ -198,7 +198,7 @@ | |
38 | JSONObj *o = *iter; | |
39 | JSONDecoder::decode_json("key", key, o); | |
40 | JSONDecoder::decode_json("val", val, o); | |
41 | - m.insert(make_pair<K, V>(key, val)); | |
42 | + m.insert(make_pair(key, val)); | |
43 | } | |
44 | } | |
45 | ||
46 | diff -ur ceph-0.94.10/src/common/cmdparse.cc ceph-0.94.10-cxx/src/common/cmdparse.cc | |
47 | --- ceph-0.94.10/src/common/cmdparse.cc 2017-02-21 16:13:19.000000000 +0100 | |
48 | +++ ceph-0.94.10-cxx/src/common/cmdparse.cc 2017-04-29 09:58:13.362841243 +0200 | |
49 | @@ -224,7 +224,7 @@ | |
50 | BackTrace bt(1); | |
51 | ostringstream oss; | |
52 | bt.print(oss); | |
53 | - lderr(cct) << oss << dendl; | |
54 | + lderr(cct) << oss.rdbuf() << dendl; | |
55 | if (status == 0) | |
56 | free((char *)typestr); | |
57 | } | |
58 | diff -ur ceph-0.94.10/src/common/LogClient.h ceph-0.94.10-cxx/src/common/LogClient.h | |
59 | --- ceph-0.94.10/src/common/LogClient.h 2017-02-21 16:13:19.000000000 +0100 | |
60 | +++ ceph-0.94.10-cxx/src/common/LogClient.h 2017-04-29 09:55:08.930587122 +0200 | |
61 | @@ -137,7 +137,7 @@ | |
62 | } | |
63 | bool must_log_to_monitors() { return log_to_monitors; } | |
64 | ||
65 | - typedef shared_ptr<LogChannel> Ref; | |
66 | + typedef std::shared_ptr<LogChannel> Ref; | |
67 | ||
68 | /** | |
69 | * update config values from parsed k/v map for each config option | |
70 | diff -ur ceph-0.94.10/src/mds/CInode.cc ceph-0.94.10-cxx/src/mds/CInode.cc | |
71 | --- ceph-0.94.10/src/mds/CInode.cc 2017-02-21 16:13:19.000000000 +0100 | |
72 | +++ ceph-0.94.10-cxx/src/mds/CInode.cc 2017-04-29 10:44:49.563391124 +0200 | |
73 | @@ -3791,7 +3791,7 @@ | |
74 | f->dump_int("read_ret_val", backtrace.ondisk_read_retval); | |
75 | f->dump_stream("ondisk_value") << backtrace.ondisk_value; | |
76 | f->dump_stream("memoryvalue") << backtrace.memory_value; | |
77 | - f->dump_stream("error_str") << backtrace.error_str; | |
78 | + f->dump_stream("error_str") << backtrace.error_str.rdbuf(); | |
79 | } | |
80 | f->close_section(); // backtrace | |
81 | f->open_object_section("raw_rstats"); | |
82 | @@ -3801,7 +3801,7 @@ | |
83 | f->dump_int("read_ret_val", raw_rstats.ondisk_read_retval); | |
84 | f->dump_stream("ondisk_value") << raw_rstats.ondisk_value; | |
85 | f->dump_stream("memory_value") << raw_rstats.memory_value; | |
86 | - f->dump_stream("error_str") << raw_rstats.error_str; | |
87 | + f->dump_stream("error_str") << raw_rstats.error_str.rdbuf(); | |
88 | } | |
89 | f->close_section(); // raw_rstats | |
90 | // dump failure return code | |
91 | diff -ur ceph-0.94.10/src/mds/mdstypes.h ceph-0.94.10-cxx/src/mds/mdstypes.h | |
92 | --- ceph-0.94.10/src/mds/mdstypes.h 2017-02-21 16:13:19.000000000 +0100 | |
93 | +++ ceph-0.94.10-cxx/src/mds/mdstypes.h 2017-04-29 11:15:44.723963614 +0200 | |
94 | @@ -71,7 +71,7 @@ | |
95 | #define MDS_TRAVERSE_DISCOVERXLOCK 3 // succeeds on (foreign?) null, xlocked dentries. | |
96 | ||
97 | ||
98 | -BOOST_STRONG_TYPEDEF(int32_t, mds_rank_t) | |
99 | +typedef int32_t mds_rank_t; | |
100 | BOOST_STRONG_TYPEDEF(uint64_t, mds_gid_t) | |
101 | extern const mds_gid_t MDS_GID_NONE; | |
102 | extern const mds_rank_t MDS_RANK_NONE; | |
103 | diff -ur ceph-0.94.10/src/mon/OSDMonitor.cc ceph-0.94.10-cxx/src/mon/OSDMonitor.cc | |
104 | --- ceph-0.94.10/src/mon/OSDMonitor.cc 2017-02-21 16:13:19.000000000 +0100 | |
105 | +++ ceph-0.94.10-cxx/src/mon/OSDMonitor.cc 2017-04-29 10:17:06.705748922 +0200 | |
106 | @@ -4401,7 +4401,7 @@ | |
107 | if (err == 0) { | |
108 | k = erasure_code->get_data_chunk_count(); | |
109 | } else { | |
110 | - ss << __func__ << " get_erasure_code failed: " << tmp; | |
111 | + ss << __func__ << " get_erasure_code failed: " << tmp.rdbuf(); | |
112 | return err;; | |
113 | } | |
114 | ||
115 | diff -ur ceph-0.94.10/src/msg/simple/Pipe.cc ceph-0.94.10-cxx/src/msg/simple/Pipe.cc | |
116 | --- ceph-0.94.10/src/msg/simple/Pipe.cc 2017-02-21 16:13:19.000000000 +0100 | |
117 | +++ ceph-0.94.10-cxx/src/msg/simple/Pipe.cc 2017-04-29 10:05:09.937087135 +0200 | |
118 | @@ -181,7 +181,7 @@ | |
119 | ||
120 | void Pipe::DelayedDelivery::discard() | |
121 | { | |
122 | - lgeneric_subdout(pipe->msgr->cct, ms, 20) << pipe->_pipe_prefix(_dout) << "DelayedDelivery::discard" << dendl; | |
123 | + lgeneric_subdout(pipe->msgr->cct, ms, 20) << pipe->_pipe_prefix(_dout).rdbuf() << "DelayedDelivery::discard" << dendl; | |
124 | Mutex::Locker l(delay_lock); | |
125 | while (!delay_queue.empty()) { | |
126 | Message *m = delay_queue.front().second; | |
127 | @@ -193,7 +193,7 @@ | |
128 | ||
129 | void Pipe::DelayedDelivery::flush() | |
130 | { | |
131 | - lgeneric_subdout(pipe->msgr->cct, ms, 20) << pipe->_pipe_prefix(_dout) << "DelayedDelivery::flush" << dendl; | |
132 | + lgeneric_subdout(pipe->msgr->cct, ms, 20) << pipe->_pipe_prefix(_dout).rdbuf() << "DelayedDelivery::flush" << dendl; | |
133 | Mutex::Locker l(delay_lock); | |
134 | flush_count = delay_queue.size(); | |
135 | delay_cond.Signal(); | |
136 | @@ -202,11 +202,11 @@ | |
137 | void *Pipe::DelayedDelivery::entry() | |
138 | { | |
139 | Mutex::Locker locker(delay_lock); | |
140 | - lgeneric_subdout(pipe->msgr->cct, ms, 20) << pipe->_pipe_prefix(_dout) << "DelayedDelivery::entry start" << dendl; | |
141 | + lgeneric_subdout(pipe->msgr->cct, ms, 20) << pipe->_pipe_prefix(_dout).rdbuf() << "DelayedDelivery::entry start" << dendl; | |
142 | ||
143 | while (!stop_delayed_delivery) { | |
144 | if (delay_queue.empty()) { | |
145 | - lgeneric_subdout(pipe->msgr->cct, ms, 30) << pipe->_pipe_prefix(_dout) << "DelayedDelivery::entry sleeping on delay_cond because delay queue is empty" << dendl; | |
146 | + lgeneric_subdout(pipe->msgr->cct, ms, 30) << pipe->_pipe_prefix(_dout).rdbuf() << "DelayedDelivery::entry sleeping on delay_cond because delay queue is empty" << dendl; | |
147 | delay_cond.Wait(delay_lock); | |
148 | continue; | |
149 | } | |
150 | @@ -216,11 +216,11 @@ | |
151 | if (!flush_count && | |
152 | (release > ceph_clock_now(pipe->msgr->cct) && | |
153 | (delay_msg_type.empty() || m->get_type_name() == delay_msg_type))) { | |
154 | - lgeneric_subdout(pipe->msgr->cct, ms, 10) << pipe->_pipe_prefix(_dout) << "DelayedDelivery::entry sleeping on delay_cond until " << release << dendl; | |
155 | + lgeneric_subdout(pipe->msgr->cct, ms, 10) << pipe->_pipe_prefix(_dout).rdbuf() << "DelayedDelivery::entry sleeping on delay_cond until " << release << dendl; | |
156 | delay_cond.WaitUntil(delay_lock, release); | |
157 | continue; | |
158 | } | |
159 | - lgeneric_subdout(pipe->msgr->cct, ms, 10) << pipe->_pipe_prefix(_dout) << "DelayedDelivery::entry dequeuing message " << m << " for delivery, past " << release << dendl; | |
160 | + lgeneric_subdout(pipe->msgr->cct, ms, 10) << pipe->_pipe_prefix(_dout).rdbuf() << "DelayedDelivery::entry dequeuing message " << m << " for delivery, past " << release << dendl; | |
161 | delay_queue.pop_front(); | |
162 | if (flush_count > 0) { | |
163 | --flush_count; | |
164 | @@ -245,7 +245,7 @@ | |
165 | } | |
166 | active_flush = false; | |
167 | } | |
168 | - lgeneric_subdout(pipe->msgr->cct, ms, 20) << pipe->_pipe_prefix(_dout) << "DelayedDelivery::entry stop" << dendl; | |
169 | + lgeneric_subdout(pipe->msgr->cct, ms, 20) << pipe->_pipe_prefix(_dout).rdbuf() << "DelayedDelivery::entry stop" << dendl; | |
170 | return NULL; | |
171 | } | |
172 | ||
173 | diff -ur ceph-0.94.10/src/osd/PG.cc ceph-0.94.10-cxx/src/osd/PG.cc | |
174 | --- ceph-0.94.10/src/osd/PG.cc 2017-02-21 16:13:19.000000000 +0100 | |
175 | +++ ceph-0.94.10-cxx/src/osd/PG.cc 2017-04-29 10:54:40.743267753 +0200 | |
176 | @@ -3144,7 +3144,7 @@ | |
177 | info_struct_v < 8 ? OSD::make_pg_log_oid(pg_id) : pgmeta_oid, | |
178 | info, oss); | |
179 | if (oss.str().length()) | |
180 | - osd->clog->error() << oss; | |
181 | + osd->clog->error() << oss.rdbuf(); | |
182 | ||
183 | // log any weirdness | |
184 | log_weirdness(); | |
185 | diff -ur ceph-0.94.10/src/rgw/rgw_cache.cc ceph-0.94.10-cxx/src/rgw/rgw_cache.cc | |
186 | --- ceph-0.94.10/src/rgw/rgw_cache.cc 2017-02-21 16:13:19.000000000 +0100 | |
187 | +++ ceph-0.94.10-cxx/src/rgw/rgw_cache.cc 2017-04-29 11:09:17.823000713 +0200 | |
188 | @@ -105,7 +105,7 @@ | |
189 | for (liter = cache_entry_list.begin(); liter != cache_entry_list.end(); ++liter) { | |
190 | ObjectCacheEntry *entry = *liter; | |
191 | ||
192 | - entry->chained_entries.push_back(make_pair<RGWChainedCache *, string>(chained_entry->cache, chained_entry->key)); | |
193 | + entry->chained_entries.push_back(make_pair(chained_entry->cache, chained_entry->key)); | |
194 | } | |
195 | ||
196 | return true; | |
197 | diff -ur ceph-0.94.10/src/test/librbd/test_ImageWatcher.cc ceph-0.94.10-cxx/src/test/librbd/test_ImageWatcher.cc | |
198 | --- ceph-0.94.10/src/test/librbd/test_ImageWatcher.cc 2017-02-21 16:13:19.000000000 +0100 | |
199 | +++ ceph-0.94.10-cxx/src/test/librbd/test_ImageWatcher.cc 2017-04-29 11:58:28.225636783 +0200 | |
200 | @@ -383,8 +383,7 @@ | |
201 | ASSERT_EQ(0, open_image(m_image_name, &ictx)); | |
202 | ||
203 | ASSERT_EQ(0, register_image_watch(*ictx)); | |
204 | - m_notify_acks = boost::assign::list_of( | |
205 | - std::make_pair(NOTIFY_OP_ACQUIRED_LOCK, bufferlist())); | |
206 | + m_notify_acks = {{NOTIFY_OP_ACQUIRED_LOCK, {}}}; | |
207 | ||
208 | { | |
209 | RWLock::WLocker l(ictx->owner_lock); | |
210 | @@ -474,8 +473,7 @@ | |
211 | ASSERT_EQ(0, open_image(m_image_name, &ictx)); | |
212 | ||
213 | ASSERT_EQ(0, register_image_watch(*ictx)); | |
214 | - m_notify_acks = boost::assign::list_of( | |
215 | - std::make_pair(NOTIFY_OP_ACQUIRED_LOCK, bufferlist())); | |
216 | + m_notify_acks = {{NOTIFY_OP_ACQUIRED_LOCK, {}}}; | |
217 | ||
218 | { | |
219 | RWLock::WLocker l(ictx->owner_lock); | |
220 | @@ -528,8 +526,7 @@ | |
221 | ASSERT_EQ(0, lock_image(*ictx, LOCK_EXCLUSIVE, | |
222 | "auto " + stringify(m_watch_ctx->get_handle()))); | |
223 | ||
224 | - m_notify_acks = boost::assign::list_of( | |
225 | - std::make_pair(NOTIFY_OP_REQUEST_LOCK, create_response_message(0))); | |
226 | + m_notify_acks = {{NOTIFY_OP_REQUEST_LOCK, create_response_message(0)}}; | |
227 | ||
228 | { | |
229 | RWLock::WLocker l(ictx->owner_lock); | |
230 | @@ -549,9 +546,7 @@ | |
231 | ASSERT_EQ(0, unlock_image()); | |
232 | ||
233 | m_notifies.clear(); | |
234 | - m_notify_acks = boost::assign::list_of( | |
235 | - std::make_pair(NOTIFY_OP_RELEASED_LOCK, bufferlist()))( | |
236 | - std::make_pair(NOTIFY_OP_ACQUIRED_LOCK, bufferlist())); | |
237 | + m_notify_acks = {{NOTIFY_OP_RELEASED_LOCK, {}}, {NOTIFY_OP_ACQUIRED_LOCK, {}}}; | |
238 | ||
239 | bufferlist bl; | |
240 | { | |
241 | @@ -578,8 +573,7 @@ | |
242 | ASSERT_EQ(0, lock_image(*ictx, LOCK_EXCLUSIVE, | |
243 | "auto " + stringify(m_watch_ctx->get_handle()))); | |
244 | ||
245 | - m_notify_acks = boost::assign::list_of( | |
246 | - std::make_pair(NOTIFY_OP_REQUEST_LOCK, bufferlist())); | |
247 | + m_notify_acks = {{NOTIFY_OP_REQUEST_LOCK, {}}}; | |
248 | ||
249 | m_expected_aio_restarts = 1; | |
250 | { | |
251 | @@ -606,8 +600,7 @@ | |
252 | ASSERT_EQ(0, lock_image(*ictx, LOCK_EXCLUSIVE, | |
253 | "auto " + stringify(m_watch_ctx->get_handle()))); | |
254 | ||
255 | - m_notify_acks = boost::assign::list_of( | |
256 | - std::make_pair(NOTIFY_OP_REQUEST_LOCK, create_response_message(0))); | |
257 | + m_notify_acks = {{NOTIFY_OP_REQUEST_LOCK, create_response_message(0)}}; | |
258 | ||
259 | int orig_notify_timeout = ictx->cct->_conf->client_notify_timeout; | |
260 | ictx->cct->_conf->set_val("client_notify_timeout", "0"); | |
261 | @@ -645,8 +638,7 @@ | |
262 | ASSERT_EQ(0, lock_image(*ictx, LOCK_EXCLUSIVE, | |
263 | "auto " + stringify(m_watch_ctx->get_handle()))); | |
264 | ||
265 | - m_notify_acks = boost::assign::list_of( | |
266 | - std::make_pair(NOTIFY_OP_REQUEST_LOCK, create_response_message(0))); | |
267 | + m_notify_acks = {{NOTIFY_OP_REQUEST_LOCK, create_response_message(0)}}; | |
268 | ||
269 | m_expected_aio_restarts = 1; | |
270 | { | |
271 | @@ -662,8 +654,7 @@ | |
272 | ASSERT_EQ(expected_notify_ops, m_notifies); | |
273 | ||
274 | m_notifies.clear(); | |
275 | - m_notify_acks = boost::assign::list_of( | |
276 | - std::make_pair(NOTIFY_OP_RELEASED_LOCK, bufferlist())); | |
277 | + m_notify_acks = {{NOTIFY_OP_RELEASED_LOCK, {}}}; | |
278 | ||
279 | bufferlist bl; | |
280 | { | |
281 | @@ -703,8 +694,7 @@ | |
282 | ASSERT_EQ(0, lock_image(*ictx, LOCK_EXCLUSIVE, | |
283 | "auto " + stringify(m_watch_ctx->get_handle()))); | |
284 | ||
285 | - m_notify_acks = boost::assign::list_of( | |
286 | - std::make_pair(NOTIFY_OP_REQUEST_LOCK, create_response_message(0))); | |
287 | + m_notify_acks = {{NOTIFY_OP_REQUEST_LOCK, create_response_message(0)}}; | |
288 | ||
289 | m_expected_aio_restarts = 1; | |
290 | { | |
291 | @@ -723,8 +713,7 @@ | |
292 | ASSERT_EQ(0, lock_image(*ictx, LOCK_SHARED, "manually 1234")); | |
293 | ||
294 | m_notifies.clear(); | |
295 | - m_notify_acks = boost::assign::list_of( | |
296 | - std::make_pair(NOTIFY_OP_RELEASED_LOCK, bufferlist())); | |
297 | + m_notify_acks = {{NOTIFY_OP_RELEASED_LOCK, {}}}; | |
298 | ||
299 | bufferlist bl; | |
300 | { | |
301 | @@ -744,8 +733,7 @@ | |
302 | ||
303 | ASSERT_EQ(0, register_image_watch(*ictx)); | |
304 | ||
305 | - m_notify_acks = boost::assign::list_of( | |
306 | - std::make_pair(NOTIFY_OP_HEADER_UPDATE, bufferlist())); | |
307 | + m_notify_acks = {{NOTIFY_OP_HEADER_UPDATE, {}}}; | |
308 | librbd::ImageWatcher::notify_header_update(m_ioctx, ictx->header_oid); | |
309 | ||
310 | ASSERT_TRUE(wait_for_notifies(*ictx)); | |
311 | @@ -765,8 +753,7 @@ | |
312 | ASSERT_EQ(0, lock_image(*ictx, LOCK_EXCLUSIVE, | |
313 | "auto " + stringify(m_watch_ctx->get_handle()))); | |
314 | ||
315 | - m_notify_acks = boost::assign::list_of( | |
316 | - std::make_pair(NOTIFY_OP_FLATTEN, create_response_message(0))); | |
317 | + m_notify_acks = {{NOTIFY_OP_FLATTEN, create_response_message(0)}}; | |
318 | ||
319 | ProgressContext progress_context; | |
320 | FlattenTask flatten_task(ictx, &progress_context); | |
321 | @@ -800,8 +787,7 @@ | |
322 | ASSERT_EQ(0, lock_image(*ictx, LOCK_EXCLUSIVE, | |
323 | "auto " + stringify(m_watch_ctx->get_handle()))); | |
324 | ||
325 | - m_notify_acks = boost::assign::list_of( | |
326 | - std::make_pair(NOTIFY_OP_RESIZE, create_response_message(0))); | |
327 | + m_notify_acks = {{NOTIFY_OP_RESIZE, create_response_message(0)}}; | |
328 | ||
329 | ProgressContext progress_context; | |
330 | ResizeTask resize_task(ictx, &progress_context); | |
331 | @@ -835,8 +821,7 @@ | |
332 | ASSERT_EQ(0, lock_image(*ictx, LOCK_EXCLUSIVE, | |
333 | "auto " + stringify(m_watch_ctx->get_handle()))); | |
334 | ||
335 | - m_notify_acks = boost::assign::list_of( | |
336 | - std::make_pair(NOTIFY_OP_SNAP_CREATE, create_response_message(0))); | |
337 | + m_notify_acks = {{NOTIFY_OP_SNAP_CREATE, create_response_message(0)}}; | |
338 | ||
339 | RWLock::RLocker l(ictx->owner_lock); | |
340 | ASSERT_EQ(0, ictx->image_watcher->notify_snap_create("snap")); | |
341 | @@ -856,8 +841,7 @@ | |
342 | ASSERT_EQ(0, lock_image(*ictx, LOCK_EXCLUSIVE, | |
343 | "auto " + stringify(m_watch_ctx->get_handle()))); | |
344 | ||
345 | - m_notify_acks = boost::assign::list_of( | |
346 | - std::make_pair(NOTIFY_OP_SNAP_CREATE, create_response_message(-EEXIST))); | |
347 | + m_notify_acks = {{NOTIFY_OP_SNAP_CREATE, create_response_message(-EEXIST)}}; | |
348 | ||
349 | RWLock::RLocker l(ictx->owner_lock); | |
350 | ASSERT_EQ(-EEXIST, ictx->image_watcher->notify_snap_create("snap")); | |
351 | @@ -877,8 +861,7 @@ | |
352 | ASSERT_EQ(0, lock_image(*ictx, LOCK_EXCLUSIVE, | |
353 | "auto " + stringify(m_watch_ctx->get_handle()))); | |
354 | ||
355 | - m_notify_acks = boost::assign::list_of( | |
356 | - std::make_pair(NOTIFY_OP_FLATTEN, bufferlist())); | |
357 | + m_notify_acks = {{NOTIFY_OP_FLATTEN, {}}}; | |
358 | ||
359 | ProgressContext progress_context; | |
360 | FlattenTask flatten_task(ictx, &progress_context); | |
361 | @@ -898,8 +881,7 @@ | |
362 | ASSERT_EQ(0, lock_image(*ictx, LOCK_EXCLUSIVE, | |
363 | "auto " + stringify(m_watch_ctx->get_handle()))); | |
364 | ||
365 | - m_notify_acks = boost::assign::list_of( | |
366 | - std::make_pair(NOTIFY_OP_FLATTEN, create_response_message(-EIO))); | |
367 | + m_notify_acks = {{NOTIFY_OP_FLATTEN, create_response_message(-EIO)}}; | |
368 | ||
369 | ProgressContext progress_context; | |
370 | FlattenTask flatten_task(ictx, &progress_context); | |
371 | @@ -919,8 +901,7 @@ | |
372 | ASSERT_EQ(0, lock_image(*ictx, LOCK_EXCLUSIVE, | |
373 | "auto " + stringify(m_watch_ctx->get_handle()))); | |
374 | ||
375 | - m_notify_acks = boost::assign::list_of( | |
376 | - std::make_pair(NOTIFY_OP_FLATTEN, create_response_message(0))); | |
377 | + m_notify_acks = {{NOTIFY_OP_FLATTEN, create_response_message(0)}}; | |
378 | ||
379 | ProgressContext progress_context; | |
380 | FlattenTask flatten_task(ictx, &progress_context); | |
381 | @@ -960,8 +941,7 @@ | |
382 | ASSERT_EQ(0, lock_image(*ictx, LOCK_EXCLUSIVE, | |
383 | "auto " + stringify(m_watch_ctx->get_handle()))); | |
384 | ||
385 | - m_notify_acks = boost::assign::list_of( | |
386 | - std::make_pair(NOTIFY_OP_FLATTEN, create_response_message(0))); | |
387 | + m_notify_acks = {{NOTIFY_OP_FLATTEN, create_response_message(0)}}; | |
388 | ||
389 | ProgressContext progress_context; | |
390 | FlattenTask flatten_task(ictx, &progress_context); |