1 From c13060e4787e9578dafad85a47c62457424bec9c Mon Sep 17 00:00:00 2001
2 From: Tim Schumacher <timschumi@gmx.de>
3 Date: Wed, 26 Sep 2018 19:29:26 +0200
4 Subject: [PATCH] Linux 4.19-rc3+ compat: Remove refcount_t compat
5
6 torvalds/linux@59b57717f ("blkcg: delay blkg destruction until
7 after writeback has finished") added a refcount_t to the blkcg
8 structure. Due to the refcount_t compatibility code, zfs_refcount_t
9 was used by mistake.
10
11 Resolve this by removing the compatibility code and replacing the
12 occurrences of refcount_t with zfs_refcount_t.
13
14 Reviewed-by: Franz Pletz <fpletz@fnordicwalking.de>
15 Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
16 Signed-off-by: Tim Schumacher <timschumi@gmx.de>
17 Closes #7885
18 Closes #7932
19 ---
20  cmd/ztest/ztest.c           |  6 ++---
21  include/linux/vfs_compat.h  |  5 ----
22  include/sys/abd.h           |  2 +-
23  include/sys/arc.h           |  2 +-
24  include/sys/arc_impl.h      |  8 +++---
25  include/sys/dbuf.h          |  2 +-
26  include/sys/dmu_tx.h        |  4 +--
27  include/sys/dnode.h         |  4 +--
28  include/sys/dsl_crypt.h     |  6 ++---
29  include/sys/dsl_dataset.h   |  2 +-
30  include/sys/metaslab_impl.h |  4 +--
31  include/sys/refcount.h      | 52 ++++++++++++++++---------------------
32  include/sys/rrwlock.h       |  4 +--
33  include/sys/sa_impl.h       |  2 +-
34  include/sys/spa_impl.h      |  6 ++---
35  include/sys/zap.h           |  2 +-
36  include/sys/zfs_znode.h     |  2 +-
37  module/zfs/arc.c            | 16 ++++++------
38  module/zfs/dbuf.c           | 10 +++----
39  module/zfs/dmu.c            |  2 +-
40  module/zfs/dmu_tx.c         |  6 ++---
41  module/zfs/dnode.c          |  6 ++---
42  module/zfs/dsl_crypt.c      | 12 ++++-----
43  module/zfs/dsl_dataset.c    |  2 +-
44  module/zfs/dsl_scan.c       |  6 ++---
45  module/zfs/metaslab.c       | 14 +++++-----
46  module/zfs/refcount.c       | 30 ++++++++++-----------
47  module/zfs/rrwlock.c        |  4 +--
48  module/zfs/sa.c             |  2 +-
49  module/zfs/spa_misc.c       |  8 +++---
50  module/zfs/zfs_ctldir.c     | 10 +++----
51  module/zfs/zfs_znode.c      |  2 +-
52  32 files changed, 116 insertions(+), 127 deletions(-)
53
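For context, the short userspace sketch below illustrates the collision described in the commit message above. It is not kernel or ZFS code; the struct layouts, the blkcg_demo name and the rc_count/rc_list fields are illustrative assumptions. The point is that once the old "#define refcount_t zfs_refcount_t" shim is in scope, any later declaration written against the kernel's refcount_t (as the 4.19 blkcg now is) silently picks up the ZFS type instead.

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the kernel's refcount_t from <linux/refcount.h>. */
typedef struct { int counter; } refcount_t;

static const size_t kernel_refcount_size = sizeof (refcount_t);

/* Stand-in for ZFS's tracked reference count (layout is illustrative only). */
typedef struct { long rc_count; void *rc_list; } zfs_refcount_t;

/*
 * The compat shim this patch removes.  From this point on the preprocessor
 * rewrites every later use of "refcount_t" to zfs_refcount_t.
 */
#define refcount_t zfs_refcount_t

/*
 * Stand-in for the 4.19 blkcg, which gained a refcount_t member.  With the
 * shim in effect the member is silently declared with the ZFS type instead
 * of the kernel one.
 */
struct blkcg_demo {
	refcount_t refcnt;
};

int
main(void)
{
	printf("intended member size %zu, size actually compiled in %zu\n",
	    kernel_refcount_size,
	    sizeof (((struct blkcg_demo *)0)->refcnt));
	return (0);
}

Dropping the shim and spelling out zfs_refcount_t at every ZFS call site, as the hunks below do, removes the possibility of that silent retyping.
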
54 diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c
55 index 83d057a74d9..ae25957dc13 100644
56 --- a/cmd/ztest/ztest.c
57 +++ b/cmd/ztest/ztest.c
58 @@ -1323,7 +1323,7 @@ ztest_dmu_objset_own(const char *name, dmu_objset_type_t type,
59   */
60  typedef struct {
61         list_node_t z_lnode;
62 -       refcount_t z_refcnt;
63 +       zfs_refcount_t z_refcnt;
64         uint64_t z_object;
65         zfs_rlock_t z_range_lock;
66  } ztest_znode_t;
67 @@ -1382,13 +1382,13 @@ ztest_znode_get(ztest_ds_t *zd, uint64_t object)
68         for (zp = list_head(&zll->z_list); (zp);
69             zp = list_next(&zll->z_list, zp)) {
70                 if (zp->z_object == object) {
71 -                       refcount_add(&zp->z_refcnt, RL_TAG);
72 +                       zfs_refcount_add(&zp->z_refcnt, RL_TAG);
73                         break;
74                 }
75         }
76         if (zp == NULL) {
77                 zp = ztest_znode_init(object);
78 -               refcount_add(&zp->z_refcnt, RL_TAG);
79 +               zfs_refcount_add(&zp->z_refcnt, RL_TAG);
80                 list_insert_head(&zll->z_list, zp);
81         }
82         mutex_exit(&zll->z_lock);
83 diff --git a/include/linux/vfs_compat.h b/include/linux/vfs_compat.h
84 index 90b3cca78c0..c01f5850881 100644
85 --- a/include/linux/vfs_compat.h
86 +++ b/include/linux/vfs_compat.h
87 @@ -297,9 +297,6 @@ lseek_execute(
88   * This is several orders of magnitude larger than expected grace period.
89   * At 60 seconds the kernel will also begin issuing RCU stall warnings.
90   */
91 -#ifdef refcount_t
92 -#undef refcount_t
93 -#endif
94  
95  #include <linux/posix_acl.h>
96  
97 @@ -430,8 +427,6 @@ typedef mode_t zpl_equivmode_t;
98  #define        zpl_posix_acl_valid(ip, acl)  posix_acl_valid(acl)
99  #endif
100  
101 -#define        refcount_t      zfs_refcount_t
102 -
103  #endif /* CONFIG_FS_POSIX_ACL */
104  
105  /*
106 diff --git a/include/sys/abd.h b/include/sys/abd.h
107 index 077bb9d1761..3d9fdbf102a 100644
108 --- a/include/sys/abd.h
109 +++ b/include/sys/abd.h
110 @@ -51,7 +51,7 @@ typedef struct abd {
111         abd_flags_t     abd_flags;
112         uint_t          abd_size;       /* excludes scattered abd_offset */
113         struct abd      *abd_parent;
114 -       refcount_t      abd_children;
115 +       zfs_refcount_t  abd_children;
116         union {
117                 struct abd_scatter {
118                         uint_t          abd_offset;
119 diff --git a/include/sys/arc.h b/include/sys/arc.h
120 index a5bdefb56f4..dc2fd03647f 100644
121 --- a/include/sys/arc.h
122 +++ b/include/sys/arc.h
123 @@ -87,7 +87,7 @@ struct arc_prune {
124         void                    *p_private;
125         uint64_t                p_adjust;
126         list_node_t             p_node;
127 -       refcount_t              p_refcnt;
128 +       zfs_refcount_t          p_refcnt;
129  };
130  
131  typedef enum arc_strategy {
132 diff --git a/include/sys/arc_impl.h b/include/sys/arc_impl.h
133 index 52863bba4ee..cd42c0c01a2 100644
134 --- a/include/sys/arc_impl.h
135 +++ b/include/sys/arc_impl.h
136 @@ -75,12 +75,12 @@ typedef struct arc_state {
137         /*
138          * total amount of evictable data in this state
139          */
140 -       refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
141 +       zfs_refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
142         /*
143          * total amount of data in this state; this includes: evictable,
144          * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
145          */
146 -       refcount_t arcs_size;
147 +       zfs_refcount_t arcs_size;
148         /*
149          * supports the "dbufs" kstat
150          */
151 @@ -168,7 +168,7 @@ typedef struct l1arc_buf_hdr {
152         uint32_t                b_l2_hits;
153  
154         /* self protecting */
155 -       refcount_t              b_refcnt;
156 +       zfs_refcount_t          b_refcnt;
157  
158         arc_callback_t          *b_acb;
159         abd_t                   *b_pabd;
160 @@ -215,7 +215,7 @@ typedef struct l2arc_dev {
161         kmutex_t                l2ad_mtx;       /* lock for buffer list */
162         list_t                  l2ad_buflist;   /* buffer list */
163         list_node_t             l2ad_node;      /* device list node */
164 -       refcount_t              l2ad_alloc;     /* allocated bytes */
165 +       zfs_refcount_t          l2ad_alloc;     /* allocated bytes */
166  } l2arc_dev_t;
167  
168  typedef struct l2arc_buf_hdr {
169 diff --git a/include/sys/dbuf.h b/include/sys/dbuf.h
170 index ab0950c83c2..eea9e265b00 100644
171 --- a/include/sys/dbuf.h
172 +++ b/include/sys/dbuf.h
173 @@ -230,7 +230,7 @@ typedef struct dmu_buf_impl {
174          * If nonzero, the buffer can't be destroyed.
175          * Protected by db_mtx.
176          */
177 -       refcount_t db_holds;
178 +       zfs_refcount_t db_holds;
179  
180         /* buffer holding our data */
181         arc_buf_t *db_buf;
182 diff --git a/include/sys/dmu_tx.h b/include/sys/dmu_tx.h
183 index 6a4bd3fac2c..36d205e9501 100644
184 --- a/include/sys/dmu_tx.h
185 +++ b/include/sys/dmu_tx.h
186 @@ -97,8 +97,8 @@ typedef struct dmu_tx_hold {
187         dmu_tx_t *txh_tx;
188         list_node_t txh_node;
189         struct dnode *txh_dnode;
190 -       refcount_t txh_space_towrite;
191 -       refcount_t txh_memory_tohold;
192 +       zfs_refcount_t txh_space_towrite;
193 +       zfs_refcount_t txh_memory_tohold;
194         enum dmu_tx_hold_type txh_type;
195         uint64_t txh_arg1;
196         uint64_t txh_arg2;
197 diff --git a/include/sys/dnode.h b/include/sys/dnode.h
198 index 0774e663f1b..48ef927d4ad 100644
199 --- a/include/sys/dnode.h
200 +++ b/include/sys/dnode.h
201 @@ -335,8 +335,8 @@ struct dnode {
202         uint8_t *dn_dirtyctx_firstset;          /* dbg: contents meaningless */
203  
204         /* protected by own devices */
205 -       refcount_t dn_tx_holds;
206 -       refcount_t dn_holds;
207 +       zfs_refcount_t dn_tx_holds;
208 +       zfs_refcount_t dn_holds;
209  
210         kmutex_t dn_dbufs_mtx;
211         /*
212 diff --git a/include/sys/dsl_crypt.h b/include/sys/dsl_crypt.h
213 index 8766ce51ea9..c6d2b0a16ac 100644
214 --- a/include/sys/dsl_crypt.h
215 +++ b/include/sys/dsl_crypt.h
216 @@ -62,7 +62,7 @@ typedef struct dsl_wrapping_key {
217         crypto_key_t wk_key;
218  
219         /* refcount of number of dsl_crypto_key_t's holding this struct */
220 -       refcount_t wk_refcnt;
221 +       zfs_refcount_t wk_refcnt;
222  
223         /* dsl directory object that owns this wrapping key */
224         uint64_t wk_ddobj;
225 @@ -112,7 +112,7 @@ typedef struct dsl_crypto_key {
226         avl_node_t dck_avl_link;
227  
228         /* refcount of dsl_key_mapping_t's holding this key */
229 -       refcount_t dck_holds;
230 +       zfs_refcount_t dck_holds;
231  
232         /* master key used to derive encryption keys */
233         zio_crypt_key_t dck_key;
234 @@ -134,7 +134,7 @@ typedef struct dsl_key_mapping {
235         avl_node_t km_avl_link;
236  
237         /* refcount of how many users are depending on this mapping */
238 -       refcount_t km_refcnt;
239 +       zfs_refcount_t km_refcnt;
240  
241         /* dataset this crypto key belongs to (index) */
242         uint64_t km_dsobj;
243 diff --git a/include/sys/dsl_dataset.h b/include/sys/dsl_dataset.h
244 index dbe4cb706a1..768241483a2 100644
245 --- a/include/sys/dsl_dataset.h
246 +++ b/include/sys/dsl_dataset.h
247 @@ -211,7 +211,7 @@ typedef struct dsl_dataset {
248          * Owning counts as a long hold.  See the comments above
249          * dsl_pool_hold() for details.
250          */
251 -       refcount_t ds_longholds;
252 +       zfs_refcount_t ds_longholds;
253  
254         /* no locking; only for making guesses */
255         uint64_t ds_trysnap_txg;
256 diff --git a/include/sys/metaslab_impl.h b/include/sys/metaslab_impl.h
257 index cc6e8b796d4..aa1c82a0258 100644
258 --- a/include/sys/metaslab_impl.h
259 +++ b/include/sys/metaslab_impl.h
260 @@ -184,7 +184,7 @@ struct metaslab_class {
261          * number of allocations allowed.
262          */
263         uint64_t                *mc_alloc_max_slots;
264 -       refcount_t              *mc_alloc_slots;
265 +       zfs_refcount_t          *mc_alloc_slots;
266  
267         uint64_t                mc_alloc_groups; /* # of allocatable groups */
268  
269 @@ -256,7 +256,7 @@ struct metaslab_group {
270          */
271         uint64_t                mg_max_alloc_queue_depth;
272         uint64_t                *mg_cur_max_alloc_queue_depth;
273 -       refcount_t              *mg_alloc_queue_depth;
274 +       zfs_refcount_t          *mg_alloc_queue_depth;
275         int                     mg_allocators;
276         /*
277          * A metalab group that can no longer allocate the minimum block
278 diff --git a/include/sys/refcount.h b/include/sys/refcount.h
279 index 02002ec2ffd..e20ffbc30f9 100644
280 --- a/include/sys/refcount.h
281 +++ b/include/sys/refcount.h
282 @@ -41,17 +41,6 @@ extern "C" {
283   */
284  #define        FTAG ((char *)(uintptr_t)__func__)
285  
286 -/*
287 - * Starting with 4.11, torvalds/linux@f405df5, the linux kernel defines a
288 - * refcount_t type of its own.  The macro below effectively changes references
289 - * in the ZFS code from refcount_t to zfs_refcount_t at compile time, so that
290 - * existing code need not be altered, reducing conflicts when landing openZFS
291 - * patches.
292 - */
293 -
294 -#define        refcount_t      zfs_refcount_t
295 -#define        refcount_add    zfs_refcount_add
296 -
297  #ifdef ZFS_DEBUG
298  typedef struct reference {
299         list_node_t ref_link;
300 @@ -69,23 +58,28 @@ typedef struct refcount {
301         uint64_t rc_removed_count;
302  } zfs_refcount_t;
303  
304 -/* Note: refcount_t must be initialized with refcount_create[_untracked]() */
305 -
306 -void refcount_create(refcount_t *rc);
307 -void refcount_create_untracked(refcount_t *rc);
308 -void refcount_create_tracked(refcount_t *rc);
309 -void refcount_destroy(refcount_t *rc);
310 -void refcount_destroy_many(refcount_t *rc, uint64_t number);
311 -int refcount_is_zero(refcount_t *rc);
312 -int64_t refcount_count(refcount_t *rc);
313 -int64_t zfs_refcount_add(refcount_t *rc, void *holder_tag);
314 -int64_t refcount_remove(refcount_t *rc, void *holder_tag);
315 -int64_t refcount_add_many(refcount_t *rc, uint64_t number, void *holder_tag);
316 -int64_t refcount_remove_many(refcount_t *rc, uint64_t number, void *holder_tag);
317 -void refcount_transfer(refcount_t *dst, refcount_t *src);
318 -void refcount_transfer_ownership(refcount_t *, void *, void *);
319 -boolean_t refcount_held(refcount_t *, void *);
320 -boolean_t refcount_not_held(refcount_t *, void *);
321 +/*
322 + * Note: zfs_refcount_t must be initialized with
323 + * refcount_create[_untracked]()
324 + */
325 +
326 +void refcount_create(zfs_refcount_t *rc);
327 +void refcount_create_untracked(zfs_refcount_t *rc);
328 +void refcount_create_tracked(zfs_refcount_t *rc);
329 +void refcount_destroy(zfs_refcount_t *rc);
330 +void refcount_destroy_many(zfs_refcount_t *rc, uint64_t number);
331 +int refcount_is_zero(zfs_refcount_t *rc);
332 +int64_t refcount_count(zfs_refcount_t *rc);
333 +int64_t zfs_refcount_add(zfs_refcount_t *rc, void *holder_tag);
334 +int64_t refcount_remove(zfs_refcount_t *rc, void *holder_tag);
335 +int64_t refcount_add_many(zfs_refcount_t *rc, uint64_t number,
336 +    void *holder_tag);
337 +int64_t refcount_remove_many(zfs_refcount_t *rc, uint64_t number,
338 +    void *holder_tag);
339 +void refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src);
340 +void refcount_transfer_ownership(zfs_refcount_t *, void *, void *);
341 +boolean_t refcount_held(zfs_refcount_t *, void *);
342 +boolean_t refcount_not_held(zfs_refcount_t *, void *);
343  
344  void refcount_init(void);
345  void refcount_fini(void);
346 @@ -94,7 +88,7 @@ void refcount_fini(void);
347  
348  typedef struct refcount {
349         uint64_t rc_count;
350 -} refcount_t;
351 +} zfs_refcount_t;
352  
353  #define        refcount_create(rc) ((rc)->rc_count = 0)
354  #define        refcount_create_untracked(rc) ((rc)->rc_count = 0)
355 diff --git a/include/sys/rrwlock.h b/include/sys/rrwlock.h
356 index 7a328fd6803..e1c1756cf29 100644
357 --- a/include/sys/rrwlock.h
358 +++ b/include/sys/rrwlock.h
359 @@ -57,8 +57,8 @@ typedef struct rrwlock {
360         kmutex_t        rr_lock;
361         kcondvar_t      rr_cv;
362         kthread_t       *rr_writer;
363 -       refcount_t      rr_anon_rcount;
364 -       refcount_t      rr_linked_rcount;
365 +       zfs_refcount_t  rr_anon_rcount;
366 +       zfs_refcount_t  rr_linked_rcount;
367         boolean_t       rr_writer_wanted;
368         boolean_t       rr_track_all;
369  } rrwlock_t;
370 diff --git a/include/sys/sa_impl.h b/include/sys/sa_impl.h
371 index b68b7610b25..7eddd8750fa 100644
372 --- a/include/sys/sa_impl.h
373 +++ b/include/sys/sa_impl.h
374 @@ -110,7 +110,7 @@ typedef struct sa_idx_tab {
375         list_node_t     sa_next;
376         sa_lot_t        *sa_layout;
377         uint16_t        *sa_variable_lengths;
378 -       refcount_t      sa_refcount;
379 +       zfs_refcount_t  sa_refcount;
380         uint32_t        *sa_idx_tab;    /* array of offsets */
381  } sa_idx_tab_t;
382  
383 diff --git a/include/sys/spa_impl.h b/include/sys/spa_impl.h
384 index 676e8b8a228..9dbdcfcf528 100644
385 --- a/include/sys/spa_impl.h
386 +++ b/include/sys/spa_impl.h
387 @@ -139,7 +139,7 @@ typedef struct spa_config_lock {
388         kthread_t       *scl_writer;
389         int             scl_write_wanted;
390         kcondvar_t      scl_cv;
391 -       refcount_t      scl_count;
392 +       zfs_refcount_t  scl_count;
393  } spa_config_lock_t;
394  
395  typedef struct spa_config_dirent {
396 @@ -387,12 +387,12 @@ struct spa {
397  
398         /*
399          * spa_refcount & spa_config_lock must be the last elements
400 -        * because refcount_t changes size based on compilation options.
401 +        * because zfs_refcount_t changes size based on compilation options.
402          * In order for the MDB module to function correctly, the other
403          * fields must remain in the same location.
404          */
405         spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */
406 -       refcount_t      spa_refcount;           /* number of opens */
407 +       zfs_refcount_t  spa_refcount;           /* number of opens */
408  
409         taskq_t         *spa_upgrade_taskq;     /* taskq for upgrade jobs */
410  };
411 diff --git a/include/sys/zap.h b/include/sys/zap.h
412 index 43b7fbd263c..7acc3becb5a 100644
413 --- a/include/sys/zap.h
414 +++ b/include/sys/zap.h
415 @@ -226,7 +226,7 @@ int zap_lookup_norm_by_dnode(dnode_t *dn, const char *name,
416      boolean_t *ncp);
417  
418  int zap_count_write_by_dnode(dnode_t *dn, const char *name,
419 -    int add, refcount_t *towrite, refcount_t *tooverwrite);
420 +    int add, zfs_refcount_t *towrite, zfs_refcount_t *tooverwrite);
421  
422  /*
423   * Create an attribute with the given name and value.
424 diff --git a/include/sys/zfs_znode.h b/include/sys/zfs_znode.h
425 index e82ac9941a2..01f4328f040 100644
426 --- a/include/sys/zfs_znode.h
427 +++ b/include/sys/zfs_znode.h
428 @@ -223,7 +223,7 @@ typedef struct znode_hold {
429         uint64_t        zh_obj;         /* object id */
430         kmutex_t        zh_lock;        /* lock serializing object access */
431         avl_node_t      zh_node;        /* avl tree linkage */
432 -       refcount_t      zh_refcount;    /* active consumer reference count */
433 +       zfs_refcount_t  zh_refcount;    /* active consumer reference count */
434  } znode_hold_t;
435  
436  static inline uint64_t
437 diff --git a/module/zfs/arc.c b/module/zfs/arc.c
438 index 5e53f987961..cd094f39ee7 100644
439 --- a/module/zfs/arc.c
440 +++ b/module/zfs/arc.c
441 @@ -2403,7 +2403,7 @@ add_reference(arc_buf_hdr_t *hdr, void *tag)
442  
443         state = hdr->b_l1hdr.b_state;
444  
445 -       if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
446 +       if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
447             (state != arc_anon)) {
448                 /* We don't use the L2-only state list. */
449                 if (state != arc_l2c_only) {
450 @@ -2997,7 +2997,7 @@ arc_return_buf(arc_buf_t *buf, void *tag)
451  
452         ASSERT3P(buf->b_data, !=, NULL);
453         ASSERT(HDR_HAS_L1HDR(hdr));
454 -       (void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
455 +       (void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
456         (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
457  
458         arc_loaned_bytes_update(-arc_buf_size(buf));
459 @@ -3011,7 +3011,7 @@ arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
460  
461         ASSERT3P(buf->b_data, !=, NULL);
462         ASSERT(HDR_HAS_L1HDR(hdr));
463 -       (void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
464 +       (void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
465         (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
466  
467         arc_loaned_bytes_update(arc_buf_size(buf));
468 @@ -3558,11 +3558,11 @@ arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
469         nhdr->b_l1hdr.b_pabd = hdr->b_l1hdr.b_pabd;
470  
471         /*
472 -        * This refcount_add() exists only to ensure that the individual
473 +        * This zfs_refcount_add() exists only to ensure that the individual
474          * arc buffers always point to a header that is referenced, avoiding
475          * a small race condition that could trigger ASSERTs.
476          */
477 -       (void) refcount_add(&nhdr->b_l1hdr.b_refcnt, FTAG);
478 +       (void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, FTAG);
479         nhdr->b_l1hdr.b_buf = hdr->b_l1hdr.b_buf;
480         for (buf = nhdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
481                 mutex_enter(&buf->b_evict_lock);
482 @@ -4313,7 +4313,7 @@ arc_prune_async(int64_t adjust)
483                 if (refcount_count(&ap->p_refcnt) >= 2)
484                         continue;
485  
486 -               refcount_add(&ap->p_refcnt, ap->p_pfunc);
487 +               zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
488                 ap->p_adjust = adjust;
489                 if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
490                     ap, TQ_SLEEP) == TASKQID_INVALID) {
491 @@ -6536,7 +6536,7 @@ arc_add_prune_callback(arc_prune_func_t *func, void *private)
492         refcount_create(&p->p_refcnt);
493  
494         mutex_enter(&arc_prune_mtx);
495 -       refcount_add(&p->p_refcnt, &arc_prune_list);
496 +       zfs_refcount_add(&p->p_refcnt, &arc_prune_list);
497         list_insert_head(&arc_prune_list, p);
498         mutex_exit(&arc_prune_mtx);
499  
500 @@ -6808,7 +6808,7 @@ arc_release(arc_buf_t *buf, void *tag)
501                 nhdr->b_l1hdr.b_mfu_hits = 0;
502                 nhdr->b_l1hdr.b_mfu_ghost_hits = 0;
503                 nhdr->b_l1hdr.b_l2_hits = 0;
504 -               (void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
505 +               (void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
506                 buf->b_hdr = nhdr;
507  
508                 mutex_exit(&buf->b_evict_lock);
509 diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
510 index f7376875afe..db7df602344 100644
511 --- a/module/zfs/dbuf.c
512 +++ b/module/zfs/dbuf.c
513 @@ -223,7 +223,7 @@ static boolean_t dbuf_evict_thread_exit;
514   */
515  typedef struct dbuf_cache {
516         multilist_t *cache;
517 -       refcount_t size;
518 +       zfs_refcount_t size;
519  } dbuf_cache_t;
520  dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
521  
522 @@ -2784,7 +2784,7 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
523  
524         ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
525             refcount_count(&dn->dn_holds) > 0);
526 -       (void) refcount_add(&dn->dn_holds, db);
527 +       (void) zfs_refcount_add(&dn->dn_holds, db);
528         atomic_inc_32(&dn->dn_dbufs_count);
529  
530         dprintf_dbuf(db, "db=%p\n", db);
531 @@ -3183,7 +3183,7 @@ dbuf_hold_impl_arg(struct dbuf_hold_arg *dh)
532                 }
533                 dh->dh_db->db_caching_status = DB_NO_CACHE;
534         }
535 -       (void) refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
536 +       (void) zfs_refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
537         DBUF_VERIFY(dh->dh_db);
538         mutex_exit(&dh->dh_db->db_mtx);
539  
540 @@ -3308,7 +3308,7 @@ dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
541  void
542  dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
543  {
544 -       int64_t holds = refcount_add(&db->db_holds, tag);
545 +       int64_t holds = zfs_refcount_add(&db->db_holds, tag);
546         VERIFY3S(holds, >, 1);
547  }
548  
549 @@ -3328,7 +3328,7 @@ dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
550  
551         if (found_db != NULL) {
552                 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
553 -                       (void) refcount_add(&db->db_holds, tag);
554 +                       (void) zfs_refcount_add(&db->db_holds, tag);
555                         result = B_TRUE;
556                 }
557                 mutex_exit(&found_db->db_mtx);
558 diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
559 index 8779eb3586c..2ff484b6347 100644
560 --- a/module/zfs/dmu.c
561 +++ b/module/zfs/dmu.c
562 @@ -360,7 +360,7 @@ dmu_bonus_hold_impl(objset_t *os, uint64_t object, void *tag, uint32_t flags,
563         db = dn->dn_bonus;
564  
565         /* as long as the bonus buf is held, the dnode will be held */
566 -       if (refcount_add(&db->db_holds, tag) == 1) {
567 +       if (zfs_refcount_add(&db->db_holds, tag) == 1) {
568                 VERIFY(dnode_add_ref(dn, db));
569                 atomic_inc_32(&dn->dn_dbufs_count);
570         }
571 diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
572 index c268f3c4046..3dc3f595884 100644
573 --- a/module/zfs/dmu_tx.c
574 +++ b/module/zfs/dmu_tx.c
575 @@ -114,7 +114,7 @@ dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
576         dmu_tx_hold_t *txh;
577  
578         if (dn != NULL) {
579 -               (void) refcount_add(&dn->dn_holds, tx);
580 +               (void) zfs_refcount_add(&dn->dn_holds, tx);
581                 if (tx->tx_txg != 0) {
582                         mutex_enter(&dn->dn_mtx);
583                         /*
584 @@ -124,7 +124,7 @@ dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
585                          */
586                         ASSERT(dn->dn_assigned_txg == 0);
587                         dn->dn_assigned_txg = tx->tx_txg;
588 -                       (void) refcount_add(&dn->dn_tx_holds, tx);
589 +                       (void) zfs_refcount_add(&dn->dn_tx_holds, tx);
590                         mutex_exit(&dn->dn_mtx);
591                 }
592         }
593 @@ -932,7 +932,7 @@ dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
594                         if (dn->dn_assigned_txg == 0)
595                                 dn->dn_assigned_txg = tx->tx_txg;
596                         ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
597 -                       (void) refcount_add(&dn->dn_tx_holds, tx);
598 +                       (void) zfs_refcount_add(&dn->dn_tx_holds, tx);
599                         mutex_exit(&dn->dn_mtx);
600                 }
601                 towrite += refcount_count(&txh->txh_space_towrite);
602 diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
603 index 4e2a733830b..b0b7ea7102b 100644
604 --- a/module/zfs/dnode.c
605 +++ b/module/zfs/dnode.c
606 @@ -1304,7 +1304,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
607                 if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
608                         return (SET_ERROR(EEXIST));
609                 DNODE_VERIFY(dn);
610 -               (void) refcount_add(&dn->dn_holds, tag);
611 +               (void) zfs_refcount_add(&dn->dn_holds, tag);
612                 *dnp = dn;
613                 return (0);
614         }
615 @@ -1527,7 +1527,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
616                     ENOENT : EEXIST));
617         }
618  
619 -       if (refcount_add(&dn->dn_holds, tag) == 1)
620 +       if (zfs_refcount_add(&dn->dn_holds, tag) == 1)
621                 dbuf_add_ref(db, dnh);
622  
623         mutex_exit(&dn->dn_mtx);
624 @@ -1567,7 +1567,7 @@ dnode_add_ref(dnode_t *dn, void *tag)
625                 mutex_exit(&dn->dn_mtx);
626                 return (FALSE);
627         }
628 -       VERIFY(1 < refcount_add(&dn->dn_holds, tag));
629 +       VERIFY(1 < zfs_refcount_add(&dn->dn_holds, tag));
630         mutex_exit(&dn->dn_mtx);
631         return (TRUE);
632  }
633 diff --git a/module/zfs/dsl_crypt.c b/module/zfs/dsl_crypt.c
634 index f0878c93477..6beb958c1f5 100644
635 --- a/module/zfs/dsl_crypt.c
636 +++ b/module/zfs/dsl_crypt.c
637 @@ -74,7 +74,7 @@
638  static void
639  dsl_wrapping_key_hold(dsl_wrapping_key_t *wkey, void *tag)
640  {
641 -       (void) refcount_add(&wkey->wk_refcnt, tag);
642 +       (void) zfs_refcount_add(&wkey->wk_refcnt, tag);
643  }
644  
645  static void
646 @@ -605,7 +605,7 @@ dsl_crypto_key_open(objset_t *mos, dsl_wrapping_key_t *wkey,
647         dsl_wrapping_key_hold(wkey, dck);
648         dck->dck_wkey = wkey;
649         dck->dck_obj = dckobj;
650 -       refcount_add(&dck->dck_holds, tag);
651 +       zfs_refcount_add(&dck->dck_holds, tag);
652  
653         *dck_out = dck;
654         return (0);
655 @@ -641,7 +641,7 @@ spa_keystore_dsl_key_hold_impl(spa_t *spa, uint64_t dckobj, void *tag,
656         }
657  
658         /* increment the refcount */
659 -       refcount_add(&found_dck->dck_holds, tag);
660 +       zfs_refcount_add(&found_dck->dck_holds, tag);
661  
662         *dck_out = found_dck;
663         return (0);
664 @@ -970,9 +970,9 @@ spa_keystore_create_mapping_impl(spa_t *spa, uint64_t dsobj,
665         found_km = avl_find(&spa->spa_keystore.sk_key_mappings, km, &where);
666         if (found_km != NULL) {
667                 should_free = B_TRUE;
668 -               refcount_add(&found_km->km_refcnt, tag);
669 +               zfs_refcount_add(&found_km->km_refcnt, tag);
670         } else {
671 -               refcount_add(&km->km_refcnt, tag);
672 +               zfs_refcount_add(&km->km_refcnt, tag);
673                 avl_insert(&spa->spa_keystore.sk_key_mappings, km, where);
674         }
675  
676 @@ -1072,7 +1072,7 @@ spa_keystore_lookup_key(spa_t *spa, uint64_t dsobj, void *tag,
677         }
678  
679         if (found_km && tag)
680 -               refcount_add(&found_km->km_key->dck_holds, tag);
681 +               zfs_refcount_add(&found_km->km_key->dck_holds, tag);
682  
683         rw_exit(&spa->spa_keystore.sk_km_lock);
684  
685 diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
686 index b6e3b9a5c7f..7546a0765c2 100644
687 --- a/module/zfs/dsl_dataset.c
688 +++ b/module/zfs/dsl_dataset.c
689 @@ -727,7 +727,7 @@ void
690  dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag)
691  {
692         ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
693 -       (void) refcount_add(&ds->ds_longholds, tag);
694 +       (void) zfs_refcount_add(&ds->ds_longholds, tag);
695  }
696  
697  void
698 diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c
699 index f3c869538ce..ee12185cdde 100644
700 --- a/module/zfs/dsl_scan.c
701 +++ b/module/zfs/dsl_scan.c
702 @@ -273,7 +273,7 @@ struct dsl_scan_io_queue {
703  
704  /* private data for dsl_scan_prefetch_cb() */
705  typedef struct scan_prefetch_ctx {
706 -       refcount_t spc_refcnt;          /* refcount for memory management */
707 +       zfs_refcount_t spc_refcnt;      /* refcount for memory management */
708         dsl_scan_t *spc_scn;            /* dsl_scan_t for the pool */
709         boolean_t spc_root;             /* is this prefetch for an objset? */
710         uint8_t spc_indblkshift;        /* dn_indblkshift of current dnode */
711 @@ -1327,7 +1327,7 @@ scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag)
712  
713         spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
714         refcount_create(&spc->spc_refcnt);
715 -       refcount_add(&spc->spc_refcnt, tag);
716 +       zfs_refcount_add(&spc->spc_refcnt, tag);
717         spc->spc_scn = scn;
718         if (dnp != NULL) {
719                 spc->spc_datablkszsec = dnp->dn_datablkszsec;
720 @@ -1345,7 +1345,7 @@ scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag)
721  static void
722  scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, void *tag)
723  {
724 -       refcount_add(&spc->spc_refcnt, tag);
725 +       zfs_refcount_add(&spc->spc_refcnt, tag);
726  }
727  
728  static boolean_t
729 diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
730 index ac361abb67e..f657128d040 100644
731 --- a/module/zfs/metaslab.c
732 +++ b/module/zfs/metaslab.c
733 @@ -247,7 +247,7 @@ metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
734         mc->mc_ops = ops;
735         mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
736         mc->mc_alloc_slots = kmem_zalloc(spa->spa_alloc_count *
737 -           sizeof (refcount_t), KM_SLEEP);
738 +           sizeof (zfs_refcount_t), KM_SLEEP);
739         mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
740             sizeof (uint64_t), KM_SLEEP);
741         for (int i = 0; i < spa->spa_alloc_count; i++)
742 @@ -268,7 +268,7 @@ metaslab_class_destroy(metaslab_class_t *mc)
743         for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
744                 refcount_destroy(&mc->mc_alloc_slots[i]);
745         kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
746 -           sizeof (refcount_t));
747 +           sizeof (zfs_refcount_t));
748         kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
749             sizeof (uint64_t));
750         mutex_destroy(&mc->mc_lock);
751 @@ -648,8 +648,8 @@ metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
752         mg->mg_no_free_space = B_TRUE;
753         mg->mg_allocators = allocators;
754  
755 -       mg->mg_alloc_queue_depth = kmem_zalloc(allocators * sizeof (refcount_t),
756 -           KM_SLEEP);
757 +       mg->mg_alloc_queue_depth = kmem_zalloc(allocators *
758 +           sizeof (zfs_refcount_t), KM_SLEEP);
759         mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
760             sizeof (uint64_t), KM_SLEEP);
761         for (int i = 0; i < allocators; i++) {
762 @@ -687,7 +687,7 @@ metaslab_group_destroy(metaslab_group_t *mg)
763                 mg->mg_cur_max_alloc_queue_depth[i] = 0;
764         }
765         kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
766 -           sizeof (refcount_t));
767 +           sizeof (zfs_refcount_t));
768         kmem_free(mg->mg_cur_max_alloc_queue_depth, mg->mg_allocators *
769             sizeof (uint64_t));
770  
771 @@ -2905,7 +2905,7 @@ metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
772         if (!mg->mg_class->mc_alloc_throttle_enabled)
773                 return;
774  
775 -       (void) refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
776 +       (void) zfs_refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
777  }
778  
779  static void
780 @@ -3852,7 +3852,7 @@ metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
781                  */
782                 for (int d = 0; d < slots; d++) {
783                         reserved_slots =
784 -                           refcount_add(&mc->mc_alloc_slots[allocator],
785 +                           zfs_refcount_add(&mc->mc_alloc_slots[allocator],
786                             zio);
787                 }
788                 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
789 diff --git a/module/zfs/refcount.c b/module/zfs/refcount.c
790 index a151aceaecf..13f9bb6b76e 100644
791 --- a/module/zfs/refcount.c
792 +++ b/module/zfs/refcount.c
793 @@ -55,7 +55,7 @@ refcount_fini(void)
794  }
795  
796  void
797 -refcount_create(refcount_t *rc)
798 +refcount_create(zfs_refcount_t *rc)
799  {
800         mutex_init(&rc->rc_mtx, NULL, MUTEX_DEFAULT, NULL);
801         list_create(&rc->rc_list, sizeof (reference_t),
802 @@ -68,21 +68,21 @@ refcount_create(refcount_t *rc)
803  }
804  
805  void
806 -refcount_create_tracked(refcount_t *rc)
807 +refcount_create_tracked(zfs_refcount_t *rc)
808  {
809         refcount_create(rc);
810         rc->rc_tracked = B_TRUE;
811  }
812  
813  void
814 -refcount_create_untracked(refcount_t *rc)
815 +refcount_create_untracked(zfs_refcount_t *rc)
816  {
817         refcount_create(rc);
818         rc->rc_tracked = B_FALSE;
819  }
820  
821  void
822 -refcount_destroy_many(refcount_t *rc, uint64_t number)
823 +refcount_destroy_many(zfs_refcount_t *rc, uint64_t number)
824  {
825         reference_t *ref;
826  
827 @@ -103,25 +103,25 @@ refcount_destroy_many(refcount_t *rc, uint64_t number)
828  }
829  
830  void
831 -refcount_destroy(refcount_t *rc)
832 +refcount_destroy(zfs_refcount_t *rc)
833  {
834         refcount_destroy_many(rc, 0);
835  }
836  
837  int
838 -refcount_is_zero(refcount_t *rc)
839 +refcount_is_zero(zfs_refcount_t *rc)
840  {
841         return (rc->rc_count == 0);
842  }
843  
844  int64_t
845 -refcount_count(refcount_t *rc)
846 +refcount_count(zfs_refcount_t *rc)
847  {
848         return (rc->rc_count);
849  }
850  
851  int64_t
852 -refcount_add_many(refcount_t *rc, uint64_t number, void *holder)
853 +refcount_add_many(zfs_refcount_t *rc, uint64_t number, void *holder)
854  {
855         reference_t *ref = NULL;
856         int64_t count;
857 @@ -143,13 +143,13 @@ refcount_add_many(refcount_t *rc, uint64_t number, void *holder)
858  }
859  
860  int64_t
861 -zfs_refcount_add(refcount_t *rc, void *holder)
862 +zfs_refcount_add(zfs_refcount_t *rc, void *holder)
863  {
864         return (refcount_add_many(rc, 1, holder));
865  }
866  
867  int64_t
868 -refcount_remove_many(refcount_t *rc, uint64_t number, void *holder)
869 +refcount_remove_many(zfs_refcount_t *rc, uint64_t number, void *holder)
870  {
871         reference_t *ref;
872         int64_t count;
873 @@ -197,13 +197,13 @@ refcount_remove_many(refcount_t *rc, uint64_t number, void *holder)
874  }
875  
876  int64_t
877 -refcount_remove(refcount_t *rc, void *holder)
878 +refcount_remove(zfs_refcount_t *rc, void *holder)
879  {
880         return (refcount_remove_many(rc, 1, holder));
881  }
882  
883  void
884 -refcount_transfer(refcount_t *dst, refcount_t *src)
885 +refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
886  {
887         int64_t count, removed_count;
888         list_t list, removed;
889 @@ -234,7 +234,7 @@ refcount_transfer(refcount_t *dst, refcount_t *src)
890  }
891  
892  void
893 -refcount_transfer_ownership(refcount_t *rc, void *current_holder,
894 +refcount_transfer_ownership(zfs_refcount_t *rc, void *current_holder,
895      void *new_holder)
896  {
897         reference_t *ref;
898 @@ -264,7 +264,7 @@ refcount_transfer_ownership(refcount_t *rc, void *current_holder,
899   * might be held.
900   */
901  boolean_t
902 -refcount_held(refcount_t *rc, void *holder)
903 +refcount_held(zfs_refcount_t *rc, void *holder)
904  {
905         reference_t *ref;
906  
907 @@ -292,7 +292,7 @@ refcount_held(refcount_t *rc, void *holder)
908   * since the reference might not be held.
909   */
910  boolean_t
911 -refcount_not_held(refcount_t *rc, void *holder)
912 +refcount_not_held(zfs_refcount_t *rc, void *holder)
913  {
914         reference_t *ref;
915  
916 diff --git a/module/zfs/rrwlock.c b/module/zfs/rrwlock.c
917 index 704f76067bf..effff330522 100644
918 --- a/module/zfs/rrwlock.c
919 +++ b/module/zfs/rrwlock.c
920 @@ -183,9 +183,9 @@ rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
921         if (rrl->rr_writer_wanted || rrl->rr_track_all) {
922                 /* may or may not be a re-entrant enter */
923                 rrn_add(rrl, tag);
924 -               (void) refcount_add(&rrl->rr_linked_rcount, tag);
925 +               (void) zfs_refcount_add(&rrl->rr_linked_rcount, tag);
926         } else {
927 -               (void) refcount_add(&rrl->rr_anon_rcount, tag);
928 +               (void) zfs_refcount_add(&rrl->rr_anon_rcount, tag);
929         }
930         ASSERT(rrl->rr_writer == NULL);
931         mutex_exit(&rrl->rr_lock);
932 diff --git a/module/zfs/sa.c b/module/zfs/sa.c
933 index caa91bc4c4e..0856a4b8ff7 100644
934 --- a/module/zfs/sa.c
935 +++ b/module/zfs/sa.c
936 @@ -1347,7 +1347,7 @@ sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab)
937         ASSERTV(sa_os_t *sa = os->os_sa);
938  
939         ASSERT(MUTEX_HELD(&sa->sa_lock));
940 -       (void) refcount_add(&idx_tab->sa_refcount, NULL);
941 +       (void) zfs_refcount_add(&idx_tab->sa_refcount, NULL);
942  }
943  
944  void
945 diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
946 index 343b01dd6aa..c19f48ac526 100644
947 --- a/module/zfs/spa_misc.c
948 +++ b/module/zfs/spa_misc.c
949 @@ -81,7 +81,7 @@
950   *     definition they must have an existing reference, and will never need
951   *     to lookup a spa_t by name.
952   *
953 - * spa_refcount (per-spa refcount_t protected by mutex)
954 + * spa_refcount (per-spa zfs_refcount_t protected by mutex)
955   *
956   *     This reference count keep track of any active users of the spa_t.  The
957   *     spa_t cannot be destroyed or freed while this is non-zero.  Internally,
958 @@ -478,7 +478,7 @@ spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
959                         }
960                         scl->scl_writer = curthread;
961                 }
962 -               (void) refcount_add(&scl->scl_count, tag);
963 +               (void) zfs_refcount_add(&scl->scl_count, tag);
964                 mutex_exit(&scl->scl_lock);
965         }
966         return (1);
967 @@ -511,7 +511,7 @@ spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
968                         }
969                         scl->scl_writer = curthread;
970                 }
971 -               (void) refcount_add(&scl->scl_count, tag);
972 +               (void) zfs_refcount_add(&scl->scl_count, tag);
973                 mutex_exit(&scl->scl_lock);
974         }
975         ASSERT3U(wlocks_held, <=, locks);
976 @@ -841,7 +841,7 @@ spa_open_ref(spa_t *spa, void *tag)
977  {
978         ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
979             MUTEX_HELD(&spa_namespace_lock));
980 -       (void) refcount_add(&spa->spa_refcount, tag);
981 +       (void) zfs_refcount_add(&spa->spa_refcount, tag);
982  }
983  
984  /*
985 diff --git a/module/zfs/zfs_ctldir.c b/module/zfs/zfs_ctldir.c
986 index f5cfdb55d79..6a7b7bbb5e7 100644
987 --- a/module/zfs/zfs_ctldir.c
988 +++ b/module/zfs/zfs_ctldir.c
989 @@ -117,7 +117,7 @@ typedef struct {
990         taskqid_t       se_taskqid;     /* scheduled unmount taskqid */
991         avl_node_t      se_node_name;   /* zfs_snapshots_by_name link */
992         avl_node_t      se_node_objsetid; /* zfs_snapshots_by_objsetid link */
993 -       refcount_t      se_refcount;    /* reference count */
994 +       zfs_refcount_t  se_refcount;    /* reference count */
995  } zfs_snapentry_t;
996  
997  static void zfsctl_snapshot_unmount_delay_impl(zfs_snapentry_t *se, int delay);
998 @@ -166,7 +166,7 @@ zfsctl_snapshot_free(zfs_snapentry_t *se)
999  static void
1000  zfsctl_snapshot_hold(zfs_snapentry_t *se)
1001  {
1002 -       refcount_add(&se->se_refcount, NULL);
1003 +       zfs_refcount_add(&se->se_refcount, NULL);
1004  }
1005  
1006  /*
1007 @@ -189,7 +189,7 @@ static void
1008  zfsctl_snapshot_add(zfs_snapentry_t *se)
1009  {
1010         ASSERT(RW_WRITE_HELD(&zfs_snapshot_lock));
1011 -       refcount_add(&se->se_refcount, NULL);
1012 +       zfs_refcount_add(&se->se_refcount, NULL);
1013         avl_add(&zfs_snapshots_by_name, se);
1014         avl_add(&zfs_snapshots_by_objsetid, se);
1015  }
1016 @@ -266,7 +266,7 @@ zfsctl_snapshot_find_by_name(char *snapname)
1017         search.se_name = snapname;
1018         se = avl_find(&zfs_snapshots_by_name, &search, NULL);
1019         if (se)
1020 -               refcount_add(&se->se_refcount, NULL);
1021 +               zfs_refcount_add(&se->se_refcount, NULL);
1022  
1023         return (se);
1024  }
1025 @@ -287,7 +287,7 @@ zfsctl_snapshot_find_by_objsetid(spa_t *spa, uint64_t objsetid)
1026         search.se_objsetid = objsetid;
1027         se = avl_find(&zfs_snapshots_by_objsetid, &search, NULL);
1028         if (se)
1029 -               refcount_add(&se->se_refcount, NULL);
1030 +               zfs_refcount_add(&se->se_refcount, NULL);
1031  
1032         return (se);
1033  }
1034 diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
1035 index f037d4c967c..a4991476289 100644
1036 --- a/module/zfs/zfs_znode.c
1037 +++ b/module/zfs/zfs_znode.c
1038 @@ -264,7 +264,7 @@ zfs_znode_hold_enter(zfsvfs_t *zfsvfs, uint64_t obj)
1039                 ASSERT3U(zh->zh_obj, ==, obj);
1040                 found = B_TRUE;
1041         }
1042 -       refcount_add(&zh->zh_refcount, NULL);
1043 +       zfs_refcount_add(&zh->zh_refcount, NULL);
1044         mutex_exit(&zfsvfs->z_hold_locks[i]);
1045  
1046         if (found == B_TRUE)